class ov::ISyncInferRequest¶
Overview¶
Interface for synchronous infer request. More…
#include <isync_infer_request.hpp>
class ISyncInferRequest: public ov::IInferRequest
{
public:
// structs
struct FoundPort;
// construction
ISyncInferRequest(const std::shared_ptr<const ov::ICompiledModel>& compiled_model);
// methods
virtual ov::SoPtr<ov::ITensor> get_tensor(const ov::Output<const ov::Node>& port) const;
virtual void set_tensor(
    const ov::Output<const ov::Node>& port,
    const ov::SoPtr<ov::ITensor>& tensor
);
virtual std::vector<ov::SoPtr<ov::ITensor>> get_tensors(const ov::Output<const ov::Node>& port) const;
virtual void set_tensors(
    const ov::Output<const ov::Node>& port,
    const std::vector<ov::SoPtr<ov::ITensor>>& tensors
);
virtual const std::vector<ov::Output<const ov::Node>>& get_inputs() const;
virtual const std::vector<ov::Output<const ov::Node>>& get_outputs() const;
virtual const std::shared_ptr<const ov::ICompiledModel>& get_compiled_model() const;
};
Inherited Members¶
public:
// methods
virtual void infer() = 0;
virtual std::vector<ov::ProfilingInfo> get_profiling_info() const = 0;
virtual ov::SoPtr<ov::ITensor> get_tensor(const ov::Output<const ov::Node>& port) const = 0;
virtual void set_tensor(
    const ov::Output<const ov::Node>& port,
    const ov::SoPtr<ov::ITensor>& tensor
) = 0;
virtual std::vector<ov::SoPtr<ov::ITensor>> get_tensors(const ov::Output<const ov::Node>& port) const = 0;
virtual void set_tensors(
    const ov::Output<const ov::Node>& port,
    const std::vector<ov::SoPtr<ov::ITensor>>& tensors
) = 0;
virtual std::vector<ov::SoPtr<ov::IVariableState>> query_state() const = 0;
virtual const std::shared_ptr<const ov::ICompiledModel>& get_compiled_model() const = 0;
virtual const std::vector<ov::Output<const ov::Node>>& get_inputs() const = 0;
virtual const std::vector<ov::Output<const ov::Node>>& get_outputs() const = 0;
Detailed Documentation¶
Interface for synchronous infer request.
Construction¶
ISyncInferRequest(const std::shared_ptr<const ov::ICompiledModel>& compiled_model)
Constructs synchronous inference request.
Parameters:
compiled_model |
pointer to compiled model |
Methods¶
virtual ov::SoPtr<ov::ITensor> get_tensor(const ov::Output<const ov::Node>& port) const
Gets an input/output tensor for inference.
If the tensor with the specified port is not found, an exception is thrown.
Parameters:
port |
Port of the tensor to get. |
Returns:
Tensor for the specified port.
virtual void set_tensor(
    const ov::Output<const ov::Node>& port,
    const ov::SoPtr<ov::ITensor>& tensor
)
Sets an input/output tensor to infer.
Parameters:
port |
Port of the input or output tensor. |
tensor |
Reference to a tensor. The element_type and shape of a tensor must match the model’s input/output element_type and size. |
virtual std::vector<ov::SoPtr<ov::ITensor>> get_tensors(const ov::Output<const ov::Node>& port) const
Gets a batch of tensors for input data to infer by input port. Model input must have batch dimension, and the number of tensors
must match the batch size. The current version supports setting tensors to model inputs only. If port
is associated with output (or any other non-input node), an exception is thrown.
Parameters:
port |
Port of the input tensor. |
tensors |
Input tensors for batched infer request. The type of each tensor must match the model input element type and shape (except batch dimension). Total size of tensors must match the input size. |
Returns:
vector of tensors
virtual void set_tensors(
    const ov::Output<const ov::Node>& port,
    const std::vector<ov::SoPtr<ov::ITensor>>& tensors
)
Sets a batch of tensors for input data to infer by input port. Model input must have batch dimension, and the number of tensors
must match the batch size. The current version supports setting tensors to model inputs only. If port
is associated with output (or any other non-input node), an exception is thrown.
Parameters:
port |
Port of the input tensor. |
tensors |
Input tensors for batched infer request. The type of each tensor must match the model input element type and shape (except batch dimension). Total size of tensors must match the input size. |
virtual const std::vector<ov::Output<const ov::Node>>& get_inputs() const
Gets inputs for infer request.
Returns:
vector of input ports
virtual const std::vector<ov::Output<const ov::Node>>& get_outputs() const
Gets outputs for infer request.
Returns:
vector of output ports
virtual const std::shared_ptr<const ov::ICompiledModel>& get_compiled_model() const
Gets pointer to compiled model (usually synchronous request holds the compiled model)
Returns:
Pointer to the compiled model