Global Namespace

Overview

// namespaces

namespace ExecGraphInfoSerialization;
namespace FileUtils;
namespace InferenceEngine;
    namespace InferenceEngine::CPUConfigParams;
    namespace InferenceEngine::GNAConfigParams;
    namespace InferenceEngine::GPUConfigParams;
    namespace InferenceEngine::GPUContextParams;
    namespace InferenceEngine::HeteroConfigParams;
    namespace InferenceEngine::InferRequest;
    namespace InferenceEngine::Metrics;
    namespace InferenceEngine::MultiDeviceConfigParams;
    namespace InferenceEngine::PluginConfigInternalParams;
    namespace InferenceEngine::PluginConfigParams;
    namespace InferenceEngine::PrecisionUtils;
        namespace InferenceEngine::PrecisionUtils::details;
    namespace InferenceEngine::gpu;
    namespace InferenceEngine::itt;
        namespace InferenceEngine::itt::domains;
namespace ngraph;
    namespace ngraph::builder;
        namespace ngraph::builder::opset1;
    namespace ngraph::coordinates;
    namespace ngraph::descriptor;
        namespace ngraph::descriptor::layout;
    namespace ngraph::element;
    namespace ngraph::file_util;
    namespace ngraph::op;
        namespace ngraph::op::util;
            namespace ngraph::op::util::detail;
            namespace ngraph::op::util::error;
        namespace ngraph::op::v0;
        namespace ngraph::op::v1;
        namespace ngraph::op::v10;
        namespace ngraph::op::v11;
        namespace ngraph::op::v12;
        namespace ngraph::op::v3;
        namespace ngraph::op::v4;
        namespace ngraph::op::v5;
        namespace ngraph::op::v6;
        namespace ngraph::op::v7;
        namespace ngraph::op::v8;
        namespace ngraph::op::v9;
    namespace ngraph::opset1;
    namespace ngraph::opset10;
    namespace ngraph::opset11;
    namespace ngraph::opset2;
    namespace ngraph::opset3;
    namespace ngraph::opset4;
    namespace ngraph::opset5;
    namespace ngraph::opset6;
    namespace ngraph::opset7;
    namespace ngraph::opset8;
    namespace ngraph::opset9;
    namespace ngraph::pass;
        namespace ngraph::pass::low_precision;
            namespace ngraph::pass::low_precision::itt;
                namespace ngraph::pass::low_precision::itt::domains;
            namespace ngraph::pass::low_precision::precision_set;
    namespace ngraph::pattern;
        namespace ngraph::pattern::op;
    namespace ngraph::reduction;
    namespace ngraph::runtime;
        namespace ngraph::runtime::opt_kernel;
        namespace ngraph::runtime::reference;
            namespace ngraph::runtime::reference::adaptive_pool;
            namespace ngraph::runtime::reference::detail;
            namespace ngraph::runtime::reference::details;
            namespace ngraph::runtime::reference::fake_quantize_details;
            namespace ngraph::runtime::reference::fft_common;
            namespace ngraph::runtime::reference::internal;
            namespace ngraph::runtime::reference::interpolate_pil;
            namespace ngraph::runtime::reference::kernel;
            namespace ngraph::runtime::reference::nms_common;
namespace openvino;
    namespace openvino::cc;
        namespace openvino::cc::internal;
    namespace openvino::itt;
namespace opset9;
namespace ov;
    namespace ov::batch_util;
    namespace ov::cmp;
    namespace ov::descriptor;
    namespace ov::detail;
    namespace ov::device;
        namespace ov::device::capability;
    namespace ov::element;
    namespace ov::exec_model_info;
    namespace ov::frontend;
        namespace ov::frontend::tensorflow;
        namespace ov::frontend::type;
    namespace ov::helpers;
    namespace ov::hint;
    namespace ov::intel_auto;
    namespace ov::intel_cpu;
    namespace ov::intel_gna;
    namespace ov::intel_gpu;
        namespace ov::intel_gpu::capability;
        namespace ov::intel_gpu::hint;
        namespace ov::intel_gpu::memory_type;
        namespace ov::intel_gpu::ocl;
    namespace ov::internal;
    namespace ov::layout;
    namespace ov::log;
    namespace ov::op;
        namespace ov::op::ShapeInferRange;
        namespace ov::op::convolution;
            namespace ov::op::convolution::validate;
        namespace ov::op::deformable_conv;
            namespace ov::op::deformable_conv::validate;
        namespace ov::op::detectron;
            namespace ov::op::detectron::validate;
        namespace ov::op::eye;
        namespace ov::op::gather_nd;
        namespace ov::op::internal;
        namespace ov::op::interpolate;
            namespace ov::op::interpolate::validate;
        namespace ov::op::multiclass_nms;
            namespace ov::op::multiclass_nms::validate;
        namespace ov::op::nms;
            namespace ov::op::nms::validate;
        namespace ov::op::pooling;
            namespace ov::op::pooling::validate;
        namespace ov::op::prior_box;
            namespace ov::op::prior_box::validate;
        namespace ov::op::proposal;
        namespace ov::op::psroi_pooling;
            namespace ov::op::psroi_pooling::validate;
        namespace ov::op::rnn;
        namespace ov::op::roi_align;
            namespace ov::op::roi_align::validate;
        namespace ov::op::roi_pooling;
            namespace ov::op::roi_pooling::validate;
        namespace ov::op::shape_of;
        namespace ov::op::slice;
        namespace ov::op::util;
            namespace ov::op::util::detail;
            namespace ov::op::util::embedding;
            namespace ov::op::util::error;
            namespace ov::op::util::rfft_common_validation;
        namespace ov::op::v0;
            namespace ov::op::v0::lstm_cell;
        namespace ov::op::v1;
        namespace ov::op::v10;
        namespace ov::op::v11;
        namespace ov::op::v12;
        namespace ov::op::v3;
        namespace ov::op::v4;
            namespace ov::op::v4::ctc_loss;
            namespace ov::op::v4::lstm_cell;
        namespace ov::op::v5;
        namespace ov::op::v6;
        namespace ov::op::v7;
        namespace ov::op::v8;
        namespace ov::op::v9;
        namespace ov::op::validate;
    namespace ov::opset1;
    namespace ov::opset10;
    namespace ov::opset11;
    namespace ov::opset12;
    namespace ov::opset2;
    namespace ov::opset3;
    namespace ov::opset4;
    namespace ov::opset5;
    namespace ov::opset6;
    namespace ov::opset7;
    namespace ov::opset8;
    namespace ov::opset9;
    namespace ov::pass;
        namespace ov::pass::pattern;
            namespace ov::pass::pattern::op;
        namespace ov::pass::transpose_sinking;
            namespace ov::pass::transpose_sinking::utils;
                namespace ov::pass::transpose_sinking::utils::sink_backward;
                namespace ov::pass::transpose_sinking::utils::sink_forward;
    namespace ov::preprocess;
    namespace ov::proxy;
    namespace ov::reference;
    namespace ov::runtime;
    namespace ov::streams;
    namespace ov::threading;
    namespace ov::util;
        namespace ov::util::dim;
namespace pass;
    namespace pass::low_precision;
namespace pugixml;
    namespace pugixml::utils;
namespace std;
namespace util;

// typedefs

typedef struct ov_compiled_model ov_compiled_model_t;
typedef struct ov_core ov_core_t;
typedef struct ov_version ov_version_t;
typedef struct ov_dimension ov_dimension_t;
typedef struct ov_infer_request ov_infer_request_t;
typedef struct ov_layout ov_layout_t;
typedef struct ov_model ov_model_t;
typedef struct ov_output_const_port ov_output_const_port_t;
typedef struct ov_output_port ov_output_port_t;
typedef struct ov_partial_shape ov_partial_shape_t;
typedef struct ov_preprocess_prepostprocessor ov_preprocess_prepostprocessor_t;
typedef struct ov_preprocess_input_info ov_preprocess_input_info_t;
typedef struct ov_preprocess_input_tensor_info ov_preprocess_input_tensor_info_t;
typedef struct ov_preprocess_output_info ov_preprocess_output_info_t;
typedef struct ov_preprocess_output_tensor_info ov_preprocess_output_tensor_info_t;
typedef struct ov_preprocess_input_model_info ov_preprocess_input_model_info_t;
typedef struct ov_preprocess_preprocess_steps ov_preprocess_preprocess_steps_t;
typedef ov_dimension_t ov_rank_t;
typedef struct ov_remote_context ov_remote_context_t;
typedef struct ov_tensor ov_tensor_t;
typedef std::unordered_map<std::shared_ptr<ov::opset1::Parameter>, std::unordered_set<ov::label_t>> P2Btype;
typedef std::unordered_map<ov::element::Type_t, ov::element::Type, EnumClassHash> precisions_map;
typedef std::unordered_map<ov::NodeTypeInfo, std::function<bool(const std::shared_ptr<ov::Node>&, const precisions_map&)>> type_to_fuse_map;
typedef std::unordered_map<ov::Node::type_info_t, std::function<void(const ov::Node&, std::ostream& ss)>> visualize_tree_ops_map_t;

// enums

enum AttributeSource;
enum ov_color_format_e;
enum ov_element_type_e;
enum ov_preprocess_resize_algorithm_e;
enum ov_status_e;

// structs

struct EnumClassHash;
struct ov_ProfilingInfo_t;
struct ov_available_devices_t;
struct ov_callback_t;
struct ov_compiled_model_t;
struct ov_core_t;
struct ov_core_version;
struct ov_core_version_list;
struct ov_core_version_list_t;
struct ov_core_version_t;
struct ov_dimension;
struct ov_infer_request_t;
struct ov_layout_t;
struct ov_model_t;
struct ov_output_const_port_t;
struct ov_output_port_t;
struct ov_partial_shape;
struct ov_preprocess_input_info_t;
struct ov_preprocess_input_model_info_t;
struct ov_preprocess_input_tensor_info_t;
struct ov_preprocess_output_info_t;
struct ov_preprocess_output_tensor_info_t;
struct ov_preprocess_prepostprocessor_t;
struct ov_preprocess_preprocess_steps_t;
struct ov_profiling_info_list_t;
struct ov_profiling_info_t;
struct ov_rank_t;
struct ov_shape_t;
struct ov_tensor_t;
struct ov_version;
struct parse_result;

// templates

template IExecutableNetworkInternal;
template IInferRequestInternal;
template IInferencePlugin;
template IVariableStateInternal;

// classes

class AttributeParameters;
template <InferenceEngine::Precision::ePrecision precision>
class BlobFactory;
class ConvertReduceBase;
class CvtReduceBase;
class MemorySolver;
template <class T>
class SharedAttribute;
class ngraph;

// global variables

const char* ov_property_key_intel_auto_enable_startup_fallback;
const char* ov_property_key_intel_auto_enable_runtime_fallback;
const char* ov_property_key_supported_properties;
const char* ov_property_key_available_devices;
const char* ov_property_key_optimal_number_of_infer_requests;
const char* ov_property_key_range_for_async_infer_requests;
const char* ov_property_key_range_for_streams;
const char* ov_property_key_device_full_name;
const char* ov_property_key_device_capabilities;
const char* ov_property_key_model_name;
const char* ov_property_key_optimal_batch_size;
const char* ov_property_key_max_batch_size;
const char* ov_property_key_cache_dir;
const char* ov_property_key_num_streams;
const char* ov_property_key_affinity;
const char* ov_property_key_inference_num_threads;
const char* ov_property_key_hint_enable_cpu_pinning;
const char* ov_property_key_hint_enable_hyper_threading;
const char* ov_property_key_hint_performance_mode;
const char* ov_property_key_hint_scheduling_core_type;
const char* ov_property_key_hint_inference_precision;
const char* ov_property_key_hint_num_requests;
const char* ov_property_key_log_level;
const char* ov_property_key_hint_model_priority;
const char* ov_property_key_enable_profiling;
const char* ov_property_key_device_priorities;
const char* ov_property_key_hint_execution_mode;
const char* ov_property_key_force_tbb_terminate;
const char* ov_property_key_enable_mmap;
const char* ov_property_key_auto_batch_timeout;
std::vector<int64_t> strides;
std::vector<int64_t> dilation;
std::vector<int64_t> pads_begin;
std::vector<int64_t> pads_end;
std::vector<int64_t> output_padding;
NGRAPH_EXTERN_C NGRAPH_API const char* NGRAPH_VERSION_NUMBER;

// global functions

OPENVINO_C_VAR(const char*);
const char* ov_get_error_info(ov_status_e status);
void ov_free(const char* content);

ov_compiled_model_inputs_size(
    const ov_compiled_model_t \* compiled_model,
    size_t \* size
    );

ov_compiled_model_input(
    const ov_compiled_model_t \* compiled_model,
    ov_output_const_port_t \*\* input_port
    );

ov_compiled_model_input_by_index(
    const ov_compiled_model_t \* compiled_model,
    const size_t index,
    ov_output_const_port_t \*\* input_port
    );

ov_compiled_model_input_by_name(
    const ov_compiled_model_t \* compiled_model,
    const char \* name,
    ov_output_const_port_t \*\* input_port
    );

ov_compiled_model_outputs_size(
    const ov_compiled_model_t \* compiled_model,
    size_t \* size
    );

ov_compiled_model_output(
    const ov_compiled_model_t \* compiled_model,
    ov_output_const_port_t \*\* output_port
    );

ov_compiled_model_output_by_index(
    const ov_compiled_model_t \* compiled_model,
    const size_t index,
    ov_output_const_port_t \*\* output_port
    );

ov_compiled_model_output_by_name(
    const ov_compiled_model_t \* compiled_model,
    const char \* name,
    ov_output_const_port_t \*\* output_port
    );

ov_compiled_model_get_runtime_model(
    const ov_compiled_model_t \* compiled_model,
    ov_model_t \*\* model
    );

ov_compiled_model_create_infer_request(
    const ov_compiled_model_t \* compiled_model,
    ov_infer_request_t \*\* infer_request
    );

ov_compiled_model_set_property(const ov_compiled_model_t \* compiled_model, ...);

ov_compiled_model_get_property(
    const ov_compiled_model_t \* compiled_model,
    const char \* property_key,
    char \*\* property_value
    );

ov_compiled_model_export_model(
    const ov_compiled_model_t \* compiled_model,
    const char \* export_model_path
    );

ov_compiled_model_free(ov_compiled_model_t \* compiled_model);

ov_compiled_model_get_context(
    const ov_compiled_model_t \* compiled_model,
    ov_remote_context_t \*\* context
    );

ov_get_openvino_version(ov_version_t \* version);
ov_version_free(ov_version_t \* version);
ov_core_create(ov_core_t \*\* core);
ov_core_create_with_config(const char \* xml_config_file, ov_core_t \*\* core);
ov_core_free(ov_core_t \* core);

ov_core_read_model(
    const ov_core_t \* core,
    const char \* model_path,
    const char \* bin_path,
    ov_model_t \*\* model
    );

ov_core_read_model_from_memory(
    const ov_core_t \* core,
    const char \* model_str,
    const ov_tensor_t \* weights,
    ov_model_t \*\* model
    );

ov_core_compile_model(
    const ov_core_t \* core,
    const ov_model_t \* model,
    const char \* device_name,
    const size_t property_args_size,
    ov_compiled_model_t \*\* compiled_model,
    ...
    );

ov_core_compile_model_from_file(
    const ov_core_t \* core,
    const char \* model_path,
    const char \* device_name,
    const size_t property_args_size,
    ov_compiled_model_t \*\* compiled_model,
    ...
    );

ov_core_set_property(const ov_core_t \* core, const char \* device_name, ...);

ov_core_get_property(
    const ov_core_t \* core,
    const char \* device_name,
    const char \* property_key,
    char \*\* property_value
    );

ov_core_get_available_devices(
    const ov_core_t \* core,
    ov_available_devices_t \* devices
    );

ov_available_devices_free(ov_available_devices_t \* devices);

ov_core_import_model(
    const ov_core_t \* core,
    const char \* content,
    const size_t content_size,
    const char \* device_name,
    ov_compiled_model_t \*\* compiled_model
    );

ov_core_versions_free(ov_core_version_list_t \* versions);

ov_core_create_context(
    const ov_core_t \* core,
    const char \* device_name,
    const size_t context_args_size,
    ov_remote_context_t \*\* context,
    ...
    );

ov_core_compile_model_with_context(
    const ov_core_t \* core,
    const ov_model_t \* model,
    const ov_remote_context_t \* context,
    const size_t property_args_size,
    ov_compiled_model_t \*\* compiled_model,
    ...
    );

ov_core_get_default_context(
    const ov_core_t \* core,
    const char \* device_name,
    ov_remote_context_t \*\* context
    );

ov_dimension_is_dynamic(const ov_dimension_t dim);

ov_infer_request_set_tensor(
    ov_infer_request_t \* infer_request,
    const char \* tensor_name,
    const ov_tensor_t \* tensor
    );

ov_infer_request_set_tensor_by_port(
    ov_infer_request_t \* infer_request,
    const ov_output_port_t \* port,
    const ov_tensor_t \* tensor
    );

ov_infer_request_set_tensor_by_const_port(
    ov_infer_request_t \* infer_request,
    const ov_output_const_port_t \* port,
    const ov_tensor_t \* tensor
    );

ov_infer_request_set_input_tensor_by_index(
    ov_infer_request_t \* infer_request,
    const size_t idx,
    const ov_tensor_t \* tensor
    );

ov_infer_request_set_input_tensor(
    ov_infer_request_t \* infer_request,
    const ov_tensor_t \* tensor
    );

ov_infer_request_set_output_tensor_by_index(
    ov_infer_request_t \* infer_request,
    const size_t idx,
    const ov_tensor_t \* tensor
    );

ov_infer_request_set_output_tensor(
    ov_infer_request_t \* infer_request,
    const ov_tensor_t \* tensor
    );

ov_infer_request_get_tensor(
    const ov_infer_request_t \* infer_request,
    const char \* tensor_name,
    ov_tensor_t \*\* tensor
    );

ov_infer_request_get_tensor_by_const_port(
    const ov_infer_request_t \* infer_request,
    const ov_output_const_port_t \* port,
    ov_tensor_t \*\* tensor
    );

ov_infer_request_get_tensor_by_port(
    const ov_infer_request_t \* infer_request,
    const ov_output_port_t \* port,
    ov_tensor_t \*\* tensor
    );

ov_infer_request_get_input_tensor_by_index(
    const ov_infer_request_t \* infer_request,
    const size_t idx,
    ov_tensor_t \*\* tensor
    );

ov_infer_request_get_input_tensor(
    const ov_infer_request_t \* infer_request,
    ov_tensor_t \*\* tensor
    );

ov_infer_request_get_output_tensor_by_index(
    const ov_infer_request_t \* infer_request,
    const size_t idx,
    ov_tensor_t \*\* tensor
    );

ov_infer_request_get_output_tensor(
    const ov_infer_request_t \* infer_request,
    ov_tensor_t \*\* tensor
    );

ov_infer_request_infer(ov_infer_request_t \* infer_request);
ov_infer_request_cancel(ov_infer_request_t \* infer_request);
ov_infer_request_start_async(ov_infer_request_t \* infer_request);
ov_infer_request_wait(ov_infer_request_t \* infer_request);

ov_infer_request_wait_for(
    ov_infer_request_t \* infer_request,
    const int64_t timeout
    );

ov_infer_request_set_callback(
    ov_infer_request_t \* infer_request,
    const ov_callback_t \* callback
    );

ov_infer_request_free(ov_infer_request_t \* infer_request);

ov_infer_request_get_profiling_info(
    const ov_infer_request_t \* infer_request,
    ov_profiling_info_list_t \* profiling_infos
    );

ov_profiling_info_list_free(ov_profiling_info_list_t \* profiling_infos);
ov_layout_create(const char \* layout_desc, ov_layout_t \*\* layout);
ov_layout_free(ov_layout_t \* layout);
ov_layout_to_string(const ov_layout_t \* layout);
ov_model_free(ov_model_t \* model);

ov_model_const_input(
    const ov_model_t \* model,
    ov_output_const_port_t \*\* input_port
    );

ov_model_const_input_by_name(
    const ov_model_t \* model,
    const char \* tensor_name,
    ov_output_const_port_t \*\* input_port
    );

ov_model_const_input_by_index(
    const ov_model_t \* model,
    const size_t index,
    ov_output_const_port_t \*\* input_port
    );

ov_model_input(const ov_model_t \* model, ov_output_port_t \*\* input_port);

ov_model_input_by_name(
    const ov_model_t \* model,
    const char \* tensor_name,
    ov_output_port_t \*\* input_port
    );

ov_model_input_by_index(
    const ov_model_t \* model,
    const size_t index,
    ov_output_port_t \*\* input_port
    );

ov_model_const_output(
    const ov_model_t \* model,
    ov_output_const_port_t \*\* output_port
    );

ov_model_const_output_by_index(
    const ov_model_t \* model,
    const size_t index,
    ov_output_const_port_t \*\* output_port
    );

ov_model_const_output_by_name(
    const ov_model_t \* model,
    const char \* tensor_name,
    ov_output_const_port_t \*\* output_port
    );

ov_model_output(const ov_model_t \* model, ov_output_port_t \*\* output_port);

ov_model_output_by_index(
    const ov_model_t \* model,
    const size_t index,
    ov_output_port_t \*\* output_port
    );

ov_model_output_by_name(
    const ov_model_t \* model,
    const char \* tensor_name,
    ov_output_port_t \*\* output_port
    );

ov_model_inputs_size(const ov_model_t \* model, size_t \* input_size);
ov_model_outputs_size(const ov_model_t \* model, size_t \* output_size);
ov_model_is_dynamic(const ov_model_t \* model);

ov_model_reshape(
    const ov_model_t \* model,
    const char \*\* tensor_names,
    const ov_partial_shape_t \* partial_shapes,
    size_t size
    );

ov_model_reshape_input_by_name(
    const ov_model_t \* model,
    const char \* tensor_name,
    const ov_partial_shape_t partial_shape
    );

ov_model_reshape_single_input(
    const ov_model_t \* model,
    const ov_partial_shape_t partial_shape
    );

ov_model_reshape_by_port_indexes(
    const ov_model_t \* model,
    const size_t \* port_indexes,
    const ov_partial_shape_t \* partial_shape,
    size_t size
    );

ov_model_reshape_by_ports(
    const ov_model_t \* model,
    const ov_output_port_t \*\* output_ports,
    const ov_partial_shape_t \* partial_shapes,
    size_t size
    );

ov_model_get_friendly_name(const ov_model_t \* model, char \*\* friendly_name);

ov_const_port_get_shape(
    const ov_output_const_port_t \* port,
    ov_shape_t \* tensor_shape
    );

ov_port_get_shape(const ov_output_port_t \* port, ov_shape_t \* tensor_shape);

ov_port_get_any_name(
    const ov_output_const_port_t \* port,
    char \*\* tensor_name
    );

ov_port_get_partial_shape(
    const ov_output_const_port_t \* port,
    ov_partial_shape_t \* partial_shape
    );

ov_port_get_element_type(
    const ov_output_const_port_t \* port,
    ov_element_type_e \* tensor_type
    );

ov_output_port_free(ov_output_port_t \* port);
ov_output_const_port_free(ov_output_const_port_t \* port);

ov_partial_shape_create(
    const int64_t rank,
    const ov_dimension_t \* dims,
    ov_partial_shape_t \* partial_shape_obj
    );

ov_partial_shape_create_dynamic(
    const ov_rank_t rank,
    const ov_dimension_t \* dims,
    ov_partial_shape_t \* partial_shape_obj
    );

ov_partial_shape_create_static(
    const int64_t rank,
    const int64_t \* dims,
    ov_partial_shape_t \* partial_shape_obj
    );

ov_partial_shape_free(ov_partial_shape_t \* partial_shape);

ov_partial_shape_to_shape(
    const ov_partial_shape_t partial_shape,
    ov_shape_t \* shape
    );

ov_shape_to_partial_shape(
    const ov_shape_t shape,
    ov_partial_shape_t \* partial_shape
    );

ov_partial_shape_is_dynamic(const ov_partial_shape_t partial_shape);
ov_partial_shape_to_string(const ov_partial_shape_t partial_shape);
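
The partial-shape helpers above fill an output argument and report an ov_status_e result. A minimal sketch of creating, printing, and releasing a static shape (assuming the C API umbrella header openvino/c/openvino.h, and assuming the string returned by ov_partial_shape_to_string must be released with ov_free):

int64_t dims[4] = {1, 3, 224, 224};
ov_partial_shape_t shape;
if (ov_partial_shape_create_static(4, dims, &shape) == OK) {
    const char* text = ov_partial_shape_to_string(shape);
    /* use the textual shape, then release everything */
    ov_free(text);
    ov_partial_shape_free(&shape);
}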

ov_preprocess_prepostprocessor_create(
    const ov_model_t \* model,
    ov_preprocess_prepostprocessor_t \*\* preprocess
    );

ov_preprocess_prepostprocessor_free(ov_preprocess_prepostprocessor_t \* preprocess);

ov_preprocess_prepostprocessor_get_input_info(
    const ov_preprocess_prepostprocessor_t \* preprocess,
    ov_preprocess_input_info_t \*\* preprocess_input_info
    );

ov_preprocess_prepostprocessor_get_input_info_by_name(
    const ov_preprocess_prepostprocessor_t \* preprocess,
    const char \* tensor_name,
    ov_preprocess_input_info_t \*\* preprocess_input_info
    );

ov_preprocess_prepostprocessor_get_input_info_by_index(
    const ov_preprocess_prepostprocessor_t \* preprocess,
    const size_t tensor_index,
    ov_preprocess_input_info_t \*\* preprocess_input_info
    );

ov_preprocess_input_info_free(ov_preprocess_input_info_t \* preprocess_input_info);

ov_preprocess_input_info_get_tensor_info(
    const ov_preprocess_input_info_t \* preprocess_input_info,
    ov_preprocess_input_tensor_info_t \*\* preprocess_input_tensor_info
    );

ov_preprocess_input_tensor_info_free(ov_preprocess_input_tensor_info_t \* preprocess_input_tensor_info);

ov_preprocess_input_info_get_preprocess_steps(
    const ov_preprocess_input_info_t \* preprocess_input_info,
    ov_preprocess_preprocess_steps_t \*\* preprocess_input_steps
    );

ov_preprocess_preprocess_steps_free(ov_preprocess_preprocess_steps_t \* preprocess_input_process_steps);

ov_preprocess_preprocess_steps_resize(
    ov_preprocess_preprocess_steps_t \* preprocess_input_process_steps,
    const ov_preprocess_resize_algorithm_e resize_algorithm
    );

ov_preprocess_preprocess_steps_scale(
    ov_preprocess_preprocess_steps_t \* preprocess_input_process_steps,
    float value
    );

ov_preprocess_preprocess_steps_mean(
    ov_preprocess_preprocess_steps_t \* preprocess_input_process_steps,
    float value
    );

ov_preprocess_preprocess_steps_crop(
    ov_preprocess_preprocess_steps_t \* preprocess_input_process_steps,
    int32_t \* begin,
    int32_t begin_size,
    int32_t \* end,
    int32_t end_size
    );

ov_preprocess_preprocess_steps_convert_layout(
    ov_preprocess_preprocess_steps_t \* preprocess_input_process_steps,
    ov_layout_t \* layout
    );

ov_preprocess_preprocess_steps_reverse_channels(ov_preprocess_preprocess_steps_t \* preprocess_input_process_steps);

ov_preprocess_input_tensor_info_set_element_type(
    ov_preprocess_input_tensor_info_t \* preprocess_input_tensor_info,
    const ov_element_type_e element_type
    );

ov_preprocess_input_tensor_info_set_color_format(
    ov_preprocess_input_tensor_info_t \* preprocess_input_tensor_info,
    const ov_color_format_e colorFormat
    );

ov_preprocess_input_tensor_info_set_color_format_with_subname(
    ov_preprocess_input_tensor_info_t \* preprocess_input_tensor_info,
    const ov_color_format_e colorFormat,
    const size_t sub_names_size,
    ...
    );

ov_preprocess_input_tensor_info_set_spatial_static_shape(
    ov_preprocess_input_tensor_info_t \* preprocess_input_tensor_info,
    const size_t input_height,
    const size_t input_width
    );

ov_preprocess_input_tensor_info_set_memory_type(
    ov_preprocess_input_tensor_info_t \* preprocess_input_tensor_info,
    const char \* mem_type
    );

ov_preprocess_preprocess_steps_convert_element_type(
    ov_preprocess_preprocess_steps_t \* preprocess_input_process_steps,
    const ov_element_type_e element_type
    );

ov_preprocess_preprocess_steps_convert_color(
    ov_preprocess_preprocess_steps_t \* preprocess_input_process_steps,
    const ov_color_format_e colorFormat
    );

ov_preprocess_input_tensor_info_set_from(
    ov_preprocess_input_tensor_info_t \* preprocess_input_tensor_info,
    const ov_tensor_t \* tensor
    );

ov_preprocess_input_tensor_info_set_layout(
    ov_preprocess_input_tensor_info_t \* preprocess_input_tensor_info,
    ov_layout_t \* layout
    );

ov_preprocess_prepostprocessor_get_output_info(
    const ov_preprocess_prepostprocessor_t \* preprocess,
    ov_preprocess_output_info_t \*\* preprocess_output_info
    );

ov_preprocess_prepostprocessor_get_output_info_by_index(
    const ov_preprocess_prepostprocessor_t \* preprocess,
    const size_t tensor_index,
    ov_preprocess_output_info_t \*\* preprocess_output_info
    );

ov_preprocess_prepostprocessor_get_output_info_by_name(
    const ov_preprocess_prepostprocessor_t \* preprocess,
    const char \* tensor_name,
    ov_preprocess_output_info_t \*\* preprocess_output_info
    );

ov_preprocess_output_info_free(ov_preprocess_output_info_t \* preprocess_output_info);

ov_preprocess_output_info_get_tensor_info(
    const ov_preprocess_output_info_t \* preprocess_output_info,
    ov_preprocess_output_tensor_info_t \*\* preprocess_output_tensor_info
    );

ov_preprocess_output_tensor_info_free(ov_preprocess_output_tensor_info_t \* preprocess_output_tensor_info);

ov_preprocess_output_set_element_type(
    ov_preprocess_output_tensor_info_t \* preprocess_output_tensor_info,
    const ov_element_type_e element_type
    );

ov_preprocess_input_info_get_model_info(
    const ov_preprocess_input_info_t \* preprocess_input_info,
    ov_preprocess_input_model_info_t \*\* preprocess_input_model_info
    );

ov_preprocess_input_model_info_free(ov_preprocess_input_model_info_t \* preprocess_input_model_info);

ov_preprocess_input_model_info_set_layout(
    ov_preprocess_input_model_info_t \* preprocess_input_model_info,
    ov_layout_t \* layout
    );

ov_preprocess_prepostprocessor_build(
    const ov_preprocess_prepostprocessor_t \* preprocess,
    ov_model_t \*\* model
    );
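
The preprocessing entry points above are typically chained: wrap an existing ov_model_t, describe the input tensor, add steps, and build a new model. A condensed sketch (error handling omitted; model is an existing ov_model_t*, and the enum spellings U8 and RESIZE_LINEAR are taken from ov_element_type_e and ov_preprocess_resize_algorithm_e):

ov_preprocess_prepostprocessor_t* prep = NULL;
ov_preprocess_input_info_t* input_info = NULL;
ov_preprocess_input_tensor_info_t* tensor_info = NULL;
ov_preprocess_preprocess_steps_t* steps = NULL;
ov_model_t* preprocessed_model = NULL;

ov_preprocess_prepostprocessor_create(model, &prep);
ov_preprocess_prepostprocessor_get_input_info_by_index(prep, 0, &input_info);
ov_preprocess_input_info_get_tensor_info(input_info, &tensor_info);
ov_preprocess_input_tensor_info_set_element_type(tensor_info, U8);
ov_preprocess_input_info_get_preprocess_steps(input_info, &steps);
ov_preprocess_preprocess_steps_resize(steps, RESIZE_LINEAR);
ov_preprocess_prepostprocessor_build(prep, &preprocessed_model);

ov_preprocess_preprocess_steps_free(steps);
ov_preprocess_input_tensor_info_free(tensor_info);
ov_preprocess_input_info_free(input_info);
ov_preprocess_prepostprocessor_free(prep);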

ov_rank_is_dynamic(const ov_rank_t rank);

ov_remote_context_create_tensor(
    const ov_remote_context_t \* context,
    const ov_element_type_e type,
    const ov_shape_t shape,
    const size_t object_args_size,
    ov_tensor_t \*\* remote_tensor,
    ...
    );

ov_remote_context_get_params(
    const ov_remote_context_t \* context,
    size_t \* size,
    char \*\* params
    );

ov_remote_context_create_host_tensor(
    const ov_remote_context_t \* context,
    const ov_element_type_e type,
    const ov_shape_t shape,
    ov_tensor_t \*\* tensor
    );

ov_remote_context_free(ov_remote_context_t \* context);

ov_remote_tensor_get_params(
    ov_tensor_t \* tensor,
    size_t \* size,
    char \*\* params
    );

ov_shape_create(const int64_t rank, const int64_t \* dims, ov_shape_t \* shape);
ov_shape_free(ov_shape_t \* shape);

ov_tensor_create_from_host_ptr(
    const ov_element_type_e type,
    const ov_shape_t shape,
    void \* host_ptr,
    ov_tensor_t \*\* tensor
    );

ov_tensor_create(
    const ov_element_type_e type,
    const ov_shape_t shape,
    ov_tensor_t \*\* tensor
    );

ov_tensor_set_shape(ov_tensor_t \* tensor, const ov_shape_t shape);
ov_tensor_get_shape(const ov_tensor_t \* tensor, ov_shape_t \* shape);

ov_tensor_get_element_type(
    const ov_tensor_t \* tensor,
    ov_element_type_e \* type
    );

ov_tensor_get_size(const ov_tensor_t \* tensor, size_t \* elements_size);
ov_tensor_get_byte_size(const ov_tensor_t \* tensor, size_t \* byte_size);
ov_tensor_data(const ov_tensor_t \* tensor, void \*\* data);
ov_tensor_free(ov_tensor_t \* tensor);
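
Taken together, the C API functions above follow the usual create core, read model, compile, infer flow. A condensed sketch (status checks omitted; "model.xml" and "CPU" are placeholders):

ov_core_t* core = NULL;
ov_model_t* model = NULL;
ov_compiled_model_t* compiled = NULL;
ov_infer_request_t* request = NULL;
ov_tensor_t* output = NULL;

ov_core_create(&core);
ov_core_read_model(core, "model.xml", NULL, &model);
ov_core_compile_model(core, model, "CPU", 0, &compiled);
ov_compiled_model_create_infer_request(compiled, &request);
/* set inputs with ov_infer_request_set_input_tensor(), then run synchronously */
ov_infer_request_infer(request);
ov_infer_request_get_output_tensor(request, &output);

ov_tensor_free(output);
ov_infer_request_free(request);
ov_compiled_model_free(compiled);
ov_model_free(model);
ov_core_free(core);
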
OV_CC_DOMAINS(ov_pass);

template <class TDim>
void check_divided_result(
    const ov::Node \* op,
    const TDim& quotient,
    const TDim& dividend,
    const typename TDim::value_type& divisor
    );

void check_divided_result< ov::Dimension >(
    const ov::Node \* op,
    const ov::Dimension& quotient,
    const ov::Dimension& dividend,
    const typename ov::Dimension::value_type& divisor
    );

template <typename T>
NGRAPH_API_DEPRECATED std::vector<T> read_vector(std::shared_ptr<ngraph::runtime::Tensor> tv);

template <class T, ngraph::element::Type_t ET>
NGRAPH_API_DEPRECATED std::vector<T> array_2_vector(
    typename ngraph::element_type_traits<ET>::value_type \* data,
    size_t size
    );

template <typename T>
NGRAPH_API_DEPRECATED std::vector<T> host_tensor_2_vector(ngraph::HostTensorPtr tensor);

NGRAPH_API_DEPRECATED std::vector<float> NGRAPH_API read_float_vector(std::shared_ptr<ngraph::runtime::Tensor> tv);
NGRAPH_API_DEPRECATED std::vector<int64_t> NGRAPH_API read_index_vector(std::shared_ptr<ngraph::runtime::Tensor> tv);

NGRAPH_API NGRAPH_API_DEPRECATED std::ostream& operator << (
    std::ostream& os,
    const ngraph::NodeVector& nv
    );

NGRAPH_API NGRAPH_API_DEPRECATED const char \* get_ngraph_version_string();

template <class T, class TRShape = ov::result_shape_t<T>>
std::vector<TRShape> shape_infer(
    const ov::op::v1::Reshape \* op,
    const std::vector<T>& input_shapes,
    const ov::ITensorAccessor& ta = ov::make_tensor_accessor()
    );

InferenceEngine::Blob::Ptr make_blob_with_precision(const InferenceEngine::TensorDesc& desc);

InferenceEngine::Blob::Ptr make_blob_with_precision(
    const InferenceEngine::TensorDesc& desc,
    void \* ptr
    );

InferenceEngine::Blob::Ptr make_blob_with_precision(
    const InferenceEngine::TensorDesc& desc,
    const std::shared_ptr<InferenceEngine::IAllocator>& alloc
    );

InferenceEngine::Blob::Ptr make_plain_blob(
    InferenceEngine::Precision prec,
    const InferenceEngine::SizeVector dims
    );

template <class... Args>
InferenceEngine::Blob::Ptr make_blob_with_precision(
    InferenceEngine::Precision precision,
    Args&&... args
    );

template <typename T>
void CopyVectorToBlob(
    const InferenceEngine::Blob::Ptr outputBlob,
    const std::vector<T>& inputVector
    );

int ie_memcpy(void \* dest, size_t destsz, void const \* src, size_t count);
parse_result ParseXml(const char \* file_path);

// macros

#define ADD_MATCHER(obj, region, ...)
#define ADD_MATCHER_FOR_THIS(region, ...)
#define AUTO_CONFIG_KEY(name)
#define CALL_OVERLOAD(name, exc_class, ctx, ...)
#define CL_HPP_MINIMUM_OPENCL_VERSION
#define CL_HPP_TARGET_OPENCL_VERSION
#define CONFIG_KEY(name)
#define CONFIG_KEY_INTERNAL(name)
#define CONFIG_VALUE(name)
#define CONFIG_VALUE_INTERNAL(name)
#define COPY_TENSOR(a)
#define COUNT_ARGS_MAXN(...)
#define CPU_CONFIG_KEY(name)
#define DECLARE_GPU_PARAM_KEY(name, ...)
#define DECLARE_GPU_PARAM_VALUE(name)
#define EXEC_NETWORK_METRIC_KEY(name)
#define EXPAND_ARGS(args)
#define FOREACH_CHILD(c, p, tag)
#define FRONTEND_API
#define FRONTEND_CAT(x, y)
#define FRONTEND_CAT_(x, y)
#define FRONTEND_C_API
#define FRONTEND_EXPAND(X)
#define FRONT_END_CHECK_IMPLEMENTED(COND, NAME)
#define FRONT_END_GENERAL_CHECK(...)
#define FRONT_END_INITIALIZATION_CHECK(...)
#define FRONT_END_NOT_IMPLEMENTED(NAME)
#define FRONT_END_OP_CONVERSION_CHECK(...)
#define FRONT_END_THROW(MSG)
#define GEN_VAR_COMMON(...)
#define GEN_VAR_PADDLE(in_names, out_names, ...)
#define GLUE(x, y)
#define GNA_CONFIG_KEY(name)
#define GNA_CONFIG_VALUE(name)
#define GPU_CONFIG_KEY(name)
#define GPU_METRIC_KEY(name)
#define GPU_PARAM_KEY(name)
#define GPU_PARAM_VALUE(name)
#define HETERO_CONFIG_KEY(name)
#define IE_ASSERT(EXPRESSION)
#define IE_CREATE_EXTENSION
#define IE_CREATE_PLUGIN
#define IE_DEFINE_EXTENSION_CREATE_FUNCTION(ExtensionType)
#define IE_DEFINE_EXTENSION_CREATE_FUNCTION_DECLARATION(_IE_CREATE_EXTENSION_FUNC)
#define IE_DEFINE_PLUGIN_CREATE_FUNCTION(PluginType, version, ...)
#define IE_DEFINE_PLUGIN_CREATE_FUNCTION_DECLARATION(_IE_CREATE_PLUGIN_FUNC)
#define IE_EXCEPTION_SWITCH(STATUS, TYPE_ALIAS, ...)
#define IE_SET_METRIC_RETURN(name, ...)
#define IE_THROW(...)
#define IE_VERSION_MAJOR
#define IE_VERSION_MINOR
#define IE_VERSION_PATCH
#define INFERENCE_ENGINE_1_0_DEPRECATED
#define INFERENCE_ENGINE_ENUM_DEPRECATED(msg)
#define ITT_FUNCTION_NAME
#define LP_TRANSFORMATIONS_API
#define MAKE_MAP_COMMON(FRAMEWORK, ...)
#define MAKE_MAP_onnx(...)
#define MAKE_MAP_paddle(...)
#define MAKE_MAP_tensorflow(...)
#define MAKE_MAP_tensorflow_lite(...)
#define MATCHER_SCOPE(region)
#define METRIC_KEY(name)
#define METRIC_VALUE(name)
#define MULTI_CONFIG_KEY(name)
#define NGRAPH_API
#define NGRAPH_API_C
#define NGRAPH_API_DEPRECATED
#define NGRAPH_CHECK(...)
#define NGRAPH_CHECK_HELPER(exc_class, ctx, ...)
#define NGRAPH_CHECK_HELPER1(exc_class, ctx, check)
#define NGRAPH_CHECK_HELPER2(exc_class, ctx, check, ...)
#define NGRAPH_DEBUG
#define NGRAPH_DEPRECATED(msg)
#define NGRAPH_ENUM_DEPRECATED(msg)
#define NGRAPH_ERR
#define NGRAPH_EXTERN_C
#define NGRAPH_HELPER_DLL_EXPORT
#define NGRAPH_HELPER_DLL_IMPORT
#define NGRAPH_INFO
#define NGRAPH_OP(a, b)
#define NGRAPH_RTTI_DECLARATION
#define NGRAPH_RTTI_DEFINITION(...)
#define NGRAPH_SUPPRESS_DEPRECATED_END
#define NGRAPH_SUPPRESS_DEPRECATED_START
#define NGRAPH_UNREACHABLE(...)
#define NGRAPH_WARN
#define NODE_SHAPE_INFER_CHECK(node, input_shapes, ...)
#define NODE_VALIDATION_CHECK(node, ...)
#define OPENVINO_API
#define OPENVINO_API_C(...)
#define OPENVINO_ASSERT(...)
#define OPENVINO_ASSERT_HELPER(exc_class, ctx, ...)
#define OPENVINO_ASSERT_HELPER1(exc_class, ctx, check)
#define OPENVINO_ASSERT_HELPER2(exc_class, ctx, check, ...)
#define OPENVINO_CREATE_EXTENSIONS(extensions)
#define OPENVINO_C_API(...)
#define OPENVINO_C_API_CALLBACK
#define OPENVINO_C_VAR(...)
#define OPENVINO_DEBUG
#define OPENVINO_ENUM_DEPRECATED(msg)
#define OPENVINO_ERR
#define OPENVINO_EXTENSION_API
#define OPENVINO_EXTENSION_C_API
#define OPENVINO_FRAMEWORK_MAP(FRAMEWORK, ...)
#define OPENVINO_INFO
#define OPENVINO_NOT_IMPLEMENTED
#define OPENVINO_OP(...)
#define OPENVINO_RTTI(...)
#define OPENVINO_SUPPRESS_DEPRECATED_END
#define OPENVINO_SUPPRESS_DEPRECATED_START
#define OPENVINO_THROW(...)
#define OPENVINO_THROW_HELPER(exc_class, ctx, ...)
#define OPENVINO_THROW_HELPER1(exc_class, ctx, explanation)
#define OPENVINO_THROW_HELPER2(exc_class, ctx, ...)
#define OPENVINO_VERSION_MAJOR
#define OPENVINO_VERSION_MINOR
#define OPENVINO_VERSION_PATCH
#define OPENVINO_WARN
#define OVERLOAD_MACRO(name, count)
#define OVERLOAD_MACRO1(name, count)
#define OVERLOAD_MACRO2(name, count)
#define OV_CASE(Case, Type)
#define OV_CASE2(Case1, Case2, Type1, Type2)
#define OV_CC_CAT
#define OV_CC_EXPAND
#define OV_CC_TOSTRING
#define OV_COLLECT_ATTACHED_EXTENSIONS(FRAMEWORK)
#define OV_CREATE_PLUGIN
#define OV_DEFINE_PLUGIN_CREATE_FUNCTION(PluginType, version, ...)
#define OV_FRONTEND_API_VERSION
#define OV_ITT_DOMAIN(...)
#define OV_ITT_GROUP(group)
#define OV_ITT_SCOPE(group, ...)
#define OV_ITT_SCOPED_TASK(...)
#define OV_ITT_SCOPE_CHAIN(group, ...)
#define OV_ITT_SCOPE_NEXT(group, ...)
#define OV_ITT_SCOPE_SKIP(group, chainId)
#define OV_ITT_TASK_CHAIN(...)
#define OV_ITT_TASK_NEXT(...)
#define OV_ITT_TASK_SKIP(chainId)
#define OV_PP_ARG_N(_0, _1, _2, _3, _4, N, ...)
#define OV_PP_ARG_PLACEHOLDER_1
#define OV_PP_CAT(x, y)
#define OV_PP_CAT3(x, y, z)
#define OV_PP_CAT3_(x, y, z)
#define OV_PP_CAT4(x, y, z, w)
#define OV_PP_CAT4_(x, y, z, w)
#define OV_PP_CAT_(x, y)
#define OV_PP_EXPAND(X)
#define OV_PP_IS_ENABLED(x)
#define OV_PP_IS_ENABLED1(val)
#define OV_PP_IS_ENABLED2(arg1_or_junk)
#define OV_PP_NARG(...)
#define OV_PP_NARG_(...)
#define OV_PP_NO_ARGS(NAME)
#define OV_PP_OVERLOAD(NAME, ...)
#define OV_PP_RSEQ_N()
#define OV_PP_SECOND_ARG(...)
#define OV_PP_SECOND_ARG_(...)
#define OV_PP_SECOND_ARG_GET(ignored, val, ...)
#define OV_PP_TOSTRING(...)
#define OV_PP_TOSTRING_(...)
#define OV_SWITCH(Module, fn, ctx, val, ...)
#define OV_THREAD_OMP
#define OV_THREAD_SEQ
#define OV_THREAD_TBB
#define OV_THREAD_TBB_AUTO
#define REGISTER_DISABLED_PASS(obj, region, ...)
#define REGISTER_PASS(obj, region, ...)

#define RETURN_ARG_COUNT( \
    _1_, \
    _2_, \
    _3_, \
    _4_, \
    _5_, \
    _6, \
    _7, \
    _8, \
    _9, \
    _10, \
    _11, \
    _12, \
    _13, \
    _14, \
    _15, \
    _16, \
    _17, \
    _18, \
    _19, \
    _20, \
    _21, \
    _22, \
    _23, \
    _24, \
    _25, \
    count, \
    ... \
    )

#define TBB_PREVIEW_WAITING_FOR_WORKERS
#define THROW_IE_LPT_EXCEPTION(node)
#define THROW_TRANSFORMATION_EXCEPTION
#define TRANSFORMATIONS_API
#define TYPE_CASE(a)
#define USE_FACTORY(precision)
#define _NGRAPH_RTTI_DEFINITION_COMMON(CLASS)
#define _NGRAPH_RTTI_DEFINITION_NO_PARENT(CLASS, TYPE_NAME)
#define _NGRAPH_RTTI_DEFINITION_WITH_PARENT(CLASS, TYPE_NAME, PARENT_CLASS)
#define _OPENVINO_OP_REG(a, b)
#define _OPENVINO_OP_REG
#define _OPENVINO_RTTI_DEFINITION_SELECTOR(_1, _2, _3, NAME, ...)
#define _OPENVINO_RTTI_EXPAND(X)
#define _OPENVINO_RTTI_OP_WITH_TYPE(TYPE_NAME)
#define _OPENVINO_RTTI_OP_WITH_TYPE_VERSION(TYPE_NAME, VERSION_NAME)
#define _OPENVINO_RTTI_WITH_TYPE(TYPE_NAME)
#define _OPENVINO_RTTI_WITH_TYPE_VERSION(TYPE_NAME, VERSION_NAME)

#define _OPENVINO_RTTI_WITH_TYPE_VERSIONS_PARENT( \
    TYPE_NAME, \
    VERSION_NAME, \
    PARENT_CLASS \
    )

#define _OPENVINO_RTTI_WITH_TYPE_VERSION_PARENT( \
    TYPE_NAME, \
    VERSION_NAME, \
    PARENT_CLASS \
    )

#define createNodeIfRegistered(Module, key, ...)
#define cu32(x)
#define registerNodeIfRequired(Module, Name, key, Impl)
#define strncasecmp

Detailed Documentation

Global Functions

OPENVINO_C_VAR(const char*)

gpu plugin properties key for remote context/tensor

Read-write property<uint32_t string>: video decoder surface plane in a shared memory blob parameter map.

Read-write property<uint32_t string>: video decoder surface handle in a shared memory blob parameter map.

Read-write property<void *>: OpenCL memory handle in a shared memory blob parameter map.

Read-write property<void *>: OpenCL queue handle in a shared context.

Read-write property<int string>: In case of multi-tile system, this key identifies tile within given context.

Read-write property<int string>: ID of device in OpenCL context if multiple devices are present in the context.

Read-write property: shared device context type, can be either a pure OpenCL (OCL) or a shared video decoder (VA_SHARED) context. Value is a string; it can be one of: "OCL" - pure OpenCL context, "VA_SHARED" - context shared with a video decoding device.

Read-write property<void *>: identifies OpenCL context handle in a shared context or shared memory blob parameter map.

Read-write property<void *>: video acceleration device/display handle in a shared context or shared memory blob parameter map.

Read-write property: type of internal shared memory in a shared memory blob parameter map. Value is string, it can be one of below strings: “OCL_BUFFER” - Shared OpenCL buffer blob “OCL_IMAGE2D” - Shared OpenCL 2D image blob “USM_USER_BUFFER” - Shared USM pointer allocated by user “USM_HOST_BUFFER” - Shared USM pointer type with host allocation type allocated by plugin “USM_DEVICE_BUFFER” - Shared USM pointer type with device allocation type allocated by plugin “VA_SURFACE” - Shared video decoder surface or D3D 2D texture blob “DX_BUFFER” - Shared D3D buffer blob

ov_model_is_dynamic(const ov_model_t* model)

Returns true if any operation defined in the model has a dynamic shape.

Parameters:

model

A pointer to the ov_model_t.

Returns:

true if model contains dynamic shapes
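
A typical use is to branch on the result before reshaping or compiling:

if (ov_model_is_dynamic(model)) {
    /* reshape to static dimensions, or rely on a device that supports dynamic shapes */
}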

ov_preprocess_input_tensor_info_set_memory_type(
    ov_preprocess_input_tensor_info_t* preprocess_input_tensor_info,
    const char* mem_type
    )

Set ov_preprocess_input_tensor_info_t memory type.

Parameters:

preprocess_input_tensor_info

A pointer to the ov_preprocess_input_tensor_info_t.

mem_type

Memory type. Refer to ov_remote_context.h to get memory type string info.

Returns:

Status code of the operation: OK(0) for success.
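
For example, to declare that input data will be provided as GPU surface memory (the memory-type string "GPU_SURFACE" is an assumption here; the exact strings are defined alongside the remote context properties in ov_remote_context.h):

ov_preprocess_input_tensor_info_set_memory_type(preprocess_input_tensor_info, "GPU_SURFACE");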

template <class TDim>
void check_divided_result(
    const ov::Node* op,
    const TDim& quotient,
    const TDim& dividend,
    const typename TDim::value_type& divisor
    )

Check for valid quotient of dimension division.

If quotient is not valid (quotient * divisor != dividend) throw NodeValidationFailure exception.

Parameters:

TDim

Type of dimension.

op

Pointer to operator.

quotient

Dimension result after division.

dividend

Original dimension.

divisor

Dimension divide value.
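
A sketch of how a shape-inference routine might call it (op is an illustrative const ov::Node* and the numbers are arbitrary):

ov::Dimension channels(64);
const int64_t group_size = 4;
ov::Dimension per_group = channels / group_size;
// Throws NodeValidationFailure on op if per_group * group_size != channels.
check_divided_result(op, per_group, channels, group_size);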

InferenceEngine::Blob::Ptr make_blob_with_precision(const InferenceEngine::TensorDesc& desc)

Creates Blob::Ptr with precision.

Parameters:

desc

The TensorDesc object

Returns:

A Blob::Ptr pointer
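
A short sketch of typical use with the (deprecated) Inference Engine 1.0 types:

InferenceEngine::TensorDesc desc(InferenceEngine::Precision::FP32,
                                 {1, 3, 224, 224},
                                 InferenceEngine::Layout::NCHW);
InferenceEngine::Blob::Ptr blob = make_blob_with_precision(desc);
blob->allocate();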

InferenceEngine::Blob::Ptr make_blob_with_precision(
    const InferenceEngine::TensorDesc& desc,
    void* ptr
    )

Makes a blob with precision.

Parameters:

desc

The TensorDesc object

ptr

The pointer to a raw memory

Returns:

A Blob::Ptr pointer

InferenceEngine::Blob::Ptr make_blob_with_precision(
    const InferenceEngine::TensorDesc& desc,
    const std::shared_ptr<InferenceEngine::IAllocator>& alloc
    )

Makes a blob with precision.

Parameters:

desc

The description

alloc

The IAllocator object

Returns:

A Blob::Ptr pointer

InferenceEngine::Blob::Ptr make_plain_blob(
    InferenceEngine::Precision prec,
    const InferenceEngine::SizeVector dims
    )

Creates a plain Blob::Ptr.

Parameters:

prec

The Precision value

dims

The dims

Returns:

A Blob::Ptr pointer

template <class... Args>
InferenceEngine::Blob::Ptr make_blob_with_precision(
    InferenceEngine::Precision precision,
    Args&&... args
    )

Creates Blob::Ptr with precision.

Parameters:

precision

The precision

args

The arguments

Args

Variadic template arguments

Returns:

A Blob::Ptr pointer

template <typename T>
void CopyVectorToBlob(
    const InferenceEngine::Blob::Ptr outputBlob,
    const std::vector<T>& inputVector
    )

Copy data from std::vector to Blob.

Parameters:

T

type of data in std::vector

outputBlob

An output blob to copy to

inputVector

An input std::vector to copy from
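
For example, copying host data into an already allocated FP32 blob (blob is assumed to have been created and allocated beforehand, e.g. with make_blob_with_precision):

std::vector<float> values(blob->size(), 0.0f);
CopyVectorToBlob(blob, values);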

int ie_memcpy(void* dest, size_t destsz, void const* src, size_t count)

Copies bytes between buffers with security enhancements. Copies count bytes from src to dest. If the source and destination overlap, the behavior is undefined.

Parameters:

dest

A Pointer to the object to copy to

destsz

A max number of bytes to modify in the destination (typically the size of the destination object)

src

A pointer to the object to copy from

count

A number of bytes to copy

Returns:

zero on success and non-zero value on error.
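
A short usage sketch:

char dst[16] = {0};
const char src[] = "openvino";
if (ie_memcpy(dst, sizeof(dst), src, sizeof(src)) != 0) {
    /* copy failed: destination too small or invalid arguments */
}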

parse_result ParseXml(const char* file_path)

Parses a file and returns parse_result.

Parameters:

file_path

The file path

Returns:

The parse_result.
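
A usage sketch; the parse_result members shown here (a pugixml document pointer xml and an error_msg string) are assumptions about the struct listed above:

parse_result res = ParseXml("model.xml");
if (!res.error_msg.empty()) {
    /* report res.error_msg */
} else {
    pugi::xml_node root = res.xml->document_element();
    /* walk the tree, e.g. with FOREACH_CHILD */
}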

Macros

#define AUTO_CONFIG_KEY(name)

A macro which provides an AUTO-mangled name for configuration key with name name

#define CONFIG_KEY(name)

shortcut for defining configuration keys

#define CONFIG_KEY_INTERNAL(name)

Shortcut for defining internal configuration keys.

#define CONFIG_VALUE(name)

shortcut for defining configuration values
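
For example, these shortcuts are typically used together when building an Inference Engine configuration map (PERF_COUNT and YES are existing PluginConfigParams entries):

std::map<std::string, std::string> config = {
    {CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES)},
};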

#define CONFIG_VALUE_INTERNAL(name)

Shortcut for defining internal configuration values.

#define CPU_CONFIG_KEY(name)

shortcut for defining configuration keys

#define DECLARE_GPU_PARAM_KEY(name, ...)

Shortcut for defining object parameter keys.

#define DECLARE_GPU_PARAM_VALUE(name)

Shortcut for defining possible values for object parameter keys.

#define EXEC_NETWORK_METRIC_KEY(name)

shortcut for defining common Inference Engine ExecutableNetwork metrics

#define FOREACH_CHILD(c, p, tag)

Defines a convenient for-each style loop to iterate over node children.

Parameters:

c

Child node name

p

Parent node name

tag

The tag represented as a string value
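
For example, iterating the <layer> children of a pugixml node (the variable and tag names are illustrative):

FOREACH_CHILD(layer, layers_node, "layer") {
    const char* name = layer.attribute("name").value();
    /* process the child node */
}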

#define FRONT_END_CHECK_IMPLEMENTED(COND, NAME)

Assert macro.

Parameters:

COND

Condition. If ‘false’, throws ‘NotImplementedFailure’

NAME

Name of the function that is not implemented

ov::frontend::NotImplementedFailure

#define FRONT_END_GENERAL_CHECK(...)

Macro to check whether a boolean condition holds.

Parameters:

cond

Condition to check

Additional error message info to be added to the error message via the << stream-insertion operator. Note that the expressions here will be evaluated lazily, i.e., only if the cond evaluates to false.

ov::frontend::GeneralFailure

if cond is false.

#define FRONT_END_INITIALIZATION_CHECK(...)

Macro to check whether a boolean condition holds.

Parameters:

cond

Condition to check

Additional error message info to be added to the error message via the << stream-insertion operator. Note that the expressions here will be evaluated lazily, i.e., only if the cond evaluates to false.

ov::frontend::InitializationFailure

if cond is false.

#define FRONT_END_NOT_IMPLEMENTED(NAME)

Assert macro.

Parameters:

NAME

Name of the function that is not implemented

ov::frontend::NotImplementedFailure

#define FRONT_END_OP_CONVERSION_CHECK(...)

Macro to check whether a boolean condition holds.

Parameters:

cond

Condition to check

Additional error message info to be added to the error message via the << stream-insertion operator. Note that the expressions here will be evaluated lazily, i.e., only if the cond evaluates to false.

ov::frontend::OpConversionFailure

if cond is false.

#define FRONT_END_THROW(MSG)

Assert macro.

Parameters:

MSG

Error message

ov::frontend::GeneralFailure

#define GNA_CONFIG_KEY(name)

Shortcut for defining configuration keys.

#define GNA_CONFIG_VALUE(name)

Shortcut for defining configuration values.

#define GPU_CONFIG_KEY(name)

shortcut for defining configuration keys

#define GPU_METRIC_KEY(name)

shortcut for defining GPU plugin metrics

#define GPU_PARAM_KEY(name)

Shortcut for defining configuration keys.

#define GPU_PARAM_VALUE(name)

Shortcut for defining configuration values.

#define HETERO_CONFIG_KEY(name)

Shortcut for defining HETERO configuration keys.

#define IE_ASSERT(EXPRESSION)

Uses the assert() function if NDEBUG is not defined, and throws an InferenceEngine exception otherwise.

#define IE_CREATE_EXTENSION

Defines a name of a function creating extension instance.

#define IE_CREATE_PLUGIN

Defines a name of a function creating plugin instance.

#define IE_DEFINE_EXTENSION_CREATE_FUNCTION(ExtensionType)

Generates extension creation function.

Deprecated The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html

#define IE_DEFINE_PLUGIN_CREATE_FUNCTION(PluginType, version, ...)

Defines the exported IE_CREATE_PLUGIN function which is used to create a plugin instance.

#define IE_EXCEPTION_SWITCH(STATUS, TYPE_ALIAS, ...)

Generates a switch statement over error codes and maps them to the corresponding exception types.

#define IE_SET_METRIC_RETURN(name, ...)

Return metric value with specified name and arguments .... Example:

IE_SET_METRIC_RETURN(SUPPORTED_CONFIG_KEYS, configKeys);

Parameters:

name

The metric name

A metric value

Returns:

A metric value wrapped with Parameter and returned to a calling function

#define IE_THROW(...)

A macro used to throw specified exception with a description.

#define IE_VERSION_MAJOR

Defines Inference Engine major version.

#define IE_VERSION_MINOR

Defines Inference Engine minor version.

#define IE_VERSION_PATCH

Defines Inference Engine patch version.

#define METRIC_KEY(name)

shortcut for defining common Inference Engine metrics

#define METRIC_VALUE(name)

shortcut for defining metric values

#define MULTI_CONFIG_KEY(name)

A macro which provides a MULTI-mangled name for configuration key with name name

#define NGRAPH_RTTI_DECLARATION

Helper macro that puts necessary declarations of RTTI block inside a class definition. Should be used in the scope of class that requires type identification besides one provided by C++ RTTI. Recommended to be used for all classes that are inherited from class ov::Node to enable pattern matching for them. Accepts necessary type identification details like type of the operation, version and optional parent class.

Applying this macro within a class definition provides declaration of type_info static constant for backward compatibility with old RTTI definition for Node, static function get_type_info_static which returns a reference to an object that is equal to type_info but not necessary to the same object, and get_type_info virtual function that overrides Node::get_type_info and returns a reference to the same object that get_type_info_static gives.

Use this macro as a public part of the class definition:

class MyOp : public Node
{
    public:
        // Don't use Node as a parent for type_info, it doesn't have any value and is
        // prohibited
        NGRAPH_RTTI_DECLARATION;

        ...
};

class MyInheritedOp : public MyOp
{
    public:
        NGRAPH_RTTI_DECLARATION;

        ...
};

To complete type identification for a class, use NGRAPH_RTTI_DEFINITION.

#define NODE_SHAPE_INFER_CHECK(node, input_shapes, ...)

Throw NodeValidationFailure with additional information about input shapes used during shape inference.

#define OPENVINO_ASSERT(...)

Macro to check whether a boolean condition holds.

Parameters:

cond

Condition to check

Additional error message info to be added to the error message via the << stream-insertion operator. Note that the expressions here will be evaluated lazily, i.e., only if the cond evaluates to false.

ov::AssertFailure

if cond is false.
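
For example, inside a shape-inference or transformation routine (input_shapes is an illustrative local variable):

OPENVINO_ASSERT(input_shapes.size() == 2,
                "Expected 2 input shapes, got ",
                input_shapes.size());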

#define OPENVINO_CREATE_EXTENSIONS(extensions)

Macro generates the entry point for the library.

Parameters:

extensions

A vector of extensions

#define OPENVINO_RTTI(...)

Helper macro that puts necessary declarations of RTTI block inside a class definition. Should be used in the scope of class that requires type identification besides one provided by C++ RTTI. Recommended to be used for all classes that are inherited from class ov::Node to enable pattern matching for them. Accepts necessary type identification details like type of the operation, version and optional parent class.

Applying this macro within a class definition provides declaration of type_info static constant for backward compatibility with old RTTI definition for Node, static function get_type_info_static which returns a reference to an object that is equal to type_info but not necessary to the same object, and get_type_info virtual function that overrides Node::get_type_info and returns a reference to the same object that get_type_info_static gives.

Use this macro as a public part of the class definition:

class MyClass
{
    public:
        OPENVINO_RTTI("MyClass", "my_version");

        ...
};

class MyClass2: public MyClass
{
    public:
        OPENVINO_RTTI("MyClass2", "my_version2", MyClass);

        ...
};

OPENVINO_RTTI(name)
OPENVINO_RTTI(name, version_id)
OPENVINO_RTTI(name, version_id, parent)
OPENVINO_RTTI(name, version_id, parent, old_version)

Parameters:

TYPE_NAME

a string literal of type const char* that names your class in type identification namespace; It is your choice how to name it, but it should be unique among all OPENVINO_RTTI_DECLARATION-enabled classes that can be used in conjunction with each other in one transformation flow.

VERSION_NAME

is a name of the operation version, used to distinguish different versions of operations that share the same TYPE_NAME

PARENT_CLASS

is an optional direct or indirect parent class for this class; define it only if there is a need to capture any operation from some group of operations that all derive from a common base class. Don't use Node as a parent: it is the base class for all operations and doesn't provide the ability to define a meaningful subset of operations. PARENT_CLASS should define RTTI with the OPENVINO_RTTI_{DECLARATION/DEFINITION} macros.

_VERSION_INDEX

is an unsigned integer index used to distinguish different versions of operations that share the same TYPE_NAME (for backward compatibility)

#define OPENVINO_THROW(...)

Macro to signal a code path that is unreachable in a successful execution. It’s implemented with OPENVINO_ASSERT macro.

Parameters:

Additional error message that should describe why that execution path is unreachable.

ov::Exception

if the macro is executed.

#define OPENVINO_VERSION_MAJOR

Defines OpenVINO major version.

#define OPENVINO_VERSION_MINOR

Defines OpenVINO minor version.

#define OPENVINO_VERSION_PATCH

Defines OpenVINO patch version.

#define TYPE_CASE(a)

Used in an evaluator switch statement so that the case type and the evaluate call are guaranteed to have matching types.

Use this in an evaluate_*() function like this:

switch (arg0->get_element_type())
{
    TYPE_CASE(i8)(arg0, arg1, out, broadcast_spec);
    break;
    TYPE_CASE(i16)(arg0, arg1, out, broadcast_spec);
    break;
    ...
}

Each TYPE_CASE statement expands like this:

case element::Type_t::a: rc = evaluate<element::Type_t::a>(arg0, arg1, out, broadcast_spec)

Don’t forget to put a break after each statement or it will fall through and generate a runtime error.