Global Namespace¶
Overview¶
// namespaces
namespace ExecGraphInfoSerialization;
namespace FileUtils;
namespace InferenceEngine;
namespace InferenceEngine::CLDNNConfigParams;
namespace InferenceEngine::CPUConfigParams;
namespace InferenceEngine::G;
namespace InferenceEngine::GNAConfigParams;
namespace InferenceEngine::GPUConfigParams;
namespace InferenceEngine::GPUContextParams;
namespace InferenceEngine::HeteroConfigParams;
namespace InferenceEngine::Metrics;
namespace InferenceEngine::MultiDeviceConfigParams;
namespace InferenceEngine::NetPass;
namespace InferenceEngine::PluginConfigInternalParams;
namespace InferenceEngine::PluginConfigParams;
namespace InferenceEngine::PrecisionUtils;
namespace InferenceEngine::PrecisionUtils::details;
namespace InferenceEngine::gapi;
namespace InferenceEngine::gapi::kernels;
namespace InferenceEngine::gapi::kernels::areaDownscale32f;
namespace InferenceEngine::gapi::kernels::areaDownscale8u;
namespace InferenceEngine::gapi::kernels::areaUpscale;
namespace InferenceEngine::gapi::kernels::areaUpscale32f;
namespace InferenceEngine::gapi::kernels::avx;
namespace InferenceEngine::gapi::kernels::avx512;
namespace InferenceEngine::gapi::kernels::linear;
namespace InferenceEngine::gapi::kernels::linear32f;
namespace InferenceEngine::gapi::kernels::neon;
namespace InferenceEngine::gpu;
namespace InferenceEngine::itt;
namespace InferenceEngine::itt::domains;
namespace XMLParseUtils;
namespace cv;
namespace ngraph;
namespace ngraph::builder;
namespace ngraph::builder::opset1;
namespace ngraph::coordinates;
namespace ngraph::descriptor;
namespace ngraph::descriptor::layout;
namespace ngraph::element;
namespace ngraph::file_util;
namespace ngraph::op;
namespace ngraph::op::internal;
namespace ngraph::op::util;
namespace ngraph::op::util::detail;
namespace ngraph::op::util::error;
namespace ngraph::op::v0;
namespace ngraph::op::v1;
namespace ngraph::op::v10;
namespace ngraph::op::v3;
namespace ngraph::op::v4;
namespace ngraph::op::v5;
namespace ngraph::op::v6;
namespace ngraph::op::v7;
namespace ngraph::op::v8;
namespace ngraph::op::v9;
namespace ngraph::opset1;
namespace ngraph::opset10;
namespace ngraph::opset2;
namespace ngraph::opset3;
namespace ngraph::opset4;
namespace ngraph::opset5;
namespace ngraph::opset6;
namespace ngraph::opset7;
namespace ngraph::opset8;
namespace ngraph::opset9;
namespace ngraph::pass;
namespace ngraph::pass::itt;
namespace ngraph::pass::itt::domains;
namespace ngraph::pass::low_precision;
namespace ngraph::pass::low_precision::itt;
namespace ngraph::pass::low_precision::itt::domains;
namespace ngraph::pass::low_precision::precision_set;
namespace ngraph::pattern;
namespace ngraph::pattern::op;
namespace ngraph::reduction;
namespace ngraph::runtime;
namespace ngraph::runtime::opt_kernel;
namespace ngraph::runtime::reference;
namespace ngraph::runtime::reference::adaptive_pool;
namespace ngraph::runtime::reference::detail;
namespace ngraph::runtime::reference::details;
namespace ngraph::runtime::reference::fake_quantize_details;
namespace ngraph::runtime::reference::fft_common;
namespace ngraph::runtime::reference::internal;
namespace ngraph::runtime::reference::kernel;
namespace ngraph::runtime::reference::nms_common;
namespace ngraph::snippets;
namespace ngraph::snippets::isa;
namespace ngraph::snippets::op;
namespace ngraph::snippets::pass;
namespace ngraph::snippets::utils;
namespace openvino;
namespace openvino::cc;
namespace openvino::cc::internal;
namespace openvino::itt;
namespace opset9;
namespace ov;
namespace ov::batch_util;
namespace ov::cmp;
namespace ov::descriptor;
namespace ov::detail;
namespace ov::device;
namespace ov::device::capability;
namespace ov::element;
namespace ov::frontend;
namespace ov::hint;
namespace ov::intel_auto;
namespace ov::intel_cpu;
namespace ov::intel_gna;
namespace ov::intel_gpu;
namespace ov::intel_gpu::capability;
namespace ov::intel_gpu::hint;
namespace ov::intel_gpu::memory_type;
namespace ov::intel_gpu::ocl;
namespace ov::intel_myriad;
namespace ov::intel_myriad::hddl;
namespace ov::layout;
namespace ov::log;
namespace ov::op;
namespace ov::op::ShapeInferLSTM;
namespace ov::op::ShapeInferRange;
namespace ov::op::internal;
namespace ov::op::rnn;
namespace ov::op::util;
namespace ov::op::util::detail;
namespace ov::op::util::error;
namespace ov::op::util::rfft_common_validation;
namespace ov::op::v0;
namespace ov::op::v1;
namespace ov::op::v10;
namespace ov::op::v3;
namespace ov::op::v4;
namespace ov::op::v5;
namespace ov::op::v6;
namespace ov::op::v7;
namespace ov::op::v8;
namespace ov::op::v9;
namespace ov::opset1;
namespace ov::opset10;
namespace ov::opset2;
namespace ov::opset3;
namespace ov::opset4;
namespace ov::opset5;
namespace ov::opset6;
namespace ov::opset7;
namespace ov::opset8;
namespace ov::opset9;
namespace ov::pass;
namespace ov::pass::pattern;
namespace ov::pass::pattern::op;
namespace ov::preprocess;
namespace ov::runtime;
namespace ov::streams;
namespace ov::util;
namespace pass;
namespace pass::low_precision;
namespace std;
namespace transpose_sinking;
namespace transpose_sinking::sink_backward;
namespace transpose_sinking::sink_forward;
namespace util;
// typedefs
typedef struct ov_compiled_model ov_compiled_model_t;
typedef struct ov_core ov_core_t;
typedef struct ov_version ov_version_t;
typedef struct ov_dimension ov_dimension_t;
typedef struct ov_infer_request ov_infer_request_t;
typedef struct ov_layout ov_layout_t;
typedef struct ov_model ov_model_t;
typedef struct ov_output_const_port ov_output_const_port_t;
typedef struct ov_output_port ov_output_port_t;
typedef struct ov_partial_shape ov_partial_shape_t;
typedef struct ov_preprocess_prepostprocessor ov_preprocess_prepostprocessor_t;
typedef struct ov_preprocess_input_info ov_preprocess_input_info_t;
typedef struct ov_preprocess_input_tensor_info ov_preprocess_input_tensor_info_t;
typedef struct ov_preprocess_output_info ov_preprocess_output_info_t;
typedef struct ov_preprocess_output_tensor_info ov_preprocess_output_tensor_info_t;
typedef struct ov_preprocess_input_model_info ov_preprocess_input_model_info_t;
typedef struct ov_preprocess_preprocess_steps ov_preprocess_preprocess_steps_t;
typedef ov_dimension_t ov_rank_t;
typedef struct ov_tensor ov_tensor_t;
typedef std::unordered_map<std::shared_ptr<ov::opset1::Parameter>, std::unordered_set<size_t>> P2Btype;
typedef std::unordered_map<ov::NodeTypeInfo, std::function<bool(const std::shared_ptr<ov::Node>&, ov::element::Type, size_t idx)>> type_to_fuse_map;
typedef std::vector<std::pair<ov::element::Type, ov::element::Type>> precisions_array;
typedef std::unordered_map<ov::Node::type_info_t, std::function<void(const ov::Node&, std::ostream& ss)>> visualize_tree_ops_map_t;
// enums
enum AttributeSource;
enum CONVERSION_RESULT;
enum ELTWISE_TYPE;
enum ov_color_format_e;
enum ov_element_type_e;
enum ov_preprocess_resize_algorithm_e;
enum ov_status_e;
// structs
struct ov_ProfilingInfo_t;
struct ov_available_devices_t;
struct ov_callback_t;
struct ov_compiled_model_t;
struct ov_core_t;
struct ov_core_version;
struct ov_core_version_list;
struct ov_core_version_list_t;
struct ov_core_version_t;
struct ov_dimension;
struct ov_infer_request_t;
struct ov_layout_t;
struct ov_model_t;
struct ov_output_const_port_t;
struct ov_output_port_t;
struct ov_partial_shape;
struct ov_preprocess_input_info_t;
struct ov_preprocess_input_model_info_t;
struct ov_preprocess_input_tensor_info_t;
struct ov_preprocess_output_info_t;
struct ov_preprocess_output_tensor_info_t;
struct ov_preprocess_prepostprocessor_t;
struct ov_preprocess_preprocess_steps_t;
struct ov_profiling_info_list_t;
struct ov_profiling_info_t;
struct ov_rank_t;
struct ov_shape_t;
struct ov_tensor_t;
struct ov_version;
struct parse_result;
// templates
template TransofrmConvertToConvertTruncation;
// classes
class AttributeParameters;
template <InferenceEngine::Precision::ePrecision precision>
class BlobFactory;
class ConvertReduceBase;
class CvtReduceBase;
class MemorySolver;
template <class T>
class SharedAttribute;
class logstreambuf;
class ngraph;
// global variables
ov_property_key_supported_properties;
ov_property_key_available_devices;
ov_property_key_optimal_number_of_infer_requests;
ov_property_key_range_for_async_infer_requests;
ov_property_key_range_for_streams;
ov_property_key_device_full_name;
ov_property_key_device_capabilities;
ov_property_key_model_name;
ov_property_key_optimal_batch_size;
ov_property_key_max_batch_size;
ov_property_key_cache_dir;
ov_property_key_num_streams;
ov_property_key_affinity;
ov_property_key_inference_num_threads;
ov_property_key_hint_performance_mode;
ov_property_key_hint_inference_precision;
ov_property_key_hint_num_requests;
ov_property_key_log_level;
ov_property_key_hint_model_priority;
ov_property_key_enable_profiling;
ov_property_key_device_priorities;
std::vector<int64_t> strides;
std::vector<int64_t> dilation;
std::vector<int64_t> pads_begin;
std::vector<int64_t> pads_end;
std::vector<int64_t> output_padding;
int N;
int C;
int H;
int W;
Dims d;
Strides s;
int prec;
cv::gapi::GKernelPackage& pckg;
NGRAPH_EXTERN_C NGRAPH_API const char \* NGRAPH_VERSION_NUMBER;
// global functions
const char \* ov_get_error_info(ov_status_e status);
void ov_free(const char \* content);
ov_compiled_model_inputs_size(
const ov_compiled_model_t \* compiled_model,
size_t \* size
);
ov_compiled_model_input(
const ov_compiled_model_t \* compiled_model,
ov_output_const_port_t \*\* input_port
);
ov_compiled_model_input_by_index(
const ov_compiled_model_t \* compiled_model,
const size_t index,
ov_output_const_port_t \*\* input_port
);
ov_compiled_model_input_by_name(
const ov_compiled_model_t \* compiled_model,
const char \* name,
ov_output_const_port_t \*\* input_port
);
ov_compiled_model_outputs_size(
const ov_compiled_model_t \* compiled_model,
size_t \* size
);
ov_compiled_model_output(
const ov_compiled_model_t \* compiled_model,
ov_output_const_port_t \*\* output_port
);
ov_compiled_model_output_by_index(
const ov_compiled_model_t \* compiled_model,
const size_t index,
ov_output_const_port_t \*\* output_port
);
ov_compiled_model_output_by_name(
const ov_compiled_model_t \* compiled_model,
const char \* name,
ov_output_const_port_t \*\* output_port
);
ov_compiled_model_get_runtime_model(
const ov_compiled_model_t \* compiled_model,
ov_model_t \*\* model
);
ov_compiled_model_create_infer_request(
const ov_compiled_model_t \* compiled_model,
ov_infer_request_t \*\* infer_request
);
ov_compiled_model_set_property(const ov_compiled_model_t \* compiled_model, ...);
ov_compiled_model_get_property(
const ov_compiled_model_t \* compiled_model,
const char \* property_key,
char \*\* property_value
);
ov_compiled_model_export_model(
const ov_compiled_model_t \* compiled_model,
const char \* export_model_path
);
ov_compiled_model_free(ov_compiled_model_t \* compiled_model);
ov_get_openvino_version(ov_version_t \* version);
ov_version_free(ov_version_t \* version);
ov_core_create(ov_core_t \*\* core);
ov_core_create_with_config(const char \* xml_config_file, ov_core_t \*\* core);
ov_core_free(ov_core_t \* core);
ov_core_read_model(
const ov_core_t \* core,
const char \* model_path,
const char \* bin_path,
ov_model_t \*\* model
);
ov_core_read_model_from_memory(
const ov_core_t \* core,
const char \* model_str,
const ov_tensor_t \* weights,
ov_model_t \*\* model
);
ov_core_compile_model(
const ov_core_t \* core,
const ov_model_t \* model,
const char \* device_name,
const size_t property_args_size,
ov_compiled_model_t \*\* compiled_model,
...
);
ov_core_compile_model_from_file(
const ov_core_t \* core,
const char \* model_path,
const char \* device_name,
const size_t property_args_size,
ov_compiled_model_t \*\* compiled_model,
...
);
ov_core_set_property(const ov_core_t \* core, const char \* device_name, ...);
ov_core_get_property(
const ov_core_t \* core,
const char \* device_name,
const char \* property_key,
char \*\* property_value
);
ov_core_get_available_devices(
const ov_core_t \* core,
ov_available_devices_t \* devices
);
ov_available_devices_free(ov_available_devices_t \* devices);
ov_core_import_model(
const ov_core_t \* core,
const char \* content,
const size_t content_size,
const char \* device_name,
ov_compiled_model_t \*\* compiled_model
);
ov_core_versions_free(ov_core_version_list_t \* versions);
ov_dimension_is_dynamic(const ov_dimension_t dim);
ov_infer_request_set_tensor(
ov_infer_request_t \* infer_request,
const char \* tensor_name,
const ov_tensor_t \* tensor
);
ov_infer_request_set_tensor_by_port(
ov_infer_request_t \* infer_request,
const ov_output_port_t \* port,
const ov_tensor_t \* tensor
);
ov_infer_request_set_tensor_by_const_port(
ov_infer_request_t \* infer_request,
const ov_output_const_port_t \* port,
const ov_tensor_t \* tensor
);
ov_infer_request_set_input_tensor_by_index(
ov_infer_request_t \* infer_request,
const size_t idx,
const ov_tensor_t \* tensor
);
ov_infer_request_set_input_tensor(
ov_infer_request_t \* infer_request,
const ov_tensor_t \* tensor
);
ov_infer_request_set_output_tensor_by_index(
ov_infer_request_t \* infer_request,
const size_t idx,
const ov_tensor_t \* tensor
);
ov_infer_request_set_output_tensor(
ov_infer_request_t \* infer_request,
const ov_tensor_t \* tensor
);
ov_infer_request_get_tensor(
const ov_infer_request_t \* infer_request,
const char \* tensor_name,
ov_tensor_t \*\* tensor
);
ov_infer_request_get_tensor_by_const_port(
const ov_infer_request_t \* infer_request,
const ov_output_const_port_t \* port,
ov_tensor_t \*\* tensor
);
ov_infer_request_get_tensor_by_port(
const ov_infer_request_t \* infer_request,
const ov_output_port_t \* port,
ov_tensor_t \*\* tensor
);
ov_infer_request_get_input_tensor_by_index(
const ov_infer_request_t \* infer_request,
const size_t idx,
ov_tensor_t \*\* tensor
);
ov_infer_request_get_input_tensor(
const ov_infer_request_t \* infer_request,
ov_tensor_t \*\* tensor
);
ov_infer_request_get_output_tensor_by_index(
const ov_infer_request_t \* infer_request,
const size_t idx,
ov_tensor_t \*\* tensor
);
ov_infer_request_get_output_tensor(
const ov_infer_request_t \* infer_request,
ov_tensor_t \*\* tensor
);
ov_infer_request_infer(ov_infer_request_t \* infer_request);
ov_infer_request_cancel(ov_infer_request_t \* infer_request);
ov_infer_request_start_async(ov_infer_request_t \* infer_request);
ov_infer_request_wait(ov_infer_request_t \* infer_request);
ov_infer_request_wait_for(
ov_infer_request_t \* infer_request,
const int64_t timeout
);
ov_infer_request_set_callback(
ov_infer_request_t \* infer_request,
const ov_callback_t \* callback
);
ov_infer_request_free(ov_infer_request_t \* infer_request);
ov_infer_request_get_profiling_info(
const ov_infer_request_t \* infer_request,
ov_profiling_info_list_t \* profiling_infos
);
ov_profiling_info_list_free(ov_profiling_info_list_t \* profiling_infos);
ov_layout_create(const char \* layout_desc, ov_layout_t \*\* layout);
ov_layout_free(ov_layout_t \* layout);
ov_layout_to_string(const ov_layout_t \* layout);
ov_model_free(ov_model_t \* model);
ov_model_const_input(
const ov_model_t \* model,
ov_output_const_port_t \*\* input_port
);
ov_model_const_input_by_name(
const ov_model_t \* model,
const char \* tensor_name,
ov_output_const_port_t \*\* input_port
);
ov_model_const_input_by_index(
const ov_model_t \* model,
const size_t index,
ov_output_const_port_t \*\* input_port
);
ov_model_input(const ov_model_t \* model, ov_output_port_t \*\* input_port);
ov_model_input_by_name(
const ov_model_t \* model,
const char \* tensor_name,
ov_output_port_t \*\* input_port
);
ov_model_input_by_index(
const ov_model_t \* model,
const size_t index,
ov_output_port_t \*\* input_port
);
ov_model_const_output(
const ov_model_t \* model,
ov_output_const_port_t \*\* output_port
);
ov_model_const_output_by_index(
const ov_model_t \* model,
const size_t index,
ov_output_const_port_t \*\* output_port
);
ov_model_const_output_by_name(
const ov_model_t \* model,
const char \* tensor_name,
ov_output_const_port_t \*\* output_port
);
ov_model_output(const ov_model_t \* model, ov_output_port_t \*\* output_port);
ov_model_output_by_index(
const ov_model_t \* model,
const size_t index,
ov_output_port_t \*\* output_port
);
ov_model_output_by_name(
const ov_model_t \* model,
const char \* tensor_name,
ov_output_port_t \*\* output_port
);
ov_model_inputs_size(const ov_model_t \* model, size_t \* input_size);
ov_model_outputs_size(const ov_model_t \* model, size_t \* output_size);
ov_model_is_dynamic(const ov_model_t \* model);
ov_model_reshape(
const ov_model_t \* model,
const char \*\* tensor_names,
const ov_partial_shape_t \* partial_shapes,
size_t size
);
ov_model_reshape_input_by_name(
const ov_model_t \* model,
const char \* tensor_name,
const ov_partial_shape_t partial_shape
);
ov_model_reshape_single_input(
const ov_model_t \* model,
const ov_partial_shape_t partial_shape
);
ov_model_reshape_by_port_indexes(
const ov_model_t \* model,
const size_t \* port_indexes,
const ov_partial_shape_t \* partial_shape,
size_t size
);
ov_model_reshape_by_ports(
const ov_model_t \* model,
const ov_output_port_t \*\* output_ports,
const ov_partial_shape_t \* partial_shapes,
size_t size
);
ov_model_get_friendly_name(const ov_model_t \* model, char \*\* friendly_name);
ov_const_port_get_shape(
const ov_output_const_port_t \* port,
ov_shape_t \* tensor_shape
);
ov_port_get_shape(const ov_output_port_t \* port, ov_shape_t \* tensor_shape);
ov_port_get_any_name(
const ov_output_const_port_t \* port,
char \*\* tensor_name
);
ov_port_get_partial_shape(
const ov_output_const_port_t \* port,
ov_partial_shape_t \* partial_shape
);
ov_port_get_element_type(
const ov_output_const_port_t \* port,
ov_element_type_e \* tensor_type
);
ov_output_port_free(ov_output_port_t \* port);
ov_output_const_port_free(ov_output_const_port_t \* port);
ov_partial_shape_create(
const int64_t rank,
const ov_dimension_t \* dims,
ov_partial_shape_t \* partial_shape_obj
);
ov_partial_shape_create_dynamic(
const ov_rank_t rank,
const ov_dimension_t \* dims,
ov_partial_shape_t \* partial_shape_obj
);
ov_partial_shape_create_static(
const int64_t rank,
const int64_t \* dims,
ov_partial_shape_t \* partial_shape_obj
);
ov_partial_shape_free(ov_partial_shape_t \* partial_shape);
ov_partial_shape_to_shape(
const ov_partial_shape_t partial_shape,
ov_shape_t \* shape
);
ov_shape_to_partial_shape(
const ov_shape_t shape,
ov_partial_shape_t \* partial_shape
);
ov_partial_shape_is_dynamic(const ov_partial_shape_t partial_shape);
ov_partial_shape_to_string(const ov_partial_shape_t partial_shape);
ov_preprocess_prepostprocessor_create(
const ov_model_t \* model,
ov_preprocess_prepostprocessor_t \*\* preprocess
);
ov_preprocess_prepostprocessor_free(ov_preprocess_prepostprocessor_t \* preprocess);
ov_preprocess_prepostprocessor_get_input_info(
const ov_preprocess_prepostprocessor_t \* preprocess,
ov_preprocess_input_info_t \*\* preprocess_input_info
);
ov_preprocess_prepostprocessor_get_input_info_by_name(
const ov_preprocess_prepostprocessor_t \* preprocess,
const char \* tensor_name,
ov_preprocess_input_info_t \*\* preprocess_input_info
);
ov_preprocess_prepostprocessor_get_input_info_by_index(
const ov_preprocess_prepostprocessor_t \* preprocess,
const size_t tensor_index,
ov_preprocess_input_info_t \*\* preprocess_input_info
);
ov_preprocess_input_info_free(ov_preprocess_input_info_t \* preprocess_input_info);
ov_preprocess_input_info_get_tensor_info(
const ov_preprocess_input_info_t \* preprocess_input_info,
ov_preprocess_input_tensor_info_t \*\* preprocess_input_tensor_info
);
ov_preprocess_input_tensor_info_free(ov_preprocess_input_tensor_info_t \* preprocess_input_tensor_info);
ov_preprocess_input_info_get_preprocess_steps(
const ov_preprocess_input_info_t \* preprocess_input_info,
ov_preprocess_preprocess_steps_t \*\* preprocess_input_steps
);
ov_preprocess_preprocess_steps_free(ov_preprocess_preprocess_steps_t \* preprocess_input_process_steps);
ov_preprocess_preprocess_steps_resize(
ov_preprocess_preprocess_steps_t \* preprocess_input_process_steps,
const ov_preprocess_resize_algorithm_e resize_algorithm
);
ov_preprocess_preprocess_steps_scale(
ov_preprocess_preprocess_steps_t \* preprocess_input_process_steps,
float value
);
ov_preprocess_preprocess_steps_mean(
ov_preprocess_preprocess_steps_t \* preprocess_input_process_steps,
float value
);
ov_preprocess_preprocess_steps_crop(
ov_preprocess_preprocess_steps_t \* preprocess_input_process_steps,
int32_t \* begin,
int32_t begin_size,
int32_t \* end,
int32_t end_size
);
ov_preprocess_preprocess_steps_convert_layout(
ov_preprocess_preprocess_steps_t \* preprocess_input_process_steps,
ov_layout_t \* layout
);
ov_preprocess_preprocess_steps_reverse_channels(ov_preprocess_preprocess_steps_t \* preprocess_input_process_steps);
ov_preprocess_input_tensor_info_set_element_type(
ov_preprocess_input_tensor_info_t \* preprocess_input_tensor_info,
const ov_element_type_e element_type
);
ov_preprocess_input_tensor_info_set_color_format(
ov_preprocess_input_tensor_info_t \* preprocess_input_tensor_info,
const ov_color_format_e colorFormat
);
ov_preprocess_input_tensor_info_set_spatial_static_shape(
ov_preprocess_input_tensor_info_t \* preprocess_input_tensor_info,
const size_t input_height,
const size_t input_width
);
ov_preprocess_preprocess_steps_convert_element_type(
ov_preprocess_preprocess_steps_t \* preprocess_input_process_steps,
const ov_element_type_e element_type
);
ov_preprocess_preprocess_steps_convert_color(
ov_preprocess_preprocess_steps_t \* preprocess_input_process_steps,
const ov_color_format_e colorFormat
);
ov_preprocess_input_tensor_info_set_from(
ov_preprocess_input_tensor_info_t \* preprocess_input_tensor_info,
const ov_tensor_t \* tensor
);
ov_preprocess_input_tensor_info_set_layout(
ov_preprocess_input_tensor_info_t \* preprocess_input_tensor_info,
ov_layout_t \* layout
);
ov_preprocess_prepostprocessor_get_output_info(
const ov_preprocess_prepostprocessor_t \* preprocess,
ov_preprocess_output_info_t \*\* preprocess_output_info
);
ov_preprocess_prepostprocessor_get_output_info_by_index(
const ov_preprocess_prepostprocessor_t \* preprocess,
const size_t tensor_index,
ov_preprocess_output_info_t \*\* preprocess_output_info
);
ov_preprocess_prepostprocessor_get_output_info_by_name(
const ov_preprocess_prepostprocessor_t \* preprocess,
const char \* tensor_name,
ov_preprocess_output_info_t \*\* preprocess_output_info
);
ov_preprocess_output_info_free(ov_preprocess_output_info_t \* preprocess_output_info);
ov_preprocess_output_info_get_tensor_info(
const ov_preprocess_output_info_t \* preprocess_output_info,
ov_preprocess_output_tensor_info_t \*\* preprocess_output_tensor_info
);
ov_preprocess_output_tensor_info_free(ov_preprocess_output_tensor_info_t \* preprocess_output_tensor_info);
ov_preprocess_output_set_element_type(
ov_preprocess_output_tensor_info_t \* preprocess_output_tensor_info,
const ov_element_type_e element_type
);
ov_preprocess_input_info_get_model_info(
const ov_preprocess_input_info_t \* preprocess_input_info,
ov_preprocess_input_model_info_t \*\* preprocess_input_model_info
);
ov_preprocess_input_model_info_free(ov_preprocess_input_model_info_t \* preprocess_input_model_info);
ov_preprocess_input_model_info_set_layout(
ov_preprocess_input_model_info_t \* preprocess_input_model_info,
ov_layout_t \* layout
);
ov_preprocess_prepostprocessor_build(
const ov_preprocess_prepostprocessor_t \* preprocess,
ov_model_t \*\* model
);
ov_rank_is_dynamic(const ov_rank_t rank);
ov_shape_create(const int64_t rank, const int64_t \* dims, ov_shape_t \* shape);
ov_shape_free(ov_shape_t \* shape);
ov_tensor_create_from_host_ptr(
const ov_element_type_e type,
const ov_shape_t shape,
void \* host_ptr,
ov_tensor_t \*\* tensor
);
ov_tensor_create(
const ov_element_type_e type,
const ov_shape_t shape,
ov_tensor_t \*\* tensor
);
ov_tensor_set_shape(ov_tensor_t \* tensor, const ov_shape_t shape);
ov_tensor_get_shape(const ov_tensor_t \* tensor, ov_shape_t \* shape);
ov_tensor_get_element_type(
const ov_tensor_t \* tensor,
ov_element_type_e \* type
);
ov_tensor_get_size(const ov_tensor_t \* tensor, size_t \* elements_size);
ov_tensor_get_byte_size(const ov_tensor_t \* tensor, size_t \* byte_size);
ov_tensor_data(const ov_tensor_t \* tensor, void \*\* data);
ov_tensor_free(ov_tensor_t \* tensor);
OV_CC_DOMAINS(ov_pass);
OV_CC_DOMAINS(internal_op);
CONVERSION_RESULT check_constant(
const std::shared_ptr<ngraph::op::Constant>& constant,
const ngraph::PartialShape& shape
);
template <class T>
void shape_infer(
const ov::op::v0::FakeQuantize \* op,
const std::vector<T>& input_shapes,
std::vector<T>& output_shapes
);
template <typename T>
static auto remark(T x);
template <class OpType, class T>
void copy_shape_infer(
const OpType \* op,
const std::vector<T>& input_shapes,
std::vector<T>& output_shapes
);
template <class OpType, class T>
void first_input_passthrough_infer(
const OpType \* op,
const std::vector<T>& input_shapes,
std::vector<T>& output_shapes
);
template <class OpType, class T>
void eltwise_shape_infer(
const OpType \* op,
const std::vector<T>& input_shapes,
std::vector<T>& output_shapes
);
template <class T>
bool get_data_as_int64(
size_t idx,
const ov::Node \* op,
std::vector<int64_t>& axes_value,
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {}
);
bool get_data_as_int64< ov::PartialShape >(
size_t idx,
const ov::Node \* op,
std::vector<int64_t>& axes_value,
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data
);
template <class T>
bool get_data_as_float(
size_t idx,
const ov::Node \* op,
std::vector<float>& axes_value,
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {}
);
bool get_data_as_float< ov::PartialShape >(
size_t idx,
const ov::Node \* op,
std::vector<float>& axes_value,
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data
);
template <class T>
bool get_data_as_shape(
size_t idx,
const ov::Node \* op,
T& shape,
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {}
);
bool get_data_as_shape< ov::PartialShape >(
size_t idx,
const ov::Node \* op,
ov::PartialShape& shape,
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data
);
template <class T>
void check_divided_result(
const ov::Node \* op,
const T& res,
const T& divided,
const typename T::value_type& divisor
);
void check_divided_result< ov::Dimension >(
const ov::Node \* op,
const ov::Dimension& res,
const ov::Dimension& divided,
const typename ov::Dimension::value_type& divisor
);
template <typename T>
std::vector<T> read_vector(std::shared_ptr<ngraph::runtime::Tensor> tv);
template <class T, ngraph::element::Type_t ET>
std::vector<T> array_2_vector(
typename ngraph::element_type_traits<ET>::value_type \* data,
size_t size
);
template <typename T>
std::vector<T> host_tensor_2_vector(ngraph::HostTensorPtr tensor);
std::vector<float> NGRAPH_API read_float_vector(std::shared_ptr<ngraph::runtime::Tensor> tv);
std::vector<int64_t> NGRAPH_API read_index_vector(std::shared_ptr<ngraph::runtime::Tensor> tv);
NGRAPH_API std::ostream& operator << (
std::ostream& os,
const ngraph::NodeVector& nv
);
NGRAPH_API const char \* get_ngraph_version_string();
template <class T>
void dynamic_inference(
const T& input_shape,
T& output_shape,
bool keep_dims
);
void dynamic_inference< ov::PartialShape >(
const ov::PartialShape& input_shape,
ov::PartialShape& output_shape,
bool keep_dims
);
template <class T>
void reduce_shape_infer(
const ov::op::util::ReductionBase \* op,
bool keep_dims,
const T& input_shape,
T& output_shape,
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {}
);
template <class T>
void shape_infer(
const ov::op::util::ArithmeticReductionKeepDims \* op,
const std::vector<T>& input_shapes,
std::vector<T>& output_shapes,
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {}
);
template <class T>
void shape_infer(
const ov::op::util::LogicalReductionKeepDims \* op,
const std::vector<T>& input_shapes,
std::vector<T>& output_shapes,
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {}
);
template <class T>
void shape_infer(
const ov::op::util::ScatterNDBase \* op,
const std::vector<T>& input_shapes,
std::vector<T>& output_shapes
);
template <class T>
void shape_infer(
const ov::opset1::Reshape \* op,
const std::vector<T>& input_shapes,
std::vector<T>& output_shapes,
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {}
);
template <class T>
void dynamic_shape(T& output_shape);
void dynamic_shape< ov::PartialShape >(ov::PartialShape& output_shape);
template <class T>
void shape_of_shape_infer(
const T& input_shape,
T& output_shape
);
template <class T>
void shape_infer(
const ov::opset1::ShapeOf \* op,
const std::vector<T>& input_shapes,
std::vector<T>& output_shapes
);
template <class T>
void shape_infer(
const ov::opset3::ShapeOf \* op,
const std::vector<T>& input_shapes,
std::vector<T>& output_shapes
);
InferenceEngine::Blob::Ptr make_blob_with_precision(const InferenceEngine::TensorDesc& desc);
InferenceEngine::Blob::Ptr make_blob_with_precision(
const InferenceEngine::TensorDesc& desc,
void \* ptr
);
InferenceEngine::Blob::Ptr make_blob_with_precision(
const InferenceEngine::TensorDesc& desc,
const std::shared_ptr<InferenceEngine::IAllocator>& alloc
);
InferenceEngine::Blob::Ptr make_plain_blob(
InferenceEngine::Precision prec,
const InferenceEngine::SizeVector dims
);
template <class... Args>
InferenceEngine::Blob::Ptr make_blob_with_precision(
InferenceEngine::Precision precision,
Args&&... args
);
template <typename T>
void CopyVectorToBlob(
const InferenceEngine::Blob::Ptr outputBlob,
const std::vector<T>& inputVector
);
int ie_memcpy(void \* dest, size_t destsz, void const \* src, size_t count);
parse_result ParseXml(const char \* file_path);
// macros
#define ADD_MATCHER(obj, region, ...)
#define ADD_MATCHER_FOR_THIS(region, ...)
#define AUTO_CONFIG_KEY(name)
#define BWDCMP_RTTI_DECLARATION
#define BWDCMP_RTTI_DEFINITION(CLASS)
#define CALL_OVERLOAD(name, exc_class, ctx, ...)
#define CLDNN_CONFIG_KEY(name)
#define CL_HPP_MINIMUM_OPENCL_VERSION
#define CL_HPP_TARGET_OPENCL_VERSION
#define CONFIG_KEY(name)
#define CONFIG_KEY_INTERNAL(name)
#define CONFIG_VALUE(name)
#define CONFIG_VALUE_INTERNAL(name)
#define COPY_TENSOR(a)
#define COUNT_ARGS_MAXN(...)
#define CPU_CONFIG_KEY(name)
#define CPU_SIMD
#define CV_AVX2
#define CV_AVX512_SKX
#define CV_CPU_HAS_SUPPORT_SSE2
#define CV_NEON
#define CV_SIMD128
#define CV_SIMD256
#define CV_SIMD512
#define CV_SSE
#define CV_SSE2
#define CV_SSE3
#define CV_SSE4_1
#define CV_SSE4_2
#define CV_SSSE3
#define DECLARE_GPU_PARAM_KEY(name, ...)
#define DECLARE_GPU_PARAM_VALUE(name)
#define DEFINE_PROP(prop_name)
#define EXEC_NETWORK_METRIC_KEY(name)
#define EXPAND_ARGS(args)
#define FOREACH_CHILD(c, p, tag)
#define FRONTEND_API
#define FRONTEND_C_API
#define FRONT_END_CHECK_IMPLEMENTED(COND, NAME)
#define FRONT_END_GENERAL_CHECK(...)
#define FRONT_END_INITIALIZATION_CHECK(...)
#define FRONT_END_NOT_IMPLEMENTED(NAME)
#define FRONT_END_OP_CONVERSION_CHECK(...)
#define FRONT_END_THROW(MSG)
#define GLUE(x, y)
#define GNA_CONFIG_KEY(name)
#define GNA_CONFIG_VALUE(name)
#define GPU_CONFIG_KEY(name)
#define GPU_METRIC_KEY(name)
#define GPU_PARAM_KEY(name)
#define GPU_PARAM_VALUE(name)
#define HETERO_CONFIG_KEY(name)
#define HSUM(xmaxdf)
#define IE_ASSERT(EXPRESSION)
#define IE_CREATE_EXTENSION
#define IE_CREATE_PLUGIN
#define IE_DEFINE_EXTENSION_CREATE_FUNCTION(ExtensionType)
#define IE_DEFINE_EXTENSION_CREATE_FUNCTION_DECLARATION(_IE_CREATE_EXTENSION_FUNC)
#define IE_DEFINE_PLUGIN_CREATE_FUNCTION(PluginType, version, ...)
#define IE_DEFINE_PLUGIN_CREATE_FUNCTION_DECLARATION(_IE_CREATE_PLUGIN_FUNC)
#define IE_EXCEPTION_SWITCH(STATUS, TYPE_ALIAS, ...)
#define IE_SET_METRIC_RETURN(name, ...)
#define IE_THROW(...)
#define IE_VERSION_MAJOR
#define IE_VERSION_MINOR
#define IE_VERSION_PATCH
#define ITT_FUNCTION_NAME
#define LP_TRANSFORMATIONS_API
#define MANUAL_SIMD
#define MATCHER_SCOPE(region)
#define METRIC_KEY(name)
#define METRIC_VALUE(name)
#define MULTI_CONFIG_KEY(name)
#define NGRAPH_API
#define NGRAPH_API_C
#define NGRAPH_CHECK(...)
#define NGRAPH_CHECK_HELPER(exc_class, ctx, ...)
#define NGRAPH_CHECK_HELPER1(exc_class, ctx, check)
#define NGRAPH_CHECK_HELPER2(exc_class, ctx, check, ...)
#define NGRAPH_DEBUG
#define NGRAPH_DEPRECATED(msg)
#define NGRAPH_ENUM_DEPRECATED(msg)
#define NGRAPH_ERR
#define NGRAPH_EXTERN_C
#define NGRAPH_HELPER_DLL_EXPORT
#define NGRAPH_HELPER_DLL_IMPORT
#define NGRAPH_INFO
#define NGRAPH_OP(a, b)
#define NGRAPH_RTTI_DECLARATION
#define NGRAPH_RTTI_DEFINITION(...)
#define NGRAPH_SUPPRESS_DEPRECATED_END
#define NGRAPH_SUPPRESS_DEPRECATED_START
#define NGRAPH_UNREACHABLE(...)
#define NGRAPH_WARN
#define NODE_VALIDATION_CHECK(node, ...)
#define OPENVINO_API
#define OPENVINO_API_C(...)
#define OPENVINO_ASSERT(...)
#define OPENVINO_ASSERT_HELPER(exc_class, ctx, ...)
#define OPENVINO_ASSERT_HELPER1(exc_class, ctx, check)
#define OPENVINO_ASSERT_HELPER2(exc_class, ctx, check, ...)
#define OPENVINO_CREATE_EXTENSIONS(extensions)
#define OPENVINO_C_API(...)
#define OPENVINO_C_API_CALLBACK
#define OPENVINO_C_VAR(...)
#define OPENVINO_DEBUG
#define OPENVINO_ERR
#define OPENVINO_EXTENSION_API
#define OPENVINO_EXTENSION_C_API
#define OPENVINO_FRAMEWORK_MAP(FRAMEWORK, ...)
#define OPENVINO_INFO
#define OPENVINO_NOT_IMPLEMENTED
#define OPENVINO_OP(...)
#define OPENVINO_RTTI(...)
#define OPENVINO_UNREACHABLE(...)
#define OPENVINO_VERSION_MAJOR
#define OPENVINO_VERSION_MINOR
#define OPENVINO_VERSION_PATCH
#define OPENVINO_WARN
#define OVERLOAD_MACRO(name, count)
#define OVERLOAD_MACRO1(name, count)
#define OVERLOAD_MACRO2(name, count)
#define OV_CASE(Case, Type)
#define OV_CASE2(Case1, Case2, Type1, Type2)
#define OV_CC_CAT
#define OV_CC_EXPAND
#define OV_CC_TOSTRING
#define OV_COLLECT_ATTACHED_EXTENSIONS(FRAMEWORK)
#define OV_FRONTEND_API_VERSION
#define OV_ITT_DOMAIN(...)
#define OV_ITT_GROUP(group)
#define OV_ITT_SCOPE(group, ...)
#define OV_ITT_SCOPED_TASK(...)
#define OV_ITT_SCOPE_CHAIN(group, ...)
#define OV_ITT_SCOPE_NEXT(group, ...)
#define OV_ITT_SCOPE_SKIP(group, chainId)
#define OV_ITT_TASK_CHAIN(...)
#define OV_ITT_TASK_NEXT(...)
#define OV_ITT_TASK_SKIP(chainId)
#define OV_PP_ARG_N(_0, _1, _2, _3, _4, N, ...)
#define OV_PP_ARG_PLACEHOLDER_1
#define OV_PP_CAT(x, y)
#define OV_PP_CAT3(x, y, z)
#define OV_PP_CAT3_(x, y, z)
#define OV_PP_CAT4(x, y, z, w)
#define OV_PP_CAT4_(x, y, z, w)
#define OV_PP_CAT_(x, y)
#define OV_PP_EXPAND(X)
#define OV_PP_IS_ENABLED(x)
#define OV_PP_IS_ENABLED1(val)
#define OV_PP_IS_ENABLED2(arg1_or_junk)
#define OV_PP_NARG(...)
#define OV_PP_NARG_(...)
#define OV_PP_NO_ARGS(NAME)
#define OV_PP_OVERLOAD(NAME, ...)
#define OV_PP_RSEQ_N()
#define OV_PP_SECOND_ARG(...)
#define OV_PP_SECOND_ARG_(...)
#define OV_PP_SECOND_ARG_GET(ignored, val, ...)
#define OV_PP_TOSTRING(...)
#define OV_PP_TOSTRING_(...)
#define OV_PREPROC_PLUGIN_CALL_STATEMENT(...)
#define OV_SWITCH(Module, fn, ctx, val, ...)
#define PROJECT_ROOT_DIR
#define REGISTER_DISABLED_PASS(obj, region, ...)
#define REGISTER_PASS(obj, region, ...)
#define RETURN_ARG_COUNT( \
_1_, \
_2_, \
_3_, \
_4_, \
_5_, \
_6, \
_7, \
_8, \
_9, \
_10, \
_11, \
_12, \
_13, \
_14, \
_15, \
_16, \
_17, \
_18, \
_19, \
_20, \
_21, \
_22, \
_23, \
_24, \
_25, \
count, \
... \
)
#define TBB_PREVIEW_WAITING_FOR_WORKERS
#define THROW_IE_LPT_EXCEPTION(node)
#define THROW_TRANSFORMATION_EXCEPTION
#define TRANSFORMATIONS_API
#define TYPE_CASE(a)
#define USE_CVKL
#define USE_FACTORY(precision)
#define _IE_SUPPRESS_DEPRECATED_END_MSVC
#define _IE_SUPPRESS_DEPRECATED_START_MSVC
#define _NGRAPH_RTTI_DEFINITION_COMMON(CLASS)
#define _NGRAPH_RTTI_DEFINITION_NO_PARENT(CLASS, TYPE_NAME, _VERSION_INDEX)
#define _NGRAPH_RTTI_DEFINITION_WITH_PARENT( \
CLASS, \
TYPE_NAME, \
_VERSION_INDEX, \
PARENT_CLASS \
)
#define _OPENVINO_OP_REG(a, b)
#define _OPENVINO_OP_REG
#define _OPENVINO_RTTI_DEFINITION_SELECTOR(_1, _2, _3, _4, NAME, ...)
#define _OPENVINO_RTTI_EXPAND(X)
#define _OPENVINO_RTTI_OP_WITH_TYPE(TYPE_NAME)
#define _OPENVINO_RTTI_OP_WITH_TYPE_VERSION(TYPE_NAME, VERSION_NAME)
#define _OPENVINO_RTTI_WITH_TYPE(TYPE_NAME)
#define _OPENVINO_RTTI_WITH_TYPE_VERSION(TYPE_NAME, VERSION_NAME)
#define _OPENVINO_RTTI_WITH_TYPE_VERSIONS_PARENT( \
TYPE_NAME, \
VERSION_NAME, \
PARENT_CLASS, \
OLD_VERSION \
)
#define _OPENVINO_RTTI_WITH_TYPE_VERSION_PARENT( \
TYPE_NAME, \
VERSION_NAME, \
PARENT_CLASS \
)
#define createNodeIfRegistered(Module, key, ...)
#define cu32(x)
#define registerNodeIfRequired(Module, Name, key, Impl)
#define strncasecmp
Detailed Documentation¶
Global Functions¶
ov_model_is_dynamic(const ov_model_t * model)
Returns true if any operation defined in the model has a dynamic shape.
Parameters:
model - A pointer to the ov_model_t.
Returns:
true if the model contains dynamic shapes
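A minimal usage sketch in the C API (the model path is a placeholder, status-code checking of the returned values is omitted, and passing NULL for the weights path is an assumption):
ov_core_t* core = NULL;
ov_model_t* model = NULL;
ov_core_create(&core);
ov_core_read_model(core, "model.xml", NULL, &model);  // "model.xml" is a placeholder path
if (ov_model_is_dynamic(model)) {
    // e.g. call ov_model_reshape_single_input() with a static shape before compiling
}
ov_model_free(model);
ov_core_free(core);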
Macros¶
#define AUTO_CONFIG_KEY(name)
A macro which provides an AUTO-mangled name for a configuration key with the given name.
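For illustration, a hedged sketch with the Inference Engine 1.0 C++ API, assuming the AUTO plugin accepts a DEVICE_LIST key:
InferenceEngine::Core ie;
// AUTO_CONFIG_KEY(DEVICE_LIST) yields the AUTO-mangled key string understood by the AUTO plugin
ie.SetConfig({{AUTO_CONFIG_KEY(DEVICE_LIST), "CPU,GPU"}}, "AUTO");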
#define BWDCMP_RTTI_DECLARATION
Note: Please don’t use this macro for new operations.
#define CLDNN_CONFIG_KEY(name)
shortcut for defining configuration keys
#define CONFIG_KEY(name)
shortcut for defining configuration keys
#define CONFIG_VALUE(name)
shortcut for defining configuration values
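As an illustration with the Inference Engine 1.0 C++ API (a sketch, not a definitive usage; the CPU device name is assumed to be available):
#include <ie_core.hpp>

InferenceEngine::Core core;
// CONFIG_KEY(PERF_COUNT) expands to the key string "PERF_COUNT",
// CONFIG_VALUE(YES) expands to the value string "YES"
core.SetConfig({{CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES)}}, "CPU");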
#define CPU_CONFIG_KEY(name)
shortcut for defining configuration keys
#define DECLARE_GPU_PARAM_KEY(name, ...)
Shortcut for defining object parameter keys.
#define DECLARE_GPU_PARAM_VALUE(name)
Shortcut for defining possible values for object parameter keys.
#define DEFINE_PROP(prop_name)
Convenient way to declare a property with backward compatibility to 2D members.
#define EXEC_NETWORK_METRIC_KEY(name)
shortcut for defining common Inference Engine ExecutableNetwork metrics
#define FRONT_END_CHECK_IMPLEMENTED(COND, NAME)
Assert macro.
Parameters:
COND - Condition. If ‘false’, throws ‘NotImplementedFailure’.
NAME - Name of the function that is not implemented.
#define FRONT_END_GENERAL_CHECK(...)
Macro to check whether a boolean condition holds.
Parameters:
cond - Condition to check.
… - Additional error message info to be added to the error message.
Throws if the condition is false.
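A minimal sketch of how such a check is typically written inside frontend conversion code (the condition and message are illustrative):
FRONT_END_GENERAL_CHECK(node.get_input_size() == 2,
                        "Expected 2 inputs, got ", node.get_input_size());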
#define FRONT_END_INITIALIZATION_CHECK(...)
Macro to check whether a boolean condition holds.
Parameters:
cond - Condition to check.
… - Additional error message info to be added to the error message.
Throws if the condition is false.
#define FRONT_END_NOT_IMPLEMENTED(NAME)
Assert macro.
Parameters:
NAME - Name of the function that is not implemented.
#define FRONT_END_OP_CONVERSION_CHECK(...)
Macro to check whether a boolean condition holds.
Parameters:
cond - Condition to check.
… - Additional error message info to be added to the error message.
Throws if the condition is false.
#define FRONT_END_THROW(MSG)
Assert macro.
Parameters:
MSG - Error message.
#define GNA_CONFIG_KEY(name)
Shortcut for defining configuration keys.
#define GNA_CONFIG_VALUE(name)
Shortcut for defining configuration values.
#define GPU_CONFIG_KEY(name)
shortcut for defining configuration keys
#define GPU_METRIC_KEY(name)
shortcut for defining GPU plugin metrics
#define GPU_PARAM_KEY(name)
Shortcut for defining configuration keys.
#define GPU_PARAM_VALUE(name)
Shortcut for defining configuration values.
#define HETERO_CONFIG_KEY(name)
Shortcut for defining HETERO configuration keys.
#define IE_ASSERT(EXPRESSION)
Uses the assert() function if NDEBUG is not defined and throws an InferenceEngine exception otherwise.
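For example (blob is an illustrative InferenceEngine::Blob::Ptr):
IE_ASSERT(blob != nullptr);  // asserts in debug builds, throws an exception in release builds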
#define IE_CREATE_EXTENSION
Defines a name of a function creating extension instance.
#define IE_DEFINE_EXTENSION_CREATE_FUNCTION(ExtensionType)
Generates extension creation function.
#define IE_EXCEPTION_SWITCH(STATUS, TYPE_ALIAS, ...)
Generates a switch statement over error codes and maps them to the corresponding exception types.
#define IE_THROW(...)
A macro used to throw specified exception with a description.
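For example (messages are illustrative; the exception type argument is optional):
IE_THROW() << "Cannot find a suitable device";
IE_THROW(NotImplemented) << "This method is not implemented";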
#define IE_VERSION_MAJOR
Defines Inference Engine major version.
#define IE_VERSION_MINOR
Defines Inference Engine minor version.
#define IE_VERSION_PATCH
Defines Inference Engine patch version.
#define METRIC_KEY(name)
shortcut for defining common Inference Engine metrics
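For instance, a sketch with the 1.0 C++ API (the CPU device name is assumed; error handling omitted):
InferenceEngine::Core core;
// METRIC_KEY(AVAILABLE_DEVICES) expands to the metric name string "AVAILABLE_DEVICES"
auto devices = core.GetMetric("CPU", METRIC_KEY(AVAILABLE_DEVICES)).as<std::vector<std::string>>();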
#define METRIC_VALUE(name)
shortcut for defining metric values
#define MULTI_CONFIG_KEY(name)
A macro which provides a MULTI-mangled name for a configuration key with the given name.
#define NGRAPH_RTTI_DECLARATION
Helper macro that puts the necessary declarations of an RTTI block inside a class definition. Should be used in the scope of a class that requires type identification beyond the one provided by C++ RTTI. Recommended for all classes inherited from ov::Node to enable pattern matching for them. Accepts the necessary type identification details, such as the type of the operation, the version, and an optional parent class.
Applying this macro within a class definition provides a declaration of the type_info static constant (for backward compatibility with the old RTTI definition for Node), a static function get_type_info_static that returns a reference to an object equal to type_info but not necessarily the same object, and a get_type_info virtual function that overrides Node::get_type_info and returns a reference to the same object that get_type_info_static returns.
Use this macro as a public part of the class definition:
class MyOp : public Node
{
public:
    // Don't use Node as a parent for type_info; it has no value and is prohibited
    NGRAPH_RTTI_DECLARATION;
    ...
};
class MyInheritedOp : public MyOp
{
public:
    NGRAPH_RTTI_DECLARATION;
    ...
};
To complete type identification for a class, use NGRAPH_RTTI_DEFINITION.
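A sketch of the matching definitions in the source file, assuming the MyOp/MyInheritedOp classes above and version index 0:
NGRAPH_RTTI_DEFINITION(MyOp, "MyOp", 0);
NGRAPH_RTTI_DEFINITION(MyInheritedOp, "MyInheritedOp", 0, MyOp);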
#define OPENVINO_ASSERT(...)
Macro to check whether a boolean condition holds.
Parameters:
cond - Condition to check.
… - Additional error message info to be added to the error message.
Throws if the condition is false.
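A minimal sketch (the condition and message parts are illustrative):
OPENVINO_ASSERT(input_shapes.size() == 2,
                "Incorrect number of input shapes: ", input_shapes.size());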
#define OPENVINO_CREATE_EXTENSIONS(extensions)
This macro generates the entry point for the extension library.
Parameters:
extensions - A vector of extensions.
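A sketch of a library entry point, assuming a hypothetical custom operation class MyOp exposed through ov::OpExtension:
#include <openvino/core/extension.hpp>
#include <openvino/core/op_extension.hpp>

// MyOp is a hypothetical custom operation class defined elsewhere
OPENVINO_CREATE_EXTENSIONS(std::vector<ov::Extension::Ptr>({
    std::make_shared<ov::OpExtension<MyOp>>()
}));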
#define OPENVINO_RTTI(...)
Helper macro that puts the necessary declarations of an RTTI block inside a class definition. Should be used in the scope of a class that requires type identification beyond the one provided by C++ RTTI. Recommended for all classes inherited from ov::Node to enable pattern matching for them. Accepts the necessary type identification details, such as the type of the operation, the version, and an optional parent class.
Applying this macro within a class definition provides a declaration of the type_info static constant (for backward compatibility with the old RTTI definition for Node), a static function get_type_info_static that returns a reference to an object equal to type_info but not necessarily the same object, and a get_type_info virtual function that overrides Node::get_type_info and returns a reference to the same object that get_type_info_static returns.
Use this macro as a public part of the class definition:
class MyClass
{
public:
    OPENVINO_RTTI("MyClass", "my_version");
    ...
};
class MyClass2 : public MyClass
{
public:
    OPENVINO_RTTI("MyClass2", "my_version2", MyClass);
    ...
};
OPENVINO_RTTI(name)
OPENVINO_RTTI(name, version_id)
OPENVINO_RTTI(name, version_id, parent)
OPENVINO_RTTI(name, version_id, parent, old_version)
Parameters:
TYPE_NAME - A string literal of type const char* that names your class in the type identification namespace. The name is your choice, but it should be unique among all OPENVINO_RTTI_DECLARATION-enabled classes that can be used in conjunction with each other in one transformation flow.
VERSION_NAME - The name of the operation version, used to distinguish different versions of operations that share the same TYPE_NAME.
PARENT_CLASS - An optional direct or indirect parent class for this class; define it only if there is a need to capture any operation from some group of operations that all derive from a common base class. Don’t use Node as a parent: it is the base class for all operations and doesn’t provide the ability to define a useful subset of operations. PARENT_CLASS should define RTTI with the OPENVINO_RTTI_{DECLARATION/DEFINITION} macros.
_VERSION_INDEX - An unsigned integer index used to distinguish different versions of operations that share the same TYPE_NAME (for backward compatibility).
#define OPENVINO_UNREACHABLE(...)
Macro to signal a code path that is unreachable in a successful execution. It is implemented with the OPENVINO_ASSERT macro.
Parameters:
… - Additional error message that should describe why that execution path is unreachable.
Throws if the macro is executed.
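A sketch of typical usage as the default branch of a switch over element types (the handled case is illustrative):
void process(ov::element::Type_t et) {
    switch (et) {
    case ov::element::Type_t::f32:
        // handle f32 ...
        break;
    default:
        OPENVINO_UNREACHABLE("Unsupported element type");
    }
}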
#define OPENVINO_VERSION_MAJOR
Defines OpenVINO major version.
#define OPENVINO_VERSION_MINOR
Defines OpenVINO minor version.
#define OPENVINO_VERSION_PATCH
Defines OpenVINO patch version.
#define TYPE_CASE(a)
Used in an evaluator switch statement so that the case type and the evaluate call are guaranteed to have matching types.
Use this in an evaluate_*() function like this:
switch (arg0->get_element_type())
{
    TYPE_CASE(i8)(arg0, arg1, out, broadcast_spec);
    break;
    TYPE_CASE(i16)(arg0, arg1, out, broadcast_spec);
    break;
    ...
}
Each TYPE_CASE statement expands like this:
case element::Type_t::a: rc = evaluate<element::Type_t::a>(arg0, arg1, out, broadcast_spec);
Don’t forget to put a break after each statement or it will fall through and generate a runtime error.