Namespace ov
Overview
This transformation aligns the ranks of elementwise constant inputs with the rank of the output. More…
namespace ov {
// namespaces
namespace ov::batch_util;
namespace ov::cmp;
namespace ov::descriptor;
namespace ov::detail;
namespace ov::device;
namespace ov::device::capability;
namespace ov::element;
namespace ov::exec_model_info;
namespace ov::frontend;
namespace ov::frontend::tensorflow;
namespace ov::frontend::type;
namespace ov::helpers;
namespace ov::hint;
namespace ov::intel_auto;
namespace ov::intel_cpu;
namespace ov::intel_gna;
namespace ov::intel_gpu;
namespace ov::intel_gpu::capability;
namespace ov::intel_gpu::hint;
namespace ov::intel_gpu::memory_type;
namespace ov::intel_gpu::ocl;
namespace ov::internal;
namespace ov::layout;
namespace ov::log;
namespace ov::op;
namespace ov::op::ShapeInferRange;
namespace ov::op::convolution;
namespace ov::op::convolution::validate;
namespace ov::op::deformable_conv;
namespace ov::op::deformable_conv::validate;
namespace ov::op::detectron;
namespace ov::op::detectron::validate;
namespace ov::op::eye;
namespace ov::op::gather_nd;
namespace ov::op::internal;
namespace ov::op::interpolate;
namespace ov::op::interpolate::validate;
namespace ov::op::multiclass_nms;
namespace ov::op::multiclass_nms::validate;
namespace ov::op::nms;
namespace ov::op::nms::validate;
namespace ov::op::pooling;
namespace ov::op::pooling::validate;
namespace ov::op::prior_box;
namespace ov::op::prior_box::validate;
namespace ov::op::proposal;
namespace ov::op::psroi_pooling;
namespace ov::op::psroi_pooling::validate;
namespace ov::op::rnn;
namespace ov::op::roi_align;
namespace ov::op::roi_align::validate;
namespace ov::op::roi_pooling;
namespace ov::op::roi_pooling::validate;
namespace ov::op::shape_of;
namespace ov::op::slice;
namespace ov::op::util;
namespace ov::op::util::detail;
namespace ov::op::util::embedding;
namespace ov::op::util::error;
namespace ov::op::util::rfft_common_validation;
namespace ov::op::v0;
namespace ov::op::v0::lstm_cell;
namespace ov::op::v1;
namespace ov::op::v10;
namespace ov::op::v11;
namespace ov::op::v12;
namespace ov::op::v3;
namespace ov::op::v4;
namespace ov::op::v4::ctc_loss;
namespace ov::op::v4::lstm_cell;
namespace ov::op::v5;
namespace ov::op::v6;
namespace ov::op::v7;
namespace ov::op::v8;
namespace ov::op::v9;
namespace ov::op::validate;
namespace ov::opset1;
namespace ov::opset10;
namespace ov::opset11;
namespace ov::opset12;
namespace ov::opset2;
namespace ov::opset3;
namespace ov::opset4;
namespace ov::opset5;
namespace ov::opset6;
namespace ov::opset7;
namespace ov::opset8;
namespace ov::opset9;
namespace ov::pass;
namespace ov::pass::pattern;
namespace ov::pass::pattern::op;
namespace ov::pass::transpose_sinking;
namespace ov::pass::transpose_sinking::utils;
namespace ov::pass::transpose_sinking::utils::sink_backward;
namespace ov::pass::transpose_sinking::utils::sink_forward;
namespace ov::preprocess;
namespace ov::proxy;
namespace ov::reference;
namespace ov::runtime;
namespace ov::streams;
namespace ov::threading;
namespace ov::util;
namespace ov::util::dim;
// typedefs
typedef AnyMap RTMap;
typedef std::vector<ov::Any> AnyVector;
typedef std::vector<label_t> TensorLabel;
typedef std::vector<TensorLabel> TensorLabelVector;
typedef uint32_t label_t;
typedef ngraph::runtime::HostTensor HostTensor;
typedef std::shared_ptr<HostTensor> HostTensorPtr;
typedef std::vector<HostTensorPtr> HostTensorVector;
typedef ov::RTMap EvaluationContext;
typedef Node::type_info_t NodeTypeInfo;
typedef std::map<RawNodeOutput, Output<Node>> RawNodeOutputMap;
typedef std::vector<std::shared_ptr<Node>> NodeVector;
typedef std::vector<Output<Node>> OutputVector;
typedef std::vector<std::shared_ptr<ov::op::v0::Result>> ResultVector;
typedef Dimension Rank;
typedef typename element_type_traits<Type>::value_type fundamental_type_for;
typedef std::vector<std::shared_ptr<op::v0::Parameter>> ParameterVector;
typedef std::vector<std::shared_ptr<op::Sink>> SinkVector;
typedef std::function<bool(pass::pattern::Matcher&m)> matcher_pass_callback;
typedef std::function<bool(pass::pattern::Matcher&m)> graph_rewrite_callback;
typedef std::function<bool(pass::pattern::RecurrentMatcher&m)> recurrent_graph_rewrite_callback;
typedef std::function<bool(const std::shared_ptr<Node>&node)> handler_callback;
typedef std::vector<Tensor> TensorVector;
typedef typename result_shape<TShape>::type result_shape_t;
typedef std::map<std::string, std::string> SupportedOpsMap;
// enums
enum Affinity;
enum ColumnOfCPUMappingTable;
enum ColumnOfCpuStreamsInfoTable;
enum ColumnOfProcessorTypeTable;
enum Direction;
enum ProcessorUseStatus;
enum PropertyMutability;
// structs
struct CheckLocInfo;
struct DiscreteTypeInfo;
struct MemBandwidthPressure;
struct ProfilingInfo;
template <>
struct Property<T, PropertyMutability::RO>;
struct PropertyName;
struct RawNodeOutput;
template <>
struct SoPtr;
struct TensorTransform;
struct Version;
template <>
struct element_type_traits<element::Type_t::i4>;
template <>
struct element_type_traits<element::Type_t::f16>;
template <>
struct element_type_traits<element::Type_t::f32>;
template <>
struct element_type_traits<element::Type_t::i32>;
template <>
struct element_type_traits<element::Type_t::boolean>;
template <>
struct element_type_traits;
template <>
struct element_type_traits<element::Type_t::bf16>;
template <>
struct element_type_traits<element::Type_t::u8>;
template <>
struct element_type_traits<element::Type_t::f64>;
template <>
struct element_type_traits<element::Type_t::u4>;
template <>
struct element_type_traits<element::Type_t::u64>;
template <>
struct element_type_traits<element::Type_t::i64>;
template <>
struct element_type_traits<element::Type_t::i8>;
template <>
struct element_type_traits<element::Type_t::u16>;
template <>
struct element_type_traits<element::Type_t::i16>;
template <>
struct element_type_traits<element::Type_t::u32>;
template <>
struct element_type_traits<element::Type_t::u1>;
template <>
struct result_shape<PartialShape>;
template <>
struct result_shape<ov::Shape>;
template <>
struct result_shape;
// templates
template AllocatorImpl;
template ICore;
template IVariableState;
// classes
class Allocator;
class Any;
class AssertFailure;
template <>
class AttributeAdapter<ov::NodeVector>;
template <>
class AttributeAdapter<ov::element::TypeVector>;
template <>
class AttributeAdapter<ov::op::util::FrameworkNodeAttrs>;
template <>
class AttributeAdapter<ov::PartialShape>;
template <>
class AttributeAdapter<ov::Shape>;
template <>
class AttributeAdapter<ParameterVector>;
template <>
class AttributeAdapter<ov::element::Type_t>;
template <>
class AttributeAdapter<std::set<std::string>>;
template <>
class AttributeAdapter<ResultVector>;
template <>
class AttributeAdapter<ov::element::Type>;
template <>
class AttributeAdapter<ov::Dimension>;
template <>
class AttributeAdapter<ov::AxisSet>;
template <>
class AttributeAdapter<op::v9::GridSample::InterpolationMode>;
template <>
class AttributeAdapter<op::v8::MatrixNms::SortResultType>;
template <>
class AttributeAdapter<op::v8::MatrixNms::DecayFunction>;
template <>
class AttributeAdapter<std::shared_ptr<ngraph::runtime::AlignedBuffer>>;
template <>
class AttributeAdapter<op::v9::GridSample::PaddingMode>;
template <>
class AttributeAdapter<op::v9::ROIAlign::AlignedMode>;
template <>
class AttributeAdapter<op::v9::NonMaxSuppression::BoxEncodingType>;
template <>
class AttributeAdapter<op::v9::ROIAlign::PoolingMode>;
template <>
class AttributeAdapter<std::shared_ptr<op::util::Variable>>;
template <>
class AttributeAdapter<std::shared_ptr<ov::Model>>;
template <>
class AttributeAdapter<std::shared_ptr<ov::Node>>;
template <>
class AttributeAdapter<std::vector<uint8_t>>;
template <>
class AttributeAdapter<std::vector<uint64_t>>;
template <>
class AttributeAdapter<std::vector<uint32_t>>;
template <>
class AttributeAdapter<std::vector<uint16_t>>;
template <>
class AttributeAdapter<uint16_t>;
template <>
class AttributeAdapter<uint64_t>;
template <>
class AttributeAdapter<uint32_t>;
template <>
class AttributeAdapter<uint8_t>;
template <>
class AttributeAdapter<op::v5::Round::RoundMode>;
template <>
class AttributeAdapter<std::vector<std::string>>;
template <>
class AttributeAdapter<std::vector<std::shared_ptr<op::util::MultiSubGraphOp::InputDescription>>>;
template <>
class AttributeAdapter<std::vector<float>>;
template <>
class AttributeAdapter<std::vector<double>>;
template <>
class AttributeAdapter<std::string>;
template <>
class AttributeAdapter<std::vector<std::shared_ptr<op::util::MultiSubGraphOp::OutputDescription>>>;
template <>
class AttributeAdapter<std::vector<int16_t>>;
template <>
class AttributeAdapter<std::vector<int64_t>>;
template <>
class AttributeAdapter<std::vector<int32_t>>;
template <>
class AttributeAdapter<std::vector<int8_t>>;
template <>
class AttributeAdapter<op::v5::NonMaxSuppression::BoxEncodingType>;
template <>
class AttributeAdapter<Strides>;
template <>
class AttributeAdapter<op::v3::ROIAlign::PoolingMode>;
template <>
class AttributeAdapter<op::AutoBroadcastSpec>;
template <>
class AttributeAdapter<ngraph::reduction::Type>;
template <>
class AttributeAdapter<int8_t>;
template <>
class AttributeAdapter<int64_t>;
template <>
class AttributeAdapter<op::AutoBroadcastType>;
template <>
class AttributeAdapter<op::BroadcastType>;
template <>
class AttributeAdapter<op::BroadcastModeSpec>;
template <>
class AttributeAdapter<op::EpsMode>;
template <>
class AttributeAdapter<op::GeluApproximationMode>;
template <>
class AttributeAdapter<int32_t>;
template <>
class AttributeAdapter<float>;
template <>
class AttributeAdapter<bool>;
template <>
class AttributeAdapter<AxisVector>;
template <>
class AttributeAdapter;
template <>
class AttributeAdapter<int16_t>;
template <>
class AttributeAdapter<Coordinate>;
template <>
class AttributeAdapter<CoordinateDiff>;
template <>
class AttributeAdapter<op::v5::Loop::SpecialBodyPorts>;
template <>
class AttributeAdapter<double>;
template <>
class AttributeAdapter<op::LSTMWeightsFormat>;
template <>
class AttributeAdapter<Layout>;
template <>
class AttributeAdapter<op::PadMode>;
template <>
class AttributeAdapter<op::v0::SpaceToDepth::SpaceToDepthMode>;
template <>
class AttributeAdapter<op::v0::Interpolate::InterpolateMode>;
template <>
class AttributeAdapter<op::v0::DepthToSpace::DepthToSpaceMode>;
template <>
class AttributeAdapter<op::util::MulticlassNmsBase::SortResultType>;
template <>
class AttributeAdapter<op::v12::ScatterElementsUpdate::Reduction>;
template <>
class AttributeAdapter<op::v1::NonMaxSuppression::BoxEncodingType>;
template <>
class AttributeAdapter<op::v1::BinaryConvolution::BinaryConvolutionMode>;
template <>
class AttributeAdapter<op::v3::NonMaxSuppression::BoxEncodingType>;
template <>
class AttributeAdapter<op::MVNEpsMode>;
template <>
class AttributeAdapter<op::util::InterpolateBase::ShapeCalcMode>;
template <>
class AttributeAdapter<op::v1::Reverse::Mode>;
template <>
class AttributeAdapter<op::util::InterpolateBase::InterpolateMode>;
template <>
class AttributeAdapter<op::RecurrentSequenceDirection>;
template <>
class AttributeAdapter<op::PadType>;
template <>
class AttributeAdapter<op::TopKMode>;
template <>
class AttributeAdapter<op::RoundingType>;
template <>
class AttributeAdapter<op::util::InterpolateBase::NearestMode>;
template <>
class AttributeAdapter<op::TopKSortType>;
template <>
class AttributeAdapter<op::util::InterpolateBase::CoordinateTransformMode>;
class AttributeVisitor;
class AxisSet;
class AxisVector;
class BaseOpExtension;
class BiasAttribute;
class Busy;
class Cancelled;
class CompiledModel;
class Coordinate;
class CoordinateDiff;
class Core;
class Decompression;
class DequantizationNode;
class DeviceIDParser;
class Dimension;
template <>
class DirectValueAccessor;
class DisableFP16Compression;
template <>
class EnumAttributeAdapterBase;
template <>
class EnumMask;
template <>
class EnumNames;
class Exception;
class Extension;
class FusedNames;
class IAsyncInferRequest;
class ICompiledModel;
class IInferRequest;
class IPlugin;
class IRemoteContext;
class IRemoteTensor;
class ISyncInferRequest;
class ITensorAccessor;
template <, >
class IndirectScalarValueAccessor;
template <, >
class IndirectVectorValueAccessor;
class InferRequest;
template <>
class Input<const Node>;
template <>
class Input;
template <>
class Input<Node>;
class Interval;
class KeepFP16Const;
class Layout;
class LayoutAttribute;
class MappedMemory;
class Model;
class NmsSelectedIndices;
class NoTransposeSinkingAttr;
class Node;
class NodeValidationFailure;
class NonconvertibleDivide;
class NotImplemented;
class OldApiMapElementType;
class OldApiMapOrder;
template <>
class OpExtension;
class OpSet;
template <>
class Output<Node>;
template <>
class Output<const Node>;
template <>
class Output;
class PartialShape;
class PrecisionSensitive;
class PreprocessingAttribute;
class PrimitivesPriority;
template <, >
class Property;
class RemoteContext;
class RemoteTensor;
class RoundGuard;
class RuntimeAttribute;
template <, >
class SeqGen;
class Shape;
class ShapeSubgraph;
class Strides;
class StridesPropagation;
class Tensor;
template <>
class TensorAccessor;
template <>
class ValueAccessor<void>;
template <>
class ValueAccessor;
template <>
class ValueAccessor<void \*>;
class VariableState;
class VisitorAdapter;
class bfloat16;
class float16;
template <>
class optional;
// global variables
constexpr auto caching_properties = internal::caching_properties;
constexpr auto exclusive_async_requests = internal::exclusive_async_requests;
static constexpr Property<std::vector<PropertyName>, PropertyMutability::RO> supported_properties { "SUPPORTED_PROPERTIES"};
static constexpr Property<std::vector<std::string>, PropertyMutability::RO> available_devices {"AVAILABLE_DEVICES"};
static constexpr Property<std::string, PropertyMutability::RO> model_name {"NETWORK_NAME"};
static constexpr Property<uint32_t, PropertyMutability::RO> optimal_number_of_infer_requests { "OPTIMAL_NUMBER_OF_INFER_REQUESTS"};
static constexpr Property<bool> enable_profiling {"PERF_COUNT"};
static constexpr Property<std::string> cache_dir {"CACHE_DIR"};
static constexpr Property<bool, PropertyMutability::RO> loaded_from_cache {"LOADED_FROM_CACHE"};
static constexpr Property<std::tuple<unsigned int, unsigned int>, PropertyMutability::RO> range_for_streams { "RANGE_FOR_STREAMS"};
static constexpr Property<unsigned int, PropertyMutability::RO> optimal_batch_size {"OPTIMAL_BATCH_SIZE"};
static constexpr Property<uint32_t, PropertyMutability::RO> max_batch_size {"MAX_BATCH_SIZE"};
static constexpr Property<uint32_t, PropertyMutability::RW> auto_batch_timeout {"AUTO_BATCH_TIMEOUT"};
static constexpr Property<std::tuple<unsigned int, unsigned int, unsigned int>, PropertyMutability::RO> range_for_async_infer_requests {"RANGE_FOR_ASYNC_INFER_REQUESTS"};
static constexpr Property<bool, PropertyMutability::RW> force_tbb_terminate {"FORCE_TBB_TERMINATE"};
static constexpr Property<bool, PropertyMutability::RW> enable_mmap {"ENABLE_MMAP"};
static constexpr Property<streams::Num, PropertyMutability::RW> num_streams {"NUM_STREAMS"};
static constexpr Property<int32_t, PropertyMutability::RW> inference_num_threads {"INFERENCE_NUM_THREADS"};
static constexpr Property<int32_t, PropertyMutability::RW> compilation_num_threads {"COMPILATION_NUM_THREADS"};
static constexpr Property<Affinity> affinity {"AFFINITY"};
static constexpr Property<std::vector<std::string>, PropertyMutability::RO> execution_devices {"EXECUTION_DEVICES"};
// global functions
LP_TRANSFORMATIONS_API void mark_as_bias(const std::shared_ptr<Node>& node);
LP_TRANSFORMATIONS_API bool marked_as_bias(const std::shared_ptr<const Node>& node);
void mark_as_decompression(const std::shared_ptr<Node>& node);
void unmark_as_decompression(const std::shared_ptr<Node>& node);
bool is_decompression(const std::shared_ptr<Node>& node);
void mark_as_dequantization_node(const std::shared_ptr<Node>& node);
bool is_dequantization_node(const std::shared_ptr<Node>& node);
void disable_fp16_compression(const std::shared_ptr<Node>& node);
void enable_fp16_compression(const std::shared_ptr<Node>& node);
bool fp16_compression_is_disabled(const std::shared_ptr<const Node>& node);
void postpone_fp16_compression(RTMap& rt_info);
bool is_fp16_compression_postponed(const RTMap& rt_info);
void do_not_postpone_fp16_compression(RTMap& rt_info);
std::stringconst std::shared_ptr<ov::Node>& getFusedNames();
std::vector<std::string>const std::shared_ptr<ov::Node>& getFusedNamesVector();
voidconst std::shared_ptr<Node>& mark_shape_subgraph();
voidconst std::shared_ptr<Node>& unmark_shape_subgraph();
boolconst std::shared_ptr<const Node>& is_shape_subgraph();
voidconst std::shared_ptr<Node>& enable_keep_fp16_const();
voidconst std::shared_ptr<Node>& disable_keep_fp16_const();
boolconst std::shared_ptr<const Node>& is_keep_fp16_const();
boolconst Node \* has_nms_selected_indices();
voidNode \* set_nms_selected_indices();
voidconst std::shared_ptr<Node>& disable_divide_conversion();
voidconst std::shared_ptr<Node>& enable_divide_conversion();
boolconst std::shared_ptr<Node>& divide_is_nonconvertible();
boolconst std::shared_ptr<Node>& has_old_api_map_element_type();
OldApiMapElementTypeconst std::shared_ptr<Node>& get_old_api_map_element_type();
voidconst std::shared_ptr<Node>&const OldApiMapElementType& set_old_api_map_element_type(
,
);
boolconst std::shared_ptr<Node>& has_old_api_map_order();
OldApiMapOrderconst std::shared_ptr<Node>& get_old_api_map_order();
voidstd::shared_ptr<Node>&const OldApiMapOrder& set_old_api_map_order(, );
boolconst std::shared_ptr<Node>& is_preprocesing_node();
voidstd::shared_ptr<Node> set_is_preprocessing_node();
std::stringconst std::shared_ptr<Node>& getPrimitivesPriority();
boolconst Input<Node>& has_strides_prop();
ov::Stridesconst Input<Node>& get_strides_prop();
voidInput<Node>&const Strides& insert_strides_prop(, );
voidInput<Node>& remove_strides_prop();
voidconst std::shared_ptr<Node>& mark_as_no_sinking_node();
voidconst std::shared_ptr<Node>& reset_no_sinking_attribute();
boolconst std::shared_ptr<Node>& is_sinking_node();
boolconst Node \* is_sinking_node();
boolov::Output<ov::Node> is_sinking_node();
std::shared_ptr<ov::MappedMemory> load_mmap_object(const std::string& path);
template <, >
AB& copy_from();
OPENVINO_API std::ostream& operator << (std::ostream& str, const AxisSet& axis_set);
OPENVINO_API std::ostream& operator << (std::ostream& str, const AxisVector& axis_vector);
OPENVINO_API std::ostream& operator << (std::ostream& str, const Coordinate& coordinate);
OPENVINO_API std::ostream& operator << (std::ostream& str, const CoordinateDiff& coordinate_diff);
OPENVINO_API std::ostream& operator << (std::ostream& str, const Dimension& dimension);
template <, >
std::enable_if<std::is_convertible<Value, std::string>::value, Type>::typeconst Value& as_enum();
template <>
const std::string&Value as_string();
static std::ostream&std::ostream& write_all_to_stream();
template <, >
static std::ostream&std::ostream&const T&TS&&... write_all_to_stream(
,
,
);
template <>
static std::stringT&& stringify();
voidstd::vector<Extension::Ptr>& create_extensions();
OPENVINO_API voidconst std::shared_ptr<const Model>&const std::function<void(const std::shared_ptr<Node>&)>& traverse_nodes(
,
);
OPENVINO_API voidconst Model \*const std::function<void(const std::shared_ptr<Node>&)>& traverse_nodes(
,
);
OPENVINO_API voidconst NodeVector&const std::function<void(const std::shared_ptr<Node>&)>&const NodeVector& traverse_nodes(
,
,
);
OPENVINO_API voidconst std::shared_ptr<Node>&const std::shared_ptr<Node>&const std::vector<int64_t>& replace_node(
,
,
);
OPENVINO_API voidconst std::shared_ptr<Node>&const OutputVector& replace_node(
,
);
OPENVINO_API voidconst std::shared_ptr<Node>&const std::shared_ptr<Node>& replace_node(
,
);
OPENVINO_API voidconst std::shared_ptr<Model>&const std::unordered_map<std::shared_ptr<op::v0::Parameter>, std::shared_ptr<op::v0::Parameter>>&const std::unordered_map<std::shared_ptr<Node>, std::shared_ptr<Node>>& replace_nodes(
,
,
);
template <>
std::vector<std::shared_ptr<Node>>T topological_sort();
OPENVINO_API std::shared_ptr<ov::Model>const ov::Model&std::unordered_map<Node \*, std::shared_ptr<Node>>& clone_model(
,
);
OPENVINO_API std::shared_ptr<ov::Model>const ov::Model& clone_model();
OPENVINO_API boolconst std::shared_ptr<Node>&const std::shared_ptr<Node>& compare_constants(
,
);
OPENVINO_API boolOutput<Node>const Output<Node>& replace_output_update_name(, );
OPENVINO_API boolconst std::shared_ptr<Node>&const std::shared_ptr<Node>& replace_node_update_name(
,
);
OPENVINO_API voidconst std::shared_ptr<const ov::Model>&const std::string&const std::string&ov::pass::Serialize::Version serialize(
,
,
,
);
OPENVINO_API voidconst std::shared_ptr<const ov::Model>&const std::string&bool save_model(
,
,
);
OPENVINO_API std::ostream&std::ostream&const Interval& operator << (, );
std::shared_ptr<Model>const Model&std::unordered_map<Node \*, std::shared_ptr<Node>>& clone_ov_model(
,
);
OPENVINO_API std::ostream&std::ostream&const Model& operator << (, );
OPENVINO_API ov::Dimension get_batch(const std::shared_ptr<const ov::Model>& f);
OPENVINO_API void set_batch(const std::shared_ptr<ov::Model>& model, ov::Dimension batch_size);
OPENVINO_API std::stringconst Node \* node_validation_failure_loc_string();
OPENVINO_API std::ostream&std::ostream&const Node& operator << (, );
OPENVINO_API std::ostream&std::ostream&const Node \* operator << (, );
template <>
voidconst Node \*T check_new_args_count(, );
OPENVINO_API std::ostream&std::ostream&const Input<Node>& operator << (, );
OPENVINO_API std::ostream&std::ostream&const Input<const Node>& operator << (, );
OPENVINO_API std::ostream&std::ostream&const Output<Node>& operator << (, );
OPENVINO_API std::ostream&std::ostream&const Output<const Node>& operator << (, );
OPENVINO_API OutputVectorconst NodeVector& as_output_vector();
OPENVINO_API NodeVectorconst OutputVector& as_node_vector();
OPENVINO_API ResultVectorconst OutputVector& as_result_vector();
OPENVINO_API PartialShapeconst PartialShape&const PartialShape& operator + (, );
OPENVINO_API std::ostream&std::ostream&const PartialShape& operator << (, );
OPENVINO_API voidconst std::shared_ptr<ov::Node>&const std::shared_ptr<ov::Node>& copy_runtime_info(
,
);
OPENVINO_API voidconst std::shared_ptr<ov::Node>&ov::NodeVector copy_runtime_info(
,
);
OPENVINO_API voidconst ov::NodeVector&const std::shared_ptr<ov::Node>& copy_runtime_info(
,
);
OPENVINO_API voidconst ov::NodeVector&ov::NodeVector copy_runtime_info(, );
OPENVINO_API voidconst ov::OutputVector&ov::OutputVector copy_output_runtime_info(
,
);
OPENVINO_API std::ostream&std::ostream&const RuntimeAttribute& operator << (, );
template <>
size_tconst SHAPE_TYPE& shape_size();
template <>
size_tForwardItconst ForwardIt shape_size(, );
template <>
std::vector<size_t>const SHAPE_TYPE& row_major_strides();
template <>
size_tconst SHAPE_TYPE&size_t row_major_stride(, );
template <>
boolconst SHAPE_TYPE& is_scalar();
template <>
boolconst SHAPE_TYPE& is_vector();
OPENVINO_API std::ostream&std::ostream&const Shape& operator << (, );
OPENVINO_API std::ostream&std::ostream&const Strides& operator << (, );
OPENVINO_API std::ostream&std::ostream&const DiscreteTypeInfo& operator << (, );
bool ::typeValue is_type();
Type \*::typeValue as_type();
template <, >
autoconst U& as_type_ptr();
OPENVINO_API PartialShapeconst Node \*const PartialShape&const Strides&const CoordinateDiff&const CoordinateDiff&const PartialShape&const Strides&const Strides& infer_convolution_forward(
,
,
,
,
,
,
,
);
OPENVINO_API voidconst Shape&const Shape&const Strides&const Strides&const op::PadTypeCoordinateDiff&CoordinateDiff& infer_auto_padding(
,
,
,
,
,
,
);
OPENVINO_API int64_tconst Node \*std::int64_tconst Rank& normalize_axis(, , );
OPENVINO_API std::vector<size_t>const std::string&const std::vector<int64_t>&const Rank& normalize_axes(
,
,
);
OPENVINO_API int64_tconst std::string&std::int64_tconst Rank& normalize_axis(
,
,
);
OPENVINO_API int64_tconst Node \*std::int64_tstd::uint64_tstd::int64_tstd::int64_t normalize_axis(
,
,
,
,
);
OPENVINO_API int64_tconst std::string&std::int64_tstd::uint64_tstd::int64_tstd::int64_t normalize_axis(
,
,
,
,
);
OPENVINO_API voidconst Node \*const int64_t&std::vector<int64_t>& normalize_axes(
,
,
);
OPENVINO_API boolconst Output<Node>&PartialShape& evaluate_as_partial_shape(, );
OPENVINO_API std::shared_ptr<op::v0::Constant>const Output<Node>& get_constant_from_source();
OPENVINO_API boolconst Node \*TensorLabelVector& default_label_evaluator(, );
OPENVINO_API voidstd::vector<int64_t>&const size_t generate_transpose_default_order(
,
);
OPENVINO_API boolconst std::vector<int64_t>&const size_t is_valid_axes_order(, );
OPENVINO_API boolconst TensorLabel& has_no_labels();
OPENVINO_API std::vector<PartialShape>const ov::Node& get_node_input_partial_shapes();
OPENVINO_API boolconst ov::Rank&const std::vector<ov::Rank>& is_rank_compatible_any_of(
,
);
OPENVINO_API std::ostream&std::ostream&const Version& operator << (, );
OPENVINO_API std::ostream&std::ostream&const std::map<std::string, Version>& operator << (
,
);
const OPENVINO_API_C();
OPENVINO_API std::ostream&std::ostream&const op::v1::BinaryConvolution::BinaryConvolutionMode& operator << (
,
);
OPENVINO_API std::ostream&std::ostream&const op::v0::DepthToSpace::DepthToSpaceMode& operator << (
,
);
OPENVINO_API std::ostream&std::ostream&const op::v9::GridSample::InterpolationMode& operator << (
,
);
OPENVINO_API std::ostream&std::ostream&const op::v9::GridSample::PaddingMode& operator << (
,
);
OPENVINO_API std::ostream&std::ostream&const op::v0::Interpolate::InterpolateMode& operator << (
,
);
OPENVINO_API std::ostream&std::ostream&const op::LSTMWeightsFormat& operator << (
,
);
OPENVINO_API std::ostream&std::ostream&const op::v8::MatrixNms::DecayFunction& operator << (
,
);
OPENVINO_API std::ostream&std::ostream&const op::v8::MatrixNms::SortResultType& operator << (
,
);
OPENVINO_API std::ostream&std::ostream&const op::v1::NonMaxSuppression::BoxEncodingType& operator << (
,
);
OPENVINO_API std::ostream&std::ostream&const op::v3::NonMaxSuppression::BoxEncodingType& operator << (
,
);
OPENVINO_API std::ostream&std::ostream&const op::v5::NonMaxSuppression::BoxEncodingType& operator << (
,
);
OPENVINO_API std::ostream&std::ostream&const op::v9::NonMaxSuppression::BoxEncodingType& operator << (
,
);
OPENVINO_API std::ostream&std::ostream&const op::v1::Reverse::Mode& operator << (
,
);
std::ostream&std::ostream&const op::v3::ROIAlign::PoolingMode& operator << (, );
std::ostream&std::ostream&const op::v9::ROIAlign::PoolingMode& operator << (, );
std::ostream&std::ostream&const op::v9::ROIAlign::AlignedMode& operator << (, );
OPENVINO_API std::ostream&std::ostream&const op::v5::Round::RoundMode& operator << (
,
);
OPENVINO_API std::ostream&std::ostream&const op::v0::SpaceToDepth::SpaceToDepthMode& operator << (
,
);
OPENVINO_API std::ostream&std::ostream&const op::util::InterpolateBase::InterpolateMode& operator << (
,
);
OPENVINO_API std::ostream&std::ostream&const op::util::InterpolateBase::CoordinateTransformMode& operator << (
,
);
OPENVINO_API std::ostream&std::ostream&const op::util::InterpolateBase::NearestMode& operator << (
,
);
OPENVINO_API std::ostream&std::ostream&const op::util::InterpolateBase::ShapeCalcMode& operator << (
,
);
OPENVINO_API std::ostream&std::ostream&const op::util::MulticlassNmsBase::SortResultType& operator << (
,
);
OPENVINO_API void mark_as_precision_sensitive(ov::Input<ov::Node> inp);
OPENVINO_API void unmark_as_precision_sensitive(ov::Input<ov::Node> inp);
OPENVINO_API bool is_precision_sensitive(const ov::Input<ov::Node>& inp);
OPENVINO_API const OpSet& get_opset1();
OPENVINO_API const OpSet& get_opset2();
OPENVINO_API const OpSet& get_opset3();
OPENVINO_API const OpSet& get_opset4();
OPENVINO_API const OpSet& get_opset5();
OPENVINO_API const OpSet& get_opset6();
OPENVINO_API const OpSet& get_opset7();
OPENVINO_API const OpSet& get_opset8();
OPENVINO_API const OpSet& get_opset9();
OPENVINO_API const OpSet& get_opset10();
OPENVINO_API const OpSet& get_opset11();
OPENVINO_API const OpSet& get_opset12();
OPENVINO_API const std::map<std::string, std::function<const ov::OpSet&()>>& get_available_opsets();
template <>
constexpr bool is_floating_point();
template <>
constexpr autoconst TContainer& make_tensor_accessor();
auto make_tensor_accessor();
template <, , >
TResultconst element::Type_tconst void \*constconst size_tUnaryOperation&& get_raw_data_as(
,
,
,
);
template <, , >
OPENVINO_SUPPRESS_DEPRECATED_START TResultHostTensor&UnaryOperation&& get_tensor_data_as(
,
);
template <, , >
TResultHostTensor \*UnaryOperation&& get_tensor_data_as(, );
template <, , >
OPENVINO_SUPPRESS_DEPRECATED_END TResultconst Tensor&UnaryOperation&& get_tensor_data_as(
,
);
FRONTEND_API void shutdown();
std::unordered_set<std::string>const std::shared_ptr<const ov::Model>&std::function<void(std::shared_ptr<ov::Model>&)>std::function<bool(const std::shared_ptr<ov::Node>)> get_supported_nodes(
,
,
);
std::shared_ptr<ITensor>const element::Typeconst Shape&const Allocator& make_tensor(
,
,
);
std::shared_ptr<ITensor>const element::Typeconst Shape&void \*const Strides& make_tensor(
,
,
,
);
std::shared_ptr<ITensor>const std::shared_ptr<ITensor>&const Coordinate&const Coordinate& make_tensor(
,
,
);
ov::Tensorconst ov::SoPtr<ITensor>& make_tensor();
boolbool check_open_mp_env_vars();
std::vector<int> get_available_numa_nodes();
std::vector<int> get_available_cores_types();
intbool get_number_of_cpu_cores();
intbool get_number_of_logical_cpu_cores();
bool with_cpu_x86_sse42();
bool with_cpu_x86_avx();
bool with_cpu_x86_avx2();
bool with_cpu_x86_avx2_vnni();
bool with_cpu_x86_avx512f();
bool with_cpu_x86_avx512_core();
bool with_cpu_x86_avx512_core_vnni();
bool with_cpu_x86_bfloat16();
bool with_cpu_x86_avx512_core_fp16();
bool with_cpu_x86_avx512_core_amx_int8();
bool with_cpu_x86_avx512_core_amx_bf16();
bool with_cpu_x86_avx512_core_amx();
bool is_cpu_map_available();
int get_num_numa_nodes();
int get_num_sockets();
std::vector<std::vector<int>> get_proc_type_table();
std::vector<std::vector<int>> get_org_proc_type_table();
voidconst std::vector<std::vector<int>>std::vector<std::vector<int>>&const int reserve_available_cpus(
,
,
);
voidconst std::vector<int>&const int set_cpu_used(, );
intint get_socket_by_numa_node();
static MemBandwidthPressureconst std::shared_ptr<ngraph::Function>const floatconst float MemBandwidthPressureTolerance(
,
,
);
} // namespace ov
Detailed Documentation¶
transformation aligns elementwise constant inputs ranks with its output rank
A namespace with const values for Execution Graph parameters names.
Executable Model Info is represented in ov::Model format with general ExecutionNode nodes inside including connections between the nodes. Each node describes an executable hardware-specific primitive and stores its parameters within ExecutionNode::get_rt_info map. There is a list of general keys for the parameters map.
OpenVINO C++ API.
Resolves transpose_b key from MatMul operation if corresponding input is constant or FakeQuantize by inserting Transpose.
Typedefs¶
typedef std::vector<label_t> TensorLabel
Alias for label tensor.
typedef std::vector<TensorLabel> TensorLabelVector
Alias for vector of label tensors.
typedef uint32_t label_t
Alias for dimension label type.
typedef ov::RTMap EvaluationContext
EvaluationContext stores and manages a context (additional parameters, values and environment) for evaluating ov::Model.
typedef Dimension Rank
Alias for Dimension, used when the value represents the number of axes in a shape, rather than the size of one dimension in a shape.
typedef std::vector<Tensor> TensorVector
A vector of Tensor objects.
Global Functions¶
std::shared_ptr<ov::MappedMemory> load_mmap_object(const std::string& path)
Returns mapped memory for a file from provided path. Instead of reading files, we can map the memory via mmap for Linux in order to avoid time-consuming reading and reduce memory consumption.
Parameters:
path |
Path to a file which memory will be mmaped. |
Returns:
MappedMemory shared ptr object which keep mmaped memory and control the lifetime.
OPENVINO_API std::ostream& operator << (std::ostream& str, const Dimension& dimension)
Insert a human-readable representation of a dimension into an output stream.
Inserts the string "?" if dimension is dynamic; else inserts dimension.get_length().
Parameters:
str |
The output stream targeted for insertion. |
dimension |
The dimension to be inserted into |
Returns:
A reference to str
after insertion.
template <typename Type, typename Value>
typename std::enable_if<std::is_convertible<Value, std::string>::value, Type>::type as_enum(const Value& value)
Returns the enum value matching the string.
template <typename Value>
const std::string& as_string(Value value)
Returns the string matching the enum value.
void create_extensions(std::vector<Extension::Ptr>& extensions)
The entry point for library with OpenVINO extensions.
Parameters:
vector |
of extensions |
OPENVINO_API void traverse_nodes(
    const NodeVector& subgraph_results,
    const std::function<void(const std::shared_ptr<Node>&)>& f,
    const NodeVector& subgraph_params
)
Visit each node in a sub-graph of the entire graph.
Traverses a sub-graph starting from subgraph_results moving up towards parameter nodes. Traversal stops if it hits a node in subgraph_params.
Most useful for finding parameters of a graph directly from the result nodes and not from function parameters or extracting a subgraph relevant to the computation of certain outputs
Parameters:
subgraph_results |
The output nodes of the sub-graph |
f |
Model to execute at each node in the traversal |
subgraph_params |
Input nodes of the sub-graph (optional) |
OPENVINO_API void replace_node(
    const std::shared_ptr<Node>& target,
    const std::shared_ptr<Node>& replacement,
    const std::vector<int64_t>& output_order
)
Replace the node target
with the node replacement
, i.e., redirect all users and control dependencies of target
to replacement
.
This is primarily used in graph-rewriting passes. For example, we might “fuse” two Concat operations as follows:
(Step 0: Original graph)
A B | | v v N0[Concat, concatenation_axis=3] C | | v v N1[Concat, concatenation_axis=3] | | v v some_user another_user
(Step 1: Construct replacement)
shared_ptr<Node> new_N1 = make_shared<op::Concat>({A,B,C},3);
A————————————-. | | | B————-). | | | | v v | | N0[Concat, concatenation_axis=3] C–)). | | | | | v v v v v N1[Concat, concatenation_axis=3] new_N1[Concat, concatenation_axis=3] | | v v some_user another_user
(Step 2: Replace N1 with new_N1)
replace_node(N1, new_N1);
A————————————-. | | | B————-). | | | | v v | | N0[Concat, concatenation_axis=3] C–)). | | | | | v v v v v N1[Concat, concatenation_axis=3] new_N1[Concat, concatenation_axis=3] | | v v some_user another_user
(Step 3: N0 and N1 are now dead, nodes will be freed)
[happens automatically, once all shared_ptrs to N1 are released]
A————————————-. | B————-). | | | | C–)). | | | v v v new_N1[Concat, concatenation_axis=3] | | v v some_user another_user
NOTE 1: replace_node is not type-safe (the graph is not revalidated). For example, the following is allowed, even if node some_user
requires an input of shape 2x2:
(Before) A(shape=2x2) B(shape=3x3) | v some_user(requires 2x2 input)
(After graph is now invalid)
replace_node(A, B);
A(shape=2x2) B(shape=3x3)
|
v
some_user(requires 2x2 input)
NOTE 2: it is possible to insert a cycle into the graph with replace_node, resulting in an invalid graph. Care must be taken to avoid this. One common example is when you are attempting to insert a new node M
“after” a node
N. For example, you might expect this to work:
shared_ptr<Node> M = make_shared<SomeUnaryOp>(N); replace_node(M, N);
The problem is that at replacement time, N itself is a user of M. So we end up introducing a cycle as follows:
N
|
v
other users…
|||
vvv
N------------>M
|
v
other users…
|||
vvv
.----.
| |
| |
N `----->M
|
v
other users...
To avoid the cycle, a valid way to perform the above desired insertion would be,
auto new_N = N->clone_with_new_inputs(N->input_values());
shared_ptr<Node> M = make_shared<SomeUnaryOp>(new_N);
replace_node(N, M);
Parameters:
target |
Node to be replaced. |
replacement |
Node to replace |
output_order |
Vector determines order of replacement node’s outputs. |
OPENVINO_API void replace_node(
    const std::shared_ptr<Node>& target,
    const OutputVector& replacement_values
)
Replace target.outputs[i] with replacement_values[i] and transfer control dependents and.
OPENVINO_API void replace_nodes(
    const std::shared_ptr<Model>& f,
    const std::unordered_map<std::shared_ptr<op::v0::Parameter>, std::shared_ptr<op::v0::Parameter>>& parameter_replacement_map,
    const std::unordered_map<std::shared_ptr<Node>, std::shared_ptr<Node>>& body_replacement_map
)
Replace multiple nodes in a function.
Limitations:
No check is made that the replaced nodes in parameter_replacement_map are actually among the bound parameters of f. (If a parameter appears in the map that is not bound by f, it will be silently ignored.)
If a parameter node appears as a key in both parameter_replacement_map and in body_replacement_map, behavior is unspecified.
Parameters:
f |
Model where replacement is taking place. |
parameter_replacement_map |
A mapping from parameter shared pointers to parameter shared pointers. For each pair (k,v) in the map, parameter k is replaced by parameter v, except if k==v or k is not a parameter bound by f, in which case the pair (k,v) is ignored. |
body_replacement_map |
A mapping from node shared pointers to node shared pointers. For each pair (k,v) in the map, node k is replaced by node v, except if k==v, the pair (k,v) is ignored. Note that if k is a parameter, its users will be redirected to v, but k will not be replaced in the function’s parameter list. |
template <typename T>
std::vector<std::shared_ptr<Node>> topological_sort(T root_nodes)
Topological sort of nodes needed to compute root_nodes.
OPENVINO_API void serialize(
    const std::shared_ptr<const ov::Model>& m,
    const std::string& xml_path,
    const std::string& bin_path,
    ov::pass::Serialize::Version version
)
Serialize given model into IR. The generated .xml and .bin files will be saved into provided paths. This method serializes model “as-is” that means no weights compression and other possible transformations are applied. It is recommended to use ov::save_model function instead of ov::serialize, because it is aligned with default model conversion flow.
Parameters:
m |
Model which will be converted to IR representation. |
xml_path |
Path where .xml file will be saved. |
bin_path |
Path where .bin file will be saved (optional). The same name as for xml_path will be used by default. |
version |
Version of the generated IR (optional). |
OPENVINO_API void save_model(
    const std::shared_ptr<const ov::Model>& model,
    const std::string& output_model,
    bool compress_to_fp16
)
Save given model into IR. Floating point weights are compressed to FP16 by default. This method saves a model to IR applying all necessary transformations that are usually applied in the model conversion flow provided by the mo tool. Particularly, floating point weights are compressed to FP16.
Parameters:
model |
Model which will be converted to IR representation. |
output_model |
Path to the output model file, must have extension .xml |
compress_to_fp16 |
Whether to compress floating point weights to FP16 (true by default) |
OPENVINO_API ov::Dimension get_batch(const std::shared_ptr<const ov::Model>& f)
Helper method to get associated batch size for a Model.
Checks layout of each parameter in a Model and extracts value for N (B) dimension. All values are then merged and returned
Parameters:
with details in case of error. Possible errors are:
|
|
f |
Model where to look for a batch_size value |
Returns:
Dimension representing current batch size. Can represent a number or be a dynamic
OPENVINO_API void set_batch(const std::shared_ptr<ov::Model>& model, ov::Dimension batch_size)
Helper method to set batch size to a Model.
Checks layout of each parameter in a Model and sets value for N (B) dimension. Then performs validation and type propagation
Parameters:
with details in case of error. Possible errors are:
|
|
model |
model where to set batch_size value |
batch_size |
Batch size value. For dynamic batch size, Dimension::dynamic() can be passed. |
OPENVINO_API ResultVector as_result_vector(const OutputVector& values)
Returns a ResultVector referencing values.
OPENVINO_API PartialShape operator + (const PartialShape& s1, const PartialShape& s2)
Elementwise addition of two PartialShape objects.
If s1 or s2 has dynamic rank, returns PartialShape::dynamic().
If s1 and s2 both have static rank, and their ranks are unequal, throws std::invalid_argument.
If s1 and s2 both have static rank, and their ranks are equal, returns a new shape whose i-th dimension is s1[i] + s2[i].
Parameters:
s1 |
Left operand for addition. |
s2 |
Right operand for addition. |
std::invalid_argument |
If |
Returns:
The result of elementwise adding s1
to s2
(see description).
OPENVINO_API std::ostream& operator << (std::ostream& str, const PartialShape& shape)
Inserts a human-readable representation of a PartialShape into an output stream.
The output to the stream is in “informal” notation. In other words:
If shape has dynamic rank, inserts the string ?.
If shape has static rank, inserts the string {, then inserts each dimension of shape into the output stream separated by commas, then inserts }.
Example:
PartialShape s1{PartialShape::dynamic()};
PartialShape s2{};
PartialShape s3{1,Dimension::dynamic(),2,3};
PartialShape s4{2,3,4};
std::cout << s1 << std::endl
<< s2 << std::endl
<< s3 << std::endl
<< s4 << std::endl;
Output :
?
{}
{1,?,2,3}
{2,3,4}
Parameters:
str |
The output stream targeted for insertion. |
shape |
The shape to be inserted into |
Returns:
A reference to str
after insertion.
template <typename SHAPE_TYPE>
std::vector<size_t> row_major_strides(const SHAPE_TYPE& shape)
Row-major strides for a shape.
template <typename Type, typename U>
auto as_type_ptr(const U& value)
Casts a std::shared_ptr<Value> to a std::shared_ptr<Type> if it is of type Type, nullptr otherwise
OPENVINO_API int64_t normalize_axis(const Node* node, std::int64_t axis, const Rank& tensor_rank)
Handle out of range axis.
Parameters:
node |
The node with requested axis. |
axis |
The requested axis value. |
tensor_rank |
The corresponding tensor rank. |
Returns:
Checking if axis is in range [-tensor_rank, tensor_rank-1], otherwise returns error. If negative axis, it counts from the last to the first axis, by adding tensor_rank to axis.
OPENVINO_API std::vector<size_t> normalize_axes(
    const std::string& node_description,
    const std::vector<int64_t>& axes,
    const Rank& tensor_rank
)
Handle out of range axes in vector.
Parameters:
node_description |
The name of node with requested axes. |
axes |
The requested vector of axes. |
tensor_rank |
The corresponding tensor rank. |
Returns:
If any negative axis in vector, it counts from the last to the first axis, by adding tensor_rank to axis.
OPENVINO_API int64_t normalize_axis(
    const std::string& node_description,
    std::int64_t axis,
    const Rank& tensor_rank
)
Handle out of range axis.
Parameters:
node_description |
The node with requested axis. |
axis |
The requested axis value. |
tensor_rank |
The corresponding tensor rank. |
Returns:
Checking if axis is in range [-tensor_rank, tensor_rank-1], otherwise returns error. If negative axis, it counts from the last to the first axis, by adding tensor_rank to axis.
OPENVINO_API int64_t normalize_axis(
    const Node* node,
    std::int64_t axis,
    std::uint64_t tensor_rank,
    std::int64_t axis_range_min,
    std::int64_t axis_range_max
)
Handle out of range axis.
Parameters:
node |
The node with requested axis. |
axis |
The requested axis value. |
tensor_rank |
The corresponding tensor rank. |
axis_range_min |
The min value of accepted range for axis. |
axis_range_max |
The max value of accepted range for axis. |
Returns:
Checking if axis is in range [axis_range_min, axis_range_max], otherwise returns error. If negative axis, it counts from the last to the first axis, by adding tensor_rank to axis.
OPENVINO_API int64_t normalize_axis(
    const std::string& node_description,
    std::int64_t axis,
    std::uint64_t tensor_rank,
    std::int64_t axis_range_min,
    std::int64_t axis_range_max
)
Handle out of range axis.
Parameters:
node_description |
The name of node with requested axis. |
axis |
The requested axis value. |
tensor_rank |
The corresponding tensor rank. |
axis_range_min |
The min value of accepted range for axis. |
axis_range_max |
The max value of accepted range for axis. |
Returns:
Checking if axis is in range [axis_range_min, axis_range_max], otherwise returns error. If negative axis, it counts from the last to the first axis, by adding tensor_rank to axis.
OPENVINO_API void normalize_axes(
    const Node* node,
    const int64_t& tensor_rank,
    std::vector<int64_t>& axes
)
Handle out of range axes in vector. If any negative axis in vector, it counts from the last to the first axis, by adding tensor_rank to axis. Changes axes vector inplace.
Parameters:
node |
The node with requested axes. |
tensor_rank |
The corresponding tensor rank. |
axes |
The requested vector of axes. |
OPENVINO_API bool evaluate_as_partial_shape(const Output<Node>& output, PartialShape& pshape)
Evaluates lower and upper value estimations for the output tensor. Estimation would be represented as partial shape object using Dimension(min, max) for each element.
Parameters:
output |
Node output pointing to the tensor for estimation. |
pshape |
Resulting estimation would be stored in this PartialShape. |
Returns:
boolean status if value evaluation was successful.
OPENVINO_API std::shared_ptr<op::v0::Constant> get_constant_from_source(const Output<Node>& source)
Runs an estimation of source tensor. If it succeeded to calculate both bounds and they are the same returns Constant operation from the resulting bound, otherwise nullptr.
OPENVINO_API bool default_label_evaluator(const Node* node, TensorLabelVector& output_labels)
Propagates value label from 0 input to the only output through an operation. Not applicable for operations which require values interaction (example: mathematical operations). Could be used for movement operations (example: gathering, shape change)
Parameters:
node |
Operation to be performed |
output_labels |
Vector of TensorLabel objects representing resulting value labels |
Returns:
boolean status if label evaluation was successful.
OPENVINO_API void generate_transpose_default_order(
    std::vector<int64_t>& axes_order,
    const size_t length
)
Generates transpose default axes order at end of input vector.
Default axes order is decreasing sequence numbers which start from length - 1
.
Parameters:
axes_order |
Vector where default order will be generated. |
length |
Sequence length of axes order. |
OPENVINO_API bool is_valid_axes_order(const std::vector<int64_t>& axes_order, const size_t size)
Check if vector of axes order has got valid values.
Axes order has to be unique numbers in range of [0, size).
Parameters:
axes_order |
Vector with axes order to check. |
size |
Input for transpose rank size. |
Returns:
true if axes order is valid otherwise false.
OPENVINO_API bool has_no_labels(const TensorLabel& labels)
Checks label tensor if there is no label.
Parameters:
labels |
Label tensor for check. |
Returns:
True if there is no labels, otherwise false.
OPENVINO_API std::vector<PartialShape> get_node_input_partial_shapes(const ov::Node& node)
Get the node input partial shapes.
Parameters:
node |
Node to extract input shapes. |
Returns:
Vector of PartialShapes of each input.
OPENVINO_API bool is_rank_compatible_any_of(
    const ov::Rank& rank,
    const std::vector<ov::Rank>& ranks
)
Check if rank is compatible to any of rank from container.
Parameters:
rank |
Rank to check. |
ranks |
Vector of ranks used to check input rank compatibility. |
Returns:
True if rank compatible to any from ranks, otherwise false.
OPENVINO_API_C(const ov::Version) get_openvino_version()
Gets the current OpenVINO version.
Returns:
The current OpenVINO version
template <typename TContainer>
constexpr auto make_tensor_accessor(const TContainer& c)
Makes TensorAccessor for specific tensor container.
Parameters:
TContainer |
Type of tensor containers |
c |
Container of tensors. |
Returns:
TensorContainer for specific type.
See also:
TensorAccessor for supported types.
auto make_tensor_accessor()
Makes empty TensorAccessor which return empty tensor for any port number.
Returns:
TensorAccessor to return empty tensor.
template <typename T, typename TResult, class UnaryOperation>
TResult get_raw_data_as(
    const element::Type_t et,
    const void* const ptr,
    const size_t size,
    UnaryOperation&& func
)
Get the raw data as TResult object.
Parameters:
T |
TResult data type. |
TResult |
Type of return object, must support creation of std::inserter. Default std::vector<T>. |
UnaryOperation |
Unary function object applied on data with signature (T f(const U u)). |
et |
Element type of input data. |
ptr |
Pointer to data of type et. |
size |
Data size as number of elements. |
func |
Unary operation function object. |
ov::AssertionFailure |
for not supported element type. |
Returns:
Object of TResult with data from input pointer and transformed by unary operation.
template <typename T, typename TResult, class UnaryOperation>
OPENVINO_SUPPRESS_DEPRECATED_START TResult get_tensor_data_as(
    HostTensor& tv,
    UnaryOperation&& func
)
Get data from Host tensor as object TResult.
Parameters:
T |
TResult data type. |
TResult |
Type of return object, must support creation of std::inserter. Default std::vector<T>. |
UnaryOperation |
Unary function object applied on data with signature (T f(const U u)). |
tv |
Input host tensor. |
func |
Unary operation function object. |
Returns:
Object of TResult with data from host tensor.
template <typename T, typename TResult, class UnaryOperation>
OPENVINO_SUPPRESS_DEPRECATED_END TResult get_tensor_data_as(
    const Tensor& t,
    UnaryOperation&& func
)
Get data from ov:tensor as object TResult.
Parameters:
T |
TResult data type. |
TResult |
Type of return object, must support creation of std::inserter. Default std::vector<T>. |
UnaryOperation |
Unary function object applied on data with signature (T f(const U u)). |
t |
Input tensor. |
func |
Unary operation function object. |
Returns:
Object of TResult with data from tensor.
FRONTEND_API void shutdown()
Shut down the OpenVINO by deleting all static-duration objects allocated by the library and releasing dependent resources.
This function should be used by advanced user to control unload the resources.
You might want to use this function if you are developing a dynamically-loaded library which should clean up all resources after itself when the library is unloaded.
Returns set of nodes from original model which are determined as supported after applied transformation pipeline.
Parameters:
model |
Original model |
transform |
Transformation pipeline function |
is_node_supported |
Function returning whether node is supported or not |
Returns:
Set of strings which contains supported node names
std::shared_ptr<ITensor> make_tensor(
    const element::Type type,
    const Shape& shape,
    const Allocator& allocator
)
Constructs Tensor using element type and shape. Allocate internal host storage using default allocator.
Parameters:
type |
Tensor element type |
shape |
Tensor shape |
allocator |
allocates memory for internal tensor storage |
std::shared_ptr<ITensor> make_tensor(
    const element::Type type,
    const Shape& shape,
    void* host_ptr,
    const Strides& strides
)
Constructs Tensor using element type and shape. Wraps allocated host memory.
Does not perform memory allocation internally
Parameters:
type |
Tensor element type |
shape |
Tensor shape |
host_ptr |
Pointer to pre-allocated host memory |
strides |
Optional strides parameters in bytes. Strides are supposed to be computed automatically based on shape and element size |
std::shared_ptr<ITensor> make_tensor(
    const std::shared_ptr<ITensor>& other,
    const Coordinate& begin,
    const Coordinate& end
)
Constructs region of interest (ROI) tensor form another tensor.
Does not perform memory allocation internally
A Number of dimensions in begin
and end
must match number of dimensions in other.get_shape()
Parameters:
other |
original tensor |
begin |
start coordinate of ROI object inside of the original object. |
end |
end coordinate of ROI object inside of the original object. |
ov::Tensor make_tensor(const ov::SoPtr<ITensor>& tensor)
Constructs public ov::Tensor class.
Parameters:
tensor |
Tensor implementation |
Returns:
OpenVINO Tensor
bool is_cpu_map_available()
Checks whether cpu_mapping Available.
Returns:
True if CPU mapping is available, false otherwise
int get_num_numa_nodes()
Get number of numa nodes.
Returns:
Number of numa nodes
int get_num_sockets()
Get number of sockets.
Returns:
Number of sockets
std::vector<std::vector<int>> get_proc_type_table()
Returns a table of number of processor types on Linux/Windows.
Processor table of one socket CPU desktop ALL_PROC | MAIN_CORE_PROC | EFFICIENT_CORE_PROC | HYPER_THREADING_PROC 32 8 16 8 // Total number of one socket
Returns:
A table about number of CPU cores of different types defined with ColumnOfProcessorTypeTable The following are two example of processor type table.
Processor table of two socket CPUs XEON server ALL_PROC | MAIN_CORE_PROC | EFFICIENT_CORE_PROC | HYPER_THREADING_PROC 96 48 0 48 // Total number of two sockets 48 24 0 24 // Number of socket one 48 24 0 24 // Number of socket two
std::vector<std::vector<int>> get_org_proc_type_table()
Returns a table of original number of processor types without filtering other plugins occupying CPU resources. The difference from get_proc_type_table: This is used to get the configuration of current machine. For example, GPU plugin occupies all Pcores, there is only one type core in proc_type_table from get_proc_type_table(). If user wants to get the real configuration of this machine which should be got from get_org_proc_type_table.
Returns:
A table about number of CPU cores of different types defined with ColumnOfProcessorTypeTable
void reserve_available_cpus(
    const std::vector<std::vector<int>> streams_info_table,
    std::vector<std::vector<int>>& stream_processors,
    const int cpu_status
)
Get and reserve available cpu ids.
Parameters:
streams_info_table |
streams information table. |
stream_processors |
processors grouped in stream which is used in core binding in cpu streams executor |
cpu_status |
set cpu status |
void set_cpu_used(const std::vector<int>& cpu_ids, const int used)
Set CPU_MAP_USED_FLAG of cpu_mapping.
Parameters:
cpu_ids |
cpus in cpu_mapping. |
used |
update CPU_MAP_USED_FLAG of cpu_mapping with this flag bit |
int get_socket_by_numa_node(int numa_node_id)
Get socket id by current numa node id.
Parameters:
numa_node_id |
numa node id |
Returns:
socket id