ie_icnn_network.hpp
Go to the documentation of this file.
1 // Copyright (C) 2018-2019 Intel Corporation
2 // SPDX-License-Identifier: Apache-2.0
3 //
4 
5 /**
6  * @brief This is a header file for the ICNNNetwork class
7  * @file ie_icnn_network.hpp
8  */
9 #pragma once
10 
11 #include "ie_common.h"
12 #include "ie_layers.h"
13 #include "ie_data.h"
14 #include "ie_device.hpp"
15 #include "ie_blob.h"
16 #include "details/ie_irelease.hpp"
17 #include "ie_preprocess.hpp"
18 #include "ie_input_info.hpp"
19 #include "ie_icnn_network_stats.hpp"
20 #include "ie_iextension.h"
21 #include <memory>
22 #include <map>
23 #include <string>
24 
25 namespace InferenceEngine {
26 
27 /**
28  * @brief A collection that contains string as key, and Data smart pointer as value
 * NOTE(review): consumed by ICNNNetwork::getOutputsInfo(); keys appear to be the names of the
 * network output Data nodes — confirm against the getOutputsInfo() implementation.
29  */
30 using OutputsDataMap = std::map<std::string, DataPtr>;
31 
32 /**
33  * @brief This is the main interface to describe the NN topology
34  */
35 class ICNNNetwork : public details::IRelease {
36 public:
37  using Ptr = std::shared_ptr<ICNNNetwork>;
38 
39  /**
40  * @brief Returns the main network operating precision.
41  * This may be MIXED if not homogeneous.
42  * @return A precision type
43  */
44  virtual Precision getPrecision() const noexcept = 0;
45 
46  /**
47  * @brief Gets the network output Data node information. The received info is stored in the given Data node.
48  * For single and multiple outputs networks.
49  * @param out Reference to the OutputsDataMap object
50  */
51  virtual void getOutputsInfo(OutputsDataMap& out) const noexcept = 0;
52 
53  /**
54  * @brief Gets the network input Data node information. The received info is stored in the given InputsDataMap object.
55  * For single and multiple inputs networks.
56  * This method must be called to find out input names for using them later during filling of a map
57  * of blobs passed later to InferenceEngine::IInferencePlugin::Infer()
58  * @param inputs Reference to InputsDataMap object.
59  */
60  virtual void getInputsInfo(InputsDataMap& inputs) const noexcept = 0;
61 
62 
63  /**
64  * @brief Returns information on certain input pointed by inputName
65  * @param inputName Name of input layer to get info on
66  * @return A smart pointer to the input information
67  */
68  virtual InputInfo::Ptr getInput(const std::string& inputName) const noexcept = 0;
69 
70  /**
71  * @brief Gets the network name. The name is stored in the given pName string.
72  * @param pName - will receive actual network name, specified in IR file,
73  * pName should point to valid memory address before invoking this function
74  * @param len - size in bytes of pName buffer, actual name is trimmed by this size
75  */
76  virtual void getName(char* pName, size_t len) const noexcept = 0;
77 
78  /**
79  * @brief Returns the network name.
80  * @return Network name
81  */
82  virtual const std::string& getName() const noexcept = 0;
83 
84  /**
85  * @brief Returns the number of layers in the network as an integer value
86  * @return The number of layers as an integer value
87  */
88  virtual size_t layerCount() const noexcept = 0;
89 
90  /**
91  * @brief Returns a smart pointer reference to a Data node given its name.
92  * If the Data node is missing, returns reference to a default initialized new empty data pointer with given name.
93  * @param dname Name of the Data node
94  * @return Data node smart pointer
95  */
96  virtual DataPtr& getData(const char* dname) noexcept = 0;
97 
98  /**
99  * @brief Insert a layer into the network. A user is responsible to connect it to other data elements.
100  * @param layer Const reference to a layer smart pointer
101  */
102  virtual void addLayer(const CNNLayerPtr& layer) noexcept = 0;
103 
104  /**
105  * @brief Adds output to the layer
106  * @param layerName Name of the layer
107  * @param outputIndex Index of the output
108  * @param resp Response message
109  * @return Status code of the operation
110  */
111  virtual StatusCode
112  addOutput(const std::string& layerName, size_t outputIndex = 0, ResponseDesc* resp = nullptr) noexcept = 0;
113 
114  /**
115  * @brief Gets network layer with the given name
116  * @param layerName Given name of the layer
117  * @param out Pointer to the found CNNLayer object with the given name
118  * @param resp Pointer to the response message that holds a description of an error if any occurred
119  * @return Status code of the operation. OK if succeeded
120  */
121  virtual StatusCode getLayerByName(const char* layerName, CNNLayerPtr& out, ResponseDesc* resp) const noexcept = 0;
122 
123  /**
124  * @deprecated Deprecated since TargetDevice is deprecated. Specify target device in InferenceEngine::Core directly.
125  * @brief Sets a desirable device to perform all work on.
126  * Some plug-ins might not support some target devices and may abort execution with an appropriate error message.
127  * @param device Device to set as a target
128  */
 // NOTE(review): the deprecation marker is applied only off Windows — presumably to avoid
 // an MSVC-specific issue with the attribute placement; confirm before changing.
129  #ifndef _WIN32
130  INFERENCE_ENGINE_DEPRECATED
131  #endif
132  virtual void setTargetDevice(TargetDevice device) noexcept = 0;
133 
134  /**
135  * @deprecated Deprecated since TargetDevice is deprecated
136  * @brief Gets the target device.
137  * If setTargetDevice() was not called before, returns eDefault
138  * @return A TargetDevice instance
139  */
140  #ifndef _WIN32
141  INFERENCE_ENGINE_DEPRECATED
142  #endif
143  virtual TargetDevice getTargetDevice() const noexcept = 0;
144 
145  /**
146  * @deprecated Use ICNNNetwork::setBatchSize(size_t, ResponseDesc*)
147  * @brief Changes the inference batch size
148  */
149  INFERENCE_ENGINE_DEPRECATED
150  virtual StatusCode setBatchSize(const size_t size) noexcept {
151  ResponseDesc resp;
152  return setBatchSize(size, &resp);
153  }
154 
155  /**
156  * @brief Changes the inference batch size.
157  * @note There are several limitations and it's not recommended to use it. Set batch to the input shape and call ICNNNetwork::reshape.
158  * @param size Size of batch to set
159  * @return Status code of the operation
160  * @note: Current implementation of the function sets batch size to the first dimension of all layers in the networks.
161  * Before calling it make sure that all your layers have batch in the first dimension, otherwise the method works incorrectly.
162  * This limitation is resolved via shape inference feature
163  * by using InferenceEngine::ICNNNetwork::reshape method.
164  * To read more refer to the Shape Inference section in documentation
165  */
166  virtual StatusCode setBatchSize(size_t size, ResponseDesc* responseDesc) noexcept = 0;
167 
168  /**
169  * @brief Gets the inference batch size
170  * @return The size of batch as a size_t value
171  */
172  virtual size_t getBatchSize() const noexcept = 0;
173 
174  /**
175  * @brief Map of pairs: name of corresponding data and its dimension.
176  */
177  using InputShapes = std::map<std::string, SizeVector>;
178 
179  /**
180  * @brief Run shape inference with new input shapes for the network
181  * @param inputShapes - map of pairs: name of corresponding data and its dimension.
182  * @param resp Pointer to the response message that holds a description of an error if any occurred
183  * @return Status code of the operation
184  */
185  virtual StatusCode reshape(const InputShapes& /*inputShapes*/, ResponseDesc* /*resp*/) noexcept { return NOT_IMPLEMENTED; };
186 
187  /**
188  * @brief Registers extension within the plugin
189  * @param extension Pointer to already loaded reader extension with shape propagation implementations
190  * @param resp Pointer to the response message that holds a description of an error if any occurred
191  * @return Status code of the operation. OK if succeeded
192  */
193  virtual StatusCode
194  AddExtension(const IShapeInferExtensionPtr& /*extension*/, ResponseDesc* /*resp*/) noexcept { return NOT_IMPLEMENTED; };
195 
 /**
  * @brief Gets the statistics of the network (see ICNNNetworkStats, the interface describing
  * NN topology scoring statistics).
  * @param stats Pointer that receives a pointer to the ICNNNetworkStats interface
  * @param resp Pointer to the response message that holds a description of an error if any occurred
  * @return Status code of the operation; the default implementation returns NOT_IMPLEMENTED
  */
196  virtual StatusCode getStats(ICNNNetworkStats** /*stats*/, ResponseDesc* /*resp*/) const noexcept { return NOT_IMPLEMENTED; };
197 
198  /**
199  * @brief Serialize network to IR and weights files.
200  * @param xmlPath Path to output IR file.
201  * @param binPath Path to output weights file.
202  * @return Status code of the operation
203  */
204  virtual StatusCode serialize(const std::string &xmlPath, const std::string &binPath, ResponseDesc* resp) const noexcept = 0;
205 };
206 } // namespace InferenceEngine
TargetDevice
Describes known device types.
Definition: ie_device.hpp:24
virtual const std::string & getName() const noexcept=0
Returns the network name.
Definition: ie_argmax_layer.hpp:11
std::shared_ptr< CNNLayer > CNNLayerPtr
A smart pointer to the CNNLayer.
Definition: ie_common.h:36
virtual DataPtr & getData(const char *dname) noexcept=0
Returns a smart pointer reference to a Data node given its name. If the Data node is missing, returns reference to a default initialized new empty data pointer with given name.
virtual void getInputsInfo(InputsDataMap &inputs) const noexcept=0
Gets the network input Data node information. The received info is stored in the given InputsDataMap object.
StatusCode
This enum contains codes for all possible return values of the interface functions.
Definition: ie_common.h:205
virtual StatusCode reshape(const InputShapes &, ResponseDesc *) noexcept
Run shape inference with new input shapes for the network.
Definition: ie_icnn_network.hpp:185
A header file for Blob and generic TBlob<>
std::map< std::string, InputInfo::Ptr > InputsDataMap
A collection that contains string as key, and InputInfo smart pointer as value.
Definition: ie_input_info.hpp:198
virtual InputInfo::Ptr getInput(const std::string &inputName) const noexcept=0
Returns information on certain input pointed by inputName.
virtual StatusCode setBatchSize(const size_t size) noexcept
Changes the inference batch size.
Definition: ie_icnn_network.hpp:150
virtual StatusCode getLayerByName(const char *layerName, CNNLayerPtr &out, ResponseDesc *resp) const noexcept=0
Gets network layer with the given name.
This header file provides structures to store info about pre-processing of network inputs (scale...
virtual size_t getBatchSize() const noexcept=0
Gets the inference batch size.
Represents detailed information for an error.
Definition: ie_common.h:228
a header file for internal Layers structure to describe layers information
This is the main interface to describe the NN topology.
Definition: ie_icnn_network.hpp:35
This header file defines the main Data representation node.
This is a header file for the ICNNNetworkStats class.
virtual void setTargetDevice(TargetDevice device) noexcept=0
Sets a desirable device to perform all work on. Some plug-ins might not support some target devices and may abort execution with an appropriate error message.
virtual size_t layerCount() const noexcept=0
Returns the number of layers in the network as an integer value.
virtual void addLayer(const CNNLayerPtr &layer) noexcept=0
Insert a layer into the network. A user is responsible to connect it to other data elements.
virtual StatusCode addOutput(const std::string &layerName, size_t outputIndex=0, ResponseDesc *resp=nullptr) noexcept=0
Adds output to the layer.
virtual TargetDevice getTargetDevice() const noexcept=0
Gets the target device. If setTargetDevice() was not called before, returns eDefault.
This is a header file for Inference Engine Extension Interface.
This header file contains aspects of working on different devices like CPU, GEN, FPGA, etc.
std::shared_ptr< InputInfo > Ptr
A smart pointer to the InputInfo instance.
Definition: ie_input_info.hpp:28
A header file for the Inference Engine plugins destruction mechanism.
std::shared_ptr< Data > DataPtr
Smart pointer to Data.
Definition: ie_common.h:50
This is the interface to describe the NN topology scoring statistics.
Definition: ie_icnn_network_stats.hpp:38
virtual StatusCode AddExtension(const IShapeInferExtensionPtr &, ResponseDesc *) noexcept
Registers extension within the plugin.
Definition: ie_icnn_network.hpp:194
std::map< std::string, DataPtr > OutputsDataMap
A collection that contains string as key, and Data smart pointer as value.
Definition: ie_icnn_network.hpp:30
std::map< std::string, SizeVector > InputShapes
Map of pairs: name of corresponding data and its dimension.
Definition: ie_icnn_network.hpp:177
virtual Precision getPrecision() const noexcept=0
Returns the main network operating precision. This may be MIXED if not homogeneous.
virtual void getOutputsInfo(OutputsDataMap &out) const noexcept=0
Gets the network output Data node information. The received info is stored in the given Data node.
a header file for InputInfo class
This is a header file with common inference engine definitions.
This class holds precision value and provides precision related operations.
Definition: ie_precision.hpp:19
virtual StatusCode serialize(const std::string &xmlPath, const std::string &binPath, ResponseDesc *resp) const noexcept=0
Serialize network to IR and weights files.