ie_icnn_network.hpp
Go to the documentation of this file.
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
4 
/**
 * @brief This is a header file for the ICNNNetwork class
 *
 * @file ie_icnn_network.hpp
 */
#pragma once

#include <map>
#include <memory>
#include <string>

#include "details/ie_irelease.hpp"
#include "ie_blob.h"
#include "ie_common.h"
#include "ie_data.h"
#include "ie_icnn_network_stats.hpp"
#include "ie_iextension.h"
#include "ie_input_info.hpp"
#include "ie_layers.h"
#include "ie_preprocess.hpp"
25 
namespace ngraph {

// Forward declaration: the full definition lives in the nGraph library headers.
class Function;

}  // namespace ngraph
31 
32 namespace InferenceEngine {
33 
34 /**
35  * @brief A collection that contains string as key, and Data smart pointer as value
36  */
37 using OutputsDataMap = std::map<std::string, DataPtr>;
38 
39 /**
40  * @interface ICNNNetwork
41  * @brief This is the main interface to describe the NN topology
42  */
43 class INFERENCE_ENGINE_API_CLASS(ICNNNetwork): public details::IRelease {
44 public:
45  /**
46  * @brief A shared pointer to a ICNNNetwork interface
47  */
48  using Ptr = std::shared_ptr<ICNNNetwork>;
49 
50  /**
51  * @brief Returns nGraph function
52  * @return nGraph function
53  */
54  virtual std::shared_ptr<ngraph::Function> getFunction() noexcept = 0;
55 
56  /**
57  * @brief Returns constant nGraph function
58  * @return constant nGraph function
59  */
60  virtual std::shared_ptr<const ngraph::Function> getFunction() const noexcept = 0;
61 
62  /**
63  * @deprecated Network precision does not make sence, use precision on egdes. The method will be removed in 2021.1
64  * @brief Returns the main network operating precision.
65  *
66  * This may be MIXED if not homogeneous.
67  *
68  * @return A precision type
69  */
70  INFERENCE_ENGINE_DEPRECATED("Network precision does not make sence, use precision on egdes. The method will be removed in 2021.1")
71  virtual Precision getPrecision() const noexcept = 0;
72 
73  /**
74  * @brief Gets the network output Data node information. The received info is stored in the given Data node.
75  *
76  * For single and multiple outputs networks.
77  *
78  * This method need to be called to find output names for using them later
79  * when calling InferenceEngine::InferRequest::GetBlob or InferenceEngine::InferRequest::SetBlob
80  *
81  *
82  * @param out Reference to the OutputsDataMap object
83  */
84  virtual void getOutputsInfo(OutputsDataMap& out) const noexcept = 0;
85 
86  /**
87  * @brief Gets the network input Data node information. The received info is stored in the given InputsDataMap
88  * object.
89  *
90  * For single and multiple inputs networks.
91  * This method need to be called to find out input names for using them later
92  * when calling InferenceEngine::InferRequest::SetBlob
93  *
94  * @param inputs Reference to InputsDataMap object.
95  */
96  virtual void getInputsInfo(InputsDataMap& inputs) const noexcept = 0;
97 
98  /**
99  * @brief Returns information on certain input pointed by inputName
100  *
101  * @param inputName Name of input layer to get info on
102  * @return A smart pointer to the input information
103  */
104  virtual InputInfo::Ptr getInput(const std::string& inputName) const noexcept = 0;
105 
106  /**
107  * @deprecated Use ICNNNetwork::getName() instead. The method will be removed in 2021.1
108  * @brief Gets the network name. The name is stored in the given pName string.
109  *
110  * @param pName - will receive actual network name, specified in IR file,
111  * pName should point to valid memory address before invoking this function
112  * @param len - size in bytes of pName buffer, actual name is trimmed by this size
113  */
114  INFERENCE_ENGINE_DEPRECATED("Use ICNNNetwork::getName() instead. The method will be removed in 2021.1")
115  virtual void getName(char* pName, size_t len) const noexcept = 0;
116 
117  /**
118  * @brief Returns the network name.
119  *
120  * @return Network name
121  */
122  virtual const std::string& getName() const noexcept = 0;
123 
124  /**
125  * @brief Returns the number of layers in the network as an integer value
126  *
127  * @return The number of layers as an integer value
128  */
129  virtual size_t layerCount() const noexcept = 0;
130 
131  /**
132  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
133  * @brief Returns a smart pointer reference to a Data node given its name.
134  *
135  * If the Data node is missing, returns reference to a default initialized new empty data pointer with given name.
136  *
137  * @param dname Name of the Data node
138  * @return Data node smart pointer
139  */
140  INFERENCE_ENGINE_DEPRECATED("Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1")
141  virtual DataPtr& getData(const char* dname) noexcept = 0;
142 
143  /**
144  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
145  * @brief Insert a layer into the network. A user is responsible to connect it to other data elements.
146  *
147  * @param layer Const reference to a layer smart pointer
148  */
149  INFERENCE_ENGINE_DEPRECATED("Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1")
150  virtual void addLayer(const CNNLayerPtr& layer) noexcept = 0;
151 
152  /**
153  * @brief Adds output to the layer
154  *
155  * @param layerName Name of the layer
156  * @param outputIndex Index of the output
157  * @param resp Response message
158  * @return Status code of the operation
159  */
160  virtual StatusCode addOutput(const std::string& layerName, size_t outputIndex = 0,
161  ResponseDesc* resp = nullptr) noexcept = 0;
162 
163  /**
164  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
165  * @brief Gets network layer with the given name
166  *
167  * @param layerName Given name of the layer
168  * @param out Pointer to the found CNNLayer object with the given name
169  * @param resp Pointer to the response message that holds a description of an error if any occurred
170  * @return Status code of the operation. InferenceEngine::OK if succeeded
171  */
172  INFERENCE_ENGINE_DEPRECATED("Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1")
173  virtual StatusCode getLayerByName(const char* layerName, CNNLayerPtr& out, ResponseDesc* resp) const noexcept = 0;
174 
175  /**
176  * @brief Changes the inference batch size.
177  *
178  * @note There are several limitations and it's not recommended to use it. Set batch to the input shape and call
179  * ICNNNetwork::reshape.
180  *
181  * @param size Size of batch to set
182  * @param responseDesc Pointer to the response message that holds a description of an error if any occurred
183  * @return Status code of the operation
184  * @note Current implementation of the function sets batch size to the first dimension of all layers in the
185  * networks. Before calling it make sure that all your layers have batch in the first dimension, otherwise the
186  * method works incorrectly. This limitation is resolved via shape inference feature by using
187  * InferenceEngine::ICNNNetwork::reshape method. To read more refer to the Shape Inference section in documentation
188  *
189  * @note Current implementation of the function sets batch size to the first dimension of all layers in the
190  * networks. Before calling it make sure that all your layers have batch in the first dimension, otherwise the
191  * method works incorrectly. This limitation is resolved via shape inference feature by using
192  * InferenceEngine::ICNNNetwork::reshape method. To read more refer to the Shape Inference section in documentation
193  */
194  virtual StatusCode setBatchSize(size_t size, ResponseDesc* responseDesc) noexcept = 0;
195 
196  /**
197  * @brief Gets the inference batch size
198  *
199  * @return The size of batch as a size_t value
200  */
201  virtual size_t getBatchSize() const noexcept = 0;
202 
203  /**
204  * @brief Map of pairs: name of corresponding data and its dimension.
205  */
206  using InputShapes = std::map<std::string, SizeVector>;
207 
208  /**
209  * @brief Run shape inference with new input shapes for the network
210  *
211  * @param inputShapes - map of pairs: name of corresponding data and its dimension.
212  * @param resp Pointer to the response message that holds a description of an error if any occurred
213  * @return Status code of the operation
214  */
215  virtual StatusCode reshape(const InputShapes& inputShapes, ResponseDesc* resp) noexcept {
216  (void)inputShapes;
217  (void)resp;
218  return NOT_IMPLEMENTED;
219  };
220 
221  /**
222  * @deprecated Use Core::AddExtension to add an extension to the library
223  * @brief Registers extension within the plugin
224  *
225  * @param extension Pointer to already loaded reader extension with shape propagation implementations
226  * @param resp Pointer to the response message that holds a description of an error if any occurred
227  * @return Status code of the operation. InferenceEngine::OK if succeeded
228  */
229  INFERENCE_ENGINE_DEPRECATED("Use Core::AddExtension to add an extension to the library")
230  virtual StatusCode AddExtension(const IShapeInferExtensionPtr& extension, ResponseDesc* resp) noexcept;
231 
232  /**
233  * @deprecated Migrate to IR v10 and use quantization approach with FakeQuantize
234  * @brief Gets the statistics.
235  * @param stats The statistics
236  * @param resp Pointer to the response message that holds a description of an error if any occurred
237  * @return Status code of the operation
238  */
239  IE_SUPPRESS_DEPRECATED_START
240  INFERENCE_ENGINE_INTERNAL("Migrate to IR v10 and use quantization approach with FakeQuantize")
241  virtual StatusCode getStats(ICNNNetworkStats** stats, ResponseDesc* resp) const noexcept {
242  (void)stats;
243  (void)resp;
244  return NOT_IMPLEMENTED;
245  };
246  IE_SUPPRESS_DEPRECATED_END
247 
248  /**
249  * @brief Serialize network to IR and weights files.
250  *
251  * @param xmlPath Path to output IR file.
252  * @param binPath Path to output weights file.
253  * @param resp Pointer to the response message that holds a description of an error if any occurred
254  * @return Status code of the operation
255  */
256  virtual StatusCode serialize(const std::string& xmlPath, const std::string& binPath, ResponseDesc* resp) const
257  noexcept = 0;
258 
259  /**
260  * @brief A virtual destructor.
261  */
262  virtual ~ICNNNetwork();
263 };
264 } // namespace InferenceEngine
std::shared_ptr< IShapeInferExtension > IShapeInferExtensionPtr
A shared pointer to an IShapeInferExtension interface.
Definition: ie_iextension.h:366
std::vector< size_t > SizeVector
Represents tensor size.
Definition: ie_common.h:29
Definition: cldnn_config.hpp:16
std::shared_ptr< CNNLayer > CNNLayerPtr
A smart pointer to the CNNLayer.
Definition: ie_common.h:39
This class contains information about each input of the network.
Definition: ie_input_info.hpp:27
StatusCode
This enum contains codes for all possible return values of the interface functions.
Definition: ie_common.h:224
A header file for Blob and generic TBlob<>
std::map< std::string, InputInfo::Ptr > InputsDataMap
A collection that contains string as key, and InputInfo smart pointer as value.
Definition: ie_input_info.hpp:160
This header file provides structures to store info about pre-processing of network inputs (scale...
std::shared_ptr< ICNNNetwork > Ptr
A shared pointer to an ICNNNetwork interface.
Definition: ie_icnn_network.hpp:48
Represents detailed information for an error.
Definition: ie_common.h:247
a header file for internal Layers structure to describe layers information
This is the main interface to describe the NN topology.
Definition: ie_icnn_network.hpp:43
Definition: ie_cnn_network.h:27
This header file defines the main Data representation node.
This is a header file for the ICNNNetworkStats class.
This is a header file for Inference Engine Extension Interface.
A header file for the Inference Engine plugins destruction mechanism.
std::shared_ptr< Data > DataPtr
Smart pointer to Data.
Definition: ie_common.h:53
This is the interface to describe the NN topology scoring statistics.
Definition: ie_icnn_network_stats.hpp:44
std::map< std::string, DataPtr > OutputsDataMap
A collection that contains string as key, and Data smart pointer as value.
Definition: ie_icnn_network.hpp:37
std::map< std::string, SizeVector > InputShapes
Map of pairs: name of corresponding data and its dimension.
Definition: ie_icnn_network.hpp:206
a header file for InputInfo class
This is a header file with common inference engine definitions.
This class holds precision value and provides precision related operations.
Definition: ie_precision.hpp:22