ie_icnn_network.hpp
Go to the documentation of this file.
1 // Copyright (C) 2018-2020 Intel Corporation
2 // SPDX-License-Identifier: Apache-2.0
3 //
4 
5 /**
6  * @brief This is a header file for the ICNNNetwork class
7  *
8  * @file ie_icnn_network.hpp
9  */
10 #pragma once
11 
12 #include <map>
13 #include <memory>
14 #include <string>
15 
16 #include "details/ie_irelease.hpp"
17 #include "ie_blob.h"
18 #include "ie_common.h"
19 #include "ie_data.h"
21 #include "ie_iextension.h"
22 #include "ie_input_info.hpp"
23 #include "ie_layers.h"
24 #include "ie_preprocess.hpp"
25 
namespace ngraph {

// Forward declaration only: the full definition lives in the nGraph headers,
// so this header does not need to pull them in.
class Function;

}  // namespace ngraph
31 
32 namespace InferenceEngine {
33 
34 /**
35  * @brief A collection that contains string as key, and Data smart pointer as value
36  */
37 using OutputsDataMap = std::map<std::string, DataPtr>;
38 
39 /**
40  * @interface ICNNNetwork
41  * @brief This is the main interface to describe the NN topology
42  */
43 class INFERENCE_ENGINE_API_CLASS(ICNNNetwork): public details::IRelease {
44 public:
45  /**
46  * @brief A shared pointer to a ICNNNetwork interface
47  */
48  using Ptr = std::shared_ptr<ICNNNetwork>;
49 
50  /**
51  * @brief Returns constant nGraph function
52  * @return constant nGraph function
53  */
54  virtual std::shared_ptr<const ngraph::Function> getFunction() const noexcept = 0;
55 
56  /**
57  * @deprecated Network precision does not make sence, use precision on egdes. The method will be removed in 2020.3
58  * @brief Returns the main network operating precision.
59  *
60  * This may be MIXED if not homogeneous.
61  *
62  * @return A precision type
63  */
64  INFERENCE_ENGINE_DEPRECATED("Network precision does not make sence, use precision on egdes. The method will be removed in 2020.3")
65  virtual Precision getPrecision() const noexcept = 0;
66 
67  /**
68  * @brief Gets the network output Data node information. The received info is stored in the given Data node.
69  *
70  * For single and multiple outputs networks.
71  *
72  * @param out Reference to the OutputsDataMap object
73  */
74  virtual void getOutputsInfo(OutputsDataMap& out) const noexcept = 0;
75 
76  /**
77  * @brief Gets the network input Data node information. The received info is stored in the given InputsDataMap
78  * object.
79  *
80  * For single and multiple inputs networks.
81  * This method must be called to find out input names for using them later during filling of a map
82  * of blobs passed later to InferenceEngine::IInferencePlugin::Infer()
83  *
84  * @param inputs Reference to InputsDataMap object.
85  */
86  virtual void getInputsInfo(InputsDataMap& inputs) const noexcept = 0;
87 
88  /**
89  * @brief Returns information on certain input pointed by inputName
90  *
91  * @param inputName Name of input layer to get info on
92  * @return A smart pointer to the input information
93  */
94  virtual InputInfo::Ptr getInput(const std::string& inputName) const noexcept = 0;
95 
96  /**
97  * @deprecated Use ICNNNetwork::getName() instead. The method will be removed in 2020.3
98  * @brief Gets the network name. The name is stored in the given pName string.
99  *
100  * @param pName - will receive actual network name, specified in IR file,
101  * pName should point to valid memory address before invoking this function
102  * @param len - size in bytes of pName buffer, actual name is trimmed by this size
103  */
104  INFERENCE_ENGINE_DEPRECATED("Use ICNNNetwork::getName() instead. The method will be removed in 2020.3")
105  virtual void getName(char* pName, size_t len) const noexcept = 0;
106 
107  /**
108  * @brief Returns the network name.
109  *
110  * @return Network name
111  */
112  virtual const std::string& getName() const noexcept = 0;
113 
114  /**
115  * @brief Returns the number of layers in the network as an integer value
116  *
117  * @return The number of layers as an integer value
118  */
119  virtual size_t layerCount() const noexcept = 0;
120 
121  /**
122  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
123  * @brief Returns a smart pointer reference to a Data node given its name.
124  *
125  * If the Data node is missing, returns reference to a default initialized new empty data pointer with given name.
126  *
127  * @param dname Name of the Data node
128  * @return Data node smart pointer
129  */
130  INFERENCE_ENGINE_DEPRECATED("Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3")
131  virtual DataPtr& getData(const char* dname) noexcept = 0;
132 
133  /**
134  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
135  * @brief Insert a layer into the network. A user is responsible to connect it to other data elements.
136  *
137  * @param layer Const reference to a layer smart pointer
138  */
139  INFERENCE_ENGINE_DEPRECATED("Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3")
140  virtual void addLayer(const CNNLayerPtr& layer) noexcept = 0;
141 
142  /**
143  * @brief Adds output to the layer
144  *
145  * @param layerName Name of the layer
146  * @param outputIndex Index of the output
147  * @param resp Response message
148  * @return Status code of the operation
149  */
150  virtual StatusCode addOutput(const std::string& layerName, size_t outputIndex = 0,
151  ResponseDesc* resp = nullptr) noexcept = 0;
152 
153  /**
154  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
155  * @brief Gets network layer with the given name
156  *
157  * @param layerName Given name of the layer
158  * @param out Pointer to the found CNNLayer object with the given name
159  * @param resp Pointer to the response message that holds a description of an error if any occurred
160  * @return Status code of the operation. InferenceEngine::OK if succeeded
161  */
162  INFERENCE_ENGINE_DEPRECATED("Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3")
163  virtual StatusCode getLayerByName(const char* layerName, CNNLayerPtr& out, ResponseDesc* resp) const noexcept = 0;
164 
165  /**
166  * @brief Changes the inference batch size.
167  *
168  * @note There are several limitations and it's not recommended to use it. Set batch to the input shape and call
169  * ICNNNetwork::reshape.
170  *
171  * @param size Size of batch to set
172  * @param responseDesc Pointer to the response message that holds a description of an error if any occurred
173  * @return Status code of the operation
174  * @note Current implementation of the function sets batch size to the first dimension of all layers in the
175  * networks. Before calling it make sure that all your layers have batch in the first dimension, otherwise the
176  * method works incorrectly. This limitation is resolved via shape inference feature by using
177  * InferenceEngine::ICNNNetwork::reshape method. To read more refer to the Shape Inference section in documentation
178  *
179  * @note Current implementation of the function sets batch size to the first dimension of all layers in the
180  * networks. Before calling it make sure that all your layers have batch in the first dimension, otherwise the
181  * method works incorrectly. This limitation is resolved via shape inference feature by using
182  * InferenceEngine::ICNNNetwork::reshape method. To read more refer to the Shape Inference section in documentation
183  */
184  virtual StatusCode setBatchSize(size_t size, ResponseDesc* responseDesc) noexcept = 0;
185 
186  /**
187  * @brief Gets the inference batch size
188  *
189  * @return The size of batch as a size_t value
190  */
191  virtual size_t getBatchSize() const noexcept = 0;
192 
193  /**
194  * @brief Map of pairs: name of corresponding data and its dimension.
195  */
196  using InputShapes = std::map<std::string, SizeVector>;
197 
198  /**
199  * @brief Run shape inference with new input shapes for the network
200  *
201  * @param inputShapes - map of pairs: name of corresponding data and its dimension.
202  * @param resp Pointer to the response message that holds a description of an error if any occurred
203  * @return Status code of the operation
204  */
205  virtual StatusCode reshape(const InputShapes& inputShapes, ResponseDesc* resp) noexcept {
206  (void)inputShapes;
207  (void)resp;
208  return NOT_IMPLEMENTED;
209  };
210 
211  /**
212  * @deprecated Use Core::AddExtension to add an extension to the library
213  * @brief Registers extension within the plugin
214  *
215  * @param extension Pointer to already loaded reader extension with shape propagation implementations
216  * @param resp Pointer to the response message that holds a description of an error if any occurred
217  * @return Status code of the operation. InferenceEngine::OK if succeeded
218  */
219  INFERENCE_ENGINE_DEPRECATED("Use Core::AddExtension to add an extension to the library")
220  virtual StatusCode AddExtension(const IShapeInferExtensionPtr& extension, ResponseDesc* resp) noexcept;
221 
222  /**
223  * @deprecated Migrate to IR v10 and use quantization approach with FakeQuantize
224  * @brief Gets the statistics.
225  * @param stats The statistics
226  * @param resp Pointer to the response message that holds a description of an error if any occurred
227  * @return Status code of the operation
228  */
229  IE_SUPPRESS_DEPRECATED_START
230  INFERENCE_ENGINE_INTERNAL("Migrate to IR v10 and use quantization approach with FakeQuantize")
231  virtual StatusCode getStats(ICNNNetworkStats** stats, ResponseDesc* resp) const noexcept {
232  (void)stats;
233  (void)resp;
234  return NOT_IMPLEMENTED;
235  };
236  IE_SUPPRESS_DEPRECATED_END
237 
238  /**
239  * @brief Serialize network to IR and weights files.
240  *
241  * @param xmlPath Path to output IR file.
242  * @param binPath Path to output weights file.
243  * @param resp Pointer to the response message that holds a description of an error if any occurred
244  * @return Status code of the operation
245  */
246  virtual StatusCode serialize(const std::string& xmlPath, const std::string& binPath, ResponseDesc* resp) const
247  noexcept = 0;
248 
249  /**
250  * @brief A virtual destructor.
251  */
252  virtual ~ICNNNetwork();
253 };
254 } // namespace InferenceEngine
Inference Engine API.
Definition: ie_argmax_layer.hpp:15
This class contains information about each input of the network.
Definition: ie_input_info.hpp:27
std::shared_ptr< IShapeInferExtension > IShapeInferExtensionPtr
A shared pointer to a IShapeInferExtension interface.
Definition: ie_iextension.h:350
A header file for Blob and generic TBlob<>
This header file provides structures to store info about pre-processing of network inputs (scale, mean image, etc.).
std::shared_ptr< ICNNNetwork > Ptr
A shared pointer to a ICNNNetwork interface.
Definition: ie_icnn_network.hpp:48
Represents detailed information for an error.
Definition: ie_common.h:247
StatusCode
This enum contains codes for all possible return values of the interface functions.
Definition: ie_common.h:224
A header file for the internal Layers structure that describes layer information.
std::shared_ptr< Data > DataPtr
Smart pointer to Data.
Definition: ie_common.h:53
This is the main interface to describe the NN topology.
Definition: ie_icnn_network.hpp:43
Definition: ie_cnn_network.h:27
This header file defines the main Data representation node.
This is a header file for the ICNNNetworkStats class.
std::shared_ptr< CNNLayer > CNNLayerPtr
A smart pointer to the CNNLayer.
Definition: ie_common.h:39
std::map< std::string, DataPtr > OutputsDataMap
A collection that contains string as key, and Data smart pointer as value.
Definition: ie_icnn_network.hpp:37
This is a header file for Inference Engine Extension Interface.
A header file for the Inference Engine plugins destruction mechanism.
This is the interface to describe the NN topology scoring statistics.
Definition: ie_icnn_network_stats.hpp:44
Precision precision
Layer precision.
Definition: ie_layers.h:54
std::vector< size_t > SizeVector
Represents tensor size.
Definition: ie_common.h:29
std::map< std::string, SizeVector > InputShapes
Map of pairs: name of corresponding data and its dimension.
Definition: ie_icnn_network.hpp:196
A header file for the InputInfo class.
std::map< std::string, InputInfo::Ptr > InputsDataMap
A collection that contains string as key, and InputInfo smart pointer as value.
Definition: ie_input_info.hpp:160
This is a header file with common inference engine definitions.
This class holds precision value and provides precision related operations.
Definition: ie_precision.hpp:22