ie_icnn_network.hpp
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

/**
 * @brief This is a header file for the ICNNNetwork class
 * @file ie_icnn_network.hpp
 */
#pragma once

#include <map>
#include <memory>
#include <string>

#include "details/ie_irelease.hpp"
#include "ie_blob.h"
#include "ie_common.h"
#include "ie_data.h"
#include "ie_icnn_network_stats.hpp"
#include "ie_iextension.h"
#include "ie_input_info.hpp"
#include "ie_layers.h"
#include "ie_preprocess.hpp"

namespace ngraph {

class Function;

}  // namespace ngraph

namespace InferenceEngine {

/**
 * @brief A collection that contains string as key, and Data smart pointer as value
 */
using OutputsDataMap = std::map<std::string, DataPtr>;

/**
 * @brief This is the main interface to describe the NN topology
 */
class INFERENCE_ENGINE_API_CLASS(ICNNNetwork): public details::IRelease {
public:
    using Ptr = std::shared_ptr<ICNNNetwork>;

    /**
     * @brief Returns a constant nGraph function
     * @return A constant nGraph function
     */
    virtual const std::shared_ptr<ngraph::Function> getFunction() const noexcept = 0;
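
    /* Example (illustrative sketch, not part of the original header): querying the
     * nGraph function behind a network. It is assumed here that `network` is a valid
     * ICNNNetwork reference and that a null result means no nGraph representation is
     * available for this network.
     *
     *     if (auto fn = network.getFunction()) {
     *         // the network has an nGraph representation; inspect or clone it here
     *     }
     */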

    /**
     * @brief Returns the main network operating precision.
     * This may be MIXED if not homogeneous.
     * @return A precision type
     */
    virtual Precision getPrecision() const noexcept = 0;

    /**
     * @brief Gets the network output Data node information. The received info is stored in the given
     * OutputsDataMap object. For single- and multiple-output networks.
     * @param out Reference to the OutputsDataMap object
     */
    virtual void getOutputsInfo(OutputsDataMap& out) const noexcept = 0;

    /**
     * @brief Gets the network input Data node information. The received info is stored in the given InputsDataMap
     * object. For single- and multiple-input networks. This method must be called to find out the input names, which
     * are later used as keys when filling the map of blobs passed to InferenceEngine::IInferencePlugin::Infer()
     * @param inputs Reference to the InputsDataMap object.
     */
    virtual void getInputsInfo(InputsDataMap& inputs) const noexcept = 0;
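
    /* Example (illustrative sketch, not part of the original header): reading the
     * input and output maps of a network. `network` is assumed to be a valid
     * ICNNNetwork reference obtained elsewhere; the names found in the maps come
     * from the IR and are not fixed values.
     *
     *     InferenceEngine::InputsDataMap inputs;
     *     InferenceEngine::OutputsDataMap outputs;
     *     network.getInputsInfo(inputs);
     *     network.getOutputsInfo(outputs);
     *     for (const auto& in : inputs) {
     *         const std::string& name = in.first;            // key to use when filling input blobs
     *         InferenceEngine::InputInfo::Ptr info = in.second;
     *         // configure precision/layout on `info` here if needed
     *     }
     *     for (const auto& out : outputs) {
     *         InferenceEngine::DataPtr data = out.second;    // describes an output Data node
     *     }
     */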

    /**
     * @brief Returns information on a certain input specified by inputName
     * @param inputName Name of the input layer to get info on
     * @return A smart pointer to the input information
     */
    virtual InputInfo::Ptr getInput(const std::string& inputName) const noexcept = 0;

    /**
     * @brief Gets the network name. The name is stored in the given pName string.
     * @param pName Will receive the actual network name specified in the IR file;
     * pName must point to a valid memory address before invoking this function
     * @param len Size in bytes of the pName buffer; the actual name is trimmed to this size
     */
    virtual void getName(char* pName, size_t len) const noexcept = 0;
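
    /* Example (illustrative sketch, not part of the original header): retrieving the
     * network name into a fixed-size buffer. `network` is assumed to be a valid
     * ICNNNetwork reference; the buffer size is arbitrary.
     *
     *     char name[256] = {};
     *     network.getName(name, sizeof(name));   // the name is trimmed to the buffer size if longer
     */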

    /**
     * @brief Returns the network name.
     * @return Network name
     */
    virtual const std::string& getName() const noexcept = 0;

    /**
     * @brief Returns the number of layers in the network as an integer value
     * @return The number of layers as an integer value
     */
    virtual size_t layerCount() const noexcept = 0;

    /**
     * @brief Returns a smart pointer reference to a Data node given its name.
     * If the Data node is missing, a reference to a default-initialized new empty Data pointer with the given
     * name is returned.
     * @param dname Name of the Data node
     * @return Data node smart pointer
     */
    virtual DataPtr& getData(const char* dname) noexcept = 0;
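
    /* Example (illustrative sketch, not part of the original header): looking up a Data
     * node by name. `network` is assumed to be a valid ICNNNetwork reference and
     * "conv1" is a hypothetical data name; a name that is not present yields an
     * empty (null) DataPtr.
     *
     *     InferenceEngine::DataPtr& data = network.getData("conv1");
     *     if (data) {
     *         // the Data node exists and can be inspected or reconnected here
     *     }
     */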

    /**
     * @brief Inserts a layer into the network. The user is responsible for connecting it to other data elements.
     * @param layer Const reference to a layer smart pointer
     */
    virtual void addLayer(const CNNLayerPtr& layer) noexcept = 0;

    /**
     * @brief Adds output to the layer
     * @param layerName Name of the layer
     * @param outputIndex Index of the output
     * @param resp Response message
     * @return Status code of the operation
     */
    virtual StatusCode addOutput(const std::string& layerName, size_t outputIndex = 0,
                                 ResponseDesc* resp = nullptr) noexcept = 0;
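
    /* Example (illustrative sketch, not part of the original header): exposing an
     * intermediate layer as an additional network output. `network` is assumed to be
     * a valid ICNNNetwork reference and "fc6" is a hypothetical layer name.
     *
     *     InferenceEngine::ResponseDesc resp;
     *     InferenceEngine::StatusCode sts = network.addOutput("fc6", 0, &resp);
     *     if (sts != InferenceEngine::OK) {
     *         // resp.msg contains a description of the error
     *     }
     */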

    /**
     * @brief Gets network layer with the given name
     * @param layerName Given name of the layer
     * @param out Pointer to the found CNNLayer object with the given name
     * @param resp Pointer to the response message that holds a description of an error if any occurred
     * @return Status code of the operation. OK if succeeded
     */
    virtual StatusCode getLayerByName(const char* layerName, CNNLayerPtr& out, ResponseDesc* resp) const noexcept = 0;
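
    /* Example (illustrative sketch, not part of the original header): finding a layer
     * by name and checking the returned status. `network` is assumed to be a valid
     * ICNNNetwork reference and "relu1" is a hypothetical layer name.
     *
     *     InferenceEngine::CNNLayerPtr layer;
     *     InferenceEngine::ResponseDesc resp;
     *     if (network.getLayerByName("relu1", layer, &resp) == InferenceEngine::OK) {
     *         // layer now points to the CNNLayer named "relu1"
     *     }
     */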

    /**
     * @brief Changes the inference batch size.
     * @note There are several limitations, so using this method is not recommended. Instead, set the batch
     * dimension in the input shape and call ICNNNetwork::reshape.
     * @param size Size of the batch to set
     * @param responseDesc Pointer to the response message that holds a description of an error if any occurred
     * @return Status code of the operation
     * @note The current implementation sets the batch size to the first dimension of all layers in the
     * network. Before calling it, make sure that all your layers have the batch in the first dimension; otherwise
     * the method works incorrectly. This limitation is resolved via the shape inference feature and the
     * InferenceEngine::ICNNNetwork::reshape method. To read more, refer to the Shape Inference section of the documentation.
     */
    virtual StatusCode setBatchSize(size_t size, ResponseDesc* responseDesc) noexcept = 0;
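
    /* Example (illustrative sketch, not part of the original header): setting the batch
     * size with error reporting. As the notes above state, ICNNNetwork::reshape is the
     * preferred way to change the batch. `network` is assumed to be a valid
     * ICNNNetwork reference.
     *
     *     InferenceEngine::ResponseDesc resp;
     *     if (network.setBatchSize(8, &resp) != InferenceEngine::OK) {
     *         // resp.msg describes why the batch size could not be changed
     *     }
     */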

    /**
     * @brief Gets the inference batch size
     * @return The size of batch as a size_t value
     */
    virtual size_t getBatchSize() const noexcept = 0;

    /**
     * @brief Map of pairs: name of corresponding data and its dimension.
     */
    using InputShapes = std::map<std::string, SizeVector>;

    /**
     * @brief Run shape inference with new input shapes for the network
     * @param inputShapes Map of pairs: name of corresponding data and its dimension.
     * @param resp Pointer to the response message that holds a description of an error if any occurred
     * @return Status code of the operation
     */
    virtual StatusCode reshape(const InputShapes& /*inputShapes*/, ResponseDesc* /*resp*/) noexcept {
        return NOT_IMPLEMENTED;
    }
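
    /* Example (illustrative sketch, not part of the original header): reshaping the
     * network to a new batch size through shape inference. `network` is assumed to be
     * a valid ICNNNetwork reference whose inputs have the batch in the first
     * dimension; the new batch of 8 is arbitrary.
     *
     *     InferenceEngine::ICNNNetwork::InputShapes shapes;
     *     InferenceEngine::ResponseDesc resp;
     *     InferenceEngine::InputsDataMap inputs;
     *     network.getInputsInfo(inputs);
     *     for (const auto& in : inputs) {
     *         InferenceEngine::SizeVector dims = in.second->getTensorDesc().getDims();
     *         dims[0] = 8;                       // assumes the batch is the first dimension
     *         shapes[in.first] = dims;
     *     }
     *     if (network.reshape(shapes, &resp) != InferenceEngine::OK) {
     *         // resp.msg describes the shape inference failure
     *     }
     */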

    /**
     * @brief Registers extension within the plugin
     * @param extension Pointer to already loaded reader extension with shape propagation implementations
     * @param resp Pointer to the response message that holds a description of an error if any occurred
     * @return Status code of the operation. OK if succeeded
     */
    virtual StatusCode AddExtension(const IShapeInferExtensionPtr& /*extension*/, ResponseDesc* /*resp*/) noexcept {
        return NOT_IMPLEMENTED;
    }

    /**
     * @brief Gets the statistics of the network (see ICNNNetworkStats)
     * @param stats Pointer to a variable that receives a pointer to the statistics interface
     * @param resp Pointer to the response message that holds a description of an error if any occurred
     * @return Status code of the operation
     */
    virtual StatusCode getStats(ICNNNetworkStats** /*stats*/, ResponseDesc* /*resp*/) const noexcept {
        return NOT_IMPLEMENTED;
    }

    /**
     * @brief Serialize network to IR and weights files.
     * @param xmlPath Path to the output IR file.
     * @param binPath Path to the output weights file.
     * @param resp Pointer to the response message that holds a description of an error if any occurred
     * @return Status code of the operation
     */
    virtual StatusCode serialize(const std::string& xmlPath, const std::string& binPath, ResponseDesc* resp) const
        noexcept = 0;
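
    /* Example (illustrative sketch, not part of the original header): saving the network
     * back to IR. `network` is assumed to be a valid ICNNNetwork reference; the file
     * names are arbitrary.
     *
     *     InferenceEngine::ResponseDesc resp;
     *     InferenceEngine::StatusCode sts = network.serialize("model.xml", "model.bin", &resp);
     *     if (sts != InferenceEngine::OK) {
     *         // resp.msg describes the serialization error
     *     }
     */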

    virtual ~ICNNNetwork();
};
}  // namespace InferenceEngine