ie_icnn_network.hpp
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

/**
 * @brief This is a header file for the ICNNNetwork class
 *
 * @file ie_icnn_network.hpp
 */
#pragma once

#include <map>
#include <memory>
#include <string>

#include "ie_blob.h"
#include "ie_common.h"
#include "ie_data.h"
#include "ie_input_info.hpp"
#include "details/ie_irelease.hpp"
#if defined IMPLEMENT_INFERENCE_ENGINE_API || defined IMPLEMENT_INFERENCE_ENGINE_PLUGIN || 1
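// NOTE: the trailing "|| 1" makes this condition always true, so the
// deprecation attribute in the #else branch is currently disabled for all builds.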
# define INFERENCE_ENGINE_ICNNNETWORK_CLASS(...) INFERENCE_ENGINE_API_CLASS(__VA_ARGS__)
#else
# define INFERENCE_ENGINE_ICNNNETWORK_CLASS(...) \
    INFERENCE_ENGINE_INTERNAL("Use InferenceEngine::CNNNetwork wrapper instead") \
    INFERENCE_ENGINE_API_CLASS(__VA_ARGS__)
#endif

namespace ngraph {

class Function;

}  // namespace ngraph

namespace InferenceEngine {

/**
 * @brief A collection that contains a string as key, and a Data smart pointer as value
 */
using OutputsDataMap = std::map<std::string, DataPtr>;

/**
 * @deprecated Use InferenceEngine::CNNNetwork wrapper instead
 * @interface ICNNNetwork
 * @brief This is the main interface to describe the NN topology
 */
class INFERENCE_ENGINE_ICNNNETWORK_CLASS(ICNNNetwork) : public details::IRelease {
public:
    /**
     * @brief A shared pointer to an ICNNNetwork interface
     */
    using Ptr = std::shared_ptr<ICNNNetwork>;

    /**
     * @brief Returns the nGraph function
     * @return nGraph function
     */
    virtual std::shared_ptr<ngraph::Function> getFunction() noexcept = 0;

    /**
     * @brief Returns the constant nGraph function
     * @return constant nGraph function
     */
    virtual std::shared_ptr<const ngraph::Function> getFunction() const noexcept = 0;
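
    // A minimal usage sketch (assumes `network` is a valid ICNNNetwork::Ptr
    // obtained elsewhere; illustrative only):
    //
    //     std::shared_ptr<ngraph::Function> func = network->getFunction();
    //     if (func) {
    //         // the network holds an nGraph representation of the topology
    //     }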

    /**
     * @brief Gets the network output Data node information. The received info is stored in the given Data node.
     *
     * For networks with a single output as well as multiple outputs.
     *
     * This method needs to be called to find out the OpenVINO output names, which are used later
     * when calling InferenceEngine::InferRequest::GetBlob or InferenceEngine::InferRequest::SetBlob.
     *
     * If you want to use framework names, you can use the InferenceEngine::ICNNNetwork::getOVNameForTensor
     * method to map framework names to OpenVINO names.
     *
     * @param out Reference to the OutputsDataMap object
     */
    virtual void getOutputsInfo(OutputsDataMap& out) const noexcept = 0;
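
    // A minimal usage sketch (assumes `network` is a valid ICNNNetwork::Ptr):
    //
    //     InferenceEngine::OutputsDataMap outputs;
    //     network->getOutputsInfo(outputs);
    //     for (const auto& out : outputs) {
    //         // out.first is the OpenVINO output name to pass to GetBlob/SetBlob
    //     }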

    /**
     * @brief Gets the network input Data node information. The received info is stored in the given
     * InputsDataMap object.
     *
     * For networks with a single input as well as multiple inputs.
     *
     * This method needs to be called to find out the OpenVINO input names, which are used later
     * when calling InferenceEngine::InferRequest::SetBlob.
     *
     * If you want to use framework names, you can use the InferenceEngine::ICNNNetwork::getOVNameForTensor
     * method to map framework names to OpenVINO names.
     *
     * @param inputs Reference to the InputsDataMap object
     */
    virtual void getInputsInfo(InputsDataMap& inputs) const noexcept = 0;
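
    // A minimal usage sketch (assumes `network` is a valid ICNNNetwork::Ptr;
    // InputInfo::setPrecision comes from ie_input_info.hpp):
    //
    //     InferenceEngine::InputsDataMap inputs;
    //     network->getInputsInfo(inputs);
    //     for (const auto& in : inputs) {
    //         in.second->setPrecision(InferenceEngine::Precision::FP32);
    //     }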

    /**
     * @brief Returns information on a certain input specified by inputName
     *
     * @param inputName Name of the input layer to get info on
     * @return A smart pointer to the input information
     */
    virtual InputInfo::Ptr getInput(const std::string& inputName) const noexcept = 0;

    /**
     * @brief Returns the network name
     *
     * @return Network name
     */
    virtual const std::string& getName() const noexcept = 0;

    /**
     * @brief Returns the number of layers in the network
     *
     * @return The number of layers as an integer value
     */
    virtual size_t layerCount() const noexcept = 0;

    /**
     * @brief Adds an output to the layer
     *
     * @param layerName Name of the layer
     * @param outputIndex Index of the output
     * @param resp Response message
     * @return Status code of the operation
     */
    virtual StatusCode addOutput(const std::string& layerName, size_t outputIndex = 0,
                                 ResponseDesc* resp = nullptr) noexcept = 0;
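
    // A minimal usage sketch ("conv1" is a hypothetical layer name;
    // assumes `network` is a valid ICNNNetwork::Ptr):
    //
    //     InferenceEngine::ResponseDesc resp;
    //     if (network->addOutput("conv1", 0, &resp) != InferenceEngine::OK) {
    //         // resp.msg describes the error
    //     }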

    /**
     * @brief Changes the inference batch size.
     *
     * @note This method has several limitations, and using it is not recommended. Set the batch in the
     * input shape and call ICNNNetwork::reshape instead.
     *
     * @param size Size of batch to set
     * @param responseDesc Pointer to the response message that holds a description of an error if any occurred
     * @return Status code of the operation
     * @note The current implementation sets the batch size to the first dimension of all layers in the
     * network. Before calling it, make sure that all your layers have the batch in the first dimension;
     * otherwise the method works incorrectly. This limitation is resolved via the shape inference feature,
     * using the InferenceEngine::ICNNNetwork::reshape method. To read more, refer to the Shape Inference
     * section in the documentation.
     */
    virtual StatusCode setBatchSize(size_t size, ResponseDesc* responseDesc) noexcept = 0;
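
    // A minimal usage sketch (assumes `network` is a valid ICNNNetwork::Ptr;
    // prefer reshape, as noted above):
    //
    //     InferenceEngine::ResponseDesc resp;
    //     if (network->setBatchSize(8, &resp) != InferenceEngine::OK) {
    //         // resp.msg describes why the batch size could not be changed
    //     }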

    /**
     * @brief Gets the inference batch size
     *
     * @return The size of batch as a size_t value
     */
    virtual size_t getBatchSize() const noexcept = 0;

    /**
     * @brief Map of pairs: name of the corresponding data and its dimensions
     */
    using InputShapes = std::map<std::string, SizeVector>;

    /**
     * @brief Runs shape inference with new input shapes for the network
     *
     * @param inputShapes Map of pairs: name of the corresponding data and its new dimensions
     * @param resp Pointer to the response message that holds a description of an error if any occurred
     * @return Status code of the operation
     */
    virtual StatusCode reshape(const InputShapes& inputShapes, ResponseDesc* resp) noexcept {
        (void)inputShapes;
        (void)resp;
        return NOT_IMPLEMENTED;
    }
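
    // A minimal usage sketch ("data" is a hypothetical input name; the new
    // shape sets the batch dimension to 8):
    //
    //     InferenceEngine::ICNNNetwork::InputShapes shapes = {{"data", {8, 3, 224, 224}}};
    //     InferenceEngine::ResponseDesc resp;
    //     network->reshape(shapes, &resp);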

    /**
     * @brief Serializes the network to IR and weights files
     *
     * @param xmlPath Path to the output IR file
     * @param binPath Path to the output weights file
     * @param resp Pointer to the response message that holds a description of an error if any occurred
     * @return Status code of the operation
     */
    virtual StatusCode serialize(const std::string& xmlPath, const std::string& binPath,
                                 ResponseDesc* resp) const noexcept = 0;
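
    // A minimal usage sketch (output paths are hypothetical):
    //
    //     InferenceEngine::ResponseDesc resp;
    //     network->serialize("model.xml", "model.bin", &resp);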

    /**
     * @brief Maps a framework tensor name to an OpenVINO name
     *
     * @param ov_name Resulting OpenVINO name
     * @param orig_name Framework tensor name
     * @param resp Pointer to the response message that holds a description of an error if any occurred
     *
     * @return Status code of the operation
     */
    virtual StatusCode getOVNameForTensor(std::string& ov_name, const std::string& orig_name,
                                          ResponseDesc* resp) const noexcept {
        (void)ov_name;
        (void)orig_name;
        (void)resp;
        return NOT_IMPLEMENTED;
    }
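
    // A minimal usage sketch ("fc1000" is a hypothetical framework tensor name):
    //
    //     std::string ovName;
    //     InferenceEngine::ResponseDesc resp;
    //     if (network->getOVNameForTensor(ovName, "fc1000", &resp) == InferenceEngine::OK) {
    //         // ovName now holds the corresponding OpenVINO tensor name
    //     }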

    /**
     * @brief A virtual destructor.
     */
    virtual ~ICNNNetwork();
};

}  // namespace InferenceEngine