ie_icnn_network.hpp
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

/**
 * @brief This is a header file for the ICNNNetwork class
 *
 * @file ie_icnn_network.hpp
 */
#pragma once

#include <map>
#include <memory>
#include <string>

#include "ie_blob.h"
#include "ie_common.h"
#include "ie_data.h"
#include "ie_input_info.hpp"
#include "details/ie_irelease.hpp"

namespace ngraph {

class Function;

}  // namespace ngraph

namespace InferenceEngine {

/**
 * @brief A collection that contains string as key, and Data smart pointer as value
 */
using OutputsDataMap = std::map<std::string, DataPtr>;

/**
 * @interface ICNNNetwork
 * @brief This is the main interface to describe the NN topology
 */
class INFERENCE_ENGINE_API_CLASS(ICNNNetwork): public details::IRelease {
public:
    /**
     * @brief A shared pointer to an ICNNNetwork interface
     */
    using Ptr = std::shared_ptr<ICNNNetwork>;

    /**
     * @brief Returns nGraph function
     * @return nGraph function
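     *
     * A minimal usage sketch (`network` here is assumed to be an ICNNNetwork
     * reference obtained elsewhere, e.g. from a parsed model):
     * @code
     * if (std::shared_ptr<ngraph::Function> func = network.getFunction()) {
     *     // The network is backed by an nGraph function and can be inspected via it
     * } else {
     *     // No nGraph representation is available for this network
     * }
     * @endcode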
     */
    virtual std::shared_ptr<ngraph::Function> getFunction() noexcept = 0;

    /**
     * @brief Returns constant nGraph function
     * @return constant nGraph function
     */
    virtual std::shared_ptr<const ngraph::Function> getFunction() const noexcept = 0;

    /**
     * @brief Gets the network output Data node information. The received info is stored in the given Data node.
     *
     * Applicable for networks with a single output as well as multiple outputs.
     *
     * This method needs to be called to find out output names, which can then be used
     * when calling InferenceEngine::InferRequest::GetBlob or InferenceEngine::InferRequest::SetBlob
     *
     * @param out Reference to the OutputsDataMap object
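     *
     * A minimal usage sketch (`network` is assumed to be an ICNNNetwork reference
     * obtained elsewhere):
     * @code
     * InferenceEngine::OutputsDataMap outputs;
     * network.getOutputsInfo(outputs);
     * for (const auto& output : outputs) {
     *     // output.first is the name to pass to InferRequest::GetBlob / SetBlob,
     *     // output.second is the DataPtr describing the output precision and dims
     * }
     * @endcode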
     */
    virtual void getOutputsInfo(OutputsDataMap& out) const noexcept = 0;

    /**
     * @brief Gets the network input Data node information. The received info is stored in the given InputsDataMap
     * object.
     *
     * Applicable for networks with a single input as well as multiple inputs.
     * This method needs to be called to find out input names, which can then be used
     * when calling InferenceEngine::InferRequest::SetBlob
     *
     * @param inputs Reference to InputsDataMap object.
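     *
     * A minimal usage sketch (`network` is assumed to be an ICNNNetwork reference
     * obtained elsewhere):
     * @code
     * InferenceEngine::InputsDataMap inputs;
     * network.getInputsInfo(inputs);
     * for (const auto& input : inputs) {
     *     // input.first is the name to pass to InferRequest::SetBlob,
     *     // input.second is an InputInfo::Ptr with precision, layout and dims
     * }
     * @endcode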
     */
    virtual void getInputsInfo(InputsDataMap& inputs) const noexcept = 0;

    /**
     * @brief Returns information on a certain input pointed to by inputName
     *
     * @param inputName Name of the input layer to get info on
     * @return A smart pointer to the input information
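     *
     * A minimal usage sketch ("data" is a hypothetical input name; real names can be
     * discovered via getInputsInfo):
     * @code
     * InferenceEngine::InputInfo::Ptr info = network.getInput("data");
     * if (info) {
     *     // An input with this name exists; precision and dims are available via info
     * }
     * @endcode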
     */
    virtual InputInfo::Ptr getInput(const std::string& inputName) const noexcept = 0;

    /**
     * @brief Returns the network name.
     *
     * @return Network name
     */
    virtual const std::string& getName() const noexcept = 0;

    /**
     * @brief Returns the number of layers in the network as an integer value
     *
     * @return The number of layers as an integer value
     */
    virtual size_t layerCount() const noexcept = 0;

    /**
     * @brief Adds output to the layer
     *
     * @param layerName Name of the layer
     * @param outputIndex Index of the output
     * @param resp Response message
     * @return Status code of the operation
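     *
     * A minimal usage sketch ("conv5" is a hypothetical layer name):
     * @code
     * InferenceEngine::ResponseDesc resp;
     * InferenceEngine::StatusCode sts = network.addOutput("conv5", 0, &resp);
     * if (sts != InferenceEngine::OK) {
     *     // resp.msg holds a human-readable description of the failure
     * }
     * @endcode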
     */
    virtual StatusCode addOutput(const std::string& layerName, size_t outputIndex = 0,
                                 ResponseDesc* resp = nullptr) noexcept = 0;

    /**
     * @brief Changes the inference batch size.
     *
     * @note There are several limitations, and it is not recommended to use this method.
     * Instead, set the batch in the input shape and call ICNNNetwork::reshape.
     *
     * @param size Size of batch to set
     * @param responseDesc Pointer to the response message that holds a description of an error if any occurred
     * @return Status code of the operation
     * @note The current implementation of the function sets batch size to the first dimension of all layers in the
     * network. Before calling it, make sure that all your layers have the batch in the first dimension; otherwise the
     * method works incorrectly. This limitation is resolved via the shape inference feature, using the
     * InferenceEngine::ICNNNetwork::reshape method. To read more, refer to the Shape Inference section in the documentation
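     *
     * A minimal usage sketch:
     * @code
     * InferenceEngine::ResponseDesc resp;
     * if (network.setBatchSize(8, &resp) != InferenceEngine::OK) {
     *     // resp.msg describes why the batch size could not be changed
     * }
     * @endcode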
     */
    virtual StatusCode setBatchSize(size_t size, ResponseDesc* responseDesc) noexcept = 0;

    /**
     * @brief Gets the inference batch size
     *
     * @return The size of batch as a size_t value
     */
    virtual size_t getBatchSize() const noexcept = 0;

    /**
     * @brief Map of pairs: name of corresponding data and its dimension.
     */
    using InputShapes = std::map<std::string, SizeVector>;

    /**
     * @brief Run shape inference with new input shapes for the network
     *
     * @param inputShapes Map of pairs: name of corresponding data and its dimension
     * @param resp Pointer to the response message that holds a description of an error if any occurred
     * @return Status code of the operation
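     *
     * A minimal usage sketch ("data" is a hypothetical input name; real names and
     * current dims can be discovered via getInputsInfo):
     * @code
     * InferenceEngine::ICNNNetwork::InputShapes shapes;
     * shapes["data"] = {4, 3, 224, 224};  // e.g. change the batch dimension to 4
     * InferenceEngine::ResponseDesc resp;
     * if (network.reshape(shapes, &resp) != InferenceEngine::OK) {
     *     // resp.msg describes why shape inference failed
     * }
     * @endcode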
     */
    virtual StatusCode reshape(const InputShapes& inputShapes, ResponseDesc* resp) noexcept {
        (void)inputShapes;
        (void)resp;
        return NOT_IMPLEMENTED;
    }

    /**
     * @brief Serialize network to IR and weights files.
     *
     * @param xmlPath Path to output IR file.
     * @param binPath Path to output weights file.
     * @param resp Pointer to the response message that holds a description of an error if any occurred
     * @return Status code of the operation
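     *
     * A minimal usage sketch (the output paths are hypothetical):
     * @code
     * InferenceEngine::ResponseDesc resp;
     * if (network.serialize("model.xml", "model.bin", &resp) != InferenceEngine::OK) {
     *     // resp.msg describes why serialization failed
     * }
     * @endcode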
     */
    virtual StatusCode serialize(const std::string& xmlPath, const std::string& binPath, ResponseDesc* resp) const
        noexcept = 0;

    /**
     * @brief A virtual destructor.
     */
    virtual ~ICNNNetwork();
};
}  // namespace InferenceEngine