ie_infer_request.hpp
1 // Copyright (C) 2018-2020 Intel Corporation
2 // SPDX-License-Identifier: Apache-2.0
3 //
4 
5 /**
6  * @brief A header file that provides wrapper classes for infer requests and callbacks.
7  *
8  * @file ie_infer_request.hpp
9  */
10 #pragma once
11 
12 #include <map>
13 #include <memory>
14 #include <string>
15 
16 #include "details/ie_exception_conversion.hpp"
17 #include "ie_iinfer_request.hpp"
18 #include "ie_plugin_ptr.hpp"
19 
20 namespace InferenceEngine {
21 
22 namespace details {
23 
24 class ICompletionCallbackWrapper {
25 public:
26  virtual ~ICompletionCallbackWrapper() = default;
27 
28  virtual void call(InferenceEngine::IInferRequest::Ptr request, InferenceEngine::StatusCode code) const noexcept = 0;
29 };
30 
31 template <class T>
32 class CompletionCallbackWrapper : public ICompletionCallbackWrapper {
33  T lambda;
34 
35 public:
36  explicit CompletionCallbackWrapper(const T& lambda): lambda(lambda) {}
37 
38  void call(InferenceEngine::IInferRequest::Ptr /*request*/, InferenceEngine::StatusCode /*code*/) const
39  noexcept override {
40  lambda();
41  }
42 };
43 
44 template <>
45 class CompletionCallbackWrapper<IInferRequest::CompletionCallback> : public ICompletionCallbackWrapper {
46  IInferRequest::CompletionCallback callBack;
47 
48 public:
49  explicit CompletionCallbackWrapper(const IInferRequest::CompletionCallback& callBack): callBack(callBack) {}
50 
51  void call(InferenceEngine::IInferRequest::Ptr request, InferenceEngine::StatusCode code) const noexcept override {
52  callBack(request, code);
53  }
54 };
55 
56 } // namespace details
57 
58 /**
59  * @copybrief IInferRequest
60  *
61  * Wraps IInferRequest.
62  * Unlike the wrapped interface, it reports errors by throwing exceptions that the application can safely catch and handle.
63  */
64 class InferRequest {
65  IInferRequest::Ptr actual;
66  InferenceEnginePluginPtr plg;
67  std::shared_ptr<details::ICompletionCallbackWrapper> callback;
68 
69  static void callWrapper(InferenceEngine::IInferRequest::Ptr request, InferenceEngine::StatusCode code) {
70  details::ICompletionCallbackWrapper* pWrapper = nullptr;
71  ResponseDesc dsc;
72  request->GetUserData(reinterpret_cast<void**>(&pWrapper), &dsc);
73  pWrapper->call(request, code);
74  }
75 
76 public:
77  /**
78  * @brief Default constructor
79  */
80  InferRequest() = default;
81 
82  /**
83  * @brief Destructor
84  */
85  ~InferRequest() {
86  actual = nullptr;
87  }
88 
89  /**
90  * @brief Sets input/output data to infer
91  *
92  * @note Memory allocation does not happen
93  * @param name Name of input or output blob.
94  * @param data Reference to the input or output blob. The type of the blob must match the corresponding network
95  * input or output precision and size.
96  */
97  void SetBlob(const std::string& name, const Blob::Ptr& data) {
98  CALL_STATUS_FNC(SetBlob, name.c_str(), data);
99  }
100 
101  /**
102  * @copybrief IInferRequest::GetBlob
103  *
104  * Wraps IInferRequest::GetBlob
105  */
106  Blob::Ptr GetBlob(const std::string& name) {
107  Blob::Ptr data;
108  CALL_STATUS_FNC(GetBlob, name.c_str(), data);
109  std::string error = "Internal error: blob with name `" + name + "` is not allocated!";
110  auto blobPtr = data.get();
111  if (blobPtr == nullptr) THROW_IE_EXCEPTION << error;
112  if (blobPtr->buffer() == nullptr) THROW_IE_EXCEPTION << error;
113  return data;
114  }
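    // Usage sketch (illustrative, not part of the original header): binding and reading data.
    // Assumes `request` is an InferRequest created from an ExecutableNetwork, that the network
    // has an input named "input" and an output named "output" (hypothetical names), and FP32 precision.
    //
    //     InferenceEngine::Blob::Ptr input = request.GetBlob("input");
    //     float* inData = input->buffer().as<float*>();      // fill with user data
    //     request.Infer();                                    // run synchronously
    //     InferenceEngine::Blob::Ptr output = request.GetBlob("output");
    //     const float* outData = output->cbuffer().as<const float*>();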
115 
116  /**
117  * @brief Sets pre-process for input data
118  * @note An error is returned if the blob with the given name is an output blob
119  * @param name Name of input blob.
120  * @param data Reference to the input blob. The type of the blob must match the network input precision and size.
121  * @param info Pre-process info for the blob.
122  */
123  void SetBlob(const std::string &name, const Blob::Ptr &data, const PreProcessInfo& info) {
124  CALL_STATUS_FNC(SetBlob, name.c_str(), data, info);
125  }
126 
127  /**
128  * @brief Gets pre-process for input data
129  * @param name Name of input blob.
130  * @return Constant reference to the pre-process info of the blob with the given name
131  */
132  const PreProcessInfo& GetPreProcess(const std::string& name) const {
133  const PreProcessInfo* info = nullptr;
134  CALL_STATUS_FNC(GetPreProcess, name.c_str(), &info);
135  return *info;
136  }
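    // Usage sketch (illustrative, not part of the original header): attaching pre-processing.
    // Assumes an input named "input" and a user blob `userBlob` whose spatial size differs from
    // the network input, so resizing is requested (hypothetical names).
    //
    //     InferenceEngine::PreProcessInfo info;
    //     info.setResizeAlgorithm(InferenceEngine::RESIZE_BILINEAR);
    //     request.SetBlob("input", userBlob, info);
    //     const InferenceEngine::PreProcessInfo& applied = request.GetPreProcess("input");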
137 
138  /**
139  * @copybrief IInferRequest::Infer
140  *
141  * Wraps IInferRequest::Infer
142  */
143  void Infer() {
144  CALL_STATUS_FNC_NO_ARGS(Infer);
145  }
146 
147  /**
148  * @copybrief IInferRequest::GetPerformanceCounts
149  *
150  * Wraps IInferRequest::GetPerformanceCounts
151  */
152  std::map<std::string, InferenceEngineProfileInfo> GetPerformanceCounts() const {
153  std::map<std::string, InferenceEngineProfileInfo> perfMap;
154  CALL_STATUS_FNC(GetPerformanceCounts, perfMap);
155  return perfMap;
156  }
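    // Usage sketch (illustrative, not part of the original header): profiling a synchronous run.
    // Assumes the executable network was created with performance counters enabled
    // (the PERF_COUNT configuration key).
    //
    //     request.Infer();
    //     auto perf = request.GetPerformanceCounts();
    //     for (const auto& item : perf) {
    //         // item.first is the layer name; item.second.realTime_uSec is its execution time
    //     }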
157 
158  /**
159  * @brief Sets input data to infer
160  *
161  * @note Memory allocation does not happen
162  * @param inputs Reference to a map of input blobs accessed by input names.
163  * The type of each blob must match the corresponding network input precision and size.
164  */
165  void SetInput(const BlobMap& inputs) {
166  for (auto&& input : inputs) {
167  CALL_STATUS_FNC(SetBlob, input.first.c_str(), input.second);
168  }
169  }
170 
171  /**
172  * @brief Sets data that will contain result of the inference
173  *
174  * @note Memory allocation does not happen
175  * @param results Reference to a map of result blobs accessed by output names.
176  * The type of each blob must match the corresponding network output precision and size.
177  */
178  void SetOutput(const BlobMap& results) {
179  for (auto&& result : results) {
180  CALL_STATUS_FNC(SetBlob, result.first.c_str(), result.second);
181  }
182  }
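    // Usage sketch (illustrative, not part of the original header): setting several blobs via maps.
    // Assumes blob pointers `inBlob` / `outBlob` and names "input" / "output" (hypothetical).
    //
    //     InferenceEngine::BlobMap inputs  = {{"input",  inBlob}};
    //     InferenceEngine::BlobMap outputs = {{"output", outBlob}};
    //     request.SetInput(inputs);
    //     request.SetOutput(outputs);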
183 
184  /**
185  * @brief Sets a new batch size when dynamic batching is enabled in the executable network that created this request.
186  *
187  * @param batch New batch size to be used by all the following inference calls for this request.
188  */
189  void SetBatch(const int batch) {
190  CALL_STATUS_FNC(SetBatch, batch);
191  }
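    // Usage sketch (illustrative, not part of the original header): dynamic batching.
    // Assumes the executable network was created with dynamic batching enabled
    // (the DYN_BATCH_ENABLED configuration key); only the first `batch` items are then processed.
    //
    //     request.SetBatch(2);
    //     request.Infer();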
192 
193  /**
194  * @brief Constructs InferRequest from the initialized std::shared_ptr
195  * @param request Initialized shared pointer to IInferRequest
196  * @param plg Plugin to use
197  */
198  explicit InferRequest(IInferRequest::Ptr request, InferenceEnginePluginPtr plg = {}): actual(request), plg(plg) {}
199 
200  /**
201  * @brief Start inference of specified input(s) in asynchronous mode
202  *
203  * @note The method returns immediately, and inference starts at the same time.
204  */
205  void StartAsync() {
206  CALL_STATUS_FNC_NO_ARGS(StartAsync);
207  }
208 
209  /**
210  * @copybrief IInferRequest::Wait
211  *
212  * Wraps IInferRequest::Wait
213  */
214  StatusCode Wait(int64_t millis_timeout) {
215  ResponseDesc resp;
216  auto res = actual->Wait(millis_timeout, &resp);
217  if (res != OK && res != RESULT_NOT_READY && res != INFER_NOT_STARTED) {
218  InferenceEngine::details::extract_exception(res, resp.msg);
219  }
220  return res;
221  }
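    // Usage sketch (illustrative, not part of the original header): asynchronous inference.
    //
    //     request.StartAsync();
    //     // ... do other work while inference runs ...
    //     InferenceEngine::StatusCode status =
    //         request.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY);
    //     if (status == InferenceEngine::OK) {
    //         // read outputs via GetBlob
    //     }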
222 
223  /**
224  * @copybrief IInferRequest::SetCompletionCallback
225  *
226  * Wraps IInferRequest::SetCompletionCallback
227  *
228  * @param callbackToSet Lambda callback object which will be called on processing finish.
229  */
230  template <class T>
231  void SetCompletionCallback(const T& callbackToSet) {
232  callback.reset(new details::CompletionCallbackWrapper<T>(callbackToSet));
233  CALL_STATUS_FNC(SetUserData, callback.get());
234  actual->SetCompletionCallback(callWrapper);
235  }
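    // Usage sketch (illustrative, not part of the original header): completion callback.
    // A parameterless lambda matches the generic wrapper above; a
    // std::function<void(InferRequest, StatusCode)> callback is also supported
    // (see the specialization at the end of this file).
    //
    //     request.SetCompletionCallback([&]() {
    //         // invoked by the plugin when StartAsync() completes
    //     });
    //     request.StartAsync();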
236 
237  /**
238  * @brief IInferRequest pointer to be used directly in CreateInferRequest functions
239  */
240  operator IInferRequest::Ptr&() {
241  return actual;
242  }
243 
244  /**
245  * @brief Checks if current InferRequest object is not initialized
246  *
247  * @return true if current InferRequest object is not initialized, false - otherwise
248  */
249  bool operator!() const noexcept {
250  return !actual;
251  }
252 
253  /**
254  * @brief Checks if current InferRequest object is initialized
255  *
256  * @return true if current InferRequest object is initialized, false - otherwise
257  */
258  explicit operator bool() const noexcept {
259  return !!actual;
260  }
261 
262  /**
263  * @brief A smart pointer to the InferRequest object
264  */
265  using Ptr = std::shared_ptr<InferRequest>;
266 };
267 
268 namespace details {
269 
270 template <>
271 class CompletionCallbackWrapper<std::function<void(InferRequest, StatusCode)>> : public ICompletionCallbackWrapper {
272  std::function<void(InferRequest, StatusCode)> lambda;
273 
274 public:
275  explicit CompletionCallbackWrapper(const std::function<void(InferRequest, InferenceEngine::StatusCode)>& lambda)
276  : lambda(lambda) {}
277 
278  void call(InferenceEngine::IInferRequest::Ptr request, InferenceEngine::StatusCode code) const noexcept override {
279  lambda(InferRequest(request), code);
280  }
281 };
282 
283 } // namespace details
284 } // namespace InferenceEngine