ie_iinfer_request.hpp
Go to the documentation of this file.
1 // Copyright (C) 2018-2020 Intel Corporation
2 // SPDX-License-Identifier: Apache-2.0
3 //
4 
5 /**
6  * @brief a header file for IInferRequest interface
7  *
8  * @file ie_iinfer_request.hpp
9  */
10 
11 #pragma once
12 
13 #include <map>
14 #include <memory>
15 #include <string>
16 
17 #include "ie_blob.h"
18 #include "ie_common.h"
19 #include "ie_preprocess.hpp"
20 #include "details/ie_irelease.hpp"
21 
22 namespace InferenceEngine {
23 
24 /**
25  * @brief This is an interface of an asynchronous infer request
26  *
27  */
28 class IInferRequest : public details::IRelease {
29 public:
30  /**
31  * @enum WaitMode
32  * @brief Enumeration to hold wait mode for IInferRequest
33  */
34  enum WaitMode : int64_t {
35  /** Wait until inference result becomes available */
36  RESULT_READY = -1,
37  /** IInferRequest doesn't block or interrupt current thread and immediately returns inference status */
38  STATUS_ONLY = 0
39  };
40  /**
41  * @brief A shared pointer to the IInferRequest object
42  */
43  using Ptr = std::shared_ptr<IInferRequest>;
44  /**
45  * @brief A weak pointer to the IInferRequest object
46  */
47  using WeakPtr = std::weak_ptr<IInferRequest>;
48 
49  /**
50  * @brief Sets input/output data to infer
51  *
52  * @note Memory allocation does not happen
53  * @param name Name of input or output blob.
54  * @param data Reference to input or output blob. The type of a blob must match the network input precision and
55  * size.
56  * @param resp Optional: pointer to an already allocated object to contain information in case of failure
57  * @return Status code of the operation: InferenceEngine::OK (0) for success
58  */
59  virtual StatusCode SetBlob(const char* name, const Blob::Ptr& data, ResponseDesc* resp) noexcept = 0;
60 
61  /**
62  * @brief Gets input/output data for inference
63  *
64  * @note Memory allocation does not happen
65  * @param name Name of input or output blob.
66  * @param data Reference to input or output blob. The type of Blob must match the network input precision and size.
67  * @param resp Optional: pointer to an already allocated object to contain information in case of failure
68  * @return Status code of the operation: InferenceEngine::OK (0) for success
69  */
70  virtual StatusCode GetBlob(const char* name, Blob::Ptr& data, ResponseDesc* resp) noexcept = 0;
71 
72  /**
73  * @brief Sets pre-process for input data
74  * @param name Name of input blob.
75  * @param data Reference to input or output blob. The type of Blob must match the network input precision and size.
76  * @param info Preprocess info for blob.
77  * @param resp Optional: pointer to an already allocated object to contain information in case of failure
78  * @return Status code of the operation: OK (0) for success
79  */
80  virtual StatusCode SetBlob(const char *name, const Blob::Ptr &data, const PreProcessInfo& info, ResponseDesc *resp) noexcept = 0;
81 
82  /**
83  * @brief Gets pre-process for input data
84  * @param name Name of input blob.
85  * @param info Pointer to a pointer to a PreProcessInfo structure
86  * @param resp Optional: pointer to an already allocated object to contain information in case of failure
87  * @return Status code of the operation: OK (0) for success
88  */
89  virtual StatusCode GetPreProcess(const char* name, const PreProcessInfo** info, ResponseDesc *resp) const noexcept = 0;
90  /**
91  * @brief Infers specified input(s) in synchronous mode
92  *
93  * @note Blocks all methods of IInferRequest while the request is ongoing (running or waiting in a queue)
94  * @param resp Optional: pointer to an already allocated object to contain information in case of failure
95  * @return Status code of the operation: InferenceEngine::OK (0) for success
96  */
97  virtual StatusCode Infer(ResponseDesc* resp) noexcept = 0;
98 
99  /**
100  * @brief Queries performance measures per layer to get feedback on which layer is the most time-consuming
101  *
102  * @note Not all plugins provide meaningful data
103  * @param perfMap Map of layer names to profiling information for that layer
104  * @param resp Optional: pointer to an already allocated object to contain information in case of failure
105  * @return Status code of the operation: InferenceEngine::OK (0) for success
106  */
107  virtual StatusCode GetPerformanceCounts(std::map<std::string, InferenceEngineProfileInfo>& perfMap,
108  ResponseDesc* resp) const noexcept = 0;
109 
110  /**
111  * @brief Waits for the result to become available. Blocks until specified millis_timeout has elapsed or the result
112  * becomes available, whichever comes first.
113  *
114  * @param millis_timeout Maximum duration in milliseconds to block for
115  * @note There are special cases when millis_timeout is equal to some value of the WaitMode enum:
116  * * STATUS_ONLY - immediately returns inference status (IInferRequest::RequestStatus). It does not block or
117  * interrupt current thread
118  * * RESULT_READY - waits until inference result becomes available
119  * @param resp Optional: a pointer to an already allocated object to contain extra information of a failure (if
120  * occurred)
121  * @return Status code of the operation: InferenceEngine::OK (0) for success
122  */
123  virtual InferenceEngine::StatusCode Wait(int64_t millis_timeout, ResponseDesc* resp) noexcept = 0;
124 
125  /**
126  * @brief Starts inference of specified input(s) in asynchronous mode
127  *
128  * @note It returns immediately. Inference also starts immediately
129  * @param resp Optional: a pointer to an already allocated object to contain extra information of a failure (if
130  * occurred)
131  * @return Status code of the operation: InferenceEngine::OK (0) for success
132  */
133  virtual StatusCode StartAsync(ResponseDesc* resp) noexcept = 0;
134 
135  /**
136  * @brief Completion callback definition as pointer to a function
137  *
138  * @param context Pointer to request for providing context inside callback
139  * @param code Completion result status: InferenceEngine::OK (0) for success
140  */
141  typedef void (*CompletionCallback)(InferenceEngine::IInferRequest::Ptr context, InferenceEngine::StatusCode code);
142 
143  /**
144  * @brief Sets a callback function that will be called on success or failure of an asynchronous request
145  *
146  * @param callback A function to be called
147  * @return Status code of the operation: InferenceEngine::OK (0) for success
148  */
149  virtual StatusCode SetCompletionCallback(CompletionCallback callback) noexcept = 0;
150 
151  /**
152  * @brief Gets arbitrary data for the request and stores a pointer to a pointer to the obtained data
153  *
154  * @param data Pointer to a pointer to the obtained arbitrary data
155  * @param resp Optional: a pointer to an already allocated object to contain extra information of a failure (if
156  * occurred)
157  * @return Status code of the operation: InferenceEngine::OK (0) for success
158  */
159  virtual StatusCode GetUserData(void** data, ResponseDesc* resp) noexcept = 0;
160 
161  /**
162  * @brief Sets arbitrary data for the request
163  *
164  * @param data Pointer to arbitrary data to set
165  * @param resp Optional: a pointer to an already allocated object to contain extra information of a failure (if
166  * occurred)
167  * @return Status code of the operation: InferenceEngine::OK (0) for success
168  */
169  virtual StatusCode SetUserData(void* data, ResponseDesc* resp) noexcept = 0;
170 
171  /**
172  * @brief Sets a new batch size when dynamic batching is enabled in the executable network that created this request.
173  *
174  * @param batch_size New batch size to be used by all subsequent inference calls for this request.
175  * @param resp Optional: a pointer to an already allocated object to contain extra information of a failure (if
176  * occurred)
177  * @return Status code of the operation: InferenceEngine::OK (0) for success
178  */
179  virtual InferenceEngine::StatusCode SetBatch(int batch_size, ResponseDesc* resp) noexcept = 0;
180 };
181 
182 } // namespace InferenceEngine
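
The listing above only declares the interface. As an illustration, the sketch below shows a typical synchronous flow through SetBlob, Infer, GetBlob and GetPerformanceCounts. It is a minimal sketch, assuming that a valid IInferRequest::Ptr has already been obtained elsewhere (for example, from an executable network) and that the input Blob matches the network input precision and dimensions; the helper name RunSyncInference and the blob names "data" and "prob" are placeholders rather than part of this header.

#include <iostream>
#include <map>
#include <string>

#include "ie_iinfer_request.hpp"

// Hypothetical helper: one synchronous inference on an already created request.
// The blob names "data" and "prob" are placeholders for the real network I/O names.
void RunSyncInference(InferenceEngine::IInferRequest::Ptr request,
                      const InferenceEngine::Blob::Ptr& input) {
    InferenceEngine::ResponseDesc resp;

    // Bind the input blob; SetBlob does not allocate memory.
    if (request->SetBlob("data", input, &resp) != InferenceEngine::OK) {
        std::cerr << "SetBlob failed: " << resp.msg << std::endl;
        return;
    }

    // Blocks until the inference result is available.
    if (request->Infer(&resp) != InferenceEngine::OK) {
        std::cerr << "Infer failed: " << resp.msg << std::endl;
        return;
    }

    // Read back an output blob and, optionally, per-layer timings
    // (not every plugin reports meaningful profiling data).
    InferenceEngine::Blob::Ptr output;
    request->GetBlob("prob", output, &resp);

    std::map<std::string, InferenceEngine::InferenceEngineProfileInfo> perfMap;
    request->GetPerformanceCounts(perfMap, &resp);
}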