ie_iinfer_request_internal.hpp
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <ie_blob.h>
#include <ie_common.h>
#include <ie_preprocess.hpp>

#include <map>
#include <memory>
#include <string>

namespace InferenceEngine {

/**
 * @interface IInferRequestInternal
 * @brief An internal API of a synchronous inference request, to be implemented by a plugin
 * and used in the InferRequestBase forwarding mechanism
 * @ingroup ie_dev_api_infer_request_api
 */
class IInferRequestInternal {
public:
    /**
     * @brief A shared pointer to an IInferRequestInternal interface
     */
    typedef std::shared_ptr<IInferRequestInternal> Ptr;

    /**
     * @brief Destroys the object.
     */
    virtual ~IInferRequestInternal() = default;

    /**
     * @brief Infers specified input(s) in synchronous mode
     * @note Blocks all methods of IInferRequest while the request is ongoing (running or waiting in a queue)
     */
    virtual void Infer() = 0;

    /**
     * @brief Queries performance measures per layer to get feedback on which layers consume the most time.
     * @note Not all plugins provide meaningful data.
     * @param perfMap - a map of layer names to profiling information for each layer.
     */
    virtual void GetPerformanceCounts(std::map<std::string, InferenceEngineProfileInfo>& perfMap) const = 0;

    /**
     * @brief Sets input/output data to infer
     * @note Memory allocation does not happen
     * @param name - a name of the input or output blob.
     * @param data - a reference to the input or output blob. The type of Blob must correspond to the precision
     * and size of the network input or output.
     */
    virtual void SetBlob(const char* name, const Blob::Ptr& data) = 0;

    /**
     * @brief Gets input/output data to infer
     * @note Memory allocation does not happen
     * @param name - a name of the input or output blob.
     * @param data - a reference that receives the input or output blob. The type of Blob corresponds to the
     * precision and size of the network input or output.
     */
    virtual void GetBlob(const char* name, Blob::Ptr& data) = 0;

    /**
     * @brief Sets pre-process for input data
     * @param name Name of the input blob.
     * @param data - a reference to the input blob. The type of Blob must correspond to the network input precision and size.
     * @param info Preprocessing information for the blob.
     */
    virtual void SetBlob(const char* name, const Blob::Ptr& data, const PreProcessInfo& info) = 0;

    /**
     * @brief Gets pre-process for input data
     * @param name Name of the input blob.
     * @param info A pointer to a pointer that receives the PreProcessInfo structure for the blob.
     */
    virtual void GetPreProcess(const char* name, const PreProcessInfo** info) const = 0;

    /**
     * @brief Sets a new batch size when dynamic batching is enabled in the executable network that created this request.
     * @param batch - the new batch size to be used by all subsequent inference calls for this request.
     */
    virtual void SetBatch(int batch) = 0;
};

}  // namespace InferenceEngine
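
// --- Example (not part of the header) ---------------------------------------
// A minimal sketch of how a plugin might implement IInferRequestInternal.
// The class ExamplePlugin::ExampleInferRequest, its blob-map storage, and the
// include path below are hypothetical and shown only to illustrate the
// contract; a real plugin would validate blob precision and dimensions against
// the network and execute the compiled model on its device in Infer().

#include "ie_iinfer_request_internal.hpp"  // assumed include path

#include <map>
#include <string>

namespace ExamplePlugin {

class ExampleInferRequest : public InferenceEngine::IInferRequestInternal {
public:
    void Infer() override {
        // A real plugin would run device inference here, reading input blobs
        // from _blobs and writing results into the corresponding output blobs.
    }

    void GetPerformanceCounts(
        std::map<std::string, InferenceEngine::InferenceEngineProfileInfo>& perfMap) const override {
        // Report whatever per-layer profiling data was collected during Infer().
        perfMap = _perfCounts;
    }

    void SetBlob(const char* name, const InferenceEngine::Blob::Ptr& data) override {
        // Store the user-provided blob; no memory allocation happens here.
        _blobs[name] = data;
    }

    void GetBlob(const char* name, InferenceEngine::Blob::Ptr& data) override {
        // Return the previously set blob by name (a real plugin may also
        // allocate a default blob lazily on first access).
        data = _blobs.at(name);
    }

    void SetBlob(const char* name, const InferenceEngine::Blob::Ptr& data,
                 const InferenceEngine::PreProcessInfo& info) override {
        // Store the blob together with its pre-processing description.
        _blobs[name] = data;
        _preProcess[name] = info;
    }

    void GetPreProcess(const char* name, const InferenceEngine::PreProcessInfo** info) const override {
        *info = &_preProcess.at(name);
    }

    void SetBatch(int batch) override {
        // Remember the batch size to apply to subsequent Infer() calls.
        _batch = batch;
    }

private:
    std::map<std::string, InferenceEngine::Blob::Ptr> _blobs;
    std::map<std::string, InferenceEngine::PreProcessInfo> _preProcess;
    std::map<std::string, InferenceEngine::InferenceEngineProfileInfo> _perfCounts;
    int _batch = 1;
};

}  // namespace ExamplePlugin

// In this sketch the request object would typically be created by the plugin's
// executable network and handed out as an IInferRequestInternal::Ptr, which
// the InferRequestBase wrapper then forwards public IInferRequest calls to.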