ie_iinfer_request_internal.hpp
1 // Copyright (C) 2018-2020 Intel Corporation
2 // SPDX-License-Identifier: Apache-2.0
3 //
4 
5 #pragma once
6 
#include <cpp_interfaces/interface/ie_ivariable_state_internal.hpp>
#include <ie_blob.h>
#include <ie_common.h>
#include <ie_preprocess.hpp>

#include <map>
#include <memory>
#include <string>
#include <vector>
15 
16 namespace InferenceEngine {
17 
18 /**
19  * @interface IInferRequestInternal
20  * @brief An internal API of synchronous inference request to be implemented by plugin,
21  * which is used in InferRequestBase forwarding mechanism
22  * @ingroup ie_dev_api_infer_request_api
23  */
25 public:
26  /**
27  * @brief A shared pointer to a IInferRequestInternal interface
28  */
29  typedef std::shared_ptr<IInferRequestInternal> Ptr;
30 
31  /**
32  * @brief Destroys the object.
33  */
34  virtual ~IInferRequestInternal() = default;
35 
36  /**
37  * @brief Infers specified input(s) in synchronous mode
38  * @note blocks all method of IInferRequest while request is ongoing (running or waiting in queue)
39  */
40  virtual void Infer() = 0;
41 
42  /**
43  * @brief Cancel current inference request execution
44  */
45  virtual void Cancel() = 0;
46 
47  /**
48  * @brief Queries performance measures per layer to get feedback of what is the most time consuming layer.
49  * Note: not all plugins may provide meaningful data
50  * @return Returns a map of layer names to profiling information for that layer.
51  */
52  virtual std::map<std::string, InferenceEngineProfileInfo> GetPerformanceCounts() const = 0;
53 
54  /**
55  * @brief Set input/output data to infer
56  * @note Memory allocation doesn't happen
57  * @param name - a name of input or output blob.
58  * @param data - a reference to input or output blob. The type of Blob must correspond to the network input
59  * precision and size.
60  */
61  virtual void SetBlob(const std::string& name, const Blob::Ptr& data) = 0;
62 
63  /**
64  * @brief Get input/output data to infer
65  * @note Memory allocation doesn't happen
66  * @param name - a name of input or output blob.
67  * @return Returns input or output blob. The type of Blob must correspond to the network input
68  * precision and size.
69  */
70  virtual Blob::Ptr GetBlob(const std::string& name) = 0;
71 
72  /**
73  * @brief Sets pre-process for input data
74  * @param name Name of input blob.
75  * @param data - a reference to input or output blob. The type of Blob must correspond to the network input precision and size.
76  * @param info Preprocess info for blob.
77  */
78  virtual void SetBlob(const std::string& name, const Blob::Ptr& data, const PreProcessInfo& info) = 0;
79 
80  /**
81  * @brief Gets pre-process for input data
82  * @param name Name of input blob.
83  * @return Returns constant reference to PreProcessInfo structure
84  */
85  virtual const PreProcessInfo& GetPreProcess(const std::string& name) const = 0;
86 
87  /**
88  * @brief Sets new batch size when dynamic batching is enabled in executable network that created this request.
89  * @param batch - new batch size to be used by all the following inference calls for this request.
90  */
91  virtual void SetBatch(int batch) = 0;
92 
93  /**
94  * @brief Queries memory states.
95  * @return Returns memory states
96  */
97  virtual std::vector<IVariableStateInternal::Ptr> QueryState() = 0;
98 };
99 
100 } // namespace InferenceEngine
std::shared_ptr< Blob > Ptr
An internal API of synchronous inference request to be implemented by plugin, which is used in InferRequestBase forwarding mechanism.
Definition: ie_iinfer_request_internal.hpp:24
virtual const PreProcessInfo & GetPreProcess(const std::string &name) const =0
Gets pre-process for input data.
virtual std::vector< IVariableStateInternal::Ptr > QueryState()=0
Queries memory states.
virtual Blob::Ptr GetBlob(const std::string &name)=0
Get input/output data to infer.
virtual ~IInferRequestInternal()=default
Destroys the object.
virtual void Infer()=0
Infers specified input(s) in synchronous mode.
virtual void Cancel()=0
Cancel current inference request execution.
std::shared_ptr< IInferRequestInternal > Ptr
A shared pointer to a IInferRequestInternal interface.
Definition: ie_iinfer_request_internal.hpp:29
virtual void SetBlob(const std::string &name, const Blob::Ptr &data, const PreProcessInfo &info)=0
Sets pre-process for input data.
virtual void SetBlob(const std::string &name, const Blob::Ptr &data)=0
Set input/output data to infer.
virtual std::map< std::string, InferenceEngineProfileInfo > GetPerformanceCounts() const =0
Queries performance measures per layer to get feedback of what is the most time consuming layer. Note: not all plugins may provide meaningful data.
virtual void SetBatch(int batch)=0
Sets new batch size when dynamic batching is enabled in executable network that created this request.
Inference Engine Plugin API namespace.