// ie_executable_network_thread_safe_default.hpp
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once

#include <map>
#include <memory>
#include <string>
#include <vector>

#include "cpp_interfaces/base/ie_infer_async_request_base.hpp"
#include "cpp_interfaces/impl/ie_executable_network_internal.hpp"
#include "cpp_interfaces/impl/ie_infer_async_request_thread_safe_default.hpp"
#include "cpp_interfaces/impl/ie_infer_request_internal.hpp"
#include "threading/ie_cpu_streams_executor.hpp"
18 namespace InferenceEngine {
19 
20 /**
21  * @brief This class provides optimal thread safe default implementation.
22  * The class is recommended to be used as a base class for Executable Network impleentation during plugin development.
23  * @ingroup ie_dev_api_exec_network_api
24  */
26  public std::enable_shared_from_this<ExecutableNetworkThreadSafeDefault> {
27 public:
28  /**
29  * @brief A shared pointer to a ExecutableNetworkThreadSafeDefault object
30  */
31  typedef std::shared_ptr<ExecutableNetworkThreadSafeDefault> Ptr;
32 
33  /**
34  * @brief Constructs a new instance.
35  *
36  * @param[in] taskExecutor The task executor used
37  * @param[in] callbackExecutor The callback executor
38  */
39  explicit
41  = std::make_shared<CPUStreamsExecutor>(IStreamsExecutor::Config{"Default"}),
42  const ITaskExecutor::Ptr& callbackExecutor
43  = std::make_shared<CPUStreamsExecutor>(IStreamsExecutor::Config{"Callback"})) :
44  _taskExecutor{taskExecutor},
45  _callbackExecutor{callbackExecutor} {
46  }
47 
48  /**
49  * @brief Given optional implementation of creating asynchronous inference request to avoid
50  * need for it to be implemented by plugin
51  * @return shared_ptr for the created asynchronous inference request
52  */
55  }
56 
57 protected:
58  /**
59  * @brief Creates asyncronous inference request from synchronous request returned by CreateInferRequestImpl
60  * @tparam AsyncInferRequestType A type of asynchronous inference request to use a wrapper for synchronous request
61  * @return A shared pointer to an asynchronous inference request
62  */
63  template <typename AsyncInferRequestType = AsyncInferRequestThreadSafeDefault>
65  IInferRequest::Ptr asyncRequest;
66 
67  auto syncRequestImpl = this->CreateInferRequestImpl(_networkInputs, _networkOutputs);
68  syncRequestImpl->setPointerToExecutableNetworkInternal(shared_from_this());
69 
70  auto asyncThreadSafeImpl = std::make_shared<AsyncInferRequestType>(
71  syncRequestImpl, _taskExecutor, _callbackExecutor);
72  asyncRequest.reset(new InferRequestBase(asyncThreadSafeImpl),
73  [](IInferRequest *p) { p->Release(); });
74  asyncThreadSafeImpl->SetPointerToPublicInterface(asyncRequest);
75 
76  return asyncRequest;
77  }
78 
79  /**
80  * @brief Creates a synchronous inference request object used to infer the network
81  * @note Used by ExecutableNetworkThreadSafeDefault::CreateInferRequest as a plugin-specific implementation
82  * @param networkInputs An input info map needed to create input blobs
83  * @param networkOutputs An output data map needed to create output blobs
84  * @return Synchronous inference request object
85  */
87  OutputsDataMap networkOutputs) = 0;
88 
89  ITaskExecutor::Ptr _taskExecutor = nullptr; //!< Holds a task executor
90  ITaskExecutor::Ptr _callbackExecutor = nullptr; //!< Holds a callback executor
91 };
92 
93 } // namespace InferenceEngine
Minimum implementation of IExecutableNetworkInternal interface. Must not be used as a base class in p...
Definition: ie_executable_network_internal.hpp:27
InferenceEngine::InputsDataMap _networkInputs
Holds information about network inputs info.
Definition: ie_executable_network_internal.hpp:133
InferenceEngine::OutputsDataMap _networkOutputs
Holds information about network outputs data.
Definition: ie_executable_network_internal.hpp:134
This class provides optimal thread safe default implementation. The class is recommended to be used a...
Definition: ie_executable_network_thread_safe_default.hpp:26
IInferRequest::Ptr CreateInferRequest() override
Given optional implementation of creating asynchronous inference request to avoid need for it to be i...
Definition: ie_executable_network_thread_safe_default.hpp:53
std::shared_ptr< ExecutableNetworkThreadSafeDefault > Ptr
A shared pointer to a ExecutableNetworkThreadSafeDefault object.
Definition: ie_executable_network_thread_safe_default.hpp:31
ExecutableNetworkThreadSafeDefault(const ITaskExecutor::Ptr &taskExecutor=std::make_shared< CPUStreamsExecutor >(IStreamsExecutor::Config{"Default"}), const ITaskExecutor::Ptr &callbackExecutor=std::make_shared< CPUStreamsExecutor >(IStreamsExecutor::Config{"Callback"}))
Constructs a new instance.
Definition: ie_executable_network_thread_safe_default.hpp:40
ITaskExecutor::Ptr _taskExecutor
Holds a task executor.
Definition: ie_executable_network_thread_safe_default.hpp:89
IInferRequest::Ptr CreateAsyncInferRequestFromSync()
Creates asynchronous inference request from synchronous request returned by CreateInferRequestImpl.
Definition: ie_executable_network_thread_safe_default.hpp:64
ITaskExecutor::Ptr _callbackExecutor
Holds a callback executor.
Definition: ie_executable_network_thread_safe_default.hpp:90
virtual InferRequestInternal::Ptr CreateInferRequestImpl(InputsDataMap networkInputs, OutputsDataMap networkOutputs)=0
Creates a synchronous inference request object used to infer the network.
std::shared_ptr< IInferRequest > Ptr
std::shared_ptr< ITaskExecutor > Ptr
Definition: ie_itask_executor.hpp:51
Inference request noexcept wrapper which accepts IAsyncInferRequestInternal derived instance which ca...
Definition: ie_infer_async_request_base.hpp:24
std::shared_ptr< InferRequestInternal > Ptr
A shared pointer to a InferRequestInternal implementation.
Definition: ie_infer_request_internal.hpp:37
A header file for Inference Engine CPU-Streams-based Executor implementation.
Inference Engine Plugin API namespace.
std::map< std::string, InputInfo::Ptr > InputsDataMap
std::map< std::string, DataPtr > OutputsDataMap
Defines IStreamsExecutor configuration.
Definition: ie_istreams_executor.hpp:50