// ie_executable_network_thread_safe_default.hpp
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once

#include <map>
#include <memory>
#include <string>
#include <vector>

#include "cpp_interfaces/impl/ie_infer_async_request_thread_safe_default.hpp"
#include "cpp_interfaces/interface/ie_iexecutable_network_internal.hpp"
#include "threading/ie_cpu_streams_executor.hpp"
16 namespace InferenceEngine {
17 
18 /**
19  * @brief This class provides optimal thread safe default implementation.
20  * The class is recommended to be used as a base class for Executable Network impleentation during plugin development.
21  * @ingroup ie_dev_api_exec_network_api
22  */
24 public:
25  /**
26  * @brief A shared pointer to a ExecutableNetworkThreadSafeDefault object
27  */
28  typedef std::shared_ptr<ExecutableNetworkThreadSafeDefault> Ptr;
29 
30  /**
31  * @brief Constructs a new instance.
32  *
33  * @param[in] taskExecutor The task executor used
34  * @param[in] callbackExecutor The callback executor
35  */
36  explicit
38  = std::make_shared<CPUStreamsExecutor>(IStreamsExecutor::Config{"Default"}),
39  const ITaskExecutor::Ptr& callbackExecutor
40  = std::make_shared<CPUStreamsExecutor>(IStreamsExecutor::Config{"Callback"})) :
41  _taskExecutor{taskExecutor},
42  _callbackExecutor{callbackExecutor} {
43  }
44 
45  /**
46  * @brief Given optional implementation of creating asynchronous inference request to avoid
47  * need for it to be implemented by plugin
48  * @return shared_ptr for the created asynchronous inference request
49  */
52  }
53 
54 protected:
55  /**
56  * @brief Creates asyncronous inference request from synchronous request returned by CreateInferRequestImpl
57  * @tparam AsyncInferRequestType A type of asynchronous inference request to use a wrapper for synchronous request
58  * @return A shared pointer to an asynchronous inference request
59  */
60  template <typename AsyncInferRequestType = AsyncInferRequestThreadSafeDefault>
62  auto syncRequestImpl = this->CreateInferRequestImpl(_networkInputs, _networkOutputs);
63  syncRequestImpl->setPointerToExecutableNetworkInternal(shared_from_this());
64  return std::make_shared<AsyncInferRequestType>(syncRequestImpl, _taskExecutor, _callbackExecutor);
65  }
66 
67  ITaskExecutor::Ptr _taskExecutor = nullptr; //!< Holds a task executor
68  ITaskExecutor::Ptr _callbackExecutor = nullptr; //!< Holds a callback executor
69 };
70 
71 } // namespace InferenceEngine
This class provides optimal thread safe default implementation. The class is recommended to be used a...
Definition: ie_executable_network_thread_safe_default.hpp:23
std::shared_ptr< ExecutableNetworkThreadSafeDefault > Ptr
A shared pointer to a ExecutableNetworkThreadSafeDefault object.
Definition: ie_executable_network_thread_safe_default.hpp:28
ExecutableNetworkThreadSafeDefault(const ITaskExecutor::Ptr &taskExecutor=std::make_shared< CPUStreamsExecutor >(IStreamsExecutor::Config{"Default"}), const ITaskExecutor::Ptr &callbackExecutor=std::make_shared< CPUStreamsExecutor >(IStreamsExecutor::Config{"Callback"}))
Constructs a new instance.
Definition: ie_executable_network_thread_safe_default.hpp:37
ITaskExecutor::Ptr _taskExecutor
Holds a task executor.
Definition: ie_executable_network_thread_safe_default.hpp:67
IInferRequestInternal::Ptr CreateInferRequest() override
Given optional implementation of creating asynchronous inference request to avoid need for it to be i...
Definition: ie_executable_network_thread_safe_default.hpp:50
ITaskExecutor::Ptr _callbackExecutor
Holds a callback executor.
Definition: ie_executable_network_thread_safe_default.hpp:68
IInferRequestInternal::Ptr CreateAsyncInferRequestFromSync()
Creates asynchronous inference request from synchronous request returned by CreateInferRequestImpl.
Definition: ie_executable_network_thread_safe_default.hpp:61
An internal API of executable network to be implemented by plugin,.
Definition: ie_iexecutable_network_internal.hpp:30
virtual std::shared_ptr< IInferRequestInternal > CreateInferRequestImpl(InputsDataMap networkInputs, OutputsDataMap networkOutputs)
Creates an inference request internal implementation.
InferenceEngine::OutputsDataMap _networkOutputs
Holds information about network outputs data.
Definition: ie_iexecutable_network_internal.hpp:144
InferenceEngine::InputsDataMap _networkInputs
Holds information about network inputs info.
Definition: ie_iexecutable_network_internal.hpp:143
std::shared_ptr< IInferRequestInternal > Ptr
A shared pointer to a IInferRequestInternal interface.
Definition: ie_iinfer_request_internal.hpp:33
std::shared_ptr< ITaskExecutor > Ptr
Definition: ie_itask_executor.hpp:51
A header file for Inference Engine CPU-Streams-based Executor implementation.
Inference Engine Plugin API namespace.
Defines IStreamsExecutor configuration.
Definition: ie_istreams_executor.hpp:52