ie_core.hpp
// Copyright (C) 2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

/**
 * @brief This is a header file for the Inference Engine Core class C++ API
 * @file ie_core.hpp
 */
#pragma once

#include <map>
#include <memory>
#include <string>
#include <vector>

#include "cpp/ie_plugin_cpp.hpp"
#include "ie_extension.h"

namespace InferenceEngine {

/**
 * @brief This class represents the Inference Engine Core entity.
 * It can throw exceptions that the application can safely catch and handle.
 */
class INFERENCE_ENGINE_API_CLASS(Core) {
    class Impl;
    std::shared_ptr<Impl> _impl;

public:
    /**
     * @brief Constructs an Inference Engine Core instance using an XML configuration file with
     * plugins description. See RegisterPlugins for more details.
     * @param xmlConfigFile A path to the .xml file with plugins to load from. If the XML configuration file is not
     * specified, then the default Inference Engine plugins are loaded from the default plugin.xml file.
     */
    explicit Core(const std::string& xmlConfigFile = std::string());

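    /* A minimal construction sketch (illustrative, not part of the original documentation;
     * "my_plugins.xml" is a hypothetical path):
     *
     * ```cpp
     * InferenceEngine::Core core;                         // default plugin.xml configuration
     * InferenceEngine::Core customCore("my_plugins.xml"); // custom plugin configuration
     * ```
     */
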
    /**
     * @brief Returns plugin version information
     * @param deviceName Device name to identify the plugin
     * @return A map of plugin names to version information
     */
    std::map<std::string, Version> GetVersions(const std::string& deviceName) const;

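    /* A usage sketch (illustrative; assumes a "CPU" plugin is registered and that Version
     * exposes a description field, as in ie_version.hpp):
     *
     * ```cpp
     * InferenceEngine::Core core;
     * std::map<std::string, InferenceEngine::Version> versions = core.GetVersions("CPU");
     * for (const auto& entry : versions) {
     *     std::cout << entry.first << " : " << entry.second.description << std::endl;
     * }
     * ```
     */
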
    /**
     * @brief Sets a logging callback
     * Logging is used to track what is going on inside the plugins and the Inference Engine library
     * @param listener Logging sink
     */
    void SetLogCallback(IErrorListener& listener) const;

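    /* A listener sketch (illustrative; assumes IErrorListener declares an
     * onError(const char*) callback, as suggested by ie_error.hpp):
     *
     * ```cpp
     * class ConsoleErrorListener : public InferenceEngine::IErrorListener {
     *     void onError(const char* msg) noexcept override {
     *         std::cerr << msg << std::endl;  // route plugin messages to stderr
     *     }
     * };
     *
     * ConsoleErrorListener listener;
     * core.SetLogCallback(listener);
     * ```
     */
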
    /**
     * @brief Creates an executable network from a network object. Users can create as many networks as they need and
     * use them simultaneously (up to the limitations of the hardware resources)
     * @param network CNNNetwork object acquired from CNNNetReader
     * @param deviceName Name of the device to load the network to
     * @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load
     * operation
     * @return An executable network reference
     */
    ExecutableNetwork LoadNetwork(
        CNNNetwork network, const std::string& deviceName,
        const std::map<std::string, std::string>& config = std::map<std::string, std::string>());

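    /* A typical load flow (illustrative; the model paths are hypothetical and the network is
     * read with CNNNetReader, as the parameter description suggests):
     *
     * ```cpp
     * InferenceEngine::Core core;
     * InferenceEngine::CNNNetReader reader;
     * reader.ReadNetwork("model.xml");  // IR topology
     * reader.ReadWeights("model.bin");  // IR weights
     * InferenceEngine::ExecutableNetwork exec = core.LoadNetwork(reader.getNetwork(), "CPU");
     * ```
     */
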
    /**
     * @brief Registers an extension for the specified plugin
     * @param extension Pointer to an already loaded extension
     * @param deviceName Device name identifying the plugin to add the extension to
     */
    void AddExtension(IExtensionPtr extension, const std::string& deviceName);

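    /* A sketch (illustrative; make_so_pointer comes from ie_extension.h and the extension
     * library name is hypothetical):
     *
     * ```cpp
     * InferenceEngine::IExtensionPtr extension =
     *     InferenceEngine::make_so_pointer<InferenceEngine::IExtension>("libcustom_extension.so");
     * core.AddExtension(extension, "CPU");
     * ```
     */
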
    /**
     * @brief Creates an executable network from a previously exported network
     * @param modelFileName Path to the location of the exported file
     * @param deviceName Name of the device to load the executable network on
     * @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load
     * operation
     * @return An executable network reference
     */
    ExecutableNetwork ImportNetwork(
        const std::string& modelFileName, const std::string& deviceName,
        const std::map<std::string, std::string>& config = std::map<std::string, std::string>());

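    /* A sketch (illustrative; "exported.blob" is a hypothetical file previously produced by
     * ExecutableNetwork::Export, on a device assumed to support import, e.g. MYRIAD):
     *
     * ```cpp
     * InferenceEngine::ExecutableNetwork exec = core.ImportNetwork("exported.blob", "MYRIAD");
     * ```
     */
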
    /**
     * @brief Creates an executable network from a previously exported network
     * @param networkModel Network model stream
     * @param deviceName Name of the device to load the executable network on
     * @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load
     * operation
     * @return An executable network reference
     */
    ExecutableNetwork ImportNetwork(std::istream& networkModel, const std::string& deviceName = {},
                                    const std::map<std::string, std::string>& config = {});

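    /* A stream-based sketch (illustrative; mirrors the file-based overload above and assumes
     * the same hypothetical exported file):
     *
     * ```cpp
     * std::ifstream blob("exported.blob", std::ios::binary);
     * InferenceEngine::ExecutableNetwork exec = core.ImportNetwork(blob, "MYRIAD");
     * ```
     */
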
    /**
     * @brief Queries a device whether it supports the specified network with the specified configuration
     * @param network Network object to query
     * @param deviceName Name of the device to query
     * @param config Optional map of pairs: (config parameter name, config parameter value)
     * @return A query result object with information about supported layers and a description of an error if any occurred
     */
    QueryNetworkResult QueryNetwork(
        const ICNNNetwork& network, const std::string& deviceName,
        const std::map<std::string, std::string>& config = std::map<std::string, std::string>()) const;

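    /* A sketch (illustrative; `network` is an ICNNNetwork obtained elsewhere, and the exact
     * fields of QueryNetworkResult depend on the Inference Engine version):
     *
     * ```cpp
     * InferenceEngine::QueryNetworkResult res = core.QueryNetwork(network, "CPU");
     * // Inspect res to find out which layers the device can execute.
     * ```
     */
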
    /**
     * @brief Sets configuration for a device, acceptable keys can be found in ie_plugin_config.hpp
     * @param config Map of pairs: (config parameter name, config parameter value)
     * @param deviceName An optional name of a device. If the device name is not specified, the config is set for all the
     * registered devices.
     */
    void SetConfig(const std::map<std::string, std::string>& config, const std::string& deviceName = std::string());

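    /* A sketch (illustrative; CONFIG_KEY and CONFIG_VALUE are macros from ie_plugin_config.hpp,
     * and the chosen key assumes the CPU plugin supports performance counters):
     *
     * ```cpp
     * core.SetConfig({{CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES)}}, "CPU");
     * ```
     */
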
    /**
     * @brief Gets configuration dedicated to device behaviour. The method is targeted to extract information
     * which can be set via the SetConfig method.
     * @param deviceName A name of a device to get a configuration value for.
     * @param name A config key corresponding to the value to retrieve.
     * @return Value of the config corresponding to the config key.
     */
    Parameter GetConfig(const std::string& deviceName, const std::string& name) const;

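    /* A sketch (illustrative; reads back the key set in the SetConfig example above):
     *
     * ```cpp
     * InferenceEngine::Parameter p = core.GetConfig("CPU", CONFIG_KEY(PERF_COUNT));
     * std::string perfCount = p.as<std::string>();
     * ```
     */
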
    /**
     * @brief Gets a general runtime metric for dedicated hardware. The method is used to request common device
     * properties which are executable-network agnostic, such as device name, temperature, and other device-specific
     * values.
     * @param deviceName A name of a device to get a metric value for.
     * @param name A metric name to request.
     * @return Metric value corresponding to the metric key.
     */
    Parameter GetMetric(const std::string& deviceName, const std::string& name) const;

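    /* A sketch (illustrative; METRIC_KEY is a macro from ie_plugin_config.hpp and
     * FULL_DEVICE_NAME is assumed to be an exposed metric):
     *
     * ```cpp
     * std::string fullName =
     *     core.GetMetric("CPU", METRIC_KEY(FULL_DEVICE_NAME)).as<std::string>();
     * ```
     */
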
    /**
     * @brief Returns devices available for neural network inference
     * @return A vector of devices. The devices are returned as { CPU, FPGA.0, FPGA.1, MYRIAD }.
     * If there is more than one device of a specific type, they are enumerated with the .# suffix.
     */
    std::vector<std::string> GetAvailableDevices() const;

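    /* A sketch (illustrative):
     *
     * ```cpp
     * for (const std::string& device : core.GetAvailableDevices()) {
     *     std::cout << device << std::endl;  // e.g. CPU, FPGA.0, MYRIAD
     * }
     * ```
     */
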
    /**
     * @brief Registers a new device and a plugin which implements this device inside the Inference Engine.
     * @param pluginName A name of the plugin. Depending on the platform, pluginName is wrapped with a shared library
     * suffix and prefix to identify the full library name
     * @param deviceName A device name to register the plugin for. If the device name is not specified, it is taken from
     * the plugin using the InferenceEnginePluginPtr::GetName function
     */
    void RegisterPlugin(const std::string& pluginName, const std::string& deviceName);

    /**
     * @brief Removes a plugin with the specified name from the Inference Engine
     * @param deviceName Device name identifying the plugin to remove from the Inference Engine
     */
    void UnregisterPlugin(const std::string& deviceName);

    /**
     * @brief Registers a plugin to the Inference Engine Core instance using an XML configuration file with
     * plugins description. The XML file has the following structure:
     *
     * ```xml
     * <ie>
     *     <plugins>
     *         <plugin name="" location="">
     *             <extensions>
     *                 <extension location=""/>
     *             </extensions>
     *             <properties>
     *                 <property key="" value=""/>
     *             </properties>
     *         </plugin>
     *     </plugins>
     * </ie>
     * ```
     *
     * - `name` identifies the name of the device enabled by the plugin
     * - `location` specifies the absolute path to a dynamic library with the plugin. The path can also be relative to
     * the Inference Engine shared library. This allows having a common config for different systems with different
     * configurations.
     * - Properties are set to the plugin via the `SetConfig` method.
     * - Extensions are set to the plugin via the `AddExtension` method.
     * @param xmlConfigFile A path to the .xml file with plugins to register.
     */
    void RegisterPlugins(const std::string& xmlConfigFile);

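    /* A sketch (illustrative; "custom_plugins.xml" is a hypothetical file following the
     * structure above):
     *
     * ```cpp
     * InferenceEngine::Core core;
     * core.RegisterPlugins("custom_plugins.xml");
     * ```
     */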
};
}  // namespace InferenceEngine