inference_engine.hpp
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

/**
 * @brief A header file that provides a set of convenience utility functions and the main include file for all other .h
 * files.
 * @file inference_engine.hpp
 */
#pragma once

#include <cpp/ie_cnn_net_reader.h>
#include <ie_api.h>
#include <ie_blob.h>
#include <ie_layers.h>

#include <algorithm>
#include <cpp/ie_executable_network.hpp>
#include <cpp/ie_plugin_cpp.hpp>
#include <ie_core.hpp>
#include <ie_error.hpp>
#include <ie_icnn_network.hpp>
#include <ie_icnn_network_stats.hpp>
#include <ie_plugin_config.hpp>
#include <ie_plugin_dispatcher.hpp>
#include <ie_version.hpp>
#include <memory>
#include <numeric>
#include <vector>

/**
 * @brief Inference Engine API
 */
namespace InferenceEngine {

/**
 * @deprecated InferenceEngine utility functions are not a part of public API
 * @brief Gets the top n results from a tblob
 * @param n Top n count
 * @param input 1D tblob that contains probabilities
 * @param output Vector of indexes for the top n places
 */
template <class T>
INFERENCE_ENGINE_DEPRECATED(
    "InferenceEngine utility functions are not a part of public API. Will be removed in 2020 R2")
inline void TopResults(unsigned int n, TBlob<T>& input, std::vector<unsigned>& output) {
    SizeVector dims = input.getTensorDesc().getDims();
    size_t input_rank = dims.size();
    if (!input_rank || !dims[0]) THROW_IE_EXCEPTION << "Input blob has incorrect dimensions!";
    size_t batchSize = dims[0];
    std::vector<unsigned> indexes(input.size() / batchSize);

    n = static_cast<unsigned>(std::min<size_t>((size_t)n, input.size()));

    output.resize(n * batchSize);

    for (size_t i = 0; i < batchSize; i++) {
        size_t offset = i * (input.size() / batchSize);
        T* batchData = input.data();
        batchData += offset;

        std::iota(std::begin(indexes), std::end(indexes), 0);
        std::partial_sort(std::begin(indexes), std::begin(indexes) + n, std::end(indexes),
                          [&batchData](unsigned l, unsigned r) {
                              return batchData[l] > batchData[r];
                          });
        for (unsigned j = 0; j < n; j++) {
            output.at(i * n + j) = indexes.at(j);
        }
    }
}
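
// Example (illustrative sketch, not part of the original header): collecting the
// top-5 class indices per image from a typed FP32 output blob. The variable names
// probs and top5 are assumptions for illustration only.
//
//   IE_SUPPRESS_DEPRECATED_START
//   // probs: an N x C TBlob<float> of per-class probabilities, obtained elsewhere
//   std::vector<unsigned> top5;
//   InferenceEngine::TopResults(5, probs, top5);  // top5 ends up holding 5 indices per image
//   IE_SUPPRESS_DEPRECATED_END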

#define TBLOB_TOP_RESULT(precision)                                                           \
    case InferenceEngine::Precision::precision: {                                             \
        using myBlobType = InferenceEngine::PrecisionTrait<Precision::precision>::value_type; \
        TBlob<myBlobType>& tblob = dynamic_cast<TBlob<myBlobType>&>(input);                   \
        TopResults(n, tblob, output);                                                         \
        break;                                                                                \
    }

/**
 * @deprecated InferenceEngine utility functions are not a part of public API
 * @brief Gets the top n results from a blob
 * @param n Top n count
 * @param input 1D blob that contains probabilities
 * @param output Vector of indexes for the top n places
 */
INFERENCE_ENGINE_DEPRECATED(
    "InferenceEngine utility functions are not a part of public API. Will be removed in 2020 R2")
inline void TopResults(unsigned int n, Blob& input, std::vector<unsigned>& output) {
    IE_SUPPRESS_DEPRECATED_START
    switch (input.getTensorDesc().getPrecision()) {
        TBLOB_TOP_RESULT(FP32);
        TBLOB_TOP_RESULT(FP16);
        TBLOB_TOP_RESULT(Q78);
        TBLOB_TOP_RESULT(I16);
        TBLOB_TOP_RESULT(U8);
        TBLOB_TOP_RESULT(I8);
        TBLOB_TOP_RESULT(U16);
        TBLOB_TOP_RESULT(I32);
    default:
        THROW_IE_EXCEPTION << "cannot locate blob for precision: " << input.getTensorDesc().getPrecision();
    }
    IE_SUPPRESS_DEPRECATED_END
}

#undef TBLOB_TOP_RESULT
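
// Example (illustrative sketch, not part of the original header): the untyped overload
// dispatches on the blob precision, so it can be called on a Blob::Ptr obtained from an
// inference result without knowing its element type. Names are assumptions for illustration.
//
//   IE_SUPPRESS_DEPRECATED_START
//   // out: a Blob::Ptr holding the network output (FP32, FP16, U8, ... precisions are all handled)
//   std::vector<unsigned> indices;
//   InferenceEngine::TopResults(3, *out, indices);
//   IE_SUPPRESS_DEPRECATED_END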

/**
 * @deprecated InferenceEngine utility functions are not a part of public API
 * @brief Copies an 8-bit RGB image to the blob.
 * Throws an exception in case of a dimension or input size mismatch
 * @tparam data_t Type of the target blob
 * @param RGB8 8-bit RGB image
 * @param RGB8_size Size of the image
 * @param blob Target blob to write the image to
 */
template <typename data_t>
INFERENCE_ENGINE_DEPRECATED(
    "InferenceEngine utility functions are not a part of public API. Will be removed in 2020 R2")
void copyFromRGB8(uint8_t* RGB8, size_t RGB8_size, InferenceEngine::TBlob<data_t>* blob) {
    SizeVector dims = blob->getTensorDesc().getDims();
    if (4 != dims.size())
        THROW_IE_EXCEPTION << "Cannot write data to input blob! Blob has incorrect dimensions size " << dims.size();
    size_t num_channels = dims[1];  // because RGB
    size_t num_images = dims[0];
    size_t w = dims[3];
    size_t h = dims[2];
    size_t nPixels = w * h;

    if (RGB8_size != w * h * num_channels * num_images)
        THROW_IE_EXCEPTION << "input pixels mismatch, expecting " << w * h * num_channels * num_images
                           << " bytes, got: " << RGB8_size;

    std::vector<data_t*> dataArray;
    for (unsigned int n = 0; n < num_images; n++) {
        for (unsigned int i = 0; i < num_channels; i++) {
            if (!n && !i && dataArray.empty()) {
                dataArray.push_back(blob->data());
            } else {
                dataArray.push_back(dataArray.at(n * num_channels + i - 1) + nPixels);
            }
        }
    }
    for (size_t n = 0; n < num_images; n++) {
        size_t n_num_channels = n * num_channels;
        size_t n_num_channels_nPixels = n_num_channels * nPixels;
        for (size_t i = 0; i < nPixels; i++) {
            size_t i_num_channels = i * num_channels + n_num_channels_nPixels;
            for (size_t j = 0; j < num_channels; j++) {
                dataArray.at(n_num_channels + j)[i] = RGB8[i_num_channels + j];
            }
        }
    }
}
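
// Example (illustrative sketch, not part of the original header): copying a packed
// H x W x 3 RGB buffer into a freshly allocated NCHW float blob of matching size.
// The TensorDesc shape {1, 3, h, w} is an assumption for illustration only.
//
//   IE_SUPPRESS_DEPRECATED_START
//   std::vector<uint8_t> rgb(h * w * 3);  // packed R-G-B pixels
//   InferenceEngine::TBlob<float> blob(
//       InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, {1, 3, h, w},
//                                   InferenceEngine::Layout::NCHW));
//   blob.allocate();
//   InferenceEngine::copyFromRGB8(rgb.data(), rgb.size(), &blob);  // writes one plane per channel
//   IE_SUPPRESS_DEPRECATED_END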

/**
 * @deprecated InferenceEngine utility functions are not a part of public API
 * @brief Splits the RGB channels into a float, I16, or U8 blob.
 * The image buffer is assumed to be packed with no support for strides.
 * @param imgBufRGB8 Packed 24-bit RGB image (3 bytes per pixel: R-G-B)
 * @param lengthbytesSize Size in bytes of the RGB image. It is equal to the number of pixels times 3 (the number of channels)
 * @param input Blob to contain the split image (split into 3 channels)
 */
INFERENCE_ENGINE_DEPRECATED(
    "InferenceEngine utility functions are not a part of public API. Will be removed in 2020 R2")
inline void ConvertImageToInput(unsigned char* imgBufRGB8, size_t lengthbytesSize, Blob& input) {
    IE_SUPPRESS_DEPRECATED_START
    TBlob<float>* float_input = dynamic_cast<TBlob<float>*>(&input);
    if (float_input != nullptr) copyFromRGB8(imgBufRGB8, lengthbytesSize, float_input);

    TBlob<short>* short_input = dynamic_cast<TBlob<short>*>(&input);
    if (short_input != nullptr) copyFromRGB8(imgBufRGB8, lengthbytesSize, short_input);

    TBlob<uint8_t>* byte_input = dynamic_cast<TBlob<uint8_t>*>(&input);
    if (byte_input != nullptr) copyFromRGB8(imgBufRGB8, lengthbytesSize, byte_input);
    IE_SUPPRESS_DEPRECATED_END
}
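
// Example (illustrative sketch, not part of the original header): ConvertImageToInput is a
// thin precision dispatcher over copyFromRGB8, so the same packed RGB buffer can be written
// into a float, I16, or U8 input blob without spelling out the template argument. The input
// blob here is assumed to come from an InferRequest, purely for illustration.
//
//   IE_SUPPRESS_DEPRECATED_START
//   InferenceEngine::Blob::Ptr inputBlob = inferRequest.GetBlob(inputName);
//   InferenceEngine::ConvertImageToInput(rgb.data(), rgb.size(), *inputBlob);
//   IE_SUPPRESS_DEPRECATED_END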

/**
 * @deprecated InferenceEngine utility functions are not a part of public API
 * @brief Copies data from a certain precision to float
 * @param dst Pointer to an output float buffer, must be allocated before the call
 * @param src Source blob to take data from
 */
template <typename T>
INFERENCE_ENGINE_DEPRECATED(
    "InferenceEngine utility functions are not a part of public API. Will be removed in 2020 R2")
void copyToFloat(float* dst, const InferenceEngine::Blob* src) {
    if (!dst) {
        return;
    }
    const InferenceEngine::TBlob<T>* t_blob = dynamic_cast<const InferenceEngine::TBlob<T>*>(src);
    if (t_blob == nullptr) {
        THROW_IE_EXCEPTION << "input type is " << src->getTensorDesc().getPrecision() << " but input is not "
                           << typeid(T).name();
    }

    const T* srcPtr = t_blob->readOnly();
    if (srcPtr == nullptr) {
        THROW_IE_EXCEPTION << "Input data was not allocated.";
    }
    for (size_t i = 0; i < t_blob->size(); i++) dst[i] = srcPtr[i];
}
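
// Example (illustrative sketch, not part of the original header): widening an I16 blob into a
// caller-allocated float buffer. The template argument must match the blob's element type,
// otherwise the function throws. Names are assumptions for illustration only.
//
//   IE_SUPPRESS_DEPRECATED_START
//   // src: a const Blob* known to hold I16 data
//   std::vector<float> asFloat(src->size());  // destination must be pre-allocated by the caller
//   InferenceEngine::copyToFloat<short>(asFloat.data(), src);
//   IE_SUPPRESS_DEPRECATED_END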

}  // namespace InferenceEngine