inference_engine.hpp
Go to the documentation of this file.
1 // Copyright (C) 2018-2020 Intel Corporation
2 // SPDX-License-Identifier: Apache-2.0
3 //
4 
5 /**
6  * @brief A header file that provides a set of convenience utility functions and the main include file for all other .h
7  * files.
8  *
9  * @file inference_engine.hpp
10  */
11 #pragma once
12 
13 #include <cpp/ie_cnn_net_reader.h>
14 #include <ie_api.h>
15 #include <ie_blob.h>
16 #include <ie_layers.h>
17 
18 #include <algorithm>
20 #include <cpp/ie_plugin_cpp.hpp>
21 #include <ie_core.hpp>
22 #include <ie_error.hpp>
23 #include <ie_icnn_network.hpp>
25 #include <ie_plugin_config.hpp>
26 #include <ie_plugin_dispatcher.hpp>
27 #include <ie_version.hpp>
28 #include <memory>
29 #include <numeric>
30 #include <vector>
31 
32 /**
33  * @brief Inference Engine API
34  */
35 namespace InferenceEngine {
36 
37 /**
38  * @deprecated InferenceEngine utility functions are not a part of public API.
39  * This method will be removed in 2020.4 release.
40  * @brief Gets the top n results from a tblob
41  *
42  * @param n Top n count
43  * @param input 1D tblob that contains probabilities
44  * @param output Vector of indexes for the top n places
45  */
46 template <class T>
47 INFERENCE_ENGINE_DEPRECATED(
48  "InferenceEngine utility functions are not a part of public API. Will be removed in 2020.4")
49 inline void TopResults(unsigned int n, TBlob<T>& input, std::vector<unsigned>& output) {
50  SizeVector dims = input.getTensorDesc().getDims();
51  size_t input_rank = dims.size();
52  if (!input_rank || !dims[0]) THROW_IE_EXCEPTION << "Input blob has incorrect dimensions!";
53  size_t batchSize = dims[0];
54  std::vector<unsigned> indexes(input.size() / batchSize);
55 
56  n = static_cast<unsigned>(std::min<size_t>((size_t)n, input.size()));
57 
58  output.resize(n * batchSize);
59 
60  for (size_t i = 0; i < batchSize; i++) {
61  size_t offset = i * (input.size() / batchSize);
62  T* batchData = input.data();
63  batchData += offset;
64 
65  std::iota(std::begin(indexes), std::end(indexes), 0);
66  std::partial_sort(std::begin(indexes), std::begin(indexes) + n, std::end(indexes),
67  [&batchData](unsigned l, unsigned r) {
68  return batchData[l] > batchData[r];
69  });
70  for (unsigned j = 0; j < n; j++) {
71  output.at(i * n + j) = indexes.at(j);
72  }
73  }
74 }
75 
// Helper for the precision-dispatch switch in TopResults(Blob&): generates a
// `case` that downcasts the type-erased Blob to the TBlob specialization
// matching the given Precision enumerator (dynamic_cast to reference throws
// std::bad_cast on mismatch) and forwards to the typed TopResults overload.
// The macro is #undef'd right after the dispatching function below.
#define TBLOB_TOP_RESULT(precision) \
    case InferenceEngine::Precision::precision: { \
        using myBlobType = InferenceEngine::PrecisionTrait<Precision::precision>::value_type; \
        TBlob<myBlobType>& tblob = dynamic_cast<TBlob<myBlobType>&>(input); \
        TopResults(n, tblob, output); \
        break; \
    }
83 
84 /**
85  * @deprecated InferenceEngine utility functions are not a part of public API.
86  * This method will be removed in 2020.4 release.
87  * @brief Gets the top n results from a blob
88  *
89  * @param n Top n count
90  * @param input 1D blob that contains probabilities
91  * @param output Vector of indexes for the top n places
92  */
93 INFERENCE_ENGINE_DEPRECATED(
94  "InferenceEngine utility functions are not a part of public API. Will be removed in 2020.4")
95 inline void TopResults(unsigned int n, Blob& input, std::vector<unsigned>& output) {
96  IE_SUPPRESS_DEPRECATED_START
97  switch (input.getTensorDesc().getPrecision()) {
98  TBLOB_TOP_RESULT(FP32);
99  TBLOB_TOP_RESULT(FP16);
100  TBLOB_TOP_RESULT(Q78);
101  TBLOB_TOP_RESULT(I16);
102  TBLOB_TOP_RESULT(U8);
103  TBLOB_TOP_RESULT(I8);
104  TBLOB_TOP_RESULT(U16);
105  TBLOB_TOP_RESULT(I32);
106  TBLOB_TOP_RESULT(U64);
107  TBLOB_TOP_RESULT(I64);
108  default:
109  THROW_IE_EXCEPTION << "cannot locate blob for precision: " << input.getTensorDesc().getPrecision();
110  }
111  IE_SUPPRESS_DEPRECATED_END
112 }
113 
114 #undef TBLOB_TOP_RESULT
115 
116 /**
117  * @deprecated InferenceEngine utility functions are not a part of public API.
118  * This method will be removed in 2020.4 release.
119  * @brief Copies a 8-bit RGB image to the blob.
120  *
121  * Throws an exception in case of dimensions or input size mismatch
122  *
123  * @tparam data_t Type of the target blob
124  * @param RGB8 8-bit RGB image
125  * @param RGB8_size Size of the image
126  * @param blob Target blob to write image to
127  */
128 template <typename data_t>
129 INFERENCE_ENGINE_DEPRECATED(
130  "InferenceEngine utility functions are not a part of public API. Will be removed in 2020.4")
131 void copyFromRGB8(uint8_t* RGB8, size_t RGB8_size, InferenceEngine::TBlob<data_t>* blob) {
132  SizeVector dims = blob->getTensorDesc().getDims();
133  if (4 != dims.size())
134  THROW_IE_EXCEPTION << "Cannot write data to input blob! Blob has incorrect dimensions size " << dims.size();
135  size_t num_channels = dims[1]; // because RGB
136  size_t num_images = dims[0];
137  size_t w = dims[3];
138  size_t h = dims[2];
139  size_t nPixels = w * h;
140 
141  if (RGB8_size != w * h * num_channels * num_images)
142  THROW_IE_EXCEPTION << "input pixels mismatch, expecting " << w * h * num_channels * num_images
143  << " bytes, got: " << RGB8_size;
144 
145  std::vector<data_t*> dataArray;
146  for (unsigned int n = 0; n < num_images; n++) {
147  for (unsigned int i = 0; i < num_channels; i++) {
148  if (!n && !i && dataArray.empty()) {
149  dataArray.push_back(blob->data());
150  } else {
151  dataArray.push_back(dataArray.at(n * num_channels + i - 1) + nPixels);
152  }
153  }
154  }
155  for (size_t n = 0; n < num_images; n++) {
156  size_t n_num_channels = n * num_channels;
157  size_t n_num_channels_nPixels = n_num_channels * nPixels;
158  for (size_t i = 0; i < nPixels; i++) {
159  size_t i_num_channels = i * num_channels + n_num_channels_nPixels;
160  for (size_t j = 0; j < num_channels; j++) {
161  dataArray.at(n_num_channels + j)[i] = RGB8[i_num_channels + j];
162  }
163  }
164  }
165 }
166 
167 /**
168  * @deprecated InferenceEngine utility functions are not a part of public API.
169  * This method will be removed in 2020.4 release.
170  * @brief Splits the RGB channels to either I16 Blob or float blob.
171  *
172  * The image buffer is assumed to be packed with no support for strides.
173  *
174  * @param imgBufRGB8 Packed 24bit RGB image (3 bytes per pixel: R-G-B)
175  * @param lengthbytesSize Size in bytes of the RGB image. It is equal to amount of pixels times 3 (number of channels)
176  * @param input Blob to contain the split image (to 3 channels)
177  */
178 INFERENCE_ENGINE_DEPRECATED(
179  "InferenceEngine utility functions are not a part of public API. Will be removed in 2020.4")
180 inline void ConvertImageToInput(unsigned char* imgBufRGB8, size_t lengthbytesSize, Blob& input) {
181  IE_SUPPRESS_DEPRECATED_START
182  TBlob<float>* float_input = dynamic_cast<TBlob<float>*>(&input);
183  if (float_input != nullptr) copyFromRGB8(imgBufRGB8, lengthbytesSize, float_input);
184 
185  TBlob<short>* short_input = dynamic_cast<TBlob<short>*>(&input);
186  if (short_input != nullptr) copyFromRGB8(imgBufRGB8, lengthbytesSize, short_input);
187 
188  TBlob<uint8_t>* byte_input = dynamic_cast<TBlob<uint8_t>*>(&input);
189  if (byte_input != nullptr) copyFromRGB8(imgBufRGB8, lengthbytesSize, byte_input);
190  IE_SUPPRESS_DEPRECATED_END
191 }
192 
193 /**
194  * @deprecated InferenceEngine utility functions are not a part of public API.
195  * This method will be removed in 2020.4 release.
196  * @brief Copies data from a certain precision to float
197  *
198  * @param dst Pointer to an output float buffer, must be allocated before the call
199  * @param src Source blob to take data from
200  */
201 template <typename T>
202 INFERENCE_ENGINE_DEPRECATED(
203  "InferenceEngine utility functions are not a part of public API. Will be removed in 2020.4")
204 void copyToFloat(float* dst, const InferenceEngine::Blob* src) {
205  if (!dst) {
206  return;
207  }
208  const InferenceEngine::TBlob<T>* t_blob = dynamic_cast<const InferenceEngine::TBlob<T>*>(src);
209  if (t_blob == nullptr) {
210  THROW_IE_EXCEPTION << "input type is " << src->getTensorDesc().getPrecision() << " but input is not "
211  << typeid(T).name();
212  }
213 
214  const T* srcPtr = t_blob->readOnly();
215  if (srcPtr == nullptr) {
216  THROW_IE_EXCEPTION << "Input data was not allocated.";
217  }
218  for (size_t i = 0; i < t_blob->size(); i++) dst[i] = srcPtr[i];
219 }
220 
221 } // namespace InferenceEngine
#define THROW_IE_EXCEPTION
A macro used to throw the exception with a notable description.
Definition: ie_exception.hpp:25
A header file that provides wrapper classes for IExecutableNetwork.
void TopResults(unsigned int n, TBlob< T > &input, std::vector< unsigned > &output)
Gets the top n results from a tblob.
Definition: inference_engine.hpp:49
A header file that provides versioning information for the Inference Engine shared library.
A header file for a plugin logging mechanism.
std::vector< int > offset
A vector of offsets for each dimension.
Definition: ie_layers.h:1016
This is a header file for the Network reader class (wrapper) used to build networks from a given IR.
Inference Engine API.
Definition: ie_argmax_layer.hpp:15
void ConvertImageToInput(unsigned char *imgBufRGB8, size_t lengthbytesSize, Blob &input)
Splits the RGB channels to either I16 Blob or float blob.
Definition: inference_engine.hpp:180
std::string name
Layer name.
Definition: ie_layers.h:42
void copyToFloat(float *dst, const InferenceEngine::Blob *src)
Copies data from a certain precision to float.
Definition: inference_engine.hpp:204
A header file for Blob and generic TBlob<>
void copyFromRGB8(uint8_t *RGB8, size_t RGB8_size, InferenceEngine::TBlob< data_t > *blob)
Copies a 8-bit RGB image to the blob.
Definition: inference_engine.hpp:131
A header for a class to handle plugin loading.
This is a header file for the ICNNNetwork class.
Represents real host memory allocated for a Tensor/Blob per C type.
Definition: ie_blob.h:470
virtual LockedMemory< const T > readOnly() const noexcept
Creates a new empty rvalue read-only LockedMemory object.
Definition: ie_blob.h:587
size_t size() const noexcept override
Returns the total number of elements, which is a product of all the dimensions.
Definition: ie_blob.h:312
This is a header file for the Inference Engine Core class C++ API.
a header file for internal Layers structure to describe layers information
a header for advanced hardware related properties for IE plugins
This is a header file for the ICNNNetworkStats class.
The macro defines a symbol import/export mechanism essential for Microsoft Windows(R) OS...
This class represents a universal container in the Inference Engine.
Definition: ie_blob.h:37
std::vector< size_t > SizeVector
Represents tensor size.
Definition: ie_common.h:29
This is a header file for the Inference Engine plugin C++ API.