inference_engine.hpp
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

/**
 * @brief A header file that provides a set of convenience utility functions and serves as the main include file
 * for the other Inference Engine headers.
 *
 * @file inference_engine.hpp
 */
#pragma once

#include <cpp/ie_cnn_net_reader.h>
#include <ie_api.h>
#include <ie_blob.h>
#include <ie_layers.h>

#include <algorithm>
#include <cpp/ie_executable_network.hpp>
#include <cpp/ie_plugin_cpp.hpp>
#include <ie_core.hpp>
#include <ie_error.hpp>
#include <ie_icnn_network.hpp>
#include <ie_icnn_network_stats.hpp>
#include <ie_plugin_config.hpp>
#include <ie_plugin_dispatcher.hpp>
#include <ie_version.hpp>
#include <memory>
#include <numeric>
#include <vector>

/**
 * @brief Inference Engine API
 */
namespace InferenceEngine {

/**
 * @deprecated InferenceEngine utility functions are not a part of public API
 * @brief Gets the top n results from a TBlob
 *
 * @param n Top n count
 * @param input 1D TBlob that contains probabilities
 * @param output Vector of indexes for the top n places
 */
template <class T>
INFERENCE_ENGINE_DEPRECATED(
    "InferenceEngine utility functions are not a part of public API. Will be removed in 2020 R2")
inline void TopResults(unsigned int n, TBlob<T>& input, std::vector<unsigned>& output) {
    SizeVector dims = input.getTensorDesc().getDims();
    size_t input_rank = dims.size();
    if (!input_rank || !dims[0]) THROW_IE_EXCEPTION << "Input blob has incorrect dimensions!";
    size_t batchSize = dims[0];
    std::vector<unsigned> indexes(input.size() / batchSize);

    // Clamp n to the number of elements available per batch item so that the
    // partial sort below never runs past the end of the index vector.
    n = static_cast<unsigned>(std::min<size_t>(n, input.size() / batchSize));

    output.resize(n * batchSize);

    for (size_t i = 0; i < batchSize; i++) {
        size_t offset = i * (input.size() / batchSize);
        T* batchData = input.data();
        batchData += offset;

        // Sort the per-item indexes by descending score and keep the first n.
        std::iota(std::begin(indexes), std::end(indexes), 0);
        std::partial_sort(std::begin(indexes), std::begin(indexes) + n, std::end(indexes),
                          [&batchData](unsigned l, unsigned r) {
                              return batchData[l] > batchData[r];
                          });
        for (unsigned j = 0; j < n; j++) {
            output.at(i * n + j) = indexes.at(j);
        }
    }
}
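
// Usage sketch (illustrative only, not part of the original header): shows how the
// deprecated TBlob overload above could be called on a classification output.
// The parameter name "probabilities" and the top-5 count are assumptions; the blob
// is expected to hold per-class scores with the batch as its first dimension.
inline void ExampleTopResultsTBlob(TBlob<float>& probabilities) {
    std::vector<unsigned> topIndices;
    IE_SUPPRESS_DEPRECATED_START
    TopResults(5u, probabilities, topIndices);
    IE_SUPPRESS_DEPRECATED_END
    // topIndices.at(b * 5 + k) is the index of the k-th best class for image b.
}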

#define TBLOB_TOP_RESULT(precision)                                                           \
    case InferenceEngine::Precision::precision: {                                             \
        using myBlobType = InferenceEngine::PrecisionTrait<Precision::precision>::value_type; \
        TBlob<myBlobType>& tblob = dynamic_cast<TBlob<myBlobType>&>(input);                   \
        TopResults(n, tblob, output);                                                         \
        break;                                                                                \
    }

/**
 * @deprecated InferenceEngine utility functions are not a part of public API
 * @brief Gets the top n results from a blob
 *
 * @param n Top n count
 * @param input 1D blob that contains probabilities
 * @param output Vector of indexes for the top n places
 */
INFERENCE_ENGINE_DEPRECATED(
    "InferenceEngine utility functions are not a part of public API. Will be removed in 2020 R2")
inline void TopResults(unsigned int n, Blob& input, std::vector<unsigned>& output) {
    IE_SUPPRESS_DEPRECATED_START
    switch (input.getTensorDesc().getPrecision()) {
        TBLOB_TOP_RESULT(FP32);
        TBLOB_TOP_RESULT(FP16);
        TBLOB_TOP_RESULT(Q78);
        TBLOB_TOP_RESULT(I16);
        TBLOB_TOP_RESULT(U8);
        TBLOB_TOP_RESULT(I8);
        TBLOB_TOP_RESULT(U16);
        TBLOB_TOP_RESULT(I32);
    default:
        THROW_IE_EXCEPTION << "cannot locate blob for precision: " << input.getTensorDesc().getPrecision();
    }
    IE_SUPPRESS_DEPRECATED_END
}

#undef TBLOB_TOP_RESULT
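
// Usage sketch (illustrative only, not part of the original header): the Blob overload
// above dispatches on the tensor precision, so the caller does not need to know the
// concrete TBlob type. The parameter name "networkOutput" and the top-1 count are assumptions.
inline void ExampleTopResultsBlob(Blob& networkOutput) {
    std::vector<unsigned> topIndices;
    IE_SUPPRESS_DEPRECATED_START
    TopResults(1u, networkOutput, topIndices);  // one best class index per image
    IE_SUPPRESS_DEPRECATED_END
}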

/**
 * @deprecated InferenceEngine utility functions are not a part of public API
 * @brief Copies an 8-bit RGB image to the blob.
 *
 * Throws an exception in case of dimensions or input size mismatch
 *
 * @tparam data_t Type of the target blob
 * @param RGB8 8-bit RGB image
 * @param RGB8_size Size of the image
 * @param blob Target blob to write image to
 */
template <typename data_t>
INFERENCE_ENGINE_DEPRECATED(
    "InferenceEngine utility functions are not a part of public API. Will be removed in 2020 R2")
void copyFromRGB8(uint8_t* RGB8, size_t RGB8_size, InferenceEngine::TBlob<data_t>* blob) {
    SizeVector dims = blob->getTensorDesc().getDims();
    if (4 != dims.size())
        THROW_IE_EXCEPTION << "Cannot write data to input blob! Blob has incorrect dimensions size " << dims.size();
    size_t num_channels = dims[1];  // because RGB
    size_t num_images = dims[0];
    size_t w = dims[3];
    size_t h = dims[2];
    size_t nPixels = w * h;

    if (RGB8_size != w * h * num_channels * num_images)
        THROW_IE_EXCEPTION << "input pixels mismatch, expecting " << w * h * num_channels * num_images
                           << " bytes, got: " << RGB8_size;

    // Precompute a pointer to the start of every channel plane in the blob.
    std::vector<data_t*> dataArray;
    for (unsigned int n = 0; n < num_images; n++) {
        for (unsigned int i = 0; i < num_channels; i++) {
            if (!n && !i && dataArray.empty()) {
                dataArray.push_back(blob->data());
            } else {
                dataArray.push_back(dataArray.at(n * num_channels + i - 1) + nPixels);
            }
        }
    }
    // De-interleave the packed R-G-B bytes into the planar blob layout.
    for (size_t n = 0; n < num_images; n++) {
        size_t n_num_channels = n * num_channels;
        size_t n_num_channels_nPixels = n_num_channels * nPixels;
        for (size_t i = 0; i < nPixels; i++) {
            size_t i_num_channels = i * num_channels + n_num_channels_nPixels;
            for (size_t j = 0; j < num_channels; j++) {
                dataArray.at(n_num_channels + j)[i] = RGB8[i_num_channels + j];
            }
        }
    }
}
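
// Usage sketch (illustrative only, not part of the original header): copies a packed
// 8-bit RGB buffer into a planar NCHW float blob. The blob is assumed to already be
// allocated with dims {1, 3, height, width}; "rgbData", "height", and "width" are
// assumptions supplied by the caller.
inline void ExampleCopyFromRGB8(uint8_t* rgbData, size_t height, size_t width, TBlob<float>* inputBlob) {
    IE_SUPPRESS_DEPRECATED_START
    copyFromRGB8(rgbData, 1 * 3 * height * width, inputBlob);  // one image, three channels
    IE_SUPPRESS_DEPRECATED_END
}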

/**
 * @deprecated InferenceEngine utility functions are not a part of public API
 * @brief Splits the RGB channels into a float, I16 (short), or U8 blob.
 *
 * The image buffer is assumed to be packed with no support for strides.
 *
 * @param imgBufRGB8 Packed 24-bit RGB image (3 bytes per pixel: R-G-B)
 * @param lengthbytesSize Size in bytes of the RGB image. It is equal to the number of pixels times 3 (the number of
 * channels)
 * @param input Blob to contain the split image (to 3 channels)
 */
INFERENCE_ENGINE_DEPRECATED(
    "InferenceEngine utility functions are not a part of public API. Will be removed in 2020 R2")
inline void ConvertImageToInput(unsigned char* imgBufRGB8, size_t lengthbytesSize, Blob& input) {
    IE_SUPPRESS_DEPRECATED_START
    // Only the cast that matches the blob's actual element type succeeds; the others return nullptr.
    TBlob<float>* float_input = dynamic_cast<TBlob<float>*>(&input);
    if (float_input != nullptr) copyFromRGB8(imgBufRGB8, lengthbytesSize, float_input);

    TBlob<short>* short_input = dynamic_cast<TBlob<short>*>(&input);
    if (short_input != nullptr) copyFromRGB8(imgBufRGB8, lengthbytesSize, short_input);

    TBlob<uint8_t>* byte_input = dynamic_cast<TBlob<uint8_t>*>(&input);
    if (byte_input != nullptr) copyFromRGB8(imgBufRGB8, lengthbytesSize, byte_input);
    IE_SUPPRESS_DEPRECATED_END
}
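
// Usage sketch (illustrative only, not part of the original header): feeds a packed
// 24-bit RGB frame into whichever of the supported blob types the network input happens
// to be. The parameter names "frame", "width", and "height" are assumptions.
inline void ExampleConvertImageToInput(unsigned char* frame, size_t width, size_t height, Blob& inputBlob) {
    IE_SUPPRESS_DEPRECATED_START
    ConvertImageToInput(frame, width * height * 3, inputBlob);  // 3 bytes per pixel
    IE_SUPPRESS_DEPRECATED_END
}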

/**
 * @deprecated InferenceEngine utility functions are not a part of public API
 * @brief Copies data from a blob of a certain precision to a float buffer
 *
 * @tparam T C type that corresponds to the precision of the source blob
 * @param dst Pointer to an output float buffer, must be allocated before the call
 * @param src Source blob to take data from
 */
template <typename T>
INFERENCE_ENGINE_DEPRECATED(
    "InferenceEngine utility functions are not a part of public API. Will be removed in 2020 R2")
void copyToFloat(float* dst, const InferenceEngine::Blob* src) {
    if (!dst) {
        return;
    }
    const InferenceEngine::TBlob<T>* t_blob = dynamic_cast<const InferenceEngine::TBlob<T>*>(src);
    if (t_blob == nullptr) {
        THROW_IE_EXCEPTION << "input type is " << src->getTensorDesc().getPrecision() << " but input is not "
                           << typeid(T).name();
    }

    const T* srcPtr = t_blob->readOnly();
    if (srcPtr == nullptr) {
        THROW_IE_EXCEPTION << "Input data was not allocated.";
    }
    for (size_t i = 0; i < t_blob->size(); i++) dst[i] = srcPtr[i];
}
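
// Usage sketch (illustrative only, not part of the original header): converts an I16
// output blob into a caller-owned float buffer. The explicit template argument must
// match the blob's underlying C type, and "floatBuffer" is assumed to already hold at
// least source->size() floats.
inline void ExampleCopyToFloat(const Blob* source, float* floatBuffer) {
    IE_SUPPRESS_DEPRECATED_START
    copyToFloat<short>(floatBuffer, source);  // I16 elements are stored as short
    IE_SUPPRESS_DEPRECATED_END
}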

}  // namespace InferenceEngine