inference_engine.hpp
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

/**
 * @brief A header file that provides a set of convenience utility functions and the main include file for all other .h files.
 * @file inference_engine.hpp
 */
#pragma once

#include <vector>
#include <numeric>
#include <algorithm>
#include <memory>

#include <ie_blob.h>
#include <ie_api.h>
#include <ie_error.hpp>
#include <ie_layers.h>
#include <ie_device.hpp>
#include <ie_plugin_dispatcher.hpp>
#include <ie_plugin_config.hpp>
#include <ie_icnn_network.hpp>
#include <ie_icnn_network_stats.hpp>
#include <ie_core.hpp>
#include <cpp/ie_cnn_net_reader.h>
#include <cpp/ie_plugin_cpp.hpp>
#include <cpp/ie_executable_network.hpp>
#include <ie_version.hpp>

namespace InferenceEngine {
/**
 * @brief Gets the top n results from a tblob
 * @param n Top n count
 * @param input 1D tblob that contains probabilities
 * @param output Vector of indexes for the top n places
 */
template<class T>
inline void TopResults(unsigned int n, TBlob<T> &input, std::vector<unsigned> &output) {
    SizeVector dims = input.getTensorDesc().getDims();
    size_t input_rank = dims.size();
    if (!input_rank || !dims[0])
        THROW_IE_EXCEPTION << "Input blob has incorrect dimensions!";
    size_t batchSize = dims[0];
    size_t perBatchSize = input.size() / batchSize;
    std::vector<unsigned> indexes(perBatchSize);

    // Clamp n to the number of elements per batch item, so that std::partial_sort
    // below never receives a middle iterator past the end of `indexes`.
    n = static_cast<unsigned>(std::min<size_t>(static_cast<size_t>(n), perBatchSize));

    output.resize(n * batchSize);

    for (size_t i = 0; i < batchSize; i++) {
        size_t offset = i * perBatchSize;
        T *batchData = input.data();
        batchData += offset;

        std::iota(std::begin(indexes), std::end(indexes), 0);
        std::partial_sort(std::begin(indexes), std::begin(indexes) + n, std::end(indexes),
                          [&batchData](unsigned l, unsigned r) {
                              return batchData[l] > batchData[r];
                          });
        for (unsigned j = 0; j < n; j++) {
            output.at(i * n + j) = indexes.at(j);
        }
    }
}
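
// Usage sketch (illustrative only, not part of the original header): extracting the
// five highest-scoring class indexes from a typed blob of classification scores.
// The blob `scores` is a hypothetical 1x1000 FP32 output.
//
//   InferenceEngine::TBlob<float>::Ptr scores = ...;  // e.g. taken from an inference result
//   std::vector<unsigned> top5;
//   InferenceEngine::TopResults(5, *scores, top5);    // top5[0] holds the best class index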

#define TBLOB_TOP_RESULT(precision)\
    case InferenceEngine::Precision::precision: {\
        using myBlobType = InferenceEngine::PrecisionTrait<Precision::precision>::value_type;\
        TBlob<myBlobType> &tblob = dynamic_cast<TBlob<myBlobType> &>(input);\
        TopResults(n, tblob, output);\
        break;\
    }

/**
 * @brief Gets the top n results from a blob
 * @param n Top n count
 * @param input 1D blob that contains probabilities
 * @param output Vector of indexes for the top n places
 */
inline void TopResults(unsigned int n, Blob &input, std::vector<unsigned> &output) {
    switch (input.getTensorDesc().getPrecision()) {
        TBLOB_TOP_RESULT(FP32);
        TBLOB_TOP_RESULT(FP16);
        TBLOB_TOP_RESULT(Q78);
        TBLOB_TOP_RESULT(I16);
        TBLOB_TOP_RESULT(U8);
        TBLOB_TOP_RESULT(I8);
        TBLOB_TOP_RESULT(U16);
        TBLOB_TOP_RESULT(I32);
        default:
            THROW_IE_EXCEPTION << "cannot locate blob for precision: " << input.getTensorDesc().getPrecision();
    }
}
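
// Usage sketch (illustrative only): the Blob-based overload dispatches on the blob's
// precision, so it can be called directly on an untyped output blob. The request and
// output name below are hypothetical.
//
//   InferenceEngine::Blob::Ptr out = inferRequest.GetBlob("prob");  // untyped output blob
//   std::vector<unsigned> topIndexes;
//   InferenceEngine::TopResults(10, *out, topIndexes);              // works for FP32, FP16, U8, ...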

#undef TBLOB_TOP_RESULT

/**
 * @brief Copies an 8-bit RGB image to the blob.
 * Throws an exception in case of dimensions or input size mismatch
 * @tparam data_t Type of the target blob
 * @param RGB8 8-bit RGB image
 * @param RGB8_size Size of the image
 * @param blob Target blob to write image to
 */
template<typename data_t>
void copyFromRGB8(uint8_t *RGB8, size_t RGB8_size, InferenceEngine::TBlob<data_t> *blob) {
    SizeVector dims = blob->getTensorDesc().getDims();
    if (4 != dims.size())
        THROW_IE_EXCEPTION << "Cannot write data to input blob! Blob has incorrect dimensions size "
                           << dims.size();
    size_t num_channels = dims[1];  // because RGB
    size_t num_images = dims[0];
    size_t w = dims[3];
    size_t h = dims[2];
    size_t nPixels = w * h;

    if (RGB8_size != w * h * num_channels * num_images)
        THROW_IE_EXCEPTION << "input pixels mismatch, expecting " << w * h * num_channels * num_images
                           << " bytes, got: " << RGB8_size;

    // Build one pointer per (image, channel) plane inside the NCHW blob;
    // each plane is nPixels elements long and planes are laid out contiguously.
    std::vector<data_t *> dataArray;
    for (unsigned int n = 0; n < num_images; n++) {
        for (unsigned int i = 0; i < num_channels; i++) {
            if (!n && !i && dataArray.empty()) {
                dataArray.push_back(blob->data());
            } else {
                dataArray.push_back(dataArray.at(n * num_channels + i - 1) + nPixels);
            }
        }
    }
    // De-interleave the packed R,G,B,R,G,B,... source into the per-channel planes.
    for (size_t n = 0; n < num_images; n++) {
        size_t n_num_channels = n * num_channels;
        size_t n_num_channels_nPixels = n_num_channels * nPixels;
        for (size_t i = 0; i < nPixels; i++) {
            size_t i_num_channels = i * num_channels + n_num_channels_nPixels;
            for (size_t j = 0; j < num_channels; j++) {
                dataArray.at(n_num_channels + j)[i] = RGB8[i_num_channels + j];
            }
        }
    }
}
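
// Usage sketch (illustrative only): copying a packed 224x224 RGB frame into an NCHW
// FP32 input blob. The interleaved source (R,G,B,R,G,B,...) ends up as three planar
// channels (RRR... GGG... BBB...). Buffer and blob names are hypothetical.
//
//   std::vector<uint8_t> frame(224 * 224 * 3);    // packed RGB pixels
//   InferenceEngine::TBlob<float>::Ptr in = ...;  // NCHW blob with dims {1, 3, 224, 224}
//   InferenceEngine::copyFromRGB8(frame.data(), frame.size(), in.get());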

/**
 * @brief Splits the RGB channels of a packed image into a float, I16, or U8 blob.
 * The image buffer is assumed to be packed with no support for strides.
 * @param imgBufRGB8 Packed 24-bit RGB image (3 bytes per pixel: R-G-B)
 * @param lengthbytesSize Size of the RGB image in bytes. It is equal to the number of pixels times 3 (the number of channels)
 * @param input Blob to contain the split image (to 3 channels)
 */
inline void ConvertImageToInput(unsigned char *imgBufRGB8, size_t lengthbytesSize, Blob &input) {
    TBlob<float> *float_input = dynamic_cast<TBlob<float> *>(&input);
    if (float_input != nullptr) copyFromRGB8(imgBufRGB8, lengthbytesSize, float_input);

    TBlob<short> *short_input = dynamic_cast<TBlob<short> *>(&input);
    if (short_input != nullptr) copyFromRGB8(imgBufRGB8, lengthbytesSize, short_input);

    TBlob<uint8_t> *byte_input = dynamic_cast<TBlob<uint8_t> *>(&input);
    if (byte_input != nullptr) copyFromRGB8(imgBufRGB8, lengthbytesSize, byte_input);
}
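
// Usage sketch (illustrative only): feeding a decoded RGB frame to a network input
// without knowing the input blob's element type in advance; ConvertImageToInput
// dispatches to the matching copyFromRGB8 instantiation. Names are hypothetical.
//
//   InferenceEngine::Blob::Ptr inputBlob = inferRequest.GetBlob("data");
//   InferenceEngine::ConvertImageToInput(rgbBuffer, rgbBufferSize, *inputBlob);  // rgbBuffer: packed 8-bit RGB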

/**
 * @brief Copies data from a blob of a certain precision to a float buffer
 * @tparam T C type that matches the precision of the source blob
 * @param dst Pointer to an output float buffer, must be allocated before the call
 * @param src Source blob to take data from
 */
template<typename T>
void copyToFloat(float *dst, const InferenceEngine::Blob *src) {
    if (!dst) {
        return;
    }
    const InferenceEngine::TBlob<T> *t_blob = dynamic_cast<const InferenceEngine::TBlob<T> *>(src);
    if (t_blob == nullptr) {
        THROW_IE_EXCEPTION << "input type is " << src->getTensorDesc().getPrecision() << " but input is not " << typeid(T).name();
    }

    const T *srcPtr = t_blob->readOnly();
    if (srcPtr == nullptr) {
        THROW_IE_EXCEPTION << "Input data was not allocated.";
    }
    for (size_t i = 0; i < t_blob->size(); i++) dst[i] = srcPtr[i];
}
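
// Usage sketch (illustrative only): converting an FP32 output blob into a plain
// float buffer for post-processing. The destination must be pre-allocated to hold
// src->size() elements; names below are hypothetical.
//
//   InferenceEngine::Blob::Ptr out = inferRequest.GetBlob("prob");
//   std::vector<float> result(out->size());
//   InferenceEngine::copyToFloat<float>(result.data(), out.get());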

}  // namespace InferenceEngine