// ie_layers.h — source listing (doxygen navigation text removed)
1 // Copyright (C) 2018-2019 Intel Corporation
2 // SPDX-License-Identifier: Apache-2.0
3 //
4 
5 /**
6  * @brief a header file for internal Layers structure to describe layers information
7  * @file ie_layers.h
8  */
9 #pragma once
10 
#include <algorithm>
#include <cctype>
#include <iterator>
#include <map>
#include <memory>
#include <sstream>
#include <string>
#include <vector>

#include "ie_common.h"
#include "ie_data.h"
#include "ie_blob.h"
#include "ie_device.hpp"
#include "ie_layers_property.hpp"
23 
24 namespace InferenceEngine {
25 /**
26  * @brief This is an internal common Layer parameter parsing arguments
27  */
28 struct LayerParams {
29  /// @brief Layer name
30  std::string name;
31  /// @brief Layer type
32  std::string type;
33  /// @brief Layer precision
35 };
36 
37 /**
38  * @brief This is a base abstraction Layer - all DNN Layers inherit from this class
39  */
40 class CNNLayer {
41 public:
42  /**
43  * @brief A shared pointer to CNNLayer
44  */
45  using Ptr = std::shared_ptr<CNNLayer>;
46 
47  /**
48  * @brief Layer name
49  */
50  std::string name;
51  /**
52  * @brief Layer type
53  */
54  std::string type;
55  /**
56  * @brief Layer base operating precision
57  */
59  /**
60  * @brief A vector of pointers to the output data elements of this layer in the di-graph (order matters)
61  */
62  std::vector<DataPtr> outData;
63  /**
64  * @brief A vector of weak pointers to the input data elements of this layer in the di-graph (order matters)
65  */
66  std::vector<DataWeakPtr> insData;
67  /**
68  * @brief If suggested to fuse - a pointer to the layer which needs to be fused with this layer
69  */
71  /**
72  * @brief Convenience user values to store in this object as extra data
73  */
75  /**
76  * @brief Layer affinity set by user.
77  */
78  std::string affinity;
79 
80  /**
81  * @brief A constructor. Creates a new CNNLayer instance and initializes layer parameters with the given values.
82  * @param prms Basic common parsing parameters
83  */
84  explicit CNNLayer(const LayerParams &prms) : name(prms.name), type(prms.type),
85  precision(prms.precision), userValue({0}) {
86  }
87 
88  /**
89  * @brief A virtual destructor
90  */
91  virtual ~CNNLayer() = default;
92 
93  /**
94  * @brief Sets a layer to be fused with
95  * @param layer Reference to the layer to be fused with
96  */
97  void fuse(Ptr &layer) {
98  _fusedWith = layer;
99  }
100 
101  /**
102  * @brief Returns the first element of the input data for this layer
103  * @return A smart pointer to the input data element
104  */
105  virtual const DataPtr input() const {
106  if (insData.empty()) {
107  THROW_IE_EXCEPTION << "Internal error: input data is empty";
108  }
109  auto lockedFirstInsData = insData[0].lock();
110  if (!lockedFirstInsData) {
111  THROW_IE_EXCEPTION << "Internal error: unable to lock weak_ptr\n";
112  }
113  return lockedFirstInsData;
114  }
115 
116  /**
117  * @brief Checks if the input data and layer data are legitimate
118  */
119  INFERENCE_ENGINE_API_CPP(void) validateLayer();
120 
121  /**
122  * @brief Gets float value for the given parameter
123  * @param param - name of the parameter to find
124  * @param def - default value of the parameter if not found
125  * @return float value
126  */
127  float GetParamAsFloat(const char* param, float def) const {
128  std::string val = GetParamAsString(param, std::to_string(def).c_str());
129  try {
130  return std::stof(val);
131  } catch (...) {
132  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name
133  << ". Value " << val << " cannot be casted to float.";
134  }
135  }
136 
137  /**
138  * @brief Returns a float value for the given layer parameter
139  * @param param Name of the layer parameter
140  * @return A float value for the specified parameter
141  */
142  float GetParamAsFloat(const char *param) const {
143  std::string val = GetParamAsString(param);
144  try {
145  return std::stof(val);
146  } catch (...) {
147  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name
148  << ". Value " << val << " cannot be casted to float.";
149  }
150  }
151 
152  /**
153  * @brief Returns a vector of float values for the given parameter or returns the default value
154  * @param param Name of the layer parameter
155  * @param def Default value of the parameter if not found
156  * @return vector of float values
157  */
158  std::vector<float> GetParamAsFloats(const char *param, std::vector<float> def) const {
159  std::string vals = GetParamAsString(param, "");
160  std::vector<float> result;
161  std::istringstream stream(vals);
162  std::string str;
163  if (vals.empty())
164  return def;
165  while (getline(stream, str, ',')) {
166  try {
167  result.push_back(std::stof(str));
168  } catch (...) {
169  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
170  << ". Value " << vals << " cannot be casted to floats.";
171  }
172  }
173  return result;
174  }
175 
176  /**
177  * @brief Returns a vector of float values for the given parameter
178  * @param param Name of the layer parameter
179  * @return vector of float values
180  */
181  std::vector<float> GetParamAsFloats(const char *param) const {
182  std::string vals = GetParamAsString(param);
183  std::vector<float> result;
184  std::istringstream stream(vals);
185  std::string str;
186  while (getline(stream, str, ',')) {
187  try {
188  result.push_back(std::stof(str));
189  } catch (...) {
190  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
191  << ". Value " << vals << " cannot be casted to floats.";
192  }
193  }
194  return result;
195  }
196 
197  /**
198  * @brief Returns an integer value for the given parameter or returns the default value
199  * @param param Name of the layer parameter
200  * @param def Default value of the parameter if not found
201  * @return An int value for the specified parameter
202  */
203  int GetParamAsInt(const char *param, int def) const {
204  std::string val = GetParamAsString(param, std::to_string(def).c_str());
205  try {
206  return std::stoi(val);
207  } catch (...) {
208  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name
209  << ". Value " << val << " cannot be casted to int.";
210  }
211  }
212 
213  /**
214  * @brief Returns an integer value for the given parameter
215  * @param param Name of the layer parameter
216  * @return An int value for the specified parameter
217  */
218  int GetParamAsInt(const char *param) const {
219  std::string val = GetParamAsString(param);
220  try {
221  return std::stoi(val);
222  } catch (...) {
223  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name
224  << ". Value " << val << " cannot be casted to int.";
225  }
226  }
227 
228 
229  /**
230  * @brief Returns a vector of int values for the given parameter or returns the default value
231  * @param param Name of the layer parameter
232  * @param def Default value of the parameter if not found
233  * @return vector of int values
234  */
235  std::vector<int> GetParamAsInts(const char *param, std::vector<int> def) const {
236  std::string vals = GetParamAsString(param, "");
237  std::vector<int> result;
238  std::istringstream stream(vals);
239  std::string str;
240  if (vals.empty())
241  return def;
242  while (getline(stream, str, ',')) {
243  try {
244  result.push_back(std::stoi(str));
245  } catch (...) {
246  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
247  << ". Value " << vals << " cannot be casted to int.";
248  }
249  }
250  return result;
251  }
252 
253  /**
254  * @brief Returns a vector of int values for the given parameter
255  * @param param Name of the layer parameter
256  * @return vector of int values
257  */
258  std::vector<int> GetParamAsInts(const char *param) const {
259  std::string vals = GetParamAsString(param);
260  std::vector<int> result;
261  std::istringstream stream(vals);
262  std::string str;
263  while (getline(stream, str, ',')) {
264  try {
265  result.push_back(std::stoi(str));
266  } catch (...) {
267  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
268  << ". Value " << vals << " cannot be casted to int.";
269  }
270  }
271  return result;
272  }
273  /**
274  * @brief Returns an unsigned integer value for the given parameter or returns the default value
275  * @param param Name of the layer parameter
276  * @param def Default value of the parameter if not found
277  * @return An unsigned integer value for the specified parameter
278  */
279  unsigned int GetParamAsUInt(const char *param, unsigned int def) const {
280  std::string val = GetParamAsString(param, std::to_string(def).c_str());
281  std::string message = "Cannot parse parameter " + std::string(param) + " from IR for layer " + name
282  + ". Value " + val + " cannot be casted to int.";
283  try {
284  int value = std::stoi(val);
285  if (value < 0) {
286  THROW_IE_EXCEPTION << message;
287  }
288  return static_cast<unsigned int>(value);
289  } catch (...) {
290  THROW_IE_EXCEPTION << message;
291  }
292  }
293 
294  /**
295  * @brief Returns an unsigned integer value for the given parameter
296  * @param param Name of the layer parameter
297  * @return An unsigned integer value for the specified parameter
298  */
299  unsigned int GetParamAsUInt(const char *param) const {
300  std::string val = GetParamAsString(param);
301  std::string message = "Cannot parse parameter " + std::string(param) + " from IR for layer " + name
302  + ". Value " + val + " cannot be casted to int.";
303  try {
304  int value = std::stoi(val);
305  if (value < 0) {
306  THROW_IE_EXCEPTION << message;
307  }
308  return static_cast<unsigned int>(value);
309  } catch (...) {
310  THROW_IE_EXCEPTION << message;
311  }
312  }
313 
314 
315  /**
316  * @brief Returns a vector of unsigned int values for the given parameter or returns the default value
317  * @param param Name of the layer parameter
318  * @param def Default value of the parameter if not found
319  * @return vector of unsigned int values
320  */
321  std::vector<unsigned int> GetParamAsUInts(const char *param, std::vector<unsigned int> def) const {
322  std::string vals = GetParamAsString(param, "");
323  std::vector<unsigned int> result;
324  std::istringstream stream(vals);
325  std::string str;
326  std::string message = "Cannot parse parameter " + std::string(param) + " " + str + " from IR for layer " + name
327  + ". Value " + vals + " cannot be casted to int.";
328  if (vals.empty())
329  return def;
330  while (getline(stream, str, ',')) {
331  try {
332  int value = std::stoi(str);
333  if (value < 0) {
334  THROW_IE_EXCEPTION << message;
335  }
336  result.push_back(static_cast<unsigned int>(value));
337  } catch (...) {
338  THROW_IE_EXCEPTION << message;
339  }
340  }
341  return result;
342  }
343 
344  /**
345  * @brief Returns a vector of unsigned int values for the given parameter
346  * @param param Name of the layer parameter
347  * @return vector of unsigned int values
348  */
349  std::vector<unsigned int> GetParamAsUInts(const char *param) const {
350  std::string vals = GetParamAsString(param);
351  std::vector<unsigned int> result;
352  std::istringstream stream(vals);
353  std::string str;
354  std::string message = "Cannot parse parameter " + std::string(param) + " " + str + " from IR for layer " + name
355  + ". Value " + vals + " cannot be casted to int.";
356  while (getline(stream, str, ',')) {
357  try {
358  int value = std::stoi(str);
359  if (value < 0) {
360  THROW_IE_EXCEPTION << message;
361  }
362  result.push_back(static_cast<unsigned int>(value));
363  } catch (...) {
364  THROW_IE_EXCEPTION << message;
365  }
366  }
367  return result;
368  }
369  /**
370  * @brief Returns an boolean value for the given parameter.
371  * The valid values are (true, false, 1, 0).
372  * @param param Name of the layer parameter
373  * @param def Default value of the parameter if not found
374  * @return An bool value for the specified parameter
375  */
376  bool GetParamAsBool(const char *param, bool def) const {
377  std::string val = GetParamAsString(param, std::to_string(def).c_str());
378  std::string loweredCaseValue;
379  std::transform(val.begin(), val.end(), std::back_inserter(loweredCaseValue), [](char value) {
380  return std::tolower(value);
381  });
382 
383  bool result = false;
384 
385  if (!(std::istringstream(loweredCaseValue) >> std::boolalpha >> result)) {
386  // attempting parse using non alpha bool
387  return (GetParamAsInt(param, def) != 0);
388  }
389 
390  return result;
391  }
392  /**
393  * @deprecated Use GetParamAsBool function for that functionality
394  */
395  bool GetParamsAsBool(const char *param, bool def) const {
396  return GetParamAsBool(param, def);
397  }
398 
399  /**
400  * @brief Returns a string value for the given parameter or returns the default one
401  * @param param Name of the layer parameter
402  * @param def Default value of the parameter if not found
403  * @return A string value
404  */
405  std::string GetParamAsString(const char *param, const char *def) const {
406  auto it = params.find(param);
407  if (it == params.end() || it->second.empty()) {
408  return def;
409  }
410  return (*it).second;
411  }
412 
413  /**
414  * @brief Checks the param presence in the layer
415  * @param param Name of the layer parameter
416  * @return a bool depending param presence
417  */
418  bool CheckParamPresence(const char *param) const {
419  auto it = params.find(param);
420  if (it == params.end()) {
421  return false;
422  }
423  return true;
424  }
425 
426  /**
427  * @brief Returns a string value for the given parameter.
428  * Throws exception if parameter was not found.
429  * @param param Name of the layer parameter
430  * @return A string value
431  */
432  std::string GetParamAsString(const char *param) const {
433  auto it = params.find(param);
434  if (it == params.end()) {
435  THROW_IE_EXCEPTION << "No such parameter name '" << param << "' for layer " << name;
436  }
437  return (*it).second;
438  }
439 
440  std::vector<std::string> GetParamAsStrings(const char *param, std::vector<std::string> def) const {
441  std::string vals = GetParamAsString(param, "");
442  std::vector<std::string> result;
443  std::istringstream stream(vals);
444  std::string str;
445  if (vals.empty())
446  return def;
447  while (getline(stream, str, ',')) {
448  try {
449  result.push_back(str);
450  } catch (...) {
451  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ".";
452  }
453  }
454  return result;
455  }
456 
457  /**
458  * @brief Map of pairs: (parameter name, parameter value)
459  */
460  std::map<std::string, std::string> params;
461 
462  /**
463  * @brief Map of pairs: (name, weights/biases blob)
464  */
465  std::map<std::string, Blob::Ptr> blobs;
466 };
467 
/**
 * @brief Alias for CNNLayer object; used for layers with no dedicated class
 */
using GenericLayer = class CNNLayer;
472 
473 /**
474  * @brief This class represents a layer with Weights and/or Biases (e.g. Convolution/Fully Connected, etc.)
475  */
476 class WeightableLayer : public CNNLayer {
477 public:
478  /**
479  * @brief A default constructor. Constructs a WeightableLayer instance and initiates layer parameters with the given values
480  * @param prms Initial layer parameters
481  */
482  explicit WeightableLayer(const LayerParams &prms) : CNNLayer(prms) {}
483 
484  /**
485  * @brief A pointer to a weights blob
486  */
488  /**
489  * @brief A pointer to a biases blob
490  */
492 
493  /**
494  * @brief Constructs a WeightableLayer instance and initiates layer parameters with the given values
495  */
496  using CNNLayer::CNNLayer;
497 };
498 
/**
 * @brief Convenient way to declare a property with backward compatibility
 * to 2D members (exposes prop_name_x / prop_name_y reference aliases)
 */
#define DEFINE_PROP(prop_name) \
PropertyVector<unsigned int> prop_name;\
unsigned int &prop_name##_x = prop_name.at(X_AXIS);\
unsigned int &prop_name##_y = prop_name.at(Y_AXIS);
506 
507 /**
508  * @brief This class represents a standard 3D Convolution Layer
509  */
511 public:
512  /**
513  * @brief A convolution kernel array [X, Y, Z, ...]
514  */
515  DEFINE_PROP(_kernel);
516  /**
517  * @brief A convolution paddings begin array [X, Y, Z, ...]
518  */
519  DEFINE_PROP(_padding);
520  /**
521  * @brief A convolution paddings end array [X, Y, Z, ...]
522  */
524  /**
525  * @brief A convolution strides array [X, Y, Z, ...]
526  */
527  DEFINE_PROP(_stride);
528  /**
529  * @brief A convolution dilations array [X, Y, Z, ...]
530  */
531  DEFINE_PROP(_dilation);
532  /**
533  * @brief A number of output feature maps (size) generating the 3'rd output dimension
534  */
535  unsigned int _out_depth = 0u;
536  /**
537  * @brief Number of groups
538  */
539  unsigned int _group = 1u;
540  /**
541  * @brief Auto padding type
542  */
543  std::string _auto_pad;
544 
545  /**
546  * @brief Creates a new ConvolutionLayer instance.
547  */
549  _kernel(2, 0u), _padding(2, 0u), _stride(2, 1u), _dilation(2, 1u) {}
550  /**
551  * @brief assignment operator
552  */
553  ConvolutionLayer & operator = (const ConvolutionLayer & that) {
554  if (&that != this) {
555  WeightableLayer::operator=(that);
556  _kernel = that._kernel;
557  _padding = that._padding;
558  _pads_end = that._pads_end;
559  _stride = that._stride;
560  _dilation = that._dilation;
561  _out_depth = that._out_depth;
562  _group = that._group;
563  }
564  return *this;
565  }
566  /**
567  * @brief move assignment operator
568  */
569  ConvolutionLayer& operator = (ConvolutionLayer &&) = default;
570  /**
571  * @brief copy constructor
572  */
574  operator = (that);
575  }
576  /**
577  * @brief move constructor
578  */
579  ConvolutionLayer(ConvolutionLayer &&) = default;
580 };
581 
582 /**
583  * @brief This class represents a standard deconvolution layer
584  */
586  public:
588  using ConvolutionLayer::operator=;
589 };
590 
591 /**
592  * @brief This class represents a standard pooling layer
593  */
594 class PoolingLayer : public CNNLayer {
595 public:
596  /**
597  * @brief Pooling kernel array [X, Y, Z, ...]
598  */
599  DEFINE_PROP(_kernel);
600  /**
601  * @brief Pooling paddings begin array [X, Y, Z, ...]
602  */
603  DEFINE_PROP(_padding);
604  /**
605  * @brief Pooling paddings end array [X, Y, Z, ...]
606  */
608  /**
609  * @brief Pooling strides array [X, Y, Z, ...]
610  */
611  DEFINE_PROP(_stride);
612 
613  /**
614  * @enum PoolType
615  * @brief Defines available pooling types
616  */
617  enum PoolType {
618  MAX = 1,
619  AVG = 2,
620  STOCH = 3,
621  ROI = 4,
622  SPACIAL_PYRAMID = 5
623  };
624 
625  /**
626  * @brief A pooling type
627  */
628  PoolType _type = MAX;
629 
630  /**
631  * @brief A flag that indicates if padding is excluded or not
632  */
633  bool _exclude_pad = false;
634  /**
635  * @brief Auto padding type
636  */
637  std::string _auto_pad;
638 
639  /**
640  * @brief Creates a new PoolingLayer instance.
641  */
642  explicit PoolingLayer(const LayerParams &p) : CNNLayer(p),
643  _kernel(2, 0u), _padding(2, 0u), _stride(2, 0u) {}
644 
645  /**
646  * @brief assignment operator
647  */
648  PoolingLayer & operator = (const PoolingLayer & that) {
649  if (&that != this) {
650  CNNLayer::operator=(that);
651  _kernel = that._kernel;
652  _padding = that._padding;
653  _pads_end = that._pads_end;
654  _stride = that._stride;
655  _type = that._type;
656  _exclude_pad = that._exclude_pad;
657  }
658  return *this;
659  }
660  /**
661  * @brief move assignment operator
662  */
663  PoolingLayer& operator = (PoolingLayer &&) = default;
664 
665  /**
666  * @brief copy constructor
667  */
668  PoolingLayer(const PoolingLayer & that) : CNNLayer(that) {
669  operator=(that);
670  }
671 
672  /**
673  * @brief move constructor
674  */
675  PoolingLayer(PoolingLayer &&) = default;
676 };
677 
678 /**
679  * @brief This class represents a standard binary convolution layer
680  */
682 public:
683  /**
684  * @enum eBinaryConvolutionMode
685  * @brief Defines possible modes of binary convolution operation
686  */
688  xnor_popcount = 0
689  };
690 
691  /**
692  * @brief Mode of binary convolution operation
693  */
694  eBinaryConvolutionMode _mode = xnor_popcount;
695 
696  /**
697  * @brief A number of input feature maps (size) generating the 3'rd input dimension
698  */
699  unsigned int _in_depth = 0u;
700 
701  /**
702  * @brief A pad value which is used to fill pad area
703  */
704  float _pad_value = 0.0f;
705 
706  /**
707  * @brief A convolution kernel array [X, Y, Z, ...]
708  */
709  DEFINE_PROP(_kernel);
710  /**
711  * @brief A convolution paddings begin array [X, Y, Z, ...]
712  */
713  DEFINE_PROP(_padding);
714  /**
715  * @brief A convolution paddings end array [X, Y, Z, ...]
716  */
718  /**
719  * @brief A convolution strides array [X, Y, Z, ...]
720  */
721  DEFINE_PROP(_stride);
722  /**
723  * @brief A convolution dilations array [X, Y, Z, ...]
724  */
725  DEFINE_PROP(_dilation);
726  /**
727  * @brief A number of output feature maps (size) generating the 3'rd output dimension
728  */
729  unsigned int _out_depth = 0u;
730  /**
731  * @brief Number of groups
732  */
733  unsigned int _group = 1u;
734  /**
735  * @brief Auto padding type
736  */
737  std::string _auto_pad;
738 
739  /**
740  * @brief Creates a new BinaryConvolutionLayer instance.
741  */
743  _kernel(2, 0u), _padding(2, 0u), _stride(2, 1u), _dilation(2, 1u) {}
744  /**
745  * @brief assignment operator
746  */
747  BinaryConvolutionLayer & operator = (const BinaryConvolutionLayer & that) {
748  if (&that != this) {
749  WeightableLayer::operator=(that);
750  _kernel = that._kernel;
751  _padding = that._padding;
752  _pads_end = that._pads_end;
753  _stride = that._stride;
754  _dilation = that._dilation;
755  _out_depth = that._out_depth;
756  _group = that._group;
757  _mode = that._mode;
758  _in_depth = that._in_depth;
759  _pad_value = that._pad_value;
760  }
761  return *this;
762  }
763  /**
764  * @brief move assignment operator
765  */
766  BinaryConvolutionLayer& operator = (BinaryConvolutionLayer &&) = default;
767  /**
768  * @brief copy constructor
769  */
771  operator = (that);
772  }
773  /**
774  * @brief move constructor
775  */
777 };
778 
779 #undef DEFINE_PROP
780 
781 /**
782  * @brief This class represents a fully connected layer
783  */
785 public:
786  /**
787  * @brief A size of output
788  */
789  unsigned int _out_num = 0;
790 
791  /**
792  * @brief Creates a new FullyConnectedLayer instance and initializes layer parameters with the given values.
793  */
795 };
796 
/**
 * @brief This class represents concatenation layer
 * Takes as input several data elements and merges them to one using the supplied axis
 */
class ConcatLayer : public CNNLayer {
public:
    /**
     * @brief An axis on which concatenation operation is performed
     */
    unsigned int _axis = 1;

    /**
     * @brief Creates a new ConcatLayer instance and initializes layer parameters with the given values.
     * If batch is used, then batch needs to be specified as an input dimension also
     * In current implementation 1 means channels, 0 - batch
     */
    using CNNLayer::CNNLayer;
};
815 
/**
 * @brief This class represents a layer that evenly splits the input into the supplied outputs
 */
class SplitLayer : public CNNLayer {
public:
    /**
     * @brief An axis on which split operation is performed
     */
    unsigned int _axis = 1;

    /**
     * @brief Creates a new SplitLayer instance.
     */
    using CNNLayer::CNNLayer;
};
831 
/**
 * @brief This class represents a Linear Response Normalization (LRN) Layer
 */
class NormLayer : public CNNLayer {
public:
    /**
     * @brief Response size (number of elements in the normalization window)
     */
    unsigned int _size = 0;
    /**
     * @deprecated
     */
    unsigned int _k = 1;
    /**
     * @brief Alpha coefficient
     */
    float _alpha = 0;
    /**
     * @brief Beta coefficient
     */
    float _beta = 0;
    /**
     * @brief Flag to specify normalization across feature maps (true) or across channels
     */
    bool _isAcrossMaps = false;

    /**
     * @brief Creates a new NormLayer instance.
     */
    using CNNLayer::CNNLayer;
};
863 
/**
 * @brief This class represents standard softmax Layer
 */
class SoftMaxLayer : public CNNLayer {
public:
    /**
     * @brief Axis number for a softmax operation
     */
    int axis = 1;
    /**
     * @brief Creates a new SoftMaxLayer instance.
     */
    using CNNLayer::CNNLayer;
};
878 
/**
 * @class GRNLayer
 * @brief This class represents standard GRN Layer
 */
class GRNLayer : public CNNLayer {
public:
    /**
     * @brief A default constructor. Creates a new GRNLayer instance and initializes layer parameters with the given values.
     * @param prms Initial layer parameters
     */
    explicit GRNLayer(const LayerParams &prms) : CNNLayer(prms), bias(0.f) {}

    /**
     * @brief Bias for squares sum
     */
    float bias = 0.f;
};
896 
897 /**
898  * @class MVNLayer
899  * @brief This class represents standard MVN Layer
900  */
901 class MVNLayer : public CNNLayer {
902 public:
903  /**
904  * @brief A default constructor. Creates a new MVNLayer instance and initializes layer parameters with the given values.
905  * @param prms Initial layer parameters
906  */
907  explicit MVNLayer(const LayerParams &prms) : CNNLayer(prms), across_channels(0), normalize(1) {}
908 
909  /**
910  * @brief Indicate that mean value is calculated across channels
911  */
913 
914  /**
915  * @brief Indicate that the result needs to be normalized
916  */
917  int normalize = 1;
918 };
919 
/**
 * @brief This class represents a Rectified Linear activation layer
 */
class ReLULayer : public CNNLayer {
public:
    /**
     * @brief Negative slope is used to tackle negative inputs instead of setting them to 0
     */
    float negative_slope = 0.0f;

    /**
     * @brief Creates a new ReLULayer instance.
     */
    using CNNLayer::CNNLayer;
};
935 
/**
 * @brief This class represents a Clamp activation layer
 * Clamps all tensor elements into the range [min_value, max_value]
 */
class ClampLayer : public CNNLayer {
public:
    /**
     * @brief A minimum value
     */
    float min_value = 0.0f;

    /**
     * @brief A maximum value
     */
    float max_value = 1.0f;
    /**
     * @brief Creates a new ClampLayer instance.
     */
    using CNNLayer::CNNLayer;
};
956 
957 
/**
 * @brief This class represents a ReLU6 activation layer
 * Clamps all tensor elements into the range [0, 6.0]
 */
class ReLU6Layer : public ClampLayer {
public:
    // Overrides the default clamp upper bound with the ReLU6 cap of 6.0
    explicit ReLU6Layer(const LayerParams &prms) : ClampLayer(prms) {
        max_value = 6.0f;
    }

    using ClampLayer::ClampLayer;
};
970 
971 
/**
 * @brief This class represents an element wise operation layer
 */
class EltwiseLayer : public CNNLayer {
public:
    /**
     * @enum eOperation
     * @brief Defines possible operations that can be used
     */
    enum eOperation {
        Sum = 0, Prod, Max, Sub, Min, Div, Squared_diff, Floor_mod, Pow,
        Equal, Not_equal, Less, Less_equal, Greater, Greater_equal,
        Logical_AND, Logical_OR, Logical_XOR
    };

    /**
     * @brief A type of the operation to use
     */
    eOperation _operation = Sum;

    /**
     * @brief A vector of coefficients to scale the operands
     */
    std::vector<float> coeff;

    /**
     * @brief Creates a new EltwiseLayer instance.
     */
    using CNNLayer::CNNLayer;
};
1002 
/**
 * @brief This class represents a standard crop layer
 */
class CropLayer : public CNNLayer {
public:
    /**
     * @brief A vector of dimensions for cropping
     */
    std::vector<int> axis;
    /**
     * @brief A vector of dimensions to be preserved
     */
    std::vector<int> dim;
    /**
     * @brief A vector of offsets for each dimension
     */
    std::vector<int> offset;

    /**
     * @brief Creates a new CropLayer instance.
     */
    using CNNLayer::CNNLayer;
};
1026 
/**
 * @brief This class represents a standard reshape layer
 */
class ReshapeLayer : public CNNLayer {
public:
    /**
     * @brief A vector of sizes of the shape
     */
    std::vector<int> shape;
    /**
     * @brief A number of axis to be taken for a reshape
     */
    int axis = 0;
    /**
     * @brief A number of first axises to be taken for a reshape
     */
    int num_axes = -1;

    /**
     * @brief Creates a new ReshapeLayer instance.
     */
    using CNNLayer::CNNLayer;
};
1050 
/**
 * @brief This class represents a standard Tile Layer
 */
class TileLayer : public CNNLayer {
public:
    /**
     * @brief An index of the axis to tile
     */
    int axis = -1;
    /**
     * @brief A number of copies to be made
     */
    int tiles = -1;

    /**
     * @brief Creates a new TileLayer instance.
     */
    using CNNLayer::CNNLayer;
};
1070 
1071 
1072 /**
1073  * @brief This class represents a Layer which performs Scale and Shift
1074  */
1076 public:
1077  /**
1078  * @brief A flag that indicates if the same value is used for all the features. If false, the value is used pixel wise
1079  */
1080  unsigned int _broadcast = 0;
1081 
1082  /**
1083  * @brief Creates a new ScaleShiftLayer instance.
1084  */
1086 };
1087 
/**
 * @brief This class represents TensorIterator layer
 */
class TensorIterator : public CNNLayer {
public:
    /**
     * @brief Describes how external data of the layer maps onto data
     * inside the iterated body, and how that axis is stepped over.
     */
    struct PortMap {
        // Data map rule
        int from; /**< Index of external data from ins/outs fields of CNNLayer */
        int to;   /**< Index of internal data in iterator body */

        // Iteration rule
        int axis;      /**< Axis to iterate through */
        int stride;    /**< Stride to iterate through */
        int start;     /**< Start index of iteration range */
        int end;       /**< Last index of iteration range */
        int part_size; /**< Part size which will be transferred to body subnetwork */
    };

    /**
     * @brief The subnetwork executed on each iteration.
     */
    struct Body {
        std::vector<DataPtr> inputs;
        std::vector<DataPtr> outputs;
    };

    /** @brief Input data mapping rules */
    std::vector<PortMap> input_port_map;
    /** @brief Output data mapping rules */
    std::vector<PortMap> output_port_map;
    /** @brief Data transferred between iterations (recurrent edges) */
    std::vector<PortMap> back_edges;

    /** @brief The iterated body subnetwork */
    Body body;

    using CNNLayer::CNNLayer;
};
1119 
1120 /**
1121  * @brief Base class for recurrent cell layers
1122  */
1124 public:
1126 
1127  /**
1128  * @brief Direct type of recurrent cell (including subtypes)
1129  * Description of particular cell semantics is in LSTMCell, GRUCell, RNNCell.
1130  */
1131  enum CellType {
1132  LSTM, /**< Original LSTM cell */
1133  GRU, /**< Original GRU cell */
1134  RNN, /**< Original RNN cell */
1135  GRU_LBR, /**< GRU cell modification. "Linear before reset" */
1136  };
1137 
1138  /** @copybrief CellType */
1139  CellType cellType = LSTM;
1140 
1141  /**
1142  * @brief Size of hidden state data
1143  *
1144  * In case of batch output state tensor will have shape [N, hidden_size]
1145  */
1146  int hidden_size = 0;
1147 
1148  /**
1149  * @brief Clip data into range [-clip, clip] on input of activations
1150  *
1151  * clip==0.0f means no clipping
1152  */
1153  float clip = 0.0f;
1154  /**
1155  * @brief Activations used inside recurrent cell
1156  *
1157  * Valid values: sigmoid, tanh, relu
1158  */
1159  std::vector<std::string> activations;
1160 
1161  /**
1162  * @brief Alpha parameters of activations
1163  *
1164  * Respective to activation list.
1165  */
1166  std::vector<float> activation_alpha;
1167 
1168  /**
1169  * @brief Beta parameters of activations
1170  *
1171  * Respective to activation list.
1172  */
1173  std::vector<float> activation_beta;
1174 };
1175 
1176 /**
1177  * @brief LSTM Cell layer
1178  *
1179  * G - number of gates (=4)
1180  * N - batch size
1181  * S - state size (=hidden_size)
1182  *
1183  * Inputs:
1184  * [N,D] Xt - input data
1185  * [N,S] Ht-1 - initial hidden state
1186  * [N,S] Ct-1 - initial cell state
1187  *
1188  * Outputs:
1189  * [N,S] Ht - out hidden state
1190  * [N,S] Ct - out cell state
1191  *
1192  * Weights:
1193  * - weights [G,S,D+S]
1194  * - biases [G,S]
1195  * NB! gates order is FICO {forget, input, candidate, output}
1196  *
1197  * activations is {_f, _g, _h}
1198  * default: {_f=sigm, _g=tanh, _h=tanh}
1199  *
1200  * Equations:
1201  *
1202  * * - matrix mult
1203  * (.) - eltwise mult
1204  * [,] - concatenation
1205  *
1206  * - ft = _f(Wf*[Ht-1, Xt] + Bf)
1207  * - it = _f(Wi*[Ht-1, Xt] + Bi)
1208  * - ct = _g(Wc*[Ht-1, Xt] + Bc)
1209  * - ot = _f(Wo*[Ht-1, Xt] + Bo)
1210  * - Ct = ft (.) Ct-1 + it (.) ct
1211  * - Ht = ot (.) _h(Ct)
1212  */
1214 
1215 /**
1216  * @brief GRU Cell layer
1217  *
1218  * G - number of gates (=3)
1219  * N - batch size
1220  * S - state size (=hidden_size)
1221  *
1222  * Inputs:
1223  * [N,D] Xt - input data
1224  * [N,S] Ht-1 - initial hidden state
1225  *
1226  * Outputs:
1227  * [N,S] Ht - out hidden state
1228  *
1229  * Weights:
1230  * - weights [G,S,D+S]
1231  * - biases [G,S]
1232  * NB! gates order is ZRH {update, reset, output}
1233  *
1234  * activations is {_f, _g}
1235  * default: {_f=sigm, _g=tanh}
1236  *
1237  * Equations:
1238  *
1239  * * - matrix mult
1240  * (.) - eltwise mult
1241  * [,] - concatenation
1242  *
1243  * - zt = _f(Wz*[Ht-1, Xt] + Bz)
1244  * - rt = _f(Wr*[Ht-1, Xt] + Br)
1245  * - ht = _g(Wh*[rt (.) Ht-1, Xt] + Bh)
1246  * - Ht = (1 - zt) (.) ht + zt (.) Ht-1
1247  */
1249 
1250 /**
1251  * @brief RNN Cell layer
1252  *
1253  * G - number of gates (=1)
1254  * N - batch size
1255  * S - state size (=hidden_size)
1256  *
1257  * Inputs:
1258  * [N,D] Xt - input data
1259  * [N,S] Ht-1 - initial hidden state
1260  *
1261  * Outputs:
1262  * [N,S] Ht - out hidden state
1263  *
1264  * Weights:
1265  * - weights [G,S,D+S]
1266  * - biases [G,S]
1267  *
1268  * activations is {_f}
1269  * default: {_f=tanh}
1270  *
1271  * Equations:
1272  *
1273  * * - matrix mult
1274  * [,] - concatenation
1275  *
1276  * - Ht = _f(Wi*[Ht-1, Xt] + Bi)
1277  */
1279 
1280 /**
1281  * @brief Sequence of recurrent cells
1282  *
1283  * N - batch size
1284  * T - sequence size
1285  * S - state size (=hidden_size)
1286  * NS - num of state tensors (LSTM=2, GRU/RNN=1)
1287  * ND - num of direction (BDR=2, WFD/BWD=1)
1288  *
1289  * Inputs:
1290  * [N,T,D] Xt - input data
1291  * [ND,N,S] Ht-1 - initial hidden state
1292  * [ND,N,S] Ct-1 - initial cell state // if NS==2
1293  *
1294  * Outputs:
1295  * [ND,N,T,S] Xt - input data
1296  * [ND,N,S] Ht-1 - initial hidden state
1297  * [ND,N,S] Ct-1 - initial cell state // if NS==2
1298  *
1299  * NB! if axis==0 batch and sequense dimensions are swapped (N <-> T) for input and output tensors
1300  *
1301  * Weights:
1302  * - weights [ND,G,S,D+S]
1303  * - biases [ND,G,S]
1304  * NB! if ND==2 weights are concatenated cell weights [forward_cell_weights, backward_cell_weights]
1305  *
1306  */
1308 public:
1309  using RNNCellBase::RNNCellBase;
1310 
1311  /**
1312  * @brief An axis by which iteration is performed
1313  * axis=0 means first input/output data blob dimension is sequence
1314  * axis=1 means first input/output data blob dimension is batch
1315  */
1316  unsigned int axis = 1;
1317 
1318  /**
1319  * @brief Direction of iteration through sequence dimension
1320  */
1321  enum Direction {
1322  FWD, /**< Forward mode. Iterate starts from index 0 with step 1. */
1323  BWD, /**< Backward mode. Iterate starts from last index with step -1. */
1324  BDR /**< Bidirectional mode. First is forward pass, second is backward. */
1325  };
1326 
1327  /** @copybrief Direction */
1328  Direction direction = FWD;
1329 };
1330 
1331 /**
1332  * @brief This class represents a Layer which performs Scale and Shift
1333  */
1334 class PReLULayer : public WeightableLayer {
1335 public:
1336  /**
1337  * @brief A flag that indicates if the same negative_slope value is used for all the features. If false, the value is used pixel wise
1338  */
1340 
1341 public:
1342  /**
1343  * @brief A default constructor. Creates a new PReLULayer instance and initializes layer parameters with the given values.
1344  * @param prms Initial layer parameters
1345  */
1346  explicit PReLULayer(const LayerParams &prms) : WeightableLayer(prms), _channel_shared(false) {}
1347 };
1348 
1349 /**
1350  * @brief This class represents a standard Power Layer
1351  * Formula is: output = (offset + scale * input) ^ power
1352  */
1353 class PowerLayer : public CNNLayer {
1354 public:
1355  /**
1356  * @brief An exponent value
1357  */
1358  float power = 1.f;
1359  /**
1360  * @brief A scale factor
1361  */
1362  float scale = 1.f;
1363  /**
1364  * @brief An offset value
1365  */
1366  float offset = 0.f;
1367 
1368  /**
1369  * @brief Creates a new PowerLayer instance.
1370  */
1371  using CNNLayer::CNNLayer;
1372 };
1373 
1374 /**
1375  * @brief This class represents a Batch Normalization Layer
1376  */
1378 public:
1379  /**
1380  * @brief A small value to add to the variance estimate to avoid division by zero
1381  */
1382  float epsilon = 1e-3f;
1383 
1384  /**
1385  * @brief Creates a new BatchNormalizationLayer instance.
1386  */
1388 };
1389 
1390 /**
1391  * @brief This class represents a general matrix multiplication operation layer
1392  * Formula is: dst := alpha*src1*src2 + beta*src3
1393  */
1394 class GemmLayer : public CNNLayer {
1395 public:
1396  /**
1397  * @brief A scale factor of src1 matrix
1398  */
1399  float alpha = 1.f;
1400  /**
1401  * @brief A scale factor of src3 matrix
1402  */
1403  float beta = 1.f;
1404  /**
1405  * @brief A flag that indicates if the src1 matrix is to be transposed
1406  */
1407  bool transpose_a = false;
1408  /**
1409  * @brief A flag that indicates if the src2 matrix is to be transposed
1410  */
1411  bool transpose_b = false;
1412  /**
1413  * @brief Creates a new GemmLayer instance.
1414  */
1415  using CNNLayer::CNNLayer;
1416 };
1417 
1418 /**
1419  * @brief This class represents a standard Pad layer
1420  * Adds paddings to input tensor
1421  */
1422 class PadLayer : public CNNLayer {
1423 public:
1424  /**
1425  * @enum ePadMode
1426  * @brief Defines possible modes of pad operation
1427  */
1428  enum ePadMode {
1429  Constant = 0, Edge, Reflect, Symmetric
1430  };
1431 
1432  /**
1433  * @brief Size of padding in the beginning of each axis
1434  */
1436  /**
1437  * @brief Size of padding in the end of each axis
1438  */
1440  /**
1441  * @brief Mode of pad operation
1442  */
1443  ePadMode pad_mode = Constant;
1444  /**
1445  * @brief A pad value which is used for filling in Constant mode
1446  */
1447  float pad_value = 0.0f;
1448  /**
1449  * @brief Creates a new PadLayer instance.
1450  */
1451  using CNNLayer::CNNLayer;
1452 };
1453 
1454 /**
1455  * @brief This class represents a standard Gather layer
1456  * Gather slices from Dictionary according to Indexes
1457  */
1458 class GatherLayer : public CNNLayer {
1459 public:
1460  /**
1461  * @brief The axis in Dictionary to gather Indexes from
1462  */
1463  int axis = 0;
1464  /**
1465  * @brief Creates a new GatherLayer instance.
1466  */
1467  using CNNLayer::CNNLayer;
1468 };
1469 
1470 /**
1471  * @brief This class represents a standard Strided Slice layer
1472  * Strided Slice picks from input tensor according parameters
1473  */
1474 class StridedSliceLayer : public CNNLayer {
1475 public:
1476  /**
1477  * @brief The begin_mask is a bitmask where bit i being 0 means
1478  * to ignore the begin value and instead use the default value
1479  */
1480  std::string begin_mask;
1481  /**
1482  * @brief Analogous to begin_mask
1483  */
1484  std::string end_mask;
1485  /**
1486  * @brief The ellipsis_mask is a bitmask where bit i being 1 means
1487  * the i-th is actually an ellipsis
1488  */
1489  std::string ellipsis_mask;
1490  /**
1491  * @brief The new_axis_mask_ is a bitmask where bit i being 1 means
1492  * the i-th position creates a new 1 dimension shape
1493  */
1494  std::string new_axis_mask;
1495  /**
1496  * @brief The shrink_axis_mask is a bitmask where bit i being 1 means
1497  * the i-th position shrinks the dimensionality
1498  */
1499  std::string shrink_axis_mask;
1500 
1501  /**
1502  * @brief Creates a new StridedSliceLayer instance.
1503  */
1504  using CNNLayer::CNNLayer;
1505 };
1506 
1507 /**
1508 * @brief This class represents a standard Shuffle Channels layer
1509 * Shuffle Channels picks from input tensor according parameters
1510 */
1512 public:
1513  /**
1514  * @brief The axis in tensor to shuffle channels
1515  */
1516  int axis = 1;
1517 
1518  /**
1519  * @brief The group of output shuffled channels
1520  */
1521  unsigned int group = 1;
1522 
1523  /**
1524  * @brief Creates a new ShuffleChannelsLayer instance.
1525  */
1526  using CNNLayer::CNNLayer;
1527 };
1528 
1529 
1530 /**
1531 * @brief This class represents a standard Depth To Space layer
1532 * Depth To Space picks from input tensor according parameters
1533 */
1534 class DepthToSpaceLayer : public CNNLayer {
1535 public:
1536  /**
1537  * @brief The group of output shuffled channels
1538  */
1539  unsigned int block_size = 1;
1540 
1541  /**
1542  * @brief Creates a new DepthToSpaceLayer instance.
1543  */
1544  using CNNLayer::CNNLayer;
1545 };
1546 
1547 
1548 /**
1549 * @brief This class represents a standard Space To Depth layer
1550 * Depth To Space picks from input tensor according parameters
1551 */
1552 class SpaceToDepthLayer : public CNNLayer {
1553 public:
1554  /**
1555  * @brief The group of output Space To Depth
1556  */
1557  unsigned int block_size = 1;
1558 
1559  /**
1560  * @brief Creates a new SpaceToDepthLayer instance.
1561  */
1562  using CNNLayer::CNNLayer;
1563 };
1564 
1565 
1566 /**
1567 * @brief This class represents a standard Reverse Sequence layer
1568 * Reverse Sequence modifies input tensor according parameters
1569 */
1571 public:
1572  /**
1573  * @brief The seq_axis dimension in tensor which is partially reversed
1574  */
1575  int seq_axis = 1;
1576 
1577  /**
1578  * @brief The batch_axis dimension in tensor along which reversal is performed
1579  */
1580  int batch_axis = 0;
1581 
1582  /**
1583  * @brief Creates a new ReverseSequence instance.
1584  */
1585  using CNNLayer::CNNLayer;
1586 };
1587 
1588 
1589 /**
1590 * @brief This class represents a standard Squeeze layer
1591 * Squeeze modifies input tensor dimensions according parameters
1592 */
1593 class SqueezeLayer : public CNNLayer {
1594 public:
1595  /**
1596  * @brief Creates a new Squeeze instance.
1597  */
1598  using CNNLayer::CNNLayer;
1599 };
1600 
1601 
1602 /**
1603 * @brief This class represents a standard Unsqueeze layer
1604 * Unsqueeze modifies input tensor dimensions according parameters
1605 */
1606 class UnsqueezeLayer : public CNNLayer {
1607 public:
1608  /**
1609  * @brief Creates a new Unsqueeze instance.
1610  */
1611  using CNNLayer::CNNLayer;
1612 };
1613 
1614 
1615 /**
1616 * @brief This class represents a standard RangeLayer layer
1617 * RangeLayer modifies input tensor dimensions according parameters
1618 */
1619 class RangeLayer : public CNNLayer {
1620 public:
1621  /**
1622  * @brief Creates a new RangeLayer instance.
1623  */
1624  using CNNLayer::CNNLayer;
1625 };
1626 
1627 
1628 /**
1629 * @brief This class represents a standard Fill layer
1630 * RFill modifies input tensor according parameters
1631 */
1632 class FillLayer : public CNNLayer {
1633 public:
1634  /**
1635  * @brief Creates a new Fill instance.
1636  */
1637  using CNNLayer::CNNLayer;
1638 };
1639 
1640 
1641 /**
1642 * @brief This class represents a standard Expand layer
1643 * Expand modifies input tensor dimensions according parameters
1644 */
1645 class ExpandLayer : public CNNLayer {
1646 public:
1647  /**
1648  * @brief Creates a new Expand instance.
1649  */
1650  using CNNLayer::CNNLayer;
1651 };
1652 
1653 /**
1654  * @brief This class represents a quantization operation layer
1655  * Element-wise linear quantization of floating point input values into a descrete set of floating point values
1656  */
1657 class QuantizeLayer : public CNNLayer {
1658 public:
1659  /**
1660  * @brief The number of quantization levels
1661  */
1662  int levels = 1;
1663 
1664  /**
1665  * @brief Creates a new QuantizeLayer instance.
1666  */
1667  using CNNLayer::CNNLayer;
1668 };
1669 
1670 } // namespace InferenceEngine
int GetParamAsInt(const char *param, int def) const
Returns an integer value for the given parameter or returns the default value.
Definition: ie_layers.h:203
BinaryConvolutionLayer(const LayerParams &p)
Creates a new BinaryConvolutionLayer instance.
Definition: ie_layers.h:742
std::shared_ptr< CNNLayer > Ptr
A shared pointer to CNNLayer.
Definition: ie_layers.h:45
#define THROW_IE_EXCEPTION
A macro used to throw the exception with a notable description.
Definition: ie_exception.hpp:22
#define DEFINE_PROP(prop_name)
convenient way to declare a property with backward compatibility to 2D members
Definition: ie_layers.h:502
PoolingLayer(const PoolingLayer &that)
copy constructor
Definition: ie_layers.h:668
std::vector< int > axis
A vector of dimensions for cropping.
Definition: ie_layers.h:1011
virtual const DataPtr input() const
Returns the first element of the input data for this layer.
Definition: ie_layers.h:105
This class represents a Layer which performs Scale and Shift.
Definition: ie_layers.h:1334
std::string type
Layer type.
Definition: ie_layers.h:54
unsigned int _group
Number of groups.
Definition: ie_layers.h:539
PoolType _type
A pooling type.
Definition: ie_layers.h:628
This class represents a standard Strided Slice layer Strided Slice picks from input tensor according ...
Definition: ie_layers.h:1474
unsigned int GetParamAsUInt(const char *param, unsigned int def) const
Returns an unsigned integer value for the given parameter or returns the default value.
Definition: ie_layers.h:279
PoolType
Defines available pooling types.
Definition: ie_layers.h:617
float GetParamAsFloat(const char *param, float def) const
Gets float value for the given parameter.
Definition: ie_layers.h:127
This class represents a standard crop layer.
Definition: ie_layers.h:1006
std::vector< float > activation_beta
Beta parameters of activations.
Definition: ie_layers.h:1173
The method holds the user values to enable binding of data per graph node.
Definition: ie_common.h:66
This structure describes ROI data.
Definition: ie_blob.h:842
PReLULayer(const LayerParams &prms)
A default constructor. Creates a new PReLULayer instance and initializes layer parameters with the gi...
Definition: ie_layers.h:1346
This class represents a standard Power Layer Formula is: output = (offset + scale * input) ^ power...
Definition: ie_layers.h:1353
std::string begin_mask
The begin_mask is a bitmask where bit i being 0 means to ignore the begin value and instead use the d...
Definition: ie_layers.h:1480
std::vector< int > GetParamAsInts(const char *param, std::vector< int > def) const
Returns a vector of int values for the given parameter or returns the default value.
Definition: ie_layers.h:235
std::vector< int > shape
A vector of sizes of the shape.
Definition: ie_layers.h:1035
Definition: ie_argmax_layer.hpp:11
Definition: ie_layers.h:1322
This is an internal common Layer parameter parsing arguments.
Definition: ie_layers.h:28
bool _channel_shared
A flag that indicates if the same negative_slope value is used for all the features. If false, the value is used pixel wise.
Definition: ie_layers.h:1339
PropertyVector< unsigned int > _dilation
A convolution dilations array [X, Y, Z, ...].
Definition: ie_layers.h:527
This class represents a standard Space To Depth layer Depth To Space picks from input tensor accordin...
Definition: ie_layers.h:1552
eBinaryConvolutionMode _mode
Mode of binary convolution operation.
Definition: ie_layers.h:694
Definition: ie_layers.h:1135
Base class for recurrent cell layers.
Definition: ie_layers.h:1123
PropertyVector< unsigned int > pads_begin
Size of padding in the beginning of each axis.
Definition: ie_layers.h:1435
PropertyVector< unsigned int > _dilation
A convolution dilations array [X, Y, Z, ...].
Definition: ie_layers.h:721
This class represents a layer with Weights and/or Biases (e.g. Convolution/Fully Connected, etc.)
Definition: ie_layers.h:476
Blob::Ptr _weights
A pointer to a weights blob.
Definition: ie_layers.h:487
PropertyVector< unsigned int > _stride
A convolution strides array [X, Y, Z, ...].
Definition: ie_layers.h:527
This class represents a standard Fill layer RFill modifies input tensor according parameters...
Definition: ie_layers.h:1632
WeightableLayer(const LayerParams &prms)
A default constructor. Constructs a WeightableLayer instance and initiates layer parameters with the ...
Definition: ie_layers.h:482
std::vector< DataWeakPtr > insData
A vector of weak pointers to the input data elements of this layer in the di-graph (order matters) ...
Definition: ie_layers.h:66
std::string _auto_pad
Auto padding type.
Definition: ie_layers.h:543
void fuse(Ptr &layer)
Sets a layer to be fused with.
Definition: ie_layers.h:97
A header file for Blob and generic TBlob<>
This class represents a standard deconvolution layer.
Definition: ie_layers.h:585
std::string shrink_axis_mask
The shrink_axis_mask is a bitmask where bit i being 1 means the i-th position shrinks the dimensional...
Definition: ie_layers.h:1499
std::vector< unsigned int > GetParamAsUInts(const char *param) const
Returns a vector of unsigned int values for the given parameter.
Definition: ie_layers.h:349
Direction
Direction of iteration through sequence dimension.
Definition: ie_layers.h:1321
int to
Definition: ie_layers.h:1096
std::vector< int > offset
A vector of offsets for each dimension.
Definition: ie_layers.h:1019
This class represents standard MVN Layer.
Definition: ie_layers.h:901
PropertyVector< unsigned int > _kernel
A convolution kernel array [X, Y, Z, ...].
Definition: ie_layers.h:515
int stride
Definition: ie_layers.h:1100
Ptr _fusedWith
If suggested to fuse - a pointer to the layer which needs to be fused with this layer.
Definition: ie_layers.h:70
ConvolutionLayer(const ConvolutionLayer &that)
copy constructor
Definition: ie_layers.h:573
PropertyVector< unsigned int > _pads_end
Pooling paddings end array [X, Y, Z, ...].
Definition: ie_layers.h:603
std::vector< std::string > activations
Activations used inside recurrent cell.
Definition: ie_layers.h:1159
std::string name
Layer name.
Definition: ie_layers.h:30
class CNNLayer GenericLayer
Alias for CNNLayer object.
Definition: ie_layers.h:471
std::string type
Layer type.
Definition: ie_layers.h:32
This class represents a standard Reverse Sequence layer Reverse Sequence modifies input tensor accord...
Definition: ie_layers.h:1570
This class represents a Clamp activation layer Clamps all tensor elements into the range [min_value...
Definition: ie_layers.h:940
int part_size
Definition: ie_layers.h:1103
This class represents a layer that evenly splits the input into the supplied outputs.
Definition: ie_layers.h:819
Definition: ie_layers.h:1323
Definition: ie_layers.h:1133
eOperation
Defines possible operations that can be used.
Definition: ie_layers.h:981
bool _exclude_pad
A flag that indicates if padding is excluded or not.
Definition: ie_layers.h:633
This class represents an element wise operation layer.
Definition: ie_layers.h:975
PropertyVector< unsigned int > pads_end
Size of padding in the end of each axis.
Definition: ie_layers.h:1439
This class represents standard GRN Layer.
Definition: ie_layers.h:883
This class represents a standard Shuffle Channels layer Shuffle Channels picks from input tensor acco...
Definition: ie_layers.h:1511
This class represents a standard reshape layer.
Definition: ie_layers.h:1030
UserValue userValue
Convenience user values to store in this object as extra data.
Definition: ie_layers.h:74
Sequence of recurrent cells.
Definition: ie_layers.h:1307
PropertyVector< unsigned int > _pads_end
A convolution paddings end array [X, Y, Z, ...].
Definition: ie_layers.h:713
BinaryConvolutionLayer(const BinaryConvolutionLayer &that)
copy constructor
Definition: ie_layers.h:770
GRNLayer(const LayerParams &prms)
A default constructor. Creates a new GRNLayer instance and initializes layer parameters with the give...
Definition: ie_layers.h:889
CNNLayer(const LayerParams &prms)
A constructor. Creates a new CNNLayer instance and initializes layer parameters with the given values...
Definition: ie_layers.h:84
This class represents a fully connected layer.
Definition: ie_layers.h:784
ePadMode
Defines possible modes of pad operation.
Definition: ie_layers.h:1428
std::map< std::string, std::string > params
Map of pairs: (parameter name, parameter value)
Definition: ie_layers.h:460
Definition: ie_layers.h:1132
PoolingLayer(const LayerParams &p)
Creates a new PoolingLayer instance.
Definition: ie_layers.h:642
std::shared_ptr< Blob > Ptr
A smart pointer containing Blob object.
Definition: ie_blob.h:38
std::vector< float > coeff
A vector of coefficients to scale the operands.
Definition: ie_layers.h:995
bool GetParamsAsBool(const char *param, bool def) const
Definition: ie_layers.h:395
std::string end_mask
Analogous to begin_mask.
Definition: ie_layers.h:1484
std::vector< unsigned int > GetParamAsUInts(const char *param, std::vector< unsigned int > def) const
Returns a vector of unsigned int values for the given parameter or returns the default value...
Definition: ie_layers.h:321
std::vector< float > activation_alpha
Alpha parameters of activations.
Definition: ie_layers.h:1166
unsigned int GetParamAsUInt(const char *param) const
Returns an unsigned integer value for the given parameter.
Definition: ie_layers.h:299
This class represents a ReLU6 activation layer Clamps all tensor elements into the range [0...
Definition: ie_layers.h:962
This class represents a Batch Normalization Layer.
Definition: ie_layers.h:1377
unsigned int _group
Number of groups.
Definition: ie_layers.h:733
PropertyVector< unsigned int > _stride
Pooling strides array [X, Y, Z, ...].
Definition: ie_layers.h:611
This class represents a Layer which performs Scale and Shift.
Definition: ie_layers.h:1075
This class represents standard softmax Layer.
Definition: ie_layers.h:867
unsigned int _out_depth
A number of output feature maps (size) generating the 3rd output dimension.
Definition: ie_layers.h:535
PropertyVector< unsigned int > _kernel
A convolution kernel array [X, Y, Z, ...].
Definition: ie_layers.h:709
std::vector< float > GetParamAsFloats(const char *param) const
Returns a vector of float values for the given parameter.
Definition: ie_layers.h:181
This header file defines the main Data representation node.
int end
Definition: ie_layers.h:1102
bool GetParamAsBool(const char *param, bool def) const
Returns an boolean value for the given parameter. The valid values are (true, false, 1, 0).
Definition: ie_layers.h:376
std::vector< int > GetParamAsInts(const char *param) const
Returns a vector of int values for the given parameter.
Definition: ie_layers.h:258
float _pad_value
A pad value which is used to fill pad area.
Definition: ie_layers.h:704
std::string ellipsis_mask
The ellipsis_mask is a bitmask where bit i being 1 means the i-th is actually an ellipsis.
Definition: ie_layers.h:1489
unsigned int _in_depth
A number of input feature maps (size) generating the 3rd input dimension.
Definition: ie_layers.h:699
PropertyVector< unsigned int > _kernel
Pooling kernel array [X, Y, Z, ...].
Definition: ie_layers.h:599
float GetParamAsFloat(const char *param) const
Returns a float value for the given layer parameter.
Definition: ie_layers.h:142
PropertyVector< unsigned int > _padding
Pooling paddings begin array [X, Y, Z, ...].
Definition: ie_layers.h:599
Definition: ie_layers.h:1106
int GetParamAsInt(const char *param) const
Returns an integer value for the given parameter.
Definition: ie_layers.h:218
PropertyVector< unsigned int > _padding
A convolution paddings begin array [X, Y, Z, ...].
Definition: ie_layers.h:709
int across_channels
Indicate that mean value is calculated across channels.
Definition: ie_layers.h:912
Precision precision
Layer base operating precision.
Definition: ie_layers.h:58
This class represents a quantization operation layer Element-wise linear quantization of floating poi...
Definition: ie_layers.h:1657
unsigned int _out_depth
A number of output feature maps (size) generating the 3rd output dimension.
Definition: ie_layers.h:729
MVNLayer(const LayerParams &prms)
A default constructor. Creates a new MVNLayer instance and initializes layer parameters with the give...
Definition: ie_layers.h:907
This class represents a Rectified Linear activation layer.
Definition: ie_layers.h:923
This class represents a standard 3D Convolution Layer.
Definition: ie_layers.h:510
CellType
Direct type of recurrent cell (including subtypes) Description of particular cell semantics is in LST...
Definition: ie_layers.h:1131
std::string _auto_pad
Auto padding type.
Definition: ie_layers.h:637
std::string _auto_pad
Auto padding type.
Definition: ie_layers.h:737
This class represents a standard Pad layer Adds paddings to input tensor.
Definition: ie_layers.h:1422
This header file contains aspects of working on different devices like CPU, GEN, FPGA, etc.
This class represents a standard Tile Layer.
Definition: ie_layers.h:1054
This class represents a standard binary convolution layer.
Definition: ie_layers.h:681
Precision precision
Layer precision.
Definition: ie_layers.h:34
PropertyVector< unsigned int > _pads_end
A convolution paddings end array [X, Y, Z, ...].
Definition: ie_layers.h:519
This class represents a standard Squeeze layer Squeeze modifies input tensor dimensions according par...
Definition: ie_layers.h:1593
Blob::Ptr _biases
A pointer to a biases blob.
Definition: ie_layers.h:491
This class represents a Linear Response Normalization (LRN) Layer.
Definition: ie_layers.h:835
This class represents TensorIterator layer.
Definition: ie_layers.h:1091
This class represents concatenation layer Takes as input several data elements and merges them to one...
Definition: ie_layers.h:801
std::vector< float > GetParamAsFloats(const char *param, std::vector< float > def) const
Returns a vector of float values for the given parameter or returns the default value.
Definition: ie_layers.h:158
This class represents a standard Expand layer Expand modifies input tensor dimensions according param...
Definition: ie_layers.h:1645
This class represents a standard RangeLayer layer RangeLayer modifies input tensor dimensions accordi...
Definition: ie_layers.h:1619
This is a base abstraction Layer - all DNN Layers inherit from this class.
Definition: ie_layers.h:40
std::shared_ptr< Data > DataPtr
Smart pointer to Data.
Definition: ie_common.h:50
int start
Definition: ie_layers.h:1101
std::string affinity
Layer affinity set by user.
Definition: ie_layers.h:78
This class represents a standard pooling layer.
Definition: ie_layers.h:594
PropertyVector< unsigned int > _stride
A convolution strides array [X, Y, Z, ...].
Definition: ie_layers.h:721
Definition: ie_layers.h:1093
This class represents a standard Gather layer Gather slices from Dictionary according to Indexes...
Definition: ie_layers.h:1458
Definition: ie_layers.h:1134
std::string GetParamAsString(const char *param) const
Returns a string value for the given parameter. Throws exception if parameter was not found...
Definition: ie_layers.h:432
This class represents a standard Unsqueeze layer Unsqueeze modifies input tensor dimensions according...
Definition: ie_layers.h:1606
std::string new_axis_mask
The new_axis_mask_ is a bitmask where bit i being 1 means the i-th position creates a new 1 dimension...
Definition: ie_layers.h:1494
PropertyVector< unsigned int > _padding
A convolution paddings begin array [X, Y, Z, ...].
Definition: ie_layers.h:515
std::map< std::string, Blob::Ptr > blobs
Map of pairs: (name, weights/biases blob)
Definition: ie_layers.h:465
std::string GetParamAsString(const char *param, const char *def) const
Returns a string value for the given parameter or returns the default one.
Definition: ie_layers.h:405
std::vector< DataPtr > outData
A vector of pointers to the output data elements of this layer in the di-graph (order matters) ...
Definition: ie_layers.h:62
eBinaryConvolutionMode
Defines possible modes of binary convolution operation.
Definition: ie_layers.h:687
int from
Definition: ie_layers.h:1095
bool CheckParamPresence(const char *param) const
Checks the param presence in the layer.
Definition: ie_layers.h:418
This class represents a standard Depth To Space layer Depth To Space picks from input tensor accordin...
Definition: ie_layers.h:1534
This class represents a general matrix multiplication operation layer Formula is: dst := alpha*src1*s...
Definition: ie_layers.h:1394
int axis
Definition: ie_layers.h:1099
std::vector< int > dim
A vector of dimensions to be preserved.
Definition: ie_layers.h:1015
std::string name
Layer name.
Definition: ie_layers.h:50
This is a header file with common inference engine definitions.
ConvolutionLayer(const LayerParams &p)
Creates a new ConvolutionLayer instance.
Definition: ie_layers.h:548
This class holds precision value and provides precision related operations.
Definition: ie_precision.hpp:19