ie_layers.h
1 // Copyright (C) 2018-2019 Intel Corporation
2 // SPDX-License-Identifier: Apache-2.0
3 //
4 
5 /**
6  * @brief a header file for internal Layers structure to describe layers information
7  * @file ie_layers.h
8  */
9 #pragma once
10 
11 #include <memory>
12 #include <string>
13 #include <vector>
14 #include <algorithm>
15 #include <map>
16 #include <iterator>
17 #include <limits>
18 #include <cctype>
19 #include "ie_common.h"
20 #include "ie_data.h"
21 #include "ie_blob.h"
22 #include "ie_device.hpp"
23 #include "ie_layers_property.hpp"
24 
25 namespace InferenceEngine {
26 /**
27  * @brief This is an internal common Layer parameter parsing arguments
28  */
29 struct LayerParams {
30  /// @brief Layer name
31  std::string name;
32  /// @brief Layer type
33  std::string type;
34  /// @brief Layer precision
35  Precision precision;
36 };
37 
38 /**
39  * @brief This is a base abstraction Layer - all DNN Layers inherit from this class
40  */
41 class CNNLayer {
42 public:
43  /**
44  * @brief A shared pointer to CNNLayer
45  */
46  using Ptr = std::shared_ptr<CNNLayer>;
47 
48  /**
49  * @brief Layer name
50  */
51  std::string name;
52  /**
53  * @brief Layer type
54  */
55  std::string type;
56  /**
57  * @brief Layer base operating precision
58  */
59  Precision precision;
60  /**
61  * @brief A vector of pointers to the output data elements of this layer in the di-graph (order matters)
62  */
63  std::vector<DataPtr> outData;
64  /**
65  * @brief A vector of weak pointers to the input data elements of this layer in the di-graph (order matters)
66  */
67  std::vector<DataWeakPtr> insData;
68  /**
69  * @brief If suggested to fuse - a pointer to the layer which needs to be fused with this layer
70  */
71  Ptr _fusedWith;
72  /**
73  * @brief Convenience user values to store in this object as extra data
74  */
75  UserValue userValue;
76  /**
77  * @brief Layer affinity set by user.
78  */
79  std::string affinity;
80 
81  /**
82  * @brief A constructor. Creates a new CNNLayer instance and initializes layer parameters with the given values.
83  * @param prms Basic common parsing parameters
84  */
85  explicit CNNLayer(const LayerParams &prms) : name(prms.name), type(prms.type),
86  precision(prms.precision), userValue({0}) {
87  }
88 
89  /**
90  * @brief A virtual destructor
91  */
92  virtual ~CNNLayer() = default;
93 
94  /**
95  * @brief Sets a layer to be fused with
96  * @param layer Reference to the layer to be fused with
97  */
98  void fuse(Ptr &layer) {
99  _fusedWith = layer;
100  }
101 
102  /**
103  * @brief Returns the first element of the input data for this layer
104  * @return A smart pointer to the input data element
105  */
106  virtual const DataPtr input() const {
107  if (insData.empty()) {
108  THROW_IE_EXCEPTION << "Internal error: input data is empty";
109  }
110  auto lockedFirstInsData = insData[0].lock();
111  if (!lockedFirstInsData) {
112  THROW_IE_EXCEPTION << "Internal error: unable to lock weak_ptr\n";
113  }
114  return lockedFirstInsData;
115  }
116 
117  /**
118  * @brief Checks if the input data and layer data are legitimate
119  */
120  INFERENCE_ENGINE_API_CPP(void) validateLayer();
121 
122  /**
123  * @brief Parse string with float in accordance with IE rules
124  * @param str input string with float value
125  * @return float value if parsing was successful
126  * @throws InferenceEngineException in case of parsing error
127  */
128  static float ie_parse_float(const std::string &str) {
129  if (str == "-inf") {
130  return -std::numeric_limits<float>::infinity();
131  } else if (str == "inf") {
132  return std::numeric_limits<float>::infinity();
133  } else {
134  float res;
135  std::stringstream val_stream(str);
136  val_stream.imbue(std::locale("C"));
137  val_stream >> res;
138  if (!val_stream.eof()) THROW_IE_EXCEPTION;
139  return res;
140  }
141  }
142 
143  /**
144  * @brief Gets float value for the given parameter
145  * @param param name of the parameter to find
146  * @param def default value of the parameter if not found
147  * @return float value
148  */
149  float GetParamAsFloat(const char* param, float def) const {
150  std::string val = GetParamAsString(param, std::to_string(def).c_str());
151  try {
152  return ie_parse_float(val);
153  } catch (...) {
154  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name
155  << ". Value " << val << " cannot be casted to float.";
156  }
157  }
158 
159  /**
160  * @brief Returns a float value for the given layer parameter
161  * @param param Name of the layer parameter
162  * @return A float value for the specified parameter
163  */
164  float GetParamAsFloat(const char *param) const {
165  std::string val = GetParamAsString(param);
166  try {
167  return ie_parse_float(val);
168  } catch (...) {
169  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name
170  << ". Value " << val << " cannot be casted to float.";
171  }
172  }
173 
174  /**
175  * @brief Returns a vector of float values for the given parameter or returns the default value
176  * @param param Name of the layer parameter
177  * @param def Default value of the parameter if not found
178  * @return vector of float values
179  */
180  std::vector<float> GetParamAsFloats(const char *param, std::vector<float> def) const {
181  std::string vals = GetParamAsString(param, "");
182  std::vector<float> result;
183  std::istringstream stream(vals);
184  std::string str;
185  if (vals.empty())
186  return def;
187  while (getline(stream, str, ',')) {
188  try {
189  float val = ie_parse_float(str);
190  result.push_back(val);
191  } catch (...) {
192  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
193  << ". Value " << vals << " cannot be casted to floats.";
194  }
195  }
196  return result;
197  }
198 
199  /**
200  * @brief Returns a vector of float values for the given parameter
201  * @param param Name of the layer parameter
202  * @return vector of float values
203  */
204  std::vector<float> GetParamAsFloats(const char *param) const {
205  std::string vals = GetParamAsString(param);
206  std::vector<float> result;
207  std::istringstream stream(vals);
208  std::string str;
209  while (getline(stream, str, ',')) {
210  try {
211  float val = ie_parse_float(str);
212  result.push_back(val);
213  } catch (...) {
214  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
215  << ". Value " << vals << " cannot be casted to floats.";
216  }
217  }
218  return result;
219  }
220 
221  /**
222  * @brief Returns an integer value for the given parameter or returns the default value
223  * @param param Name of the layer parameter
224  * @param def Default value of the parameter if not found
225  * @return An int value for the specified parameter
226  */
227  int GetParamAsInt(const char *param, int def) const {
228  std::string val = GetParamAsString(param, std::to_string(def).c_str());
229  try {
230  return std::stoi(val);
231  } catch (...) {
232  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name
233  << ". Value " << val << " cannot be casted to int.";
234  }
235  }
236 
237  /**
238  * @brief Returns an integer value for the given parameter
239  * @param param Name of the layer parameter
240  * @return An int value for the specified parameter
241  */
242  int GetParamAsInt(const char *param) const {
243  std::string val = GetParamAsString(param);
244  try {
245  return std::stoi(val);
246  } catch (...) {
247  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name
248  << ". Value " << val << " cannot be casted to int.";
249  }
250  }
251 
252 
253  /**
254  * @brief Returns a vector of int values for the given parameter or returns the default value
255  * @param param Name of the layer parameter
256  * @param def Default value of the parameter if not found
257  * @return vector of int values
258  */
259  std::vector<int> GetParamAsInts(const char *param, std::vector<int> def) const {
260  std::string vals = GetParamAsString(param, "");
261  std::vector<int> result;
262  std::istringstream stream(vals);
263  std::string str;
264  if (vals.empty())
265  return def;
266  while (getline(stream, str, ',')) {
267  try {
268  result.push_back(std::stoi(str));
269  } catch (...) {
270  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
271  << ". Value " << vals << " cannot be casted to int.";
272  }
273  }
274  return result;
275  }
276 
277  /**
278  * @brief Returns a vector of int values for the given parameter
279  * @param param Name of the layer parameter
280  * @return vector of int values
281  */
282  std::vector<int> GetParamAsInts(const char *param) const {
283  std::string vals = GetParamAsString(param);
284  std::vector<int> result;
285  std::istringstream stream(vals);
286  std::string str;
287  while (getline(stream, str, ',')) {
288  try {
289  result.push_back(std::stoi(str));
290  } catch (...) {
291  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
292  << ". Value " << vals << " cannot be casted to int.";
293  }
294  }
295  return result;
296  }
297  /**
298  * @brief Returns an unsigned integer value for the given parameter or returns the default value
299  * @param param Name of the layer parameter
300  * @param def Default value of the parameter if not found
301  * @return An unsigned integer value for the specified parameter
302  */
303  unsigned int GetParamAsUInt(const char *param, unsigned int def) const {
304  std::string val = GetParamAsString(param, std::to_string(def).c_str());
305  std::string message = "Cannot parse parameter " + std::string(param) + " from IR for layer " + name
306  + ". Value " + val + " cannot be casted to int.";
307  try {
308  int value = std::stoi(val);
309  if (value < 0) {
310  THROW_IE_EXCEPTION << message;
311  }
312  return static_cast<unsigned int>(value);
313  } catch (...) {
314  THROW_IE_EXCEPTION << message;
315  }
316  }
317 
318  /**
319  * @brief Returns an unsigned integer value for the given parameter
320  * @param param Name of the layer parameter
321  * @return An unsigned integer value for the specified parameter
322  */
323  unsigned int GetParamAsUInt(const char *param) const {
324  std::string val = GetParamAsString(param);
325  std::string message = "Cannot parse parameter " + std::string(param) + " from IR for layer " + name
326  + ". Value " + val + " cannot be casted to unsigned int.";
327  try {
328  int value = std::stoi(val);
329  if (value < 0) {
330  THROW_IE_EXCEPTION << message;
331  }
332  return static_cast<unsigned int>(value);
333  } catch (...) {
334  THROW_IE_EXCEPTION << message;
335  }
336  }
337 
338 
339  /**
340  * @brief Returns a vector of unsigned int values for the given parameter or returns the default value
341  * @param param Name of the layer parameter
342  * @param def Default value of the parameter if not found
343  * @return vector of unsigned int values
344  */
345  std::vector<unsigned int> GetParamAsUInts(const char *param, std::vector<unsigned int> def) const {
346  std::string vals = GetParamAsString(param, "");
347  std::vector<unsigned int> result;
348  std::istringstream stream(vals);
349  std::string str;
350  std::string message = "Cannot parse parameter " + std::string(param) + " " + str + " from IR for layer " + name
351  + ". Value " + vals + " cannot be casted to unsigned int.";
352  if (vals.empty())
353  return def;
354  while (getline(stream, str, ',')) {
355  try {
356  int value = std::stoi(str);
357  if (value < 0) {
358  THROW_IE_EXCEPTION << message;
359  }
360  result.push_back(static_cast<unsigned int>(value));
361  } catch (...) {
362  THROW_IE_EXCEPTION << message;
363  }
364  }
365  return result;
366  }
367 
368  /**
369  * @brief Returns a vector of unsigned int values for the given parameter
370  * @param param Name of the layer parameter
371  * @return vector of unsigned int values
372  */
373  std::vector<unsigned int> GetParamAsUInts(const char *param) const {
374  std::string vals = GetParamAsString(param);
375  std::vector<unsigned int> result;
376  std::istringstream stream(vals);
377  std::string str;
378  std::string message = "Cannot parse parameter " + std::string(param) + " " + str + " from IR for layer " + name
379  + ". Value " + vals + " cannot be casted to int.";
380  while (getline(stream, str, ',')) {
381  try {
382  int value = std::stoi(str);
383  if (value < 0) {
384  THROW_IE_EXCEPTION << message;
385  }
386  result.push_back(static_cast<unsigned int>(value));
387  } catch (...) {
388  THROW_IE_EXCEPTION << message;
389  }
390  }
391  return result;
392  }
393  /**
394  * @brief Returns a boolean value for the given parameter.
395  * The valid values are (true, false, 1, 0).
396  * @param param Name of the layer parameter
397  * @param def Default value of the parameter if not found
398  * @return A bool value for the specified parameter
399  */
400  bool GetParamAsBool(const char *param, bool def) const {
401  std::string val = GetParamAsString(param, std::to_string(def).c_str());
402  std::string loweredCaseValue;
403  std::transform(val.begin(), val.end(), std::back_inserter(loweredCaseValue), [](char value) {
404  return std::tolower(value);
405  });
406 
407  bool result = false;
408 
409  if (!(std::istringstream(loweredCaseValue) >> std::boolalpha >> result)) {
410  // attempting parse using non alpha bool
411  return (GetParamAsInt(param, def) != 0);
412  }
413 
414  return result;
415  }
416  /**
417  * @deprecated Use CNNLayer::GetParamAsBool
418  */
419  INFERENCE_ENGINE_DEPRECATED
420  bool GetParamsAsBool(const char *param, bool def) const {
421  return GetParamAsBool(param, def);
422  }
423 
424  /**
425  * @brief Returns a string value for the given parameter or returns the default one
426  * @param param Name of the layer parameter
427  * @param def Default value of the parameter if not found
428  * @return A string value
429  */
430  std::string GetParamAsString(const char *param, const char *def) const {
431  auto it = params.find(param);
432  if (it == params.end() || it->second.empty()) {
433  return def;
434  }
435  return (*it).second;
436  }
437 
438  /**
439  * @brief Checks the param presence in the layer
440  * @param param Name of the layer parameter
441  * @return True if the layer has the parameter, false otherwise
442  */
443  bool CheckParamPresence(const char *param) const {
444  auto it = params.find(param);
445  if (it == params.end()) {
446  return false;
447  }
448  return true;
449  }
450 
451  /**
452  * @brief Returns a string value for the given parameter.
453  * Throws exception if parameter was not found.
454  * @param param Name of the layer parameter
455  * @return A string value
456  */
457  std::string GetParamAsString(const char *param) const {
458  auto it = params.find(param);
459  if (it == params.end()) {
460  THROW_IE_EXCEPTION << "No such parameter name '" << param << "' for layer " << name;
461  }
462  return (*it).second;
463  }
464 
465  std::vector<std::string> GetParamAsStrings(const char *param, std::vector<std::string> def) const {
466  std::string vals = GetParamAsString(param, "");
467  std::vector<std::string> result;
468  std::istringstream stream(vals);
469  std::string str;
470  if (vals.empty())
471  return def;
472  while (getline(stream, str, ',')) {
473  try {
474  result.push_back(str);
475  } catch (...) {
476  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ".";
477  }
478  }
479  return result;
480  }
481 
482  /**
483  * @brief Map of pairs: (parameter name, parameter value)
484  */
485  std::map<std::string, std::string> params;
486 
487  /**
488  * @brief Map of pairs: (name, weights/biases blob)
489  */
490  std::map<std::string, Blob::Ptr> blobs;
491 };
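// A minimal usage sketch of the parameter getters above (illustrative only): `layer` is
// assumed to be a CNNLayer::Ptr taken from a parsed network, and the parameter names are
// examples rather than attributes guaranteed for any particular layer type.
//
//     int group                         = layer->GetParamAsInt("group", 1);
//     float alpha                       = layer->GetParamAsFloat("alpha", 1.0f);
//     std::vector<unsigned int> strides = layer->GetParamAsUInts("strides", {1u, 1u});
//     bool acrossSpatial                = layer->GetParamAsBool("across_spatial", true);
//     std::string autoPad               = layer->GetParamAsString("auto_pad", "");
//     if (layer->CheckParamPresence("dilations")) {
//         auto dilations = layer->GetParamAsUInts("dilations");
//     }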
492 
493 /**
494  * @brief Alias for CNNLayer object
495  */
496 using GenericLayer = class CNNLayer;
497 
498 /**
499  * @brief This class represents a layer with Weights and/or Biases (e.g. Convolution/Fully Connected, etc.)
500  */
501 class WeightableLayer : public CNNLayer {
502 public:
503  /**
504  * @brief A default constructor. Constructs a WeightableLayer instance and initiates layer parameters with the given values
505  * @param prms Initial layer parameters
506  */
507  explicit WeightableLayer(const LayerParams &prms) : CNNLayer(prms) {}
508 
509  /**
510  * @brief A pointer to a weights blob
511  */
512  Blob::Ptr _weights;
513  /**
514  * @brief A pointer to a biases blob
515  */
516  Blob::Ptr _biases;
517 
518  /**
519  * @brief Constructs a WeightableLayer instance and initiates layer parameters with the given values
520  */
521  using CNNLayer::CNNLayer;
522 };
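// An illustrative sketch (assumptions: `layer` is a CNNLayer::Ptr from a parsed network and
// the "biases" blob name follows the usual IR convention): the weight and bias data of a
// weightable layer are reachable both through the typed members above and through the
// generic CNNLayer::blobs map.
//
//     if (auto wl = std::dynamic_pointer_cast<WeightableLayer>(layer)) {
//         if (wl->_weights) {
//             size_t weightCount = wl->_weights->size();  // number of weight elements
//         }
//         auto it = wl->blobs.find("biases");
//         if (it != wl->blobs.end() && it->second) {
//             // bias blob is attached to the layer
//         }
//     }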
523 
524 /**
525  * @brief A convenient way to declare a property with backward compatibility to 2D members
526  */
527 #define DEFINE_PROP(prop_name) \
528 PropertyVector<unsigned int> prop_name;\
529 unsigned int &prop_name##_x = prop_name.at(X_AXIS);\
530 unsigned int &prop_name##_y = prop_name.at(Y_AXIS);\
531 
532 /**
533  * @brief This class represents a standard 3D Convolution Layer
534  */
535 class ConvolutionLayer : public WeightableLayer {
536 public:
537  /**
538  * @brief A convolution kernel array [X, Y, Z, ...]
539  */
540  DEFINE_PROP(_kernel);
541  /**
542  * @brief A convolution paddings begin array [X, Y, Z, ...]
543  */
544  DEFINE_PROP(_padding);
545  /**
546  * @brief A convolution paddings end array [X, Y, Z, ...]
547  */
548  PropertyVector<unsigned int> _pads_end;
549  /**
550  * @brief A convolution strides array [X, Y, Z, ...]
551  */
552  DEFINE_PROP(_stride);
553  /**
554  * @brief A convolution dilations array [X, Y, Z, ...]
555  */
556  DEFINE_PROP(_dilation);
557  /**
558  * @brief A number of output feature maps (size) generating the 3'rd output dimension
559  */
560  unsigned int _out_depth = 0u;
561  /**
562  * @brief Number of groups
563  */
564  unsigned int _group = 1u;
565  /**
566  * @brief Auto padding type
567  */
568  std::string _auto_pad;
569 
570  /**
571  * @brief Creates a new ConvolutionLayer instance.
572  */
573  explicit ConvolutionLayer(const LayerParams &p) : WeightableLayer(p),
574  _kernel(2, 0u), _padding(2, 0u), _stride(2, 1u), _dilation(2, 1u) {}
575  /**
576  * @brief assignment operator
577  */
578  ConvolutionLayer & operator = (const ConvolutionLayer & that) {
579  if (&that != this) {
580  WeightableLayer::operator=(that);
581  _kernel = that._kernel;
582  _padding = that._padding;
583  _pads_end = that._pads_end;
584  _stride = that._stride;
585  _dilation = that._dilation;
586  _out_depth = that._out_depth;
587  _group = that._group;
588  }
589  return *this;
590  }
591  /**
592  * @brief move assignment operator
593  */
594  ConvolutionLayer& operator = (ConvolutionLayer &&) = default;
595  /**
596  * @brief copy constructor
597  */
598  ConvolutionLayer(const ConvolutionLayer & that) : WeightableLayer(that) {
599  operator = (that);
600  }
601  /**
602  * @brief move constructor
603  */
604  ConvolutionLayer(ConvolutionLayer &&) = default;
605 };
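// A small illustrative sketch of the DEFINE_PROP aliases (the concrete values are made up):
// legacy 2D code can keep using the _x/_y references while N-D code indexes the PropertyVector.
//
//     ConvolutionLayer conv(LayerParams{"conv1", "Convolution", Precision::FP32});
//     conv._kernel_x = 3;                         // same storage as conv._kernel.at(X_AXIS)
//     conv._kernel_y = 3;                         // same storage as conv._kernel.at(Y_AXIS)
//     conv._stride_x = 2;
//     conv._out_depth = 64;
//     unsigned int kx = conv._kernel.at(X_AXIS);  // == 3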
606 
607 /**
608  * @brief This class represents a standard deconvolution layer
609  */
610 class DeconvolutionLayer : public ConvolutionLayer {
611  public:
612  using ConvolutionLayer::ConvolutionLayer;
613  using ConvolutionLayer::operator=;
614 };
615 
616 /**
617  * @brief This class represents a standard deformable convolution layer
618  */
619 class DeformableConvolutionLayer : public ConvolutionLayer {
620 public:
621  using ConvolutionLayer::ConvolutionLayer;
622  using ConvolutionLayer::operator=;
623 
624  /**
625  * @brief Number of deformable groups
626  */
627  unsigned int _deformable_group = 1u;
628 };
629 
630 /**
631  * @brief This class represents a standard pooling layer
632  */
633 class PoolingLayer : public CNNLayer {
634 public:
635  /**
636  * @brief Pooling kernel array [X, Y, Z, ...]
637  */
638  DEFINE_PROP(_kernel);
639  /**
640  * @brief Pooling paddings begin array [X, Y, Z, ...]
641  */
642  DEFINE_PROP(_padding);
643  /**
644  * @brief Pooling paddings end array [X, Y, Z, ...]
645  */
646  PropertyVector<unsigned int> _pads_end;
647  /**
648  * @brief Pooling strides array [X, Y, Z, ...]
649  */
650  DEFINE_PROP(_stride);
651 
652  /**
653  * @enum PoolType
654  * @brief Defines available pooling types
655  */
656  enum PoolType {
657  MAX = 1,
658  AVG = 2,
659  STOCH = 3,
660  ROI = 4,
661  SPACIAL_PYRAMID = 5
662  };
663 
664  /**
665  * @brief A pooling type
666  */
667  PoolType _type = MAX;
668 
669  /**
670  * @brief A flag that indicates if padding is excluded or not
671  */
672  bool _exclude_pad = false;
673  /**
674  * @brief Auto padding type
675  */
676  std::string _auto_pad;
677 
678  /**
679  * @brief Creates a new PoolingLayer instance.
680  */
681  explicit PoolingLayer(const LayerParams &p) : CNNLayer(p),
682  _kernel(2, 0u), _padding(2, 0u), _stride(2, 0u) {}
683 
684  /**
685  * @brief assignment operator
686  */
687  PoolingLayer & operator = (const PoolingLayer & that) {
688  if (&that != this) {
689  CNNLayer::operator=(that);
690  _kernel = that._kernel;
691  _padding = that._padding;
692  _pads_end = that._pads_end;
693  _stride = that._stride;
694  _type = that._type;
695  _exclude_pad = that._exclude_pad;
696  }
697  return *this;
698  }
699  /**
700  * @brief move assignment operator
701  */
702  PoolingLayer& operator = (PoolingLayer &&) = default;
703 
704  /**
705  * @brief copy constructor
706  */
707  PoolingLayer(const PoolingLayer & that) : CNNLayer(that) {
708  operator=(that);
709  }
710 
711  /**
712  * @brief move constructor
713  */
714  PoolingLayer(PoolingLayer &&) = default;
715 };
716 
717 /**
718  * @brief This class represents a standard binary convolution layer
719  */
720 class BinaryConvolutionLayer : public WeightableLayer {
721 public:
722  /**
723  * @enum eBinaryConvolutionMode
724  * @brief Defines possible modes of binary convolution operation
725  */
726  enum eBinaryConvolutionMode {
727  xnor_popcount = 0
728  };
729 
730  /**
731  * @brief Mode of binary convolution operation
732  */
733  eBinaryConvolutionMode _mode = xnor_popcount;
734 
735  /**
736  * @brief A number of input feature maps (size) generating the 3'rd input dimension
737  */
738  unsigned int _in_depth = 0u;
739 
740  /**
741  * @brief A pad value which is used to fill pad area
742  */
743  float _pad_value = 0.0f;
744 
745  /**
746  * @brief A convolution kernel array [X, Y, Z, ...]
747  */
748  DEFINE_PROP(_kernel);
749  /**
750  * @brief A convolution paddings begin array [X, Y, Z, ...]
751  */
752  DEFINE_PROP(_padding);
753  /**
754  * @brief A convolution paddings end array [X, Y, Z, ...]
755  */
756  PropertyVector<unsigned int> _pads_end;
757  /**
758  * @brief A convolution strides array [X, Y, Z, ...]
759  */
760  DEFINE_PROP(_stride);
761  /**
762  * @brief A convolution dilations array [X, Y, Z, ...]
763  */
764  DEFINE_PROP(_dilation);
765  /**
766  * @brief A number of output feature maps (size) generating the 3'rd output dimension
767  */
768  unsigned int _out_depth = 0u;
769  /**
770  * @brief Number of groups
771  */
772  unsigned int _group = 1u;
773  /**
774  * @brief Auto padding type
775  */
776  std::string _auto_pad;
777 
778  /**
779  * @brief Creates a new BinaryConvolutionLayer instance.
780  */
781  explicit BinaryConvolutionLayer(const LayerParams &p) : WeightableLayer(p),
782  _kernel(2, 0u), _padding(2, 0u), _stride(2, 1u), _dilation(2, 1u) {}
783  /**
784  * @brief assignment operator
785  */
786  BinaryConvolutionLayer & operator = (const BinaryConvolutionLayer & that) {
787  if (&that != this) {
788  WeightableLayer::operator=(that);
789  _kernel = that._kernel;
790  _padding = that._padding;
791  _pads_end = that._pads_end;
792  _stride = that._stride;
793  _dilation = that._dilation;
794  _out_depth = that._out_depth;
795  _group = that._group;
796  _mode = that._mode;
797  _in_depth = that._in_depth;
798  _pad_value = that._pad_value;
799  }
800  return *this;
801  }
802  /**
803  * @brief move assignment operator
804  */
805  BinaryConvolutionLayer& operator = (BinaryConvolutionLayer &&) = default;
806  /**
807  * @brief copy constructor
808  */
809  BinaryConvolutionLayer(const BinaryConvolutionLayer & that) : WeightableLayer(that) {
810  operator = (that);
811  }
812  /**
813  * @brief move constructor
814  */
815  BinaryConvolutionLayer(BinaryConvolutionLayer &&) = default;
816 };
817 
818 #undef DEFINE_PROP
819 
820 /**
821  * @brief This class represents a fully connected layer
822  */
823 class FullyConnectedLayer : public WeightableLayer {
824 public:
825  /**
826  * @brief A size of output
827  */
828  unsigned int _out_num = 0;
829 
830  /**
831  * @brief Creates a new FullyConnectedLayer instance and initializes layer parameters with the given values.
832  */
833  using WeightableLayer::WeightableLayer;
834 };
835 
836 /**
837  * @brief This class represents concatenation layer
838  * Takes as input several data elements and merges them to one using the supplied axis
839  */
840 class ConcatLayer : public CNNLayer {
841 public:
842  /**
843  * @brief An axis on which concatenation operation is performed
844  */
845  unsigned int _axis = 1;
846 
847  /**
848  * @brief Creates a new ConcatLayer instance and initializes layer parameters with the given values.
849  * If batch is used, then batch needs to be specified as an input dimension also
850  * In current implementation 1 means channels, 0 - batch
851  */
852  using CNNLayer::CNNLayer;
853 };
854 
855 /**
856  * @brief This class represents a layer that evenly splits the input into the supplied outputs
857  */
858 class SplitLayer : public CNNLayer {
859 public:
860  /**
861  * @brief An axis on which split operation is performed
862  */
863  unsigned int _axis = 1;
864 
865  /**
866  * @brief Creates a new SplitLayer instance.
867  */
868  using CNNLayer::CNNLayer;
869 };
870 
871 /**
872  * @brief This class represents a Linear Response Normalization (LRN) Layer
873  */
874 class NormLayer : public CNNLayer {
875 public:
876  /**
877  * @brief Response size
878  */
879  unsigned int _size = 0;
880  /**
881  * @brief K
882  */
883  unsigned int _k = 1;
884  /**
885  * @brief Alpha coefficient
886  */
887  float _alpha = 0;
888  /**
889  * @brief Beta coefficient
890  */
891  float _beta = 0;
892  /**
893  * @brief Flag to specify normalization across feature maps (true) or across channels
894  */
895  bool _isAcrossMaps = false;
896 
897  /**
898  * @brief Creates a new NormLayer instance.
899  */
900  using CNNLayer::CNNLayer;
901 };
902 
903 /**
904  * @brief This class represents standard softmax Layer
905  */
906 class SoftMaxLayer : public CNNLayer {
907 public:
908  /**
909  * @brief Axis number for a softmax operation
910  */
911  int axis = 1;
912  /**
913  * @brief Creates a new SoftMaxLayer instance.
914  */
915  using CNNLayer::CNNLayer;
916 };
917 
918 /**
919  * @class GRNLayer
920  * @brief This class represents standard GRN Layer
921  */
922 class GRNLayer : public CNNLayer {
923 public:
924  /**
925  * @brief A default constructor. Creates a new GRNLayer instance and initializes layer parameters with the given values.
926  * @param prms Initial layer parameters
927  */
928  explicit GRNLayer(const LayerParams &prms) : CNNLayer(prms), bias(0.f) {}
929 
930  /**
931  * @brief Bias for squares sum
932  */
933  float bias = 0.f;
934 };
935 
936 /**
937  * @class MVNLayer
938  * @brief This class represents standard MVN Layer
939  */
940 class MVNLayer : public CNNLayer {
941 public:
942  /**
943  * @brief A default constructor. Creates a new MVNLayer instance and initializes layer parameters with the given values.
944  * @param prms Initial layer parameters
945  */
946  explicit MVNLayer(const LayerParams &prms) : CNNLayer(prms), across_channels(0), normalize(1) {}
947 
948  /**
949  * @brief Indicate that mean value is calculated across channels
950  */
951  int across_channels = 0;
952 
953  /**
954  * @brief Indicate that the result needs to be normalized
955  */
956  int normalize = 1;
957 };
958 
959 /**
960  * @brief This class represents a Rectified Linear activation layer
961  */
962 class ReLULayer : public CNNLayer {
963 public:
964  /**
965  * @brief Negative slope is used to tackle negative inputs instead of setting them to 0
966  */
967  float negative_slope = 0.0f;
968 
969  /**
970  * @brief Creates a new ReLULayer instance.
971  */
972  using CNNLayer::CNNLayer;
973 };
974 
975 /**
976  * @brief This class represents a Clamp activation layer
977  * Clamps all tensor elements into the range [min_value, max_value]
978  */
979 class ClampLayer : public CNNLayer {
980 public:
981  /**
982  * @brief A minimum value
983  */
984  float min_value = 0.0f;
985 
986  /**
987  * @brief A maximum value
988  */
989  float max_value = 1.0f;
990  /**
991  * @brief Creates a new ClampLayer instance.
992  */
993  using CNNLayer::CNNLayer;
994 };
995 
996 
997 /**
998  * @brief This class represents a ReLU6 activation layer
999  * Clamps all tensor elements into the range [0, 6.0]
1000  */
1001 class ReLU6Layer : public ClampLayer {
1002 public:
1003  explicit ReLU6Layer(const LayerParams &prms) : ClampLayer(prms) {
1004  max_value = 6.0f;
1005  }
1006 
1007  using ClampLayer::ClampLayer;
1008 };
1009 
1010 
1011 /**
1012  * @brief This class represents an element wise operation layer
1013  */
1014 class EltwiseLayer : public CNNLayer {
1015 public:
1016  /**
1017  * @enum eOperation
1018  * @brief Defines possible operations that can be used
1019  */
1020  enum eOperation {
1021  Sum = 0, Prod, Max, Sub, Min, Div, Squared_diff, Floor_mod, Pow,
1022  Equal, Not_equal, Less, Less_equal, Greater, Greater_equal,
1023  Logical_AND, Logical_OR, Logical_XOR, Logical_NOT, Mean, Select
1024  };
1025 
1026  /**
1027  * @brief A type of the operation to use
1028  */
1029  eOperation _operation = Sum;
1030 
1031  /**
1032  * @brief A vector of coefficients to scale the operands
1033  */
1034  std::vector<float> coeff;
1035 
1036  /**
1037  * @brief Creates a new EltwiseLayer instance.
1038  */
1039  using CNNLayer::CNNLayer;
1040 };
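// An illustrative note on coeff (a common interpretation, not a normative statement): for a
// two-input Eltwise with _operation == Sum, a non-empty coeff vector scales the operands,
// i.e. out[i] = coeff[0] * in0[i] + coeff[1] * in1[i]; an empty coeff behaves as all ones.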
1041 
1042 /**
1043  * @brief This class represents a standard crop layer
1044  */
1045 class CropLayer : public CNNLayer {
1046 public:
1047  /**
1048  * @brief A vector of dimensions for cropping
1049  */
1050  std::vector<int> axis;
1051  /**
1052  * @brief A vector of dimensions to be preserved
1053  */
1054  std::vector<int> dim;
1055  /**
1056  * @brief A vector of offsets for each dimension
1057  */
1058  std::vector<int> offset;
1059 
1060  /**
1061  * @brief Creates a new CropLayer instance.
1062  */
1063  using CNNLayer::CNNLayer;
1064 };
1065 
1066 /**
1067  * @brief This class represents a standard reshape layer
1068  */
1069 class ReshapeLayer : public CNNLayer {
1070 public:
1071  /**
1072  * @brief A vector of sizes of the shape
1073  */
1074  std::vector<int> shape;
1075  /**
1076  * @brief A number of axis to be taken for a reshape
1077  */
1078  int axis = 0;
1079  /**
1080  * @brief A number of first axes to be taken for a reshape
1081  */
1082  int num_axes = -1;
1083 
1084  /**
1085  * @brief Creates a new ReshapeLayer instance.
1086  */
1087  using CNNLayer::CNNLayer;
1088 };
1089 
1090 /**
1091  * @brief This class represents a standard Tile Layer
1092  */
1093 class TileLayer : public CNNLayer {
1094 public:
1095  /**
1096  * @brief An index of the axis to tile
1097  */
1098  int axis = -1;
1099  /**
1100  * @brief A number of copies to be made
1101  */
1102  int tiles = -1;
1103 
1104  /**
1105  * @brief Creates a new TileLayer instance.
1106  */
1107  using CNNLayer::CNNLayer;
1108 };
1109 
1110 
1111 /**
1112  * @brief This class represents a Layer which performs Scale and Shift
1113  */
1114 class ScaleShiftLayer : public WeightableLayer {
1115 public:
1116  /**
1117  * @brief A flag that indicates if the same value is used for all the features. If false, the value is used pixel wise
1118  */
1119  unsigned int _broadcast = 0;
1120 
1121  /**
1122  * @brief Creates a new ScaleShiftLayer instance.
1123  */
1124  using WeightableLayer::WeightableLayer;
1125 };
1126 
1127 /**
1128  * @brief This class represents TensorIterator layer
1129  */
1130 class TensorIterator : public CNNLayer {
1131 public:
1132  struct PortMap {
1133  // Data map rule
1134  int from; /**< Index of external data from ins/outs fields of CNNLayer */
1135  int to; /**< Index of internal data in iterator body */
1136 
1137  // Iteration rule
1138  int axis; /**< Axis to iterate through */
1139  int stride; /**< Stride to iterate through */
1140  int start; /**< Start index of iteration range */
1141  int end; /**< Last index of iteration range */
1142  int part_size; /**< Part size which will be transferred to the body subnetwork */
1143  };
1144 
1145  struct Body {
1146  std::vector<DataPtr> inputs;
1147  std::vector<DataPtr> outputs;
1148  };
1149 
1150  std::vector<PortMap> input_port_map;
1151  std::vector<PortMap> output_port_map;
1152  std::vector<PortMap> back_edges;
1153 
1154  Body body;
1155 
1156  using CNNLayer::CNNLayer;
1157 };
1158 
1159 /**
1160  * @brief Base class for recurrent cell layers
1161  */
1162 class RNNCellBase : public WeightableLayer {
1163 public:
1164  using WeightableLayer::WeightableLayer;
1165 
1166  /**
1167  * @brief Direct type of recurrent cell (including subtypes)
1168  * Description of particular cell semantics is in LSTMCell, GRUCell, RNNCell.
1169  */
1170  enum CellType {
1171  LSTM, /**< Original LSTM cell */
1172  GRU, /**< Original GRU cell */
1173  RNN, /**< Original RNN cell */
1174  GRU_LBR, /**< GRU cell modification. "Linear before reset" */
1175  };
1176 
1177  /** @copybrief CellType */
1178  CellType cellType = LSTM;
1179 
1180  /**
1181  * @brief Size of hidden state data
1182  *
1183  * In case of batch output state tensor will have shape [N, hidden_size]
1184  */
1185  int hidden_size = 0;
1186 
1187  /**
1188  * @brief Clip data into range [-clip, clip] on input of activations
1189  *
1190  * clip==0.0f means no clipping
1191  */
1192  float clip = 0.0f;
1193  /**
1194  * @brief Activations used inside recurrent cell
1195  *
1196  * Valid values: sigmoid, tanh, relu
1197  */
1198  std::vector<std::string> activations;
1199 
1200  /**
1201  * @brief Alpha parameters of activations
1202  *
1203  * Respective to activation list.
1204  */
1205  std::vector<float> activation_alpha;
1206 
1207  /**
1208  * @brief Beta parameters of activations
1209  *
1210  * Respective to activation list.
1211  */
1212  std::vector<float> activation_beta;
1213 };
1214 
1215 /**
1216  * @brief LSTM Cell layer
1217  *
1218  * G - number of gates (=4)
1219  * N - batch size
1220  * S - state size (=hidden_size)
1221  *
1222  * Inputs:
1223  * [N,D] Xt - input data
1224  * [N,S] Ht-1 - initial hidden state
1225  * [N,S] Ct-1 - initial cell state
1226  *
1227  * Outputs:
1228  * [N,S] Ht - out hidden state
1229  * [N,S] Ct - out cell state
1230  *
1231  * Weights:
1232  * - weights [G,S,D+S]
1233  * - biases [G,S]
1234  * NB! gates order is FICO {forget, input, candidate, output}
1235  *
1236  * activations is {_f, _g, _h}
1237  * default: {_f=sigm, _g=tanh, _h=tanh}
1238  *
1239  * Equations:
1240  *
1241  * * - matrix mult
1242  * (.) - eltwise mult
1243  * [,] - concatenation
1244  *
1245  * - ft = _f(Wf*[Ht-1, Xt] + Bf)
1246  * - it = _f(Wi*[Ht-1, Xt] + Bi)
1247  * - ct = _g(Wc*[Ht-1, Xt] + Bc)
1248  * - ot = _f(Wo*[Ht-1, Xt] + Bo)
1249  * - Ct = ft (.) Ct-1 + it (.) ct
1250  * - Ht = ot (.) _h(Ct)
1251  */
1252 class LSTMCell : public RNNCellBase {};
1253 
1254 /**
1255  * @brief GRU Cell layer
1256  *
1257  * G - number of gates (=3)
1258  * N - batch size
1259  * S - state size (=hidden_size)
1260  *
1261  * Inputs:
1262  * [N,D] Xt - input data
1263  * [N,S] Ht-1 - initial hidden state
1264  *
1265  * Outputs:
1266  * [N,S] Ht - out hidden state
1267  *
1268  * Weights:
1269  * - weights [G,S,D+S]
1270  * - biases [G,S]
1271  * NB! gates order is ZRH {update, reset, output}
1272  *
1273  * activations is {_f, _g}
1274  * default: {_f=sigm, _g=tanh}
1275  *
1276  * Equations:
1277  *
1278  * * - matrix mult
1279  * (.) - eltwise mult
1280  * [,] - concatenation
1281  *
1282  * - zt = _f(Wz*[Ht-1, Xt] + Bz)
1283  * - rt = _f(Wr*[Ht-1, Xt] + Br)
1284  * - ht = _g(Wh*[rt (.) Ht-1, Xt] + Bh)
1285  * - Ht = (1 - zt) (.) ht + zt (.) Ht-1
1286  */
1287 class GRUCell : public RNNCellBase {};
1288 
1289 /**
1290  * @brief RNN Cell layer
1291  *
1292  * G - number of gates (=1)
1293  * N - batch size
1294  * S - state size (=hidden_size)
1295  *
1296  * Inputs:
1297  * [N,D] Xt - input data
1298  * [N,S] Ht-1 - initial hidden state
1299  *
1300  * Outputs:
1301  * [N,S] Ht - out hidden state
1302  *
1303  * Weights:
1304  * - weights [G,S,D+S]
1305  * - biases [G,S]
1306  *
1307  * activations is {_f}
1308  * default: {_f=tanh}
1309  *
1310  * Equations:
1311  *
1312  * * - matrix mult
1313  * [,] - concatenation
1314  *
1315  * - Ht = _f(Wi*[Ht-1, Xt] + Bi)
1316  */
1317 class RNNCell : public RNNCellBase {};
1318 
1319 /**
1320  * @brief Sequence of recurrent cells
1321  *
1322  * N - batch size
1323  * T - sequence size
1324  * S - state size (=hidden_size)
1325  * NS - num of state tensors (LSTM=2, GRU/RNN=1)
1326  * ND - num of direction (BDR=2, FWD/BWD=1)
1327  *
1328  * Inputs:
1329  * [N,T,D] Xt - input data
1330  * [ND,N,S] Ht-1 - initial hidden state
1331  * [ND,N,S] Ct-1 - initial cell state // if NS==2
1332  * [N] SL - sequence length (optional input)
1333  *
1334  * Outputs:
1335  * [ND,N,T,S] Xt - input data
1336  * [ND,N,S] Ht-1 - initial hidden state
1337  * [ND,N,S] Ct-1 - initial cell state // if NS==2
1338  *
1339  * NB! if axis==0 batch and sequence dimensions are swapped (N <-> T) for input and output tensors
1340  *
1341  * Weights:
1342  * - weights [ND,G,S,D+S]
1343  * - biases [ND,G,S]
1344  * NB! if ND==2 weights are concatenated cell weights [forward_cell_weights, backward_cell_weights]
1345  *
1346  */
1347 class RNNSequenceLayer : public RNNCellBase {
1348 public:
1349  using RNNCellBase::RNNCellBase;
1350 
1351  /**
1352  * @brief An axis by which iteration is performed
1353  * axis=0 means first input/output data blob dimension is sequence
1354  * axis=1 means first input/output data blob dimension is batch
1355  */
1356  unsigned int axis = 1;
1357 
1358  /**
1359  * @brief Direction of iteration through sequence dimension
1360  */
1361  enum Direction {
1362  FWD, /**< Forward mode. Iterate starts from index 0 with step 1. */
1363  BWD, /**< Backward mode. Iterate starts from last index with step -1. */
1364  BDR /**< Bidirectional mode. First is forward pass, second is backward. */
1365  };
1366 
1367  /** @copybrief Direction */
1368  Direction direction = FWD;
1369 };
1370 
1371 /**
1372  * @brief This class represents a Parametric ReLU (PReLU) activation layer
1373  */
1374 class PReLULayer : public WeightableLayer {
1375 public:
1376  /**
1377  * @brief A flag that indicates if the same negative_slope value is used for all the features. If false, the value is used pixel wise
1378  */
1379  bool _channel_shared = false;
1380 
1381 public:
1382  /**
1383  * @brief A default constructor. Creates a new PReLULayer instance and initializes layer parameters with the given values.
1384  * @param prms Initial layer parameters
1385  */
1386  explicit PReLULayer(const LayerParams &prms) : WeightableLayer(prms), _channel_shared(false) {}
1387 };
1388 
1389 /**
1390  * @brief This class represents a standard Power Layer
1391  * Formula is: output = (offset + scale * input) ^ power
1392  */
1393 class PowerLayer : public CNNLayer {
1394 public:
1395  /**
1396  * @brief An exponent value
1397  */
1398  float power = 1.f;
1399  /**
1400  * @brief A scale factor
1401  */
1402  float scale = 1.f;
1403  /**
1404  * @brief An offset value
1405  */
1406  float offset = 0.f;
1407 
1408  /**
1409  * @brief Creates a new PowerLayer instance.
1410  */
1411  using CNNLayer::CNNLayer;
1412 };
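// A worked example of the formula above (illustrative): out = (offset + scale * in) ^ power
// element-wise, so power = 1, scale = -1, offset = 0 negates the input, while
// power = 2, scale = 1, offset = 0 squares it.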
1413 
1414 /**
1415  * @brief This class represents a Batch Normalization Layer
1416  */
1417 class BatchNormalizationLayer : public WeightableLayer {
1418 public:
1419  /**
1420  * @brief A small value to add to the variance estimate to avoid division by zero
1421  */
1422  float epsilon = 1e-3f;
1423 
1424  /**
1425  * @brief Creates a new BatchNormalizationLayer instance.
1426  */
1427  using WeightableLayer::WeightableLayer;
1428 };
1429 
1430 /**
1431  * @brief This class represents a general matrix multiplication operation layer
1432  * Formula is: dst := alpha*src1*src2 + beta*src3
1433  */
1434 class GemmLayer : public CNNLayer {
1435 public:
1436  /**
1437  * @brief A scale factor of src1 matrix
1438  */
1439  float alpha = 1.f;
1440  /**
1441  * @brief A scale factor of src3 matrix
1442  */
1443  float beta = 1.f;
1444  /**
1445  * @brief A flag that indicates if the src1 matrix is to be transposed
1446  */
1447  bool transpose_a = false;
1448  /**
1449  * @brief A flag that indicates if the src2 matrix is to be transposed
1450  */
1451  bool transpose_b = false;
1452  /**
1453  * @brief Creates a new GemmLayer instance.
1454  */
1455  using CNNLayer::CNNLayer;
1456 };
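// An illustrative shape note (assuming a plain 2D GEMM): with src1 of shape [M, K] and src2 of
// shape [K, N] after the optional transposes, dst has shape [M, N] and equals
// alpha * src1 * src2 + beta * src3, where the optional src3 input is broadcastable to [M, N].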
1457 
1458 /**
1459  * @brief This class represents a standard Pad layer
1460  * Adds paddings to input tensor
1461  */
1462 class PadLayer : public CNNLayer {
1463 public:
1464  /**
1465  * @enum ePadMode
1466  * @brief Defines possible modes of pad operation
1467  */
1468  enum ePadMode {
1469  Constant = 0, Edge, Reflect, Symmetric
1470  };
1471 
1472  /**
1473  * @brief Size of padding in the beginning of each axis
1474  */
1475  PropertyVector<unsigned int> pads_begin;
1476  /**
1477  * @brief Size of padding in the end of each axis
1478  */
1479  PropertyVector<unsigned int> pads_end;
1480  /**
1481  * @brief Mode of pad operation
1482  */
1483  ePadMode pad_mode = Constant;
1484  /**
1485  * @brief A pad value which is used for filling in Constant mode
1486  */
1487  float pad_value = 0.0f;
1488  /**
1489  * @brief Creates a new PadLayer instance.
1490  */
1491  using CNNLayer::CNNLayer;
1492 };
1493 
1494 /**
1495  * @brief This class represents a standard Gather layer
1496  * Gather slices from Dictionary according to Indexes
1497  */
1498 class GatherLayer : public CNNLayer {
1499 public:
1500  /**
1501  * @brief The axis in Dictionary to gather Indexes from
1502  */
1503  int axis = 0;
1504  /**
1505  * @brief Creates a new GatherLayer instance.
1506  */
1507  using CNNLayer::CNNLayer;
1508 };
1509 
1510 /**
1511  * @brief This class represents a standard Strided Slice layer
1512  * Strided Slice picks from the input tensor according to parameters
1513  */
1514 class StridedSliceLayer : public CNNLayer {
1515 public:
1516  /**
1517  * @brief The begin_mask is a bitmask where bit i being 0 means
1518  * to ignore the begin value and instead use the default value
1519  */
1520  std::string begin_mask;
1521  /**
1522  * @brief Analogous to begin_mask
1523  */
1524  std::string end_mask;
1525  /**
1526  * @brief The ellipsis_mask is a bitmask where bit i being 1 means
1527  * the i-th is actually an ellipsis
1528  */
1529  std::string ellipsis_mask;
1530  /**
1531  * @brief The new_axis_mask_ is a bitmask where bit i being 1 means
1532  * the i-th position creates a new 1 dimension shape
1533  */
1534  std::string new_axis_mask;
1535  /**
1536  * @brief The shrink_axis_mask is a bitmask where bit i being 1 means
1537  * the i-th position shrinks the dimensionality
1538  */
1539  std::string shrink_axis_mask;
1540 
1541  /**
1542  * @brief Creates a new StridedSliceLayer instance.
1543  */
1544  using CNNLayer::CNNLayer;
1545 };
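// An illustrative mask example (the attribute values are made up): the masks are per-axis bit
// strings such as begin_mask = "1,0,1", which keeps the supplied begin value on axes 0 and 2
// and falls back to the default (the start of the axis) on axis 1, per the bit semantics above.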
1546 
1547 /**
1548 * @brief This class represents a standard Shuffle Channels layer
1549 * Shuffle Channels picks from the input tensor according to parameters
1550 */
1551 class ShuffleChannelsLayer : public CNNLayer {
1552 public:
1553  /**
1554  * @brief The axis in tensor to shuffle channels
1555  */
1556  int axis = 1;
1557 
1558  /**
1559  * @brief The group of output shuffled channels
1560  */
1561  unsigned int group = 1;
1562 
1563  /**
1564  * @brief Creates a new ShuffleChannelsLayer instance.
1565  */
1566  using CNNLayer::CNNLayer;
1567 };
1568 
1569 
1570 /**
1571 * @brief This class represents a standard Depth To Space layer
1572 * Depth To Space picks from the input tensor according to parameters
1573 */
1574 class DepthToSpaceLayer : public CNNLayer {
1575 public:
1576  /**
1577  * @brief The block size by which the depth dimension is rearranged into spatial dimensions
1578  */
1579  unsigned int block_size = 1;
1580 
1581  /**
1582  * @brief Creates a new DepthToSpaceLayer instance.
1583  */
1584  using CNNLayer::CNNLayer;
1585 };
1586 
1587 
1588 /**
1589 * @brief This class represents a standard Space To Depth layer
1590 * Space To Depth picks from the input tensor according to parameters
1591 */
1592 class SpaceToDepthLayer : public CNNLayer {
1593 public:
1594  /**
1595  * @brief The block size by which spatial dimensions are rearranged into the depth dimension
1596  */
1597  unsigned int block_size = 1;
1598 
1599  /**
1600  * @brief Creates a new SpaceToDepthLayer instance.
1601  */
1602  using CNNLayer::CNNLayer;
1603 };
1604 
1605 
1606 /**
1607 * @brief This class represents a standard Reverse Sequence layer
1608 * Reverse Sequence modifies the input tensor according to parameters
1609 */
1610 class ReverseSequenceLayer : public CNNLayer {
1611 public:
1612  /**
1613  * @brief The seq_axis dimension in tensor which is partially reversed
1614  */
1615  int seq_axis = 1;
1616 
1617  /**
1618  * @brief The batch_axis dimension in tensor along which reversal is performed
1619  */
1620  int batch_axis = 0;
1621 
1622  /**
1623  * @brief Creates a new ReverseSequence instance.
1624  */
1625  using CNNLayer::CNNLayer;
1626 };
1627 
1628 
1629 /**
1630 * @brief This class represents a OneHot layer
1631 * Converts input into OneHot representation.
1632 */
1633 class OneHotLayer : public CNNLayer {
1634 public:
1635  /**
1636  * @brief A depth of representation
1637  */
1638  unsigned int depth = 0;
1639 
1640  /**
1641  * @brief The locations represented by indices in input take value on_value
1642  */
1643  float on_value = 1.f;
1644 
1645  /**
1646  * @brief The locations not represented by indices in input take value off_value
1647  */
1648  float off_value = 0.f;
1649 
1650  /**
1651  * @brief Define the shape of output tensor
1652  */
1653  int axis = -1;
1654 
1655  /**
1656  * @brief Creates a new OneHot instance
1657  */
1658  using CNNLayer::CNNLayer;
1659 };
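// A worked example (illustrative): with depth = 3, on_value = 1, off_value = 0 and axis = -1,
// an index input [0, 2] is expanded to [[1, 0, 0], [0, 0, 1]].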
1660 
1661 
1662 /**
1663 * @brief This class represents a standard RangeLayer layer
1664 * RangeLayer modifies input tensor dimensions according to parameters
1665 */
1666 class RangeLayer : public CNNLayer {
1667 public:
1668  /**
1669  * @brief Creates a new RangeLayer instance.
1670  */
1671  using CNNLayer::CNNLayer;
1672 };
1673 
1674 
1675 /**
1676 * @brief This class represents a standard Fill layer
1677 * Fill modifies the input tensor according to parameters
1678 */
1679 class FillLayer : public CNNLayer {
1680 public:
1681  /**
1682  * @brief Creates a new Fill instance.
1683  */
1684  using CNNLayer::CNNLayer;
1685 };
1686 
1687 
1688 /**
1689 * @brief This class represents a SelectLayer layer
1690 * SelectLayer layer takes elements from the second (“then”) or the third (“else”) input based on condition mask (“cond”) provided in the first input.
1691 * The “cond” tensor is broadcasted to “then” and “else” tensors.
1692 * The output tensor shape is equal to broadcasted shape of “cond”, “then” and “else”.
1693 */
1694 class SelectLayer : public CNNLayer {
1695 public:
1696  /**
1697  * @brief Creates a new SelectLayer instance.
1698  */
1699  using CNNLayer::CNNLayer;
1700 };
1701 
1702 
1703 /**
1704 * @brief This class represents a standard Broadcast layer
1705 * Broadcast modifies input tensor dimensions according to parameters
1706 */
1707 class BroadcastLayer : public CNNLayer {
1708 public:
1709  /**
1710  * @brief Creates a new Broadcast instance.
1711  */
1712  using CNNLayer::CNNLayer;
1713 };
1714 
1715 /**
1716  * @brief This class represents a quantization operation layer
1717  * Element-wise linear quantization of floating point input values into a discrete set of floating point values
1718  */
1719 class QuantizeLayer : public CNNLayer {
1720 public:
1721  /**
1722  * @brief The number of quantization levels
1723  */
1724  int levels = 1;
1725 
1726  /**
1727  * @brief Creates a new QuantizeLayer instance.
1728  */
1729  using CNNLayer::CNNLayer;
1730 };
1731 
1732 
1733 /**
1734 * @brief This class represents standard Math layers
1735 * A Math layer modifies the input tensor according to parameters
1736 */
1737 class MathLayer : public CNNLayer {
1738 public:
1739  /**
1740  * @brief Creates a new Math instance.
1741  */
1742  using CNNLayer::CNNLayer;
1743 };
1744 
1745 
1746 /**
1747 * @brief This class represents standard Reduce layers
1748 * Reduce modifies the input tensor according to parameters
1749 */
1750 class ReduceLayer : public CNNLayer {
1751 public:
1752  /**
1753  * @brief A flag that indicates whether the reduced dimensions are kept in the output tensor
1754  */
1755  bool keep_dims = true;
1756 
1757  /**
1758  * @brief Creates a new Reduce instance.
1759  */
1760  using CNNLayer::CNNLayer;
1761 };
1762 
1763 
1764 /**
1765  * @brief This class represents a standard TopK layer
1766  * TopK picks the top K values from the input tensor according to parameters
1767  */
1768 class TopKLayer : public CNNLayer {
1769 public:
1770  /**
1771  * @brief The mode could be 'max' or 'min'
1772  */
1773  std::string mode;
1774  /**
1775  * @brief top K values sort mode could be 'value' or 'index'
1776  */
1777  std::string sort;
1778  /**
1779  * @brief The axis in the tensor along which the top K values are picked
1780  */
1781  int axis = -1;
1782 
1783  /**
1784  * @brief Creates a new TopKLayer instance.
1785  */
1786  using CNNLayer::CNNLayer;
1787 };
1788 
1789 
1790 } // namespace InferenceEngine
int GetParamAsInt(const char *param, int def) const
Returns an integer value for the given parameter or returns the default value.
Definition: ie_layers.h:227
BinaryConvolutionLayer(const LayerParams &p)
Creates a new BinaryConvolutionLayer instance.
Definition: ie_layers.h:781
std::shared_ptr< CNNLayer > Ptr
A shared pointer to CNNLayer.
Definition: ie_layers.h:46
#define THROW_IE_EXCEPTION
A macro used to throw the exception with a notable description.
Definition: ie_exception.hpp:22
#define DEFINE_PROP(prop_name)
convinenent way to declare property with backward compatibility to 2D members
Definition: ie_layers.h:527
PoolingLayer(const PoolingLayer &that)
copy constructor
Definition: ie_layers.h:707
std::vector< int > axis
A vector of dimensions for cropping.
Definition: ie_layers.h:1050
virtual const DataPtr input() const
Returns the first element of the input data for this layer.
Definition: ie_layers.h:106
This class represents a Layer which performs Scale and Shift.
Definition: ie_layers.h:1374
std::string type
Layer type.
Definition: ie_layers.h:55
unsigned int _group
Number of groups.
Definition: ie_layers.h:564
PoolType _type
A pooling type.
Definition: ie_layers.h:667
This class represents a standard Strided Slice layer Strided Slice picks from input tensor according ...
Definition: ie_layers.h:1514
unsigned int GetParamAsUInt(const char *param, unsigned int def) const
Returns an unsigned integer value for the given parameter or returns the default value.
Definition: ie_layers.h:303
PoolType
Defines available pooling types.
Definition: ie_layers.h:656
float GetParamAsFloat(const char *param, float def) const
Gets float value for the given parameter.
Definition: ie_layers.h:149
This class represents a standard crop layer.
Definition: ie_layers.h:1045
std::vector< float > activation_beta
Beta parameters of activations.
Definition: ie_layers.h:1212
The method holds the user values to enable binding of data per graph node.
Definition: ie_common.h:66
This structure describes ROI data.
Definition: ie_blob.h:1085
PReLULayer(const LayerParams &prms)
A default constructor. Creates a new PReLULayer instance and initializes layer parameters with the gi...
Definition: ie_layers.h:1386
This class represents a standard Power Layer Formula is: output = (offset + scale * input) ^ power...
Definition: ie_layers.h:1393
std::string begin_mask
The begin_mask is a bitmask where bit i being 0 means to ignore the begin value and instead use the d...
Definition: ie_layers.h:1520
std::vector< int > GetParamAsInts(const char *param, std::vector< int > def) const
Returns a vector of int values for the given parameter or returns the default value.
Definition: ie_layers.h:259
std::vector< int > shape
A vector of sizes of the shape.
Definition: ie_layers.h:1074
Definition: ie_argmax_layer.hpp:11
Definition: ie_layers.h:1362
This is an internal common Layer parameter parsing arguments.
Definition: ie_layers.h:29
bool _channel_shared
A flag that indicates if the same negative_slope value is used for all the features. If false, the value is used pixel wise.
Definition: ie_layers.h:1379
PropertyVector< unsigned int > _dilation
A convolution dilations array [X, Y, Z, ...].
Definition: ie_layers.h:552
This class represents a standard Space To Depth layer Depth To Space picks from input tensor accordin...
Definition: ie_layers.h:1592
eBinaryConvolutionMode _mode
Mode of binary convolution operation.
Definition: ie_layers.h:733
Definition: ie_layers.h:1174
This class represents a OneHot layer Converts input into OneHot representation.
Definition: ie_layers.h:1633
Base class for recurrent cell layers.
Definition: ie_layers.h:1162
PropertyVector< unsigned int > pads_begin
Size of padding in the beginning of each axis.
Definition: ie_layers.h:1475
PropertyVector< unsigned int > _dilation
A convolution dilations array [X, Y, Z, ...].
Definition: ie_layers.h:760
This class represents a layer with Weights and/or Biases (e.g. Convolution/Fully Connected, etc.)
Definition: ie_layers.h:501
Blob::Ptr _weights
A pointer to a weights blob.
Definition: ie_layers.h:512
PropertyVector< unsigned int > _stride
A convolution strides array [X, Y, Z, ...].
Definition: ie_layers.h:552
This class represents a standard Fill layer RFill modifies input tensor according parameters...
Definition: ie_layers.h:1679
WeightableLayer(const LayerParams &prms)
A default constructor. Constructs a WeightableLayer instance and initiates layer parameters with the ...
Definition: ie_layers.h:507
std::vector< DataWeakPtr > insData
A vector of weak pointers to the input data elements of this layer in the di-graph (order matters) ...
Definition: ie_layers.h:67
std::string _auto_pad
Auto padding type.
Definition: ie_layers.h:568
void fuse(Ptr &layer)
Sets a layer to be fused with.
Definition: ie_layers.h:98
A header file for Blob and generic TBlob<>
This class represents a standard deconvolution layer.
Definition: ie_layers.h:610
std::string shrink_axis_mask
The shrink_axis_mask is a bitmask where bit i being 1 means the i-th position shrinks the dimensional...
Definition: ie_layers.h:1539
std::vector< unsigned int > GetParamAsUInts(const char *param) const
Returns a vector of unsigned int values for the given parameter.
Definition: ie_layers.h:373
Direction
Direction of iteration through sequence dimension.
Definition: ie_layers.h:1361
int to
Definition: ie_layers.h:1135
std::vector< int > offset
A vector of offsets for each dimension.
Definition: ie_layers.h:1058
This class represents standard MVN Layer.
Definition: ie_layers.h:940
PropertyVector< unsigned int > _kernel
A convolution kernel array [X, Y, Z, ...].
Definition: ie_layers.h:540
int stride
Definition: ie_layers.h:1139
Ptr _fusedWith
If suggested to fuse - a pointer to the layer which needs to be fused with this layer.
Definition: ie_layers.h:71
This class represents a standard Reduce layers Reduce modifies input tensor according parameters...
Definition: ie_layers.h:1750
std::string sort
top K values sort mode could be &#39;value&#39; or &#39;index&#39;
Definition: ie_layers.h:1777
ConvolutionLayer(const ConvolutionLayer &that)
copy constructor
Definition: ie_layers.h:598
PropertyVector< unsigned int > _pads_end
Pooling paddings end array [X, Y, Z, ...].
Definition: ie_layers.h:642
std::vector< std::string > activations
Activations used inside recurrent cell.
Definition: ie_layers.h:1198
std::string name
Layer name.
Definition: ie_layers.h:31
class CNNLayer GenericLayer
Alias for CNNLayer object.
Definition: ie_layers.h:496
std::string type
Layer type.
Definition: ie_layers.h:33
This class represents a standard Reverse Sequence layer Reverse Sequence modifies input tensor accord...
Definition: ie_layers.h:1610
This class represents a Clamp activation layer Clamps all tensor elements into the range [min_value...
Definition: ie_layers.h:979
int part_size
Definition: ie_layers.h:1142
This class represents a layer that evenly splits the input into the supplied outputs.
Definition: ie_layers.h:858
Definition: ie_layers.h:1363
Definition: ie_layers.h:1172
eOperation
Defines possible operations that can be used.
Definition: ie_layers.h:1020
This class represents a standard TopK layer TopK picks top K values from input tensor according param...
Definition: ie_layers.h:1768
bool _exclude_pad
A flag that indicates if padding is excluded or not.
Definition: ie_layers.h:672
This class represents an element wise operation layer.
Definition: ie_layers.h:1014
PropertyVector< unsigned int > pads_end
Size of padding in the end of each axis.
Definition: ie_layers.h:1479
This class represents standard GRN Layer.
Definition: ie_layers.h:922
This class represents a standard Shuffle Channels layer Shuffle Channels picks from input tensor acco...
Definition: ie_layers.h:1551
This class represents a standard reshape layer.
Definition: ie_layers.h:1069
std::string mode
The mode could be &#39;max&#39; or &#39;min&#39;.
Definition: ie_layers.h:1773
UserValue userValue
Convenience user values to store in this object as extra data.
Definition: ie_layers.h:75
Sequence of recurrent cells.
Definition: ie_layers.h:1347
PropertyVector< unsigned int > _pads_end
A convolution paddings end array [X, Y, Z, ...].
Definition: ie_layers.h:752
BinaryConvolutionLayer(const BinaryConvolutionLayer &that)
copy constructor
Definition: ie_layers.h:809
GRNLayer(const LayerParams &prms)
A default constructor. Creates a new GRNLayer instance and initializes layer parameters with the give...
Definition: ie_layers.h:928
CNNLayer(const LayerParams &prms)
A constructor. Creates a new CNNLayer instance and initializes layer parameters with the given values.
Definition: ie_layers.h:85
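A minimal construction sketch (the layer name, type, and precision shown here are hypothetical example values):

    #include "ie_layers.h"
    #include <memory>

    // Builds a generic layer description from the basic parsing parameters.
    InferenceEngine::CNNLayer::Ptr makeExampleLayer() {
        InferenceEngine::LayerParams prms;
        prms.name = "relu_1";                               // hypothetical layer name
        prms.type = "ReLU";                                 // hypothetical layer type
        prms.precision = InferenceEngine::Precision::FP32;  // base operating precision
        return std::make_shared<InferenceEngine::CNNLayer>(prms);
    }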
static float ie_parse_float(const std::string &str)
Parse string with float in accordance with IE rules.
Definition: ie_layers.h:128
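A small usage sketch (the example strings are illustrative):

    #include "ie_layers.h"
    #include <iostream>

    int main() {
        // Parses the string representation into a float according to the IE rules.
        std::cout << InferenceEngine::CNNLayer::ie_parse_float("-inf") << "\n";
        std::cout << InferenceEngine::CNNLayer::ie_parse_float("0.5") << "\n";
        return 0;
    }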
This class represents a fully connected layer.
Definition: ie_layers.h:823
ePadMode
Defines possible modes of pad operation.
Definition: ie_layers.h:1468
This class represents a Select layer. Select takes elements from the second (“then”) input or the third (“else”) input, depending on the condition mask supplied in the first input.
Definition: ie_layers.h:1694
std::map< std::string, std::string > params
Map of pairs: (parameter name, parameter value)
Definition: ie_layers.h:485
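A sketch of reading the raw string attributes directly (the dumpParams helper is an assumption; the typed GetParamAs* accessors are usually the more convenient way to consume these values):

    #include "ie_layers.h"
    #include <iostream>

    // Prints every (attribute name, string value) pair stored on the layer.
    void dumpParams(const InferenceEngine::CNNLayer &layer) {
        for (const auto &kv : layer.params) {
            std::cout << kv.first << " = " << kv.second << "\n";
        }
    }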
Definition: ie_layers.h:1171
PoolingLayer(const LayerParams &p)
Creates a new PoolingLayer instance.
Definition: ie_layers.h:681
std::shared_ptr< Blob > Ptr
A smart pointer containing Blob object.
Definition: ie_blob.h:40
std::vector< float > coeff
A vector of coefficients to scale the operands.
Definition: ie_layers.h:1034
bool GetParamsAsBool(const char *param, bool def) const
Definition: ie_layers.h:420
std::string end_mask
Analogous to begin_mask.
Definition: ie_layers.h:1524
std::vector< unsigned int > GetParamAsUInts(const char *param, std::vector< unsigned int > def) const
Returns a vector of unsigned int values for the given parameter or returns the default value.
Definition: ie_layers.h:345
std::vector< float > activation_alpha
Alpha parameters of activations.
Definition: ie_layers.h:1205
unsigned int GetParamAsUInt(const char *param) const
Returns an unsigned integer value for the given parameter.
Definition: ie_layers.h:323
This class represents a ReLU6 activation layer. Clamps all tensor elements into the range [0, 6].
Definition: ie_layers.h:1001
This class represents a Batch Normalization Layer.
Definition: ie_layers.h:1417
unsigned int _group
Number of groups.
Definition: ie_layers.h:772
PropertyVector< unsigned int > _stride
Pooling strides array [X, Y, Z, ...].
Definition: ie_layers.h:650
This class represents a Layer which performs Scale and Shift.
Definition: ie_layers.h:1114
This class represents standard softmax Layer.
Definition: ie_layers.h:906
unsigned int _out_depth
The number of output feature maps (size) generating the 3rd output dimension.
Definition: ie_layers.h:560
PropertyVector< unsigned int > _kernel
A convolution kernel array [X, Y, Z, ...].
Definition: ie_layers.h:748
std::vector< float > GetParamAsFloats(const char *param) const
Returns a vector of float values for the given parameter.
Definition: ie_layers.h:204
This header file defines the main Data representation node.
int end
Definition: ie_layers.h:1141
bool GetParamAsBool(const char *param, bool def) const
Returns a boolean value for the given parameter. The valid values are (true, false, 1, 0).
Definition: ie_layers.h:400
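A usage sketch (the "exclude-pad" attribute name is a hypothetical example; the second argument is the fallback used when the attribute is absent):

    #include "ie_layers.h"

    // Accepts true/false/1/0 in the attribute string; falls back to false if it is missing.
    bool readExcludePad(const InferenceEngine::CNNLayer &layer) {
        return layer.GetParamAsBool("exclude-pad", false);
    }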
std::vector< int > GetParamAsInts(const char *param) const
Returns a vector of int values for the given parameter.
Definition: ie_layers.h:282
float _pad_value
A pad value which is used to fill pad area.
Definition: ie_layers.h:743
std::string ellipsis_mask
The ellipsis_mask is a bitmask where bit i being 1 means the i-th position is actually an ellipsis.
Definition: ie_layers.h:1529
unsigned int _in_depth
The number of input feature maps (size) generating the 3rd input dimension.
Definition: ie_layers.h:738
PropertyVector< unsigned int > _kernel
Pooling kernel array [X, Y, Z, ...].
Definition: ie_layers.h:638
float GetParamAsFloat(const char *param) const
Returns a float value for the given layer parameter.
Definition: ie_layers.h:164
PropertyVector< unsigned int > _padding
Pooling paddings begin array [X, Y, Z, ...].
Definition: ie_layers.h:638
Definition: ie_layers.h:1145
int GetParamAsInt(const char *param) const
Returns an integer value for the given parameter.
Definition: ie_layers.h:242
This class represents a standard deformable convolution layer.
Definition: ie_layers.h:619
PropertyVector< unsigned int > _padding
A convolution paddings begin array [X, Y, Z, ...].
Definition: ie_layers.h:748
int across_channels
Indicates that the mean value is calculated across channels.
Definition: ie_layers.h:951
Precision precision
Layer base operating precision.
Definition: ie_layers.h:59
This class represents a quantization operation layer. Performs element-wise linear quantization of floating point input values.
Definition: ie_layers.h:1719
unsigned int _out_depth
The number of output feature maps (size) generating the 3rd output dimension.
Definition: ie_layers.h:768
MVNLayer(const LayerParams &prms)
A default constructor. Creates a new MVNLayer instance and initializes layer parameters with the given values.
Definition: ie_layers.h:946
This class represents a Rectified Linear activation layer.
Definition: ie_layers.h:962
This class represents a standard 3D Convolution Layer.
Definition: ie_layers.h:535
CellType
Direct type of recurrent cell (including subtypes). Description of particular cell semantics is in LSTMCell, GRUCell, and RNNCell.
Definition: ie_layers.h:1170
std::string _auto_pad
Auto padding type.
Definition: ie_layers.h:676
std::string _auto_pad
Auto padding type.
Definition: ie_layers.h:776
This class represents a standard Pad layer Adds paddings to input tensor.
Definition: ie_layers.h:1462
This header file contains aspects of working on different devices like CPU, GEN, FPGA, etc.
This class represents a standard Tile Layer.
Definition: ie_layers.h:1093
This class represents a standard binary convolution layer.
Definition: ie_layers.h:720
Precision precision
Layer precision.
Definition: ie_layers.h:35
PropertyVector< unsigned int > _pads_end
A convolution paddings end array [X, Y, Z, ...].
Definition: ie_layers.h:544
Blob::Ptr _biases
A pointer to a biases blob.
Definition: ie_layers.h:516
This class represents a Local Response Normalization (LRN) Layer.
Definition: ie_layers.h:874
This class represents TensorIterator layer.
Definition: ie_layers.h:1130
This class represents a concatenation layer. Takes several data elements as input and merges them into one along the concatenation axis.
Definition: ie_layers.h:840
std::vector< float > GetParamAsFloats(const char *param, std::vector< float > def) const
Returns a vector of float values for the given parameter or returns the default value.
Definition: ie_layers.h:180
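A usage sketch of the default-value overload (the "coeff" attribute name and the fallback vector are hypothetical):

    #include "ie_layers.h"
    #include <vector>

    // Returns the parsed vector, or {1.0f, 1.0f} when the attribute is not set.
    std::vector<float> readCoeffs(const InferenceEngine::CNNLayer &layer) {
        return layer.GetParamAsFloats("coeff", {1.0f, 1.0f});
    }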
This class represents a standard Range layer. Range modifies the input tensor dimensions according to its parameters.
Definition: ie_layers.h:1666
This is a base abstraction Layer - all DNN Layers inherit from this class.
Definition: ie_layers.h:41
std::shared_ptr< Data > DataPtr
Smart pointer to Data.
Definition: ie_common.h:50
int start
Definition: ie_layers.h:1140
std::string affinity
Layer affinity set by user.
Definition: ie_layers.h:79
This class represents a standard pooling layer.
Definition: ie_layers.h:633
PropertyVector< unsigned int > _stride
A convolution strides array [X, Y, Z, ...].
Definition: ie_layers.h:760
Definition: ie_layers.h:1132
This class represents a standard Gather layer. Gathers slices from the Dictionary input according to the Indexes input.
Definition: ie_layers.h:1498
Definition: ie_layers.h:1173
std::string GetParamAsString(const char *param) const
Returns a string value for the given parameter. Throws an exception if the parameter was not found.
Definition: ie_layers.h:457
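A usage sketch (the "auto_pad" attribute name is a hypothetical example; this overload throws instead of returning a default):

    #include "ie_layers.h"
    #include <string>

    // Throws if the "auto_pad" attribute is not present on the layer.
    std::string readAutoPad(const InferenceEngine::CNNLayer &layer) {
        return layer.GetParamAsString("auto_pad");
    }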
This class represents the standard Math layers. Math modifies the input tensor according to its parameters.
Definition: ie_layers.h:1737
std::string new_axis_mask
The new_axis_mask is a bitmask where bit i being 1 means the i-th position creates a new dimension of size 1.
Definition: ie_layers.h:1534
PropertyVector< unsigned int > _padding
A convolution paddings begin array [X, Y, Z, ...].
Definition: ie_layers.h:540
std::map< std::string, Blob::Ptr > blobs
Map of pairs: (name, weights/biases blob)
Definition: ie_layers.h:490
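A sketch of looking up a constant tensor attached to the layer (the "weights" key is a common convention, treated here as an assumption):

    #include "ie_layers.h"

    // Returns the blob registered under "weights", or an empty pointer if none is attached.
    InferenceEngine::Blob::Ptr findWeights(const InferenceEngine::CNNLayer &layer) {
        auto it = layer.blobs.find("weights");
        if (it != layer.blobs.end()) {
            return it->second;
        }
        return nullptr;
    }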
std::string GetParamAsString(const char *param, const char *def) const
Returns a string value for the given parameter or returns the default one.
Definition: ie_layers.h:430
std::vector< DataPtr > outData
A vector of pointers to the output data elements of this layer in the di-graph (order matters).
Definition: ie_layers.h:63
eBinaryConvolutionMode
Defines possible modes of binary convolution operation.
Definition: ie_layers.h:726
int from
Definition: ie_layers.h:1134
bool CheckParamPresence(const char *param) const
Checks whether the parameter is present in the layer.
Definition: ie_layers.h:443
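A sketch that guards a typed read with a presence check (the "axis" attribute name and the 0 fallback are hypothetical):

    #include "ie_layers.h"

    // The single-argument accessors throw for missing attributes, so check first.
    int readAxisOrDefault(const InferenceEngine::CNNLayer &layer) {
        return layer.CheckParamPresence("axis") ? layer.GetParamAsInt("axis") : 0;
    }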
This class represents a standard Depth To Space layer. Depth To Space picks from the input tensor according to its parameters.
Definition: ie_layers.h:1574
This class represents a general matrix multiplication operation layer. Formula is: dst := alpha*src1*src2 + beta*src3.
Definition: ie_layers.h:1434
int axis
Definition: ie_layers.h:1138
std::vector< int > dim
A vector of dimensions to be preserved.
Definition: ie_layers.h:1054
std::string name
Layer name.
Definition: ie_layers.h:51
This is a header file with common inference engine definitions.
ConvolutionLayer(const LayerParams &p)
Creates a new ConvolutionLayer instance.
Definition: ie_layers.h:573
This class holds precision value and provides precision related operations.
Definition: ie_precision.hpp:19
This class represents a standard Broadcast layer. Broadcast modifies the input tensor dimensions according to its parameters.
Definition: ie_layers.h:1707