ie_layers.h
1 // Copyright (C) 2018-2020 Intel Corporation
2 // SPDX-License-Identifier: Apache-2.0
3 //
4 
5 /**
6  * @brief A header file for the internal Layers structure that describes layer information
7  *
8  * @file ie_layers.h
9  */
10 #pragma once
11 
12 #include <algorithm>
13 #include <cctype>
14 #include <iterator>
15 #include <limits>
16 #include <map>
17 #include <memory>
18 #include <string>
19 #include <vector>
20 
21 #include "ie_blob.h"
22 #include "ie_common.h"
23 #include "ie_data.h"
24 #include "ie_layers_property.hpp"
25 
26 namespace InferenceEngine {
27 /**
28  * @brief This is an internal structure that holds common Layer parameters used for parsing
29  */
30 struct LayerParams {
31  /// @brief Layer name
32  std::string name;
33  /// @brief Layer type
34  std::string type;
35  /// @brief Layer precision
36  Precision precision;
37 };
38 
39 /**
40  * @brief This is a base abstraction Layer - all DNN Layers inherit from this class
41  */
42 class INFERENCE_ENGINE_API_CLASS(CNNLayer) {
43 public:
44  /**
45  * @brief A shared pointer to CNNLayer
46  */
47  using Ptr = std::shared_ptr<CNNLayer>;
48 
49  /**
50  * @brief Layer name
51  */
52  std::string name;
53  /**
54  * @brief Layer type
55  */
56  std::string type;
57  /**
58  * @brief Layer base operating precision
59  */
60  Precision precision;
61  /**
62  * @brief A vector of pointers to the output data elements of this layer in the di-graph (order matters)
63  */
64  std::vector<DataPtr> outData;
65  /**
66  * @brief A vector of weak pointers to the input data elements of this layer in the di-graph (order matters)
67  */
68  std::vector<DataWeakPtr> insData;
69  /**
70  * @brief If suggested to fuse - a pointer to the layer which needs to be fused with this layer
71  */
72  Ptr _fusedWith;
73  /**
74  * @brief Convenience user values to store in this object as extra data
75  */
76  UserValue userValue;
77  /**
78  * @brief Layer affinity set by user.
79  */
80  std::string affinity;
81 
82  /**
83  * @brief A constructor. Creates a new CNNLayer instance and initializes layer parameters with the given values.
84  *
85  * @param prms Basic common parsing parameters
86  */
87  explicit CNNLayer(const LayerParams& prms)
88  : name(prms.name), type(prms.type), precision(prms.precision), userValue({0}) {}
89 
90  /**
91  * @brief A virtual destructor
92  */
93  virtual ~CNNLayer();
94 
95  /**
96  * @brief Sets a layer to be fused with
97  *
98  * @param layer Reference to the layer to be fused with
99  */
100  void fuse(Ptr& layer) {
101  _fusedWith = layer;
102  }
103 
104  /**
105  * @brief Returns the first element of the input data for this layer
106  *
107  * @return A smart pointer to the input data element
108  */
109  virtual const DataPtr input() const {
110  if (insData.empty()) {
111  THROW_IE_EXCEPTION << "Internal error: input data is empty";
112  }
113  auto lockedFirstInsData = insData[0].lock();
114  if (!lockedFirstInsData) {
115  THROW_IE_EXCEPTION << "Internal error: unable to lock weak_ptr\n";
116  }
117  return lockedFirstInsData;
118  }
119 
120  /**
121  * @brief Checks if the input data and layer data are legitimate
122  */
123  void validateLayer();
124 
125  /**
126  * @brief Parse string with float in accordance with IE rules
127  *
128  * @param str input string with float value
129  * @return float value if parsing was successful
130  * @throws InferenceEngineException in case of parsing error
131  */
132  static float ie_parse_float(const std::string& str) {
133  if (str == "-inf") {
134  return -std::numeric_limits<float>::infinity();
135  } else if (str == "inf") {
136  return std::numeric_limits<float>::infinity();
137  } else {
138  float res;
139  std::stringstream val_stream(str);
140  val_stream.imbue(std::locale("C"));
141  val_stream >> res;
142  if (!val_stream.eof()) THROW_IE_EXCEPTION;
143  return res;
144  }
145  }
146  /**
147  * @brief Serializes a float value with "C" locale formatting
148  * Used for serializing default values
149  */
150  static std::string ie_serialize_float(float value) {
151  std::stringstream val_stream;
152  val_stream.imbue(std::locale("C"));
153  val_stream << value;
154  return val_stream.str();
155  }
156 
157  /**
158  * @brief Gets float value for the given parameter
159  *
160  * @param param name of the parameter to find
161  * @param def default value of the parameter if not found
162  * @return float value
163  */
164  float GetParamAsFloat(const char* param, float def) const {
165  std::string val = GetParamAsString(param, ie_serialize_float(def).c_str());
166  try {
167  return ie_parse_float(val);
168  } catch (...) {
169  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ". Value "
170  << val << " cannot be casted to float.";
171  }
172  }
173 
174  /**
175  * @brief Returns a float value for the given layer parameter
176  *
177  * @param param Name of the layer parameter
178  * @return A float value for the specified parameter
179  */
180  float GetParamAsFloat(const char* param) const {
181  std::string val = GetParamAsString(param);
182  try {
183  return ie_parse_float(val);
184  } catch (...) {
185  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ". Value "
186  << val << " cannot be casted to float.";
187  }
188  }
189 
190  /**
191  * @brief Returns a vector of float values for the given parameter or returns the default value
192  *
193  * @param param Name of the layer parameter
194  * @param def Default value of the parameter if not found
195  * @return vector of float values
196  */
197  std::vector<float> GetParamAsFloats(const char* param, std::vector<float> def) const {
198  std::string vals = GetParamAsString(param, "");
199  std::vector<float> result;
200  std::istringstream stream(vals);
201  std::string str;
202  if (vals.empty()) return def;
203  while (getline(stream, str, ',')) {
204  try {
205  float val = ie_parse_float(str);
206  result.push_back(val);
207  } catch (...) {
208  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
209  << ". Value " << vals << " cannot be casted to floats.";
210  }
211  }
212  return result;
213  }
214 
215  /**
216  * @brief Returns a vector of float values for the given parameter
217  *
218  * @param param Name of the layer parameter
219  * @return vector of float values
220  */
221  std::vector<float> GetParamAsFloats(const char* param) const {
222  std::string vals = GetParamAsString(param);
223  std::vector<float> result;
224  std::istringstream stream(vals);
225  std::string str;
226  while (getline(stream, str, ',')) {
227  try {
228  float val = ie_parse_float(str);
229  result.push_back(val);
230  } catch (...) {
231  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
232  << ". Value " << vals << " cannot be casted to floats.";
233  }
234  }
235  return result;
236  }
237 
238  /**
239  * @brief Returns an integer value for the given parameter or returns the default value
240  *
241  * @param param Name of the layer parameter
242  * @param def Default value of the parameter if not found
243  * @return An int value for the specified parameter
244  */
245  int GetParamAsInt(const char* param, int def) const {
246  std::string val = GetParamAsString(param, std::to_string(def).c_str());
247  try {
248  return std::stoi(val);
249  } catch (...) {
250  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ". Value "
251  << val << " cannot be casted to int.";
252  }
253  }
254 
255  /**
256  * @brief Returns an integer value for the given parameter
257  *
258  * @param param Name of the layer parameter
259  * @return An int value for the specified parameter
260  */
261  int GetParamAsInt(const char* param) const {
262  std::string val = GetParamAsString(param);
263  try {
264  return std::stoi(val);
265  } catch (...) {
266  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ". Value "
267  << val << " cannot be casted to int.";
268  }
269  }
270 
271  /**
272  * @brief Returns a vector of int values for the given parameter or returns the default value
273  *
274  * @param param Name of the layer parameter
275  * @param def Default value of the parameter if not found
276  * @return vector of int values
277  */
278  std::vector<int> GetParamAsInts(const char* param, std::vector<int> def) const {
279  std::string vals = GetParamAsString(param, "");
280  std::vector<int> result;
281  std::istringstream stream(vals);
282  std::string str;
283  if (vals.empty()) return def;
284  while (getline(stream, str, ',')) {
285  try {
286  result.push_back(std::stoi(str));
287  } catch (...) {
288  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
289  << ". Value " << vals << " cannot be casted to int.";
290  }
291  }
292  return result;
293  }
294 
295  /**
296  * @brief Returns a vector of int values for the given parameter
297  *
298  * @param param Name of the layer parameter
299  * @return vector of int values
300  */
301  std::vector<int> GetParamAsInts(const char* param) const {
302  std::string vals = GetParamAsString(param);
303  std::vector<int> result;
304  std::istringstream stream(vals);
305  std::string str;
306  while (getline(stream, str, ',')) {
307  try {
308  result.push_back(std::stoi(str));
309  } catch (...) {
310  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
311  << ". Value " << vals << " cannot be casted to int.";
312  }
313  }
314  return result;
315  }
316  /**
317  * @brief Returns an unsigned integer value for the given parameter or returns the default value
318  *
319  * @param param Name of the layer parameter
320  * @param def Default value of the parameter if not found
321  * @return An unsigned integer value for the specified parameter
322  */
323  unsigned int GetParamAsUInt(const char* param, unsigned int def) const {
324  std::string val = GetParamAsString(param, std::to_string(def).c_str());
325  std::string message = "Cannot parse parameter " + std::string(param) + " from IR for layer " + name +
326  ". Value " + val + " cannot be casted to int.";
327  try {
328  int value = std::stoi(val);
329  if (value < 0) {
330  THROW_IE_EXCEPTION << message;
331  }
332  return static_cast<unsigned int>(value);
333  } catch (...) {
334  THROW_IE_EXCEPTION << message;
335  }
336  }
337 
338  /**
339  * @brief Returns an unsigned integer value for the given parameter
340  *
341  * @param param Name of the layer parameter
342  * @return An unsigned integer value for the specified parameter
343  */
344  unsigned int GetParamAsUInt(const char* param) const {
345  std::string val = GetParamAsString(param);
346  std::string message = "Cannot parse parameter " + std::string(param) + " from IR for layer " + name +
347  ". Value " + val + " cannot be casted to unsigned int.";
348  try {
349  int value = std::stoi(val);
350  if (value < 0) {
351  THROW_IE_EXCEPTION << message;
352  }
353  return static_cast<unsigned int>(value);
354  } catch (...) {
355  THROW_IE_EXCEPTION << message;
356  }
357  }
358 
359  /**
360  * @brief Returns a vector of unsigned int values for the given parameter or returns the default value
361  *
362  * @param param Name of the layer parameter
363  * @param def Default value of the parameter if not found
364  * @return vector of unsigned int values
365  */
366  std::vector<unsigned int> GetParamAsUInts(const char* param, std::vector<unsigned int> def) const {
367  std::string vals = GetParamAsString(param, "");
368  std::vector<unsigned int> result;
369  std::istringstream stream(vals);
370  std::string str;
371  std::string message = "Cannot parse parameter " + std::string(param) + " " + str + " from IR for layer " +
372  name + ". Value " + vals + " cannot be casted to unsigned int.";
373  if (vals.empty()) return def;
374  while (getline(stream, str, ',')) {
375  try {
376  int value = std::stoi(str);
377  if (value < 0) {
378  THROW_IE_EXCEPTION << message;
379  }
380  result.push_back(static_cast<unsigned int>(value));
381  } catch (...) {
382  THROW_IE_EXCEPTION << message;
383  }
384  }
385  return result;
386  }
387 
388  /**
389  * @brief Returns a vector of unsigned int values for the given parameter
390  *
391  * @param param Name of the layer parameter
392  * @return vector of unsigned int values
393  */
394  std::vector<unsigned int> GetParamAsUInts(const char* param) const {
395  std::string vals = GetParamAsString(param);
396  std::vector<unsigned int> result;
397  std::istringstream stream(vals);
398  std::string str;
399  std::string message = "Cannot parse parameter " + std::string(param) + " " + str + " from IR for layer " +
400  name + ". Value " + vals + " cannot be casted to int.";
401  while (getline(stream, str, ',')) {
402  try {
403  int value = std::stoi(str);
404  if (value < 0) {
405  THROW_IE_EXCEPTION << message;
406  }
407  result.push_back(static_cast<unsigned int>(value));
408  } catch (...) {
409  THROW_IE_EXCEPTION << message;
410  }
411  }
412  return result;
413  }
414  /**
415  * @brief Returns a boolean value for the given parameter.
416  *
417  * The valid values are (true, false, 1, 0).
418  * @param param Name of the layer parameter
419  * @param def Default value of the parameter if not found
420  * @return A bool value for the specified parameter
421  */
422  bool GetParamAsBool(const char* param, bool def) const {
423  std::string val = GetParamAsString(param, std::to_string(def).c_str());
424  std::string loweredCaseValue;
425  std::transform(val.begin(), val.end(), std::back_inserter(loweredCaseValue), [](char value) {
426  return std::tolower(value);
427  });
428 
429  bool result = false;
430 
431  if (!(std::istringstream(loweredCaseValue) >> std::boolalpha >> result)) {
432  // attempting parse using non alpha bool
433  return (GetParamAsInt(param, def) != 0);
434  }
435 
436  return result;
437  }
438  /**
439  * @brief Returns a boolean value for the given parameter
440  *
441  * @param param Name of the layer parameter
442  * @return A bool value for the specified parameter
443  */
444  bool GetParamAsBool(const char* param) const {
445  std::string val = GetParamAsString(param);
446  std::string loweredCaseValue;
447  std::transform(val.begin(), val.end(), std::back_inserter(loweredCaseValue), [](char value) {
448  return std::tolower(value);
449  });
450 
451  bool result = false;
452 
453  if (!(std::istringstream(loweredCaseValue) >> std::boolalpha >> result)) {
454  // attempting parse using non alpha bool
455  return (GetParamAsInt(param) != 0);
456  }
457 
458  return result;
459  }
460 
461  /**
462  * @brief Returns a string value for the given parameter or returns the default one
463  *
464  * @param param Name of the layer parameter
465  * @param def Default value of the parameter if not found
466  * @return A string value
467  */
468  std::string GetParamAsString(const char* param, const char* def) const {
469  auto it = params.find(param);
470  if (it == params.end() || it->second.empty()) {
471  return def;
472  }
473  return (*it).second;
474  }
475 
476  /**
477  * @brief Checks the param presence in the layer
478  *
479  * @param param Name of the layer parameter
480  * @return True if the parameter is present, false otherwise
481  */
482  bool CheckParamPresence(const char* param) const {
483  auto it = params.find(param);
484  if (it == params.end()) {
485  return false;
486  }
487  return true;
488  }
489 
490  /**
491  * @brief Returns a string value for the given parameter.
492  *
493  * Throws exception if parameter was not found.
494  * @param param Name of the layer parameter
495  * @return A string value
496  */
497  std::string GetParamAsString(const char* param) const {
498  auto it = params.find(param);
499  if (it == params.end()) {
500  THROW_IE_EXCEPTION << "No such parameter name '" << param << "' for layer " << name;
501  }
502  return (*it).second;
503  }
504 
505  std::vector<std::string> GetParamAsStrings(const char* param, std::vector<std::string> def) const {
506  std::string vals = GetParamAsString(param, "");
507  std::vector<std::string> result;
508  std::istringstream stream(vals);
509  std::string str;
510  if (vals.empty()) return def;
511  while (getline(stream, str, ',')) {
512  try {
513  result.push_back(str);
514  } catch (...) {
515  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ".";
516  }
517  }
518  return result;
519  }
520 
521  /**
522  * @brief Map of pairs: (parameter name, parameter value)
523  */
524  std::map<std::string, std::string> params;
525 
526  /**
527  * @brief Map of pairs: (name, weights/biases blob)
528  */
529  std::map<std::string, Blob::Ptr> blobs;
530 };
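
// Usage sketch (illustrative only, not part of the IE API above): reading IR attributes
// through the CNNLayer parameter accessors declared in this class. The parameter names
// "group", "strides" and "fused" are hypothetical entries of the params map.
inline void ReadExampleLayerParams(const CNNLayer::Ptr& layer) {
    // Scalar attribute with a default used when it is absent in the IR
    int group = layer->GetParamAsInt("group", 1);
    // Comma-separated attribute parsed into a vector of floats
    std::vector<float> strides = layer->GetParamAsFloats("strides", {1.f, 1.f});
    // Boolean attributes accept true/false as well as 1/0
    bool fused = layer->GetParamAsBool("fused", false);
    (void)group; (void)strides; (void)fused;
}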
531 
532 /**
533  * @brief Alias for CNNLayer object
534  */
535 using GenericLayer = class CNNLayer;
536 
537 /**
538  * @brief This class represents a layer with Weights and/or Biases (e.g. Convolution/Fully Connected, etc.)
539  */
540 class INFERENCE_ENGINE_API_CLASS(WeightableLayer): public CNNLayer {
541 public:
542  /**
543  * @brief A default constructor. Constructs a WeightableLayer instance and initializes layer parameters with the given
544  * values
545  *
546  * @param prms Initial layer parameters
547  */
548  explicit WeightableLayer(const LayerParams& prms): CNNLayer(prms) {}
549 
550  /**
551  * @brief A pointer to a weights blob
552  */
553  Blob::Ptr _weights;
554  /**
555  * @brief A pointer to a biases blob
556  */
557  Blob::Ptr _biases;
558 
559  /**
560  * @brief Constructs a WeightableLayer instance and initializes layer parameters with the given values
561  */
562  using CNNLayer::CNNLayer;
563 
564  virtual ~WeightableLayer();
565 };
566 
567 /**
568  * @brief Convenient way to declare a property with backward compatibility to 2D members
569  */
570 #define DEFINE_PROP(prop_name) \
571  PropertyVector<unsigned int> prop_name; \
572  unsigned int& prop_name##_x = prop_name.at(X_AXIS); \
573  unsigned int& prop_name##_y = prop_name.at(Y_AXIS);
574 
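
// Expansion sketch (illustrative): DEFINE_PROP(_kernel) declares the N-D property plus
// X/Y reference aliases, so legacy 2D code can keep using the _x/_y members while N-D
// code indexes the PropertyVector directly. The struct name below is hypothetical and
// only demonstrates the expansion; layers such as ConvolutionLayer use the macro the same way.
struct ExamplePropHolder {
    DEFINE_PROP(_kernel);                   // expands to _kernel, _kernel_x, _kernel_y
    ExamplePropHolder(): _kernel(2, 0u) {}  // two elements so the X_AXIS/Y_AXIS references are valid
};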
575 /**
576  * @brief This class represents a standard 3D Convolution Layer
577  */
578 class INFERENCE_ENGINE_API_CLASS(ConvolutionLayer): public WeightableLayer {
579 public:
580  /**
581  * @brief A convolution kernel array [X, Y, Z, ...]
582  */
583  DEFINE_PROP(_kernel);
584  /**
585  * @brief A convolution paddings begin array [X, Y, Z, ...]
586  */
587  DEFINE_PROP(_padding);
588  /**
589  * @brief A convolution paddings end array [X, Y, Z, ...]
590  */
591  DEFINE_PROP(_pads_end);
592  /**
593  * @brief A convolution strides array [X, Y, Z, ...]
594  */
595  DEFINE_PROP(_stride);
596  /**
597  * @brief A convolution dilations array [X, Y, Z, ...]
598  */
599  DEFINE_PROP(_dilation);
600  /**
601  * @brief A number of output feature maps (size) generating the 3rd output dimension
602  */
603  unsigned int _out_depth = 0u;
604  /**
605  * @brief Number of groups
606  */
607  unsigned int _group = 1u;
608  /**
609  * @brief Auto padding type
610  */
611  std::string _auto_pad;
612 
613  /**
614  * @brief Creates a new ConvolutionLayer instance.
615  */
616  explicit ConvolutionLayer(const LayerParams& p)
617  : WeightableLayer(p), _kernel(2, 0u), _padding(2, 0u), _stride(2, 1u), _dilation(2, 1u) {}
618  /**
619  * @brief assignment operator
620  */
621  ConvolutionLayer& operator=(const ConvolutionLayer& that) {
622  if (&that != this) {
623  WeightableLayer::operator=(that);
624  _kernel = that._kernel;
625  _padding = that._padding;
626  _pads_end = that._pads_end;
627  _stride = that._stride;
628  _dilation = that._dilation;
629  _out_depth = that._out_depth;
630  _group = that._group;
631  }
632  return *this;
633  }
634  /**
635  * @brief copy constructor
636  */
637  ConvolutionLayer(const ConvolutionLayer& that): WeightableLayer(that) {
638  operator=(that);
639  }
640  /**
641  * @brief move constructor
642  */
643  ConvolutionLayer(ConvolutionLayer&&) = default;
644 
645  virtual ~ConvolutionLayer();
646 };
647 
648 /**
649  * @brief This class represents a standard deconvolution layer
650  */
651 class INFERENCE_ENGINE_API_CLASS(DeconvolutionLayer): public ConvolutionLayer {
652 public:
653  using ConvolutionLayer::ConvolutionLayer;
654  using ConvolutionLayer::operator=;
655 
656  virtual ~DeconvolutionLayer();
657 };
658 
659 /**
660  * @brief This class represents a standard deformable convolution layer
661  */
662 class INFERENCE_ENGINE_API_CLASS(DeformableConvolutionLayer): public ConvolutionLayer {
663 public:
664  using ConvolutionLayer::ConvolutionLayer;
665  using ConvolutionLayer::operator=;
666 
667  /**
668  * @brief Number of deformable groups
669  */
670  unsigned int _deformable_group = 1u;
671 
672  virtual ~DeformableConvolutionLayer();
673 };
674 
675 /**
676  * @brief This class represents a standard pooling layer
677  */
678 class INFERENCE_ENGINE_API_CLASS(PoolingLayer): public CNNLayer {
679 public:
680  /**
681  * @brief Pooling kernel array [X, Y, Z, ...]
682  */
683  DEFINE_PROP(_kernel);
684  /**
685  * @brief Pooling paddings begin array [X, Y, Z, ...]
686  */
687  DEFINE_PROP(_padding);
688  /**
689  * @brief Pooling paddings end array [X, Y, Z, ...]
690  */
691  DEFINE_PROP(_pads_end);
692  /**
693  * @brief Pooling strides array [X, Y, Z, ...]
694  */
695  DEFINE_PROP(_stride);
696 
697  /**
698  * @enum PoolType
699  * @brief Defines available pooling types
700  */
701  enum PoolType { MAX = 1, AVG = 2, STOCH = 3, ROI = 4, SPACIAL_PYRAMID = 5 };
702 
703  /**
704  * @brief A pooling type
705  */
706  PoolType _type = MAX;
707 
708  /**
709  * @brief A flag that indicates if padding is excluded or not
710  */
711  bool _exclude_pad = false;
712  /**
713  * @brief Auto padding type
714  */
715  std::string _auto_pad;
716 
717  /**
718  * @brief Creates a new PoolingLayer instance.
719  */
720  explicit PoolingLayer(const LayerParams& p): CNNLayer(p), _kernel(2, 0u), _padding(2, 0u), _stride(2, 0u) {}
721 
722  /**
723  * @brief assignment operator
724  */
725  PoolingLayer& operator=(const PoolingLayer& that) {
726  if (&that != this) {
727  CNNLayer::operator=(that);
728  _kernel = that._kernel;
729  _padding = that._padding;
730  _pads_end = that._pads_end;
731  _stride = that._stride;
732  _type = that._type;
733  _exclude_pad = that._exclude_pad;
734  }
735  return *this;
736  }
737  /**
738  * @brief copy constructor
739  */
740  PoolingLayer(const PoolingLayer& that): CNNLayer(that) {
741  operator=(that);
742  }
743 
744  /**
745  * @brief move constructor
746  */
747  PoolingLayer(PoolingLayer&&) = default;
748 
749  virtual ~PoolingLayer();
750 };
751 
752 /**
753  * @brief This class represents a standard binary convolution layer
754  */
755 class INFERENCE_ENGINE_API_CLASS(BinaryConvolutionLayer): public WeightableLayer {
756 public:
757  /**
758  * @enum eBinaryConvolutionMode
759  * @brief Defines possible modes of binary convolution operation
760  */
761  enum eBinaryConvolutionMode { xnor_popcount = 0 };
762 
763  /**
764  * @brief Mode of binary convolution operation
765  */
766  eBinaryConvolutionMode _mode = xnor_popcount;
767 
768  /**
769  * @brief A number of input feature maps (size) generating the 3rd input dimension
770  */
771  unsigned int _in_depth = 0u;
772 
773  /**
774  * @brief A pad value which is used to fill pad area
775  */
776  float _pad_value = 0.0f;
777 
778  /**
779  * @brief A convolution kernel array [X, Y, Z, ...]
780  */
781  DEFINE_PROP(_kernel);
782  /**
783  * @brief A convolution paddings begin array [X, Y, Z, ...]
784  */
785  DEFINE_PROP(_padding);
786  /**
787  * @brief A convolution paddings end array [X, Y, Z, ...]
788  */
789  DEFINE_PROP(_pads_end);
790  /**
791  * @brief A convolution strides array [X, Y, Z, ...]
792  */
793  DEFINE_PROP(_stride);
794  /**
795  * @brief A convolution dilations array [X, Y, Z, ...]
796  */
797  DEFINE_PROP(_dilation);
798  /**
799  * @brief A number of output feature maps (size) generating the 3'rd output dimension
800  */
801  unsigned int _out_depth = 0u;
802  /**
803  * @brief Number of groups
804  */
805  unsigned int _group = 1u;
806  /**
807  * @brief Auto padding type
808  */
809  std::string _auto_pad;
810 
811  /**
812  * @brief Creates a new BinaryConvolutionLayer instance.
813  */
814  explicit BinaryConvolutionLayer(const LayerParams& p)
815  : WeightableLayer(p), _kernel(2, 0u), _padding(2, 0u), _stride(2, 1u), _dilation(2, 1u) {}
816  /**
817  * @brief assignment operator
818  */
819  BinaryConvolutionLayer& operator=(const BinaryConvolutionLayer& that) {
820  if (&that != this) {
821  WeightableLayer::operator=(that);
822  _kernel = that._kernel;
823  _padding = that._padding;
824  _pads_end = that._pads_end;
825  _stride = that._stride;
826  _dilation = that._dilation;
827  _out_depth = that._out_depth;
828  _group = that._group;
829  _mode = that._mode;
830  _in_depth = that._in_depth;
831  _pad_value = that._pad_value;
832  }
833  return *this;
834  }
835  /**
836  * @brief copy constructor
837  */
838  BinaryConvolutionLayer(const BinaryConvolutionLayer& that): WeightableLayer(that) {
839  operator=(that);
840  }
841  /**
842  * @brief move constructor
843  */
844  BinaryConvolutionLayer(BinaryConvolutionLayer&&) = default;
845 
846  virtual ~BinaryConvolutionLayer();
847 };
848 
849 #undef DEFINE_PROP
850 
851 /**
852  * @brief This class represents a fully connected layer
853  */
854 class INFERENCE_ENGINE_API_CLASS(FullyConnectedLayer): public WeightableLayer {
855 public:
856  /**
857  * @brief A size of output
858  */
859  unsigned int _out_num = 0;
860 
861  /**
862  * @brief Creates a new FullyConnectedLayer instance and initializes layer parameters with the given values.
863  */
864  using WeightableLayer::WeightableLayer;
865 
866  virtual ~FullyConnectedLayer();
867 };
868 
869 /**
870  * @brief This class represents concatenation layer
871  *
872  * Takes as input several data elements and merges them to one using the supplied axis
873  */
874 class INFERENCE_ENGINE_API_CLASS(ConcatLayer): public CNNLayer {
875 public:
876  /**
877  * @brief An axis on which concatenation operation is performed
878  */
879  unsigned int _axis = 1;
880 
881  /**
882  * @brief Creates a new ConcatLayer instance and initializes layer parameters with the given values.
883  *
884  * If batch is used, then batch needs to be specified as an input dimension also
885  * In current implementation 1 means channels, 0 - batch
886  */
887  using CNNLayer::CNNLayer;
888 
889  virtual ~ConcatLayer();
890 };
891 
892 /**
893  * @brief This class represents a layer that evenly splits the input into the supplied outputs
894  */
895 class INFERENCE_ENGINE_API_CLASS(SplitLayer): public CNNLayer {
896 public:
897  /**
898  * @brief An axis on which split operation is performed
899  */
900  unsigned int _axis = 1;
901 
902  /**
903  * @brief Creates a new SplitLayer instance.
904  */
905  using CNNLayer::CNNLayer;
906 
907  virtual ~SplitLayer();
908 };
909 
910 /**
911  * @brief This class represents a Local Response Normalization (LRN) Layer
912  */
913 class INFERENCE_ENGINE_API_CLASS(NormLayer): public CNNLayer {
914 public:
915  /**
916  * @brief Response size
917  */
918  unsigned int _size = 0;
919  /**
920  * @brief K
921  */
922  unsigned int _k = 1;
923  /**
924  * @brief Alpha coefficient
925  */
926  float _alpha = 0;
927  /**
928  * @brief Beta coefficient
929  */
930  float _beta = 0;
931  /**
932  * @brief Flag to specify normalization across feature maps (true) or across channels
933  */
934  bool _isAcrossMaps = false;
935 
936  /**
937  * @brief Creates a new NormLayer instance.
938  */
939  using CNNLayer::CNNLayer;
940 
941  virtual ~NormLayer();
942 };
943 
944 /**
945  * @brief This class represents standard softmax Layer
946  */
947 class INFERENCE_ENGINE_API_CLASS(SoftMaxLayer): public CNNLayer {
948 public:
949  /**
950  * @brief Axis number for a softmax operation
951  */
952  int axis = 1;
953  /**
954  * @brief Creates a new SoftMaxLayer instance.
955  */
956  using CNNLayer::CNNLayer;
957 
958  virtual ~SoftMaxLayer();
959 };
960 
961 /**
962  * @class GRNLayer
963  * @brief This class represents standard GRN Layer
964  */
965 class INFERENCE_ENGINE_API_CLASS(GRNLayer): public CNNLayer {
966 public:
967  /**
968  * @brief A default constructor. Creates a new GRNLayer instance and initializes layer parameters with the given
969  * values.
970  *
971  * @param prms Initial layer parameters
972  */
973  explicit GRNLayer(const LayerParams& prms): CNNLayer(prms), bias(0.f) {}
974 
975  /**
976  * @brief Bias for squares sum
977  */
978  float bias = 0.f;
979 
980  virtual ~GRNLayer();
981 };
982 
983 /**
984  * @class MVNLayer
985  * @brief This class represents standard MVN Layer
986  */
987 class INFERENCE_ENGINE_API_CLASS(MVNLayer): public CNNLayer {
988 public:
989  /**
990  * @brief A default constructor. Creates a new MVNLayer instance and initializes layer parameters with the given
991  * values.
992  *
993  * @param prms Initial layer parameters
994  */
995  explicit MVNLayer(const LayerParams& prms): CNNLayer(prms), across_channels(0), normalize(1) {}
996 
997  /**
998  * @brief Indicate that mean value is calculated across channels
999  */
1000  int across_channels = 0;
1001 
1002  /**
1003  * @brief Indicate that the result needs to be normalized
1004  */
1005  int normalize = 1;
1006 
1007  virtual ~MVNLayer();
1008 };
1009 
1010 /**
1011  * @brief This class represents a Rectified Linear activation layer
1012  */
1013 class INFERENCE_ENGINE_API_CLASS(ReLULayer): public CNNLayer {
1014 public:
1015  /**
1016  * @brief Negative slope is used to tackle negative inputs instead of setting them to 0
1017  */
1018  float negative_slope = 0.0f;
1019 
1020  /**
1021  * @brief Creates a new ReLULayer instance.
1022  */
1023  using CNNLayer::CNNLayer;
1024 
1025  virtual ~ReLULayer();
1026 };
1027 
1028 /**
1029  * @brief This class represents a Clamp activation layer
1030  *
1031  * Clamps all tensor elements into the range [min_value, max_value]
1032  */
1033 class INFERENCE_ENGINE_API_CLASS(ClampLayer): public CNNLayer {
1034 public:
1035  /**
1036  * @brief A minimum value
1037  */
1038  float min_value = 0.0f;
1039 
1040  /**
1041  * @brief A maximum value
1042  */
1043  float max_value = 1.0f;
1044  /**
1045  * @brief Creates a new ClampLayer instance.
1046  */
1047  using CNNLayer::CNNLayer;
1048 
1049  virtual ~ClampLayer();
1050 };
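
// Semantics sketch (illustrative, assuming the element-wise clamping described above):
// each value is limited to the range [min_value, max_value]. Uses std::min/std::max
// from <algorithm>, which is already included by this header.
inline float ClampExample(const ClampLayer& layer, float x) {
    return std::min(std::max(x, layer.min_value), layer.max_value);
}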
1051 
1052 /**
1053  * @brief This class represents a ReLU6 activation layer
1054  *
1055  * Clamps all tensor elements into the range [0, 6.0]
1056  */
1057 class INFERENCE_ENGINE_API_CLASS(ReLU6Layer): public ClampLayer {
1058 public:
1059  explicit ReLU6Layer(const LayerParams& prms): ClampLayer(prms) {
1060  max_value = 6.0f;
1061  }
1062 
1063  virtual ~ReLU6Layer();
1064 };
1065 
1066 /**
1067  * @brief This class represents an element wise operation layer
1068  */
1069 class INFERENCE_ENGINE_API_CLASS(EltwiseLayer): public CNNLayer {
1070 public:
1071  /**
1072  * @enum eOperation
1073  * @brief Defines possible operations that can be used
1074  */
1075  enum eOperation {
1076  Sum = 0,
1077  Prod,
1078  Max,
1079  Sub,
1080  Min,
1081  Div,
1082  Squared_diff,
1083  Floor_mod,
1084  Pow,
1085  Equal,
1086  Not_equal,
1087  Less,
1088  Less_equal,
1089  Greater,
1090  Greater_equal,
1091  Logical_AND,
1092  Logical_OR,
1093  Logical_XOR,
1094  Logical_NOT,
1095  Mean
1096  };
1097 
1098  /**
1099  * @brief A type of the operation to use
1100  */
1101  eOperation _operation = Sum;
1102 
1103  /**
1104  * @brief A vector of coefficients to scale the operands
1105  */
1106  std::vector<float> coeff;
1107 
1108  /**
1109  * @brief Creates a new EltwiseLayer instance.
1110  */
1111  using CNNLayer::CNNLayer;
1112 
1113  virtual ~EltwiseLayer();
1114 };
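
// Semantics sketch (illustrative, assuming the Sum operation with the per-operand
// coefficients described above): out = coeff[0] * a + coeff[1] * b for two inputs.
inline float EltwiseSumExample(float a, float b, const std::vector<float>& coeff) {
    // When no coefficients are supplied the operands are taken as-is (coefficient 1.0)
    const float c0 = coeff.size() > 0 ? coeff[0] : 1.0f;
    const float c1 = coeff.size() > 1 ? coeff[1] : 1.0f;
    return c0 * a + c1 * b;
}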
1115 
1116 /**
1117  * @brief This class represents a standard crop layer
1118  */
1119 class INFERENCE_ENGINE_API_CLASS(CropLayer): public CNNLayer {
1120 public:
1121  /**
1122  * @brief A vector of dimensions for cropping
1123  */
1124  std::vector<int> axis;
1125  /**
1126  * @brief A vector of dimensions to be preserved
1127  */
1128  std::vector<int> dim;
1129  /**
1130  * @brief A vector of offsets for each dimension
1131  */
1132  std::vector<int> offset;
1133 
1134  /**
1135  * @brief Creates a new CropLayer instance.
1136  */
1137  using CNNLayer::CNNLayer;
1138 
1139  virtual ~CropLayer();
1140 };
1141 
1142 /**
1143  * @brief This class represents a standard reshape layer
1144  */
1145 class INFERENCE_ENGINE_API_CLASS(ReshapeLayer): public CNNLayer {
1146 public:
1147  /**
1148  * @brief A vector of sizes of the shape
1149  */
1150  std::vector<int> shape;
1151  /**
1152  * @brief A number of axis to be taken for a reshape
1153  */
1154  int axis = 0;
1155  /**
1156  * @brief A number of first axes to be taken for a reshape
1157  */
1158  int num_axes = -1;
1159 
1160  /**
1161  * @brief Creates a new ReshapeLayer instance.
1162  */
1163  using CNNLayer::CNNLayer;
1164 
1165  virtual ~ReshapeLayer();
1166 };
1167 
1168 /**
1169  * @brief This class represents a standard Tile Layer
1170  */
1171 class INFERENCE_ENGINE_API_CLASS(TileLayer): public CNNLayer {
1172 public:
1173  /**
1174  * @brief An index of the axis to tile
1175  */
1176  int axis = -1;
1177  /**
1178  * @brief A number of copies to be made
1179  */
1180  int tiles = -1;
1181 
1182  /**
1183  * @brief Creates a new TileLayer instance.
1184  */
1185  using CNNLayer::CNNLayer;
1186 
1187  virtual ~TileLayer();
1188 };
1189 
1190 /**
1191  * @brief This class represents a Layer which performs Scale and Shift
1192  */
1193 class INFERENCE_ENGINE_API_CLASS(ScaleShiftLayer): public WeightableLayer {
1194 public:
1195  /**
1196  * @brief A flag that indicates if the same value is used for all the features. If false, the value is used pixel
1197  * wise
1198  */
1199  unsigned int _broadcast = 0;
1200 
1201  /**
1202  * @brief Creates a new ScaleShiftLayer instance.
1203  */
1204  using WeightableLayer::WeightableLayer;
1205 
1206  virtual ~ScaleShiftLayer();
1207 };
1208 
1209 /**
1210  * @brief This class represents TensorIterator layer
1211  */
1212 class INFERENCE_ENGINE_API_CLASS(TensorIterator): public CNNLayer {
1213 public:
1214  struct PortMap {
1215  // Data map rule
1216  int from; /**< Index of external data from ins/outs fields of CNNLayer */
1217  int to; /**< Index of internal data in iterator body */
1218 
1219  // Iteration rule
1220  int axis; /**< Axis to iterate through */
1221  int stride; /**< Stride to iterate through */
1222  int start; /**< Start index of iteration range */
1223  int end; /**< Last index of iteration range */
1224  int part_size; /**< Part size which will be transferred to body subnetwork */
1225  };
1226 
1227  struct Body {
1228  std::vector<DataPtr> inputs;
1229  std::vector<DataPtr> outputs;
1230  };
1231 
1232  std::vector<PortMap> input_port_map;
1233  std::vector<PortMap> output_port_map;
1234  std::vector<PortMap> back_edges;
1235 
1236  Body body;
1237 
1238  using CNNLayer::CNNLayer;
1239 
1240  virtual ~TensorIterator();
1241 };
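
// Configuration sketch (illustrative only; all index values are hypothetical): a PortMap
// that feeds external input #0 into body input #0 and slices it along axis 1, one element
// per iteration, over the whole range of that dimension.
inline TensorIterator::PortMap MakeExamplePortMap() {
    TensorIterator::PortMap rule = {};
    rule.from = 0;       // index of external data in insData of the TensorIterator
    rule.to = 0;         // index of internal data in the iterator body
    rule.axis = 1;       // axis to iterate through
    rule.stride = 1;     // stride between consecutive slices
    rule.start = 0;      // start index of the iteration range
    rule.end = -1;       // last index of the iteration range (hypothetically "to the end")
    rule.part_size = 1;  // part size transferred to the body per iteration
    return rule;
}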
1242 
1243 /**
1244  * @brief Base class for recurrent cell layers
1245  */
1246 class INFERENCE_ENGINE_API_CLASS(RNNCellBase): public WeightableLayer {
1247 public:
1248  using WeightableLayer::WeightableLayer;
1249 
1250  /**
1251  * @brief Direct type of recurrent cell (including subtypes)
1252  * Description of particular cell semantics is in LSTMCell, GRUCell, RNNCell.
1253  */
1254  enum CellType {
1255  LSTM, /**< Original LSTM cell */
1256  GRU, /**< Original GRU cell */
1257  RNN, /**< Original RNN cell */
1258  GRU_LBR, /**< GRU cell modification. "Linear before reset" */
1259  };
1260 
1261  /** @copybrief CellType */
1262  CellType cellType = LSTM;
1263 
1264  /**
1265  * @brief Size of hidden state data
1266  *
1267  * In case of batch output state tensor will have shape [N, hidden_size]
1268  */
1269  int hidden_size = 0;
1270 
1271  /**
1272  * @brief Clip data into range [-clip, clip] on input of activations
1273  *
1274  * clip==0.0f means no clipping
1275  */
1276  float clip = 0.0f;
1277  /**
1278  * @brief Activations used inside recurrent cell
1279  *
1280  * Valid values: sigmoid, tanh, relu
1281  */
1282  std::vector<std::string> activations;
1283 
1284  /**
1285  * @brief Alpha parameters of activations
1286  *
1287  * Respective to activation list.
1288  */
1289  std::vector<float> activation_alpha;
1290 
1291  /**
1292  * @brief Beta parameters of activations
1293  *
1294  * Respective to activation list.
1295  */
1296  std::vector<float> activation_beta;
1297 
1298  virtual ~RNNCellBase();
1299 };
1300 
1301 /**
1302  * @brief LSTM Cell layer
1303  *
1304  * G - number of gates (=4)
1305  * N - batch size
1306  * S - state size (=hidden_size)
1307  *
1308  * Inputs:
1309  * [N,D] Xt - input data
1310  * [N,S] Ht-1 - initial hidden state
1311  * [N,S] Ct-1 - initial cell state
1312  *
1313  * Outputs:
1314  * [N,S] Ht - out hidden state
1315  * [N,S] Ct - out cell state
1316  *
1317  * Weights:
1318  * - weights [G,S,D+S]
1319  * - biases [G,S]
1320  * NB! gates order is FICO {forget, input, candidate, output}
1321  *
1322  * activations is {_f, _g, _h}
1323  * default: {_f=sigm, _g=tanh, _h=tanh}
1324  *
1325  * Equations:
1326  *
1327  * * - matrix mult
1328  * (.) - eltwise mult
1329  * [,] - concatenation
1330  *
1331  * - ft = _f(Wf*[Ht-1, Xt] + Bf)
1332  * - it = _f(Wi*[Ht-1, Xt] + Bi)
1333  * - ct = _g(Wc*[Ht-1, Xt] + Bc)
1334  * - ot = _f(Wo*[Ht-1, Xt] + Bo)
1335  * - Ct = ft (.) Ct-1 + it (.) ct
1336  * - Ht = ot (.) _h(Ct)
1337  */
1338 class INFERENCE_ENGINE_API_CLASS(LSTMCell): public RNNCellBase {
1339 public:
1340  using RNNCellBase::RNNCellBase;
1341  using RNNCellBase::operator=;
1342 
1343  virtual ~LSTMCell();
1344 };
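
// Equation sketch (illustrative): one element of the state update from the formulas
// documented above, with the gate values ft, it, ct, ot already passed through the
// default activations (_f = sigmoid, _g = _h = tanh). Requires <cmath> for std::tanh.
inline void LstmStateUpdateExample(float ft, float it, float ct, float ot,
                                   float prevC, float& outC, float& outH) {
    outC = ft * prevC + it * ct;   // Ct = ft (.) Ct-1 + it (.) ct
    outH = ot * std::tanh(outC);   // Ht = ot (.) _h(Ct)
}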
1345 
1346 /**
1347  * @brief GRU Cell layer
1348  *
1349  * G - number of gates (=3)
1350  * N - batch size
1351  * S - state size (=hidden_size)
1352  *
1353  * Inputs:
1354  * [N,D] Xt - input data
1355  * [N,S] Ht-1 - initial hidden state
1356  *
1357  * Outputs:
1358  * [N,S] Ht - out hidden state
1359  *
1360  * Weights:
1361  * - weights [G,S,D+S]
1362  * - biases [G,S]
1363  * NB! gates order is ZRH {update, reset, output}
1364  *
1365  * activations is {_f, _g}
1366  * default: {_f=sigm, _g=tanh}
1367  *
1368  * Equations:
1369  *
1370  * * - matrix mult
1371  * (.) - eltwise mult
1372  * [,] - concatenation
1373  *
1374  * - zt = _f(Wz*[Ht-1, Xt] + Bz)
1375  * - rt = _f(Wr*[Ht-1, Xt] + Br)
1376  * - ht = _g(Wh*[rt (.) Ht-1, Xt] + Bh)
1377  * - Ht = (1 - zt) (.) ht + zt (.) Ht-1
1378  */
1379 class INFERENCE_ENGINE_API_CLASS(GRUCell): public RNNCellBase {
1380 public:
1381  using RNNCellBase::RNNCellBase;
1382  using RNNCellBase::operator=;
1383 
1384  virtual ~GRUCell();
1385 };
1386 
1387 /**
1388  * @brief RNN Cell layer
1389  *
1390  * G - number of gates (=1)
1391  * N - batch size
1392  * S - state size (=hidden_size)
1393  *
1394  * Inputs:
1395  * [N,D] Xt - input data
1396  * [N,S] Ht-1 - initial hidden state
1397  *
1398  * Outputs:
1399  * [N,S] Ht - out hidden state
1400  *
1401  * Weights:
1402  * - weights [G,S,D+S]
1403  * - biases [G,S]
1404  *
1405  * activations is {_f}
1406  * default: {_f=tanh}
1407  *
1408  * Equations:
1409  *
1410  * * - matrix mult
1411  * [,] - concatenation
1412  *
1413  * - Ht = _f(Wi*[Ht-1, Xt] + Bi)
1414  */
1415 class INFERENCE_ENGINE_API_CLASS(RNNCell): public RNNCellBase {
1416 public:
1417  using RNNCellBase::RNNCellBase;
1418  using RNNCellBase::operator=;
1419 
1420  virtual ~RNNCell();
1421 };
1422 
1423 /**
1424  * @brief Sequence of recurrent cells
1425  *
1426  * N - batch size
1427  * T - sequence size
1428  * S - state size (=hidden_size)
1429  * NS - num of state tensors (LSTM=2, GRU/RNN=1)
1430  * ND - num of direction (BDR=2, FWD/BWD=1)
1431  *
1432  * Inputs:
1433  * [N,T,D] Xt - input data
1434  * [ND,N,S] Ht-1 - initial hidden state
1435  * [ND,N,S] Ct-1 - initial cell state // if NS==2
1436  * [N] SL - sequence length (optional input)
1437  *
1438  * Outputs:
1439  * [ND,N,T,S] Ht - output hidden state for each step of the sequence
1440  * [ND,N,S] Ht - out hidden state for the last step
1441  * [ND,N,S] Ct - out cell state for the last step // if NS==2
1442  *
1443  * NB! if axis==0 batch and sequence dimensions are swapped (N <-> T) for input and output tensors
1444  *
1445  * Weights:
1446  * - weights [ND,G,S,D+S]
1447  * - biases [ND,G,S]
1448  * NB! if ND==2 weights are concatenated cell weights [forward_cell_weights, backward_cell_weights]
1449  *
1450  */
1451 class INFERENCE_ENGINE_API_CLASS(RNNSequenceLayer): public RNNCellBase {
1452 public:
1453  using RNNCellBase::RNNCellBase;
1454 
1455  /**
1456  * @brief An axis by which iteration is performed
1457  *
1458  * axis=0 means first input/output data blob dimension is sequence
1459  * axis=1 means first input/output data blob dimension is batch
1460  */
1461  unsigned int axis = 1;
1462 
1463  /**
1464  * @brief Direction of iteration through sequence dimension
1465  */
1466  enum Direction {
1467  FWD, /**< Forward mode. Iterate starts from index 0 with step 1. */
1468  BWD, /**< Backward mode. Iterate starts from last index with step -1. */
1469  BDR /**< Bidirectional mode. First is forward pass, second is backward. */
1470  };
1471 
1472  /** @copybrief Direction */
1473  Direction direction = FWD;
1474 
1475  virtual ~RNNSequenceLayer();
1476 };
1477 
1478 /**
1479  * @brief This class represents a Parametric ReLU (PReLU) layer
1480  */
1481 class INFERENCE_ENGINE_API_CLASS(PReLULayer): public WeightableLayer {
1482 public:
1483  /**
1484  * @brief A flag that indicates if the same negative_slope value is used for all the features. If false, the value
1485  * is used pixel wise
1486  */
1487  bool _channel_shared = false;
1488 
1489  /**
1490  * @brief A default constructor. Creates a new PReLULayer instance and initializes layer parameters with the given
1491  * values.
1492  *
1493  * @param prms Initial layer parameters
1494  */
1495  explicit PReLULayer(const LayerParams& prms): WeightableLayer(prms), _channel_shared(false) {}
1496 
1497  virtual ~PReLULayer();
1498 };
1499 
1500 /**
1501  * @brief This class represents a standard Power Layer
1502  *
1503  * Formula is: output = (offset + scale * input) ^ power
1504  */
1505 class INFERENCE_ENGINE_API_CLASS(PowerLayer): public CNNLayer {
1506 public:
1507  /**
1508  * @brief An exponent value
1509  */
1510  float power = 1.f;
1511  /**
1512  * @brief A scale factor
1513  */
1514  float scale = 1.f;
1515  /**
1516  * @brief An offset value
1517  */
1518  float offset = 0.f;
1519 
1520  /**
1521  * @brief Creates a new PowerLayer instance.
1522  */
1523  using CNNLayer::CNNLayer;
1524 
1525  virtual ~PowerLayer();
1526 };
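
// Formula sketch (illustrative): the element-wise transform documented above,
// output = (offset + scale * input) ^ power. Requires <cmath> for std::pow.
inline float PowerExample(const PowerLayer& layer, float input) {
    return std::pow(layer.offset + layer.scale * input, layer.power);
}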
1527 
1528 /**
1529  * @brief This class represents a Batch Normalization Layer
1530  */
1531 class INFERENCE_ENGINE_API_CLASS(BatchNormalizationLayer): public WeightableLayer {
1532 public:
1533  /**
1534  * @brief A small value to add to the variance estimate to avoid division by zero
1535  */
1536  float epsilon = 1e-3f;
1537 
1538  /**
1539  * @brief Creates a new BatchNormalizationLayer instance.
1540  */
1541  using WeightableLayer::WeightableLayer;
1542 
1543  virtual ~BatchNormalizationLayer();
1544 };
1545 
1546 /**
1547  * @brief This class represents a general matrix multiplication operation layer
1548  *
1549  * Formula is: dst := alpha*src1*src2 + beta*src3
1550  */
1551 class INFERENCE_ENGINE_API_CLASS(GemmLayer): public CNNLayer {
1552 public:
1553  /**
1554  * @brief A scale factor of src1 matrix
1555  */
1556  float alpha = 1.f;
1557  /**
1558  * @brief A scale factor of src3 matrix
1559  */
1560  float beta = 1.f;
1561  /**
1562  * @brief A flag that indicates if the src1 matrix is to be transposed
1563  */
1564  bool transpose_a = false;
1565  /**
1566  * @brief A flag that indicates if the src2 matrix is to be transposed
1567  */
1568  bool transpose_b = false;
1569  /**
1570  * @brief Creates a new GemmLayer instance.
1571  */
1572  using CNNLayer::CNNLayer;
1573 
1574  virtual ~GemmLayer();
1575 };
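
// Formula sketch (illustrative, ignoring the transpose_a/transpose_b flags): the
// documented operation dst = alpha * src1 * src2 + beta * src3 for row-major
// M x K (src1), K x N (src2) and M x N (src3, dst) buffers.
inline void GemmExample(const GemmLayer& layer, const float* src1, const float* src2,
                        const float* src3, float* dst, int M, int N, int K) {
    for (int m = 0; m < M; ++m) {
        for (int n = 0; n < N; ++n) {
            float acc = 0.f;
            for (int k = 0; k < K; ++k) acc += src1[m * K + k] * src2[k * N + n];
            dst[m * N + n] = layer.alpha * acc + layer.beta * src3[m * N + n];
        }
    }
}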
1576 
1577 /**
1578  * @brief This class represents a standard Pad layer
1579  *
1580  * Adds paddings to input tensor
1581  */
1582 class INFERENCE_ENGINE_API_CLASS(PadLayer): public CNNLayer {
1583 public:
1584  /**
1585  * @enum ePadMode
1586  * @brief Defines possible modes of pad operation
1587  */
1588  enum ePadMode { Constant = 0, Edge, Reflect, Symmetric };
1589 
1590  /**
1591  * @brief Size of padding in the beginning of each axis
1592  */
1593  PropertyVector<unsigned int> pads_begin;
1594  /**
1595  * @brief Size of padding in the end of each axis
1596  */
1597  PropertyVector<unsigned int> pads_end;
1598  /**
1599  * @brief Mode of pad operation
1600  */
1601  ePadMode pad_mode = Constant;
1602  /**
1603  * @brief A pad value which is used for filling in Constant mode
1604  */
1605  float pad_value = 0.0f;
1606  /**
1607  * @brief Creates a new PadLayer instance.
1608  */
1609  using CNNLayer::CNNLayer;
1610 
1611  virtual ~PadLayer();
1612 };
1613 
1614 /**
1615  * @brief This class represents a standard Gather layer
1616  *
1617  * Gather slices from Dictionary according to Indexes
1618  */
1619 class INFERENCE_ENGINE_API_CLASS(GatherLayer): public CNNLayer {
1620 public:
1621  /**
1622  * @brief The axis in Dictionary to gather Indexes from
1623  */
1624  int axis = 0;
1625  /**
1626  * @brief Creates a new GatherLayer instance.
1627  */
1628  using CNNLayer::CNNLayer;
1629 
1630  virtual ~GatherLayer();
1631 };
1632 
1633 /**
1634  * @brief This class represents a standard Strided Slice layer
1635  *
1636  * Strided Slice picks from the input tensor according to parameters
1637  */
1638 class INFERENCE_ENGINE_API_CLASS(StridedSliceLayer): public CNNLayer {
1639 public:
1640  /**
1641  * @brief The begin_mask is a bitmask where bit i being 0 means
1642  * to ignore the begin value and instead use the default value
1643  */
1644  std::string begin_mask;
1645  /**
1646  * @brief Analogous to begin_mask
1647  */
1648  std::string end_mask;
1649  /**
1650  * @brief The ellipsis_mask is a bitmask where bit i being 1 means
1651  * the i-th position is actually an ellipsis
1652  */
1653  std::string ellipsis_mask;
1654  /**
1655  * @brief The new_axis_mask is a bitmask where bit i being 1 means
1656  * the i-th position creates a new 1 dimension shape
1657  */
1658  std::string new_axis_mask;
1659  /**
1660  * @brief The shrink_axis_mask is a bitmask where bit i being 1 means
1661  * the i-th position shrinks the dimensionality
1662  */
1663  std::string shrink_axis_mask;
1664 
1665  /**
1666  * @brief Creates a new StridedSliceLayer instance.
1667  */
1668  using CNNLayer::CNNLayer;
1669 
1670  virtual ~StridedSliceLayer();
1671 };
1672 
1673 /**
1674  * @brief This class represents a standard Shuffle Channels layer
1675  * Shuffle Channels picks from the input tensor according to parameters
1676  */
1677 class INFERENCE_ENGINE_API_CLASS(ShuffleChannelsLayer): public CNNLayer {
1678 public:
1679  /**
1680  * @brief The axis in tensor to shuffle channels
1681  */
1682  int axis = 1;
1683 
1684  /**
1685  * @brief The group of output shuffled channels
1686  */
1687  unsigned int group = 1;
1688 
1689  /**
1690  * @brief Creates a new ShuffleChannelsLayer instance.
1691  */
1692  using CNNLayer::CNNLayer;
1693 
1694  virtual ~ShuffleChannelsLayer();
1695 };
1696 
1697 /**
1698  * @brief This class represents a standard Depth To Space layer
1699  * Depth To Space picks from the input tensor according to parameters
1700  */
1701 class INFERENCE_ENGINE_API_CLASS(DepthToSpaceLayer): public CNNLayer {
1702 public:
1703  /**
1704  * @brief The block size of the Depth To Space rearrangement
1705  */
1706  unsigned int block_size = 1;
1707 
1708  /**
1709  * @brief Creates a new DepthToSpaceLayer instance.
1710  */
1711  using CNNLayer::CNNLayer;
1712 
1713  virtual ~DepthToSpaceLayer();
1714 };
1715 
1716 /**
1717  * @brief This class represents a standard Space To Depth layer
1718  * Space To Depth picks from the input tensor according to parameters
1719  */
1720 class INFERENCE_ENGINE_API_CLASS(SpaceToDepthLayer): public CNNLayer {
1721 public:
1722  /**
1723  * @brief The block size of the Space To Depth rearrangement
1724  */
1725  unsigned int block_size = 1;
1726 
1727  /**
1728  * @brief Creates a new SpaceToDepthLayer instance.
1729  */
1730  using CNNLayer::CNNLayer;
1731 
1732  virtual ~SpaceToDepthLayer();
1733 };
1734 
1735 /**
1736  * @brief This class represents SparseFillEmptyRows layer
1737  *
1738  * SparseFillEmptyRows fills empty rows in a sparse tensor
1739  */
1740 class INFERENCE_ENGINE_API_CLASS(SparseFillEmptyRowsLayer): public CNNLayer {
1741 public:
1742  /**
1743  * @brief Creates a new SparseFillEmptyRowsLayer instance.
1744  */
1745  using CNNLayer::CNNLayer;
1746 
1747  virtual ~SparseFillEmptyRowsLayer();
1748 };
1749 
1750 /**
1751  * @brief This class represents SparseSegmentMean(SqrtN, Sum) layers
1752  * SparseSegmentMean(SqrtN, Sum) layer reduces data along sparse segments of a tensor.
1753  */
1754 class INFERENCE_ENGINE_API_CLASS(SparseSegmentReduceLayer): public CNNLayer {
1755 public:
1756  /**
1757  * @brief Creates a new SparseSegmentReduceLayer instance.
1758  */
1759  using CNNLayer::CNNLayer;
1760 
1761  virtual ~SparseSegmentReduceLayer();
1762 };
1763 
1764 /**
1765  * @brief This class represents ExperimentalSparseWeightedReduce layer
1766  * ExperimentalSparseWeightedReduce layer reduces data along sparse segments of a tensor.
1767  */
1768 class INFERENCE_ENGINE_API_CLASS(ExperimentalSparseWeightedReduceLayer) : public CNNLayer {
1769 public:
1770  /**
1771  * @brief Creates a new ExperimentalSparseWeightedReduceLayer instance.
1772  */
1773  using CNNLayer::CNNLayer;
1774 
1775  virtual ~ExperimentalSparseWeightedReduceLayer();
1776 };
1777 
1778 /**
1779  * @brief This class represents SparseToDense layer
1780  * SparseToDense layer converts a sparse tensor to a dense tensor.
1781  */
1782 class INFERENCE_ENGINE_API_CLASS(SparseToDenseLayer) : public CNNLayer {
1783 public:
1784  /**
1785  * @brief Creates a new SparseToDenseLayer instance.
1786  */
1787  using CNNLayer::CNNLayer;
1788 
1789  virtual ~SparseToDenseLayer();
1790 };
1791 
1792 /**
1793  * @brief This class represents Bucketize layer
1794  * Bucketize layer bucketizes the input based on the boundaries.
1795  */
1796 class INFERENCE_ENGINE_API_CLASS(BucketizeLayer) : public CNNLayer {
1797 public:
1798  /**
1799  * @brief Indicates whether the intervals include the right or the left bucket edge.
1800  */
1801  bool with_right_bound = false;
1802 
1803  /**
1804  * @brief Creates a new BucketizeLayer instance.
1805  */
1806  using CNNLayer::CNNLayer;
1807 
1808  virtual ~BucketizeLayer();
1809 };
1810 
1811 /**
1812  * @brief This class represents a standard Reverse Sequence layer
1813  *
1814  * Reverse Sequence modifies the input tensor according to parameters
1815  */
1816 class INFERENCE_ENGINE_API_CLASS(ReverseSequenceLayer): public CNNLayer {
1817 public:
1818  /**
1819  * @brief The seq_axis dimension in tensor which is partially reversed
1820  */
1821  int seq_axis = 1;
1822 
1823  /**
1824  * @brief The batch_axis dimension in tensor along which reversal is performed
1825  */
1826  int batch_axis = 0;
1827 
1828  /**
1829  * @brief Creates a new ReverseSequence instance.
1830  */
1831  using CNNLayer::CNNLayer;
1832 
1833  virtual ~ReverseSequenceLayer();
1834 };
1835 
1836 /**
1837  * @brief This class represents a OneHot layer
1838  * Converts input into OneHot representation.
1839  */
1840 class INFERENCE_ENGINE_API_CLASS(OneHotLayer): public CNNLayer {
1841 public:
1842  /**
1843  * @brief A depth of representation
1844  */
1845  unsigned int depth = 0;
1846 
1847  /**
1848  * @brief The locations represented by indices in input take value on_value
1849  */
1850  float on_value = 1.f;
1851 
1852  /**
1853  * @brief The locations not represented by indices in input take value off_value
1854  */
1855  float off_value = 0.f;
1856 
1857  /**
1858  * @brief An axis along which the one-hot dimension is inserted into the output tensor shape (-1 means the last axis)
1859  */
1860  int axis = -1;
1861 
1862  /**
1863  * @brief Creates a new OneHot instance
1864  */
1865  using CNNLayer::CNNLayer;
1866 
1867  virtual ~OneHotLayer();
1868 };
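
// Semantics sketch (illustrative, ignoring the axis placement): a single index is
// expanded into a vector of length depth that holds off_value everywhere except the
// indexed position, which receives on_value, as described above.
inline std::vector<float> OneHotExample(const OneHotLayer& layer, unsigned int index) {
    std::vector<float> out(layer.depth, layer.off_value);
    if (index < layer.depth) out[index] = layer.on_value;
    return out;
}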
1869 
1870 /**
1871  * @brief This class represents a standard RangeLayer layer
1872  *
1873  * RangeLayer modifies the input tensor dimensions according to parameters
1874  */
1875 class INFERENCE_ENGINE_API_CLASS(RangeLayer): public CNNLayer {
1876 public:
1877  /**
1878  * @brief Creates a new RangeLayer instance.
1879  */
1880  using CNNLayer::CNNLayer;
1881 
1882  virtual ~RangeLayer();
1883 };
1884 
1885 /**
1886  * @brief This class represents a standard Fill layer
1887  *
1888  * Fill modifies the input tensor according to parameters
1889  */
1890 class INFERENCE_ENGINE_API_CLASS(FillLayer): public CNNLayer {
1891 public:
1892  /**
1893  * @brief Creates a new Fill instance.
1894  */
1895  using CNNLayer::CNNLayer;
1896 
1897  virtual ~FillLayer();
1898 };
1899 
1900 /**
1901  * @brief This class represents a SelectLayer layer
1902  *
1903  * SelectLayer layer takes elements from the second (“then”) or the third (“else”) input based on condition mask
1904  * (“cond”) provided in the first input. The “cond” tensor is broadcasted to “then” and “else” tensors. The output
1905  * tensor shape is equal to broadcasted shape of “cond”, “then” and “else”.
1906  */
1907 class INFERENCE_ENGINE_API_CLASS(SelectLayer): public CNNLayer {
1908 public:
1909  /**
1910  * @brief Creates a new SelectLayer instance.
1911  */
1912  using CNNLayer::CNNLayer;
1913 
1914  virtual ~SelectLayer();
1915 };
1916 
1917 /**
1918  * @brief This class represents a standard Broadcast layer
1919  *
1920  * Broadcast modifies the input tensor dimensions according to parameters
1921  */
1922 class INFERENCE_ENGINE_API_CLASS(BroadcastLayer): public CNNLayer {
1923 public:
1924  /**
1925  * @brief Creates a new Broadcast instance.
1926  */
1927  using CNNLayer::CNNLayer;
1928 
1929  virtual ~BroadcastLayer();
1930 };
1931 
1932 /**
1933  * @brief This class represents a quantization operation layer
1934  *
1935  * Element-wise linear quantization of floating point input values into a discrete set of floating point values
1936  */
1937 class INFERENCE_ENGINE_API_CLASS(QuantizeLayer): public CNNLayer {
1938 public:
1939  /**
1940  * @brief The number of quantization levels
1941  */
1942  int levels = 1;
1943 
1944  /**
1945  * @brief Creates a new QuantizeLayer instance.
1946  */
1947  using CNNLayer::CNNLayer;
1948 
1949  virtual ~QuantizeLayer();
1950 };
1951 
1952 /**
1953  * @brief This class represents the standard Math layers
1954  *
1955  * A Math layer modifies the input tensor according to its parameters
1956  */
1957 class INFERENCE_ENGINE_API_CLASS(MathLayer): public CNNLayer {
1958 public:
1959  /**
1960  * @brief Creates a new Math instance.
1961  */
1962  using CNNLayer::CNNLayer;
1963 
1964  virtual ~MathLayer();
1965 };
1966 
1967 /**
1968  * @brief This class represents the standard Reduce layers
1969  *
1970  * A Reduce layer reduces the input tensor according to its parameters
1971  */
1972 class INFERENCE_ENGINE_API_CLASS(ReduceLayer): public CNNLayer {
1973 public:
1974  /**
1975  * @brief A flag that indicates whether the reduced dimensions are kept in the output tensor
1976  */
1977  bool keep_dims = true;
1978 
1979  /**
1980  * @brief Creates a new Reduce instance.
1981  */
1982  using CNNLayer::CNNLayer;
1983 
1984  virtual ~ReduceLayer();
1985 };
1986 
1987 /**
1988  * @brief This class represents a standard TopK layer
1989  *
1990  * TopK picks the top K values from the input tensor according to parameters
1991  */
1992 class INFERENCE_ENGINE_API_CLASS(TopKLayer): public CNNLayer {
1993 public:
1994  /**
1995  * @brief The mode could be 'max' or 'min'
1996  */
1997  std::string mode;
1998  /**
1999  * @brief top K values sort mode could be 'value' or 'index'
2000  */
2001  std::string sort;
2002  /**
2003  * @brief The axis along which the top K values are picked
2004  */
2005  int axis = -1;
2006 
2007  /**
2008  * @brief Creates a new TopKLayer instance.
2009  */
2010  using CNNLayer::CNNLayer;
2011 
2012  virtual ~TopKLayer();
2013 };
2014 
2015 /**
2016  * @brief This class represents Unique layer.
2017  *
2018  * The Unique operation searches for unique elements in 1-D input
2019  */
2020 class INFERENCE_ENGINE_API_CLASS(UniqueLayer): public CNNLayer {
2021 public:
2022  /**
2023  * @brief A flag indicating whether to sort unique elements
2024  */
2025  bool sorted;
2026  /**
2027  * @brief A flag indicating whether to return indices of input data elements in the output of uniques
2028  */
2029  bool return_inverse;
2030  /**
2031  * @brief A flag indicating whether to return a number of occurrences for each unique element
2032  */
2033  bool return_counts;
2034 
2035  /**
2036  * @brief Creates a new UniqueLayer instance.
2037  */
2038  using CNNLayer::CNNLayer;
2039 
2040  virtual ~UniqueLayer();
2041 };
2042 
2043 /**
2044  * @brief This class represents a standard NonMaxSuppression layer
2045  */
2046 class INFERENCE_ENGINE_API_CLASS(NonMaxSuppressionLayer): public CNNLayer {
2047 public:
2048  /**
2049  * @brief The 'center_point_box' indicates the format of the box data
2050  */
2051  bool center_point_box = false;
2052  /**
2053  * @brief The 'sort_result_descending' indicates that results will be sorted in descending order by score across all batches and
2054  * classes
2055  */
2056  bool sort_result_descending = true;
2057  /**
2058  * @brief Creates a new NonMaxSuppressionLayer instance.
2059  */
2060  using CNNLayer::CNNLayer;
2061 
2062  virtual ~NonMaxSuppressionLayer();
2063 };
2064 
2065 /**
2066  * @brief This class represents a standard Scatter layer
2067  */
2068 class INFERENCE_ENGINE_API_CLASS(ScatterLayer): public CNNLayer {
2069 public:
2070  /**
2071  * @brief The axis in Dictionary to scatter Indexes from
2072  */
2073  int axis = 0;
2074  /**
2075  * @brief Creates a new ScatterLayer instance.
2076  */
2077  using CNNLayer::CNNLayer;
2078 
2079  virtual ~ScatterLayer();
2080 };
2081 
2082 } // namespace InferenceEngine
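The classes above are plain data holders: a layer is constructed from LayerParams and its typed fields (plus, optionally, the string params map inherited from CNNLayer) are then filled in by whatever builds the network. The snippet below is a minimal usage sketch, not taken from the library documentation; the include path, the layer name "topk1", and all parameter values are illustrative assumptions, and the program must link against the inference_engine library because the virtual destructors are defined there.

 #include <string>
 #include "ie_layers.h"   // include path may differ between releases

 int main() {
     // Common parsing parameters shared by every layer type.
     InferenceEngine::LayerParams prms;
     prms.name = "topk1";                                // illustrative layer name
     prms.type = "TopK";
     prms.precision = InferenceEngine::Precision::FP32;

     // TopKLayer inherits the CNNLayer(const LayerParams&) constructor.
     InferenceEngine::TopKLayer topk(prms);
     topk.axis = 1;        // dimension along which the top K values are picked
     topk.mode = "max";    // 'max' or 'min'
     topk.sort = "value";  // sort the selected values by 'value' or 'index'

     // The string parameter map mirrors the typed fields for IR-style access.
     topk.params["axis"] = "1";
     return 0;
 }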