ie_layers.h
Go to the documentation of this file.
1 // Copyright (C) 2018-2019 Intel Corporation
2 // SPDX-License-Identifier: Apache-2.0
3 //
4 
5 /**
6  * @brief a header file for internal Layers structure to describe layers information
7  * @file ie_layers.h
8  */
9 #pragma once
10 
#include <algorithm>
#include <cctype>
#include <iterator>
#include <limits>
#include <map>
#include <memory>
#include <sstream>
#include <string>
#include <vector>

#include "ie_blob.h"
#include "ie_common.h"
#include "ie_data.h"
#include "ie_layers_property.hpp"
24 
25 namespace InferenceEngine {
26 /**
27  * @brief This is an internal common Layer parameter parsing arguments
28  */
29 struct LayerParams {
30  /// @brief Layer name
31  std::string name;
32  /// @brief Layer type
33  std::string type;
34  /// @brief Layer precision
36 };
37 
38 /**
39  * @brief This is a base abstraction Layer - all DNN Layers inherit from this class
40  */
41 class INFERENCE_ENGINE_API_CLASS(CNNLayer) {
42 public:
43  /**
44  * @brief A shared pointer to CNNLayer
45  */
46  using Ptr = std::shared_ptr<CNNLayer>;
47 
48  /**
49  * @brief Layer name
50  */
51  std::string name;
52  /**
53  * @brief Layer type
54  */
55  std::string type;
56  /**
57  * @brief Layer base operating precision
58  */
60  /**
61  * @brief A vector of pointers to the output data elements of this layer in the di-graph (order matters)
62  */
63  std::vector<DataPtr> outData;
64  /**
65  * @brief A vector of weak pointers to the input data elements of this layer in the di-graph (order matters)
66  */
67  std::vector<DataWeakPtr> insData;
68  /**
69  * @brief If suggested to fuse - a pointer to the layer which needs to be fused with this layer
70  */
72  /**
73  * @brief Convenience user values to store in this object as extra data
74  */
76  /**
77  * @brief Layer affinity set by user.
78  */
79  std::string affinity;
80 
81  /**
82  * @brief A constructor. Creates a new CNNLayer instance and initializes layer parameters with the given values.
83  * @param prms Basic common parsing parameters
84  */
85  explicit CNNLayer(const LayerParams& prms)
86  : name(prms.name), type(prms.type), precision(prms.precision), userValue({0}) {}
87 
88  /**
89  * @brief A virtual destructor
90  */
91  virtual ~CNNLayer();
92 
93  /**
94  * @brief Sets a layer to be fused with
95  * @param layer Reference to the layer to be fused with
96  */
97  void fuse(Ptr& layer) {
98  _fusedWith = layer;
99  }
100 
101  /**
102  * @brief Returns the first element of the input data for this layer
103  * @return A smart pointer to the input data element
104  */
105  virtual const DataPtr input() const {
106  if (insData.empty()) {
107  THROW_IE_EXCEPTION << "Internal error: input data is empty";
108  }
109  auto lockedFirstInsData = insData[0].lock();
110  if (!lockedFirstInsData) {
111  THROW_IE_EXCEPTION << "Internal error: unable to lock weak_ptr\n";
112  }
113  return lockedFirstInsData;
114  }
115 
116  /**
117  * @brief Checks if the input data and layer data are legitimate
118  */
119  void validateLayer();
120 
121  /**
122  * @brief Parse string with float in accordance with IE rules
123  * @param str input string with float value
124  * @return float value if parsing was successful
125  * @throws InferenceEngineException in case of parsing error
126  */
127  static float ie_parse_float(const std::string& str) {
128  if (str == "-inf") {
129  return -std::numeric_limits<float>::infinity();
130  } else if (str == "inf") {
131  return std::numeric_limits<float>::infinity();
132  } else {
133  float res;
134  std::stringstream val_stream(str);
135  val_stream.imbue(std::locale("C"));
136  val_stream >> res;
137  if (!val_stream.eof()) THROW_IE_EXCEPTION;
138  return res;
139  }
140  }
141  /**
142  * @brief serialize float with c_locale formating
143  * used for default values serializing
144  */
145  static std::string ie_serialize_float(float value) {
146  std::stringstream val_stream;
147  val_stream.imbue(std::locale("C"));
148  val_stream << value;
149  return val_stream.str();
150  }
151 
152  /**
153  * @brief Gets float value for the given parameter
154  * @param param name of the parameter to find
155  * @param def default value of the parameter if not found
156  * @return float value
157  */
158  float GetParamAsFloat(const char* param, float def) const {
159  std::string val = GetParamAsString(param, ie_serialize_float(def).c_str());
160  try {
161  return ie_parse_float(val);
162  } catch (...) {
163  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ". Value "
164  << val << " cannot be casted to float.";
165  }
166  }
167 
168  /**
169  * @brief Returns a float value for the given layer parameter
170  * @param param Name of the layer parameter
171  * @return A float value for the specified parameter
172  */
173  float GetParamAsFloat(const char* param) const {
174  std::string val = GetParamAsString(param);
175  try {
176  return ie_parse_float(val);
177  } catch (...) {
178  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ". Value "
179  << val << " cannot be casted to float.";
180  }
181  }
182 
183  /**
184  * @brief Returns a vector of float values for the given parameter or returns the default value
185  * @param param Name of the layer parameter
186  * @param def Default value of the parameter if not found
187  * @return vector of float values
188  */
189  std::vector<float> GetParamAsFloats(const char* param, std::vector<float> def) const {
190  std::string vals = GetParamAsString(param, "");
191  std::vector<float> result;
192  std::istringstream stream(vals);
193  std::string str;
194  if (vals.empty()) return def;
195  while (getline(stream, str, ',')) {
196  try {
197  float val = ie_parse_float(str);
198  result.push_back(val);
199  } catch (...) {
200  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
201  << ". Value " << vals << " cannot be casted to floats.";
202  }
203  }
204  return result;
205  }
206 
207  /**
208  * @brief Returns a vector of float values for the given parameter
209  * @param param Name of the layer parameter
210  * @return vector of float values
211  */
212  std::vector<float> GetParamAsFloats(const char* param) const {
213  std::string vals = GetParamAsString(param);
214  std::vector<float> result;
215  std::istringstream stream(vals);
216  std::string str;
217  while (getline(stream, str, ',')) {
218  try {
219  float val = ie_parse_float(str);
220  result.push_back(val);
221  } catch (...) {
222  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
223  << ". Value " << vals << " cannot be casted to floats.";
224  }
225  }
226  return result;
227  }
228 
229  /**
230  * @brief Returns an integer value for the given parameter or returns the default value
231  * @param param Name of the layer parameter
232  * @param def Default value of the parameter if not found
233  * @return An int value for the specified parameter
234  */
235  int GetParamAsInt(const char* param, int def) const {
236  std::string val = GetParamAsString(param, std::to_string(def).c_str());
237  try {
238  return std::stoi(val);
239  } catch (...) {
240  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ". Value "
241  << val << " cannot be casted to int.";
242  }
243  }
244 
245  /**
246  * @brief Returns an integer value for the given parameter
247  * @param param Name of the layer parameter
248  * @return An int value for the specified parameter
249  */
250  int GetParamAsInt(const char* param) const {
251  std::string val = GetParamAsString(param);
252  try {
253  return std::stoi(val);
254  } catch (...) {
255  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ". Value "
256  << val << " cannot be casted to int.";
257  }
258  }
259 
260  /**
261  * @brief Returns a vector of int values for the given parameter or returns the default value
262  * @param param Name of the layer parameter
263  * @param def Default value of the parameter if not found
264  * @return vector of int values
265  */
266  std::vector<int> GetParamAsInts(const char* param, std::vector<int> def) const {
267  std::string vals = GetParamAsString(param, "");
268  std::vector<int> result;
269  std::istringstream stream(vals);
270  std::string str;
271  if (vals.empty()) return def;
272  while (getline(stream, str, ',')) {
273  try {
274  result.push_back(std::stoi(str));
275  } catch (...) {
276  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
277  << ". Value " << vals << " cannot be casted to int.";
278  }
279  }
280  return result;
281  }
282 
283  /**
284  * @brief Returns a vector of int values for the given parameter
285  * @param param Name of the layer parameter
286  * @return vector of int values
287  */
288  std::vector<int> GetParamAsInts(const char* param) const {
289  std::string vals = GetParamAsString(param);
290  std::vector<int> result;
291  std::istringstream stream(vals);
292  std::string str;
293  while (getline(stream, str, ',')) {
294  try {
295  result.push_back(std::stoi(str));
296  } catch (...) {
297  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
298  << ". Value " << vals << " cannot be casted to int.";
299  }
300  }
301  return result;
302  }
303  /**
304  * @brief Returns an unsigned integer value for the given parameter or returns the default value
305  * @param param Name of the layer parameter
306  * @param def Default value of the parameter if not found
307  * @return An unsigned integer value for the specified parameter
308  */
309  unsigned int GetParamAsUInt(const char* param, unsigned int def) const {
310  std::string val = GetParamAsString(param, std::to_string(def).c_str());
311  std::string message = "Cannot parse parameter " + std::string(param) + " from IR for layer " + name +
312  ". Value " + val + " cannot be casted to int.";
313  try {
314  int value = std::stoi(val);
315  if (value < 0) {
316  THROW_IE_EXCEPTION << message;
317  }
318  return static_cast<unsigned int>(value);
319  } catch (...) {
320  THROW_IE_EXCEPTION << message;
321  }
322  }
323 
324  /**
325  * @brief Returns an unsigned integer value for the given parameter
326  * @param param Name of the layer parameter
327  * @return An unsigned integer value for the specified parameter
328  */
329  unsigned int GetParamAsUInt(const char* param) const {
330  std::string val = GetParamAsString(param);
331  std::string message = "Cannot parse parameter " + std::string(param) + " from IR for layer " + name +
332  ". Value " + val + " cannot be casted to unsigned int.";
333  try {
334  int value = std::stoi(val);
335  if (value < 0) {
336  THROW_IE_EXCEPTION << message;
337  }
338  return static_cast<unsigned int>(value);
339  } catch (...) {
340  THROW_IE_EXCEPTION << message;
341  }
342  }
343 
344  /**
345  * @brief Returns a vector of unsigned int values for the given parameter or returns the default value
346  * @param param Name of the layer parameter
347  * @param def Default value of the parameter if not found
348  * @return vector of unsigned int values
349  */
350  std::vector<unsigned int> GetParamAsUInts(const char* param, std::vector<unsigned int> def) const {
351  std::string vals = GetParamAsString(param, "");
352  std::vector<unsigned int> result;
353  std::istringstream stream(vals);
354  std::string str;
355  std::string message = "Cannot parse parameter " + std::string(param) + " " + str + " from IR for layer " +
356  name + ". Value " + vals + " cannot be casted to unsigned int.";
357  if (vals.empty()) return def;
358  while (getline(stream, str, ',')) {
359  try {
360  int value = std::stoi(str);
361  if (value < 0) {
362  THROW_IE_EXCEPTION << message;
363  }
364  result.push_back(static_cast<unsigned int>(value));
365  } catch (...) {
366  THROW_IE_EXCEPTION << message;
367  }
368  }
369  return result;
370  }
371 
372  /**
373  * @brief Returns a vector of unsigned int values for the given parameter
374  * @param param Name of the layer parameter
375  * @return vector of unsigned int values
376  */
377  std::vector<unsigned int> GetParamAsUInts(const char* param) const {
378  std::string vals = GetParamAsString(param);
379  std::vector<unsigned int> result;
380  std::istringstream stream(vals);
381  std::string str;
382  std::string message = "Cannot parse parameter " + std::string(param) + " " + str + " from IR for layer " +
383  name + ". Value " + vals + " cannot be casted to int.";
384  while (getline(stream, str, ',')) {
385  try {
386  int value = std::stoi(str);
387  if (value < 0) {
388  THROW_IE_EXCEPTION << message;
389  }
390  result.push_back(static_cast<unsigned int>(value));
391  } catch (...) {
392  THROW_IE_EXCEPTION << message;
393  }
394  }
395  return result;
396  }
397  /**
398  * @brief Returns a boolean value for the given parameter.
399  * The valid values are (true, false, 1, 0).
400  * @param param Name of the layer parameter
401  * @param def Default value of the parameter if not found
402  * @return A bool value for the specified parameter
403  */
404  bool GetParamAsBool(const char* param, bool def) const {
405  std::string val = GetParamAsString(param, std::to_string(def).c_str());
406  std::string loweredCaseValue;
407  std::transform(val.begin(), val.end(), std::back_inserter(loweredCaseValue), [](char value) {
408  return std::tolower(value);
409  });
410 
411  bool result = false;
412 
413  if (!(std::istringstream(loweredCaseValue) >> std::boolalpha >> result)) {
414  // attempting parse using non alpha bool
415  return (GetParamAsInt(param, def) != 0);
416  }
417 
418  return result;
419  }
420  /**
421  * @brief Returns a boolean value for the given parameter
422  * @param param Name of the layer parameter
423  * @return A bool value for the specified parameter
424  */
425  bool GetParamAsBool(const char* param) const {
426  std::string val = GetParamAsString(param);
427  std::string loweredCaseValue;
428  std::transform(val.begin(), val.end(), std::back_inserter(loweredCaseValue), [](char value) {
429  return std::tolower(value);
430  });
431 
432  bool result = false;
433 
434  if (!(std::istringstream(loweredCaseValue) >> std::boolalpha >> result)) {
435  // attempting parse using non alpha bool
436  return (GetParamAsInt(param) != 0);
437  }
438 
439  return result;
440  }
441 
442  /**
443  * @brief Returns a string value for the given parameter or returns the default one
444  * @param param Name of the layer parameter
445  * @param def Default value of the parameter if not found
446  * @return A string value
447  */
448  std::string GetParamAsString(const char* param, const char* def) const {
449  auto it = params.find(param);
450  if (it == params.end() || it->second.empty()) {
451  return def;
452  }
453  return (*it).second;
454  }
455 
456  /**
457  * @brief Checks the param presence in the layer
458  * @param param Name of the layer parameter
459  * @return a bool depending param presence
460  */
461  bool CheckParamPresence(const char* param) const {
462  auto it = params.find(param);
463  if (it == params.end()) {
464  return false;
465  }
466  return true;
467  }
468 
469  /**
470  * @brief Returns a string value for the given parameter.
471  * Throws exception if parameter was not found.
472  * @param param Name of the layer parameter
473  * @return A string value
474  */
475  std::string GetParamAsString(const char* param) const {
476  auto it = params.find(param);
477  if (it == params.end()) {
478  THROW_IE_EXCEPTION << "No such parameter name '" << param << "' for layer " << name;
479  }
480  return (*it).second;
481  }
482 
483  std::vector<std::string> GetParamAsStrings(const char* param, std::vector<std::string> def) const {
484  std::string vals = GetParamAsString(param, "");
485  std::vector<std::string> result;
486  std::istringstream stream(vals);
487  std::string str;
488  if (vals.empty()) return def;
489  while (getline(stream, str, ',')) {
490  try {
491  result.push_back(str);
492  } catch (...) {
493  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ".";
494  }
495  }
496  return result;
497  }
498 
499  /**
500  * @brief Map of pairs: (parameter name, parameter value)
501  */
502  std::map<std::string, std::string> params;
503 
504  /**
505  * @brief Map of pairs: (name, weights/biases blob)
506  */
507  std::map<std::string, Blob::Ptr> blobs;
508 };
509 
/**
 * @brief Alias for CNNLayer object
 */
using GenericLayer = class CNNLayer;
514 
515 /**
516  * @brief This class represents a layer with Weights and/or Biases (e.g. Convolution/Fully Connected, etc.)
517  */
518 class INFERENCE_ENGINE_API_CLASS(WeightableLayer): public CNNLayer {
519 public:
520  /**
521  * @brief A default constructor. Constructs a WeightableLayer instance and initiates layer parameters with the given
522  * values
523  * @param prms Initial layer parameters
524  */
525  explicit WeightableLayer(const LayerParams& prms): CNNLayer(prms) {}
526 
527  /**
528  * @brief A pointer to a weights blob
529  */
531  /**
532  * @brief A pointer to a biases blob
533  */
535 
536  /**
537  * @brief Constructs a WeightableLayer instance and initiates layer parameters with the given values
538  */
539  using CNNLayer::CNNLayer;
540 
541  virtual ~WeightableLayer();
542 };
543 
/**
 * @brief Convenient way to declare a property with backward compatibility to 2D members
 * (exposes prop_name plus prop_name_x / prop_name_y references into it)
 */
#define DEFINE_PROP(prop_name)                              \
    PropertyVector<unsigned int> prop_name;                 \
    unsigned int& prop_name##_x = prop_name.at(X_AXIS);     \
    unsigned int& prop_name##_y = prop_name.at(Y_AXIS);
551 
552 /**
553  * @brief This class represents a standard 3D Convolution Layer
554  */
555 class INFERENCE_ENGINE_API_CLASS(ConvolutionLayer): public WeightableLayer {
556 public:
557  /**
558  * @brief A convolution kernel array [X, Y, Z, ...]
559  */
560  DEFINE_PROP(_kernel);
561  /**
562  * @brief A convolution paddings begin array [X, Y, Z, ...]
563  */
564  DEFINE_PROP(_padding);
565  /**
566  * @brief A convolution paddings end array [X, Y, Z, ...]
567  */
569  /**
570  * @brief A convolution strides array [X, Y, Z, ...]
571  */
572  DEFINE_PROP(_stride);
573  /**
574  * @brief A convolution dilations array [X, Y, Z, ...]
575  */
576  DEFINE_PROP(_dilation);
577  /**
578  * @brief A number of output feature maps (size) generating the 3'rd output dimension
579  */
580  unsigned int _out_depth = 0u;
581  /**
582  * @brief Number of groups
583  */
584  unsigned int _group = 1u;
585  /**
586  * @brief Auto padding type
587  */
588  std::string _auto_pad;
589 
590  /**
591  * @brief Creates a new ConvolutionLayer instance.
592  */
593  explicit ConvolutionLayer(const LayerParams& p)
594  : WeightableLayer(p), _kernel(2, 0u), _padding(2, 0u), _stride(2, 1u), _dilation(2, 1u) {}
595  /**
596  * @brief assignment operator
597  */
599  if (&that != this) {
600  WeightableLayer::operator=(that);
601  _kernel = that._kernel;
602  _padding = that._padding;
603  _pads_end = that._pads_end;
604  _stride = that._stride;
605  _dilation = that._dilation;
606  _out_depth = that._out_depth;
607  _group = that._group;
608  }
609  return *this;
610  }
611  /**
612  * @brief copy constructor
613  */
615  operator=(that);
616  }
617  /**
618  * @brief move constructor
619  */
620  ConvolutionLayer(ConvolutionLayer&&) = default;
621 
622  virtual ~ConvolutionLayer();
623 };
624 
625 /**
626  * @brief This class represents a standard deconvolution layer
627  */
628 class INFERENCE_ENGINE_API_CLASS(DeconvolutionLayer): public ConvolutionLayer {
629 public:
631  using ConvolutionLayer::operator=;
632 
633  virtual ~DeconvolutionLayer();
634 };
635 
636 /**
637  * @brief This class represents a standard deformable convolution layer
638  */
639 class INFERENCE_ENGINE_API_CLASS(DeformableConvolutionLayer): public ConvolutionLayer {
640 public:
642  using ConvolutionLayer::operator=;
643 
644  /**
645  * @brief Number of deformable groups
646  */
647  unsigned int _deformable_group = 1u;
648 
649  virtual ~DeformableConvolutionLayer();
650 };
651 
652 /**
653  * @brief This class represents a standard pooling layer
654  */
655 class INFERENCE_ENGINE_API_CLASS(PoolingLayer): public CNNLayer {
656 public:
657  /**
658  * @brief Pooling kernel array [X, Y, Z, ...]
659  */
660  DEFINE_PROP(_kernel);
661  /**
662  * @brief Pooling paddings begin array [X, Y, Z, ...]
663  */
664  DEFINE_PROP(_padding);
665  /**
666  * @brief Pooling paddings end array [X, Y, Z, ...]
667  */
669  /**
670  * @brief Pooling strides array [X, Y, Z, ...]
671  */
672  DEFINE_PROP(_stride);
673 
674  /**
675  * @enum PoolType
676  * @brief Defines available pooling types
677  */
678  enum PoolType { MAX = 1, AVG = 2, STOCH = 3, ROI = 4, SPACIAL_PYRAMID = 5 };
679 
680  /**
681  * @brief A pooling type
682  */
683  PoolType _type = MAX;
684 
685  /**
686  * @brief A flag that indicates if padding is excluded or not
687  */
688  bool _exclude_pad = false;
689  /**
690  * @brief Auto padding type
691  */
692  std::string _auto_pad;
693 
694  /**
695  * @brief Creates a new PoolingLayer instance.
696  */
697  explicit PoolingLayer(const LayerParams& p): CNNLayer(p), _kernel(2, 0u), _padding(2, 0u), _stride(2, 0u) {}
698 
699  /**
700  * @brief assignment operator
701  */
703  if (&that != this) {
704  CNNLayer::operator=(that);
705  _kernel = that._kernel;
706  _padding = that._padding;
707  _pads_end = that._pads_end;
708  _stride = that._stride;
709  _type = that._type;
710  _exclude_pad = that._exclude_pad;
711  }
712  return *this;
713  }
714  /**
715  * @brief copy constructor
716  */
717  PoolingLayer(const PoolingLayer& that): CNNLayer(that) {
718  operator=(that);
719  }
720 
721  /**
722  * @brief move constructor
723  */
724  PoolingLayer(PoolingLayer&&) = default;
725 
726  virtual ~PoolingLayer();
727 };
728 
729 /**
730  * @brief This class represents a standard binary convolution layer
731  */
732 class INFERENCE_ENGINE_API_CLASS(BinaryConvolutionLayer): public WeightableLayer {
733 public:
734  /**
735  * @enum eBinaryConvolutionMode
736  * @brief Defines possible modes of binary convolution operation
737  */
738  enum eBinaryConvolutionMode { xnor_popcount = 0 };
739 
740  /**
741  * @brief Mode of binary convolution operation
742  */
743  eBinaryConvolutionMode _mode = xnor_popcount;
744 
745  /**
746  * @brief A number of input feature maps (size) generating the 3'rd input dimension
747  */
748  unsigned int _in_depth = 0u;
749 
750  /**
751  * @brief A pad value which is used to fill pad area
752  */
753  float _pad_value = 0.0f;
754 
755  /**
756  * @brief A convolution kernel array [X, Y, Z, ...]
757  */
758  DEFINE_PROP(_kernel);
759  /**
760  * @brief A convolution paddings begin array [X, Y, Z, ...]
761  */
762  DEFINE_PROP(_padding);
763  /**
764  * @brief A convolution paddings end array [X, Y, Z, ...]
765  */
767  /**
768  * @brief A convolution strides array [X, Y, Z, ...]
769  */
770  DEFINE_PROP(_stride);
771  /**
772  * @brief A convolution dilations array [X, Y, Z, ...]
773  */
774  DEFINE_PROP(_dilation);
775  /**
776  * @brief A number of output feature maps (size) generating the 3'rd output dimension
777  */
778  unsigned int _out_depth = 0u;
779  /**
780  * @brief Number of groups
781  */
782  unsigned int _group = 1u;
783  /**
784  * @brief Auto padding type
785  */
786  std::string _auto_pad;
787 
788  /**
789  * @brief Creates a new BinaryConvolutionLayer instance.
790  */
792  : WeightableLayer(p), _kernel(2, 0u), _padding(2, 0u), _stride(2, 1u), _dilation(2, 1u) {}
793  /**
794  * @brief assignment operator
795  */
797  if (&that != this) {
798  WeightableLayer::operator=(that);
799  _kernel = that._kernel;
800  _padding = that._padding;
801  _pads_end = that._pads_end;
802  _stride = that._stride;
803  _dilation = that._dilation;
804  _out_depth = that._out_depth;
805  _group = that._group;
806  _mode = that._mode;
807  _in_depth = that._in_depth;
808  _pad_value = that._pad_value;
809  }
810  return *this;
811  }
812  /**
813  * @brief copy constructor
814  */
816  operator=(that);
817  }
818  /**
819  * @brief move constructor
820  */
822 
823  virtual ~BinaryConvolutionLayer();
824 };
825 
826 #undef DEFINE_PROP
827 
828 /**
829  * @brief This class represents a fully connected layer
830  */
831 class INFERENCE_ENGINE_API_CLASS(FullyConnectedLayer): public WeightableLayer {
832 public:
833  /**
834  * @brief A size of output
835  */
836  unsigned int _out_num = 0;
837 
838  /**
839  * @brief Creates a new FullyConnectedLayer instance and initializes layer parameters with the given values.
840  */
842 
843  virtual ~FullyConnectedLayer();
844 };
845 
846 /**
847  * @brief This class represents concatenation layer
848  * Takes as input several data elements and merges them to one using the supplied axis
849  */
850 class INFERENCE_ENGINE_API_CLASS(ConcatLayer): public CNNLayer {
851 public:
852  /**
853  * @brief An axis on which concatenation operation is performed
854  */
855  unsigned int _axis = 1;
856 
857  /**
858  * @brief Creates a new ConcatLayer instance and initializes layer parameters with the given values.
859  * If batch is used, then batch needs to be specified as an input dimension also
860  * In current implementation 1 means channels, 0 - batch
861  */
862  using CNNLayer::CNNLayer;
863 
864  virtual ~ConcatLayer();
865 };
866 
867 /**
868  * @brief This class represents a layer that evenly splits the input into the supplied outputs
869  */
870 class INFERENCE_ENGINE_API_CLASS(SplitLayer): public CNNLayer {
871 public:
872  /**
873  * @brief An axis on which split operation is performed
874  */
875  unsigned int _axis = 1;
876 
877  /**
878  * @brief Creates a new SplitLayer instance.
879  */
880  using CNNLayer::CNNLayer;
881 
882  virtual ~SplitLayer();
883 };
884 
885 /**
886  * @brief This class represents a Linear Response Normalization (LRN) Layer
887  */
888 class INFERENCE_ENGINE_API_CLASS(NormLayer): public CNNLayer {
889 public:
890  /**
891  * @brief Response size
892  */
893  unsigned int _size = 0;
894  /**
895  * @brief K
896  */
897  unsigned int _k = 1;
898  /**
899  * @brief Alpha coefficient
900  */
901  float _alpha = 0;
902  /**
903  * @brief Beta coefficient
904  */
905  float _beta = 0;
906  /**
907  * @brief Flag to specify normalization across feature maps (true) or across channels
908  */
909  bool _isAcrossMaps = false;
910 
911  /**
912  * @brief Creates a new NormLayer instance.
913  */
914  using CNNLayer::CNNLayer;
915 
916  virtual ~NormLayer();
917 };
918 
919 /**
920  * @brief This class represents standard softmax Layer
921  */
922 class INFERENCE_ENGINE_API_CLASS(SoftMaxLayer): public CNNLayer {
923 public:
924  /**
925  * @brief Axis number for a softmax operation
926  */
927  int axis = 1;
928  /**
929  * @brief Creates a new SoftMaxLayer instance.
930  */
931  using CNNLayer::CNNLayer;
932 
933  virtual ~SoftMaxLayer();
934 };
935 
936 /**
937  * @class GRNLayer
938  * @brief This class represents standard GRN Layer
939  */
940 class INFERENCE_ENGINE_API_CLASS(GRNLayer): public CNNLayer {
941 public:
942  /**
943  * @brief A default constructor. Creates a new GRNLayer instance and initializes layer parameters with the given
944  * values.
945  * @param prms Initial layer parameters
946  */
947  explicit GRNLayer(const LayerParams& prms): CNNLayer(prms), bias(0.f) {}
948 
949  /**
950  * @brief Bias for squares sum
951  */
952  float bias = 0.f;
953 
954  virtual ~GRNLayer();
955 };
956 
957 /**
958  * @class MVNLayer
959  * @brief This class represents standard MVN Layer
960  */
961 class INFERENCE_ENGINE_API_CLASS(MVNLayer): public CNNLayer {
962 public:
963  /**
964  * @brief A default constructor. Creates a new MVNLayer instance and initializes layer parameters with the given
965  * values.
966  * @param prms Initial layer parameters
967  */
968  explicit MVNLayer(const LayerParams& prms): CNNLayer(prms), across_channels(0), normalize(1) {}
969 
970  /**
971  * @brief Indicate that mean value is calculated across channels
972  */
974 
975  /**
976  * @brief Indicate that the result needs to be normalized
977  */
978  int normalize = 1;
979 
980  virtual ~MVNLayer();
981 };
982 
983 /**
984  * @brief This class represents a Rectified Linear activation layer
985  */
986 class INFERENCE_ENGINE_API_CLASS(ReLULayer): public CNNLayer {
987 public:
988  /**
989  * @brief Negative slope is used to takle negative inputs instead of setting them to 0
990  */
991  float negative_slope = 0.0f;
992 
993  /**
994  * @brief Creates a new ReLULayer instance.
995  */
996  using CNNLayer::CNNLayer;
997 
998  virtual ~ReLULayer();
999 };
1000 
1001 /**
1002  * @brief This class represents a Clamp activation layer
1003  * Clamps all tensor elements into the range [min_value, max_value]
1004  */
1005 class INFERENCE_ENGINE_API_CLASS(ClampLayer): public CNNLayer {
1006 public:
1007  /**
1008  * @brief A minimum value
1009  */
1010  float min_value = 0.0f;
1011 
1012  /**
1013  * @brief A maximum value
1014  */
1015  float max_value = 1.0f;
1016  /**
1017  * @brief Creates a new ClampLayer instance.
1018  */
1019  using CNNLayer::CNNLayer;
1020 
1021  virtual ~ClampLayer();
1022 };
1023 
1024 /**
1025  * @brief This class represents a ReLU6 activation layer
1026  * Clamps all tensor elements into the range [0, 6.0]
1027  */
1028 class INFERENCE_ENGINE_API_CLASS(ReLU6Layer): public ClampLayer {
1029 public:
1030  explicit ReLU6Layer(const LayerParams& prms): ClampLayer(prms) {
1031  max_value = 6.0f;
1032  }
1033 
1034  virtual ~ReLU6Layer();
1035 };
1036 
1037 /**
1038  * @brief This class represents an element wise operation layer
1039  */
1040 class INFERENCE_ENGINE_API_CLASS(EltwiseLayer): public CNNLayer {
1041 public:
1042  /**
1043  * @enum eOperation
1044  * @brief Defines possible operations that can be used
1045  */
1046  enum eOperation {
1047  Sum = 0,
1048  Prod,
1049  Max,
1050  Sub,
1051  Min,
1052  Div,
1053  Squared_diff,
1054  Floor_mod,
1055  Pow,
1056  Equal,
1057  Not_equal,
1058  Less,
1059  Less_equal,
1060  Greater,
1061  Greater_equal,
1062  Logical_AND,
1063  Logical_OR,
1064  Logical_XOR,
1065  Logical_NOT,
1066  Mean
1067  };
1068 
1069  /**
1070  * @brief A type of the operation to use
1071  */
1072  eOperation _operation = Sum;
1073 
1074  /**
1075  * @brief A vector of coefficients to scale the operands
1076  */
1077  std::vector<float> coeff;
1078 
1079  /**
1080  * @brief Creates a new EltwiseLayer instance.
1081  */
1082  using CNNLayer::CNNLayer;
1083 
1084  virtual ~EltwiseLayer();
1085 };
1086 
1087 /**
1088  * @brief This class represents a standard crop layer
1089  */
1090 class INFERENCE_ENGINE_API_CLASS(CropLayer): public CNNLayer {
1091 public:
1092  /**
1093  * @brief A vector of dimensions for cropping
1094  */
1095  std::vector<int> axis;
1096  /**
1097  * @brief A vector of dimensions to be preserved
1098  */
1099  std::vector<int> dim;
1100  /**
1101  * @brief A vector of offsets for each dimension
1102  */
1103  std::vector<int> offset;
1104 
1105  /**
1106  * @brief Creates a new CropLayer instance.
1107  */
1108  using CNNLayer::CNNLayer;
1109 
1110  virtual ~CropLayer();
1111 };
1112 
1113 /**
1114  * @brief This class represents a standard reshape layer
1115  */
1116 class INFERENCE_ENGINE_API_CLASS(ReshapeLayer): public CNNLayer {
1117 public:
1118  /**
1119  * @brief A vector of sizes of the shape
1120  */
1121  std::vector<int> shape;
1122  /**
1123  * @brief A number of axis to be taken for a reshape
1124  */
1125  int axis = 0;
1126  /**
1127  * @brief A number of first axises to be taken for a reshape
1128  */
1129  int num_axes = -1;
1130 
1131  /**
1132  * @brief Creates a new ReshapeLayer instance.
1133  */
1134  using CNNLayer::CNNLayer;
1135 
1136  virtual ~ReshapeLayer();
1137 };
1138 
1139 /**
1140  * @brief This class represents a standard Tile Layer
1141  */
1142 class INFERENCE_ENGINE_API_CLASS(TileLayer): public CNNLayer {
1143 public:
1144  /**
1145  * @brief An index of the axis to tile
1146  */
1147  int axis = -1;
1148  /**
1149  * @brief A number of copies to be made
1150  */
1151  int tiles = -1;
1152 
1153  /**
1154  * @brief Creates a new TileLayer instance.
1155  */
1156  using CNNLayer::CNNLayer;
1157 
1158  virtual ~TileLayer();
1159 };
1160 
1161 /**
1162  * @brief This class represents a Layer which performs Scale and Shift
1163  */
1164 class INFERENCE_ENGINE_API_CLASS(ScaleShiftLayer): public WeightableLayer {
1165 public:
1166  /**
1167  * @brief A flag that indicates if the same value is used for all the features. If false, the value is used pixel
1168  * wise
1169  */
1170  unsigned int _broadcast = 0;
1171 
1172  /**
1173  * @brief Creates a new ScaleShiftLayer instance.
1174  */
1176 
1177  virtual ~ScaleShiftLayer();
1178 };
1179 
1180 /**
1181  * @brief This class represents TensorIterator layer
1182  */
1183 class INFERENCE_ENGINE_API_CLASS(TensorIterator): public CNNLayer {
1184 public:
1185  struct PortMap {
1186  // Data map rule
1187  int from; /**< Index of exteral data from ins/outs fields of CNNLayer */
1188  int to; /**< Index of internal data in iterator body */
1189 
1190  // Iteration rule
1191  int axis; /**< Axis to iterate throught */
1192  int stride; /**< Stride to iterate throught */
1193  int start; /**< Start index of iteration range */
1194  int end; /**< Last index of iteration range */
1195  int part_size; /**< Part size which will be transfered to body subnetwork */
1196  };
1197 
1198  struct Body {
1199  std::vector<DataPtr> inputs;
1200  std::vector<DataPtr> outputs;
1201  };
1202 
1203  std::vector<PortMap> input_port_map;
1204  std::vector<PortMap> output_port_map;
1205  std::vector<PortMap> back_edges;
1206 
1207  Body body;
1208 
1209  using CNNLayer::CNNLayer;
1210 
1211  virtual ~TensorIterator();
1212 };
1213 
1214 /**
1215  * @brief Base class for recurrent cell layers
1216  */
1217 class INFERENCE_ENGINE_API_CLASS(RNNCellBase): public WeightableLayer {
1218 public:
1220 
1221  /**
1222  * @brief Direct type of recurrent cell (including subtypes)
1223  * Description of particular cell semantics is in LSTMCell, GRUCell, RNNCell.
1224  */
1225  enum CellType {
1226  LSTM, /**< Original LSTM cell */
1227  GRU, /**< Original GRU cell */
1228  RNN, /**< Original RNN cell */
1229  GRU_LBR, /**< GRU cell modification. "Linear before reset" */
1230  };
1231 
1232  /** @copybrief CellType */
1233  CellType cellType = LSTM;
1234 
1235  /**
1236  * @brief Size of hidden state data
1237  *
1238  * In case of batch output state tensor will have shape [N, hidden_size]
1239  */
1240  int hidden_size = 0;
1241 
1242  /**
1243  * @brief Clip data into range [-clip, clip] on input of activations
1244  *
1245  * clip==0.0f means no clipping
1246  */
1247  float clip = 0.0f;
1248  /**
1249  * @brief Activations used inside recurrent cell
1250  *
1251  * Valid values: sigmoid, tanh, relu
1252  */
1253  std::vector<std::string> activations;
1254 
1255  /**
1256  * @brief Alpha parameters of activations
1257  *
1258  * Respective to activation list.
1259  */
1260  std::vector<float> activation_alpha;
1261 
1262  /**
1263  * @brief Beta parameters of activations
1264  *
1265  * Respective to activation list.
1266  */
1267  std::vector<float> activation_beta;
1268 
1269  virtual ~RNNCellBase();
1270 };
1271 
1272 /**
1273  * @brief LSTM Cell layer
1274  *
1275  * G - number of gates (=4)
1276  * N - batch size
1277  * S - state size (=hidden_size)
1278  *
1279  * Inputs:
1280  * [N,D] Xt - input data
1281  * [N,S] Ht-1 - initial hidden state
1282  * [N,S] Ct-1 - initial cell state
1283  *
1284  * Outputs:
1285  * [N,S] Ht - out hidden state
1286  * [N,S] Ct - out cell state
1287  *
1288  * Weights:
1289  * - weights [G,S,D+S]
1290  * - biases [G,S]
1291  * NB! gates order is FICO {forget, input, candidate, output}
1292  *
1293  * activations is {_f, _g, _h}
1294  * default: {_f=sigm, _g=tanh, _h=tanh}
1295  *
1296  * Equations:
1297  *
1298  * * - matrix mult
1299  * (.) - eltwise mult
1300  * [,] - concatenation
1301  *
1302  * - ft = _f(Wf*[Ht-1, Xt] + Bf)
1303  * - it = _f(Wi*[Ht-1, Xt] + Bi)
1304  * - ct = _g(Wc*[Ht-1, Xt] + Bc)
1305  * - ot = _f(Wo*[Ht-1, Xt] + Bo)
1306  * - Ct = ft (.) Ct-1 + it (.) ct
1307  * - Ht = ot (.) _h(Ct)
1308  */
1309 class INFERENCE_ENGINE_API_CLASS(LSTMCell): public RNNCellBase {
1310 public:
1311  using RNNCellBase::RNNCellBase;
1312  using RNNCellBase::operator=;
1313 
1314  virtual ~LSTMCell();
1315 };
1316 
1317 /**
1318  * @brief GRU Cell layer
1319  *
1320  * G - number of gates (=3)
1321  * N - batch size
1322  * S - state size (=hidden_size)
1323  *
1324  * Inputs:
1325  * [N,D] Xt - input data
1326  * [N,S] Ht-1 - initial hidden state
1327  *
1328  * Outputs:
1329  * [N,S] Ht - out hidden state
1330  *
1331  * Weights:
1332  * - weights [G,S,D+S]
1333  * - biases [G,S]
1334  * NB! gates order is ZRH {update, reset, output}
1335  *
1336  * activations is {_f, _g}
1337  * default: {_f=sigm, _g=tanh}
1338  *
1339  * Equations:
1340  *
1341  * * - matrix mult
1342  * (.) - eltwise mult
1343  * [,] - concatenation
1344  *
1345  * - zt = _f(Wz*[Ht-1, Xt] + Bz)
1346  * - rt = _f(Wr*[Ht-1, Xt] + Br)
1347  * - ht = _g(Wh*[rt (.) Ht-1, Xt] + Bh)
1348  * - Ht = (1 - zt) (.) ht + zt (.) Ht-1
1349  */
1350 class INFERENCE_ENGINE_API_CLASS(GRUCell): public RNNCellBase {
1351 public:
1352  using RNNCellBase::RNNCellBase;
1353  using RNNCellBase::operator=;
1354 
1355  virtual ~GRUCell();
1356 };
1357 
1358 /**
1359  * @brief RNN Cell layer
1360  *
1361  * G - number of gates (=1)
1362  * N - batch size
1363  * S - state size (=hidden_size)
1364  *
1365  * Inputs:
1366  * [N,D] Xt - input data
1367  * [N,S] Ht-1 - initial hidden state
1368  *
1369  * Outputs:
1370  * [N,S] Ht - out hidden state
1371  *
1372  * Weights:
1373  * - weights [G,S,D+S]
1374  * - biases [G,S]
1375  *
1376  * activations is {_f}
1377  * default: {_f=tanh}
1378  *
1379  * Equations:
1380  *
1381  * * - matrix mult
1382  * [,] - concatenation
1383  *
1384  * - Ht = _f(Wi*[Ht-1, Xt] + Bi)
1385  */
1386 class INFERENCE_ENGINE_API_CLASS(RNNCell): public RNNCellBase {
1387 public:
1388  using RNNCellBase::RNNCellBase;
1389  using RNNCellBase::operator=;
1390 
1391  virtual ~RNNCell();
1392 };
1393 
1394 /**
1395  * @brief Sequence of recurrent cells
1396  *
1397  * N - batch size
1398  * T - sequence size
1399  * S - state size (=hidden_size)
1400  * NS - num of state tensors (LSTM=2, GRU/RNN=1)
1401  * ND - num of direction (BDR=2, WFD/BWD=1)
1402  *
1403  * Inputs:
1404  * [N,T,D] Xt - input data
1405  * [ND,N,S] Ht-1 - initial hidden state
1406  * [ND,N,S] Ct-1 - initial cell state // if NS==2
1407  * [N] SL - sequence length (optional input)
1408  *
1409  * Outputs:
1410  * [ND,N,T,S] Xt - input data
1411  * [ND,N,S] Ht-1 - initial hidden state
1412  * [ND,N,S] Ct-1 - initial cell state // if NS==2
1413  *
1414  * NB! if axis==0 batch and sequense dimensions are swapped (N <-> T) for input and output tensors
1415  *
1416  * Weights:
1417  * - weights [ND,G,S,D+S]
1418  * - biases [ND,G,S]
1419  * NB! if ND==2 weights are concatenated cell weights [forward_cell_weights, backward_cell_weights]
1420  *
1421  */
1422 class INFERENCE_ENGINE_API_CLASS(RNNSequenceLayer): public RNNCellBase {
1423 public:
1424  using RNNCellBase::RNNCellBase;
1425 
1426  /**
1427  * @brief An axis by which iteration is performed
1428  * axis=0 means first input/output data blob dimension is sequence
1429  * axis=1 means first input/output data blob dimension is batch
1430  */
1431  unsigned int axis = 1;
1432 
1433  /**
1434  * @brief Direction of iteration through sequence dimension
1435  */
1436  enum Direction {
1437  FWD, /**< Forward mode. Iterate starts from index 0 with step 1. */
1438  BWD, /**< Backward mode. Iterate starts from last index with step -1. */
1439  BDR /**< Bidirectional mode. First is forward pass, second is backward. */
1440  };
1441 
1442  /** @copybrief Direction */
1443  Direction direction = FWD;
1444 
1445  virtual ~RNNSequenceLayer();
1446 };
1447 
1448 /**
1449  * @brief This class represents a Layer which performs Scale and Shift
1450  */
1451 class INFERENCE_ENGINE_API_CLASS(PReLULayer): public WeightableLayer {
1452 public:
1453  /**
1454  * @brief A flag that indicates if the same negative_slope value is used for all the features. If false, the value
1455  * is used pixel wise
1456  */
1458 
1459  /**
1460  * @brief A default constructor. Creates a new PReLULayer instance and initializes layer parameters with the given
1461  * values.
1462  * @param prms Initial layer parameters
1463  */
1464  explicit PReLULayer(const LayerParams& prms): WeightableLayer(prms), _channel_shared(false) {}
1465 
1466  virtual ~PReLULayer();
1467 };
1468 
1469 /**
1470  * @brief This class represents a standard Power Layer
1471  * Formula is: output = (offset + scale * input) ^ power
1472  */
1473 class INFERENCE_ENGINE_API_CLASS(PowerLayer): public CNNLayer {
1474 public:
1475  /**
1476  * @brief An exponent value
1477  */
1478  float power = 1.f;
1479  /**
1480  * @brief A scale factor
1481  */
1482  float scale = 1.f;
1483  /**
1484  * @brief An offset value
1485  */
1486  float offset = 0.f;
1487 
1488  /**
1489  * @brief Creates a new PowerLayer instance.
1490  */
1491  using CNNLayer::CNNLayer;
1492 
1493  virtual ~PowerLayer();
1494 };
1495 
1496 /**
1497  * @brief This class represents a Batch Normalization Layer
1498  */
1499 class INFERENCE_ENGINE_API_CLASS(BatchNormalizationLayer): public WeightableLayer {
1500 public:
1501  /**
1502  * @brief A small value to add to the variance estimate to avoid division by zero
1503  */
1504  float epsilon = 1e-3f;
1505 
1506  /**
1507  * @brief Creates a new BatchNormalizationLayer instance.
1508  */
1510 
1511  virtual ~BatchNormalizationLayer();
1512 };
1513 
1514 /**
1515  * @brief This class represents a general matrix multiplication operation layer
1516  * Formula is: dst := alpha*src1*src2 + beta*src3
1517  */
1518 class INFERENCE_ENGINE_API_CLASS(GemmLayer): public CNNLayer {
1519 public:
1520  /**
1521  * @brief A scale factor of src1 matrix
1522  */
1523  float alpha = 1.f;
1524  /**
1525  * @brief A scale factor of src3 matrix
1526  */
1527  float beta = 1.f;
1528  /**
1529  * @brief A flag that indicates if the src1 matrix is to be transposed
1530  */
1531  bool transpose_a = false;
1532  /**
1533  * @brief A flag that indicates if the src2 matrix is to be transposed
1534  */
1535  bool transpose_b = false;
1536  /**
1537  * @brief Creates a new GemmLayer instance.
1538  */
1539  using CNNLayer::CNNLayer;
1540 
1541  virtual ~GemmLayer();
1542 };
1543 
1544 /**
1545  * @brief This class represents a standard Pad layer
1546  * Adds paddings to input tensor
1547  */
1548 class INFERENCE_ENGINE_API_CLASS(PadLayer): public CNNLayer {
1549 public:
1550  /**
1551  * @enum ePadMode
1552  * @brief Defines possible modes of pad operation
1553  */
1554  enum ePadMode { Constant = 0, Edge, Reflect, Symmetric };
1555 
1556  /**
1557  * @brief Size of padding in the beginning of each axis
1558  */
1560  /**
1561  * @brief Size of padding in the end of each axis
1562  */
1564  /**
1565  * @brief Mode of pad operation
1566  */
1567  ePadMode pad_mode = Constant;
1568  /**
1569  * @brief A pad value which is used for filling in Constant mode
1570  */
1571  float pad_value = 0.0f;
1572  /**
1573  * @brief Creates a new PadLayer instance.
1574  */
1575  using CNNLayer::CNNLayer;
1576 
1577  virtual ~PadLayer();
1578 };
1579 
1580 /**
1581  * @brief This class represents a standard Gather layer
1582  * Gather slices from Dictionary according to Indexes
1583  */
1584 class INFERENCE_ENGINE_API_CLASS(GatherLayer): public CNNLayer {
1585 public:
1586  /**
1587  * @brief The axis in Dictionary to gather Indexes from
1588  */
1589  int axis = 0;
1590  /**
1591  * @brief Creates a new GatherLayer instance.
1592  */
1593  using CNNLayer::CNNLayer;
1594 
1595  virtual ~GatherLayer();
1596 };
1597 
1598 /**
1599  * @brief This class represents a standard Strided Slice layer
1600  * Strided Slice picks from input tensor according parameters
1601  */
1602 class INFERENCE_ENGINE_API_CLASS(StridedSliceLayer): public CNNLayer {
1603 public:
1604  /**
1605  * @brief The begin_mask is a bitmask where bit i being 0 means
1606  * to ignore the begin value and instead use the default value
1607  */
1608  std::string begin_mask;
1609  /**
1610  * @brief Analogous to begin_mask
1611  */
1612  std::string end_mask;
1613  /**
1614  * @brief The ellipsis_mask is a bitmask where bit i being 1 means
1615  * the i-th is actually an ellipsis
1616  */
1617  std::string ellipsis_mask;
1618  /**
1619  * @brief The new_axis_mask_ is a bitmask where bit i being 1 means
1620  * the i-th position creates a new 1 dimension shape
1621  */
1622  std::string new_axis_mask;
1623  /**
1624  * @brief The shrink_axis_mask is a bitmask where bit i being 1 means
1625  * the i-th position shrinks the dimensionality
1626  */
1627  std::string shrink_axis_mask;
1628 
1629  /**
1630  * @brief Creates a new StridedSliceLayer instance.
1631  */
1632  using CNNLayer::CNNLayer;
1633 
1634  virtual ~StridedSliceLayer();
1635 };
1636 
1637 /**
1638  * @brief This class represents a standard Shuffle Channels layer
1639  * Shuffle Channels picks from input tensor according parameters
1640  */
1641 class INFERENCE_ENGINE_API_CLASS(ShuffleChannelsLayer): public CNNLayer {
1642 public:
1643  /**
1644  * @brief The axis in tensor to shuffle channels
1645  */
1646  int axis = 1;
1647 
1648  /**
1649  * @brief The group of output shuffled channels
1650  */
1651  unsigned int group = 1;
1652 
1653  /**
1654  * @brief Creates a new ShuffleChannelsLayer instance.
1655  */
1656  using CNNLayer::CNNLayer;
1657 
1658  virtual ~ShuffleChannelsLayer();
1659 };
1660 
1661 /**
1662  * @brief This class represents a standard Depth To Space layer
1663  * Depth To Space picks from input tensor according parameters
1664  */
1665 class INFERENCE_ENGINE_API_CLASS(DepthToSpaceLayer): public CNNLayer {
1666 public:
1667  /**
1668  * @brief The group of output shuffled channels
1669  */
1670  unsigned int block_size = 1;
1671 
1672  /**
1673  * @brief Creates a new DepthToSpaceLayer instance.
1674  */
1675  using CNNLayer::CNNLayer;
1676 
1677  virtual ~DepthToSpaceLayer();
1678 };
1679 
1680 /**
1681  * @brief This class represents a standard Space To Depth layer
1682  * Depth To Space picks from input tensor according parameters
1683  */
1684 class INFERENCE_ENGINE_API_CLASS(SpaceToDepthLayer): public CNNLayer {
1685 public:
1686  /**
1687  * @brief The group of output Space To Depth
1688  */
1689  unsigned int block_size = 1;
1690 
1691  /**
1692  * @brief Creates a new SpaceToDepthLayer instance.
1693  */
1694  using CNNLayer::CNNLayer;
1695 
1696  virtual ~SpaceToDepthLayer();
1697 };
1698 
1699 /**
1700  * @brief This class represents SparseFillEmptyRows layer
1701  * SparseFillEmptyRows fills empty rows in a sparse tensor
1702  */
1703 class INFERENCE_ENGINE_API_CLASS(SparseFillEmptyRowsLayer): public CNNLayer {
1704 public:
1705  /**
1706  * @brief Creates a new SparseFillEmptyRowsLayer instance.
1707  */
1708  using CNNLayer::CNNLayer;
1709 
1710  virtual ~SparseFillEmptyRowsLayer();
1711 };
1712 
1713 /**
1714  * @brief This class represents SparseSegmentMean(SqrtN, Sum) layers
1715  * SparseSegmentMean(SqrtN, Sum) layer reduces data along sparse segments of a tensor.
1716  */
1717 class INFERENCE_ENGINE_API_CLASS(SparseSegmentReduceLayer): public CNNLayer {
1718 public:
1719  /**
1720  * @brief Creates a new SparseSegmentReduceLayer instance.
1721  */
1722  using CNNLayer::CNNLayer;
1723 
1724  virtual ~SparseSegmentReduceLayer();
1725 };
1726 
1727 /**
1728  * @brief This class represents a standard Reverse Sequence layer
1729  * Reverse Sequence modifies input tensor according parameters
1730  */
1731 class INFERENCE_ENGINE_API_CLASS(ReverseSequenceLayer): public CNNLayer {
1732 public:
1733  /**
1734  * @brief The seq_axis dimension in tensor which is partially reversed
1735  */
1736  int seq_axis = 1;
1737 
1738  /**
1739  * @brief The batch_axis dimension in tensor along which reversal is performed
1740  */
1741  int batch_axis = 0;
1742 
1743  /**
1744  * @brief Creates a new ReverseSequence instance.
1745  */
1746  using CNNLayer::CNNLayer;
1747 
1748  virtual ~ReverseSequenceLayer();
1749 };
1750 
1751 /**
1752  * @brief This class represents a OneHot layer
1753  * Converts input into OneHot representation.
1754  */
1755 class INFERENCE_ENGINE_API_CLASS(OneHotLayer): public CNNLayer {
1756 public:
1757  /**
1758  * @brief A depth of representation
1759  */
1760  unsigned int depth = 0;
1761 
1762  /**
1763  * @brief The locations represented by indices in input take value on_value
1764  */
1765  float on_value = 1.f;
1766 
1767  /**
1768  * @brief The locations not represented by indices in input take value off_value
1769  */
1770  float off_value = 0.f;
1771 
1772  /**
1773  * @brief Define the shape of output tensor
1774  */
1775  int axis = -1;
1776 
1777  /**
1778  * @brief Creates a new OneHot instance
1779  */
1780  using CNNLayer::CNNLayer;
1781 
1782  virtual ~OneHotLayer();
1783 };
1784 
1785 /**
1786  * @brief This class represents a standard RangeLayer layer
1787  * RangeLayer modifies input tensor dimensions according parameters
1788  */
1789 class INFERENCE_ENGINE_API_CLASS(RangeLayer): public CNNLayer {
1790 public:
1791  /**
1792  * @brief Creates a new RangeLayer instance.
1793  */
1794  using CNNLayer::CNNLayer;
1795 
1796  virtual ~RangeLayer();
1797 };
1798 
1799 /**
1800  * @brief This class represents a standard Fill layer
1801  * RFill modifies input tensor according parameters
1802  */
1803 class INFERENCE_ENGINE_API_CLASS(FillLayer): public CNNLayer {
1804 public:
1805  /**
1806  * @brief Creates a new Fill instance.
1807  */
1808  using CNNLayer::CNNLayer;
1809 
1810  virtual ~FillLayer();
1811 };
1812 
1813 /**
1814  * @brief This class represents a SelectLayer layer
1815  * SelectLayer layer takes elements from the second (“then”) or the third (“else”) input based on condition mask
1816  * (“cond”) provided in the first input. The “cond” tensor is broadcasted to “then” and “else” tensors. The output
1817  * tensor shape is equal to broadcasted shape of “cond”, “then” and “else”.
1818  */
1819 class INFERENCE_ENGINE_API_CLASS(SelectLayer): public CNNLayer {
1820 public:
1821  /**
1822  * @brief Creates a new SelectLayer instance.
1823  */
1824  using CNNLayer::CNNLayer;
1825 
1826  virtual ~SelectLayer();
1827 };
1828 
1829 /**
1830  * @brief This class represents a standard Broadcast layer
1831  * Broadcast modifies input tensor dimensions according parameters
1832  */
1833 class INFERENCE_ENGINE_API_CLASS(BroadcastLayer): public CNNLayer {
1834 public:
1835  /**
1836  * @brief Creates a new Broadcast instance.
1837  */
1838  using CNNLayer::CNNLayer;
1839 
1840  virtual ~BroadcastLayer();
1841 };
1842 
1843 /**
1844  * @brief This class represents a quantization operation layer
1845  * Element-wise linear quantization of floating point input values into a descrete set of floating point values
1846  */
1847 class INFERENCE_ENGINE_API_CLASS(QuantizeLayer): public CNNLayer {
1848 public:
1849  /**
1850  * @brief The number of quantization levels
1851  */
1852  int levels = 1;
1853 
1854  /**
1855  * @brief Creates a new QuantizeLayer instance.
1856  */
1857  using CNNLayer::CNNLayer;
1858 
1859  virtual ~QuantizeLayer();
1860 };
1861 
1862 /**
1863  * @brief This class represents a standard Math layers
1864  * Math modifies input tensor dimensions according parameters
1865  */
1866 class INFERENCE_ENGINE_API_CLASS(MathLayer): public CNNLayer {
1867 public:
1868  /**
1869  * @brief Creates a new Math instance.
1870  */
1871  using CNNLayer::CNNLayer;
1872 
1873  virtual ~MathLayer();
1874 };
1875 
1876 /**
1877  * @brief This class represents a standard Reduce layers
1878  * Reduce modifies input tensor according parameters
1879  */
1880 class INFERENCE_ENGINE_API_CLASS(ReduceLayer): public CNNLayer {
1881 public:
1882  /**
1883  * @brief The keep_dims dimension in tensor which is partially reversed
1884  */
1885  bool keep_dims = true;
1886 
1887  /**
1888  * @brief Creates a new Reduce instance.
1889  */
1890  using CNNLayer::CNNLayer;
1891 
1892  virtual ~ReduceLayer();
1893 };
1894 
1895 /**
1896  * @brief This class represents a standard TopK layer
1897  * TopK picks top K values from input tensor according parameters
1898  */
1899 class INFERENCE_ENGINE_API_CLASS(TopKLayer): public CNNLayer {
1900 public:
1901  /**
1902  * @brief The mode could be 'max' or 'min'
1903  */
1904  std::string mode;
1905  /**
1906  * @brief top K values sort mode could be 'value' or 'index'
1907  */
1908  std::string sort;
1909  /**
1910  * @brief The axis dimension in tensor which is top K values are picked
1911  */
1912  int axis = -1;
1913 
1914  /**
1915  * @brief Creates a new TopKLayer instance.
1916  */
1917  using CNNLayer::CNNLayer;
1918 
1919  virtual ~TopKLayer();
1920 };
1921 
1922 /**
1923  * @brief This class represents Unique layer.
1924  * The Unique operation searches for unique elements in 1-D input
1925  */
1926 class INFERENCE_ENGINE_API_CLASS(UniqueLayer): public CNNLayer {
1927 public:
1928  /**
1929  * @brief A flag indicating whether to sort unique elements
1930  */
1931  bool sorted;
1932  /**
1933  * @brief A flag indicating whether to return indices of input data elements in the output of uniques
1934  */
1936  /**
1937  * @brief A flag indicating whether to return a number of occurences for each unique element
1938  */
1940 
1941  /**
1942  * @brief Creates a new UniqueLayer instance.
1943  */
1944  using CNNLayer::CNNLayer;
1945 
1946  virtual ~UniqueLayer();
1947 };
1948 
1949 /**
1950  * @brief This class represents a standard NonMaxSuppression layer
1951  */
1952 class INFERENCE_ENGINE_API_CLASS(NonMaxSuppressionLayer): public CNNLayer {
1953 public:
1954  /**
1955  * @brief The 'center_point_box' indicates the format of the box data
1956  */
1957  bool center_point_box = false;
1958  /**
1959  * @brief The 'sort_result_descending' indicates that result will sort descending by score through all batches and
1960  * classes
1961  */
1962  bool sort_result_descending = true;
1963  /**
1964  * @brief Creates a new NonMaxSuppressionLayer instance.
1965  */
1966  using CNNLayer::CNNLayer;
1967 
1968  virtual ~NonMaxSuppressionLayer();
1969 };
1970 
1971 /**
1972  * @brief This class represents a standard Scatter layer
1973  */
1974 class INFERENCE_ENGINE_API_CLASS(ScatterLayer): public CNNLayer {
1975 public:
1976  /**
1977  * @brief The axis in Dictionary to scatter Indexes from
1978  */
1979  int axis = 0;
1980  /**
1981  * @brief Creates a new ScatterLayer instance.
1982  */
1983  using CNNLayer::CNNLayer;
1984 
1985  virtual ~ScatterLayer();
1986 };
1987 
1988 } // namespace InferenceEngine
int GetParamAsInt(const char *param, int def) const
Returns an integer value for the given parameter or returns the default value.
Definition: ie_layers.h:235
BinaryConvolutionLayer(const LayerParams &p)
Creates a new BinaryConvolutionLayer instance.
Definition: ie_layers.h:791
std::shared_ptr< CNNLayer > Ptr
A shared pointer to CNNLayer.
Definition: ie_layers.h:46
#define THROW_IE_EXCEPTION
A macro used to throw the exception with a notable description.
Definition: ie_exception.hpp:24
#define DEFINE_PROP(prop_name)
convenient way to declare a property with backward compatibility to 2D members
Definition: ie_layers.h:547
PoolingLayer(const PoolingLayer &that)
copy constructor
Definition: ie_layers.h:717
std::vector< int > axis
A vector of dimensions for cropping.
Definition: ie_layers.h:1095
virtual const DataPtr input() const
Returns the first element of the input data for this layer.
Definition: ie_layers.h:105
This class represents a Layer which performs Scale and Shift.
Definition: ie_layers.h:1451
std::string type
Layer type.
Definition: ie_layers.h:55
unsigned int _group
Number of groups.
Definition: ie_layers.h:584
LSTM Cell layer.
Definition: ie_layers.h:1309
PoolType _type
A pooling type.
Definition: ie_layers.h:683
This class represents a standard Strided Slice layer Strided Slice picks from input tensor according ...
Definition: ie_layers.h:1602
unsigned int GetParamAsUInt(const char *param, unsigned int def) const
Returns an unsigned integer value for the given parameter or returns the default value.
Definition: ie_layers.h:309
PoolType
Defines available pooling types.
Definition: ie_layers.h:678
float GetParamAsFloat(const char *param, float def) const
Gets float value for the given parameter.
Definition: ie_layers.h:158
This class represents a standard crop layer.
Definition: ie_layers.h:1090
std::vector< float > activation_beta
Beta parameters of activations.
Definition: ie_layers.h:1267
The method holds the user values to enable binding of data per graph node.
Definition: ie_common.h:67
This structure describes ROI data.
Definition: ie_blob.h:687
PReLULayer(const LayerParams &prms)
A default constructor. Creates a new PReLULayer instance and initializes layer parameters with the gi...
Definition: ie_layers.h:1464
This class represents a standard Power Layer Formula is: output = (offset + scale * input) ^ power...
Definition: ie_layers.h:1473
std::string begin_mask
The begin_mask is a bitmask where bit i being 0 means to ignore the begin value and instead use the d...
Definition: ie_layers.h:1608
std::vector< int > GetParamAsInts(const char *param, std::vector< int > def) const
Returns a vector of int values for the given parameter or returns the default value.
Definition: ie_layers.h:266
std::vector< int > shape
A vector of sizes of the shape.
Definition: ie_layers.h:1121
Inference Engine API.
Definition: ie_argmax_layer.hpp:11
Definition: ie_layers.h:1437
This is an internal common Layer parameter parsing arguments.
Definition: ie_layers.h:29
bool _channel_shared
A flag that indicates if the same negative_slope value is used for all the features. If false, the value is used pixel wise.
Definition: ie_layers.h:1457
PropertyVector< unsigned int > _dilation
A convolution dilations array [X, Y, Z, ...].
Definition: ie_layers.h:572
This class represents a standard Space To Depth layer Depth To Space picks from input tensor accordin...
Definition: ie_layers.h:1684
eBinaryConvolutionMode _mode
Mode of binary convolution operation.
Definition: ie_layers.h:743
Definition: ie_layers.h:1229
This class represents a OneHot layer Converts input into OneHot representation.
Definition: ie_layers.h:1755
Base class for recurrent cell layers.
Definition: ie_layers.h:1217
PropertyVector< unsigned int > pads_begin
Size of padding in the beginning of each axis.
Definition: ie_layers.h:1559
PropertyVector< unsigned int > _dilation
A convolution dilations array [X, Y, Z, ...].
Definition: ie_layers.h:770
This class represents a layer with Weights and/or Biases (e.g. Convolution/Fully Connected, etc.)
Definition: ie_layers.h:518
Blob::Ptr _weights
A pointer to a weights blob.
Definition: ie_layers.h:530
PropertyVector< unsigned int > _stride
A convolution strides array [X, Y, Z, ...].
Definition: ie_layers.h:572
This class represents a standard Fill layer RFill modifies input tensor according parameters...
Definition: ie_layers.h:1803
WeightableLayer(const LayerParams &prms)
A default constructor. Constructs a WeightableLayer instance and initiates layer parameters with the ...
Definition: ie_layers.h:525
std::vector< DataWeakPtr > insData
A vector of weak pointers to the input data elements of this layer in the di-graph (order matters) ...
Definition: ie_layers.h:67
RNN Cell layer.
Definition: ie_layers.h:1386
std::string _auto_pad
Auto padding type.
Definition: ie_layers.h:588
void fuse(Ptr &layer)
Sets a layer to be fused with.
Definition: ie_layers.h:97
A header file for Blob and generic TBlob<>
This class represents a standard deconvolution layer.
Definition: ie_layers.h:628
std::string shrink_axis_mask
The shrink_axis_mask is a bitmask where bit i being 1 means the i-th position shrinks the dimensional...
Definition: ie_layers.h:1627
std::vector< unsigned int > GetParamAsUInts(const char *param) const
Returns a vector of unsigned int values for the given parameter.
Definition: ie_layers.h:377
Direction
Direction of iteration through sequence dimension.
Definition: ie_layers.h:1436
int to
Definition: ie_layers.h:1188
std::vector< int > offset
A vector of offsets for each dimension.
Definition: ie_layers.h:1103
This class represents a standard Scatter layer.
Definition: ie_layers.h:1974
This class represents standard MVN Layer.
Definition: ie_layers.h:961
PropertyVector< unsigned int > _kernel
A convolution kernel array [X, Y, Z, ...].
Definition: ie_layers.h:560
int stride
Definition: ie_layers.h:1192
static std::string ie_serialize_float(float value)
serialize a float with C-locale formatting, used for serializing default values
Definition: ie_layers.h:145
Ptr _fusedWith
If suggested to fuse - a pointer to the layer which needs to be fused with this layer.
Definition: ie_layers.h:71
This class represents a standard Reduce layers Reduce modifies input tensor according parameters...
Definition: ie_layers.h:1880
bool sorted
A flag indicating whether to sort unique elements.
Definition: ie_layers.h:1931
std::string sort
top K values sort mode could be 'value' or 'index'
Definition: ie_layers.h:1908
ConvolutionLayer(const ConvolutionLayer &that)
copy constructor
Definition: ie_layers.h:614
PropertyVector< unsigned int > _pads_end
Pooling paddings end array [X, Y, Z, ...].
Definition: ie_layers.h:664
BinaryConvolutionLayer & operator=(const BinaryConvolutionLayer &that)
assignment operator
Definition: ie_layers.h:796
std::vector< std::string > activations
Activations used inside recurrent cell.
Definition: ie_layers.h:1253
std::string name
Layer name.
Definition: ie_layers.h:31
std::string type
Layer type.
Definition: ie_layers.h:33
This class represents a standard Reverse Sequence layer Reverse Sequence modifies input tensor accord...
Definition: ie_layers.h:1731
This class represents a Clamp activation layer Clamps all tensor elements into the range [min_value...
Definition: ie_layers.h:1005
int part_size
Definition: ie_layers.h:1195
ConvolutionLayer & operator=(const ConvolutionLayer &that)
assignment operator
Definition: ie_layers.h:598
This class represents a layer that evenly splits the input into the supplied outputs.
Definition: ie_layers.h:870
Definition: ie_layers.h:1438
Definition: ie_layers.h:1227
eOperation
Defines possible operations that can be used.
Definition: ie_layers.h:1046
This class represents a standard TopK layer TopK picks top K values from input tensor according param...
Definition: ie_layers.h:1899
bool _exclude_pad
A flag that indicates if padding is excluded or not.
Definition: ie_layers.h:688
GRU Cell layer.
Definition: ie_layers.h:1350
This class represents an element wise operation layer.
Definition: ie_layers.h:1040
PropertyVector< unsigned int > pads_end
Size of padding in the end of each axis.
Definition: ie_layers.h:1563
This class represents standard GRN Layer.
Definition: ie_layers.h:940
This class represents a standard Shuffle Channels layer Shuffle Channels picks from input tensor acco...
Definition: ie_layers.h:1641
This class represents a standard reshape layer.
Definition: ie_layers.h:1116
std::string mode
The mode could be 'max' or 'min'.
Definition: ie_layers.h:1904
UserValue userValue
Convenience user values to store in this object as extra data.
Definition: ie_layers.h:75
Sequence of recurrent cells.
Definition: ie_layers.h:1422
PropertyVector< unsigned int > _pads_end
A convolution paddings end array [X, Y, Z, ...].
Definition: ie_layers.h:762
BinaryConvolutionLayer(const BinaryConvolutionLayer &that)
copy constructor
Definition: ie_layers.h:815
GRNLayer(const LayerParams &prms)
A default constructor. Creates a new GRNLayer instance and initializes layer parameters with the give...
Definition: ie_layers.h:947
CNNLayer(const LayerParams &prms)
A constructor. Creates a new CNNLayer instance and initializes layer parameters with the given values...
Definition: ie_layers.h:85
static float ie_parse_float(const std::string &str)
Parse string with float in accordance with IE rules.
Definition: ie_layers.h:127
This class represents a fully connected layer.
Definition: ie_layers.h:831
ePadMode
Defines possible modes of pad operation.
Definition: ie_layers.h:1554
This class represents a SelectLayer layer SelectLayer layer takes elements from the second (“then”)...
Definition: ie_layers.h:1819
std::map< std::string, std::string > params
Map of pairs: (parameter name, parameter value)
Definition: ie_layers.h:502
Definition: ie_layers.h:1226
bool return_inverse
A flag indicating whether to return indices of input data elements in the output of uniques...
Definition: ie_layers.h:1935
PoolingLayer(const LayerParams &p)
Creates a new PoolingLayer instance.
Definition: ie_layers.h:697
std::shared_ptr< Blob > Ptr
A smart pointer containing Blob object.
Definition: ie_blob.h:40
std::shared_ptr< Data > DataPtr
Smart pointer to Data.
Definition: ie_common.h:51
std::vector< float > coeff
A vector of coefficients to scale the operands.
Definition: ie_layers.h:1077
std::string end_mask
Analogous to begin_mask.
Definition: ie_layers.h:1612
This class represents Unique layer. The Unique operation searches for unique elements in 1-D input...
Definition: ie_layers.h:1926
std::vector< unsigned int > GetParamAsUInts(const char *param, std::vector< unsigned int > def) const
Returns a vector of unsigned int values for the given parameter or returns the default value...
Definition: ie_layers.h:350
std::vector< float > activation_alpha
Alpha parameters of activations.
Definition: ie_layers.h:1260
unsigned int GetParamAsUInt(const char *param) const
Returns an unsigned integer value for the given parameter.
Definition: ie_layers.h:329
This class represents a ReLU6 activation layer Clamps all tensor elements into the range [0...
Definition: ie_layers.h:1028
This class represents a Batch Normalization Layer.
Definition: ie_layers.h:1499
unsigned int _group
Number of groups.
Definition: ie_layers.h:782
PropertyVector< unsigned int > _stride
Pooling strides array [X, Y, Z, ...].
Definition: ie_layers.h:672
This class represents a Layer which performs Scale and Shift.
Definition: ie_layers.h:1164
This class represents standard softmax Layer.
Definition: ie_layers.h:922
unsigned int _out_depth
A number of output feature maps (size) generating the 3rd output dimension.
Definition: ie_layers.h:580
PropertyVector< unsigned int > _kernel
A convolution kernel array [X, Y, Z, ...].
Definition: ie_layers.h:758
std::vector< float > GetParamAsFloats(const char *param) const
Returns a vector of float values for the given parameter.
Definition: ie_layers.h:212
This header file defines the main Data representation node.
bool return_counts
A flag indicating whether to return a number of occurrences for each unique element.
Definition: ie_layers.h:1939
int end
Definition: ie_layers.h:1194
bool GetParamAsBool(const char *param, bool def) const
Returns a boolean value for the given parameter. The valid values are (true, false, 1, 0).
Definition: ie_layers.h:404
std::vector< int > GetParamAsInts(const char *param) const
Returns a vector of int values for the given parameter.
Definition: ie_layers.h:288
float _pad_value
A pad value which is used to fill pad area.
Definition: ie_layers.h:753
std::string ellipsis_mask
The ellipsis_mask is a bitmask where bit i being 1 means the i-th is actually an ellipsis.
Definition: ie_layers.h:1617
unsigned int _in_depth
A number of input feature maps (size) generating the 3rd input dimension.
Definition: ie_layers.h:748
PropertyVector< unsigned int > _kernel
Pooling kernel array [X, Y, Z, ...].
Definition: ie_layers.h:660
float GetParamAsFloat(const char *param) const
Returns a float value for the given layer parameter.
Definition: ie_layers.h:173
PropertyVector< unsigned int > _padding
Pooling paddings begin array [X, Y, Z, ...].
Definition: ie_layers.h:660
Definition: ie_layers.h:1198
int GetParamAsInt(const char *param) const
Returns an integer value for the given parameter.
Definition: ie_layers.h:250
This class represents a standard deformable convolution layer.
Definition: ie_layers.h:639
PropertyVector< unsigned int > _padding
A convolution paddings begin array [X, Y, Z, ...].
Definition: ie_layers.h:758
int across_channels
Indicate that mean value is calculated across channels.
Definition: ie_layers.h:973
Precision precision
Layer base operating precision.
Definition: ie_layers.h:59
This class represents a quantization operation layer Element-wise linear quantization of floating poi...
Definition: ie_layers.h:1847
unsigned int _out_depth
A number of output feature maps (size) generating the 3rd output dimension.
Definition: ie_layers.h:778
MVNLayer(const LayerParams &prms)
A default constructor. Creates a new MVNLayer instance and initializes layer parameters with the give...
Definition: ie_layers.h:968
This class represents a Rectified Linear activation layer.
Definition: ie_layers.h:986
This class represents a standard 3D Convolution Layer.
Definition: ie_layers.h:555
CellType
Direct type of recurrent cell (including subtypes) Description of particular cell semantics is in LST...
Definition: ie_layers.h:1225
bool GetParamAsBool(const char *param) const
Returns a boolean value for the given parameter.
Definition: ie_layers.h:425
std::string _auto_pad
Auto padding type.
Definition: ie_layers.h:692
std::string _auto_pad
Auto padding type.
Definition: ie_layers.h:786
This class represents a standard Pad layer Adds paddings to input tensor.
Definition: ie_layers.h:1548
This class represents a standard Tile Layer.
Definition: ie_layers.h:1142
This class represents a standard binary convolution layer.
Definition: ie_layers.h:732
Precision precision
Layer precision.
Definition: ie_layers.h:35
PropertyVector< unsigned int > _pads_end
A convolution paddings end array [X, Y, Z, ...].
Definition: ie_layers.h:564
Blob::Ptr _biases
A pointer to a biases blob.
Definition: ie_layers.h:534
This class represents a Linear Response Normalization (LRN) Layer.
Definition: ie_layers.h:888
This class represents TensorIterator layer.
Definition: ie_layers.h:1183
This class represents concatenation layer Takes as input several data elements and merges them to one...
Definition: ie_layers.h:850
PoolingLayer & operator=(const PoolingLayer &that)
assignment operator
Definition: ie_layers.h:702
std::vector< float > GetParamAsFloats(const char *param, std::vector< float > def) const
Returns a vector of float values for the given parameter or returns the default value.
Definition: ie_layers.h:189
This class represents SparseSegmentMean(SqrtN, Sum) layers SparseSegmentMean(SqrtN, Sum) layer reduces data along sparse segments of a tensor.
Definition: ie_layers.h:1717
This class represents a standard RangeLayer layer RangeLayer modifies input tensor dimensions accordi...
Definition: ie_layers.h:1789
This is a base abstraction Layer - all DNN Layers inherit from this class.
Definition: ie_layers.h:41
int start
Definition: ie_layers.h:1193
std::string affinity
Layer affinity set by user.
Definition: ie_layers.h:79
This class represents a standard pooling layer.
Definition: ie_layers.h:655
PropertyVector< unsigned int > _stride
A convolution strides array [X, Y, Z, ...].
Definition: ie_layers.h:770
Definition: ie_layers.h:1185
This class represents a standard Gather layer Gather slices from Dictionary according to Indexes...
Definition: ie_layers.h:1584
Definition: ie_layers.h:1228
std::string GetParamAsString(const char *param) const
Returns a string value for the given parameter. Throws exception if parameter was not found...
Definition: ie_layers.h:475
class CNNLayer GenericLayer
Alias for CNNLayer object.
Definition: ie_layers.h:513
This class represents a standard Math layers Math modifies input tensor dimensions according paramete...
Definition: ie_layers.h:1866
std::string new_axis_mask
The new_axis_mask_ is a bitmask where bit i being 1 means the i-th position creates a new 1 dimension...
Definition: ie_layers.h:1622
PropertyVector< unsigned int > _padding
A convolution paddings begin array [X, Y, Z, ...].
Definition: ie_layers.h:560
std::map< std::string, Blob::Ptr > blobs
Map of pairs: (name, weights/biases blob)
Definition: ie_layers.h:507
std::string GetParamAsString(const char *param, const char *def) const
Returns a string value for the given parameter or returns the default one.
Definition: ie_layers.h:448
std::vector< DataPtr > outData
A vector of pointers to the output data elements of this layer in the di-graph (order matters) ...
Definition: ie_layers.h:63
eBinaryConvolutionMode
Defines possible modes of binary convolution operation.
Definition: ie_layers.h:738
int from
Definition: ie_layers.h:1187
This class represents a standard NonMaxSuppression layer.
Definition: ie_layers.h:1952
bool CheckParamPresence(const char *param) const
Checks the param presence in the layer.
Definition: ie_layers.h:461
This class represents a standard Depth To Space layer Depth To Space picks from input tensor accordin...
Definition: ie_layers.h:1665
This class represents a general matrix multiplication operation layer Formula is: dst := alpha*src1*s...
Definition: ie_layers.h:1518
int axis
Definition: ie_layers.h:1191
std::vector< int > dim
A vector of dimensions to be preserved.
Definition: ie_layers.h:1099
This class represents SparseFillEmptyRows layer SparseFillEmptyRows fills empty rows in a sparse tens...
Definition: ie_layers.h:1703
std::string name
Layer name.
Definition: ie_layers.h:51
This is a header file with common inference engine definitions.
ConvolutionLayer(const LayerParams &p)
Creates a new ConvolutionLayer instance.
Definition: ie_layers.h:593
This class holds precision value and provides precision related operations.
Definition: ie_precision.hpp:21
This class represents a standard Broadcast layer Broadcast modifies input tensor dimensions according...
Definition: ie_layers.h:1833