ie_layers.h
1 // Copyright (C) 2018-2020 Intel Corporation
2 // SPDX-License-Identifier: Apache-2.0
3 //
4 
5 /**
6  * @brief A header file for the internal layer structures used to describe layer information
7  *
8  * @file ie_layers.h
9  */
10 #pragma once
11 
12 #include <algorithm>
13 #include <cctype>
14 #include <iterator>
15 #include <limits>
16 #include <map>
17 #include <memory>
18 #include <string>
19 #include <vector>
20 
21 #include "ie_blob.h"
22 #include "ie_common.h"
23 #include "ie_data.h"
24 #include "ie_layers_property.hpp"
25 
26 namespace ngraph {
27 
28 class Node;
29 
30 } // namespace ngraph
31 
32 namespace InferenceEngine {
33 
34 /**
35  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
36  * @brief This is an internal common Layer parameter parsing arguments
37  */
38 struct INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(LayerParams) {
39  /**
40  * @brief Layer name
41  */
42  std::string name;
43 
44  /**
45  * @brief Layer type
46  */
47  std::string type;
48 
49  /**
50  * @deprecated Use precision of CNNLayer::outData and CNNLayer::insData
51  * @brief Layer precision
52  */
53  INFERENCE_ENGINE_DEPRECATED("Use precision of CNNLayer::outData and CNNLayer::insData")
54  Precision precision;
55 
56  /**
57  * @brief A default constructor.
58  */
59  LayerParams();
60 
61  IE_SUPPRESS_DEPRECATED_START
62 
63  /**
64  * @brief A copy constructor.
65  * @param other An object to copy.
66  */
67  LayerParams(const LayerParams & other);
68 
69  /**
70  * @brief A copy assignment operator
71  * @param other An object to copy
72  * @return A value
73  * @return A reference to the object after assignment
74  LayerParams & operator= (const LayerParams & other);
75 
76  IE_SUPPRESS_DEPRECATED_END
77 
78  /**
79  * @brief A constructor with parameters.
80  * @param name A layer name.
81  * @param type A layer type.
82  * @param precision A layer precision.
83  */
84  LayerParams(const std::string & name, const std::string & type, Precision precision);
85 };
86 
87 /**
88  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
89  * @brief This is a base abstraction Layer - all DNN Layers inherit from this class
90  */
91 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(CNNLayer) {
92 protected:
93  std::shared_ptr<ngraph::Node> node;
94 public:
95  /**
96  * @brief A shared pointer to CNNLayer
97  */
98  IE_SUPPRESS_DEPRECATED_START
99  using Ptr = std::shared_ptr<CNNLayer>;
100  IE_SUPPRESS_DEPRECATED_END
101 
102  /**
103  * @brief Layer name
104  */
105  std::string name;
106 
107  /**
108  * @brief Layer type
109  */
110  std::string type;
111 
112  /**
113  * @brief Layer base operating precision
114  */
115  Precision precision;
116 
117  /**
118  * @brief A vector of pointers to the output data elements of this layer in the di-graph (order matters)
119  */
120  std::vector<DataPtr> outData;
121 
122  /**
123  * @brief A vector of weak pointers to the input data elements of this layer in the di-graph (order matters)
124  */
125  std::vector<DataWeakPtr> insData;
126 
127  /**
128  * @brief If suggested to fuse - a pointer to the layer which needs to be fused with this layer
129  */
130  Ptr _fusedWith;
131 
132  /**
133  * @brief Convenience user values to store in this object as extra data
134  */
135  UserValue userValue;
136 
137  /**
138  * @brief Layer affinity set by user.
139  */
140  std::string affinity;
141 
142  IE_SUPPRESS_DEPRECATED_START
143 
144  /**
145  * @brief A constructor. Creates a new CNNLayer instance and initializes layer parameters with the given values.
146  *
147  * @param prms Basic common parsing parameters
148  */
149  explicit CNNLayer(const LayerParams& prms);
150 
151  /**
152  * @brief Returns the original nGraph op
153  * @return A smart pointer to nGraph op
154  */
155  std::shared_ptr<ngraph::Node> getNode() {
156  return node;
157  }
158 
159  /**
160  * @brief A copy constructor
161  * @param other An object to copy
162  */
163  CNNLayer(const CNNLayer& other);
164 
165  IE_SUPPRESS_DEPRECATED_END
166 
167  /**
168  * @brief A virtual destructor
169  */
170  virtual ~CNNLayer();
171 
172  /**
173  * @brief Sets a layer to be fused with
174  *
175  * @param layer Reference to the layer to be fused with
176  */
177  void fuse(Ptr& layer) {
178  _fusedWith = layer;
179  }
180 
181  /**
182  * @brief Returns the first element of the input data for this layer
183  *
184  * @return A smart pointer to the input data element
185  */
186  virtual const DataPtr input() const {
187  if (insData.empty()) {
188  THROW_IE_EXCEPTION << "Internal error: input data is empty";
189  }
190  auto lockedFirstInsData = insData[0].lock();
191  if (!lockedFirstInsData) {
192  THROW_IE_EXCEPTION << "Internal error: unable to lock weak_ptr\n";
193  }
194  return lockedFirstInsData;
195  }
196 
197  /**
198  * @brief Checks if the input data and layer data are legitimate
199  */
200  void validateLayer();
201 
202  /**
203  * @brief Parse string with float in accordance with IE rules
204  *
205  * @param str input string with float value
206  * @return float value if parsing was successful
207  * @throws InferenceEngineException in case of parsing error
208  */
209  static float ie_parse_float(const std::string& str) {
210  if (str == "-inf") {
211  return -std::numeric_limits<float>::infinity();
212  } else if (str == "inf") {
213  return std::numeric_limits<float>::infinity();
214  } else {
215  float res;
216  std::stringstream val_stream(str);
217  val_stream.imbue(std::locale("C"));
218  val_stream >> res;
219  if (!val_stream.eof()) THROW_IE_EXCEPTION;
220  return res;
221  }
222  }
223  /**
224  * @brief Serializes a float with C-locale formatting
225  * Used for serializing default values
226  */
227  static std::string ie_serialize_float(float value) {
228  std::stringstream val_stream;
229  val_stream.imbue(std::locale("C"));
230  val_stream << value;
231  return val_stream.str();
232  }
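 // Usage sketch: both helpers use the "C" locale, so parsing and serialization do not
 // depend on the global locale; "inf" and "-inf" are accepted spellings.
 //
 //   float x = CNNLayer::ie_parse_float("-inf");          // -> -std::numeric_limits<float>::infinity()
 //   std::string s = CNNLayer::ie_serialize_float(2.5f);  // -> "2.5"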
233 
234  /**
235  * @brief Gets float value for the given parameter
236  *
237  * @param param name of the parameter to find
238  * @param def default value of the parameter if not found
239  * @return float value
240  */
241  float GetParamAsFloat(const char* param, float def) const {
242  std::string val = GetParamAsString(param, ie_serialize_float(def).c_str());
243  try {
244  return ie_parse_float(val);
245  } catch (...) {
246  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ". Value "
247  << val << " cannot be casted to float.";
248  }
249  }
250 
251  /**
252  * @brief Returns a float value for the given layer parameter
253  *
254  * @param param Name of the layer parameter
255  * @return A float value for the specified parameter
256  */
257  float GetParamAsFloat(const char* param) const {
258  std::string val = GetParamAsString(param);
259  try {
260  return ie_parse_float(val);
261  } catch (...) {
262  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ". Value "
263  << val << " cannot be casted to float.";
264  }
265  }
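 // Usage sketch (the parameter names and the `layer` object are assumptions for illustration):
 // reading a float attribute with and without a fallback value.
 //
 //   float beta  = layer.GetParamAsFloat("beta", 0.75f);  // returns 0.75f if "beta" is absent
 //   float alpha = layer.GetParamAsFloat("alpha");        // throws if "alpha" is absent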
266 
267  /**
268  * @brief Returns a vector of float values for the given parameter or returns the default value
269  *
270  * @param param Name of the layer parameter
271  * @param def Default value of the parameter if not found
272  * @return vector of float values
273  */
274  std::vector<float> GetParamAsFloats(const char* param, std::vector<float> def) const {
275  std::string vals = GetParamAsString(param, "");
276  std::vector<float> result;
277  std::istringstream stream(vals);
278  std::string str;
279  if (vals.empty()) return def;
280  while (getline(stream, str, ',')) {
281  try {
282  float val = ie_parse_float(str);
283  result.push_back(val);
284  } catch (...) {
285  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
286  << ". Value " << vals << " cannot be casted to floats.";
287  }
288  }
289  return result;
290  }
291 
292  /**
293  * @brief Returns a vector of float values for the given parameter
294  *
295  * @param param Name of the layer parameter
296  * @return vector of float values
297  */
298  std::vector<float> GetParamAsFloats(const char* param) const {
299  std::string vals = GetParamAsString(param);
300  std::vector<float> result;
301  std::istringstream stream(vals);
302  std::string str;
303  while (getline(stream, str, ',')) {
304  try {
305  float val = ie_parse_float(str);
306  result.push_back(val);
307  } catch (...) {
308  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
309  << ". Value " << vals << " cannot be casted to floats.";
310  }
311  }
312  return result;
313  }
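 // Usage sketch (names assumed): a value stored as "1.0,0.5" yields {1.0f, 0.5f};
 // the supplied default is returned when the attribute is missing.
 //
 //   std::vector<float> coeff = layer.GetParamAsFloats("coeff", {1.0f, 1.0f});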
314 
315  /**
316  * @brief Returns an integer value for the given parameter or returns the default value
317  *
318  * @param param Name of the layer parameter
319  * @param def Default value of the parameter if not found
320  * @return An int value for the specified parameter
321  */
322  int GetParamAsInt(const char* param, int def) const {
323  std::string val = GetParamAsString(param, std::to_string(def).c_str());
324  try {
325  return std::stoi(val);
326  } catch (...) {
327  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ". Value "
328  << val << " cannot be casted to int.";
329  }
330  }
331 
332  /**
333  * @brief Returns an integer value for the given parameter
334  *
335  * @param param Name of the layer parameter
336  * @return An int value for the specified parameter
337  */
338  int GetParamAsInt(const char* param) const {
339  std::string val = GetParamAsString(param);
340  try {
341  return std::stoi(val);
342  } catch (...) {
343  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ". Value "
344  << val << " cannot be casted to int.";
345  }
346  }
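 // Usage sketch (name assumed): integer attributes are stored as strings in
 // CNNLayer::params and converted on access.
 //
 //   int axis = layer.GetParamAsInt("axis", 1);  // falls back to 1 if "axis" is absent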
347 
348  /**
349  * @brief Returns a vector of int values for the given parameter or returns the default value
350  *
351  * @param param Name of the layer parameter
352  * @param def Default value of the parameter if not found
353  * @return vector of int values
354  */
355  std::vector<int> GetParamAsInts(const char* param, std::vector<int> def) const {
356  std::string vals = GetParamAsString(param, "");
357  std::vector<int> result;
358  std::istringstream stream(vals);
359  std::string str;
360  if (vals.empty()) return def;
361  while (getline(stream, str, ',')) {
362  try {
363  result.push_back(std::stoi(str));
364  } catch (...) {
365  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
366  << ". Value " << vals << " cannot be casted to int.";
367  }
368  }
369  return result;
370  }
371 
372  /**
373  * @brief Returns a vector of int values for the given parameter
374  *
375  * @param param Name of the layer parameter
376  * @return vector of int values
377  */
378  std::vector<int> GetParamAsInts(const char* param) const {
379  std::string vals = GetParamAsString(param);
380  std::vector<int> result;
381  std::istringstream stream(vals);
382  std::string str;
383  while (getline(stream, str, ',')) {
384  try {
385  result.push_back(std::stoi(str));
386  } catch (...) {
387  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
388  << ". Value " << vals << " cannot be casted to int.";
389  }
390  }
391  return result;
392  }
393  /**
394  * @brief Returns an unsigned integer value for the given parameter or returns the default value
395  *
396  * @param param Name of the layer parameter
397  * @param def Default value of the parameter if not found
398  * @return An unsigned integer value for the specified parameter
399  */
400  unsigned int GetParamAsUInt(const char* param, unsigned int def) const {
401  std::string val = GetParamAsString(param, std::to_string(def).c_str());
402  std::string message = "Cannot parse parameter " + std::string(param) + " from IR for layer " + name +
403  ". Value " + val + " cannot be casted to int.";
404  try {
405  int value = std::stoi(val);
406  if (value < 0) {
407  THROW_IE_EXCEPTION << message;
408  }
409  return static_cast<unsigned int>(value);
410  } catch (...) {
411  THROW_IE_EXCEPTION << message;
412  }
413  }
414 
415  /**
416  * @brief Returns an unsigned integer value for the given parameter
417  *
418  * @param param Name of the layer parameter
419  * @return An unsigned integer value for the specified parameter
420  */
421  unsigned int GetParamAsUInt(const char* param) const {
422  std::string val = GetParamAsString(param);
423  std::string message = "Cannot parse parameter " + std::string(param) + " from IR for layer " + name +
424  ". Value " + val + " cannot be casted to unsigned int.";
425  try {
426  int value = std::stoi(val);
427  if (value < 0) {
428  THROW_IE_EXCEPTION << message;
429  }
430  return static_cast<unsigned int>(value);
431  } catch (...) {
432  THROW_IE_EXCEPTION << message;
433  }
434  }
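 // Usage sketch (name assumed): negative stored values are rejected, so an attribute
 // holding "-1" makes this call throw instead of wrapping around.
 //
 //   unsigned int group = layer.GetParamAsUInt("group", 1u);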
435 
436  /**
437  * @brief Returns a vector of unsigned int values for the given parameter or returns the default value
438  *
439  * @param param Name of the layer parameter
440  * @param def Default value of the parameter if not found
441  * @return vector of unsigned int values
442  */
443  std::vector<unsigned int> GetParamAsUInts(const char* param, std::vector<unsigned int> def) const {
444  std::string vals = GetParamAsString(param, "");
445  std::vector<unsigned int> result;
446  std::istringstream stream(vals);
447  std::string str;
448  std::string message = "Cannot parse parameter " + std::string(param) + " " + str + " from IR for layer " +
449  name + ". Value " + vals + " cannot be casted to unsigned int.";
450  if (vals.empty()) return def;
451  while (getline(stream, str, ',')) {
452  try {
453  int value = std::stoi(str);
454  if (value < 0) {
455  THROW_IE_EXCEPTION << message;
456  }
457  result.push_back(static_cast<unsigned int>(value));
458  } catch (...) {
459  THROW_IE_EXCEPTION << message;
460  }
461  }
462  return result;
463  }
464 
465  /**
466  * @brief Returns a vector of unsigned int values for the given parameter
467  *
468  * @param param Name of the layer parameter
469  * @return vector of unsigned int values
470  */
471  std::vector<unsigned int> GetParamAsUInts(const char* param) const {
472  std::string vals = GetParamAsString(param);
473  std::vector<unsigned int> result;
474  std::istringstream stream(vals);
475  std::string str;
476  std::string message = "Cannot parse parameter " + std::string(param) + " " + str + " from IR for layer " +
477  name + ". Value " + vals + " cannot be casted to int.";
478  while (getline(stream, str, ',')) {
479  try {
480  int value = std::stoi(str);
481  if (value < 0) {
482  THROW_IE_EXCEPTION << message;
483  }
484  result.push_back(static_cast<unsigned int>(value));
485  } catch (...) {
486  THROW_IE_EXCEPTION << message;
487  }
488  }
489  return result;
490  }
491  /**
492  * @brief Returns a boolean value for the given parameter.
493  *
494  * The valid values are (true, false, 1, 0).
495  * @param param Name of the layer parameter
496  * @param def Default value of the parameter if not found
497  * @return A bool value for the specified parameter
498  */
499  bool GetParamAsBool(const char* param, bool def) const {
500  std::string val = GetParamAsString(param, std::to_string(def).c_str());
501  std::string loweredCaseValue;
502  std::transform(val.begin(), val.end(), std::back_inserter(loweredCaseValue), [](char value) {
503  return std::tolower(value);
504  });
505 
506  bool result = false;
507 
508  if (!(std::istringstream(loweredCaseValue) >> std::boolalpha >> result)) {
509  // attempting parse using non alpha bool
510  return (GetParamAsInt(param, def) != 0);
511  }
512 
513  return result;
514  }
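 // Usage sketch (name assumed): boolean attributes may be stored as "true"/"false"
 // or as "1"/"0"; both spellings parse to the same result.
 //
 //   bool excludePad = layer.GetParamAsBool("exclude-pad", false);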
515  /**
516  * @brief Returns a boolean value for the given parameter
517  *
518  * @param param Name of the layer parameter
519  * @return A bool value for the specified parameter
520  */
521  bool GetParamAsBool(const char* param) const {
522  std::string val = GetParamAsString(param);
523  std::string loweredCaseValue;
524  std::transform(val.begin(), val.end(), std::back_inserter(loweredCaseValue), [](char value) {
525  return std::tolower(value);
526  });
527 
528  bool result = false;
529 
530  if (!(std::istringstream(loweredCaseValue) >> std::boolalpha >> result)) {
531  // attempting parse using non alpha bool
532  return (GetParamAsInt(param) != 0);
533  }
534 
535  return result;
536  }
537 
538  /**
539  * @brief Returns a string value for the given parameter or returns the default one
540  *
541  * @param param Name of the layer parameter
542  * @param def Default value of the parameter if not found
543  * @return A string value
544  */
545  std::string GetParamAsString(const char* param, const char* def) const {
546  auto it = params.find(param);
547  if (it == params.end() || it->second.empty()) {
548  return def;
549  }
550  return (*it).second;
551  }
552 
553  /**
554  * @brief Checks the param presence in the layer
555  *
556  * @param param Name of the layer parameter
557  * @return true if the parameter is present, false otherwise
558  */
559  bool CheckParamPresence(const char* param) const {
560  auto it = params.find(param);
561  if (it == params.end()) {
562  return false;
563  }
564  return true;
565  }
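 // Usage sketch (names assumed): check for an optional attribute before reading it,
 // or rely on the default-value overload instead.
 //
 //   if (layer.CheckParamPresence("auto_pad")) {
 //       std::string pad = layer.GetParamAsString("auto_pad");
 //   }
 //   std::string rounding = layer.GetParamAsString("rounding_type", "floor");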
566 
567  /**
568  * @brief Returns a string value for the given parameter.
569  *
570  * Throws exception if parameter was not found.
571  * @param param Name of the layer parameter
572  * @return A string value
573  */
574  std::string GetParamAsString(const char* param) const {
575  auto it = params.find(param);
576  if (it == params.end()) {
577  THROW_IE_EXCEPTION << "No such parameter name '" << param << "' for layer " << name;
578  }
579  return (*it).second;
580  }
581 
582  /**
583  * @brief Gets the parameter as a std::vector<std::string>
584  * @param param The parameter name
585  * @param def The default values returned if the parameter is not found
586  * @return The parameter as strings.
587  */
588  std::vector<std::string> GetParamAsStrings(const char* param, std::vector<std::string> def) const {
589  std::string vals = GetParamAsString(param, "");
590  std::vector<std::string> result;
591  std::istringstream stream(vals);
592  std::string str;
593  if (vals.empty()) return def;
594  while (getline(stream, str, ',')) {
595  try {
596  result.push_back(str);
597  } catch (...) {
598  THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ".";
599  }
600  }
601  return result;
602  }
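 // Usage sketch (names assumed): a value stored as "sigmoid,tanh" yields
 // {"sigmoid", "tanh"}; the default vector is returned when the attribute is absent.
 //
 //   std::vector<std::string> acts = layer.GetParamAsStrings("activations", {"sigmoid", "tanh"});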
603 
604  /**
605  * @brief Map of pairs: (parameter name, parameter value)
606  */
607  std::map<std::string, std::string> params;
608 
609  /**
610  * @brief Map of pairs: (name, weights/biases blob)
611  */
612  std::map<std::string, Blob::Ptr> blobs;
613 };
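// Sketch of typical downstream use of the maps above (the layer handle and attribute
// names are assumptions for illustration):
//
//   CNNLayer::Ptr layer = ...;  // obtained from a parsed CNNNetwork
//   if (layer->type == "Convolution") {
//       auto strides = layer->GetParamAsUInts("strides", {1, 1});
//       Blob::Ptr weights = layer->blobs["weights"];  // empty Ptr if no weights were attached
//   }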
614 
615 /**
616  * @brief Alias for CNNLayer object
617  */
618 IE_SUPPRESS_DEPRECATED_START
619 using GenericLayer = class CNNLayer;
620 IE_SUPPRESS_DEPRECATED_END
621 
622 IE_SUPPRESS_DEPRECATED_START_WIN
623 
624 /**
625  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
626  * @brief This class represents a layer with Weights and/or Biases (e.g. Convolution/Fully Connected, etc.)
627  */
628 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(WeightableLayer): public CNNLayer {
629 public:
630  IE_SUPPRESS_DEPRECATED_START
631 
632  /**
633  * @brief A constructor. Constructs a WeightableLayer instance and initializes layer parameters with the given
634  * values
635  *
636  * @param prms Initial layer parameters
637  */
638  explicit WeightableLayer(const LayerParams & prms);
639 
640  IE_SUPPRESS_DEPRECATED_END
641 
642  /**
643  * @brief A pointer to a weights blob
644  */
645  Blob::Ptr _weights;
646  /**
647  * @brief A pointer to a biases blob
648  */
649  Blob::Ptr _biases;
650 
651  /**
652  * @brief Constructs a WeightableLayer instance and initializes layer parameters with the given values
653  */
654  using CNNLayer::CNNLayer;
655 
656  ~WeightableLayer() override;
657 };
658 
659 /**
660  * @brief A convenient way to declare a property with backward compatibility to 2D members
661  */
662 #define DEFINE_PROP(prop_name) \
663  PropertyVector<unsigned int> prop_name; \
664  unsigned int& prop_name##_x = prop_name.at(X_AXIS); \
665  unsigned int& prop_name##_y = prop_name.at(Y_AXIS)
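// For reference, DEFINE_PROP(_kernel) expands to:
//
//   PropertyVector<unsigned int> _kernel;
//   unsigned int& _kernel_x = _kernel.at(X_AXIS);
//   unsigned int& _kernel_y = _kernel.at(Y_AXIS);
//
// so legacy 2D code can keep using the _x/_y aliases while N-D code indexes the
// PropertyVector directly (X_AXIS and Y_AXIS come from ie_layers_property.hpp).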
666 
667 /**
668  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
669  * @brief This class represents a standard 3D Convolution Layer
670  */
671 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ConvolutionLayer): public WeightableLayer {
672 public:
673  /**
674  * @brief A convolution kernel array [X, Y, Z, ...]
675  */
676  DEFINE_PROP(_kernel);
677  /**
678  * @brief A convolution paddings begin array [X, Y, Z, ...]
679  */
680  DEFINE_PROP(_padding);
681  /**
682  * @brief A convolution paddings end array [X, Y, Z, ...]
683  */
684  DEFINE_PROP(_pads_end);
685  /**
686  * @brief A convolution strides array [X, Y, Z, ...]
687  */
688  DEFINE_PROP(_stride);
689  /**
690  * @brief A convolution dilations array [X, Y, Z, ...]
691  */
692  DEFINE_PROP(_dilation);
693  /**
694  * @brief A number of output feature maps (size) generating the 3'rd output dimension
695  */
696  unsigned int _out_depth = 0u;
697  /**
698  * @brief Number of groups
699  */
700  unsigned int _group = 1u;
701  /**
702  * @brief Auto padding type
703  */
704  std::string _auto_pad;
705 
706  IE_SUPPRESS_DEPRECATED_START
707 
708  /**
709  * @brief Creates a new ConvolutionLayer instance.
710  */
711  explicit ConvolutionLayer(const LayerParams& p)
712  : WeightableLayer(p), _kernel(2, 0u), _padding(2, 0u), _stride(2, 1u), _dilation(2, 1u) {}
713 
714  /**
715  * @brief assignment operator
716  */
717  ConvolutionLayer& operator=(const ConvolutionLayer& that) {
718  if (&that != this) {
719  WeightableLayer::operator=(that);
720  _kernel = that._kernel;
721  _padding = that._padding;
722  _pads_end = that._pads_end;
723  _stride = that._stride;
724  _dilation = that._dilation;
725  _out_depth = that._out_depth;
726  _group = that._group;
727  }
728  return *this;
729  }
730 
731  /**
732  * @brief copy constructor
733  */
734  ConvolutionLayer(const ConvolutionLayer& that): WeightableLayer(that) {
735  operator=(that);
736  }
737  /**
738  * @brief move constructor
739  */
740  ConvolutionLayer(ConvolutionLayer&&) = default;
741 
742  IE_SUPPRESS_DEPRECATED_END
743 
744  ~ConvolutionLayer() override;
745 };
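// Usage sketch (all concrete values are illustrative): a 3x3 convolution with stride 2,
// written through the 2D aliases that DEFINE_PROP creates.
//
//   ConvolutionLayer conv(LayerParams{"conv1", "Convolution", Precision::FP32});
//   conv._kernel_x = 3;   // same storage as conv._kernel.at(X_AXIS)
//   conv._kernel_y = 3;
//   conv._stride_x = 2;
//   conv._stride_y = 2;
//   conv._out_depth = 64;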
746 
747 /**
748  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
749  * @brief This class represents a standard deconvolution layer
750  */
751 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(DeconvolutionLayer): public ConvolutionLayer {
752 public:
753  using ConvolutionLayer::ConvolutionLayer;
754  using ConvolutionLayer::operator=;
755 
756  ~DeconvolutionLayer() override;
757 };
758 
759 /**
760  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
761  * @brief This class represents a standard deformable convolution layer
762  */
763 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(DeformableConvolutionLayer): public ConvolutionLayer {
764 public:
765  using ConvolutionLayer::ConvolutionLayer;
766  using ConvolutionLayer::operator=;
767 
768  /**
769  * @brief Number of deformable groups
770  */
771  unsigned int _deformable_group = 1u;
772 
773  ~DeformableConvolutionLayer() override;
774 };
775 
776 /**
777  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
778  * @brief This class represents a standard pooling layer
779  */
780 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(PoolingLayer): public CNNLayer {
781 public:
782  /**
783  * @brief Pooling kernel array [X, Y, Z, ...]
784  */
785  DEFINE_PROP(_kernel);
786  /**
787  * @brief Pooling paddings begin array [X, Y, Z, ...]
788  */
789  DEFINE_PROP(_padding);
790  /**
791  * @brief Pooling paddings end array [X, Y, Z, ...]
792  */
793  DEFINE_PROP(_pads_end);
794  /**
795  * @brief Pooling strides array [X, Y, Z, ...]
796  */
797  DEFINE_PROP(_stride);
798 
799  /**
800  * @enum PoolType
801  * @brief Defines available pooling types
802  */
803  enum PoolType { MAX = 1, AVG = 2, STOCH = 3, ROI = 4, SPACIAL_PYRAMID = 5 };
804 
805  /**
806  * @brief A pooling type
807  */
808  PoolType _type = MAX;
809 
810  /**
811  * @brief A flag that indicates if padding is excluded or not
812  */
813  bool _exclude_pad = false;
814  /**
815  * @brief Auto padding type
816  */
817  std::string _auto_pad;
818 
819  IE_SUPPRESS_DEPRECATED_START
820 
821  /**
822  * @brief Creates a new PoolingLayer instance.
823  */
824  explicit PoolingLayer(const LayerParams& p): CNNLayer(p), _kernel(2, 0u), _padding(2, 0u), _stride(2, 0u) {}
825 
826  /**
827  * @brief assignment operator
828  */
829  PoolingLayer& operator=(const PoolingLayer& that) {
830  if (&that != this) {
831  CNNLayer::operator=(that);
832  _kernel = that._kernel;
833  _padding = that._padding;
834  _pads_end = that._pads_end;
835  _stride = that._stride;
836  _type = that._type;
837  _exclude_pad = that._exclude_pad;
838  }
839  return *this;
840  }
841  /**
842  * @brief copy constructor
843  */
844  PoolingLayer(const PoolingLayer& that): CNNLayer(that) {
845  operator=(that);
846  }
847 
848  /**
849  * @brief move constructor
850  */
851  PoolingLayer(PoolingLayer&&) = default;
852 
853  IE_SUPPRESS_DEPRECATED_END
854 
855  ~PoolingLayer() override;
856 };
857 
858 /**
859  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
860  * @brief This class represents a standard binary convolution layer
861  */
862 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(BinaryConvolutionLayer): public WeightableLayer {
863 public:
864  /**
865  * @enum eBinaryConvolutionMode
866  * @brief Defines possible modes of binary convolution operation
867  */
868  enum eBinaryConvolutionMode { xnor_popcount = 0 };
869 
870  /**
871  * @brief Mode of binary convolution operation
872  */
873  eBinaryConvolutionMode _mode = xnor_popcount;
874 
875  /**
876  * @brief A number of input feature maps (size) generating the 3'rd input dimension
877  */
878  unsigned int _in_depth = 0u;
879 
880  /**
881  * @brief A pad value which is used to fill pad area
882  */
883  float _pad_value = 0.0f;
884 
885  /**
886  * @brief A convolution kernel array [X, Y, Z, ...]
887  */
888  DEFINE_PROP(_kernel);
889  /**
890  * @brief A convolution paddings begin array [X, Y, Z, ...]
891  */
892  DEFINE_PROP(_padding);
893  /**
894  * @brief A convolution paddings end array [X, Y, Z, ...]
895  */
896  DEFINE_PROP(_pads_end);
897  /**
898  * @brief A convolution strides array [X, Y, Z, ...]
899  */
900  DEFINE_PROP(_stride);
901  /**
902  * @brief A convolution dilations array [X, Y, Z, ...]
903  */
904  DEFINE_PROP(_dilation);
905  /**
906  * @brief A number of output feature maps (size) generating the 3'rd output dimension
907  */
908  unsigned int _out_depth = 0u;
909  /**
910  * @brief Number of groups
911  */
912  unsigned int _group = 1u;
913  /**
914  * @brief Auto padding type
915  */
916  std::string _auto_pad;
917 
918  IE_SUPPRESS_DEPRECATED_START
919 
920  /**
921  * @brief Creates a new BinaryConvolutionLayer instance.
922  */
923  explicit BinaryConvolutionLayer(const LayerParams& p)
924  : WeightableLayer(p), _kernel(2, 0u), _padding(2, 0u), _stride(2, 1u), _dilation(2, 1u) {}
925 
926  /**
927  * @brief assignment operator
928  */
929  BinaryConvolutionLayer& operator=(const BinaryConvolutionLayer& that) {
930  if (&that != this) {
931  WeightableLayer::operator=(that);
932  _kernel = that._kernel;
933  _padding = that._padding;
934  _pads_end = that._pads_end;
935  _stride = that._stride;
936  _dilation = that._dilation;
937  _out_depth = that._out_depth;
938  _group = that._group;
939  _mode = that._mode;
940  _in_depth = that._in_depth;
941  _pad_value = that._pad_value;
942  }
943  return *this;
944  }
945  /**
946  * @brief copy constructor
947  */
948  BinaryConvolutionLayer(const BinaryConvolutionLayer& that): WeightableLayer(that) {
949  operator=(that);
950  }
951  /**
952  * @brief move constructor
953  */
954  BinaryConvolutionLayer(BinaryConvolutionLayer&&) = default;
955 
956  IE_SUPPRESS_DEPRECATED_END
957 
958  ~BinaryConvolutionLayer() override;
959 };
960 
961 #undef DEFINE_PROP
962 
963 /**
964  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
965  * @brief This class represents a fully connected layer
966  */
967 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(FullyConnectedLayer): public WeightableLayer {
968 public:
969  /**
970  * @brief A size of output
971  */
972  unsigned int _out_num = 0;
973 
974  /**
975  * @brief Creates a new FullyConnectedLayer instance and initializes layer parameters with the given values.
976  */
977  using WeightableLayer::WeightableLayer;
978 
979  ~FullyConnectedLayer() override;
980 };
981 
982 /**
983  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
984  * @brief This class represents concatenation layer
985  *
986  * Takes as input several data elements and merges them to one using the supplied axis
987  */
988 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ConcatLayer): public CNNLayer {
989 public:
990  /**
991  * @brief An axis on which concatenation operation is performed
992  */
993  unsigned int _axis = 1;
994 
995  /**
996  * @brief Creates a new ConcatLayer instance and initializes layer parameters with the given values.
997  *
998  * If batch is used, then batch also needs to be specified as an input dimension
999  * In the current implementation, axis 1 means channels and axis 0 means batch
1000  */
1001  using CNNLayer::CNNLayer;
1002 
1003  ~ConcatLayer() override;
1004 };
1005 
1006 /**
1007  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
1008  * @brief This class represents a layer that evenly splits the input into the supplied outputs
1009  */
1010 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(SplitLayer): public CNNLayer {
1011 public:
1012  /**
1013  * @brief An axis on which split operation is performed
1014  */
1015  unsigned int _axis = 1;
1016 
1017  /**
1018  * @brief Creates a new SplitLayer instance.
1019  */
1020  using CNNLayer::CNNLayer;
1021 
1022  ~SplitLayer() override;
1023 };
1024 
1025 /**
1026  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
1027  * @brief This class represents a Local Response Normalization (LRN) Layer
1028  */
1029 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(NormLayer): public CNNLayer {
1030 public:
1031  /**
1032  * @brief Response size
1033  */
1034  unsigned int _size = 0;
1035  /**
1036  * @brief K
1037  */
1038  unsigned int _k = 1;
1039  /**
1040  * @brief Alpha coefficient
1041  */
1042  float _alpha = 0;
1043  /**
1044  * @brief Beta coefficient
1045  */
1046  float _beta = 0;
1047  /**
1048  * @brief Flag to specify normalization across feature maps (true) or across channels
1049  */
1050  bool _isAcrossMaps = false;
1051 
1052  /**
1053  * @brief Creates a new NormLayer instance.
1054  */
1055  using CNNLayer::CNNLayer;
1056 
1057  ~NormLayer() override;
1058 };
1059 
1060 /**
1061  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
1062  * @brief This class represents standard softmax Layer
1063  */
1064 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(SoftMaxLayer): public CNNLayer {
1065 public:
1066  /**
1067  * @brief Axis number for a softmax operation
1068  */
1069  int axis = 1;
1070  /**
1071  * @brief Creates a new SoftMaxLayer instance.
1072  */
1073  using CNNLayer::CNNLayer;
1074 
1075  ~SoftMaxLayer() override;
1076 };
1077 
1078 /**
1079  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
1080  * @brief This class represents standard GRN Layer
1081  */
1082 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(GRNLayer): public CNNLayer {
1083 public:
1084  /**
1085  * @brief A default constructor. Creates a new GRNLayer instance and initializes layer parameters with the given
1086  * values.
1087  */
1088  using CNNLayer::CNNLayer;
1089 
1090  /**
1091  * @brief Bias for squares sum
1092  */
1093  float bias = 0.f;
1094 
1095  ~GRNLayer() override;
1096 };
1097 
1098 /**
1099  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
1100  * @brief This class represents standard MVN Layer
1101  */
1102 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(MVNLayer): public CNNLayer {
1103 public:
1104  /**
1105  * @brief A default constructor. Creates a new MVNLayer instance and initializes layer parameters with the given
1106  * values.
1107  */
1108  using CNNLayer::CNNLayer;
1109 
1110  /**
1111  * @brief Indicate that mean value is calculated across channels
1112  */
1113  int across_channels = 0;
1114 
1115  /**
1116  * @brief Indicate that the result needs to be normalized
1117  */
1118  int normalize = 1;
1119 
1120  ~MVNLayer() override;
1121 };
1122 
1123 /**
1124  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
1125  * @brief This class represents a Rectified Linear activation layer
1126  */
1127 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ReLULayer): public CNNLayer {
1128 public:
1129  /**
1130  * @brief Negative slope is used to scale negative inputs instead of setting them to 0
1131  */
1132  float negative_slope = 0.0f;
1133 
1134  /**
1135  * @brief Creates a new ReLULayer instance.
1136  */
1137  using CNNLayer::CNNLayer;
1138 
1139  ~ReLULayer() override;
1140 };
1141 
1142 /**
1143  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
1144  * @brief This class represents a Clamp activation layer
1145  *
1146  * Clamps all tensor elements into the range [min_value, max_value]
1147  */
1148 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ClampLayer): public CNNLayer {
1149 public:
1150  /**
1151  * @brief A minimum value
1152  */
1153  float min_value = 0.0f;
1154 
1155  /**
1156  * @brief A maximum value
1157  */
1158  float max_value = 1.0f;
1159  /**
1160  * @brief Creates a new ClampLayer instance.
1161  */
1162  using CNNLayer::CNNLayer;
1163 
1164  ~ClampLayer() override;
1165 };
1166 
1167 /**
1168  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
1169  * @brief This class represents a ReLU6 activation layer
1170  *
1171  * Clamps all tensor elements into the range [0, 6.0]
1172  */
1173 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ReLU6Layer): public ClampLayer {
1174 public:
1175  IE_SUPPRESS_DEPRECATED_START
1176  /**
1177  * @brief A constructor with common layer parameters
1178  * @param prms The common layer parameters
1179  */
1180  explicit ReLU6Layer(const LayerParams& prms): ClampLayer(prms) {
1181  max_value = 6.0f;
1182  }
1183  IE_SUPPRESS_DEPRECATED_END
1184 
1185  ~ReLU6Layer() override;
1186 };
1187 
1188 /**
1189  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
1190  * @brief This class represents an element wise operation layer
1191  */
1192 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(EltwiseLayer): public CNNLayer {
1193 public:
1194  /**
1195  * @enum eOperation
1196  * @brief Defines possible operations that can be used
1197  */
1198  enum eOperation {
1199  Sum = 0,
1200  Prod,
1201  Max,
1202  Sub,
1203  Min,
1204  Div,
1205  Squared_diff,
1206  Floor_mod,
1207  Pow,
1208  Equal,
1209  Not_equal,
1210  Less,
1211  Less_equal,
1212  Greater,
1213  Greater_equal,
1214  Logical_AND,
1215  Logical_OR,
1216  Logical_XOR,
1217  Logical_NOT,
1218  Mean
1219  };
1220 
1221  /**
1222  * @brief A type of the operation to use
1223  */
1224  eOperation _operation = Sum;
1225 
1226  /**
1227  * @brief A vector of coefficients to scale the operands
1228  */
1229  std::vector<float> coeff;
1230 
1231  /**
1232  * @brief Creates a new EltwiseLayer instance.
1233  */
1234  using CNNLayer::CNNLayer;
1235 
1236  ~EltwiseLayer() override;
1237 };
1238 
1239 /**
1240  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
1241  * @brief This class represents a standard crop layer
1242  */
1243 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(CropLayer): public CNNLayer {
1244 public:
1245  /**
1246  * @brief A vector of dimensions for cropping
1247  */
1248  std::vector<int> axis;
1249  /**
1250  * @brief A vector of dimensions to be preserved
1251  */
1252  std::vector<int> dim;
1253  /**
1254  * @brief A vector of offsets for each dimension
1255  */
1256  std::vector<int> offset;
1257 
1258  /**
1259  * @brief Creates a new CropLayer instance.
1260  */
1261  using CNNLayer::CNNLayer;
1262 
1263  ~CropLayer() override;
1264 };
1265 
1266 /**
1267  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
1268  * @brief This class represents a standard reshape layer
1269  */
1270 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ReshapeLayer): public CNNLayer {
1271 public:
1272  /**
1273  * @brief A vector of sizes of the shape
1274  */
1275  std::vector<int> shape;
1276  /**
1277  * @brief A number of axis to be taken for a reshape
1278  */
1279  int axis = 0;
1280  /**
1281  * @brief A number of first axes to be taken for a reshape
1282  */
1283  int num_axes = -1;
1284 
1285  /**
1286  * @brief Creates a new ReshapeLayer instance.
1287  */
1288  using CNNLayer::CNNLayer;
1289 
1290  ~ReshapeLayer() override;
1291 };
1292 
1293 /**
1294  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
1295  * @brief This class represents a standard Tile Layer
1296  */
1297 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(TileLayer): public CNNLayer {
1298 public:
1299  /**
1300  * @brief An index of the axis to tile
1301  */
1302  int axis = -1;
1303  /**
1304  * @brief A number of copies to be made
1305  */
1306  int tiles = -1;
1307 
1308  /**
1309  * @brief Creates a new TileLayer instance.
1310  */
1311  using CNNLayer::CNNLayer;
1312 
1313  ~TileLayer() override;
1314 };
1315 
1316 /**
1317  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
1318  * @brief This class represents a Layer which performs Scale and Shift
1319  */
1320 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ScaleShiftLayer): public WeightableLayer {
1321 public:
1322  /**
1323  * @brief A flag that indicates if the same value is used for all the features. If false, the value is used pixel
1324  * wise
1325  */
1326  unsigned int _broadcast = 0;
1327 
1328  /**
1329  * @brief Creates a new ScaleShiftLayer instance.
1330  */
1331  using WeightableLayer::WeightableLayer;
1332 
1333  ~ScaleShiftLayer() override;
1334 };
1335 
1336 /**
1337  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
1338  * @brief This class represents TensorIterator layer
1339  */
1340 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(TensorIterator): public CNNLayer {
1341 public:
1342  struct PortMap {
1343  // Data map rule
1344  int from; /**< Index of external data from ins/outs fields of CNNLayer */
1345  int to; /**< Index of internal data in iterator body */
1346 
1347  // Iteration rule
1348  int axis; /**< Axis to iterate through */
1349  int stride; /**< Stride to iterate through */
1350  int start; /**< Start index of iteration range */
1351  int end; /**< Last index of iteration range */
1352  int part_size; /**< Part size which will be transferred to body subnetwork */
1353  };
1354 
1355  /**
1356  * @brief Describes a tensor iterator body
1357  */
1358  struct Body {
1359  std::vector<DataPtr> inputs; //!< Inputs data
1360  std::vector<DataPtr> outputs; //!< Outputs data
1361  };
1362 
1363  std::vector<PortMap> input_port_map; //!< Input ports map
1364  std::vector<PortMap> output_port_map; //!< Output ports map
1365  std::vector<PortMap> back_edges; //!< Back edges map
1366 
1367  Body body; //!< A Tensor Iterator body
1368 
1369  using CNNLayer::CNNLayer;
1370 
1371  ~TensorIterator() override;
1372 };
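// Illustrative sketch (all concrete values are assumptions): an input_port_map rule that
// feeds external input 0 into body input 0, slicing it along axis 1 one element per iteration.
//
//   TensorIterator::PortMap rule{};
//   rule.from = 0;       // index into the TensorIterator ins
//   rule.to = 0;         // index of the body input
//   rule.axis = 1;       // iterate along this axis
//   rule.stride = 1;
//   rule.start = 0;
//   rule.end = -1;
//   rule.part_size = 1;
//   ti.input_port_map.push_back(rule);  // `ti` is a TensorIterator instance (assumed)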
1373 
1374 /**
1375  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
1376  * @brief Base class for recurrent cell layers
1377  */
1378 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(RNNCellBase): public WeightableLayer {
1379 public:
1380  using WeightableLayer::WeightableLayer;
1381 
1382  /**
1383  * @brief Direct type of recurrent cell (including subtypes)
1384  * Description of particular cell semantics is in LSTMCell, GRUCell, RNNCell.
1385  */
1386  enum CellType {
1387  LSTM, /**< Original LSTM cell */
1388  GRU, /**< Original GRU cell */
1389  RNN, /**< Original RNN cell */
1390  GRU_LBR, /**< GRU cell modification. "Linear before reset" */
1391  };
1392 
1393  /** @copybrief CellType */
1394  CellType cellType = LSTM;
1395 
1396  /**
1397  * @brief Size of hidden state data
1398  *
1399  * In case of batch output state tensor will have shape [N, hidden_size]
1400  */
1401  int hidden_size = 0;
1402 
1403  /**
1404  * @brief Clip data into range [-clip, clip] on input of activations
1405  *
1406  * clip==0.0f means no clipping
1407  */
1408  float clip = 0.0f;
1409  /**
1410  * @brief Activations used inside recurrent cell
1411  *
1412  * Valid values: sigmoid, tanh, relu
1413  */
1414  std::vector<std::string> activations;
1415 
1416  /**
1417  * @brief Alpha parameters of activations
1418  *
1419  * Respective to activation list.
1420  */
1421  std::vector<float> activation_alpha;
1422 
1423  /**
1424  * @brief Beta parameters of activations
1425  *
1426  * Respective to activation list.
1427  */
1428  std::vector<float> activation_beta;
1429 
1430  ~RNNCellBase() override;
1431 };
1432 
1433 /**
1434  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
1435  * @brief LSTM Cell layer
1436  *
1437  * G - number of gates (=4)
1438  * N - batch size
1439  * S - state size (=hidden_size)
1440  *
1441  * Inputs:
1442  * [N,D] Xt - input data
1443  * [N,S] Ht-1 - initial hidden state
1444  * [N,S] Ct-1 - initial cell state
1445  *
1446  * Outputs:
1447  * [N,S] Ht - out hidden state
1448  * [N,S] Ct - out cell state
1449  *
1450  * Weights:
1451  * - weights [G,S,D+S]
1452  * - biases [G,S]
1453  * NB! gates order is FICO {forget, input, candidate, output}
1454  *
1455  * activations is {_f, _g, _h}
1456  * default: {_f=sigm, _g=tanh, _h=tanh}
1457  *
1458  * Equations:
1459  *
1460  * * - matrix mult
1461  * (.) - eltwise mult
1462  * [,] - concatenation
1463  *
1464  * - ft = _f(Wf*[Ht-1, Xt] + Bf)
1465  * - it = _f(Wi*[Ht-1, Xt] + Bi)
1466  * - ct = _g(Wc*[Ht-1, Xt] + Bc)
1467  * - ot = _f(Wo*[Ht-1, Xt] + Bo)
1468  * - Ct = ft (.) Ct-1 + it (.) ct
1469  * - Ht = ot (.) _h(Ct)
1470  */
1471 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(LSTMCell): public RNNCellBase {
1472 public:
1473  using RNNCellBase::RNNCellBase;
1474  using RNNCellBase::operator=;
1475 
1476  ~LSTMCell() override;
1477 };
1478 
1479 /**
1480  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
1481  * @brief GRU Cell layer
1482  *
1483  * G - number of gates (=3)
1484  * N - batch size
1485  * S - state size (=hidden_size)
1486  *
1487  * Inputs:
1488  * [N,D] Xt - input data
1489  * [N,S] Ht-1 - initial hidden state
1490  *
1491  * Outputs:
1492  * [N,S] Ht - out hidden state
1493  *
1494  * Weights:
1495  * - weights [G,S,D+S]
1496  * - biases [G,S]
1497  * NB! gates order is ZRH {update, reset, output}
1498  *
1499  * activations is {_f, _g}
1500  * default: {_f=sigm, _g=tanh}
1501  *
1502  * Equations:
1503  *
1504  * * - matrix mult
1505  * (.) - eltwise mult
1506  * [,] - concatenation
1507  *
1508  * - zt = _f(Wz*[Ht-1, Xt] + Bz)
1509  * - rt = _f(Wr*[Ht-1, Xt] + Br)
1510  * - ht = _g(Wh*[rt (.) Ht-1, Xt] + Bh)
1511  * - Ht = (1 - zt) (.) ht + zt (.) Ht-1
1512  */
1513 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(GRUCell): public RNNCellBase {
1514 public:
1515  using RNNCellBase::RNNCellBase;
1516  using RNNCellBase::operator=;
1517 
1518  ~GRUCell() override;
1519 };
1520 
1521 /**
1522  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
1523  * @brief RNN Cell layer
1524  *
1525  * G - number of gates (=1)
1526  * N - batch size
1527  * S - state size (=hidden_size)
1528  *
1529  * Inputs:
1530  * [N,D] Xt - input data
1531  * [N,S] Ht-1 - initial hidden state
1532  *
1533  * Outputs:
1534  * [N,S] Ht - out hidden state
1535  *
1536  * Weights:
1537  * - weights [G,S,D+S]
1538  * - biases [G,S]
1539  *
1540  * activations is {_f}
1541  * default: {_f=tanh}
1542  *
1543  * Equations:
1544  *
1545  * * - matrix mult
1546  * [,] - concatenation
1547  *
1548  * - Ht = _f(Wi*[Ht-1, Xt] + Bi)
1549  */
1550 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(RNNCell): public RNNCellBase {
1551 public:
1552  using RNNCellBase::RNNCellBase;
1553  using RNNCellBase::operator=;
1554 
1555  ~RNNCell() override;
1556 };
1557 
1558 /**
1559  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
1560  * @brief Sequence of recurrent cells
1561  *
1562  * N - batch size
1563  * T - sequence size
1564  * S - state size (=hidden_size)
1565  * NS - num of state tensors (LSTM=2, GRU/RNN=1)
1566  * ND - num of directions (BDR=2, FWD/BWD=1)
1567  *
1568  * Inputs:
1569  * [N,T,D] Xt - input data
1570  * [ND,N,S] Ht-1 - initial hidden state
1571  * [ND,N,S] Ct-1 - initial cell state // if NS==2
1572  * [N] SL - sequence length (optional input)
1573  *
1574  * Outputs:
1575  * [ND,N,T,S] Xt - output data
1576  * [ND,N,S] Ht - out hidden state
1577  * [ND,N,S] Ct - out cell state // if NS==2
1578  *
1579  * NB! if axis==0 batch and sequence dimensions are swapped (N <-> T) for input and output tensors
1580  *
1581  * Weights:
1582  * - weights [ND,G,S,D+S]
1583  * - biases [ND,G,S]
1584  * NB! if ND==2 weights are concatenated cell weights [forward_cell_weights, backward_cell_weights]
1585  *
1586  */
1587 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(RNNSequenceLayer): public RNNCellBase {
1588 public:
1589  using RNNCellBase::RNNCellBase;
1590 
1591  /**
1592  * @brief An axis by which iteration is performed
1593  *
1594  * axis=0 means first input/output data blob dimension is sequence
1595  * axis=1 means first input/output data blob dimension is batch
1596  */
1597  unsigned int axis = 1;
1598 
1599  /**
1600  * @brief Direction of iteration through sequence dimension
1601  */
1602  enum Direction {
1603  FWD, /**< Forward mode. Iterate starts from index 0 with step 1. */
1604  BWD, /**< Backward mode. Iterate starts from last index with step -1. */
1605  BDR /**< Bidirectional mode. First is forward pass, second is backward. */
1606  };
1607 
1608  /** @copybrief Direction */
1609  Direction direction = FWD;
1610 
1611  ~RNNSequenceLayer() override;
1612 };
1613 
1614 /**
1615  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
1616  * @brief This class represents a Layer which performs Scale and Shift
1617  */
1618 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(PReLULayer): public WeightableLayer {
1619 public:
1620  /**
1621  * @brief A flag that indicates if the same negative_slope value is used for all the features. If false, the value
1622  * is used pixel wise
1623  */
1624  bool _channel_shared = false;
1625 
1626  /**
1627  * @brief A default constructor. Creates a new PReLULayer instance and initializes layer parameters with the given
1628  * values.
1629  *
1630  * @param prms Initial layer parameters
1631  */
1632  using WeightableLayer::WeightableLayer;
1633 
1634  ~PReLULayer() override;
1635 };
1636 
1637 /**
1638  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
1639  * @brief This class represents a standard Power Layer
1640  *
1641  * Formula is: output = (offset + scale * input) ^ power
1642  */
1643 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(PowerLayer): public CNNLayer {
1644 public:
1645  /**
1646  * @brief An exponent value
1647  */
1648  float power = 1.f;
1649  /**
1650  * @brief A scale factor
1651  */
1652  float scale = 1.f;
1653  /**
1654  * @brief An offset value
1655  */
1656  float offset = 0.f;
1657 
1658  /**
1659  * @brief Creates a new PowerLayer instance.
1660  */
1661  using CNNLayer::CNNLayer;
1662 
1663  ~PowerLayer() override;
1664 };
1665 
1666 /**
1667  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
1668  * @brief This class represents a Batch Normalization Layer
1669  */
1670 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(BatchNormalizationLayer): public WeightableLayer {
1671 public:
1672  /**
1673  * @brief A small value to add to the variance estimate to avoid division by zero
1674  */
1675  float epsilon = 1e-3f;
1676 
1677  /**
1678  * @brief Creates a new BatchNormalizationLayer instance.
1679  */
1680  using WeightableLayer::WeightableLayer;
1681 
1682  ~BatchNormalizationLayer() override;
1683 };
1684 
1685 /**
1686  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
1687  * @brief This class represents a general matrix multiplication operation layer
1688  *
1689  * Formula is: dst := alpha*src1*src2 + beta*src3
1690  */
1691 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(GemmLayer): public CNNLayer {
1692 public:
1693  /**
1694  * @brief A scale factor of src1 matrix
1695  */
1696  float alpha = 1.f;
1697  /**
1698  * @brief A scale factor of src3 matrix
1699  */
1700  float beta = 1.f;
1701  /**
1702  * @brief A flag that indicates if the src1 matrix is to be transposed
1703  */
1704  bool transpose_a = false;
1705  /**
1706  * @brief A flag that indicates if the src2 matrix is to be transposed
1707  */
1708  bool transpose_b = false;
1709  /**
1710  * @brief Creates a new GemmLayer instance.
1711  */
1712  using CNNLayer::CNNLayer;
1713 
1714  ~GemmLayer() override;
1715 };
1716 
1717 /**
1718  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
1719  * @brief This class represents a standard Pad layer
1720  *
1721  * Adds paddings to input tensor
1722  */
1723 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(PadLayer): public CNNLayer {
1724 public:
1725  /**
1726  * @enum ePadMode
1727  * @brief Defines possible modes of pad operation
1728  */
1729  enum ePadMode { Constant = 0, Edge, Reflect, Symmetric };
1730 
1731  /**
1732  * @brief Size of padding in the beginning of each axis
1733  */
1734  PropertyVector<unsigned int> pads_begin;
1735  /**
1736  * @brief Size of padding in the end of each axis
1737  */
1738  PropertyVector<unsigned int> pads_end;
1739  /**
1740  * @brief Mode of pad operation
1741  */
1742  ePadMode pad_mode = Constant;
1743  /**
1744  * @brief A pad value which is used for filling in Constant mode
1745  */
1746  float pad_value = 0.0f;
1747  /**
1748  * @brief Creates a new PadLayer instance.
1749  */
1750  using CNNLayer::CNNLayer;
1751 
1752  ~PadLayer() override;
1753 };
1754 
1755 /**
1756  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
1757  * @brief This class represents a standard Gather layer
1758  *
1759  * Gather slices from Dictionary according to Indexes
1760  */
1761 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(GatherLayer): public CNNLayer {
1762 public:
1763  /**
1764  * @brief The axis in Dictionary to gather Indexes from
1765  */
1766  int axis = 0;
1767  /**
1768  * @brief Creates a new GatherLayer instance.
1769  */
1770  using CNNLayer::CNNLayer;
1771 
1772  ~GatherLayer() override;
1773 };
1774 
1775 /**
1776  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
1777  * @brief This class represents a standard Strided Slice layer
1778  *
1779  * Strided Slice picks from input tensor according to its parameters
1780  */
1781 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(StridedSliceLayer): public CNNLayer {
1782 public:
1783  /**
1784  * @brief The begin_mask is a bitmask where bit i being 0 means
1785  * to ignore the begin value and instead use the default value
1786  */
1787  std::string begin_mask;
1788  /**
1789  * @brief Analogous to begin_mask
1790  */
1791  std::string end_mask;
1792  /**
1793  * @brief The ellipsis_mask is a bitmask where bit i being 1 means
1794  * the i-th is actually an ellipsis
1795  */
1796  std::string ellipsis_mask;
1797  /**
1798  * @brief The new_axis_mask is a bitmask where bit i being 1 means
1799  * the i-th position creates a new 1 dimension shape
1800  */
1801  std::string new_axis_mask;
1802  /**
1803  * @brief The shrink_axis_mask is a bitmask where bit i being 1 means
1804  * the i-th position shrinks the dimensionality
1805  */
1806  std::string shrink_axis_mask;
1807 
1808  /**
1809  * @brief Creates a new StridedSliceLayer instance.
1810  */
1811  using CNNLayer::CNNLayer;
1812 
1813  ~StridedSliceLayer() override;
1814 };
1815 
1816 /**
1817  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
1818  * @brief This class represents a standard Shuffle Channels layer
1819  * Shuffle Channels picks from input tensor according to its parameters
1820  */
1821 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ShuffleChannelsLayer): public CNNLayer {
1822 public:
1823  /**
1824  * @brief The axis in tensor to shuffle channels
1825  */
1826  int axis = 1;
1827 
1828  /**
1829  * @brief The group of output shuffled channels
1830  */
1831  unsigned int group = 1;
1832 
1833  /**
1834  * @brief Creates a new ShuffleChannelsLayer instance.
1835  */
1836  using CNNLayer::CNNLayer;
1837 
1838  ~ShuffleChannelsLayer() override;
1839 };
1840 
1841 /**
1842  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
1843  * @brief This class represents a standard Depth To Space layer
1844  * Depth To Space picks from input tensor according to its parameters
1845  */
1846 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(DepthToSpaceLayer): public CNNLayer {
1847 public:
1848  /**
1849  * @brief The block size of the depth-to-space rearrangement
1850  */
1851  unsigned int block_size = 1;
1852 
1853  /**
1854  * @brief Creates a new DepthToSpaceLayer instance.
1855  */
1856  using CNNLayer::CNNLayer;
1857 
1858  ~DepthToSpaceLayer() override;
1859 };
1860 
1861 /**
1862  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
1863  * @brief This class represents a standard Space To Depth layer
1864  * Space To Depth picks from input tensor according to its parameters
1865  */
1866 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(SpaceToDepthLayer): public CNNLayer {
1867 public:
1868  /**
1869  * @brief The block size of the space-to-depth rearrangement
1870  */
1871  unsigned int block_size = 1;
1872 
1873  /**
1874  * @brief Creates a new SpaceToDepthLayer instance.
1875  */
1876  using CNNLayer::CNNLayer;
1877 
1878  ~SpaceToDepthLayer() override;
1879 };
1880 
1881 /**
1882  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
1883  * @brief This class represents a standard Space To Batch layer
1884  *
1885  * Space To Batch picks from input tensor according to its parameters
1886  */
1887 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(SpaceToBatchLayer): public CNNLayer {
1888 public:
1889  /**
1890  * @brief Spatial dimensions blocks sizes
1891  */
1892  std::vector<size_t> _block_shape;
1893 
1894  /**
1895  * @brief Size of padding in the beginning of each axis
1896  */
1897  std::vector<size_t> _pads_begin;
1898  /**
1899  * @brief Size of padding in the end of each axis
1900  */
1901  std::vector<size_t> _pads_end;
1902 
1903  /**
1904  * @brief Creates a new SpaceToBatchLayer instance.
1905  */
1906  using CNNLayer::CNNLayer;
1907 
1908  ~SpaceToBatchLayer() override;
1909 };
1910 
1911 /**
1912  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
1913  * @brief This class represents a standard Batch To Space layer
1914  *
1915  * Batch To Space picks from input tensor according to its parameters
1916  */
1917 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(BatchToSpaceLayer): public CNNLayer {
1918 public:
1919  /**
1920  * @brief Spatial dimensions blocks sizes
1921  */
1922  std::vector<size_t> _block_shape;
1923 
1924  /**
1925  * @brief It specifies how many elements to crop from the beginning of the intermediate result
1926  * across the spatial dimensions
1927  */
1928  std::vector<size_t> _crops_begin;
1929 
1930  /**
1931  * @brief Specifies how many elements to crop from the end of the intermediate result
1932  * across the spatial dimensions
1933  */
1934  std::vector<size_t> _crops_end;
1935 
1936  /**
1937  * @brief Creates a new BatchToSpaceLayer instance.
1938  */
1939  using CNNLayer::CNNLayer;
1940 
1941  ~BatchToSpaceLayer() override;
1942 };
1943 
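// A hypothetical configuration sketch mirroring SpaceToBatch (illustrative values, not defaults):
//
//     BatchToSpaceLayer layer(LayerParams("b2s", "BatchToSpace", Precision::FP32));
//     layer._block_shape = {1, 1, 2, 2};  // blocks moved from the batch back to H and W
//     layer._crops_begin = {0, 0, 0, 0};  // elements cropped from the beginning of each axis
//     layer._crops_end   = {0, 0, 1, 1};  // elements cropped from the end of each axis
//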
1944 /**
1945  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
1946  * @brief This class represents SparseFillEmptyRows layer
1947  *
1948  * SparseFillEmptyRows fills empty rows in a sparse tensor
1949  */
1950 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(SparseFillEmptyRowsLayer): public CNNLayer {
1951 public:
1952  /**
1953  * @brief Creates a new SparseFillEmptyRowsLayer instance.
1954  */
1955  using CNNLayer::CNNLayer;
1956 
1957  ~SparseFillEmptyRowsLayer() override;
1958 };
1959 
1960 /**
1961  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
1962  * @brief This class represents SparseSegmentMean(SqrtN, Sum) layers
1963  * SparseSegmentMean(SqrtN, Sum) layer reduces data along sparse segments of a tensor.
1964  */
1965 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(SparseSegmentReduceLayer): public CNNLayer {
1966 public:
1967  /**
1968  * @brief Creates a new SparseSegmentReduceLayer instance.
1969  */
1970  using CNNLayer::CNNLayer;
1971 
1972  ~SparseSegmentReduceLayer() override;
1973 };
1974 
1975 /**
1976  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
1977  * @brief This class represents ExperimentalSparseWeightedReduce layer
1978  * ExperimentalSparseWeightedReduce layer reduces data along sparse segments of a tensor.
1979  */
1980 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ExperimentalSparseWeightedReduceLayer) : public CNNLayer {
1981 public:
1982  /**
1983  * @brief Creates a new ExperimentalSparseWeightedReduceLayer instance.
1984  */
1985  using CNNLayer::CNNLayer;
1986 
1987  ~ExperimentalSparseWeightedReduceLayer() override;
1988 };
1989 
1990 /**
1991  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
1992  * @brief This class represents SparseToDense layer
1993  * SparseToDense layer converts a sparse tensor to a dense tensor.
1994  */
1995 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(SparseToDenseLayer) : public CNNLayer {
1996 public:
1997  /**
1998  * @brief Creates a new SparseToDenseLayer instance.
1999  */
2000  using CNNLayer::CNNLayer;
2001 
2002  ~SparseToDenseLayer() override;
2003 };
2004 
2005 /**
2006  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
2007  * @brief This class represents Bucketize layer
2008  * Bucketize layer bucketizes the input based on the boundaries.
2009  */
2010 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(BucketizeLayer) : public CNNLayer {
2011 public:
2012  /**
2013  * @brief Indicates whether the intervals include the right or the left bucket edge.
2014  */
2015  bool with_right_bound = false;
2016 
2017  /**
2018  * @brief Creates a new BucketizeLayer instance.
2019  */
2020  using CNNLayer::CNNLayer;
2021 
2022  ~BucketizeLayer() override;
2023 };
2024 
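// A hypothetical configuration sketch (illustrative values, not defaults):
//
//     BucketizeLayer layer(LayerParams("bucketize", "Bucketize", Precision::FP32));
//     layer.with_right_bound = true;  // intervals include their right bucket edge
//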
2025 /**
2026  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
2027  * @brief This class represents a standard Reverse Sequence layer
2028  *
2029  * Reverse Sequence modifies the input tensor according to the given parameters
2030  */
2031 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ReverseSequenceLayer): public CNNLayer {
2032 public:
2033  /**
2034  * @brief The seq_axis dimension of the tensor along which sequences are partially reversed
2035  */
2036  int seq_axis = 1;
2037 
2038  /**
2039  * @brief The batch_axis dimension in tensor along which reversal is performed
2040  */
2041  int batch_axis = 0;
2042 
2043  /**
2044  * @brief Creates a new ReverseSequence instance.
2045  */
2046  using CNNLayer::CNNLayer;
2047 
2048  ~ReverseSequenceLayer() override;
2049 };
2050 
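// A hypothetical configuration sketch (illustrative values, not defaults):
//
//     ReverseSequenceLayer layer(LayerParams("rev", "ReverseSequence", Precision::FP32));
//     layer.seq_axis = 1;    // axis that holds the sequences to be partially reversed
//     layer.batch_axis = 0;  // axis that enumerates the independent sequences
//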
2051 /**
2052  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
2053  * @brief This class represents a OneHot layer
2054  * Converts input into OneHot representation.
2055  */
2056 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(OneHotLayer): public CNNLayer {
2057 public:
2058  /**
2059  * @brief A depth of representation
2060  */
2061  unsigned int depth = 0;
2062 
2063  /**
2064  * @brief The locations represented by indices in input take value on_value
2065  */
2066  float on_value = 1.f;
2067 
2068  /**
2069  * @brief The locations not represented by indices in input take value off_value
2070  */
2071  float off_value = 0.f;
2072 
2073  /**
2074  * @brief The axis along which the one-hot dimension is added, defining the output tensor shape
2075  */
2076  int axis = -1;
2077 
2078  /**
2079  * @brief Creates a new OneHot instance
2080  */
2081  using CNNLayer::CNNLayer;
2082 
2083  ~OneHotLayer() override;
2084 };
2085 
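// A hypothetical configuration sketch (illustrative values, not defaults):
//
//     OneHotLayer layer(LayerParams("onehot", "OneHot", Precision::FP32));
//     layer.depth = 10;       // size of the new one-hot dimension
//     layer.on_value = 1.f;   // written at positions given by the input indices
//     layer.off_value = 0.f;  // written everywhere else
//     layer.axis = -1;        // insert the one-hot dimension as the last axis
//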
2086 /**
2087  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
2088  * @brief This class represents a standard Range layer
2089  *
2090  * Range generates a sequence of numbers according to the given parameters
2091  */
2092 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(RangeLayer): public CNNLayer {
2093 public:
2094  /**
2095  * @brief Creates a new RangeLayer instance.
2096  */
2097  using CNNLayer::CNNLayer;
2098 
2099  ~RangeLayer() override;
2100 };
2101 
2102 /**
2103  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
2104  * @brief This class represents a standard Fill layer
2105  *
2106  * Fill modifies the input tensor according to the given parameters
2107  */
2108 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(FillLayer): public CNNLayer {
2109 public:
2110  /**
2111  * @brief Creates a new Fill instance.
2112  */
2113  using CNNLayer::CNNLayer;
2114 
2115  ~FillLayer() override;
2116 };
2117 
2118 /**
2119  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
2120  * @brief This class represents a SelectLayer layer
2121  *
2122  * SelectLayer layer takes elements from the second (“then”) or the third (“else”) input based on condition mask
2123  * (“cond”) provided in the first input. The “cond” tensor is broadcasted to “then” and “else” tensors. The output
2124  * tensor shape is equal to broadcasted shape of “cond”, “then” and “else”.
2125  */
2126 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(SelectLayer): public CNNLayer {
2127 public:
2128  /**
2129  * @brief Creates a new SelectLayer instance.
2130  */
2131  using CNNLayer::CNNLayer;
2132 
2133  ~SelectLayer() override;
2134 };
2135 
2136 /**
2137  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
2138  * @brief This class represents a standard Broadcast layer
2139  *
2140  * Broadcast modifies the input tensor dimensions according to the given parameters
2141  */
2142 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(BroadcastLayer): public CNNLayer {
2143 public:
2144  /**
2145  * @brief Creates a new Broadcast instance.
2146  */
2147  using CNNLayer::CNNLayer;
2148 
2149  ~BroadcastLayer() override;
2150 };
2151 
2152 /**
2153  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
2154  * @brief This class represents a quantization operation layer
2155  *
2156  * Element-wise linear quantization of floating point input values into a discrete set of floating point values
2157  */
2158 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(QuantizeLayer): public CNNLayer {
2159 public:
2160  /**
2161  * @brief The number of quantization levels
2162  */
2163  int levels = 1;
2164 
2165  /**
2166  * @brief Creates a new QuantizeLayer instance.
2167  */
2168  using CNNLayer::CNNLayer;
2169 
2170  ~QuantizeLayer() override;
2171 };
2172 
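// A hypothetical configuration sketch (illustrative layer name, type string, and value):
//
//     QuantizeLayer layer(LayerParams("fq", "FakeQuantize", Precision::FP32));
//     layer.levels = 256;  // quantize values into 256 discrete levels
//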
2173 /**
2174  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
2175  * @brief This class represents standard Math layers
2176  *
2177  * Math layers modify the input tensor according to the given parameters
2178  */
2179 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(MathLayer): public CNNLayer {
2180 public:
2181  /**
2182  * @brief Creates a new Math instance.
2183  */
2184  using CNNLayer::CNNLayer;
2185 
2186  ~MathLayer() override;
2187 };
2188 
2189 /**
2190  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
2191  * @brief This class represents standard Reduce layers
2192  *
2193  * Reduce modifies the input tensor according to the given parameters
2194  */
2195 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ReduceLayer): public CNNLayer {
2196 public:
2197  /**
2198  * @brief A flag that indicates whether the reduced dimensions are kept (with size 1) in the output tensor
2199  */
2200  bool keep_dims = true;
2201 
2202  /**
2203  * @brief Creates a new Reduce instance.
2204  */
2205  using CNNLayer::CNNLayer;
2206 
2207  ~ReduceLayer() override;
2208 };
2209 
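// A hypothetical configuration sketch (illustrative values; the concrete reduction is
// assumed here to be selected by the layer type string):
//
//     ReduceLayer layer(LayerParams("reduce", "ReduceMean", Precision::FP32));
//     layer.keep_dims = false;  // drop the reduced axes instead of keeping them with size 1
//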
2210 /**
2211  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
2212  * @brief This class represents a standard TopK layer
2213  *
2214  * TopK picks the top K values from the input tensor according to the given parameters
2215  */
2216 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(TopKLayer): public CNNLayer {
2217 public:
2218  /**
2219  * @brief The mode could be 'max' or 'min'
2220  */
2221  std::string mode;
2222  /**
2223  * @brief The sort mode for the top K values; could be 'value' or 'index'
2224  */
2225  std::string sort;
2226  /**
2227  * @brief The axis dimension in the tensor along which the top K values are picked
2228  */
2229  int axis = -1;
2230 
2231  /**
2232  * @brief Creates a new TopKLayer instance.
2233  */
2234  using CNNLayer::CNNLayer;
2235 
2236  ~TopKLayer() override;
2237 };
2238 
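// A hypothetical configuration sketch (illustrative values, not defaults):
//
//     TopKLayer layer(LayerParams("topk", "TopK", Precision::FP32));
//     layer.mode = "max";    // pick the largest values
//     layer.sort = "value";  // order the picked values by value rather than by index
//     layer.axis = 1;        // pick the top K values along axis 1
//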
2239 /**
2240  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
2241  * @brief This class represents Unique layer.
2242  *
2243  * The Unique operation searches for unique elements in 1-D input
2244  */
2245 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(UniqueLayer): public CNNLayer {
2246 public:
2247  /**
2248  * @brief A flag indicating whether to sort unique elements
2249  */
2250  bool sorted;
2251  /**
2252  * @brief A flag indicating whether to return indices of input data elements in the output of uniques
2253  */
2254  bool return_inverse;
2255  /**
2256  * @brief A flag indicating whether to return a number of occurrences for each unique element
2257  */
2258  bool return_counts;
2259 
2260  /**
2261  * @brief Creates a new UniqueLayer instance.
2262  */
2263  using CNNLayer::CNNLayer;
2264 
2265  ~UniqueLayer() override;
2266 };
2267 
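// A hypothetical configuration sketch (illustrative values; these flags have no in-class
// defaults, so a sketch like this would set all three explicitly):
//
//     UniqueLayer layer(LayerParams("unique", "Unique", Precision::FP32));
//     layer.sorted = true;          // sort the unique elements
//     layer.return_inverse = true;  // also output indices into the set of unique elements
//     layer.return_counts = false;  // do not output occurrence counts
//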
2268 /**
2269  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
2270  * @brief This class represents a standard NonMaxSuppression layer
2271  */
2272 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(NonMaxSuppressionLayer): public CNNLayer {
2273 public:
2274  /**
2275  * @brief The 'center_point_box' indicates the format of the box data
2276  */
2277  bool center_point_box = false;
2278  /**
2279  * @brief The 'sort_result_descending' indicates that result will sort descending by score through all batches and
2280  * classes
2281  */
2283  /**
2284  * @brief Creates a new NonMaxSuppressionLayer instance.
2285  */
2286  using CNNLayer::CNNLayer;
2287 
2288  ~NonMaxSuppressionLayer() override;
2289 };
2290 
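// A hypothetical configuration sketch (illustrative values, not defaults):
//
//     NonMaxSuppressionLayer layer(LayerParams("nms", "NonMaxSuppression", Precision::FP32));
//     layer.center_point_box = true;        // boxes given in center/size format
//     layer.sort_result_descending = true;  // sort kept boxes by score across batches and classes
//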
2291 /**
2292  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
2293  * @brief This class represents a standard Scatter layer
2294  */
2295 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ScatterLayer): public CNNLayer {
2296 public:
2297  /**
2298  * @brief The axis in Dictionary to scatter Indexes from
2299  */
2300  int axis = 0;
2301  /**
2302  * @brief Creates a new ScatterLayer instance.
2303  */
2304  using CNNLayer::CNNLayer;
2305 
2306  ~ScatterLayer() override;
2307 };
2308 
2309 /**
2310  * @brief This class represents an ONNX ExperimentalDetectronPriorGridGenerator layer
2311  */
2312 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ExperimentalDetectronPriorGridGeneratorLayer): public CNNLayer {
2313 public:
2314  /**
2315  * @brief flatten value
2316  */
2317  int flatten = 1;
2318  /**
2319  * @brief Value of grid width
2320  */
2321  int grid_w = 0;
2322  /**
2323  * @brief Value of grid height
2324  */
2325  int grid_h = 0;
2326  /**
2327  * @brief Value of width step between grid cells
2328  */
2329  float stride_w = 0.f;
2330  /**
2331  * @brief Value of height step between grid cells
2332  */
2333  float stride_h = 0.f;
2334 
2335  /**
2336  * @brief Creates a new ExperimentalDetectronPriorGridGenerator instance.
2337  */
2338  using CNNLayer::CNNLayer;
2339 
2340  virtual ~ExperimentalDetectronPriorGridGeneratorLayer();
2341 };
2342 
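// A hypothetical configuration sketch (illustrative values, not defaults):
//
//     ExperimentalDetectronPriorGridGeneratorLayer layer(
//         LayerParams("grid", "ExperimentalDetectronPriorGridGenerator", Precision::FP32));
//     layer.grid_w = 14;      // number of grid cells along the width
//     layer.grid_h = 14;      // number of grid cells along the height
//     layer.stride_w = 16.f;  // horizontal step between grid cells
//     layer.stride_h = 16.f;  // vertical step between grid cells
//     layer.flatten = 1;      // flatten value (1 is the in-class default)
//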
2343 /**
2344  * @brief This class represents an ONNX ExperimentalDetectronGenerateProposalsSingleImage layer
2345  */
2346 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ExperimentalDetectronGenerateProposalsSingleImageLayer): public CNNLayer {
2347 public:
2348  /**
2349  * @brief Minimum width and height for boxes
2350  */
2351  float min_size = 0.f;
2352  /**
2353  * @brief Non max suppression threshold
2354  */
2355  float nms_threshold = 0.7f;
2356  /**
2357  * @brief Maximum number of anchors selected before nms
2358  */
2359  int pre_nms_topn = 1000;
2360  /**
2361  * @brief Maximum number of anchors selected after nms
2362  */
2363  int post_nms_topn = 1000;
2364 
2365  /**
2366  * @brief Creates a new ExperimentalDetectronGenerateProposalsSingleImage instance.
2367  */
2368  using CNNLayer::CNNLayer;
2369 
2370  virtual ~ExperimentalDetectronGenerateProposalsSingleImageLayer();
2371 };
2372 
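// A hypothetical configuration sketch (illustrative values):
//
//     ExperimentalDetectronGenerateProposalsSingleImageLayer layer(
//         LayerParams("proposals", "ExperimentalDetectronGenerateProposalsSingleImage", Precision::FP32));
//     layer.min_size = 0.f;        // discard boxes smaller than this width/height
//     layer.nms_threshold = 0.7f;  // overlap threshold used during non-maximum suppression
//     layer.pre_nms_topn = 1000;   // anchors kept before NMS
//     layer.post_nms_topn = 1000;  // proposals kept after NMS
//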
2373 IE_SUPPRESS_DEPRECATED_END_WIN
2374 
2375 } // namespace InferenceEngine