ie_layers.h
1 // Copyright (C) 2018-2020 Intel Corporation
2 // SPDX-License-Identifier: Apache-2.0
3 //
4 
5 /**
6  * @brief a header file for internal Layers structure to describe layers information
7  *
8  * @file ie_layers.h
9  */
10 #pragma once
11 
12 #include <algorithm>
13 #include <cctype>
14 #include <iterator>
15 #include <limits>
16 #include <map>
17 #include <memory>
18 #include <string>
19 #include <vector>
20 
21 #include "ie_blob.h"
22 #include "ie_common.h"
23 #include "ie_data.h"
24 #include "ie_layers_property.hpp"
25 
26 namespace ngraph {
27 
28 class Node;
29 
30 } // namespace ngraph
31 
32 namespace InferenceEngine {
33 
34 /**
35  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
36  * @brief This is an internal common Layer parameter parsing arguments
37  */
38 struct INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(LayerParams) {
39  /**
40  * @brief Layer name
41  */
42  std::string name;
43 
44  /**
45  * @brief Layer type
46  */
47  std::string type;
48 
49  /**
50  * @brief Layer precision
51  */
52  Precision precision;
53 
54  /**
55  * @brief A default constructor.
56  */
57  LayerParams();
58 
59  IE_SUPPRESS_DEPRECATED_START
60 
61  /**
62  * @brief A copy constructor.
63  * @param other An object to copy.
64  */
65  LayerParams(const LayerParams & other);
66 
67  /**
68  * @brief A copy assignment operator
69  * @param other An object to copy
70  * @return A value
71  */
72  LayerParams & operator= (const LayerParams & other);
73 
74  IE_SUPPRESS_DEPRECATED_END
75 
76  /**
77  * @brief A constructor with parameters.
78  * @param name A layer name.
79  * @param type A layer type.
80  * @param precision A layer precision.
81  */
82  LayerParams(const std::string & name, const std::string & type, Precision precision);
83 };
84 
85 /**
86  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
87  * @brief This is a base abstraction Layer - all DNN Layers inherit from this class
88  */
89 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(CNNLayer) {
90 protected:
91  std::shared_ptr<ngraph::Node> node;
92 public:
93  /**
94  * @brief A shared pointer to CNNLayer
95  */
96  IE_SUPPRESS_DEPRECATED_START
97  using Ptr = std::shared_ptr<CNNLayer>;
98  IE_SUPPRESS_DEPRECATED_END
99 
100  /**
101  * @brief Layer name
102  */
103  std::string name;
104 
105  /**
106  * @brief Layer type
107  */
108  std::string type;
109 
110  /**
111  * @brief Layer base operating precision
112  */
113  Precision precision;
114 
115  /**
116  * @brief A vector of pointers to the output data elements of this layer in the di-graph (order matters)
117  */
118  std::vector<DataPtr> outData;
119 
120  /**
121  * @brief A vector of weak pointers to the input data elements of this layer in the di-graph (order matters)
122  */
123  std::vector<DataWeakPtr> insData;
124 
125  /**
126  * @brief If suggested to fuse - a pointer to the layer which needs to be fused with this layer
127  */
128  IE_SUPPRESS_DEPRECATED_START_WIN
129  Ptr _fusedWith;
130  IE_SUPPRESS_DEPRECATED_END_WIN
131 
132  /**
133  * @brief Convenience user values to store in this object as extra data
134  */
135  UserValue userValue;
136 
137  /**
138  * @brief Layer affinity set by user.
139  */
140  std::string affinity;
141 
142  IE_SUPPRESS_DEPRECATED_START
143 
144  /**
145  * @brief A constructor. Creates a new CNNLayer instance and initializes layer parameters with the given values.
146  *
147  * @param prms Basic common parsing parameters
148  */
149  explicit CNNLayer(const LayerParams& prms);
150 
151  /**
152  * @brief Returns the original nGraph op
153  * @return A smart pointer to nGraph op
154  */
155  std::shared_ptr<ngraph::Node> getNode() {
156  return node;
157  }
158 
159  /**
160  * @brief A copy constructor
161  * @param other An object to copy
162  */
163  CNNLayer(const CNNLayer& other);
164 
165  IE_SUPPRESS_DEPRECATED_END
166 
167  /**
168  * @brief A virtual destructor
169  */
170  virtual ~CNNLayer();
171 
172  /**
173  * @brief Sets a layer to be fused with
174  *
175  * @param layer Reference to the layer to be fused with
176  */
177  IE_SUPPRESS_DEPRECATED_START_WIN
178  void fuse(Ptr& layer) {
179  _fusedWith = layer;
180  }
181  IE_SUPPRESS_DEPRECATED_END_WIN
182 
183  /**
184  * @brief Returns the first element of the input data for this layer
185  *
186  * @return A smart pointer to the input data element
187  */
188  virtual const DataPtr input() const;
189 
190  /**
191  * @brief Checks if the input data and layer data are legitimate
192  */
193  void validateLayer();
194 
195  /**
196  * @brief Parse string with float in accordance with IE rules
197  *
198  * @param str input string with float value
199  * @return float value if parsing was successful
200  * @throws InferenceEngineException in case of parsing error
201  */
202  static float ie_parse_float(const std::string& str);
203 
204  /**
205  * @brief Serializes a float with c_locale formatting;
206  * used for serializing default values
207  */
208  static std::string ie_serialize_float(float value);
209 
210  /**
211  * @brief Gets float value for the given parameter
212  *
213  * @param param name of the parameter to find
214  * @param def default value of the parameter if not found
215  * @return float value
216  */
217  float GetParamAsFloat(const char* param, float def) const;
218 
219  /**
220  * @brief Returns a float value for the given layer parameter
221  *
222  * @param param Name of the layer parameter
223  * @return A float value for the specified parameter
224  */
225  float GetParamAsFloat(const char* param) const;
226 
227  /**
228  * @brief Returns a vector of float values for the given parameter or returns the default value
229  *
230  * @param param Name of the layer parameter
231  * @param def Default value of the parameter if not found
232  * @return vector of float values
233  */
234  std::vector<float> GetParamAsFloats(const char* param, std::vector<float> def) const;
235 
236  /**
237  * @brief Returns a vector of float values for the given parameter
238  *
239  * @param param Name of the layer parameter
240  * @return vector of float values
241  */
242  std::vector<float> GetParamAsFloats(const char* param) const;
243 
244  /**
245  * @brief Returns an integer value for the given parameter or returns the default value
246  *
247  * @param param Name of the layer parameter
248  * @param def Default value of the parameter if not found
249  * @return An int value for the specified parameter
250  */
251  int GetParamAsInt(const char* param, int def) const;
252 
253  /**
254  * @brief Returns an integer value for the given parameter
255  *
256  * @param param Name of the layer parameter
257  * @return An int value for the specified parameter
258  */
259  int GetParamAsInt(const char* param) const;
260 
261  /**
262  * @brief Returns a vector of int values for the given parameter or returns the default value
263  *
264  * @param param Name of the layer parameter
265  * @param def Default value of the parameter if not found
266  * @return vector of int values
267  */
268  std::vector<int> GetParamAsInts(const char* param, std::vector<int> def) const;
269 
270  /**
271  * @brief Returns a vector of int values for the given parameter
272  *
273  * @param param Name of the layer parameter
274  * @return vector of int values
275  */
276  std::vector<int> GetParamAsInts(const char* param) const;
277 
278  /**
279  * @brief Returns an unsigned integer value for the given parameter or returns the default value
280  *
281  * @param param Name of the layer parameter
282  * @param def Default value of the parameter if not found
283  * @return An unsigned integer value for the specified parameter
284  */
285  unsigned int GetParamAsUInt(const char* param, unsigned int def) const;
286 
287  /**
288  * @brief Returns an unsigned integer value for the given parameter
289  *
290  * @param param Name of the layer parameter
291  * @return An unsigned integer value for the specified parameter
292  */
293  unsigned int GetParamAsUInt(const char* param) const;
294 
295  /**
296  * @brief Returns a vector of unsigned int values for the given parameter or returns the default value
297  *
298  * @param param Name of the layer parameter
299  * @param def Default value of the parameter if not found
300  * @return vector of unsigned int values
301  */
302  std::vector<unsigned int> GetParamAsUInts(const char* param, std::vector<unsigned int> def) const;
303 
304  /**
305  * @brief Returns a vector of unsigned int values for the given parameter
306  *
307  * @param param Name of the layer parameter
308  * @return vector of unsigned int values
309  */
310  std::vector<unsigned int> GetParamAsUInts(const char* param) const;
311 
312  /**
313  * @brief Returns a boolean value for the given parameter.
314  *
315  * The valid values are (true, false, 1, 0).
316  * @param param Name of the layer parameter
317  * @param def Default value of the parameter if not found
318  * @return A bool value for the specified parameter
319  */
320  bool GetParamAsBool(const char* param, bool def) const;
321 
322  /**
323  * @brief Returns a boolean value for the given parameter
324  *
325  * @param param Name of the layer parameter
326  * @return A bool value for the specified parameter
327  */
328  bool GetParamAsBool(const char* param) const;
329 
330  /**
331  * @brief Returns a string value for the given parameter or returns the default one
332  *
333  * @param param Name of the layer parameter
334  * @param def Default value of the parameter if not found
335  * @return A string value
336  */
337  std::string GetParamAsString(const char* param, const char* def) const;
338 
339  /**
340  * @brief Checks the param presence in the layer
341  *
342  * @param param Name of the layer parameter
343  * @return A bool indicating whether the parameter is present
344  */
345  bool CheckParamPresence(const char* param) const;
346 
347  /**
348  * @brief Returns a string value for the given parameter.
349  *
350  * Throws exception if parameter was not found.
351  * @param param Name of the layer parameter
352  * @return A string value
353  */
354  std::string GetParamAsString(const char* param) const;
355 
356  /**
357  * @brief Gets the parameter as a std::vector<std::string>
358  * @param param The parameter name
359  * @param def The default values if the parameter is not found
360  * @return The parameter as strings.
361  */
362  std::vector<std::string> GetParamAsStrings(const char* param, std::vector<std::string> def) const;
363 
364  /**
365  * @brief Map of pairs: (parameter name, parameter value)
366  */
367  std::map<std::string, std::string> params;
368 
369  /**
370  * @brief Map of pairs: (name, weights/biases blob)
371  */
372  std::map<std::string, Blob::Ptr> blobs;
373 };
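// Usage sketch (illustrative, not part of ie_layers.h): reading attributes of a
// parsed layer through the CNNLayer API declared above. The attribute names
// ("group", "dilations", "custom") and the "weights" blob key are examples and
// are not guaranteed to exist for every layer.
#include "ie_layers.h"

void InspectLayer(const InferenceEngine::CNNLayer::Ptr& layer) {
    // Raw IR attributes are stored as strings in "params"; the GetParamAs*
    // helpers convert them, optionally falling back to a default value.
    int group = layer->GetParamAsInt("group", 1);
    std::vector<int> dilations = layer->GetParamAsInts("dilations", {1, 1});
    bool hasCustom = layer->CheckParamPresence("custom");
    // Constant data such as weights and biases lives in the "blobs" map.
    auto weights = layer->blobs.find("weights");
    if (weights != layer->blobs.end()) {
        InferenceEngine::Blob::Ptr data = weights->second;
        (void)data;
    }
    (void)group; (void)dilations; (void)hasCustom;
}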
374 
375 /**
376  * @brief Alias for CNNLayer object
377  */
378 IE_SUPPRESS_DEPRECATED_START
379 using GenericLayer = class CNNLayer;
380 IE_SUPPRESS_DEPRECATED_END
381 
382 IE_SUPPRESS_DEPRECATED_START_WIN
383 
384 /**
385  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
386  * @brief This class represents a layer with Weights and/or Biases (e.g. Convolution/Fully Connected, etc.)
387  */
388 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(WeightableLayer): public CNNLayer {
389 public:
390  IE_SUPPRESS_DEPRECATED_START
391 
392  /**
393  * @brief A constructor. Constructs a WeightableLayer instance and initializes layer parameters with the given
394  * values
395  *
396  * @param prms Initial layer parameters
397  */
398  explicit WeightableLayer(const LayerParams & prms);
399 
400  IE_SUPPRESS_DEPRECATED_END
401 
402  /**
403  * @brief A pointer to a weights blob
404  */
405  Blob::Ptr _weights;
406  /**
407  * @brief A pointer to a biases blob
408  */
409  Blob::Ptr _biases;
410 
411  /**
412  * @brief Constructs a WeightableLayer instance and initializes layer parameters with the given values
413  */
414  using CNNLayer::CNNLayer;
415 
416  ~WeightableLayer() override;
417 };
418 
419 /**
420  * @brief A convenient way to declare a property with backward compatibility to 2D members
421  */
422 #define DEFINE_PROP(prop_name) \
423  PropertyVector<unsigned int> prop_name; \
424  unsigned int& prop_name##_x = prop_name.at(X_AXIS); \
425  unsigned int& prop_name##_y = prop_name.at(Y_AXIS)
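// For reference (illustrative expansion, not part of ie_layers.h):
// DEFINE_PROP(_kernel) declares
//
//     PropertyVector<unsigned int> _kernel;
//     unsigned int& _kernel_x = _kernel.at(X_AXIS);
//     unsigned int& _kernel_y = _kernel.at(Y_AXIS);
//
// so legacy 2D code can keep using the _x/_y aliases while N-dimensional code
// reads and writes the same storage through the PropertyVector itself.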
426 
427 /**
428  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
429  * @brief This class represents a standard 3D Convolution Layer
430  */
431 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ConvolutionLayer): public WeightableLayer {
432 public:
433  /**
434  * @brief A convolution kernel array [X, Y, Z, ...]
435  */
436  DEFINE_PROP(_kernel);
437  /**
438  * @brief A convolution paddings begin array [X, Y, Z, ...]
439  */
440  DEFINE_PROP(_padding);
441  /**
442  * @brief A convolution paddings end array [X, Y, Z, ...]
443  */
444  DEFINE_PROP(_pads_end);
445  /**
446  * @brief A convolution strides array [X, Y, Z, ...]
447  */
448  DEFINE_PROP(_stride);
449  /**
450  * @brief A convolution dilations array [X, Y, Z, ...]
451  */
452  DEFINE_PROP(_dilation);
453  /**
454  * @brief A number of output feature maps (size) generating the 3rd output dimension
455  */
456  unsigned int _out_depth = 0u;
457  /**
458  * @brief Number of groups
459  */
460  unsigned int _group = 1u;
461  /**
462  * @brief Auto padding type
463  */
464  std::string _auto_pad;
465 
466  IE_SUPPRESS_DEPRECATED_START
467 
468  /**
469  * @brief Creates a new ConvolutionLayer instance.
470  */
471  explicit ConvolutionLayer(const LayerParams& p)
472  : WeightableLayer(p), _kernel(2, 0u), _padding(2, 0u), _stride(2, 1u), _dilation(2, 1u) {}
473 
474  /**
475  * @brief assignment operator
476  */
477  ConvolutionLayer& operator=(const ConvolutionLayer& that) {
478  if (&that != this) {
479  WeightableLayer::operator=(that);
480  _kernel = that._kernel;
481  _padding = that._padding;
482  _pads_end = that._pads_end;
483  _stride = that._stride;
484  _dilation = that._dilation;
485  _out_depth = that._out_depth;
486  _group = that._group;
487  }
488  return *this;
489  }
490 
491  /**
492  * @brief copy constructor
493  */
494  ConvolutionLayer(const ConvolutionLayer& that): WeightableLayer(that) {
495  operator=(that);
496  }
497  /**
498  * @brief move constructor
499  */
500  ConvolutionLayer(ConvolutionLayer&&) = default;
501 
502  IE_SUPPRESS_DEPRECATED_END
503 
504  ~ConvolutionLayer() override;
505 };
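// Usage sketch (illustrative, not part of ie_layers.h): a generic CNNLayer can
// be downcast to the concrete layer class to reach its typed fields; "layer"
// here is an assumed CNNLayer::Ptr obtained from a parsed IR v7 network.
void ReadConvGeometry(const InferenceEngine::CNNLayer::Ptr& layer) {
    auto* conv = dynamic_cast<InferenceEngine::ConvolutionLayer*>(layer.get());
    if (conv == nullptr) return;                  // not a convolution layer
    unsigned int outChannels = conv->_out_depth;  // 3rd output dimension
    unsigned int kernelX = conv->_kernel_x;       // alias of conv->_kernel.at(X_AXIS)
    unsigned int strideY = conv->_stride_y;       // alias of conv->_stride.at(Y_AXIS)
    (void)outChannels; (void)kernelX; (void)strideY;
}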
506 
507 /**
508  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
509  * @brief This class represents a standard deconvolution layer
510  */
511 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(DeconvolutionLayer): public ConvolutionLayer {
512 public:
513  using ConvolutionLayer::ConvolutionLayer;
514  using ConvolutionLayer::operator=;
515 
516  ~DeconvolutionLayer() override;
517 };
518 
519 /**
520  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
521  * @brief This class represents a standard deformable convolution layer
522  */
523 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(DeformableConvolutionLayer): public ConvolutionLayer {
524 public:
525  using ConvolutionLayer::ConvolutionLayer;
526  using ConvolutionLayer::operator=;
527 
528  /**
529  * @brief Number of deformable groups
530  */
531  unsigned int _deformable_group = 1u;
532 
533  ~DeformableConvolutionLayer() override;
534 };
535 
536 /**
537  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
538  * @brief This class represents a standard pooling layer
539  */
540 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(PoolingLayer): public CNNLayer {
541 public:
542  /**
543  * @brief Pooling kernel array [X, Y, Z, ...]
544  */
545  DEFINE_PROP(_kernel);
546  /**
547  * @brief Pooling paddings begin array [X, Y, Z, ...]
548  */
549  DEFINE_PROP(_padding);
550  /**
551  * @brief Pooling paddings end array [X, Y, Z, ...]
552  */
553  DEFINE_PROP(_pads_end);
554  /**
555  * @brief Pooling strides array [X, Y, Z, ...]
556  */
557  DEFINE_PROP(_stride);
558 
559  /**
560  * @enum PoolType
561  * @brief Defines available pooling types
562  */
563  enum PoolType { MAX = 1, AVG = 2, STOCH = 3, ROI = 4, SPACIAL_PYRAMID = 5 };
564 
565  /**
566  * @brief A pooling type
567  */
568  PoolType _type = MAX;
569 
570  /**
571  * @brief A flag that indicates if padding is excluded or not
572  */
573  bool _exclude_pad = false;
574  /**
575  * @brief Auto padding type
576  */
577  std::string _auto_pad;
578 
579  IE_SUPPRESS_DEPRECATED_START
580 
581  /**
582  * @brief Creates a new PoolingLayer instance.
583  */
584  explicit PoolingLayer(const LayerParams& p): CNNLayer(p), _kernel(2, 0u), _padding(2, 0u), _stride(2, 0u) {}
585 
586  /**
587  * @brief assignment operator
588  */
589  PoolingLayer& operator=(const PoolingLayer& that) {
590  if (&that != this) {
591  CNNLayer::operator=(that);
592  _kernel = that._kernel;
593  _padding = that._padding;
594  _pads_end = that._pads_end;
595  _stride = that._stride;
596  _type = that._type;
597  _exclude_pad = that._exclude_pad;
598  }
599  return *this;
600  }
601  /**
602  * @brief copy constructor
603  */
604  PoolingLayer(const PoolingLayer& that): CNNLayer(that) {
605  operator=(that);
606  }
607 
608  /**
609  * @brief move constructor
610  */
611  PoolingLayer(PoolingLayer&&) = default;
612 
613  IE_SUPPRESS_DEPRECATED_END
614 
615  ~PoolingLayer() override;
616 };
617 
618 /**
619  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
620  * @brief This class represents a standard binary convolution layer
621  */
622 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(BinaryConvolutionLayer): public WeightableLayer {
623 public:
624  /**
625  * @enum eBinaryConvolutionMode
626  * @brief Defines possible modes of binary convolution operation
627  */
628  enum eBinaryConvolutionMode { xnor_popcount = 0 };
629 
630  /**
631  * @brief Mode of binary convolution operation
632  */
633  eBinaryConvolutionMode _mode = xnor_popcount;
634 
635  /**
636  * @brief A number of input feature maps (size) generating the 3rd input dimension
637  */
638  unsigned int _in_depth = 0u;
639 
640  /**
641  * @brief A pad value which is used to fill pad area
642  */
643  float _pad_value = 0.0f;
644 
645  /**
646  * @brief A convolution kernel array [X, Y, Z, ...]
647  */
648  DEFINE_PROP(_kernel);
649  /**
650  * @brief A convolution paddings begin array [X, Y, Z, ...]
651  */
652  DEFINE_PROP(_padding);
653  /**
654  * @brief A convolution paddings end array [X, Y, Z, ...]
655  */
656  DEFINE_PROP(_pads_end);
657  /**
658  * @brief A convolution strides array [X, Y, Z, ...]
659  */
660  DEFINE_PROP(_stride);
661  /**
662  * @brief A convolution dilations array [X, Y, Z, ...]
663  */
664  DEFINE_PROP(_dilation);
665  /**
666  * @brief A number of output feature maps (size) generating the 3rd output dimension
667  */
668  unsigned int _out_depth = 0u;
669  /**
670  * @brief Number of groups
671  */
672  unsigned int _group = 1u;
673  /**
674  * @brief Auto padding type
675  */
676  std::string _auto_pad;
677 
678  IE_SUPPRESS_DEPRECATED_START
679 
680  /**
681  * @brief Creates a new BinaryConvolutionLayer instance.
682  */
683  explicit BinaryConvolutionLayer(const LayerParams& p)
684  : WeightableLayer(p), _kernel(2, 0u), _padding(2, 0u), _stride(2, 1u), _dilation(2, 1u) {}
685 
686  /**
687  * @brief assignment operator
688  */
689  BinaryConvolutionLayer& operator=(const BinaryConvolutionLayer& that) {
690  if (&that != this) {
691  WeightableLayer::operator=(that);
692  _kernel = that._kernel;
693  _padding = that._padding;
694  _pads_end = that._pads_end;
695  _stride = that._stride;
696  _dilation = that._dilation;
697  _out_depth = that._out_depth;
698  _group = that._group;
699  _mode = that._mode;
700  _in_depth = that._in_depth;
701  _pad_value = that._pad_value;
702  }
703  return *this;
704  }
705  /**
706  * @brief copy constructor
707  */
708  BinaryConvolutionLayer(const BinaryConvolutionLayer& that): WeightableLayer(that) {
709  operator=(that);
710  }
711  /**
712  * @brief move constructor
713  */
714  BinaryConvolutionLayer(BinaryConvolutionLayer&&) = default;
715 
716  IE_SUPPRESS_DEPRECATED_END
717 
718  ~BinaryConvolutionLayer() override;
719 };
720 
721 #undef DEFINE_PROP
722 
723 /**
724  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
725  * @brief This class represents a fully connected layer
726  */
727 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(FullyConnectedLayer): public WeightableLayer {
728 public:
729  /**
730  * @brief A size of output
731  */
732  unsigned int _out_num = 0;
733 
734  /**
735  * @brief Creates a new FullyConnectedLayer instance and initializes layer parameters with the given values.
736  */
737  using WeightableLayer::WeightableLayer;
738 
739  ~FullyConnectedLayer() override;
740 };
741 
742 /**
743  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
744  * @brief This class represents concatenation layer
745  *
746  * Takes as input several data elements and merges them to one using the supplied axis
747  */
748 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ConcatLayer): public CNNLayer {
749 public:
750  /**
751  * @brief An axis on which concatenation operation is performed
752  */
753  unsigned int _axis = 1;
754 
755  /**
756  * @brief Creates a new ConcatLayer instance and initializes layer parameters with the given values.
757  *
758  * If a batch is used, it must also be specified as an input dimension
759  * In the current implementation, 1 means channels and 0 means batch
760  */
761  using CNNLayer::CNNLayer;
762 
763  ~ConcatLayer() override;
764 };
765 
766 /**
767  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
768  * @brief This class represents a layer that evenly splits the input into the supplied outputs
769  */
770 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(SplitLayer): public CNNLayer {
771 public:
772  /**
773  * @brief An axis on which split operation is performed
774  */
775  unsigned int _axis = 1;
776 
777  /**
778  * @brief Creates a new SplitLayer instance.
779  */
780  using CNNLayer::CNNLayer;
781 
782  ~SplitLayer() override;
783 };
784 
785 /**
786  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
787  * @brief This class represents a Local Response Normalization (LRN) Layer
788  */
789 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(NormLayer): public CNNLayer {
790 public:
791  /**
792  * @brief Response size
793  */
794  unsigned int _size = 0;
795  /**
796  * @brief K
797  */
798  unsigned int _k = 1;
799  /**
800  * @brief Alpha coefficient
801  */
802  float _alpha = 0;
803  /**
804  * @brief Beta coefficient
805  */
806  float _beta = 0;
807  /**
808  * @brief Flag to specify normalization across feature maps (true) or across channels
809  */
810  bool _isAcrossMaps = false;
811 
812  /**
813  * @brief Creates a new NormLayer instance.
814  */
815  using CNNLayer::CNNLayer;
816 
817  ~NormLayer() override;
818 };
819 
820 /**
821  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
822  * @brief This class represents standard softmax Layer
823  */
824 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(SoftMaxLayer): public CNNLayer {
825 public:
826  /**
827  * @brief Axis number for a softmax operation
828  */
829  int axis = 1;
830  /**
831  * @brief Creates a new SoftMaxLayer instance.
832  */
833  using CNNLayer::CNNLayer;
834 
835  ~SoftMaxLayer() override;
836 };
837 
838 /**
839  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
840  * @brief This class represents standard GRN Layer
841  */
842 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(GRNLayer): public CNNLayer {
843 public:
844  /**
845  * @brief A default constructor. Creates a new GRNLayer instance and initializes layer parameters with the given
846  * values.
847  */
848  using CNNLayer::CNNLayer;
849 
850  /**
851  * @brief Bias for squares sum
852  */
853  float bias = 0.f;
854 
855  ~GRNLayer() override;
856 };
857 
858 /**
859  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
860  * @brief This class represents standard MVN Layer
861  */
862 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(MVNLayer): public CNNLayer {
863 public:
864  /**
865  * @brief A default constructor. Creates a new MVNLayer instance and initializes layer parameters with the given
866  * values.
867  */
868  using CNNLayer::CNNLayer;
869 
870  /**
871  * @brief Indicate that mean value is calculated across channels
872  */
873  int across_channels = 0;
874 
875  /**
876  * @brief Indicate that the result needs to be normalized
877  */
878  int normalize = 1;
879 
880  ~MVNLayer() override;
881 };
882 
883 /**
884  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
885  * @brief This class represents a Rectified Linear activation layer
886  */
887 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ReLULayer): public CNNLayer {
888 public:
889  /**
890  * @brief The negative slope is used to handle negative inputs instead of setting them to 0
891  */
892  float negative_slope = 0.0f;
893 
894  /**
895  * @brief Creates a new ReLULayer instance.
896  */
897  using CNNLayer::CNNLayer;
898 
899  ~ReLULayer() override;
900 };
901 
902 /**
903  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
904  * @brief This class represents a Clamp activation layer
905  *
906  * Clamps all tensor elements into the range [min_value, max_value]
907  */
908 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ClampLayer): public CNNLayer {
909 public:
910  /**
911  * @brief A minimum value
912  */
913  float min_value = 0.0f;
914 
915  /**
916  * @brief A maximum value
917  */
918  float max_value = 1.0f;
919  /**
920  * @brief Creates a new ClampLayer instance.
921  */
922  using CNNLayer::CNNLayer;
923 
924  ~ClampLayer() override;
925 };
926 
927 /**
928  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
929  * @brief This class represents a ReLU6 activation layer
930  *
931  * Clamps all tensor elements into the range [0, 6.0]
932  */
933 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ReLU6Layer): public ClampLayer {
934 public:
935  IE_SUPPRESS_DEPRECATED_START
936  /**
937  * @brief A constructor with common layer parameters
938  * @param prms The common layer parameters
939  */
940  explicit ReLU6Layer(const LayerParams& prms): ClampLayer(prms) {
941  max_value = 6.0f;
942  }
943  IE_SUPPRESS_DEPRECATED_END
944 
945  ~ReLU6Layer() override;
946 };
947 
948 /**
949  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
950  * @brief This class represents an element wise operation layer
951  */
952 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(EltwiseLayer): public CNNLayer {
953 public:
954  /**
955  * @enum eOperation
956  * @brief Defines possible operations that can be used
957  */
958  enum eOperation {
959  Sum = 0,
960  Prod,
961  Max,
962  Sub,
963  Min,
964  Div,
965  Squared_diff,
966  Floor_mod,
967  Pow,
968  Equal,
969  Not_equal,
970  Less,
971  Less_equal,
972  Greater,
973  Greater_equal,
974  Logical_AND,
975  Logical_OR,
976  Logical_XOR,
977  Logical_NOT,
978  Mean
979  };
980 
981  /**
982  * @brief A type of the operation to use
983  */
984  eOperation _operation = Sum;
985 
986  /**
987  * @brief A vector of coefficients to scale the operands
988  */
989  std::vector<float> coeff;
990 
991  /**
992  * @brief Creates a new EltwiseLayer instance.
993  */
994  using CNNLayer::CNNLayer;
995 
996  ~EltwiseLayer() override;
997 };
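// Usage sketch (illustrative, not part of ie_layers.h): per the comments above,
// "coeff" scales the operands, so a Sum with coefficients {1, -1} amounts to a
// weighted subtraction of the second input from the first.
void MakeWeightedSum(InferenceEngine::EltwiseLayer& eltwise) {
    eltwise._operation = InferenceEngine::EltwiseLayer::Sum;
    eltwise.coeff = {1.0f, -1.0f};  // one scale factor per input
}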
998 
999 /**
1000  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
1001  * @brief This class represents a standard crop layer
1002  */
1003 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(CropLayer): public CNNLayer {
1004 public:
1005  /**
1006  * @brief A vector of dimensions for cropping
1007  */
1008  std::vector<int> axis;
1009  /**
1010  * @brief A vector of dimensions to be preserved
1011  */
1012  std::vector<int> dim;
1013  /**
1014  * @brief A vector of offsets for each dimension
1015  */
1016  std::vector<int> offset;
1017 
1018  /**
1019  * @brief Creates a new CropLayer instance.
1020  */
1021  using CNNLayer::CNNLayer;
1022 
1023  ~CropLayer() override;
1024 };
1025 
1026 /**
1027  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
1028  * @brief This class represents a standard reshape layer
1029  */
1030 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ReshapeLayer): public CNNLayer {
1031 public:
1032  /**
1033  * @brief A vector of sizes of the shape
1034  */
1035  std::vector<int> shape;
1036  /**
1037  * @brief A number of axis to be taken for a reshape
1038  */
1039  int axis = 0;
1040  /**
1041  * @brief A number of the first axes to be taken for the reshape
1042  */
1043  int num_axes = -1;
1044 
1045  /**
1046  * @brief Creates a new ReshapeLayer instance.
1047  */
1048  using CNNLayer::CNNLayer;
1049 
1050  ~ReshapeLayer() override;
1051 };
1052 
1053 /**
1054  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
1055  * @brief This class represents a standard Tile Layer
1056  */
1057 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(TileLayer): public CNNLayer {
1058 public:
1059  /**
1060  * @brief An index of the axis to tile
1061  */
1062  int axis = -1;
1063  /**
1064  * @brief A number of copies to be made
1065  */
1066  int tiles = -1;
1067 
1068  /**
1069  * @brief Creates a new TileLayer instance.
1070  */
1071  using CNNLayer::CNNLayer;
1072 
1073  ~TileLayer() override;
1074 };
1075 
1076 /**
1077  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
1078  * @brief This class represents a Layer which performs Scale and Shift
1079  */
1080 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ScaleShiftLayer): public WeightableLayer {
1081 public:
1082  /**
1083  * @brief A flag that indicates if the same value is used for all the features. If false, the value is used pixel
1084  * wise
1085  */
1086  unsigned int _broadcast = 0;
1087 
1088  /**
1089  * @brief Creates a new ScaleShiftLayer instance.
1090  */
1091  using WeightableLayer::WeightableLayer;
1092 
1093  ~ScaleShiftLayer() override;
1094 };
1095 
1096 /**
1097  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
1098  * @brief This class represents TensorIterator layer
1099  */
1100 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(TensorIterator): public CNNLayer {
1101 public:
1102  struct PortMap {
1103  // Data map rule
1104  int from; /**< Index of external data from ins/outs fields of CNNLayer */
1105  int to; /**< Index of internal data in iterator body */
1106 
1107  // Iteration rule
1108  int axis; /**< Axis to iterate through */
1109  int stride; /**< Stride to iterate through */
1110  int start; /**< Start index of iteration range */
1111  int end; /**< Last index of iteration range */
1112  int part_size; /**< Part size which will be transferred to the body subnetwork */
1113  };
1114 
1115  /**
1116  * @brief Describes a tensor iterator body
1117  */
1118  struct Body {
1119  std::vector<DataPtr> inputs; //!< Inputs data
1120  std::vector<DataPtr> outputs; //!< Outputs data
1121  };
1122 
1123  std::vector<PortMap> input_port_map; //!< Input ports map
1124  std::vector<PortMap> output_port_map; //!< Output ports map
1125  std::vector<PortMap> back_edges; //!< Back edges map
1126 
1127  Body body; //!< A Tensor Iterator body
1128 
1129  using CNNLayer::CNNLayer;
1130 
1131  ~TensorIterator() override;
1132 };
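// Usage sketch (illustrative, not part of ie_layers.h): a PortMap rule that
// slices external input 0 along axis 1, one element per iteration, and feeds
// it to input 0 of the body subnetwork. All index values are hypothetical.
void AddTimeStepRule(InferenceEngine::TensorIterator& ti) {
    InferenceEngine::TensorIterator::PortMap rule = {};
    rule.from = 0;       // index into the TensorIterator's own ins/outs
    rule.to = 0;         // index into the body inputs
    rule.axis = 1;       // axis to iterate through
    rule.stride = 1;     // step of one element per iteration
    rule.start = 0;      // first index of the iteration range
    rule.end = -1;       // last index of the iteration range
    rule.part_size = 1;  // size of the part transferred to the body
    ti.input_port_map.push_back(rule);
}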
1133 
1134 /**
1135  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
1136  * @brief Base class for recurrent cell layers
1137  */
1138 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(RNNCellBase): public WeightableLayer {
1139 public:
1140  using WeightableLayer::WeightableLayer;
1141 
1142  /**
1143  * @brief Direct type of recurrent cell (including subtypes)
1144  * Description of particular cell semantics is in LSTMCell, GRUCell, RNNCell.
1145  */
1146  enum CellType {
1147  LSTM, /**< Original LSTM cell */
1148  GRU, /**< Original GRU cell */
1149  RNN, /**< Original RNN cell */
1150  GRU_LBR, /**< GRU cell modification. "Linear before reset" */
1151  };
1152 
1153  /** @copybrief CellType */
1154  CellType cellType = LSTM;
1155 
1156  /**
1157  * @brief Size of hidden state data
1158  *
1159  * In the batched case, the output state tensor has the shape [N, hidden_size]
1160  */
1161  int hidden_size = 0;
1162 
1163  /**
1164  * @brief Clip data into range [-clip, clip] on input of activations
1165  *
1166  * clip==0.0f means no clipping
1167  */
1168  float clip = 0.0f;
1169  /**
1170  * @brief Activations used inside recurrent cell
1171  *
1172  * Valid values: sigmoid, tanh, relu
1173  */
1174  std::vector<std::string> activations;
1175 
1176  /**
1177  * @brief Alpha parameters of activations
1178  *
1179  * Respective to activation list.
1180  */
1181  std::vector<float> activation_alpha;
1182 
1183  /**
1184  * @brief Beta parameters of activations
1185  *
1186  * Respective to activation list.
1187  */
1188  std::vector<float> activation_beta;
1189 
1190  ~RNNCellBase() override;
1191 };
1192 
1193 /**
1194  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
1195  * @brief LSTM Cell layer
1196  *
1197  * G - number of gates (=4)
1198  * N - batch size
1199  * S - state size (=hidden_size)
1200  *
1201  * Inputs:
1202  * [N,D] Xt - input data
1203  * [N,S] Ht-1 - initial hidden state
1204  * [N,S] Ct-1 - initial cell state
1205  *
1206  * Outputs:
1207  * [N,S] Ht - out hidden state
1208  * [N,S] Ct - out cell state
1209  *
1210  * Weights:
1211  * - weights [G,S,D+S]
1212  * - biases [G,S]
1213  * NB! gates order is FICO {forget, input, candidate, output}
1214  *
1215  * activations is {_f, _g, _h}
1216  * default: {_f=sigm, _g=tanh, _h=tanh}
1217  *
1218  * Equations:
1219  *
1220  * * - matrix mult
1221  * (.) - eltwise mult
1222  * [,] - concatenation
1223  *
1224  * - ft = _f(Wf*[Ht-1, Xt] + Bf)
1225  * - it = _f(Wi*[Ht-1, Xt] + Bi)
1226  * - ct = _g(Wc*[Ht-1, Xt] + Bc)
1227  * - ot = _f(Wo*[Ht-1, Xt] + Bo)
1228  * - Ct = ft (.) Ct-1 + it (.) ct
1229  * - Ht = ot (.) _h(Ct)
1230  */
1231 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(LSTMCell): public RNNCellBase {
1232 public:
1233  using RNNCellBase::RNNCellBase;
1234  using RNNCellBase::operator=;
1235 
1236  ~LSTMCell() override;
1237 };
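// Usage sketch (illustrative, not part of ie_layers.h): filling the RNNCellBase
// fields for an LSTM cell as described above; hidden_size is an example value,
// and the activation names follow the "sigmoid, tanh, relu" list documented in
// RNNCellBase.
void ConfigureLstmCell(InferenceEngine::LSTMCell& cell) {
    cell.cellType = InferenceEngine::RNNCellBase::LSTM;
    cell.hidden_size = 128;                          // S in the shape comments
    cell.clip = 0.0f;                                // 0.0f disables clipping
    cell.activations = {"sigmoid", "tanh", "tanh"};  // _f, _g, _h
}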
1238 
1239 /**
1240  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
1241  * @brief GRU Cell layer
1242  *
1243  * G - number of gates (=3)
1244  * N - batch size
1245  * S - state size (=hidden_size)
1246  *
1247  * Inputs:
1248  * [N,D] Xt - input data
1249  * [N,S] Ht-1 - initial hidden state
1250  *
1251  * Outputs:
1252  * [N,S] Ht - out hidden state
1253  *
1254  * Weights:
1255  * - weights [G,S,D+S]
1256  * - biases [G,S]
1257  * NB! gates order is ZRH {update, reset, output}
1258  *
1259  * activations is {_f, _g}
1260  * default: {_f=sigm, _g=tanh}
1261  *
1262  * Equations:
1263  *
1264  * * - matrix mult
1265  * (.) - eltwise mult
1266  * [,] - concatenation
1267  *
1268  * - zt = _f(Wz*[Ht-1, Xt] + Bz)
1269  * - rt = _f(Wr*[Ht-1, Xt] + Br)
1270  * - ht = _g(Wh*[rt (.) Ht-1, Xt] + Bh)
1271  * - Ht = (1 - zt) (.) ht + zt (.) Ht-1
1272  */
1273 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(GRUCell): public RNNCellBase {
1274 public:
1275  using RNNCellBase::RNNCellBase;
1276  using RNNCellBase::operator=;
1277 
1278  ~GRUCell() override;
1279 };
1280 
1281 /**
1282  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
1283  * @brief RNN Cell layer
1284  *
1285  * G - number of gates (=1)
1286  * N - batch size
1287  * S - state size (=hidden_size)
1288  *
1289  * Inputs:
1290  * [N,D] Xt - input data
1291  * [N,S] Ht-1 - initial hidden state
1292  *
1293  * Outputs:
1294  * [N,S] Ht - out hidden state
1295  *
1296  * Weights:
1297  * - weights [G,S,D+S]
1298  * - biases [G,S]
1299  *
1300  * activations is {_f}
1301  * default: {_f=tanh}
1302  *
1303  * Equations:
1304  *
1305  * * - matrix mult
1306  * [,] - concatenation
1307  *
1308  * - Ht = _f(Wi*[Ht-1, Xt] + Bi)
1309  */
1310 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(RNNCell): public RNNCellBase {
1311 public:
1312  using RNNCellBase::RNNCellBase;
1313  using RNNCellBase::operator=;
1314 
1315  ~RNNCell() override;
1316 };
1317 
1318 /**
1319  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
1320  * @brief Sequence of recurrent cells
1321  *
1322  * N - batch size
1323  * T - sequence size
1324  * S - state size (=hidden_size)
1325  * NS - num of state tensors (LSTM=2, GRU/RNN=1)
1326  * ND - num of directions (BDR=2, FWD/BWD=1)
1327  *
1328  * Inputs:
1329  * [N,T,D] Xt - input data
1330  * [ND,N,S] Ht-1 - initial hidden state
1331  * [ND,N,S] Ct-1 - initial cell state // if NS==2
1332  * [N] SL - sequence length (optional input)
1333  *
1334  * Outputs:
1335  * [ND,N,T,S] Ht - output hidden state sequence
1336  * [ND,N,S] Ht - last hidden state
1337  * [ND,N,S] Ct - last cell state // if NS==2
1338  *
1339  * NB! if axis==0 batch and sequence dimensions are swapped (N <-> T) for input and output tensors
1340  *
1341  * Weights:
1342  * - weights [ND,G,S,D+S]
1343  * - biases [ND,G,S]
1344  * NB! if ND==2 weights are concatenated cell weights [forward_cell_weights, backward_cell_weights]
1345  *
1346  */
1347 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(RNNSequenceLayer): public RNNCellBase {
1348 public:
1349  using RNNCellBase::RNNCellBase;
1350 
1351  /**
1352  * @brief An axis by which iteration is performed
1353  *
1354  * axis=0 means first input/output data blob dimension is sequence
1355  * axis=1 means first input/output data blob dimension is batch
1356  */
1357  unsigned int axis = 1;
1358 
1359  /**
1360  * @brief Direction of iteration through sequence dimension
1361  */
1362  enum Direction {
1363  FWD, /**< Forward mode. Iterate starts from index 0 with step 1. */
1364  BWD, /**< Backward mode. Iterate starts from last index with step -1. */
1365  BDR /**< Bidirectional mode. First is forward pass, second is backward. */
1366  };
1367 
1368  /** @copybrief Direction */
1369  Direction direction = FWD;
1370 
1371  ~RNNSequenceLayer() override;
1372 };
1373 
1374 /**
1375  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
1376  * @brief This class represents a Parametric Rectified Linear Unit (PReLU) activation layer
1377  */
1378 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(PReLULayer): public WeightableLayer {
1379 public:
1380  /**
1381  * @brief A flag that indicates if the same negative_slope value is used for all the features. If false, the value
1382  * is used pixel wise
1383  */
1384  bool _channel_shared = false;
1385 
1386  /**
1387  * @brief A default constructor. Creates a new PReLULayer instance and initializes layer parameters with the given
1388  * values.
1389  *
1390  * @param prms Initial layer parameters
1391  */
1392  using WeightableLayer::WeightableLayer;
1393 
1394  ~PReLULayer() override;
1395 };
1396 
1397 /**
1398  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
1399  * @brief This class represents a standard Power Layer
1400  *
1401  * Formula is: output = (offset + scale * input) ^ power
1402  */
1403 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(PowerLayer): public CNNLayer {
1404 public:
1405  /**
1406  * @brief An exponent value
1407  */
1408  float power = 1.f;
1409  /**
1410  * @brief A scale factor
1411  */
1412  float scale = 1.f;
1413  /**
1414  * @brief An offset value
1415  */
1416  float offset = 0.f;
1417 
1418  /**
1419  * @brief Creates a new PowerLayer instance.
1420  */
1421  using CNNLayer::CNNLayer;
1422 
1423  ~PowerLayer() override;
1424 };
1425 
1426 /**
1427  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
1428  * @brief This class represents a Batch Normalization Layer
1429  */
1430 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(BatchNormalizationLayer): public WeightableLayer {
1431 public:
1432  /**
1433  * @brief A small value to add to the variance estimate to avoid division by zero
1434  */
1435  float epsilon = 1e-3f;
1436 
1437  /**
1438  * @brief Creates a new BatchNormalizationLayer instance.
1439  */
1440  using WeightableLayer::WeightableLayer;
1441 
1442  ~BatchNormalizationLayer() override;
1443 };
1444 
1445 /**
1446  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
1447  * @brief This class represents a general matrix multiplication operation layer
1448  *
1449  * Formula is: dst := alpha*src1*src2 + beta*src3
1450  */
1451 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(GemmLayer): public CNNLayer {
1452 public:
1453  /**
1454  * @brief A scale factor of src1 matrix
1455  */
1456  float alpha = 1.f;
1457  /**
1458  * @brief A scale factor of src3 matrix
1459  */
1460  float beta = 1.f;
1461  /**
1462  * @brief A flag that indicates if the src1 matrix is to be transposed
1463  */
1464  bool transpose_a = false;
1465  /**
1466  * @brief A flag that indicates if the src2 matrix is to be transposed
1467  */
1468  bool transpose_b = false;
1469  /**
1470  * @brief Creates a new GemmLayer instance.
1471  */
1472  using CNNLayer::CNNLayer;
1473 
1474  ~GemmLayer() override;
1475 };
1476 
1477 /**
1478  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
1479  * @brief This class represents a standard Pad layer
1480  *
1481  * Adds paddings to input tensor
1482  */
1483 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(PadLayer): public CNNLayer {
1484 public:
1485  /**
1486  * @enum ePadMode
1487  * @brief Defines possible modes of pad operation
1488  */
1489  enum ePadMode { Constant = 0, Edge, Reflect, Symmetric };
1490 
1491  /**
1492  * @brief Size of padding in the beginning of each axis
1493  */
1494  PropertyVector<unsigned int> pads_begin;
1495  /**
1496  * @brief Size of padding in the end of each axis
1497  */
1498  PropertyVector<unsigned int> pads_end;
1499  /**
1500  * @brief Mode of pad operation
1501  */
1502  ePadMode pad_mode = Constant;
1503  /**
1504  * @brief A pad value which is used for filling in Constant mode
1505  */
1506  float pad_value = 0.0f;
1507  /**
1508  * @brief Creates a new PadLayer instance.
1509  */
1510  using CNNLayer::CNNLayer;
1511 
1512  ~PadLayer() override;
1513 };
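// Usage sketch (illustrative, not part of ie_layers.h): a Pad layer that adds
// one zero-valued element at the beginning and end of each of two spatial axes.
void ConfigureZeroPad(InferenceEngine::PadLayer& pad) {
    pad.pads_begin = InferenceEngine::PropertyVector<unsigned int>(2, 1u);
    pad.pads_end = InferenceEngine::PropertyVector<unsigned int>(2, 1u);
    pad.pad_mode = InferenceEngine::PadLayer::Constant;  // fill with pad_value
    pad.pad_value = 0.0f;
}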
1514 
1515 /**
1516  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
1517  * @brief This class represents a standard Gather layer
1518  *
1519  * Gather slices from Dictionary according to Indexes
1520  */
1521 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(GatherLayer): public CNNLayer {
1522 public:
1523  /**
1524  * @brief The axis in Dictionary to gather Indexes from
1525  */
1526  int axis = 0;
1527  /**
1528  * @brief Creates a new GatherLayer instance.
1529  */
1530  using CNNLayer::CNNLayer;
1531 
1532  ~GatherLayer() override;
1533 };
1534 
1535 /**
1536  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
1537  * @brief This class represents a standard Strided Slice layer
1538  *
1539  * Strided Slice picks from the input tensor according to parameters
1540  */
1541 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(StridedSliceLayer): public CNNLayer {
1542 public:
1543  /**
1544  * @brief The begin_mask is a bitmask where bit i being 0 means
1545  * to ignore the begin value and instead use the default value
1546  */
1547  std::string begin_mask;
1548  /**
1549  * @brief Analogous to begin_mask
1550  */
1551  std::string end_mask;
1552  /**
1553  * @brief The ellipsis_mask is a bitmask where bit i being 1 means
1554  * the i-th is actually an ellipsis
1555  */
1556  std::string ellipsis_mask;
1557  /**
1558  * @brief The new_axis_mask is a bitmask where bit i being 1 means
1559  * the i-th position creates a new 1 dimension shape
1560  */
1561  std::string new_axis_mask;
1562  /**
1563  * @brief The shrink_axis_mask is a bitmask where bit i being 1 means
1564  * the i-th position shrinks the dimensionality
1565  */
1566  std::string shrink_axis_mask;
1567 
1568  /**
1569  * @brief Creates a new StridedSliceLayer instance.
1570  */
1571  using CNNLayer::CNNLayer;
1572 
1573  ~StridedSliceLayer() override;
1574 };
1575 
1576 /**
1577  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
1578  * @brief This class represents a standard Shuffle Channels layer
1579  * Shuffle Channels picks from the input tensor according to parameters
1580  */
1581 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ShuffleChannelsLayer): public CNNLayer {
1582 public:
1583  /**
1584  * @brief The axis in tensor to shuffle channels
1585  */
1586  int axis = 1;
1587 
1588  /**
1589  * @brief The group of output shuffled channels
1590  */
1591  unsigned int group = 1;
1592 
1593  /**
1594  * @brief Creates a new ShuffleChannelsLayer instance.
1595  */
1596  using CNNLayer::CNNLayer;
1597 
1598  ~ShuffleChannelsLayer() override;
1599 };
1600 
1601 /**
1602  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
1603  * @brief This class represents a standard Depth To Space layer
1604  * Depth To Space picks from the input tensor according to parameters
1605  */
1606 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(DepthToSpaceLayer): public CNNLayer {
1607 public:
1608  /**
1609  * @brief The group of output shuffled channels
1610  */
1611  unsigned int block_size = 1;
1612 
1613  /**
1614  * @brief Creates a new DepthToSpaceLayer instance.
1615  */
1616  using CNNLayer::CNNLayer;
1617 
1618  ~DepthToSpaceLayer() override;
1619 };
1620 
1621 /**
1622  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
1623  * @brief This class represents a standard Space To Depth layer
1624  * Space To Depth picks from the input tensor according to parameters
1625  */
1626 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(SpaceToDepthLayer): public CNNLayer {
1627 public:
1628  /**
1629  * @brief The group of output Space To Depth
1630  */
1631  unsigned int block_size = 1;
1632 
1633  /**
1634  * @brief Creates a new SpaceToDepthLayer instance.
1635  */
1636  using CNNLayer::CNNLayer;
1637 
1638  ~SpaceToDepthLayer() override;
1639 };
1640 
1641 /**
1642  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
1643  * @brief This class represents a standard Space To Batch layer
1644  *
1645  * Space To Batch picks from the input tensor according to parameters
1646  */
1647 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(SpaceToBatchLayer): public CNNLayer {
1648 public:
1649  /**
1650  * @brief Spatial dimensions blocks sizes
1651  */
1652  std::vector<size_t> _block_shape;
1653 
1654  /**
1655  * @brief Size of padding in the beginning of each axis
1656  */
1657  std::vector<size_t> _pads_begin;
1658  /**
1659  * @brief Size of padding in the end of each axis
1660  */
1661  std::vector<size_t> _pads_end;
1662 
1663  /**
1664  * @brief Creates a new SpaceToBatchLayer instance.
1665  */
1666  using CNNLayer::CNNLayer;
1667 
1668  ~SpaceToBatchLayer() override;
1669 };
1670 
1671 /**
1672  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
1673  * @brief This class represents a standard Batch To Space layer
1674  *
1675  * Batch To Space picks from the input tensor according to parameters
1676  */
1677 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(BatchToSpaceLayer): public CNNLayer {
1678 public:
1679  /**
1680  * @brief Spatial dimensions blocks sizes
1681  */
1682  std::vector<size_t> _block_shape;
1683 
1684  /**
1685  * @brief It specifies how many elements to crop from the intermediate result
1686  * across the spatial dimensions
1687  */
1688  std::vector<size_t> _crops_begin;
1689 
1690  /**
1691  * @brief It specifies how many elements to crop from the intermediate result
1692  * across the spatial dimensions
1693  */
1694  std::vector<size_t> _crops_end;
1695 
1696  /**
1697  * @brief Creates a new BatchToSpaceLayer instance.
1698  */
1699  using CNNLayer::CNNLayer;
1700 
1701  ~BatchToSpaceLayer() override;
1702 };
1703 
1704 /**
1705  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
1706  * @brief This class represents SparseFillEmptyRows layer
1707  *
1708  * SparseFillEmptyRows fills empty rows in a sparse tensor
1709  */
1710 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(SparseFillEmptyRowsLayer): public CNNLayer {
1711 public:
1712  /**
1713  * @brief Creates a new SparseFillEmptyRowsLayer instance.
1714  */
1715  using CNNLayer::CNNLayer;
1716 
1717  ~SparseFillEmptyRowsLayer() override;
1718 };
1719 
1720 /**
1721  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
1722  * @brief This class represents SparseSegmentMean(SqrtN, Sum) layers
1723  * SparseSegmentMean(SqrtN, Sum) layer reduces data along sparse segments of a tensor.
1724  */
1725 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(SparseSegmentReduceLayer): public CNNLayer {
1726 public:
1727  /**
1728  * @brief Creates a new SparseSegmentReduceLayer instance.
1729  */
1730  using CNNLayer::CNNLayer;
1731 
1732  ~SparseSegmentReduceLayer() override;
1733 };
1734 
1735 /**
1736  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
1737  * @brief This class represents ExperimentalSparseWeightedReduce layer
1738  * ExperimentalSparseWeightedReduce layer reduces data along sparse segments of a tensor.
1739  */
1740 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ExperimentalSparseWeightedReduceLayer) : public CNNLayer {
1741 public:
1742  /**
1743  * @brief Creates a new ExperimentalSparseWeightedReduceLayer instance.
1744  */
1745  using CNNLayer::CNNLayer;
1746 
1747  ~ExperimentalSparseWeightedReduceLayer() override;
1748 };
1749 
1750 /**
1751  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
1752  * @brief This class represents SparseToDense layer
1753  * SparseToDense layer converts a sparse tensor to a dense tensor.
1754  */
1755 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(SparseToDenseLayer) : public CNNLayer {
1756 public:
1757  /**
1758  * @brief Creates a new SparseToDenseLayer instance.
1759  */
1760  using CNNLayer::CNNLayer;
1761 
1762  ~SparseToDenseLayer() override;
1763 };
1764 
1765 /**
1766  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
1767  * @brief This class represents Bucketize layer
1768  * Bucketize layer bucketizes the input based on the boundaries.
1769  */
1770 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(BucketizeLayer) : public CNNLayer {
1771 public:
1772  /**
1773  * @brief Indicates whether the intervals include the right or the left bucket edge.
1774  */
1775  bool with_right_bound = true;
1776 
1777  /**
1778  * @brief Creates a new BucketizeLayer instance.
1779  */
1780  using CNNLayer::CNNLayer;
1781 
1782  ~BucketizeLayer() override;
1783 };
1784 
1785 /**
1786  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
1787  * @brief This class represents a standard Reverse Sequence layer
1788  *
1789  * Reverse Sequence modifies the input tensor according to parameters
1790  */
1791 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ReverseSequenceLayer): public CNNLayer {
1792 public:
1793  /**
1794  * @brief The seq_axis dimension in tensor which is partially reversed
1795  */
1796  int seq_axis = 1;
1797 
1798  /**
1799  * @brief The batch_axis dimension in tensor along which reversal is performed
1800  */
1801  int batch_axis = 0;
1802 
1803  /**
1804  * @brief Creates a new ReverseSequence instance.
1805  */
1806  using CNNLayer::CNNLayer;
1807 
1808  ~ReverseSequenceLayer() override;
1809 };
1810 
1811 /**
1812  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
1813  * @brief This class represents a OneHot layer
1814  * Converts input into OneHot representation.
1815  */
1816 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(OneHotLayer): public CNNLayer {
1817 public:
1818  /**
1819  * @brief A depth of representation
1820  */
1821  unsigned int depth = 0;
1822 
1823  /**
1824  * @brief The locations represented by indices in input take value on_value
1825  */
1826  float on_value = 1.f;
1827 
1828  /**
1829  * @brief The locations not represented by indices in input take value off_value
1830  */
1831  float off_value = 0.f;
1832 
1833  /**
1834  * @brief Define the shape of output tensor
1835  */
1836  int axis = -1;
1837 
1838  /**
1839  * @brief Creates a new OneHot instance
1840  */
1841  using CNNLayer::CNNLayer;
1842 
1843  ~OneHotLayer() override;
1844 };
1845 
1846 /**
1847  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
1848  * @brief This class represents a standard RangeLayer layer
1849  *
1850  * RangeLayer modifies input tensor dimensions according to parameters
1851  */
1852 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(RangeLayer): public CNNLayer {
1853 public:
1854  /**
1855  * @brief Creates a new RangeLayer instance.
1856  */
1857  using CNNLayer::CNNLayer;
1858 
1859  ~RangeLayer() override;
1860 };
1861 
1862 /**
1863  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
1864  * @brief This class represents a standard Fill layer
1865  *
1866  * Fill modifies the input tensor according to parameters
1867  */
1868 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(FillLayer): public CNNLayer {
1869 public:
1870  /**
1871  * @brief Creates a new Fill instance.
1872  */
1873  using CNNLayer::CNNLayer;
1874 
1875  ~FillLayer() override;
1876 };
1877 
1878 /**
1879  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
1880  * @brief This class represents a SelectLayer layer
1881  *
1882  * SelectLayer layer takes elements from the second (“then”) or the third (“else”) input based on condition mask
1883  * (“cond”) provided in the first input. The “cond” tensor is broadcasted to “then” and “else” tensors. The output
1884  * tensor shape is equal to broadcasted shape of “cond”, “then” and “else”.
1885  */
1886 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(SelectLayer): public CNNLayer {
1887 public:
1888  /**
1889  * @brief Creates a new SelectLayer instance.
1890  */
1891  using CNNLayer::CNNLayer;
1892 
1893  ~SelectLayer() override;
1894 };
1895 
1896 /**
1897  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
1898  * @brief This class represents a standard Broadcast layer
1899  *
1900  * Broadcast modifies input tensor dimensions according to parameters
1901  */
1902 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(BroadcastLayer): public CNNLayer {
1903 public:
1904  /**
1905  * @brief Creates a new Broadcast instance.
1906  */
1907  using CNNLayer::CNNLayer;
1908 
1909  ~BroadcastLayer() override;
1910 };
1911 
1912 /**
1913  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
1914  * @brief This class represents a quantization operation layer
1915  *
1916  * Element-wise linear quantization of floating point input values into a discrete set of floating point values
1917  */
1918 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(QuantizeLayer): public CNNLayer {
1919 public:
1920  /**
1921  * @brief The number of quantization levels
1922  */
1923  int levels = 1;
1924 
1925  /**
1926  * @brief Creates a new QuantizeLayer instance.
1927  */
1928  using CNNLayer::CNNLayer;
1929 
1930  ~QuantizeLayer() override;
1931 };
1932 
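A standalone sketch of the kind of per-element linear quantization the 'levels' attribute controls: input values are mapped onto a grid of 'levels' evenly spaced values between an input and an output range (a FakeQuantize-style mapping). The input and output ranges are supplied as separate layer inputs in the IR, so the function below only illustrates the arithmetic:

#include <cmath>

// Maps x from [in_low, in_high] onto one of 'levels' evenly spaced values in [out_low, out_high].
float quantize_reference(float x, float in_low, float in_high,
                         float out_low, float out_high, int levels) {
    if (x <= in_low) return out_low;
    if (x >= in_high) return out_high;
    const float steps = static_cast<float>(levels - 1);
    const float q = std::round((x - in_low) / (in_high - in_low) * steps) / steps;
    return q * (out_high - out_low) + out_low;
}
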
1933 /**
1934  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
1935  * @brief This class represents standard Math layers
1936  *
1937  * Math layers compute element-wise mathematical functions over the input tensor
1938  */
1939 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(MathLayer): public CNNLayer {
1940 public:
1941  /**
1942  * @brief Creates a new Math instance.
1943  */
1944  using CNNLayer::CNNLayer;
1945 
1946  ~MathLayer() override;
1947 };
1948 
1949 /**
1950  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
1951  * @brief This class represents standard Reduce layers
1952  *
1953  * Reduce layers aggregate the input tensor (for example by sum, mean, min or max) along the specified axes
1954  */
1955 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ReduceLayer): public CNNLayer {
1956 public:
1957  /**
1958  * @brief A flag indicating whether the reduced dimensions are kept in the output tensor as dimensions of size 1
1959  */
1960  bool keep_dims = true;
1961 
1962  /**
1963  * @brief Creates a new Reduce instance.
1964  */
1965  using CNNLayer::CNNLayer;
1966 
1967  ~ReduceLayer() override;
1968 };
1969 
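A short standalone illustration (not from the header) of how keep_dims affects the output: the reduced values are the same either way, only the reported shape differs.

#include <cstddef>
#include <vector>

// ReduceSum over the last axis of a row-major [rows x cols] tensor.
// With keep_dims == true the output shape is [rows, 1]; with keep_dims == false it is [rows].
std::vector<float> reduce_sum_last_axis(const std::vector<float>& data,
                                        std::size_t rows, std::size_t cols) {
    std::vector<float> out(rows, 0.f);
    for (std::size_t r = 0; r < rows; ++r) {
        for (std::size_t c = 0; c < cols; ++c) {
            out[r] += data[r * cols + c];
        }
    }
    return out;  // keep_dims changes only the shape metadata, not these values
}
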
1970 /**
1971  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
1972  * @brief This class represents a standard TopK layer
1973  *
1974  * TopK picks the top K values from the input tensor along the specified axis
1975  */
1976 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(TopKLayer): public CNNLayer {
1977 public:
1978  /**
1979  * @brief The mode could be 'max' or 'min'
1980  */
1981  std::string mode;
1982  /**
1983  * @brief The sort mode for the selected values, either 'value' or 'index'
1984  */
1985  std::string sort;
1986  /**
1987  * @brief The axis along which the top K values are picked
1988  */
1989  int axis = -1;
1990 
1991  /**
1992  * @brief Creates a new TopKLayer instance.
1993  */
1994  using CNNLayer::CNNLayer;
1995 
1996  ~TopKLayer() override;
1997 };
1998 
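A minimal usage sketch (illustrative, following the same pattern as the OneHot example above; the name "topk1" is a placeholder) showing how the three attributes are set:

#include <ie_layers.h>
#include <memory>

IE_SUPPRESS_DEPRECATED_START
std::shared_ptr<InferenceEngine::TopKLayer> make_topk_example() {
    using namespace InferenceEngine;
    auto topk = std::make_shared<TopKLayer>(LayerParams("topk1", "TopK", Precision::FP32));
    topk->mode = "max";    // pick the largest values; "min" picks the smallest
    topk->sort = "value";  // order the selected values by value; "index" orders them by index
    topk->axis = -1;       // pick along the last axis
    return topk;
}
IE_SUPPRESS_DEPRECATED_END
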
1999 /**
2000  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
2001  * @brief This class represents Unique layer.
2002  *
2003  * The Unique operation searches for unique elements in a 1-D input tensor
2004  */
2005 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(UniqueLayer): public CNNLayer {
2006 public:
2007  /**
2008  * @brief A flag indicating whether to sort unique elements
2009  */
2010  bool sorted;
2011  /**
2012  * @brief A flag indicating whether to return indices of input data elements in the output of uniques
2013  */
2014  bool return_inverse;
2015  /**
2016  * @brief A flag indicating whether to return the number of occurrences of each unique element
2017  */
2018  bool return_counts;
2019 
2020  /**
2021  * @brief Creates a new UniqueLayer instance.
2022  */
2023  using CNNLayer::CNNLayer;
2024 
2025  ~UniqueLayer() override;
2026 };
2027 
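A standalone reference sketch (not from the header) of what the three flags correspond to for a 1-D input, shown here for the unsorted case; when 'sorted' is set, 'values' would additionally be ordered and the other outputs permuted to match:

#include <algorithm>
#include <cstddef>
#include <vector>

struct UniqueResult {
    std::vector<float> values;  // the unique elements, in order of first occurrence
    std::vector<int> inverse;   // per input element: index into 'values' (return_inverse)
    std::vector<int> counts;    // per unique element: number of occurrences (return_counts)
};

UniqueResult unique_reference(const std::vector<float>& in) {
    UniqueResult r;
    r.inverse.resize(in.size());
    for (std::size_t i = 0; i < in.size(); ++i) {
        auto it = std::find(r.values.begin(), r.values.end(), in[i]);
        if (it == r.values.end()) {
            r.inverse[i] = static_cast<int>(r.values.size());
            r.values.push_back(in[i]);
            r.counts.push_back(1);
        } else {
            const int idx = static_cast<int>(it - r.values.begin());
            r.inverse[i] = idx;
            ++r.counts[idx];
        }
    }
    return r;
}
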
2028 /**
2029  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
2030  * @brief This class represents a standard NonMaxSuppression layer
2031  */
2032 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(NonMaxSuppressionLayer): public CNNLayer {
2033 public:
2034  /**
2035  * @brief The 'center_point_box' indicates the format of the box data
2036  */
2037  bool center_point_box = false;
2038  /**
2039  * @brief The 'sort_result_descending' flag indicates that results are sorted in descending order by score across all
2040  * batches and classes
2041  */
2042  bool sort_result_descending = true;
2043  /**
2044  * @brief Creates a new NonMaxSuppressionLayer instance.
2045  */
2046  using CNNLayer::CNNLayer;
2047 
2048  ~NonMaxSuppressionLayer() override;
2049 };
2050 
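A minimal usage sketch (illustrative; the name "nms1" is a placeholder, and the comments state the usual ONNX-style meaning of the flags rather than anything defined in this header):

#include <ie_layers.h>
#include <memory>

IE_SUPPRESS_DEPRECATED_START
std::shared_ptr<InferenceEngine::NonMaxSuppressionLayer> make_nms_example() {
    using namespace InferenceEngine;
    auto nms = std::make_shared<NonMaxSuppressionLayer>(
        LayerParams("nms1", "NonMaxSuppression", Precision::FP32));
    nms->center_point_box = false;       // boxes encoded by corner coordinates rather than center and size
    nms->sort_result_descending = true;  // selected boxes sorted by score across all batches and classes
    return nms;
}
IE_SUPPRESS_DEPRECATED_END
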
2051 /**
2052  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
2053  * @brief This class represents a standard ScatterUpdate layer
2054  */
2055 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ScatterUpdateLayer): public CNNLayer {
2056 public:
2057  /**
2058  * @brief Creates a new ScatterUpdateLayer instance.
2059  */
2060  using CNNLayer::CNNLayer;
2061 
2062  ~ScatterUpdateLayer() override;
2063 };
2064 
2065 /**
2066  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
2067  * @brief This class represents a standard ScatterElementsUpdate layer
2068  */
2069 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ScatterElementsUpdateLayer): public CNNLayer {
2070 public:
2071  /**
2072  * @brief Creates a new ScatterElementsUpdateLayer instance.
2073  */
2074  using CNNLayer::CNNLayer;
2075 
2076  ~ScatterElementsUpdateLayer() override;
2077 };
2078 
2079 /**
2080  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
2081  * @brief This class represents an ONNX ExperimentalDetectronPriorGridGenerator layer
2082  */
2083 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ExperimentalDetectronPriorGridGeneratorLayer): public CNNLayer {
2084 public:
2085  /**
2086  * @brief The flatten flag; if non-zero, the generated priors are output as a flattened 2D tensor
2087  */
2088  int flatten = 1;
2089  /**
2090  * @brief Value of grid width
2091  */
2092  int grid_w = 0;
2093  /**
2094  * @brief Value of grid height
2095  */
2096  int grid_h = 0;
2097  /**
2098  * @brief Value of width step between grid cells
2099  */
2100  float stride_w = 0.f;
2101  /**
2102  * @brief Value of height step between grid cells
2103  */
2104  float stride_h = 0.f;
2105 
2106  /**
2107  * @brief Creates a new ExperimentalDetectronPriorGridGenerator instance.
2108  */
2109  using CNNLayer::CNNLayer;
2110 
2111  virtual ~ExperimentalDetectronPriorGridGeneratorLayer();
2112 };
2113 
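A standalone sketch of how these attributes are usually interpreted (an assumption about the prior-grid semantics, not taken from the header): every input prior box is replicated at each cell of a grid_w x grid_h grid, shifted to the cell center computed from stride_w and stride_h.

#include <vector>

struct Box { float x0, y0, x1, y1; };

std::vector<Box> prior_grid_reference(const std::vector<Box>& priors,
                                      int grid_w, int grid_h,
                                      float stride_w, float stride_h) {
    std::vector<Box> out;
    for (int h = 0; h < grid_h; ++h) {
        for (int w = 0; w < grid_w; ++w) {
            const float cx = (w + 0.5f) * stride_w;  // x coordinate of the cell center
            const float cy = (h + 0.5f) * stride_h;  // y coordinate of the cell center
            for (const Box& p : priors) {
                out.push_back({p.x0 + cx, p.y0 + cy, p.x1 + cx, p.y1 + cy});
            }
        }
    }
    return out;  // with flatten set, this flat list corresponds to the output layout
}
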
2114 /**
2115  * @brief This class represents a standard ExperimentalDetectronTopKROIs layer
2116  */
2117 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ExperimentalDetectronTopKROIs): public CNNLayer {
2118 public:
2119  /**
2120  * @brief The maximum number of output ROIs
2121  */
2122  int max_rois = 0;
2123  /**
2124  * @brief Creates a new ExperimentalDetectronTopKROIs instance.
2125  */
2126  using CNNLayer::CNNLayer;
2127 
2128  virtual ~ExperimentalDetectronTopKROIs();
2129 };
2130 
2131 /**
2132  * @brief This class represents an ONNX ExperimentalDetectronGenerateProposalsSingleImage layer
2133  */
2134 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ExperimentalDetectronGenerateProposalsSingleImageLayer): public CNNLayer {
2135 public:
2136  /**
2137  * @brief Minimum width and height for boxes
2138  */
2139  float min_size = 0.f;
2140  /**
2141  * @brief Non-maximum suppression (NMS) threshold
2142  */
2143  float nms_threshold = 0.7f;
2144  /**
2145  * @brief Maximum number of anchors selected before NMS
2146  */
2147  int pre_nms_topn = 1000;
2148  /**
2149  * @brief Maximum number of anchors selected after NMS
2150  */
2151  int post_nms_topn = 1000;
2152 
2153  /**
2154  * @brief Creates a new ExperimentalDetectronGenerateProposalsSingleImage instance.
2155  */
2156  using CNNLayer::CNNLayer;
2157 
2158  virtual ~ExperimentalDetectronGenerateProposalsSingleImageLayer();
2159 };
2160 
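A minimal usage sketch (illustrative; the name "proposals1" is a placeholder) that sets the proposal-generation attributes declared above to the defaults from this header:

#include <ie_layers.h>
#include <memory>

IE_SUPPRESS_DEPRECATED_START
std::shared_ptr<InferenceEngine::ExperimentalDetectronGenerateProposalsSingleImageLayer> make_proposals_example() {
    using namespace InferenceEngine;
    auto proposals = std::make_shared<ExperimentalDetectronGenerateProposalsSingleImageLayer>(
        LayerParams("proposals1", "ExperimentalDetectronGenerateProposalsSingleImage", Precision::FP32));
    proposals->min_size = 0.f;        // discard boxes smaller than this in either dimension
    proposals->nms_threshold = 0.7f;  // IoU threshold used during NMS
    proposals->pre_nms_topn = 1000;   // number of top-scoring anchors kept before NMS
    proposals->post_nms_topn = 1000;  // number of proposals kept after NMS
    return proposals;
}
IE_SUPPRESS_DEPRECATED_END
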
2161 IE_SUPPRESS_DEPRECATED_END_WIN
2162 
2163 } // namespace InferenceEngine