class ngraph::PerTensorQuantizationAttribute

Overview

PerTensorQuantizationAttribute defines whether an operation's input port requires per-tensor quantization.

#include <per_tensor_quantization_attribute.hpp>

class PerTensorQuantizationAttribute: public ov::RuntimeAttribute
{
public:
    // methods

    OPENVINO_RTTI(
        "LowPrecision::PerTensorQuantization",
        "",
        ov::RuntimeAttribute,
        0
        );
};
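
The OPENVINO_RTTI macro shown above registers the attribute's runtime type information under the name "LowPrecision::PerTensorQuantization". The following is a minimal sketch of reading that type information through get_type_info_static(); the include path simply follows the one shown above and may differ depending on how the low-precision transformation headers are installed:

#include <iostream>

#include <per_tensor_quantization_attribute.hpp>

int main()
{
    // OPENVINO_RTTI registers runtime type information for the attribute;
    // get_type_info_static() exposes it, including the registered name
    // "LowPrecision::PerTensorQuantization".
    const auto& info = ngraph::PerTensorQuantizationAttribute::get_type_info_static();
    std::cout << info.name << std::endl;
    return 0;
}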

Inherited Members

public:
    // typedefs

    typedef std::shared_ptr<RuntimeAttribute> Ptr;
    typedef std::tuple<::ov::RuntimeAttribute> Base;

    // methods

    static const DiscreteTypeInfo& get_type_info_static();
    virtual const DiscreteTypeInfo& get_type_info() const;
    virtual bool is_copyable() const;
    virtual Any init(const std::shared_ptr<Node>& node) const;
    virtual Any merge(const ov::NodeVector& nodes) const;
    virtual Any merge(const ov::OutputVector& outputs) const;
    virtual std::string to_string() const;
    virtual bool visit_attributes(AttributeVisitor&);
    bool visit_attributes(AttributeVisitor& visitor) const;

Detailed Documentation

PerTensorQuantizationAttribute defines whether an operation's input port requires per-tensor quantization.

For more details about the attribute, refer to the PerTensorQuantizationAttribute page in the Inference Engine Developer Guide.
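
Since the class is a runtime attribute, it is typically stored in the rt_info map of an operation's input port. The sketch below is illustrative only: the mark_per_tensor_quantization and requires_per_tensor_quantization helpers are hypothetical, and the rt_info key (the attribute's registered RTTI name) and include paths are assumptions; the low-precision transformations may use their own helper APIs to attach and query the attribute.

#include <memory>

#include <openvino/core/node.hpp>

#include <per_tensor_quantization_attribute.hpp>

// Hypothetical helper: mark the first input port of an operation as requiring
// per-tensor quantization by storing the attribute in the port's rt_info map.
void mark_per_tensor_quantization(const std::shared_ptr<ov::Node>& node)
{
    auto& rt_info = node->input(0).get_rt_info();
    rt_info["LowPrecision::PerTensorQuantization"] =
        ngraph::PerTensorQuantizationAttribute{};
}

// Hypothetical helper: report whether the first input port carries the attribute.
bool requires_per_tensor_quantization(const std::shared_ptr<ov::Node>& node)
{
    const auto& rt_info = node->input(0).get_rt_info();
    return rt_info.count("LowPrecision::PerTensorQuantization") != 0;
}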