ArmNN
 25.11
Loading...
Searching...
No Matches
QuantizeLayer.cpp
Go to the documentation of this file.
1//
2// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include "QuantizeLayer.hpp"
7
8#include "LayerCloneBase.hpp"
9
10namespace armnn
11{
12
14: Layer(1, 1, LayerType::Quantize, name)
15{}
16
17std::unique_ptr<IWorkload> QuantizeLayer::CreateWorkload(const IWorkloadFactory& factory) const
18{
19 QuantizeQueueDescriptor descriptor;
20 SetAdditionalInfo(descriptor);
21
22 WorkloadInfo info = PrepInfoAndDesc(descriptor);
23
24 return factory.CreateWorkload(LayerType::Quantize, descriptor, info);
25}
26
28{
30 return clone;
31}
32
34{
36
37 const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
38
40
41 auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
42
43 ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "QuantizeLayer");
44}
45
47{
48 strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
49}
50
51} //namespace armnn
#define CHECK_LOCATION()
virtual void ExecuteStrategy(const IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id=0)=0
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const =0
Backends should implement their own CreateWorkload function with a switch statement.
const TensorInfo & GetTensorInfo() const override
Gets the TensorInfo for this InputSlot.
Definition Layer.cpp:614
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
Definition Layer.cpp:410
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition Layer.hpp:337
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
Helper function to reduce duplication in *LayerCreateWorkload.
Definition Layer.hpp:409
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
Definition Layer.cpp:526
Layer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char *name)
Definition Layer.cpp:260
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
Infer the shape of the output(s) based on the provided input shape(s)
Definition Layer.cpp:432
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition Layer.hpp:339
LayerType * CloneBase(Graph &graph, Params &&... params) const
const char * GetName() const override
Returns the name of the layer.
Definition Layer.hpp:332
virtual const BaseDescriptor & GetParameters() const override
If the layer has a descriptor return it.
Definition Layer.hpp:378
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
Definition Layer.cpp:457
void SetAdditionalInfo(QueueDescriptor &descriptor) const
Definition Layer.cpp:303
friend class Graph
Definition Layer.hpp:382
ShapeInferenceMethod m_ShapeInferenceMethod
Definition Layer.hpp:441
const TensorInfo & GetTensorInfo() const override
Definition Layer.cpp:100
void ExecuteStrategy(IStrategy &strategy) const override
Apply a visitor to this layer.
QuantizeLayer(const char *name)
void ValidateTensorShapesFromInputs() override
Layer * Clone(Graph &graph) const override
Creates a dynamically-allocated copy of this layer.
virtual std::unique_ptr< IWorkload > CreateWorkload(const IWorkloadFactory &factory) const override
const TensorShape & GetShape() const
Definition Tensor.hpp:193
Copyright (c) 2021 ARM Limited and Contributors.
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition Types.hpp:494
QuantizedType Quantize(float value, float scale, int32_t offset)
Quantize a floating-point value into an 8-bit quantized value.
Contains information about TensorInfos of a layer.