constexpr int max_input_tensor_dim = 3;

// NEL2NormalizeLayer constructor: member initialization
NEL2NormalizeLayer::NEL2NormalizeLayer(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(std::move(memory_manager)), _reduce_func(), _normalize_kernel(), _sumsq()
{
}

// NEL2NormalizeLayer::configure(): let the memory group manage the intermediate
// sum-of-squares tensor, wrap the axis into [0, max_input_tensor_dim) and set up
// the normalize kernel
_memory_group.manage(&_sumsq);

const uint32_t actual_axis = wrap_around(axis, max_input_tensor_dim);
_normalize_kernel = std::make_unique<NEL2NormalizeLayerKernel>();
_normalize_kernel->configure(input, &_sumsq, output, axis, epsilon);

// NEL2NormalizeLayer::validate(): wrap the axis the same way, then collapse the
// reduced dimension to 1 on the intermediate shape
const uint32_t actual_axis = wrap_around(axis, max_input_tensor_dim);
shape.set(actual_axis, 1);
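The excerpts above cover construction, configure() and validate(). A minimal caller-side sketch of how the function is typically driven is given below; the tensor shape, data type, axis value and header paths are illustrative assumptions rather than anything taken from this file, and error handling is reduced to a simple status check.

#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEL2NormalizeLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Illustrative 3-D tensors; shape and data type are assumptions for this sketch
    Tensor src, dst;
    src.allocator()->init(TensorInfo(TensorShape(8U, 4U, 2U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(8U, 4U, 2U), 1, DataType::F32));

    // Check the requested configuration first (mirrors the validate() excerpt above)
    const Status status = NEL2NormalizeLayer::validate(src.info(), dst.info(), /*axis=*/0, /*epsilon=*/1e-6f);
    if(status.error_code() != ErrorCode::OK)
    {
        return 1;
    }

    // Configure, allocate backing memory, then execute
    NEL2NormalizeLayer l2_norm;
    l2_norm.configure(&src, &dst, /*axis=*/0, /*epsilon=*/1e-6f);
    src.allocator()->allocate();
    dst.allocator()->allocate();

    // ...fill src here...

    l2_norm.run();
    return 0;
}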
void manage(IMemoryManageable *obj) override
Sets an object to be managed by the given memory group.
static Status validate(const ITensorInfo *input, const ITensorInfo *output, int axis, float epsilon=1e-6f)
Static function to check if given info will lead to a valid configuration of NEL2NormalizeLayer.
void configure(ITensor *input, ITensor *output, int axis, float epsilon=1e-6f)
Set the input and output tensors.
Interface for CPU tensor.
ITensorInfo & set_data_type(DataType data_type) override
Set the data type to the specified value.
T wrap_around(T x, T m)
Wrap-around a number within the range 0 <= x < m.
#define ARM_COMPUTE_RETURN_ON_ERROR(status)
Checks if a status contains an error and returns it.
TensorAllocator * allocator()
Return a pointer to the tensor's allocator.
@ SUM_SQUARE
Sum of squares.
static Status validate(const ITensorInfo *input, const ITensorInfo *sum, const ITensorInfo *output, int axis, float epsilon)
Static function to check if given info will lead to a valid configuration of NEL2NormalizeLayerKernel.
static IScheduler & get()
Access the scheduler singleton.
static Status validate(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, ReductionOperation op, bool keep_dims=true)
Static function to check if given info will lead to a valid configuration of NEReductionOperation.
void run() override
Run the kernels contained in the function.
static constexpr size_t DimY
Alias for dimension 1 also known as Y dimension.
virtual void schedule(ICPPKernel *kernel, const Hints &hints)=0
Runs the kernel in the same thread as the caller synchronously.
Store the tensor's metadata.
void allocate() override
Allocate size specified by TensorInfo of CPU memory.
Memory group resources scope handling class.
Copyright (c) 2017-2023 Arm Limited.
~NEL2NormalizeLayer()
Default destructor.
void configure(ITensor *input, ITensor *output, unsigned int axis, ReductionOperation op, bool keep_dims=true)
Set the input and output tensors.
ITensorInfo & set_tensor_shape(const TensorShape &shape) override
Set the shape of an already initialized tensor.
NEL2NormalizeLayer(std::shared_ptr< IMemoryManager > memory_manager=nullptr)
Constructor.
#define ARM_COMPUTE_LOG_PARAMS(...)
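The wrap_around helper documented above is what turns a possibly negative axis into a dimension index in the valid range 0 <= x < m (here m is max_input_tensor_dim = 3). The standalone sketch below only illustrates that documented contract; the function name and body are assumptions for illustration, not the library's implementation.

#include <cassert>

// Illustration of the documented wrap-around contract (0 <= result < m);
// wrap_around_sketch is a hypothetical stand-in, not arm_compute::wrap_around itself.
template <typename T>
T wrap_around_sketch(T x, T m)
{
    return x >= 0 ? x % m : (x % m + m) % m;
}

int main()
{
    // With max_input_tensor_dim = 3, axis -1 maps to the last dimension (2)
    assert(wrap_around_sketch(-1, 3) == 2);
    assert(wrap_around_sketch(0, 3) == 0);
    assert(wrap_around_sketch(2, 3) == 2);
    return 0;
}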