42 size_t reduction_window_split_dimension(
unsigned int axis)
61 : _memory_group(memory_manager), _reduction_kernel(), _reshape(), _output_internal(), _window_split(0), _reduction_axis(), _is_reshape_required(false)
70 const auto is_reshape_required = !keep_dims;
72 auto *output_internal = output;
76 if(is_reshape_required)
82 shape_before_reshape.
set(axis, 1);
91 output_internal = &info_before_reshape;
96 if(is_reshape_required)
108 _is_reshape_required = !keep_dims;
110 auto *output_internal = output;
113 if(_is_reshape_required)
121 _output_internal.
allocator()->
init(input->
info()->
clone()->set_data_type(output_data_type).set_tensor_shape(output_internal_shape).reset_padding().set_is_resizable(
true).set_num_channels(
122 num_channels).set_quantization_info(
qinfo));
123 _memory_group.
manage(&_output_internal);
124 output_internal = &_output_internal;
125 auto_init_if_empty(*output->
info(), input->
info()->
clone()->set_data_type(output_data_type).set_tensor_shape(output_external_shape).reset_padding().set_is_resizable(
true));
131 _reduction_kernel = std::make_unique<NEReductionOperationKernel>();
132 _reduction_kernel->configure(input, output_internal, axis, op);
133 _window_split = reduction_window_split_dimension(axis);
134 _reduction_axis = axis;
136 if(_is_reshape_required)
138 _reshape.
configure(output_internal, output);
147 if(_is_reshape_required)
virtual ITensorInfo & set_num_channels(int num_channels)=0
Set the number of channels to the specified value.
void run() override
Run the kernels contained in the function.
void init(const TensorAllocator &allocator, const Coordinates &coords, TensorInfo &sub_info)
Shares the same backing memory with another tensor allocator, while the tensor info might be different.
ReductionOperation
Available reduction operations.
static Status validate(const ITensorInfo *input, const ITensorInfo *output)
Static function to check if given info will lead to a valid configuration of NEReshapeLayer.
#define ARM_COMPUTE_ERROR(msg)
Print the given message then throw an std::runtime_error.
virtual ITensorInfo & set_tensor_shape(const TensorShape &shape)=0
Set the shape of an already initialized tensor.
#define ARM_COMPUTE_RETURN_ON_ERROR(status)
Checks if a status contains an error and returns it.
virtual DataType data_type() const =0
Data type used for each element of the tensor.
ITensorInfo & set_data_type(DataType data_type) override
Set the data type to the specified value.
Store the tensor's metadata.
#define ARM_COMPUTE_ERROR_THROW_ON(status)
Interface for Neon tensor.
Copyright (c) 2017-2021 Arm Limited.
TensorAllocator * allocator()
Return a pointer to the tensor's allocator.
1 channel, 1 S32 per channel
void manage(IMemoryManageable *obj) override
Sets a object to be managed by the given memory group.
static Status validate(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, ReductionOperation op)
Static function to check if given info will lead to a valid configuration of NEReductionOperationKernel.
static constexpr size_t DimX
Alias for dimension 0 also known as X dimension.
static Status validate(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, ReductionOperation op, bool keep_dims=true)
Static function to check if given info will lead to a valid configuration of NEReductionOperation.
virtual const TensorShape & tensor_shape() const =0
Size for each dimension of the tensor.
void allocate() override
Allocate size specified by TensorInfo of CPU memory.
TensorShape compute_reduced_shape(const TensorShape &input, unsigned int axis, bool keep_dims=true)
Calculate the reduced shape of a tensor given an axis.
bool auto_init_if_empty(ITensorInfo &info, const TensorShape &shape, int num_channels, DataType data_type, QuantizationInfo quantization_info=QuantizationInfo())
Auto initialize the tensor info (shape, number of channels and data type) if the current assignment is empty.
virtual std::unique_ptr< T > clone() const =0
Provide a clone of the current object of class T.
virtual ITensorInfo * info() const =0
Interface to be implemented by the child class to return the tensor's metadata.
virtual ITensorInfo & set_quantization_info(const QuantizationInfo &quantization_info)=0
Set the quantization settings (scale and offset) of the tensor.
virtual QuantizationInfo quantization_info() const =0
Get the quantization settings (scale and offset) of the tensor.
~NEReductionOperation()
Default destructor.
static constexpr size_t DimY
Alias for dimension 1 also known as Y dimension.
Memory group resources scope handling class.
virtual void schedule(ICPPKernel *kernel, const Hints &hints)=0
Runs the kernel in the same thread as the caller synchronously.
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(...)
const QuantizationInfo qinfo
void configure(const ITensor *input, ITensor *output)
Initialise the kernel's inputs and outputs.
#define ARM_COMPUTE_RETURN_ERROR_ON_MSG(cond, msg)
If the condition is true, an error is returned.
#define ARM_COMPUTE_ERROR_ON_NULLPTR(...)
Store the tensor's metadata.
static constexpr size_t num_max_dimensions
Number of dimensions the tensor has.
NEReductionOperation(std::shared_ptr< IMemoryManager > memory_manager=nullptr)
Default constructor.
virtual size_t num_channels() const =0
The number of channels for each tensor element.
TensorShape & set(size_t dimension, size_t value, bool apply_dim_correction=true, bool increase_dim_unit=true)
Accessor to set the value of one of the dimensions.
void run() override
Run the kernels contained in the function.
void configure(ITensor *input, ITensor *output, unsigned int axis, ReductionOperation op, bool keep_dims=true)
Set the input and output tensors.
static IScheduler & get()
Access the scheduler singleton.