Compute Library 21.02
NEReductionOperation.cpp
/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/NEON/functions/NEReductionOperation.h"

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/core/NEON/kernels/NEReductionOperationKernel.h"
#include "src/core/helpers/AutoConfiguration.h"

namespace arm_compute
{
namespace
{
/** Define dimension to split the window
 *
 * @param[in] axis Reduction axis
 *
 * @return The dimension to split the window
 */
size_t reduction_window_split_dimension(unsigned int axis)
{
    switch(axis)
    {
        case 0:
            return Window::DimY;
        case 1:
        case 2:
        case 3:
            return Window::DimX;
        default:
            ARM_COMPUTE_ERROR("Unsupported reduction axis");
    }
}
} // namespace

NEReductionOperation::~NEReductionOperation() = default;

NEReductionOperation::NEReductionOperation(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(memory_manager), _reduction_kernel(), _reshape(), _output_internal(), _window_split(0), _reduction_axis(), _is_reshape_required(false)
{
}

Status NEReductionOperation::validate(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, ReductionOperation op, bool keep_dims)
{
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis >= TensorShape::num_max_dimensions, "Reduction axis greater than max number of dimensions");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis > 3, "Unsupported reduction axis");

    const auto is_reshape_required = !keep_dims;

    auto *output_internal = output;

    TensorInfo info_before_reshape;

    if(is_reshape_required)
    {
        const TensorInfo expected_output_shape = output->clone()->set_tensor_shape(arm_compute::misc::shape_calculator::compute_reduced_shape(input->tensor_shape(), axis, keep_dims));
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(&expected_output_shape, output);

        auto shape_before_reshape = input->tensor_shape();
        shape_before_reshape.set(axis, 1);

        const auto input_num_channels = input->num_channels();
        const auto input_qinfo        = input->quantization_info();
        const auto is_arg_min_max     = (op == ReductionOperation::ARG_IDX_MAX) || (op == ReductionOperation::ARG_IDX_MIN);
        const auto output_data_type   = is_arg_min_max ? DataType::S32 : output->data_type();

        info_before_reshape.set_data_type(output_data_type).set_tensor_shape(shape_before_reshape).set_num_channels(input_num_channels).set_quantization_info(input_qinfo);

        output_internal = &info_before_reshape;
    }

    ARM_COMPUTE_RETURN_ON_ERROR(NEReductionOperationKernel::validate(input, output_internal, axis, op));

    if(is_reshape_required)
    {
        ARM_COMPUTE_RETURN_ON_ERROR(NEReshapeLayer::validate(output_internal, output));
    }

    return Status{};
}

void NEReductionOperation::configure(ITensor *input, ITensor *output, unsigned int axis, ReductionOperation op, bool keep_dims)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);

    _is_reshape_required = !keep_dims;

    auto      *output_internal = output;
    const auto is_arg_min_max  = (op == ReductionOperation::ARG_IDX_MAX) || (op == ReductionOperation::ARG_IDX_MIN);

    if(_is_reshape_required)
    {
        const auto output_internal_shape = arm_compute::misc::shape_calculator::compute_reduced_shape(input->info()->tensor_shape(), axis);
        const auto output_external_shape = arm_compute::misc::shape_calculator::compute_reduced_shape(input->info()->tensor_shape(), axis, false);
        const auto output_data_type      = is_arg_min_max ? DataType::S32 : input->info()->data_type();
        const auto num_channels          = input->info()->num_channels();
        const auto qinfo                 = input->info()->quantization_info();

        _output_internal.allocator()->init(input->info()->clone()->set_data_type(output_data_type).set_tensor_shape(output_internal_shape).reset_padding().set_is_resizable(true).set_num_channels(
                                               num_channels).set_quantization_info(qinfo));
        _memory_group.manage(&_output_internal);
        output_internal = &_output_internal;
        auto_init_if_empty(*output->info(), input->info()->clone()->set_data_type(output_data_type).set_tensor_shape(output_external_shape).reset_padding().set_is_resizable(true));
    }

    ARM_COMPUTE_ERROR_THROW_ON(NEReductionOperation::validate(input->info(), output->info(), axis, op, keep_dims));

    // Configure reduction kernel
    _reduction_kernel = std::make_unique<NEReductionOperationKernel>();
    _reduction_kernel->configure(input, output_internal, axis, op);
    _window_split   = reduction_window_split_dimension(axis);
    _reduction_axis = axis;

    if(_is_reshape_required)
    {
        _reshape.configure(output_internal, output);
        _output_internal.allocator()->allocate();
    }
}

void NEReductionOperation::run()
{
    MemoryGroupResourceScope scope_mg(_memory_group);
    NEScheduler::get().schedule(_reduction_kernel.get(), _window_split);
    if(_is_reshape_required)
    {
        _reshape.run();
    }
}
} // namespace arm_compute
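
For context, a minimal usage sketch of this function follows; it is not part of the file above. It sums an 8x4 F32 tensor along axis 0 with keep_dims = false, which exercises the internal output tensor and the NEReshapeLayer path implemented in configure() and run(). The tensor shapes and the main() wrapper are illustrative assumptions, not taken from the library's own examples.

#include "arm_compute/runtime/NEON/functions/NEReductionOperation.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Illustrative shapes: reduce an 8x4 F32 tensor along axis 0 (the X dimension).
    Tensor input;
    Tensor output;
    input.allocator()->init(TensorInfo(TensorShape(8U, 4U), 1, DataType::F32));
    output.allocator()->init(TensorInfo(TensorShape(4U), 1, DataType::F32));

    // keep_dims = false drops the reduced axis, so the function reduces into an
    // internal tensor and then reshapes it into the user-provided output.
    NEReductionOperation reduction;
    reduction.configure(&input, &output, 0, ReductionOperation::SUM, false);

    // Allocate backing memory after configuration.
    input.allocator()->allocate();
    output.allocator()->allocate();

    // ... fill 'input' with data here ...

    reduction.run();
    return 0;
}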