Compute Library 20.11
NEReductionOperation.cpp
/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/NEON/functions/NEReductionOperation.h"

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/core/NEON/kernels/NEReductionOperationKernel.h"
#include "support/MemorySupport.h"

namespace arm_compute
{
namespace
{
/** Define dimension to split the window
 *
 * @param[in] axis Reduction axis
 *
 * @return The dimension to split the window
 */
size_t reduction_window_split_dimension(unsigned int axis)
{
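    // Reducing along axis 0 collapses the X dimension, so the workload is split across Y;
    // for axes 1-3 the X dimension is left intact and is used to split the window.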
    switch(axis)
    {
        case 0:
            return Window::DimY;
        case 1:
        case 2:
        case 3:
            return Window::DimX;
        default:
            ARM_COMPUTE_ERROR("Unsupported reduction axis");
    }
}
} // namespace

NEReductionOperation::~NEReductionOperation() = default;

NEReductionOperation::NEReductionOperation(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(memory_manager), _reduction_kernel(), _reshape(), _output_internal(), _window_split(0), _reduction_axis(), _is_reshape_required(false)
{
}

Status NEReductionOperation::validate(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, ReductionOperation op, bool keep_dims)
{
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis >= TensorShape::num_max_dimensions, "Reduction axis greater than max number of dimensions");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis > 3, "Unsupported reduction axis");

    const auto is_reshape_required = !keep_dims;

    auto *output_internal = output;

    TensorInfo info_before_reshape;

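    // When keep_dims is false the reduction kernel still produces the reduced axis with size 1;
    // build the intermediate TensorInfo for that shape here, validate the kernel against it, and
    // check below that reshaping it into the caller's (axis-collapsed) output is valid.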
    if(is_reshape_required)
    {
        const TensorInfo expected_output_shape = output->clone()->set_tensor_shape(arm_compute::misc::shape_calculator::compute_reduced_shape(input->tensor_shape(), axis, keep_dims));
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(&expected_output_shape, output);

        auto shape_before_reshape = input->tensor_shape();
        shape_before_reshape.set(axis, 1);

        const auto input_num_channels = input->num_channels();
        const auto input_qinfo        = input->quantization_info();
        const auto is_arg_min_max     = (op == ReductionOperation::ARG_IDX_MAX) || (op == ReductionOperation::ARG_IDX_MIN);
        const auto output_data_type   = is_arg_min_max ? DataType::S32 : output->data_type();

        info_before_reshape.set_data_type(output_data_type).set_tensor_shape(shape_before_reshape).set_num_channels(input_num_channels).set_quantization_info(input_qinfo);

        output_internal = &info_before_reshape;
    }

    ARM_COMPUTE_RETURN_ON_ERROR(NEReductionOperationKernel::validate(input, output_internal, axis, op));

    if(is_reshape_required)
    {
        ARM_COMPUTE_RETURN_ON_ERROR(NEReshapeLayer::validate(output_internal, output));
    }

    return Status{};
}

void NEReductionOperation::configure(ITensor *input, ITensor *output, unsigned int axis, ReductionOperation op, bool keep_dims)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input);

    _is_reshape_required = !keep_dims;

    auto      *output_internal = output;
    const auto is_arg_min_max  = (op == ReductionOperation::ARG_IDX_MAX) || (op == ReductionOperation::ARG_IDX_MIN);

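    // keep_dims == false: reduce into an internal tensor that keeps the reduced axis with size 1
    // (using S32 for ARG_IDX_MIN/ARG_IDX_MAX indices), then reshape that result into the caller's
    // output, whose shape has the axis removed.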
    if(_is_reshape_required)
    {
        const auto output_internal_shape = arm_compute::misc::shape_calculator::compute_reduced_shape(input->info()->tensor_shape(), axis);
        const auto output_external_shape = arm_compute::misc::shape_calculator::compute_reduced_shape(input->info()->tensor_shape(), axis, false);
        const auto output_data_type      = is_arg_min_max ? DataType::S32 : input->info()->data_type();
        const auto num_channels          = input->info()->num_channels();
        const auto qinfo                 = input->info()->quantization_info();

        _output_internal.allocator()->init(input->info()->clone()->set_data_type(output_data_type).set_tensor_shape(output_internal_shape).reset_padding().set_is_resizable(true).set_num_channels(
                                               num_channels).set_quantization_info(qinfo));
        _memory_group.manage(&_output_internal);
        output_internal = &_output_internal;
        auto_init_if_empty(*output->info(), input->info()->clone()->set_data_type(output_data_type).set_tensor_shape(output_external_shape).reset_padding().set_is_resizable(true));
    }

    ARM_COMPUTE_ERROR_THROW_ON(NEReductionOperation::validate(input->info(), output->info(), axis, op, keep_dims));

    // Configure reduction kernel
    _reduction_kernel = arm_compute::support::cpp14::make_unique<NEReductionOperationKernel>();
    _reduction_kernel->configure(input, output_internal, axis, op);
    _window_split   = reduction_window_split_dimension(axis);
    _reduction_axis = axis;

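    // Collapse the size-1 reduced axis by reshaping the internal result into the user-provided output.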
    if(_is_reshape_required)
    {
        _reshape.configure(output_internal, output);
        _output_internal.allocator()->allocate();
    }
}

void NEReductionOperation::run()
{
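    // Run the reduction kernel, splitting its window along the dimension chosen for the reduction
    // axis at configure time, then drop the reduced axis with the reshape stage if one was set up.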
    MemoryGroupResourceScope scope_mg(_memory_group);
    NEScheduler::get().schedule(_reduction_kernel.get(), _window_split);
    if(_is_reshape_required)
    {
        _reshape.run();
    }
}
} // namespace arm_compute
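
For reference, a minimal usage sketch of the function implemented above. It is not part of the file: the tensor shapes, the choice of ReductionOperation::SUM, the reduction axis and the keep_dims value are assumptions picked purely for illustration.

#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEReductionOperation.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Source: an 8 x 4 F32 tensor. Destination: axis 0 reduced away (keep_dims = false), i.e. shape (4).
    Tensor src, dst;
    src.allocator()->init(TensorInfo(TensorShape(8U, 4U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(4U), 1, DataType::F32));

    // Check the configuration first, then configure the function.
    const Status status = NEReductionOperation::validate(src.info(), dst.info(), 0, ReductionOperation::SUM, false);
    ARM_COMPUTE_ERROR_THROW_ON(status);

    NEReductionOperation reduction;
    reduction.configure(&src, &dst, 0, ReductionOperation::SUM, false);

    // Allocate backing memory, fill src with data, then run.
    src.allocator()->allocate();
    dst.allocator()->allocate();
    // ... fill src ...
    reduction.run();

    return 0;
}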