Compute Library 20.05
NEReduceMean.cpp
/*
 * Copyright (c) 2018-2020 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/NEON/functions/NEReduceMean.h"

#include "arm_compute/core/CPP/Validate.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"

namespace arm_compute
{
namespace
{
} // namespace

NEReduceMean::NEReduceMean(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(std::move(memory_manager)), _reduction_kernels(), _reduced_outs(), _reshape(), _reduction_ops(), _keep_dims()
{
}

Status validate_config(const ITensorInfo *input, const Coordinates &reduction_axis, bool keep_dims, const ITensorInfo *output)
{
    ARM_COMPUTE_UNUSED(keep_dims);
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8_SIGNED, DataType::QASYMM8, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON(reduction_axis.num_dimensions() < 1);
    ARM_COMPUTE_RETURN_ERROR_ON(reduction_axis.num_dimensions() > input->num_dimensions());

    const unsigned int reduction_ops = reduction_axis.num_dimensions();
    const int          input_dims    = input->num_dimensions();
    Coordinates        axis_local    = reduction_axis;

    for(unsigned int i = 0; i < axis_local.num_dimensions(); ++i)
    {
        // axis: The dimensions to reduce. Must be in the range [-rank(input_tensor), rank(input_tensor)).
        ARM_COMPUTE_RETURN_ERROR_ON(axis_local[i] < (-static_cast<int>(input->num_dimensions())));
        ARM_COMPUTE_RETURN_ERROR_ON(axis_local[i] >= static_cast<int>(input->num_dimensions()));
    }

    if(output->tensor_shape().total_size() != 0)
    {
        // Only validate if not using auto_init for the output tensor
        TensorShape out_shape = input->tensor_shape();
        // Validate output_shape only if not using auto_init
        convert_negative_axis(axis_local, input_dims);
        std::sort(axis_local.begin(), axis_local.begin() + reduction_ops);
        for(unsigned int i = 0; i < reduction_ops; ++i)
        {
            ARM_COMPUTE_RETURN_ERROR_ON(axis_local[i] > 3);
            ARM_COMPUTE_RETURN_ERROR_ON(static_cast<unsigned int>(axis_local[i]) > input->num_dimensions() - 1);
            if(output->total_size() > 0 && keep_dims)
            {
                ARM_COMPUTE_RETURN_ERROR_ON(output->dimension(axis_local[i]) != 1);
            }
            if(keep_dims)
            {
                out_shape.set(axis_local[i], 1);
            }
            else
            {
                ARM_COMPUTE_RETURN_ERROR_ON(i > static_cast<unsigned int>(axis_local[i]));
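                // Dimensions removed in earlier iterations shift the remaining indices down,
                // so the sorted axis value is offset by i before removal.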
                const unsigned int remove_index = axis_local[i] - i;
                ARM_COMPUTE_RETURN_ERROR_ON(remove_index >= out_shape.num_dimensions());
                out_shape.remove_dimension(remove_index);
            }
        }
        const TensorInfo out_info = input->clone()->set_tensor_shape(out_shape);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &out_info);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(output, &out_info);
    }
    return Status{};
}

Status NEReduceMean::validate(const ITensorInfo *input, const Coordinates &reduction_axis, bool keep_dims, const ITensorInfo *output)
{
    return validate_config(input, reduction_axis, keep_dims, output);
}

void NEReduceMean::configure(ITensor *input, const Coordinates &reduction_axis, bool keep_dims, ITensor *output)
{
    // Perform validate step
    ARM_COMPUTE_ERROR_THROW_ON(NEReduceMean::validate(input->info(), reduction_axis, keep_dims, output->info()));
    // Output auto initialization if not yet initialized
    const TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_reduce_mean_shape(input, reduction_axis, keep_dims);
    auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(output_shape));

    _reduction_ops = reduction_axis.num_dimensions();
    _reduction_kernels.resize(_reduction_ops);
    _reduced_outs.resize(_reduction_ops - (keep_dims ? 1 : 0));
    _keep_dims = keep_dims;

    Coordinates axis_local = reduction_axis;
    const int   input_dims = input->info()->num_dimensions();

    convert_negative_axis(axis_local, input_dims);

    // Perform reduction for every axis
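    // Each stage reduces a single axis; the output of stage i - 1 feeds stage i,
    // so a multi-axis mean is realised as a chain of single-axis MEAN_SUM reductions.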
    for(int i = 0; i < _reduction_ops; ++i)
    {
        TensorShape out_shape = i == 0 ? input->info()->tensor_shape() : (&_reduced_outs[i - 1])->info()->tensor_shape();
        out_shape.set(axis_local[i], 1);
        auto in = (i == 0) ? input : (&_reduced_outs[i - 1]);

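        // The last stage writes directly to the caller's output when keep_dims is true;
        // otherwise every stage writes to an intermediate tensor and the reshape below
        // drops the singleton dimensions.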
        if(i == _reduction_ops - 1 && keep_dims)
        {
            _reduction_kernels[i].configure(in, output, axis_local[i], ReductionOperation::MEAN_SUM);
        }
        else
        {
            _reduced_outs[i].allocator()->init(TensorInfo(out_shape, input->info()->num_channels(), input->info()->data_type(), input->info()->quantization_info()));
            _memory_group.manage(&_reduced_outs[i]);
            _reduction_kernels[i].configure(in, &_reduced_outs[i], axis_local[i], ReductionOperation::MEAN_SUM);
        }
    }

    // Allocate intermediate tensors
    for(int i = 0; i < _reduction_ops - (keep_dims ? 1 : 0); ++i)
    {
        _reduced_outs[i].allocator()->allocate();
    }

    // Configure reshape layer if we want to drop the dimensions
    if(!keep_dims)
    {
        TensorShape out_shape = input->info()->tensor_shape();
        // We have to sort the reduction axis vectors in order for remove_dimension
        // to work properly
        std::sort(axis_local.begin(), axis_local.begin() + _reduction_ops);
        for(int i = 0; i < _reduction_ops; ++i)
        {
            out_shape.remove_dimension(axis_local[i] - i);
        }
        auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(out_shape));
        _reshape.configure(&_reduced_outs[_reduction_ops - 1], output);
    }
}

void NEReduceMean::run()
{
    MemoryGroupResourceScope scope_mg(_memory_group);
    for(auto &kernel : _reduction_kernels)
    {
        kernel.run();
    }

    if(!_keep_dims)
    {
        _reshape.run();
    }
}
} // namespace arm_compute
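A minimal usage sketch (not part of the file above), assuming the standard Tensor, TensorInfo and TensorShape runtime classes; the tensor names, the (8, 8, 16, 1) FP32 input shape and the reduction over dimensions 0 and 1 are illustrative only.

#include "arm_compute/runtime/NEON/functions/NEReduceMean.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    Tensor input;
    Tensor output;

    // Hypothetical 4-D FP32 input of shape (W, H, C, N) = (8, 8, 16, 1).
    input.allocator()->init(TensorInfo(TensorShape(8U, 8U, 16U, 1U), 1, DataType::F32));

    // Reduce over dimensions 0 and 1 without keeping them; configure()
    // auto-initializes the empty output to shape (16, 1).
    NEReduceMean reduce_mean;
    reduce_mean.configure(&input, Coordinates(0, 1), false, &output);

    // Allocate backing memory after configuration, then run the function.
    input.allocator()->allocate();
    output.allocator()->allocate();
    reduce_mean.run();

    return 0;
}

NEReduceMean::validate can be called with the corresponding ITensorInfo objects beforehand to check the same configuration without allocating any memory.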