Compute Library
 20.02.1
CLSoftmaxLayer.cpp
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2017-2019 ARM Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
25 
#include "arm_compute/runtime/CL/functions/CLSoftmaxLayer.h"

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "support/ToolchainSupport.h"
34 
35 namespace arm_compute
36 {
37 template <bool IS_LOG>
38 CLSoftmaxLayerGeneric<IS_LOG>::CLSoftmaxLayerGeneric(std::shared_ptr<IMemoryManager> memory_manager)
39  : _memory_group(std::move(memory_manager)), _max_shift_exp_sum_kernel(), _norm_kernel(), _flatten_kernel_ptr(), _reshape_kernel(), _max(), _sum(), _tmp(), _input_flattened(), _output_flattened(),
40  _needs_flattening(false)
41 {
42 }
43 
44 template <bool IS_LOG>
46 {
47  // Flatten the input
49 
50  // Initialize the flat input
51  _input_flattened.allocator()->init(input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_flatten));
52 
53  // If we need to flatten the input, we can use CLFlattenKernel or CLReshapeKernel
54  // If flattening on the third axes, we use CLFlattenKernel.
55  // In all other cases we have to use CLReshapeKernel
56  if(axis != 3)
57  {
58  auto reshape_kernel_ptr = support::cpp14::make_unique<CLReshapeLayerKernel>();
59  reshape_kernel_ptr->configure(input, &_input_flattened);
60  _flatten_kernel_ptr = std::move(reshape_kernel_ptr);
61  }
62  else
63  {
64  auto flatten_kernel_ptr = support::cpp14::make_unique<CLFlattenLayerKernel>();
65  flatten_kernel_ptr->configure(input, &_input_flattened);
66  _flatten_kernel_ptr = std::move(flatten_kernel_ptr);
67  }
68 
69  // We need to init the output tensor here. Indeed, the reshape kernel expects
70  // both tensors to be already initialized
71  auto_init_if_empty(*output->info(), *input->info()->clone());
72 }
73 
74 template <bool IS_LOG>
75 void CLSoftmaxLayerGeneric<IS_LOG>::configure(const ICLTensor *input, ICLTensor *output, float beta, size_t axis)
76 {
77  // Perform validation step
80 
81  // We don't need flattening only in the case the input is 2D and axis is 1
82  _needs_flattening = axis != 1;
83 
84  // If we are dealing with a 4D tensor, we will:
85  // - Flatten the input, so that we end up with a [width*height*depth] * batches 2D tensor
86  // - Execute all the pipeline (reduction + normalization) on the flattened tensor
87  // - Reshape the flattened output into the real output
88  if(_needs_flattening)
89  {
90  // Add to the memory manager _input_flattened
91  _memory_group.manage(&_input_flattened);
92 
93  // Cofigure _flatten_kernel and _input_flattened
94  configure_reshape_input_kernel(input, output, axis);
95  }
96 
97  // We want to deal with a 2D input. Either it is the flattened version of the original input (4D case)
98  // or it is the original input case (2D case)
99  const ICLTensor *input_2D = (_needs_flattening ? &_input_flattened : input);
100 
101  // Create intermediate tensors shapes
102  TensorInfo input_info = input_2D->info()->clone()->reset_padding().set_is_resizable(true);
103  DataType tmp_data_type = is_data_type_quantized_asymmetric(input_2D->info()->data_type()) ? DataType::S32 : input_2D->info()->data_type();
104  TensorInfo tensor_info_tmp(input_info.clone()->set_data_type(tmp_data_type));
105  _tmp.allocator()->init(tensor_info_tmp);
106 
107  TensorShape max_sum_shape = input_2D->info()->tensor_shape();
108  max_sum_shape.set(0, 1);
109  _max.allocator()->init(input_info.clone()->set_tensor_shape(max_sum_shape));
110  _sum.allocator()->init(input_info.clone()->set_tensor_shape(max_sum_shape).set_data_type(tmp_data_type));
111 
112  // Set GPU target to kernels
113  _max_shift_exp_sum_kernel.set_target(CLScheduler::get().target());
114 
115  // Manage intermediate buffers
116  _memory_group.manage(&_tmp);
117  _memory_group.manage(&_max);
118  _memory_group.manage(&_sum);
119 
120  SoftmaxKernelInfo softmax_info;
121  softmax_info.beta = beta;
122  softmax_info.is_log = IS_LOG;
123  softmax_info.input_data_type = input_2D->info()->data_type();
124 
125  // Configure kernels
126  _max_shift_exp_sum_kernel.configure(input_2D, &_max, &_tmp, &_sum, softmax_info);
127 
128  if(_needs_flattening)
129  {
130  // Add to the memory manager _output_flattened
131  _memory_group.manage(&_output_flattened);
132 
133  // The normalization kernel stores the result in a flat output tensor
134  _norm_kernel.configure(&_tmp, &_sum, &_output_flattened, softmax_info);
135 
136  // Reshape the flat output into a the requested (4D) output
137  _reshape_kernel.configure(&_output_flattened, output);
138 
139  // Allocate the intermediate flat tensors
140  _input_flattened.allocator()->allocate();
141  _output_flattened.allocator()->allocate();
142  }
143  else
144  {
145  // Softmax 2D case
146  _norm_kernel.configure(&_tmp, &_sum, output, softmax_info);
147  }
148 
149  // Allocate intermediate buffers
150  _tmp.allocator()->allocate();
151  _max.allocator()->allocate();
152  _sum.allocator()->allocate();
153 }
154 
155 template <bool IS_LOG>
157 {
159  ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->num_dimensions() > 4, "Only up to 4 dimensions are supported");
160  ARM_COMPUTE_UNUSED(beta);
161 
162  // Create intermediate tensor info
163  DataType tmp_data_type = is_data_type_quantized_asymmetric(input->data_type()) ? DataType::S32 : input->data_type();
164  TensorInfo tensor_info_tmp(input->clone()->set_data_type(tmp_data_type).set_is_resizable(true));
165 
166  TensorShape max_sum_shape = input->tensor_shape();
167  max_sum_shape.set(0, 1);
168  TensorInfo tensor_info_max(input->clone()->set_tensor_shape(max_sum_shape).set_is_resizable(true));
169  TensorInfo tensor_info_sum(input->clone()->set_tensor_shape(max_sum_shape).set_data_type(tmp_data_type).set_quantization_info(QuantizationInfo()).set_is_resizable(true));
170 
171  const bool needs_flattening = (axis != 1);
172 
173  if(needs_flattening)
174  {
176  TensorInfo tensor_info_flat(input->clone()->set_tensor_shape(shape_flatten).set_is_resizable(true));
177 
178  if(axis != 3)
179  {
181  }
182  else
183  {
185  }
186  }
187 
188  SoftmaxKernelInfo softmax_info;
189  softmax_info.beta = beta;
190  softmax_info.is_log = IS_LOG;
191  softmax_info.input_data_type = input->data_type();
192 
193  ARM_COMPUTE_RETURN_ON_ERROR(CLLogits1DMaxShiftExpSumKernel::validate(input, &tensor_info_max, &tensor_info_tmp, &tensor_info_sum));
194  ARM_COMPUTE_RETURN_ON_ERROR(CLLogits1DNormKernel::validate(&tensor_info_tmp, &tensor_info_sum, output, softmax_info));
195 
196  if(needs_flattening)
197  {
199  TensorInfo tensor_info_flat(input->clone()->set_tensor_shape(shape_flatten).set_is_resizable(true));
200  }
201 
202  return Status{};
203 }
204 
205 template <bool IS_LOG>
207 {
208  MemoryGroupResourceScope scope_mg(_memory_group);
209 
210  if(_needs_flattening)
211  {
212  CLScheduler::get().enqueue(*_flatten_kernel_ptr, false);
213  }
214 
215  CLScheduler::get().enqueue(_max_shift_exp_sum_kernel, false);
216  CLScheduler::get().enqueue(_norm_kernel, !_needs_flattening);
217 
218  if(_needs_flattening)
219  {
220  CLScheduler::get().enqueue(_reshape_kernel, true);
221  }
222 }
223 
224 template class CLSoftmaxLayerGeneric<false>;
225 template class CLSoftmaxLayerGeneric<true>;
226 
227 } // namespace arm_compute
void run() override
Run the kernels contained in the function.
Shape of a tensor.
Definition: TensorShape.h:39
static Status validate(const ITensorInfo *input, const ITensorInfo *output, float beta=1.0f, size_t axis=1)
Static function to check if given info will lead to a valid configuration of CLSoftmaxLayer.
void configure(const ICLTensor *input, ICLTensor *output, float beta=1.0f, size_t axis=1)
Set the input and output tensors.
CLSoftmaxLayerGeneric(std::shared_ptr< IMemoryManager > memory_manager=nullptr)
Constructor.
static Status validate(const ITensorInfo *input, const ITensorInfo *sum, const ITensorInfo *output, const SoftmaxKernelInfo &info)
Static function to check if given info will lead to a valid configuration of CLLogits1DNormKernel.
static CLScheduler & get()
Access the scheduler singleton.
Definition: CLScheduler.cpp:99
float beta
A scaling factor for the exponent with default value 1.0.
#define ARM_COMPUTE_RETURN_ON_ERROR(status)
Checks if a status contains an error and returns it.
Definition: Error.h:204
virtual DataType data_type() const =0
Data type used for each element of the tensor.
TensorShape compute_softmax_shape(const ITensorInfo *input, size_t axis=1)
Calculate the softmax output shape of a tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_ERROR_THROW_ON(status)
Definition: Error.h:455
Status class.
Definition: Error.h:52
Copyright (c) 2017-2020 ARM Limited.
bool auto_init_if_empty(ITensorInfo &info, const TensorShape &shape, int num_channels, DataType data_type, QuantizationInfo quantization_info=QuantizationInfo())
Auto initialize the tensor info (shape, number of channels and data type) if the current assignment is empty.
Definition: Helpers.inl:202
1 channel, 1 S32 per channel
Basic function to compute a SoftmaxLayer.
Quantization information.
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
Definition: Error.h:152
virtual const TensorShape & tensor_shape() const =0
Size for each dimension of the tensor.
DataType input_data_type
Input tensor data type.
bool is_log
Flag used to perform Log Softmax operation.
static Status validate(const ITensorInfo *input, const ITensorInfo *output)
Static function to check if given info will lead to a valid configuration of CLFlattenLayerKernel.
virtual std::unique_ptr< T > clone() const =0
Provide a clone of the current object of class T.
virtual ITensorInfo * info() const =0
Interface to be implemented by the child class to return the tensor's metadata.
static Status validate(const ITensorInfo *input, const ITensorInfo *output)
Static function to check if given info will lead to a valid configuration of CLReshapeLayerKernel.
void enqueue(ICLKernel &kernel, bool flush=true)
Schedule the execution of the passed kernel if possible.
bool is_data_type_quantized_asymmetric(DataType dt)
Check if a given data type is of asymmetric quantized type.
Definition: Utils.h:1139
void init(Format format)
Initialize the tensor info with just a format.
Definition: TensorInfo.cpp:107
#define ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(...)
Definition: Validate.h:163
#define ARM_COMPUTE_ERROR_ON_NULLPTR(...)
Definition: Validate.h:161
Memory group resources scope handling class.
Definition: IMemoryGroup.h:82
Interface for OpenCL tensor.
Definition: ICLTensor.h:42
static Status validate(const ITensorInfo *input, const ITensorInfo *max, const ITensorInfo *output, const ITensorInfo *sum)
Static function to check if given info will lead to a valid configuration of CLLogits1DMaxShiftExpSumKernel.
#define ARM_COMPUTE_RETURN_ERROR_ON_MSG(cond, msg)
If the condition is true, an error is returned.
Definition: Error.h:244
TensorShape & set(size_t dimension, size_t value, bool apply_dim_correction=true)
Accessor to set the value of one of the dimensions.
Definition: TensorShape.h:78
Store the tensor's metadata.
Definition: TensorInfo.h:45
Descriptor used by the softmax kernels.
DataType
Available data types.
Definition: Types.h:75