Compute Library 21.11 - CLDepthwiseConvolutionLayer.cpp
1 /*
2  * Copyright (c) 2017-2021 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 #include "arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h"
25 
26 #include "arm_compute/core/CL/CLHelpers.h"
27 #include "arm_compute/core/CL/ICLTensor.h"
28 #include "arm_compute/core/Helpers.h"
29 #include "arm_compute/core/PixelValue.h"
30 #include "arm_compute/core/Utils.h"
31 #include "arm_compute/core/utils/misc/ShapeCalculator.h"
32 #include "arm_compute/core/utils/quantization/AsymmHelpers.h"
33 #include "arm_compute/runtime/CL/CLScheduler.h"
34 #include "src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h"
35 
36 #include "src/common/utils/Log.h"
37 
38 namespace arm_compute
39 {
40 using namespace arm_compute::misc;
42 
43 namespace
44 {
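// Heuristic that decides whether the depthwise weights should be exported to a cl_image object.
// The export is rejected when the weights cannot be exported at all, for 1x1 kernels, for depth
// multipliers greater than 1, and on Mali-G71 or Midgard GPUs.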
45 bool export_weights_to_cl_image_heuristic(const ITensorInfo *weights, unsigned int depth_multiplier, GPUTarget gpu_target)
46 {
47  if(!export_weights_to_cl_image(weights))
48  {
49  return false;
50  }
51 
52  const size_t idx_w = get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::WIDTH);
53  const size_t idx_h = get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::HEIGHT);
54  const size_t kernel_w = weights->tensor_shape()[idx_w];
55  const size_t kernel_h = weights->tensor_shape()[idx_h];
56 
57  if((kernel_w == 1) && (kernel_h == 1))
58  {
59  return false;
60  }
61 
62  if(depth_multiplier > 1)
63  {
64  return false;
65  }
66 
67  if(gpu_target == GPUTarget::G71 || get_arch_from_target(gpu_target) == GPUTarget::MIDGARD)
68  {
69  return false;
70  }
71 
72  return true;
73 }
74 
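// Fill the DWCComputeKernelInfo descriptor consumed by CLDepthwiseConvolutionLayerNativeKernel:
// decide whether to export the weights to cl_image, and pick the n0 (channel) and m0 (row)
// vectorization factors from the data type, depth multiplier, stride, dilation and kernel width.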
75 void initialize_dwc_native_compute_info(DWCComputeKernelInfo &dwc_compute_info, const ITensorInfo *weights, const PadStrideInfo &conv_info, const Size2D &dilation, unsigned int depth_multiplier,
76  GPUTarget gpu_target)
77 {
78  if(!is_data_type_float(weights->data_type()))
79  {
80  dwc_compute_info.export_weights_to_cl_image = false;
81  dwc_compute_info.n0 = (depth_multiplier == 1) ? 4 : 1;
82  if(conv_info.stride().first == 1 && dilation.x() == 1 && depth_multiplier == 1)
83  {
84  dwc_compute_info.m0 = 2;
85  }
86  else
87  {
88  dwc_compute_info.m0 = 1;
89  }
90 
91  return;
92  }
93 
94  // Floating point path
95 
96  // First check if we can export to cl_image.
97  dwc_compute_info.export_weights_to_cl_image = export_weights_to_cl_image_heuristic(weights, depth_multiplier, gpu_target);
98 
99  // Set n0
100  if(depth_multiplier == 1)
101  {
102  if(dwc_compute_info.export_weights_to_cl_image == false && weights->data_type() == DataType::F16)
103  {
104  dwc_compute_info.n0 = 8;
105  }
106  else
107  {
108  dwc_compute_info.n0 = 4;
109  }
110  }
111  else
112  {
113  dwc_compute_info.n0 = 1;
114  }
115 
116  dwc_compute_info.n0 = adjust_vec_size(dwc_compute_info.n0, weights->dimension(0));
117 
118  // Set m0 only if stride_x == 1 and dilation_x == 1
119  if(conv_info.stride().first == 1 && dilation.x() == 1)
120  {
121  const size_t idx_w = get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::WIDTH);
122  const size_t kernel_w = weights->tensor_shape()[idx_w];
123 
124  dwc_compute_info.m0 = (kernel_w >= 9) || (kernel_w == 1) ? 1 : 2;
125  }
126  else
127  {
128  dwc_compute_info.m0 = 1;
129  }
130  return;
131 }
132 
133 } // namespace
134 
135 CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
136  : _memory_group(std::move(memory_manager)),
137  _dwc_native_kernel(std::make_unique<CLDepthwiseConvolutionLayerNativeKernel>()),
138  _permute_input_to_nhwc(),
139  _permute_weights_to_nhwc(),
140  _permute_output_to_nchw(),
141  _permuted_input(),
142  _permuted_weights(),
143  _permuted_output(),
144  _output_multipliers(),
145  _output_shifts(),
146  _original_weights(),
147  _input(),
148  _output(),
149  _needs_permute(false),
150  _is_prepared(false),
151  _is_quantized(false)
152 {
153 }
154 
155 CLDepthwiseConvolutionLayer::~CLDepthwiseConvolutionLayer() = default;
156 
157 void CLDepthwiseConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
158  unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation)
159 {
160  configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
161 }
162 
163 void CLDepthwiseConvolutionLayer::configure(const CLCompileContext &compile_context, ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases,
164  ICLTensor *output, const PadStrideInfo &conv_info,
165  unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation)
166 {
167  ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights);
168  ARM_COMPUTE_ERROR_THROW_ON(CLDepthwiseConvolutionLayer::validate(input->info(),
169  weights->info(),
170  biases != nullptr ? biases->info() : nullptr,
171  output != nullptr ? output->info() : input->info(),
172  conv_info,
173  depth_multiplier,
174  act_info,
175  dilation));
176  ARM_COMPUTE_LOG_PARAMS(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
177 
178  _is_quantized = is_data_type_quantized(input->info()->data_type());
179  _is_prepared = false;
180  _original_weights = weights;
181  _input = input;
182  _output = output;
183  _needs_permute = input->info()->data_layout() == DataLayout::NCHW;
184 
185  const GPUTarget gpu_target = CLScheduler::get().target();
186 
187  ICLTensor *input_to_use = input;
188  const ICLTensor *weights_to_use = weights;
189  ICLTensor *output_to_use = output;
190  if(_needs_permute)
191  {
192  _memory_group.manage(&_permuted_input);
193  _memory_group.manage(&_permuted_output);
194 
195  // Configure the function to transform the input tensor from NCHW -> NHWC
196  _permute_input_to_nhwc.configure(compile_context, input, &_permuted_input, PermutationVector(2U, 0U, 1U));
197  _permuted_input.info()->set_data_layout(DataLayout::NHWC);
198 
199  // Configure the function to transform the weights tensor from IHW -> HWI
200  _permute_weights_to_nhwc.configure(compile_context, weights, &_permuted_weights, PermutationVector(2U, 0U, 1U));
201  _permuted_weights.info()->set_data_layout(DataLayout::NHWC);
202 
203  // Set output quantization info before dwc kernel configure
204  _permuted_output.info()->set_quantization_info(output->info()->quantization_info());
205 
206  input_to_use = &_permuted_input;
207  weights_to_use = &_permuted_weights;
208  output_to_use = &_permuted_output;
209  }
210 
211  CLTensor *output_multipliers_to_use = nullptr;
212  CLTensor *output_shifts_to_use = nullptr;
213  if(_is_quantized)
214  {
215  const size_t idx_c = get_data_layout_dimension_index(weights->info()->data_layout(), DataLayoutDimension::CHANNEL);
216  const size_t num_filters = (is_data_type_quantized_per_channel(weights->info()->data_type())) ? weights->info()->dimension(idx_c) : 1;
217 
218  _output_multipliers.allocator()->init(TensorInfo(TensorShape(num_filters), 1, DataType::S32));
219  _output_shifts.allocator()->init(TensorInfo(TensorShape(num_filters), 1, DataType::S32));
220 
221  output_multipliers_to_use = &_output_multipliers;
222  output_shifts_to_use = &_output_shifts;
223  }
224 
225  DWCComputeKernelInfo dwc_native_compute_info;
226  initialize_dwc_native_compute_info(dwc_native_compute_info, weights_to_use->info(), conv_info, dilation, depth_multiplier, gpu_target);
227 
228  const ConvolutionInfo conv_kernel_info{ conv_info, depth_multiplier, act_info, dilation };
229 
230  _dwc_native_kernel->configure(compile_context, input_to_use, weights_to_use, biases, output_to_use,
231  dwc_native_compute_info, conv_kernel_info, output_multipliers_to_use, output_shifts_to_use);
232 
233  if(_needs_permute)
234  {
235  _permuted_input.allocator()->allocate();
236 
237  // Configure the function to transform the convoluted output to NCHW format
238  _permuted_output.info()->set_data_layout(DataLayout::NCHW);
239  _permute_output_to_nchw.configure(compile_context, &_permuted_output, output, PermutationVector(1U, 2U, 0U));
240  _permuted_output.allocator()->allocate();
241  }
242 
243  if(_is_quantized)
244  {
245  _output_multipliers.allocator()->allocate();
246  _output_shifts.allocator()->allocate();
247  }
248 }
249 
250 Status CLDepthwiseConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
251  const PadStrideInfo &conv_info,
252  unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation)
253 {
254  const bool in_place = input == output || output == nullptr;
255  if(in_place)
256  {
257  output = input;
258  }
259  ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, weights);
260  const size_t idx_w = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
261  const size_t idx_h = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);
262 
263  ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_w) + (weights->dimension(idx_w) - 1) * (dilation.x() - 1) > input->dimension(idx_w) + conv_info.pad_left() + conv_info.pad_right());
264  ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_h) + (weights->dimension(idx_h) - 1) * (dilation.y() - 1) > input->dimension(idx_h) + conv_info.pad_top() + conv_info.pad_bottom());
265 
266  const GPUTarget gpu_target = CLScheduler::get().target();
267 
268  const ConvolutionInfo conv_kernel_info{ conv_info, depth_multiplier, act_info, dilation };
269 
270  const bool needs_permute = input->data_layout() == DataLayout::NCHW;
271 
272  const bool is_quantized = is_data_type_quantized(input->data_type());
273 
274  TensorInfo output_multipliers_shifts_info(TensorInfo(TensorShape(1U), 1, DataType::S32));
275  if(is_quantized)
276  {
277  if(is_data_type_quantized_per_channel(weights->data_type()))
278  {
279  ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QSYMM8_PER_CHANNEL);
280 
281  const size_t idx_c = get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::CHANNEL);
282  output_multipliers_shifts_info.set_tensor_shape(TensorShape(weights->dimension(idx_c)));
283  }
284  else
285  {
286  ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
287  }
288  }
289 
290  if(needs_permute)
291  {
292  ARM_COMPUTE_RETURN_ERROR_ON_MSG(in_place, "In-place is supported only with NHWC data layout");
293  TensorShape permuted_input_shape = input->tensor_shape();
294  TensorShape permuted_weights_shape = weights->tensor_shape();
295  const ConvolutionInfo info{ conv_info, depth_multiplier, ActivationLayerInfo(), dilation };
296  TensorShape permuted_output_shape = shape_calculator::compute_depthwise_convolution_shape(*input, *weights, info);
297 
298  permute(permuted_input_shape, PermutationVector(2U, 0U, 1U));
299  permute(permuted_weights_shape, PermutationVector(2U, 0U, 1U));
300  permute(permuted_output_shape, PermutationVector(2U, 0U, 1U));
301 
302  const TensorInfo permuted_input = input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(permuted_input_shape).set_data_layout(DataLayout::NHWC);
303  const TensorInfo permuted_weights = weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(permuted_weights_shape).set_data_layout(DataLayout::NHWC);
304  const TensorInfo permuted_output = output->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(permuted_output_shape).set_data_layout(DataLayout::NHWC);
305 
306  ARM_COMPUTE_RETURN_ON_ERROR(CLPermute::validate(input, &permuted_input, PermutationVector(2U, 0U, 1U)));
307  ARM_COMPUTE_RETURN_ON_ERROR(CLPermute::validate(weights, &permuted_weights, PermutationVector(2U, 0U, 1U)));
308 
309  DWCComputeKernelInfo dwc_native_compute_info;
310  initialize_dwc_native_compute_info(dwc_native_compute_info, &permuted_weights, conv_info, dilation, depth_multiplier, gpu_target);
311 
312  ARM_COMPUTE_RETURN_ON_ERROR(CLDepthwiseConvolutionLayerNativeKernel::validate(&permuted_input, &permuted_weights, biases, &permuted_output,
313  dwc_native_compute_info, conv_kernel_info, &output_multipliers_shifts_info, &output_multipliers_shifts_info));
314  ARM_COMPUTE_RETURN_ON_ERROR(CLPermute::validate(&permuted_output, output, PermutationVector(1U, 2U, 0U)));
315  }
316  else
317  {
318  DWCComputeKernelInfo dwc_native_compute_info;
319  initialize_dwc_native_compute_info(dwc_native_compute_info, weights, conv_info, dilation, depth_multiplier, gpu_target);
320  ARM_COMPUTE_RETURN_ON_ERROR(CLDepthwiseConvolutionLayerNativeKernel::validate(input, weights, biases, output, dwc_native_compute_info, conv_kernel_info, &output_multipliers_shifts_info,
321  &output_multipliers_shifts_info));
322  }
323  return Status{};
324 }
325 
326 void CLDepthwiseConvolutionLayer::run()
327 {
328  prepare();
329 
330  MemoryGroupResourceScope scope_mg(_memory_group);
331 
332  if(_needs_permute)
333  {
334  _permute_input_to_nhwc.run();
335  }
336  CLScheduler::get().enqueue(*_dwc_native_kernel);
337  if(_needs_permute)
338  {
339  _permute_output_to_nchw.run();
340  }
341 }
342 
343 void CLDepthwiseConvolutionLayer::prepare()
344 {
345  if(!_is_prepared)
346  {
347  if(_is_quantized)
348  {
349  _output_multipliers.map();
350  _output_shifts.map();
351  quantization::compute_quantized_multipliers_and_shifts(_input->info(),
352  _original_weights->info(),
353  _output != nullptr ? _output->info() : _input->info(),
354  reinterpret_cast<int32_t *>(_output_multipliers.ptr_to_element(Coordinates(0))),
355  reinterpret_cast<int32_t *>(_output_shifts.ptr_to_element(Coordinates(0))));
356  _output_multipliers.unmap();
357  _output_shifts.unmap();
358  }
359 
360  if(_needs_permute)
361  {
362  ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());
363 
364  _permuted_weights.allocator()->allocate();
365  _permute_weights_to_nhwc.run();
366  _original_weights->mark_as_unused();
367  }
368  _is_prepared = true;
369  }
370 }
371 } // namespace arm_compute
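
For context, the following is a minimal usage sketch of CLDepthwiseConvolutionLayer built from the public API shown above. The tensor shapes (a 1x112x112x32 FP32 NHWC input with a 3x3 kernel), padding and stride are illustrative assumptions, not values taken from this file.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h"

using namespace arm_compute;

int main()
{
    // Initialise the OpenCL context, command queue and kernel library.
    CLScheduler::get().default_init();

    // Illustrative NHWC tensors: shapes are stored as (C, W, H, N).
    CLTensor input, weights, biases, output;
    input.allocator()->init(TensorInfo(TensorShape(32U, 112U, 112U, 1U), 1, DataType::F32));
    weights.allocator()->init(TensorInfo(TensorShape(32U, 3U, 3U), 1, DataType::F32));
    biases.allocator()->init(TensorInfo(TensorShape(32U), 1, DataType::F32));
    output.allocator()->init(TensorInfo(TensorShape(32U, 112U, 112U, 1U), 1, DataType::F32));
    input.info()->set_data_layout(DataLayout::NHWC);
    weights.info()->set_data_layout(DataLayout::NHWC);
    output.info()->set_data_layout(DataLayout::NHWC);

    // 3x3 depthwise convolution, stride 1, SAME padding, depth multiplier 1.
    const PadStrideInfo conv_info(1, 1, 1, 1);
    CLDepthwiseConvolutionLayer dwc;
    dwc.configure(&input, &weights, &biases, &output, conv_info, 1);

    // Allocate the backing OpenCL buffers after configuration.
    input.allocator()->allocate();
    weights.allocator()->allocate();
    biases.allocator()->allocate();
    output.allocator()->allocate();

    // ... fill input, weights and biases (e.g. via map()/unmap()) ...

    dwc.run(); // run() calls prepare() on the first execution
    CLScheduler::get().sync();
    return 0;
}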