Compute Library 22.11
CLDepthwiseConvolutionLayer.cpp
/*
 * Copyright (c) 2017-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h"

#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/PixelValue.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h"

#include "src/common/utils/Log.h"

namespace arm_compute
{
using namespace arm_compute::misc;
using namespace arm_compute::misc::shape_calculator;

namespace
{
bool export_weights_to_cl_image_heuristic(const ITensorInfo *weights, unsigned int depth_multiplier, GPUTarget gpu_target)
{
    if(!export_to_cl_image(weights))
    {
        return false;
    }

    const size_t idx_w    = get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_h    = get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::HEIGHT);
    const size_t kernel_w = weights->tensor_shape()[idx_w];
    const size_t kernel_h = weights->tensor_shape()[idx_h];

    if(gpu_target == GPUTarget::G71 || get_arch_from_target(gpu_target) == GPUTarget::MIDGARD)
    {
        return false;
    }

    if((kernel_w == 1) && (kernel_h == 1))
    {
        return false;
    }

    if(depth_multiplier > 1)
    {
        if((depth_multiplier % 4) != 0)
        {
            return false;
        }
    }

    return true;
}
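
// Example of the heuristic above (explanatory comment; device names are
// assumptions for illustration): 3x3 F16 weights with depth_multiplier == 1
// on a Bifrost G76 or Valhall G77 pass every check and are read through a
// cl_image; a 1x1 kernel, a G71 or Midgard target, or a depth_multiplier
// greater than 1 that is not a multiple of 4 falls back to cl_buffer reads.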

void initialize_dwc_native_compute_info(DWCComputeKernelInfo &dwc_compute_info, const ITensorInfo *input, const ITensorInfo *weights, const PadStrideInfo &conv_info, const Size2D &dilation,
                                        unsigned int depth_multiplier,
                                        GPUTarget gpu_target)
{
    ARM_COMPUTE_UNUSED(input);

    if(!is_data_type_float(weights->data_type()))
    {
        dwc_compute_info.export_weights_to_cl_image = false;
        dwc_compute_info.n0                         = (depth_multiplier == 1) ? 4 : 1;
        if(conv_info.stride().first == 1 && dilation.x() == 1 && depth_multiplier == 1)
        {
            dwc_compute_info.m0 = 2;
        }
        else
        {
            dwc_compute_info.m0 = 1;
        }

        return;
    }

    // Floating point path

    // First check if we can export to cl_image.
    dwc_compute_info.export_input_to_cl_image   = false;
    dwc_compute_info.export_weights_to_cl_image = export_weights_to_cl_image_heuristic(weights, depth_multiplier, gpu_target);

    // Set n0
    if(depth_multiplier == 1)
    {
        if(dwc_compute_info.export_weights_to_cl_image == false && weights->data_type() == DataType::F16)
        {
            dwc_compute_info.n0 = 8;
        }
        else
        {
            dwc_compute_info.n0 = 4;
        }
    }
    else
    {
        if((depth_multiplier % 4) == 0)
        {
            dwc_compute_info.n0 = 4;
        }
        else if((depth_multiplier % 2) == 0)
        {
            dwc_compute_info.n0 = 2;
        }
        else
        {
            dwc_compute_info.n0 = 1;
        }
    }

    dwc_compute_info.n0 = adjust_vec_size(dwc_compute_info.n0, weights->dimension(0));

    // Set m0 only if stride_x == 1 and dilation_x == 1
    if(conv_info.stride().first == 1 && dilation.x() == 1)
    {
        const size_t idx_w    = get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::WIDTH);
        const size_t kernel_w = weights->tensor_shape()[idx_w];

        if((kernel_w >= 9) || (kernel_w == 1))
        {
            dwc_compute_info.m0 = 1;
        }
        else
        {
            if(weights->data_type() == DataType::F16)
            {
                if((input->dimension(1) % 5) == 0)
                {
                    dwc_compute_info.m0 = 5;
                }
                else
                {
                    dwc_compute_info.m0 = 4;
                }
            }
            else
            {
                dwc_compute_info.m0 = 2;
            }
        }
    }
    else
    {
        dwc_compute_info.m0 = 1;
    }
    return;
}
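
// Worked example for the selection above (values assumed for illustration):
// F32 3x3 weights, depth_multiplier = 1, stride_x = 1, dilation_x = 1 and a
// channel count divisible by 4 give n0 = 4 (channels processed per work-item)
// and m0 = 2 (output columns per work-item); with stride_x > 1 the code always
// falls back to m0 = 1.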

} // namespace

CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(std::move(memory_manager)),
      _dwc_native_kernel(std::make_unique<CLDepthwiseConvolutionLayerNativeKernel>()),
      _permute_input_to_nhwc(),
      _permute_weights_to_nhwc(),
      _permute_output_to_nchw(),
      _permuted_input(),
      _permuted_weights(),
      _permuted_output(),
      _output_multipliers(),
      _output_shifts(),
      _original_weights(),
      _input(),
      _output(),
      _needs_permute(false),
      _is_prepared(false),
      _is_quantized(false)
{
}

CLDepthwiseConvolutionLayer::~CLDepthwiseConvolutionLayer() = default;

void CLDepthwiseConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
                                            unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation)
{
    configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
}
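
// Usage sketch (illustrative only; tensor names, shapes and the pad/stride
// values are assumptions, not part of this file):
//
//   CLTensor src, wei, bia, dst;
//   // ... initialise the tensor infos (NHWC), allocate and fill them ...
//   CLDepthwiseConvolutionLayer dwc;
//   dwc.configure(&src, &wei, &bia, &dst, PadStrideInfo(1, 1, 1, 1));
//   dwc.run(); // run() calls prepare() internally on first execution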

void CLDepthwiseConvolutionLayer::configure(const CLCompileContext &compile_context, ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases,
                                            ICLTensor *output, const PadStrideInfo &conv_info,
                                            unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights);
    ARM_COMPUTE_ERROR_THROW_ON(CLDepthwiseConvolutionLayer::validate(input->info(),
                                                                     weights->info(),
                                                                     biases != nullptr ? biases->info() : nullptr,
                                                                     output != nullptr ? output->info() : input->info(),
                                                                     conv_info,
                                                                     depth_multiplier,
                                                                     act_info,
                                                                     dilation));
    ARM_COMPUTE_LOG_PARAMS(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);

    _is_quantized     = is_data_type_quantized(input->info()->data_type());
    _is_prepared      = false;
    _original_weights = weights;
    _input            = input;
    _output           = output;
    _needs_permute    = input->info()->data_layout() == DataLayout::NCHW;

    const GPUTarget gpu_target = CLScheduler::get().target();

    ICLTensor       *input_to_use   = input;
    const ICLTensor *weights_to_use = weights;
    ICLTensor       *output_to_use  = output;
    if(_needs_permute)
    {
        _memory_group.manage(&_permuted_input);
        _memory_group.manage(&_permuted_output);

        // Configure the function to transform the input tensor from NCHW -> NHWC
        _permute_input_to_nhwc.configure(compile_context, input, &_permuted_input, PermutationVector(2U, 0U, 1U));
        _permuted_input.info()->set_data_layout(DataLayout::NHWC);

        // Configure the function to transform the weights tensor from IHW -> HWI
        _permute_weights_to_nhwc.configure(compile_context, weights, &_permuted_weights, PermutationVector(2U, 0U, 1U));
        _permuted_weights.info()->set_data_layout(DataLayout::NHWC);

        // Set output quantization info before dwc kernel configure
        _permuted_output.info()->set_quantization_info(output->info()->quantization_info());

        input_to_use   = &_permuted_input;
        weights_to_use = &_permuted_weights;
        output_to_use  = &_permuted_output;
    }
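
    // Note on the permutation vectors (explanatory comment; the shape is an
    // assumed example): permute() maps new_shape[i] = old_shape[perm[i]], so
    // PermutationVector(2U, 0U, 1U) turns an NCHW shape (W=8, H=8, C=16, N=1)
    // into the NHWC shape (C=16, W=8, H=8, N=1); PermutationVector(1U, 2U, 0U),
    // used for the output below, is its inverse and restores NCHW.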

    CLTensor *output_multipliers_to_use = nullptr;
    CLTensor *output_shifts_to_use      = nullptr;
    if(_is_quantized)
    {
        const size_t idx_c       = get_data_layout_dimension_index(weights->info()->data_layout(), DataLayoutDimension::CHANNEL);
        const size_t num_filters = (is_data_type_quantized_per_channel(weights->info()->data_type())) ? weights->info()->dimension(idx_c) : 1;

        _output_multipliers.allocator()->init(TensorInfo(TensorShape(num_filters), 1, DataType::S32));
        _output_shifts.allocator()->init(TensorInfo(TensorShape(num_filters), 1, DataType::S32));

        output_multipliers_to_use = &_output_multipliers;
        output_shifts_to_use      = &_output_shifts;
    }
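
    // Background (explanatory comment; the numbers are assumed for
    // illustration): for each filter the float rescale factor
    // input_scale * weights_scale / output_scale is decomposed into an integer
    // multiplier and a shift, e.g. 0.75 ~= 1610612736 / 2^31 with shift 0, so
    // the kernel can requantize with integer arithmetic only. Per-channel
    // quantized weights (QSYMM8_PER_CHANNEL) store one pair per filter, hence
    // num_filters entries above.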

    DWCComputeKernelInfo dwc_native_compute_info;
    initialize_dwc_native_compute_info(dwc_native_compute_info, input->info(), weights_to_use->info(), conv_info, dilation, depth_multiplier, gpu_target);

    const ConvolutionInfo conv_kernel_info{ conv_info, depth_multiplier, act_info, dilation };

    _dwc_native_kernel->set_target(gpu_target);
    _dwc_native_kernel->configure(compile_context, input_to_use, weights_to_use, biases, output_to_use,
                                  dwc_native_compute_info, conv_kernel_info, output_multipliers_to_use, output_shifts_to_use);

    if(_needs_permute)
    {
        _permuted_input.allocator()->allocate();

        // Configure the function to transform the convoluted output to NCHW format
        _permuted_output.info()->set_data_layout(DataLayout::NCHW);
        _permute_output_to_nchw.configure(compile_context, &_permuted_output, output, PermutationVector(1U, 2U, 0U));
        _permuted_output.allocator()->allocate();
    }

    if(_is_quantized)
    {
        _output_multipliers.allocator()->allocate();
        _output_shifts.allocator()->allocate();
    }
}

Status CLDepthwiseConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
                                             const PadStrideInfo &conv_info,
                                             unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation)
{
    const bool in_place = input == output || output == nullptr;
    if(in_place)
    {
        output = input;
    }
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, weights);
    const size_t idx_w = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_h = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);

    ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_w) + (weights->dimension(idx_w) - 1) * (dilation.x() - 1) > input->dimension(idx_w) + conv_info.pad_left() + conv_info.pad_right());
    ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_h) + (weights->dimension(idx_h) - 1) * (dilation.y() - 1) > input->dimension(idx_h) + conv_info.pad_top() + conv_info.pad_bottom());

    const GPUTarget gpu_target = CLScheduler::get().target();

    const ConvolutionInfo conv_kernel_info{ conv_info, depth_multiplier, act_info, dilation };

    const bool needs_permute = input->data_layout() == DataLayout::NCHW;

    const bool is_quantized = is_data_type_quantized(input->data_type());

    TensorInfo output_multipliers_shifts_info(TensorInfo(TensorShape(1U), 1, DataType::S32));
    if(is_quantized)
    {
        if(is_data_type_quantized_per_channel(weights->data_type()))
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QSYMM8_PER_CHANNEL);

            const size_t idx_c = get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::CHANNEL);
            output_multipliers_shifts_info.set_tensor_shape(TensorShape(weights->dimension(idx_c)));
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
        }
    }

    if(needs_permute)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(in_place, "In-place is supported only with NHWC data layout");
        TensorShape permuted_input_shape   = input->tensor_shape();
        TensorShape permuted_weights_shape = weights->tensor_shape();
        const ConvolutionInfo info{ conv_info, depth_multiplier, ActivationLayerInfo(), dilation };
        TensorShape permuted_output_shape = shape_calculator::compute_depthwise_convolution_shape(*input, *weights, info);

        permute(permuted_input_shape, PermutationVector(2U, 0U, 1U));
        permute(permuted_weights_shape, PermutationVector(2U, 0U, 1U));
        permute(permuted_output_shape, PermutationVector(2U, 0U, 1U));

        const TensorInfo permuted_input   = input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(permuted_input_shape).set_data_layout(DataLayout::NHWC);
        const TensorInfo permuted_weights = weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(permuted_weights_shape).set_data_layout(DataLayout::NHWC);
        const TensorInfo permuted_output  = output->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(permuted_output_shape).set_data_layout(DataLayout::NHWC);

        ARM_COMPUTE_RETURN_ON_ERROR(CLPermute::validate(input, &permuted_input, PermutationVector(2U, 0U, 1U)));
        ARM_COMPUTE_RETURN_ON_ERROR(CLPermute::validate(weights, &permuted_weights, PermutationVector(2U, 0U, 1U)));

        DWCComputeKernelInfo dwc_native_compute_info;
        initialize_dwc_native_compute_info(dwc_native_compute_info, input, &permuted_weights, conv_info, dilation, depth_multiplier, gpu_target);

        ARM_COMPUTE_RETURN_ON_ERROR(CLDepthwiseConvolutionLayerNativeKernel::validate(&permuted_input, &permuted_weights, biases, &permuted_output,
                                                                                      dwc_native_compute_info, conv_kernel_info, &output_multipliers_shifts_info, &output_multipliers_shifts_info));
        ARM_COMPUTE_RETURN_ON_ERROR(CLPermute::validate(&permuted_output, output, PermutationVector(1U, 2U, 0U)));
    }
    else
    {
        DWCComputeKernelInfo dwc_native_compute_info;
        initialize_dwc_native_compute_info(dwc_native_compute_info, input, weights, conv_info, dilation, depth_multiplier, gpu_target);
        ARM_COMPUTE_RETURN_ON_ERROR(CLDepthwiseConvolutionLayerNativeKernel::validate(input, weights, biases, output, dwc_native_compute_info, conv_kernel_info, &output_multipliers_shifts_info,
                                                                                      &output_multipliers_shifts_info));
    }
    return Status{};
}
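
// Validation sketch (illustrative only; the shapes and data type are
// assumptions, not part of this file):
//
//   TensorInfo src(TensorShape(16U, 32U, 32U), 1, DataType::F32);
//   TensorInfo wei(TensorShape(16U, 3U, 3U), 1, DataType::F32);
//   TensorInfo dst(TensorShape(16U, 30U, 30U), 1, DataType::F32);
//   src.set_data_layout(DataLayout::NHWC);
//   wei.set_data_layout(DataLayout::NHWC);
//   dst.set_data_layout(DataLayout::NHWC);
//   const Status s = CLDepthwiseConvolutionLayer::validate(&src, &wei, nullptr, &dst, PadStrideInfo(1, 1, 0, 0));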

void CLDepthwiseConvolutionLayer::run()
{
    prepare();

    MemoryGroupResourceScope scope_mg(_memory_group);

    if(_needs_permute)
    {
        _permute_input_to_nhwc.run();
    }
    CLScheduler::get().enqueue(*_dwc_native_kernel);
    if(_needs_permute)
    {
        _permute_output_to_nchw.run();
    }
}
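
// Execution order note (explanatory comment): for NCHW inputs run() enqueues
// permute(NCHW -> NHWC), then the native depthwise kernel, then
// permute(NHWC -> NCHW); for NHWC inputs only the kernel itself is enqueued.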

void CLDepthwiseConvolutionLayer::prepare()
{
    if(!_is_prepared)
    {
        if(_is_quantized)
        {
            _output_multipliers.map();
            _output_shifts.map();
            quantization::compute_quantized_multipliers_and_shifts(_input->info(),
                                                                   _original_weights->info(),
                                                                   _output != nullptr ? _output->info() : _input->info(),
                                                                   reinterpret_cast<int32_t *>(_output_multipliers.ptr_to_element(Coordinates(0))),
                                                                   reinterpret_cast<int32_t *>(_output_shifts.ptr_to_element(Coordinates(0))));
            _output_multipliers.unmap();
            _output_shifts.unmap();
        }

        if(_needs_permute)
        {
            ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());

            _permuted_weights.allocator()->allocate();
            _permute_weights_to_nhwc.run();
            _original_weights->mark_as_unused();
        }
        _is_prepared = true;
    }
}
} // namespace arm_compute