Compute Library 22.11
ClDirectConv2d.cpp
/*
 * Copyright (c) 2021-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "src/common/utils/Log.h"

using namespace arm_compute::cl_direct_conv;

namespace arm_compute
{
namespace opencl
{
namespace
{
// Build a pack that runs the unfused activation in place on the convolution output
ITensorPack select_activation_src_dst(ITensorPack &tensors)
{
    ITensorPack pack;
    pack.add_tensor(TensorType::ACL_SRC, tensors.get_tensor(TensorType::ACL_DST));
    pack.add_tensor(TensorType::ACL_DST, tensors.get_tensor(TensorType::ACL_DST));
    return pack;
}

DirectConvComputeKernelInfo config_direct_convolution_nhwc(const ITensorInfo *src, const ITensorInfo *weights, const PadStrideInfo &conv_info)
{
    // Get GPU target
    GPUTarget gpu_target = CLScheduler::get().target();

    std::unique_ptr<IClDirectConvKernelConfig> t = ClDirectConvKernelConfigurationFactory::create(gpu_target);

    return t->configure(src, weights, conv_info);
}
} // namespace

void ClDirectConv2d::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst,
                               const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights, dst);
    ARM_COMPUTE_LOG_PARAMS(src, weights, biases, dst, conv_info, act_info);

    // Initialize the direct convolution descriptor
    const DirectConvComputeKernelInfo desc = config_direct_convolution_nhwc(src, weights, conv_info);

    // Configure direct convolution kernel
    const ActivationLayerInfo conv2d_act_info = (src->data_layout() == DataLayout::NHWC && is_data_type_float(src->data_type())) ? act_info : ActivationLayerInfo();
    auto k = std::make_unique<kernels::ClDirectConv2dKernel>();
    k->set_target(CLScheduler::get().target());
    k->configure(compile_context, src, weights, biases, dst, conv_info, conv2d_act_info, desc);
    _direct_conv_kernel = std::move(k);

    // Configure border handler
    PixelValue zero_value(0.f);
    if(is_data_type_quantized_asymmetric(src->data_type()))
    {
        zero_value = PixelValue(0, src->data_type(), src->quantization_info());
    }
    auto b = std::make_unique<CLFillBorderKernel>();
    b->configure(compile_context, src, _direct_conv_kernel->border_size(), BorderMode::CONSTANT, zero_value);
    _src_border_handler = std::move(b);

    // Fused activation is currently supported only for NHWC and floating point types;
    // otherwise run the activation as a separate kernel on the convolution output
    if(act_info.enabled() && !conv2d_act_info.enabled())
    {
        auto a = std::make_unique<kernels::ClActivationKernel>();
        a->configure(compile_context, dst, dst, act_info);
        _activation_kernel = std::move(a);
    }

    // Tune kernels
    CLScheduler::get().tune_kernel_static(*_direct_conv_kernel);
}

Status ClDirectConv2d::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst,
                                const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info)
{
    // Initialize the direct convolution descriptor
    const DirectConvComputeKernelInfo desc = config_direct_convolution_nhwc(src, weights, conv_info);

    ARM_COMPUTE_RETURN_ON_ERROR(kernels::ClDirectConv2dKernel::validate(src, weights, biases, dst, conv_info, ActivationLayerInfo(), desc));
    if(act_info.enabled())
    {
        ARM_COMPUTE_RETURN_ON_ERROR(kernels::ClActivationKernel::validate(dst, dst, act_info));
    }
    return Status{};
}

void ClDirectConv2d::run(ITensorPack &tensors)
{
    // Run border handler
    CLScheduler::get().enqueue_op(*_src_border_handler.get(), tensors, false);
    // Run direct convolution
    CLScheduler::get().enqueue_op(*_direct_conv_kernel.get(), tensors, false);
    // Run activation kernel (only present when activation could not be fused)
    if(_activation_kernel)
    {
        auto act_pack = select_activation_src_dst(tensors);
        CLScheduler::get().enqueue_op(*_activation_kernel.get(), act_pack, false);
    }
}
} // namespace opencl
} // namespace arm_compute
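
The listing above only defines the operator; nothing in this file shows how it is driven. The sketch below is a minimal usage example, not library code: it validates the tensor metadata, configures the kernels once, then feeds concrete CL tensors through an ITensorPack at run time. The internal header path "src/gpu/cl/operators/ClDirectConv2d.h", the default-constructibility of the operator, and the ACL_SRC_0/ACL_SRC_1/ACL_DST slot mapping are assumptions that may differ between releases.

#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/experimental/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "src/gpu/cl/operators/ClDirectConv2d.h" // assumed internal path, not part of the public API

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init(); // create a default OpenCL context and queue

    // NHWC tensors: TensorShape is ordered (C, W, H, N)
    TensorInfo src_info(TensorShape(64U, 56U, 56U, 1U), 1, DataType::F32); // 1x56x56x64 input
    TensorInfo wei_info(TensorShape(64U, 3U, 3U, 32U), 1, DataType::F32);  // 32 filters of 3x3x64
    TensorInfo dst_info(TensorShape(32U, 54U, 54U, 1U), 1, DataType::F32); // 1x54x54x32 output
    src_info.set_data_layout(DataLayout::NHWC);
    wei_info.set_data_layout(DataLayout::NHWC);
    dst_info.set_data_layout(DataLayout::NHWC);

    const PadStrideInfo       conv_info(1, 1, 0, 0); // stride 1, no padding
    const ActivationLayerInfo act_info(ActivationLayerInfo::ActivationFunction::RELU);

    // Check the configuration before paying for kernel compilation (no bias in this sketch)
    const Status status = opencl::ClDirectConv2d::validate(&src_info, &wei_info, nullptr, &dst_info, conv_info, act_info);
    if(status.error_code() != ErrorCode::OK)
    {
        return 1;
    }

    opencl::ClDirectConv2d conv; // assumes the operator is default-constructible
    conv.configure(CLKernelLibrary::get().get_compile_context(), &src_info, &wei_info, nullptr, &dst_info, conv_info, act_info);

    // Back the tensor infos with CL tensors and run
    CLTensor src, weights, dst;
    src.allocator()->init(src_info);
    weights.allocator()->init(wei_info);
    dst.allocator()->init(dst_info);
    src.allocator()->allocate();
    weights.allocator()->allocate();
    dst.allocator()->allocate();

    ITensorPack pack;
    pack.add_tensor(TensorType::ACL_SRC_0, &src);     // assumed slot: input
    pack.add_tensor(TensorType::ACL_SRC_1, &weights); // assumed slot: weights
    pack.add_tensor(TensorType::ACL_DST, &dst);       // assumed slot: output
    conv.run(pack);
    CLScheduler::get().sync();

    return 0;
}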