Compute Library 21.02
BifrostTuner.cpp
/*
 * Copyright (c) 2018-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/runtime/CL/tuners/BifrostTuner.h"

#include "arm_compute/core/CL/CLHelpers.h"
#include "src/core/CL/CLKernels.h"
#include "support/Cast.h"

#include "src/core/gpu/cl/kernels/ClPoolingKernel.h"

namespace arm_compute
{
namespace tuners
{
namespace
{
/** Tunes a @ref CLDirectConvolutionLayerKernel for a Bifrost target
 *
 * @param[in] k Kernel to tune
 */
void tune_direct_convolution_kernel(CLDirectConvolutionLayerKernel &k)
{
    cl::NDRange lws_hint = k.lws_hint();

    const GPUTarget    gpu_target    = k.get_target();
    const DataType     dt            = k._input->info()->data_type();
    const TensorShape  weights_shape = k._weights->info()->tensor_shape();
    const TensorShape  inputs_shape  = k._input->info()->tensor_shape();
    const size_t       kernel_size   = weights_shape.x();
    const unsigned int stride_x      = k._conv_stride_x;
    const unsigned int stride_y      = k._conv_stride_y;

    if(gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72) && (kernel_size <= 5) && (stride_x == 1) && (stride_y == 1) && (dt == DataType::F32))
    {
        // Through extensive experimentation with over 30 representative tensor
        // shapes, we found a small number of local work size configurations
        // that result in nearly optimal execution times. Selecting the right
        // lws for a given shape, however, required a complex decision tree,
        // until we constructed a simple feature as described below.
        //
        // We started from the number of multiply-accumulate operations for a
        // convolution layer, which is equal to the product of the input
        // dimensions 0..2 and the weights dimensions 0..2. Unfortunately,
        // this resulted in ties between distinct shapes that required distinct
        // lws configurations. Replacing the width of the input with the kernel
        // size, however, resulted in nearly optimal predictions. We use underscores
        // in variable names to indicate when they are intentionally misleading.
        const size_t product_of_weights_dimensions = weights_shape[0] * weights_shape[1] * weights_shape[2];
        const size_t product_of_input_dimensions_  = inputs_shape[0] * inputs_shape[1] * inputs_shape[2];
        const float  mega_ops_                     = 1e-6 * product_of_weights_dimensions * product_of_input_dimensions_;
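        //
        // Worked example (editorial note, not part of the original source):
        // for a 3x3 kernel with 64 input channels over a 56x56x64 input,
        //   (3 * 3 * 64) * (56 * 56 * 64) * 1e-6 ≈ 115.6 "mega-ops",
        // which falls into the final branch of the kernel_size == 3 case
        // below and selects lws_hint = cl::NDRange(2, 1, 6).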

        switch(kernel_size)
        {
            case 1:
            {
                if(mega_ops_ < 1.f)
                {
                    lws_hint = cl::NDRange(1, 1, 8);
                }
                else if(mega_ops_ < 7.f)
                {
                    lws_hint = cl::NDRange(1, 1, 4);
                }
                else
                {
                    lws_hint = cl::NDRange(1, 1, 2);
                }
                break;
            }
            case 3:
            {
                if(mega_ops_ < 1.f)
                {
                    lws_hint = cl::NDRange(1, 1, 8);
                }
                else if(mega_ops_ < 13.f)
                {
                    lws_hint = cl::NDRange(2, 1, 4);
                }
                else if(mega_ops_ < 50.f)
                {
                    lws_hint = cl::NDRange(3, 1, 4);
                }
                else
                {
                    lws_hint = cl::NDRange(2, 1, 6);
                }
                break;
            }
            case 5:
            {
                if(mega_ops_ < 2.f || mega_ops_ > 80.f)
                {
                    lws_hint = cl::NDRange(2, 1, 4);
                }
                else
                {
                    lws_hint = cl::NDRange(2, 1, 8);
                }
                break;
            }
            default:
                break;
        }
        k.set_lws_hint(lws_hint);
    }
}

void tune_col2im_kernel(CLCol2ImKernel &k)
{
    cl::NDRange     lws_hint   = k.lws_hint();
    const GPUTarget gpu_target = k.get_target();

    // Configure the local work size for Bifrost with a value obtained
    // via exhaustive autotuning over 30 representative tensor shapes.
    if(gpu_target_is_in(gpu_target,
                        GPUTarget::G71, GPUTarget::G72, GPUTarget::G76,
                        GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT,
                        GPUTarget::G52, GPUTarget::G52LIT))
    {
        if((k._convolved_dims.width == 7) || (k._convolved_dims.width == 14))
        {
            lws_hint = cl::NDRange(1, 7, 1);
        }
        else
        {
            lws_hint = cl::NDRange(1, 8, 1);
        }
    }

    k.set_lws_hint(lws_hint);
}

void tune_im2col_kernel(CLIm2ColKernel &k)
{
    cl::NDRange     lws_hint   = k.lws_hint();
    const GPUTarget gpu_target = k.get_target();

    // Local work size optimized for the 11x11 AlexNet convolution on Bifrost.
    if(gpu_target_is_in(gpu_target,
                        GPUTarget::G71, GPUTarget::G72, GPUTarget::G76,
                        GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT,
                        GPUTarget::G52, GPUTarget::G52LIT)
       && k._kernel_dims.width == 11)
    {
        const bool is_square_kernel = (k._kernel_dims.width == k._kernel_dims.height);
        if(!is_square_kernel && k._kernel_dims.width > 1 && !k._conv_info.has_padding())
        {
            lws_hint = cl::NDRange(1, 1, 1);
        }
    }
    k.set_lws_hint(lws_hint);
}

void tune_gemm_kernel(CLGEMMMatrixMultiplyKernel &k)
{
    cl::NDRange     lws_hint   = k.lws_hint();
    const GPUTarget gpu_target = k.get_target();

    // Configure LWS hint
    switch(gpu_target)
    {
        case GPUTarget::G71:
        case GPUTarget::G72:
        case GPUTarget::G51:
        case GPUTarget::G51BIG:
        case GPUTarget::G51LIT:
        case GPUTarget::G52:
        case GPUTarget::G52LIT:
        case GPUTarget::G76:
            if(k._input1->info()->dimension(1) == 24)
            {
                // LWS optimized for the 11x11 AlexNet convolution on Bifrost.
                lws_hint = cl::NDRange(2, 2);
            }
            else if(k._output->info()->dimension(1) == 196)
            {
                lws_hint = cl::NDRange(1, 7);
            }
            else
            {
                lws_hint = cl::NDRange(8, 8);
            }
            break;
        default:
            lws_hint = cl::NullRange;
    }

    k.set_lws_hint(lws_hint);
}

void tune_pooling_kernel(opencl::kernels::ClPoolingKernel &k)
{
    cl::NDRange     lws_hint   = k.lws_hint();
    const GPUTarget gpu_target = k.get_target();

    // Configure the local work size (hint) from the first two dimensions of the global work size.
    // On Bifrost, this works for up to 35x35xC filters, for which the pooling_layer_3_optimized
    // kernel is launched with gws=(9, 33, C). In any case, the hint will be ignored if it is
    // invalid (e.g. exceeds the maximum workgroup size that the kernel can be launched with).
    if(k._pool_info.data_layout == DataLayout::NCHW)
    {
        if(gpu_target_is_in(gpu_target,
                            GPUTarget::G71, GPUTarget::G72, GPUTarget::G76,
                            GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT,
                            GPUTarget::G52, GPUTarget::G52LIT))
        {
            cl::NDRange gws = ICLKernel::gws_from_window(k.window());
            lws_hint        = cl::NDRange(gws[0], gws[1], 1);
        }
    }

    k.set_lws_hint(lws_hint);
}
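
// Editorial note (not part of the original source): for the gws = (9, 33, C)
// launch described above, the hint set here is cl::NDRange(9, 33, 1), i.e. a
// work group of 297 items per 2D slice; as the comment says, the hint is
// simply ignored if it is not valid for the device or kernel.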

void tune_scale_kernel(CLScaleKernel &k)
{
    cl::NDRange               lws_hint      = k.lws_hint();
    const GPUTarget           gpu_target    = k.get_target();
    const DataType            dt            = k.input()->info()->data_type();
    const InterpolationPolicy interpolation = k.get_interpolation_policy();

    // Configure the local work size for Bifrost with bilinear interpolation and data type F32.
    // The values were obtained via exhaustive autotuning.
    if(gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72) && (dt == DataType::F32) && (interpolation == InterpolationPolicy::BILINEAR))
    {
        auto dim_0 = k.output()->info()->dimension(0);
        if(dim_0 == 480)
        {
            lws_hint = cl::NDRange(2, 1);
        }
        else if(dim_0 == 3120)
        {
            lws_hint = cl::NDRange(2, 8);
        }
        else if(dim_0 == 4160)
        {
            lws_hint = cl::NDRange(4, 8);
        }
        k.set_lws_hint(lws_hint);
    }
}
} // namespace

void BifrostTuner::tune_kernel_static(ICLKernel &kernel)
{
    if(dynamic_cast<CLDirectConvolutionLayerKernel *>(&kernel) != nullptr)
    {
        tune_direct_convolution_kernel(*utils::cast::polymorphic_downcast<CLDirectConvolutionLayerKernel *>(&kernel));
    }
    else if(dynamic_cast<CLCol2ImKernel *>(&kernel) != nullptr)
    {
        tune_col2im_kernel(*utils::cast::polymorphic_downcast<CLCol2ImKernel *>(&kernel));
    }
    else if(dynamic_cast<CLIm2ColKernel *>(&kernel) != nullptr)
    {
        tune_im2col_kernel(*utils::cast::polymorphic_downcast<CLIm2ColKernel *>(&kernel));
    }
    else if(dynamic_cast<CLGEMMMatrixMultiplyKernel *>(&kernel) != nullptr)
    {
        tune_gemm_kernel(*utils::cast::polymorphic_downcast<CLGEMMMatrixMultiplyKernel *>(&kernel));
    }
    else if(dynamic_cast<opencl::kernels::ClPoolingKernel *>(&kernel) != nullptr)
    {
        tune_pooling_kernel(*utils::cast::polymorphic_downcast<opencl::kernels::ClPoolingKernel *>(&kernel));
    }
    else if(dynamic_cast<CLScaleKernel *>(&kernel) != nullptr)
    {
        tune_scale_kernel(*utils::cast::polymorphic_downcast<CLScaleKernel *>(&kernel));
    }
}

void BifrostTuner::tune_kernel_dynamic(ICLKernel &kernel)
{
    ARM_COMPUTE_UNUSED(kernel);
}

void BifrostTuner::tune_kernel_dynamic(ICLKernel &kernel, ITensorPack &tensors)
{
    ARM_COMPUTE_UNUSED(kernel, tensors);
}
} // namespace tuners
} // namespace arm_compute
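
The listing above is only the tuner's implementation; a caller has to attach it to the OpenCL scheduler for it to take effect. Below is a minimal sketch of that wiring, assuming the CLScheduler and ICLTuner APIs of the same 21.02 release; it is illustrative and not part of this file.

#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/runtime/CL/tuners/BifrostTuner.h"

int main()
{
    // BifrostTuner implements the ICLTuner interface, so it can be handed
    // to the scheduler at initialization time.
    arm_compute::tuners::BifrostTuner tuner;
    arm_compute::CLScheduler::get().default_init(&tuner);

    // Kernels configured by the runtime from this point on are passed to
    // BifrostTuner::tune_kernel_static(), which sets the LWS hints shown
    // above; the tune_kernel_dynamic() overloads are deliberate no-ops.

    // ... configure and run CL functions as usual ...
    return 0;
}

The tuner only sets hints: each kernel still validates its local work size at enqueue time, so an inapplicable hint degrades gracefully rather than failing.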