Compute Library 22.05: graph_depthwiseconvolution.cpp
/*
 * Copyright (c) 2019-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/graph.h"

#include "tests/NEON/Accessor.h"
#include "tests/validation/reference/DepthwiseConvolutionLayer.h"

#include "utils/GraphUtils.h"
#include "utils/Utils.h"

#include "ValidateExample.h"
#include "graph_validate_utils.h"

#include <utility>
using namespace arm_compute::utils;
using namespace arm_compute::graph::frontend;
using namespace arm_compute::graph_utils;
using namespace arm_compute::graph;
using namespace arm_compute;
using namespace arm_compute::test;
using namespace arm_compute::test::validation;

namespace
{
/** Depthwise Convolution command line options used to configure the graph examples
 *
 * (Similar to common options)
 * The options in this object get populated when "parse()" is called on the parser used to construct it.
 * The expected workflow is:
 *
 * CommandLineParser parser;
 * CommonOptions options( parser );
 * parser.parse(argc, argv);
 */
class DepthConvolutionOptions final : public CommonGraphValidateOptions
{
public:
    explicit DepthConvolutionOptions(CommandLineParser &parser) noexcept
        : CommonGraphValidateOptions(parser),
          width(parser.add_option<SimpleOption<int>>("width", 9)),
          height(parser.add_option<SimpleOption<int>>("height", 9)),
          channels(parser.add_option<SimpleOption<int>>("channels", 1)),
          batch(parser.add_option<SimpleOption<int>>("batch", 1)),
          weights_width(parser.add_option<SimpleOption<int>>("weights_width", 3)),
          weights_height(parser.add_option<SimpleOption<int>>("weights_height", 3)),
          padding_top(parser.add_option<SimpleOption<int>>("padding_top", 0)),
          padding_left(parser.add_option<SimpleOption<int>>("padding_left", 0)),
          padding_bottom(parser.add_option<SimpleOption<int>>("padding_bottom", 0)),
          padding_right(parser.add_option<SimpleOption<int>>("padding_right", 0)),
          stride_x(parser.add_option<SimpleOption<int>>("stride_x", 1)),
          stride_y(parser.add_option<SimpleOption<int>>("stride_y", 1)),
          padding_mode(),
          conv_mode(),
          depth_multiplier(parser.add_option<SimpleOption<int>>("depth_multiplier", 1)),
          data_layout(),
          scale(parser.add_option<SimpleOption<float>>("scale", 1.0f)),
          offset(parser.add_option<SimpleOption<int>>("offset", 0)),
          weights_scale(parser.add_option<SimpleOption<float>>("weights_scale", 1.0f)),
          weights_offset(parser.add_option<SimpleOption<int>>("weights_offset", 0)),
          output_scale(parser.add_option<SimpleOption<float>>("output_scale", 1.0f)),
          output_offset(parser.add_option<SimpleOption<int>>("output_offset", 0)),
          input_range_low(parser.add_option<SimpleOption<uint64_t>>("input_range_low")),
          input_range_high(parser.add_option<SimpleOption<uint64_t>>("input_range_high")),
          weights_range_low(parser.add_option<SimpleOption<uint64_t>>("weights_range_low")),
          weights_range_high(parser.add_option<SimpleOption<uint64_t>>("weights_range_high")),
          input_npy(parser.add_option<SimpleOption<std::string>>("input_image")),
          output_npy(parser.add_option<SimpleOption<std::string>>("reference_image")),
          weights_npy(parser.add_option<SimpleOption<std::string>>("weights_npy")),
          bias_npy(parser.add_option<SimpleOption<std::string>>("bias_image"))
    {
        const std::set<ConvolutionPaddingMode> available_padding_modes
        {
            ConvolutionPaddingMode::Valid,
            ConvolutionPaddingMode::Same
        };

        const std::set<arm_compute::graph::DepthwiseConvolutionMethod> supported_convolution_methods
        {
            arm_compute::graph::DepthwiseConvolutionMethod::Default,
            arm_compute::graph::DepthwiseConvolutionMethod::GEMV,
            arm_compute::graph::DepthwiseConvolutionMethod::Optimized3x3
        };

        const std::set<DataLayout> supported_data_layouts
        {
            DataLayout::NHWC,
            DataLayout::NCHW
        };

        padding_mode = parser.add_option<EnumOption<ConvolutionPaddingMode>>("padding_mode", available_padding_modes, ConvolutionPaddingMode::Valid);
        conv_mode    = parser.add_option<EnumOption<arm_compute::graph::DepthwiseConvolutionMethod>>("convolution_method", supported_convolution_methods,
                                                                                                     arm_compute::graph::DepthwiseConvolutionMethod::Default);
        data_layout  = parser.add_option<EnumOption<DataLayout>>("layout", supported_data_layouts, DataLayout::NHWC);

        padding_mode->set_help("Set padding mode");
        width->set_help("Set Input dimension width");
        height->set_help("Set Input dimension height");
        channels->set_help("Set Input dimension channels");
        batch->set_help("Set Input dimension batch");
        weights_width->set_help("Set weights_dimensions width");
        weights_height->set_help("Set weights_dimensions height");
        padding_top->set_help("Set padding top");
        padding_bottom->set_help("Set padding bottom");
        padding_left->set_help("Set padding left");
        padding_right->set_help("Set padding right");
        stride_x->set_help("Set stride x");
        stride_y->set_help("Set stride y");
        conv_mode->set_help("Set convolution method");
        data_layout->set_help("Data layout to use");
        scale->set_help("Input quantization scale for QASYMM8");
        offset->set_help("Input quantization offset for QASYMM8");
        output_scale->set_help("Output quantization scale for QASYMM8");
        output_offset->set_help("Output quantization offset for QASYMM8");
        input_npy->set_help("Use input .npy instead");
        output_npy->set_help("Use .npy as a reference");
        input_range_low->set_help("Lower bound for input randomization range");
        input_range_high->set_help("Upper bound for input randomization range");
        weights_scale->set_help("Weights quantization scale for QASYMM8");
        weights_offset->set_help("Weights quantization offset for QASYMM8");
        weights_range_low->set_help("Lower bound for weights randomization range");
        weights_range_high->set_help("Upper bound for weights randomization range");
        depth_multiplier->set_help("Depth multiplier");
    }

    /** Fill out the supplied parameters with user supplied parameters
     *
     * @param[out] common_params Example parameters to fill
     */
    void consume_parameters(ExampleParams &common_params)
    {
        common_params.input.width      = width->value();
        common_params.input.height     = height->value();
        common_params.input.fm         = channels->value();
        common_params.input.batch      = batch->value();
        common_params.input.quant_info = QuantizationInfo(scale->value(), offset->value());
        common_params.input.npy        = input_npy->value();
        common_params.input.range_low  = input_range_low->value();
        common_params.input.range_high = input_range_high->value();

        common_params.weights.width      = weights_width->value();
        common_params.weights.height     = weights_height->value();
        common_params.weights.npy        = weights_npy->value();
        common_params.weights.range_low  = weights_range_low->value();
        common_params.weights.range_high = weights_range_high->value();
        common_params.weights.quant_info = QuantizationInfo(weights_scale->value(), weights_offset->value());

        common_params.bias.npy = bias_npy->value();

        common_params.output.quant_info = QuantizationInfo(output_scale->value(), output_offset->value());
        common_params.output.npy        = output_npy->value();

        common_params.convolution.padding_mode     = padding_mode->value();
        common_params.convolution.padding_top      = padding_top->value();
        common_params.convolution.padding_bottom   = padding_bottom->value();
        common_params.convolution.padding_left     = padding_left->value();
        common_params.convolution.padding_right    = padding_right->value();
        common_params.convolution.padding_stride_x = stride_x->value();
        common_params.convolution.padding_stride_y = stride_y->value();
        common_params.convolution.depth_multiplier = depth_multiplier->value();

        common_params.data_type                = data_type->value();
        common_params.data_layout              = data_layout->value();
        common_params.depth_convolution_method = conv_mode->value();
    }

    void print_parameters(::std::ostream &os, const ExampleParams &common_params) override
    {
        os << "Threads : " << common_params.common_params.threads << std::endl;
        os << "Target : " << common_params.common_params.target << std::endl;
        os << "Data type : " << common_params.data_type << std::endl;
        os << "Input dimensions(X,Y, Channels, Batch) : (" << common_params.input.width << "," << common_params.input.height << "," << common_params.input.fm << "," << common_params.input.batch << ")"
           << std::endl;
        os << "Weight dimensions(X,Y, Channels(same as input)) : (" << common_params.weights.width << "," << common_params.weights.height << "," << common_params.input.fm << ")" << std::endl;
        os << "Padding(top, bottom, left, right) (stride x, stride y) : (" << common_params.convolution.padding_top << "," << common_params.convolution.padding_bottom << ","
           << common_params.convolution.padding_left << "," << common_params.convolution.padding_right << ") (" << common_params.convolution.padding_stride_x << ","
           << common_params.convolution.padding_stride_y << ")" << std::endl;
        os << "Padding Mode: " << common_params.convolution.padding_mode << std::endl;
        os << "Convolution Method: " << common_params.depth_convolution_method << std::endl;
        os << "Depth multiplier: " << common_params.convolution.depth_multiplier << std::endl;
    }

    /** Prevent instances of this class from being copied (As this class contains pointers) */
    DepthConvolutionOptions(const DepthConvolutionOptions &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    DepthConvolutionOptions &operator=(const DepthConvolutionOptions &) = delete;
    /** Allow instances of this class to be moved */
    DepthConvolutionOptions(DepthConvolutionOptions &&) noexcept(true) = default;
    /** Allow instances of this class to be moved */
    DepthConvolutionOptions &operator=(DepthConvolutionOptions &&) noexcept(true) = default;
    /** Default destructor */
    ~DepthConvolutionOptions() override = default;

private:
    SimpleOption<int> *width;            /**< Input width */
    SimpleOption<int> *height;           /**< Input height */
    SimpleOption<int> *channels;         /**< Input channels */
    SimpleOption<int> *batch;            /**< Input batch */
    SimpleOption<int> *weights_width;    /**< Weights width */
    SimpleOption<int> *weights_height;   /**< Weights height */
    SimpleOption<int> *padding_top;      /**< Padding top */
    SimpleOption<int> *padding_left;     /**< Padding left */
    SimpleOption<int> *padding_bottom;   /**< Padding bottom */
    SimpleOption<int> *padding_right;    /**< Padding right */
    SimpleOption<int> *stride_x;         /**< Stride x */
    SimpleOption<int> *stride_y;         /**< Stride y */
    EnumOption<ConvolutionPaddingMode> *padding_mode;                      /**< Padding mode */
    EnumOption<arm_compute::graph::DepthwiseConvolutionMethod> *conv_mode; /**< Convolution method */
    SimpleOption<int> *depth_multiplier;            /**< Depth multiplier */
    EnumOption<arm_compute::DataLayout> *data_layout; /**< Graph data layout */
    SimpleOption<float> *scale;          /**< Input quantization scale for QASYMM8 */
    SimpleOption<int> *offset;           /**< Input quantization offset for QASYMM8 */
    SimpleOption<float> *weights_scale;  /**< Weights quantization scale for QASYMM8 */
    SimpleOption<int> *weights_offset;   /**< Weights quantization offset for QASYMM8 */
    SimpleOption<float> *output_scale;   /**< Output quantization scale for QASYMM8 */
    SimpleOption<int> *output_offset;    /**< Output quantization offset for QASYMM8 */
    SimpleOption<uint64_t> *input_range_low;    /**< Lower bound for input randomization range */
    SimpleOption<uint64_t> *input_range_high;   /**< Upper bound for input randomization range */
    SimpleOption<uint64_t> *weights_range_low;  /**< Lower bound for weights randomization range */
    SimpleOption<uint64_t> *weights_range_high; /**< Upper bound for weights randomization range */

    SimpleOption<std::string> *input_npy;   /**< Use input .npy image */
    SimpleOption<std::string> *output_npy;  /**< Use output .npy image to verify */
    SimpleOption<std::string> *weights_npy; /**< Use weights .npy image */
    SimpleOption<std::string> *bias_npy;    /**< Use bias .npy image */
};
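
// Usage sketch (illustrative only; it follows the parse workflow described in the class comment
// above, and the local names "parser", "options" and "params" are hypothetical):
//
//   CommandLineParser       parser;
//   DepthConvolutionOptions options(parser);
//   parser.parse(argc, argv);
//   ExampleParams params;
//   options.consume_parameters(params); // copy the parsed values into the graph parameters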

/** DepthwiseConvolutionLayer Graph example validation accessor class */
template <typename D>
class DepthConvolutionVerifyAccessor final : public VerifyAccessor<D>
{
public:
    using BaseClassType = VerifyAccessor<D>;
    using BaseClassType::BaseClassType;
    using BaseClassType::_params;
    using TBias = typename std::conditional<std::is_same<typename std::decay<D>::type, uint8_t>::value, int32_t, D>::type;

public:
    SimpleTensor<D> reference(SimpleTensor<D> &src, SimpleTensor<D> &weights, SimpleTensor<TBias> &bias, const TensorShape &output_shape) override
    {
        // Calculate padding information
        const PadStrideInfo padding_info = calculate_convolution_padding(_params);

        // Calculate reference
        return reference::depthwise_convolution<D>(src, weights, bias, output_shape, padding_info,
                                                   _params.convolution.depth_multiplier,
                                                   Size2D(1U, 1U),
                                                   _params.output.quant_info);
    }

    float relative_tolerance() override
    {
        const std::map<arm_compute::graph::Target, const std::map<DataType, float>> relative_tolerance
        {
            {
                Target::CL,
                { { DataType::F16, 0.01f },
                  { DataType::F32, 0.01f },
                  { DataType::QASYMM8, 0.0f }
                }
            },
            {
                Target::NEON,
                { { DataType::F16, 0.01f },
                  { DataType::F32, 0.01f },
                  { DataType::QASYMM8, 1.0f }
                }
            }
        };

        return relative_tolerance.at(_params.common_params.target).at(_params.data_type);
    }

    float absolute_tolerance() override
    {
        const std::map<Target, const std::map<DataType, float>> absolute_tolerance
        {
            {
                Target::CL,
                { { DataType::F16, 0.0f },
                  { DataType::F32, 0.0000f },
                  { DataType::QASYMM8, 0.0f }
                }
            },
            {
                Target::NEON,
                { { DataType::F16, 0.2f },
                  { DataType::F32, 0.002f },
                  { DataType::QASYMM8, 0.0f }
                }
            }
        };

        return absolute_tolerance.at(_params.common_params.target).at(_params.data_type);
    }

    float tolerance_number() override
    {
        const std::map<Target, const std::map<DataType, float>> tolerance_num
        {
            {
                Target::CL,
                { { DataType::F16, 0.05f },
                  { DataType::F32, 0.00f },
                  { DataType::QASYMM8, 0.0f }
                }
            },
            {
                Target::NEON,
                { { DataType::F16, 0.05f },
                  { DataType::F32, 0.0f },
                  { DataType::QASYMM8, 0.0f }
                }
            }
        };

        return tolerance_num.at(_params.common_params.target).at(_params.data_type);
    }
};
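
// As an illustration of how these tolerance tables are consumed: a run on the NEON target with
// QASYMM8 data picks a relative tolerance of 1.0f, an absolute tolerance of 0.0f and a
// tolerance number of 0.0f from the maps above.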

} // namespace

class GraphDepthwiseConvolutionValidateExample final : public GraphValidateExample<DepthwiseConvolutionLayer, DepthConvolutionOptions, DepthConvolutionVerifyAccessor>
{
    using GraphValidateExample::graph;

public:
    GraphDepthwiseConvolutionValidateExample()
        : GraphValidateExample("DepthWiseConvolution Graph example")
    {
    }

    DepthwiseConvolutionLayer GraphFunctionLayer(ExampleParams &params) override
    {
        const PixelValue lower = PixelValue(params.input.range_low, params.data_type, params.input.quant_info);
        const PixelValue upper = PixelValue(params.input.range_high, params.data_type, params.input.quant_info);

        const PixelValue weights_lower = PixelValue(params.weights.range_low, params.data_type, params.weights.quant_info);
        const PixelValue weights_upper = PixelValue(params.weights.range_high, params.data_type, params.weights.quant_info);

        // Calculate padding information
        const PadStrideInfo padding_info = calculate_convolution_padding(params);

        return DepthwiseConvolutionLayer(params.weights.width, params.weights.height,
                                         get_accessor(params.weights, weights_lower, weights_upper, 1),
                                         get_accessor(params.bias, lower, upper, 2),
                                         padding_info, params.convolution.depth_multiplier, params.weights.quant_info, params.output.quant_info);
    }
};

/** Main program for Graph Depthwise Convolution test
 *
 * @param[in] argc Number of arguments
 * @param[in] argv Arguments ( Input dimensions [width, height, channels, batch],
 *                             Weights dimensions [width, height, channels],
 *                             Padding [top, bottom, left, right], Stride [x, y], Padding mode [Valid / Same / Manual],
 *                             Convolution method [Default / GEMV / Optimized3x3],
 *                             Verification [tolerance_number, absolute_tolerance, relative_tolerance] )
 */
int main(int argc, char **argv)
{
    return arm_compute::utils::run_example<GraphDepthwiseConvolutionValidateExample>(argc, argv);
}
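
// Example invocation (a sketch; the binary name is hypothetical and the exact command line syntax
// is defined by utils::CommandLineParser, but the option names come from DepthConvolutionOptions
// and CommonGraphValidateOptions above):
//
//   ./graph_depthwiseconvolution_validate --width=9 --height=9 --channels=16 \
//       --weights_width=3 --weights_height=3 --stride_x=1 --stride_y=1 --depth_multiplier=1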