Compute Library 21.02
graph_mobilenet_v2.cpp
/*
 * Copyright (c) 2018-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/graph.h"
#include "support/ToolchainSupport.h"
#include "utils/CommonGraphOptions.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"

using namespace arm_compute;
using namespace arm_compute::utils;
using namespace arm_compute::graph::frontend;
using namespace arm_compute::graph_utils;

/** Example demonstrating how to implement MobileNetV2's network using the Compute Library's graph API */
class GraphMobilenetV2Example : public Example
{
public:
    GraphMobilenetV2Example()
        : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "MobileNetV2")
    {
    }
    GraphMobilenetV2Example(const GraphMobilenetV2Example &) = delete;
    GraphMobilenetV2Example &operator=(const GraphMobilenetV2Example &) = delete;
    ~GraphMobilenetV2Example() override = default;

    bool do_setup(int argc, char **argv) override
    {
        // Parse arguments
        cmd_parser.parse(argc, argv);
        cmd_parser.validate();

        // Consume common parameters
        common_params = consume_common_graph_parameters(common_opts);

        // Return when help menu is requested
        if(common_params.help)
        {
            cmd_parser.print_help(argv[0]);
            return false;
        }

        // Print parameter values
        std::cout << common_params << std::endl;

        // Create input descriptor
        const TensorShape tensor_shape     = permute_shape(TensorShape(224U, 224U, 3U, 1U), DataLayout::NCHW, common_params.data_layout);
        TensorDescriptor  input_descriptor = TensorDescriptor(tensor_shape, common_params.data_type).set_layout(common_params.data_layout);
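        // Note: TensorShape lists dimensions fastest-moving first, so (224, 224, 3, 1) read in
        // NCHW terms is width, height, channels, batches; permute_shape() reorders the shape
        // when the command line selects a different layout such as NHWC.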

        // Set graph hints
        graph << common_params.target
              << common_params.fast_math_hint;

        // Create core graph
        if(arm_compute::is_data_type_float(common_params.data_type))
        {
            create_graph_float(input_descriptor);
        }
        else
        {
            create_graph_qasymm8(input_descriptor);
        }
        // Create common tail
        graph << ReshapeLayer(TensorShape(1001U)).set_name("Predictions/Reshape")
              << SoftmaxLayer().set_name("Predictions/Softmax")
              << OutputLayer(get_output_accessor(common_params, 5));
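        // Note: the TensorFlow checkpoints ship 1001 output classes (ImageNet's 1000 plus a
        // background class), hence the 1001-element reshape above.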

        // Finalize graph
        GraphConfig config;
        config.num_threads = common_params.threads;
        config.use_tuner   = common_params.enable_tuner;
        config.tuner_mode  = common_params.tuner_mode;
        config.tuner_file  = common_params.tuner_file;
        config.mlgo_file   = common_params.mlgo_file;

        graph.finalize(common_params.target, config);

        return true;
    }

    void do_run() override
    {
        // Run graph
        graph.run();
    }

private:
    CommandLineParser  cmd_parser;
    CommonGraphOptions common_opts;
    CommonGraphParams  common_params;
    Stream             graph;

private:
    enum class IsResidual
    {
        Yes,
        No
    };

    enum class HasExpand
    {
        Yes,
        No
    };

private:
    void create_graph_float(TensorDescriptor &input_descriptor)
    {
        // Create model path
        const std::string model_path = "/cnn_data/mobilenet_v2_1.0_224_model/";

        // Create a preprocessor object
        std::unique_ptr<IPreprocessor> preprocessor = std::make_unique<TFPreproccessor>();

        // Get trainable parameters data path
        std::string data_path = common_params.data_path;

        // Add model path to data path
        if(!data_path.empty())
        {
            data_path += model_path;
        }

        graph << InputLayer(input_descriptor, get_input_accessor(common_params, std::move(preprocessor), false))
              << ConvolutionLayer(3U, 3U, 32U,
                                  get_weights_accessor(data_path, "Conv_weights.npy", DataLayout::NCHW),
                                  std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
                                  PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL))
              .set_name("Conv")
              << BatchNormalizationLayer(get_weights_accessor(data_path, "Conv_BatchNorm_moving_mean.npy"),
                                         get_weights_accessor(data_path, "Conv_BatchNorm_moving_variance.npy"),
                                         get_weights_accessor(data_path, "Conv_BatchNorm_gamma.npy"),
                                         get_weights_accessor(data_path, "Conv_BatchNorm_beta.npy"),
                                         0.0010000000474974513f)
              .set_name("Conv/BatchNorm")
              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f))
              .set_name("Conv/Relu6");

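        // The inverted-residual (bottleneck) blocks below follow the layer table of the
        // MobileNetV2 paper: stride-2 blocks halve the spatial size, and a block carries a
        // residual connection only when its input and output shapes match.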
        get_expanded_conv_float(data_path, "expanded_conv", 32U, 16U, PadStrideInfo(1, 1, 1, 1));
        get_expanded_conv_float(data_path, "expanded_conv_1", 16U, 24U, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL), HasExpand::Yes);
        get_expanded_conv_float(data_path, "expanded_conv_2", 24U, 24U, PadStrideInfo(1, 1, 1, 1), HasExpand::Yes, IsResidual::Yes);
        get_expanded_conv_float(data_path, "expanded_conv_3", 24U, 32U, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL), HasExpand::Yes);
        get_expanded_conv_float(data_path, "expanded_conv_4", 32U, 32U, PadStrideInfo(1, 1, 1, 1), HasExpand::Yes, IsResidual::Yes);
        get_expanded_conv_float(data_path, "expanded_conv_5", 32U, 32U, PadStrideInfo(1, 1, 1, 1), HasExpand::Yes, IsResidual::Yes);
        get_expanded_conv_float(data_path, "expanded_conv_6", 32U, 64U, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL), HasExpand::Yes);
        get_expanded_conv_float(data_path, "expanded_conv_7", 64U, 64U, PadStrideInfo(1, 1, 1, 1), HasExpand::Yes, IsResidual::Yes);
        get_expanded_conv_float(data_path, "expanded_conv_8", 64U, 64U, PadStrideInfo(1, 1, 1, 1), HasExpand::Yes, IsResidual::Yes);
        get_expanded_conv_float(data_path, "expanded_conv_9", 64U, 64U, PadStrideInfo(1, 1, 1, 1), HasExpand::Yes, IsResidual::Yes);
        get_expanded_conv_float(data_path, "expanded_conv_10", 64U, 96U, PadStrideInfo(1, 1, 1, 1), HasExpand::Yes);
        get_expanded_conv_float(data_path, "expanded_conv_11", 96U, 96U, PadStrideInfo(1, 1, 1, 1), HasExpand::Yes, IsResidual::Yes);
        get_expanded_conv_float(data_path, "expanded_conv_12", 96U, 96U, PadStrideInfo(1, 1, 1, 1), HasExpand::Yes, IsResidual::Yes);
        get_expanded_conv_float(data_path, "expanded_conv_13", 96U, 160U, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL), HasExpand::Yes);
        get_expanded_conv_float(data_path, "expanded_conv_14", 160U, 160U, PadStrideInfo(1, 1, 1, 1), HasExpand::Yes, IsResidual::Yes);
        get_expanded_conv_float(data_path, "expanded_conv_15", 160U, 160U, PadStrideInfo(1, 1, 1, 1), HasExpand::Yes, IsResidual::Yes);
        get_expanded_conv_float(data_path, "expanded_conv_16", 160U, 320U, PadStrideInfo(1, 1, 1, 1), HasExpand::Yes);

        graph << ConvolutionLayer(1U, 1U, 1280U,
                                  get_weights_accessor(data_path, "Conv_1_weights.npy", DataLayout::NCHW),
                                  std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
                                  PadStrideInfo(1, 1, 0, 0))
              .set_name("Conv_1")
              << BatchNormalizationLayer(get_weights_accessor(data_path, "Conv_1_BatchNorm_moving_mean.npy"),
                                         get_weights_accessor(data_path, "Conv_1_BatchNorm_moving_variance.npy"),
                                         get_weights_accessor(data_path, "Conv_1_BatchNorm_gamma.npy"),
                                         get_weights_accessor(data_path, "Conv_1_BatchNorm_beta.npy"),
                                         0.0010000000474974513f)
              .set_name("Conv_1/BatchNorm")
              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f))
              .set_name("Conv_1/Relu6")
              << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, common_params.data_layout)).set_name("Logits/AvgPool")
              << ConvolutionLayer(1U, 1U, 1001U,
                                  get_weights_accessor(data_path, "Logits_Conv2d_1c_1x1_weights.npy", DataLayout::NCHW),
                                  get_weights_accessor(data_path, "Logits_Conv2d_1c_1x1_biases.npy"),
                                  PadStrideInfo(1, 1, 0, 0))
              .set_name("Logits/Conv2d_1c_1x1");
    }

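    // Builds one MobileNetV2 inverted-residual block: an optional 1x1 "expand" convolution
    // that widens the tensor by expansion_size, a 3x3 depthwise convolution, and a 1x1
    // "project" convolution back down to output_channels. Each convolution is followed by
    // batch normalization, and all but the projection by a Relu6 activation; when requested,
    // the block's input is added back onto its output (residual connection).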
    void get_expanded_conv_float(const std::string &data_path, std::string &&param_path,
                                 unsigned int input_channels, unsigned int output_channels,
                                 PadStrideInfo dwc_pad_stride_info,
                                 HasExpand has_expand = HasExpand::No, IsResidual is_residual = IsResidual::No,
                                 unsigned int expansion_size = 6)
    {
        std::string total_path = param_path + "_";
        SubStream   left(graph);

        // Add expand node
        if(has_expand == HasExpand::Yes)
        {
            left << ConvolutionLayer(1U, 1U, input_channels * expansion_size,
                                     get_weights_accessor(data_path, total_path + "expand_weights.npy", DataLayout::NCHW),
                                     std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(1, 1, 0, 0))
                 .set_name(param_path + "/expand/Conv2D")
                 << BatchNormalizationLayer(get_weights_accessor(data_path, total_path + "expand_BatchNorm_moving_mean.npy"),
                                            get_weights_accessor(data_path, total_path + "expand_BatchNorm_moving_variance.npy"),
                                            get_weights_accessor(data_path, total_path + "expand_BatchNorm_gamma.npy"),
                                            get_weights_accessor(data_path, total_path + "expand_BatchNorm_beta.npy"),
                                            0.0010000000474974513f)
                 .set_name(param_path + "/expand/BatchNorm")
                 << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f))
                 .set_name(param_path + "/expand/Relu6");
        }

        // Add depthwise node
        left << DepthwiseConvolutionLayer(3U, 3U,
                                          get_weights_accessor(data_path, total_path + "depthwise_depthwise_weights.npy", DataLayout::NCHW),
                                          std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
                                          dwc_pad_stride_info)
             .set_name(param_path + "/depthwise/depthwise")
             << BatchNormalizationLayer(get_weights_accessor(data_path, total_path + "depthwise_BatchNorm_moving_mean.npy"),
                                        get_weights_accessor(data_path, total_path + "depthwise_BatchNorm_moving_variance.npy"),
                                        get_weights_accessor(data_path, total_path + "depthwise_BatchNorm_gamma.npy"),
                                        get_weights_accessor(data_path, total_path + "depthwise_BatchNorm_beta.npy"),
                                        0.0010000000474974513f)
             .set_name(param_path + "/depthwise/BatchNorm")
             << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f))
             .set_name(param_path + "/depthwise/Relu6");

        // Add project node (linear bottleneck: no activation after the projection)
        left << ConvolutionLayer(1U, 1U, output_channels,
                                 get_weights_accessor(data_path, total_path + "project_weights.npy", DataLayout::NCHW),
                                 std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(1, 1, 0, 0))
             .set_name(param_path + "/project/Conv2D")
             << BatchNormalizationLayer(get_weights_accessor(data_path, total_path + "project_BatchNorm_moving_mean.npy"),
                                        get_weights_accessor(data_path, total_path + "project_BatchNorm_moving_variance.npy"),
                                        get_weights_accessor(data_path, total_path + "project_BatchNorm_gamma.npy"),
                                        get_weights_accessor(data_path, total_path + "project_BatchNorm_beta.npy"),
                                        0.0010000000474974513f)
             .set_name(param_path + "/project/BatchNorm");

        if(is_residual == IsResidual::Yes)
        {
            // Add residual node: "right" forks from the current graph tail (the block input),
            // so the element-wise add sums the block's input and its output
            SubStream right(graph);
            graph << EltwiseLayer(std::move(left), std::move(right), EltwiseOperation::Add).set_name(param_path + "/add");
        }
        else
        {
            graph.forward_tail(left.tail_node());
        }
    }

    void create_graph_qasymm8(TensorDescriptor &input_descriptor)
    {
        // Create model path
        const std::string model_path = "/cnn_data/mobilenet_v2_1.0_224_quantized_model/";

        // Get trainable parameters data path
        std::string data_path = common_params.data_path;

        // Add model path to data path
        if(!data_path.empty())
        {
            data_path += model_path;
        }

        const QuantizationInfo in_quant_info  = QuantizationInfo(0.0078125f, 128);
        const QuantizationInfo mid_quant_info = QuantizationInfo(0.023528477177023888f, 128);

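        // Each QuantizationInfo(scale, offset) pair describes an 8-bit asymmetric mapping:
        // real_value = scale * (quantized_uint8 - offset).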
        const std::vector<QuantizationInfo> conv_weights_quant_info =
        {
            QuantizationInfo(0.03396892547607422f, 122),  // Conv
            QuantizationInfo(0.005167067516595125f, 125), // Conv1
            QuantizationInfo(0.0016910821432247758f, 113) // Conv2d_1c_1x1
        };

        // Pointwise expand convolution quantization info
        const std::vector<QuantizationInfo> pwc_q =
        {
            QuantizationInfo(0.254282623529f, 129),        // expand_0 (Dummy)
            QuantizationInfo(0.009758507832884789f, 127),  // expand_1
            QuantizationInfo(0.0036556976847350597f, 144), // expand_2
            QuantizationInfo(0.0029988749884068966f, 104), // expand_3
            QuantizationInfo(0.0019244228024035692f, 128), // expand_4
            QuantizationInfo(0.0013649158645421267f, 135), // expand_5
            QuantizationInfo(0.0019170437008142471f, 127), // expand_6
            QuantizationInfo(0.0015538912266492844f, 125), // expand_7
            QuantizationInfo(0.0014702979242429137f, 134), // expand_8
            QuantizationInfo(0.0013733493397012353f, 127), // expand_9
            QuantizationInfo(0.0016282502328976989f, 131), // expand_10
            QuantizationInfo(0.0016309921629726887f, 134), // expand_11
            QuantizationInfo(0.0018258779309689999f, 138), // expand_12
            QuantizationInfo(0.0013828007504343987f, 123), // expand_13
            QuantizationInfo(0.0020222084131091833f, 135), // expand_14
            QuantizationInfo(0.04281935095787048f, 102),   // expand_15
            QuantizationInfo(0.002046825597062707f, 135)   // expand_16
        };
        // Depthwise convolution quantization info
        const std::vector<QuantizationInfo> dwc_q =
        {
            QuantizationInfo(0.3436955213546753f, 165),   // expand_0
            QuantizationInfo(0.020969120785593987f, 109), // expand_1
            QuantizationInfo(0.16981913149356842f, 52),   // expand_2
            QuantizationInfo(0.017202870920300484f, 143), // expand_3
            QuantizationInfo(0.06525065749883652f, 118),  // expand_4
            QuantizationInfo(0.07909784466028214f, 95),   // expand_5
            QuantizationInfo(0.010087885893881321f, 127), // expand_6
            QuantizationInfo(0.06092711538076401f, 110),  // expand_7
            QuantizationInfo(0.052407849580049515f, 133), // expand_8
            QuantizationInfo(0.04077887907624245f, 155),  // expand_9
            QuantizationInfo(0.031107846647500992f, 143), // expand_10
            QuantizationInfo(0.07080810517072678f, 66),   // expand_11
            QuantizationInfo(0.07448793947696686f, 159),  // expand_12
            QuantizationInfo(0.01525793131440878f, 92),   // expand_13
            QuantizationInfo(0.04166752099990845f, 147),  // expand_14
            QuantizationInfo(0.04281935095787048f, 102),  // expand_15
            QuantizationInfo(0.16456253826618195f, 201)   // expand_16
        };
        // Project convolution quantization info
        const std::vector<QuantizationInfo> prwc_q =
        {
            QuantizationInfo(0.03737175464630127f, 140),  // expand_0
            QuantizationInfo(0.0225360207259655f, 156),   // expand_1
            QuantizationInfo(0.02740888111293316f, 122),  // expand_2
            QuantizationInfo(0.016844693571329117f, 111), // expand_3
            QuantizationInfo(0.019062912091612816f, 146), // expand_4
            QuantizationInfo(0.018293123692274094f, 128), // expand_5
            QuantizationInfo(0.014601286500692368f, 147), // expand_6
            QuantizationInfo(0.016782939434051514f, 124), // expand_7
            QuantizationInfo(0.012898261658847332f, 125), // expand_8
            QuantizationInfo(0.019561484456062317f, 144), // expand_9
            QuantizationInfo(0.007436311338096857f, 129), // expand_10
            QuantizationInfo(0.00838223285973072f, 136),  // expand_11
            QuantizationInfo(0.023982593789696693f, 154), // expand_12
            QuantizationInfo(0.009447949007153511f, 140), // expand_13
            QuantizationInfo(0.00789870135486126f, 139),  // expand_14
            QuantizationInfo(0.03697410225868225f, 131),  // expand_15
            QuantizationInfo(0.008009289391338825f, 111)  // expand_16
        };

        graph << InputLayer(input_descriptor.set_quantization_info(in_quant_info),
                            get_weights_accessor(data_path, common_params.image))
              << ConvolutionLayer(
                  3U, 3U, 32U,
                  get_weights_accessor(data_path, "Conv_weights.npy"),
                  get_weights_accessor(data_path, "Conv_bias.npy"),
                  PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR),
                  1, conv_weights_quant_info.at(0), mid_quant_info)
              .set_name("Conv")
              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f)).set_name("Conv/Relu6")
              << DepthwiseConvolutionLayer(3U, 3U,
                                           get_weights_accessor(data_path, "expanded_conv_depthwise_depthwise_weights.npy"),
                                           get_weights_accessor(data_path, "expanded_conv_depthwise_depthwise_biases.npy"),
                                           PadStrideInfo(1, 1, 1, 1), 1, dwc_q.at(0))
              .set_name("expanded_conv/depthwise/depthwise")
              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f)).set_name("expanded_conv/depthwise/Relu6")
              << ConvolutionLayer(1U, 1U, 16U,
                                  get_weights_accessor(data_path, "expanded_conv_project_weights.npy"),
                                  get_weights_accessor(data_path, "expanded_conv_project_biases.npy"),
                                  PadStrideInfo(1, 1, 0, 0), 1, prwc_q.at(0))
              .set_name("expanded_conv/project/Conv2D");

        get_expanded_conv_qasymm8(data_path, "expanded_conv_1", IsResidual::No, 96U, 24U, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL),
                                  pwc_q.at(1), dwc_q.at(1), prwc_q.at(1));
        get_expanded_conv_qasymm8(data_path, "expanded_conv_2", IsResidual::Yes, 144U, 24U, PadStrideInfo(1, 1, 1, 1), pwc_q.at(2), dwc_q.at(2), prwc_q.at(2));
        get_expanded_conv_qasymm8(data_path, "expanded_conv_3", IsResidual::No, 144U, 32U, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL),
                                  pwc_q.at(3), dwc_q.at(3), prwc_q.at(3));
        get_expanded_conv_qasymm8(data_path, "expanded_conv_4", IsResidual::Yes, 192U, 32U, PadStrideInfo(1, 1, 1, 1), pwc_q.at(4), dwc_q.at(4), prwc_q.at(4));
        get_expanded_conv_qasymm8(data_path, "expanded_conv_5", IsResidual::Yes, 192U, 32U, PadStrideInfo(1, 1, 1, 1), pwc_q.at(5), dwc_q.at(5), prwc_q.at(5));
        get_expanded_conv_qasymm8(data_path, "expanded_conv_6", IsResidual::No, 192U, 64U, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL),
                                  pwc_q.at(6), dwc_q.at(6), prwc_q.at(6));
        get_expanded_conv_qasymm8(data_path, "expanded_conv_7", IsResidual::Yes, 384U, 64U, PadStrideInfo(1, 1, 1, 1), pwc_q.at(7), dwc_q.at(7), prwc_q.at(7));
        get_expanded_conv_qasymm8(data_path, "expanded_conv_8", IsResidual::Yes, 384U, 64U, PadStrideInfo(1, 1, 1, 1), pwc_q.at(8), dwc_q.at(8), prwc_q.at(8));
        get_expanded_conv_qasymm8(data_path, "expanded_conv_9", IsResidual::Yes, 384U, 64U, PadStrideInfo(1, 1, 1, 1), pwc_q.at(9), dwc_q.at(9), prwc_q.at(9));
        get_expanded_conv_qasymm8(data_path, "expanded_conv_10", IsResidual::No, 384U, 96U, PadStrideInfo(1, 1, 1, 1), pwc_q.at(10), dwc_q.at(10), prwc_q.at(10));
        get_expanded_conv_qasymm8(data_path, "expanded_conv_11", IsResidual::Yes, 576U, 96U, PadStrideInfo(1, 1, 1, 1), pwc_q.at(11), dwc_q.at(11), prwc_q.at(11));
        get_expanded_conv_qasymm8(data_path, "expanded_conv_12", IsResidual::Yes, 576U, 96U, PadStrideInfo(1, 1, 1, 1), pwc_q.at(12), dwc_q.at(12), prwc_q.at(12));
        get_expanded_conv_qasymm8(data_path, "expanded_conv_13", IsResidual::No, 576U, 160U, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL),
                                  pwc_q.at(13), dwc_q.at(13), prwc_q.at(13));
        get_expanded_conv_qasymm8(data_path, "expanded_conv_14", IsResidual::Yes, 960U, 160U, PadStrideInfo(1, 1, 1, 1), pwc_q.at(14), dwc_q.at(14), prwc_q.at(14));
        get_expanded_conv_qasymm8(data_path, "expanded_conv_15", IsResidual::Yes, 960U, 160U, PadStrideInfo(1, 1, 1, 1), pwc_q.at(15), dwc_q.at(15), prwc_q.at(15));
        get_expanded_conv_qasymm8(data_path, "expanded_conv_16", IsResidual::No, 960U, 320U, PadStrideInfo(1, 1, 1, 1), pwc_q.at(16), dwc_q.at(16), prwc_q.at(16));

        graph << ConvolutionLayer(1U, 1U, 1280U,
                                  get_weights_accessor(data_path, "Conv_1_weights.npy"),
                                  get_weights_accessor(data_path, "Conv_1_biases.npy"),
                                  PadStrideInfo(1, 1, 0, 0), 1, conv_weights_quant_info.at(1))
              .set_name("Conv_1")
              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f)).set_name("Conv_1/Relu6")
              << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, common_params.data_layout)).set_name("Logits/AvgPool")
              << ConvolutionLayer(1U, 1U, 1001U,
                                  get_weights_accessor(data_path, "Logits_Conv2d_1c_1x1_weights.npy"),
                                  get_weights_accessor(data_path, "Logits_Conv2d_1c_1x1_biases.npy"),
                                  PadStrideInfo(1, 1, 0, 0), 1, conv_weights_quant_info.at(2))
              .set_name("Logits/Conv2d_1c_1x1");
    }

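    // Quantized counterpart of get_expanded_conv_float(): in the quantized checkpoint the
    // batch-normalization parameters are already folded into each convolution's weights and
    // biases, so the block is just expand -> depthwise -> project, with per-layer
    // quantization info (pwi, dwi, pji) instead of BatchNormalizationLayer nodes.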
    void get_expanded_conv_qasymm8(const std::string &data_path, std::string &&param_path, IsResidual is_residual,
                                   unsigned int input_channels, unsigned int output_channels,
                                   PadStrideInfo dwc_pad_stride_info,
                                   const QuantizationInfo &pwi, const QuantizationInfo &dwi, const QuantizationInfo &pji)
    {
        std::string total_path = param_path + "_";

        SubStream left(graph);
        left << ConvolutionLayer(1U, 1U, input_channels,
                                 get_weights_accessor(data_path, total_path + "project_weights.npy"),
                                 get_weights_accessor(data_path, total_path + "project_biases.npy"),
                                 PadStrideInfo(1, 1, 0, 0), 1, pwi)
             .set_name(param_path + "/Conv2D")
             << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f)).set_name(param_path + "/Relu6")
             << DepthwiseConvolutionLayer(3U, 3U,
                                          get_weights_accessor(data_path, total_path + "depthwise_depthwise_weights.npy"),
                                          get_weights_accessor(data_path, total_path + "depthwise_depthwise_biases.npy"),
                                          dwc_pad_stride_info, 1, dwi)
             .set_name(param_path + "/depthwise/depthwise")
             << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f)).set_name(param_path + "/depthwise/Relu6")
             << ConvolutionLayer(1U, 1U, output_channels,
                                 get_weights_accessor(data_path, total_path + "project_weights.npy"),
                                 get_weights_accessor(data_path, total_path + "project_biases.npy"),
                                 PadStrideInfo(1, 1, 0, 0), 1, pji)
             .set_name(param_path + "/project/Conv2D");

        if(is_residual == IsResidual::Yes)
        {
            // Add residual node: "right" forks from the block input, as in the float path
            SubStream right(graph);
            graph << EltwiseLayer(std::move(left), std::move(right), EltwiseOperation::Add).set_name(param_path + "/add");
        }
        else
        {
            graph.forward_tail(left.tail_node());
        }
    }
};

/** Main program for MobileNetV2
 *
 * Model is based on:
 *      https://arxiv.org/abs/1801.04381
 *      "MobileNetV2: Inverted Residuals and Linear Bottlenecks"
 *      Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, Liang-Chieh Chen
 *
 * Provenance: https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.0_224.tgz
 *
 * @note To list all the possible arguments, run the binary with the --help option
 *
 * @param[in] argc Number of arguments
 * @param[in] argv Arguments
 */
int main(int argc, char **argv)
{
    return arm_compute::utils::run_example<GraphMobilenetV2Example>(argc, argv);
}
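
A minimal sketch of how this example is typically invoked, assuming the standard Compute Library build layout (the binary path and file paths below are illustrative, not taken from this file):

    # Float (F32) run on the CPU backend, reading weights from the extracted cnn_data directory
    ./build/examples/graph_mobilenet_v2 --target=NEON --type=F32 --data=/path/to/assets --image=/path/to/input.ppm --labels=/path/to/labels.txt

    # Quantized (QASYMM8) run; note that create_graph_qasymm8() loads the input tensor via
    # get_weights_accessor(), i.e. it expects the image as a .npy file
    ./build/examples/graph_mobilenet_v2 --target=NEON --type=QASYMM8 --data=/path/to/assets --image=input.npy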