Arm Compute Library 21.02
Source listing for examples/graph_alexnet.cpp
(Go to the documentation of this file.)
1 /*
2  * Copyright (c) 2017-2021 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
#include "arm_compute/graph.h"
#ifdef ARM_COMPUTE_CL
#include "arm_compute/runtime/CL/Utils.h"
#endif /* ARM_COMPUTE_CL */
#include "support/ToolchainSupport.h"
#include "utils/CommonGraphOptions.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"
32 
33 using namespace arm_compute;
34 using namespace arm_compute::utils;
35 using namespace arm_compute::graph::frontend;
36 using namespace arm_compute::graph_utils;
37 
38 /** Example demonstrating how to implement AlexNet's network using the Compute Library's graph API */
39 class GraphAlexnetExample : public Example
40 {
41 public:
42  GraphAlexnetExample()
43  : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "AlexNet")
44  {
45  }
46  bool do_setup(int argc, char **argv) override
47  {
48  // Parse arguments
49  cmd_parser.parse(argc, argv);
50  cmd_parser.validate();
51 
52  // Consume common parameters
53  common_params = consume_common_graph_parameters(common_opts);
54 
55  // Return when help menu is requested
56  if(common_params.help)
57  {
58  cmd_parser.print_help(argv[0]);
59  return false;
60  }
61 
62  // Checks
63  ARM_COMPUTE_EXIT_ON_MSG(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type), "QASYMM8 not supported for this graph");
64 
65  // Print parameter values
66  std::cout << common_params << std::endl;
67 
68  // Get trainable parameters data path
69  std::string data_path = common_params.data_path;
70 
71  // Create a preprocessor object
72  const std::array<float, 3> mean_rgb{ { 122.68f, 116.67f, 104.01f } };
73  std::unique_ptr<IPreprocessor> preprocessor = std::make_unique<CaffePreproccessor>(mean_rgb);
74 
75  // Create input descriptor
76  const auto operation_layout = common_params.data_layout;
77  const TensorShape tensor_shape = permute_shape(TensorShape(227U, 227U, 3U, 1U), DataLayout::NCHW, operation_layout);
78  TensorDescriptor input_descriptor = TensorDescriptor(tensor_shape, common_params.data_type).set_layout(operation_layout);
79 
80  // Set weights trained layout
81  const DataLayout weights_layout = DataLayout::NCHW;
82 
83  graph << common_params.target
84  << common_params.fast_math_hint
85  << InputLayer(input_descriptor, get_input_accessor(common_params, std::move(preprocessor)))
86  // Layer 1
88  11U, 11U, 96U,
89  get_weights_accessor(data_path, "/cnn_data/alexnet_model/conv1_w.npy", weights_layout),
90  get_weights_accessor(data_path, "/cnn_data/alexnet_model/conv1_b.npy"),
91  PadStrideInfo(4, 4, 0, 0))
92  .set_name("conv1")
95  << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, operation_layout, PadStrideInfo(2, 2, 0, 0))).set_name("pool1")
96  // Layer 2
98  5U, 5U, 256U,
99  get_weights_accessor(data_path, "/cnn_data/alexnet_model/conv2_w.npy", weights_layout),
100  get_weights_accessor(data_path, "/cnn_data/alexnet_model/conv2_b.npy"),
101  PadStrideInfo(1, 1, 2, 2), 2)
102  .set_name("conv2")
105  << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, operation_layout, PadStrideInfo(2, 2, 0, 0))).set_name("pool2")
106  // Layer 3
107  << ConvolutionLayer(
108  3U, 3U, 384U,
109  get_weights_accessor(data_path, "/cnn_data/alexnet_model/conv3_w.npy", weights_layout),
110  get_weights_accessor(data_path, "/cnn_data/alexnet_model/conv3_b.npy"),
111  PadStrideInfo(1, 1, 1, 1))
112  .set_name("conv3")
114  // Layer 4
115  << ConvolutionLayer(
116  3U, 3U, 384U,
117  get_weights_accessor(data_path, "/cnn_data/alexnet_model/conv4_w.npy", weights_layout),
118  get_weights_accessor(data_path, "/cnn_data/alexnet_model/conv4_b.npy"),
119  PadStrideInfo(1, 1, 1, 1), 2)
120  .set_name("conv4")
122  // Layer 5
123  << ConvolutionLayer(
124  3U, 3U, 256U,
125  get_weights_accessor(data_path, "/cnn_data/alexnet_model/conv5_w.npy", weights_layout),
126  get_weights_accessor(data_path, "/cnn_data/alexnet_model/conv5_b.npy"),
127  PadStrideInfo(1, 1, 1, 1), 2)
128  .set_name("conv5")
130  << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, operation_layout, PadStrideInfo(2, 2, 0, 0))).set_name("pool5")
131  // Layer 6
133  4096U,
134  get_weights_accessor(data_path, "/cnn_data/alexnet_model/fc6_w.npy", weights_layout),
135  get_weights_accessor(data_path, "/cnn_data/alexnet_model/fc6_b.npy"))
136  .set_name("fc6")
138  // Layer 7
140  4096U,
141  get_weights_accessor(data_path, "/cnn_data/alexnet_model/fc7_w.npy", weights_layout),
142  get_weights_accessor(data_path, "/cnn_data/alexnet_model/fc7_b.npy"))
143  .set_name("fc7")
145  // Layer 8
147  1000U,
148  get_weights_accessor(data_path, "/cnn_data/alexnet_model/fc8_w.npy", weights_layout),
149  get_weights_accessor(data_path, "/cnn_data/alexnet_model/fc8_b.npy"))
150  .set_name("fc8")
151  // Softmax
152  << SoftmaxLayer().set_name("prob")
153  << OutputLayer(get_output_accessor(common_params, 5));
154 
155  // Finalize graph
156  GraphConfig config;
157 
158  config.num_threads = common_params.threads;
159  config.use_tuner = common_params.enable_tuner;
160  config.tuner_mode = common_params.tuner_mode;
161  config.tuner_file = common_params.tuner_file;
162  config.mlgo_file = common_params.mlgo_file;
163 
164  // Load the precompiled kernels from a file into the kernel library, in this way the next time they are needed
165  // compilation won't be required.
166  if(common_params.enable_cl_cache)
167  {
168 #ifdef ARM_COMPUTE_CL
170 #endif /* ARM_COMPUTE_CL */
171  }
172 
173  graph.finalize(common_params.target, config);
174 
175  // Save the opencl kernels to a file
176  if(common_opts.enable_cl_cache)
177  {
178 #ifdef ARM_COMPUTE_CL
180 #endif /* ARM_COMPUTE_CL */
181  }
182 
183  return true;
184  }
185  void do_run() override
186  {
187  // Run graph
188  graph.run();
189  }
190 
191 private:
192  CommandLineParser cmd_parser;
193  CommonGraphOptions common_opts;
194  CommonGraphParams common_params;
195  Stream graph;
196 };
197 
198 /** Main program for AlexNet
199  *
200  * Model is based on:
201  * https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks
202  * "ImageNet Classification with Deep Convolutional Neural Networks"
203  * Alex Krizhevsky and Sutskever, Ilya and Hinton, Geoffrey E
204  *
205  * Provenance: https://github.com/BVLC/caffe/tree/master/models/bvlc_alexnet
206  *
207  * @note To list all the possible arguments execute the binary appended with the --help option
208  *
209  * @param[in] argc Number of arguments
210  * @param[in] argv Arguments
211  *
212  * @return Return code
213  */
214 int main(int argc, char **argv)
215 {
216  return arm_compute::utils::run_example<GraphAlexnetExample>(argc, argv);
217 }
GraphConfig — Graph configuration structure.
Target — Device target types.
Definition: Types.h:80
Shape of a tensor.
Definition: TensorShape.h:39
CLTunerMode tuner_mode
Tuner mode to be used by the CL tuner.
Definition: Types.h:87
void restore_program_cache_from_file(const std::string &filename="cache.bin")
This function loads prebuilt opencl kernels from a file.
Definition: Utils.cpp:35
std::unique_ptr< graph::ITensorAccessor > get_input_accessor(const arm_compute::utils::CommonGraphParams &graph_parameters, std::unique_ptr< IPreprocessor > preprocessor=nullptr, bool bgr=true)
Generates appropriate input accessor according to the specified graph parameters. ...
Definition: GraphUtils.h:497
Normalization Layer Information class.
Definition: Types.h:1647
void consume_common_graph_parameters(CommonGraphValidateOptions &options, CommonParams &common_params)
Consumes the common graph options and creates a structure containing any information required by the examples.
Includes all the Graph headers at once.
Common command line options used to configure the graph examples.
Class to parse command line arguments.
Activation Layer Information class.
Definition: Types.h:1550
Copyright (c) 2017-2021 Arm Limited.
std::string mlgo_file
Filename to load MLGO heuristics from.
Definition: Types.h:90
std::string tuner_file
File to load/store tuning values from.
Definition: Types.h:89
#define ARM_COMPUTE_EXIT_ON_MSG(cond, msg)
If the condition is true, the given message is printed and program exits.
Definition: Error.h:379
Pooling Layer Information struct.
Definition: Types.h:1214
Abstract Example class.
Definition: Utils.h:78
void save_program_cache_to_file(const std::string &filename="cache.bin")
This function saves opencl kernels library to a file.
Definition: Utils.cpp:73
Padding and stride information class.
Definition: Types.h:722
Num samples, channels, height, width.
TensorShape permute_shape(TensorShape tensor_shape, DataLayout in_data_layout, DataLayout out_data_layout)
Permutes a given tensor shape given the input and output data layout.
Definition: GraphUtils.h:664
bool is_data_type_quantized_asymmetric(DataType dt)
Check if a given data type is of asymmetric quantized type.
Definition: Utils.h:1190
TensorDescriptor & set_layout(DataLayout data_layout)
Sets tensor descriptor data layout.
Structure holding all the common graph parameters.
int main(int argc, char **argv)
Main program for AlexNet.
std::unique_ptr< graph::ITensorAccessor > get_output_accessor(const arm_compute::utils::CommonGraphParams &graph_parameters, size_t top_n=5, bool is_validation=false, std::ostream &output_stream=std::cout)
Generates appropriate output accessor according to the specified graph parameters.
Definition: GraphUtils.h:543
bool use_tuner
Use a tuner in tunable backends.
Definition: Types.h:85
std::unique_ptr< graph::ITensorAccessor > get_weights_accessor(const std::string &path, const std::string &data_file, DataLayout file_layout=DataLayout::NCHW)
Generates appropriate weights accessor according to the specified path.
Definition: GraphUtils.h:475
int num_threads
Number of threads to use (thread capable backends), if 0 the backend will auto-initialize, if -1 the backend will stay as it is.
Definition: Types.h:88
Stream frontend class to construct simple graphs in a stream fashion.
Definition: Stream.h:45
DataLayout
[DataLayout enum definition]
Definition: Types.h:120
Normalization applied cross maps.
ILayer & set_name(std::string name)
Sets the name of the layer.
Definition: ILayer.h:55