Arm Compute Library 21.08 — SyntheticDataTypeMutator.cpp (file documentation page).
1 /*
2  * Copyright (c) 2019-2021 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
25 
31 
32 #include "support/Cast.h"
33 
34 #include <set>
35 
36 namespace arm_compute
37 {
38 namespace graph
39 {
40 namespace
41 {
/** Empty accessor class
 *
 * A no-op ITensorAccessor: it leaves the tensor contents untouched and
 * reports success. Used to back synthetic constant tensors (e.g. the
 * zero-initialized bias added in handle_nodes_with_bias) that need an
 * accessor object but no real data source.
 */
class EmptyAccessor final : public graph::ITensorAccessor
{
public:
    /** Default Constructor */
    EmptyAccessor() = default;

    // Inherited methods overridden:
    bool access_tensor(ITensor &tensor) override
    {
        // Nothing to read or write; returning true signals successful access.
        ARM_COMPUTE_UNUSED(tensor);
        return true;
    }
};
56 
/** Check if the mutation pass can be applied
 *
 * @param[in] g Graph the mutation pass need to be applied on
 *
 * @return True if the pass can be applied else false
 */
bool is_mutation_supported(Graph &g)
{
    // Node types the synthetic data-type conversion cannot handle.
    // NOTE(review): this initializer looks truncated in this view; the full
    // source may list additional unsupported node types -- confirm upstream.
    const std::set<NodeType> unsupported_node_types = { NodeType::DetectionOutputLayer,
    };

    // The pass applies only if the graph contains none of the unsupported types.
    for(const auto &utype : unsupported_node_types)
    {
        if(!g.nodes(utype).empty())
        {
            return false;
        }
    }
    return true;
}
79 
80 /** Remove nodes that get optimized out during conversion
81  *
82  * @param[in, out] g Graph to remove the nodes from.
83  */
84 void remove_optimized_nodes(Graph &g)
85 {
86  const std::set<NodeType> optimized_node_types = { NodeType::BatchNormalizationLayer };
87 
88  for(const auto &opt_type : optimized_node_types)
89  {
90  const std::vector<NodeID> opt_nodes_ids = g.nodes(opt_type);
91  for(const auto &node_id : opt_nodes_ids)
92  {
93  INode *node = g.node(node_id);
94 
95  // Get input edge
96  Edge *input_edge = node->input_edge(0);
97  ARM_COMPUTE_ERROR_ON(input_edge == nullptr);
98 
99  // Get producer node
100  INode *producer = input_edge->producer();
101  const EdgeID producer_edge_id = input_edge->producer_idx();
102  ARM_COMPUTE_ERROR_ON(producer == nullptr);
103 
104  // Get driving nodes
105  std::vector<NodeIdxPair> driving_nodes = get_driving_nodes(*node);
106 
107  // Remove node
108  g.remove_node(node->id());
109 
110  // Update connections
111  for(auto &driving_node : driving_nodes)
112  {
113  g.add_connection(producer->id(), producer_edge_id, driving_node.node_id, driving_node.index);
114  }
115  }
116  }
117 }
118 
/** Convert tensor meta-data
 *
 * Assigns @p data_type to every tensor in the graph and, for quantized
 * types, installs a fixed synthetic quantization info.
 *
 * @param[in,out] g         Graph to convert tensors of.
 * @param[in]     data_type Target data type for every tensor.
 */
void convert_tensors(Graph &g, DataType data_type)
{
    auto &tensors = g.tensors();
    for(auto &tensor : tensors)
    {
        if(tensor != nullptr)
        {
            switch(data_type)
            {
                // NOTE(review): this view appears to omit a QASYMM8_SIGNED
                // case label sharing this branch -- confirm against full source.
                case DataType::QASYMM8:
                {
                    // Arbitrary synthetic quantization parameters (scale 0.125, offset -10).
                    tensor->desc().quant_info = QuantizationInfo(0.125f, -10);
                    break;
                }
                default:
                {
                    ARM_COMPUTE_ERROR("Unsupported mutation type");
                    break;
                }
            }
            tensor->desc().data_type = data_type;
        }
    }
}
148 
/** Convert special node
 *
 * Visits every node of type @p NT in the graph and applies a conversion
 * function to the node and its first output tensor.
 *
 * @tparam NT Node class; NT::node_type selects which nodes are visited.
 *
 * @param[in,out] g Graph to convert tensors of.
 * @param[in]     f Conversion function, invoked with each node and its first output tensor.
 */
template <typename NT>
void convert_special_node(Graph &g, std::function<bool(INode *, Tensor *)> const &f)
{
    const std::vector<NodeID> nodes_ids = g.nodes(NT::node_type);
    for(const auto &nodes_id : nodes_ids)
    {
        // The downcast to NT* acts as a debug-build type check; the result is used as INode*.
        INode *node = arm_compute::utils::cast::polymorphic_downcast<NT *>(g.node(nodes_id));
        ARM_COMPUTE_ERROR_ON(node == nullptr);

        Tensor *output_tensor = node->output(0);
        ARM_COMPUTE_ERROR_ON(output_tensor == nullptr);

        // The conversion function's return value is ignored.
        f(node, output_tensor);
    }
}
170 
171 /** Converts special tensors
172  *
173  * @param[in,out] g Graph to convert tensors of.
174  */
175 void convert_special_tensors(Graph &g)
176 {
177  auto softmax_func = [](INode * node, Tensor * tensor)
178  {
179  ARM_COMPUTE_UNUSED(node);
180  if(tensor->desc().data_type == DataType::QASYMM8)
181  {
182  tensor->desc().quant_info = QuantizationInfo(1.f / 256.f, 0);
183  }
184  else if(tensor->desc().data_type == DataType::QASYMM8_SIGNED)
185  {
186  tensor->desc().quant_info = QuantizationInfo(1.f / 256.f, -128);
187  }
188  return true;
189  };
190 
191  auto act_func = [](INode * node, Tensor * tensor)
192  {
193  auto *act_node = arm_compute::utils::cast::polymorphic_downcast<ActivationLayerNode *>(node);
194  if(tensor->desc().data_type == DataType::QASYMM8)
195  {
196  if(act_node->activation_info().activation() == ActivationLayerInfo::ActivationFunction::TANH)
197  {
198  tensor->desc().quant_info = QuantizationInfo(1.f / 128.f, 128);
199  }
200  else if(act_node->activation_info().activation() == ActivationLayerInfo::ActivationFunction::LOGISTIC)
201  {
202  tensor->desc().quant_info = QuantizationInfo(1.f / 256.f, 0);
203  }
204  }
205  else if(tensor->desc().data_type == DataType::QASYMM8_SIGNED)
206  {
207  if(act_node->activation_info().activation() == ActivationLayerInfo::ActivationFunction::TANH)
208  {
209  tensor->desc().quant_info = QuantizationInfo(1.f / 128.f, 0);
210  }
211  else if(act_node->activation_info().activation() == ActivationLayerInfo::ActivationFunction::LOGISTIC)
212  {
213  tensor->desc().quant_info = QuantizationInfo(1.f / 256.f, -128);
214  }
215  }
216  return true;
217  };
218 
219  convert_special_node<ActivationLayerNode>(g, act_func);
220  convert_special_node<SoftmaxLayerNode>(g, softmax_func);
221 }
222 
/** Handle nodes with bias
 *
 * @note Special tensors are for now biases that the data type differ
 *
 * @param[in,out] g Graph to convert tensors of.
 */
void handle_nodes_with_bias(Graph &g)
{
    // Node types whose third input (index 2) is an optional bias tensor.
    // NOTE(review): this initializer looks truncated in this view; the full
    // source may list additional bias-carrying node types -- confirm upstream.
    const std::set<NodeType> special_node_types = { NodeType::ConvolutionLayer,
    };

    for(const auto &spc_type : special_node_types)
    {
        const std::vector<NodeID> scp_nodes_ids = g.nodes(spc_type);
        for(const auto &node_id : scp_nodes_ids)
        {
            INode *node = g.node(node_id);
            if(node != nullptr)
            {
                // Input 2 is the bias tensor, if the node has one.
                Tensor *tensor = node->input(2);
                if(tensor != nullptr)
                {
                    // Existing bias: retype to 32-bit integer.
                    tensor->desc().data_type = DataType::S32;
                }
                else
                {
                    // No bias: synthesize a constant one, named after the node when it has a name.
                    auto params = node->common_node_params();
                    params.name = params.name.empty() ? "" : params.name + "Bias";

                    // The bias is 1-D; its length is taken from the weights tensor (input 1),
                    // using the BATCHES dimension of the weights' layout.
                    TensorDescriptor b_desc = node->input(1)->desc();
                    auto depth = b_desc.shape[get_dimension_idx(b_desc.layout, DataLayoutDimension::BATCHES)];
                    b_desc.shape = TensorShape(depth);

                    // Back the constant with a no-op accessor (no real data is provided).
                    auto accessor = std::make_unique<EmptyAccessor>();
                    auto b_nid = GraphBuilder::add_const_node(g, params, b_desc, std::move(accessor));
                    g.add_connection(b_nid, 0, node_id, 2);
                }
            }
        }
    }
}
267 } // namespace
268 
270  : _mutate_type{ mutate_type }
271 {
272 }
273 
275 {
276  return "SyntheticDataTypeMutator";
277 }
278 
280 {
282 }
283 
285 {
286  if(is_mutation_supported(g))
287  {
288  // Remove nodes that get optimized out (e.g. BatchNorm)
289  remove_optimized_nodes(g);
290 
291  // Convert tensor
292  convert_tensors(g, _mutate_type);
293  convert_special_tensors(g);
294 
295  // Handle special nodes
296  handle_nodes_with_bias(g);
297  }
298  else
299  {
300  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Synthetic data type mutator couldn't be applied" << std::endl);
301  }
302 }
303 } // namespace graph
304 } // namespace arm_compute
std::vector< NodeIdxPair > get_driving_nodes(const INode &node)
Get the list of driving nodes of a given node.
Definition: Utils.cpp:166
#define ARM_COMPUTE_ERROR(msg)
Print the given message then throw an std::runtime_error.
Definition: Error.h:352
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
Copyright (c) 2017-2021 Arm Limited.
1 channel, 1 S32 per channel
SyntheticDataTypeMutator(DataType mutate_type=DataType::QASYMM8)
const DataType data_type
Definition: Im2Col.cpp:150
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
Definition: Error.h:152
quantized, asymmetric fixed-point 8-bit number unsigned
MutationType type() const override
Returns mutation type.
unsigned int EdgeID
Definition: Types.h:69
Graph class.
Definition: Graph.h:53
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
quantized, asymmetric fixed-point 8-bit number signed
static NodeID add_const_node(Graph &g, NodeParams params, const TensorDescriptor &desc, ITensorAccessorUPtr accessor=nullptr)
Adds a Const node to the graph.
virtual void mutate(Graph &g) override
Walk the graph and perform a specific mutation.
DataType
Available data types.
Definition: Types.h:77
const char * name() override
Returns mutator name.
size_t get_dimension_idx(DataLayout data_layout, const DataLayoutDimension data_layout_dimension)
Get index of a tensor's given dimension depending on its layout.
Definition: Utils.cpp:137