Compute Library
 23.11
SyntheticDataTypeMutator.cpp
/*
 * Copyright (c) 2019-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/graph/mutators/SyntheticDataTypeMutator.h"

#include "arm_compute/graph/GraphBuilder.h"
#include "arm_compute/graph/ITensorAccessor.h"
#include "arm_compute/graph/Logger.h"
#include "arm_compute/graph/nodes/Nodes.h"
#include "arm_compute/graph/Utils.h"

#include "support/Cast.h"

#include <set>

namespace arm_compute
{
namespace graph
{
namespace
{
/** Empty accessor class */
class EmptyAccessor final : public graph::ITensorAccessor
{
public:
    /** Default Constructor */
    EmptyAccessor() = default;

    // Inherited methods overridden:
    bool access_tensor(ITensor &tensor) override
    {
        ARM_COMPUTE_UNUSED(tensor);
        return true;
    }
};

/** Check if the mutation pass can be applied
 *
 * @param[in] g Graph the mutation pass needs to be applied on
 *
 * @return True if the pass can be applied, false otherwise
 */
bool is_mutation_supported(Graph &g)
{
    const std::set<NodeType> unsupported_node_types = {NodeType::DetectionOutputLayer, NodeType::NormalizationLayer,
                                                       NodeType::PriorBoxLayer};

    for (const auto &utype : unsupported_node_types)
    {
        if (!g.nodes(utype).empty())
        {
            return false;
        }
    }
    return true;
}

/** Remove nodes that get optimized out during conversion
 *
 * @param[in, out] g Graph to remove the nodes from.
 */
void remove_optimized_nodes(Graph &g)
{
    const std::set<NodeType> optimized_node_types = {NodeType::BatchNormalizationLayer};

    for (const auto &opt_type : optimized_node_types)
    {
        const std::vector<NodeID> opt_nodes_ids = g.nodes(opt_type);
        for (const auto &node_id : opt_nodes_ids)
        {
            INode *node = g.node(node_id);

            // Get input edge
            Edge *input_edge = node->input_edge(0);
            ARM_COMPUTE_ERROR_ON(input_edge == nullptr);

            // Get producer node
            INode       *producer         = input_edge->producer();
            const EdgeID producer_edge_id = input_edge->producer_idx();
            ARM_COMPUTE_ERROR_ON(producer == nullptr);

            // Get driving nodes
            std::vector<NodeIdxPair> driving_nodes = get_driving_nodes(*node);

            // Remove node
            g.remove_node(node->id());

            // Update connections
            for (auto &driving_node : driving_nodes)
            {
                g.add_connection(producer->id(), producer_edge_id, driving_node.node_id, driving_node.index);
            }
        }
    }
}

/** Convert tensor meta-data
 *
 * @param[in,out] g         Graph to convert tensors of.
 * @param[in]     data_type Data type to convert the tensors to.
 */
void convert_tensors(Graph &g, DataType data_type)
{
    auto &tensors = g.tensors();
    for (auto &tensor : tensors)
    {
        if (tensor != nullptr)
        {
            switch (data_type)
            {
                case DataType::QASYMM8:
                case DataType::QASYMM8_SIGNED:
                {
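                    // (Assumption) The scale/offset below are synthetic placeholders rather
                    // than calibrated values; they only need to be valid quantization info.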
                    tensor->desc().quant_info = QuantizationInfo(0.125f, -10);
                    break;
                }
                default:
                {
                    ARM_COMPUTE_ERROR("Unsupported mutation type");
                    break;
                }
            }
            tensor->desc().data_type = data_type;
        }
    }
}

/** Convert special node
 *
 * @param[in,out] g Graph to convert tensors of.
 * @param[in]     f Conversion function to run on each node of type NT.
 */
template <typename NT>
void convert_special_node(Graph &g, std::function<bool(INode *, Tensor *)> const &f)
{
    const std::vector<NodeID> nodes_ids = g.nodes(NT::node_type);
    for (const auto &nodes_id : nodes_ids)
    {
        INode *node = arm_compute::utils::cast::polymorphic_downcast<NT *>(g.node(nodes_id));
        ARM_COMPUTE_ERROR_ON(node == nullptr);

        Tensor *output_tensor = node->output(0);
        ARM_COMPUTE_ERROR_ON(output_tensor == nullptr);

        f(node, output_tensor);
    }
}

/** Converts special tensors
 *
 * @param[in,out] g Graph to convert tensors of.
 */
void convert_special_tensors(Graph &g)
{
    auto softmax_func = [](INode *node, Tensor *tensor)
    {
        ARM_COMPUTE_UNUSED(node);
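        // Softmax outputs lie in [0, 1]; map that range with scale 1/256 and
        // zero point 0 (QASYMM8) or -128 (QASYMM8_SIGNED).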
        if (tensor->desc().data_type == DataType::QASYMM8)
        {
            tensor->desc().quant_info = QuantizationInfo(1.f / 256.f, 0);
        }
        else if (tensor->desc().data_type == DataType::QASYMM8_SIGNED)
        {
            tensor->desc().quant_info = QuantizationInfo(1.f / 256.f, -128);
        }
        return true;
    };

    auto act_func = [](INode *node, Tensor *tensor)
    {
        auto *act_node = arm_compute::utils::cast::polymorphic_downcast<ActivationLayerNode *>(node);
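        // Tanh outputs lie in [-1, 1] and Logistic in [0, 1]; adjust scale and
        // zero point accordingly for the unsigned and signed 8-bit cases below.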
        if (tensor->desc().data_type == DataType::QASYMM8)
        {
            if (act_node->activation_info().activation() == ActivationLayerInfo::ActivationFunction::TANH)
            {
                tensor->desc().quant_info = QuantizationInfo(1.f / 128.f, 128);
            }
            else if (act_node->activation_info().activation() == ActivationLayerInfo::ActivationFunction::LOGISTIC)
            {
                tensor->desc().quant_info = QuantizationInfo(1.f / 256.f, 0);
            }
        }
        else if (tensor->desc().data_type == DataType::QASYMM8_SIGNED)
        {
            if (act_node->activation_info().activation() == ActivationLayerInfo::ActivationFunction::TANH)
            {
                tensor->desc().quant_info = QuantizationInfo(1.f / 128.f, 0);
            }
            else if (act_node->activation_info().activation() == ActivationLayerInfo::ActivationFunction::LOGISTIC)
            {
                tensor->desc().quant_info = QuantizationInfo(1.f / 256.f, -128);
            }
        }
        return true;
    };

    convert_special_node<ActivationLayerNode>(g, act_func);
    convert_special_node<SoftmaxLayerNode>(g, softmax_func);
}

/** Handle nodes with bias
 *
 * @note Special tensors are, for now, bias tensors whose data type differs from the mutated type.
 *
 * @param[in,out] g Graph to convert tensors of.
 */
void handle_nodes_with_bias(Graph &g)
{
    const std::set<NodeType> special_node_types = {NodeType::ConvolutionLayer, NodeType::DeconvolutionLayer,
                                                   NodeType::DepthwiseConvolutionLayer, NodeType::FullyConnectedLayer};

    for (const auto &spc_type : special_node_types)
    {
        const std::vector<NodeID> spc_nodes_ids = g.nodes(spc_type);
        for (const auto &node_id : spc_nodes_ids)
        {
            INode *node = g.node(node_id);
            if (node != nullptr)
            {
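                // The bias, when present, is the node's third input (index 2);
                // quantized kernels expect it to be S32.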
                Tensor *tensor = node->input(2);
                if (tensor != nullptr)
                {
                    tensor->desc().data_type = DataType::S32;
                }
                else
                {
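                    // No bias input yet: create a constant 1D bias tensor, sized to the
                    // number of output feature maps taken from the weights descriptor,
                    // back it with an EmptyAccessor and wire it to input index 2.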
                    auto params = node->common_node_params();
                    params.name = params.name.empty() ? "" : params.name + "Bias";

                    TensorDescriptor b_desc = node->input(1)->desc();
                    auto depth = b_desc.shape[get_dimension_idx(b_desc.layout, DataLayoutDimension::BATCHES)];
                    b_desc.shape = TensorShape(depth);

                    auto accessor = std::make_unique<EmptyAccessor>();
                    auto b_nid    = GraphBuilder::add_const_node(g, params, b_desc, std::move(accessor));
                    g.add_connection(b_nid, 0, node_id, 2);
                }
            }
        }
    }
}
} // namespace

SyntheticDataTypeMutator::SyntheticDataTypeMutator(DataType mutate_type) : _mutate_type{mutate_type}
{
}

const char *SyntheticDataTypeMutator::name()
{
    return "SyntheticDataTypeMutator";
}

IGraphMutator::MutationType SyntheticDataTypeMutator::type() const
{
    return IGraphMutator::MutationType::IR;
}

void SyntheticDataTypeMutator::mutate(Graph &g)
{
    if (is_mutation_supported(g))
    {
        // Remove nodes that get optimized out (e.g. BatchNorm)
        remove_optimized_nodes(g);

        // Convert tensors
        convert_tensors(g, _mutate_type);
        convert_special_tensors(g);

        // Handle special nodes
        handle_nodes_with_bias(g);
    }
    else
    {
        ARM_COMPUTE_LOG_GRAPH_VERBOSE("Synthetic data type mutator couldn't be applied" << std::endl);
    }
}
} // namespace graph
} // namespace arm_compute
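For reference, a minimal sketch of how this mutator could be driven directly on an already-built graph. This is illustrative only: in the library such mutators are normally run by the graph pass machinery, and the helper function name below is hypothetical.

#include "arm_compute/graph/Graph.h"
#include "arm_compute/graph/mutators/SyntheticDataTypeMutator.h"

// Hypothetical helper: rewrite an existing graph to use synthetic QASYMM8 tensors.
void apply_synthetic_qasymm8(arm_compute::graph::Graph &g)
{
    // Construct the mutator for the desired synthetic data type
    arm_compute::graph::SyntheticDataTypeMutator mutator(arm_compute::DataType::QASYMM8);

    // Walk the graph, converting tensor data types and quantization info
    mutator.mutate(g);
}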