/** Accessor that does nothing when its tensor is accessed */
class EmptyAccessor final : public graph::ITensorAccessor
{
public:
    /** Default Constructor */
    EmptyAccessor() = default;

    // Inherited methods overridden:
    bool access_tensor(ITensor &tensor) override
    {
        ARM_COMPUTE_UNUSED(tensor);
        return true;
    }
};
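/** Check whether the mutation is supported for a given graph
 *
 * @param[in] g Graph to check
 *
 * @return True if the mutation is supported, false otherwise
 */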
bool is_mutation_supported(Graph &g)
{
    // Node types the synthetic data type mutation cannot handle. NOTE: the exact
    // contents of this set are an assumption of this reconstruction.
    const std::set<NodeType> unsupported_node_types = {NodeType::DetectionOutputLayer, NodeType::NormalizationLayer,
                                                       NodeType::PriorBoxLayer};

    for (const auto &utype : unsupported_node_types)
    {
        if (!g.nodes(utype).empty())
        {
            return false;
        }
    }
    return true;
}
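/** Remove nodes that the data type conversion optimizes away
 *
 * @param[in,out] g Graph to remove the nodes from
 */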
void remove_optimized_nodes(Graph &g)
{
    // Node types that get folded away by the conversion. NOTE: assumed set.
    const std::set<NodeType> optimized_node_types = {NodeType::BatchNormalizationLayer};

    for (const auto &opt_type : optimized_node_types)
    {
        const std::vector<NodeID> opt_nodes_ids = g.nodes(opt_type);
        for (const auto &node_id : opt_nodes_ids)
        {
            INode *node = g.node(node_id);

            // Get the node's input edge and its producer
            Edge *input_edge = node->input_edge(0);
            ARM_COMPUTE_ERROR_ON(input_edge == nullptr);
            INode       *producer         = input_edge->producer();
            const EdgeID producer_edge_id = input_edge->producer_idx();

            // Collect the nodes this node drives, then remove it and reconnect
            // its producer directly to those consumers
            std::vector<NodeIdxPair> driving_nodes = get_driving_nodes(*node);
            g.remove_node(node->id());
            for (auto &driving_node : driving_nodes)
            {
                g.add_connection(producer->id(), producer_edge_id, driving_node.node_id, driving_node.index);
            }
        }
    }
}
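/** Convert the data type and quantization info of every graph tensor
 *
 * @param[in,out] g         Graph to convert the tensors of
 * @param[in]     data_type Target data type
 */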
void convert_tensors(Graph &g, DataType data_type)
{
    auto &tensors = g.tensors();
    for (auto &tensor : tensors)
    {
        if (tensor != nullptr)
        {
            // Assign arbitrary synthetic quantization parameters and the target type
            tensor->desc().quant_info = QuantizationInfo(0.125f, -10);
            tensor->desc().data_type  = data_type;
        }
    }
}
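/** Apply a conversion function to the output tensor of every node of a given type
 *
 * @param[in,out] g Graph to convert the tensors of
 * @param[in]     f Conversion function, called with each node and its output tensor
 */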
template <typename NT>
void convert_special_node(Graph &g, std::function<bool(INode *, Tensor *)> const &f)
{
    const std::vector<NodeID> nodes_ids = g.nodes(NT::node_type);
    for (const auto &nodes_id : nodes_ids)
    {
        INode *node = arm_compute::utils::cast::polymorphic_downcast<NT *>(g.node(nodes_id));
        ARM_COMPUTE_ERROR_ON(node == nullptr);

        Tensor *output_tensor = node->output(0);
        ARM_COMPUTE_ERROR_ON(output_tensor == nullptr);

        f(node, output_tensor);
    }
}
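/** Fix up the output tensors of nodes whose quantization is dictated by the
 *  operator semantics (softmax and saturating activations)
 *
 * @param[in,out] g Graph to convert the tensors of
 */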
void convert_special_tensors(Graph &g)
{
    // NOTE: the data-type checks below are inferred from the zero points used.
    // Softmax outputs lie in [0, 1), which maps onto the full quantized range.
    auto softmax_func = [](INode *node, Tensor *tensor)
    {
        ARM_COMPUTE_UNUSED(node);
        if (tensor->desc().data_type == DataType::QASYMM8)
        {
            tensor->desc().quant_info = QuantizationInfo(1.f / 256.f, 0);
        }
        else if (tensor->desc().data_type == DataType::QASYMM8_SIGNED)
        {
            tensor->desc().quant_info = QuantizationInfo(1.f / 256.f, -128);
        }
        return true;
    };

    // Tanh outputs lie in [-1, 1] and logistic outputs in [0, 1)
    auto act_func = [](INode *node, Tensor *tensor)
    {
        auto *act_node = arm_compute::utils::cast::polymorphic_downcast<ActivationLayerNode *>(node);
        if (tensor->desc().data_type == DataType::QASYMM8)
        {
            if (act_node->activation_info().activation() == ActivationLayerInfo::ActivationFunction::TANH)
            {
                tensor->desc().quant_info = QuantizationInfo(1.f / 128.f, 128);
            }
            else if (act_node->activation_info().activation() == ActivationLayerInfo::ActivationFunction::LOGISTIC)
            {
                tensor->desc().quant_info = QuantizationInfo(1.f / 256.f, 0);
            }
        }
        else if (tensor->desc().data_type == DataType::QASYMM8_SIGNED)
        {
            if (act_node->activation_info().activation() == ActivationLayerInfo::ActivationFunction::TANH)
            {
                tensor->desc().quant_info = QuantizationInfo(1.f / 128.f, 0);
            }
            else if (act_node->activation_info().activation() == ActivationLayerInfo::ActivationFunction::LOGISTIC)
            {
                tensor->desc().quant_info = QuantizationInfo(1.f / 256.f, -128);
            }
        }
        return true;
    };

    convert_special_node<ActivationLayerNode>(g, act_func);
    convert_special_node<SoftmaxLayerNode>(g, softmax_func);
}
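/** Ensure that nodes taking an optional bias actually have one, in the right data type
 *
 * @param[in,out] g Graph to convert the tensors of
 */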
void handle_nodes_with_bias(Graph &g)
{
    // Node types that accept an optional bias on input index 2. NOTE: assumed set.
    const std::set<NodeType> special_node_types = {NodeType::ConvolutionLayer, NodeType::DeconvolutionLayer,
                                                   NodeType::DepthwiseConvolutionLayer, NodeType::FullyConnectedLayer};

    for (const auto &spc_type : special_node_types)
    {
        const std::vector<NodeID> scp_nodes_ids = g.nodes(spc_type);
        for (const auto &node_id : scp_nodes_ids)
        {
            INode *node = g.node(node_id);
            if (node != nullptr)
            {
                Tensor *bias = node->input(2);
                if (bias != nullptr)
                {
                    // An existing bias only needs its data type fixed up
                    bias->desc().data_type = DataType::S32;
                }
                else
                {
                    // Otherwise create a constant bias node and wire it in
                    auto params = node->common_node_params();
                    params.name = params.name.empty() ? "" : params.name + "Bias";

                    // One bias value per output channel, taken here from the
                    // weights' batch dimension (assumption of this reconstruction)
                    TensorDescriptor b_desc = node->input(1)->desc();
                    const auto       depth  = b_desc.shape[get_dimension_idx(b_desc.layout, DataLayoutDimension::BATCHES)];
                    b_desc.shape            = TensorShape(depth);

                    auto accessor = std::make_unique<EmptyAccessor>();
                    auto b_nid    = GraphBuilder::add_const_node(g, params, b_desc, std::move(accessor));
                    g.add_connection(b_nid, 0, node_id, 2);
                }
            }
        }
    }
}
const char *SyntheticDataTypeMutator::name()
{
    return "SyntheticDataTypeMutator";
}
void SyntheticDataTypeMutator::mutate(Graph &g)
{
    if (is_mutation_supported(g))
    {
        // Remove nodes that the conversion would optimize away (e.g. batch normalization)
        remove_optimized_nodes(g);

        // Convert tensor meta-data, then fix up tensors with fixed quantization requirements
        convert_tensors(g, _mutate_type);
        convert_special_tensors(g);

        // Make sure bias-taking nodes have a bias of the correct data type
        handle_nodes_with_bias(g);
    }
    else
    {
        ARM_COMPUTE_LOG_GRAPH_VERBOSE("Synthetic data type mutator couldn't be applied" << std::endl);
    }
}
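// Usage sketch (an assumption, not part of the original file): the mutator is
// applied like any other IGraphMutator, e.g. through a PassManager. The
// DataType constructor argument shown here is inferred from _mutate_type.
//
//   PassManager pm;
//   pm.append(std::make_unique<SyntheticDataTypeMutator>(DataType::QASYMM8));
//   pm.run_all(g);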