ArmNN 25.11
ElementwiseBinaryOperator.cpp
//
// Copyright © 2022-2025 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

TosaSerializationBasicBlock* ConvertElementwiseBinaryToTosaOperator(const Layer* layer,
                                                                    const LayerType type,
                                                                    const std::vector<const TensorInfo*>& inputs,
                                                                    const std::vector<const TensorInfo*>& outputs,
                                                                    const ElementwiseBinaryDescriptor* descriptor)
{
    auto input0Name = std::string("input_0");
    auto input1Name = std::string("input_1");
    auto outputName = std::string("output0_");
    std::string input0ElementwiseBinaryName = std::string("intermediate0_") + GetUniqueTosaMappingID();
    std::string input1ElementwiseBinaryName = std::string("intermediate0_") + GetUniqueTosaMappingID();
    std::string input2ElementwiseBinaryName = std::string("intermediate0_") + GetUniqueTosaMappingID();
    std::string blockName;

    // If a layer is present then the block will be used for execution, so input and output names need to be
    // determined using the previous and following layers so the graph is connected correctly.
    // For validation this doesn't matter.
    if(layer != nullptr)
    {
        input0Name = GenerateUniqueInputName(layer->GetInputSlot(0));
        input1Name = GenerateUniqueInputName(layer->GetInputSlot(1));
        outputName = GenerateUniqueOutputName(*layer);
    }

    TosaSerializationOperator* op = nullptr;
    std::vector<TosaSerializationTensor*> tensors;
    std::vector<TosaSerializationOperator*> operators;

    DType inputDType0 = ArmNNToDType(inputs[0]->GetDataType());
    DType inputDType1 = ArmNNToDType(inputs[1]->GetDataType());
    DType outputDType0 = ArmNNToDType(outputs[0]->GetDataType());

    bool isInputInt8 = (inputDType0 == DType_INT8);

    // Only add input tensors if the connected layer is an input layer,
    // as intermediate or constant tensors will be created separately.
    // There also can't be duplicate tensors.
    if(input0Name.find("input_") != std::string::npos)
    {
        std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
        tensors.emplace_back(new TosaSerializationTensor(input0Name, inputShape0, inputDType0, {}));
    }
    if(input1Name.find("input_") != std::string::npos && input0Name != input1Name)
    {
        std::vector<int32_t> inputShape1 = GetTosaTensorShape(inputs[1]->GetShape());
        tensors.emplace_back(new TosaSerializationTensor(input1Name, inputShape1, inputDType1, {}));
    }

    // Assign an output name and add the output tensor, based on the input type.
    // For an int8 input, every op's output will need to be rescaled from int32 back to int8.
    std::string outputElemenwiseBinaryName;
    std::vector<int32_t> outputShape0 = GetTosaTensorShape(outputs[0]->GetShape());
    if (isInputInt8)
    {
        outputElemenwiseBinaryName = std::string("intermediate0_") + GetUniqueTosaMappingID();
        tensors.emplace_back(new TosaSerializationTensor(outputElemenwiseBinaryName, outputShape0, DType_INT32, {}));
    }
    else
    {
        tensors.emplace_back(new TosaSerializationTensor(outputName, outputShape0, outputDType0, {}));
    }

    float input0Scale = 0;
    float input1Scale = 0;
    float outputScale = 0;

    if (isInputInt8)
    {
        input0Scale = inputs[0]->GetQuantizationScale();
        input1Scale = inputs[1]->GetQuantizationScale();
        outputScale = outputs[0]->GetQuantizationScale();

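        // Int8 inputs are handled by running the elementwise op in int32: the inputs are
        // rescaled up-front here, and the int32 result is rescaled back to int8 after the op.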
        CalculateRescaleScales(input0Scale, input1Scale, outputScale, descriptor->m_Operation);

        TosaSerializationOperator* rescaleOp0 = nullptr;
        CreateRescaleTosaOperator(input0Name, input0ElementwiseBinaryName,
                                  input0Scale,
                                  inputs[0]->GetQuantizationOffset(),
                                  0,
                                  false,
                                  false,
                                  true,
                                  true,
                                  &rescaleOp0);

        tensors.emplace_back(new TosaSerializationTensor(input0ElementwiseBinaryName,
                                                         GetTosaTensorShape(inputs[0]->GetShape()),
                                                         DType_INT32,
                                                         {}));
        operators.emplace_back(rescaleOp0);

        TosaSerializationOperator* rescaleOp1 = nullptr;

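        // For Sub, the second input is put through two chained rescales (first into the int32
        // domain, then by the computed input1Scale) so the overall scaling matches the
        // model converter / TFLite reference behaviour; other operations use a single rescale.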
        bool isSub = type == LayerType::Subtraction || (descriptor && descriptor->m_Operation == BinaryOperation::Sub);
        if(isSub)
        {
            // The correct rescale values come from the model converter and match the TFLite reference outputs.
            auto maxScale = 2.0 * std::max(inputs[0]->GetQuantizationScale(), inputs[1]->GetQuantizationScale());
            auto rescaleScale = static_cast<float>((inputs[0]->GetQuantizationScale() / maxScale) * (1 << 21));
            CreateRescaleTosaOperator(input1Name,
                                      input1ElementwiseBinaryName,
                                      rescaleScale,
                                      inputs[1]->GetQuantizationOffset(),
                                      0,
                                      false,
                                      false,
                                      true,
                                      true,
                                      &rescaleOp1);
            operators.emplace_back(rescaleOp1);
            tensors.emplace_back(new TosaSerializationTensor(input1ElementwiseBinaryName,
                                                             GetTosaTensorShape(inputs[1]->GetShape()),
                                                             DType_INT32,
                                                             {}));

            TosaSerializationOperator* rescaleOp2 = nullptr;
            CreateRescaleTosaOperator(input1ElementwiseBinaryName,
                                      input2ElementwiseBinaryName,
                                      input1Scale,
                                      0,
                                      0,
                                      false,
                                      false,
                                      true,
                                      true,
                                      &rescaleOp2);
            operators.emplace_back(rescaleOp2);
            tensors.emplace_back(new TosaSerializationTensor(input2ElementwiseBinaryName,
                                                             GetTosaTensorShape(inputs[1]->GetShape()),
                                                             DType_INT32,
                                                             {}));
        }
        else
        {
            CreateRescaleTosaOperator(input1Name,
                                      input1ElementwiseBinaryName,
                                      input1Scale,
                                      inputs[1]->GetQuantizationOffset(),
                                      0,
                                      false,
                                      false,
                                      true,
                                      true,
                                      &rescaleOp1);
            operators.emplace_back(rescaleOp1);
            tensors.emplace_back(new TosaSerializationTensor(input1ElementwiseBinaryName,
                                                             GetTosaTensorShape(inputs[1]->GetShape()),
                                                             DType_INT32,
                                                             {}));
        }
    }

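    // Select the operator's input/output names: for int8 the op reads and writes the int32
    // intermediate tensors created above; otherwise it uses the block's inputs and output directly.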
    std::string const& elementwiseInput0Str = isInputInt8 ? input0ElementwiseBinaryName : input0Name;
    std::string elementwiseInput1Str = isInputInt8 ? input1ElementwiseBinaryName : input1Name;
    std::string const& elementwiseOutputStr = isInputInt8 ? outputElemenwiseBinaryName : outputName;

    switch(type)
    {
        case LayerType::ElementwiseBinary:
        {
            switch (descriptor->m_Operation)
            {
                case BinaryOperation::Add:
                {
                    ConvertAddToTosaOperator({elementwiseInput0Str, elementwiseInput1Str},
                                             {elementwiseOutputStr},
                                             operators);
                    blockName = std::string("Op_ADD_block_") + GetUniqueTosaMappingID();
                    break;
                }
                case BinaryOperation::Maximum:
                {
                    op = new TosaSerializationOperator(Op_MAXIMUM,
                                                       Attribute_NONE,
                                                       nullptr,
                                                       {elementwiseInput0Str, elementwiseInput1Str},
                                                       {elementwiseOutputStr});
                    blockName = std::string("Op_MAXIMUM_block_") + GetUniqueTosaMappingID();
                    break;
                }
                case BinaryOperation::Mul:
                {
                    ConvertMulToTosaOperator({elementwiseInput0Str, elementwiseInput1Str},
                                             {elementwiseOutputStr},
                                             operators);
                    blockName = std::string("Op_MUL_block_") + GetUniqueTosaMappingID();
                    break;
                }
                case BinaryOperation::Sub:
                {
                    if (isInputInt8)
                    {
                        elementwiseInput1Str = input2ElementwiseBinaryName;
                    }

                    ConvertSubToTosaOperator({elementwiseInput0Str, elementwiseInput1Str},
                                             {elementwiseOutputStr},
                                             operators);
                    blockName = std::string("Op_SUB_block_") + GetUniqueTosaMappingID();
                    break;
                }
                case BinaryOperation::SqDiff:
                {
                    throw Exception("TOSA mappings of the Squared Difference operator are "
                                    "implemented under ConvertSquaredDifferenceToTosaOperator().");
                }
                default:
                    throw Exception("ConvertElementwiseBinaryToTosaOperator: Unsupported layer type.");
            }
            break;
        }
        case LayerType::Addition:
        {
            ConvertAddToTosaOperator({input0Name, input1Name},
                                     {outputName},
                                     operators);
            blockName = std::string("Op_ADD_block_") + GetUniqueTosaMappingID();
            break;
        }
        case LayerType::Multiplication:
        {
            ConvertMulToTosaOperator({input0Name, input1Name},
                                     {outputName},
                                     operators);
            blockName = std::string("Op_MUL_block_") + GetUniqueTosaMappingID();
            break;
        }
        case LayerType::Subtraction:
        {
            ConvertSubToTosaOperator({input0Name, input1Name},
                                     {outputName},
                                     operators);
            blockName = std::string("Op_SUB_block_") + GetUniqueTosaMappingID();
            break;
        }
        default:
            throw Exception("ConvertElementwiseBinaryToTosaOperator: Unsupported layer type.");
    }

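    // Only the MAXIMUM case above creates a standalone operator; the ADD, MUL and SUB helpers
    // append their operators to 'operators' directly, so 'op' stays null for them.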
    if(op != nullptr)
    {
        operators.emplace_back(op);
    }

    // All ElementwiseBinary operators require a rescale of output
    // from DType_INT32 to DType_INT8 when the input is DType_INT8
    if (inputDType0 == DType_INT8)
    {
        TosaSerializationOperator* rescaleOp = nullptr;
        CreateRescaleTosaOperator(outputElemenwiseBinaryName,
                                  outputName,
                                  outputScale,
                                  0,
                                  outputs[0]->GetQuantizationOffset(),
                                  false,
                                  false,
                                  true,
                                  true,
                                  &rescaleOp);
        tensors.emplace_back(new TosaSerializationTensor(outputName,
                                                         GetTosaTensorShape(outputs[0]->GetShape()),
                                                         DType_INT8,
                                                         {}));
        operators.emplace_back(rescaleOp);
    }

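    // If both inputs are fed by the same tensor, list it only once as a block input so the
    // serialized block does not declare a duplicate input name.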
    if(input0Name == input1Name)
    {
        return new TosaSerializationBasicBlock(blockName,     // name
                                               mainName,      // region name
                                               {operators},   // operators
                                               tensors,       // tensors
                                               {input0Name},  // inputs
                                               {outputName}); // outputs
    }

    return new TosaSerializationBasicBlock(blockName,                // name
                                           mainName,                 // region name
                                           {operators},              // operators
                                           tensors,                  // tensors
                                           {input0Name, input1Name}, // inputs
                                           {outputName});            // outputs
}

void ConvertAddToTosaOperator(const std::vector<string>& inputs,
                              const std::vector<string>& outputs,
                              std::vector<TosaSerializationOperator*>& operators)
{
    operators.emplace_back(new TosaSerializationOperator(Op_ADD,
                                                         Attribute_NONE,
                                                         nullptr,
                                                         inputs,
                                                         outputs));
}

void ConvertMulToTosaOperator(const std::vector<string>& inputs,
                              const std::vector<string>& outputs,
                              std::vector<TosaSerializationOperator*>& operators)
{
    TosaMulAttribute mulAttribute(0);
    operators.emplace_back(new TosaSerializationOperator(Op_MUL,
                                                         Attribute_MulAttribute,
                                                         &mulAttribute,
                                                         inputs,
                                                         outputs));
}

void ConvertSubToTosaOperator(const std::vector<string>& inputs,
                              const std::vector<string>& outputs,
                              std::vector<TosaSerializationOperator*>& operators)
{
    operators.emplace_back(new TosaSerializationOperator(Op_SUB,
                                                         Attribute_NONE,
                                                         nullptr,
                                                         inputs,
                                                         outputs));
}

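// Illustrative example (values chosen here, not taken from the source): for an Add with
// input0Scale = 0.1f, input1Scale = 0.2f and outputScale = 0.15f, maxScale is 0.4, so with
// inputShift = 20 the input scales become (0.1 / 0.4) * 2^20 and (0.2 / 0.4) * 2^20, and the
// output scale becomes 0.4 / (0.15 * 2^20).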
void CalculateRescaleScales(float& input0Scale,
                            float& input1Scale,
                            float& outputScale,
                            const BinaryOperation& operation)
{
    // The correct rescale values come from the model converter's TOSA values, which match the
    // TFLite reference outputs.
    auto maxScale = 2.0 * std::max(input0Scale, input1Scale);
    if(operation == armnn::BinaryOperation::Add && outputScale != 0 && maxScale != 0)
    {
        auto inputShift = 20;

        input0Scale = static_cast<float>((input0Scale / maxScale) * (1 << inputShift));
        input1Scale = static_cast<float>((input1Scale / maxScale) * (1 << inputShift));
        outputScale = static_cast<float>(maxScale / (outputScale * (static_cast<float>(1 << inputShift))));
    }
    else if(operation == armnn::BinaryOperation::Mul && outputScale != 0 && maxScale != 0)
    {
        auto inputShift = 1;
        if(input0Scale > input1Scale)
        {
            outputScale = (input0Scale * input1Scale) / outputScale;
            input1Scale = static_cast<float>((input0Scale / maxScale) * (1 << inputShift));
            input0Scale = static_cast<float>((input0Scale / maxScale) * (1 << inputShift));
        }
        else
        {
            outputScale = (input0Scale * input1Scale) / outputScale;
            input0Scale = static_cast<float>((input1Scale / maxScale) * (1 << inputShift));
            input1Scale = static_cast<float>((input1Scale / maxScale) * (1 << inputShift));
        }
    }
    else if(operation == armnn::BinaryOperation::Sub && outputScale != 0 && maxScale != 0)
    {
        auto inputShift = 20;

        input0Scale = static_cast<float>((input0Scale / maxScale) * (1 << inputShift));
        input1Scale = static_cast<float>((input1Scale / maxScale) * (1 << 0));
        outputScale = static_cast<float>(maxScale / (outputScale * (static_cast<float>(1 << inputShift))));
    }
}

TosaSerializationBasicBlock* ConvertSquaredDifferenceToTosaOperator(const Layer* layer,
                                                                    const LayerType,
                                                                    const std::vector<const TensorInfo*>& inputs,
                                                                    const std::vector<const TensorInfo*>& outputs,
                                                                    const ElementwiseBinaryDescriptor* descriptor)
{
    if (descriptor->m_Operation != BinaryOperation::SqDiff)
    {
        throw Exception("ElementwiseBinaryDescriptor operation must be SqDiff "
                        "in ConvertSquaredDifferenceToTosaOperator().");
    }

    auto input0Name = std::string("input_0");
    auto input1Name = std::string("input_1");
    auto outputName = std::string("output0_");
    std::string interElemenwiseBinaryName = std::string("intermediate0_") + GetUniqueTosaMappingID();
    std::string blockName = std::string("Op_SQDIFF_block_") + GetUniqueTosaMappingID();

    // If a layer is present then the block will be used for execution, so input and output names need to be
    // determined using the previous and following layers so the graph is connected correctly.
    // For validation this doesn't matter.
    if (layer != nullptr)
    {
        if (layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer().GetType() == LayerType::Reshape ||
            layer->GetInputSlot(1).GetConnectedOutputSlot()->GetOwningLayer().GetType() == LayerType::Reshape)
        {
            interElemenwiseBinaryName = std::string("intermediate1_") + GetUniqueTosaMappingID();
        }

        input0Name = GenerateUniqueInputName(layer->GetInputSlot(0));
        input1Name = GenerateUniqueInputName(layer->GetInputSlot(1));
        outputName = GenerateUniqueOutputName(*layer);
    }

    std::vector<TosaSerializationTensor*> tensors {};
    std::vector<TosaSerializationOperator*> operators {};
    DType inputDType0 = ArmNNToDType(inputs[0]->GetDataType());
    DType inputDType1 = ArmNNToDType(inputs[1]->GetDataType());
    DType outputDType0 = ArmNNToDType(outputs[0]->GetDataType());
    bool isInputInt8 = (inputDType0 == DType_INT8);

    // Only add input tensors if the connected layer is an input layer,
    // as intermediate or constant tensors will be created separately.
    // There also can't be duplicate tensors.
    if(input0Name.find("input_") != std::string::npos)
    {
        std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
        tensors.emplace_back(new TosaSerializationTensor(input0Name, inputShape0, inputDType0, {}));
    }
    if(input1Name.find("input_") != std::string::npos)
    {
        std::vector<int32_t> inputShape1 = GetTosaTensorShape(inputs[1]->GetShape());
        tensors.emplace_back(new TosaSerializationTensor(input1Name, inputShape1, inputDType1, {}));
    }

    std::vector<int32_t> outputShape0 = GetTosaTensorShape(outputs[0]->GetShape());

    if (inputDType0 == DType_FP32 ||
        inputDType0 == DType_FP16 ||
        inputDType0 == DType_INT32)
    {
        ConvertSubToTosaOperator({input0Name, input1Name},
                                 {interElemenwiseBinaryName},
                                 operators);

        tensors.emplace_back(new TosaSerializationTensor(interElemenwiseBinaryName,
                                                         outputShape0,
                                                         outputDType0,
                                                         {}));
        ConvertMulToTosaOperator({interElemenwiseBinaryName, interElemenwiseBinaryName},
                                 {outputName},
                                 operators);
    }
    else if (isInputInt8)
    {
        std::string rescale0Output0Name = std::string("intermediate0_") + GetUniqueTosaMappingID();
        std::string rescale0Output1Name = std::string("intermediate1_") + GetUniqueTosaMappingID();
        std::string rescale1Output0Name = std::string("intermediate2_") + GetUniqueTosaMappingID();
        std::string rescale1Output1Name = std::string("intermediate3_") + GetUniqueTosaMappingID();
        std::string mulOutputName = std::string("intermediate4_") + GetUniqueTosaMappingID();
        interElemenwiseBinaryName = std::string("intermediate5_") + GetUniqueTosaMappingID();

        // We need to make sure the inputs are rescaled correctly,
        // following the behaviour defined in TFLite's lite/kernels/squared_difference.cc.
        double in_x_scale = inputs[0]->GetQuantizationScale();
        double in_y_scale = inputs[1]->GetQuantizationScale();
        double result_scale = outputs[0]->GetQuantizationScale();
        double twice_max_input_scale = 2.0 * std::max(in_x_scale, in_y_scale);
        const int32_t LEFT_SHIFT = 7;
        double x_rescale_scale = in_x_scale / twice_max_input_scale;
        double y_rescale_scale = in_y_scale / twice_max_input_scale;
        double output_rescale_scale =
            (twice_max_input_scale * twice_max_input_scale) /
            ((static_cast<double>(1 << LEFT_SHIFT * 2)) * result_scale);

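        // Note: '*' binds tighter than '<<', so 1 << LEFT_SHIFT * 2 is 1 << 14, i.e. the square
        // of the 1 << LEFT_SHIFT factor applied to each input below.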
        TosaSerializationOperator* xShiftOp = nullptr;
        CreateRescaleTosaOperator(input0Name,
                                  rescale0Output0Name,
                                  (1 << LEFT_SHIFT),
                                  inputs[0]->GetQuantizationOffset(),
                                  0,
                                  false,
                                  false,
                                  true,
                                  true,
                                  &xShiftOp);
        operators.emplace_back(xShiftOp);
        tensors.emplace_back(new TosaSerializationTensor(rescale0Output0Name,
                                                         GetTosaTensorShape(inputs[0]->GetShape()),
                                                         DType_INT32,
                                                         {}));

        TosaSerializationOperator* yShiftOp = nullptr;
        CreateRescaleTosaOperator(input1Name,
                                  rescale0Output1Name,
                                  (1 << LEFT_SHIFT),
                                  inputs[1]->GetQuantizationOffset(),
                                  0,
                                  false,
                                  false,
                                  true,
                                  true,
                                  &yShiftOp);
        operators.emplace_back(yShiftOp);
        tensors.emplace_back(new TosaSerializationTensor(rescale0Output1Name,
                                                         GetTosaTensorShape(inputs[1]->GetShape()),
                                                         DType_INT32,
                                                         {}));

        TosaSerializationOperator* xScaledOp = nullptr;
        CreateRescaleTosaOperator(rescale0Output0Name,
                                  rescale1Output0Name, //change
                                  x_rescale_scale,
                                  0,
                                  0,
                                  false,
                                  false,
                                  true,
                                  true,
                                  &xScaledOp);
        operators.emplace_back(xScaledOp);
        tensors.emplace_back(new TosaSerializationTensor(rescale1Output0Name,
                                                         GetTosaTensorShape(inputs[0]->GetShape()),
                                                         DType_INT32,
                                                         {}));

        TosaSerializationOperator* yScaledOp = nullptr;
        CreateRescaleTosaOperator(rescale0Output1Name,
                                  rescale1Output1Name, //change
                                  y_rescale_scale,
                                  0,
                                  0,
                                  false,
                                  false,
                                  true,
                                  true,
                                  &yScaledOp);
        operators.emplace_back(yScaledOp);
        tensors.emplace_back(new TosaSerializationTensor(rescale1Output1Name,
                                                         GetTosaTensorShape(inputs[1]->GetShape()),
                                                         DType_INT32,
                                                         {}));

        ConvertSubToTosaOperator({rescale1Output0Name, rescale1Output1Name},
                                 {interElemenwiseBinaryName},
                                 operators);

        tensors.emplace_back(new TosaSerializationTensor(interElemenwiseBinaryName,
                                                         GetTosaTensorShape(outputs[0]->GetShape()),
                                                         DType_INT32,
                                                         {}));

        ConvertMulToTosaOperator({interElemenwiseBinaryName, interElemenwiseBinaryName},
                                 {mulOutputName},
                                 operators);

        tensors.emplace_back(new TosaSerializationTensor(mulOutputName,
                                                         GetTosaTensorShape(outputs[0]->GetShape()),
                                                         DType_INT32,
                                                         {}));

        TosaSerializationOperator* rescaleOutputOp = nullptr;
        CreateRescaleTosaOperator(mulOutputName,
                                  outputName,
                                  output_rescale_scale,
                                  0,
                                  outputs[0]->GetQuantizationOffset(),
                                  false,
                                  false,
                                  true,
                                  true,
                                  &rescaleOutputOp);
        operators.emplace_back(rescaleOutputOp);
    }
    else
    {
        throw Exception("TOSA spec only supports INT8, INT32, FP16 and FP32 datatypes for SqDiff.");
    }

    tensors.emplace_back(new TosaSerializationTensor(outputName, outputShape0, outputDType0, {}));

    return new TosaSerializationBasicBlock(blockName,                // name
                                           mainName,                 // region name
                                           {operators},              // operators
                                           tensors,                  // tensors
                                           {input0Name, input1Name}, // inputs
                                           {outputName});            // outputs
}
void CalculateRescaleScales(float &input0Scale, float &input1Scale, float &outputScale, const BinaryOperation &operation)
Function used to calculate the correct rescale scales for Int8 inputs to the ADD, MUL and SUB operators.
TosaSerializationBasicBlock * ConvertSquaredDifferenceToTosaOperator(const Layer *layer, const LayerType, const std::vector< const TensorInfo * > &inputs, const std::vector< const TensorInfo * > &outputs, const ElementwiseBinaryDescriptor *descriptor)
TosaSerializationBasicBlock * ConvertElementwiseBinaryToTosaOperator(const Layer *layer, const LayerType type, const std::vector< const TensorInfo * > &inputs, const std::vector< const TensorInfo * > &outputs, const ElementwiseBinaryDescriptor *descriptor)
void ConvertAddToTosaOperator(const std::vector< string > &inputs, const std::vector< string > &outputs, std::vector< TosaSerializationOperator * > &operators)
Function used to add the ADD operator to the operator vector.
void ConvertMulToTosaOperator(const std::vector< string > &inputs, const std::vector< string > &outputs, std::vector< TosaSerializationOperator * > &operators)
Function used to add the MUL operator to the operator vector.
void ConvertSubToTosaOperator(const std::vector< string > &inputs, const std::vector< string > &outputs, std::vector< TosaSerializationOperator * > &operators)
Function used to add the SUB operator to the operator vector.
std::string GenerateUniqueOutputName(const Layer &layer, uint32_t layerSlot=0)
const std::string mainName
DType ArmNNToDType(const DataType &type)
std::string GenerateUniqueInputName(const armnn::InputSlot &slot)
std::string GetUniqueTosaMappingID()
std::vector< int32_t > GetTosaTensorShape(const TensorShape &shape)
void CreateRescaleTosaOperator(const std::string &inputName, const std::string &outputName, double scale, int32_t input_zp, int32_t output_zp, bool input_unsigned, bool output_unsigned, bool double_round, bool scale32, TosaSerializationOperator **op)
Creates a Tosa rescale operator.
Base class for all ArmNN exceptions so that users can filter to just those.
const OutputSlot * GetConnectedOutputSlot() const
Definition Layer.hpp:56
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition Layer.hpp:337
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
Definition Layer.hpp:286
Layer & GetOwningLayer() const
Definition Layer.hpp:132
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition Types.hpp:494
BinaryOperation
Definition Types.hpp:139
An ElementwiseBinaryDescriptor for the ElementwiseBinaryLayer.
BinaryOperation m_Operation
Specifies the elementwiseBinary operation to execute.
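As a rough usage sketch (not part of this file): the converter can be exercised in validation mode by passing a null layer, in which case the block uses the default "input_0"/"input_1"/"output0_" names. The shapes and descriptor below are illustrative values only, assuming the usual ArmNN and TOSA mapping headers are available.

// Minimal validation-style call; all values here are illustrative.
armnn::TensorInfo in0({1, 2, 2, 3}, armnn::DataType::Float32);
armnn::TensorInfo in1({1, 2, 2, 3}, armnn::DataType::Float32);
armnn::TensorInfo out({1, 2, 2, 3}, armnn::DataType::Float32);
armnn::ElementwiseBinaryDescriptor desc(armnn::BinaryOperation::Add);

TosaSerializationBasicBlock* block =
    ConvertElementwiseBinaryToTosaOperator(nullptr, armnn::LayerType::ElementwiseBinary,
                                           {&in0, &in1}, {&out}, &desc);

For an int8 graph, the same call additionally emits the RESCALE operators shown above around the ADD.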