ArmNN 25.11
RefElementwiseBinaryWorkload.cpp
//
// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefElementwiseBinaryWorkload.hpp"

#include "Decoders.hpp"
#include "Encoders.hpp"
#include "RefWorkloadUtils.hpp"
#include "Maximum.hpp"
#include "Minimum.hpp"
#include "SquaredDifference.hpp"
#include "Power.hpp"
#include "FloorDiv.hpp"

#include <Profiling.hpp>

#include <armnn/TypesUtils.hpp>

#include <functional>

namespace armnn
{

template<typename DataType>
void ExecuteFunction(std::vector<ITensorHandle*> inputs,
                     std::vector<ITensorHandle*> outputs,
                     BinaryOperation operation)
{
    const TensorInfo& inputInfo0 = GetTensorInfo(inputs[0]);
    const TensorInfo& inputInfo1 = GetTensorInfo(inputs[1]);
    const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);

    const TensorShape& inShape0 = inputInfo0.GetShape();
    const TensorShape& inShape1 = inputInfo1.GetShape();
    const TensorShape& outShape = outputInfo.GetShape();

    std::unique_ptr<Decoder<DataType>> input0 = MakeDecoder<DataType>(inputInfo0, inputs[0]->Map());
    std::unique_ptr<Decoder<DataType>> input1 = MakeDecoder<DataType>(inputInfo1, inputs[1]->Map());
    std::unique_ptr<Encoder<DataType>> output = MakeEncoder<DataType>(outputInfo, outputs[0]->Map());

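    // The decoders present each (possibly quantized) input as DataType values and the encoder
    // writes results back in the output tensor's own data type. Each alias below instantiates
    // ElementwiseBinaryFunction with the functor for one BinaryOperation.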
    using AddFunction      = ElementwiseBinaryFunction<std::plus<DataType>>;
    using DivFunction      = ElementwiseBinaryFunction<std::divides<DataType>>;
    using FloorDivFunction = ElementwiseBinaryFunction<armnn::floorDiv<DataType>>;
    using MaximumFunction  = ElementwiseBinaryFunction<armnn::maximum<DataType>>;
    using MinimumFunction  = ElementwiseBinaryFunction<armnn::minimum<DataType>>;
    using MulFunction      = ElementwiseBinaryFunction<std::multiplies<DataType>>;
    using SubFunction      = ElementwiseBinaryFunction<std::minus<DataType>>;
    using SqDiffFunction   = ElementwiseBinaryFunction<armnn::squaredDifference<DataType>>;
    using PowerFunction    = ElementwiseBinaryFunction<armnn::power<DataType>>;

    switch (operation)
    {
        case BinaryOperation::Add:
        {
            AddFunction(inShape0, inShape1, outShape, *input0, *input1, *output);
            break;
        }
        case BinaryOperation::Div:
        {
            DivFunction(inShape0, inShape1, outShape, *input0, *input1, *output);
            break;
        }
        case BinaryOperation::FloorDiv:
        {
            FloorDivFunction(inShape0, inShape1, outShape, *input0, *input1, *output);
            break;
        }
        case BinaryOperation::Maximum:
        {
            MaximumFunction(inShape0, inShape1, outShape, *input0, *input1, *output);
            break;
        }
        case BinaryOperation::Minimum:
        {
            MinimumFunction(inShape0, inShape1, outShape, *input0, *input1, *output);
            break;
        }
        case BinaryOperation::Mul:
        {
            MulFunction(inShape0, inShape1, outShape, *input0, *input1, *output);
            break;
        }
        case BinaryOperation::Power:
        {
            PowerFunction(inShape0, inShape1, outShape, *input0, *input1, *output);
            break;
        }
        case BinaryOperation::Sub:
        {
            SubFunction(inShape0, inShape1, outShape, *input0, *input1, *output);
            break;
        }
        case BinaryOperation::SqDiff:
        {
            SqDiffFunction(inShape0, inShape1, outShape, *input0, *input1, *output);
            break;
        }
        default:
        {
            throw InvalidArgumentException(std::string("Unsupported binary operation ") +
                                           GetBinaryOperationAsCString(operation), CHECK_LOCATION());
        }
    }
}

RefElementwiseBinaryWorkload::RefElementwiseBinaryWorkload(const ElementwiseBinaryQueueDescriptor& desc,
                                                           const WorkloadInfo& info)
    : RefBaseWorkload<ElementwiseBinaryQueueDescriptor>(desc, info)
{}

void RefElementwiseBinaryWorkload::Execute() const
{
    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
118
119void RefElementwiseBinaryWorkload::Execute(std::vector<ITensorHandle*> inputs,
120 std::vector<ITensorHandle*> outputs) const
121{
122 ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefElementwiseBinaryWorkload_Execute");
123
124 if (GetTensorInfo(inputs[0]).GetDataType() == DataType::Signed32)
125 {
127 }
128 else
129 {
131 }
132}
133
134} // namespace armnn
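The structure above is a small dispatch pattern: decode both inputs, pick a functor from the BinaryOperation, and let a shared elementwise loop write results through the encoder. Below is a minimal standalone sketch of that idea for readers unfamiliar with it. The names SimpleOp, MaxFunctor, ElementwiseBinary and Execute are hypothetical stand-ins, not ArmNN API; the sketch works on plain std::vector<float> and ignores broadcasting, quantization and the Decoder/Encoder indirection that the real workload handles.

// Illustrative sketch only: mirrors the switch-on-operation dispatch used by
// ExecuteFunction above, but over plain std::vector<float>.
#include <algorithm>
#include <cstddef>
#include <functional>
#include <iostream>
#include <stdexcept>
#include <vector>

enum class SimpleOp { Add, Mul, Maximum };   // hypothetical stand-in for armnn::BinaryOperation

// Hypothetical stand-in for a maximum functor.
struct MaxFunctor
{
    float operator()(float a, float b) const { return std::max(a, b); }
};

// Stand-in for the shared elementwise loop: applies Functor element by element.
template <typename Functor>
void ElementwiseBinary(const std::vector<float>& in0,
                       const std::vector<float>& in1,
                       std::vector<float>& out)
{
    Functor f;
    for (std::size_t i = 0; i < out.size(); ++i)
    {
        out[i] = f(in0[i], in1[i]);
    }
}

// Selects the functor from the operation, as the switch in ExecuteFunction does.
void Execute(SimpleOp op,
             const std::vector<float>& in0,
             const std::vector<float>& in1,
             std::vector<float>& out)
{
    switch (op)
    {
        case SimpleOp::Add:     ElementwiseBinary<std::plus<float>>(in0, in1, out);       break;
        case SimpleOp::Mul:     ElementwiseBinary<std::multiplies<float>>(in0, in1, out); break;
        case SimpleOp::Maximum: ElementwiseBinary<MaxFunctor>(in0, in1, out);             break;
        default: throw std::invalid_argument("Unsupported binary operation");
    }
}

int main()
{
    std::vector<float> a{1.0f, 2.0f, 3.0f};
    std::vector<float> b{4.0f, 0.5f, 3.0f};
    std::vector<float> c(a.size());

    Execute(SimpleOp::Maximum, a, b, c);
    for (float v : c) { std::cout << v << ' '; }   // prints: 4 2 3
    std::cout << '\n';
}

In the real workload the per-element loop lives in ElementwiseBinaryFunction, which also applies broadcasting based on the input and output shapes, and the Decoder/Encoder pair lets the same float instantiation serve quantized tensor types.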