27 template<
typename DataType>
29 std::vector<ITensorHandle*> outputs,
40 std::unique_ptr<Decoder<DataType>> input0 = MakeDecoder<DataType>(inputInfo0, inputs[0]->
Map());
41 std::unique_ptr<Decoder<DataType>> input1 = MakeDecoder<DataType>(inputInfo1, inputs[1]->
Map());
42 std::unique_ptr<Encoder<DataType>> output = MakeEncoder<DataType>(outputInfo, outputs[0]->
Map());
58 AddFunction(inShape0, inShape1, outShape, *input0, *input1, *output);
63 DivFunction(inShape0, inShape1, outShape, *input0, *input1, *output);
68 FloorDivFunction(inShape0, inShape1, outShape, *input0, *input1, *output);
73 MaximumFunction(inShape0, inShape1, outShape, *input0, *input1, *output);
78 MinimumFunction(inShape0, inShape1, outShape, *input0, *input1, *output);
83 MulFunction(inShape0, inShape1, outShape, *input0, *input1, *output);
88 PowerFunction(inShape0, inShape1, outShape, *input0, *input1, *output);
93 SubFunction(inShape0, inShape1, outShape, *input0, *input1, *output);
98 SqDiffFunction(inShape0, inShape1, outShape, *input0, *input1, *output);
120 std::vector<ITensorHandle*> outputs)
const
126 ExecuteFunction<int32_t>(inputs, outputs,
m_Data.m_Parameters.m_Operation);
130 ExecuteFunction<float>(inputs, outputs,
m_Data.m_Parameters.m_Operation);
#define ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID(label)
Creates a profiling event that uses GetGuid() and GetName() from the calling class.
RefElementwiseBinaryWorkload(const ElementwiseBinaryQueueDescriptor &descriptor, const WorkloadInfo &info)
void Execute() const override
const TensorShape & GetShape() const
Copyright (c) 2021 ARM Limited and Contributors.
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
Helper functions for operating on float32 data.
void ExecuteFunction(std::vector< ITensorHandle * > inputs, std::vector< ITensorHandle * > outputs, BinaryOperation operation)
constexpr char const * GetBinaryOperationAsCString(BinaryOperation operation)
std::vector< ITensorHandle * > m_Inputs
std::vector< ITensorHandle * > m_Outputs
Contains information about TensorInfos of a layer.