17 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
24 const std::vector<float>& inputData,
25 const std::vector<float>& outputData,
26 const std::vector<int32_t> vAxis,
28 bool keepDims =
false)
31 auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputData, inputTensorInfo));
34 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
36 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
37 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
40 std::vector<uint32_t> updated_idx;
41 uint32_t resolvedAxis = 0;
42 for (uint32_t i = 0; i < vAxis.size(); ++i)
46 resolvedAxis = inputTensorInfo.GetNumDimensions() +
static_cast<uint32_t
>(vAxis[i]);
49 resolvedAxis =
static_cast<uint32_t
>(vAxis[i]);
52 updated_idx.push_back(resolvedAxis);
60 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
61 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
63 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateReduce(descriptor, info);
65 inputHandle->Allocate();
66 outputHandle->Allocate();
79 template<armnn::DataType ArmnnType,
typename T>
90 if (armnn::IsQuantizedType<T>())
98 std::vector<float> inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
99 std::vector<float> outputValues({ 34.0f });
101 return ReduceTestCommon<ArmnnType>(workloadFactory,
112 template<armnn::DataType ArmnnType,
typename T>
123 if (armnn::IsQuantizedType<T>())
131 std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
132 5.0f, 6.0f, 7.0f, 8.0f,
134 10.0f, 20.0f, 30.0f, 40.0f,
135 50.0f, 60.0f, 70.0f, 80.0f,
137 100.0f, 200.0f, 300.0f, 400.0f,
138 500.0f, 600.0f, 700.0f, 800.0f });
139 std::vector<float> outputValues({ 111.0f, 222.0f, 333.0f, 444.0f,
140 555.0f, 666.0f, 777.0f, 888.0f });
142 return ReduceTestCommon<ArmnnType>(workloadFactory,
153 template<armnn::DataType ArmnnType,
typename T>
164 if (armnn::IsQuantizedType<T>())
172 std::vector<float> inputValues( {7, 8, 6, 1,
197 std::vector<float> outputValues({ 28.0f, 35.0f, 30.0f, 27.0f,
198 27.0f, 31.0f, 31.0f, 24.0f,
199 35.0f, 32.0f, 29.0f, 44.0f});
201 return ReduceTestCommon<ArmnnType>(workloadFactory,
212 template<armnn::DataType ArmnnType,
typename T>
223 if (armnn::IsQuantizedType<T>())
231 std::vector<float> inputValues( {7, 8, 6, 1,
256 std::vector<float> outputValues({ 22.0f, 17.0f, 24.0f,
263 13.0f, 17.0f, 23.0f});
265 return ReduceTestCommon<ArmnnType>(workloadFactory,
277 template<armnn::DataType ArmnnType,
typename T>
288 if (armnn::IsQuantizedType<T>())
296 std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
297 5.0f, 6.0f, 7.0f, 8.0f,
299 10.0f, 20.0f, 30.0f, 40.0f,
300 50.0f, 60.0f, 70.0f, 80.0f,
302 100.0f, 200.0f, 300.0f, 400.0f,
303 500.0f, 600.0f, 700.0f, 800.0f });
304 std::vector<float> outputValues({ 666.0f, 888.0f, 1110.0f, 1332.0f });
306 return ReduceTestCommon<ArmnnType>(workloadFactory,
320 ReduceSumSimpleTest<armnn::DataType::Float32>(
326 ReduceSumSingleAxisTest1<armnn::DataType::Float32>(
332 ReduceSumSingleAxisTest2<armnn::DataType::Float32>(
338 ReduceSumSingleAxisTest3<armnn::DataType::Float32>(
344 ReduceSumMultipleAxisTest<armnn::DataType::Float32>(
LayerTestResult< float, 4 > ReduceSumMultipleAxisTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > ReduceSumSimpleTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > ReduceSumSingleAxisTest1(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
bool m_KeepDims
If true, then the output shape has no change.
ReduceOperation m_ReduceOperation
Specifies the reduction operation to execute.
void IgnoreUnused(Ts &&...)
LayerDescriptor m_Parameters
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
void SetQuantizationScale(float scale)
LayerTestResult< float, 4 > ReduceSumSingleAxisTest3(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
std::vector< uint32_t > m_vAxis
The indices of the dimensions to reduce.
Contains information about inputs and outputs to a layer.
void SetQuantizationOffset(int32_t offset)
LayerTestResult< float, 4 > ReduceSumSingleAxisTest2(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)