19 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
26 const std::vector<float>& inputData,
27 const std::vector<float>& outputData,
28 const std::vector<int32_t> vAxis,
30 bool keepDims =
false)
33 auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputData, inputTensorInfo));
36 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
38 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
39 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
42 std::vector<uint32_t> updated_idx;
43 uint32_t resolvedAxis = 0;
44 for (uint32_t i = 0; i < vAxis.size(); ++i)
48 resolvedAxis = inputTensorInfo.GetNumDimensions() +
static_cast<uint32_t
>(vAxis[i]);
51 resolvedAxis =
static_cast<uint32_t
>(vAxis[i]);
54 updated_idx.push_back(resolvedAxis);
62 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
63 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
65 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateReduce(descriptor, info);
67 inputHandle->Allocate();
68 outputHandle->Allocate();
81 template<armnn::DataType ArmnnType,
typename T>
92 if (armnn::IsQuantizedType<T>())
100 std::vector<float> inputValues
102 1001.0f, 11.0f, 1003.0f,
103 10.0f, 1002.0f, 12.0f
105 std::vector<float> outputValues
107 1001.0f, 1002.0f, 1003.0f
110 return ReductionTestCommon<ArmnnType>(workloadFactory,
121 template<armnn::DataType ArmnnType,
typename T>
132 if (armnn::IsQuantizedType<T>())
140 std::vector<float> inputValues
142 1001.0f, 11.0f, 1003.0f,
143 10.0f, 1002.0f, 12.0f
145 std::vector<float> outputValues
150 return ReductionTestCommon<ArmnnType>(workloadFactory,
162 template<armnn::DataType ArmnnType,
typename T>
173 if (armnn::IsQuantizedType<T>())
181 std::vector<float> inputValues
187 std::vector<float> outputValues
192 return ReductionTestCommon<ArmnnType>(workloadFactory,
204 template<armnn::DataType ArmnnType,
typename T>
215 if (armnn::IsQuantizedType<T>())
223 std::vector<float> inputValues
225 1001.0f, 11.0f, 1003.0f,
226 10.0f, 1002.0f, 12.0f
228 std::vector<float> outputValues
233 return ReductionTestCommon<ArmnnType>(workloadFactory,
244 template<armnn::DataType ArmnnType,
typename T>
255 if (armnn::IsQuantizedType<T>())
263 std::vector<float> inputValues
265 1001.0f, 11.0f, 1003.0f,
266 10.0f, 1002.0f, 12.0f
268 std::vector<float> outputValues
273 return ReductionTestCommon<ArmnnType>(workloadFactory,
287 ReduceMaxSimpleTest<armnn::DataType::Float32>(
293 ReduceMaxNegativeAxisTest<armnn::DataType::Float32>(
299 ReduceMaxSimpleTest2<armnn::DataType::Float32>(
305 ReduceMinSimpleTest<armnn::DataType::Float32>(
311 ReduceMinNegativeAxisTest<armnn::DataType::Float32>(
bool m_KeepDims
If true, then the output shape does not change (reduced dimensions are kept).
LayerTestResult< float, 4 > ReduceMaxSimpleTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
ReduceOperation m_ReduceOperation
Specifies the reduction operation to execute.
void IgnoreUnused(Ts &&...)
LayerDescriptor m_Parameters
LayerTestResult< float, 4 > ReduceMinNegativeAxisTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
LayerTestResult< float, 4 > ReduceMinSimpleTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void SetQuantizationScale(float scale)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
LayerTestResult< float, 4 > ReduceMaxNegativeAxisTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
std::vector< uint32_t > m_vAxis
The indices of the dimensions to reduce.
LayerTestResult< float, 4 > ReduceMaxSimpleTest2(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
Contains information about inputs and outputs to a layer.
void SetQuantizationOffset(int32_t offset)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)