// Includes assumed for this test implementation (the original header block is
// not part of this excerpt):
#include "ArgMinMaxTestImpl.hpp"

#include <ResolveType.hpp>

#include <backendsCommon/test/DataTypeUtils.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

// Shared driver: builds an ArgMinMax workload from the given descriptor
// parameters, runs it, and returns the produced and expected index tensors.
template<armnn::DataType ArmnnType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<int32_t, 3> ArgMinMaxTestCommon(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory,
        armnn::ArgMinMaxFunction argMinMaxFunction,
        const armnn::TensorInfo inputTensorInfo,
        const armnn::TensorInfo outputTensorInfo,
        const std::vector<float>& inputData,
        const std::vector<int32_t>& outputData,
        int axis = 3)
{
    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputData, inputTensorInfo));

    LayerTestResult<int32_t, 3> result(outputTensorInfo);
    result.outputExpected = MakeTensor<int32_t, 3>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ArgMinMaxQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Function = argMinMaxFunction; // find Min or Max
    descriptor.m_Parameters.m_Axis = axis;                  // axis to reduce across the input tensor
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateArgMinMax(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputTensor.origin());

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());

    return result;
}
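
// Each test below drives ArgMinMaxTestCommon with fixed inputs, choosing a
// function (Min or Max), a reduction axis, and the expected int32 index tensor.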

template<armnn::DataType ArmnnType,
         typename T>
LayerTestResult<int32_t, 3> ArgMaxSimpleTest(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
    const armnn::TensorShape outputShape{ 1, 1, 1 };

    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(1.0f);
        inputTensorInfo.SetQuantizationOffset(0);
    }

    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);

    std::vector<float> inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
    std::vector<int32_t> outputValues({ 3 }); // the max, 10.0f, sits at index 3

    return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
                                          armnn::ArgMinMaxFunction::Max,
                                          inputTensorInfo, outputTensorInfo,
                                          inputValues, outputValues, -1); // axis -1 wraps to the last dimension, 3
}

template<armnn::DataType ArmnnType,
         typename T>
LayerTestResult<int32_t, 3> ArgMinSimpleTest(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
    const armnn::TensorShape outputShape{ 1, 1, 1 };

    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(1.0f);
        inputTensorInfo.SetQuantizationOffset(0);
    }

    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);

    std::vector<float> inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
    std::vector<int32_t> outputValues({ 1 }); // the min, 2.0f, sits at index 1

    return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
                                          armnn::ArgMinMaxFunction::Min,
                                          inputTensorInfo, outputTensorInfo,
                                          inputValues, outputValues, 3); // explicit last axis
}

template<armnn::DataType ArmnnType,
         typename T>
LayerTestResult<int32_t, 3> ArgMinChannelTest(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
    const armnn::TensorShape outputShape{ 1, 2, 4 };

    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(1.0f);
        inputTensorInfo.SetQuantizationOffset(0);
    }

    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);

    // One 2x4 block per channel; values grow by a factor of ten per channel.
    std::vector<float> inputValues({ 1.0f,   2.0f,   3.0f,   4.0f,
                                     5.0f,   6.0f,   7.0f,   8.0f,

                                     10.0f,  20.0f,  30.0f,  40.0f,
                                     50.0f,  60.0f,  70.0f,  80.0f,

                                     100.0f, 200.0f, 300.0f, 400.0f,
                                     500.0f, 600.0f, 700.0f, 800.0f });
    // The smallest value at every (height, width) position lies in channel 0.
    std::vector<int32_t> outputValues({ 0, 0, 0, 0,
                                        0, 0, 0, 0 });

    return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
                                          armnn::ArgMinMaxFunction::Min,
                                          inputTensorInfo, outputTensorInfo,
                                          inputValues, outputValues, 1); // reduce across the channel axis
}

template<armnn::DataType ArmnnType,
         typename T>
LayerTestResult<int32_t, 3> ArgMaxChannelTest(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
    const armnn::TensorShape outputShape{ 1, 2, 4 };

    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(1.0f);
        inputTensorInfo.SetQuantizationOffset(0);
    }

    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);

    std::vector<float> inputValues({ 1.0f,   2.0f,   3.0f,   4.0f,
                                     5.0f,   6.0f,   7.0f,   8.0f,

                                     10.0f,  20.0f,  30.0f,  40.0f,
                                     50.0f,  60.0f,  70.0f,  80.0f,

                                     100.0f, 200.0f, 300.0f, 400.0f,
                                     500.0f, 600.0f, 700.0f, 800.0f });
    // The largest value at every (height, width) position lies in channel 2.
    std::vector<int32_t> outputValues({ 2, 2, 2, 2,
                                        2, 2, 2, 2 });

    return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
                                          armnn::ArgMinMaxFunction::Max,
                                          inputTensorInfo, outputTensorInfo,
                                          inputValues, outputValues, 1); // reduce across the channel axis
}

template<armnn::DataType ArmnnType,
         typename T>
LayerTestResult<int32_t, 3> ArgMaxHeightTest(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
    const armnn::TensorShape outputShape{ 1, 3, 4 };

    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(1.0f);
        inputTensorInfo.SetQuantizationOffset(0);
    }

    std::vector<float> inputValues({ 1.0f,   2.0f,   3.0f,   4.0f,
                                     5.0f,   6.0f,   7.0f,   8.0f,

                                     10.0f,  20.0f,  30.0f,  40.0f,
                                     50.0f,  60.0f,  70.0f,  80.0f,

                                     100.0f, 200.0f, 300.0f, 400.0f,
                                     500.0f, 600.0f, 700.0f, 800.0f });
    // In every channel the second height row holds the larger values.
    std::vector<int32_t> outputValues({ 1, 1, 1, 1,
                                        1, 1, 1, 1,
                                        1, 1, 1, 1 });

    return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
                                          armnn::ArgMinMaxFunction::Max,
                                          inputTensorInfo, outputTensorInfo,
                                          inputValues, outputValues, 2); // reduce across the height axis
}

template<armnn::DataType ArmnnType,
         typename T>
LayerTestResult<int32_t, 3> ArgMinWidthTest(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
    const armnn::TensorShape outputShape{ 1, 3, 2 };

    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(1.0f);
        inputTensorInfo.SetQuantizationOffset(0);
    }

    std::vector<float> inputValues({ 1.0f,   2.0f,   3.0f,   4.0f,
                                     5.0f,   6.0f,   7.0f,   8.0f,

                                     10.0f,  20.0f,  30.0f,  40.0f,
                                     50.0f,  60.0f,  70.0f,  80.0f,

                                     100.0f, 200.0f, 300.0f, 400.0f,
                                     500.0f, 600.0f, 700.0f, 800.0f });
    // Values grow along the width axis, so the minimum is always at index 0.
    std::vector<int32_t> outputValues({ 0, 0,
                                        0, 0,
                                        0, 0 });

    return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
                                          armnn::ArgMinMaxFunction::Min,
                                          inputTensorInfo, outputTensorInfo,
                                          inputValues, outputValues, 3); // reduce across the width axis
}

// Explicit template instantiations

template LayerTestResult<int32_t, 3> ArgMaxSimpleTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, const armnn::ITensorHandleFactory&);
template LayerTestResult<int32_t, 3> ArgMaxSimpleTest<armnn::DataType::Float16>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, const armnn::ITensorHandleFactory&);
template LayerTestResult<int32_t, 3> ArgMaxSimpleTest<armnn::DataType::QAsymmS8>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, const armnn::ITensorHandleFactory&);
template LayerTestResult<int32_t, 3> ArgMaxSimpleTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, const armnn::ITensorHandleFactory&);
template LayerTestResult<int32_t, 3> ArgMaxSimpleTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, const armnn::ITensorHandleFactory&);
template LayerTestResult<int32_t, 3> ArgMaxSimpleTest<armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, const armnn::ITensorHandleFactory&);

template LayerTestResult<int32_t, 3> ArgMinSimpleTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, const armnn::ITensorHandleFactory&);
template LayerTestResult<int32_t, 3> ArgMinSimpleTest<armnn::DataType::Float16>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, const armnn::ITensorHandleFactory&);
template LayerTestResult<int32_t, 3> ArgMinSimpleTest<armnn::DataType::QAsymmS8>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, const armnn::ITensorHandleFactory&);
template LayerTestResult<int32_t, 3> ArgMinSimpleTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, const armnn::ITensorHandleFactory&);
template LayerTestResult<int32_t, 3> ArgMinSimpleTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, const armnn::ITensorHandleFactory&);
template LayerTestResult<int32_t, 3> ArgMinSimpleTest<armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, const armnn::ITensorHandleFactory&);

template LayerTestResult<int32_t, 3> ArgMinChannelTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, const armnn::ITensorHandleFactory&);
template LayerTestResult<int32_t, 3> ArgMinChannelTest<armnn::DataType::Float16>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, const armnn::ITensorHandleFactory&);
template LayerTestResult<int32_t, 3> ArgMinChannelTest<armnn::DataType::QAsymmS8>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, const armnn::ITensorHandleFactory&);
template LayerTestResult<int32_t, 3> ArgMinChannelTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, const armnn::ITensorHandleFactory&);
template LayerTestResult<int32_t, 3> ArgMinChannelTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, const armnn::ITensorHandleFactory&);
template LayerTestResult<int32_t, 3> ArgMinChannelTest<armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, const armnn::ITensorHandleFactory&);

template LayerTestResult<int32_t, 3> ArgMaxChannelTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, const armnn::ITensorHandleFactory&);
template LayerTestResult<int32_t, 3> ArgMaxChannelTest<armnn::DataType::Float16>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, const armnn::ITensorHandleFactory&);
template LayerTestResult<int32_t, 3> ArgMaxChannelTest<armnn::DataType::QAsymmS8>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, const armnn::ITensorHandleFactory&);
template LayerTestResult<int32_t, 3> ArgMaxChannelTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, const armnn::ITensorHandleFactory&);
template LayerTestResult<int32_t, 3> ArgMaxChannelTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, const armnn::ITensorHandleFactory&);
template LayerTestResult<int32_t, 3> ArgMaxChannelTest<armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, const armnn::ITensorHandleFactory&);

template LayerTestResult<int32_t, 3> ArgMaxHeightTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, const armnn::ITensorHandleFactory&);
template LayerTestResult<int32_t, 3> ArgMaxHeightTest<armnn::DataType::Float16>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, const armnn::ITensorHandleFactory&);
template LayerTestResult<int32_t, 3> ArgMaxHeightTest<armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, const armnn::ITensorHandleFactory&);
template LayerTestResult<int32_t, 3> ArgMaxHeightTest<armnn::DataType::QAsymmS8>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, const armnn::ITensorHandleFactory&);
template LayerTestResult<int32_t, 3> ArgMaxHeightTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, const armnn::ITensorHandleFactory&);

template LayerTestResult<int32_t, 3> ArgMinWidthTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, const armnn::ITensorHandleFactory&);
template LayerTestResult<int32_t, 3> ArgMinWidthTest<armnn::DataType::Float16>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, const armnn::ITensorHandleFactory&);
template LayerTestResult<int32_t, 3> ArgMinWidthTest<armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, const armnn::ITensorHandleFactory&);
template LayerTestResult<int32_t, 3> ArgMinWidthTest<armnn::DataType::QAsymmS8>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, const armnn::ITensorHandleFactory&);
template LayerTestResult<int32_t, 3> ArgMinWidthTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, const armnn::ITensorHandleFactory&);
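
// A minimal sketch of how a backend test suite could register these helpers.
// The ARMNN_AUTO_TEST_CASE_WITH_THF macro name and the test names here are
// assumptions modelled on Arm NN's reference-backend layer tests; they are not
// part of this file:
//
//     ARMNN_AUTO_TEST_CASE_WITH_THF(ArgMaxSimpleFloat32, ArgMaxSimpleTest<armnn::DataType::Float32>)
//     ARMNN_AUTO_TEST_CASE_WITH_THF(ArgMinChannelQAsymmU8, ArgMinChannelTest<armnn::DataType::QAsymmU8>)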