// ArgMinMaxTestCommon: shared driver used by every ArgMin/ArgMax layer test
// below. It converts the float reference input into the target element type
// T, creates input/output tensor handles, builds an ArgMinMax workload via
// the workload factory, allocates the handles and configures the workload;
// the produced int32 indices are then compared against outputData (the
// comparison lines are not visible in this extract).
// NOTE(review): this is a mangled source listing — the bare integers opening
// many lines ("18", "26", ...) are the original file's own line numbers, and
// several original lines (the function signature with its workloadFactory /
// memoryManager / descriptor parameters, the CopyData* calls and the return
// statement) are missing from this view. Do not treat the fragment as
// compilable; recover the full file before editing logic.
18 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
// (original lines 19-25 — the rest of the signature — are missing here)
26 const std::vector<float>& inputData,
27 const std::vector<int32_t>& outputData,
// Convert the float reference data into the tensor's element type T.
30 std::vector<T> inputTensor = ConvertToDataType<ArmnnType>(inputData, inputTensorInfo);
// Output buffer sized from the output TensorInfo; ArgMin/ArgMax always
// produces int32 indices.
31 std::vector<int32_t> actualOutput(outputTensorInfo.
GetNumElements());
33 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
34 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
// Wire the tensors into the queue descriptor / workload info.
41 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
42 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
44 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateArgMinMax(descriptor, info);
46 inputHandle->Allocate();
47 outputHandle->Allocate();
51 workload->PostAllocationConfigure();
// The result comparison (not visible here) uses the handle's actual shape.
58 outputHandle->GetShape(),
// Presumably ArgMaxSimpleTest (see the cross-reference list at the bottom of
// this extract): 1-D argmax over { 5, 2, 8, 10, 9 }. The expected index is 3
// — the 10.0f element, the maximum. The trailing -1 is the axis argument
// forwarded to ArgMinMaxTestCommon (negative axis, presumably meaning "last
// dimension" — confirm against the descriptor handling).
// NOTE(review): the quantization setup inside the IsQuantizedType branch and
// the TensorInfo construction are missing from this extract.
64 template<armnn::DataType ArmnnType,
typename T>
75 if (armnn::IsQuantizedType<T>())
83 std::vector<float> inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
84 std::vector<int32_t> outputValues({ 3 });
86 return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
88 inputTensorInfo, outputTensorInfo,
89 inputValues, outputValues, -1);
// Presumably ArgMinSimpleTest: same 1-D input; the minimum, 2.0f, sits at
// index 1, which matches the expected output. The axis argument is 3 —
// presumably the last dimension of a 4-D TensorInfo whose construction is
// not visible in this extract; confirm against the full file.
103 if (armnn::IsQuantizedType<T>())
111 std::vector<float> inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
112 std::vector<int32_t> outputValues({ 1 });
114 return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
116 inputTensorInfo, outputTensorInfo,
117 inputValues, outputValues, 3);
92 template<armnn::DataType ArmnnType,
typename T>
// Presumably ArgMinChannelTest: 24-element input; the expected indices begin
// { 0, 0, 0, 0 } (the remaining values are missing from this extract) and
// the reduction axis is 1 — presumably the channel dimension of an NCHW
// tensor; the TensorInfo construction is not visible, so confirm.
120 template<armnn::DataType ArmnnType,
typename T>
131 if (armnn::IsQuantizedType<T>())
139 std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
140 5.0f, 6.0f, 7.0f, 8.0f,
142 10.0f, 20.0f, 30.0f, 40.0f,
143 50.0f, 60.0f, 70.0f, 80.0f,
145 100.0f, 200.0f, 300.0f, 400.0f,
146 500.0f, 600.0f, 700.0f, 800.0f });
147 std::vector<int32_t> outputValues({ 0, 0, 0, 0,
150 return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
152 inputTensorInfo, outputTensorInfo,
153 inputValues, outputValues, 1);
// Presumably ArgMaxChannelTest: identical 24-element input to the block
// above, same axis (1), but the expected indices begin { 2, 2, 2, 2 } —
// consistent with the third slice along that axis holding the largest
// values (100..800). Remaining expected values and the TensorInfo setup are
// missing from this extract.
156 template<armnn::DataType ArmnnType,
typename T>
167 if (armnn::IsQuantizedType<T>())
175 std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
176 5.0f, 6.0f, 7.0f, 8.0f,
178 10.0f, 20.0f, 30.0f, 40.0f,
179 50.0f, 60.0f, 70.0f, 80.0f,
181 100.0f, 200.0f, 300.0f, 400.0f,
182 500.0f, 600.0f, 700.0f, 800.0f });
183 std::vector<int32_t> outputValues({ 2, 2, 2, 2,
186 return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
188 inputTensorInfo, outputTensorInfo,
189 inputValues, outputValues, 1);
// Presumably ArgMaxHeightTest: same 24-element input, reduction axis 2 —
// presumably the height dimension; expected indices begin { 1, 1, 1, 1 },
// consistent with the second row of each 2x4 slice holding the larger
// values. TensorInfo setup and the rest of the expected output are missing
// from this extract.
192 template<armnn::DataType ArmnnType,
typename T>
204 if (armnn::IsQuantizedType<T>())
210 std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
211 5.0f, 6.0f, 7.0f, 8.0f,
213 10.0f, 20.0f, 30.0f, 40.0f,
214 50.0f, 60.0f, 70.0f, 80.0f,
216 100.0f, 200.0f, 300.0f, 400.0f,
217 500.0f, 600.0f, 700.0f, 800.0f });
218 std::vector<int32_t> outputValues({ 1, 1, 1, 1,
222 return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
224 inputTensorInfo, outputTensorInfo,
225 inputValues, outputValues, 2);
// Presumably ArgMinWidthTest: same 24-element input, reduction axis 3 —
// presumably the width dimension; expected indices begin { 0, 0 },
// consistent with each row increasing left-to-right so the minimum is at
// column 0. Remaining expected values and the TensorInfo setup are missing
// from this extract.
228 template<armnn::DataType ArmnnType,
typename T>
240 if (armnn::IsQuantizedType<T>())
246 std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
247 5.0f, 6.0f, 7.0f, 8.0f,
249 10.0f, 20.0f, 30.0f, 40.0f,
250 50.0f, 60.0f, 70.0f, 80.0f,
252 100.0f, 200.0f, 300.0f, 400.0f,
253 500.0f, 600.0f, 700.0f, 800.0f });
254 std::vector<int32_t> outputValues({ 0, 0,
258 return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
260 inputTensorInfo, outputTensorInfo,
261 inputValues, outputValues, 3);
// Per-data-type wrapper tests. Each line below is the opening call of a
// small non-template test function that instantiates one of the templated
// tests above for a concrete armnn::DataType (Float32, Float16, QAsymmS8,
// QAsymmU8, QSymmS16, Signed32). The extraction dropped each wrapper's
// signature, its argument lines (presumably forwarding workloadFactory,
// memoryManager and tensorHandleFactory) and the closing parentheses —
// only the call-open lines survive, so nothing here is compilable as-is.
268 ArgMaxSimpleTest<armnn::DataType::Float32>(
274 ArgMaxSimpleTest<armnn::DataType::Float16>(
280 ArgMaxSimpleTest<armnn::DataType::QAsymmS8>(
286 ArgMaxSimpleTest<armnn::DataType::QAsymmU8>(
292 ArgMaxSimpleTest<armnn::DataType::QSymmS16>(
298 ArgMaxSimpleTest<armnn::DataType::Signed32>(
304 ArgMinSimpleTest<armnn::DataType::Float32>(
310 ArgMinSimpleTest<armnn::DataType::Float16>(
316 ArgMinSimpleTest<armnn::DataType::QAsymmS8>(
322 ArgMinSimpleTest<armnn::DataType::QAsymmU8>(
328 ArgMinSimpleTest<armnn::DataType::QSymmS16>(
334 ArgMinSimpleTest<armnn::DataType::Signed32>(
340 ArgMinChannelTest<armnn::DataType::Float32>(
346 ArgMinChannelTest<armnn::DataType::Float16>(
352 ArgMinChannelTest<armnn::DataType::QAsymmS8>(
358 ArgMinChannelTest<armnn::DataType::QAsymmU8>(
364 ArgMinChannelTest<armnn::DataType::QSymmS16>(
370 ArgMinChannelTest<armnn::DataType::Signed32>(
376 ArgMaxChannelTest<armnn::DataType::Float32>(
382 ArgMaxChannelTest<armnn::DataType::Float16>(
388 ArgMaxChannelTest<armnn::DataType::QAsymmS8>(
394 ArgMaxChannelTest<armnn::DataType::QAsymmU8>(
400 ArgMaxChannelTest<armnn::DataType::QSymmS16>(
406 ArgMaxChannelTest<armnn::DataType::Signed32>(
412 ArgMaxHeightTest<armnn::DataType::Float32>(
418 ArgMaxHeightTest<armnn::DataType::Float16>(
424 ArgMaxHeightTest<armnn::DataType::Signed32>(
430 ArgMaxHeightTest<armnn::DataType::QAsymmS8>(
436 ArgMaxHeightTest<armnn::DataType::QAsymmU8>(
442 ArgMinWidthTest<armnn::DataType::Float32>(
448 ArgMinWidthTest<armnn::DataType::Float16>(
454 ArgMinWidthTest<armnn::DataType::Signed32>(
460 ArgMinWidthTest<armnn::DataType::QAsymmS8>(
466 ArgMinWidthTest<armnn::DataType::QAsymmU8>(
const TensorShape & GetShape() const
LayerTestResult< int32_t, 3 > ArgMaxSimpleTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
virtual std::unique_ptr< IWorkload > CreateArgMinMax(const ArgMinMaxQueueDescriptor &descriptor, const WorkloadInfo &info) const
ArgMinMaxFunction m_Function
Specifies whether the function is to find the Min or the Max.
LayerTestResult< int32_t, 3 > ArgMaxHeightTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerDescriptor m_Parameters
LayerTestResult< int32_t, 3 > ArgMinChannelTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
void SetQuantizationScale(float scale)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
Contains information about TensorInfos of a layer.
int m_Axis
Axis to reduce across the input tensor.
void SetQuantizationOffset(int32_t offset)
LayerTestResult< int32_t, 3 > ArgMinSimpleTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
LayerTestResult< int32_t, 3 > ArgMinWidthTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
unsigned int GetNumElements() const
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
LayerTestResult< int32_t, 3 > ArgMaxChannelTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)