// Template specialization mapping the generic CreateWorkload helper onto the
// factory's Addition entry point.
// NOTE(review): excerpt — parameter list and braces fall outside this fragment.
14 std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::AdditionQueueDescriptor>(
19 return workloadFactory.CreateAddition(descriptor, info);
// AdditionTest: elementwise Float32 addition over a 4-D tensor of shape
// { batchSize=2, channels=2, height=2, width=3 } (24 elements).
// NOTE(review): excerpt — the literal input/output data lines are elided here.
27 unsigned int batchSize = 2u;
28 unsigned int channels = 2u;
29 unsigned int height = 2u;
30 unsigned int width = 3u;
// NCHW-ordered shape used for both inputs and the output.
32 unsigned int shape[] = { batchSize, channels, height, width };
34 std::vector<float> input1 =
49 std::vector<float> input2 =
65 std::vector<float> output
// Delegates to the shared elementwise helper, parameterized on rank 4,
// the Addition queue descriptor, and Float32.
80 return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::Float32>(
// Addition5dTest: elementwise Float32 addition over a 5-D tensor of shape
// { depth=2, batchSize=2, channels=2, height=2, width=3 } (48 elements).
// The expected output below is exactly input1 + input2, element by element.
97 unsigned int depth = 2u;
98 unsigned int batchSize = 2u;
99 unsigned int channels = 2u;
100 unsigned int height = 2u;
101 unsigned int width = 3u;
// Rank-5 shape shared by both inputs and the output.
103 unsigned int shape[] = { depth, batchSize, channels, height, width };
105 std::vector<float> input1 =
107 2.6f, 4.0f, 4.4f, 2.7f, 4.6f, 2.8f,
108 2.3f, 1.9f, 3.4f, 2.9f, 2.2f, 4.5f,
110 2.8f, 1.9f, 2.3f, 2.6f, 4.7f, 3.5f,
111 0.4f, 1.5f, 2.1f, 0.7f, 5.0f, 1.1f,
114 1.0f, 2.7f, 0.0f, 0.6f, 0.8f, 0.9f,
115 1.0f, 2.6f, 0.4f, 3.8f, 0.4f, 0.8f,
117 0.5f, 4.3f, 3.1f, 4.4f, 0.7f, 1.4f,
118 0.4f, 4.4f, 0.7f, 0.6f, 4.7f, 1.2f,
122 std::vector<float> input2 =
124 4.4f, 3.0f, 1.0f, 0.0f, 3.9f, 3.1f,
125 1.7f, 2.9f, 1.3f, 0.4f, 0.4f, 4.3f,
127 4.5f, 0.2f, 2.2f, 4.1f, 3.9f, 3.0f,
128 0.1f, 2.5f, 4.1f, 4.6f, 1.5f, 0.0f,
131 0.5f, 4.9f, 2.5f, 1.5f, 3.4f, 4.5f,
132 2.0f, 3.0f, 4.9f, 1.6f, 2.4f, 3.4f,
134 3.6f, 1.8f, 1.3f, 2.6f, 2.1f, 4.8f,
135 2.0f, 4.3f, 4.0f, 0.2f, 0.6f, 4.4f,
// Golden reference: per-element sum of the two inputs above.
138 std::vector<float> output =
140 7.0f, 7.0f, 5.4f, 2.7f, 8.5f, 5.9f,
141 4.0f, 4.8f, 4.7f, 3.3f, 2.6f, 8.8f,
143 7.3f, 2.1f, 4.5f, 6.7f, 8.6f, 6.5f,
144 0.5f, 4.0f, 6.2f, 5.3f, 6.5f, 1.1f,
147 1.5f, 7.6f, 2.5f, 2.1f, 4.2f, 5.4f,
148 3.0f, 5.6f, 5.3f, 5.4f, 2.8f, 4.2f,
150 4.1f, 6.1f, 4.4f, 7.0f, 2.8f, 6.2f,
151 2.4f, 8.7f, 4.7f, 0.8f, 5.3f, 5.6f,
// Rank-5 instantiation of the shared elementwise helper.
154 return ElementwiseTestHelper<5, armnn::AdditionQueueDescriptor, armnn::DataType::Float32>(
163 tensorHandleFactory);
// AdditionBroadcastTestImpl: templated over data type; runs an Addition
// workload where the second input is broadcast against the first.
// NOTE(review): excerpt — tensor-info construction, the literal data, and the
// Execute/copy-back lines are elided from this fragment.
166 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
// Quantization parameters are only applied for quantized types; float runs
// skip this branch entirely.
179 if (armnn::IsQuantizedType<T>())
183 inputTensorInfo2.SetQuantizationScale(qScale);
184 inputTensorInfo2.SetQuantizationOffset(qOffset);
185 outputTensorInfo.SetQuantizationScale(qScale);
186 outputTensorInfo.SetQuantizationOffset(qOffset);
// Inputs and the expected output are quantized from float literals so the
// same test data serves every instantiated data type.
189 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, armnnUtils::QuantizedVector<T>(
202 auto input2 = MakeTensor<T, 4>(inputTensorInfo2, armnnUtils::QuantizedVector<T>(
210 ret.
outputExpected = MakeTensor<T, 4>(outputTensorInfo, armnnUtils::QuantizedVector<T>(
// Backend tensor handles for both inputs and the output.
223 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo1);
224 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo2);
225 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
// Wire the handles into the queue descriptor / workload info pair.
229 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
230 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
231 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
233 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateAddition(data, info);
// Allocate backing memory before the data-copy/Execute steps (elided).
235 inputHandle1->Allocate();
236 inputHandle2->Allocate();
237 outputHandle->Allocate();
242 workload->PostAllocationConfigure();
// AdditionBroadcast1ElementTestImpl: same structure as
// AdditionBroadcastTestImpl, but the second input is a single element that is
// broadcast across the whole first tensor.
// NOTE(review): excerpt — tensor-info setup, literal data, and Execute/copy
// lines are elided from this fragment.
250 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
// Apply quantization parameters only for quantized instantiations.
263 if (armnn::IsQuantizedType<T>())
267 inputTensorInfo2.SetQuantizationScale(qScale);
268 inputTensorInfo2.SetQuantizationOffset(qOffset);
269 outputTensorInfo.SetQuantizationScale(qScale);
270 outputTensorInfo.SetQuantizationOffset(qOffset);
// Test data is quantized from float literals for type-generic reuse.
273 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, armnnUtils::QuantizedVector<T>(
284 auto input2 = MakeTensor<T, 4>(inputTensorInfo2, armnnUtils::QuantizedVector<T>(
291 ret.
outputExpected = MakeTensor<T, 4>(outputTensorInfo, armnnUtils::QuantizedVector<T>(
// Backend tensor handles for inputs and output.
302 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo1);
303 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo2);
304 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
// Bind handles to the workload descriptor and info.
308 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
309 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
310 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
312 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateAddition(data, info);
// Allocate backing memory ahead of the (elided) data upload and Execute.
314 inputHandle1->Allocate();
315 inputHandle2->Allocate();
316 outputHandle->Allocate();
321 workload->PostAllocationConfigure();
// Per-type wrappers over AdditionBroadcastTestImpl. Each forwards the
// factories plus a (qScale, qOffset) pair appropriate for its data type.
// Float32: quantization parameters unused (0.0f scale, 0 offset).
334 return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
335 workloadFactory, memoryManager, 0.0f, 0, tensorHandleFactory);
// QAsymmU8: scale 2, offset 0.
343 return AdditionBroadcastTestImpl<armnn::DataType::QAsymmU8>(
344 workloadFactory, memoryManager, 2.f, 0, tensorHandleFactory);
// QSymmS16: scale 2, offset 0 (symmetric types use a zero offset).
352 return AdditionBroadcastTestImpl<armnn::DataType::QSymmS16>(
353 workloadFactory, memoryManager, 2.f, 0, tensorHandleFactory);
// Signed32: unit scale, zero offset.
361 return AdditionBroadcastTestImpl<armnn::DataType::Signed32>(
362 workloadFactory, memoryManager, 1.f, 0, tensorHandleFactory);
// Per-type wrappers over AdditionBroadcast1ElementTestImpl, mirroring the
// broadcast wrappers above but with scales/offsets tuned for the 1-element
// broadcast data range.
// Float32: quantization parameters unused.
370 return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
371 workloadFactory, memoryManager, 0.0f, 0, tensorHandleFactory);
// QAsymmU8: asymmetric quantization with a mid-range offset of 128.
379 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QAsymmU8>(
380 workloadFactory, memoryManager, 0.1333333f, 128, tensorHandleFactory);
// QSymmS16: same scale, zero offset (symmetric).
388 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QSymmS16>(
389 workloadFactory, memoryManager, 0.1333333f, 0, tensorHandleFactory);
// Signed32: unit scale, zero offset.
397 return AdditionBroadcast1ElementTestImpl<armnn::DataType::Signed32>(
398 workloadFactory, memoryManager, 1.f, 0, tensorHandleFactory);
// AdditionUint8Test: quantized (QAsymmU8) addition on matching 1x2x2x3
// shapes. Expected outputs saturate at the uint8 maximum of 255.
// NOTE(review): the quantization scales/offsets applied to these tensors are
// outside this fragment, so the exact 81/39/... values can't be re-derived
// here — confirm against the elided TensorInfo setup.
406 const unsigned int shape0[] = { 1, 2, 2, 3 };
407 const unsigned int shape1[] = { 1, 2, 2, 3 };
409 std::vector<uint8_t> input0(
411 63, 35, 77, 70, 56, 112,
412 203, 28, 252, 168, 245, 91
415 std::vector<uint8_t> input1(
417 21, 7, 175, 231, 175, 210,
418 126, 161, 63, 21, 105, 126
// Several sums exceed the representable range and clamp to 255.
421 std::vector<uint8_t> output(
423 81, 39, 249, 255, 228, 255,
424 255, 186, 255, 186, 255, 214,
427 return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QAsymmU8>(
// AdditionInt16Test: QSymmS16 addition on matching 1x2x2x3 shapes.
// Expected output is the exact per-element sum (no saturation needed —
// all sums fit comfortably in int16_t).
450 const unsigned int shape0[] = { 1, 2, 2, 3 };
451 const unsigned int shape1[] = { 1, 2, 2, 3 };
453 std::vector<int16_t> input0 =
455 63, 35, 77, 70, 56, 112,
456 203, 28, 252, 168, 245, 91
459 std::vector<int16_t> input1 =
461 21, 7, 175, 231, 175, 210,
462 126, 161, 63, 21, 105, 126
// Golden reference: input0[i] + input1[i] for every element.
465 std::vector<int16_t> output =
467 84, 42, 252, 301, 231, 322,
468 329, 189, 315, 189, 350, 217,
471 return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QSymmS16>(
// AdditionInt32Test: Signed32 addition on matching 1x2x2x3 shapes; uses the
// same data set as the int16 variant, with the identical exact sums.
494 const unsigned int shape0[] = { 1, 2, 2, 3 };
495 const unsigned int shape1[] = { 1, 2, 2, 3 };
497 std::vector<int32_t> input0 =
499 63, 35, 77, 70, 56, 112,
500 203, 28, 252, 168, 245, 91
503 std::vector<int32_t> input1 =
505 21, 7, 175, 231, 175, 210,
506 126, 161, 63, 21, 105, 126
// Golden reference: input0[i] + input1[i] for every element.
509 std::vector<int32_t> output =
511 84, 42, 252, 301, 231, 322,
512 329, 189, 315, 189, 350, 217,
515 return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::Signed32>(
// AdditionAfterMaxPoolTest: chains two workloads — a MaxPool2d followed by
// an Addition that consumes the pooling output as its first input.
// NOTE(review): excerpt — descriptor configuration, data uploads, the
// pooling Execute call, and the final copy-back are elided from this view.
548 boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
// Handles for the pooling stage; the pooling output handle is reused below
// as an input to the addition stage.
553 std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
555 std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
572 AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
573 AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());
576 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreatePooling2d(queueDescriptor, workloadInfo);
// Intermediate buffer to hold the pooled result, sized from the pooling
// output tensor info.
579 auto shape( GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
580 boost::multi_array<float, 4> resultMaxPool;
581 resultMaxPool.resize(shape);
// Second operand and expected result of the addition stage.
593 boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
600 addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
606 std::unique_ptr<armnn::ITensorHandle> addInputHandle = tensorHandleFactory.
CreateTensorHandle(addInputTensorInfo);
607 std::unique_ptr<armnn::ITensorHandle> addOutputHandle =
// The addition's first input is wired to the pooling *output* handle,
// forming the pool -> add pipeline.
614 AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
615 AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
616 AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());
618 std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.
CreateAddition(data, info);
// Allocate all four buffers before execution.
620 poolingInputHandle->Allocate();
621 poolingOutputHandle->Allocate();
622 addInputHandle->Allocate();
623 addOutputHandle->Allocate();
// Configure/execute sequence: the pooling workload's Execute is elided
// between these lines; the addition runs after it.
631 workload->PostAllocationConfigure();
633 addWorkload->PostAllocationConfigure();
634 addWorkload->Execute();
// CompareAdditionTest: runs the same Addition on the backend under test and
// on a reference workload factory, using identical pseudo-random inputs, so
// the two outputs can be compared (comparison/copy-back lines are elided).
649 unsigned int batchSize = 4;
650 unsigned int channels = 1;
651 unsigned int height = 2;
652 unsigned int width = 3;
// Shared 4-D shape for both inputs and the output.
657 unsigned int shape[] = {batchSize, channels, height, width};
// Fixed seeds (1232 / 456) keep the random data deterministic across runs.
663 auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
664 auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);
// Handles on the backend under test...
668 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo1);
669 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo2);
670 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
// ...and mirrored handles on the reference backend.
672 std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refTensorHandleFactory.
CreateTensorHandle(inputTensorInfo1);
673 std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refTensorHandleFactory.
CreateTensorHandle(inputTensorInfo2);
674 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
// Wire the test backend's workload.
678 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
679 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
680 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
// Wire the reference workload; refData/refInfo start as copies, so the
// handles are overwritten by slot index rather than appended.
684 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
685 SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
686 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
688 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateAddition(data, info);
689 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.
CreateAddition(refData, refInfo);
// Allocate all six buffers before the (elided) uploads and Execute calls.
691 inputHandle1->Allocate();
692 inputHandle2->Allocate();
693 outputHandle->Allocate();
694 inputHandle1Ref->Allocate();
695 inputHandle2Ref->Allocate();
696 outputHandleRef->Allocate();
// Execute both pipelines; results are compared by the (elided) caller code.
703 workload->PostAllocationConfigure();
705 workloadRef->PostAllocationConfigure();
706 workloadRef->Execute();
uint32_t m_PoolWidth
Pooling width value.
boost::multi_array< T, n > outputExpected
LayerTestResult< int16_t, 4 > AdditionBroadcastInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void IgnoreUnused(Ts &&...)
LayerTestResult< float, 4 > AdditionTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerDescriptor m_Parameters
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
LayerTestResult< float, 4 > CompareAdditionTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory)
uint32_t m_PoolHeight
Pooling height value.
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
LayerTestResult< T, 4 > AdditionBroadcast1ElementTestImpl(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > AdditionBroadcast1ElementUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void SetQuantizationScale(float scale)
LayerTestResult< uint8_t, 4 > AdditionUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< IWorkload > CreatePooling2d(const Pooling2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
LayerTestResult< float, 4 > AdditionAfterMaxPoolTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
boost::multi_array< T, n > output
LayerTestResult< T, 4 > AdditionBroadcastTestImpl(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset, const armnn::ITensorHandleFactory &tensorHandleFactory)
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
LayerTestResult< float, 4 > AdditionBroadcastTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > AdditionBroadcastUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
virtual std::unique_ptr< IWorkload > CreateAddition(const AdditionQueueDescriptor &descriptor, const WorkloadInfo &info) const
LayerTestResult< float, 4 > AdditionBroadcast1ElementTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int32_t, 4 > AdditionBroadcastInt32Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
Contains information about inputs and outputs to a layer.
void SetQuantizationOffset(int32_t offset)
LayerTestResult< int16_t, 4 > AdditionInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 5 > Addition5dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
A Pooling2dDescriptor for the Pooling2dLayer.
LayerTestResult< int32_t, 4 > AdditionInt32Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
LayerTestResult< int32_t, 4 > AdditionBroadcast1ElementInt32Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
LayerTestResult< int16_t, 4 > AdditionBroadcast1ElementInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)