// NOTE(review): extraction fragment — the leading integers on each line are line-number
// residue from the extraction tool, and many original lines (function name/signature tail,
// the NHWC layout conditionals, workload/descriptor creation, the final result comparison)
// are missing from this view. Comments describe only what the visible lines establish.
//
// Shared driver for the L2 normalization layer tests: quantizes reference float data,
// runs an L2Normalization workload, and checks the produced output against
// expectedOutputValues.
24 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
// inputValues / expectedOutputValues are NCHW-ordered reference data; expectedOutputValues
// is taken by non-const reference because it is overwritten in-place when permuted (below).
32 const std::vector<float>& inputValues,
35 std::vector<float>& expectedOutputValues,
// epsilon is presumably forwarded to the layer's m_Eps (used to avoid dividing by zero) —
// the forwarding line is elided here; TODO confirm.
37 float epsilon = 1e-12f)
// Input and output share the same shape; only quantization params (scale/offset vs
// outScale/outOffset, declared on elided lines) may differ.
40 const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType, scale, offset);
41 const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType, outScale, outOffset);
// Work on a copy so the caller's reference input is not disturbed.
45 std::vector<float> inputData = inputValues;
// Presumably guarded by `if (layout == armnn::DataLayout::NHWC)` on an elided line:
// permute the NCHW reference input into NHWC order before quantizing.
48 std::vector<float> tmp(inputData.size());
49 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(),
sizeof(float));
// Quantize the (possibly permuted) float input into the target element type T.
53 auto inputTensor = armnnUtils::QuantizedVector<T>(inputData,
54 inputTensorInfo.GetQuantizationScale(),
55 inputTensorInfo.GetQuantizationOffset());
57 std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
// Same NHWC permutation applied to the expected outputs (also presumably inside the
// layout==NHWC branch). Using inputTensorInfo's shape here is safe because both tensor
// infos were built from the same inputOutputTensorShape (lines 40/41).
61 std::vector<float> tmp(expectedOutputValues.size());
62 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputValues.data(), tmp.data(),
64 expectedOutputValues = tmp;
// Quantize expected outputs with the OUTPUT tensor's quantization parameters.
67 std::vector<T> expectedOutputData = armnnUtils::QuantizedVector<T>(expectedOutputValues,
68 outputTensorInfo.GetQuantizationScale(),
69 outputTensorInfo.GetQuantizationOffset());
71 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
72 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
// Wire the handles into the workload descriptor/info (descriptor construction elided).
79 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
80 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
84 inputHandle->Allocate();
85 outputHandle->Allocate();
// Execute the workload (its creation via the workload factory is on elided lines).
89 workload->PostAllocationConfigure();
90 ExecuteWorkload(*workload, memoryManager);
// Tail of the result comparison: output handle's shape must match the expected TensorInfo.
96 outputHandle->GetShape(),
97 outputTensorInfo.GetShape());
// Computes 1 / ||elements||_2 — the scale factor that L2-normalizes a vector
// containing the given elements. The test bodies below use it to build expected
// outputs: each input value is multiplied by the inverse L2 norm of its
// normalization group.
//
// @param elements  the values of one normalization group (e.g. all channels at
//                  one spatial position)
// @return          1 / sqrt(sum of squares of elements)
float CalcInvL2Norm(std::initializer_list<float> elements)
{
    // Fold the squares of all elements into a single sum.
    const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
                                            [](float acc, float element)
                                            { return acc + element * element; });
    return 1.0f / sqrtf(reduction);
}
// NOTE(review): extraction fragment — signature, input-value list (original lines
// 133–143) and most trailing arguments are elided; leading integers are line-number
// residue. Comments describe only what is visible.
//
// Epsilon-path test: a 1x3x1x1 tensor whose values are so small that the sum of
// squares falls below epsilon, so the layer divides by sqrt(epsilon) instead —
// the expected outputs are built with approxInvL2Norm = 1/sqrt(epsilon).
107 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
123 unsigned int numberOfBatches = 1;
124 unsigned int numberOfChannels = 3;
125 unsigned int height = 1;
126 unsigned int width = 1;
// Shape built for the requested data layout (GetTensorShape call head elided).
129 numberOfBatches, numberOfChannels, height, width, layout);
// Input values on elided lines — presumably the raw 0.00000001f..0.00000003f values
// echoed in the expected outputs below; TODO confirm.
132 std::vector<float> inputValues
144 const float approxInvL2Norm = 1.f / sqrtf(epsilon);
145 std::vector<float> expectedOutputValues
148 0.00000001f * approxInvL2Norm,
149 0.00000002f * approxInvL2Norm,
150 0.00000003f * approxInvL2Norm,
// Delegate to the shared driver (remaining arguments elided).
153 return L2NormalizationTestImpl<ArmnnType>(
163 expectedOutputValues,
// NOTE(review): extraction fragment — signature, input-value list and trailing call
// arguments elided; leading integers are line-number residue.
//
// 1D case: a 1x10x1x1 tensor, so the whole channel vector {1..10} is one
// normalization group.
169 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
184 unsigned int numberOfBatches = 1;
185 unsigned int numberOfChannels = 10;
186 unsigned int height = 1;
187 unsigned int width = 1;
191 numberOfBatches, numberOfChannels, height, width, layout);
// Input values on elided lines — presumably 1.0f..10.0f, matching the factors below.
192 std::vector<float> inputValues
// 0.050964719 ~= 1/sqrt(1^2 + 2^2 + ... + 10^2) = 1/sqrt(385).
224 const float approxInvL2Norm = 0.050964719f;
225 std::vector<float> expectedOutputValues
228 1.0f * approxInvL2Norm,
229 2.0f * approxInvL2Norm,
230 3.0f * approxInvL2Norm,
231 4.0f * approxInvL2Norm,
232 5.0f * approxInvL2Norm,
233 6.0f * approxInvL2Norm,
234 7.0f * approxInvL2Norm,
235 8.0f * approxInvL2Norm,
236 9.0f * approxInvL2Norm,
237 10.0f * approxInvL2Norm
// Delegate to the shared driver (remaining arguments elided).
241 return L2NormalizationTestImpl<ArmnnType>(
251 expectedOutputValues,
// NOTE(review): extraction fragment — signature and trailing call arguments elided;
// leading integers are line-number residue.
//
// 2D case: a 1x2x1x5 tensor. Each normalization group pairs the value at the same
// width position across the two channels, e.g. {1,2}, {3,4}, ...
255 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
270 unsigned int numberOfBatches = 1;
271 unsigned int numberOfChannels = 2;
272 unsigned int height = 1;
273 unsigned int width = 5;
276 numberOfBatches, numberOfChannels, height, width, layout);
277 std::vector<float> inputValues
// Channel 0.
280 1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
// Channel 1.
283 2.0f, 4.0f, 6.0f, 8.0f, 10.0f
285 std::vector<float> expectedOutputValues
// Channel 0, each value scaled by the inverse norm of its cross-channel pair.
288 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
289 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
290 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
291 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
292 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
// Channel 1, same pairs.
295 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
296 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
297 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
298 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
299 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
// Delegate to the shared driver (remaining arguments elided).
302 return L2NormalizationTestImpl<ArmnnType>(
312 expectedOutputValues,
// NOTE(review): extraction fragment — signature and trailing call arguments elided;
// leading integers are line-number residue.
//
// 3D case: a 1x2x4x3 tensor. Each normalization group pairs the value at the same
// (height, width) position across the two channels.
316 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
331 unsigned int numberOfBatches = 1;
332 unsigned int numberOfChannels = 2;
333 unsigned int height = 4;
334 unsigned int width = 3;
337 numberOfBatches, numberOfChannels, height, width, layout);
338 std::vector<float> inputValues
// Channel 0, 4x3.
341 119.0f, 21.0f, 150.0f,
342 149.0f, 32.0f, 179.0f,
343 15.0f, 227.0f, 141.0f,
344 147.0f, 199.0f, 220.0f,
// Channel 1, 4x3.
347 110.0f, 140.0f, 73.0f,
348 211.0f, 212.0f, 89.0f,
349 24.0f, 138.0f, 188.0f,
350 162.0f, 12.0f, 161.0f
352 std::vector<float> expectedOutputValues
// Channel 0: each value scaled by the inverse norm of its cross-channel pair.
355 119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
356 21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
357 150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
358 149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
359 32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
360 179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
361 15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
362 227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
363 141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
364 147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
365 199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
366 220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
// Channel 1: same pairs.
369 110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
370 140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
371 73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
372 211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
373 212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
374 89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
375 24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
376 138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
377 188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
378 162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
379 12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
380 161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
// Delegate to the shared driver (remaining arguments elided).
383 return L2NormalizationTestImpl<ArmnnType>(
393 expectedOutputValues,
// NOTE(review): extraction fragment — signature, trailing call arguments and several
// input rows (e.g. channel tails around original lines 425-427, 431-433, 437-441,
// 444-445, 450-451, 455-456) are elided; leading integers are line-number residue.
// The expected outputs reference values (6, 82, 1, 195, 28, 64, 80, 37, 54, 67, 90,
// 49, 7, 163, 18, 92, 125, 88, ...) that presumably appear on those elided input
// lines — TODO confirm against the full file.
//
// 4D case: a 2x3x4x3 tensor. Each normalization group is the triple of values at the
// same (batch, height, width) position across the three channels.
397 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
412 unsigned int numberOfBatches = 2;
413 unsigned int numberOfChannels = 3;
414 unsigned int height = 4;
415 unsigned int width = 3;
418 numberOfBatches, numberOfChannels, height, width, layout);
419 std::vector<float> inputValues
// Batch 0, channel 0 (last row elided).
422 235.0f, 46.0f, 178.0f,
423 100.0f, 123.0f, 19.0f,
424 172.0f, 74.0f, 250.0f,
// Batch 0, channel 1 (last row elided).
428 113.0f, 95.0f, 202.0f,
429 77.0f, 114.0f, 71.0f,
430 122.0f, 246.0f, 166.0f,
// Batch 0, channel 2 (last row elided).
434 56.0f, 170.0f, 162.0f,
435 194.0f, 89.0f, 254.0f,
436 12.0f, 209.0f, 200.0f,
// Batch 1, channel 0 (first rows elided).
442 25.0f, 117.0f, 103.0f,
443 247.0f, 59.0f, 189.0f,
// Batch 1, channel 1.
446 239.0f, 104.0f, 199.0f,
447 17.0f, 124.0f, 153.0f,
448 222.0f, 217.0f, 75.0f,
449 32.0f, 126.0f, 21.0f,
// Batch 1, channel 2 (last row elided).
452 97.0f, 145.0f, 215.0f,
453 115.0f, 116.0f, 238.0f,
454 226.0f, 16.0f, 132.0f,
457 std::vector<float> expectedOutputValues
// Batch 0, channel 0: each value scaled by the inverse norm of its 3-channel group.
460 235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
461 46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
462 178.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
463 100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
464 123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
465 19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
466 172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
467 74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
468 250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
469 6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
470 195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
471 80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
// Batch 0, channel 1: same groups.
474 113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
475 95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
476 202.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
477 77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
478 114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
479 71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
480 122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
481 246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
482 166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
483 82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
484 28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
485 37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
// Batch 0, channel 2: same groups.
488 56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
489 170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
490 162.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
491 194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
492 89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
493 254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
494 12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
495 209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
496 200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
497 1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
498 64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
499 54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
// Batch 1, channel 0.
502 67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
503 90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
504 49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
505 7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
506 163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
507 18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
508 25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
509 117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
510 103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
511 247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
512 59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
513 189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
// Batch 1, channel 1.
516 239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
517 104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
518 199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
519 17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
520 124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
521 153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
522 222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
523 217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
524 75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
525 32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
526 126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
527 21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
// Batch 1, channel 2.
530 97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
531 145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
532 215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
533 115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
534 116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
535 238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
536 226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
537 16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
538 132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
539 92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
540 125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
541 88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
// Delegate to the shared driver (remaining arguments elided).
544 return L2NormalizationTestImpl<ArmnnType>(
554 expectedOutputValues,
// NOTE(review): extraction fragments — only the return/dispatch line of each public
// test entry point is visible; the signatures (names per the extraction residue:
// L2NormalizationDefaultEpsilonTest, L2NormalizationNonDefaultEpsilonTest,
// L2Normalization1dTest, L2Normalization1dInt16Test, L2Normalization1dUint8Test,
// L2Normalization2dTest, L2Normalization2dInt16Test, L2Normalization2dUint8Test)
// and argument lists are elided.
569 return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(
587 return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(
605 return L2Normalization1dTestCommon<armnn::DataType::Float32>(
622 return L2Normalization1dTestCommon<armnn::DataType::QSymmS16>(
639 return L2Normalization1dTestCommon<armnn::DataType::QAsymmU8>(
656 return L2Normalization2dTestCommon<armnn::DataType::Float32>(
// NOTE(review): the two dispatches below sit where the 2d Int16/Uint8 tests are
// expected (between the Float32 2d test above and the 2d shape test), yet they call
// L2Normalization1dTestCommon — looks like a copy-paste from the 1d tests; the Int16/
// Uint8 variants would then never exercise the 2d data. Verify against the full file
// before changing.
673 return L2Normalization1dTestCommon<armnn::DataType::QSymmS16>(
690 return L2Normalization1dTestCommon<armnn::DataType::QAsymmU8>(
// NOTE(review): extraction fragment — the function signature (per the extraction
// residue: L2Normalization2dShapeTest), tensor-info construction, descriptor setup,
// data copies and the final comparison tail are elided; leading integers are
// line-number residue.
//
// Runs L2 normalization directly on a 2D-shaped tensor: expected values normalize
// consecutive pairs {1,2}, {3,4}, ... — i.e. groups of 2 along the last dimension.
709 std::vector<float> inputData
711 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f
713 std::vector<float> expectedOutputData
// Each element scaled by the inverse norm of its pair.
715 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
716 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
717 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
718 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
719 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
720 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
721 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
722 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
723 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
724 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
726 std::vector<float> actualOutput(outputTensorInfo.
GetNumElements());
// Unlike the other tests, this one builds and runs the workload inline rather than
// delegating to L2NormalizationTestImpl.
732 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
733 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
740 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
741 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
743 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateL2Normalization(descriptor, info);
745 inputHandle->Allocate();
746 outputHandle->Allocate();
750 workload->PostAllocationConfigure();
751 ExecuteWorkload(*workload, memoryManager);
// Tail of the result comparison (remaining lines elided).
757 outputHandle->GetShape(),
// NOTE(review): extraction fragments — only the return/dispatch line of each public
// test entry point is visible (names per the extraction residue: L2Normalization3dTest,
// L2Normalization3dInt16Test, L2Normalization3dUint8Test, L2Normalization4dTest,
// L2Normalization4dInt16Test, L2Normalization4dUint8Test).
767 return L2Normalization3dTestCommon<armnn::DataType::Float32>(
// NOTE(review): the Int16/Uint8 dispatches below call L2Normalization1dTestCommon
// although they sit where the 3d and 4d quantized tests are expected (only the Float32
// variants call the matching 3d/4d commons) — looks like the same copy-paste pattern
// flagged for the 2d tests. Verify against the full file before changing.
784 return L2Normalization1dTestCommon<armnn::DataType::QSymmS16>(
801 return L2Normalization1dTestCommon<armnn::DataType::QAsymmU8>(
818 return L2Normalization4dTestCommon<armnn::DataType::Float32>(
835 return L2Normalization1dTestCommon<armnn::DataType::QSymmS16>(
852 return L2Normalization1dTestCommon<armnn::DataType::QAsymmU8>(
float m_Eps
Used to avoid dividing by zero.
virtual std::unique_ptr< IWorkload > CreateL2Normalization(const L2NormalizationQueueDescriptor &descriptor, const WorkloadInfo &info) const
const TensorShape & GetShape() const
LayerTestResult< float, 4 > L2Normalization4dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
LayerTestResult< int16_t, 4 > L2Normalization3dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
void IgnoreUnused(Ts &&...)
LayerDescriptor m_Parameters
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
LayerTestResult< float, 4 > L2Normalization3dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
void Permute(const armnn::TensorShape &dstShape, const armnn::PermutationVector &mappings, const void *src, void *dst, size_t dataTypeSize)
LayerTestResult< float, 4 > L2Normalization1dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
A L2NormalizationDescriptor for the L2NormalizationLayer.
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
LayerTestResult< uint8_t, 4 > L2Normalization3dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
LayerTestResult< int16_t, 4 > L2Normalization1dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
LayerTestResult< float, 2 > L2Normalization2dShapeTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
armnn::TensorShape GetTensorShape(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout)
LayerTestResult< uint8_t, 4 > L2Normalization2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
LayerTestResult< float, 4 > L2NormalizationDefaultEpsilonTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
LayerTestResult< int16_t, 4 > L2Normalization4dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
Contains information about TensorInfos of a layer.
LayerTestResult< uint8_t, 4 > L2Normalization1dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
LayerTestResult< int16_t, 4 > L2Normalization2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
LayerTestResult< float, 4 > L2Normalization2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
unsigned int GetNumElements() const
LayerTestResult< float, 4 > L2NormalizationNonDefaultEpsilonTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
LayerTestResult< uint8_t, 4 > L2Normalization4dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)