// NOTE(review): this file is a garbled extraction — each line carries an embedded
// original-file line number and many original lines are elided. Comments below are
// grounded only in the visible fragments.
// TensorData pairs a tensor's metadata (TensorInfo: shape/type/quantization) with
// its element values. (Declared in a template context elsewhere — T is not visible here.)
23 #include <boost/test/unit_test.hpp> 33 using TensorData = std::pair<armnn::TensorInfo, std::vector<T>>;
36 void VerifyInputTensorData(
const TensorData<T>& data,
const std::string& tensorName)
38 if (data.first.GetNumElements() > data.second.size())
41 std::to_string(data.first.GetNumElements()) +
"but got " + std::to_string(data.second.size()));
// TransposeConvolution2dTestImpl (fragment): creates and executes a transpose-
// convolution workload for the given input/weights (and, per later lines, an
// optional bias), writing the result into `output`. The workload-factory,
// memory-manager, descriptor and bias parameters are elided in this extract —
// only the tensor-data parameters below are visible.
45 template<
typename T,
// BT: bias element type — allowed to differ from T (e.g. Signed32 bias with a
// quantized T, per the instantiations later in the file).
typename BT>
50 const TensorData<T>& input,
// Out-parameter: populated with the executed workload's results (see line 109 below).
51 TensorData<T>& output,
52 const TensorData<T>& weights,
56 using namespace armnn;
58 VerifyInputTensorData(input,
"input");
59 VerifyInputTensorData(weights,
"biases");
// Optional bias: the enclosing condition and the error branch are elided in this
// extract — presumably "bias enabled but no data provided" throws; TODO confirm.
63 if (!biases.has_value())
67 VerifyInputTensorData(biases.value(),
"biases");
// Attach the constant weights to the queue descriptor. The descriptor holds a
// non-owning pointer; weightsTensor (declared on an elided line) must stay alive
// until the workload has executed.
75 queueDescriptor.
m_Weight = &weightsTensor;
// Bias handle is only materialised when bias data was supplied; the descriptor's
// m_Bias likewise holds a non-owning pointer into this unique_ptr.
79 std::unique_ptr<ScopedTensorHandle> biasesTensor;
83 biasesTensor = std::make_unique<ScopedTensorHandle>(biases.value().first);
84 queueDescriptor.
m_Bias = biasesTensor.get();
// Create backend tensor handles for input/output and register them with the
// workload info/descriptor.
90 std::unique_ptr<ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(input.first);
91 std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(output.first);
95 AddInputToWorkload(queueDescriptor, workloadInfo, input.first, inputHandle.get());
96 AddOutputToWorkload(queueDescriptor, workloadInfo, output.first, outputHandle.get());
// Workload creation call is elided (per the prose tail, this is
// IWorkloadFactory::CreateTransposeConvolution2d — TODO confirm).
98 std::unique_ptr<armnn::IWorkload> workload =
101 inputHandle->Allocate();
102 outputHandle->Allocate();
106 ExecuteWorkload(*workload, memoryManager);
// Pre-size the output buffer with value-initialized elements; presumably the
// results are then copied out of outputHandle on an elided line (see
// CopyDataFromITensorHandle in the prose tail) — TODO confirm.
109 output.second = std::vector<T>(output.first.GetNumElements(), T());
// TransposeConvolution2dTest (fragment): quantization-aware driver that converts
// float fixtures into TensorData<T>/TensorData<BT>, runs
// TransposeConvolution2dTestImpl, and packages actual vs. expected results.
// The workload-factory parameters, TensorInfo construction and descriptor are
// elided in this extract.
113 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
typename T = armnn::ResolveType<ArmnnType>>
120 const std::vector<float>& inputData,
122 const std::vector<float>& expectedOutputData,
124 const std::vector<float>& weightsData,
126 const std::vector<float>& biasesData)
128 using namespace armnn;
// For quantized element types, fixed quantization parameters are applied
// (scale 0.50, offset 10); presumably set on the TensorInfos via the elided
// SetQuantizationScale/SetQuantizationOffset lines — TODO confirm.
131 if (armnn::IsQuantizedType<T>())
133 constexpr
float qScale = 0.50f;
134 constexpr int32_t qOffset = 10;
// Build the (info, values) pairs, quantizing the float fixtures per tensor.
150 TensorData<T> input =
157 TensorData<T> weights =
160 armnnUtils::QuantizedVector<T>(weightsData,
170 TensorData<BT> biases =
173 armnnUtils::QuantizedVector<BT>(biasesData,
// Output starts empty; TransposeConvolution2dTestImpl fills output.second.
182 TensorData<T> output = { outputInfo, {} };
185 TransposeConvolution2dTestImpl(workloadFactory,
// Package actual and expected results; the expected values are quantized with
// the output tensor's own quantization parameters.
196 testResult.output = MakeTensor<T, 4>(outputInfo, output.second);
197 testResult.outputExpected = MakeTensor<T, 4>(outputInfo,
198 armnnUtils::QuantizedVector<T>(expectedOutputData,
199 outputInfo.GetQuantizationScale(),
200 outputInfo.GetQuantizationOffset()));
207 std::vector<T>& inputData,
209 std::vector<T>& outputData,
211 std::vector<T>& weightsData)
213 PermuteTensorNchwToNhwc<T>(inputInfo, inputData);
214 PermuteTensorNchwToNhwc<T>(outputInfo, outputData);
215 PermuteTensorNchwToNhwc<T>(weightsInfo, weightsData);
// SimpleTransposeConvolution2dTest (fragment): 1x1x3x3 input, 3x3 kernel,
// producing a 1x1x5x5 output (stride values mostly elided; m_StrideY = 1 is
// visible). Optionally adds a bias of 1.f and swizzles fixtures to NHWC before
// delegating to TransposeConvolution2dTest. Input/weights values, the input
// TensorInfo and most of the descriptor setup are elided in this extract.
220 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
typename T>
228 using namespace armnn;
// Fixture dimensions (NCHW): single batch/channel, square tensors.
230 constexpr
unsigned int batches = 1u;
231 constexpr
unsigned int channels = 1u;
233 constexpr
unsigned int wInput = 3u;
234 constexpr
unsigned int hInput = wInput;
236 constexpr
unsigned int wOutput = 5u;
237 constexpr
unsigned int hOutput = wOutput;
239 constexpr
unsigned int wWeights = 3u;
240 constexpr
unsigned int hWeights = wWeights;
242 TensorShape inputShape = { batches, channels, hInput, wInput };
243 TensorShape outputShape = { batches, channels, hOutput, wOutput };
244 TensorShape weightsShape = { batches, channels, hWeights, wWeights };
// Note: inputInfo construction is elided (original line 246).
247 TensorInfo outputInfo(outputShape, ArmnnType);
248 TensorInfo weightsInfo(weightsShape, ArmnnType);
249 TensorInfo biasesInfo({ channels }, ArmnnBType);
251 std::vector<float> inputData =
258 std::vector<float> weightsData =
265 std::vector<float> biasesData = { 1.f };
// Pre-computed expected output for the bias-free case (5x5, row-major).
267 std::vector<float> expectedOutputData =
269 1.f, 3.f, 6.f, 5.f, 3.f,
270 5.f, 12.f, 21.f, 16.f, 9.f,
271 12.f, 27.f, 45.f, 33.f, 18.f,
272 11.f, 24.f, 39.f, 28.f, 15.f,
273 7.f, 15.f, 24.f, 17.f, 9.f
// When bias is enabled (guard elided), fold the scalar bias into every
// expected element.
279 std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
280 [&](
float f) ->
float {
return f + biasesData[0]; });
285 descriptor.m_StrideY = 1;
286 descriptor.m_BiasEnabled = biasEnabled;
287 descriptor.m_DataLayout = layout;
// For NHWC layouts (guard elided — presumably layout == DataLayout::NHWC),
// permute fixtures from their NCHW authoring layout.
292 SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
295 return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
// PaddedTransposeConvolution2dTest (fragment): 4x4 input, 3x3 kernel, 2x2
// output with padding 2 on right/top/bottom (m_PadLeft is elided; presumably 2
// as well — TODO confirm) and stride 1. Input/weights/expected values are
// elided in this extract.
309 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
typename T>
317 using namespace armnn;
// Fixture dimensions (NCHW): single batch/channel, square tensors.
319 constexpr
unsigned int batches = 1u;
320 constexpr
unsigned int channels = 1u;
322 constexpr
unsigned int wInput = 4u;
323 constexpr
unsigned int hInput = wInput;
325 constexpr
unsigned int wOutput = 2u;
326 constexpr
unsigned int hOutput = wOutput;
328 constexpr
unsigned int wWeights = 3u;
329 constexpr
unsigned int hWeights = wWeights;
331 TensorShape inputShape = { batches, channels, hInput, wInput };
332 TensorShape outputShape = { batches, channels, hOutput, wOutput };
333 TensorShape weightsShape = { batches, channels, hWeights, wWeights };
// Note: inputInfo construction is elided (original line 335).
336 TensorInfo outputInfo(outputShape, ArmnnType);
337 TensorInfo weightsInfo(weightsShape, ArmnnType);
338 TensorInfo biasesInfo({ channels }, ArmnnBType);
340 std::vector<float> inputData =
348 std::vector<float> weightsData =
355 std::vector<float> biasesData = { 1.f };
357 std::vector<float> expectedOutputData =
// When bias is enabled (guard elided), fold the scalar bias into every
// expected element.
366 std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
367 [&](
float f) ->
float {
return f + biasesData[0]; });
372 descriptor.m_PadRight = 2;
373 descriptor.m_PadTop = 2;
374 descriptor.m_PadBottom = 2;
375 descriptor.m_StrideX = 1;
376 descriptor.m_StrideY = 1;
377 descriptor.m_BiasEnabled = biasEnabled;
378 descriptor.m_DataLayout = layout;
// For NHWC layouts (guard elided), permute fixtures from NCHW.
383 SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
386 return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
// StridedTransposeConvolution2dTest (fragment): 3x3 input, 3x3 kernel,
// 7x7 output with stride 2 (m_StrideY = 2 visible; m_StrideX is elided,
// presumably 2 as well — TODO confirm). Input/weights values and part of the
// descriptor setup are elided in this extract.
400 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
typename T>
408 using namespace armnn;
// Fixture dimensions (NCHW): single batch/channel, square tensors.
410 constexpr
unsigned int batches = 1u;
411 constexpr
unsigned int channels = 1u;
413 constexpr
unsigned int wInput = 3u;
414 constexpr
unsigned int hInput = wInput;
416 constexpr
unsigned int wOutput = 7u;
417 constexpr
unsigned int hOutput = wOutput;
419 constexpr
unsigned int wWeights = 3u;
420 constexpr
unsigned int hWeights = wWeights;
422 TensorShape inputShape = { batches, channels, hInput, wInput };
423 TensorShape outputShape = { batches, channels, hOutput, wOutput };
424 TensorShape weightsShape = { batches, channels, hWeights, wWeights };
// Note: inputInfo construction is elided (original line 426).
427 TensorInfo outputInfo(outputShape, ArmnnType);
428 TensorInfo weightsInfo(weightsShape, ArmnnType);
429 TensorInfo biasesInfo({ channels }, ArmnnBType);
431 std::vector<float> inputData =
438 std::vector<float> weightsData =
445 std::vector<float> biasesData = { 1.f };
// Pre-computed expected output for the bias-free case (7x7, row-major).
447 std::vector<float> expectedOutputData =
449 1.f, 2.f, 4.f, 2.f, 4.f, 2.f, 3.f,
450 4.f, 5.f, 10.f, 5.f, 10.f, 5.f, 6.f,
451 8.f, 10.f, 20.f, 10.f, 20.f, 10.f, 12.f,
452 4.f, 5.f, 10.f, 5.f, 10.f, 5.f, 6.f,
453 8.f, 10.f, 20.f, 10.f, 20.f, 10.f, 12.f,
454 4.f, 5.f, 10.f, 5.f, 10.f, 5.f, 6.f,
455 7.f, 8.f, 16.f, 8.f, 16.f, 8.f, 9.f
// When bias is enabled (guard elided), fold the scalar bias into every
// expected element.
461 std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
462 [&](
float f) ->
float {
return f + biasesData[0]; });
467 descriptor.m_StrideY = 2;
468 descriptor.m_BiasEnabled = biasEnabled;
469 descriptor.m_DataLayout = layout;
// For NHWC layouts (guard elided), permute fixtures from NCHW.
474 SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
477 return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
// MultiChannelTransposeConvolution2dTest (fragment): two-output-channel case —
// per-channel biases {-1.5, -2.0} and a 2-channel 5x5 expected output (the two
// 5x5 planes below). Shapes, input/weights values, the input TensorInfo and the
// descriptor are all elided in this extract.
491 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
typename T>
498 using namespace armnn;
508 TensorInfo outputInfo(outputShape, ArmnnType);
509 TensorInfo weightsInfo(weightsShape, ArmnnType);
510 TensorInfo biasesInfo(biasesShape, ArmnnBType);
512 std::vector<float> inputData =
518 std::vector<float> weightsData =
// One bias per output channel.
529 std::vector<float> biasesData = { -1.5f, -2.0f };
// Expected output: channel 0 plane (bias -1.5 already folded in), then
// channel 1 plane (bias -2.0 folded in).
531 std::vector<float> expectedOutputData =
533 -0.5f, 1.5f, 5.5f, 4.5f, 8.5f,
534 5.5f, 7.5f, 23.5f, 16.5f, 20.5f,
535 14.5f, 22.5f, 60.5f, 40.5f, 52.5f,
536 19.5f, 25.5f, 59.5f, 34.5f, 42.5f,
537 37.5f, 43.5f, 101.5f, 58.5f, 66.5f,
539 0.0f, 2.0f, 8.0f, 6.0f, 10.0f,
540 6.0f, 8.0f, 26.0f, 18.0f, 22.0f,
541 18.0f, 26.0f, 70.0f, 46.0f, 58.0f,
542 22.0f, 28.0f, 66.0f, 38.0f, 46.0f,
543 40.0f, 46.0f, 108.0f, 62.0f, 70.0f
// For NHWC layouts (guard elided), permute fixtures from NCHW.
555 SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
558 return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
// TransposeConvolution2dPerAxisQuantTest (fragment; signature elided — per the
// prose tail it returns LayerTestResult<uint8_t, 4>): exercises per-axis
// (per-output-channel) quantization — the kernel and bias carry a vector of
// scales over quantization dimension 0, while input/output use a single
// scale/offset (0.50 / 10).
578 using namespace armnn;
584 TensorInfo inputInfo ({ 1, 1, 2, 2 }, inputType, 0.50f, 10);
585 TensorInfo outputInfo({ 1, 2, 5, 5 }, inputType, 0.50f, 10);
// Per-output-channel kernel scales; quantization axis 0 matches the kernel's
// leading (output-channel) dimension.
587 const std::vector<float> quantScales{ 0.25f, 0.5f };
588 constexpr
unsigned int quantDimension = 0;
590 TensorInfo kernelInfo({ 2, 1, 3, 3 }, kernelType, quantScales, quantDimension);
// Bias scales (0.125, 0.25) equal inputScale * kernelScale per channel.
592 const std::vector<float> biasQuantScales{ 0.125f, 0.25f };
593 TensorInfo biasInfo({ 2 }, biasType, biasQuantScales, quantDimension);
595 std::vector<uint8_t> inputData =
601 std::vector<int8_t> kernelData =
612 std::vector<int32_t> biasData = { -12, -8 };
// Expected quantized output (several rows elided in this extract).
614 std::vector<uint8_t> expectedOutputData =
618 39, 55, 131, 91, 115,
620 85, 97, 213, 127, 143,
624 46, 62, 150, 102, 126,
625 54, 66, 142, 86, 102,
626 90, 102, 226, 134, 150
// Standard workload wiring: tensor handles, constant weight/bias handles
// (declarations elided), then allocate and execute.
642 std::unique_ptr<ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputInfo);
643 std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputInfo);
654 queueDescriptor.m_Weight = &weightTensor;
655 queueDescriptor.m_Bias = &biasTensor;
657 AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
658 AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());
661 inputHandle->Allocate();
662 outputHandle->Allocate();
666 ExecuteWorkload(*workload, memoryManager);
// Package the expected results; the actual-output copy-back is elided.
670 ret.
outputExpected = MakeTensor<uint8_t, 4>(outputInfo, expectedOutputData);
//
// Explicit template instantiations (fragment): each test is instantiated for
// Float32 and for the quantized types QAsymmS8/QAsymmU8/QSymmS16, the latter
// three with Signed32 biases. Parameter lists and several "template
// LayerTestResult<...>" return-type lines are elided in this extract.
//
680 SimpleTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
688 SimpleTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
696 SimpleTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
704 SimpleTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
712 PaddedTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
719 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
720 PaddedTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
727 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
728 PaddedTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
735 template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
736 PaddedTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
743 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
744 StridedTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
751 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
752 StridedTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
759 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
760 StridedTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
767 template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
768 StridedTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
775 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
776 MultiChannelTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
782 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
783 MultiChannelTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
789 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
790 MultiChannelTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
796 template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
797 MultiChannelTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
bool m_BiasEnabled
Enable/disable bias.
boost::multi_array< T, n > outputExpected
typename ResolveTypeImpl< DT >::Type ResolveType
LayerTestResult< T, 4 > SimpleTransposeConvolution2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
Copyright (c) 2021 ARM Limited and Contributors.
void IgnoreUnused(Ts &&...)
LayerDescriptor m_Parameters
LayerTestResult< T, 4 > MultiChannelTransposeConvolution2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
void PermuteTensorNchwToNhwc(armnn::TensorInfo &tensorInfo, std::vector< T > &tensorData)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
int32_t GetQuantizationOffset() const
float GetQuantizationScale() const
LayerTestResult< T, 4 > StridedTransposeConvolution2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
void SetQuantizationScale(float scale)
void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
boost::multi_array< T, n > output
uint32_t m_PadLeft
Padding left value in the width dimension.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
virtual std::unique_ptr< IWorkload > CreateTransposeConvolution2d(const TransposeConvolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
LayerTestResult< uint8_t, 4 > TransposeConvolution2dPerAxisQuantTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
Contains information about inputs and outputs to a layer.
void SetQuantizationOffset(int32_t offset)
const ConstTensorHandle * m_Weight
const ConstTensorHandle * m_Bias
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
LayerTestResult< T, 4 > PaddedTransposeConvolution2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)