using namespace armcomputetensorutils;
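// The validation helper below (its signature is elided from this excerpt) translates the ArmNN
// tensor infos and descriptor into Arm Compute Library equivalents and defers the actual
// support decision to arm_compute::NEDeconvolutionLayer::validate().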
    const arm_compute::TensorInfo aclInputInfo   = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
    const arm_compute::TensorInfo aclOutputInfo  = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
    const arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weights, descriptor.m_DataLayout);
    arm_compute::TensorInfo aclBiasesInfo;
    arm_compute::TensorInfo* optionalAclBiasesInfo = nullptr;
        // These assignments sit inside a bias-enabled check that is elided from this excerpt
        aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
        optionalAclBiasesInfo = &aclBiasesInfo;
    arm_compute::PadStrideInfo layerInfo = BuildArmComputePadStrideInfo(descriptor);
    return arm_compute::NEDeconvolutionLayer::validate(&aclInputInfo,
                                                        &aclWeightsInfo,
                                                        optionalAclBiasesInfo,
                                                        &aclOutputInfo,
                                                        layerInfo);
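    // Sketch of a typical call site (not part of this file): the returned arm_compute::Status
    // would be inspected before the workload is created. The helper name used below is an
    // assumption, since the function's signature is elided from this excerpt.
    //
    //     arm_compute::Status status =
    //         NeonTransposeConvolution2dWorkloadValidate(input, output, descriptor, weights, biases);
    //     bool supported = (status.error_code() == arm_compute::ErrorCode::OK);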
// NeonTransposeConvolution2dWorkload constructor; the earlier parameters are elided from this excerpt
    std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
    arm_compute::ITensor& input  = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
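    // aclDataLayout is defined on a line elided from this excerpt, presumably derived from the
    // workload parameters' m_DataLayout via an armcomputetensorutils layout-conversion helper.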
    input.info()->set_data_layout(aclDataLayout);
    output.info()->set_data_layout(aclDataLayout);
    m_KernelTensor = std::make_unique<arm_compute::Tensor>();
        // Created only when the layer has a bias; the enclosing check is elided from this excerpt
        m_BiasTensor = std::make_unique<arm_compute::Tensor>();
    arm_compute::PadStrideInfo padStrideInfo = BuildArmComputePadStrideInfo(m_Data.m_Parameters);
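    // Create the Arm Compute deconvolution function and wire in the input/output handles,
    // the kernel and bias tensors, and the padding/stride configuration.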
    m_Layer = std::make_unique<arm_compute::NEDeconvolutionLayer>(memoryManager);
    m_Layer->configure(&input, m_KernelTensor.get(), m_BiasTensor.get(), &output, padStrideInfo);
void NeonTransposeConvolution2dWorkload::FreeUnusedTensors()
{
    FreeTensorIfUnused(m_KernelTensor);
    FreeTensorIfUnused(m_BiasTensor);
}
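// FreeTensorIfUnused is not defined in this excerpt; a minimal sketch of what such a helper
// plausibly does, assuming arm_compute::ITensor::is_used() semantics, would be:
//
//     template <typename Tensor>
//     void FreeTensorIfUnused(std::unique_ptr<Tensor>& tensor)
//     {
//         if (tensor && !tensor->is_used())
//         {
//             tensor.reset();
//         }
//     }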