using namespace armcomputetensorutils;
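
// Pre-execution validation: converts the ArmNN tensor infos and descriptor into their Compute
// Library equivalents and asks NEDeconvolutionLayer whether the configuration is supported.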
arm_compute::Status NeonTransposeConvolution2dWorkloadValidate(const TensorInfo& input,
                                                               const TensorInfo& output,
                                                               const TransposeConvolution2dDescriptor& descriptor,
                                                               const TensorInfo& weights,
                                                               const Optional<TensorInfo>& biases)
{
    const arm_compute::TensorInfo aclInputInfo   = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
    const arm_compute::TensorInfo aclOutputInfo  = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
    const arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weights, descriptor.m_DataLayout);

    arm_compute::TensorInfo aclBiasesInfo;
    arm_compute::TensorInfo* optionalAclBiasesInfo = nullptr;

    if (descriptor.m_BiasEnabled)
    {
        ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(biases.has_value(),
            "NeonTransposeConvolution2dWorkload: Bias was enabled in the descriptor but no value was supplied.");

        aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
        optionalAclBiasesInfo = &aclBiasesInfo;
    }

    arm_compute::PadStrideInfo layerInfo = BuildArmComputePadStrideInfo(descriptor);
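
    // Let the Compute Library make the final support decision for this configuration.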
    return arm_compute::NEDeconvolutionLayer::validate(&aclInputInfo,
                                                       &aclWeightsInfo,
                                                       optionalAclBiasesInfo,
                                                       &aclOutputInfo,
                                                       layerInfo);
}
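
// The workload constructor binds the ArmNN tensor handles, weights and parameters to an
// arm_compute::NEDeconvolutionLayer instance.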
NeonTransposeConvolution2dWorkload::NeonTransposeConvolution2dWorkload(
    const TransposeConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info,
    std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
    : NeonBaseWorkload<TransposeConvolution2dQueueDescriptor>(descriptor, info)
{
    m_Data.ValidateInputsOutputs("NeonTransposeConvolution2dWorkload", 1, 1);
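
    // Fetch the backing Compute Library tensors for the workload's single input and output.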
    arm_compute::ITensor& input  = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
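
    // Propagate the requested data layout (NCHW or NHWC) to the ACL tensor infos.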
    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
    input.info()->set_data_layout(aclDataLayout);
    output.info()->set_data_layout(aclDataLayout);
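
    // Create an ACL tensor describing the constant weights.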
    m_KernelTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_KernelTensor, m_Data.m_Weight->GetTensorInfo(), m_Data.m_Parameters.m_DataLayout);
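
    // A bias tensor is only created when the descriptor enables it.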
    if (m_Data.m_Parameters.m_BiasEnabled)
    {
        m_BiasTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), m_Data.m_Parameters.m_DataLayout);
    }
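
    // Translate the ArmNN stride and padding settings into ACL's PadStrideInfo.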
    arm_compute::PadStrideInfo padStrideInfo = BuildArmComputePadStrideInfo(m_Data.m_Parameters);
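
    // Create the deconvolution layer against the backend's on-demand memory manager and
    // configure it with the prepared tensors.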
    m_Layer = std::make_unique<arm_compute::NEDeconvolutionLayer>(memoryManager);
    m_Layer->configure(&input, m_KernelTensor.get(), m_BiasTensor.get(), &output, padStrideInfo);
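
    // Copy the constant weight (and optional bias) data into the ACL tensors before preparing the layer.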
    InitializeArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight->GetTensorInfo(), m_Data.m_Weight);

    if (m_Data.m_Parameters.m_BiasEnabled)
    {
        InitializeArmComputeTensorData(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), m_Data.m_Bias);
    }

    m_Layer->prepare();
    FreeUnusedTensors();
}
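
// Releases the staging weight and bias tensors once the configured layer no longer uses their buffers.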
void NeonTransposeConvolution2dWorkload::FreeUnusedTensors()
{
    FreeTensorIfUnused(m_KernelTensor);
    FreeTensorIfUnused(m_BiasTensor);
}