arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo& input,
                                                    const TensorInfo& output,
                                                    const Convolution2dDescriptor& descriptor,
                                                    const TensorInfo& weights,
                                                    const Optional<TensorInfo>& biases,
                                                    bool isFastMathEnabled,
                                                    const ActivationDescriptor* activationDescriptor)
{
    const arm_compute::TensorInfo aclInputInfo  = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
    arm_compute::TensorInfo aclWeightsInfo      = BuildArmComputeTensorInfo(weights, descriptor.m_DataLayout);
    aclWeightsInfo.set_are_values_constant(weights.IsConstant());

    const arm_compute::Size2D aclDilationInfo = BuildArmComputeSize2D(descriptor.m_DilationX,
                                                                      descriptor.m_DilationY);

    arm_compute::TensorInfo aclBiasesInfo;
    arm_compute::TensorInfo* optionalAclBiasesInfo = nullptr;

    if (descriptor.m_BiasEnabled)
    {
        if (!biases.has_value())
        {
            return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR,
                                       "ArmNN ClConvolution2dWorkload has empty bias value."};
        }
        if (!biases.value().IsConstant())
        {
            return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR,
                                       "ArmNN ClDepthwiseConv2dWorkload does not support non constant bias."};
        }
        aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
        aclBiasesInfo.set_are_values_constant(biases.value().IsConstant());
        optionalAclBiasesInfo = &aclBiasesInfo;
    }

    arm_compute::PadStrideInfo layerInfo = BuildArmComputePadStrideInfo(descriptor);

    const arm_compute::ActivationLayerInfo activationInfo =
        ConvertActivationDescriptorToAclActivationLayerInfo(activationDescriptor);

    return arm_compute::CLConvolutionLayer::validate(&aclInputInfo,
                                                     &aclWeightsInfo,
                                                     optionalAclBiasesInfo,
                                                     &aclOutputInfo,
                                                     layerInfo,
                                                     arm_compute::WeightsInfo(),
                                                     aclDilationInfo,
                                                     activationInfo,
                                                     isFastMathEnabled);
}
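// Editor's sketch (not part of ClConvolution2dWorkload.cpp): a minimal illustration of how the
// validate helper above might be called to pre-check a convolution configuration before a GPU
// workload is created. The tensor shapes, data type, and descriptor values are assumptions chosen
// for this example only.
namespace
{
bool SketchIsConv2dSupportedOnGpu()
{
    armnn::TensorInfo input({1, 3, 224, 224}, armnn::DataType::Float32);
    armnn::TensorInfo weights({16, 3, 3, 3}, armnn::DataType::Float32);
    weights.SetConstant(true);                       // weights are expected to be constant
    armnn::TensorInfo biases({16}, armnn::DataType::Float32);
    biases.SetConstant(true);                        // a non-constant bias is rejected by validate
    armnn::TensorInfo output({1, 16, 222, 222}, armnn::DataType::Float32);

    armnn::Convolution2dDescriptor conv2dDesc;
    conv2dDesc.m_StrideX     = 1;
    conv2dDesc.m_StrideY     = 1;
    conv2dDesc.m_BiasEnabled = true;
    conv2dDesc.m_DataLayout  = armnn::DataLayout::NCHW;

    arm_compute::Status status =
        ClConvolution2dWorkloadValidate(input,
                                        output,
                                        conv2dDesc,
                                        weights,
                                        armnn::Optional<armnn::TensorInfo>(biases),
                                        /*isFastMathEnabled=*/ false,
                                        /*activationDescriptor=*/ nullptr);
    return status.error_code() == arm_compute::ErrorCode::OK;
}
} // anonymous namespace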
ClConvolution2dWorkload::ClConvolution2dWorkload(const Convolution2dQueueDescriptor& descriptor,
                                                 const WorkloadInfo& info,
                                                 std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager,
                                                 const arm_compute::CLCompileContext& clCompileContext,
                                                 const bool isFastMathEnabled)
    : ClBaseWorkload<Convolution2dQueueDescriptor>(descriptor, info)
    , m_ConvolutionLayer(memoryManager)
{
    const arm_compute::Size2D aclDilationInfo = BuildArmComputeSize2D(m_Data.m_Parameters.m_DilationX,
                                                                      m_Data.m_Parameters.m_DilationY);

    uint32_t numInputs = m_Data.m_Parameters.m_BiasEnabled ? 3 : 2;
    m_Data.ValidateInputsOutputs("ClConvolution2dWorkload", numInputs, 1);

    arm_compute::ICLTensor& input   = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ICLTensor& output  = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
    arm_compute::ICLTensor& weights = static_cast<IClTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
    weights.info()->set_are_values_constant(info.m_InputTensorInfos[1].IsConstant());

    if (m_Data.m_Parameters.m_BiasEnabled)
    {
        arm_compute::ICLTensor& bias = static_cast<IClTensorHandle*>(m_Data.m_Inputs[2])->GetTensor();
        bias.info()->set_are_values_constant(info.m_InputTensorInfos[2].IsConstant());
        ARMNN_ASSERT_MSG(info.m_InputTensorInfos[2].IsConstant() == true,
                         "The bias tensor must be constant.");
        m_BiasProxy = std::make_unique<ICLTensorProxy>(&bias);
    }

    // Wrap the CL tensors in proxies so the backing tensors can be replaced without reconfiguring the layer.
    m_InputProxy   = std::make_unique<ICLTensorProxy>(&input);
    m_OutputProxy  = std::make_unique<ICLTensorProxy>(&output);
    m_WeightsProxy = std::make_unique<ICLTensorProxy>(&weights);

    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
    input.info()->set_data_layout(aclDataLayout);
    output.info()->set_data_layout(aclDataLayout);
    weights.info()->set_data_layout(aclDataLayout);

    arm_compute::PadStrideInfo padStrideInfo = BuildArmComputePadStrideInfo(m_Data.m_Parameters);

    const arm_compute::ActivationLayerInfo activationInfo =
        ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);

    m_ConvolutionLayer.configure(clCompileContext,
                                 m_InputProxy.get(),
                                 m_WeightsProxy.get(),
                                 m_BiasProxy.get(),
                                 m_OutputProxy.get(),
                                 padStrideInfo,
                                 arm_compute::WeightsInfo(),
                                 aclDilationInfo,
                                 activationInfo,
                                 isFastMathEnabled);

    m_ConvolutionMethod =
        m_ConvolutionLayer.get_convolution_method(input.info(),
                                                  weights.info(),
                                                  output.info(),
                                                  padStrideInfo,
                                                  arm_compute::WeightsInfo(),
                                                  activationInfo,
                                                  arm_compute::CLScheduler::get().target(),
                                                  aclDilationInfo,
                                                  isFastMathEnabled);
}
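// Editor's sketch (not part of ClConvolution2dWorkload.cpp): m_ConvolutionMethod above records which
// algorithm the Compute Library selects (e.g. GEMM, DIRECT, WINOGRAD, FFT). The same query can be made
// directly against arm_compute::CLConvolutionLayer; the ACL tensor shapes (ordered W, H, C, N) and the
// GPU target below are illustrative assumptions.
namespace
{
arm_compute::ConvolutionMethod SketchQueryClConvolutionMethod()
{
    const arm_compute::TensorInfo input(arm_compute::TensorShape(224U, 224U, 3U, 1U), 1,
                                        arm_compute::DataType::F32);
    const arm_compute::TensorInfo weights(arm_compute::TensorShape(3U, 3U, 3U, 16U), 1,
                                          arm_compute::DataType::F32);
    const arm_compute::TensorInfo output(arm_compute::TensorShape(222U, 222U, 16U, 1U), 1,
                                         arm_compute::DataType::F32);

    const arm_compute::PadStrideInfo convInfo(1, 1, 0, 0);   // stride 1x1, no padding

    return arm_compute::CLConvolutionLayer::get_convolution_method(&input,
                                                                   &weights,
                                                                   &output,
                                                                   convInfo,
                                                                   arm_compute::WeightsInfo(),
                                                                   arm_compute::ActivationLayerInfo(),
                                                                   arm_compute::GPUTarget::BIFROST,
                                                                   arm_compute::Size2D(1U, 1U),
                                                                   /*enable_fast_math=*/ false);
}
} // anonymous namespace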