#include <arm_compute/runtime/NEON/functions/NEPReluLayer.h>
// NeonPreluWorkloadValidate: translate the armnn TensorInfos into Arm Compute Library
// TensorInfos and delegate the support check to NEPReluLayer::validate.
const arm_compute::TensorInfo aclInput  = armcomputetensorutils::BuildArmComputeTensorInfo(input);
const arm_compute::TensorInfo aclAlpha  = armcomputetensorutils::BuildArmComputeTensorInfo(alpha);
const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);

return arm_compute::NEPReluLayer::validate(&aclInput,
                                           &aclAlpha,
                                           &aclOutput);
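For context, a minimal usage sketch of this validation path. The tensor shapes, data type, and variable names below are illustrative assumptions, not values taken from the listing; only NeonPreluWorkloadValidate itself comes from the code above.

#include <armnn/Tensor.hpp>

// Illustrative tensor infos only; real callers pass the infos attached to the layer.
armnn::TensorInfo inputInfo({ 1, 4, 4, 3 }, armnn::DataType::Float32);
armnn::TensorInfo alphaInfo({ 1, 1, 1, 3 }, armnn::DataType::Float32);
armnn::TensorInfo outputInfo({ 1, 4, 4, 3 }, armnn::DataType::Float32);

arm_compute::Status aclStatus = NeonPreluWorkloadValidate(inputInfo, alphaInfo, outputInfo);
bool supported = aclStatus.error_code() == arm_compute::ErrorCode::OK;
// If not supported, aclStatus.error_description() explains why (e.g. an unsupported data type).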
arm_compute::ITensor& input  = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ITensor& alpha  = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
auto layer = std::make_unique<arm_compute::NEPReluLayer>();
layer->configure(&input, &alpha, &output);

m_PreluLayer.reset(layer.release());
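The constructor only configures the Neon function; running it is deferred to Execute(). A minimal sketch of the matching override, assuming the usual ArmNN Neon workload pattern (the profiling label string is an assumption; the macro and Execute() declaration are the ones referenced below):

void NeonPreluWorkload::Execute() const
{
    // Scoped profiling event for this workload; label string assumed.
    ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonPreluWorkload_Execute");
    // Run the NEPReluLayer configured in the constructor.
    m_PreluLayer->run();
}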
Symbols referenced in this listing:

ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID(label)
    Creates a profiling event that uses GetGuid() and GetName() from the calling class.
void Execute() const override
NeonPreluWorkload(const PreluQueueDescriptor& descriptor, const WorkloadInfo& info)
arm_compute::Status NeonPreluWorkloadValidate(const TensorInfo& input, const TensorInfo& alpha, const TensorInfo& output)
std::vector<ITensorHandle*> m_Inputs
std::vector<ITensorHandle*> m_Outputs
void ValidateInputsOutputs(const std::string& descName, unsigned int numExpectedIn, unsigned int numExpectedOut) const
WorkloadInfo
    Contains information about TensorInfos of a layer.
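Pulling the referenced declarations together, a sketch of what the workload's class declaration could look like. The base class name and header layout are assumptions based on the common ArmNN Neon workload pattern; the constructor, Execute(), and m_PreluLayer come from the listing above.

// Assumed base class; some ArmNN versions use BaseWorkload<PreluQueueDescriptor> instead.
class NeonPreluWorkload : public NeonBaseWorkload<PreluQueueDescriptor>
{
public:
    NeonPreluWorkload(const PreluQueueDescriptor& descriptor, const WorkloadInfo& info);
    void Execute() const override;

private:
    // Configured in the constructor, run in Execute().
    std::unique_ptr<arm_compute::IFunction> m_PreluLayer;
};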