#include <arm_compute/core/Error.h>
#include <arm_compute/runtime/IFunction.h>
#include <arm_compute/runtime/MemoryManagerOnDemand.h>

arm_compute::Status NeonNormalizationWorkloadValidate(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      const NormalizationDescriptor& descriptor);

class NeonNormalizationFloatWorkload : public FloatWorkload<NormalizationQueueDescriptor>
{
public:
    NeonNormalizationFloatWorkload(const NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info,
                                   std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
    virtual void Execute() const override;
    void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
    void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
private:
    std::unique_ptr<arm_compute::IFunction> m_NormalizationLayer;
    virtual void Reconfigure();
};
NeonNormalizationFloatWorkload(const NormalizationQueueDescriptor &descriptor, const WorkloadInfo &info, std::shared_ptr<arm_compute::MemoryManagerOnDemand> &memoryManager)
void ReplaceInputTensorHandle(ITensorHandle *tensorHandle, unsigned int slot) override
void ReplaceOutputTensorHandle(ITensorHandle *tensorHandle, unsigned int slot) override
virtual void Execute() const override
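ReplaceInputTensorHandle and ReplaceOutputTensorHandle let an already-built workload be pointed at different input/output memory without being recreated; after a handle is swapped, the private Reconfigure() hook re-binds the underlying arm_compute::IFunction to the new tensors. The following is only a minimal sketch of that pattern, not the actual implementation; it assumes the workload base class exposes the queue descriptor as m_Data with m_Inputs/m_Outputs vectors (an assumption, not shown in this header):

// Hedged sketch of the handle-replacement pattern; m_Data and its
// m_Inputs vector are assumed to come from the workload base class.
void NeonNormalizationFloatWorkload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle,
                                                              unsigned int slot)
{
    ITensorHandle* backupHandle = m_Data.m_Inputs[slot];
    try
    {
        m_Data.m_Inputs[slot] = tensorHandle; // point the slot at the new memory
        Reconfigure();                        // re-configure m_NormalizationLayer against it
    }
    catch (...)
    {
        m_Data.m_Inputs[slot] = backupHandle; // roll back so the workload stays consistent
        throw;
    }
}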
arm_compute::Status NeonNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor)
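NeonNormalizationWorkloadValidate follows the usual arm_compute validate convention: it returns an arm_compute::Status indicating whether the NEON backend can run a normalization layer with the given tensor infos and descriptor, so support can be checked before a workload is built. A small illustrative example, assuming the armnn namespace and that this internal header is reachable on the include path; the shapes and descriptor values below are made up for the example:

#include <armnn/Descriptors.hpp>   // NormalizationDescriptor
#include <armnn/Tensor.hpp>        // TensorInfo, TensorShape
#include <iostream>
// Assumption: the internal backend header is visible to this translation unit.
#include "NeonNormalizationFloatWorkload.hpp"

int main()
{
    using namespace armnn;

    // Illustrative NCHW float32 tensors: 1 batch, 8 channels, 16x16 spatial.
    TensorInfo input (TensorShape({1, 8, 16, 16}), DataType::Float32);
    TensorInfo output(TensorShape({1, 8, 16, 16}), DataType::Float32);

    // Classic local-response-normalisation style parameters (illustrative values only).
    NormalizationDescriptor descriptor;
    descriptor.m_NormChannelType = NormalizationAlgorithmChannel::Across;
    descriptor.m_NormMethodType  = NormalizationAlgorithmMethod::LocalBrightness;
    descriptor.m_NormSize        = 5;
    descriptor.m_Alpha           = 1e-4f;
    descriptor.m_Beta            = 0.75f;
    descriptor.m_K               = 2.0f;
    descriptor.m_DataLayout      = DataLayout::NCHW;

    arm_compute::Status status = NeonNormalizationWorkloadValidate(input, output, descriptor);
    if (status.error_code() == arm_compute::ErrorCode::OK)
    {
        std::cout << "NEON normalization is supported for this configuration\n";
    }
    else
    {
        std::cout << "Not supported: " << status.error_description() << "\n";
    }
    return 0;
}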
WorkloadInfo: contains information about the TensorInfos of a layer.
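In other words, WorkloadInfo is simply the carrier for the input and output TensorInfos that the constructor above uses when configuring the arm_compute function. A minimal sketch, assuming armnn's WorkloadInfo with its m_InputTensorInfos / m_OutputTensorInfos members and the header path noted in the comment:

#include <armnn/Tensor.hpp>
#include <armnn/backends/WorkloadInfo.hpp> // assumed location; may differ between ArmNN versions

int main()
{
    // Illustrative 1x8x16x16 float32 tensors for a single-input, single-output layer.
    armnn::TensorInfo inputInfo (armnn::TensorShape({1, 8, 16, 16}), armnn::DataType::Float32);
    armnn::TensorInfo outputInfo(armnn::TensorShape({1, 8, 16, 16}), armnn::DataType::Float32);

    armnn::WorkloadInfo info;
    info.m_InputTensorInfos.push_back(inputInfo);   // one TensorInfo per input slot
    info.m_OutputTensorInfos.push_back(outputInfo); // one TensorInfo per output slot
    return 0;
}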