Arm NN 24.08: NeonL2NormalizationFloatWorkload.hpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn/backends/Workload.hpp>

#include <arm_compute/core/Error.h>
#include <arm_compute/runtime/IFunction.h>
#include <arm_compute/runtime/MemoryManagerOnDemand.h>

#include <memory>

namespace armnn
{

arm_compute::Status NeonL2NormalizationWorkloadValidate(const TensorInfo& input,
                                                        const TensorInfo& output,
                                                        const L2NormalizationDescriptor& descriptor);

class NeonL2NormalizationFloatWorkload : public FloatWorkload<L2NormalizationQueueDescriptor>
{
public:
    NeonL2NormalizationFloatWorkload(const L2NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info,
                                     std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
    virtual void Execute() const override;
    // Replace input tensor handle with the given TensorHandle
    void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;

    // Replace output tensor handle with the given TensorHandle
    void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;

private:
    std::unique_ptr<arm_compute::IFunction> m_Layer;
    virtual void Reconfigure();
};

} // namespace armnn
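A minimal sketch of how a caller might use the validate helper declared above to check whether the Neon backend can run a given L2 normalization before a workload is created. The tensor shape, data layout, and the wrapper function IsL2NormalizationSupportedOnNeon are illustrative assumptions, not part of Arm NN.

// Illustrative only: asks the Neon backend whether it supports an L2
// normalization over example NHWC float32 tensors. Shapes, layout, and the
// wrapper function name are assumptions made for this sketch.
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include "NeonL2NormalizationFloatWorkload.hpp"

bool IsL2NormalizationSupportedOnNeon()
{
    using namespace armnn;

    // Input and output describe the same shape and data type.
    TensorShape shape({ 1, 4, 4, 3 });
    TensorInfo input(shape, DataType::Float32);
    TensorInfo output(shape, DataType::Float32);

    L2NormalizationDescriptor descriptor;
    descriptor.m_DataLayout = DataLayout::NHWC;

    arm_compute::Status status = NeonL2NormalizationWorkloadValidate(input, output, descriptor);
    return status.error_code() == arm_compute::ErrorCode::OK;
}

In the backend itself this kind of check is typically made by the layer support code before the workload factory constructs a NeonL2NormalizationFloatWorkload; the constructor then configures the underlying arm_compute function held in m_Layer using the same descriptor and the shared memory manager.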