ArmNN 25.11
NeonLstmFloatWorkload.hpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn/Descriptors.hpp>
#include <armnn/LstmParams.hpp>
#include <armnn/backends/Workload.hpp>
#include <armnn/backends/WorkloadData.hpp>

#include "arm_compute/graph/Tensor.h"
#include "arm_compute/runtime/NEON/functions/NELSTMLayer.h"

namespace armnn
{

class NeonLstmFloatWorkload : public FloatWorkload<LstmQueueDescriptor>
{
public:
    NeonLstmFloatWorkload(const LstmQueueDescriptor& descriptor, const WorkloadInfo& info);
    virtual void Execute() const override;
    // Replace input tensor handle with the given TensorHandle
    void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;

    // Replace output tensor handle with the given TensorHandle
    void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
private:
    mutable arm_compute::NELSTMLayer m_LstmLayer;

    std::unique_ptr<arm_compute::Tensor> m_InputToInputWeightsTensor;
    std::unique_ptr<arm_compute::Tensor> m_InputToForgetWeightsTensor;
    std::unique_ptr<arm_compute::Tensor> m_InputToCellWeightsTensor;
    std::unique_ptr<arm_compute::Tensor> m_InputToOutputWeightsTensor;
    std::unique_ptr<arm_compute::Tensor> m_RecurrentToInputWeightsTensor;
    std::unique_ptr<arm_compute::Tensor> m_RecurrentToForgetWeightsTensor;
    std::unique_ptr<arm_compute::Tensor> m_RecurrentToCellWeightsTensor;
    std::unique_ptr<arm_compute::Tensor> m_RecurrentToOutputWeightsTensor;
    std::unique_ptr<arm_compute::Tensor> m_CellToInputWeightsTensor;
    std::unique_ptr<arm_compute::Tensor> m_CellToForgetWeightsTensor;
    std::unique_ptr<arm_compute::Tensor> m_CellToOutputWeightsTensor;
    std::unique_ptr<arm_compute::Tensor> m_InputGateBiasTensor;
    std::unique_ptr<arm_compute::Tensor> m_ForgetGateBiasTensor;
    std::unique_ptr<arm_compute::Tensor> m_CellBiasTensor;
    std::unique_ptr<arm_compute::Tensor> m_OutputGateBiasTensor;
    std::unique_ptr<arm_compute::Tensor> m_ProjectionWeightsTensor;
    std::unique_ptr<arm_compute::Tensor> m_ProjectionBiasTensor;

    std::unique_ptr<arm_compute::Tensor> m_ScratchBuffer;

    std::unique_ptr<arm_compute::Tensor> m_InputLayerNormWeightsTensor;
    std::unique_ptr<arm_compute::Tensor> m_ForgetLayerNormWeightsTensor;
    std::unique_ptr<arm_compute::Tensor> m_CellLayerNormWeightsTensor;
    std::unique_ptr<arm_compute::Tensor> m_OutputLayerNormWeightsTensor;

    void FreeUnusedTensors();
    virtual void Reconfigure();
};

arm_compute::Status NeonLstmFloatWorkloadValidate(const TensorInfo& input, const TensorInfo& outputStateIn,
                                                  const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
                                                  const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
                                                  const TensorInfo& output, const LstmDescriptor& descriptor,
                                                  const LstmInputParamsInfo& paramsInfo);

} //namespace armnn
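The declarations above are normally consumed by the Neon backend rather than called directly, but a minimal, hedged sketch of the usual pattern is shown below: validate the LSTM configuration with the Arm Compute Library first, and only construct and run the workload when validation succeeds. It assumes the caller has already populated the armnn::TensorInfo objects (input, outputStateIn, cellStateIn, scratchBuffer, outputStateOut, cellStateOut, output), an armnn::LstmDescriptor (lstmDesc), an armnn::LstmInputParamsInfo (paramsInfo), plus an armnn::LstmQueueDescriptor (queueDescriptor) and armnn::WorkloadInfo (workloadInfo); none of that setup is shown, and all variable names are illustrative only.

#include <memory>

// Sketch only: all inputs below are assumed to be populated elsewhere.
// Ask the Arm Compute Library whether this LSTM configuration is supported.
arm_compute::Status aclStatus = armnn::NeonLstmFloatWorkloadValidate(
    input, outputStateIn, cellStateIn, scratchBuffer,
    outputStateOut, cellStateOut, output, lstmDesc, paramsInfo);

if (aclStatus.error_code() == arm_compute::ErrorCode::OK)
{
    // Construct the workload from an already-populated queue descriptor and
    // workload info, then run it; Execute() drives the wrapped NELSTMLayer.
    auto workload = std::make_unique<armnn::NeonLstmFloatWorkload>(queueDescriptor, workloadInfo);
    workload->Execute();
}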