ArmNN 25.11
ClLstmFloatWorkload.hpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn/Descriptors.hpp>
#include <armnn/LstmParams.hpp>
#include <armnn/backends/Workload.hpp>
#include <armnn/backends/WorkloadData.hpp>

#include <arm_compute/runtime/CL/functions/CLLSTMLayer.h>

namespace armnn
{

class ClLstmFloatWorkload : public FloatWorkload<LstmQueueDescriptor>
{
public:
    ClLstmFloatWorkload(const LstmQueueDescriptor& descriptor,
                        const WorkloadInfo& info,
                        const arm_compute::CLCompileContext& clCompileContext);
    void Execute() const override;
    // Replace input tensor handle with the given TensorHandle
    void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;

    // Replace output tensor handle with the given TensorHandle
    void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
private:
    // Underlying Compute Library function that performs the LSTM computation
    mutable arm_compute::CLLSTMLayer m_LstmLayer;
    virtual void Reconfigure();

    std::unique_ptr<arm_compute::CLTensor> m_InputToInputWeightsTensor;
    std::unique_ptr<arm_compute::CLTensor> m_InputToForgetWeightsTensor;
    std::unique_ptr<arm_compute::CLTensor> m_InputToCellWeightsTensor;
    std::unique_ptr<arm_compute::CLTensor> m_InputToOutputWeightsTensor;
    std::unique_ptr<arm_compute::CLTensor> m_RecurrentToInputWeightsTensor;
    std::unique_ptr<arm_compute::CLTensor> m_RecurrentToForgetWeightsTensor;
    std::unique_ptr<arm_compute::CLTensor> m_RecurrentToCellWeightsTensor;
    std::unique_ptr<arm_compute::CLTensor> m_RecurrentToOutputWeightsTensor;
    std::unique_ptr<arm_compute::CLTensor> m_CellToInputWeightsTensor;
    std::unique_ptr<arm_compute::CLTensor> m_CellToForgetWeightsTensor;
    std::unique_ptr<arm_compute::CLTensor> m_CellToOutputWeightsTensor;
    std::unique_ptr<arm_compute::CLTensor> m_InputGateBiasTensor;
    std::unique_ptr<arm_compute::CLTensor> m_ForgetGateBiasTensor;
    std::unique_ptr<arm_compute::CLTensor> m_CellBiasTensor;
    std::unique_ptr<arm_compute::CLTensor> m_OutputGateBiasTensor;
    std::unique_ptr<arm_compute::CLTensor> m_ProjectionWeightsTensor;
    std::unique_ptr<arm_compute::CLTensor> m_ProjectionBiasTensor;
    std::unique_ptr<arm_compute::CLTensor> m_InputLayerNormWeightsTensor;
    std::unique_ptr<arm_compute::CLTensor> m_ForgetLayerNormWeightsTensor;
    std::unique_ptr<arm_compute::CLTensor> m_CellLayerNormWeightsTensor;
    std::unique_ptr<arm_compute::CLTensor> m_OutputLayerNormWeightsTensor;

    std::unique_ptr<arm_compute::CLTensor> m_ScratchBuffer;

    // Releases the staging weight/bias tensors once they are no longer needed
    void FreeUnusedTensors();
};

// Validates whether the CL backend can execute the given LSTM configuration in float precision
arm_compute::Status ClLstmFloatWorkloadValidate(const TensorInfo& input, const TensorInfo& outputStateIn,
                                                const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
                                                const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
                                                const TensorInfo& output, const LstmDescriptor& descriptor,
                                                const LstmInputParamsInfo& paramsInfo);
} // namespace armnn
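For orientation, the following is a minimal usage sketch that is not taken from the ArmNN sources: a hypothetical helper, IsClLstmFloatSupported, shows one way a caller might consume ClLstmFloatWorkloadValidate to decide whether the CL backend can run a float LSTM configuration before constructing the workload. The helper name, the include path, and the assumption that the TensorInfos, LstmDescriptor and LstmInputParamsInfo have already been prepared are all illustrative.

// Hypothetical helper (not part of ArmNN) showing one way to consume
// ClLstmFloatWorkloadValidate. The caller is assumed to have already built
// the TensorInfos, LstmDescriptor and LstmInputParamsInfo for the layer.
#include <arm_compute/core/Error.h>

#include "ClLstmFloatWorkload.hpp" // include path within the CL backend assumed

bool IsClLstmFloatSupported(const armnn::TensorInfo& input,
                            const armnn::TensorInfo& outputStateIn,
                            const armnn::TensorInfo& cellStateIn,
                            const armnn::TensorInfo& scratchBuffer,
                            const armnn::TensorInfo& outputStateOut,
                            const armnn::TensorInfo& cellStateOut,
                            const armnn::TensorInfo& output,
                            const armnn::LstmDescriptor& descriptor,
                            const armnn::LstmInputParamsInfo& paramsInfo)
{
    // The validate function returns an arm_compute::Status; an OK error code
    // means the CL backend can execute this LSTM configuration in float.
    const arm_compute::Status status = armnn::ClLstmFloatWorkloadValidate(
        input, outputStateIn, cellStateIn, scratchBuffer,
        outputStateOut, cellStateOut, output, descriptor, paramsInfo);

    return status.error_code() == arm_compute::ErrorCode::OK;
}

This mirrors the general pattern in which backend layer-support checks inspect the returned arm_compute::Status and only create the workload when validation succeeds.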