NELSTMLayerQuantized.h file reference (Arm Compute Library 24.02.1)
Copyright (c) 2017-2024 Arm Limited.
#ifndef ARM_COMPUTE_NELSTMLAYERQUANTIZED_H
#define ARM_COMPUTE_NELSTMLAYERQUANTIZED_H

// Excerpt of the private members of NELSTMLayerQuantized; the surrounding class
// declaration (including the member functions documented below) is omitted here.

// Pointers to the weight and bias tensors passed to configure().
const ITensor *_input_to_input_weights;
const ITensor *_input_to_forget_weights;
const ITensor *_input_to_cell_weights;
const ITensor *_input_to_output_weights;
const ITensor *_recurrent_to_input_weights;
const ITensor *_recurrent_to_forget_weights;
const ITensor *_recurrent_to_cell_weights;
const ITensor *_recurrent_to_output_weights;
const ITensor *_input_gate_bias;
const ITensor *_forget_gate_bias;
const ITensor *_cell_bias;
const ITensor *_output_gate_bias;

// Internal working tensors (excerpt).
Tensor _recurrent_weights;
Tensor _weights_transposed;
Tensor _forget_gate_input;
Tensor _input_gate_input;
Tensor _output_gate_input;
Tensor _input_modulation_gate_input;
Tensor _forget_gate_output;
Tensor _input_gate_output;
Tensor _output_gate_output;
Tensor _input_modulation_gate_output;
Tensor _output_state_out_symm;
Tensor _output_state_out_f32;
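For orientation (this is standard LSTM background rather than something stated in the header excerpt above), the gate-related members map onto the usual LSTM cell update, with x_t the input, h_{t-1} the previous output state and c_{t-1} the previous cell state:

\begin{aligned}
i_t &= \sigma(W_{xi}\,x_t + W_{hi}\,h_{t-1} + b_i)\\
f_t &= \sigma(W_{xf}\,x_t + W_{hf}\,h_{t-1} + b_f)\\
g_t &= \tanh(W_{xg}\,x_t + W_{hg}\,h_{t-1} + b_g)\\
o_t &= \sigma(W_{xo}\,x_t + W_{ho}\,h_{t-1} + b_o)\\
c_t &= f_t \odot c_{t-1} + i_t \odot g_t\\
h_t &= o_t \odot \tanh(c_t)
\end{aligned}

The input_to_* and recurrent_to_* weights play the roles of W_x* and W_h*, the *_gate_bias tensors are the b terms, and the "input modulation" gate is g_t; in this quantized variant the matrix products are carried out with the GEMMLowp functions listed further below rather than in floating point.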
NELSTMLayerQuantized: Basic function to run NELSTMLayerQuantized.

Public Member Functions:

NELSTMLayerQuantized(std::shared_ptr< IMemoryManager > memory_manager=nullptr)
    Default constructor.

NELSTMLayerQuantized &operator=(const NELSTMLayerQuantized &)=delete
    Prevent instances of this class from being copied (as this class contains pointers).

~NELSTMLayerQuantized()
    Default destructor.

void configure(const ITensor *input, const ITensor *input_to_input_weights, const ITensor *input_to_forget_weights, const ITensor *input_to_cell_weights, const ITensor *input_to_output_weights, const ITensor *recurrent_to_input_weights, const ITensor *recurrent_to_forget_weights, const ITensor *recurrent_to_cell_weights, const ITensor *recurrent_to_output_weights, const ITensor *input_gate_bias, const ITensor *forget_gate_bias, const ITensor *cell_bias, const ITensor *output_gate_bias, ITensor *cell_state_in, const ITensor *output_state_in, ITensor *cell_state_out, ITensor *output_state_out)
    Initialize the function's tensors.

static Status validate(const ITensorInfo *input, const ITensorInfo *input_to_input_weights, const ITensorInfo *input_to_forget_weights, const ITensorInfo *input_to_cell_weights, const ITensorInfo *input_to_output_weights, const ITensorInfo *recurrent_to_input_weights, const ITensorInfo *recurrent_to_forget_weights, const ITensorInfo *recurrent_to_cell_weights, const ITensorInfo *recurrent_to_output_weights, const ITensorInfo *input_gate_bias, const ITensorInfo *forget_gate_bias, const ITensorInfo *cell_bias, const ITensorInfo *output_gate_bias, const ITensorInfo *cell_state_in, const ITensorInfo *output_state_in, const ITensorInfo *cell_state_out, const ITensorInfo *output_state_out)
    Static function to check if the given info will lead to a valid configuration of NELSTMLayerQuantized.

void run() override
    Run the kernels contained in the function.

void prepare() override
    Prepare the function for executing.
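The sketch below shows the intended call sequence: declare the tensors and initialise their metadata, call configure() once, allocate backing memory, then call run() for each cell step. It is a minimal illustration only; batch_size, input_size, output_size and every QuantizationInfo value are placeholder assumptions rather than values required by this header, and real code should also check NELSTMLayerQuantized::validate() first.

// Minimal usage sketch; all sizes and quantization parameters are illustrative.
#include <initializer_list>
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NELSTMLayerQuantized.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    const unsigned int batch_size = 2, input_size = 32, output_size = 16;

    const QuantizationInfo qasymm(1.f / 128.f, 128);   // input and output state (QASYMM8), placeholder
    const QuantizationInfo qweights(1.f / 128.f, 128); // weights (QASYMM8), placeholder
    const QuantizationInfo qsymm(16.f / 32768.f, 0);   // cell state (QSYMM16), placeholder

    // Initialise a tensor's metadata (no memory is allocated yet).
    auto init = [](Tensor &t, TensorShape shape, DataType dt, QuantizationInfo qi) {
        t.allocator()->init(TensorInfo(shape, 1, dt, qi));
    };

    Tensor input;
    Tensor in_to_input_w, in_to_forget_w, in_to_cell_w, in_to_output_w;
    Tensor rec_to_input_w, rec_to_forget_w, rec_to_cell_w, rec_to_output_w;
    Tensor input_gate_bias, forget_gate_bias, cell_bias, output_gate_bias;
    Tensor cell_state_in, cell_state_out, output_state_in, output_state_out;

    init(input, TensorShape(input_size, batch_size), DataType::QASYMM8, qasymm);
    for (Tensor *w : {&in_to_input_w, &in_to_forget_w, &in_to_cell_w, &in_to_output_w})
        init(*w, TensorShape(input_size, output_size), DataType::QASYMM8, qweights);
    for (Tensor *w : {&rec_to_input_w, &rec_to_forget_w, &rec_to_cell_w, &rec_to_output_w})
        init(*w, TensorShape(output_size, output_size), DataType::QASYMM8, qweights);
    for (Tensor *b : {&input_gate_bias, &forget_gate_bias, &cell_bias, &output_gate_bias})
        init(*b, TensorShape(output_size), DataType::S32, QuantizationInfo());
    for (Tensor *c : {&cell_state_in, &cell_state_out})
        init(*c, TensorShape(output_size, batch_size), DataType::QSYMM16, qsymm);
    for (Tensor *s : {&output_state_in, &output_state_out})
        init(*s, TensorShape(output_size, batch_size), DataType::QASYMM8, qasymm);

    // Configure once; the function keeps pointers to these tensors.
    NELSTMLayerQuantized lstm;
    lstm.configure(&input,
                   &in_to_input_w, &in_to_forget_w, &in_to_cell_w, &in_to_output_w,
                   &rec_to_input_w, &rec_to_forget_w, &rec_to_cell_w, &rec_to_output_w,
                   &input_gate_bias, &forget_gate_bias, &cell_bias, &output_gate_bias,
                   &cell_state_in, &output_state_in, &cell_state_out, &output_state_out);

    // Allocate backing memory, then (not shown) fill the input, weights, biases and
    // the *_in state tensors before running one cell step.
    for (Tensor *t : {&input, &in_to_input_w, &in_to_forget_w, &in_to_cell_w, &in_to_output_w,
                      &rec_to_input_w, &rec_to_forget_w, &rec_to_cell_w, &rec_to_output_w,
                      &input_gate_bias, &forget_gate_bias, &cell_bias, &output_gate_bias,
                      &cell_state_in, &output_state_in, &cell_state_out, &output_state_out})
        t->allocator()->allocate();

    lstm.run();
    return 0;
}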
Referenced types and functions:

IFunction: Base class for all functions.
ITensor: Interface for CPU tensor.
Tensor: Basic implementation of the tensor interface.
TensorInfo / ITensorInfo: Store the tensor's metadata.
NEArithmeticAddition: Basic function to run cpu::kernels::CpuAddKernel.
NEPixelWiseMultiplication: Basic function to run cpu::CpuMul.
NEActivationLayer: Basic function to run cpu::kernels::CpuActivationKernel.
NEConcatenateLayer: Basic function to concatenate tensors along a given axis.
NETranspose: Basic function to run cpu::kernels::CpuTransposeKernel.
NESlice: Basic function to perform tensor slicing.
NEGEMMLowpMatrixMultiplyCore: Function to run GEMM on quantized types.
NEGEMMLowpOutputStage: Basic function to execute GEMMLowpQuantizeDown kernels.
NEDequantizationLayer: Basic function to run cpu::CpuDequantize that dequantizes an input tensor.
NEQuantizationLayer: Basic function to run a quantization layer using cpu::CpuQuantize.
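Because validate() takes only ITensorInfo arguments (metadata, no backing memory), a candidate configuration can be checked before any tensors are allocated. A minimal sketch, reusing the same placeholder sizes and quantization parameters as the configure() example above:

// Minimal validation sketch; shapes and quantization values are illustrative assumptions.
#include <iostream>
#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NELSTMLayerQuantized.h"

using namespace arm_compute;

int main()
{
    const unsigned int batch_size = 2, input_size = 32, output_size = 16;
    const QuantizationInfo qasymm(1.f / 128.f, 128), qweights(1.f / 128.f, 128), qsymm(16.f / 32768.f, 0);

    const TensorInfo input(TensorShape(input_size, batch_size), 1, DataType::QASYMM8, qasymm);
    const TensorInfo in_w(TensorShape(input_size, output_size), 1, DataType::QASYMM8, qweights);
    const TensorInfo rec_w(TensorShape(output_size, output_size), 1, DataType::QASYMM8, qweights);
    const TensorInfo bias(TensorShape(output_size), 1, DataType::S32, QuantizationInfo());
    const TensorInfo cell(TensorShape(output_size, batch_size), 1, DataType::QSYMM16, qsymm);
    const TensorInfo state(TensorShape(output_size, batch_size), 1, DataType::QASYMM8, qasymm);

    // The same metadata object is reused for every argument that shares a layout.
    const Status status = NELSTMLayerQuantized::validate(&input, &in_w, &in_w, &in_w, &in_w,
                                                         &rec_w, &rec_w, &rec_w, &rec_w,
                                                         &bias, &bias, &bias, &bias,
                                                         &cell, &state, &cell, &state);
    if (bool(status))
        std::cout << "configuration is valid" << std::endl;
    else
        std::cout << status.error_description() << std::endl;
    return 0;
}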