24 #ifndef ARM_COMPUTE_NELSTMLAYERQUANTIZED_H
25 #define ARM_COMPUTE_NELSTMLAYERQUANTIZED_H
168 const ITensor *_input_to_input_weights;
169 const ITensor *_input_to_forget_weights;
170 const ITensor *_input_to_cell_weights;
171 const ITensor *_input_to_output_weights;
172 const ITensor *_recurrent_to_input_weights;
173 const ITensor *_recurrent_to_forget_weights;
174 const ITensor *_recurrent_to_cell_weights;
175 const ITensor *_recurrent_to_output_weights;
176 const ITensor *_input_gate_bias;
177 const ITensor *_forget_gate_bias;
179 const ITensor *_output_gate_bias;
182 Tensor _recurrent_weights;
186 Tensor _weights_transposed;
190 Tensor _forget_gate_input;
192 Tensor _output_gate_input;
193 Tensor _input_modulation_gate_input;
194 Tensor _forget_gate_output;
195 Tensor _input_gate_output;
196 Tensor _output_gate_output;
197 Tensor _input_modulation_gate_output;
201 Tensor _output_state_out_symm;
202 Tensor _output_state_out_f32;
~NELSTMLayerQuantized()
Default destructor.
Base class for all functions.
Basic function to run cpu::kernels::CpuAddKernel.
auto input_to_input_weights
Basic function to perform tensor slicing.
auto recurrent_to_forget_weights
Store the tensor's metadata.
Basic function to simulate a quantization layer.
Interface for Neon tensor.
Copyright (c) 2017-2021 Arm Limited.
auto input_to_cell_weights
auto recurrent_to_output_weights
NELSTMLayerQuantized(std::shared_ptr< IMemoryManager > memory_manager=nullptr)
Default constructor.
auto input_to_output_weights
Basic function to run NELSTMLayerQuantized.
auto recurrent_to_input_weights
Basic implementation of the tensor interface.
Basic function to transpose a matrix on Neon.
auto recurrent_to_cell_weights
Basic function to run NEDequantizationLayerKernel that dequantizes an input tensor.
void prepare() override
Prepare the function for executing.
Basic function to run cpu::kernels::CpuActivationKernel.
NELSTMLayerQuantized & operator=(const NELSTMLayerQuantized &)=delete
Prevent instances of this class from being copied (As this class contains pointers).
Basic function to execute concatenate tensors along a given axis.
void run() override
Run the kernels contained in the function.
Basic function to run NEPixelWiseMultiplicationKernel.
void configure(const ITensor *input, const ITensor *input_to_input_weights, const ITensor *input_to_forget_weights, const ITensor *input_to_cell_weights, const ITensor *input_to_output_weights, const ITensor *recurrent_to_input_weights, const ITensor *recurrent_to_forget_weights, const ITensor *recurrent_to_cell_weights, const ITensor *recurrent_to_output_weights, const ITensor *input_gate_bias, const ITensor *forget_gate_bias, const ITensor *cell_bias, const ITensor *output_gate_bias, ITensor *cell_state_in, const ITensor *output_state_in, ITensor *cell_state_out, ITensor *output_state_out)
Initialize function's tensors.
Basic function to execute NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint on Neon.
auto input_to_forget_weights
Basic function to execute GEMMLowpMatrixMultiplyCore on Neon.
static Status validate(const ITensorInfo *input, const ITensorInfo *input_to_input_weights, const ITensorInfo *input_to_forget_weights, const ITensorInfo *input_to_cell_weights, const ITensorInfo *input_to_output_weights, const ITensorInfo *recurrent_to_input_weights, const ITensorInfo *recurrent_to_forget_weights, const ITensorInfo *recurrent_to_cell_weights, const ITensorInfo *recurrent_to_output_weights, const ITensorInfo *input_gate_bias, const ITensorInfo *forget_gate_bias, const ITensorInfo *cell_bias, const ITensorInfo *output_gate_bias, const ITensorInfo *cell_state_in, const ITensorInfo *output_state_in, const ITensorInfo *cell_state_out, const ITensorInfo *output_state_out)
Static function to check if given info will lead to a valid configuration of NELSTMLayerQuantized.