#ifndef ARM_COMPUTE_CLLSTMLAYERQUANTIZED_H #define ARM_COMPUTE_CLLSTMLAYERQUANTIZED_H const ICLTensor *recurrent_to_input_weights,
const ICLTensor *recurrent_to_forget_weights,
const ICLTensor *recurrent_to_cell_weights,
const ICLTensor *recurrent_to_output_weights,
const ICLTensor *_input_to_input_weights;
const ICLTensor *_input_to_forget_weights;
const ICLTensor *_input_to_output_weights;
const ICLTensor *_recurrent_to_input_weights;
const ICLTensor *_recurrent_to_forget_weights;
const ICLTensor *_recurrent_to_cell_weights;
const ICLTensor *_recurrent_to_output_weights;
CLTensor _input_modulation_gate_input;
CLTensor _input_modulation_gate_output;
Base class for all functions.
void prepare() override
Prepare the function for executing.
auto input_to_input_weights
Basic function to execute GEMMLowpQuantizeDown kernels on CL.
Basic function to run opencl::kernels::ClSaturatedArithmeticKernel for addition.
auto recurrent_to_forget_weights
Store the tensor's metadata.
Basic function to run opencl::kernels::ClActivationKernel.
Basic function to execute concatenate tensors along a given axis.
Basic function to run opencl::ClDequantize that dequantizes an input tensor.
Basic function to perform tensor slicing.
Copyright (c) 2017-2021 Arm Limited.
void configure(const ICLTensor *input, const ICLTensor *input_to_input_weights, const ICLTensor *input_to_forget_weights, const ICLTensor *input_to_cell_weights, const ICLTensor *input_to_output_weights, const ICLTensor *recurrent_to_input_weights, const ICLTensor *recurrent_to_forget_weights, const ICLTensor *recurrent_to_cell_weights, const ICLTensor *recurrent_to_output_weights, const ICLTensor *input_gate_bias, const ICLTensor *forget_gate_bias, const ICLTensor *cell_bias, const ICLTensor *output_gate_bias, ICLTensor *cell_state_in, const ICLTensor *output_state_in, ICLTensor *cell_state_out, ICLTensor *output_state_out)
Initialize function's tensors.
auto input_to_cell_weights
auto recurrent_to_output_weights
auto input_to_output_weights
Basic function to run CLLSTMLayerQuantized.
static Status validate(const ITensorInfo *input, const ITensorInfo *input_to_input_weights, const ITensorInfo *input_to_forget_weights, const ITensorInfo *input_to_cell_weights, const ITensorInfo *input_to_output_weights, const ITensorInfo *recurrent_to_input_weights, const ITensorInfo *recurrent_to_forget_weights, const ITensorInfo *recurrent_to_cell_weights, const ITensorInfo *recurrent_to_output_weights, const ITensorInfo *input_gate_bias, const ITensorInfo *forget_gate_bias, const ITensorInfo *cell_bias, const ITensorInfo *output_gate_bias, const ITensorInfo *cell_state_in, const ITensorInfo *output_state_in, const ITensorInfo *cell_state_out, const ITensorInfo *output_state_out)
Static function to check if given info will lead to a valid configuration of CLLSTMLayerQuantized.
auto recurrent_to_input_weights
auto recurrent_to_cell_weights
Basic function to run opencl::ClMul.
void run() override
Run the kernels contained in the function.
Interface for OpenCL tensor.
Basic function to execute an opencl::kernels::ClTransposeKernel.
Basic function to execute GEMMLowpMatrixMultiplyCore on OpenCL.
CLLSTMLayerQuantized & operator=(const CLLSTMLayerQuantized &)=delete
Prevent instances of this class from being copied (as this class contains pointers).
Basic function to simulate a quantization layer.
CLLSTMLayerQuantized(std::shared_ptr< IMemoryManager > memory_manager=nullptr)
Default constructor.
auto input_to_forget_weights
Basic implementation of the OpenCL tensor interface.