Compute Library 22.05
CLRNNLayer.h
/*
 * Copyright (c) 2018-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_CLRNN_LAYER_H
#define ARM_COMPUTE_CLRNN_LAYER_H

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLActivationLayer.h"
#include "arm_compute/runtime/CL/functions/CLCopy.h"
#include "arm_compute/runtime/CL/functions/CLElementwiseOperations.h"
#include "arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h"
#include "arm_compute/runtime/CL/functions/CLGEMM.h"
#include "arm_compute/runtime/IFunction.h"
#include "arm_compute/runtime/MemoryGroup.h"

#include <memory>

namespace arm_compute
{
class ICLTensor;

/** Basic function to run @ref CLRNNLayer
 *
 * The function computes a single RNN step: the new state is
 * activation(weights * input + recurrent_weights * hidden_state + bias),
 * written to both @p output and @p hidden_state.
 */
class CLRNNLayer : public IFunction
{
public:
    /** Default constructor */
    CLRNNLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
    /** Prevent instances of this class from being copied */
    CLRNNLayer(const CLRNNLayer &) = delete;
    /** Prevent instances of this class from being copied */
    CLRNNLayer &operator=(const CLRNNLayer &) = delete;
    /** Default destructor */
    ~CLRNNLayer();
    /** Initialize the function
     *
     * Valid data layouts:
     * - NHWC
     * - NCHW
     *
     * Valid data type configurations:
     * |src0 |src1 |src2 |src3 |dst0 |dst1 |
     * |:----|:----|:----|:----|:----|:----|
     * |F16  |F16  |F16  |F16  |F16  |F16  |
     * |F32  |F32  |F32  |F32  |F32  |F32  |
     *
     * @param[in]     input             Input is a 2-D tensor of shape [input_size, batch_size]. Data types supported: F16/F32
     * @param[in]     weights           Weights tensor of shape [input_size, num_units] that multiplies the input. Data types supported: Same as @p input
     * @param[in]     recurrent_weights Weights tensor of shape [num_units, num_units] that multiplies the current 'state'. Data types supported: Same as @p input
     * @param[in]     bias              Bias vector of shape [num_units]. Data types supported: Same as @p input
     * @param[in,out] hidden_state      Tensor of shape [num_units, batch_size] holding the previous hidden state; updated with the new state. Data types supported: Same as @p input
     * @param[out]    output            Output tensor of shape [num_units, batch_size]. Data types supported: Same as @p input
     * @param[in]     info              Activation layer parameters.
     */
    void configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *recurrent_weights, const ICLTensor *bias, ICLTensor *hidden_state, ICLTensor *output, ActivationLayerInfo &info);
    /** Initialize the function
     *
     * @param[in]     compile_context   The compile context to be used.
     * @param[in]     input             Input is a 2-D tensor of shape [input_size, batch_size]. Data types supported: F16/F32
     * @param[in]     weights           Weights tensor of shape [input_size, num_units] that multiplies the input. Data types supported: Same as @p input
     * @param[in]     recurrent_weights Weights tensor of shape [num_units, num_units] that multiplies the current 'state'. Data types supported: Same as @p input
     * @param[in]     bias              Bias vector of shape [num_units]. Data types supported: Same as @p input
     * @param[in,out] hidden_state      Tensor of shape [num_units, batch_size] holding the previous hidden state; updated with the new state. Data types supported: Same as @p input
     * @param[out]    output            Output tensor of shape [num_units, batch_size]. Data types supported: Same as @p input
     * @param[in]     info              Activation layer parameters.
     */
    void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *recurrent_weights, const ICLTensor *bias, ICLTensor *hidden_state,
                   ICLTensor *output, ActivationLayerInfo &info);
    /** Static function to check if the given information will lead to a valid configuration of @ref CLRNNLayer
     *
     * @param[in] input             Input is a 2-D tensor of shape [input_size, batch_size]. Data types supported: F16/F32
     * @param[in] weights           Weights tensor of shape [input_size, num_units] that multiplies the input. Data types supported: Same as @p input
     * @param[in] recurrent_weights Weights tensor of shape [num_units, num_units] that multiplies the current 'state'. Data types supported: Same as @p input
     * @param[in] bias              Bias vector of shape [num_units]. Data types supported: Same as @p input
     * @param[in] hidden_state      Hidden state tensor info of shape [num_units, batch_size]. Data types supported: Same as @p input
     * @param[in] output            Output tensor info of shape [num_units, batch_size]. Data types supported: Same as @p input
     * @param[in] info              Activation layer parameters.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *recurrent_weights, const ITensorInfo *bias, const ITensorInfo *hidden_state, const ITensorInfo *output,
                           const ActivationLayerInfo &info);

    // Inherited methods overridden:
    void run() override;
    void prepare() override;

private:
    MemoryGroup           _memory_group;
    CLGEMM                _gemm_state_f;
    CLArithmeticAddition  _add_kernel;
    CLActivationLayer     _activation;
    CLFullyConnectedLayer _fully_connected_kernel;
    CLCopy                _copy;
    CLTensor              _fully_connected_out;
    CLTensor              _gemm_output;
    CLTensor              _add_output;
    bool                  _is_prepared;
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_CLRNN_LAYER_H */
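
The sketch below shows how this function is typically wired up: declare CL tensors with the shapes documented in configure(), optionally check them with validate(), then configure, allocate and run. It is not part of the header; the tensor sizes, the tanh activation and all names outside CLRNNLayer's own API are illustrative assumptions.

// Minimal usage sketch for CLRNNLayer (illustrative only).
#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLRNNLayer.h"

using namespace arm_compute;

int main()
{
    // Set up the default OpenCL context and queue used by CL functions.
    CLScheduler::get().default_init();

    // Assumed sizes, chosen only for the example.
    const unsigned int input_size = 8, num_units = 16, batch_size = 2;

    // Declare tensors with the shapes documented in configure().
    CLTensor input, weights, recurrent_weights, bias, hidden_state, output;
    input.allocator()->init(TensorInfo(TensorShape(input_size, batch_size), 1, DataType::F32));
    weights.allocator()->init(TensorInfo(TensorShape(input_size, num_units), 1, DataType::F32));
    recurrent_weights.allocator()->init(TensorInfo(TensorShape(num_units, num_units), 1, DataType::F32));
    bias.allocator()->init(TensorInfo(TensorShape(num_units), 1, DataType::F32));
    hidden_state.allocator()->init(TensorInfo(TensorShape(num_units, batch_size), 1, DataType::F32));
    output.allocator()->init(TensorInfo(TensorShape(num_units, batch_size), 1, DataType::F32));

    // Check the configuration before allocating any OpenCL buffers.
    ActivationLayerInfo act(ActivationLayerInfo::ActivationFunction::TANH);
    Status status = CLRNNLayer::validate(input.info(), weights.info(), recurrent_weights.info(),
                                         bias.info(), hidden_state.info(), output.info(), act);
    if(status.error_code() != ErrorCode::OK)
    {
        return 1;
    }

    // Configure the function, then allocate the backing CL buffers.
    CLRNNLayer rnn;
    rnn.configure(&input, &weights, &recurrent_weights, &bias, &hidden_state, &output, act);
    input.allocator()->allocate();
    weights.allocator()->allocate();
    recurrent_weights.allocator()->allocate();
    bias.allocator()->allocate();
    hidden_state.allocator()->allocate();
    output.allocator()->allocate();

    // Fill input/weights/bias/hidden_state here (omitted), then run one RNN step.
    rnn.run();
    CLScheduler::get().sync();
    return 0;
}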