Compute Library 21.02
NERNNLayer.h
/*
 * Copyright (c) 2018-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_NERNNLAYER_H
#define ARM_COMPUTE_NERNNLAYER_H

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
#include "arm_compute/runtime/NEON/functions/NEArithmeticAddition.h"
#include "arm_compute/runtime/NEON/functions/NECopy.h"
#include "arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h"
#include "arm_compute/runtime/NEON/functions/NEGEMM.h"

namespace arm_compute
{
// Forward declarations
class ITensor;

/** Basic function to run @ref NERNNLayer */
class NERNNLayer : public IFunction
{
public:
    /** Default constructor */
    NERNNLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NERNNLayer(const NERNNLayer &) = delete;
    /** Prevent instances of this class from being moved (As this class contains pointers) */
    NERNNLayer(NERNNLayer &&) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NERNNLayer &operator=(const NERNNLayer &) = delete;
    /** Prevent instances of this class from being moved (As this class contains pointers) */
    NERNNLayer &operator=(NERNNLayer &&) = delete;
    /** Default destructor */
    ~NERNNLayer();
    /** Initialize the function
     *
     * @param[in]     input             Input is a 2-D tensor of shape [input_size, batch_size]. Data types supported: F16/F32
     * @param[in]     weights           Weights tensor of shape [input_size, num_units] that multiplies the input. Data types supported: Same as @p input
     * @param[in]     recurrent_weights Weights tensor of shape [num_units, num_units] that multiplies the current 'state'. Data types supported: Same as @p input
     * @param[in]     bias              Bias vector of shape [num_units]. Data types supported: Same as @p input
     * @param[out]    output            Output tensor of shape [num_units, batch_size]. Data types supported: Same as @p input
     * @param[in,out] hidden_state      Hidden state tensor of shape [num_units, batch_size]; read as the previous state and updated with the new state. Data types supported: Same as @p input
     * @param[in]     info              Activation layer parameter.
     */
    void configure(const ITensor *input, const ITensor *weights, const ITensor *recurrent_weights, const ITensor *bias, ITensor *hidden_state, ITensor *output, ActivationLayerInfo &info);
    /** Static function to check if given info will lead to a valid configuration of @ref NERNNLayer
     *
     * @param[in] input             Input is a 2-D tensor of shape [input_size, batch_size]. Data types supported: F16/F32
     * @param[in] weights           Weights tensor of shape [input_size, num_units] that multiplies the input. Data types supported: Same as @p input
     * @param[in] recurrent_weights Weights tensor of shape [num_units, num_units] that multiplies the current 'state'. Data types supported: Same as @p input
     * @param[in] bias              Bias vector of shape [num_units]. Data types supported: Same as @p input
     * @param[in] output            Output tensor of shape [num_units, batch_size]. Data types supported: Same as @p input
     * @param[in] hidden_state      Hidden state tensor of shape [num_units, batch_size]. Data types supported: Same as @p input
     * @param[in] info              Activation layer parameter.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *recurrent_weights, const ITensorInfo *bias, const ITensorInfo *hidden_state, const ITensorInfo *output,
                           const ActivationLayerInfo &info);

    // Inherited methods overridden:
    void run() override;
    void prepare() override;

private:
    MemoryGroup           _memory_group;
    NEGEMM                _gemm_state_f;
    NEArithmeticAddition  _add_f;
    NEActivationLayer     _activation;
    NEFullyConnectedLayer _fully_connected;
    NECopy                _copy_f;
    Tensor                _fully_connected_out;
    Tensor                _gemm_output;
    Tensor                _add_output;
    bool                  _is_prepared;
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_NERNNLAYER_H */
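
The following is a minimal usage sketch, not part of the library's documentation. Judging from the function's members (NEFullyConnectedLayer, NEGEMM, NEArithmeticAddition, NEActivationLayer, NECopy), a call to run() computes approximately hidden_state = activation(weights * input + recurrent_weights * hidden_state + bias) and copies the result to output. The tensor sizes, the F32 data type and the tanh activation below are illustrative assumptions, not values mandated by the API.

// Hypothetical example: one RNN step with NERNNLayer on F32 tensors.
#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NERNNLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    const unsigned int input_size = 32; // features per time step (illustrative)
    const unsigned int num_units  = 64; // hidden units (illustrative)
    const unsigned int batch_size = 4;  // illustrative

    // Declare the tensors and describe their shapes and data types.
    Tensor input, weights, recurrent_weights, bias, hidden_state, output;
    input.allocator()->init(TensorInfo(TensorShape(input_size, batch_size), 1, DataType::F32));
    weights.allocator()->init(TensorInfo(TensorShape(input_size, num_units), 1, DataType::F32));
    recurrent_weights.allocator()->init(TensorInfo(TensorShape(num_units, num_units), 1, DataType::F32));
    bias.allocator()->init(TensorInfo(TensorShape(num_units), 1, DataType::F32));
    hidden_state.allocator()->init(TensorInfo(TensorShape(num_units, batch_size), 1, DataType::F32));
    output.allocator()->init(TensorInfo(TensorShape(num_units, batch_size), 1, DataType::F32));

    ActivationLayerInfo act(ActivationLayerInfo::ActivationFunction::TANH);

    // Optional: check the configuration before committing to it.
    const Status status = NERNNLayer::validate(input.info(), weights.info(), recurrent_weights.info(),
                                               bias.info(), hidden_state.info(), output.info(), act);
    if(status.error_code() != ErrorCode::OK)
    {
        return 1;
    }

    NERNNLayer rnn;
    rnn.configure(&input, &weights, &recurrent_weights, &bias, &hidden_state, &output, act);

    // Allocate backing memory only after configuration.
    input.allocator()->allocate();
    weights.allocator()->allocate();
    recurrent_weights.allocator()->allocate();
    bias.allocator()->allocate();
    hidden_state.allocator()->allocate();
    output.allocator()->allocate();

    // ... fill input, weights, recurrent_weights, bias and the initial hidden_state here ...

    rnn.run(); // updates hidden_state in place and writes the same values to output

    return 0;
}

Because configure() keeps pointers to the tensors, the next time step can be computed by refilling input and calling run() again; hidden_state then already holds the state produced by the previous step.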