Compute Library 20.08
NEGEMM.h
/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_NEGEMM_H
#define ARM_COMPUTE_NEGEMM_H

#include "arm_compute/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
#include "arm_compute/runtime/IFunction.h"
#include "arm_compute/runtime/IMemoryManager.h"
#include "arm_compute/runtime/IWeightsManager.h"
#include "arm_compute/runtime/MemoryGroup.h"
#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
#include "arm_compute/runtime/NEON/functions/NEArithmeticAddition.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h"
#include "arm_compute/runtime/Tensor.h"

namespace arm_compute
{
/** Basic function to execute GEMM on NEON. This function calls the following NEON kernels:
 *
 * If optimized assembly is available:
 * -# @ref NEGEMMAssemblyDispatch
 * -# @ref NEActivationLayer (if alpha != 1.0)
 * Else:
 * -# @ref NEGEMMInterleave4x4Kernel (if the output tensor is a matrix)
 * -# @ref NEGEMMTranspose1xWKernel (if the output tensor is a matrix)
 * -# @ref NEGEMMMatrixMultiplyKernel
 * In both cases:
 * -# @ref NEGEMMMatrixAdditionKernel (if c != nullptr, beta != 0.0, and matrix B is not reshaped only on the first run)
 * Else:
 * -# @ref NEArithmeticAdditionKernel (if c != nullptr, matrix B is reshaped only on the first run, and the optimized assembly path is not used)
 *
 * -# @ref NEActivationLayer (if an activation is specified in GEMMInfo)
 *
 * @note A short usage sketch is given after this listing.
 */
class NEGEMM : public IFunction
{
public:
    /** Constructor */
    NEGEMM(std::shared_ptr<IMemoryManager> memory_manager = nullptr, IWeightsManager *weights_manager = nullptr);
    /** Prevent instances of this class from being copied (as this class contains pointers) */
    NEGEMM(const NEGEMM &) = delete;
    /** Default move constructor */
    NEGEMM(NEGEMM &&) = default;
    /** Prevent instances of this class from being copied (as this class contains pointers) */
    NEGEMM &operator=(const NEGEMM &) = delete;
    /** Default move assignment operator */
    NEGEMM &operator=(NEGEMM &&) = default;
    /** Initialise the kernel's inputs and output.
     *
     * @note GEMM: General Matrix Multiply - [alpha * A * B + beta * C].
     * @note GEMM: The tensors a, b, c and d must all have the same data type; do not mix data types when calling this function.
     *
     * @param[in]  a         First input tensor (Matrix A or Vector A). Data type supported: BFLOAT16/F16/F32
     * @param[in]  b         Second input tensor (Matrix B). Data type supported: same as @p a
     * @param[in]  c         Third input tensor (Matrix C). It can be a nullptr if just the multiplication between @p a and @p b is needed. Data type supported: same as @p a
     * @param[out] d         Output tensor. Data type supported: same as @p a
     * @param[in]  alpha     Weight of the matrix product
     * @param[in]  beta      Weight of matrix C
     * @param[in]  gemm_info (Optional) Specifies if the matrix A and/or matrix B have been reshaped and
     *                       if the reshape of matrix B should happen only for the first run
     */
    void configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, float alpha, float beta, const GEMMInfo &gemm_info = GEMMInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref NEGEMM.
     *
     * @param[in]  a         First input tensor info (Matrix A or Vector A). Data types supported: BFLOAT16/F16/F32
     * @param[in]  b         Second input tensor info (Matrix B). Data type supported: same as @p a.
     * @param[in]  c         Third input tensor info (Matrix C). It can be a nullptr if just the multiplication between @p a and @p b is needed. Data type supported: same as @p a.
     * @param[out] output    Output tensor info. Data type supported: same as @p a
     * @param[in]  alpha     Weight of the matrix product
     * @param[in]  beta      Weight of matrix C
     * @param[in]  gemm_info (Optional) Specifies if the matrix A and/or matrix B have been reshaped and
     *                       if the reshape of matrix B should happen only for the first run
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *output, float alpha, float beta, const GEMMInfo &gemm_info = GEMMInfo());

    // Inherited methods overridden:
    void run() override;
    void prepare() override;

private:
    MemoryGroup                _memory_group;
    IWeightsManager           *_weights_manager;
    NEGEMMInterleave4x4Kernel  _interleave_kernel;
    NEGEMMTranspose1xWKernel   _transpose_kernel;
    NEGEMMMatrixMultiplyKernel _mm_kernel;
    NEGEMMAssemblyDispatch     _asm_glue;
    NEGEMMMatrixAdditionKernel _ma_kernel;
    NEActivationLayer          _alpha_scale_func;
    NEArithmeticAddition       _add_bias;
    NEActivationLayer          _activation_func;

    Tensor         _tmp_a;
    Tensor         _tmp_b;
    Tensor         _tmp_d;
    const ITensor *_original_b;
    bool           _run_vector_matrix_multiplication;
    bool           _run_alpha_scale;
    bool           _run_addition;
    bool           _run_bias_addition;
    bool           _run_activation;
    bool           _reshape_b_only_on_first_run;
    bool           _is_prepared;
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_NEGEMM_H */
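
For orientation, here is a minimal usage sketch. It is not part of the original header: the matrix sizes, alpha and beta values, and the main() scaffolding are illustrative assumptions, while the Tensor, Status, and NEGEMM calls use only the API declared above. Note the order: configure() first, then allocate the tensor backing memory, then run(); validate() performs the same configuration checks without allocating anything.

// Minimal usage sketch for NEGEMM (assumed sizes and scaffolding; see lead-in above).
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEGEMM.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Computes d = alpha * A * B + beta * C. Here c == nullptr, so only the
    // product alpha * A * B is evaluated.
    const size_t M = 4, K = 8, N = 16; // A: M x K, B: K x N, d: M x N (assumed sizes)
    const float  alpha = 1.0f;
    const float  beta  = 0.0f;

    // TensorShape lists the innermost (width) dimension first, so an
    // M x K matrix is described as TensorShape(K, M).
    Tensor a, b, d;
    a.allocator()->init(TensorInfo(TensorShape(K, M), 1, DataType::F32));
    b.allocator()->init(TensorInfo(TensorShape(N, K), 1, DataType::F32));
    d.allocator()->init(TensorInfo(TensorShape(N, M), 1, DataType::F32));

    // Check the configuration up front with the static validate() helper.
    const Status status = NEGEMM::validate(a.info(), b.info(), nullptr, d.info(), alpha, beta);
    if(status.error_code() != ErrorCode::OK)
    {
        return 1;
    }

    NEGEMM gemm;
    gemm.configure(&a, &b, nullptr, &d, alpha, beta);

    // Backing memory is allocated after configuration.
    a.allocator()->allocate();
    b.allocator()->allocate();
    d.allocator()->allocate();

    // ... fill a and b with input data here ...

    gemm.run(); // executes the kernels selected at configure() time
    return 0;
}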