Compute Library 21.02
NEGEMM.h
/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_NEGEMM_H
#define ARM_COMPUTE_NEGEMM_H

#include "arm_compute/runtime/IFunction.h"
#include "arm_compute/runtime/IMemoryManager.h"
#include "arm_compute/runtime/IWeightsManager.h"
#include "arm_compute/runtime/MemoryGroup.h"
#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
#include "arm_compute/runtime/NEON/functions/NEArithmeticAddition.h"
#include "arm_compute/runtime/Tensor.h"

#include <memory>

namespace arm_compute
{
// Forward declarations
class NEGEMMInterleave4x4Kernel;
class NEGEMMMatrixAdditionKernel;
class NEGEMMMatrixMultiplyKernel;
class NEGEMMTranspose1xWKernel;
class NEGEMMAssemblyDispatch;

/** Basic function to execute GEMM on Neon. This function calls the following Neon kernels:
 *
 * If optimized assembly is available:
 * -# @ref NEGEMMAssemblyDispatch
 * -# @ref NEActivationLayer (if alpha != 1.0)
 * Else:
 * -# @ref NEGEMMInterleave4x4Kernel (if the output tensor is a matrix)
 * -# @ref NEGEMMTranspose1xWKernel (if the output tensor is a matrix)
 * -# @ref NEGEMMMatrixMultiplyKernel
 * In both cases:
 * -# @ref NEGEMMMatrixAdditionKernel (if c != nullptr, beta != 0.0 and matrix B is not reshaped only on the first run)
 * Else:
 * -# @ref NEArithmeticAddition (if c != nullptr, matrix B is reshaped only on the first run and the optimized assembly path is not used)
 *
 * -# @ref NEActivationLayer (if an activation is specified in GEMMInfo)
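 *
 * A minimal usage sketch (hypothetical, for illustration only; assumes Tensor objects
 * a, b and d have been initialised with compatible shapes elsewhere):
 * @code
 * NEGEMM gemm;
 * gemm.configure(&a, &b, nullptr, &d, 1.0f, 0.0f); // d = a * b
 * a.allocator()->allocate();
 * b.allocator()->allocate();
 * d.allocator()->allocate();
 * // ... fill a and b with input data ...
 * gemm.run();
 * @endcode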
 */
class NEGEMM : public IFunction
{
public:
    /** Constructor */
    NEGEMM(std::shared_ptr<IMemoryManager> memory_manager = nullptr, IWeightsManager *weights_manager = nullptr);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEGEMM(const NEGEMM &) = delete;
    /** Default move constructor */
    NEGEMM(NEGEMM &&) = default;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEGEMM &operator=(const NEGEMM &) = delete;
    /** Default move assignment operator */
    NEGEMM &operator=(NEGEMM &&) = default;
    /** Default destructor */
    ~NEGEMM();
    /** Initialise the kernel's inputs and output.
     *
     * @note GEMM: General Matrix Multiply - [alpha * A * B + beta * C].
     * @note GEMM: The tensors a, b, c and d must all have the same data type. Do not mix data types when calling this function.
     *
     * @param[in]  a         First input tensor (Matrix A or Vector A). Data type supported: BFLOAT16/F16/F32
     * @param[in]  b         Second input tensor (Matrix B). Data type supported: same as @p a
     * @param[in]  c         Third input tensor (Matrix C). It can be a nullptr if just the multiplication between @p a and @p b is needed. Data type supported: same as @p a
     * @param[out] d         Output tensor. Data type supported: same as @p a
     * @param[in]  alpha     Weight of the matrix product
     * @param[in]  beta      Weight of matrix C
     * @param[in]  gemm_info (Optional) Specifies if the matrix A and/or matrix B have been reshaped and
     *                       if the reshape of matrix B should happen only for the first run
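     *
     * A shape sketch for d = alpha * (a * b) + beta * c, using hypothetical sizes
     * M = 4, N = 8, K = 16. Note that TensorShape is given as (width, height),
     * i.e. (columns, rows):
     * @code
     * Tensor a, b, c, d;
     * a.allocator()->init(TensorInfo(TensorShape(16U, 4U), 1, DataType::F32)); // A: M x K
     * b.allocator()->init(TensorInfo(TensorShape(8U, 16U), 1, DataType::F32)); // B: K x N
     * c.allocator()->init(TensorInfo(TensorShape(8U, 4U), 1, DataType::F32));  // C: M x N
     * d.allocator()->init(TensorInfo(TensorShape(8U, 4U), 1, DataType::F32));  // D: M x N
     *
     * NEGEMM gemm;
     * gemm.configure(&a, &b, &c, &d, 1.0f, 1.0f);
     * @endcode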
     */
    void configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, float alpha, float beta, const GEMMInfo &gemm_info = GEMMInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref NEGEMM.
     *
     * @param[in] a         First input tensor info (Matrix A or Vector A). Data types supported: BFLOAT16/F16/F32
     * @param[in] b         Second input tensor info (Matrix B). Data type supported: same as @p a.
     * @param[in] c         Third input tensor info (Matrix C). It can be a nullptr if just the multiplication between @p a and @p b is needed. Data type supported: same as @p a.
     * @param[in] output    Output tensor info. Data type supported: same as @p a
     * @param[in] alpha     Weight of the matrix product
     * @param[in] beta      Weight of matrix C
     * @param[in] gemm_info (Optional) Specifies if the matrix A and/or matrix B have been reshaped and
     *                      if the reshape of matrix B should happen only for the first run
     *
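     * A validation sketch (hypothetical shapes, matching the configure() example above):
     * @code
     * const TensorInfo a_info(TensorShape(16U, 4U), 1, DataType::F32);
     * const TensorInfo b_info(TensorShape(8U, 16U), 1, DataType::F32);
     * const TensorInfo d_info(TensorShape(8U, 4U), 1, DataType::F32);
     * const Status status = NEGEMM::validate(&a_info, &b_info, nullptr, &d_info, 1.0f, 0.0f);
     * ARM_COMPUTE_ERROR_THROW_ON(status);
     * @endcode
     *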
     * @return a status
     */
    static Status validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *output, float alpha, float beta, const GEMMInfo &gemm_info = GEMMInfo());

    // Inherited methods overridden:
    void run() override;
    void prepare() override;

private:
    MemoryGroup      _memory_group;
    IWeightsManager *_weights_manager;
    std::unique_ptr<NEGEMMInterleave4x4Kernel>  _interleave_kernel;
    std::unique_ptr<NEGEMMTranspose1xWKernel>   _transpose_kernel;
    std::unique_ptr<NEGEMMMatrixMultiplyKernel> _mm_kernel;
    std::unique_ptr<NEGEMMAssemblyDispatch>     _asm_glue;
    std::unique_ptr<NEGEMMMatrixAdditionKernel> _ma_kernel;
    NEActivationLayer    _alpha_scale_func;
    NEArithmeticAddition _add_bias;
    NEActivationLayer    _activation_func;

    Tensor         _tmp_a;
    Tensor         _tmp_b;
    Tensor         _tmp_d;
    const ITensor *_original_b;
    bool           _run_vector_matrix_multiplication;
    bool           _run_alpha_scale;
    bool           _run_addition;
    bool           _run_bias_addition;
    bool           _run_activation;
    bool           _reshape_b_only_on_first_run;
    bool           _is_prepared;
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_NEGEMM_H */