Compute Library
 21.02
NEGEMMLowpMatrixMultiplyCore.h
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2017-2021 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 #ifndef ARM_COMPUTE_NEGEMMLOWPMATRIXMULTIPLYCORE_H
25 #define ARM_COMPUTE_NEGEMMLOWPMATRIXMULTIPLYCORE_H
26 
#include "NEActivationLayer.h"

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/IFunction.h"
#include "arm_compute/runtime/IMemoryManager.h"
#include "arm_compute/runtime/IWeightsManager.h"
#include "arm_compute/runtime/MemoryGroup.h"
#include "arm_compute/runtime/Tensor.h"

#include <memory>
35 
36 namespace arm_compute
37 {
38 class ITensor;
39 class NEConvertQuantizedSignednessKernel;
40 class NEConvertQuantizedSignednessKernel;
41 class NEGEMMInterleave4x4Kernel;
42 class NEGEMMLowpMatrixMultiplyKernel;
43 class NEGEMMLowpOffsetContributionKernel;
44 class NEGEMMLowpOffsetContributionOutputStageKernel;
45 class NEGEMMLowpMatrixAReductionKernel;
46 class NEGEMMLowpMatrixBReductionKernel;
47 class NEGEMMTranspose1xWKernel;
48 class NEGEMMAssemblyDispatch;
49 
50 /** Basic function to execute GEMMLowpMatrixMultiplyCore on Neon. This function calls the following Neon kernels if the DOT product instruction is not available:
51  *
52  * -# @ref NEGEMMInterleave4x4Kernel
53  * -# @ref NEGEMMTranspose1xWKernel
54  * -# @ref NEGEMMLowpMatrixMultiplyKernel
55  * -# @ref NEGEMMLowpOffsetContributionKernel
56  * -# @ref NEActivationLayer
57  *
58  * otherwise if the DOT product instruction is available:
59  *
60  * -# @ref NEGEMMLowpOffsetContributionKernel
61  *
62 */
64 {
65 public:
66  /** Constructor */
67  NEGEMMLowpMatrixMultiplyCore(std::shared_ptr<IMemoryManager> memory_manager = nullptr, IWeightsManager *weights_manager = nullptr);
68  /** Prevent instances of this class from being copied (As this class contains pointers) */
70  /** Default move constructor */
72  /** Prevent instances of this class from being copied (As this class contains pointers) */
74  /** Default move assignment operator */
76  /** Default destructor */
78  /** Initialise the kernel's inputs, output
79  *
80  * @note GEMM_LOWP: low precision GEMM kernel
81  * This kernel performs the following computations:
82  *
83  * -# Convert a values from QASYMM8 to int32 and add a_offset to each of them.
84  * -# Convert b values from QASYMM8 to int32 add b_offset to each of them.
85  * -# Compute the matrix product of the resulting a * b in int32.
86  *
87  * @note The @p output type is S32 if @p gemm_info.type == GEMMLowpOutputStageType::NONE. It is QASYMM8/QASYMM8_SIGNED otherwise
88  *
89  * @param[in] a First input tensor (Matrix A). Data type supported: QASYMM8/QASYMM8_SIGNED.
90  * @param[in] b Second input tensor (Matrix B). Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8/QSYMM8_PER_CHANNEL.
91  * @param[in] c Third input tensor (Matrix C). It can be a nullptr. Data type supported: S32
92  * @param[out] output Output tensor. Data type supported: Data type supported: S32/QASYMM8/QASYMM8_SIGNED
93  * @param[in] gemm_info (Optional) Specifies if the matrix A and/or matrix B have been reshaped and
94  * if the reshape of matrix B should be executed only for the first run
95  */
96  void configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *output, const GEMMInfo &gemm_info = GEMMInfo());
97  /** Static function to check if given info will lead to a valid configuration of @ref NEGEMMLowpMatrixMultiplyCore
98  *
99  * @note The @p output type is S32 if @p gemm_info.type == GEMMLowpOutputStageType::NONE. It is QASYMM8/QASYMM8_SIGNED otherwise
100  *
101  * @param[in] a First input tensor info (Matrix A). Data type supported: QASYMM8/QASYMM8_SIGNED.
102  * @param[in] b Second input tensor info (Matrix B). Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8/QSYMM8_PER_CHANNEL.
103  * @param[in] c Third input tensor info (Matrix C). It can be a nullptr. Data type supported: S32
104  * @param[in] output Output tensor info. Data type supported: Data type supported: S32/QASYMM8/QASYMM8_SIGNED
105  * @param[in] gemm_info (Optional) Specifies if the matrix A and/or matrix B have been reshaped and
106  * if the reshape of matrix B should be executed only for the first run
107  *
108  * @return a status
109  */
110  static Status validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *output, const GEMMInfo &gemm_info = GEMMInfo());
111 
112  // Inherited methods overridden
113  void run() override;
114  void prepare() override;
115 
116 private:
117  MemoryGroup _memory_group;
118  IWeightsManager *_weights_manager;
119  std::unique_ptr<NEGEMMAssemblyDispatch> _asm_glue;
120  std::unique_ptr<NEGEMMLowpMatrixMultiplyKernel> _mm_kernel;
121  std::unique_ptr<NEGEMMInterleave4x4Kernel> _mtx_a_reshape_kernel;
122  std::unique_ptr<NEGEMMTranspose1xWKernel> _mtx_b_reshape_kernel;
123  std::unique_ptr<NEGEMMLowpMatrixAReductionKernel> _mtx_a_reduction_kernel;
124  std::unique_ptr<NEGEMMLowpMatrixBReductionKernel> _mtx_b_reduction_kernel;
125  std::unique_ptr<NEGEMMLowpOffsetContributionKernel> _offset_contribution_kernel;
126  std::unique_ptr<NEGEMMLowpOffsetContributionOutputStageKernel> _offset_contribution_output_stage_kernel;
127  NEActivationLayer _activation_func;
128  std::unique_ptr<NEConvertQuantizedSignednessKernel> _convert_to_signed_asymm;
129  std::unique_ptr<NEConvertQuantizedSignednessKernel> _convert_from_signed_asymm;
130 
131  Tensor _vector_sum_col;
132  Tensor _vector_sum_row;
133  Tensor _tmp_a;
134  Tensor _tmp_b;
135  Tensor _mm_result_s32;
136  Tensor _signed_a;
137  Tensor _signed_output;
138  const ITensor *_original_b;
139  int32_t _a_offset;
140  int32_t _b_offset;
141 
142  bool _run_vector_matrix_multiplication;
143  bool _assembly_path;
144  bool _fused_assembly_path;
145  bool _reshape_b_only_on_first_run;
146  bool _is_prepared;
147  bool _fuse_output_stage;
148  bool _run_activation;
149  bool _flip_signedness;
150 };
151 } // namespace arm_compute
152 #endif /*ARM_COMPUTE_NEGEMMLOWPMATRIXMULTIPLYCORE_H */
void prepare() override
Prepare the function for executing.
Base class for all functions.
Definition: IFunction.h:30
SimpleTensor< float > b
Definition: DFT.cpp:157
NEGEMMLowpMatrixMultiplyCore(std::shared_ptr< IMemoryManager > memory_manager=nullptr, IWeightsManager *weights_manager=nullptr)
Constructor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
Status class.
Definition: Error.h:52
Interface for Neon tensor.
Definition: ITensor.h:36
Copyright (c) 2017-2021 Arm Limited.
void configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *output, const GEMMInfo &gemm_info=GEMMInfo())
Initialise the kernel's inputs, output.
void run() override
Run the kernels contained in the function.
Basic implementation of the tensor interface.
Definition: Tensor.h:37
Weights manager interface to handle weights transformations.
Basic function to run cpu::kernels::CpuActivationKernel.
static Status validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *output, const GEMMInfo &gemm_info=GEMMInfo())
Static function to check if given info will lead to a valid configuration of NEGEMMLowpMatrixMultiply...
GEMM information class.
Definition: Types.h:2003
NEGEMMLowpMatrixMultiplyCore & operator=(const NEGEMMLowpMatrixMultiplyCore &)=delete
Prevent instances of this class from being copied (As this class contains pointers) ...
Basic function to execute GEMMLowpMatrixMultiplyCore on Neon.
~NEGEMMLowpMatrixMultiplyCore()
Default destructor.