Compute Library 21.02
NEGEMMMatrixAdditionKernel.cpp
/*
 * Copyright (c) 2016-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
#include "src/core/CPP/Validate.h"

#include <arm_neon.h>

namespace arm_compute
{
namespace
{
constexpr unsigned int num_elems_processed_per_iteration = 16;

Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, float beta)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
    ARM_COMPUTE_UNUSED(beta);

    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);

    if(output->total_size() > 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
    }

    return Status{};
}

void matrix_addition_f32(const ITensor *input, ITensor *output, const Window &window, float beta)
{
    const float32x4_t beta_f32 = vdupq_n_f32(beta);

    Iterator in(input, window);
    Iterator out(output, window);

    execute_window_loop(window, [&](const Coordinates &)
    {
        const auto in_ptr  = reinterpret_cast<const float *>(in.ptr());
        const auto out_ptr = reinterpret_cast<float *>(out.ptr());

        float32x4x4_t       alpha_ab = vld4q_f32(out_ptr);
        const float32x4x4_t c        = vld4q_f32(in_ptr);

        // Multiply matrix C by its weight and accumulate
        alpha_ab.val[0] = vmlaq_f32(alpha_ab.val[0], c.val[0], beta_f32);
        alpha_ab.val[1] = vmlaq_f32(alpha_ab.val[1], c.val[1], beta_f32);
        alpha_ab.val[2] = vmlaq_f32(alpha_ab.val[2], c.val[2], beta_f32);
        alpha_ab.val[3] = vmlaq_f32(alpha_ab.val[3], c.val[3], beta_f32);

        vst4q_f32(out_ptr, alpha_ab);
    },
    in, out);
}

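// For reference, a plain scalar equivalent of the vectorised routine above
// (illustrative sketch only, not part of the original file): each output
// element, which already holds alpha * (A * B), is incremented in place by
// beta times the corresponding element of C.
void matrix_addition_f32_reference(const float *input, float *output, unsigned int num_elements, float beta)
{
    for(unsigned int i = 0; i < num_elements; ++i)
    {
        output[i] += beta * input[i];
    }
}
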
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
void matrix_addition_f16(const ITensor *input, ITensor *output, const Window &window, float beta)
{
    const float16x8_t beta_f16 = vdupq_n_f16(beta);

    Iterator in(input, window);
    Iterator out(output, window);

    execute_window_loop(window, [&](const Coordinates &)
    {
        const auto in_ptr  = reinterpret_cast<const float16_t *>(in.ptr());
        const auto out_ptr = reinterpret_cast<float16_t *>(out.ptr());

        float16x8x2_t       alpha_ab = vld2q_f16(out_ptr);
        const float16x8x2_t c        = vld2q_f16(in_ptr);

        // Multiply matrix C by its weight and accumulate
        alpha_ab.val[0] = vaddq_f16(alpha_ab.val[0], vmulq_f16(c.val[0], beta_f16));
        alpha_ab.val[1] = vaddq_f16(alpha_ab.val[1], vmulq_f16(c.val[1], beta_f16));

        vst2q_f16(out_ptr + 0, alpha_ab);
    },
    in, out);
}
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */

} // namespace

NEGEMMMatrixAdditionKernel::NEGEMMMatrixAdditionKernel()
    : INESimpleKernel(), _func(nullptr), _beta(0.0f)
{
}

void NEGEMMMatrixAdditionKernel::configure(const ITensor *input, ITensor *output, float beta)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);

    // Perform validation step
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), beta));

    switch(input->info()->data_type())
    {
        case DataType::F32:
            _func = &matrix_addition_f32;
            break;
        case DataType::F16:
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
            _func = &matrix_addition_f16;
            break;
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
            // Without FP16 vector arithmetic support, F16 falls through to the error below.
        default:
            ARM_COMPUTE_ERROR("Data type not supported");
            break;
    }

    // Configure kernel window
    INESimpleKernel::configure(input, output, num_elems_processed_per_iteration);

    _beta = beta;
}

Status NEGEMMMatrixAdditionKernel::validate(const ITensorInfo *input, const ITensorInfo *output, float beta)
{
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, beta));
    ARM_COMPUTE_RETURN_ON_ERROR(INESimpleKernel::validate(input->clone().get(), output->clone().get(), num_elems_processed_per_iteration));
    return Status{};
}

void NEGEMMMatrixAdditionKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);

    // When beta is zero the output already holds alpha * (A * B), so there is nothing to add.
    if(_beta != 0.0f)
    {
        (*_func)(_input, _output, window, _beta);
    }
}
} // namespace arm_compute
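
A minimal usage sketch follows; it is not part of the original file. It assumes a standalone program built against the library with the internal kernel header reachable on the include path, and the 64x64 FP32 shapes and the beta value are arbitrary illustration choices. The kernel adds beta * C in place to the output tensor, which in a full GEMM already holds alpha * (A * B).

#include "arm_compute/core/CPPTypes.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/runtime/Tensor.h"
#include "src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h"

using namespace arm_compute;

int main()
{
    // C and the accumulator that already holds alpha * (A * B); both left uninitialised here.
    Tensor c;
    Tensor alpha_ab;
    c.allocator()->init(TensorInfo(TensorShape(64U, 64U), 1, DataType::F32));
    alpha_ab.allocator()->init(TensorInfo(TensorShape(64U, 64U), 1, DataType::F32));
    c.allocator()->allocate();
    alpha_ab.allocator()->allocate();

    const float beta = 0.5f;

    // Check the configuration first, then set up the kernel.
    ARM_COMPUTE_ERROR_THROW_ON(NEGEMMMatrixAdditionKernel::validate(c.info(), alpha_ab.info(), beta));

    NEGEMMMatrixAdditionKernel kernel;
    kernel.configure(&c, &alpha_ab, beta);

    // Run single-threaded over the kernel's maximum window; a scheduler would
    // normally split this window across worker threads.
    kernel.run(kernel.window(), ThreadInfo{});
    return 0;
}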