Status validate_arguments(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row,
                          int32_t a_offset, int32_t b_offset)
{
    // If b_offset == 0, vector_sum_row can be a nullptr
    if(b_offset != 0)
    {
        // Check whether mm_result is a 3D reinterpretation of the output
        const bool reinterpret_as_3d = mm_result->num_dimensions() > 1 && mm_result->tensor_shape().y() != vector_sum_row->tensor_shape().x();
        ARM_COMPUTE_RETURN_ERROR_ON(reinterpret_as_3d && vector_sum_row->dimension(0) != (mm_result->dimension(1) * mm_result->dimension(2)));

        const unsigned int output_batch_idx = reinterpret_as_3d ? 3 : 2;

        TensorShape output_shape         = mm_result->tensor_shape();
        TensorShape vector_sum_row_shape = vector_sum_row->tensor_shape();
        vector_sum_row_shape.collapse_from(1);
        output_shape.collapse_from(output_batch_idx);
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_row_shape[1] != output_shape[output_batch_idx],
                                        "mm_result tensor must have the same number of batches as the output tensor");

        // If a_offset == 0, vector_sum_col can be a nullptr
        if(a_offset != 0)
        {
            TensorShape vector_sum_col_shape = vector_sum_col->tensor_shape();
            vector_sum_col_shape.collapse_from(1);
            ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_col_shape[1] != 1 && vector_sum_col_shape[1] != vector_sum_row_shape[1],
                                            "vector_sum_col tensor must have the same number of batches as vector_sum_row, or its number of batches must be 1");
        }
    }
    return Status{};
}
void run_offset_contribution(const Window &window,
                             ITensor *mm_result, const ITensor *vector_sum_col, const ITensor *vector_sum_row,
                             int32_t a_offset, int32_t b_offset, int32_t k_offset, bool slide_vector_sum_col, bool is_gemm3d)
{
    Window collapsed_window = window.collapse_if_possible(window, Window::DimZ);
    collapsed_window.set(Window::DimX, Window::Dimension(0, 1, 1));

    const int height_input = is_gemm3d ? mm_result->info()->dimension(1) : 0;
    const int depth_input  = is_gemm3d ? mm_result->info()->dimension(2) : 1;

    const int window_start_x = window.x().start();
    const int window_end_x   = window.x().end();
    const int window_step_x  = 16;

    Iterator mm_result_it(mm_result, collapsed_window);
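    // The branches below specialise the update so that terms known to be zero are never
    // computed: both offsets non-zero, only b_offset non-zero, then only a_offset non-zero.
    // Each branch processes 16 int32 elements per iteration (four int32x4_t registers) and
    // finishes the row with a scalar left-over loop.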
    if((a_offset != 0) && (b_offset != 0) && (vector_sum_col != nullptr) && (vector_sum_row != nullptr))
    {
        // Set window for vector_sum_col
        Window win_vector_sum_col(collapsed_window);
        win_vector_sum_col.set(Window::DimY, Window::Dimension(0, 0, 0));
        win_vector_sum_col.set(Window::DimZ, Window::Dimension(0, 0, 0));

        // Set window for vector_sum_row
        Window win_vector_sum_row(collapsed_window);
        win_vector_sum_row.set(Window::DimX, Window::Dimension(0, 0, 0));
        win_vector_sum_row.set(Window::DimY, Window::Dimension(0, 0, 0));
        win_vector_sum_row.set(Window::DimZ, Window::Dimension(0, 0, 0));

        Iterator vector_sum_col_it(vector_sum_col, win_vector_sum_col);
        Iterator vector_sum_row_it(vector_sum_row, win_vector_sum_row);

        const size_t sum_row_stride_y = vector_sum_row->info()->strides_in_bytes().y();

        // Offset in case vector_sum_col is batched
        const int vector_sum_col_batch_offset = slide_vector_sum_col ? vector_sum_col->info()->strides_in_bytes().z() : 0;

        execute_window_loop(collapsed_window, [&](const Coordinates & id)
        {
            const int batch_id           = id.z() / depth_input;
            auto      vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
            auto      mm_result_ptr      = reinterpret_cast<int32_t *>(mm_result_it.ptr());

            // Compute the leftover term due to b_offset.
            int32_t b_offset_term_s32 = *(reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)
                                          + id.y() + (id.z() % depth_input) * height_input);
            b_offset_term_s32 *= b_offset;

            const int32x4_t b_offset_term_s32_vec = vdupq_n_s32(b_offset_term_s32);

            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                // Compute the leftover term due to a_offset.
                int32x4x4_t a_offset_term_s32 =
                {
                    {
                        vld1q_s32(vector_sum_col_ptr + x + 0),
                        vld1q_s32(vector_sum_col_ptr + x + 4),
                        vld1q_s32(vector_sum_col_ptr + x + 8),
                        vld1q_s32(vector_sum_col_ptr + x + 12)
                    }
                };

                a_offset_term_s32.val[0] = vmulq_n_s32(a_offset_term_s32.val[0], a_offset);
                a_offset_term_s32.val[1] = vmulq_n_s32(a_offset_term_s32.val[1], a_offset);
                a_offset_term_s32.val[2] = vmulq_n_s32(a_offset_term_s32.val[2], a_offset);
                a_offset_term_s32.val[3] = vmulq_n_s32(a_offset_term_s32.val[3], a_offset);

                // Add a_offset_term_s32, b_offset_term_s32 and the constant k_offset
                int32x4x4_t offset_term_s32 =
                {
                    {
                        vdupq_n_s32(k_offset),
                        vdupq_n_s32(k_offset),
                        vdupq_n_s32(k_offset),
                        vdupq_n_s32(k_offset)
                    }
                };

                offset_term_s32.val[0] = vaddq_s32(offset_term_s32.val[0], vaddq_s32(a_offset_term_s32.val[0], b_offset_term_s32_vec));
                offset_term_s32.val[1] = vaddq_s32(offset_term_s32.val[1], vaddq_s32(a_offset_term_s32.val[1], b_offset_term_s32_vec));
                offset_term_s32.val[2] = vaddq_s32(offset_term_s32.val[2], vaddq_s32(a_offset_term_s32.val[2], b_offset_term_s32_vec));
                offset_term_s32.val[3] = vaddq_s32(offset_term_s32.val[3], vaddq_s32(a_offset_term_s32.val[3], b_offset_term_s32_vec));

                int32x4x4_t in_s32 =
                {
                    {
                        vld1q_s32(mm_result_ptr + x + 0),
                        vld1q_s32(mm_result_ptr + x + 4),
                        vld1q_s32(mm_result_ptr + x + 8),
                        vld1q_s32(mm_result_ptr + x + 12)
                    }
                };

                // Add the offset terms to GEMM's result
                in_s32.val[0] = vaddq_s32(in_s32.val[0], offset_term_s32.val[0]);
                in_s32.val[1] = vaddq_s32(in_s32.val[1], offset_term_s32.val[1]);
                in_s32.val[2] = vaddq_s32(in_s32.val[2], offset_term_s32.val[2]);
                in_s32.val[3] = vaddq_s32(in_s32.val[3], offset_term_s32.val[3]);

                // Store the result with the offset contribution
                vst1q_s32(mm_result_ptr + x + 0, in_s32.val[0]);
                vst1q_s32(mm_result_ptr + x + 4, in_s32.val[1]);
                vst1q_s32(mm_result_ptr + x + 8, in_s32.val[2]);
                vst1q_s32(mm_result_ptr + x + 12, in_s32.val[3]);
            }

            // Left-over loop
            for(; x < window_end_x; ++x)
            {
                // Compute the leftover term due to a_offset.
                int32_t a_offset_term_s32 = *(vector_sum_col_ptr + x);
                a_offset_term_s32 *= a_offset;

                // Add the offset terms to GEMM's result
                mm_result_ptr[x] += k_offset + a_offset_term_s32 + b_offset_term_s32;
            }
        },
        vector_sum_col_it, vector_sum_row_it, mm_result_it);
    }
    else if((a_offset == 0) && (b_offset != 0) && (vector_sum_row != nullptr))
    {
        // Set window for vector_sum_row
        Window win_vector_sum_row(collapsed_window);
        win_vector_sum_row.set(Window::DimX, Window::Dimension(0, 0, 0));
        win_vector_sum_row.set(Window::DimY, Window::Dimension(0, 0, 0));
        win_vector_sum_row.set(Window::DimZ, Window::Dimension(0, 0, 0));

        Iterator vector_sum_row_it(vector_sum_row, win_vector_sum_row);

        const size_t sum_row_stride_y = vector_sum_row->info()->strides_in_bytes().y();

        execute_window_loop(collapsed_window, [&](const Coordinates & id)
        {
            const int batch_id      = id.z() / depth_input;
            auto      mm_result_ptr = reinterpret_cast<int32_t *>(mm_result_it.ptr());

            // Compute the leftover term due to b_offset.
            int32_t b_offset_term_s32 = *(reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)
                                          + id.y() + (id.z() % depth_input) * height_input);
            b_offset_term_s32 *= b_offset;

            const int32x4_t b_offset_term_s32_vec = vdupq_n_s32(b_offset_term_s32);

            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                int32x4x4_t in_s32 =
                {
                    {
                        vld1q_s32(mm_result_ptr + x + 0),
                        vld1q_s32(mm_result_ptr + x + 4),
                        vld1q_s32(mm_result_ptr + x + 8),
                        vld1q_s32(mm_result_ptr + x + 12)
                    }
                };

                // Add the offset terms to GEMM's result
                in_s32.val[0] = vaddq_s32(in_s32.val[0], b_offset_term_s32_vec);
                in_s32.val[1] = vaddq_s32(in_s32.val[1], b_offset_term_s32_vec);
                in_s32.val[2] = vaddq_s32(in_s32.val[2], b_offset_term_s32_vec);
                in_s32.val[3] = vaddq_s32(in_s32.val[3], b_offset_term_s32_vec);

                // Store the result with the offset contribution
                vst1q_s32(mm_result_ptr + x + 0, in_s32.val[0]);
                vst1q_s32(mm_result_ptr + x + 4, in_s32.val[1]);
                vst1q_s32(mm_result_ptr + x + 8, in_s32.val[2]);
                vst1q_s32(mm_result_ptr + x + 12, in_s32.val[3]);
            }

            // Left-over loop
            for(; x < window_end_x; ++x)
            {
                // Add the offset term to GEMM's result
                mm_result_ptr[x] += b_offset_term_s32;
            }
        },
        vector_sum_row_it, mm_result_it);
    }
    else if((a_offset != 0) && (b_offset == 0) && (vector_sum_col != nullptr))
    {
        // Set window for vector_sum_col
        Window win_vector_sum_col(collapsed_window);
        win_vector_sum_col.set(Window::DimY, Window::Dimension(0, 0, 0));
        win_vector_sum_col.set(Window::DimZ, Window::Dimension(0, 0, 0));

        Iterator vector_sum_col_it(vector_sum_col, win_vector_sum_col);

        // Offset in case vector_sum_col is batched
        const int vector_sum_col_batch_offset = slide_vector_sum_col ? vector_sum_col->info()->strides_in_bytes().z() : 0;

        execute_window_loop(collapsed_window, [&](const Coordinates & id)
        {
            const int batch_id           = id.z() / depth_input;
            auto      vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
            auto      mm_result_ptr      = reinterpret_cast<int32_t *>(mm_result_it.ptr());

            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                // Compute the leftover term due to a_offset.
                int32x4x4_t a_offset_term_s32 =
                {
                    {
                        vld1q_s32(vector_sum_col_ptr + x + 0),
                        vld1q_s32(vector_sum_col_ptr + x + 4),
                        vld1q_s32(vector_sum_col_ptr + x + 8),
                        vld1q_s32(vector_sum_col_ptr + x + 12)
                    }
                };

                a_offset_term_s32.val[0] = vmulq_n_s32(a_offset_term_s32.val[0], a_offset);
                a_offset_term_s32.val[1] = vmulq_n_s32(a_offset_term_s32.val[1], a_offset);
                a_offset_term_s32.val[2] = vmulq_n_s32(a_offset_term_s32.val[2], a_offset);
                a_offset_term_s32.val[3] = vmulq_n_s32(a_offset_term_s32.val[3], a_offset);

                int32x4x4_t in_s32 =
                {
                    {
                        vld1q_s32(mm_result_ptr + x + 0),
                        vld1q_s32(mm_result_ptr + x + 4),
                        vld1q_s32(mm_result_ptr + x + 8),
                        vld1q_s32(mm_result_ptr + x + 12)
                    }
                };

                // Add the offset terms to GEMM's result
                in_s32.val[0] = vaddq_s32(in_s32.val[0], a_offset_term_s32.val[0]);
                in_s32.val[1] = vaddq_s32(in_s32.val[1], a_offset_term_s32.val[1]);
                in_s32.val[2] = vaddq_s32(in_s32.val[2], a_offset_term_s32.val[2]);
                in_s32.val[3] = vaddq_s32(in_s32.val[3], a_offset_term_s32.val[3]);

                // Store the result with the offset contribution
                vst1q_s32(mm_result_ptr + x + 0, in_s32.val[0]);
                vst1q_s32(mm_result_ptr + x + 4, in_s32.val[1]);
                vst1q_s32(mm_result_ptr + x + 8, in_s32.val[2]);
                vst1q_s32(mm_result_ptr + x + 12, in_s32.val[3]);
            }

            // Left-over loop
            for(; x < window_end_x; ++x)
            {
                // Compute the leftover term due to a_offset and add it to GEMM's result
                const int32_t a_offset_term_s32 = *(vector_sum_col_ptr + x);
                mm_result_ptr[x] += a_offset_term_s32 * a_offset;
            }
        },
        vector_sum_col_it, mm_result_it);
    }
}
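// For reference, a scalar sketch of what the NEON paths above compute. This is an editorial
// illustration rather than part of the original file; the helper name and the flat row-major
// layout are assumptions. Note that k_offset (= a_offset * b_offset * k) is zero whenever
// either offset is zero, so adding it unconditionally matches the branches above.
static void reference_offset_contribution(int32_t *mm_result, const int32_t *vector_sum_col, const int32_t *vector_sum_row,
                                          int width, int height, int32_t a_offset, int32_t b_offset, int32_t k_offset)
{
    for(int y = 0; y < height; ++y)
    {
        // Per-row term from the row sums of A
        const int32_t row_term = (vector_sum_row != nullptr) ? b_offset * vector_sum_row[y] : 0;
        for(int x = 0; x < width; ++x)
        {
            // Per-column term from the column sums of B
            const int32_t col_term = (vector_sum_col != nullptr) ? a_offset * vector_sum_col[x] : 0;
            mm_result[y * width + x] += row_term + col_term + k_offset;
        }
    }
}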
NEGEMMLowpOffsetContributionKernel::NEGEMMLowpOffsetContributionKernel()
    : _vector_sum_col(nullptr), _vector_sum_row(nullptr), _mm_result(nullptr), _a_offset(0), _b_offset(0), _k_offset(0), _slide_vector_sum_col(true)
{
}

void NEGEMMLowpOffsetContributionKernel::configure(ITensor *mm_result, const ITensor *vector_sum_col, const ITensor *vector_sum_row,
                                                   int32_t k, int32_t a_offset, int32_t b_offset)
{
    // Perform validate step
    ARM_COMPUTE_ERROR_ON_NULLPTR(mm_result);
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(mm_result->info(),
                                                  vector_sum_col != nullptr ? vector_sum_col->info() : nullptr,
                                                  vector_sum_row != nullptr ? vector_sum_row->info() : nullptr,
                                                  a_offset, b_offset));

    _vector_sum_col = vector_sum_col;
    _vector_sum_row = vector_sum_row;
    _mm_result      = mm_result;
    _a_offset       = a_offset;
    _b_offset       = b_offset;
    _k_offset       = a_offset * b_offset * k;

    // If a_offset == 0, vector_sum_col can be a nullptr; otherwise decide whether it slides along batches
    if(a_offset != 0)
    {
        _slide_vector_sum_col = vector_sum_col->info()->tensor_shape().num_dimensions() > 1;
    }

    // Configure kernel window
    Window win = calculate_max_window(*mm_result->info(), Steps());
    INEKernel::configure(win);
}
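// Usage sketch (editorial; tensor names and setup are illustrative). The reductions are
// assumed to come from the NEGEMMLowpMatrixAReductionKernel / NEGEMMLowpMatrixBReductionKernel
// pair, and a_offset/b_offset follow the gemmlowp convention (typically the negated zero points):
//
//   NEGEMMLowpOffsetContributionKernel kernel;
//   kernel.configure(&mm_result, &vector_sum_col, &vector_sum_row, k, a_offset, b_offset);
//   NEScheduler::get().schedule(&kernel, Window::DimY);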
Status NEGEMMLowpOffsetContributionKernel::validate(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row,
                                                    int32_t a_offset, int32_t b_offset)
{
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(mm_result, vector_sum_col, vector_sum_row, a_offset, b_offset));
    return Status{};
}

void NEGEMMLowpOffsetContributionKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);

    // Check if the matrix multiplication output must be reinterpreted as 3D
    const bool reinterpret_as_3d = _vector_sum_row != nullptr
                                   && _mm_result->info()->num_dimensions() > 1
                                   && _mm_result->info()->tensor_shape().y() != _vector_sum_row->info()->tensor_shape().x();

    run_offset_contribution(window, _mm_result, _vector_sum_col, _vector_sum_row,
                            _a_offset, _b_offset, _k_offset, _slide_vector_sum_col, reinterpret_as_3d);
}