Compute Library 21.02
NEGEMMLowpOffsetContributionKernel.cpp
/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
#include "src/core/helpers/WindowHelpers.h"

#include <arm_neon.h>

namespace arm_compute
{
namespace
{
Status validate_arguments(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row,
                          int32_t a_offset, int32_t b_offset)
{
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mm_result, 1, DataType::S32);

    // If a_offset == 0, vector_sum_col can be a nullptr
    if(a_offset != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_col, 1, DataType::S32);
        ARM_COMPUTE_RETURN_ERROR_ON(vector_sum_col->dimension(0) != mm_result->dimension(0));
    }

    // If b_offset == 0, vector_sum_row can be a nullptr
    if(b_offset != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_row, 1, DataType::S32);

        // Check if input is a 3D reinterpretation
        const bool reinterpret_as_3d = mm_result->num_dimensions() > 1 && mm_result->tensor_shape().y() != vector_sum_row->tensor_shape().x();

        // Validate input
        ARM_COMPUTE_RETURN_ERROR_ON(reinterpret_as_3d && vector_sum_row->dimension(0) != (mm_result->dimension(1) * mm_result->dimension(2)));
        ARM_COMPUTE_RETURN_ERROR_ON(!reinterpret_as_3d && vector_sum_row->dimension(0) != mm_result->dimension(1));

        TensorShape output_shape = mm_result->tensor_shape();
        if(output_shape.num_dimensions() > 1)
        {
            const unsigned int output_batch_idx = reinterpret_as_3d ? 3 : 2;

            TensorShape vector_sum_row_shape = vector_sum_row->tensor_shape();
            vector_sum_row_shape.collapse_from(1);
            output_shape.collapse_from(output_batch_idx);

            ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_row_shape[1] != output_shape[output_batch_idx],
                                            "mm_result tensor must have the same number of batches as the vector_sum_row tensor");

            if(a_offset != 0)
            {
                TensorShape vector_sum_col_shape = vector_sum_col->tensor_shape();
                vector_sum_col_shape.collapse_from(1);

                ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_col_shape[1] != 1 && vector_sum_col_shape[1] != vector_sum_row_shape[1],
                                                "vector_sum_col tensor must have the same number of batches as vector_sum_row or the number of batches must be set to 1");
            }
        }
    }

    return Status{};
}

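// Note on the computation below: this kernel adds the "offset contribution" of a quantized
// (low-precision) GEMM to the raw int32 accumulators in mm_result. Each element receives
//
//   mm_result[x, y] += a_offset * vector_sum_col[x] + b_offset * vector_sum_row[y] + k_offset
//
// where k_offset = a_offset * b_offset * k, vector_sum_col holds the column sums of one input
// and vector_sum_row the row sums of the other. Terms whose offset is zero drop out, which is
// why run_offset_contribution branches on the four (a_offset, b_offset) combinations.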
void run_offset_contribution(const Window &window,
                             ITensor *mm_result, const ITensor *vector_sum_col, const ITensor *vector_sum_row,
                             int32_t a_offset, int32_t b_offset, int32_t k_offset, bool slide_vector_sum_col, bool is_gemm3d)
{
    Window collapsed_window = window.collapse_if_possible(window, Window::DimZ);
    collapsed_window.set(Window::DimX, Window::Dimension(0, 1, 1));

    const int height_input = is_gemm3d ? mm_result->info()->dimension(1) : 0;
    const int depth_input  = is_gemm3d ? mm_result->info()->dimension(2) : 1;

    const int window_start_x = window.x().start();
    const int window_end_x   = window.x().end();
    const int window_step_x  = 16;
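    // A step of 16 int32 elements per iteration maps onto four 128-bit NEON registers (int32x4x4_t);
    // any remainder is handled by the scalar left-overs loops below.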

    Iterator mm_result_it(mm_result, collapsed_window);

    if((a_offset != 0) && (b_offset != 0) && (vector_sum_col != nullptr) && (vector_sum_row != nullptr)) // true, true
    {
        // Set window for vector_sum_col
        Window win_vector_sum_col(collapsed_window);
        win_vector_sum_col.set(Window::DimY, Window::Dimension(0, 0, 0));
        win_vector_sum_col.set(Window::DimZ, Window::Dimension(0, 0, 0));

        // Set window for vector_sum_row
        Window win_vector_sum_row(collapsed_window);
        win_vector_sum_row.set(Window::DimX, Window::Dimension(0, 0, 0));
        win_vector_sum_row.set(Window::DimY, Window::Dimension(0, 0, 0));
        win_vector_sum_row.set(Window::DimZ, Window::Dimension(0, 0, 0));

        Iterator vector_sum_col_it(vector_sum_col, win_vector_sum_col);
        Iterator vector_sum_row_it(vector_sum_row, win_vector_sum_row);

        const size_t sum_row_stride_y = vector_sum_row->info()->strides_in_bytes().y();

        // Offset in case vector_sum_col is batched
        const int vector_sum_col_batch_offset = slide_vector_sum_col ? vector_sum_col->info()->strides_in_bytes().z() : 0;

        execute_window_loop(collapsed_window, [&](const Coordinates & id)
        {
            const int batch_id = id.z() / depth_input;
            auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
            auto mm_result_ptr = reinterpret_cast<int32_t *>(mm_result_it.ptr());

            // Compute the leftover term due to b_offset.
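            // For a 3D-reinterpreted (GEMM3D) output the row index is rebuilt from id.y() plus the
            // current slice (id.z() % depth_input) times the slice height, while id.z() / depth_input
            // selects the batch used to offset into vector_sum_row.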
            int32_t b_offset_term_s32 = *(reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y) + id.y() + (id.z() % depth_input) * height_input);
            b_offset_term_s32 *= b_offset;

            const int32x4_t b_offset_term_s32_vec = vdupq_n_s32(b_offset_term_s32);

            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                // Compute the leftover term due to a_offset.
                int32x4x4_t a_offset_term_s32 =
                {
                    {
                        vld1q_s32(vector_sum_col_ptr + x + 0),
                        vld1q_s32(vector_sum_col_ptr + x + 4),
                        vld1q_s32(vector_sum_col_ptr + x + 8),
                        vld1q_s32(vector_sum_col_ptr + x + 12)
                    }
                };

                a_offset_term_s32.val[0] = vmulq_n_s32(a_offset_term_s32.val[0], a_offset);
                a_offset_term_s32.val[1] = vmulq_n_s32(a_offset_term_s32.val[1], a_offset);
                a_offset_term_s32.val[2] = vmulq_n_s32(a_offset_term_s32.val[2], a_offset);
                a_offset_term_s32.val[3] = vmulq_n_s32(a_offset_term_s32.val[3], a_offset);

                // Add a_offset_term_s32 and b_offset_term_s32
                int32x4x4_t offset_term_s32 =
                {
                    {
                        vdupq_n_s32(k_offset),
                        vdupq_n_s32(k_offset),
                        vdupq_n_s32(k_offset),
                        vdupq_n_s32(k_offset)
                    }
                };

                offset_term_s32.val[0] = vaddq_s32(offset_term_s32.val[0], vaddq_s32(a_offset_term_s32.val[0], b_offset_term_s32_vec));
                offset_term_s32.val[1] = vaddq_s32(offset_term_s32.val[1], vaddq_s32(a_offset_term_s32.val[1], b_offset_term_s32_vec));
                offset_term_s32.val[2] = vaddq_s32(offset_term_s32.val[2], vaddq_s32(a_offset_term_s32.val[2], b_offset_term_s32_vec));
                offset_term_s32.val[3] = vaddq_s32(offset_term_s32.val[3], vaddq_s32(a_offset_term_s32.val[3], b_offset_term_s32_vec));

                int32x4x4_t in_s32 =
                {
                    {
                        vld1q_s32(mm_result_ptr + x + 0),
                        vld1q_s32(mm_result_ptr + x + 4),
                        vld1q_s32(mm_result_ptr + x + 8),
                        vld1q_s32(mm_result_ptr + x + 12)
                    }
                };

                // Add the offset terms to GEMM's result
                in_s32.val[0] = vaddq_s32(in_s32.val[0], offset_term_s32.val[0]);
                in_s32.val[1] = vaddq_s32(in_s32.val[1], offset_term_s32.val[1]);
                in_s32.val[2] = vaddq_s32(in_s32.val[2], offset_term_s32.val[2]);
                in_s32.val[3] = vaddq_s32(in_s32.val[3], offset_term_s32.val[3]);

                // Store the result with the offset contribution
                vst1q_s32(mm_result_ptr + x + 0, in_s32.val[0]);
                vst1q_s32(mm_result_ptr + x + 4, in_s32.val[1]);
                vst1q_s32(mm_result_ptr + x + 8, in_s32.val[2]);
                vst1q_s32(mm_result_ptr + x + 12, in_s32.val[3]);
            }

            // Left-overs loop
            for(; x < window_end_x; ++x)
            {
                // Compute the leftover term due to a_offset.
                int32_t a_offset_term_s32 = *(vector_sum_col_ptr + x);

                a_offset_term_s32 *= a_offset;

                // Add the offset terms to GEMM's result
                // Store the result with the offset contribution
                mm_result_ptr[x] += k_offset + a_offset_term_s32 + b_offset_term_s32;
            }
        },
        vector_sum_col_it, vector_sum_row_it, mm_result_it);
    }
    else if((a_offset == 0) && (b_offset != 0) && (vector_sum_row != nullptr)) // false, true
    {
        ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_row);

        // Set window for vector_sum_row
        Window win_vector_sum_row(collapsed_window);
        win_vector_sum_row.set(Window::DimX, Window::Dimension(0, 0, 0));
        win_vector_sum_row.set(Window::DimY, Window::Dimension(0, 0, 0));
        win_vector_sum_row.set(Window::DimZ, Window::Dimension(0, 0, 0));

        Iterator vector_sum_row_it(vector_sum_row, win_vector_sum_row);

        const size_t sum_row_stride_y = vector_sum_row->info()->strides_in_bytes().y();

        execute_window_loop(collapsed_window, [&](const Coordinates & id)
        {
            const int batch_id = id.z() / depth_input;
            auto mm_result_ptr = reinterpret_cast<int32_t *>(mm_result_it.ptr());

            // Compute the leftover term due to b_offset.
            int32_t b_offset_term_s32 = *(reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y) + id.y() + (id.z() % depth_input) * height_input);
            b_offset_term_s32 *= b_offset;

            const int32x4_t b_offset_term_s32_vec = vdupq_n_s32(b_offset_term_s32);

            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                int32x4x4_t in_s32 =
                {
                    {
                        vld1q_s32(mm_result_ptr + x + 0),
                        vld1q_s32(mm_result_ptr + x + 4),
                        vld1q_s32(mm_result_ptr + x + 8),
                        vld1q_s32(mm_result_ptr + x + 12)
                    }
                };

                // Add the offset terms to GEMM's result
                in_s32.val[0] = vaddq_s32(in_s32.val[0], b_offset_term_s32_vec);
                in_s32.val[1] = vaddq_s32(in_s32.val[1], b_offset_term_s32_vec);
                in_s32.val[2] = vaddq_s32(in_s32.val[2], b_offset_term_s32_vec);
                in_s32.val[3] = vaddq_s32(in_s32.val[3], b_offset_term_s32_vec);

                // Store the result with the offset contribution
                vst1q_s32(mm_result_ptr + x + 0, in_s32.val[0]);
                vst1q_s32(mm_result_ptr + x + 4, in_s32.val[1]);
                vst1q_s32(mm_result_ptr + x + 8, in_s32.val[2]);
                vst1q_s32(mm_result_ptr + x + 12, in_s32.val[3]);
            }

            // Left-overs loop
            for(; x < window_end_x; ++x)
            {
                // Add the offset terms to GEMM's result
                // Store the result with the offset contribution
                mm_result_ptr[x] += b_offset_term_s32;
            }
        },
        vector_sum_row_it, mm_result_it);
    }
    else if((a_offset != 0) && (b_offset == 0) && (vector_sum_col != nullptr)) // true, false
    {
        // Set window for vector_sum_col
        Window win_vector_sum_col(collapsed_window);
        win_vector_sum_col.set(Window::DimY, Window::Dimension(0, 0, 0));
        win_vector_sum_col.set(Window::DimZ, Window::Dimension(0, 0, 0));

        Iterator vector_sum_col_it(vector_sum_col, win_vector_sum_col);

        // Offset in case vector_sum_col is batched
        const int vector_sum_col_batch_offset = slide_vector_sum_col ? vector_sum_col->info()->strides_in_bytes().z() : 0;

        execute_window_loop(collapsed_window, [&](const Coordinates & id)
        {
            const int batch_id = id.z() / depth_input;
            auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
            auto mm_result_ptr = reinterpret_cast<int32_t *>(mm_result_it.ptr());

            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                // Compute the leftover term due to a_offset.
                int32x4x4_t a_offset_term_s32 =
                {
                    {
                        vld1q_s32(vector_sum_col_ptr + x + 0),
                        vld1q_s32(vector_sum_col_ptr + x + 4),
                        vld1q_s32(vector_sum_col_ptr + x + 8),
                        vld1q_s32(vector_sum_col_ptr + x + 12)
                    }
                };

                a_offset_term_s32.val[0] = vmulq_n_s32(a_offset_term_s32.val[0], a_offset);
                a_offset_term_s32.val[1] = vmulq_n_s32(a_offset_term_s32.val[1], a_offset);
                a_offset_term_s32.val[2] = vmulq_n_s32(a_offset_term_s32.val[2], a_offset);
                a_offset_term_s32.val[3] = vmulq_n_s32(a_offset_term_s32.val[3], a_offset);

                int32x4x4_t in_s32 =
                {
                    {
                        vld1q_s32(mm_result_ptr + x + 0),
                        vld1q_s32(mm_result_ptr + x + 4),
                        vld1q_s32(mm_result_ptr + x + 8),
                        vld1q_s32(mm_result_ptr + x + 12)
                    }
                };

                // Add the offset terms to GEMM's result
                in_s32.val[0] = vaddq_s32(in_s32.val[0], a_offset_term_s32.val[0]);
                in_s32.val[1] = vaddq_s32(in_s32.val[1], a_offset_term_s32.val[1]);
                in_s32.val[2] = vaddq_s32(in_s32.val[2], a_offset_term_s32.val[2]);
                in_s32.val[3] = vaddq_s32(in_s32.val[3], a_offset_term_s32.val[3]);

                // Store the result with the offset contribution
                vst1q_s32(mm_result_ptr + x + 0, in_s32.val[0]);
                vst1q_s32(mm_result_ptr + x + 4, in_s32.val[1]);
                vst1q_s32(mm_result_ptr + x + 8, in_s32.val[2]);
                vst1q_s32(mm_result_ptr + x + 12, in_s32.val[3]);
            }

            // Left-overs loop
            for(; x < window_end_x; ++x)
            {
                // Compute the leftover term due to a_offset.
                const int32_t a_offset_term_s32 = *(vector_sum_col_ptr + x);

                // Add the offset terms to GEMM's result
                // Store the result with the offset contribution
                mm_result_ptr[x] += a_offset_term_s32 * a_offset;
            }
        },
        vector_sum_col_it, mm_result_it);
    }
    else // false, false
    {
        // No offset contribution from matrix A and matrix B
        return;
    }
}
} // namespace

NEGEMMLowpOffsetContributionKernel::NEGEMMLowpOffsetContributionKernel()
    : _vector_sum_col(nullptr), _vector_sum_row(nullptr), _mm_result(nullptr), _a_offset(0), _b_offset(0), _k_offset(0), _slide_vector_sum_col(true)
{
}

void NEGEMMLowpOffsetContributionKernel::configure(ITensor *mm_result, const ITensor *vector_sum_col, const ITensor *vector_sum_row, int32_t k, int32_t a_offset, int32_t b_offset)
{
    // Perform validate step
    ARM_COMPUTE_ERROR_ON_NULLPTR(mm_result);
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(mm_result->info(),
                                                  vector_sum_col != nullptr ? vector_sum_col->info() : nullptr, // NOLINT
                                                  vector_sum_row != nullptr ? vector_sum_row->info() : nullptr, // NOLINT
                                                  a_offset, b_offset)); // NOLINT

    _vector_sum_col = vector_sum_col;
    _vector_sum_row = vector_sum_row;
    _mm_result = mm_result;
    _a_offset = a_offset;
    _b_offset = b_offset;
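    // The reduction depth k only enters through this constant term, which run_offset_contribution
    // adds to every output element when both offsets are non-zero.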
    _k_offset = a_offset * b_offset * k;

    // If a_offset == 0, vector_sum_col can be a nullptr
    if(a_offset != 0)
    {
        // Check if vector_sum_col_shape should be slid or not
        // Don't slide vector_sum_col_shape along the y dimension if vector_sum_col_shape has just 1 dimension and vector_sum_row_shape more than 1
        // This scenario can happen when the matrix multiplication is used to perform a convolution operation
        _slide_vector_sum_col = vector_sum_col->info()->tensor_shape().num_dimensions() > 1;
    }

    // Configure kernel window
    Window win = calculate_max_window(*mm_result->info(), Steps());
    Coordinates coord;
    coord.set_num_dimensions(mm_result->info()->num_dimensions());
    mm_result->info()->set_valid_region(ValidRegion(coord, mm_result->info()->tensor_shape()));
    INEKernel::configure(win);
}

Status NEGEMMLowpOffsetContributionKernel::validate(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row,
                                                    int32_t a_offset, int32_t b_offset)
{
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(mm_result, vector_sum_col, vector_sum_row, a_offset, b_offset));

    return Status{};
}

void NEGEMMLowpOffsetContributionKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);

    // Check if input is a 3D reinterpretation
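    // (the output is treated as 3D when its y dimension no longer matches the number of row sums,
    // i.e. the original height has been split across dimensions 1 and 2, as validate_arguments checks)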
    const bool reinterpret_as_3d = _vector_sum_row != nullptr
                                   && _mm_result->info()->num_dimensions() > 1
                                   && _mm_result->info()->tensor_shape().y() != _vector_sum_row->info()->tensor_shape().x();

    run_offset_contribution(window, _mm_result, _vector_sum_col, _vector_sum_row, _a_offset, _b_offset, _k_offset, _slide_vector_sum_col, reinterpret_as_3d);
}
} // namespace arm_compute
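
Usage note (illustrative sketch, not part of the file above): inside the library this kernel is normally created and scheduled by NEGEMMLowpMatrixMultiplyCore. Assuming Tensor objects mm_result (the S32 GEMM accumulators), vector_sum_col and vector_sum_row (the column/row reductions of the two quantized inputs, e.g. produced by NEGEMMLowpMatrixBReductionKernel and NEGEMMLowpMatrixAReductionKernel), plus integers k, a_offset and b_offset, already exist, driving the kernel directly might look roughly like this:

    // Check that shapes, data types and offsets form a valid configuration
    ARM_COMPUTE_ERROR_THROW_ON(NEGEMMLowpOffsetContributionKernel::validate(
        mm_result.info(), vector_sum_col.info(), vector_sum_row.info(), a_offset, b_offset));

    NEGEMMLowpOffsetContributionKernel offset_contribution;
    offset_contribution.configure(&mm_result, &vector_sum_col, &vector_sum_row, k, a_offset, b_offset);

    // Add the offset terms in-place to mm_result, splitting the work along Y across CPU threads
    NEScheduler::get().schedule(&offset_contribution, Window::DimY);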