Compute Library 22.08
helpers.h
1 /*
2  * Copyright (c) 2016-2022 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 #ifndef ARM_COMPUTE_HELPER_H
25 #define ARM_COMPUTE_HELPER_H
26 
27 #include "load_store_utility.h"
28 
29 #if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
30 #pragma OPENCL EXTENSION cl_khr_fp16 : enable
31 #endif // defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
32 
33 #if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
34 #pragma OPENCL EXTENSION cl_arm_integer_dot_product_int8 : enable
35 #endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
36 
37 #if defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
38 #pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_int8 : enable
39 #endif // defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
40 
41 #if defined(ARM_COMPUTE_DEBUG_ENABLED) && defined(cl_arm_printf)
42 #pragma OPENCL EXTENSION cl_arm_printf : enable
43 #endif // defined(ARM_COMPUTE_DEBUG_ENABLED) && defined(cl_arm_printf)
44 
45 #define GPU_ARCH_MIDGARD 0x100
46 #define GPU_ARCH_BIFROST 0x200
47 #define GPU_ARCH_VALHALL 0x300
48 
49 /** Concatenate two inputs.
50  *
51  * @param[in] a The first input to be concatenated
52  * @param[in] b The second input to be concatenated
53  *
54  * @return The concatenated output
55  */
56 #define CONCAT(a, b) a##b
57 
58 /** Expand the given vector
59  *
60  * @param[in] x The vector to be expanded
61  *
62  * @return The expanded output
63  */
64 #define EXPAND(x) x
65 
66 /** Clamp the given value between an upper and lower bound.
67  *
68  * @param[in] x The value to be clamped
69  * @param[in] min_val The lower bound
70  * @param[in] max_val The upper bound
71  *
72  * @return The clamped value.
73  */
74 #define CLAMP(x, min_val, max_val) min(max(x, min_val), max_val)
75 
76 /** REVn reverses the given vector whose size is n.
77  * @name REVn
78  *
79  * @param[in] x The vector to be reversed
80  *
81  * @return The reversed vector
82  * @{
83  */
84 #define REV1(x) ((x))
85 #define REV2(x) ((x).s10)
86 #define REV3(x) ((x).s210)
87 #define REV4(x) ((x).s3210)
88 #define REV8(x) ((x).s76543210)
89 #define REV16(x) ((x).sFEDCBA9876543210)
90 /** @} */ // end of group REVn
91 
92 /** Reverse the given vector.
93  * @name REVERSE
94  *
95  * @param[in] x The vector to be reversed
96  * @param[in] s The size of the vector
97  *
98  * @return The reversed vector
99  * @{
100  */
101 #define REVERSE_STR(x, s) REV##s((x))
102 #define REVERSE(x, s) REVERSE_STR(x, s)
103 /** @} */ // end of group REVERSE
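/** Example (illustrative sketch; the variable names below are hypothetical, not part of this header):
 * @code
 * uchar4 in  = (uchar4)(1, 2, 3, 4);
 * uchar4 rev = REVERSE(in, 4); // expands to in.s3210, i.e. (4, 3, 2, 1)
 * @endcode
 */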
104 
105 /** Circular-right-shift (rotate-right) the vector of size s by n elements.
106  * @name ROTs_n
107  *
108  * @param[in] x The vector to be shifted
109  *
110  * @return The shifted vector
111  * @{
112  */
113 #define ROT1_0(x) ((x))
114 #define ROT1_1(x) ((x))
115 
116 #define ROT2_0(x) ((x))
117 #define ROT2_1(x) ((x).s10)
118 #define ROT2_2(x) ((x))
119 
120 #define ROT3_0(x) ((x))
121 #define ROT3_1(x) ((x).s201)
122 #define ROT3_2(x) ((x).s120)
123 #define ROT3_3(x) ((x))
124 
125 #define ROT4_0(x) ((x))
126 #define ROT4_1(x) ((x).s3012)
127 #define ROT4_2(x) ((x).s2301)
128 #define ROT4_3(x) ((x).s1230)
129 #define ROT4_4(x) ((x))
130 
131 #define ROT8_0(x) ((x))
132 #define ROT8_1(x) ((x).s70123456)
133 #define ROT8_2(x) ((x).s67012345)
134 #define ROT8_3(x) ((x).s56701234)
135 #define ROT8_4(x) ((x).s45670123)
136 #define ROT8_5(x) ((x).s34567012)
137 #define ROT8_6(x) ((x).s23456701)
138 #define ROT8_7(x) ((x).s12345670)
139 #define ROT8_8(x) ((x))
140 
141 #define ROT16_0(x) ((x))
142 #define ROT16_1(x) ((x).sF0123456789ABCDE)
143 #define ROT16_2(x) ((x).sEF0123456789ABCD)
144 #define ROT16_3(x) ((x).sDEF0123456789ABC)
145 #define ROT16_4(x) ((x).sCDEF0123456789AB)
146 #define ROT16_5(x) ((x).sBCDEF0123456789A)
147 #define ROT16_6(x) ((x).sABCDEF0123456789)
148 #define ROT16_7(x) ((x).s9ABCDEF012345678)
149 #define ROT16_8(x) ((x).s89ABCDEF01234567)
150 #define ROT16_9(x) ((x).s789ABCDEF0123456)
151 #define ROT16_10(x) ((x).s6789ABCDEF012345)
152 #define ROT16_11(x) ((x).s56789ABCDEF01234)
153 #define ROT16_12(x) ((x).s456789ABCDEF0123)
154 #define ROT16_13(x) ((x).s3456789ABCDEF012)
155 #define ROT16_14(x) ((x).s23456789ABCDEF01)
156 #define ROT16_15(x) ((x).s123456789ABCDEF0)
157 #define ROT16_16(x) ((x))
158 /** @} */ // end of group ROTs_n
159 
160 /** Circular-right-shift (rotate-right) the given vector by the given amount.
161  * @name ROTATE
162  *
163  * @param[in] x The vector to be shifted
164  * @param[in] s The size of the vector
165  * @param[in] n The number of elements to rotate by
166  *
167  * @return The shifted vector
168  * @{
169  */
170 #define ROTATE_STR(x, s, n) ROT##s##_##n(x)
171 #define ROTATE(x, s, n) ROTATE_STR(x, s, n)
172 /** @} */ // end of group ROTATE
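/** Example (illustrative sketch; the variable names below are hypothetical):
 * @code
 * ushort4 v = (ushort4)(1, 2, 3, 4);
 * ushort4 r = ROTATE(v, 4, 1); // expands to v.s3012, i.e. (4, 1, 2, 3)
 * @endcode
 */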
173 
174 /** Creates a vector of size n filled with offset values corresponding to the location of each element.
175  * @name V_OFFSn
176  *
177  * @param[in] dt The data type of the output vector
178  *
179  * @return The vector filled with offset values
180  * @{
181  */
182 #define V_OFFS1(dt) (dt##1)(0)
183 #define V_OFFS2(dt) (dt##2)(0, 1)
184 #define V_OFFS3(dt) (dt##3)(0, 1, 2)
185 #define V_OFFS4(dt) (dt##4)(0, 1, 2, 3)
186 #define V_OFFS8(dt) (dt##8)(0, 1, 2, 3, 4, 5, 6, 7)
187 #define V_OFFS16(dt) (dt##16)(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)
188 /** @} */ // end of group V_OFFSn
189 
190 /** Create a vector filled with offset values corresponding to the location of each element.
191  * @name VEC_OFFS
192  *
193  * @param[in] dt The data type of the output vector
194  * @param[in] s The size of the output vector
195  *
196  * @return The vector filled with offset values
197  * @{
198  */
199 #define VEC_OFFS_STR(dt, s) V_OFFS##s(dt)
200 #define VEC_OFFS(dt, s) VEC_OFFS_STR(dt, s)
201 /** @} */ // end of group VEC_OFFS
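/** Example (illustrative sketch; x0 and width are hypothetical values): a common use is building
 * per-lane indices or boundary masks from a base index.
 * @code
 * int4 lane_idx  = (int4)x0 + VEC_OFFS(int, 4); // VEC_OFFS(int, 4) expands to (int4)(0, 1, 2, 3)
 * int4 in_bounds = lane_idx < (int4)width;      // per-lane boundary mask
 * @endcode
 */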
202 
203 #define VLOAD_STR(size) vload##size
204 #define VLOAD(size) VLOAD_STR(size)
205 
206 /** Extended partial vload that correctly handles scalar values as well.
207  * Load the **lower** elements 0 to (load_size - 1) of the given vector while minimising the number of load ops
208  * @name VLOAD_PARTIAL
209  *
210  * @note With this macro, the passed data can be both a vector and a scalar
211  * @note @p load_size needs to be <= @p size
212  * eg 1: Valid
213  * VLOAD_PARTIAL(16, 15) ...;
214  * eg 2: Invalid
215  * VLOAD_PARTIAL(4, 7) ...;
216  *
217  * @param[in] size The width of @p DATA. Supported values: 1(scalar), 2, 3, 4, 8, 16
218  * @param[in] load_size The number of lower elements to load. Supported values: 1-16, but has to be <= @p size
219  * @{
220  */
221 #define VLOAD_PARTIAL_STR(size, load_size) vload_partial_##size##_##load_size
222 #define VLOAD_PARTIAL(size, load_size) VLOAD_PARTIAL_STR(size, load_size)
223 
224 #define NO_LOAD(data, offs, ptr) \
225  { \
226  }
227 
228 // Size == 1 (scalar)
229 #define vload_partial_1_0 NO_LOAD
230 #define vload_partial_1_1 vload1
231 #define vload_partial_1_2 NO_LOAD
232 #define vload_partial_1_3 NO_LOAD
233 #define vload_partial_1_4 NO_LOAD
234 #define vload_partial_1_5 NO_LOAD
235 #define vload_partial_1_6 NO_LOAD
236 #define vload_partial_1_7 NO_LOAD
237 #define vload_partial_1_8 NO_LOAD
238 #define vload_partial_1_9 NO_LOAD
239 #define vload_partial_1_10 NO_LOAD
240 #define vload_partial_1_11 NO_LOAD
241 #define vload_partial_1_12 NO_LOAD
242 #define vload_partial_1_13 NO_LOAD
243 #define vload_partial_1_14 NO_LOAD
244 #define vload_partial_1_15 NO_LOAD
245 #define vload_partial_1_16 NO_LOAD
246 // Size == 2
247 #define vload_partial_2_0 NO_LOAD
248 #define vload_partial_2_1 vload_partial_1
249 #define vload_partial_2_2 vload_partial_2
250 #define vload_partial_2_3 NO_LOAD
251 #define vload_partial_2_4 NO_LOAD
252 #define vload_partial_2_5 NO_LOAD
253 #define vload_partial_2_6 NO_LOAD
254 #define vload_partial_2_7 NO_LOAD
255 #define vload_partial_2_8 NO_LOAD
256 #define vload_partial_2_9 NO_LOAD
257 #define vload_partial_2_10 NO_LOAD
258 #define vload_partial_2_11 NO_LOAD
259 #define vload_partial_2_12 NO_LOAD
260 #define vload_partial_2_13 NO_LOAD
261 #define vload_partial_2_14 NO_LOAD
262 #define vload_partial_2_15 NO_LOAD
263 #define vload_partial_2_16 NO_LOAD
264 // Size == 3
265 #define vload_partial_3_0 NO_LOAD
266 #define vload_partial_3_1 vload_partial_1
267 #define vload_partial_3_2 vload_partial_2
268 #define vload_partial_3_3 vload_partial_3
269 #define vload_partial_3_4 NO_LOAD
270 #define vload_partial_3_5 NO_LOAD
271 #define vload_partial_3_6 NO_LOAD
272 #define vload_partial_3_7 NO_LOAD
273 #define vload_partial_3_8 NO_LOAD
274 #define vload_partial_3_9 NO_LOAD
275 #define vload_partial_3_10 NO_LOAD
276 #define vload_partial_3_11 NO_LOAD
277 #define vload_partial_3_12 NO_LOAD
278 #define vload_partial_3_13 NO_LOAD
279 #define vload_partial_3_14 NO_LOAD
280 #define vload_partial_3_15 NO_LOAD
281 #define vload_partial_3_16 NO_LOAD
282 // Size == 4
283 #define vload_partial_4_0 NO_LOAD
284 #define vload_partial_4_1 vload_partial_1
285 #define vload_partial_4_2 vload_partial_2
286 #define vload_partial_4_3 vload_partial_3
287 #define vload_partial_4_4 vload_partial_4
288 #define vload_partial_4_5 NO_LOAD
289 #define vload_partial_4_6 NO_LOAD
290 #define vload_partial_4_7 NO_LOAD
291 #define vload_partial_4_8 NO_LOAD
292 #define vload_partial_4_9 NO_LOAD
293 #define vload_partial_4_10 NO_LOAD
294 #define vload_partial_4_11 NO_LOAD
295 #define vload_partial_4_12 NO_LOAD
296 #define vload_partial_4_13 NO_LOAD
297 #define vload_partial_4_14 NO_LOAD
298 #define vload_partial_4_15 NO_LOAD
299 #define vload_partial_4_16 NO_LOAD
300 // Size == 8
301 #define vload_partial_8_0 NO_LOAD
302 #define vload_partial_8_1 vload_partial_1
303 #define vload_partial_8_2 vload_partial_2
304 #define vload_partial_8_3 vload_partial_3
305 #define vload_partial_8_4 vload_partial_4
306 #define vload_partial_8_5 vload_partial_5
307 #define vload_partial_8_6 vload_partial_6
308 #define vload_partial_8_7 vload_partial_7
309 #define vload_partial_8_8 vload_partial_8
310 #define vload_partial_8_9 NO_LOAD
311 #define vload_partial_8_10 NO_LOAD
312 #define vload_partial_8_11 NO_LOAD
313 #define vload_partial_8_12 NO_LOAD
314 #define vload_partial_8_13 NO_LOAD
315 #define vload_partial_8_14 NO_LOAD
316 #define vload_partial_8_15 NO_LOAD
317 #define vload_partial_8_16 NO_LOAD
318 // Size == 16
319 #define vload_partial_16_0 NO_LOAD
320 #define vload_partial_16_1 vload_partial_1
321 #define vload_partial_16_2 vload_partial_2
322 #define vload_partial_16_3 vload_partial_3
323 #define vload_partial_16_4 vload_partial_4
324 #define vload_partial_16_5 vload_partial_5
325 #define vload_partial_16_6 vload_partial_6
326 #define vload_partial_16_7 vload_partial_7
327 #define vload_partial_16_8 vload_partial_8
328 #define vload_partial_16_9 vload_partial_9
329 #define vload_partial_16_10 vload_partial_10
330 #define vload_partial_16_11 vload_partial_11
331 #define vload_partial_16_12 vload_partial_12
332 #define vload_partial_16_13 vload_partial_13
333 #define vload_partial_16_14 vload_partial_14
334 #define vload_partial_16_15 vload_partial_15
335 #define vload_partial_16_16 vload_partial_16
336 
337 /** Partial vload. Load the **lower** elements 0 to (n - 1) of the given vector while minimising the number of vload ops
338  * @name vload_partial_n
339  *
340  * @note @p DATA needs to be a vector not a scalar
341  * @note n needs to be <= the vector width of the input variable @p DATA
342  * eg 1: Valid
343  * vload_partial_15(var:float16, 0, 0xabcd);
344  * eg 2: Invalid
345  * vload_partial_7(var:float4, 0, 0xabcd);
346  *
347  * @note in cases n == 1, 2, 3, 4, 8, 16, no extra vload is invoked, thus there's no performance penalty.
348  *
349  * @param[in] DATA The name of the variable where to load the values
350  * @param[in] OFFSET Offset in n
351  * @param[in] PTR The base pointer
352  * @{
353  */
354 #define vload_partial_1(DATA, OFFSET, PTR) \
355  DATA.s0 = vload1(OFFSET, PTR);
356 
357 #define vload_partial_2(DATA, OFFSET, PTR) \
358  DATA.s01 = vload2(OFFSET, PTR);
359 
360 #define vload_partial_3(DATA, OFFSET, PTR) \
361  DATA.s012 = vload3(OFFSET, PTR);
362 
363 #define vload_partial_4(DATA, OFFSET, PTR) \
364  DATA.s0123 = vload4(OFFSET, PTR);
365 
366 #define vload_partial_5(DATA, OFFSET, PTR) \
367  vload_partial_4(DATA.s0123, OFFSET, PTR); \
368  DATA.s4 = vload1(OFFSET, PTR + 4);
369 
370 #define vload_partial_6(DATA, OFFSET, PTR) \
371  vload_partial_4(DATA.s0123, OFFSET, PTR); \
372  vload_partial_2(DATA.s45, OFFSET, PTR + 4);
373 
374 #define vload_partial_7(DATA, OFFSET, PTR) \
375  vload_partial_4(DATA.s0123, OFFSET, PTR); \
376  vload_partial_3(DATA.s456, OFFSET, PTR + 4);
377 
378 #define vload_partial_8(DATA, OFFSET, PTR) \
379  DATA.s01234567 = vload8(OFFSET, PTR);
380 
381 #define vload_partial_9(DATA, OFFSET, PTR) \
382  vload_partial_8(DATA.s01234567, OFFSET, PTR); \
383  DATA.s8 = vload1(OFFSET, PTR + 8);
384 
385 #define vload_partial_10(DATA, OFFSET, PTR) \
386  vload_partial_8(DATA.s01234567, OFFSET, PTR); \
387  vload_partial_2(DATA.s89, OFFSET, PTR + 8);
388 
389 #define vload_partial_11(DATA, OFFSET, PTR) \
390  vload_partial_8(DATA.s01234567, OFFSET, PTR); \
391  vload_partial_3(DATA.s89A, OFFSET, PTR + 8);
392 
393 #define vload_partial_12(DATA, OFFSET, PTR) \
394  vload_partial_8(DATA.s01234567, OFFSET, PTR); \
395  vload_partial_4(DATA.s89AB, OFFSET, PTR + 8);
396 // For vload_partial_{13,14,15}, an 8-element swizzle is passed for the upper part because vector sizes of 5, 6 and 7 are not supported
397 #define vload_partial_13(DATA, OFFSET, PTR) \
398  vload_partial_8(DATA.s01234567, OFFSET, PTR); \
399  vload_partial_5(DATA.s89ABCDEF, OFFSET, PTR + 8);
400 
401 #define vload_partial_14(DATA, OFFSET, PTR) \
402  vload_partial_8(DATA.s01234567, OFFSET, PTR); \
403  vload_partial_6(DATA.s89ABCDEF, OFFSET, PTR + 8);
404 
405 #define vload_partial_15(DATA, OFFSET, PTR) \
406  vload_partial_8(DATA.s01234567, OFFSET, PTR); \
407  vload_partial_7(DATA.s89ABCDEF, OFFSET, PTR + 8);
408 
409 #define vload_partial_16(DATA, OFFSET, PTR) \
410  DATA = vload16(OFFSET, PTR);
411 /** @} */ // end of group vload_partial_n
412 /** @} */ // end of group VLOAD_PARTIAL
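/** Example (illustrative sketch; src is a hypothetical __global float pointer): load the 3 leftover
 * elements of a row into a float4, leaving the unused lane untouched.
 * @code
 * float4 data = (float4)0;
 * VLOAD_PARTIAL(4, 3)
 * (data, 0, src); // resolves to vload_partial_3, i.e. data.s012 = vload3(0, src);
 * @endcode
 */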
413 
414 #define PIXEL_UNIT4 1
415 #define PIXEL_UNIT8 2
416 #define PIXEL_UNIT16 4
417 
418 /** Utility macro to convert a vector size into pixel units.
419  *
420  * @name CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT
421  *
422  * @param[in] vec_size Vector size. Only 4, 8 and 16 are supported
423  *
424  * @return The pixel unit (number of pixels)
425  * @{
426  */
427 #define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size) PIXEL_UNIT##vec_size
428 #define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(vec_size) CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size)
429 /** @} */ // end of group CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT
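/** Example (illustrative sketch; VEC_SIZE is a hypothetical compile-time option): with 16 float
 * elements per iteration, each OpenCL image access covers 4 RGBA texels.
 * @code
 * #define VEC_SIZE 16
 * #define PIX_UNIT CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(VEC_SIZE) // evaluates to PIXEL_UNIT16, i.e. 4
 * @endcode
 */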
430 
431 #define read_image2d_floatx1(img, x_coord, y_coord) (float4)(read_imagef(img, (int2)(x_coord, y_coord)));
432 #define read_image2d_floatx2(img, x_coord, y_coord) (float8)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord)));
433 #define read_image2d_floatx4(img, x_coord, y_coord) (float16)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord)), read_imagef(img, (int2)(x_coord + 2, y_coord)), read_imagef(img, (int2)(x_coord + 3, y_coord)));
434 
435 #if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
436 #define read_image2d_halfx1(img, x_coord, y_coord) (half4)(read_imageh(img, (int2)(x_coord, y_coord)));
437 #define read_image2d_halfx2(img, x_coord, y_coord) (half8)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord)));
438 #define read_image2d_halfx4(img, x_coord, y_coord) (half16)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord)), read_imageh(img, (int2)(x_coord + 2, y_coord)), read_imageh(img, (int2)(x_coord + 3, y_coord)));
439 #endif // defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
440 
441 /** Utility macro to read a 2D OpenCL image object.
442  *
443  * @note Coordinates are not normalized
444  *
445  * @param[in] data_type Data type
446  * @param[in] n0 Number of pixels to read. Only 1, 2 and 4 are supported
447  * @param[in] img OpenCL image object
448  * @param[in] x_coord The x coordinate for the top-left pixel
449  * @param[in] y_coord The y coordinate for the top-left pixel
450  *
451  * @return Pixels from the 2D OpenCL image object
452  * @{
453  */
454 #define READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord) read_image2d_##data_type##x##n0(img, x_coord, y_coord)
455 #define READ_IMAGE2D(data_type, n0, img, x_coord, y_coord) READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord)
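/** Example (illustrative sketch; the kernel and variable names are hypothetical):
 * @code
 * __kernel void example_read(__read_only image2d_t src_img)
 * {
 *     const int x_px = get_global_id(0) * 4;
 *     const int y_px = get_global_id(1);
 *     // Read 4 consecutive RGBA FP32 texels starting at (x_px, y_px) into a float16
 *     float16 pixels = READ_IMAGE2D(float, 4, src_img, x_px, y_px);
 * }
 * @endcode
 */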
456 
457 #define VSTORE_STR(size) vstore##size
458 #define VSTORE(size) VSTORE_STR(size)
459 
460 #define float1 float
461 #define half1 half
462 #define char1 char
463 #define uchar1 uchar
464 #define short1 short
465 #define ushort1 ushort
466 #define int1 int
467 #define uint1 uint
468 #define long1 long
469 #define ulong1 ulong
470 #define double1 double
471 
472 #define vload1(OFFSET, PTR) *(OFFSET + PTR)
473 #define vstore1(DATA, OFFSET, PTR) *(OFFSET + PTR) = DATA
474 
475 /** Extended partial vstore that correctly handles scalar values as well.
476  * Store the **lower** elements 0 to (store_size - 1) of the given vector while minimising the number of vstore ops
477  * @name VSTORE_PARTIAL
478  *
479  * @note With this macro, the passed data can be both a vector and a scalar
480  * @note @p store_size needs to be <= @p size
481  * eg 1: Valid
482  * VSTORE_PARTIAL(16, 15) ...;
483  * eg 2: Invalid
484  * VSTORE_PARTIAL(4, 7) ...;
485  *
486  * @param[in] size The width of @p DATA. Supported values: 1(scalar), 2, 3, 4, 8, 16
487  * @param[in] store_size The number of lower elements to store. Supported values: 1-16, but has to be <= @p size
488  * @{
489  */
490 #define VSTORE_PARTIAL_STR(size, store_size) vstore_partial_##size##_##store_size
491 #define VSTORE_PARTIAL(size, store_size) VSTORE_PARTIAL_STR(size, store_size)
492 
493 #define NO_STORE(data, offs, ptr) \
494  { \
495  }
496 
497 // Size == 1 (scalar)
498 #define vstore_partial_1_0 NO_STORE
499 #define vstore_partial_1_1 vstore1
500 #define vstore_partial_1_2 NO_STORE
501 #define vstore_partial_1_3 NO_STORE
502 #define vstore_partial_1_4 NO_STORE
503 #define vstore_partial_1_5 NO_STORE
504 #define vstore_partial_1_6 NO_STORE
505 #define vstore_partial_1_7 NO_STORE
506 #define vstore_partial_1_8 NO_STORE
507 #define vstore_partial_1_9 NO_STORE
508 #define vstore_partial_1_10 NO_STORE
509 #define vstore_partial_1_11 NO_STORE
510 #define vstore_partial_1_12 NO_STORE
511 #define vstore_partial_1_13 NO_STORE
512 #define vstore_partial_1_14 NO_STORE
513 #define vstore_partial_1_15 NO_STORE
514 #define vstore_partial_1_16 NO_STORE
515 // Size == 2
516 #define vstore_partial_2_0 NO_STORE
517 #define vstore_partial_2_1 vstore_partial_1
518 #define vstore_partial_2_2 vstore_partial_2
519 #define vstore_partial_2_3 NO_STORE
520 #define vstore_partial_2_4 NO_STORE
521 #define vstore_partial_2_5 NO_STORE
522 #define vstore_partial_2_6 NO_STORE
523 #define vstore_partial_2_7 NO_STORE
524 #define vstore_partial_2_8 NO_STORE
525 #define vstore_partial_2_9 NO_STORE
526 #define vstore_partial_2_10 NO_STORE
527 #define vstore_partial_2_11 NO_STORE
528 #define vstore_partial_2_12 NO_STORE
529 #define vstore_partial_2_13 NO_STORE
530 #define vstore_partial_2_14 NO_STORE
531 #define vstore_partial_2_15 NO_STORE
532 #define vstore_partial_2_16 NO_STORE
533 // Size == 3
534 #define vstore_partial_3_0 NO_STORE
535 #define vstore_partial_3_1 vstore_partial_1
536 #define vstore_partial_3_2 vstore_partial_2
537 #define vstore_partial_3_3 vstore_partial_3
538 #define vstore_partial_3_4 NO_STORE
539 #define vstore_partial_3_5 NO_STORE
540 #define vstore_partial_3_6 NO_STORE
541 #define vstore_partial_3_7 NO_STORE
542 #define vstore_partial_3_8 NO_STORE
543 #define vstore_partial_3_9 NO_STORE
544 #define vstore_partial_3_10 NO_STORE
545 #define vstore_partial_3_11 NO_STORE
546 #define vstore_partial_3_12 NO_STORE
547 #define vstore_partial_3_13 NO_STORE
548 #define vstore_partial_3_14 NO_STORE
549 #define vstore_partial_3_15 NO_STORE
550 #define vstore_partial_3_16 NO_STORE
551 // Size == 4
552 #define vstore_partial_4_0 NO_STORE
553 #define vstore_partial_4_1 vstore_partial_1
554 #define vstore_partial_4_2 vstore_partial_2
555 #define vstore_partial_4_3 vstore_partial_3
556 #define vstore_partial_4_4 vstore_partial_4
557 #define vstore_partial_4_5 NO_STORE
558 #define vstore_partial_4_6 NO_STORE
559 #define vstore_partial_4_7 NO_STORE
560 #define vstore_partial_4_8 NO_STORE
561 #define vstore_partial_4_9 NO_STORE
562 #define vstore_partial_4_10 NO_STORE
563 #define vstore_partial_4_11 NO_STORE
564 #define vstore_partial_4_12 NO_STORE
565 #define vstore_partial_4_13 NO_STORE
566 #define vstore_partial_4_14 NO_STORE
567 #define vstore_partial_4_15 NO_STORE
568 #define vstore_partial_4_16 NO_STORE
569 // Size == 8
570 #define vstore_partial_8_0 NO_STORE
571 #define vstore_partial_8_1 vstore_partial_1
572 #define vstore_partial_8_2 vstore_partial_2
573 #define vstore_partial_8_3 vstore_partial_3
574 #define vstore_partial_8_4 vstore_partial_4
575 #define vstore_partial_8_5 vstore_partial_5
576 #define vstore_partial_8_6 vstore_partial_6
577 #define vstore_partial_8_7 vstore_partial_7
578 #define vstore_partial_8_8 vstore_partial_8
579 #define vstore_partial_8_9 NO_STORE
580 #define vstore_partial_8_10 NO_STORE
581 #define vstore_partial_8_11 NO_STORE
582 #define vstore_partial_8_12 NO_STORE
583 #define vstore_partial_8_13 NO_STORE
584 #define vstore_partial_8_14 NO_STORE
585 #define vstore_partial_8_15 NO_STORE
586 #define vstore_partial_8_16 NO_STORE
587 // Size == 16
588 #define vstore_partial_16_0 NO_STORE
589 #define vstore_partial_16_1 vstore_partial_1
590 #define vstore_partial_16_2 vstore_partial_2
591 #define vstore_partial_16_3 vstore_partial_3
592 #define vstore_partial_16_4 vstore_partial_4
593 #define vstore_partial_16_5 vstore_partial_5
594 #define vstore_partial_16_6 vstore_partial_6
595 #define vstore_partial_16_7 vstore_partial_7
596 #define vstore_partial_16_8 vstore_partial_8
597 #define vstore_partial_16_9 vstore_partial_9
598 #define vstore_partial_16_10 vstore_partial_10
599 #define vstore_partial_16_11 vstore_partial_11
600 #define vstore_partial_16_12 vstore_partial_12
601 #define vstore_partial_16_13 vstore_partial_13
602 #define vstore_partial_16_14 vstore_partial_14
603 #define vstore_partial_16_15 vstore_partial_15
604 #define vstore_partial_16_16 vstore_partial_16
605 
606 /** Partial vstore. Store the **lower** elements 0 to (n - 1) of the given vector while minimising the number of vstore ops
607  * @name vstore_partial_n
608  *
609  * @note @p DATA needs to be a vector not a scalar
610  * @note n needs to be <= the vector width of the input variable @p DATA
611  * eg 1: Valid
612  * vstore_partial_15(var:float16, 0, 0xabcd);
613  * eg 2: Invalid
614  * vstore_partial_7(var:float4, 0, 0xabcd);
615  *
616  * @note in cases n == 1, 2, 3, 4, 8, 16, no extra vstore is invoked, thus there's no performance penalty.
617  *
618  * @param[in] DATA The name of the variable
619  * @param[in] OFFSET Offset in n
620  * @param[in] PTR The base pointer
621  * @{
622  */
623 #define vstore_partial_1(DATA, OFFSET, PTR) \
624  vstore1(DATA.s0, OFFSET, PTR);
625 
626 #define vstore_partial_2(DATA, OFFSET, PTR) \
627  vstore2(DATA.s01, OFFSET, PTR);
628 
629 #define vstore_partial_3(DATA, OFFSET, PTR) \
630  vstore3(DATA.s012, OFFSET, PTR);
631 
632 #define vstore_partial_4(DATA, OFFSET, PTR) \
633  vstore4(DATA.s0123, OFFSET, PTR);
634 
635 #define vstore_partial_5(DATA, OFFSET, PTR) \
636  vstore_partial_4(DATA.s0123, OFFSET, PTR); \
637  vstore1(DATA.s4, OFFSET, PTR + 4);
638 
639 #define vstore_partial_6(DATA, OFFSET, PTR) \
640  vstore_partial_4(DATA.s0123, OFFSET, PTR); \
641  vstore_partial_2(DATA.s45, OFFSET, PTR + 4);
642 
643 #define vstore_partial_7(DATA, OFFSET, PTR) \
644  vstore_partial_4(DATA.s0123, OFFSET, PTR); \
645  vstore_partial_3(DATA.s456, OFFSET, PTR + 4);
646 
647 #define vstore_partial_8(DATA, OFFSET, PTR) \
648  vstore8(DATA.s01234567, OFFSET, PTR);
649 
650 #define vstore_partial_9(DATA, OFFSET, PTR) \
651  vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
652  vstore1(DATA.s8, OFFSET, PTR + 8);
653 
654 #define vstore_partial_10(DATA, OFFSET, PTR) \
655  vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
656  vstore_partial_2(DATA.s89, OFFSET, PTR + 8);
657 
658 #define vstore_partial_11(DATA, OFFSET, PTR) \
659  vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
660  vstore_partial_3(DATA.s89a, OFFSET, PTR + 8);
661 
662 #define vstore_partial_12(DATA, OFFSET, PTR) \
663  vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
664  vstore_partial_4(DATA.s89ab, OFFSET, PTR + 8);
665 
666 #define vstore_partial_13(DATA, OFFSET, PTR) \
667  vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
668  vstore_partial_5(DATA.s89abcdef, OFFSET, PTR + 8);
669 
670 #define vstore_partial_14(DATA, OFFSET, PTR) \
671  vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
672  vstore_partial_6(DATA.s89abcdef, OFFSET, PTR + 8);
673 
674 #define vstore_partial_15(DATA, OFFSET, PTR) \
675  vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
676  vstore_partial_7(DATA.s89abcdef, OFFSET, PTR + 8);
677 
678 #define vstore_partial_16(DATA, OFFSET, PTR) \
679  vstore16(DATA, OFFSET, PTR);
680 /** @} */ // end of group vstore_partial_n
681 /** @} */ // end of group VSTORE_PARTIAL
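/** Example (illustrative sketch; acc, dst and LEFTOVER are hypothetical; LEFTOVER would be a
 * compile-time constant such as -DLEFTOVER=5): handle the right-hand edge of a row where fewer
 * than 16 results remain.
 * @code
 * float16 acc = (float16)0;
 * #if LEFTOVER != 0
 * VSTORE_PARTIAL(16, LEFTOVER)
 * (acc, 0, dst); // stores only the lower LEFTOVER elements
 * #else  // LEFTOVER != 0
 * VSTORE(16)
 * (acc, 0, dst);
 * #endif // LEFTOVER != 0
 * @endcode
 */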
682 
683 // The convert_* built-in functions do not support the _sat modifier for floating-point destination types,
684 // so we define aliases without _sat to work around this
685 #define convert_float_sat convert_float
686 #define convert_float1_sat convert_float
687 #define convert_float2_sat convert_float2
688 #define convert_float3_sat convert_float3
689 #define convert_float4_sat convert_float4
690 #define convert_float8_sat convert_float8
691 #define convert_float16_sat convert_float16
692 #define convert_half_sat convert_float
693 #define convert_half1_sat convert_half
694 #define convert_half2_sat convert_half2
695 #define convert_half3_sat convert_half3
696 #define convert_half4_sat convert_half4
697 #define convert_half8_sat convert_half8
698 #define convert_half16_sat convert_half16
699 
700 #define convert_float1 convert_float
701 #define convert_half1 convert_half
702 #define convert_char1 convert_char
703 #define convert_uchar1 convert_uchar
704 #define convert_short1 convert_short
705 #define convert_ushort1 convert_ushort
706 #define convert_int1 convert_int
707 #define convert_uint1 convert_uint
708 #define convert_long1 convert_long
709 #define convert_ulong1 convert_ulong
710 #define convert_double1 convert_double
711 
712 #define convert_char1_sat convert_char_sat
713 #define convert_uchar1_sat convert_uchar_sat
714 #define convert_uchar2_sat convert_uchar2_sat
715 #define convert_uchar3_sat convert_uchar3_sat
716 #define convert_uchar4_sat convert_uchar4_sat
717 #define convert_uchar8_sat convert_uchar8_sat
718 #define convert_uchar16_sat convert_uchar16_sat
719 #define convert_short1_sat convert_short_sat
720 #define convert_ushort1_sat convert_ushort_sat
721 #define convert_int1_sat convert_int_sat
722 #define convert_uint1_sat convert_uint_sat
723 #define convert_long1_sat convert_long_sat
724 #define convert_ulong1_sat convert_ulong_sat
725 #define convert_double1_sat convert_double_sat
726 
727 #define VEC_DATA_TYPE_STR(type, size) type##size
728 #define VEC_DATA_TYPE(type, size) VEC_DATA_TYPE_STR(type, size)
729 
730 #define CONVERT_STR(x, type) (convert_##type((x)))
731 #define CONVERT(x, type) CONVERT_STR(x, type)
732 
733 #define CONVERT_SAT_STR(x, type) (convert_##type##_sat((x)))
734 #define CONVERT_SAT(x, type) CONVERT_SAT_STR(x, type)
735 
736 #define CONVERT_SAT_ROUND_STR(x, type, round) (convert_##type##_sat_##round((x)))
737 #define CONVERT_SAT_ROUND(x, type, round) CONVERT_SAT_ROUND_STR(x, type, round)
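/** Example (illustrative sketch; the variable names are hypothetical):
 * @code
 * float4 acc = (float4)(-1.5f, 0.0f, 127.2f, 300.0f);
 * // Saturating conversion to an unsigned 8-bit vector: expands to convert_uchar4_sat(acc)
 * VEC_DATA_TYPE(uchar, 4) q = CONVERT_SAT(acc, VEC_DATA_TYPE(uchar, 4)); // (0, 0, 127, 255)
 * @endcode
 */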
738 
739 #define select_vec_dt_uchar(size) uchar##size
740 #define select_vec_dt_char(size) char##size
741 #define select_vec_dt_ushort(size) ushort##size
742 #define select_vec_dt_short(size) short##size
743 #define select_vec_dt_half(size) short##size
744 #define select_vec_dt_uint(size) uint##size
745 #define select_vec_dt_int(size) int##size
746 #define select_vec_dt_float(size) int##size
747 #define select_vec_dt_ulong(size) ulong##size
748 #define select_vec_dt_long(size) long##size
749 
750 #define SELECT_VEC_DATA_TYPE_STR(type, size) select_vec_dt_##type(size)
751 #define SELECT_VEC_DATA_TYPE(type, size) SELECT_VEC_DATA_TYPE_STR(type, size)
752 #define SELECT_DATA_TYPE(type) SELECT_VEC_DATA_TYPE_STR(type, 1)
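/** Example (illustrative sketch; the variable names are hypothetical): select() needs an integer
 * mask of the same width and element size as its operands, which these macros provide.
 * @code
 * float4 a = (float4)1.0f;
 * float4 b = (float4)2.0f;
 * SELECT_VEC_DATA_TYPE(float, 4) cond = (int4)(-1, 0, -1, 0); // expands to int4
 * float4 r = select(a, b, cond); // (2.0f, 1.0f, 2.0f, 1.0f)
 * @endcode
 */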
753 
754 #define signed_int_vec_dt_uchar(size) char##size
755 #define signed_int_vec_dt_char(size) char##size
756 #define signed_int_vec_dt_ushort(size) short##size
757 #define signed_int_vec_dt_short(size) short##size
758 #define signed_int_vec_dt_half(size) short##size
759 #define signed_int_vec_dt_uint(size) int##size
760 #define signed_int_vec_dt_int(size) int##size
761 #define signed_int_vec_dt_float(size) int##size
762 #define signed_int_vec_dt_ulong(size) long##size
763 #define signed_int_vec_dt_long(size) long##size
764 
765 #define SIGNED_INT_VEC_DATA_TYPE_STR(type, size) signed_int_vec_dt_##type(size)
766 #define SIGNED_INT_VEC_DATA_TYPE(type, size) SIGNED_INT_VEC_DATA_TYPE_STR(type, size)
767 #define SIGNED_INT_DATA_TYPE(type) SIGNED_INT_VEC_DATA_TYPE_STR(type, 1)
768 
769 #define sum_reduce_1(x) (x)
770 #define sum_reduce_2(x) ((x).s0) + ((x).s1)
771 #define sum_reduce_3(x) sum_reduce_2((x).s01) + ((x).s2)
772 #define sum_reduce_4(x) sum_reduce_2((x).s01) + sum_reduce_2((x).s23)
773 #define sum_reduce_8(x) sum_reduce_4((x).s0123) + sum_reduce_4((x).s4567)
774 #define sum_reduce_16(x) sum_reduce_8((x).s01234567) + sum_reduce_8((x).s89ABCDEF)
775 
776 #define SUM_REDUCE_STR(x, size) sum_reduce_##size(x)
777 #define SUM_REDUCE(x, size) SUM_REDUCE_STR(x, size)
778 
779 #define prod_reduce_1(x) (x)
780 #define prod_reduce_2(x) ((x).s0) * ((x).s1)
781 #define prod_reduce_3(x) prod_reduce_2((x).s01) * ((x).s2)
782 #define prod_reduce_4(x) prod_reduce_2((x).s01) * prod_reduce_2((x).s23)
783 #define prod_reduce_8(x) prod_reduce_4((x).s0123) * prod_reduce_4((x).s4567)
784 #define prod_reduce_16(x) prod_reduce_8((x).s01234567) * prod_reduce_8((x).s89ABCDEF)
785 
786 #define PROD_REDUCE_STR(x, size) prod_reduce_##size(x)
787 #define PROD_REDUCE(x, size) PROD_REDUCE_STR(x, size)
788 
789 #define max_reduce_1(x) (x)
790 #define max_reduce_2(x) max(((x).s0), ((x).s1))
791 #define max_reduce_3(x) max(max_reduce_2((x).s01), ((x).s2))
792 #define max_reduce_4(x) max(max_reduce_2((x).s01), max_reduce_2((x).s23))
793 #define max_reduce_8(x) max(max_reduce_4((x).s0123), max_reduce_4((x).s4567))
794 #define max_reduce_16(x) max(max_reduce_8((x).s01234567), max_reduce_8((x).s89ABCDEF))
795 
796 #define MAX_REDUCE_STR(x, size) max_reduce_##size(x)
797 #define MAX_REDUCE(x, size) MAX_REDUCE_STR(x, size)
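/** Example (illustrative sketch; the variable names are hypothetical):
 * @code
 * float4 v = (float4)(1.0f, 2.0f, 3.0f, 4.0f);
 * float s = SUM_REDUCE(v, 4);  // 1 + 2 + 3 + 4 = 10
 * float p = PROD_REDUCE(v, 4); // 1 * 2 * 3 * 4 = 24
 * float m = MAX_REDUCE(v, 4);  // 4
 * @endcode
 */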
798 
799 #define VECTOR_DECLARATION(name) \
800  __global uchar *name##_ptr, \
801  uint name##_stride_x, \
802  uint name##_step_x, \
803  uint name##_offset_first_element_in_bytes
804 
805 #define IMAGE_DECLARATION(name) \
806  __global uchar *name##_ptr, \
807  uint name##_stride_x, \
808  uint name##_step_x, \
809  uint name##_stride_y, \
810  uint name##_step_y, \
811  uint name##_offset_first_element_in_bytes
812 
813 #define TENSOR3D_DECLARATION(name) \
814  __global uchar *name##_ptr, \
815  uint name##_stride_x, \
816  uint name##_step_x, \
817  uint name##_stride_y, \
818  uint name##_step_y, \
819  uint name##_stride_z, \
820  uint name##_step_z, \
821  uint name##_offset_first_element_in_bytes
822 
823 #define TENSOR4D_DECLARATION(name) \
824  __global uchar *name##_ptr, \
825  uint name##_stride_x, \
826  uint name##_step_x, \
827  uint name##_stride_y, \
828  uint name##_step_y, \
829  uint name##_stride_z, \
830  uint name##_step_z, \
831  uint name##_stride_w, \
832  uint name##_step_w, \
833  uint name##_offset_first_element_in_bytes
834 
835 #define TENSOR5D_DECLARATION(name) \
836  __global uchar *name##_ptr, \
837  uint name##_stride_x, \
838  uint name##_step_x, \
839  uint name##_stride_y, \
840  uint name##_step_y, \
841  uint name##_stride_z, \
842  uint name##_step_z, \
843  uint name##_stride_w, \
844  uint name##_step_w, \
845  uint name##_stride_v, \
846  uint name##_step_v, \
847  uint name##_offset_first_element_in_bytes
848 
849 #define CONVERT_TO_VECTOR_STRUCT(name) \
850  update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x)
851 
852 #define CONVERT_TO_VECTOR_STRUCT_NO_STEP(name) \
853  update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0)
854 
855 #define CONVERT_TO_IMAGE_STRUCT(name) \
856  update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y)
857 
858 #define CONVERT_TO_IMAGE_STRUCT_NO_STEP(name) \
859  update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0)
860 
861 #define CONVERT_TENSOR3D_TO_IMAGE_STRUCT(name) \
862  update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, name##_stride_z, name##_step_z)
863 
864 #define CONVERT_TENSOR3D_TO_IMAGE_STRUCT_NO_STEP(name) \
865  update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, name##_step_z)
866 
870 #define CONVERT_TO_TENSOR3D_STRUCT(name) \
871  update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
872  name##_stride_z, name##_step_z)
873 
874 #define CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(name) \
875  update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0)
876 
877 #define CONVERT_TO_TENSOR4D_STRUCT(name, mod_size) \
878  update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
879  name##_stride_z, name##_step_z, name##_stride_w, name##_step_w, mod_size)
880 
881 #define CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(name, mod_size) \
882  update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0, name##_stride_w, 0, mod_size)
883 
884 #define CONVERT_TO_TENSOR3D_STRUCT_NO_UPDATE_PTR(name) \
885  tensor3D_ptr_no_update(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
886  name##_stride_z, name##_step_z)
887 
888 /** Structure to hold Vector information */
889 typedef struct Vector
890 {
891  __global uchar *ptr; /**< Pointer to the starting position of the buffer */
892  int offset_first_element_in_bytes; /**< The offset of the first element in the source vector */
893  int stride_x; /**< Stride of the vector in X dimension (in bytes) */
894 } Vector;
895 
896 /** Structure to hold Image information */
897 typedef struct Image
898 {
899  __global uchar *ptr; /**< Pointer to the starting position of the buffer */
900  int offset_first_element_in_bytes; /**< The offset of the first element in the source image */
901  int stride_x; /**< Stride of the image in X dimension (in bytes) */
902  int stride_y; /**< Stride of the image in Y dimension (in bytes) */
903 } Image;
904 
905 /** Structure to hold 3D tensor information */
906 typedef struct Tensor3D
907 {
908  __global uchar *ptr; /**< Pointer to the starting position of the buffer */
909  int offset_first_element_in_bytes; /**< The offset of the first element in the source tensor */
910  int stride_x; /**< Stride of the tensor in X dimension (in bytes) */
911  int stride_y; /**< Stride of the tensor in Y dimension (in bytes) */
912  int stride_z; /**< Stride of the tensor in Z dimension (in bytes) */
913 } Tensor3D;
914 
915 /** Structure to hold 4D tensor information */
916 typedef struct Tensor4D
917 {
918  __global uchar *ptr; /**< Pointer to the starting position of the buffer */
919  int offset_first_element_in_bytes; /**< The offset of the first element in the source tensor */
920  int stride_x; /**< Stride of the tensor in X dimension (in bytes) */
921  int stride_y; /**< Stride of the tensor in Y dimension (in bytes) */
922  int stride_z; /**< Stride of the tensor in Z dimension (in bytes) */
923  int stride_w; /**< Stride of the tensor in W dimension (in bytes) */
924 } Tensor4D;
925 
926 /** Wrap vector information into a Vector structure, and make the pointer point at this workitem's data.
927  *
928  * @param[in] ptr Pointer to the starting position of the buffer
929  * @param[in] offset_first_element_in_bytes The offset of the first element in the source vector
930  * @param[in] stride_x Stride of the vector in X dimension (in bytes)
931  * @param[in] step_x stride_x * number of elements along X processed per workitem (in bytes)
932  *
933  * @return A vector object
934  */
935 inline Vector update_vector_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x)
936 {
937  Vector vector =
938  {
939  .ptr = ptr,
940  .offset_first_element_in_bytes = offset_first_element_in_bytes,
941  .stride_x = stride_x,
942  };
943  vector.ptr += vector.offset_first_element_in_bytes + get_global_id(0) * step_x;
944  return vector;
945 }
946 
947 /** Wrap image information into an Image structure, and make the pointer point at this workitem's data.
948  *
949  * @param[in] ptr Pointer to the starting position of the buffer
950  * @param[in] offset_first_element_in_bytes The offset of the first element in the source image
951  * @param[in] stride_x Stride of the image in X dimension (in bytes)
952  * @param[in] step_x stride_x * number of elements along X processed per workitem(in bytes)
953  * @param[in] stride_y Stride of the image in Y dimension (in bytes)
954  * @param[in] step_y stride_y * number of elements along Y processed per workitem(in bytes)
955  *
956  * @return An image object
957  */
958 inline Image update_image_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y)
959 {
960  Image img =
961  {
962  .ptr = ptr,
963  .offset_first_element_in_bytes = offset_first_element_in_bytes,
964  .stride_x = stride_x,
965  .stride_y = stride_y
966  };
967  img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y;
968  return img;
969 }
970 
971 /** Wrap 3D tensor information into an image structure, and make the pointer point at this workitem's data.
972  *
973  * @param[in] ptr Pointer to the starting position of the buffer
974  * @param[in] offset_first_element_in_bytes The offset of the first element in the source image
975  * @param[in] stride_x Stride of the image in X dimension (in bytes)
976  * @param[in] step_x stride_x * number of elements along X processed per workitem(in bytes)
977  * @param[in] stride_y Stride of the image in Y dimension (in bytes)
978  * @param[in] step_y stride_y * number of elements along Y processed per workitem(in bytes)
979  * @param[in] stride_z Stride of the image in Z dimension (in bytes)
980  * @param[in] step_z stride_z * number of elements along Z processed per workitem(in bytes)
981  *
982  * @return An image object
983  */
984 inline Image update_image_from_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
985 {
986  Image img =
987  {
988  .ptr = ptr,
989  .offset_first_element_in_bytes = offset_first_element_in_bytes,
990  .stride_x = stride_x,
991  .stride_y = stride_y
992  };
993  img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z;
994  return img;
995 }
996 
997 /** Wrap 3D tensor information into a tensor structure, and make the pointer point at this workitem's data.
998  *
999  * @param[in] ptr Pointer to the starting position of the buffer
1000  * @param[in] offset_first_element_in_bytes The offset of the first element in the source image
1001  * @param[in] stride_x Stride of the image in X dimension (in bytes)
1002  * @param[in] step_x stride_x * number of elements along X processed per workitem(in bytes)
1003  * @param[in] stride_y Stride of the image in Y dimension (in bytes)
1004  * @param[in] step_y stride_y * number of elements along Y processed per workitem(in bytes)
1005  * @param[in] stride_z Stride of the image in Z dimension (in bytes)
1006  * @param[in] step_z stride_z * number of elements along Z processed per workitem(in bytes)
1007  *
1008  * @return A 3D tensor object
1009  */
1010 inline Tensor3D update_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
1011 {
1012  Tensor3D tensor =
1013  {
1014  .ptr = ptr,
1015  .offset_first_element_in_bytes = offset_first_element_in_bytes,
1016  .stride_x = stride_x,
1017  .stride_y = stride_y,
1018  .stride_z = stride_z
1019  };
1020  tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z;
1021  return tensor;
1022 }
1023 
1024 /** Wrap 3D tensor information into a tensor structure, without updating the pointer.
1025  *
1026  * @param[in] ptr Pointer to the starting position of the buffer
1027  * @param[in] offset_first_element_in_bytes The offset of the first element in the source image
1028  * @param[in] stride_x Stride of the image in X dimension (in bytes)
1029  * @param[in] step_x stride_x * number of elements along X processed per workitem(in bytes)
1030  * @param[in] stride_y Stride of the image in Y dimension (in bytes)
1031  * @param[in] step_y stride_y * number of elements along Y processed per workitem(in bytes)
1032  * @param[in] stride_z Stride of the image in Z dimension (in bytes)
1033  * @param[in] step_z stride_z * number of elements along Z processed per workitem(in bytes)
1034  *
1035  * @return A 3D tensor object
1036  */
1037 inline Tensor3D tensor3D_ptr_no_update(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
1038 {
1039  Tensor3D tensor =
1040  {
1041  .ptr = ptr,
1042  .offset_first_element_in_bytes = offset_first_element_in_bytes,
1043  .stride_x = stride_x,
1044  .stride_y = stride_y,
1045  .stride_z = stride_z
1046  };
1047  return tensor;
1048 }
1049 
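/** Wrap 4D tensor information into a Tensor4D structure, and make the pointer point at this workitem's data.
 *
 * The third dispatch dimension is shared between Z and W: get_global_id(2) % mod_size selects the Z slice
 * and get_global_id(2) / mod_size selects the W (batch) index.
 *
 * @param[in] ptr Pointer to the starting position of the buffer
 * @param[in] offset_first_element_in_bytes The offset of the first element in the source tensor
 * @param[in] stride_x Stride of the tensor in X dimension (in bytes)
 * @param[in] step_x stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in] stride_y Stride of the tensor in Y dimension (in bytes)
 * @param[in] step_y stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in] stride_z Stride of the tensor in Z dimension (in bytes)
 * @param[in] step_z stride_z * number of elements along Z processed per workitem (in bytes)
 * @param[in] stride_w Stride of the tensor in W dimension (in bytes)
 * @param[in] step_w stride_w * number of elements along W processed per workitem (in bytes)
 * @param[in] mod_size Number of Z slices per batch, used to split the third dispatch dimension into Z and W
 *
 * @return A 4D tensor object
 */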
1050 inline Tensor4D update_tensor4D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z, uint stride_w,
1051  uint step_w,
1052  uint mod_size)
1053 {
1054  Tensor4D tensor =
1055  {
1056  .ptr = ptr,
1057  .offset_first_element_in_bytes = offset_first_element_in_bytes,
1058  .stride_x = stride_x,
1059  .stride_y = stride_y,
1060  .stride_z = stride_z,
1061  .stride_w = stride_w
1062  };
1063 
1064  tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + (get_global_id(2) % mod_size) * step_z + (get_global_id(2) / mod_size) * step_w;
1065  return tensor;
1066 }
1067 
1068 /** Get the pointer position of a Vector
1069  *
1070  * @param[in] vec Pointer to the Vector structure
1071  * @param[in] x Relative X position
1072  */
1073 inline __global const uchar *vector_offset(const Vector *vec, int x)
1074 {
1075  return vec->ptr + x * vec->stride_x;
1076 }
1077 
1078 /** Get the pointer position of an Image
1079  *
1080  * @param[in] img Pointer to the Image structure
1081  * @param[in] x Relative X position
1082  * @param[in] y Relative Y position
1083  */
1084 inline __global uchar *offset(const Image *img, int x, int y)
1085 {
1086  return img->ptr + x * img->stride_x + y * img->stride_y;
1087 }
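/** Example (illustrative sketch; the kernel and variable names are hypothetical): a per-workitem
 * copy using the Image declaration and conversion macros together with offset().
 * @code
 * __kernel void example_copy_f32(IMAGE_DECLARATION(src), IMAGE_DECLARATION(dst))
 * {
 *     Image src_img = CONVERT_TO_IMAGE_STRUCT(src);
 *     Image dst_img = CONVERT_TO_IMAGE_STRUCT(dst);
 *     *((__global float *)dst_img.ptr) = *((__global float *)offset(&src_img, 0, 0));
 * }
 * @endcode
 */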
1088 
1089 /** Get the pointer position of a Tensor3D
1090  *
1091  * @param[in] tensor Pointer to the Tensor3D structure
1092  * @param[in] x Relative X position
1093  * @param[in] y Relative Y position
1094  * @param[in] z Relative Z position
1095  */
1096 inline __global const uchar *tensor3D_offset(const Tensor3D *tensor, int x, int y, int z)
1097 {
1098  return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z;
1099 }
1100 
1101 /** Get the pointer position of a Tensor4D
1102  *
1103  * @param[in] tensor Pointer to the Tensor4D structure
1104  * @param[in] x Relative X position
1105  * @param[in] y Relative Y position
1106  * @param[in] z Relative Z position
1107  * @param[in] w Relative W position
1108  */
1109 inline __global const uchar *tensor4D_offset(const Tensor4D *tensor, int x, int y, int z, int w)
1110 {
1111  return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + w * tensor->stride_w;
1112 }
1113 
1114 /** Get the offset for a given linear index of a Tensor3D
1115  *
1116  * @param[in] tensor Pointer to the Tensor3D structure
1117  * @param[in] width Width of the input tensor
1118  * @param[in] height Height of the input tensor
1119  * @param[in] depth Depth of the input tensor
1120  * @param[in] index Linear index
1121  */
1122 inline __global const uchar *tensor3D_index2ptr(const Tensor3D *tensor, uint width, uint height, uint depth, uint index)
1123 {
1124  uint num_elements = width * height;
1125 
1126  const uint z = index / num_elements;
1127 
1128  index %= num_elements;
1129 
1130  const uint y = index / width;
1131 
1132  index %= width;
1133 
1134  const uint x = index;
1135 
1136  return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + tensor->offset_first_element_in_bytes;
1137 }
1138 
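/** Example (illustrative sketch; the kernel and variable names are hypothetical): an element-wise
 * kernel wiring the TENSOR3D_DECLARATION and CONVERT_TO_TENSOR3D_STRUCT helpers together.
 * @code
 * __kernel void example_scale_f32(TENSOR3D_DECLARATION(src), TENSOR3D_DECLARATION(dst), float alpha)
 * {
 *     Tensor3D src_t = CONVERT_TO_TENSOR3D_STRUCT(src);
 *     Tensor3D dst_t = CONVERT_TO_TENSOR3D_STRUCT(dst);
 *     *((__global float *)dst_t.ptr) = *((__global const float *)src_t.ptr) * alpha;
 * }
 * @endcode
 */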
1139 #endif // ARM_COMPUTE_HELPER_H