/*
 * Quantized (asymmetric 8-bit) direct convolution kernel.
 *
 * Build-time configuration (passed as -D compile options):
 *   DATA_TYPE          - element type of src/weights/dst (e.g. uchar)
 *   KERNEL_SIZE        - square kernel side: 1, 3, 5 or 9
 *   STRIDE_X           - horizontal stride: 1 or 2 (1, 2 or 3 when KERNEL_SIZE == 1)
 *   WEIGHTS_DEPTH      - number of input channels accumulated per output element
 *   INPUT_OFFSET       - quantization zero-point term applied to input values
 *   WEIGHTS_OFFSET     - quantization zero-point term applied to weight values
 *   OUTPUT_OFFSET      - quantization zero-point added to the requantized output
 *   OUTPUT_MULTIPLIER / OUTPUT_SHIFT - fixed-point requantization parameters
 */

#undef CONVERT_SAT_STR
#undef CONVERT_SAT

#if defined(DATA_TYPE) && defined(STRIDE_X) && defined(WEIGHTS_DEPTH) && defined(OUTPUT_MULTIPLIER) && defined(OUTPUT_SHIFT)

/* Saturating conversion of the int8 accumulator to 8 elements of DATA_TYPE. */
#define CONVERT_SAT_STR(x, type) (convert_##type##8_sat((x)))
#define CONVERT_SAT(x, type) CONVERT_SAT_STR(x, type)

#if KERNEL_SIZE == 9

#if STRIDE_X == 1
#define CONVOLUTION1x9(acc, src_row_ptr, weights_row_ptr) CONVOLUTION1x9_STRIDE1(acc, src_row_ptr, weights_row_ptr)
#elif STRIDE_X == 2
#define CONVOLUTION1x9(acc, src_row_ptr, weights_row_ptr) CONVOLUTION1x9_STRIDE2(acc, src_row_ptr, weights_row_ptr)
#else /* STRIDE_X */
#error "STRIDE_X larger than 2 is not supported"
#endif /* STRIDE_X */

/* 1x9 row convolution, stride 1: accumulates 8 horizontal outputs into acc.
 * Each tap shifts input and weight by their quantization offsets before the
 * multiply-accumulate, as required by asymmetric quantization. */
#define CONVOLUTION1x9_STRIDE1(acc, src_row_ptr, weights_row_ptr)                                             \
    ({                                                                                                        \
        int8  weights_values0 = convert_int8(vload8(0, weights_row_ptr));                                     \
        int   weights_value1  = convert_int(*(weights_row_ptr + 8));                                          \
        int16 src0            = convert_int16(vload16(0, src_row_ptr));                                       \
        acc += (src0.lo + INPUT_OFFSET) * ((int8)weights_values0.s0 + WEIGHTS_OFFSET);                        \
        acc += ((int8)(src0.s1234, src0.s5678) + INPUT_OFFSET) * ((int8)weights_values0.s1 + WEIGHTS_OFFSET); \
        acc += ((int8)(src0.s2345, src0.s6789) + INPUT_OFFSET) * ((int8)weights_values0.s2 + WEIGHTS_OFFSET); \
        acc += ((int8)(src0.s3456, src0.s789A) + INPUT_OFFSET) * ((int8)weights_values0.s3 + WEIGHTS_OFFSET); \
        acc += ((int8)(src0.s4567, src0.s89AB) + INPUT_OFFSET) * ((int8)weights_values0.s4 + WEIGHTS_OFFSET); \
        acc += ((int8)(src0.s5678, src0.s9ABC) + INPUT_OFFSET) * ((int8)weights_values0.s5 + WEIGHTS_OFFSET); \
        acc += ((int8)(src0.s6789, src0.sABCD) + INPUT_OFFSET) * ((int8)weights_values0.s6 + WEIGHTS_OFFSET); \
        acc += ((int8)(src0.s789A, src0.sBCDE) + INPUT_OFFSET) * ((int8)weights_values0.s7 + WEIGHTS_OFFSET); \
        acc += ((int8)(src0.s89AB, src0.sCDEF) + INPUT_OFFSET) * ((int8)weights_value1 + WEIGHTS_OFFSET);     \
    })

/* 1x9 row convolution, stride 2: 8 outputs consume 23 input values
 * (16 from src0, up to 7 from the following src1 load). */
#define CONVOLUTION1x9_STRIDE2(acc, src_row_ptr, weights_row_ptr)                                                   \
    ({                                                                                                              \
        int8  weights_values0 = convert_int8(vload8(0, weights_row_ptr));                                           \
        int   weights_value1  = convert_int(*(weights_row_ptr + 8));                                                \
        int16 src0            = convert_int16(vload16(0, src_row_ptr));                                             \
        int8  src1            = convert_int8(vload8(0, src_row_ptr + 16));                                          \
        acc += (src0.even + INPUT_OFFSET) * ((int8)weights_values0.s0 + WEIGHTS_OFFSET);                            \
        acc += ((int8)(src0.s1357, src0.s9BDF) + INPUT_OFFSET) * ((int8)weights_values0.s1 + WEIGHTS_OFFSET);       \
        acc += ((int8)(src0.s2468, src0.sACE, src1.s0) + INPUT_OFFSET) * ((int8)weights_values0.s2 + WEIGHTS_OFFSET); \
        acc += ((int8)(src0.s3579, src0.sBDF, src1.s1) + INPUT_OFFSET) * ((int8)weights_values0.s3 + WEIGHTS_OFFSET); \
        acc += ((int8)(src0.s468A, src0.sCE, src1.s02) + INPUT_OFFSET) * ((int8)weights_values0.s4 + WEIGHTS_OFFSET); \
        acc += ((int8)(src0.s579B, src0.sDF, src1.s13) + INPUT_OFFSET) * ((int8)weights_values0.s5 + WEIGHTS_OFFSET); \
        acc += ((int8)(src0.s68AC, src0.sE, src1.s024) + INPUT_OFFSET) * ((int8)weights_values0.s6 + WEIGHTS_OFFSET); \
        acc += ((int8)(src0.s79BD, src0.sF, src1.s135) + INPUT_OFFSET) * ((int8)weights_values0.s7 + WEIGHTS_OFFSET); \
        acc += ((int8)(src0.s8ACE, src1.s0246) + INPUT_OFFSET) * ((int8)weights_value1 + WEIGHTS_OFFSET);           \
    })

#elif KERNEL_SIZE == 5

#if STRIDE_X == 1
#define CONVOLUTION1x5(acc, src_row_ptr, weights_row_ptr) CONVOLUTION1x5_STRIDE1(acc, src_row_ptr, weights_row_ptr)
#elif STRIDE_X == 2
#define CONVOLUTION1x5(acc, src_row_ptr, weights_row_ptr) CONVOLUTION1x5_STRIDE2(acc, src_row_ptr, weights_row_ptr)
#else /* STRIDE_X */
#error "STRIDE_X larger than 2 is not supported"
#endif /* STRIDE_X */

/* 1x5 row convolution, stride 1: 8 outputs consume 12 input values. */
#define CONVOLUTION1x5_STRIDE1(acc, src_row_ptr, weights_row_ptr)                                                     \
    ({                                                                                                                \
        int4 weights_values0 = convert_int4(vload4(0, weights_row_ptr));                                              \
        int  weights_value1  = convert_int(*(weights_row_ptr + 4));                                                   \
        int8 src0            = convert_int8(vload8(0, src_row_ptr));                                                  \
        int4 src1            = convert_int4(vload4(0, src_row_ptr + 8));                                              \
        acc += (src0 + INPUT_OFFSET) * ((int8)weights_values0.s0 + WEIGHTS_OFFSET);                                   \
        acc += ((int8)(src0.s1234, src0.s567, src1.s0) + INPUT_OFFSET) * ((int8)weights_values0.s1 + WEIGHTS_OFFSET); \
        acc += ((int8)(src0.s234, src0.s567, src1.s01) + INPUT_OFFSET) * ((int8)weights_values0.s2 + WEIGHTS_OFFSET); \
        acc += ((int8)(src0.s345, src0.s67, src1.s012) + INPUT_OFFSET) * ((int8)weights_values0.s3 + WEIGHTS_OFFSET); \
        acc += ((int8)(src0.s45, src0.s67, src1.s0123) + INPUT_OFFSET) * ((int8)weights_value1 + WEIGHTS_OFFSET);     \
    })

/* 1x5 row convolution, stride 2: 8 outputs consume 19 input values. */
#define CONVOLUTION1x5_STRIDE2(acc, src_row_ptr, weights_row_ptr)                                                     \
    ({                                                                                                                \
        int4  weights_values0 = convert_int4(vload4(0, weights_row_ptr));                                             \
        int   weights_value1  = convert_int(*(weights_row_ptr + 4));                                                  \
        int16 src0            = convert_int16(vload16(0, src_row_ptr));                                               \
        int4  src1            = convert_int4(vload4(0, src_row_ptr + 16));                                            \
        acc += (src0.even + INPUT_OFFSET) * ((int8)weights_values0.s0 + WEIGHTS_OFFSET);                              \
        acc += ((int8)(src0.s1357, src0.s9BDF) + INPUT_OFFSET) * ((int8)weights_values0.s1 + WEIGHTS_OFFSET);         \
        acc += ((int8)(src0.s2468, src0.sACE, src1.s0) + INPUT_OFFSET) * ((int8)weights_values0.s2 + WEIGHTS_OFFSET); \
        acc += ((int8)(src0.s3579, src0.sBDF, src1.s1) + INPUT_OFFSET) * ((int8)weights_values0.s3 + WEIGHTS_OFFSET); \
        acc += ((int8)(src0.s468A, src0.sCE, src1.s02) + INPUT_OFFSET) * ((int8)weights_value1 + WEIGHTS_OFFSET);     \
    })

#elif KERNEL_SIZE == 3

#if STRIDE_X == 1
#define CONVOLUTION1x3(acc, src_row_ptr, weights_row_ptr) CONVOLUTION1x3_STRIDE1(acc, src_row_ptr, weights_row_ptr)
#elif STRIDE_X == 2
#define CONVOLUTION1x3(acc, src_row_ptr, weights_row_ptr) CONVOLUTION1x3_STRIDE2(acc, src_row_ptr, weights_row_ptr)
#else /* STRIDE_X */
#error "STRIDE_X larger than 2 is not supported"
#endif /* STRIDE_X */

/* 1x3 row convolution, stride 1: 8 outputs consume 10 input values. */
#define CONVOLUTION1x3_STRIDE1(acc, src_row_ptr, weights_row_ptr)                                                     \
    ({                                                                                                                \
        int3 weights_values0 = convert_int3(vload3(0, weights_row_ptr));                                              \
        int8 src0            = convert_int8(vload8(0, src_row_ptr));                                                  \
        int2 src1            = convert_int2(vload2(0, src_row_ptr + 8));                                              \
        acc += (src0 + INPUT_OFFSET) * ((int8)weights_values0.s0 + WEIGHTS_OFFSET);                                   \
        acc += ((int8)(src0.s1234, src0.s567, src1.s0) + INPUT_OFFSET) * ((int8)weights_values0.s1 + WEIGHTS_OFFSET); \
        acc += ((int8)(src0.s234, src0.s567, src1.s01) + INPUT_OFFSET) * ((int8)weights_values0.s2 + WEIGHTS_OFFSET); \
    })

/* 1x3 row convolution, stride 2: 8 outputs consume 17 input values. */
#define CONVOLUTION1x3_STRIDE2(acc, src_row_ptr, weights_row_ptr)                                                 \
    ({                                                                                                            \
        int3  weights_values0 = convert_int3(vload3(0, weights_row_ptr));                                         \
        int16 src0            = convert_int16(vload16(0, src_row_ptr));                                           \
        int   src1            = convert_int(*(src_row_ptr + 16));                                                 \
        acc += (src0.even + INPUT_OFFSET) * ((int8)weights_values0.s0 + WEIGHTS_OFFSET);                          \
        acc += ((int8)(src0.s1357, src0.s9BDF) + INPUT_OFFSET) * ((int8)weights_values0.s1 + WEIGHTS_OFFSET);     \
        acc += ((int8)(src0.s2468, src0.sACE, src1) + INPUT_OFFSET) * ((int8)weights_values0.s2 + WEIGHTS_OFFSET); \
    })

#elif KERNEL_SIZE == 1

/* For 1x1 kernels only the horizontal gather differs per stride. */
#if STRIDE_X == 3
#define INPUT_VALUE extract_input_stride3
#elif STRIDE_X == 2
#define INPUT_VALUE extract_input_stride2
#elif STRIDE_X == 1
#define INPUT_VALUE extract_input_stride1
#else /* STRIDE_X */
#error "Only support strides 1, 2 and 3"
#endif /* STRIDE_X */

/* NOTE(review): the three extractor signatures below are reconstructed — the
 * original declaration lines were missing from this dump; the bodies are
 * verbatim. Confirm parameter qualifiers against the upstream source. */

/** Extracts 8 consecutive input values (stride 1). */
inline VEC_DATA_TYPE(DATA_TYPE, 8) extract_input_stride1(__global const DATA_TYPE *input_value)
{
    return vload8(0, input_value);
}

/** Extracts 8 input values sampled with stride 2 (even lanes of a 16-load). */
inline VEC_DATA_TYPE(DATA_TYPE, 8) extract_input_stride2(__global const DATA_TYPE *input_value)
{
    VEC_DATA_TYPE(DATA_TYPE, 16)
    temp = vload16(0, input_value);
    return temp.s02468ace;
}

/** Extracts 8 input values sampled with stride 3: lanes 0,3,6,9 of a 16-load
 *  plus lanes 12,15,18,21 via a second overlapping load at +12. */
inline VEC_DATA_TYPE(DATA_TYPE, 8) extract_input_stride3(__global const DATA_TYPE *input_value)
{
    VEC_DATA_TYPE(DATA_TYPE, 16)
    temp1 = vload16(0, input_value);
    VEC_DATA_TYPE(DATA_TYPE, 16)
    temp2 = vload16(0, input_value + 12);
    return (VEC_DATA_TYPE(DATA_TYPE, 8))(temp1.s0369, temp2.s0369);
}

#else /* KERNEL_SIZE */
#error "Only kernel sizes 1, 3, 5 and 9 are supported"
#endif /* KERNEL_SIZE */

/** Quantized direct convolution: each work-item produces 8 horizontally
 *  adjacent outputs of one output feature map (selected by get_global_id(2)),
 *  accumulating over WEIGHTS_DEPTH input channels in int32, then requantizing
 *  with OUTPUT_MULTIPLIER/OUTPUT_SHIFT and OUTPUT_OFFSET before a saturating
 *  store of 8 DATA_TYPE values.
 */
__kernel void direct_convolution_quantized(
    TENSOR3D_DECLARATION(src),
    TENSOR3D_DECLARATION(dst),
    TENSOR3D_DECLARATION(weights),
#ifdef HAS_BIAS
    VECTOR_DECLARATION(biases),
#endif /* defined(HAS_BIAS) */
    unsigned int weights_stride_w)
{
    Image    src     = CONVERT_TO_IMAGE_STRUCT(src);
    Tensor3D weights = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(weights);
    Tensor3D dst     = CONVERT_TO_TENSOR3D_STRUCT(dst);

    /* int32 accumulator for 8 output elements. */
    int8 values0 = 0;

    __global DATA_TYPE *weights_addr = (__global DATA_TYPE *)tensor3D_offset(&weights, 0, 0, 0);
    __global DATA_TYPE *src_addr     = (__global DATA_TYPE *)offset(&src, 0, 0);

    /* z dimension of the NDRange selects the output feature map / kernel. */
    const int kernel_index = get_global_id(2);
    weights_addr += kernel_index * weights_stride_w;

    /* volatile: presumably a workaround for a device-compiler loop issue —
     * kept as in the original; do not remove without re-testing on target. */
    for(volatile int d = 0; d < WEIGHTS_DEPTH; ++d)
    {
#if KERNEL_SIZE == 9
        CONVOLUTION1x9(values0, (__global DATA_TYPE *)(src_addr + 0 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 0 * weights_stride_y));
        CONVOLUTION1x9(values0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 1 * weights_stride_y));
        CONVOLUTION1x9(values0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 2 * weights_stride_y));
        CONVOLUTION1x9(values0, (__global DATA_TYPE *)(src_addr + 3 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 3 * weights_stride_y));
        CONVOLUTION1x9(values0, (__global DATA_TYPE *)(src_addr + 4 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 4 * weights_stride_y));
        CONVOLUTION1x9(values0, (__global DATA_TYPE *)(src_addr + 5 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 5 * weights_stride_y));
        CONVOLUTION1x9(values0, (__global DATA_TYPE *)(src_addr + 6 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 6 * weights_stride_y));
        CONVOLUTION1x9(values0, (__global DATA_TYPE *)(src_addr + 7 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 7 * weights_stride_y));
        CONVOLUTION1x9(values0, (__global DATA_TYPE *)(src_addr + 8 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 8 * weights_stride_y));
#elif KERNEL_SIZE == 5
        CONVOLUTION1x5(values0, (__global DATA_TYPE *)src_addr, (__global DATA_TYPE *)weights_addr);
        CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 1 * weights_stride_y));
        CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 2 * weights_stride_y));
        CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 3 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 3 * weights_stride_y));
        CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 4 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 4 * weights_stride_y));
#elif KERNEL_SIZE == 3
        CONVOLUTION1x3(values0, (__global DATA_TYPE *)(src_addr + 0 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 0 * weights_stride_y));
        CONVOLUTION1x3(values0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 1 * weights_stride_y));
        CONVOLUTION1x3(values0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 2 * weights_stride_y));
#elif KERNEL_SIZE == 1
        int  weight      = convert_int(*(__global DATA_TYPE *)weights_addr);
        int8 input_value = convert_int8(INPUT_VALUE((__global DATA_TYPE *)src_addr));
        values0 += (input_value + INPUT_OFFSET) * ((int8)weight + WEIGHTS_OFFSET);
#endif /* KERNEL_SIZE */

        /* Advance to the next input channel / weight plane. */
        src_addr += src_stride_z;
        weights_addr += weights_stride_z;
    }

#ifdef HAS_BIAS
    /* One int32 bias per output feature map, broadcast across the 8 outputs. */
    __global int *bias_addr = ((__global int *)(vector_offset(&biases, kernel_index)));
    values0 += (int8)(*bias_addr);
#endif /* defined(HAS_BIAS) */

    /* Requantize the int32 accumulator back to the output scale.
     * NOTE(review): these two lines were missing from the dump and are
     * reconstructed from the referenced helper macros — a negative
     * OUTPUT_SHIFT selects the multiplier-greater-than-one path with a
     * left shift of -OUTPUT_SHIFT. Verify against upstream. */
#if OUTPUT_SHIFT < 0
    values0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(values0, OUTPUT_MULTIPLIER, -OUTPUT_SHIFT, 8);
#else  // OUTPUT_SHIFT < 0
    values0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(values0, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, 8);
#endif // OUTPUT_SHIFT < 0
    values0 = values0 + OUTPUT_OFFSET;

    /* Saturating store of 8 requantized outputs. */
    vstore8(CONVERT_SAT(values0, DATA_TYPE), 0, (__global DATA_TYPE *)dst.ptr);
}
#endif // defined(DATA_TYPE) && defined(STRIDE_X) && defined(WEIGHTS_DEPTH) && defined(OUTPUT_MULTIPLIER) && defined(OUTPUT_SHIFT)
/* NOTE(review): The text below is cross-reference residue from a Doxygen
 * rendering of this file (tooltip summaries of the project helper macros and
 * functions this kernel uses). It is not part of the kernel source; it is
 * preserved, commented out, for reference only.
 *
 * __global uchar * offset(const Image *img, int x, int y)
 *   Get the pointer position of a Image.
 * #define CONVERT_TO_IMAGE_STRUCT(name)
 * const size_t weights_stride_y
 * #define CONVERT_SAT(a, b)
 * Structure to hold 3D tensor information.
 * SimpleTensor< float > src
 * #define CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(name)
 * #define VECTOR_DECLARATION(name)
 * Structure to hold Image information.
 * #define CONVERT_TO_TENSOR3D_STRUCT(name)
 * __global const uchar * vector_offset(const Vector *vec, int x)
 *   Get the pointer position of a Vector.
 * #define ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(x, quantized_multiplier, left_shift, size)
 * #define ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(x, quantized_multiplier, right_shift, size)
 * const size_t weights_stride_z
 * __global uchar * ptr
 *   Pointer to the starting position of the buffer.
 * #define TENSOR3D_DECLARATION(name)
 * #define CONVERT_TO_VECTOR_STRUCT_NO_STEP(name)
 * __global const uchar * tensor3D_offset(const Tensor3D *tensor, int x, int y, int z)
 *   Get the pointer position of a Tensor3D.
 * #define VEC_DATA_TYPE(type, size)
 */