#if defined(__aarch64__)

namespace {

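// Transpose-interleave kernel: processes blocks of 8 input rows at a time
// (rows past the end of the matrix are read from a zeroed pad row) and emits
// 12-column-wide panels in which the 8 rows are interleaved byte-by-byte via
// trees of zip1/zip2 instructions.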
void a64_transpose_interleave_12_1x8(uint8_t *out, const uint8_t *in, size_t width, size_t in_stride, size_t height)
{
    uint8_t *pad_row = reinterpret_cast<uint8_t *>(alloca(width * sizeof(uint8_t)));

    if (height % 8) {
        memset(pad_row, 0, width * sizeof(uint8_t));
    }

    // Output stride: 12 columns per panel times height rounded up to the
    // 8-row interleave.
    size_t out_stride = 12 * roundup<size_t>(height, 8) * sizeof(uint8_t);

    __asm__ __volatile__(
      "1:"  // Main row loop: Head
      "mov x9, %x[in]\n"
44 "add x28, x9, %x[in_stride]\n"
45 "add x27, x28, %x[in_stride]\n"
46 "add x26, x27, %x[in_stride]\n"
47 "add x25, x26, %x[in_stride]\n"
48 "add x24, x25, %x[in_stride]\n"
49 "add x23, x24, %x[in_stride]\n"
50 "add x22, x23, %x[in_stride]\n"
51 "cmp %x[height], #0x7\n"
52 "add %x[in], x22, %x[in_stride]\n"
53 "csel x22, x22, %x[pad_row], GT\n"
54 "csel x23, x23, %x[pad_row], GE\n"
55 "cmp %x[height], #0x5\n"
56 "mov x21, %x[width]\n"
57 "csel x24, x24, %x[pad_row], GT\n"
58 "csel x25, x25, %x[pad_row], GE\n"
59 "cmp %x[height], #0x3\n"
60 "csel x26, x26, %x[pad_row], GT\n"
61 "csel x27, x27, %x[pad_row], GE\n"
62 "cmp %x[height], #0x1\n"
63 "csel x28, x28, %x[pad_row], GT\n"
66 "sub %x[height], %x[height], #0x8\n"
69 "ldr q21, [x9], #0x10\n"
70 "ldr q25, [x28], #0x10\n"
71 "sub x21, x21, #0x30\n"
73 "ldr q20, [x27], #0x10\n"
74 "ldr q24, [x26], #0x10\n"
75 "ldr q19, [x25], #0x10\n"
76 "ldr q18, [x24], #0x10\n"
77 "zip1 v7.16b, v21.16b, v19.16b\n"
78 "zip1 v6.16b, v25.16b, v18.16b\n"
79 "ldr q17, [x23], #0x10\n"
80 "ldr q16, [x22], #0x10\n"
81 "zip1 v28.16b, v20.16b, v17.16b\n"
82 "zip1 v27.16b, v24.16b, v16.16b\n"
83 "ldr q23, [x9], #0x10\n"
84 "ldr q22, [x28], #0x10\n"
85 "zip2 v5.16b, v21.16b, v19.16b\n"
86 "zip2 v4.16b, v20.16b, v17.16b\n"
87 "ldr q21, [x27], #0x10\n"
88 "ldr q20, [x26], #0x10\n"
89 "zip2 v3.16b, v25.16b, v18.16b\n"
90 "zip2 v2.16b, v24.16b, v16.16b\n"
91 "ldr q19, [x25], #0x10\n"
92 "ldr q18, [x24], #0x10\n"
93 "zip1 v1.16b, v23.16b, v19.16b\n"
94 "zip1 v15.16b, v22.16b, v18.16b\n"
95 "ldr q17, [x23], #0x10\n"
96 "ldr q16, [x22], #0x10\n"
97 "zip1 v0.16b, v21.16b, v17.16b\n"
98 "zip1 v31.16b, v20.16b, v16.16b\n"
99 "ldr q26, [x9], #0x10\n"
100 "ldr q30, [x28], #0x10\n"
101 "zip2 v14.16b, v23.16b, v19.16b\n"
102 "zip2 v13.16b, v21.16b, v17.16b\n"
103 "ldr q25, [x27], #0x10\n"
104 "ldr q24, [x26], #0x10\n"
105 "zip2 v12.16b, v22.16b, v18.16b\n"
106 "zip2 v11.16b, v20.16b, v16.16b\n"
107 "ldr q23, [x25], #0x10\n"
108 "ldr q22, [x24], #0x10\n"
109 "zip1 v10.16b, v26.16b, v23.16b\n"
110 "zip1 v9.16b, v30.16b, v22.16b\n"
111 "ldr q21, [x23], #0x10\n"
112 "ldr q17, [x22], #0x10\n"
113 "zip1 v29.16b, v25.16b, v21.16b\n"
114 "zip1 v8.16b, v24.16b, v17.16b\n"
115 "zip1 v19.16b, v7.16b, v28.16b\n"
116 "zip1 v16.16b, v6.16b, v27.16b\n"
117 "zip2 v28.16b, v7.16b, v28.16b\n"
118 "zip2 v18.16b, v6.16b, v27.16b\n"
119 "zip1 v27.16b, v5.16b, v4.16b\n"
120 "zip1 v20.16b, v3.16b, v2.16b\n"
121 "zip2 v7.16b, v26.16b, v23.16b\n"
122 "zip2 v26.16b, v25.16b, v21.16b\n"
123 "zip2 v6.16b, v30.16b, v22.16b\n"
124 "zip2 v25.16b, v24.16b, v17.16b\n"
125 "zip2 v5.16b, v5.16b, v4.16b\n"
126 "zip2 v4.16b, v3.16b, v2.16b\n"
127 "zip1 v3.16b, v1.16b, v0.16b\n"
128 "zip1 v2.16b, v15.16b, v31.16b\n"
129 "zip2 v1.16b, v1.16b, v0.16b\n"
130 "zip2 v0.16b, v15.16b, v31.16b\n"
131 "zip1 v31.16b, v14.16b, v13.16b\n"
132 "zip1 v30.16b, v12.16b, v11.16b\n"
133 "zip2 v24.16b, v14.16b, v13.16b\n"
134 "zip2 v23.16b, v12.16b, v11.16b\n"
135 "zip1 v22.16b, v10.16b, v29.16b\n"
136 "zip1 v21.16b, v9.16b, v8.16b\n"
137 "zip1 v17.16b, v19.16b, v16.16b\n"
138 "zip2 v16.16b, v19.16b, v16.16b\n"
139 "str q17, [x20, #0x0]\n"
140 "zip1 v19.16b, v28.16b, v18.16b\n"
141 "zip2 v18.16b, v28.16b, v18.16b\n"
142 "str q16, [x20, #0x10]\n"
143 "zip1 v17.16b, v27.16b, v20.16b\n"
144 "zip2 v16.16b, v27.16b, v20.16b\n"
145 "str q19, [x20, #0x20]\n"
146 "str q18, [x20, #0x30]\n"
147 "zip2 v29.16b, v10.16b, v29.16b\n"
148 "zip2 v20.16b, v9.16b, v8.16b\n"
149 "str q17, [x20, #0x40]\n"
150 "zip1 v28.16b, v7.16b, v26.16b\n"
151 "zip1 v27.16b, v6.16b, v25.16b\n"
152 "str q16, [x20, #0x50]\n"
153 "add x20, x20, %x[out_stride]\n"
154 "zip2 v26.16b, v7.16b, v26.16b\n"
155 "zip2 v25.16b, v6.16b, v25.16b\n"
156 "zip1 v17.16b, v5.16b, v4.16b\n"
157 "zip2 v16.16b, v5.16b, v4.16b\n"
158 "str q17, [x20, #0x0]\n"
159 "zip1 v18.16b, v3.16b, v2.16b\n"
160 "zip2 v17.16b, v3.16b, v2.16b\n"
161 "str q16, [x20, #0x10]\n"
162 "zip1 v16.16b, v1.16b, v0.16b\n"
163 "zip2 v19.16b, v1.16b, v0.16b\n"
164 "str q18, [x20, #0x20]\n"
165 "str q17, [x20, #0x30]\n"
166 "zip1 v18.16b, v31.16b, v30.16b\n"
167 "zip2 v17.16b, v31.16b, v30.16b\n"
168 "str q16, [x20, #0x40]\n"
169 "zip1 v16.16b, v24.16b, v23.16b\n"
170 "zip2 v24.16b, v24.16b, v23.16b\n"
171 "str q19, [x20, #0x50]\n"
172 "add x20, x20, %x[out_stride]\n"
173 "zip1 v23.16b, v22.16b, v21.16b\n"
174 "zip2 v22.16b, v22.16b, v21.16b\n"
175 "str q18, [x20, #0x0]\n"
176 "zip1 v21.16b, v29.16b, v20.16b\n"
177 "zip2 v20.16b, v29.16b, v20.16b\n"
178 "str q17, [x20, #0x10]\n"
179 "zip1 v19.16b, v28.16b, v27.16b\n"
180 "zip2 v18.16b, v28.16b, v27.16b\n"
181 "str q16, [x20, #0x20]\n"
182 "zip1 v17.16b, v26.16b, v25.16b\n"
183 "zip2 v16.16b, v26.16b, v25.16b\n"
184 "str q24, [x20, #0x30]\n"
185 "str q23, [x20, #0x40]\n"
186 "str q22, [x20, #0x50]\n"
187 "add x20, x20, %x[out_stride]\n"
188 "str q21, [x20, #0x0]\n"
189 "str q20, [x20, #0x10]\n"
190 "str q19, [x20, #0x20]\n"
191 "str q18, [x20, #0x30]\n"
192 "str q17, [x20, #0x40]\n"
193 "str q16, [x20, #0x50]\n"
194 "add x20, x20, %x[out_stride]\n"
200 "ldr d23, [x9], #0x8\n"
201 "ldr d27, [x28], #0x8\n"
202 "sub x21, x21, #0xc\n"
204 "ldr d21, [x27], #0x8\n"
205 "ldr d26, [x26], #0x8\n"
206 "ldr d20, [x25], #0x8\n"
207 "ldr d19, [x24], #0x8\n"
208 "ldr d17, [x23], #0x8\n"
209 "ldr d16, [x22], #0x8\n"
210 "ld1 { v23.s }[2], [x9], #0x4\n"
211 "ld1 { v27.s }[2], [x28], #0x4\n"
212 "ld1 { v21.s }[2], [x27], #0x4\n"
213 "ld1 { v26.s }[2], [x26], #0x4\n"
214 "ld1 { v20.s }[2], [x25], #0x4\n"
215 "ld1 { v19.s }[2], [x24], #0x4\n"
216 "zip1 v25.16b, v23.16b, v20.16b\n"
217 "zip1 v24.16b, v27.16b, v19.16b\n"
218 "ld1 { v17.s }[2], [x23], #0x4\n"
219 "ld1 { v16.s }[2], [x22], #0x4\n"
220 "zip1 v22.16b, v21.16b, v17.16b\n"
221 "zip1 v18.16b, v26.16b, v16.16b\n"
222 "zip2 v23.16b, v23.16b, v20.16b\n"
223 "zip2 v21.16b, v21.16b, v17.16b\n"
224 "zip2 v20.16b, v27.16b, v19.16b\n"
225 "zip2 v17.16b, v26.16b, v16.16b\n"
226 "zip1 v19.16b, v25.16b, v22.16b\n"
227 "zip1 v16.16b, v24.16b, v18.16b\n"
228 "zip2 v22.16b, v25.16b, v22.16b\n"
229 "zip2 v18.16b, v24.16b, v18.16b\n"
230 "zip1 v21.16b, v23.16b, v21.16b\n"
231 "zip1 v20.16b, v20.16b, v17.16b\n"
232 "zip1 v17.16b, v19.16b, v16.16b\n"
233 "zip2 v16.16b, v19.16b, v16.16b\n"
234 "str q17, [x20, #0x0]\n"
235 "zip1 v19.16b, v22.16b, v18.16b\n"
236 "zip2 v18.16b, v22.16b, v18.16b\n"
237 "str q16, [x20, #0x10]\n"
238 "zip1 v17.16b, v21.16b, v20.16b\n"
239 "zip2 v16.16b, v21.16b, v20.16b\n"
240 "str q19, [x20, #0x20]\n"
241 "str q18, [x20, #0x30]\n"
242 "str q17, [x20, #0x40]\n"
243 "str q16, [x20, #0x50]\n"
244 "add x20, x20, %x[out_stride]\n"
250 "ldr s18, [x9], #0x4\n"
251 "ldr s19, [x28], #0x4\n"
252 "sub x21, x21, #0x4\n"
254 "ldr s21, [x27], #0x4\n"
255 "ldr s20, [x26], #0x4\n"
256 "ldr s17, [x25], #0x4\n"
257 "ldr s16, [x24], #0x4\n"
258 "zip1 v18.16b, v18.16b, v17.16b\n"
259 "zip1 v19.16b, v19.16b, v16.16b\n"
260 "ldr s17, [x23], #0x4\n"
261 "ldr s16, [x22], #0x4\n"
262 "zip1 v17.16b, v21.16b, v17.16b\n"
263 "zip1 v16.16b, v20.16b, v16.16b\n"
264 "zip1 v18.16b, v18.16b, v17.16b\n"
265 "zip1 v16.16b, v19.16b, v16.16b\n"
266 "zip1 v17.16b, v18.16b, v16.16b\n"
267 "zip2 v16.16b, v18.16b, v16.16b\n"
268 "str q17, [x20, #0x0]\n"
269 "str q16, [x20, #0x10]\n"
270 "add x20, x20, #0x20\n"
276 "ldr b19, [x9], #0x1\n"
277 "ldr b18, [x28], #0x1\n"
278 "sub x21, x21, #0x1\n"
280 "ldr b21, [x27], #0x1\n"
281 "ldr b20, [x26], #0x1\n"
282 "ldr b17, [x25], #0x1\n"
283 "ldr b16, [x24], #0x1\n"
284 "zip1 v19.16b, v19.16b, v17.16b\n"
285 "zip1 v18.16b, v18.16b, v16.16b\n"
286 "ldr b17, [x23], #0x1\n"
287 "ldr b16, [x22], #0x1\n"
288 "zip1 v17.16b, v21.16b, v17.16b\n"
289 "zip1 v16.16b, v20.16b, v16.16b\n"
290 "zip1 v17.16b, v19.16b, v17.16b\n"
291 "zip1 v16.16b, v18.16b, v16.16b\n"
292 "zip1 v16.16b, v17.16b, v16.16b\n"
293 "str d16, [x20, #0x0]\n"
294 "add x20, x20, #0x8\n"
297 "cmp %x[height], #0x1\n"
298 "add %x[out], %x[out], #0x60\n"
      : [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
      : [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
      : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
        "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
        "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
    );
}

} // anonymous namespace

template<>
void Transform<12, 8, true, VLType::None>(
    uint8_t *out, const uint8_t *in, int stride, int x0, int xmax, int k0, int kmax)
{
    a64_transpose_interleave_12_1x8(
        reinterpret_cast<uint8_t *>(out),
        reinterpret_cast<const uint8_t *>(in + k0 * stride + x0),
        (xmax-x0) * sizeof(uint8_t) / 1,
        stride * sizeof(uint8_t),
        (kmax-k0)
    );
}

template<>
void Transform<12, 8, true, VLType::None>(
    int8_t *out, const int8_t *in, int stride, int x0, int xmax, int k0, int kmax)
{
    a64_transpose_interleave_12_1x8(
        reinterpret_cast<uint8_t *>(out),
        reinterpret_cast<const uint8_t *>(in + k0 * stride + x0),
        (xmax-x0) * sizeof(int8_t) / 1,
        stride * sizeof(int8_t),
        (kmax-k0)
    );
}

#endif // defined(__aarch64__)