27 #if defined(ARM_COMPUTE_ENABLE_SVE)
// Transpose-interleave kernel (SVE, 16-bit elements): reads rows of
// uint16_t from `in` (rows are `in_stride` elements apart) and writes
// them to `out` as groups of 4 SVE vectors per source row, handling
// 4 rows per main-loop iteration and 1 row per tail-loop iteration.
//
// NOTE(review): this chunk is an incomplete extraction. The
// `__asm__ volatile(` opener, the asm loop labels and branch
// instructions (the `cbz ... 8f` at the bottom references a label "8"
// that is not visible here), the `ptrue p4.b` that must define the
// all-true store predicate p4, and the `mov` instructions seeding the
// row pointers (x26) and the output pointer are all missing from view.
// Verify against the upstream generated source before trusting any
// edit to this block. Stray leading numbers ("31", "33", ...) are
// extraction residue fused into the lines — not code.
31 void sve_transpose_interleave_4VL(uint16_t *out,
const uint16_t *in,
size_t width,
size_t in_stride,
size_t height)
// Output stride in bytes: 4 vectors of 16-bit elements per input row,
// expressed via the byte vector length (get_vector_length<uint8_t>()).
33 size_t out_stride = 4 * height * get_vector_length<uint8_t>();
// ---- Main loop: only entered while at least 4 rows remain ----
36 "cmp %x[height], #0x4\n"
// x26..x23 address four consecutive input rows; %x[in] is advanced
// past them for the next iteration.
41 "add x25, x26, %x[in_stride]\n"
42 "add x24, x25, %x[in_stride]\n"
43 "add x23, x24, %x[in_stride]\n"
44 "add %x[in], x23, %x[in_stride]\n"
46 "sub %x[height], %x[height], #0x4\n"
// x21 counts remaining columns (16-bit elements) for this row group.
47 "mov x21, %x[width]\n"
// Per-vector tail predicates p3..p0 are built with whilelt against
// x20. NOTE(review): the instructions that set/update x20 between the
// whilelts (presumably deriving per-vector remaining counts from x21)
// are not visible in this extraction — confirm upstream.
50 "whilelt p3.h, XZR, x20\n"
51 "ld1h { z31.h }, p3/Z, [x26]\n"
52 "ld1h { z30.h }, p3/Z, [x25]\n"
54 "whilelt p2.h, XZR, x20\n"
55 "ld1h { z29.h }, p2/Z, [x26, #1, MUL VL]\n"
56 "ld1h { z28.h }, p2/Z, [x25, #1, MUL VL]\n"
58 "whilelt p1.h, XZR, x20\n"
59 "ld1h { z27.h }, p1/Z, [x26, #2, MUL VL]\n"
60 "ld1h { z26.h }, p1/Z, [x25, #2, MUL VL]\n"
62 "whilelt p0.h, XZR, x20\n"
63 "ld1h { z25.h }, p0/Z, [x26, #3, MUL VL]\n"
64 "ld1h { z24.h }, p0/Z, [x25, #3, MUL VL]\n"
// Consume 4 vector-lengths of halfwords from the column counter.
66 "dech x21, ALL, MUL #4\n"
// Loads for the third (x24) and fourth (x23) rows, reusing p3..p0.
67 "ld1h { z23.h }, p3/Z, [x24]\n"
68 "ld1h { z22.h }, p2/Z, [x24, #1, MUL VL]\n"
69 "ld1h { z21.h }, p1/Z, [x24, #2, MUL VL]\n"
70 "ld1h { z20.h }, p0/Z, [x24, #3, MUL VL]\n"
// Advance each row pointer past the 4 vectors just loaded.
72 "addvl x26, x26, #4\n"
73 "ld1h { z19.h }, p3/Z, [x23]\n"
74 "ld1h { z18.h }, p2/Z, [x23, #1, MUL VL]\n"
75 "addvl x25, x25, #4\n"
76 "addvl x24, x24, #4\n"
77 "ld1h { z17.h }, p1/Z, [x23, #2, MUL VL]\n"
78 "ld1h { z16.h }, p0/Z, [x23, #3, MUL VL]\n"
// ---- Stores: 16 vectors — 4 per source row, rows laid out
// consecutively (row0: z31,z29,z27,z25; row1: z30,z28,z26,z24; ...).
// NOTE(review): stores address x20 while x22 is the register advanced
// by out_stride below — in this extraction the store base looks
// inconsistent with the stride update; lines seeding/renaming these
// registers were likely dropped. Verify against upstream.
79 "st1h { z31.h }, p4, [x20]\n"
80 "addvl x23, x23, #4\n"
81 "st1h { z29.h }, p4, [x20, #1, MUL VL]\n"
82 "add x22, x22, %x[out_stride]\n"
83 "st1h { z27.h }, p4, [x20, #2, MUL VL]\n"
84 "st1h { z25.h }, p4, [x20, #3, MUL VL]\n"
85 "st1h { z30.h }, p4, [x20, #4, MUL VL]\n"
86 "st1h { z28.h }, p4, [x20, #5, MUL VL]\n"
87 "st1h { z26.h }, p4, [x20, #6, MUL VL]\n"
88 "st1h { z24.h }, p4, [x20, #7, MUL VL]\n"
// Bump the store pointer by the full 16-vector group, then store the
// remaining 8 vectors at negative vector offsets from the new base.
89 "addvl x20, x20, #16\n"
90 "st1h { z23.h }, p4, [x20, #-8, MUL VL]\n"
91 "st1h { z22.h }, p4, [x20, #-7, MUL VL]\n"
92 "st1h { z21.h }, p4, [x20, #-6, MUL VL]\n"
93 "st1h { z20.h }, p4, [x20, #-5, MUL VL]\n"
94 "st1h { z19.h }, p4, [x20, #-4, MUL VL]\n"
95 "st1h { z18.h }, p4, [x20, #-3, MUL VL]\n"
96 "st1h { z17.h }, p4, [x20, #-2, MUL VL]\n"
97 "st1h { z16.h }, p4, [x20, #-1, MUL VL]\n"
// Main-loop tail: recheck whether another group of 4 rows remains and
// advance the caller-visible output pointer by 16 vectors.
100 "cmp %x[height], #0x4\n"
101 "addvl %x[out], %x[out], #16\n"
// Done entirely if no rows remain; otherwise fall into the 1-row loop.
103 "cbz %x[height], 8f\n"
// ---- Tail loop: one input row (x26) per iteration ----
107 "add %x[in], x26, %x[in_stride]\n"
109 "sub %x[height], %x[height], #0x1\n"
110 "mov x21, %x[width]\n"
// Same predicated-load pattern as the main loop, but p0 is rebuilt for
// each of the 4 vectors (same NOTE as above re: x20 updates).
113 "whilelt p0.h, XZR, x20\n"
114 "ld1h { z19.h }, p0/Z, [x26]\n"
116 "whilelt p0.h, XZR, x20\n"
117 "ld1h { z18.h }, p0/Z, [x26, #1, MUL VL]\n"
119 "whilelt p0.h, XZR, x20\n"
120 "ld1h { z17.h }, p0/Z, [x26, #2, MUL VL]\n"
122 "dech x21, ALL, MUL #4\n"
123 "whilelt p0.h, XZR, x20\n"
125 "ld1h { z16.h }, p0/Z, [x26, #3, MUL VL]\n"
// Store the row's 4 vectors at x22, then step x22 to the next output
// row group.
126 "st1h { z19.h }, p4, [x22]\n"
127 "addvl x26, x26, #4\n"
128 "st1h { z18.h }, p4, [x22, #1, MUL VL]\n"
129 "st1h { z17.h }, p4, [x22, #2, MUL VL]\n"
130 "st1h { z16.h }, p4, [x22, #3, MUL VL]\n"
131 "add x22, x22, %x[out_stride]\n"
// Tail-loop check and caller-visible output advance (4 vectors/row).
134 "cmp %x[height], #0x1\n"
135 "addvl %x[out], %x[out], #4\n"
// Extended-asm operands: height/in/out are read-write ("+&r"), the
// strides and width are read-only inputs.
138 : [height]
"+&r" (height), [in]
"+&r" (in), [out]
"+&r" (out)
139 : [in_stride]
"r" (in_stride), [out_stride]
"r" (out_stride), [width]
"r" (width)
// Clobbers: condition flags, memory, and every predicate/GP/Z register
// the asm body touches.
140 :
"cc",
"memory",
"p0",
"p1",
"p2",
"p3",
"p4",
"x20",
"x21",
"x22",
"x23",
"x24",
"x25",
"x26",
"z16",
"z17",
"z18",
"z19",
"z20",
"z21",
"z22",
"z23",
"z24",
"z25",
"z26",
"z27",
"z28",
"z29",
"z30",
"z31"
// Transform specialization for float: forwards to the 16-bit kernel by
// reinterpreting the data as uint16_t. Width/stride are rescaled into
// 16-bit units: (xmax-x0) * sizeof(float)/2 and stride * sizeof(float).
// NOTE(review): incomplete extraction — the `template <>` introducer,
// the final height argument (presumably (kmax-k0)), and the closing
// `);` / `}` are not visible in this chunk; verify against upstream.
147 void Transform<4, 1, true, VLType::SVE>(
148 float *out,
const float *in,
int stride,
int x0,
int xmax,
int k0,
int kmax)
150 sve_transpose_interleave_4VL(
151 reinterpret_cast<uint16_t *
>(out),
// Input window starts at row k0, column x0 of the source matrix.
152 reinterpret_cast<const uint16_t *
>(in + k0 * stride + x0),
153 (xmax-x0) *
sizeof(
float) / 2,
154 stride *
sizeof(
float),
// Transform specialization for __fp16: forwards to the 16-bit kernel
// by reinterpreting the data as uint16_t (sizeof(__fp16)/2 == 1, so
// width/stride are already in 16-bit element units).
// NOTE(review): incomplete extraction — the `template <>` introducer,
// the final height argument (presumably (kmax-k0)), and the closing
// `);` / `}` are not visible in this chunk; verify against upstream.
160 void Transform<4, 1, true, VLType::SVE>(
161 __fp16 *out,
const __fp16 *in,
int stride,
int x0,
int xmax,
int k0,
int kmax)
163 sve_transpose_interleave_4VL(
164 reinterpret_cast<uint16_t *
>(out),
// Input window starts at row k0, column x0 of the source matrix.
165 reinterpret_cast<const uint16_t *
>(in + k0 * stride + x0),
166 (xmax-x0) *
sizeof(__fp16) / 2,
167 stride *
sizeof(__fp16),
// Transform specialization for double: forwards to the 16-bit kernel
// by reinterpreting the data as uint16_t. Width/stride are rescaled
// into 16-bit units: (xmax-x0) * sizeof(double)/2 and
// stride * sizeof(double).
// NOTE(review): incomplete extraction — the `template <>` introducer,
// the final height argument (presumably (kmax-k0)), and the closing
// `);` / `}` are not visible in this chunk; verify against upstream.
173 void Transform<4, 1, true, VLType::SVE>(
174 double *out,
const double *in,
int stride,
int x0,
int xmax,
int k0,
int kmax)
176 sve_transpose_interleave_4VL(
177 reinterpret_cast<uint16_t *
>(out),
// Input window starts at row k0, column x0 of the source matrix.
178 reinterpret_cast<const uint16_t *
>(in + k0 * stride + x0),
179 (xmax-x0) *
sizeof(
double) / 2,
180 stride *
sizeof(
double),
186 #endif // defined(ARM_COMPUTE_ENABLE_SVE)