27 #if defined(ARM_COMPUTE_ENABLE_SME)
// Transpose-interleave kernel for 16-bit elements using Arm SME streaming
// assembly: reads rows of `width` uint16_t values from `in` (row stride
// `in_stride`) and writes them to `out` with pairs of rows zipped together
// (2x2 interleave), 4 vector-lengths of output per column step.
//
// NOTE(review): this extraction is missing lines relative to the generated
// original (asm labels, branch instructions, the `__asm__ volatile(` opener,
// loop-count setup such as the x20/x22 initialisation, and braces) -- the
// comments below describe only what is visible here.
31 void sme_transpose_interleave_4VL_2x2(uint16_t *out,
const uint16_t *in,
size_t width,
size_t in_stride,
size_t height)
// Zero-filled scratch row allocated on the stack; substituted for a missing
// input row when rows are consumed in pairs and `height` is odd (see the
// `csel ... %x[pad_row]` below).
33 uint16_t *pad_row =
reinterpret_cast<uint16_t *
>(alloca(width *
sizeof(uint16_t)));
36 memset(pad_row, 0, width *
sizeof(uint16_t));
// Bytes between successive output column-blocks: 4 vectors of uint16_t per
// row pair, with height rounded up to the next multiple of 2.
39 size_t out_stride = 4 * roundup<size_t>(height, 2) * sme::get_vector_length<uint16_t>();
// Enter SME streaming mode / enable ZA storage (encoded instruction since
// the assembler may not know SME mnemonics).
42 ".inst 0xd503477f // SMSTART ZA\n"
// Main path: process 4 input rows at a time while height >= 4.
43 "cmp %x[height], #0x4\n"
// x26..x23 hold the 4 current row pointers; advance %[in] past them.
48 "add x25, x26, %x[in_stride]\n"
49 "add x24, x25, %x[in_stride]\n"
50 "add x23, x24, %x[in_stride]\n"
51 "add %x[in], x23, %x[in_stride]\n"
53 "sub %x[height], %x[height], #0x4\n"
// x21 counts remaining columns; p0/p1 are tail predicates built from x20
// (x20 setup is in the lines missing from this extraction).
54 "mov x21, %x[width]\n"
57 "whilelt p1.h, XZR, x20\n"
58 "ld1h { z19.h }, p1/Z, [x26]\n"
60 "whilelt p0.h, XZR, x20\n"
61 "ld1h { z18.h }, p0/Z, [x26, #1, MUL VL]\n"
62 "ld1h { z17.h }, p1/Z, [x25]\n"
// Consume 4 vector-widths of halfwords per column iteration.
63 "decw x21, ALL, MUL #4\n"
// zip1/zip2 interleave elements of row pairs (rows 0+1, then rows 2+3):
// this is the 2x2 block interleave of the transpose.
65 "zip1 z24.h, z19.h, z17.h\n"
66 "ld1h { z16.h }, p0/Z, [x25, #1, MUL VL]\n"
// Advance each row pointer by 2 vector lengths (the 2 vectors just loaded).
67 "addvl x26, x26, #2\n"
68 "addvl x25, x25, #2\n"
69 "zip2 z23.h, z19.h, z17.h\n"
70 "ld1h { z17.h }, p1/Z, [x24]\n"
71 "zip1 z22.h, z18.h, z16.h\n"
72 "zip2 z21.h, z18.h, z16.h\n"
73 "ld1h { z20.h }, p0/Z, [x24, #1, MUL VL]\n"
74 "addvl x24, x24, #2\n"
75 "ld1h { z16.h }, p1/Z, [x23]\n"
76 "zip1 z19.h, z17.h, z16.h\n"
77 "zip2 z18.h, z17.h, z16.h\n"
78 "ld1h { z16.h }, p0/Z, [x23, #1, MUL VL]\n"
79 "addvl x23, x23, #2\n"
80 "zip1 z17.h, z20.h, z16.h\n"
81 "zip2 z16.h, z20.h, z16.h\n"
// Store 8 interleaved vectors (4 per row pair) to the output block at x22,
// then step x22 to the next column-block of the output.
82 "st1h { z24.h }, p2, [x22]\n"
83 "st1h { z23.h }, p2, [x22, #1, MUL VL]\n"
84 "st1h { z22.h }, p2, [x22, #2, MUL VL]\n"
85 "st1h { z21.h }, p2, [x22, #3, MUL VL]\n"
86 "st1h { z19.h }, p2, [x22, #4, MUL VL]\n"
87 "st1h { z18.h }, p2, [x22, #5, MUL VL]\n"
88 "st1h { z17.h }, p2, [x22, #6, MUL VL]\n"
89 "st1h { z16.h }, p2, [x22, #7, MUL VL]\n"
90 "add x22, x22, %x[out_stride]\n"
// End of 4-row iteration: 8 vectors of output were emitted.
93 "cmp %x[height], #0x4\n"
94 "addvl %x[out], %x[out], #8\n"
// Done entirely if no rows remain; otherwise fall into the 1-2 row tail.
96 "cbz %x[height], 8f\n"
// Tail path: one pair of rows, with the second row replaced by pad_row
// when only a single row remains (height == 1 -> csel picks pad_row).
100 "add x25, x26, %x[in_stride]\n"
101 "cmp %x[height], #0x1\n"
102 "add %x[in], x25, %x[in_stride]\n"
104 "csel x25, x25, %x[pad_row], GT\n"
105 "sub %x[height], %x[height], #0x2\n"
106 "mov x21, %x[width]\n"
109 "whilelt p1.h, XZR, x20\n"
110 "ld1h { z18.h }, p1/Z, [x26]\n"
112 "whilelt p0.h, XZR, x20\n"
113 "ld1h { z20.h }, p0/Z, [x26, #1, MUL VL]\n"
114 "ld1h { z17.h }, p1/Z, [x25]\n"
115 "decw x21, ALL, MUL #4\n"
// Same 2x2 zip as the main loop but for a single row pair (4 vectors out).
117 "zip1 z19.h, z18.h, z17.h\n"
118 "ld1h { z16.h }, p0/Z, [x25, #1, MUL VL]\n"
119 "addvl x26, x26, #2\n"
120 "addvl x25, x25, #2\n"
121 "zip2 z18.h, z18.h, z17.h\n"
122 "zip1 z17.h, z20.h, z16.h\n"
123 "zip2 z16.h, z20.h, z16.h\n"
124 "st1h { z19.h }, p2, [x22]\n"
125 "st1h { z18.h }, p2, [x22, #1, MUL VL]\n"
126 "st1h { z17.h }, p2, [x22, #2, MUL VL]\n"
127 "st1h { z16.h }, p2, [x22, #3, MUL VL]\n"
128 "add x22, x22, %x[out_stride]\n"
131 "cmp %x[height], #0x1\n"
132 "addvl %x[out], %x[out], #4\n"
// Leave streaming mode (encoded SMSTOP).
135 ".inst 0xd503467f // SMSTOP\n"
// Read-write operands: the asm advances the in/out pointers and consumes
// the height counter in place.
136 : [height]
"+&r" (height), [in]
"+&r" (in), [out]
"+&r" (out)
// Read-only operands.
137 : [in_stride]
"r" (in_stride), [out_stride]
"r" (out_stride), [pad_row]
"r" (pad_row), [width]
"r" (width)
// Clobbers: condition flags, memory, all SVE predicate registers, the
// scratch GPRs x20-x26, and all SVE Z registers.
138 :
"cc",
"memory",
"p0",
"p1",
"p2",
"p3",
"p4",
"p5",
"p6",
"p7",
"p8",
"p9",
"p10",
"p11",
"p12",
"p13",
"p14",
"p15",
"x20",
"x21",
"x22",
"x23",
"x24",
"x25",
"x26",
"z0",
"z1",
"z2",
"z3",
"z4",
"z5",
"z6",
"z7",
"z8",
"z9",
"z10",
"z11",
"z12",
"z13",
"z14",
"z15",
"z16",
"z17",
"z18",
"z19",
"z20",
"z21",
"z22",
"z23",
"z24",
"z25",
"z26",
"z27",
"z28",
"z29",
"z30",
"z31"
// Transform specialization for bfloat16: forwards to the generic 16-bit
// SME transpose-interleave kernel, reinterpreting bfloat16 storage as
// uint16_t (bit pattern is preserved; the kernel only moves halfwords).
// The source pointer is offset to the (k0, x0) sub-block of the input.
// NOTE(review): this extraction is truncated -- the width/stride/height
// arguments of the call and the closing of the function are not visible
// here; presumably they are derived from xmax-x0, stride and kmax-k0 as
// in the sibling __fp16 overload below. Verify against the full file.
145 void Transform<4, 2, true, VLType::SME>(
146 bfloat16 *out,
const bfloat16 *in,
int stride,
int x0,
int xmax,
int k0,
int kmax)
148 sme_transpose_interleave_4VL_2x2(
149 reinterpret_cast<uint16_t *
>(out),
150 reinterpret_cast<const uint16_t *
>(in + k0 * stride + x0),
// Transform specialization for __fp16: forwards to the generic 16-bit
// SME transpose-interleave kernel, reinterpreting half-float storage as
// uint16_t (pure data movement, no arithmetic on the values).
// Width and stride passed to the kernel are scaled from element counts:
// since sizeof(__fp16) == 2, `(xmax-x0) * sizeof(__fp16) / 2` equals the
// element count xmax-x0, and `stride * sizeof(__fp16)` converts the row
// stride from elements to bytes.
// NOTE(review): this extraction is truncated -- the height argument and
// the closing of the call/function are not visible here; confirm against
// the full file.
158 void Transform<4, 2, true, VLType::SME>(
159 __fp16 *out,
const __fp16 *in,
int stride,
int x0,
int xmax,
int k0,
int kmax)
161 sme_transpose_interleave_4VL_2x2(
162 reinterpret_cast<uint16_t *
>(out),
163 reinterpret_cast<const uint16_t *
>(in + k0 * stride + x0),
164 (xmax-x0) *
sizeof(__fp16) / 2,
165 stride *
sizeof(__fp16),
171 #endif // defined(ARM_COMPUTE_ENABLE_SME)