Sun Jul 19 07:18:07 2020 UTC (ryo)

fix build error with LLVM.
cvs diff -r1.6 -r1.7 src/sys/arch/aarch64/aarch64/cpufunc_asm_armv8.S

--- src/sys/arch/aarch64/aarch64/cpufunc_asm_armv8.S 2020/07/01 07:59:16 1.6
+++ src/sys/arch/aarch64/aarch64/cpufunc_asm_armv8.S 2020/07/19 07:18:07 1.7
@@ -1,294 +1,294 @@
-/* $NetBSD: cpufunc_asm_armv8.S,v 1.6 2020/07/01 07:59:16 ryo Exp $ */
+/* $NetBSD: cpufunc_asm_armv8.S,v 1.7 2020/07/19 07:18:07 ryo Exp $ */
 
 /*-
  * Copyright (c) 2014 Robin Randhawa
  * Copyright (c) 2015 The FreeBSD Foundation
  * All rights reserved.
  *
  * Portions of this software were developed by Andrew Turner
  * under sponsorship from the FreeBSD Foundation
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  * $FreeBSD: head/sys/arm64/arm64/cpufunc_asm.S 313347 2017-02-06 17:50:09Z andrew $
  */
 
 #include "opt_cputypes.h"
 #include "opt_multiprocessor.h"
 #include <aarch64/asm.h>
 
 	.text
 	.align 2
 
 /*
  * Macro to handle the cache. This takes the start address in x0, length
  * in x1. It will corrupt x2-x5.
  */
-.macro cache_handle_range dcop = 0, icop = 0
+.macro cache_handle_range dcop = "", icop = ""
 	mrs x3, ctr_el0
 	mov x4, #4 /* size of word */
-.if \dcop != 0
+.ifnb \dcop
 	ubfx x2, x3, #16, #4 /* x2 = D cache shift */
 	lsl x2, x4, x2 /* x2 = D cache line size */
 .endif
-.if \icop != 0
+.ifnb \icop
 	and x3, x3, #15 /* x3 = I cache shift */
 	lsl x3, x4, x3 /* x3 = I cache line size */
 .endif
-.if \dcop != 0
+.ifnb \dcop
 	sub x4, x2, #1 /* Get the address mask */
 	and x4, x0, x4 /* Get the low bits of the address */
 	add x5, x1, x4 /* Add these to the size */
 	bic x4, x0, x4 /* Clear the low bit of the address */
 1:
 	dc \dcop, x4
 	add x4, x4, x2 /* Move to the next line */
 	subs x5, x5, x2 /* Reduce the size */
 	b.hi 1b /* Check if we are done */
 	dsb ish
 .endif
-.if \icop != 0
+.ifnb \icop
 	sub x4, x3, #1 /* Get the address mask */
 	and x4, x0, x4 /* Get the low bits of the address */
 	add x5, x1, x4 /* Add these to the size */
 	bic x4, x0, x4 /* Clear the low bit of the address */
 1:
 	ic \icop, x4
 	add x4, x4, x3 /* Move to the next line */
 	subs x5, x5, x3 /* Reduce the size */
 	b.hi 1b /* Check if we are done */
 	dsb ish
 	isb
 .endif
 .endm
 
 
 ENTRY(aarch64_nullop)
 	ret
 END(aarch64_nullop)
 
 ENTRY(aarch64_cpuid)
 	mrs x0, midr_el1
 	ret
 END(aarch64_cpuid)
 
 /*
  * void aarch64_dcache_wb_range(vaddr_t, vsize_t)
  */
 ENTRY(aarch64_dcache_wb_range)
 	cache_handle_range dcop = cvac
 	ret
 END(aarch64_dcache_wb_range)
 
 /*
  * void aarch64_dcache_wbinv_range(vaddr_t, vsize_t)
  */
 ENTRY(aarch64_dcache_wbinv_range)
 	cache_handle_range dcop = civac
 	ret
 END(aarch64_dcache_wbinv_range)
 
 /*
  * void aarch64_dcache_inv_range(vaddr_t, vsize_t)
  *
  * Note, we must not invalidate everything. If the range is too big we
  * must use wb-inv of the entire cache.
  */
 ENTRY(aarch64_dcache_inv_range)
 	cache_handle_range dcop = ivac
 	ret
 END(aarch64_dcache_inv_range)
 
 /*
  * void aarch64_idcache_wbinv_range(vaddr_t, vsize_t)
  */
 ENTRY(aarch64_idcache_wbinv_range)
 	cache_handle_range dcop = civac, icop = ivau
 	ret
 END(aarch64_idcache_wbinv_range)
 
 /*
  * void aarch64_icache_sync_range(vaddr_t, vsize_t)
  */
 ENTRY(aarch64_icache_sync_range)
 	cache_handle_range dcop = cvau, icop = ivau
 	ret
 END(aarch64_icache_sync_range)
 
 /*
  * void aarch64_icache_inv_range(vaddr_t, vsize_t)
  */
 ENTRY(aarch64_icache_inv_range)
 	cache_handle_range icop = ivau
 	ret
 END(aarch64_icache_inv_range)
 
 /*
  * void aarch64_icache_barrier_range(vaddr_t, vsize_t)
  */
 ENTRY(aarch64_icache_barrier_range)
 	dsb ishst
 	isb
 	ret
 END(aarch64_icache_barrier_range)
 
 /*
  * void aarch64_icache_inv_all(void)
  */
 ENTRY(aarch64_icache_inv_all)
 	dsb ish
 #ifdef MULTIPROCESSOR
 	ic ialluis
 #else
 	ic iallu
 #endif
 	dsb ish
 	isb
 	ret
 END(aarch64_icache_inv_all)
 
 
 
 ENTRY(aarch64_drain_writebuf)
 	dsb sy
 	ret
 END(aarch64_drain_writebuf)
 
 
 /*
  * TLB ops
  */
 
 /* void aarch64_set_ttbr0(uint64_t ttbr0) */
 ENTRY(aarch64_set_ttbr0)
 	dsb ish
 	msr ttbr0_el1, x0
 	dsb ish
 	isb
 	ret
 END(aarch64_set_ttbr0)
 
 #ifdef CPU_THUNDERX
 /*
  * Cavium erratum 27456
  * void aarch64_set_ttbr0_thunderx(uint64_t ttbr0)
  */
 ENTRY(aarch64_set_ttbr0_thunderx)
 	dsb ish
 	msr ttbr0_el1, x0
 	isb
 	ic iallu
 	dsb nsh
 	isb
 	ret
 END(aarch64_set_ttbr0_thunderx)
 #endif /* CPU_THUNDERX */
 
 /* void aarch64_tlbi_all(void) */
 ENTRY(aarch64_tlbi_all)
 	dsb ishst
 #ifdef MULTIPROCESSOR
 	tlbi vmalle1is
 #else
 	tlbi vmalle1
 #endif
 	dsb ish
 	isb
 	ret
 END(aarch64_tlbi_all)
 
 /* void aarch64_tlbi_by_asid(int asid) */
 ENTRY(aarch64_tlbi_by_asid)
 	/* x8 = bit 63[ASID]48, 47[RES0]0 */
 	lsl x8, x0, #48
 	dsb ishst
 #ifdef MULTIPROCESSOR
 	tlbi aside1is, x8
 #else
 	tlbi aside1, x8
 #endif
 	dsb ish
 	isb
 	ret
 END(aarch64_tlbi_by_asid)
 
 /* aarch64_tlbi_by_va(vaddr_t va) */
 ENTRY(aarch64_tlbi_by_va)
 	/* x8 = bit 63[RES0]44, 43[VA(55:12)]0 */
 	ubfx x8, x0, #12, #44
 	dsb ishst
 #ifdef MULTIPROCESSOR
 	tlbi vaae1is, x8
 #else
 	tlbi vaae1, x8
 #endif
 	dsb ish
 	isb
 	ret
 END(aarch64_tlbi_by_va)
 
 /* aarch64_tlbi_by_va_ll(vaddr_t va) */
 ENTRY(aarch64_tlbi_by_va_ll)
 	/* x8 = bit 63[RES0]44, 43[VA(55:12)]0 */
 	ubfx x8, x0, #12, #44
 	dsb ishst
 #ifdef MULTIPROCESSOR
 	tlbi vaale1is, x8
 #else
 	tlbi vaale1, x8
 #endif
 	dsb ish
 	isb
 	ret
 END(aarch64_tlbi_by_va_ll)
 
 /* aarch64_tlbi_by_asid_va(int asid, vaddr_t va) */
 ENTRY(aarch64_tlbi_by_asid_va)
 	/* x8 = bit 63[ASID]48, 47[RES0]44, 43[VA(55:12)]0 */
 	lsl x8, x0, #48
 	bfxil x8, x1, #12, #44
 	dsb ishst
 #ifdef MULTIPROCESSOR
 	tlbi vae1is, x8
 #else
 	tlbi vae1, x8
 #endif
 	dsb ish
 	isb
 	ret
 END(aarch64_tlbi_by_asid_va)
 
 /* aarch64_tlbi_by_asid_va_ll(int asid, vaddr_t va) */
 ENTRY(aarch64_tlbi_by_asid_va_ll)
 	/* x8 = bit 63[ASID]48, 47[RES0]44, 43[VA(55:12)]0 */
 	lsl x8, x0, #48
 	bfxil x8, x1, #12, #44
 	dsb ishst
 #ifdef MULTIPROCESSOR
 	tlbi vale1is, x8
 #else
 	tlbi vale1, x8
 #endif
 	dsb ish
 	isb
 	ret
 END(aarch64_tlbi_by_asid_va_ll)
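
A note on the change itself: revision 1.6 defaulted the cache_handle_range arguments to 0 and guarded each block with ".if \dcop != 0". When the macro is invoked with a symbolic cache operation such as cvac, that comparison is not a constant expression, which appears to be what LLVM's integrated assembler rejects (the build error fixed here), while GNU as had tolerated the construct. Revision 1.7 instead defaults the arguments to empty strings and tests them with .ifnb ("if not blank"), which only checks whether an argument was supplied and is accepted by both assemblers. Below is a minimal standalone sketch of the same pattern, not taken from the file above; the maybe_dc macro name is invented for illustration.

	/*
	 * Guard a macro body with .ifnb instead of comparing the argument
	 * against a sentinel value: no expression needs to be evaluated,
	 * so a symbolic operand works under both GNU as and LLVM's
	 * integrated assembler.
	 */
	.macro	maybe_dc	op = ""
	.ifnb	\op
	dc	\op, x0		/* emitted only when an operation is given */
	.endif
	.endm

	maybe_dc	cvac	/* expands to "dc cvac, x0" */
	maybe_dc		/* expands to nothing */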