Fri Dec 21 08:01:01 2018 UTC ()
- add a workaround for Cavium ThunderX erratum 27456.
- add a cpufuncs table to cpu_info, since each CPU cluster may have different errata (e.g. big.LITTLE).


(ryo)
diff -r1.14 -r1.15 src/sys/arch/aarch64/aarch64/cpu.c
diff -r1.4 -r1.5 src/sys/arch/aarch64/aarch64/cpufunc.c
diff -r1.2 -r1.3 src/sys/arch/aarch64/aarch64/cpufunc_asm_armv8.S
diff -r1.33 -r1.34 src/sys/arch/aarch64/aarch64/pmap.c
diff -r1.12 -r1.13 src/sys/arch/aarch64/include/cpu.h
diff -r1.4 -r1.5 src/sys/arch/aarch64/include/cpufunc.h

cvs diff -r1.14 -r1.15 src/sys/arch/aarch64/aarch64/cpu.c (expand / switch to unified diff)

--- src/sys/arch/aarch64/aarch64/cpu.c 2018/11/28 09:16:19 1.14
+++ src/sys/arch/aarch64/aarch64/cpu.c 2018/12/21 08:01:01 1.15
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: cpu.c,v 1.14 2018/11/28 09:16:19 ryo Exp $ */ 1/* $NetBSD: cpu.c,v 1.15 2018/12/21 08:01:01 ryo Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2017 Ryo Shimizu <ryo@nerv.org> 4 * Copyright (c) 2017 Ryo Shimizu <ryo@nerv.org>
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
@@ -17,27 +17,27 @@ @@ -17,27 +17,27 @@
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 19 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
20 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
24 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 24 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
25 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE. 26 * POSSIBILITY OF SUCH DAMAGE.
27 */ 27 */
28 28
29#include <sys/cdefs.h> 29#include <sys/cdefs.h>
30__KERNEL_RCSID(1, "$NetBSD: cpu.c,v 1.14 2018/11/28 09:16:19 ryo Exp $"); 30__KERNEL_RCSID(1, "$NetBSD: cpu.c,v 1.15 2018/12/21 08:01:01 ryo Exp $");
31 31
32#include "locators.h" 32#include "locators.h"
33#include "opt_arm_debug.h" 33#include "opt_arm_debug.h"
34#include "opt_fdt.h" 34#include "opt_fdt.h"
35#include "opt_multiprocessor.h" 35#include "opt_multiprocessor.h"
36 36
37#include <sys/param.h> 37#include <sys/param.h>
38#include <sys/systm.h> 38#include <sys/systm.h>
39#include <sys/atomic.h> 39#include <sys/atomic.h>
40#include <sys/device.h> 40#include <sys/device.h>
41#include <sys/cpu.h> 41#include <sys/cpu.h>
42#include <sys/kmem.h> 42#include <sys/kmem.h>
43#include <sys/reboot.h> 43#include <sys/reboot.h>
@@ -153,26 +153,27 @@ cpu_attach(device_t dv, cpuid_t id) @@ -153,26 +153,27 @@ cpu_attach(device_t dv, cpuid_t id)
153 } 153 }
154 154
155 ci->ci_dev = dv; 155 ci->ci_dev = dv;
156 dv->dv_private = ci; 156 dv->dv_private = ci;
157 157
158 cpu_identify(ci->ci_dev, ci); 158 cpu_identify(ci->ci_dev, ci);
159#ifdef MULTIPROCESSOR 159#ifdef MULTIPROCESSOR
160 if (unit != 0) { 160 if (unit != 0) {
161 mi_cpu_attach(ci); 161 mi_cpu_attach(ci);
162 return; 162 return;
163 } 163 }
164#endif /* MULTIPROCESSOR */ 164#endif /* MULTIPROCESSOR */
165 165
 166 set_cpufuncs();
166 fpu_attach(ci); 167 fpu_attach(ci);
167 168
168 cpu_identify1(dv, ci); 169 cpu_identify1(dv, ci);
169 aarch64_getcacheinfo(); 170 aarch64_getcacheinfo();
170 aarch64_printcacheinfo(dv); 171 aarch64_printcacheinfo(dv);
171 cpu_identify2(dv, ci); 172 cpu_identify2(dv, ci);
172 173
173 cpu_setup_sysctl(dv, ci); 174 cpu_setup_sysctl(dv, ci);
174} 175}
175 176
176struct cpuidtab { 177struct cpuidtab {
177 uint32_t cpu_partnum; 178 uint32_t cpu_partnum;
178 const char *cpu_name; 179 const char *cpu_name;
@@ -512,26 +513,27 @@ cpu_boot_secondary_processors(void) @@ -512,26 +513,27 @@ cpu_boot_secondary_processors(void)
512 513
513 /* add available processors to kcpuset */ 514 /* add available processors to kcpuset */
514 uint32_t mbox = arm_cpu_hatched; 515 uint32_t mbox = arm_cpu_hatched;
515 kcpuset_export_u32(kcpuset_attached, &mbox, sizeof(mbox)); 516 kcpuset_export_u32(kcpuset_attached, &mbox, sizeof(mbox));
516} 517}
517 518
518void 519void
519cpu_hatch(struct cpu_info *ci) 520cpu_hatch(struct cpu_info *ci)
520{ 521{
521 KASSERT(curcpu() == ci); 522 KASSERT(curcpu() == ci);
522 523
523 mutex_enter(&cpu_hatch_lock); 524 mutex_enter(&cpu_hatch_lock);
524 525
 526 set_cpufuncs();
525 fpu_attach(ci); 527 fpu_attach(ci);
526 528
527 cpu_identify1(ci->ci_dev, ci); 529 cpu_identify1(ci->ci_dev, ci);
528 aarch64_getcacheinfo(); 530 aarch64_getcacheinfo();
529 aarch64_printcacheinfo(ci->ci_dev); 531 aarch64_printcacheinfo(ci->ci_dev);
530 cpu_identify2(ci->ci_dev, ci); 532 cpu_identify2(ci->ci_dev, ci);
531 533
532 mutex_exit(&cpu_hatch_lock); 534 mutex_exit(&cpu_hatch_lock);
533 535
534 intr_cpu_init(ci); 536 intr_cpu_init(ci);
535 537
536#ifdef FDT 538#ifdef FDT
537 arm_fdt_cpu_hatch(ci); 539 arm_fdt_cpu_hatch(ci);

cvs diff -r1.4 -r1.5 src/sys/arch/aarch64/aarch64/cpufunc.c (expand / switch to unified diff)

--- src/sys/arch/aarch64/aarch64/cpufunc.c 2018/08/29 06:16:40 1.4
+++ src/sys/arch/aarch64/aarch64/cpufunc.c 2018/12/21 08:01:01 1.5
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: cpufunc.c,v 1.4 2018/08/29 06:16:40 ryo Exp $ */ 1/* $NetBSD: cpufunc.c,v 1.5 2018/12/21 08:01:01 ryo Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2017 Ryo Shimizu <ryo@nerv.org> 4 * Copyright (c) 2017 Ryo Shimizu <ryo@nerv.org>
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
@@ -17,27 +17,27 @@ @@ -17,27 +17,27 @@
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 19 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
20 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
24 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 24 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
25 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE. 26 * POSSIBILITY OF SUCH DAMAGE.
27 */ 27 */
28 28
29#include <sys/cdefs.h> 29#include <sys/cdefs.h>
30__KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.4 2018/08/29 06:16:40 ryo Exp $"); 30__KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.5 2018/12/21 08:01:01 ryo Exp $");
31 31
32#include <sys/param.h> 32#include <sys/param.h>
33#include <sys/types.h> 33#include <sys/types.h>
34#include <sys/kmem.h> 34#include <sys/kmem.h>
35 35
36#include <aarch64/cpu.h> 36#include <aarch64/cpu.h>
37#include <aarch64/cpufunc.h> 37#include <aarch64/cpufunc.h>
38 38
39u_int cputype; /* compat arm */ 39u_int cputype; /* compat arm */
40u_int arm_dcache_align; /* compat arm */ 40u_int arm_dcache_align; /* compat arm */
41u_int arm_dcache_align_mask; /* compat arm */ 41u_int arm_dcache_align_mask; /* compat arm */
42u_int arm_dcache_maxline; 42u_int arm_dcache_maxline;
43 43
@@ -394,13 +394,37 @@ aarch64_dcache_wb_all(void) @@ -394,13 +394,37 @@ aarch64_dcache_wb_all(void)
394 int level; 394 int level;
395 395
396 cinfo = curcpu()->ci_cacheinfo; 396 cinfo = curcpu()->ci_cacheinfo;
397 397
398 for (level = 0; level < MAX_CACHE_LEVEL; level++) { 398 for (level = 0; level < MAX_CACHE_LEVEL; level++) {
399 if (cinfo[level].cacheable == CACHE_CACHEABLE_NONE) 399 if (cinfo[level].cacheable == CACHE_CACHEABLE_NONE)
400 break; 400 break;
401 401
402 __asm __volatile ("dsb ish"); 402 __asm __volatile ("dsb ish");
403 ln_dcache_wb_all(level, &cinfo[level].dcache); 403 ln_dcache_wb_all(level, &cinfo[level].dcache);
404 } 404 }
405 __asm __volatile ("dsb ish"); 405 __asm __volatile ("dsb ish");
406} 406}
 407
 408int
 409set_cpufuncs(void)
 410{
 411 struct cpu_info * const ci = curcpu();
 412 const uint32_t midr __unused = reg_midr_el1_read();
 413
 414 /* install default functions */
 415 ci->ci_cpufuncs.cf_set_ttbr0 = aarch64_set_ttbr0;
 416
 417
 418 /* install core/cluster specific functions */
 419#ifdef CPU_THUNDERX
 420 /* Cavium erratum 27456 */
 421 if ((midr == CPU_ID_THUNDERXP1d0) ||
 422 (midr == CPU_ID_THUNDERXP1d1) ||
 423 (midr == CPU_ID_THUNDERXP2d1) ||
 424 (midr == CPU_ID_THUNDERX81XXRX)) {
 425 ci->ci_cpufuncs.cf_set_ttbr0 = aarch64_set_ttbr0_thunderx;
 426 }
 427#endif
 428
 429 return 0;
 430}

cvs diff -r1.2 -r1.3 src/sys/arch/aarch64/aarch64/cpufunc_asm_armv8.S (expand / switch to unified diff)

--- src/sys/arch/aarch64/aarch64/cpufunc_asm_armv8.S 2018/07/23 22:51:39 1.2
+++ src/sys/arch/aarch64/aarch64/cpufunc_asm_armv8.S 2018/12/21 08:01:01 1.3
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: cpufunc_asm_armv8.S,v 1.2 2018/07/23 22:51:39 ryo Exp $ */ 1/* $NetBSD: cpufunc_asm_armv8.S,v 1.3 2018/12/21 08:01:01 ryo Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2014 Robin Randhawa 4 * Copyright (c) 2014 Robin Randhawa
5 * Copyright (c) 2015 The FreeBSD Foundation 5 * Copyright (c) 2015 The FreeBSD Foundation
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Portions of this software were developed by Andrew Turner 8 * Portions of this software were developed by Andrew Turner
9 * under sponsorship from the FreeBSD Foundation 9 * under sponsorship from the FreeBSD Foundation
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * are met: 13 * are met:
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
@@ -22,26 +22,27 @@ @@ -22,26 +22,27 @@
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE. 30 * SUCH DAMAGE.
31 * 31 *
32 * $FreeBSD: head/sys/arm64/arm64/cpufunc_asm.S 313347 2017-02-06 17:50:09Z andrew $ 32 * $FreeBSD: head/sys/arm64/arm64/cpufunc_asm.S 313347 2017-02-06 17:50:09Z andrew $
33 */ 33 */
34 34
 35#include "opt_cputypes.h"
35#include "opt_multiprocessor.h" 36#include "opt_multiprocessor.h"
36#include <aarch64/asm.h> 37#include <aarch64/asm.h>
37 38
38 .text 39 .text
39 .align 2 40 .align 2
40 41
41/* 42/*
42 * Macro to handle the cache. This takes the start address in x0, length 43 * Macro to handle the cache. This takes the start address in x0, length
43 * in x1. It will corrupt x0, x1, x2, and x3. 44 * in x1. It will corrupt x0, x1, x2, and x3.
44 */ 45 */
45.macro cache_handle_range dcop = 0, ic = 0, icop = 0 46.macro cache_handle_range dcop = 0, ic = 0, icop = 0
46.if \ic == 0 47.if \ic == 0
47 mrs x3, ctr_el0 48 mrs x3, ctr_el0
@@ -153,26 +154,42 @@ END(aarch64_drain_writebuf) @@ -153,26 +154,42 @@ END(aarch64_drain_writebuf)
153/* 154/*
154 * TLB ops 155 * TLB ops
155 */ 156 */
156 157
157/* void aarch64_set_ttbr0(uint64_t ttbr0) */ 158/* void aarch64_set_ttbr0(uint64_t ttbr0) */
158ENTRY(aarch64_set_ttbr0) 159ENTRY(aarch64_set_ttbr0)
159 dsb ish 160 dsb ish
160 msr ttbr0_el1, x0 161 msr ttbr0_el1, x0
161 dsb ish 162 dsb ish
162 isb 163 isb
163 ret 164 ret
164END(aarch64_set_ttbr0) 165END(aarch64_set_ttbr0)
165 166
 167#ifdef CPU_THUNDERX
 168/*
 169 * Cavium erratum 27456
 170 * void aarch64_set_ttbr0_thunderx(uint64_t ttbr0)
 171 */
 172ENTRY(aarch64_set_ttbr0_thunderx)
 173 dsb ish
 174 msr ttbr0_el1, x0
 175 isb
 176 ic iallu
 177 dsb nsh
 178 isb
 179 ret
 180END(aarch64_set_ttbr0_thunderx)
 181#endif /* CPU_THUNDERX */
 182
166/* void aarch64_tlbi_all(void) */ 183/* void aarch64_tlbi_all(void) */
167ENTRY(aarch64_tlbi_all) 184ENTRY(aarch64_tlbi_all)
168 dsb ishst 185 dsb ishst
169#ifdef MULTIPROCESSOR 186#ifdef MULTIPROCESSOR
170 tlbi vmalle1is 187 tlbi vmalle1is
171#else 188#else
172 tlbi vmalle1 189 tlbi vmalle1
173#endif 190#endif
174 dsb ish 191 dsb ish
175 isb 192 isb
176 ret 193 ret
177END(aarch64_tlbi_all) 194END(aarch64_tlbi_all)
178 195

cvs diff -r1.33 -r1.34 src/sys/arch/aarch64/aarch64/pmap.c (expand / switch to unified diff)

--- src/sys/arch/aarch64/aarch64/pmap.c 2018/11/01 20:34:49 1.33
+++ src/sys/arch/aarch64/aarch64/pmap.c 2018/12/21 08:01:01 1.34
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: pmap.c,v 1.33 2018/11/01 20:34:49 maxv Exp $ */ 1/* $NetBSD: pmap.c,v 1.34 2018/12/21 08:01:01 ryo Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2017 Ryo Shimizu <ryo@nerv.org> 4 * Copyright (c) 2017 Ryo Shimizu <ryo@nerv.org>
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
@@ -17,27 +17,27 @@ @@ -17,27 +17,27 @@
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 19 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
20 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
24 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 24 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
25 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE. 26 * POSSIBILITY OF SUCH DAMAGE.
27 */ 27 */
28 28
29#include <sys/cdefs.h> 29#include <sys/cdefs.h>
30__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.33 2018/11/01 20:34:49 maxv Exp $"); 30__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.34 2018/12/21 08:01:01 ryo Exp $");
31 31
32#include "opt_arm_debug.h" 32#include "opt_arm_debug.h"
33#include "opt_ddb.h" 33#include "opt_ddb.h"
34#include "opt_kasan.h" 34#include "opt_kasan.h"
35#include "opt_multiprocessor.h" 35#include "opt_multiprocessor.h"
36#include "opt_pmap.h" 36#include "opt_pmap.h"
37#include "opt_uvmhist.h" 37#include "opt_uvmhist.h"
38 38
39#include <sys/param.h> 39#include <sys/param.h>
40#include <sys/types.h> 40#include <sys/types.h>
41#include <sys/kmem.h> 41#include <sys/kmem.h>
42#include <sys/vmem.h> 42#include <sys/vmem.h>
43#include <sys/atomic.h> 43#include <sys/atomic.h>
@@ -1196,27 +1196,27 @@ pmap_activate(struct lwp *l) @@ -1196,27 +1196,27 @@ pmap_activate(struct lwp *l)
1196 if (l != curlwp) 1196 if (l != curlwp)
1197 return; 1197 return;
1198 1198
1199 KASSERT(pm->pm_l0table != NULL); 1199 KASSERT(pm->pm_l0table != NULL);
1200 1200
1201 UVMHIST_LOG(pmaphist, "lwp=%p (pid=%d)", l, l->l_proc->p_pid, 0, 0); 1201 UVMHIST_LOG(pmaphist, "lwp=%p (pid=%d)", l, l->l_proc->p_pid, 0, 0);
1202 1202
1203 /* XXX */ 1203 /* XXX */
1204 CTASSERT(PID_MAX <= 65535); /* 16bit ASID */ 1204 CTASSERT(PID_MAX <= 65535); /* 16bit ASID */
1205 if (pm->pm_asid == -1) 1205 if (pm->pm_asid == -1)
1206 pm->pm_asid = l->l_proc->p_pid; 1206 pm->pm_asid = l->l_proc->p_pid;
1207 1207
1208 ttbr0 = ((uint64_t)pm->pm_asid << 48) | pm->pm_l0table_pa; 1208 ttbr0 = ((uint64_t)pm->pm_asid << 48) | pm->pm_l0table_pa;
1209 aarch64_set_ttbr0(ttbr0); 1209 cpu_set_ttbr0(ttbr0);
1210 1210
1211 pm->pm_activated = true; 1211 pm->pm_activated = true;
1212 1212
1213 PMAP_COUNT(activate); 1213 PMAP_COUNT(activate);
1214} 1214}
1215 1215
1216void 1216void
1217pmap_deactivate(struct lwp *l) 1217pmap_deactivate(struct lwp *l)
1218{ 1218{
1219 struct pmap *pm = l->l_proc->p_vmspace->vm_map.pmap; 1219 struct pmap *pm = l->l_proc->p_vmspace->vm_map.pmap;
1220 1220
1221 UVMHIST_FUNC(__func__); 1221 UVMHIST_FUNC(__func__);
1222 UVMHIST_CALLED(pmaphist); 1222 UVMHIST_CALLED(pmaphist);

cvs diff -r1.12 -r1.13 src/sys/arch/aarch64/include/cpu.h (expand / switch to unified diff)

--- src/sys/arch/aarch64/include/cpu.h 2018/11/24 22:49:35 1.12
+++ src/sys/arch/aarch64/include/cpu.h 2018/12/21 08:01:01 1.13
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: cpu.h,v 1.12 2018/11/24 22:49:35 skrll Exp $ */ 1/* $NetBSD: cpu.h,v 1.13 2018/12/21 08:01:01 ryo Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2014 The NetBSD Foundation, Inc. 4 * Copyright (c) 2014 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Matt Thomas of 3am Software Foundry. 8 * by Matt Thomas of 3am Software Foundry.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -54,26 +54,30 @@ struct clockframe { @@ -54,26 +54,30 @@ struct clockframe {
54#define CLKF_USERMODE(cf) ((((cf)->cf_tf.tf_spsr) & 0x0f) == 0) 54#define CLKF_USERMODE(cf) ((((cf)->cf_tf.tf_spsr) & 0x0f) == 0)
55#define CLKF_PC(cf) ((cf)->cf_tf.tf_pc) 55#define CLKF_PC(cf) ((cf)->cf_tf.tf_pc)
56#define CLKF_INTR(cf) ((void)(cf), curcpu()->ci_intr_depth > 1) 56#define CLKF_INTR(cf) ((void)(cf), curcpu()->ci_intr_depth > 1)
57 57
58/* 58/*
59 * LWP_PC: Find out the program counter for the given lwp. 59 * LWP_PC: Find out the program counter for the given lwp.
60 */ 60 */
61#define LWP_PC(l) ((l)->l_md.md_utf->tf_pc) 61#define LWP_PC(l) ((l)->l_md.md_utf->tf_pc)
62 62
63#include <sys/cpu_data.h> 63#include <sys/cpu_data.h>
64#include <sys/device_if.h> 64#include <sys/device_if.h>
65#include <sys/intr.h> 65#include <sys/intr.h>
66 66
 67struct aarch64_cpufuncs {
 68 void (*cf_set_ttbr0)(uint64_t);
 69};
 70
67struct cpu_info { 71struct cpu_info {
68 struct cpu_data ci_data; 72 struct cpu_data ci_data;
69 device_t ci_dev; 73 device_t ci_dev;
70 cpuid_t ci_cpuid; 74 cpuid_t ci_cpuid;
71 struct lwp *ci_curlwp; 75 struct lwp *ci_curlwp;
72 struct lwp *ci_softlwps[SOFTINT_COUNT]; 76 struct lwp *ci_softlwps[SOFTINT_COUNT];
73 77
74 uint64_t ci_lastintr; 78 uint64_t ci_lastintr;
75 79
76 int ci_mtx_oldspl; 80 int ci_mtx_oldspl;
77 int ci_mtx_count; 81 int ci_mtx_count;
78 82
79 int ci_want_resched; 83 int ci_want_resched;
@@ -88,26 +92,27 @@ struct cpu_info { @@ -88,26 +92,27 @@ struct cpu_info {
88 struct evcnt ci_vfp_save; 92 struct evcnt ci_vfp_save;
89 struct evcnt ci_vfp_release; 93 struct evcnt ci_vfp_release;
90 94
91 /* interrupt controller */ 95 /* interrupt controller */
92 u_int ci_gic_redist; /* GICv3 redistributor index */ 96 u_int ci_gic_redist; /* GICv3 redistributor index */
93 uint64_t ci_gic_sgir; /* GICv3 SGIR target */ 97 uint64_t ci_gic_sgir; /* GICv3 SGIR target */
94 98
95 /* ACPI */ 99 /* ACPI */
96 uint64_t ci_acpiid; /* ACPI Processor Unique ID */ 100 uint64_t ci_acpiid; /* ACPI Processor Unique ID */
97 101
98 struct aarch64_sysctl_cpu_id ci_id; 102 struct aarch64_sysctl_cpu_id ci_id;
99 103
100 struct aarch64_cache_info *ci_cacheinfo; 104 struct aarch64_cache_info *ci_cacheinfo;
 105 struct aarch64_cpufuncs ci_cpufuncs;
101 106
102} __aligned(COHERENCY_UNIT); 107} __aligned(COHERENCY_UNIT);
103 108
104static inline struct cpu_info * 109static inline struct cpu_info *
105curcpu(void) 110curcpu(void)
106{ 111{
107 struct cpu_info *ci; 112 struct cpu_info *ci;
108 __asm __volatile ("mrs %0, tpidr_el1" : "=r"(ci)); 113 __asm __volatile ("mrs %0, tpidr_el1" : "=r"(ci));
109 return ci; 114 return ci;
110} 115}
111#define curlwp (curcpu()->ci_curlwp) 116#define curlwp (curcpu()->ci_curlwp)
112 117
113#define setsoftast(ci) atomic_or_uint(&(ci)->ci_astpending, __BIT(0)) 118#define setsoftast(ci) atomic_or_uint(&(ci)->ci_astpending, __BIT(0))

cvs diff -r1.4 -r1.5 src/sys/arch/aarch64/include/cpufunc.h (expand / switch to unified diff)

--- src/sys/arch/aarch64/include/cpufunc.h 2018/12/15 16:54:30 1.4
+++ src/sys/arch/aarch64/include/cpufunc.h 2018/12/21 08:01:01 1.5
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: cpufunc.h,v 1.4 2018/12/15 16:54:30 alnsn Exp $ */ 1/* $NetBSD: cpufunc.h,v 1.5 2018/12/21 08:01:01 ryo Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2017 Ryo Shimizu <ryo@nerv.org> 4 * Copyright (c) 2017 Ryo Shimizu <ryo@nerv.org>
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
@@ -24,32 +24,26 @@ @@ -24,32 +24,26 @@
24 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 24 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
25 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE. 26 * POSSIBILITY OF SUCH DAMAGE.
27 */ 27 */
28 28
29#ifndef _AARCH64_CPUFUNC_H_ 29#ifndef _AARCH64_CPUFUNC_H_
30#define _AARCH64_CPUFUNC_H_ 30#define _AARCH64_CPUFUNC_H_
31 31
32#ifdef _KERNEL 32#ifdef _KERNEL
33 33
34#include <arm/armreg.h> 34#include <arm/armreg.h>
35#include <sys/device_if.h> 35#include <sys/device_if.h>
36 36
37static inline int 
38set_cpufuncs(void) 
39{ 
40 return 0; 
41} 
42 
43struct aarch64_cache_unit { 37struct aarch64_cache_unit {
44 u_int cache_type; 38 u_int cache_type;
45#define CACHE_TYPE_UNKNOWN 0 39#define CACHE_TYPE_UNKNOWN 0
46#define CACHE_TYPE_VIVT 1 /* ASID-tagged VIVT */ 40#define CACHE_TYPE_VIVT 1 /* ASID-tagged VIVT */
47#define CACHE_TYPE_VIPT 2 41#define CACHE_TYPE_VIPT 2
48#define CACHE_TYPE_PIPT 3 42#define CACHE_TYPE_PIPT 3
49 u_int cache_line_size; 43 u_int cache_line_size;
50 u_int cache_ways; 44 u_int cache_ways;
51 u_int cache_sets; 45 u_int cache_sets;
52 u_int cache_way_size; 46 u_int cache_way_size;
53 u_int cache_size; 47 u_int cache_size;
54 u_int cache_purging; 48 u_int cache_purging;
55#define CACHE_PURGING_WB 0x01 49#define CACHE_PURGING_WB 0x01
@@ -65,47 +59,50 @@ struct aarch64_cache_info { @@ -65,47 +59,50 @@ struct aarch64_cache_info {
65#define CACHE_CACHEABLE_DCACHE 2 /* data cache only */ 59#define CACHE_CACHEABLE_DCACHE 2 /* data cache only */
66#define CACHE_CACHEABLE_IDCACHE 3 /* instruction and data caches */ 60#define CACHE_CACHEABLE_IDCACHE 3 /* instruction and data caches */
67#define CACHE_CACHEABLE_UNIFIED 4 /* unified cache */ 61#define CACHE_CACHEABLE_UNIFIED 4 /* unified cache */
68 struct aarch64_cache_unit icache; 62 struct aarch64_cache_unit icache;
69 struct aarch64_cache_unit dcache; 63 struct aarch64_cache_unit dcache;
70}; 64};
71 65
72#define MAX_CACHE_LEVEL 8 /* ARMv8 has maximum 8 level cache */ 66#define MAX_CACHE_LEVEL 8 /* ARMv8 has maximum 8 level cache */
73extern struct aarch64_cache_info aarch64_cache_info[MAX_CACHE_LEVEL]; 67extern struct aarch64_cache_info aarch64_cache_info[MAX_CACHE_LEVEL];
74extern u_int aarch64_cache_vindexsize; /* cachesize/way (VIVT/VIPT) */ 68extern u_int aarch64_cache_vindexsize; /* cachesize/way (VIVT/VIPT) */
75extern u_int aarch64_cache_prefer_mask; 69extern u_int aarch64_cache_prefer_mask;
76extern u_int cputype; /* compat arm */ 70extern u_int cputype; /* compat arm */
77 71
 72int set_cpufuncs(void);
78void aarch64_getcacheinfo(void); 73void aarch64_getcacheinfo(void);
79void aarch64_printcacheinfo(device_t); 74void aarch64_printcacheinfo(device_t);
80 75
81void aarch64_dcache_wbinv_all(void); 76void aarch64_dcache_wbinv_all(void);
82void aarch64_dcache_inv_all(void); 77void aarch64_dcache_inv_all(void);
83void aarch64_dcache_wb_all(void); 78void aarch64_dcache_wb_all(void);
84void aarch64_icache_inv_all(void); 79void aarch64_icache_inv_all(void);
85 80
86/* cache op in cpufunc_asm_armv8.S */ 81/* cache op in cpufunc_asm_armv8.S */
87void aarch64_nullop(void); 82void aarch64_nullop(void);
88uint32_t aarch64_cpuid(void); 83uint32_t aarch64_cpuid(void);
89void aarch64_icache_sync_range(vaddr_t, vsize_t); 84void aarch64_icache_sync_range(vaddr_t, vsize_t);
90void aarch64_idcache_wbinv_range(vaddr_t, vsize_t); 85void aarch64_idcache_wbinv_range(vaddr_t, vsize_t);
91void aarch64_dcache_wbinv_range(vaddr_t, vsize_t); 86void aarch64_dcache_wbinv_range(vaddr_t, vsize_t);
92void aarch64_dcache_inv_range(vaddr_t, vsize_t); 87void aarch64_dcache_inv_range(vaddr_t, vsize_t);
93void aarch64_dcache_wb_range(vaddr_t, vsize_t); 88void aarch64_dcache_wb_range(vaddr_t, vsize_t);
94void aarch64_icache_inv_all(void); 89void aarch64_icache_inv_all(void);
95void aarch64_drain_writebuf(void); 90void aarch64_drain_writebuf(void);
96 91
97/* tlb op in cpufunc_asm_armv8.S */ 92/* tlb op in cpufunc_asm_armv8.S */
 93#define cpu_set_ttbr0(t) curcpu()->ci_cpufuncs.cf_set_ttbr0((t))
98void aarch64_set_ttbr0(uint64_t); 94void aarch64_set_ttbr0(uint64_t);
 95void aarch64_set_ttbr0_thunderx(uint64_t);
99void aarch64_tlbi_all(void); /* all ASID, all VA */ 96void aarch64_tlbi_all(void); /* all ASID, all VA */
100void aarch64_tlbi_by_asid(int); /* an ASID, all VA */ 97void aarch64_tlbi_by_asid(int); /* an ASID, all VA */
101void aarch64_tlbi_by_va(vaddr_t); /* all ASID, a VA */ 98void aarch64_tlbi_by_va(vaddr_t); /* all ASID, a VA */
102void aarch64_tlbi_by_va_ll(vaddr_t); /* all ASID, a VA, lastlevel */ 99void aarch64_tlbi_by_va_ll(vaddr_t); /* all ASID, a VA, lastlevel */
103void aarch64_tlbi_by_asid_va(int, vaddr_t); /* an ASID, a VA */ 100void aarch64_tlbi_by_asid_va(int, vaddr_t); /* an ASID, a VA */
104void aarch64_tlbi_by_asid_va_ll(int, vaddr_t); /* an ASID, a VA, lastlevel */ 101void aarch64_tlbi_by_asid_va_ll(int, vaddr_t); /* an ASID, a VA, lastlevel */
105 102
106 103
107/* misc */ 104/* misc */
108#define cpu_idnum() aarch64_cpuid() 105#define cpu_idnum() aarch64_cpuid()
109 106
110/* cache op */ 107/* cache op */
111 108