Thu May 27 06:11:20 2021 UTC
fix build error with options ARMV85_BTI


(ryo)
diff -r1.4 -r1.5 src/sys/arch/aarch64/aarch64/procfs_machdep.c
diff -r1.19 -r1.20 src/sys/arch/aarch64/include/cpufunc.h
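
The change itself is small: with "options ARMV85_BTI" in the kernel config, procfs_machdep.c tests aarch64_bti_enabled inside an #ifdef ARMV85_BTI block, but no visible declaration of that variable existed, so the build failed. The commit adds the extern declaration to <aarch64/cpufunc.h> and makes procfs_machdep.c include that header. Below is a minimal standalone sketch of the same shared-extern-declaration pattern; the file name, the feature_enabled flag, and the OPT_FEATURE macro are illustrative stand-ins, not the kernel's identifiers.

/*
 * bti_flag_demo.c -- standalone sketch, not kernel code.
 * Build with "cc -DOPT_FEATURE bti_flag_demo.c" to mimic a kernel
 * configured with the option, or without -D to mimic one without it.
 */
#include <stdio.h>

/* In the kernel, this declaration lives in <aarch64/cpufunc.h>. */
extern int feature_enabled;             /* cf. aarch64_bti_enabled */

/* In the kernel, the definition lives in another translation unit. */
int feature_enabled = 1;

int
main(void)
{
        printf("Features\t:");
#ifdef OPT_FEATURE                      /* cf. options ARMV85_BTI */
        if (feature_enabled)            /* undeclared without the header */
                printf(" bti");
#endif
        printf("\n");
        return 0;
}

Without the extern declaration in scope, the conditionally compiled block fails with an "undeclared identifier" error only when the option is enabled, which is exactly the build error the commit message describes.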

cvs diff -r1.4 -r1.5 src/sys/arch/aarch64/aarch64/procfs_machdep.c

--- src/sys/arch/aarch64/aarch64/procfs_machdep.c 2020/10/01 07:31:27 1.4
+++ src/sys/arch/aarch64/aarch64/procfs_machdep.c 2021/05/27 06:11:20 1.5
@@ -1,218 +1,218 @@
-/*	$NetBSD: procfs_machdep.c,v 1.4 2020/10/01 07:31:27 skrll Exp $	*/
+/*	$NetBSD: procfs_machdep.c,v 1.5 2021/05/27 06:11:20 ryo Exp $	*/
 
 /*-
  * Copyright (c) 2020 Ryo Shimizu <ryo@nerv.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: procfs_machdep.c,v 1.4 2020/10/01 07:31:27 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: procfs_machdep.c,v 1.5 2021/05/27 06:11:20 ryo Exp $");
 
 #include <sys/param.h>
 #include <sys/cpu.h>
 #include <sys/systm.h>
 
 #include <miscfs/procfs/procfs.h>
 
 #include <aarch64/armreg.h>
-
+#include <aarch64/cpufunc.h>
 
 /* use variables named 'buf', 'left', 'total' */
 #define FORWARD_BUF(_len) \
         do { \
                 total += _len; \
                 if (_len < left) { \
                         buf += _len; \
                         left -= _len; \
                 } else { \
                         buf += left; \
                         left = 0; \
                 } \
         } while (0 /*CONSTCOND*/)
 
 #define OUTPUT_BUF(fmt, args...) \
         do { \
                 size_t l = snprintf(buf, left, fmt, ## args); \
                 FORWARD_BUF(l); \
         } while (0/*CONSTCOND*/)
 
 static int
 procfs_cpuinfo_features(struct cpu_info *ci, char *buf, int buflen)
 {
         uint64_t isar0, isar1, mmfr2, pfr0, pfr1;
         size_t left, total;
 
         isar0 = ci->ci_id.ac_aa64isar0;
         isar1 = ci->ci_id.ac_aa64isar1;
         mmfr2 = ci->ci_id.ac_aa64mmfr2;
         pfr0 = ci->ci_id.ac_aa64pfr0;
         pfr1 = ci->ci_id.ac_aa64pfr1;
 
         left = buflen;
         total = 0;
 
         /*
          * I don't know if we need to mimic the order of HWCAP in linux
          */
         OUTPUT_BUF("Features\t:");
 #define SO_EQ(reg, mask, val) (__SHIFTOUT((reg), (mask)) == (val))
         if (SO_EQ(pfr0, ID_AA64PFR0_EL1_FP, ID_AA64PFR0_EL1_FP_IMPL))
                 OUTPUT_BUF(" fp");
         if (SO_EQ(pfr0, ID_AA64PFR0_EL1_ADVSIMD, ID_AA64PFR0_EL1_ADV_SIMD_IMPL))
                 OUTPUT_BUF(" asimd");
         /* notyet: " evtstrm" */
         if (SO_EQ(isar0, ID_AA64ISAR0_EL1_AES, ID_AA64ISAR0_EL1_AES_AES))
                 OUTPUT_BUF(" aes");
         if (SO_EQ(isar0, ID_AA64ISAR0_EL1_AES, ID_AA64ISAR0_EL1_AES_PMUL))
                 OUTPUT_BUF(" pmull");
         if (SO_EQ(isar0, ID_AA64ISAR0_EL1_SHA1,
             ID_AA64ISAR0_EL1_SHA1_SHA1CPMHSU))
                 OUTPUT_BUF(" sha1");
         if (SO_EQ(isar0, ID_AA64ISAR0_EL1_SHA2,
             ID_AA64ISAR0_EL1_SHA2_SHA256HSU))
                 OUTPUT_BUF(" sha2");
         if (SO_EQ(isar0, ID_AA64ISAR0_EL1_CRC32, ID_AA64ISAR0_EL1_CRC32_CRC32X))
                 OUTPUT_BUF(" crc32");
         if (SO_EQ(isar0, ID_AA64ISAR0_EL1_ATOMIC, ID_AA64ISAR0_EL1_ATOMIC_SWP))
                 OUTPUT_BUF(" atomics");
         if (SO_EQ(pfr0, ID_AA64PFR0_EL1_FP, ID_AA64PFR0_EL1_FP_HP))
                 OUTPUT_BUF(" fphp");
         if (SO_EQ(pfr0, ID_AA64PFR0_EL1_ADVSIMD, ID_AA64PFR0_EL1_ADV_SIMD_HP))
                 OUTPUT_BUF(" asimdhp");
         /* notyet: " cpuid" */
         if (SO_EQ(isar0, ID_AA64ISAR0_EL1_RDM, ID_AA64ISAR0_EL1_RDM_SQRDML))
                 OUTPUT_BUF(" asimdrdm");
         if (SO_EQ(isar1, ID_AA64ISAR1_EL1_JSCVT,
             ID_AA64ISAR1_EL1_JSCVT_SUPPORTED))
                 OUTPUT_BUF(" jscvt");
         if (SO_EQ(isar1, ID_AA64ISAR1_EL1_FCMA,
             ID_AA64ISAR1_EL1_FCMA_SUPPORTED))
                 OUTPUT_BUF(" fcma");
         if (SO_EQ(isar1, ID_AA64ISAR1_EL1_LRCPC, ID_AA64ISAR1_EL1_LRCPC_PR))
                 OUTPUT_BUF(" lrcpc");
         if (SO_EQ(isar1, ID_AA64ISAR1_EL1_DPB, ID_AA64ISAR1_EL1_DPB_CVAP))
                 OUTPUT_BUF(" dcpop");
         if (SO_EQ(isar0, ID_AA64ISAR0_EL1_SHA3, ID_AA64ISAR0_EL1_SHA3_EOR3))
                 OUTPUT_BUF(" sha3");
         if (SO_EQ(isar0, ID_AA64ISAR0_EL1_SM3, ID_AA64ISAR0_EL1_SM3_SM3))
                 OUTPUT_BUF(" sm3");
         if (SO_EQ(isar0, ID_AA64ISAR0_EL1_SM4, ID_AA64ISAR0_EL1_SM4_SM4))
                 OUTPUT_BUF(" sm4");
         if (SO_EQ(isar0, ID_AA64ISAR0_EL1_DP, ID_AA64ISAR0_EL1_DP_UDOT))
                 OUTPUT_BUF(" asimddp");
         if (SO_EQ(isar0, ID_AA64ISAR0_EL1_SHA2,
             ID_AA64ISAR0_EL1_SHA2_SHA512HSU))
                 OUTPUT_BUF(" sha512");
         if (SO_EQ(pfr0, ID_AA64PFR0_EL1_SVE, ID_AA64PFR0_EL1_SVE_IMPL))
                 OUTPUT_BUF(" sve");
         if (SO_EQ(isar0, ID_AA64ISAR0_EL1_FHM, ID_AA64ISAR0_EL1_FHM_FMLAL))
                 OUTPUT_BUF(" asimdfhm");
         if (SO_EQ(pfr0, ID_AA64PFR0_EL1_DIT, ID_AA64PFR0_EL1_DIT_IMPL))
                 OUTPUT_BUF(" dit");
         if (SO_EQ(mmfr2, ID_AA64MMFR2_EL1_AT, ID_AA64MMFR2_EL1_AT_16BIT))
                 OUTPUT_BUF(" uscat");
         if (SO_EQ(isar1, ID_AA64ISAR1_EL1_LRCPC, ID_AA64ISAR1_EL1_LRCPC_PR_UR))
                 OUTPUT_BUF(" ilrcpc");
         if (SO_EQ(isar0, ID_AA64ISAR0_EL1_TS, ID_AA64ISAR0_EL1_TS_CFINV))
                 OUTPUT_BUF(" flagm");
         if (SO_EQ(pfr1, ID_AA64PFR1_EL1_SSBS, ID_AA64PFR1_EL1_SSBS_MSR_MRS))
                 OUTPUT_BUF(" ssbs");
         if (SO_EQ(isar1, ID_AA64ISAR1_EL1_SB, ID_AA64ISAR1_EL1_SB_SUPPORTED))
                 OUTPUT_BUF(" sb");
 #ifdef ARMV83_PAC
         if (aarch64_pac_enabled)
                 OUTPUT_BUF(" paca pacg");
 #endif
         if (SO_EQ(isar1, ID_AA64ISAR1_EL1_DPB, ID_AA64ISAR1_EL1_DPB_CVAP_CVADP))
                 OUTPUT_BUF(" dcpodp");
         /* notyet: " sve2" */
         /* notyet: " sveaes" */
         /* notyet: " svepmull" */
         /* notyet: " svebitperm" */
         /* notyet: " svesha3" */
         /* notyet: " svesm4" */
         if (SO_EQ(isar0, ID_AA64ISAR0_EL1_TS, ID_AA64ISAR0_EL1_TS_AXFLAG))
                 OUTPUT_BUF(" flagm2");
         if (SO_EQ(isar1, ID_AA64ISAR1_EL1_FRINTTS,
             ID_AA64ISAR1_EL1_FRINTTS_SUPPORTED))
                 OUTPUT_BUF(" frint");
         /* notyet: " svei8mm" */
         /* notyet: " svef32mm" */
         /* notyet: " svef64mm" */
         /* notyet: " svebf16" */
         if (SO_EQ(isar1, ID_AA64ISAR1_EL1_I8MM,
             ID_AA64ISAR1_EL1_I8MM_SUPPORTED))
                 OUTPUT_BUF(" i8mm");
         if (SO_EQ(isar1, ID_AA64ISAR1_EL1_BF16, ID_AA64ISAR1_EL1_BF16_BFDOT))
                 OUTPUT_BUF(" bf16");
         if (SO_EQ(isar1, ID_AA64ISAR1_EL1_DGH, ID_AA64ISAR1_EL1_DGH_SUPPORTED))
                 OUTPUT_BUF(" dgh");
         if (SO_EQ(isar0, ID_AA64ISAR0_EL1_RNDR, ID_AA64ISAR0_EL1_RNDR_RNDRRS))
                 OUTPUT_BUF(" rng");
 #ifdef ARMV85_BTI
         if (aarch64_bti_enabled)
                 OUTPUT_BUF(" bti");
 #endif
         OUTPUT_BUF("\n");
 #undef SO_EQ
 
         return total;
 }
 
 int
 procfs_getcpuinfstr(char *buf, size_t *lenp)
 {
         struct cpu_info *ci;
         CPU_INFO_ITERATOR cii;
         size_t left, len, total;
         int ret = 0;
 
         left = *lenp;
         total = 0;
 
         for (CPU_INFO_FOREACH(cii, ci)) {
                 OUTPUT_BUF("processor\t: %d\n", cii);
 
                 len = procfs_cpuinfo_features(ci, buf, left);
                 FORWARD_BUF(len);
 
                 OUTPUT_BUF("CPU implementer\t: 0x%02lx\n",
                     __SHIFTOUT(ci->ci_id.ac_midr, CPU_ID_IMPLEMENTOR_MASK));
                 OUTPUT_BUF("CPU architecture: 8\n"); /* ARMv8 */
                 OUTPUT_BUF("CPU variant\t: 0x%lx\n",
                     __SHIFTOUT(ci->ci_id.ac_midr, CPU_ID_VARIANT_MASK));
                 OUTPUT_BUF("CPU part\t: 0x%03lx\n",
                     __SHIFTOUT(ci->ci_id.ac_midr, CPU_ID_PARTNO_MASK));
                 OUTPUT_BUF("CPU revision\t: %lu\n",
                     __SHIFTOUT(ci->ci_id.ac_midr, CPU_ID_REVISION_MASK));
                 OUTPUT_BUF("\n");
         }
 
         /* not enough buffer? */
         if (total >= *lenp)
                 ret = -1;
 
         *lenp = total + 1; /* total output + '\0' */
         return ret;
 }
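
For reference, the FORWARD_BUF/OUTPUT_BUF macros above lean on snprintf() returning the length the output would have had: the cursor advances but never runs past the end of the buffer, while 'total' keeps counting everything that was attempted. procfs_getcpuinfstr() then returns -1 and reports the needed size (total + 1) when the caller's buffer was too small. Below is a standalone userland sketch of the same idiom; the struct out and the emit() helper are hypothetical stand-ins for the kernel macros.

/*
 * cpuinfo_buf_demo.c -- standalone sketch of the FORWARD_BUF/OUTPUT_BUF
 * idiom, outside the kernel.
 */
#include <stdio.h>
#include <stddef.h>

struct out {
        char   *buf;            /* current write position */
        size_t  left;           /* space remaining, may reach 0 */
        size_t  total;          /* length the output would have had */
};

static void
emit(struct out *o, const char *s)
{
        size_t l = snprintf(o->buf, o->left, "%s", s);

        o->total += l;
        if (l < o->left) {      /* fitted: advance normally */
                o->buf += l;
                o->left -= l;
        } else {                /* truncated: park at the end */
                o->buf += o->left;
                o->left = 0;
        }
}

int
main(void)
{
        char small[8];
        struct out o = { small, sizeof(small), 0 };

        emit(&o, "Features\t:");
        emit(&o, " fp asimd\n");

        /* mirrors procfs_getcpuinfstr(): detect truncation, report size */
        if (o.total >= sizeof(small))
                printf("buffer too small, need %zu bytes\n", o.total + 1);
        return 0;
}

Once 'left' hits zero the later emit() calls only add to 'total', which is what lets the caller retry with a buffer of exactly the reported size.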

cvs diff -r1.19 -r1.20 src/sys/arch/aarch64/include/cpufunc.h

--- src/sys/arch/aarch64/include/cpufunc.h 2020/12/04 08:29:11 1.19
+++ src/sys/arch/aarch64/include/cpufunc.h 2021/05/27 06:11:20 1.20
@@ -1,252 +1,253 @@
-/*	$NetBSD: cpufunc.h,v 1.19 2020/12/04 08:29:11 skrll Exp $	*/
+/*	$NetBSD: cpufunc.h,v 1.20 2021/05/27 06:11:20 ryo Exp $	*/
 
 /*
  * Copyright (c) 2017 Ryo Shimizu <ryo@nerv.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifndef _AARCH64_CPUFUNC_H_
 #define _AARCH64_CPUFUNC_H_
 
 #ifdef _KERNEL
 
 #include <arm/armreg.h>
 #include <sys/device_if.h>
 
 struct aarch64_cache_unit {
         u_int cache_type;
 #define CACHE_TYPE_VPIPT 0 /* VMID-aware PIPT */
 #define CACHE_TYPE_VIVT 1 /* ASID-tagged VIVT */
 #define CACHE_TYPE_VIPT 2
 #define CACHE_TYPE_PIPT 3
         u_int cache_line_size;
         u_int cache_ways;
         u_int cache_sets;
         u_int cache_way_size;
         u_int cache_size;
 };
 
 struct aarch64_cache_info {
         u_int cacheable;
 #define CACHE_CACHEABLE_NONE 0
 #define CACHE_CACHEABLE_ICACHE 1 /* instruction cache only */
 #define CACHE_CACHEABLE_DCACHE 2 /* data cache only */
 #define CACHE_CACHEABLE_IDCACHE 3 /* instruction and data caches */
 #define CACHE_CACHEABLE_UNIFIED 4 /* unified cache */
         struct aarch64_cache_unit icache;
         struct aarch64_cache_unit dcache;
 };
 
 #define MAX_CACHE_LEVEL 8 /* ARMv8 has maximum 8 level cache */
 extern u_int aarch64_cache_vindexsize; /* cachesize/way (VIVT/VIPT) */
 extern u_int aarch64_cache_prefer_mask;
 extern u_int cputype; /* compat arm */
 
+extern int aarch64_bti_enabled;
 extern int aarch64_pan_enabled;
 extern int aarch64_pac_enabled;
 
 void aarch64_pan_init(int);
 int aarch64_pac_init(int);
 
 int set_cpufuncs(void);
 void aarch64_getcacheinfo(int);
 void aarch64_printcacheinfo(device_t);
 
 void aarch64_dcache_wbinv_all(void);
 void aarch64_dcache_inv_all(void);
 void aarch64_dcache_wb_all(void);
 void aarch64_icache_inv_all(void);
 
 /* cache op in cpufunc_asm_armv8.S */
 void aarch64_nullop(void);
 uint32_t aarch64_cpuid(void);
 void aarch64_icache_sync_range(vaddr_t, vsize_t);
 void aarch64_icache_inv_range(vaddr_t, vsize_t);
 void aarch64_icache_barrier_range(vaddr_t, vsize_t);
 void aarch64_idcache_wbinv_range(vaddr_t, vsize_t);
 void aarch64_dcache_wbinv_range(vaddr_t, vsize_t);
 void aarch64_dcache_inv_range(vaddr_t, vsize_t);
 void aarch64_dcache_wb_range(vaddr_t, vsize_t);
 void aarch64_icache_inv_all(void);
 void aarch64_drain_writebuf(void);
 
 /* tlb op in cpufunc_asm_armv8.S */
 #define cpu_set_ttbr0(t) curcpu()->ci_cpufuncs.cf_set_ttbr0((t))
 void aarch64_set_ttbr0(uint64_t);
 void aarch64_set_ttbr0_thunderx(uint64_t);
 void aarch64_tlbi_all(void); /* all ASID, all VA */
 void aarch64_tlbi_by_asid(int); /* an ASID, all VA */
 void aarch64_tlbi_by_va(vaddr_t); /* all ASID, a VA */
 void aarch64_tlbi_by_va_ll(vaddr_t); /* all ASID, a VA, lastlevel */
 void aarch64_tlbi_by_asid_va(int, vaddr_t); /* an ASID, a VA */
 void aarch64_tlbi_by_asid_va_ll(int, vaddr_t); /* an ASID, a VA, lastlevel */
 
 
 /* misc */
 #define cpu_idnum() aarch64_cpuid()
 
 /* cache op */
 
 #define cpu_dcache_wbinv_all() aarch64_dcache_wbinv_all()
 #define cpu_dcache_inv_all() aarch64_dcache_inv_all()
 #define cpu_dcache_wb_all() aarch64_dcache_wb_all()
 #define cpu_idcache_wbinv_all() \
         (aarch64_dcache_wbinv_all(), aarch64_icache_inv_all())
 #define cpu_icache_sync_all() \
         (aarch64_dcache_wb_all(), aarch64_icache_inv_all())
 #define cpu_icache_inv_all() aarch64_icache_inv_all()
 
 #define cpu_dcache_wbinv_range(v,s) aarch64_dcache_wbinv_range((v),(s))
 #define cpu_dcache_inv_range(v,s) aarch64_dcache_inv_range((v),(s))
 #define cpu_dcache_wb_range(v,s) aarch64_dcache_wb_range((v),(s))
 #define cpu_idcache_wbinv_range(v,s) aarch64_idcache_wbinv_range((v),(s))
 #define cpu_icache_sync_range(v,s) \
         curcpu()->ci_cpufuncs.cf_icache_sync_range((v),(s))
 
 #define cpu_sdcache_wbinv_range(v,p,s) ((void)0)
 #define cpu_sdcache_inv_range(v,p,s) ((void)0)
 #define cpu_sdcache_wb_range(v,p,s) ((void)0)
 
 /* others */
 #define cpu_drain_writebuf() aarch64_drain_writebuf()
 
 extern u_int arm_dcache_align;
 extern u_int arm_dcache_align_mask;
 
 static inline bool
 cpu_gtmr_exists_p(void)
 {
 
         return true;
 }
 
 static inline u_int
 cpu_clusterid(void)
 {
 
         return __SHIFTOUT(reg_mpidr_el1_read(), MPIDR_AFF1);
 }
 
 static inline bool
 cpu_earlydevice_va_p(void)
 {
         extern bool pmap_devmap_bootstrap_done; /* in pmap.c */
 
         /* This function may be called before enabling MMU, or mapping KVA */
         if ((reg_sctlr_el1_read() & SCTLR_M) == 0)
                 return false;
 
         /* device mapping will be availabled after pmap_devmap_bootstrap() */
         if (!pmap_devmap_bootstrap_done)
                 return false;
 
         return true;
 }
 
 #endif /* _KERNEL */
 
 /* definitions of TAG and PAC in pointers */
 #define AARCH64_ADDRTOP_TAG_BIT 55
 #define AARCH64_ADDRTOP_TAG __BIT(55) /* ECR_EL1.TBI[01]=1 */
 #define AARCH64_ADDRTOP_MSB __BIT(63) /* ECR_EL1.TBI[01]=0 */
 #define AARCH64_ADDRESS_TAG_MASK __BITS(63,56) /* if TCR.TBI[01]=1 */
 #define AARCH64_ADDRESS_PAC_MASK __BITS(54,48) /* depend on VIRT_BIT */
 #define AARCH64_ADDRESS_TAGPAC_MASK \
         (AARCH64_ADDRESS_TAG_MASK|AARCH64_ADDRESS_PAC_MASK)
 
 #ifdef _KERNEL
 /*
  * Which is the address space of this VA?
  * return the space considering TBI. (PAC is not yet)
  *
  * return value: AARCH64_ADDRSPACE_{LOWER,UPPER}{_OUTOFRANGE}?
  */
 #define AARCH64_ADDRSPACE_LOWER 0 /* -> TTBR0 */
 #define AARCH64_ADDRSPACE_UPPER 1 /* -> TTBR1 */
 #define AARCH64_ADDRSPACE_LOWER_OUTOFRANGE -1 /* certainly fault */
 #define AARCH64_ADDRSPACE_UPPER_OUTOFRANGE -2 /* certainly fault */
 static inline int
 aarch64_addressspace(vaddr_t va)
 {
         uint64_t addrtop, tbi;
 
         addrtop = va & AARCH64_ADDRTOP_TAG;
         tbi = addrtop ? TCR_TBI1 : TCR_TBI0;
         if (reg_tcr_el1_read() & tbi) {
                 if (addrtop == 0) {
                         /* lower address, and TBI0 enabled */
                         if ((va & AARCH64_ADDRESS_PAC_MASK) != 0)
                                 return AARCH64_ADDRSPACE_LOWER_OUTOFRANGE;
                         return AARCH64_ADDRSPACE_LOWER;
                 }
                 /* upper address, and TBI1 enabled */
                 if ((va & AARCH64_ADDRESS_PAC_MASK) != AARCH64_ADDRESS_PAC_MASK)
                         return AARCH64_ADDRSPACE_UPPER_OUTOFRANGE;
                 return AARCH64_ADDRSPACE_UPPER;
         }
 
         addrtop = va & AARCH64_ADDRTOP_MSB;
         if (addrtop == 0) {
                 /* lower address, and TBI0 disabled */
                 if ((va & AARCH64_ADDRESS_TAGPAC_MASK) != 0)
                         return AARCH64_ADDRSPACE_LOWER_OUTOFRANGE;
                 return AARCH64_ADDRSPACE_LOWER;
         }
         /* upper address, and TBI1 disabled */
         if ((va & AARCH64_ADDRESS_TAGPAC_MASK) != AARCH64_ADDRESS_TAGPAC_MASK)
                 return AARCH64_ADDRSPACE_UPPER_OUTOFRANGE;
         return AARCH64_ADDRSPACE_UPPER;
 }
 
 static inline vaddr_t
 aarch64_untag_address(vaddr_t va)
 {
         uint64_t addrtop, tbi;
 
         addrtop = va & AARCH64_ADDRTOP_TAG;
         tbi = addrtop ? TCR_TBI1 : TCR_TBI0;
         if (reg_tcr_el1_read() & tbi) {
                 if (addrtop == 0) {
                         /* lower address, and TBI0 enabled */
                         return va & ~AARCH64_ADDRESS_TAG_MASK;
                 }
                 /* upper address, and TBI1 enabled */
                 return va | AARCH64_ADDRESS_TAG_MASK;
         }
 
         /* TBI[01] is disabled, nothing to do */
         return va;
 }
 
 #endif /* _KERNEL */
 
 static __inline uint64_t
 aarch64_strip_pac(uint64_t __val)
 {
         if (__val & AARCH64_ADDRTOP_TAG)
                 return __val | AARCH64_ADDRESS_TAGPAC_MASK;
         return __val & ~AARCH64_ADDRESS_TAGPAC_MASK;
 }
 
 #endif /* _AARCH64_CPUFUNC_H_ */
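
The pointer-tag definitions at the bottom of cpufunc.h encode how a tagged or PAC-signed virtual address folds back to a canonical one: bit 55 (AARCH64_ADDRTOP_TAG) says whether the address belongs to the lower (TTBR0) or upper (TTBR1) half, and aarch64_strip_pac() propagates that bit across the tag and PAC bit fields. Below is a standalone sketch of the same logic with the __BIT/__BITS masks written out as plain constants; the PAC field width here assumes a 48-bit virtual address space, as the "depend on VIRT_BIT" comment hints, and the example addresses are made up for illustration.

/*
 * strip_pac_demo.c -- standalone sketch of the aarch64_strip_pac() logic,
 * not kernel code.  Mask widths assume a 48-bit virtual address space.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define ADDRTOP_TAG         (UINT64_C(1) << 55)             /* lower vs. upper VA */
#define ADDRESS_TAG_MASK    UINT64_C(0xff00000000000000)    /* bits 63:56 */
#define ADDRESS_PAC_MASK    UINT64_C(0x007f000000000000)    /* bits 54:48 */
#define ADDRESS_TAGPAC_MASK (ADDRESS_TAG_MASK | ADDRESS_PAC_MASK)

/* Same shape as aarch64_strip_pac(): propagate bit 55 over the tag/PAC bits. */
static uint64_t
strip_pac(uint64_t val)
{
        if (val & ADDRTOP_TAG)
                return val | ADDRESS_TAGPAC_MASK;   /* upper VA: force ones */
        return val & ~ADDRESS_TAGPAC_MASK;          /* lower VA: force zeroes */
}

int
main(void)
{
        uint64_t canonical  = UINT64_C(0xffffffc012345678); /* upper (kernel) VA */
        uint64_t pac_signed = UINT64_C(0x12b4ffc012345678); /* same VA, PAC/tag bits set */

        printf("stripped %#" PRIx64 ", canonical %#" PRIx64 "\n",
            strip_pac(pac_signed), canonical);
        return 0;
}

Because bit 55 survives PAC signing, forcing all the tag and PAC bits to copies of it is enough to recover the canonical address, which is why the header's one-liner needs no knowledge of the actual signature value.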