| @@ -1,252 +1,253 @@ | | | @@ -1,252 +1,253 @@ |
1 | /* $NetBSD: cpufunc.h,v 1.19 2020/12/04 08:29:11 skrll Exp $ */ | | 1 | /* $NetBSD: cpufunc.h,v 1.20 2021/05/27 06:11:20 ryo Exp $ */ |
2 | | | 2 | |
3 | /* | | 3 | /* |
4 | * Copyright (c) 2017 Ryo Shimizu <ryo@nerv.org> | | 4 | * Copyright (c) 2017 Ryo Shimizu <ryo@nerv.org> |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * Redistribution and use in source and binary forms, with or without | | 7 | * Redistribution and use in source and binary forms, with or without |
8 | * modification, are permitted provided that the following conditions | | 8 | * modification, are permitted provided that the following conditions |
9 | * are met: | | 9 | * are met: |
10 | * 1. Redistributions of source code must retain the above copyright | | 10 | * 1. Redistributions of source code must retain the above copyright |
11 | * notice, this list of conditions and the following disclaimer. | | 11 | * notice, this list of conditions and the following disclaimer. |
12 | * 2. Redistributions in binary form must reproduce the above copyright | | 12 | * 2. Redistributions in binary form must reproduce the above copyright |
13 | * notice, this list of conditions and the following disclaimer in the | | 13 | * notice, this list of conditions and the following disclaimer in the |
14 | * documentation and/or other materials provided with the distribution. | | 14 | * documentation and/or other materials provided with the distribution. |
15 | * | | 15 | * |
16 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | | 16 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
17 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | | 17 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
18 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | | 18 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE |
19 | * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, | | 19 | * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, |
20 | * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | | 20 | * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
21 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR | | 21 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR |
22 | * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | | 22 | * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
23 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | | 23 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, |
24 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | | 24 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING |
25 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 25 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
26 | * POSSIBILITY OF SUCH DAMAGE. | | 26 | * POSSIBILITY OF SUCH DAMAGE. |
27 | */ | | 27 | */ |
28 | | | 28 | |
29 | #ifndef _AARCH64_CPUFUNC_H_ | | 29 | #ifndef _AARCH64_CPUFUNC_H_ |
30 | #define _AARCH64_CPUFUNC_H_ | | 30 | #define _AARCH64_CPUFUNC_H_ |
31 | | | 31 | |
32 | #ifdef _KERNEL | | 32 | #ifdef _KERNEL |
33 | | | 33 | |
34 | #include <arm/armreg.h> | | 34 | #include <arm/armreg.h> |
35 | #include <sys/device_if.h> | | 35 | #include <sys/device_if.h> |
36 | | | 36 | |
37 | struct aarch64_cache_unit { | | 37 | struct aarch64_cache_unit { |
38 | u_int cache_type; | | 38 | u_int cache_type; |
39 | #define CACHE_TYPE_VPIPT 0 /* VMID-aware PIPT */ | | 39 | #define CACHE_TYPE_VPIPT 0 /* VMID-aware PIPT */ |
40 | #define CACHE_TYPE_VIVT 1 /* ASID-tagged VIVT */ | | 40 | #define CACHE_TYPE_VIVT 1 /* ASID-tagged VIVT */ |
41 | #define CACHE_TYPE_VIPT 2 | | 41 | #define CACHE_TYPE_VIPT 2 |
42 | #define CACHE_TYPE_PIPT 3 | | 42 | #define CACHE_TYPE_PIPT 3 |
43 | u_int cache_line_size; | | 43 | u_int cache_line_size; |
44 | u_int cache_ways; | | 44 | u_int cache_ways; |
45 | u_int cache_sets; | | 45 | u_int cache_sets; |
46 | u_int cache_way_size; | | 46 | u_int cache_way_size; |
47 | u_int cache_size; | | 47 | u_int cache_size; |
48 | }; | | 48 | }; |
49 | | | 49 | |
50 | struct aarch64_cache_info { | | 50 | struct aarch64_cache_info { |
51 | u_int cacheable; | | 51 | u_int cacheable; |
52 | #define CACHE_CACHEABLE_NONE 0 | | 52 | #define CACHE_CACHEABLE_NONE 0 |
53 | #define CACHE_CACHEABLE_ICACHE 1 /* instruction cache only */ | | 53 | #define CACHE_CACHEABLE_ICACHE 1 /* instruction cache only */ |
54 | #define CACHE_CACHEABLE_DCACHE 2 /* data cache only */ | | 54 | #define CACHE_CACHEABLE_DCACHE 2 /* data cache only */ |
55 | #define CACHE_CACHEABLE_IDCACHE 3 /* instruction and data caches */ | | 55 | #define CACHE_CACHEABLE_IDCACHE 3 /* instruction and data caches */ |
56 | #define CACHE_CACHEABLE_UNIFIED 4 /* unified cache */ | | 56 | #define CACHE_CACHEABLE_UNIFIED 4 /* unified cache */ |
57 | struct aarch64_cache_unit icache; | | 57 | struct aarch64_cache_unit icache; |
58 | struct aarch64_cache_unit dcache; | | 58 | struct aarch64_cache_unit dcache; |
59 | }; | | 59 | }; |
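The geometry fields above are related in the usual way: one way spans cache_sets lines of cache_line_size bytes, and a unit is cache_ways such ways. A minimal sketch of that invariant, assuming a populated structure (cache_unit_consistent() is a hypothetical helper, not part of this header):

	/* Sketch: check that a cache unit's reported geometry adds up. */
	static bool
	cache_unit_consistent(const struct aarch64_cache_unit *u)
	{
		return u->cache_way_size == u->cache_sets * u->cache_line_size &&
		    u->cache_size == u->cache_ways * u->cache_way_size;
	}
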
60 | | | 60 | |
61 | #define MAX_CACHE_LEVEL 8 /* ARMv8 supports up to 8 cache levels */ | | 61 | #define MAX_CACHE_LEVEL 8 /* ARMv8 supports up to 8 cache levels */ |
62 | extern u_int aarch64_cache_vindexsize; /* cachesize/way (VIVT/VIPT) */ | | 62 | extern u_int aarch64_cache_vindexsize; /* cachesize/way (VIVT/VIPT) */ |
63 | extern u_int aarch64_cache_prefer_mask; | | 63 | extern u_int aarch64_cache_prefer_mask; |
64 | extern u_int cputype; /* compat arm */ | | 64 | extern u_int cputype; /* compat arm */ |
65 | | | 65 | |
| | | 66 | extern int aarch64_bti_enabled; |
66 | extern int aarch64_pan_enabled; | | 67 | extern int aarch64_pan_enabled; |
67 | extern int aarch64_pac_enabled; | | 68 | extern int aarch64_pac_enabled; |
68 | | | 69 | |
69 | void aarch64_pan_init(int); | | 70 | void aarch64_pan_init(int); |
70 | int aarch64_pac_init(int); | | 71 | int aarch64_pac_init(int); |
71 | | | 72 | |
72 | int set_cpufuncs(void); | | 73 | int set_cpufuncs(void); |
73 | void aarch64_getcacheinfo(int); | | 74 | void aarch64_getcacheinfo(int); |
74 | void aarch64_printcacheinfo(device_t); | | 75 | void aarch64_printcacheinfo(device_t); |
75 | | | 76 | |
76 | void aarch64_dcache_wbinv_all(void); | | 77 | void aarch64_dcache_wbinv_all(void); |
77 | void aarch64_dcache_inv_all(void); | | 78 | void aarch64_dcache_inv_all(void); |
78 | void aarch64_dcache_wb_all(void); | | 79 | void aarch64_dcache_wb_all(void); |
79 | void aarch64_icache_inv_all(void); | | 80 | void aarch64_icache_inv_all(void); |
80 | | | 81 | |
81 | /* cache op in cpufunc_asm_armv8.S */ | | 82 | /* cache op in cpufunc_asm_armv8.S */ |
82 | void aarch64_nullop(void); | | 83 | void aarch64_nullop(void); |
83 | uint32_t aarch64_cpuid(void); | | 84 | uint32_t aarch64_cpuid(void); |
84 | void aarch64_icache_sync_range(vaddr_t, vsize_t); | | 85 | void aarch64_icache_sync_range(vaddr_t, vsize_t); |
85 | void aarch64_icache_inv_range(vaddr_t, vsize_t); | | 86 | void aarch64_icache_inv_range(vaddr_t, vsize_t); |
86 | void aarch64_icache_barrier_range(vaddr_t, vsize_t); | | 87 | void aarch64_icache_barrier_range(vaddr_t, vsize_t); |
87 | void aarch64_idcache_wbinv_range(vaddr_t, vsize_t); | | 88 | void aarch64_idcache_wbinv_range(vaddr_t, vsize_t); |
88 | void aarch64_dcache_wbinv_range(vaddr_t, vsize_t); | | 89 | void aarch64_dcache_wbinv_range(vaddr_t, vsize_t); |
89 | void aarch64_dcache_inv_range(vaddr_t, vsize_t); | | 90 | void aarch64_dcache_inv_range(vaddr_t, vsize_t); |
90 | void aarch64_dcache_wb_range(vaddr_t, vsize_t); | | 91 | void aarch64_dcache_wb_range(vaddr_t, vsize_t); |
91 | void aarch64_icache_inv_all(void); | | 92 | void aarch64_icache_inv_all(void); |
92 | void aarch64_drain_writebuf(void); | | 93 | void aarch64_drain_writebuf(void); |
93 | | | 94 | |
94 | /* tlb op in cpufunc_asm_armv8.S */ | | 95 | /* tlb op in cpufunc_asm_armv8.S */ |
95 | #define cpu_set_ttbr0(t) curcpu()->ci_cpufuncs.cf_set_ttbr0((t)) | | 96 | #define cpu_set_ttbr0(t) curcpu()->ci_cpufuncs.cf_set_ttbr0((t)) |
96 | void aarch64_set_ttbr0(uint64_t); | | 97 | void aarch64_set_ttbr0(uint64_t); |
97 | void aarch64_set_ttbr0_thunderx(uint64_t); | | 98 | void aarch64_set_ttbr0_thunderx(uint64_t); |
98 | void aarch64_tlbi_all(void); /* all ASID, all VA */ | | 99 | void aarch64_tlbi_all(void); /* all ASID, all VA */ |
99 | void aarch64_tlbi_by_asid(int); /* an ASID, all VA */ | | 100 | void aarch64_tlbi_by_asid(int); /* an ASID, all VA */ |
100 | void aarch64_tlbi_by_va(vaddr_t); /* all ASID, a VA */ | | 101 | void aarch64_tlbi_by_va(vaddr_t); /* all ASID, a VA */ |
101 | void aarch64_tlbi_by_va_ll(vaddr_t); /* all ASID, a VA, lastlevel */ | | 102 | void aarch64_tlbi_by_va_ll(vaddr_t); /* all ASID, a VA, lastlevel */ |
102 | void aarch64_tlbi_by_asid_va(int, vaddr_t); /* an ASID, a VA */ | | 103 | void aarch64_tlbi_by_asid_va(int, vaddr_t); /* an ASID, a VA */ |
103 | void aarch64_tlbi_by_asid_va_ll(int, vaddr_t); /* an ASID, a VA, lastlevel */ | | 104 | void aarch64_tlbi_by_asid_va_ll(int, vaddr_t); /* an ASID, a VA, lastlevel */ |
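A typical caller invalidates a single stale translation after changing a PTE, choosing the by-ASID variant for non-global mappings. A hedged sketch (pte_changed_example() and its arguments are hypothetical; only the aarch64_tlbi_* calls come from this header):

	/* Sketch: flush one translation after a PTE update. */
	static void
	pte_changed_example(int asid, vaddr_t va, bool global)
	{
		if (global)
			aarch64_tlbi_by_va(va);		/* all ASIDs, one VA */
		else
			aarch64_tlbi_by_asid_va(asid, va); /* one ASID, one VA */
	}
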
104 | | | 105 | |
105 | | | 106 | |
106 | /* misc */ | | 107 | /* misc */ |
107 | #define cpu_idnum() aarch64_cpuid() | | 108 | #define cpu_idnum() aarch64_cpuid() |
108 | | | 109 | |
109 | /* cache op */ | | 110 | /* cache op */ |
110 | | | 111 | |
111 | #define cpu_dcache_wbinv_all() aarch64_dcache_wbinv_all() | | 112 | #define cpu_dcache_wbinv_all() aarch64_dcache_wbinv_all() |
112 | #define cpu_dcache_inv_all() aarch64_dcache_inv_all() | | 113 | #define cpu_dcache_inv_all() aarch64_dcache_inv_all() |
113 | #define cpu_dcache_wb_all() aarch64_dcache_wb_all() | | 114 | #define cpu_dcache_wb_all() aarch64_dcache_wb_all() |
114 | #define cpu_idcache_wbinv_all() \ | | 115 | #define cpu_idcache_wbinv_all() \ |
115 | (aarch64_dcache_wbinv_all(), aarch64_icache_inv_all()) | | 116 | (aarch64_dcache_wbinv_all(), aarch64_icache_inv_all()) |
116 | #define cpu_icache_sync_all() \ | | 117 | #define cpu_icache_sync_all() \ |
117 | (aarch64_dcache_wb_all(), aarch64_icache_inv_all()) | | 118 | (aarch64_dcache_wb_all(), aarch64_icache_inv_all()) |
118 | #define cpu_icache_inv_all() aarch64_icache_inv_all() | | 119 | #define cpu_icache_inv_all() aarch64_icache_inv_all() |
119 | | | 120 | |
120 | #define cpu_dcache_wbinv_range(v,s) aarch64_dcache_wbinv_range((v),(s)) | | 121 | #define cpu_dcache_wbinv_range(v,s) aarch64_dcache_wbinv_range((v),(s)) |
121 | #define cpu_dcache_inv_range(v,s) aarch64_dcache_inv_range((v),(s)) | | 122 | #define cpu_dcache_inv_range(v,s) aarch64_dcache_inv_range((v),(s)) |
122 | #define cpu_dcache_wb_range(v,s) aarch64_dcache_wb_range((v),(s)) | | 123 | #define cpu_dcache_wb_range(v,s) aarch64_dcache_wb_range((v),(s)) |
123 | #define cpu_idcache_wbinv_range(v,s) aarch64_idcache_wbinv_range((v),(s)) | | 124 | #define cpu_idcache_wbinv_range(v,s) aarch64_idcache_wbinv_range((v),(s)) |
124 | #define cpu_icache_sync_range(v,s) \ | | 125 | #define cpu_icache_sync_range(v,s) \ |
125 | curcpu()->ci_cpufuncs.cf_icache_sync_range((v),(s)) | | 126 | curcpu()->ci_cpufuncs.cf_icache_sync_range((v),(s)) |
126 | | | 127 | |
127 | #define cpu_sdcache_wbinv_range(v,p,s) ((void)0) | | 128 | #define cpu_sdcache_wbinv_range(v,p,s) ((void)0) |
128 | #define cpu_sdcache_inv_range(v,p,s) ((void)0) | | 129 | #define cpu_sdcache_inv_range(v,p,s) ((void)0) |
129 | #define cpu_sdcache_wb_range(v,p,s) ((void)0) | | 130 | #define cpu_sdcache_wb_range(v,p,s) ((void)0) |
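The I/D combinations above follow the usual ARMv8 pattern: write the data cache back so the instruction side can observe the new bytes, then invalidate the instruction cache. A hedged usage sketch for publishing freshly written code (install_code_example() and its arguments are hypothetical; memcpy and the cpu_icache_sync_range macro are assumed available):

	/* Sketch: make newly written instructions visible to the I-side. */
	static void
	install_code_example(void *dst, const void *src, size_t len)
	{
		memcpy(dst, src, len);
		cpu_icache_sync_range((vaddr_t)dst, (vsize_t)len);
	}
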
130 | | | 131 | |
131 | /* others */ | | 132 | /* others */ |
132 | #define cpu_drain_writebuf() aarch64_drain_writebuf() | | 133 | #define cpu_drain_writebuf() aarch64_drain_writebuf() |
133 | | | 134 | |
134 | extern u_int arm_dcache_align; | | 135 | extern u_int arm_dcache_align; |
135 | extern u_int arm_dcache_align_mask; | | 136 | extern u_int arm_dcache_align_mask; |
136 | | | 137 | |
137 | static inline bool | | 138 | static inline bool |
138 | cpu_gtmr_exists_p(void) | | 139 | cpu_gtmr_exists_p(void) |
139 | { | | 140 | { |
140 | | | 141 | |
141 | return true; | | 142 | return true; |
142 | } | | 143 | } |
143 | | | 144 | |
144 | static inline u_int | | 145 | static inline u_int |
145 | cpu_clusterid(void) | | 146 | cpu_clusterid(void) |
146 | { | | 147 | { |
147 | | | 148 | |
148 | return __SHIFTOUT(reg_mpidr_el1_read(), MPIDR_AFF1); | | 149 | return __SHIFTOUT(reg_mpidr_el1_read(), MPIDR_AFF1); |
149 | } | | 150 | } |
150 | | | 151 | |
151 | static inline bool | | 152 | static inline bool |
152 | cpu_earlydevice_va_p(void) | | 153 | cpu_earlydevice_va_p(void) |
153 | { | | 154 | { |
154 | extern bool pmap_devmap_bootstrap_done; /* in pmap.c */ | | 155 | extern bool pmap_devmap_bootstrap_done; /* in pmap.c */ |
155 | | | 156 | |
156 | /* This function may be called before the MMU is enabled or KVA is mapped */ | | 157 | /* This function may be called before the MMU is enabled or KVA is mapped */ |
157 | if ((reg_sctlr_el1_read() & SCTLR_M) == 0) | | 158 | if ((reg_sctlr_el1_read() & SCTLR_M) == 0) |
158 | return false; | | 159 | return false; |
159 | | | 160 | |
160 | /* device mappings become available after pmap_devmap_bootstrap() */ | | 161 | /* device mappings become available after pmap_devmap_bootstrap() */ |
161 | if (!pmap_devmap_bootstrap_done) | | 162 | if (!pmap_devmap_bootstrap_done) |
162 | return false; | | 163 | return false; |
163 | | | 164 | |
164 | return true; | | 165 | return true; |
165 | } | | 166 | } |
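Early boot code can use this predicate to decide whether a device's virtual mapping is usable yet. A hedged sketch (early_putc_example() and UART_VA are hypothetical):

	/* Sketch: touch a device register only once its VA can exist. */
	static void
	early_putc_example(int c)
	{
		if (!cpu_earlydevice_va_p())
			return;	/* too early: MMU off or devmap not done */
		*(volatile uint32_t *)UART_VA = (uint32_t)c;
	}
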
166 | | | 167 | |
167 | #endif /* _KERNEL */ | | 168 | #endif /* _KERNEL */ |
168 | | | 169 | |
169 | /* definitions of TAG and PAC in pointers */ | | 170 | /* definitions of TAG and PAC in pointers */ |
170 | #define AARCH64_ADDRTOP_TAG_BIT 55 | | 171 | #define AARCH64_ADDRTOP_TAG_BIT 55 |
171 | #define AARCH64_ADDRTOP_TAG __BIT(55) /* TCR_EL1.TBI[01]=1 */ | | 172 | #define AARCH64_ADDRTOP_TAG __BIT(55) /* TCR_EL1.TBI[01]=1 */ |
172 | #define AARCH64_ADDRTOP_MSB __BIT(63) /* TCR_EL1.TBI[01]=0 */ | | 173 | #define AARCH64_ADDRTOP_MSB __BIT(63) /* TCR_EL1.TBI[01]=0 */ |
173 | #define AARCH64_ADDRESS_TAG_MASK __BITS(63,56) /* if TCR.TBI[01]=1 */ | | 174 | #define AARCH64_ADDRESS_TAG_MASK __BITS(63,56) /* if TCR.TBI[01]=1 */ |
174 | #define AARCH64_ADDRESS_PAC_MASK __BITS(54,48) /* depends on VIRT_BIT */ | | 175 | #define AARCH64_ADDRESS_PAC_MASK __BITS(54,48) /* depends on VIRT_BIT */ |
175 | #define AARCH64_ADDRESS_TAGPAC_MASK \ | | 176 | #define AARCH64_ADDRESS_TAGPAC_MASK \ |
176 | (AARCH64_ADDRESS_TAG_MASK|AARCH64_ADDRESS_PAC_MASK) | | 177 | (AARCH64_ADDRESS_TAG_MASK|AARCH64_ADDRESS_PAC_MASK) |
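Concretely: bit 55 selects the lower or upper half of the address space, bits 63:56 carry the TBI tag byte, and bits 54:48 hold the PAC field for a 48-bit VA. A small sketch extracting the tag byte (address_tag_example() is hypothetical; __SHIFTOUT is the macro already used elsewhere in this header):

	/* Sketch: extract the top-byte tag from a tagged pointer. */
	static inline unsigned int
	address_tag_example(vaddr_t va)
	{
		return (unsigned int)__SHIFTOUT(va, AARCH64_ADDRESS_TAG_MASK);
	}
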
177 | | | 178 | |
178 | #ifdef _KERNEL | | 179 | #ifdef _KERNEL |
179 | /* | | 180 | /* |
180 | * Which address space does this VA belong to? | | 181 | * Which address space does this VA belong to? |
181 | * Returns the space, taking TBI into account (PAC is not handled yet). | | 182 | * Returns the space, taking TBI into account (PAC is not handled yet). |
182 | * | | 183 | * |
183 | * return value: AARCH64_ADDRSPACE_{LOWER,UPPER}{_OUTOFRANGE}? | | 184 | * return value: AARCH64_ADDRSPACE_{LOWER,UPPER}{_OUTOFRANGE}? |
184 | */ | | 185 | */ |
185 | #define AARCH64_ADDRSPACE_LOWER 0 /* -> TTBR0 */ | | 186 | #define AARCH64_ADDRSPACE_LOWER 0 /* -> TTBR0 */ |
186 | #define AARCH64_ADDRSPACE_UPPER 1 /* -> TTBR1 */ | | 187 | #define AARCH64_ADDRSPACE_UPPER 1 /* -> TTBR1 */ |
187 | #define AARCH64_ADDRSPACE_LOWER_OUTOFRANGE -1 /* certainly fault */ | | 188 | #define AARCH64_ADDRSPACE_LOWER_OUTOFRANGE -1 /* certainly fault */ |
188 | #define AARCH64_ADDRSPACE_UPPER_OUTOFRANGE -2 /* certainly fault */ | | 189 | #define AARCH64_ADDRSPACE_UPPER_OUTOFRANGE -2 /* certainly fault */ |
189 | static inline int | | 190 | static inline int |
190 | aarch64_addressspace(vaddr_t va) | | 191 | aarch64_addressspace(vaddr_t va) |
191 | { | | 192 | { |
192 | uint64_t addrtop, tbi; | | 193 | uint64_t addrtop, tbi; |
193 | | | 194 | |
194 | addrtop = va & AARCH64_ADDRTOP_TAG; | | 195 | addrtop = va & AARCH64_ADDRTOP_TAG; |
195 | tbi = addrtop ? TCR_TBI1 : TCR_TBI0; | | 196 | tbi = addrtop ? TCR_TBI1 : TCR_TBI0; |
196 | if (reg_tcr_el1_read() & tbi) { | | 197 | if (reg_tcr_el1_read() & tbi) { |
197 | if (addrtop == 0) { | | 198 | if (addrtop == 0) { |
198 | /* lower address, and TBI0 enabled */ | | 199 | /* lower address, and TBI0 enabled */ |
199 | if ((va & AARCH64_ADDRESS_PAC_MASK) != 0) | | 200 | if ((va & AARCH64_ADDRESS_PAC_MASK) != 0) |
200 | return AARCH64_ADDRSPACE_LOWER_OUTOFRANGE; | | 201 | return AARCH64_ADDRSPACE_LOWER_OUTOFRANGE; |
201 | return AARCH64_ADDRSPACE_LOWER; | | 202 | return AARCH64_ADDRSPACE_LOWER; |
202 | } | | 203 | } |
203 | /* upper address, and TBI1 enabled */ | | 204 | /* upper address, and TBI1 enabled */ |
204 | if ((va & AARCH64_ADDRESS_PAC_MASK) != AARCH64_ADDRESS_PAC_MASK) | | 205 | if ((va & AARCH64_ADDRESS_PAC_MASK) != AARCH64_ADDRESS_PAC_MASK) |
205 | return AARCH64_ADDRSPACE_UPPER_OUTOFRANGE; | | 206 | return AARCH64_ADDRSPACE_UPPER_OUTOFRANGE; |
206 | return AARCH64_ADDRSPACE_UPPER; | | 207 | return AARCH64_ADDRSPACE_UPPER; |
207 | } | | 208 | } |
208 | | | 209 | |
209 | addrtop = va & AARCH64_ADDRTOP_MSB; | | 210 | addrtop = va & AARCH64_ADDRTOP_MSB; |
210 | if (addrtop == 0) { | | 211 | if (addrtop == 0) { |
211 | /* lower address, and TBI0 disabled */ | | 212 | /* lower address, and TBI0 disabled */ |
212 | if ((va & AARCH64_ADDRESS_TAGPAC_MASK) != 0) | | 213 | if ((va & AARCH64_ADDRESS_TAGPAC_MASK) != 0) |
213 | return AARCH64_ADDRSPACE_LOWER_OUTOFRANGE; | | 214 | return AARCH64_ADDRSPACE_LOWER_OUTOFRANGE; |
214 | return AARCH64_ADDRSPACE_LOWER; | | 215 | return AARCH64_ADDRSPACE_LOWER; |
215 | } | | 216 | } |
216 | /* upper address, and TBI1 disabled */ | | 217 | /* upper address, and TBI1 disabled */ |
217 | if ((va & AARCH64_ADDRESS_TAGPAC_MASK) != AARCH64_ADDRESS_TAGPAC_MASK) | | 218 | if ((va & AARCH64_ADDRESS_TAGPAC_MASK) != AARCH64_ADDRESS_TAGPAC_MASK) |
218 | return AARCH64_ADDRSPACE_UPPER_OUTOFRANGE; | | 219 | return AARCH64_ADDRSPACE_UPPER_OUTOFRANGE; |
219 | return AARCH64_ADDRSPACE_UPPER; | | 220 | return AARCH64_ADDRSPACE_UPPER; |
220 | } | | 221 | } |
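A short usage sketch: classify a VA before deciding which translation table should resolve it (va_is_user_example() is a hypothetical wrapper):

	/* Sketch: does this VA belong to the user (TTBR0) space? */
	static inline bool
	va_is_user_example(vaddr_t va)
	{
		return aarch64_addressspace(va) == AARCH64_ADDRSPACE_LOWER;
	}
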
221 | | | 222 | |
222 | static inline vaddr_t | | 223 | static inline vaddr_t |
223 | aarch64_untag_address(vaddr_t va) | | 224 | aarch64_untag_address(vaddr_t va) |
224 | { | | 225 | { |
225 | uint64_t addrtop, tbi; | | 226 | uint64_t addrtop, tbi; |
226 | | | 227 | |
227 | addrtop = va & AARCH64_ADDRTOP_TAG; | | 228 | addrtop = va & AARCH64_ADDRTOP_TAG; |
228 | tbi = addrtop ? TCR_TBI1 : TCR_TBI0; | | 229 | tbi = addrtop ? TCR_TBI1 : TCR_TBI0; |
229 | if (reg_tcr_el1_read() & tbi) { | | 230 | if (reg_tcr_el1_read() & tbi) { |
230 | if (addrtop == 0) { | | 231 | if (addrtop == 0) { |
231 | /* lower address, and TBI0 enabled */ | | 232 | /* lower address, and TBI0 enabled */ |
232 | return va & ~AARCH64_ADDRESS_TAG_MASK; | | 233 | return va & ~AARCH64_ADDRESS_TAG_MASK; |
233 | } | | 234 | } |
234 | /* upper address, and TBI1 enabled */ | | 235 | /* upper address, and TBI1 enabled */ |
235 | return va | AARCH64_ADDRESS_TAG_MASK; | | 236 | return va | AARCH64_ADDRESS_TAG_MASK; |
236 | } | | 237 | } |
237 | | | 238 | |
238 | /* TBI[01] is disabled, nothing to do */ | | 239 | /* TBI[01] is disabled, nothing to do */ |
239 | return va; | | 240 | return va; |
240 | } | | 241 | } |
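Callers typically canonicalize a user-supplied pointer before a bounds compare, since a TBI tag byte would otherwise defeat the check. A hedged sketch (uva_in_range_example() is hypothetical; VM_MAXUSER_ADDRESS is assumed to be the conventional NetBSD limit from vmparam.h):

	/* Sketch: strip any TBI tag, then bounds-check the user VA. */
	static inline bool
	uva_in_range_example(vaddr_t va)
	{
		return aarch64_untag_address(va) < VM_MAXUSER_ADDRESS;
	}
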
241 | | | 242 | |
242 | #endif /* _KERNEL */ | | 243 | #endif /* _KERNEL */ |
243 | | | 244 | |
244 | static __inline uint64_t | | 245 | static __inline uint64_t |
245 | aarch64_strip_pac(uint64_t __val) | | 246 | aarch64_strip_pac(uint64_t __val) |
246 | { | | 247 | { |
247 | if (__val & AARCH64_ADDRTOP_TAG) | | 248 | if (__val & AARCH64_ADDRTOP_TAG) |
248 | return __val | AARCH64_ADDRESS_TAGPAC_MASK; | | 249 | return __val | AARCH64_ADDRESS_TAGPAC_MASK; |
249 | return __val & ~AARCH64_ADDRESS_TAGPAC_MASK; | | 250 | return __val & ~AARCH64_ADDRESS_TAGPAC_MASK; |
250 | } | | 251 | } |
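aarch64_strip_pac() in effect sign-extends from bit 55 across the tag and PAC bits, turning a PAC-signed pointer back into a canonical kernel or user address; a stack unwinder is the typical consumer. A minimal hedged sketch (backtrace_pc_example() is hypothetical):

	/* Sketch: recover a canonical PC from a PAC-signed saved LR. */
	static inline uint64_t
	backtrace_pc_example(uint64_t saved_lr)
	{
		return aarch64_strip_pac(saved_lr);
	}
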
251 | | | 252 | |
252 | #endif /* _AARCH64_CPUFUNC_H_ */ | | 253 | #endif /* _AARCH64_CPUFUNC_H_ */ |