Wed Aug 22 17:04:36 2018 UTC
Explicitly unpoison the stack when entering a softint.

Softints are the only place where we "discard" part of the stack: we may
have left the thread without giving the ASan instrumentation a chance to
clear the poison. In that case, we can get false positives when we hit a
poisoned area of the stack while executing another handler within the
same softint thread.

(I was actually getting a rare false positive in ip6intr.)


(maxv)
diff -r1.3 -r1.4 src/sys/arch/amd64/amd64/asan.c
diff -r1.35 -r1.36 src/sys/arch/amd64/amd64/spl.S
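
The heart of the change is a new kasan_softint() hook in asan.c. It takes
the softint LWP and fills the shadow of its whole uarea with 0, that is,
it marks the entire handler stack as addressable again:

	void
	kasan_softint(struct lwp *l)
	{
		const void *stk = (const void *)uvm_lwp_getuarea(l);

		kasan_shadow_fill(stk, USPACE, 0);
	}

spl.S then calls this hook from the softint entry point (Xsoftintr),
under #ifdef KASAN, right before we borrow the VM context and switch to
the handler's stack. The full diffs follow.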

cvs diff -r1.3 -r1.4 src/sys/arch/amd64/amd64/Attic/asan.c

--- src/sys/arch/amd64/amd64/Attic/asan.c 2018/08/22 12:07:42 1.3
+++ src/sys/arch/amd64/amd64/Attic/asan.c 2018/08/22 17:04:36 1.4
@@ -1,645 +1,654 @@
-/*	$NetBSD: asan.c,v 1.3 2018/08/22 12:07:42 maxv Exp $	*/
+/*	$NetBSD: asan.c,v 1.4 2018/08/22 17:04:36 maxv Exp $	*/
 
 /*
  * Copyright (c) 2018 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Maxime Villard, and Siddharth Muralee.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: asan.c,v 1.3 2018/08/22 12:07:42 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: asan.c,v 1.4 2018/08/22 17:04:36 maxv Exp $");
 
 #include <sys/param.h>
 #include <sys/device.h>
 #include <sys/kernel.h>
 #include <sys/module.h>
 #include <sys/param.h>
 #include <sys/conf.h>
 #include <sys/systm.h>
 #include <sys/types.h>
 #include <sys/asan.h>
 
 #include <uvm/uvm.h>
 #include <amd64/pmap.h>
 #include <amd64/vmparam.h>
 
 #define VIRTUAL_SHIFT		47	/* 48bit address space, cut half */
 #define CANONICAL_BASE		0xFFFF800000000000
 
 #define KASAN_SHADOW_SCALE_SHIFT	3
 #define KASAN_SHADOW_SCALE_SIZE	(1UL << KASAN_SHADOW_SCALE_SHIFT)
 #define KASAN_SHADOW_MASK	(KASAN_SHADOW_SCALE_SIZE - 1)
 
 #define KASAN_SHADOW_SIZE	(1ULL << (VIRTUAL_SHIFT - KASAN_SHADOW_SCALE_SHIFT))
 #define KASAN_SHADOW_START	(VA_SIGN_NEG((L4_SLOT_KASAN * NBPD_L4)))
 #define KASAN_SHADOW_END	(KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
 
 #define __RET_ADDR	(unsigned long)__builtin_return_address(0)
 
+void kasan_softint(struct lwp *);
 void kasan_shadow_map(void *, size_t);
 void kasan_early_init(void);
 void kasan_init(void);
 
 static bool kasan_enabled __read_mostly = false;
 
 static inline int8_t *kasan_addr_to_shad(const void *addr)
 {
 	vaddr_t va = (vaddr_t)addr;
 	return (int8_t *)(KASAN_SHADOW_START +
 	    ((va - CANONICAL_BASE) >> KASAN_SHADOW_SCALE_SHIFT));
 }
 
 static __always_inline bool
 kasan_unsupported(vaddr_t addr)
 {
 	return (addr >= (vaddr_t)PTE_BASE &&
 	    addr < ((vaddr_t)PTE_BASE + NBPD_L4));
 }
 
 /* -------------------------------------------------------------------------- */
 
 static bool kasan_early __read_mostly = true;
 static uint8_t earlypages[8 * PAGE_SIZE] __aligned(PAGE_SIZE);
 static size_t earlytaken = 0;
 
 static paddr_t
 kasan_early_palloc(void)
 {
 	paddr_t ret;
 
 	KASSERT(earlytaken < 8);
 
 	ret = (paddr_t)(&earlypages[0] + earlytaken * PAGE_SIZE);
 	earlytaken++;
 
 	ret -= KERNBASE;
 
 	return ret;
 }
 
 static paddr_t
 kasan_palloc(void)
 {
 	paddr_t pa;
 
 	if (__predict_false(kasan_early))
 		pa = kasan_early_palloc();
 	else
 		pa = pmap_get_physpage();
 
 	return pa;
 }
 
 static void
 kasan_shadow_map_page(vaddr_t va)
 {
 	paddr_t pa;
 
 	if (!pmap_valid_entry(L4_BASE[pl4_i(va)])) {
 		pa = kasan_palloc();
 		L4_BASE[pl4_i(va)] = pa | PG_KW | pmap_pg_nx | PG_V;
 	}
 	if (!pmap_valid_entry(L3_BASE[pl3_i(va)])) {
 		pa = kasan_palloc();
 		L3_BASE[pl3_i(va)] = pa | PG_KW | pmap_pg_nx | PG_V;
 	}
 	if (!pmap_valid_entry(L2_BASE[pl2_i(va)])) {
 		pa = kasan_palloc();
 		L2_BASE[pl2_i(va)] = pa | PG_KW | pmap_pg_nx | PG_V;
 	}
 	if (!pmap_valid_entry(L1_BASE[pl1_i(va)])) {
 		pa = kasan_palloc();
 		L1_BASE[pl1_i(va)] = pa | PG_KW | pmap_pg_g | pmap_pg_nx | PG_V;
 	}
 }
 
 /*
  * Allocate the necessary stuff in the shadow, so that we can monitor the
  * passed area.
  */
 void
 kasan_shadow_map(void *addr, size_t size)
 {
 	size_t sz, npages, i;
 	vaddr_t sva, eva;
 
 	KASSERT((vaddr_t)addr % KASAN_SHADOW_SCALE_SIZE == 0);
 
 	sz = roundup(size, KASAN_SHADOW_SCALE_SIZE) / KASAN_SHADOW_SCALE_SIZE;
 
 	sva = (vaddr_t)kasan_addr_to_shad(addr);
 	eva = (vaddr_t)kasan_addr_to_shad(addr) + sz;
 
 	sva = rounddown(sva, PAGE_SIZE);
 	eva = roundup(eva, PAGE_SIZE);
 
 	npages = (eva - sva) / PAGE_SIZE;
 
 	KASSERT(sva >= KASAN_SHADOW_START && eva < KASAN_SHADOW_END);
 
 	for (i = 0; i < npages; i++) {
 		kasan_shadow_map_page(sva + i * PAGE_SIZE);
 	}
 }
 
 /* -------------------------------------------------------------------------- */
 
 #ifdef __HAVE_PCPU_AREA
 #error "PCPU area not allowed with KASAN"
 #endif
 #ifdef __HAVE_DIRECT_MAP
 #error "DMAP not allowed with KASAN"
 #endif
 
 static void
 kasan_ctors(void)
 {
 	extern uint64_t __CTOR_LIST__, __CTOR_END__;
 	size_t nentries, i;
 	uint64_t *ptr;
 
 	nentries = ((size_t)&__CTOR_END__ - (size_t)&__CTOR_LIST__) /
 	    sizeof(uintptr_t);
 
 	ptr = &__CTOR_LIST__;
 	for (i = 0; i < nentries; i++) {
 		void (*func)(void);
 
 		func = (void *)(*ptr);
 		(*func)();
 
 		ptr++;
 	}
 }
 
 /*
  * Map only the current stack. We will map the rest in kasan_init.
  */
 void
 kasan_early_init(void)
 {
 	extern vaddr_t lwp0uarea;
 
 	kasan_shadow_map((void *)lwp0uarea, USPACE);
 	kasan_early = false;
 }
 
 /*
  * Create the shadow mapping. We don't create the 'User' area, because we
  * exclude it from the monitoring. The 'Main' area is created dynamically
  * in pmap_growkernel.
  */
 void
 kasan_init(void)
 {
 	extern struct bootspace bootspace;
 	size_t i;
 
 	CTASSERT((KASAN_SHADOW_SIZE / NBPD_L4) == NL4_SLOT_KASAN);
 
 	/* Kernel. */
 	for (i = 0; i < BTSPACE_NSEGS; i++) {
 		if (bootspace.segs[i].type == BTSEG_NONE) {
 			continue;
 		}
 		kasan_shadow_map((void *)bootspace.segs[i].va,
 		    bootspace.segs[i].sz);
 	}
 
 	/* Boot region. */
 	kasan_shadow_map((void *)bootspace.boot.va, bootspace.boot.sz);
 
 	/* Module map. */
 	kasan_shadow_map((void *)bootspace.smodule,
 	    (size_t)(bootspace.emodule - bootspace.smodule));
 
 	/* The bootstrap spare va. */
 	kasan_shadow_map((void *)bootspace.spareva, PAGE_SIZE);
 
 	kasan_enabled = true;
 
 	/* Call the ASAN constructors. */
 	kasan_ctors();
 }
 
 /* -------------------------------------------------------------------------- */
 
 static void
 kasan_report(unsigned long addr, size_t size, bool write, unsigned long rip)
 {
 	printf("kASan: Unauthorized Access In %p: Addr %p [%zu byte%s, %s]\n",
 	    (void *)rip, (void *)addr, size, (size > 1 ? "s" : ""),
 	    (write ? "write" : "read"));
 }
 
 /* -------------------------------------------------------------------------- */
 
 /* Our redzone values. */
 #define KASAN_GLOBAL_REDZONE	0xFA
 #define KASAN_MEMORY_REDZONE	0xFB
 
 /* Stack redzone shadow values. Part of the compiler ABI. */
 #define KASAN_STACK_LEFT	0xF1
 #define KASAN_STACK_MID		0xF2
 #define KASAN_STACK_RIGHT	0xF3
 #define KASAN_STACK_PARTIAL	0xF4
 #define KASAN_USE_AFTER_SCOPE	0xF8
 
 static void
 kasan_shadow_fill(const void *addr, size_t size, uint8_t val)
 {
 	void *shad;
 
 	if (__predict_false(!kasan_enabled))
 		return;
 	if (__predict_false(size == 0))
 		return;
 	if (__predict_false(kasan_unsupported((vaddr_t)addr)))
 		return;
 
 	KASSERT((vaddr_t)addr % KASAN_SHADOW_SCALE_SIZE == 0);
 	KASSERT(size % KASAN_SHADOW_SCALE_SIZE == 0);
 
 	shad = (void *)kasan_addr_to_shad(addr);
 	size = size >> KASAN_SHADOW_SCALE_SHIFT;
 
 	__builtin_memset(shad, val, size);
 }
 
 static __always_inline void
 kasan_shadow_1byte_markvalid(unsigned long addr)
 {
 	int8_t *byte = kasan_addr_to_shad((void *)addr);
 	int8_t last = (addr & KASAN_SHADOW_MASK) + 1;
 
 	*byte = last;
 }
 
 void
 kasan_add_redzone(size_t *size)
 {
 	*size = roundup(*size, KASAN_SHADOW_SCALE_SIZE);
 	*size += KASAN_SHADOW_SCALE_SIZE;
 }
 
 static void
 kasan_markmem(const void *addr, size_t size, bool valid)
 {
 	size_t i;
 
 	KASSERT((vaddr_t)addr % KASAN_SHADOW_SCALE_SIZE == 0);
 
 	if (valid) {
 		for (i = 0; i < size; i++) {
 			kasan_shadow_1byte_markvalid((unsigned long)addr+i);
 		}
 	} else {
 		KASSERT(size % KASAN_SHADOW_SCALE_SIZE == 0);
 		kasan_shadow_fill(addr, size, KASAN_MEMORY_REDZONE);
 	}
 }
 
 void
+kasan_softint(struct lwp *l)
+{
+	const void *stk = (const void *)uvm_lwp_getuarea(l);
+
+	kasan_shadow_fill(stk, USPACE, 0);
+}
+
+void
 kasan_alloc(const void *addr, size_t size, size_t sz_with_redz)
 {
 	kasan_markmem(addr, sz_with_redz, false);
 	kasan_markmem(addr, size, true);
 }
 
 void
 kasan_free(const void *addr, size_t sz_with_redz)
 {
 	kasan_markmem(addr, sz_with_redz, true);
 }
 
 /* -------------------------------------------------------------------------- */
 
 #define ADDR_CROSSES_SCALE_BOUNDARY(addr, size)			\
 	(addr >> KASAN_SHADOW_SCALE_SHIFT) !=			\
 	    ((addr + size - 1) >> KASAN_SHADOW_SCALE_SHIFT)
 
 static __always_inline bool
 kasan_shadow_1byte_isvalid(unsigned long addr)
 {
 	int8_t *byte = kasan_addr_to_shad((void *)addr);
 	int8_t last = (addr & KASAN_SHADOW_MASK) + 1;
 
 	return __predict_true(*byte == 0 || last <= *byte);
 }
 
 static __always_inline bool
 kasan_shadow_2byte_isvalid(unsigned long addr)
 {
 	int8_t *byte, last;
 
 	if (ADDR_CROSSES_SCALE_BOUNDARY(addr, 2)) {
 		return (kasan_shadow_1byte_isvalid(addr) &&
 		    kasan_shadow_1byte_isvalid(addr+1));
 	}
 
 	byte = kasan_addr_to_shad((void *)addr);
 	last = ((addr + 1) & KASAN_SHADOW_MASK) + 1;
 
 	return __predict_true(*byte == 0 || last <= *byte);
 }
 
 static __always_inline bool
 kasan_shadow_4byte_isvalid(unsigned long addr)
 {
 	int8_t *byte, last;
 
 	if (ADDR_CROSSES_SCALE_BOUNDARY(addr, 4)) {
 		return (kasan_shadow_2byte_isvalid(addr) &&
 		    kasan_shadow_2byte_isvalid(addr+2));
 	}
 
 	byte = kasan_addr_to_shad((void *)addr);
 	last = ((addr + 3) & KASAN_SHADOW_MASK) + 1;
 
 	return __predict_true(*byte == 0 || last <= *byte);
 }
 
 static __always_inline bool
 kasan_shadow_8byte_isvalid(unsigned long addr)
 {
 	int8_t *byte, last;
 
 	if (ADDR_CROSSES_SCALE_BOUNDARY(addr, 8)) {
 		return (kasan_shadow_4byte_isvalid(addr) &&
 		    kasan_shadow_4byte_isvalid(addr+4));
 	}
 
 	byte = kasan_addr_to_shad((void *)addr);
 	last = ((addr + 7) & KASAN_SHADOW_MASK) + 1;
 
 	return __predict_true(*byte == 0 || last <= *byte);
 }
 
 static __always_inline bool
 kasan_shadow_Nbyte_isvalid(unsigned long addr, size_t size)
 {
 	size_t i;
 
 	for (i = 0; i < size; i++) {
 		if (!kasan_shadow_1byte_isvalid(addr+i))
 			return false;
 	}
 
 	return true;
 }
 
 static __always_inline void
 kasan_shadow_check(unsigned long addr, size_t size, bool write,
     unsigned long retaddr)
 {
 	bool valid;
 
 	if (__predict_false(!kasan_enabled))
 		return;
 	if (__predict_false(size == 0))
 		return;
 	if (__predict_false(kasan_unsupported(addr)))
 		return;
 
 	if (__builtin_constant_p(size)) {
 		switch (size) {
 		case 1:
 			valid = kasan_shadow_1byte_isvalid(addr);
 			break;
 		case 2:
 			valid = kasan_shadow_2byte_isvalid(addr);
 			break;
 		case 4:
 			valid = kasan_shadow_4byte_isvalid(addr);
 			break;
 		case 8:
 			valid = kasan_shadow_8byte_isvalid(addr);
 			break;
 		default:
 			valid = kasan_shadow_Nbyte_isvalid(addr, size);
 			break;
 		}
 	} else {
 		valid = kasan_shadow_Nbyte_isvalid(addr, size);
 	}
 
 	if (__predict_false(!valid)) {
 		kasan_report(addr, size, write, retaddr);
 	}
 }
 
 /* -------------------------------------------------------------------------- */
 
 void *
 kasan_memcpy(void *dst, const void *src, size_t len)
 {
 	kasan_shadow_check((unsigned long)src, len, false, __RET_ADDR);
 	kasan_shadow_check((unsigned long)dst, len, true, __RET_ADDR);
 	return __builtin_memcpy(dst, src, len);
 }
 
 int
 kasan_memcmp(const void *b1, const void *b2, size_t len)
 {
 	kasan_shadow_check((unsigned long)b1, len, false, __RET_ADDR);
 	kasan_shadow_check((unsigned long)b2, len, false, __RET_ADDR);
 	return __builtin_memcmp(b1, b2, len);
 }
 
 void *
 kasan_memset(void *b, int c, size_t len)
 {
 	kasan_shadow_check((unsigned long)b, len, true, __RET_ADDR);
 	return __builtin_memset(b, c, len);
 }
 
 /* -------------------------------------------------------------------------- */
 
 #if defined(__clang__) && (__clang_major__ - 0 >= 6)
 #define ASAN_ABI_VERSION	8
 #elif __GNUC_PREREQ__(7, 1) && !defined(__clang__)
 #define ASAN_ABI_VERSION	8
 #elif __GNUC_PREREQ__(6, 1) && !defined(__clang__)
 #define ASAN_ABI_VERSION	6
 #else
 #error "Unsupported compiler version"
 #endif
 
 /*
  * Part of the compiler ABI.
  */
 struct __asan_global_source_location {
 	const char *filename;
 	int line_no;
 	int column_no;
 };
 struct __asan_global {
 	const void *beg;		/* address of the global variable */
 	size_t size;			/* size of the global variable */
 	size_t size_with_redzone;	/* size with the redzone */
 	const void *name;		/* name of the variable */
 	const void *module_name;	/* name of the module where the var is declared */
 	unsigned long has_dynamic_init;	/* the var has dyn initializer (c++) */
 	struct __asan_global_source_location *location;
 #if ASAN_ABI_VERSION >= 7
 	uintptr_t odr_indicator;	/* the address of the ODR indicator symbol */
 #endif
 };
 
 void __asan_register_globals(struct __asan_global *, size_t);
 void __asan_unregister_globals(struct __asan_global *, size_t);
 
 static void
 kasan_register_global(struct __asan_global *global)
 {
 	size_t aligned_size = roundup(global->size, KASAN_SHADOW_SCALE_SIZE);
 
 	/* Poison the redzone following the var. */
 	kasan_shadow_fill((void *)((uintptr_t)global->beg + aligned_size),
 	    global->size_with_redzone - aligned_size, KASAN_GLOBAL_REDZONE);
 }
 
 void
 __asan_register_globals(struct __asan_global *globals, size_t size)
 {
 	size_t i;
 	for (i = 0; i < size; i++) {
 		kasan_register_global(&globals[i]);
 	}
 }
 
 void
 __asan_unregister_globals(struct __asan_global *globals, size_t size)
 {
 }
 
 #define ASAN_LOAD_STORE(size)					\
 	void __asan_load##size(unsigned long);			\
 	void __asan_load##size(unsigned long addr)		\
 	{							\
 		kasan_shadow_check(addr, size, false, __RET_ADDR);\
 	}							\
 	void __asan_load##size##_noabort(unsigned long);	\
 	void __asan_load##size##_noabort(unsigned long addr)	\
 	{							\
 		kasan_shadow_check(addr, size, false, __RET_ADDR);\
 	}							\
 	void __asan_store##size(unsigned long);			\
 	void __asan_store##size(unsigned long addr)		\
 	{							\
 		kasan_shadow_check(addr, size, true, __RET_ADDR);\
 	}							\
 	void __asan_store##size##_noabort(unsigned long);	\
 	void __asan_store##size##_noabort(unsigned long addr)	\
 	{							\
 		kasan_shadow_check(addr, size, true, __RET_ADDR);\
 	}
 
 ASAN_LOAD_STORE(1);
 ASAN_LOAD_STORE(2);
 ASAN_LOAD_STORE(4);
 ASAN_LOAD_STORE(8);
 ASAN_LOAD_STORE(16);
 
 void __asan_loadN(unsigned long, size_t);
 void __asan_loadN_noabort(unsigned long, size_t);
 void __asan_storeN(unsigned long, size_t);
 void __asan_storeN_noabort(unsigned long, size_t);
 void __asan_handle_no_return(void);
 void __asan_poison_stack_memory(const void *, size_t);
 void __asan_unpoison_stack_memory(const void *, size_t);
 void __asan_alloca_poison(unsigned long, size_t);
 void __asan_allocas_unpoison(const void *, const void *);
 
 void
 __asan_loadN(unsigned long addr, size_t size)
 {
 	kasan_shadow_check(addr, size, false, __RET_ADDR);
 }
 
 void
 __asan_loadN_noabort(unsigned long addr, size_t size)
 {
 	kasan_shadow_check(addr, size, false, __RET_ADDR);
 }
 
 void
 __asan_storeN(unsigned long addr, size_t size)
 {
 	kasan_shadow_check(addr, size, true, __RET_ADDR);
 }
 
 void
 __asan_storeN_noabort(unsigned long addr, size_t size)
 {
 	kasan_shadow_check(addr, size, true, __RET_ADDR);
 }
 
 void
 __asan_handle_no_return(void)
 {
 	/* nothing */
 }
 
 void
 __asan_poison_stack_memory(const void *addr, size_t size)
 {
 	KASSERT((vaddr_t)addr % KASAN_SHADOW_SCALE_SIZE == 0);
 	kasan_shadow_fill(addr, size, KASAN_USE_AFTER_SCOPE);
 }
 
 void
 __asan_unpoison_stack_memory(const void *addr, size_t size)
 {
 	KASSERT((vaddr_t)addr % KASAN_SHADOW_SCALE_SIZE == 0);
 	kasan_shadow_fill(addr, size, 0);
 }
 
 void
 __asan_alloca_poison(unsigned long addr, size_t size)
 {
 	panic("%s: impossible!", __func__);
 }
 
 void
 __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom)
 {
 	panic("%s: impossible!", __func__);
 }
 
 #define ASAN_SET_SHADOW(byte)					\
 	void __asan_set_shadow_##byte(void *, size_t);		\
 	void __asan_set_shadow_##byte(void *addr, size_t size)	\
 	{							\
 		__builtin_memset((void *)addr, 0x##byte, size);	\
 	}
 
 ASAN_SET_SHADOW(00);
 ASAN_SET_SHADOW(f1);
 ASAN_SET_SHADOW(f2);
 ASAN_SET_SHADOW(f3);
 ASAN_SET_SHADOW(f5);
 ASAN_SET_SHADOW(f8);
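
Given the 1:8 shadow scale used throughout this file, the fill performed
by kasan_softint() boils down to the following (a sketch with
kasan_addr_to_shad() folded in; a shadow byte of 0 means "all 8 bytes of
the corresponding memory are valid"):

	/* Sketch: kasan_shadow_fill(stk, USPACE, 0) with the helper inlined. */
	int8_t *shad = (int8_t *)(KASAN_SHADOW_START +
	    (((vaddr_t)stk - CANONICAL_BASE) >> KASAN_SHADOW_SCALE_SHIFT));

	__builtin_memset(shad, 0, USPACE >> KASAN_SHADOW_SCALE_SHIFT);

Note that kasan_shadow_fill() returns early while kasan_enabled is still
false, so the hook is also safe early in boot.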

cvs diff -r1.35 -r1.36 src/sys/arch/amd64/amd64/spl.S

--- src/sys/arch/amd64/amd64/spl.S 2018/07/14 14:29:40 1.35
+++ src/sys/arch/amd64/amd64/spl.S 2018/08/22 17:04:36 1.36
@@ -1,368 +1,380 @@
-/*	$NetBSD: spl.S,v 1.35 2018/07/14 14:29:40 maxv Exp $	*/
+/*	$NetBSD: spl.S,v 1.36 2018/08/22 17:04:36 maxv Exp $	*/
 
 /*
  * Copyright (c) 2003 Wasabi Systems, Inc.
  * All rights reserved.
  *
  * Written by Frank van der Linden for Wasabi Systems, Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. All advertising materials mentioning features or use of this software
  *    must display the following acknowledgement:
  *      This product includes software developed for the NetBSD Project by
  *      Wasabi Systems, Inc.
  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
  *    or promote products derived from this software without specific prior
  *    written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 /*
  * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Charles M. Hannum and Andrew Doran.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include "opt_ddb.h"
+#include "opt_kasan.h"
 
 #define ALIGN_TEXT	.align 16,0x90
 
 #include <machine/asm.h>
 #include <machine/trap.h>
 #include <machine/segments.h>
 #include <machine/frameasm.h>
 
 #include "assym.h"
 
 	.text
 
 #ifndef XEN
 /*
  * Xsoftintr()
  *
  * Switch to the LWP assigned to handle interrupts from the given
  * source. We borrow the VM context from the interrupted LWP.
  *
  * On entry:
  *
  *	%rax		intrsource
  *	%r13		address to return to
  */
 IDTVEC(softintr)
 	/* set up struct switchframe */
 	pushq	$_C_LABEL(softintr_ret)
 	pushq	%rbx
 	pushq	%r12
 	pushq	%r13
 	pushq	%r14
 	pushq	%r15
 
 	movl	$IPL_HIGH,CPUVAR(ILEVEL)
 	movq	CPUVAR(CURLWP),%r15
 	movq	IS_LWP(%rax),%rdi	/* switch to handler LWP */
 	movq	L_PCB(%rdi),%rdx
 	movq	L_PCB(%r15),%rcx
 	movq	%rdi,CPUVAR(CURLWP)
 
+#ifdef KASAN
+	/* clear the new stack */
+	pushq	%rax
+	pushq	%rdx
+	pushq	%rcx
+	callq	_C_LABEL(kasan_softint)
+	popq	%rcx
+	popq	%rdx
+	popq	%rax
+#endif
+
 	/* save old context */
 	movq	%rsp,PCB_RSP(%rcx)
 	movq	%rbp,PCB_RBP(%rcx)
 
 	/* switch to the new stack */
 	movq	PCB_RSP0(%rdx),%rsp
 
 	/* dispatch */
 	sti
 	movq	%r15,%rdi		/* interrupted LWP */
 	movl	IS_MAXLEVEL(%rax),%esi	/* ipl to run at */
 	call	_C_LABEL(softint_dispatch)/* run handlers */
 	cli
 
 	/* restore old context */
 	movq	L_PCB(%r15),%rcx
 	movq	PCB_RSP(%rcx),%rsp
 
 	xchgq	%r15,CPUVAR(CURLWP)	/* must be globally visible */
 	popq	%r15			/* unwind switchframe */
 	addq	$(5 * 8),%rsp
 	jmp	*%r13			/* back to Xspllower/Xdoreti */
 IDTVEC_END(softintr)
 
 /*
  * softintr_ret()
  *
  * Trampoline function that gets returned to by cpu_switchto() when
  * an interrupt handler blocks. On entry:
  *
  *	%rax		prevlwp from cpu_switchto()
  */
 ENTRY(softintr_ret)
 	incl	CPUVAR(MTX_COUNT)	/* re-adjust after mi_switch */
 	movl	$0,L_CTXSWTCH(%rax)	/* %rax from cpu_switchto */
 	cli
 	jmp	*%r13			/* back to Xspllower/Xdoreti */
 END(softintr_ret)
 
 /*
  * void softint_trigger(uintptr_t machdep);
  *
  * Software interrupt registration.
  */
 ENTRY(softint_trigger)
 	orl	%edi,CPUVAR(IPENDING)	/* atomic on local cpu */
 	ret
 END(softint_trigger)
 
 
 /*
  * Xrecurse_preempt()
  *
  * Handles preemption interrupts via Xspllower().
  */
 IDTVEC(recurse_preempt)
 	movl	$IPL_PREEMPT,CPUVAR(ILEVEL)
 	sti
 	xorq	%rdi,%rdi
 	call	_C_LABEL(kpreempt)
 	cli
 	jmp	*%r13			/* back to Xspllower */
 IDTVEC_END(recurse_preempt)
 
 /*
  * Xresume_preempt()
  *
  * Handles preemption interrupts via Xdoreti().
  */
 IDTVEC(resume_preempt)
 	movl	$IPL_PREEMPT,CPUVAR(ILEVEL)
 	sti
 	testq	$SEL_RPL,TF_CS(%rsp)
 	jnz	1f
 	movq	TF_RIP(%rsp),%rdi
 	call	_C_LABEL(kpreempt)	/* from kernel */
 	cli
 	jmp	*%r13			/* back to Xdoreti */
 1:
 	call	_C_LABEL(preempt)	/* from user */
 	cli
 	jmp	*%r13			/* back to Xdoreti */
 IDTVEC_END(resume_preempt)
 
 /*
  * int splraise(int s);
  */
 ENTRY(splraise)
 	movl	CPUVAR(ILEVEL),%eax
 	cmpl	%edi,%eax
 	cmoval	%eax,%edi
 	movl	%edi,CPUVAR(ILEVEL)
 	ret
 END(splraise)
 
 /*
  * void spllower(int s);
  *
  * Must be the same size as cx8_spllower(). This must use
  * pushf/cli/popf as it is used early in boot where interrupts
  * are disabled via eflags/IE.
  */
 ENTRY(spllower)
 	cmpl	CPUVAR(ILEVEL),%edi
 	jae	1f
 	movl	CPUVAR(IUNMASK)(,%rdi,4),%edx
 	pushf
 	cli
 	testl	CPUVAR(IPENDING),%edx
 	jnz	2f
 	movl	%edi,CPUVAR(ILEVEL)
 	popf
 1:
 	ret
 	ret
 2:
 	popf
 	jmp	_C_LABEL(Xspllower)
 3:
 	.space 16
 	.align 16
 END(spllower)
 LABEL(spllower_end)
 #endif /* !XEN */
 
 /*
  * void cx8_spllower(int s);
  *
  * For cmpxchg8b, edx/ecx are the high words and eax/ebx the low.
  *
  * edx : eax = old level / old ipending
  * ecx : ebx = new level / old ipending
  */
 ENTRY(cx8_spllower)
 	movl	CPUVAR(ILEVEL),%edx
 	movq	%rbx,%r8
 	cmpl	%edx,%edi			/* new level is lower? */
 	jae	1f
 0:
 	movl	CPUVAR(IPENDING),%eax
 	movl	%edi,%ecx
 	testl	%eax,CPUVAR(IUNMASK)(,%rcx,4)/* deferred interrupts? */
 	movl	%eax,%ebx
 	/*
 	 * On the P4 this jump is cheaper than patching in junk
 	 * using cmov. Is cmpxchg expensive if it fails?
 	 */
 	jnz	2f
 	cmpxchg8b CPUVAR(ISTATE)		/* swap in new ilevel */
 	jnz	0b
 1:
 	movq	%r8,%rbx
 	ret
 2:
 	movq	%r8,%rbx
 	.type	_C_LABEL(cx8_spllower_patch), @function
 LABEL(cx8_spllower_patch)
 	jmp	_C_LABEL(Xspllower)
 
 	.align	16
 END(cx8_spllower_patch)
 END(cx8_spllower)
 LABEL(cx8_spllower_end)
 
 /*
  * void Xspllower(int s);
  *
  * Process pending interrupts.
  *
  * Important registers:
  *   ebx - cpl
  *   r13 - address to resume loop at
  *
  * It is important that the bit scan instruction is bsr, it will get
  * the highest 2 bits (currently the IPI and clock handlers) first,
  * to avoid deadlocks where one CPU sends an IPI, another one is at
  * splhigh() and defers it, lands in here via splx(), and handles
  * a lower-prio one first, which needs to take the kernel lock -->
  * the sending CPU will never see the that CPU accept the IPI
  * (see pmap_tlb_shootnow).
  */
 	nop
 	.align	4	/* Avoid confusion with cx8_spllower_end */
 
 IDTVEC(spllower)
 	pushq	%rbx
 	pushq	%r13
 	pushq	%r12
 	movl	%edi,%ebx
 	leaq	1f(%rip),%r13		/* address to resume loop at */
 1:	movl	%ebx,%eax		/* get cpl */
 	movl	CPUVAR(IUNMASK)(,%rax,4),%eax
 	CLI(si)
 	andl	CPUVAR(IPENDING),%eax	/* any non-masked bits left? */
 	jz	2f
 	bsrl	%eax,%eax
 	btrl	%eax,CPUVAR(IPENDING)
 	movq	CPUVAR(ISOURCES)(,%rax,8),%rax
 	jmp	*IS_RECURSE(%rax)
 2:
 	movl	%ebx,CPUVAR(ILEVEL)
 	STI(si)
 	popq	%r12
 	popq	%r13
 	popq	%rbx
 	ret
 IDTVEC_END(spllower)
 
 /*
  * void Xdoreti(void);
  *
  * Handle return from interrupt after device handler finishes.
  *
  * Important registers:
  *   ebx - cpl to restore
  *   r13 - address to resume loop at
  */
 IDTVEC(doreti)
 	popq	%rbx			/* get previous priority */
 	decl	CPUVAR(IDEPTH)
 	leaq	1f(%rip),%r13
 1:	movl	%ebx,%eax
 	movl	CPUVAR(IUNMASK)(,%rax,4),%eax
 	CLI(si)
 	andl	CPUVAR(IPENDING),%eax
 	jz	2f
 	bsrl	%eax,%eax		/* slow, but not worth optimizing */
 	btrl	%eax,CPUVAR(IPENDING)
 	movq	CPUVAR(ISOURCES)(,%rax,8),%rax
 	jmp	*IS_RESUME(%rax)
 2:	/* Check for ASTs on exit to user mode. */
 	movl	%ebx,CPUVAR(ILEVEL)
 5:
 	testb	$SEL_RPL,TF_CS(%rsp)
 	jz	6f
 
 	.type	_C_LABEL(doreti_checkast), @function
 LABEL(doreti_checkast)
 	movq	CPUVAR(CURLWP),%r14
 	CHECK_ASTPENDING(%r14)
 	je	3f
 	CLEAR_ASTPENDING(%r14)
 	STI(si)
 	movl	$T_ASTFLT,TF_TRAPNO(%rsp)	/* XXX undo later.. */
 	/* Pushed T_ASTFLT into tf_trapno on entry. */
 	movq	%rsp,%rdi
 	call	_C_LABEL(trap)
 	CLI(si)
 	jmp	doreti_checkast
 3:
 	CHECK_DEFERRED_SWITCH
 	jnz	9f
 6:
 	INTRFASTEXIT
 9:
 	STI(si)
 	call	_C_LABEL(do_pmap_load)
 	CLI(si)
 	jmp	doreti_checkast		/* recheck ASTs */
 END(doreti_checkast)
 IDTVEC_END(doreti)
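
In C-like terms, the patched Xsoftintr entry behaves as sketched below
(illustrative pseudocode only; the field names mirror the assym offsets
IS_LWP and IS_MAXLEVEL used by the assembly). The push/pop pairs around
callq kasan_softint exist because %rax (the intrsource) and %rdx/%rcx
(the two PCB pointers) are caller-saved in the SysV AMD64 ABI and still
live across the call; %rdi already holds the handler LWP, which is
exactly the argument kasan_softint() expects.

	/* Illustrative pseudocode for Xsoftintr, not real kernel code. */
	void
	softintr_entry(struct intrsource *is)	/* is = %rax */
	{
		struct lwp *old = curlwp;	/* saved in %r15 */
		struct lwp *new = is->is_lwp;	/* IS_LWP(%rax) -> %rdi */

		curlwp = new;
	#ifdef KASAN
		kasan_softint(new);		/* unpoison the handler stack */
	#endif
		/* save old sp/fp, switch to the handler's stack, then: */
		softint_dispatch(old, is->is_maxlevel);
		/* switch back and jump to Xspllower/Xdoreti via %r13 */
	}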