Wed Jun 14 12:49:37 2017 UTC
Move the direct map from L4 slot 509 to slot 460. We will increase its size
dynamically; the sketch below shows where each slot places the map in the
virtual address space.


(maxv)
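For context: with 4-level paging each L4 slot covers NBPD_L4 bytes of virtual
space, and PMAP_DIRECT_BASE (defined near the end of the diff) is derived by
sign-extending L4_SLOT_DIRECT * NBPD_L4. A minimal standalone sketch of that
arithmetic, assuming the amd64 values NBPD_L4 = 1ULL << 39 (512 GiB) and
VA_SIGN_MASK = 0xffff000000000000; the sketch is illustrative only, not part
of the commit:

#include <stdio.h>
#include <stdint.h>

/* Assumed amd64 constants; VA_SIGN_NEG() mirrors the kernel's canonical
 * sign-extension of negative (kernel-half) virtual addresses. */
#define NBPD_L4		(1ULL << 39)	/* 512 GiB mapped per L4 slot */
#define VA_SIGN_MASK	0xffff000000000000ULL
#define VA_SIGN_NEG(va)	((va) | VA_SIGN_MASK)

int
main(void)
{
	/* Base VA of the direct map in the old slot (509) vs. the new (460). */
	printf("slot 509: 0x%016llx\n",
	    (unsigned long long)VA_SIGN_NEG(509 * NBPD_L4));	/* 0xfffffe8000000000 */
	printf("slot 460: 0x%016llx\n",
	    (unsigned long long)VA_SIGN_NEG(460 * NBPD_L4));	/* 0xffffe60000000000 */
	return 0;
}

Moving the map down to slot 460 leaves the slots above it unused, which is
presumably what allows the direct map to be grown across several slots later,
per the commit message.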

cvs diff -u -r1.64 -r1.65 src/sys/arch/x86/include/pmap.h

--- src/sys/arch/x86/include/pmap.h 2017/03/23 18:08:06 1.64
+++ src/sys/arch/x86/include/pmap.h 2017/06/14 12:49:37 1.65
@@ -1,509 +1,509 @@
-/*	$NetBSD: pmap.h,v 1.64 2017/03/23 18:08:06 maxv Exp $	*/
+/*	$NetBSD: pmap.h,v 1.65 2017/06/14 12:49:37 maxv Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 /*
  * Copyright (c) 2001 Wasabi Systems, Inc.
  * All rights reserved.
  *
  * Written by Frank van der Linden for Wasabi Systems, Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. All advertising materials mentioning features or use of this software
  *    must display the following acknowledgement:
  *      This product includes software developed for the NetBSD Project by
  *      Wasabi Systems, Inc.
  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
  *    or promote products derived from this software without specific prior
  *    written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 /*
  * pmap.h: see pmap.c for the history of this pmap module.
  */
 
 #ifndef _X86_PMAP_H_
 #define _X86_PMAP_H_
 
 /*
  * pl*_pi: index in the ptp page for a pde mapping a VA.
  * (pl*_i below is the index in the virtual array of all pdes per level)
  */
 #define pl1_pi(VA)	(((VA_SIGN_POS(VA)) & L1_MASK) >> L1_SHIFT)
 #define pl2_pi(VA)	(((VA_SIGN_POS(VA)) & L2_MASK) >> L2_SHIFT)
 #define pl3_pi(VA)	(((VA_SIGN_POS(VA)) & L3_MASK) >> L3_SHIFT)
 #define pl4_pi(VA)	(((VA_SIGN_POS(VA)) & L4_MASK) >> L4_SHIFT)
 
 /*
  * pl*_i: generate index into pde/pte arrays in virtual space
  *
  * pl_i(va, X) == plX_i(va) <= pl_i_roundup(va, X)
  */
 #define pl1_i(VA)	(((VA_SIGN_POS(VA)) & L1_FRAME) >> L1_SHIFT)
 #define pl2_i(VA)	(((VA_SIGN_POS(VA)) & L2_FRAME) >> L2_SHIFT)
 #define pl3_i(VA)	(((VA_SIGN_POS(VA)) & L3_FRAME) >> L3_SHIFT)
 #define pl4_i(VA)	(((VA_SIGN_POS(VA)) & L4_FRAME) >> L4_SHIFT)
 #define pl_i(va, lvl) \
 	(((VA_SIGN_POS(va)) & ptp_masks[(lvl)-1]) >> ptp_shifts[(lvl)-1])
 
 #define pl_i_roundup(va, lvl)	pl_i((va)+ ~ptp_masks[(lvl)-1], (lvl))
 
 /*
  * PTP macros:
  *   a PTP's index is the PD index of the PDE that points to it
  *   a PTP's offset is the byte-offset in the PTE space that this PTP is at
  *   a PTP's VA is the first VA mapped by that PTP
  */
 
 #define ptp_va2o(va, lvl)	(pl_i(va, (lvl)+1) * PAGE_SIZE)
 
 /* size of a PDP: usually one page, except for PAE */
 #ifdef PAE
 #define PDP_SIZE 4
 #else
 #define PDP_SIZE 1
 #endif
 
 
 #if defined(_KERNEL)
 #include <sys/kcpuset.h>
 #include <uvm/pmap/pmap_pvt.h>
 
 /*
  * pmap data structures: see pmap.c for details of locking.
  */
 
 /*
  * we maintain a list of all non-kernel pmaps
  */
 
 LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */
 
 /*
  * linked list of all non-kernel pmaps
  */
 extern struct pmap_head pmaps;
 extern kmutex_t pmaps_lock;	/* protects pmaps */
 
 /*
  * pool_cache(9) that PDPs are allocated from
  */
 extern struct pool_cache pmap_pdp_cache;
 
 /*
  * the pmap structure
  *
  * note that the pm_obj contains the lock pointer, the reference count,
  * page list, and number of PTPs within the pmap.
  *
  * pm_lock is the same as the lock for vm object 0. Changes to
  * the other objects may only be made if that lock has been taken
  * (the other object locks are only used when uvm_pagealloc is called)
  */
 
 struct pmap {
 	struct uvm_object pm_obj[PTP_LEVELS-1]; /* objects for lvl >= 1) */
 #define pm_lock pm_obj[0].vmobjlock
 	kmutex_t pm_obj_lock[PTP_LEVELS-1];	/* locks for pm_objs */
 	LIST_ENTRY(pmap) pm_list;	/* list (lck by pm_list lock) */
 	pd_entry_t *pm_pdir;		/* VA of PD (lck by object lock) */
 	paddr_t pm_pdirpa[PDP_SIZE];	/* PA of PDs (read-only after create) */
 	struct vm_page *pm_ptphint[PTP_LEVELS-1];
 					/* pointer to a PTP in our pmap */
 	struct pmap_statistics pm_stats; /* pmap stats (lck by object lock) */
 
 #if !defined(__x86_64__)
 	vaddr_t pm_hiexec;		/* highest executable mapping */
 #endif /* !defined(__x86_64__) */
 	int pm_flags;			/* see below */
 
 	union descriptor *pm_ldt;	/* user-set LDT */
 	size_t pm_ldt_len;		/* size of LDT in bytes */
 	int pm_ldt_sel;			/* LDT selector */
 	kcpuset_t *pm_cpus;		/* mask of CPUs using pmap */
 	kcpuset_t *pm_kernel_cpus;	/* mask of CPUs using kernel part
 					   of pmap */
 	kcpuset_t *pm_xen_ptp_cpus;	/* mask of CPUs which have this pmap's
 					   ptp mapped */
 	uint64_t pm_ncsw;		/* for assertions */
 	struct vm_page *pm_gc_ptp;	/* pages from pmap g/c */
 };
 
 /* macro to access pm_pdirpa slots */
 #ifdef PAE
 #define pmap_pdirpa(pmap, index) \
 	((pmap)->pm_pdirpa[l2tol3(index)] + l2tol2(index) * sizeof(pd_entry_t))
 #else
 #define pmap_pdirpa(pmap, index) \
 	((pmap)->pm_pdirpa[0] + (index) * sizeof(pd_entry_t))
 #endif
 
 /*
  * MD flags that we use for pmap_enter and pmap_kenter_pa:
  */
 
 /*
  * global kernel variables
  */
 
 /*
  * PDPpaddr is the physical address of the kernel's PDP.
  * - i386 non-PAE and amd64: PDPpaddr corresponds directly to the %cr3
  *   value associated to the kernel process, proc0.
  * - i386 PAE: it still represents the PA of the kernel's PDP (L2). Due to
  *   the L3 PD, it cannot be considered as the equivalent of a %cr3 any more.
  * - Xen: it corresponds to the PFN of the kernel's PDP.
  */
 extern u_long PDPpaddr;
 
 extern pd_entry_t pmap_pg_g;		/* do we support PG_G? */
 extern pd_entry_t pmap_pg_nx;		/* do we support PG_NX? */
 extern long nkptp[PTP_LEVELS];
 
 /*
  * macros
  */
 
 #define pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
 #define pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)
 
 #define pmap_clear_modify(pg)		pmap_clear_attrs(pg, PG_M)
 #define pmap_clear_reference(pg)	pmap_clear_attrs(pg, PG_U)
 #define pmap_copy(DP,SP,D,L,S)		__USE(L)
 #define pmap_is_modified(pg)		pmap_test_attrs(pg, PG_M)
 #define pmap_is_referenced(pg)		pmap_test_attrs(pg, PG_U)
 #define pmap_move(DP,SP,D,L,S)
 #define pmap_phys_address(ppn)		(x86_ptob(ppn) & ~X86_MMAP_FLAG_MASK)
 #define pmap_mmap_flags(ppn)		x86_mmap_flags(ppn)
 #define pmap_valid_entry(E)		((E) & PG_V) /* is PDE or PTE valid? */
 
 #if defined(__x86_64__) || defined(PAE)
 #define X86_MMAP_FLAG_SHIFT	(64 - PGSHIFT)
 #else
 #define X86_MMAP_FLAG_SHIFT	(32 - PGSHIFT)
 #endif
 
 #define X86_MMAP_FLAG_MASK	0xf
 #define X86_MMAP_FLAG_PREFETCH	0x1
 
 /*
  * prototypes
  */
 
 void		pmap_activate(struct lwp *);
 void		pmap_bootstrap(vaddr_t);
 bool		pmap_clear_attrs(struct vm_page *, unsigned);
 bool		pmap_pv_clear_attrs(paddr_t, unsigned);
 void		pmap_deactivate(struct lwp *);
 void		pmap_page_remove(struct vm_page *);
 void		pmap_pv_remove(paddr_t);
 void		pmap_remove(struct pmap *, vaddr_t, vaddr_t);
 bool		pmap_test_attrs(struct vm_page *, unsigned);
 void		pmap_write_protect(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
 void		pmap_load(void);
 paddr_t		pmap_init_tmp_pgtbl(paddr_t);
 void		pmap_remove_all(struct pmap *);
 void		pmap_ldt_cleanup(struct lwp *);
 void		pmap_ldt_sync(struct pmap *);
 void		pmap_kremove_local(vaddr_t, vsize_t);
 
 void		pmap_emap_enter(vaddr_t, paddr_t, vm_prot_t);
 void		pmap_emap_remove(vaddr_t, vsize_t);
 void		pmap_emap_sync(bool);
 
 #define __HAVE_PMAP_PV_TRACK	1
 void		pmap_pv_init(void);
 void		pmap_pv_track(paddr_t, psize_t);
 void		pmap_pv_untrack(paddr_t, psize_t);
 
 void		pmap_map_ptes(struct pmap *, struct pmap **, pd_entry_t **,
 		    pd_entry_t * const **);
 void		pmap_unmap_ptes(struct pmap *, struct pmap *);
 
 int		pmap_pdes_invalid(vaddr_t, pd_entry_t * const *, pd_entry_t *);
 
 u_int		x86_mmap_flags(paddr_t);
 
 bool		pmap_is_curpmap(struct pmap *);
 
 #ifndef __HAVE_DIRECT_MAP
 void		pmap_vpage_cpu_init(struct cpu_info *);
 #endif
 
 vaddr_t	reserve_dumppages(vaddr_t); /* XXX: not a pmap fn */
 
 typedef enum tlbwhy {
 	TLBSHOOT_APTE,
 	TLBSHOOT_KENTER,
 	TLBSHOOT_KREMOVE,
 	TLBSHOOT_FREE_PTP1,
 	TLBSHOOT_FREE_PTP2,
 	TLBSHOOT_REMOVE_PTE,
 	TLBSHOOT_REMOVE_PTES,
 	TLBSHOOT_SYNC_PV1,
 	TLBSHOOT_SYNC_PV2,
 	TLBSHOOT_WRITE_PROTECT,
 	TLBSHOOT_ENTER,
 	TLBSHOOT_UPDATE,
 	TLBSHOOT_BUS_DMA,
 	TLBSHOOT_BUS_SPACE,
 	TLBSHOOT__MAX,
 } tlbwhy_t;
 
 void		pmap_tlb_init(void);
 void		pmap_tlb_cpu_init(struct cpu_info *);
 void		pmap_tlb_shootdown(pmap_t, vaddr_t, pt_entry_t, tlbwhy_t);
 void		pmap_tlb_shootnow(void);
 void		pmap_tlb_intr(void);
 
 #define __HAVE_PMAP_EMAP
 
 #define PMAP_GROWKERNEL	/* turn on pmap_growkernel interface */
 #define PMAP_FORK		/* turn on pmap_fork interface */
 
 /*
  * Do idle page zero'ing uncached to avoid polluting the cache.
  */
 bool	pmap_pageidlezero(paddr_t);
 #define PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))
 
 /*
  * inline functions
  */
 
 __inline static bool __unused
 pmap_pdes_valid(vaddr_t va, pd_entry_t * const *pdes, pd_entry_t *lastpde)
 {
 	return pmap_pdes_invalid(va, pdes, lastpde) == 0;
 }
 
 /*
  * pmap_update_pg: flush one page from the TLB (or flush the whole thing
  * if hardware doesn't support one-page flushing)
  */
 
 __inline static void __unused
 pmap_update_pg(vaddr_t va)
 {
 	invlpg(va);
 }
 
 /*
  * pmap_update_2pg: flush two pages from the TLB
  */
 
 __inline static void __unused
 pmap_update_2pg(vaddr_t va, vaddr_t vb)
 {
 	invlpg(va);
 	invlpg(vb);
 }
 
 /*
  * pmap_page_protect: change the protection of all recorded mappings
  *	of a managed page
  *
  * => this function is a frontend for pmap_page_remove/pmap_clear_attrs
  * => we only have to worry about making the page more protected.
  *	unprotecting a page is done on-demand at fault time.
  */
 
 __inline static void __unused
 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
 {
 	if ((prot & VM_PROT_WRITE) == 0) {
 		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
 			(void) pmap_clear_attrs(pg, PG_RW);
 		} else {
 			pmap_page_remove(pg);
 		}
 	}
 }
 
 /*
  * pmap_pv_protect: change the protection of all recorded mappings
  *	of an unmanaged page
  */
 
 __inline static void __unused
 pmap_pv_protect(paddr_t pa, vm_prot_t prot)
 {
 	if ((prot & VM_PROT_WRITE) == 0) {
 		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
 			(void) pmap_pv_clear_attrs(pa, PG_RW);
 		} else {
 			pmap_pv_remove(pa);
 		}
 	}
 }
 
 /*
  * pmap_protect: change the protection of pages in a pmap
  *
  * => this function is a frontend for pmap_remove/pmap_write_protect
  * => we only have to worry about making the page more protected.
  *	unprotecting a page is done on-demand at fault time.
  */
 
 __inline static void __unused
 pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
 {
 	if ((prot & VM_PROT_WRITE) == 0) {
 		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
 			pmap_write_protect(pmap, sva, eva, prot);
 		} else {
 			pmap_remove(pmap, sva, eva);
 		}
 	}
 }
 
 /*
  * various address inlines
  *
  *  vtopte: return a pointer to the PTE mapping a VA, works only for
  *  user and PT addresses
  *
  *  kvtopte: return a pointer to the PTE mapping a kernel VA
  */
 
 #include <lib/libkern/libkern.h>
 
 static __inline pt_entry_t * __unused
 vtopte(vaddr_t va)
 {
 
 	KASSERT(va < VM_MIN_KERNEL_ADDRESS);
 
 	return (PTE_BASE + pl1_i(va));
 }
 
 static __inline pt_entry_t * __unused
 kvtopte(vaddr_t va)
 {
 	pd_entry_t *pde;
 
 	KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
 
 	pde = L2_BASE + pl2_i(va);
 	if (*pde & PG_PS)
 		return ((pt_entry_t *)pde);
 
 	return (PTE_BASE + pl1_i(va));
 }
 
 paddr_t	vtophys(vaddr_t);
 vaddr_t	pmap_map(vaddr_t, paddr_t, paddr_t, vm_prot_t);
 void	pmap_cpu_init_late(struct cpu_info *);
 bool	sse2_idlezero_page(void *);
 
 #ifdef XEN
 #include <sys/bitops.h>
 
 #define XPTE_MASK	L1_FRAME
 /* Selects the index of a PTE in (A)PTE_BASE */
 #define XPTE_SHIFT	(L1_SHIFT - ilog2(sizeof(pt_entry_t)))
 
 /* PTE access inline fuctions */
 
 /*
  * Get the machine address of the pointed pte
  * We use hardware MMU to get value so works only for levels 1-3
  */
 
 static __inline paddr_t
 xpmap_ptetomach(pt_entry_t *pte)
 {
 	pt_entry_t *up_pte;
 	vaddr_t va = (vaddr_t) pte;
 
 	va = ((va & XPTE_MASK) >> XPTE_SHIFT) | (vaddr_t) PTE_BASE;
 	up_pte = (pt_entry_t *) va;
 
 	return (paddr_t) (((*up_pte) & PG_FRAME) + (((vaddr_t) pte) & (~PG_FRAME & ~VA_SIGN_MASK)));
 }
 
 /* Xen helpers to change bits of a pte */
 #define XPMAP_UPDATE_DIRECT	1	/* Update direct map entry flags too */
 
 paddr_t	vtomach(vaddr_t);
 #define vtomfn(va) (vtomach(va) >> PAGE_SHIFT)
 #endif	/* XEN */
 
 /* pmap functions with machine addresses */
 void	pmap_kenter_ma(vaddr_t, paddr_t, vm_prot_t, u_int);
 int	pmap_enter_ma(struct pmap *, vaddr_t, paddr_t, paddr_t,
 	    vm_prot_t, u_int, int);
 bool	pmap_extract_ma(pmap_t, vaddr_t, paddr_t *);
 void	pmap_free_ptps(struct vm_page *);
 
 /*
  * Hooks for the pool allocator.
  */
 #define POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))
 
 #ifdef __HAVE_DIRECT_MAP
 
-#define L4_SLOT_DIRECT		509
+#define L4_SLOT_DIRECT		460
 #define PDIR_SLOT_DIRECT	L4_SLOT_DIRECT
 
 #define PMAP_DIRECT_BASE	(VA_SIGN_NEG((L4_SLOT_DIRECT * NBPD_L4)))
 #define PMAP_DIRECT_END		(VA_SIGN_NEG(((L4_SLOT_DIRECT + 1) * NBPD_L4)))
 
 #define PMAP_DIRECT_MAP(pa)	((vaddr_t)PMAP_DIRECT_BASE + (pa))
 #define PMAP_DIRECT_UNMAP(va)	((paddr_t)(va) - PMAP_DIRECT_BASE)
 
 /*
  * Alternate mapping hooks for pool pages.
  */
 #define PMAP_MAP_POOLPAGE(pa)	PMAP_DIRECT_MAP((pa))
 #define PMAP_UNMAP_POOLPAGE(va)	PMAP_DIRECT_UNMAP((va))
 
 void	pagezero(vaddr_t);
 
 #endif /* __HAVE_DIRECT_MAP */
 
 #endif /* _KERNEL */
 
 #endif /* _X86_PMAP_H_ */
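As a side note, the PMAP_DIRECT_MAP()/PMAP_DIRECT_UNMAP() pair in the hunk
above is plain offset arithmetic against PMAP_DIRECT_BASE, so any physical
address below NBPD_L4 round-trips. A self-contained sketch under the same
assumed amd64 constants as earlier (the pa value is hypothetical):

#include <assert.h>
#include <stdint.h>

#define NBPD_L4			(1ULL << 39)
#define VA_SIGN_MASK		0xffff000000000000ULL
#define VA_SIGN_NEG(va)		((va) | VA_SIGN_MASK)

#define L4_SLOT_DIRECT		460
#define PMAP_DIRECT_BASE	(VA_SIGN_NEG(L4_SLOT_DIRECT * NBPD_L4))

/* Same shape as the kernel macros: map a PA into the direct window and back. */
#define PMAP_DIRECT_MAP(pa)	((uint64_t)PMAP_DIRECT_BASE + (pa))
#define PMAP_DIRECT_UNMAP(va)	((uint64_t)(va) - PMAP_DIRECT_BASE)

int
main(void)
{
	uint64_t pa = 0x1234000;	/* hypothetical physical address */

	/* Slot 460 places the direct map at 0xffffe60000000000. */
	assert(PMAP_DIRECT_MAP(pa) == 0xffffe60001234000ULL);
	assert(PMAP_DIRECT_UNMAP(PMAP_DIRECT_MAP(pa)) == pa);

	/* Recover the L4 slot from the VA, mirroring what pl4_i() computes. */
	assert(((PMAP_DIRECT_MAP(pa) & ~VA_SIGN_MASK) >> 39) == L4_SLOT_DIRECT);
	return 0;
}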