Sun Jan 29 16:24:51 2012 UTC (para)
convert from malloc(9) to kmem(9)


diff -r1.166 -r1.167 src/sys/arch/sun3/sun3/pmap.c

cvs diff -r1.166 -r1.167 src/sys/arch/sun3/sun3/pmap.c
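
The change is a mechanical allocator conversion: the file's dynamic allocations move from malloc(9) to kmem(9). Only the header swap is visible in the excerpt below; the call sites change further down in the file. A minimal sketch of the pattern, with a hypothetical tag and size variable (note that kmem_free(9) needs the allocation size, which malloc(9)'s free() looked up itself):

	/* before: malloc(9) */
	p = malloc(sz, M_TEMP, M_WAITOK);
	free(p, M_TEMP);

	/* after: kmem(9) -- the caller must carry sz to the free */
	p = kmem_alloc(sz, KM_SLEEP);
	kmem_free(p, sz);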

--- src/sys/arch/sun3/sun3/pmap.c 2011/06/03 17:03:53 1.166
+++ src/sys/arch/sun3/sun3/pmap.c 2012/01/29 16:24:51 1.167
@@ -1,2902 +1,2902 @@
1/* $NetBSD: pmap.c,v 1.166 2011/06/03 17:03:53 tsutsui Exp $ */ 1/* $NetBSD: pmap.c,v 1.167 2012/01/29 16:24:51 para Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1996 The NetBSD Foundation, Inc. 4 * Copyright (c) 1996 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Adam Glass and Gordon W. Ross. 8 * by Adam Glass and Gordon W. Ross.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32/* 32/*
33 * Some notes: 33 * Some notes:
34 * 34 *
35 * sun3s have contexts (8). In this pmap design, the kernel is mapped 35 * sun3s have contexts (8). In this pmap design, the kernel is mapped
36 * into all contexts. Processes take up a known portion of the context, 36 * into all contexts. Processes take up a known portion of the context,
37 * and compete for the available contexts on an LRU basis. 37 * and compete for the available contexts on an LRU basis.
38 * 38 *
39 * sun3s also have this evil "PMEG" crapola. Essentially each "context"'s 39 * sun3s also have this evil "PMEG" crapola. Essentially each "context"'s
40 * address space is defined by the 2048 one-byte entries in the segment map. 40 * address space is defined by the 2048 one-byte entries in the segment map.
41 * Each of these 1-byte entries points to a "Page Map Entry Group" (PMEG) 41 * Each of these 1-byte entries points to a "Page Map Entry Group" (PMEG)
42 * which contains the mappings for that virtual segment. (This strange 42 * which contains the mappings for that virtual segment. (This strange
43 * terminology was invented by Sun and is preserved here for consistency.) 43 * terminology was invented by Sun and is preserved here for consistency.)
44 * Each PMEG maps a segment of 128Kb length, with 16 pages of 8Kb each. 44 * Each PMEG maps a segment of 128Kb length, with 16 pages of 8Kb each.
45 * 45 *
46 * As you might guess, these PMEGs are in short supply and heavy demand. 46 * As you might guess, these PMEGs are in short supply and heavy demand.
47 * PMEGs allocated to the kernel are "static" in the sense that they can't 47 * PMEGs allocated to the kernel are "static" in the sense that they can't
48 * be stolen from it. PMEGs allocated to a particular segment of a 48 * be stolen from it. PMEGs allocated to a particular segment of a
49 * pmap's virtual space will be fought over by the other pmaps. 49 * pmap's virtual space will be fought over by the other pmaps.
50 */ 50 */
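
For concreteness, the geometry described in the comment above works out as follows (simple arithmetic from the numbers given; SEGSHIFT == 17 is an assumption consistent with 128KB segments):

	2048 segmap entries * 128KB/segment = 256MB of virtual space per context
	128KB/segment / 8KB/page = 16 PTEs per PMEG
	VA_SEGNUM(va) == va >> SEGSHIFT == va >> 17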
51 51
52/* 52/*
53 * Cache management: 53 * Cache management:
54 * All sun3 cache implementations are write-back. 54 * All sun3 cache implementations are write-back.
55 * Flushes must be done before removing translations 55 * Flushes must be done before removing translations
56 * from the MMU because the cache uses the MMU. 56 * from the MMU because the cache uses the MMU.
57 */ 57 */
58 58
59/* 59/*
60 * wanted attributes: 60 * wanted attributes:
61 * pmegs that aren't needed by a pmap remain in the MMU. 61 * pmegs that aren't needed by a pmap remain in the MMU.
62 * quick context switches between pmaps 62 * quick context switches between pmaps
63 * kernel is in all contexts 63 * kernel is in all contexts
64 */ 64 */
65 65
66/* 66/*
67 * Project1: Use a "null" context for processes that have not 67 * Project1: Use a "null" context for processes that have not
68 * touched any user-space address recently. This is efficient 68 * touched any user-space address recently. This is efficient
69 * for things that stay in the kernel for a while, waking up 69 * for things that stay in the kernel for a while, waking up
70 * to handle some I/O then going back to sleep (e.g. nfsd). 70 * to handle some I/O then going back to sleep (e.g. nfsd).
71 * If and when such a process returns to user-mode, it will 71 * If and when such a process returns to user-mode, it will
72 * fault and be given a real context at that time. 72 * fault and be given a real context at that time.
73 * 73 *
74 * This also lets context switch be fast, because all we need 74 * This also lets context switch be fast, because all we need
75 * to do there for the MMU is slam the context register. 75 * to do there for the MMU is slam the context register.
76 * 76 *
77 * Project2: Use a private pool of PV elements. This pool can be 77 * Project2: Use a private pool of PV elements. This pool can be
78 * fixed size because the total mapped virtual space supported by 78 * fixed size because the total mapped virtual space supported by
79 * the MMU H/W (and this pmap) is fixed for all time. 79 * the MMU H/W (and this pmap) is fixed for all time.
80 */ 80 */
81 81
82#include <sys/cdefs.h> 82#include <sys/cdefs.h>
83__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.166 2011/06/03 17:03:53 tsutsui Exp $"); 83__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.167 2012/01/29 16:24:51 para Exp $");
84 84
85#include "opt_ddb.h" 85#include "opt_ddb.h"
86#include "opt_pmap_debug.h" 86#include "opt_pmap_debug.h"
87 87
88#include <sys/param.h> 88#include <sys/param.h>
89#include <sys/systm.h> 89#include <sys/systm.h>
90#include <sys/proc.h> 90#include <sys/proc.h>
91#include <sys/malloc.h> 91#include <sys/kmem.h>
92#include <sys/pool.h> 92#include <sys/pool.h>
93#include <sys/queue.h> 93#include <sys/queue.h>
94#include <sys/kcore.h> 94#include <sys/kcore.h>
95#include <sys/atomic.h> 95#include <sys/atomic.h>
96 96
97#include <uvm/uvm.h> 97#include <uvm/uvm.h>
98 98
99#include <machine/cpu.h> 99#include <machine/cpu.h>
100#include <machine/dvma.h> 100#include <machine/dvma.h>
101#include <machine/idprom.h> 101#include <machine/idprom.h>
102#include <machine/kcore.h> 102#include <machine/kcore.h>
103#include <machine/mon.h> 103#include <machine/mon.h>
104#include <machine/pmap.h> 104#include <machine/pmap.h>
105#include <machine/pte.h> 105#include <machine/pte.h>
106#include <machine/vmparam.h> 106#include <machine/vmparam.h>
107#include <m68k/cacheops.h> 107#include <m68k/cacheops.h>
108 108
109#include <sun3/sun3/cache.h> 109#include <sun3/sun3/cache.h>
110#include <sun3/sun3/control.h> 110#include <sun3/sun3/control.h>
111#include <sun3/sun3/fc.h> 111#include <sun3/sun3/fc.h>
112#include <sun3/sun3/machdep.h> 112#include <sun3/sun3/machdep.h>
113#include <sun3/sun3/obmem.h> 113#include <sun3/sun3/obmem.h>
114 114
115#ifdef DDB 115#ifdef DDB
116#include <ddb/db_output.h> 116#include <ddb/db_output.h>
117#else 117#else
118#define db_printf printf 118#define db_printf printf
119#endif 119#endif
120 120
121/* Verify this correspondence between definitions. */ 121/* Verify this correspondence between definitions. */
122#if (PMAP_OBIO << PG_MOD_SHIFT) != PGT_OBIO 122#if (PMAP_OBIO << PG_MOD_SHIFT) != PGT_OBIO
123#error "PMAP_XXX definitions don't match pte.h!" 123#error "PMAP_XXX definitions don't match pte.h!"
124#endif 124#endif
125 125
126/* Type bits in a "pseudo" physical address. (XXX: pmap.h?) */ 126/* Type bits in a "pseudo" physical address. (XXX: pmap.h?) */
127#define PMAP_TYPE PMAP_VME32 127#define PMAP_TYPE PMAP_VME32
128 128
129/* 129/*
130 * Local convenience macros 130 * Local convenience macros
131 */ 131 */
132 132
133#define DVMA_MAP_END (DVMA_MAP_BASE + DVMA_MAP_AVAIL) 133#define DVMA_MAP_END (DVMA_MAP_BASE + DVMA_MAP_AVAIL)
134 134
135/* User segments from 0 to KERNBASE */ 135/* User segments from 0 to KERNBASE */
136#define NUSEG (KERNBASE3 / NBSG) 136#define NUSEG (KERNBASE3 / NBSG)
137/* The remainder are kernel segments. */ 137/* The remainder are kernel segments. */
138#define NKSEG (NSEGMAP - NUSEG) 138#define NKSEG (NSEGMAP - NUSEG)
139 139
140#define VA_SEGNUM(x) ((u_int)(x) >> SEGSHIFT) 140#define VA_SEGNUM(x) ((u_int)(x) >> SEGSHIFT)
141 141
142/* 142/*
143 * Only "main memory" pages are registered in the pv_lists. 143 * Only "main memory" pages are registered in the pv_lists.
144 * This macro is used to determine if a given pte refers to 144 * This macro is used to determine if a given pte refers to
145 * "main memory" or not. One slight hack here deserves more 145 * "main memory" or not. One slight hack here deserves more
146 * explanation: The Sun frame buffers all appear as PG_OBMEM 146 * explanation: The Sun frame buffers all appear as PG_OBMEM
147 * devices but way up near the end of the address space. 147 * devices but way up near the end of the address space.
148 * We do not want to consider these as "main memory" so the 148 * We do not want to consider these as "main memory" so the
149 * macro below treats the high bits of the PFN as type bits. 149 * macro below treats the high bits of the PFN as type bits.
150 * 150 *
151 * Note that on the 3/60 only 16 bits of PFN are stored in the 151 * Note that on the 3/60 only 16 bits of PFN are stored in the
152 * MMU and the top 3 bits read back as zero. This means a 152 * MMU and the top 3 bits read back as zero. This means a
153 * translation entered into the mmu for physical address 153 * translation entered into the mmu for physical address
154 * 0xFF000000 will look like 0x1F000000 after one reads back 154 * 0xFF000000 will look like 0x1F000000 after one reads back
155 * the pte and converts the PFN to a physical address. 155 * the pte and converts the PFN to a physical address.
156 */ 156 */
157#define MEM_BITS (PG_TYPE | PA_PGNUM(0xF8000000)) 157#define MEM_BITS (PG_TYPE | PA_PGNUM(0xF8000000))
158#define IS_MAIN_MEM(pte) (((pte) & MEM_BITS) == 0) 158#define IS_MAIN_MEM(pte) (((pte) & MEM_BITS) == 0)
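
A worked instance of the 3/60 quirk described above, illustrated at the physical-address level: a frame buffer entered at physical address 0xFF000000 reads back as 0x1F000000, yet IS_MAIN_MEM() still rejects it, because PA_PGNUM(0xF8000000) keeps the surviving high PFN bits in the mask:

	0x1F000000 & 0xF8000000 == 0x18000000   /* non-zero, so not "main memory" */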
159 159
160/* Does this (pseudo) PA represent device space? */ 160/* Does this (pseudo) PA represent device space? */
161#define PA_DEV_MASK (0xF8000000 | PMAP_TYPE) 161#define PA_DEV_MASK (0xF8000000 | PMAP_TYPE)
162#define PA_IS_DEV(pa) ((pa) & PA_DEV_MASK) 162#define PA_IS_DEV(pa) ((pa) & PA_DEV_MASK)
163 163
164/* 164/*
165 * Is there a Virtually Addressed Cache (VAC) alias problem 165 * Is there a Virtually Addressed Cache (VAC) alias problem
166 * if one page is mapped at both a1 and a2? 166 * if one page is mapped at both a1 and a2?
167 */ 167 */
168#define BADALIAS(a1, a2) (((int)(a1) ^ (int)(a2)) & SEGOFSET) 168#define BADALIAS(a1, a2) (((int)(a1) ^ (int)(a2)) & SEGOFSET)
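
The VAC on these machines is virtually indexed, so two mappings of one physical page can coexist cacheably only if they agree in the segment-offset bits. A quick check, assuming SEGOFSET == 0x1FFFF (128KB segments):

	BADALIAS(0x00002000, 0x00022000) == 0        /* same offset mod 128KB: harmless */
	BADALIAS(0x00002000, 0x00003000) == 0x1000   /* offsets differ: must map non-cached */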
169 169
170 170
171/* 171/*
172 * Debugging support. 172 * Debugging support.
173 */ 173 */
174#define PMD_ENTER 1 174#define PMD_ENTER 1
175#define PMD_LINK 2 175#define PMD_LINK 2
176#define PMD_PROTECT 4 176#define PMD_PROTECT 4
177#define PMD_SWITCH 8 177#define PMD_SWITCH 8
178#define PMD_COW 0x10 178#define PMD_COW 0x10
179#define PMD_MODBIT 0x20 179#define PMD_MODBIT 0x20
180#define PMD_REFBIT 0x40 180#define PMD_REFBIT 0x40
181#define PMD_WIRING 0x80 181#define PMD_WIRING 0x80
182#define PMD_CONTEXT 0x100 182#define PMD_CONTEXT 0x100
183#define PMD_CREATE 0x200 183#define PMD_CREATE 0x200
184#define PMD_SEGMAP 0x400 184#define PMD_SEGMAP 0x400
185#define PMD_SETPTE 0x800 185#define PMD_SETPTE 0x800
186#define PMD_FAULT 0x1000 186#define PMD_FAULT 0x1000
187#define PMD_KMAP 0x2000 187#define PMD_KMAP 0x2000
188 188
189#define PMD_REMOVE PMD_ENTER 189#define PMD_REMOVE PMD_ENTER
190#define PMD_UNLINK PMD_LINK 190#define PMD_UNLINK PMD_LINK
191 191
192#ifdef PMAP_DEBUG 192#ifdef PMAP_DEBUG
193int pmap_debug = 0; 193int pmap_debug = 0;
194int pmap_db_watchva = -1; 194int pmap_db_watchva = -1;
195int pmap_db_watchpmeg = -1; 195int pmap_db_watchpmeg = -1;
196#endif /* PMAP_DEBUG */ 196#endif /* PMAP_DEBUG */
197 197
198/* 198/*
199 * Miscellaneous variables. 199 * Miscellaneous variables.
200 * 200 *
201 * For simplicity, this interface retains the variables 201 * For simplicity, this interface retains the variables
202 * that were used in the old interface (without NONCONTIG). 202 * that were used in the old interface (without NONCONTIG).
203 * These are set in pmap_bootstrap() and used in 203 * These are set in pmap_bootstrap() and used in
204 * pmap_next_page(). 204 * pmap_next_page().
205 */ 205 */
206vaddr_t virtual_avail, virtual_end; 206vaddr_t virtual_avail, virtual_end;
207paddr_t avail_start, avail_end; 207paddr_t avail_start, avail_end;
208#define managed(pa) (((pa) >= avail_start) && ((pa) < avail_end)) 208#define managed(pa) (((pa) >= avail_start) && ((pa) < avail_end))
209 209
210/* used to skip the Sun3/50 video RAM */ 210/* used to skip the Sun3/50 video RAM */
211static vaddr_t hole_start, hole_size; 211static vaddr_t hole_start, hole_size;
212 212
213/* This is for pmap_next_page() */ 213/* This is for pmap_next_page() */
214static paddr_t avail_next; 214static paddr_t avail_next;
215 215
216/* This is where we map a PMEG without a context. */ 216/* This is where we map a PMEG without a context. */
217static vaddr_t temp_seg_va; 217static vaddr_t temp_seg_va;
218 218
219/* 219/*
220 * Location to store virtual addresses 220 * Location to store virtual addresses
221 * to be used in copy/zero operations. 221 * to be used in copy/zero operations.
222 */ 222 */
223vaddr_t tmp_vpages[2] = { 223vaddr_t tmp_vpages[2] = {
224 SUN3_MONSHORTSEG, 224 SUN3_MONSHORTSEG,
225 SUN3_MONSHORTSEG + PAGE_SIZE }; 225 SUN3_MONSHORTSEG + PAGE_SIZE };
226int tmp_vpages_inuse; 226int tmp_vpages_inuse;
227 227
228static int pmap_version = 1; 228static int pmap_version = 1;
229static struct pmap kernel_pmap_store; 229static struct pmap kernel_pmap_store;
230struct pmap *const kernel_pmap_ptr = &kernel_pmap_store; 230struct pmap *const kernel_pmap_ptr = &kernel_pmap_store;
231#define kernel_pmap (kernel_pmap_ptr) 231#define kernel_pmap (kernel_pmap_ptr)
232static u_char kernel_segmap[NSEGMAP]; 232static u_char kernel_segmap[NSEGMAP];
233 233
234/* memory pool for pmap structures */ 234/* memory pool for pmap structures */
235struct pool pmap_pmap_pool; 235struct pool pmap_pmap_pool;
236 236
237/* statistics... */ 237/* statistics... */
238struct pmap_stats { 238struct pmap_stats {
239 int ps_enter_firstpv; /* pv heads entered */ 239 int ps_enter_firstpv; /* pv heads entered */
240 int ps_enter_secondpv; /* pv nonheads entered */ 240 int ps_enter_secondpv; /* pv nonheads entered */
241 int ps_unlink_pvfirst; /* of pv_unlinks on head */ 241 int ps_unlink_pvfirst; /* of pv_unlinks on head */
242 int ps_unlink_pvsearch; /* of pv_unlink searches */ 242 int ps_unlink_pvsearch; /* of pv_unlink searches */
243 int ps_pmeg_faultin; /* pmegs reloaded */ 243 int ps_pmeg_faultin; /* pmegs reloaded */
244 int ps_changeprots; /* of calls to changeprot */ 244 int ps_changeprots; /* of calls to changeprot */
245 int ps_changewire; /* useless wiring changes */ 245 int ps_changewire; /* useless wiring changes */
246 int ps_npg_prot_all; /* of active pages protected */ 246 int ps_npg_prot_all; /* of active pages protected */
247 int ps_npg_prot_actual; /* pages actually affected */ 247 int ps_npg_prot_actual; /* pages actually affected */
248 int ps_vac_uncached; /* non-cached due to bad alias */ 248 int ps_vac_uncached; /* non-cached due to bad alias */
249 int ps_vac_recached; /* re-cached when bad alias gone */ 249 int ps_vac_recached; /* re-cached when bad alias gone */
250} pmap_stats; 250} pmap_stats;
251 251
252#ifdef PMAP_DEBUG 252#ifdef PMAP_DEBUG
253#define CHECK_SPL() do { \ 253#define CHECK_SPL() do { \
254 if ((getsr() & PSL_IPL) < PSL_IPL4) \ 254 if ((getsr() & PSL_IPL) < PSL_IPL4) \
255 panic("pmap: bad spl, line %d", __LINE__); \ 255 panic("pmap: bad spl, line %d", __LINE__); \
256} while (0) 256} while (0)
257#else /* PMAP_DEBUG */ 257#else /* PMAP_DEBUG */
258#define CHECK_SPL() (void)0 258#define CHECK_SPL() (void)0
259#endif /* PMAP_DEBUG */ 259#endif /* PMAP_DEBUG */
260 260
261 261
262/* 262/*
263 * PV support. 263 * PV support.
264 * (i.e. Find all virtual mappings of a physical page.) 264 * (i.e. Find all virtual mappings of a physical page.)
265 */ 265 */
266 266
267int pv_initialized = 0; 267int pv_initialized = 0;
268 268
269/* One of these for each mapped virtual page. */ 269/* One of these for each mapped virtual page. */
270struct pv_entry { 270struct pv_entry {
271 struct pv_entry *pv_next; 271 struct pv_entry *pv_next;
272 pmap_t pv_pmap; 272 pmap_t pv_pmap;
273 vaddr_t pv_va; 273 vaddr_t pv_va;
274}; 274};
275typedef struct pv_entry *pv_entry_t; 275typedef struct pv_entry *pv_entry_t;
276 276
277/* Table of PV list heads (per physical page). */ 277/* Table of PV list heads (per physical page). */
278static struct pv_entry **pv_head_tbl; 278static struct pv_entry **pv_head_tbl;
279 279
280/* Free list of PV entries. */ 280/* Free list of PV entries. */
281static struct pv_entry *pv_free_list; 281static struct pv_entry *pv_free_list;
282 282
283/* Table of flags (per physical page). */ 283/* Table of flags (per physical page). */
284static u_char *pv_flags_tbl; 284static u_char *pv_flags_tbl;
285 285
286/* These are as in the MMU but shifted by PV_SHIFT. */ 286/* These are as in the MMU but shifted by PV_SHIFT. */
287#define PV_SHIFT 24 287#define PV_SHIFT 24
288#define PV_VALID 0x80 288#define PV_VALID 0x80
289#define PV_WRITE 0x40 289#define PV_WRITE 0x40
290#define PV_SYSTEM 0x20 290#define PV_SYSTEM 0x20
291#define PV_NC 0x10 291#define PV_NC 0x10
292#define PV_PERM 0xF0 292#define PV_PERM 0xF0
293#define PV_TYPE 0x0C 293#define PV_TYPE 0x0C
294#define PV_REF 0x02 294#define PV_REF 0x02
295#define PV_MOD 0x01 295#define PV_MOD 0x01
296 296
297 297
298/* 298/*
299 * context structures, and queues 299 * context structures, and queues
300 */ 300 */
301 301
302struct context_state { 302struct context_state {
303 TAILQ_ENTRY(context_state) context_link; 303 TAILQ_ENTRY(context_state) context_link;
304 int context_num; 304 int context_num;
305 struct pmap *context_upmap; 305 struct pmap *context_upmap;
306}; 306};
307typedef struct context_state *context_t; 307typedef struct context_state *context_t;
308 308
309#define INVALID_CONTEXT -1 /* impossible value */ 309#define INVALID_CONTEXT -1 /* impossible value */
310#define EMPTY_CONTEXT 0 310#define EMPTY_CONTEXT 0
311#define FIRST_CONTEXT 1 311#define FIRST_CONTEXT 1
312#define has_context(pmap) ((pmap)->pm_ctxnum != EMPTY_CONTEXT) 312#define has_context(pmap) ((pmap)->pm_ctxnum != EMPTY_CONTEXT)
313 313
314TAILQ_HEAD(context_tailq, context_state) 314TAILQ_HEAD(context_tailq, context_state)
315 context_free_queue, context_active_queue; 315 context_free_queue, context_active_queue;
316 316
317static struct context_state context_array[NCONTEXT]; 317static struct context_state context_array[NCONTEXT];
318 318
319 319
320/* 320/*
321 * PMEG structures, queues, and macros 321 * PMEG structures, queues, and macros
322 */ 322 */
323#define PMEGQ_FREE 0 323#define PMEGQ_FREE 0
324#define PMEGQ_INACTIVE 1 324#define PMEGQ_INACTIVE 1
325#define PMEGQ_ACTIVE 2 325#define PMEGQ_ACTIVE 2
326#define PMEGQ_KERNEL 3 326#define PMEGQ_KERNEL 3
327#define PMEGQ_NONE 4 327#define PMEGQ_NONE 4
328 328
329struct pmeg_state { 329struct pmeg_state {
330 TAILQ_ENTRY(pmeg_state) pmeg_link; 330 TAILQ_ENTRY(pmeg_state) pmeg_link;
331 int pmeg_index; 331 int pmeg_index;
332 pmap_t pmeg_owner; 332 pmap_t pmeg_owner;
333 int pmeg_version; 333 int pmeg_version;
334 vaddr_t pmeg_va; 334 vaddr_t pmeg_va;
335 int pmeg_wired; 335 int pmeg_wired;
336 int pmeg_reserved; 336 int pmeg_reserved;
337 int pmeg_vpages; 337 int pmeg_vpages;
338 int pmeg_qstate; 338 int pmeg_qstate;
339}; 339};
340 340
341typedef struct pmeg_state *pmeg_t; 341typedef struct pmeg_state *pmeg_t;
342 342
343#define PMEG_INVAL (NPMEG-1) 343#define PMEG_INVAL (NPMEG-1)
344#define PMEG_NULL (pmeg_t) NULL 344#define PMEG_NULL (pmeg_t) NULL
345 345
346/* XXX - Replace pmeg_kernel_queue with pmeg_wired_queue ? */ 346/* XXX - Replace pmeg_kernel_queue with pmeg_wired_queue ? */
347TAILQ_HEAD(pmeg_tailq, pmeg_state) 347TAILQ_HEAD(pmeg_tailq, pmeg_state)
348 pmeg_free_queue, pmeg_inactive_queue, 348 pmeg_free_queue, pmeg_inactive_queue,
349 pmeg_active_queue, pmeg_kernel_queue; 349 pmeg_active_queue, pmeg_kernel_queue;
350 350
351static struct pmeg_state pmeg_array[NPMEG]; 351static struct pmeg_state pmeg_array[NPMEG];
352 352
353 353
354/* 354/*
355 * prototypes 355 * prototypes
356 */ 356 */
357static int get_pte_pmeg(int, int); 357static int get_pte_pmeg(int, int);
358static void set_pte_pmeg(int, int, int); 358static void set_pte_pmeg(int, int, int);
359 359
360static void context_allocate(pmap_t); 360static void context_allocate(pmap_t);
361static void context_free(pmap_t); 361static void context_free(pmap_t);
362static void context_init(void); 362static void context_init(void);
363 363
364static void pmeg_init(void); 364static void pmeg_init(void);
365static void pmeg_reserve(int); 365static void pmeg_reserve(int);
366 366
367static pmeg_t pmeg_allocate(pmap_t, vaddr_t); 367static pmeg_t pmeg_allocate(pmap_t, vaddr_t);
368static void pmeg_mon_init(vaddr_t, vaddr_t, int); 368static void pmeg_mon_init(vaddr_t, vaddr_t, int);
369static void pmeg_release(pmeg_t); 369static void pmeg_release(pmeg_t);
370static void pmeg_free(pmeg_t); 370static void pmeg_free(pmeg_t);
371static pmeg_t pmeg_cache(pmap_t, vaddr_t); 371static pmeg_t pmeg_cache(pmap_t, vaddr_t);
372static void pmeg_set_wiring(pmeg_t, vaddr_t, int); 372static void pmeg_set_wiring(pmeg_t, vaddr_t, int);
373 373
374static int pv_link (pmap_t, int, vaddr_t); 374static int pv_link (pmap_t, int, vaddr_t);
375static void pv_unlink(pmap_t, int, vaddr_t); 375static void pv_unlink(pmap_t, int, vaddr_t);
376static void pv_remove_all(paddr_t); 376static void pv_remove_all(paddr_t);
377static void pv_changepte(paddr_t, int, int); 377static void pv_changepte(paddr_t, int, int);
378static u_int pv_syncflags(pv_entry_t); 378static u_int pv_syncflags(pv_entry_t);
379static void pv_init(void); 379static void pv_init(void);
380 380
381static void pmeg_clean(pmeg_t); 381static void pmeg_clean(pmeg_t);
382static void pmeg_clean_free(void); 382static void pmeg_clean_free(void);
383 383
384static void pmap_common_init(pmap_t); 384static void pmap_common_init(pmap_t);
385static void pmap_kernel_init(pmap_t); 385static void pmap_kernel_init(pmap_t);
386static void pmap_user_init(pmap_t); 386static void pmap_user_init(pmap_t);
387static void pmap_page_upload(void); 387static void pmap_page_upload(void);
388 388
389static void pmap_enter_kernel(vaddr_t, int, bool); 389static void pmap_enter_kernel(vaddr_t, int, bool);
390static void pmap_enter_user(pmap_t, vaddr_t, int, bool); 390static void pmap_enter_user(pmap_t, vaddr_t, int, bool);
391 391
392static void pmap_protect1(pmap_t, vaddr_t, vaddr_t); 392static void pmap_protect1(pmap_t, vaddr_t, vaddr_t);
393static void pmap_protect_mmu(pmap_t, vaddr_t, vaddr_t); 393static void pmap_protect_mmu(pmap_t, vaddr_t, vaddr_t);
394static void pmap_protect_noctx(pmap_t, vaddr_t, vaddr_t); 394static void pmap_protect_noctx(pmap_t, vaddr_t, vaddr_t);
395 395
396static void pmap_remove1(pmap_t, vaddr_t, vaddr_t); 396static void pmap_remove1(pmap_t, vaddr_t, vaddr_t);
397static void pmap_remove_mmu(pmap_t, vaddr_t, vaddr_t); 397static void pmap_remove_mmu(pmap_t, vaddr_t, vaddr_t);
398static void pmap_remove_noctx(pmap_t, vaddr_t, vaddr_t); 398static void pmap_remove_noctx(pmap_t, vaddr_t, vaddr_t);
399 399
400static int pmap_fault_reload(struct pmap *, vaddr_t, int); 400static int pmap_fault_reload(struct pmap *, vaddr_t, int);
401 401
402/* Called only from locore.s and pmap.c */ 402/* Called only from locore.s and pmap.c */
403void _pmap_switch(pmap_t); 403void _pmap_switch(pmap_t);
404 404
405#ifdef PMAP_DEBUG 405#ifdef PMAP_DEBUG
406void pmap_print(pmap_t); 406void pmap_print(pmap_t);
407void pv_print(paddr_t); 407void pv_print(paddr_t);
408void pmeg_print(pmeg_t); 408void pmeg_print(pmeg_t);
409static void pmeg_verify_empty(vaddr_t); 409static void pmeg_verify_empty(vaddr_t);
410#endif /* PMAP_DEBUG */ 410#endif /* PMAP_DEBUG */
411void pmap_pinit(pmap_t); 411void pmap_pinit(pmap_t);
412void pmap_release(pmap_t); 412void pmap_release(pmap_t);
413 413
414/* 414/*
415 * Various in-line helper functions. 415 * Various in-line helper functions.
416 */ 416 */
417 417
418static inline pmap_t 418static inline pmap_t
419current_pmap(void) 419current_pmap(void)
420{ 420{
421 struct vmspace *vm; 421 struct vmspace *vm;
422 struct vm_map *map; 422 struct vm_map *map;
423 pmap_t pmap; 423 pmap_t pmap;
424 424
425 vm = curproc->p_vmspace; 425 vm = curproc->p_vmspace;
426 map = &vm->vm_map; 426 map = &vm->vm_map;
427 pmap = vm_map_pmap(map); 427 pmap = vm_map_pmap(map);
428 428
429 return (pmap); 429 return (pmap);
430} 430}
431 431
432static inline struct pv_entry ** 432static inline struct pv_entry **
433pa_to_pvhead(paddr_t pa) 433pa_to_pvhead(paddr_t pa)
434{ 434{
435 int idx; 435 int idx;
436 436
437 idx = PA_PGNUM(pa); 437 idx = PA_PGNUM(pa);
438#ifdef DIAGNOSTIC 438#ifdef DIAGNOSTIC
439 if (PA_IS_DEV(pa) || (idx >= physmem)) 439 if (PA_IS_DEV(pa) || (idx >= physmem))
440 panic("pmap:pa_to_pvhead: bad pa=0x%lx", pa); 440 panic("pmap:pa_to_pvhead: bad pa=0x%lx", pa);
441#endif 441#endif
442 return (&pv_head_tbl[idx]); 442 return (&pv_head_tbl[idx]);
443} 443}
444 444
445static inline u_char * 445static inline u_char *
446pa_to_pvflags(paddr_t pa) 446pa_to_pvflags(paddr_t pa)
447{ 447{
448 int idx; 448 int idx;
449 449
450 idx = PA_PGNUM(pa); 450 idx = PA_PGNUM(pa);
451#ifdef DIAGNOSTIC 451#ifdef DIAGNOSTIC
452 if (PA_IS_DEV(pa) || (idx >= physmem)) 452 if (PA_IS_DEV(pa) || (idx >= physmem))
453 panic("pmap:pa_to_pvflags: bad pa=0x%lx", pa); 453 panic("pmap:pa_to_pvflags: bad pa=0x%lx", pa);
454#endif 454#endif
455 return (&pv_flags_tbl[idx]); 455 return (&pv_flags_tbl[idx]);
456} 456}
457 457
458/* 458/*
459 * Save the MOD bit from the given PTE using its PA 459 * Save the MOD bit from the given PTE using its PA
460 */ 460 */
461static inline void 461static inline void
462save_modref_bits(int pte) 462save_modref_bits(int pte)
463{ 463{
464 u_char *pv_flags; 464 u_char *pv_flags;
465 465
466 pv_flags = pa_to_pvflags(PG_PA(pte)); 466 pv_flags = pa_to_pvflags(PG_PA(pte));
467 *pv_flags |= ((pte & PG_MODREF) >> PV_SHIFT); 467 *pv_flags |= ((pte & PG_MODREF) >> PV_SHIFT);
468} 468}
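
Given PV_SHIFT == 24 and the PV_* values above, this implies the MMU keeps the referenced/modified bits at 0x02000000 and 0x01000000 in the PTE (an inference from this file; pte.h itself is not shown), so the shift drops them straight onto PV_REF/PV_MOD:

	(pte & 0x03000000) >> 24   /* yields PV_REF|PV_MOD as set by the hardware */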
469 469
470static inline pmeg_t 470static inline pmeg_t
471pmeg_p(int sme) 471pmeg_p(int sme)
472{ 472{
473#ifdef DIAGNOSTIC 473#ifdef DIAGNOSTIC
474 if (sme < 0 || sme >= SEGINV) 474 if (sme < 0 || sme >= SEGINV)
475 panic("pmeg_p: bad sme"); 475 panic("pmeg_p: bad sme");
476#endif 476#endif
477 return &pmeg_array[sme]; 477 return &pmeg_array[sme];
478} 478}
479 479
480#define is_pmeg_wired(pmegp) (pmegp->pmeg_wired != 0) 480#define is_pmeg_wired(pmegp) (pmegp->pmeg_wired != 0)
481 481
482static void  482static void
483pmeg_set_wiring(pmeg_t pmegp, vaddr_t va, int flag) 483pmeg_set_wiring(pmeg_t pmegp, vaddr_t va, int flag)
484{ 484{
485 int idx, mask; 485 int idx, mask;
486 486
487 idx = VA_PTE_NUM(va); 487 idx = VA_PTE_NUM(va);
488 mask = 1 << idx; 488 mask = 1 << idx;
489 489
490 if (flag) 490 if (flag)
491 pmegp->pmeg_wired |= mask; 491 pmegp->pmeg_wired |= mask;
492 else 492 else
493 pmegp->pmeg_wired &= ~mask; 493 pmegp->pmeg_wired &= ~mask;
494} 494}
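
pmeg_wired is thus a 16-bit mask with one bit per page in the segment; is_pmeg_wired() above only asks whether any bit is set. A usage sketch, assuming VA_PTE_NUM() returns the page index 0-15 within the segment:

	pmeg_set_wiring(pmegp, va, 1);   /* va in page 5: pmeg_wired |= 0x20 */
	pmeg_set_wiring(pmegp, va, 0);   /* unwire later: pmeg_wired &= ~0x20 */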
495 495
496/**************************************************************** 496/****************************************************************
497 * Context management functions. 497 * Context management functions.
498 */ 498 */
499 499
500/* part of pmap_bootstrap */ 500/* part of pmap_bootstrap */
501static void  501static void
502context_init(void) 502context_init(void)
503{ 503{
504 int i; 504 int i;
505 505
506 TAILQ_INIT(&context_free_queue); 506 TAILQ_INIT(&context_free_queue);
507 TAILQ_INIT(&context_active_queue); 507 TAILQ_INIT(&context_active_queue);
508 508
509 /* Leave EMPTY_CONTEXT out of the free list. */ 509 /* Leave EMPTY_CONTEXT out of the free list. */
510 context_array[0].context_upmap = kernel_pmap; 510 context_array[0].context_upmap = kernel_pmap;
511 511
512 for (i = 1; i < NCONTEXT; i++) { 512 for (i = 1; i < NCONTEXT; i++) {
513 context_array[i].context_num = i; 513 context_array[i].context_num = i;
514 context_array[i].context_upmap = NULL; 514 context_array[i].context_upmap = NULL;
515 TAILQ_INSERT_TAIL(&context_free_queue, &context_array[i], 515 TAILQ_INSERT_TAIL(&context_free_queue, &context_array[i],
516 context_link); 516 context_link);
517#ifdef PMAP_DEBUG 517#ifdef PMAP_DEBUG
518 if (pmap_debug & PMD_CONTEXT) 518 if (pmap_debug & PMD_CONTEXT)
519 printf("context_init: sizeof(context_array[0])=%d\n", 519 printf("context_init: sizeof(context_array[0])=%d\n",
520 sizeof(context_array[0])); 520 sizeof(context_array[0]));
521#endif 521#endif
522 } 522 }
523} 523}
524 524
525/* Get us a context (steal one if necessary). */ 525/* Get us a context (steal one if necessary). */
526static void  526static void
527context_allocate(pmap_t pmap) 527context_allocate(pmap_t pmap)
528{ 528{
529 context_t context; 529 context_t context;
530 530
531 CHECK_SPL(); 531 CHECK_SPL();
532 532
533#ifdef DIAGNOSTIC 533#ifdef DIAGNOSTIC
534 if (pmap == kernel_pmap) 534 if (pmap == kernel_pmap)
535 panic("context_allocate: kernel_pmap"); 535 panic("context_allocate: kernel_pmap");
536 if (has_context(pmap)) 536 if (has_context(pmap))
537 panic("pmap: pmap already has context allocated to it"); 537 panic("pmap: pmap already has context allocated to it");
538#endif 538#endif
539 539
540 context = TAILQ_FIRST(&context_free_queue); 540 context = TAILQ_FIRST(&context_free_queue);
541 if (context == NULL) { 541 if (context == NULL) {
542 /* Steal the head of the active queue. */ 542 /* Steal the head of the active queue. */
543 context = TAILQ_FIRST(&context_active_queue); 543 context = TAILQ_FIRST(&context_active_queue);
544 if (context == NULL) 544 if (context == NULL)
545 panic("pmap: no contexts left?"); 545 panic("pmap: no contexts left?");
546#ifdef PMAP_DEBUG 546#ifdef PMAP_DEBUG
547 if (pmap_debug & PMD_CONTEXT) 547 if (pmap_debug & PMD_CONTEXT)
548 printf("context_allocate: steal ctx %d from pmap %p\n", 548 printf("context_allocate: steal ctx %d from pmap %p\n",
549 context->context_num, context->context_upmap); 549 context->context_num, context->context_upmap);
550#endif 550#endif
551 context_free(context->context_upmap); 551 context_free(context->context_upmap);
552 context = TAILQ_FIRST(&context_free_queue); 552 context = TAILQ_FIRST(&context_free_queue);
553 } 553 }
554 TAILQ_REMOVE(&context_free_queue, context, context_link); 554 TAILQ_REMOVE(&context_free_queue, context, context_link);
555 555
556#ifdef DIAGNOSTIC 556#ifdef DIAGNOSTIC
557 if (context->context_upmap != NULL) 557 if (context->context_upmap != NULL)
558 panic("pmap: context in use???"); 558 panic("pmap: context in use???");
559#endif 559#endif
560 560
561 context->context_upmap = pmap; 561 context->context_upmap = pmap;
562 pmap->pm_ctxnum = context->context_num; 562 pmap->pm_ctxnum = context->context_num;
563 563
564 TAILQ_INSERT_TAIL(&context_active_queue, context, context_link); 564 TAILQ_INSERT_TAIL(&context_active_queue, context, context_link);
565 565
566 /* 566 /*
567 * We could reload the MMU here, but that would 567 * We could reload the MMU here, but that would
568 * artificially move PMEGs from the inactive queue 568 * artificially move PMEGs from the inactive queue
569 * to the active queue, so do lazy reloading. 569 * to the active queue, so do lazy reloading.
570 * XXX - Need to reload wired pmegs though... 570 * XXX - Need to reload wired pmegs though...
571 * XXX: Verify that the context is empty? 571 * XXX: Verify that the context is empty?
572 */ 572 */
573} 573}
574 574
575/* 575/*
576 * Unload the context and put it on the free queue. 576 * Unload the context and put it on the free queue.
577 */ 577 */
578static void  578static void
579context_free(pmap_t pmap) 579context_free(pmap_t pmap)
580{ 580{
581 int saved_ctxnum, ctxnum; 581 int saved_ctxnum, ctxnum;
582 int i, sme; 582 int i, sme;
583 context_t contextp; 583 context_t contextp;
584 vaddr_t va; 584 vaddr_t va;
585 585
586 CHECK_SPL(); 586 CHECK_SPL();
587 587
588 ctxnum = pmap->pm_ctxnum; 588 ctxnum = pmap->pm_ctxnum;
589 if (ctxnum < FIRST_CONTEXT || ctxnum >= NCONTEXT) 589 if (ctxnum < FIRST_CONTEXT || ctxnum >= NCONTEXT)
590 panic("pmap: context_free ctxnum"); 590 panic("pmap: context_free ctxnum");
591 contextp = &context_array[ctxnum]; 591 contextp = &context_array[ctxnum];
592 592
593 /* Temporary context change. */ 593 /* Temporary context change. */
594 saved_ctxnum = get_context(); 594 saved_ctxnum = get_context();
595 set_context(ctxnum); 595 set_context(ctxnum);
596 596
597 /* Before unloading translations, flush cache. */ 597 /* Before unloading translations, flush cache. */
598#ifdef HAVECACHE 598#ifdef HAVECACHE
599 if (cache_size) 599 if (cache_size)
600 cache_flush_context(); 600 cache_flush_context();
601#endif 601#endif
602 602
603 /* Unload MMU (but keep in SW segmap). */ 603 /* Unload MMU (but keep in SW segmap). */
604 for (i = 0, va = 0; i < NUSEG; i++, va += NBSG) { 604 for (i = 0, va = 0; i < NUSEG; i++, va += NBSG) {
605 605
606#if !defined(PMAP_DEBUG) 606#if !defined(PMAP_DEBUG)
607 /* Short-cut using the S/W segmap (if !debug). */ 607 /* Short-cut using the S/W segmap (if !debug). */
608 if (pmap->pm_segmap[i] == SEGINV) 608 if (pmap->pm_segmap[i] == SEGINV)
609 continue; 609 continue;
610#endif 610#endif
611 611
612 /* Check the H/W segmap. */ 612 /* Check the H/W segmap. */
613 sme = get_segmap(va); 613 sme = get_segmap(va);
614 if (sme == SEGINV) 614 if (sme == SEGINV)
615 continue; 615 continue;
616 616
617 /* Found valid PMEG in the segmap. */ 617 /* Found valid PMEG in the segmap. */
618#ifdef PMAP_DEBUG 618#ifdef PMAP_DEBUG
619 if (pmap_debug & PMD_SEGMAP) 619 if (pmap_debug & PMD_SEGMAP)
620 printf("pmap: set_segmap ctx=%d v=0x%lx old=0x%x " 620 printf("pmap: set_segmap ctx=%d v=0x%lx old=0x%x "
621 "new=ff (cf)\n", ctxnum, va, sme); 621 "new=ff (cf)\n", ctxnum, va, sme);
622#endif 622#endif
623#ifdef DIAGNOSTIC 623#ifdef DIAGNOSTIC
624 if (sme != pmap->pm_segmap[i]) 624 if (sme != pmap->pm_segmap[i])
625 panic("context_free: unknown sme at va=0x%lx", va); 625 panic("context_free: unknown sme at va=0x%lx", va);
626#endif 626#endif
627 /* Did cache flush above (whole context). */ 627 /* Did cache flush above (whole context). */
628 set_segmap(va, SEGINV); 628 set_segmap(va, SEGINV);
629 /* In this case, do not clear pm_segmap. */ 629 /* In this case, do not clear pm_segmap. */
630 /* XXX: Maybe inline this call? */ 630 /* XXX: Maybe inline this call? */
631 pmeg_release(pmeg_p(sme)); 631 pmeg_release(pmeg_p(sme));
632 } 632 }
633 633
634 /* Restore previous context. */ 634 /* Restore previous context. */
635 set_context(saved_ctxnum); 635 set_context(saved_ctxnum);
636 636
637 /* Dequeue, update, requeue. */ 637 /* Dequeue, update, requeue. */
638 TAILQ_REMOVE(&context_active_queue, contextp, context_link); 638 TAILQ_REMOVE(&context_active_queue, contextp, context_link);
639 pmap->pm_ctxnum = EMPTY_CONTEXT; 639 pmap->pm_ctxnum = EMPTY_CONTEXT;
640 contextp->context_upmap = NULL; 640 contextp->context_upmap = NULL;
641 TAILQ_INSERT_TAIL(&context_free_queue, contextp, context_link); 641 TAILQ_INSERT_TAIL(&context_free_queue, contextp, context_link);
642} 642}
643 643
644 644
645/**************************************************************** 645/****************************************************************
646 * PMEG management functions. 646 * PMEG management functions.
647 */ 647 */
648 648
649static void  649static void
650pmeg_init(void) 650pmeg_init(void)
651{ 651{
652 int x; 652 int x;
653 653
654 /* clear pmeg array, put it all on the free pmeq queue */ 654 /* clear pmeg array, put it all on the free pmeq queue */
655 655
656 TAILQ_INIT(&pmeg_free_queue); 656 TAILQ_INIT(&pmeg_free_queue);
657 TAILQ_INIT(&pmeg_inactive_queue); 657 TAILQ_INIT(&pmeg_inactive_queue);
658 TAILQ_INIT(&pmeg_active_queue); 658 TAILQ_INIT(&pmeg_active_queue);
659 TAILQ_INIT(&pmeg_kernel_queue); 659 TAILQ_INIT(&pmeg_kernel_queue);
660 660
661 memset(pmeg_array, 0, NPMEG*sizeof(struct pmeg_state)); 661 memset(pmeg_array, 0, NPMEG*sizeof(struct pmeg_state));
662 for (x = 0; x < NPMEG; x++) { 662 for (x = 0; x < NPMEG; x++) {
663 TAILQ_INSERT_TAIL(&pmeg_free_queue, &pmeg_array[x], pmeg_link); 663 TAILQ_INSERT_TAIL(&pmeg_free_queue, &pmeg_array[x], pmeg_link);
664 pmeg_array[x].pmeg_qstate = PMEGQ_FREE; 664 pmeg_array[x].pmeg_qstate = PMEGQ_FREE;
665 pmeg_array[x].pmeg_index = x; 665 pmeg_array[x].pmeg_index = x;
666 } 666 }
667 667
668 /* The last pmeg is not usable. */ 668 /* The last pmeg is not usable. */
669 pmeg_reserve(SEGINV); 669 pmeg_reserve(SEGINV);
670} 670}
671 671
672/* 672/*
673 * Reserve a pmeg (forever) for use by PROM, etc. 673 * Reserve a pmeg (forever) for use by PROM, etc.
674 * Contents are left as-is. Called very early... 674 * Contents are left as-is. Called very early...
675 */ 675 */
676void  676void
677pmeg_reserve(int sme) 677pmeg_reserve(int sme)
678{ 678{
679 pmeg_t pmegp; 679 pmeg_t pmegp;
680 680
681 /* Can not use pmeg_p() because it fails on SEGINV. */ 681 /* Can not use pmeg_p() because it fails on SEGINV. */
682 pmegp = &pmeg_array[sme]; 682 pmegp = &pmeg_array[sme];
683 683
684 if (pmegp->pmeg_reserved) { 684 if (pmegp->pmeg_reserved) {
685 mon_printf("pmeg_reserve: already reserved\n"); 685 mon_printf("pmeg_reserve: already reserved\n");
686 sunmon_abort(); 686 sunmon_abort();
687 } 687 }
688 if (pmegp->pmeg_owner) { 688 if (pmegp->pmeg_owner) {
689 mon_printf("pmeg_reserve: already owned\n"); 689 mon_printf("pmeg_reserve: already owned\n");
690 sunmon_abort(); 690 sunmon_abort();
691 } 691 }
692 692
693 /* Owned by kernel, but not really usable... */ 693 /* Owned by kernel, but not really usable... */
694 pmegp->pmeg_owner = kernel_pmap; 694 pmegp->pmeg_owner = kernel_pmap;
695 pmegp->pmeg_reserved++; /* keep count, just in case */ 695 pmegp->pmeg_reserved++; /* keep count, just in case */
696 TAILQ_REMOVE(&pmeg_free_queue, pmegp, pmeg_link); 696 TAILQ_REMOVE(&pmeg_free_queue, pmegp, pmeg_link);
697 pmegp->pmeg_qstate = PMEGQ_NONE; 697 pmegp->pmeg_qstate = PMEGQ_NONE;
698} 698}
699 699
700/* 700/*
701 * Examine PMEGs used by the monitor, and either 701 * Examine PMEGs used by the monitor, and either
702 * reserve them (keep=1) or clear them (keep=0) 702 * reserve them (keep=1) or clear them (keep=0)
703 */ 703 */
704static void  704static void
705pmeg_mon_init(vaddr_t sva, vaddr_t eva, int keep) 705pmeg_mon_init(vaddr_t sva, vaddr_t eva, int keep)
706{ 706{
707 vaddr_t pgva, endseg; 707 vaddr_t pgva, endseg;
708 int pte, valid; 708 int pte, valid;
709 unsigned char sme; 709 unsigned char sme;
710 710
711#ifdef PMAP_DEBUG 711#ifdef PMAP_DEBUG
712 if (pmap_debug & PMD_SEGMAP) 712 if (pmap_debug & PMD_SEGMAP)
713 mon_printf("pmeg_mon_init(0x%x, 0x%x, %d)\n", 713 mon_printf("pmeg_mon_init(0x%x, 0x%x, %d)\n",
714 sva, eva, keep); 714 sva, eva, keep);
715#endif 715#endif
716 716
717 sva &= ~(NBSG - 1); 717 sva &= ~(NBSG - 1);
718 718
719 while (sva < eva) { 719 while (sva < eva) {
720 sme = get_segmap(sva); 720 sme = get_segmap(sva);
721 if (sme != SEGINV) { 721 if (sme != SEGINV) {
722 valid = 0; 722 valid = 0;
723 endseg = sva + NBSG; 723 endseg = sva + NBSG;
724 for (pgva = sva; pgva < endseg; pgva += PAGE_SIZE) { 724 for (pgva = sva; pgva < endseg; pgva += PAGE_SIZE) {
725 pte = get_pte(pgva); 725 pte = get_pte(pgva);
726 if (pte & PG_VALID) { 726 if (pte & PG_VALID) {
727 valid++; 727 valid++;
728 } 728 }
729 } 729 }
730#ifdef PMAP_DEBUG 730#ifdef PMAP_DEBUG
731 if (pmap_debug & PMD_SEGMAP) 731 if (pmap_debug & PMD_SEGMAP)
732 mon_printf(" sva=0x%x seg=0x%x valid=%d\n", 732 mon_printf(" sva=0x%x seg=0x%x valid=%d\n",
733 sva, sme, valid); 733 sva, sme, valid);
734#endif 734#endif
735 if (keep && valid) 735 if (keep && valid)
736 pmeg_reserve(sme); 736 pmeg_reserve(sme);
737 else 737 else
738 set_segmap(sva, SEGINV); 738 set_segmap(sva, SEGINV);
739 } 739 }
740 sva += NBSG; 740 sva += NBSG;
741 } 741 }
742} 742}
743 743
744/* 744/*
745 * This is used only during pmap_bootstrap, so we can 745 * This is used only during pmap_bootstrap, so we can
746 * get away with borrowing a slot in the segmap. 746 * get away with borrowing a slot in the segmap.
747 */ 747 */
748static void  748static void
749pmeg_clean(pmeg_t pmegp) 749pmeg_clean(pmeg_t pmegp)
750{ 750{
751 int sme; 751 int sme;
752 vaddr_t va; 752 vaddr_t va;
753 753
754 sme = get_segmap(0); 754 sme = get_segmap(0);
755 if (sme != SEGINV) 755 if (sme != SEGINV)
756 panic("pmeg_clean"); 756 panic("pmeg_clean");
757 757
758 sme = pmegp->pmeg_index; 758 sme = pmegp->pmeg_index;
759 set_segmap(0, sme); 759 set_segmap(0, sme);
760 760
761 for (va = 0; va < NBSG; va += PAGE_SIZE) 761 for (va = 0; va < NBSG; va += PAGE_SIZE)
762 set_pte(va, PG_INVAL); 762 set_pte(va, PG_INVAL);
763 763
764 set_segmap(0, SEGINV); 764 set_segmap(0, SEGINV);
765} 765}
766 766
767/* 767/*
768 * This routine makes sure that pmegs on the pmeg_free_queue contain 768 * This routine makes sure that pmegs on the pmeg_free_queue contain
769 * no valid ptes. It pulls things off the queue, cleans them, and 769 * no valid ptes. It pulls things off the queue, cleans them, and
770 * puts them at the end. The ending condition is finding the first 770 * puts them at the end. The ending condition is finding the first
771 * queue element at the head of the queue again. 771 * queue element at the head of the queue again.
772 */ 772 */
773static void  773static void
774pmeg_clean_free(void) 774pmeg_clean_free(void)
775{ 775{
776 pmeg_t pmegp, pmegp_first; 776 pmeg_t pmegp, pmegp_first;
777 777
778 pmegp = TAILQ_FIRST(&pmeg_free_queue); 778 pmegp = TAILQ_FIRST(&pmeg_free_queue);
779 if (pmegp == NULL) 779 if (pmegp == NULL)
780 panic("pmap: no free pmegs available to clean"); 780 panic("pmap: no free pmegs available to clean");
781 781
782 pmegp_first = NULL; 782 pmegp_first = NULL;
783 783
784 for (;;) { 784 for (;;) {
785 pmegp = TAILQ_FIRST(&pmeg_free_queue); 785 pmegp = TAILQ_FIRST(&pmeg_free_queue);
786 TAILQ_REMOVE(&pmeg_free_queue, pmegp, pmeg_link); 786 TAILQ_REMOVE(&pmeg_free_queue, pmegp, pmeg_link);
787 787
788 pmegp->pmeg_qstate = PMEGQ_NONE; 788 pmegp->pmeg_qstate = PMEGQ_NONE;
789 pmeg_clean(pmegp); 789 pmeg_clean(pmegp);
790 pmegp->pmeg_qstate = PMEGQ_FREE; 790 pmegp->pmeg_qstate = PMEGQ_FREE;
791 791
792 TAILQ_INSERT_TAIL(&pmeg_free_queue, pmegp, pmeg_link); 792 TAILQ_INSERT_TAIL(&pmeg_free_queue, pmegp, pmeg_link);
793 793
794 if (pmegp == pmegp_first) 794 if (pmegp == pmegp_first)
795 break; 795 break;
796 if (pmegp_first == NULL) 796 if (pmegp_first == NULL)
797 pmegp_first = pmegp; 797 pmegp_first = pmegp;
798 } 798 }
799} 799}
800 800
801/* 801/*
802 * Allocate a PMEG by whatever means necessary. 802 * Allocate a PMEG by whatever means necessary.
803 * (May invalidate some mappings!) 803 * (May invalidate some mappings!)
804 */ 804 */
805static pmeg_t  805static pmeg_t
806pmeg_allocate(pmap_t pmap, vaddr_t va) 806pmeg_allocate(pmap_t pmap, vaddr_t va)
807{ 807{
808 pmeg_t pmegp; 808 pmeg_t pmegp;
809 809
810 CHECK_SPL(); 810 CHECK_SPL();
811 811
812#ifdef DIAGNOSTIC 812#ifdef DIAGNOSTIC
813 if (va & SEGOFSET) { 813 if (va & SEGOFSET) {
814 panic("pmap:pmeg_allocate: va=0x%lx", va); 814 panic("pmap:pmeg_allocate: va=0x%lx", va);
815 } 815 }
816#endif 816#endif
817 817
818 /* Get one onto the free list if necessary. */ 818 /* Get one onto the free list if necessary. */
819 pmegp = TAILQ_FIRST(&pmeg_free_queue); 819 pmegp = TAILQ_FIRST(&pmeg_free_queue);
820 if (!pmegp) { 820 if (!pmegp) {
821 /* Try inactive queue... */ 821 /* Try inactive queue... */
822 pmegp = TAILQ_FIRST(&pmeg_inactive_queue); 822 pmegp = TAILQ_FIRST(&pmeg_inactive_queue);
823 if (!pmegp) { 823 if (!pmegp) {
824 /* Try active queue... */ 824 /* Try active queue... */
825 pmegp = TAILQ_FIRST(&pmeg_active_queue); 825 pmegp = TAILQ_FIRST(&pmeg_active_queue);
826 } 826 }
827 if (!pmegp) { 827 if (!pmegp) {
828 panic("pmeg_allocate: failed"); 828 panic("pmeg_allocate: failed");
829 } 829 }
830 830
831 /* 831 /*
832 * Remove mappings to free-up a pmeg 832 * Remove mappings to free-up a pmeg
833 * (so it will go onto the free list). 833 * (so it will go onto the free list).
834 * XXX - Skip this one if it is wired? 834 * XXX - Skip this one if it is wired?
835 */ 835 */
836 pmap_remove1(pmegp->pmeg_owner, 836 pmap_remove1(pmegp->pmeg_owner,
837 pmegp->pmeg_va, 837 pmegp->pmeg_va,
838 pmegp->pmeg_va + NBSG); 838 pmegp->pmeg_va + NBSG);
839 } 839 }
840 840
841 /* OK, free list has something for us to take. */ 841 /* OK, free list has something for us to take. */
842 pmegp = TAILQ_FIRST(&pmeg_free_queue); 842 pmegp = TAILQ_FIRST(&pmeg_free_queue);
843#ifdef DIAGNOSTIC 843#ifdef DIAGNOSTIC
844 if (pmegp == NULL) 844 if (pmegp == NULL)
845 panic("pmeg_allocagte: still none free?"); 845 panic("pmeg_allocagte: still none free?");
846 if ((pmegp->pmeg_qstate != PMEGQ_FREE) || 846 if ((pmegp->pmeg_qstate != PMEGQ_FREE) ||
847 (pmegp->pmeg_index == SEGINV) || 847 (pmegp->pmeg_index == SEGINV) ||
848 (pmegp->pmeg_vpages)) 848 (pmegp->pmeg_vpages))
849 panic("pmeg_allocate: bad pmegp=%p", pmegp); 849 panic("pmeg_allocate: bad pmegp=%p", pmegp);
850#endif 850#endif
851#ifdef PMAP_DEBUG 851#ifdef PMAP_DEBUG
852 if (pmegp->pmeg_index == pmap_db_watchpmeg) { 852 if (pmegp->pmeg_index == pmap_db_watchpmeg) {
853 db_printf("pmeg_allocate: watch pmegp=%p\n", pmegp); 853 db_printf("pmeg_allocate: watch pmegp=%p\n", pmegp);
854 Debugger(); 854 Debugger();
855 } 855 }
856#endif 856#endif
857 857
858 TAILQ_REMOVE(&pmeg_free_queue, pmegp, pmeg_link); 858 TAILQ_REMOVE(&pmeg_free_queue, pmegp, pmeg_link);
859 859
860 /* Reassign this PMEG for the caller. */ 860 /* Reassign this PMEG for the caller. */
861 pmegp->pmeg_owner = pmap; 861 pmegp->pmeg_owner = pmap;
862 pmegp->pmeg_version = pmap->pm_version; 862 pmegp->pmeg_version = pmap->pm_version;
863 pmegp->pmeg_va = va; 863 pmegp->pmeg_va = va;
864 pmegp->pmeg_wired = 0; 864 pmegp->pmeg_wired = 0;
865 pmegp->pmeg_reserved = 0; 865 pmegp->pmeg_reserved = 0;
866 pmegp->pmeg_vpages = 0; 866 pmegp->pmeg_vpages = 0;
867 if (pmap == kernel_pmap) { 867 if (pmap == kernel_pmap) {
868 TAILQ_INSERT_TAIL(&pmeg_kernel_queue, pmegp, pmeg_link); 868 TAILQ_INSERT_TAIL(&pmeg_kernel_queue, pmegp, pmeg_link);
869 pmegp->pmeg_qstate = PMEGQ_KERNEL; 869 pmegp->pmeg_qstate = PMEGQ_KERNEL;
870 } else { 870 } else {
871 TAILQ_INSERT_TAIL(&pmeg_active_queue, pmegp, pmeg_link); 871 TAILQ_INSERT_TAIL(&pmeg_active_queue, pmegp, pmeg_link);
872 pmegp->pmeg_qstate = PMEGQ_ACTIVE; 872 pmegp->pmeg_qstate = PMEGQ_ACTIVE;
873 } 873 }
874 /* Caller will verify that it's empty (if debugging). */ 874 /* Caller will verify that it's empty (if debugging). */
875 return pmegp; 875 return pmegp;
876} 876}
877 877
878/* 878/*
879 * Put pmeg on the inactive queue, leaving its contents intact. 879 * Put pmeg on the inactive queue, leaving its contents intact.
880 * This happens when we lose our context. We may reclaim 880 * This happens when we lose our context. We may reclaim
881 * this pmeg later if it is still in the inactive queue. 881 * this pmeg later if it is still in the inactive queue.
882 */ 882 */
883static void  883static void
884pmeg_release(pmeg_t pmegp) 884pmeg_release(pmeg_t pmegp)
885{ 885{
886 886
887 CHECK_SPL(); 887 CHECK_SPL();
888 888
889#ifdef DIAGNOSTIC 889#ifdef DIAGNOSTIC
890 if ((pmegp->pmeg_owner == kernel_pmap) || 890 if ((pmegp->pmeg_owner == kernel_pmap) ||
891 (pmegp->pmeg_qstate != PMEGQ_ACTIVE)) 891 (pmegp->pmeg_qstate != PMEGQ_ACTIVE))
892 panic("pmeg_release: bad pmeg=%p", pmegp); 892 panic("pmeg_release: bad pmeg=%p", pmegp);
893#endif 893#endif
894 894
895 TAILQ_REMOVE(&pmeg_active_queue, pmegp, pmeg_link); 895 TAILQ_REMOVE(&pmeg_active_queue, pmegp, pmeg_link);
896 pmegp->pmeg_qstate = PMEGQ_INACTIVE; 896 pmegp->pmeg_qstate = PMEGQ_INACTIVE;
897 TAILQ_INSERT_TAIL(&pmeg_inactive_queue, pmegp, pmeg_link); 897 TAILQ_INSERT_TAIL(&pmeg_inactive_queue, pmegp, pmeg_link);
898} 898}
899 899
900/* 900/*
901 * Move the pmeg to the free queue from wherever it is. 901 * Move the pmeg to the free queue from wherever it is.
902 * The pmeg will be clean. It might be in kernel_pmap. 902 * The pmeg will be clean. It might be in kernel_pmap.
903 */ 903 */
904static void  904static void
905pmeg_free(pmeg_t pmegp) 905pmeg_free(pmeg_t pmegp)
906{ 906{
907 907
908 CHECK_SPL(); 908 CHECK_SPL();
909 909
910#ifdef DIAGNOSTIC 910#ifdef DIAGNOSTIC
911 /* Caller should verify that it's empty. */ 911 /* Caller should verify that it's empty. */
912 if (pmegp->pmeg_vpages != 0) 912 if (pmegp->pmeg_vpages != 0)
913 panic("pmeg_free: vpages"); 913 panic("pmeg_free: vpages");
914#endif 914#endif
915 915
916 switch (pmegp->pmeg_qstate) { 916 switch (pmegp->pmeg_qstate) {
917 case PMEGQ_ACTIVE: 917 case PMEGQ_ACTIVE:
918 TAILQ_REMOVE(&pmeg_active_queue, pmegp, pmeg_link); 918 TAILQ_REMOVE(&pmeg_active_queue, pmegp, pmeg_link);
919 break; 919 break;
920 case PMEGQ_INACTIVE: 920 case PMEGQ_INACTIVE:
921 TAILQ_REMOVE(&pmeg_inactive_queue, pmegp, pmeg_link); 921 TAILQ_REMOVE(&pmeg_inactive_queue, pmegp, pmeg_link);
922 break; 922 break;
923 case PMEGQ_KERNEL: 923 case PMEGQ_KERNEL:
924 TAILQ_REMOVE(&pmeg_kernel_queue, pmegp, pmeg_link); 924 TAILQ_REMOVE(&pmeg_kernel_queue, pmegp, pmeg_link);
925 break; 925 break;
926 default: 926 default:
927 panic("pmeg_free: releasing bad pmeg"); 927 panic("pmeg_free: releasing bad pmeg");
928 break; 928 break;
929 } 929 }
930 930
931#ifdef PMAP_DEBUG 931#ifdef PMAP_DEBUG
932 if (pmegp->pmeg_index == pmap_db_watchpmeg) { 932 if (pmegp->pmeg_index == pmap_db_watchpmeg) {
933 db_printf("pmeg_free: watch pmeg 0x%x\n", 933 db_printf("pmeg_free: watch pmeg 0x%x\n",
934 pmegp->pmeg_index); 934 pmegp->pmeg_index);
935 Debugger(); 935 Debugger();
936 } 936 }
937#endif 937#endif
938 938
939 pmegp->pmeg_owner = NULL; 939 pmegp->pmeg_owner = NULL;
940 pmegp->pmeg_qstate = PMEGQ_FREE; 940 pmegp->pmeg_qstate = PMEGQ_FREE;
941 TAILQ_INSERT_TAIL(&pmeg_free_queue, pmegp, pmeg_link); 941 TAILQ_INSERT_TAIL(&pmeg_free_queue, pmegp, pmeg_link);
942} 942}
943 943
944/* 944/*
945 * Find a PMEG that was put on the inactive queue when we 945 * Find a PMEG that was put on the inactive queue when we
946 * had our context stolen. If found, move to active queue. 946 * had our context stolen. If found, move to active queue.
947 */ 947 */
948static pmeg_t  948static pmeg_t
949pmeg_cache(pmap_t pmap, vaddr_t va) 949pmeg_cache(pmap_t pmap, vaddr_t va)
950{ 950{
951 int sme, segnum; 951 int sme, segnum;
952 pmeg_t pmegp; 952 pmeg_t pmegp;
953 953
954 CHECK_SPL(); 954 CHECK_SPL();
955 955
956#ifdef DIAGNOSTIC 956#ifdef DIAGNOSTIC
957 if (pmap == kernel_pmap) 957 if (pmap == kernel_pmap)
958 panic("pmeg_cache: kernel_pmap"); 958 panic("pmeg_cache: kernel_pmap");
959 if (va & SEGOFSET) { 959 if (va & SEGOFSET) {
960 panic("pmap:pmeg_cache: va=0x%lx", va); 960 panic("pmap:pmeg_cache: va=0x%lx", va);
961 } 961 }
962#endif 962#endif
963 963
964 if (pmap->pm_segmap == NULL) 964 if (pmap->pm_segmap == NULL)
965 return PMEG_NULL; 965 return PMEG_NULL;
966 966
967 segnum = VA_SEGNUM(va); 967 segnum = VA_SEGNUM(va);
968 if (segnum > NUSEG) /* out of range */ 968 if (segnum > NUSEG) /* out of range */
969 return PMEG_NULL; 969 return PMEG_NULL;
970 970
971 sme = pmap->pm_segmap[segnum]; 971 sme = pmap->pm_segmap[segnum];
972 if (sme == SEGINV) /* nothing cached */ 972 if (sme == SEGINV) /* nothing cached */
973 return PMEG_NULL; 973 return PMEG_NULL;
974 974
975 pmegp = pmeg_p(sme); 975 pmegp = pmeg_p(sme);
976 976
977#ifdef PMAP_DEBUG 977#ifdef PMAP_DEBUG
978 if (pmegp->pmeg_index == pmap_db_watchpmeg) { 978 if (pmegp->pmeg_index == pmap_db_watchpmeg) {
979 db_printf("pmeg_cache: watch pmeg 0x%x\n", pmegp->pmeg_index); 979 db_printf("pmeg_cache: watch pmeg 0x%x\n", pmegp->pmeg_index);
980 Debugger(); 980 Debugger();
981 } 981 }
982#endif 982#endif
983 983
984 /* 984 /*
985 * Our segmap named a PMEG. If it is no longer ours, 985 * Our segmap named a PMEG. If it is no longer ours,
986 * invalidate that entry in our segmap and return NULL. 986 * invalidate that entry in our segmap and return NULL.
987 */ 987 */
988 if ((pmegp->pmeg_owner != pmap) || 988 if ((pmegp->pmeg_owner != pmap) ||
989 (pmegp->pmeg_version != pmap->pm_version) || 989 (pmegp->pmeg_version != pmap->pm_version) ||
990 (pmegp->pmeg_va != va)) 990 (pmegp->pmeg_va != va))
991 { 991 {
992#ifdef PMAP_DEBUG 992#ifdef PMAP_DEBUG
993 db_printf("pmap:pmeg_cache: invalid pmeg: sme=0x%x\n", sme); 993 db_printf("pmap:pmeg_cache: invalid pmeg: sme=0x%x\n", sme);
994 pmeg_print(pmegp); 994 pmeg_print(pmegp);
995 Debugger(); 995 Debugger();
996#endif 996#endif
997 pmap->pm_segmap[segnum] = SEGINV; 997 pmap->pm_segmap[segnum] = SEGINV;
998 return PMEG_NULL; /* cache lookup failed */ 998 return PMEG_NULL; /* cache lookup failed */
999 } 999 }
1000 1000
1001#ifdef DIAGNOSTIC 1001#ifdef DIAGNOSTIC
1002 /* Make sure it is on the inactive queue. */ 1002 /* Make sure it is on the inactive queue. */
1003 if (pmegp->pmeg_qstate != PMEGQ_INACTIVE) 1003 if (pmegp->pmeg_qstate != PMEGQ_INACTIVE)
1004 panic("pmeg_cache: pmeg was taken: %p", pmegp); 1004 panic("pmeg_cache: pmeg was taken: %p", pmegp);
1005#endif 1005#endif
1006 1006
1007 TAILQ_REMOVE(&pmeg_inactive_queue, pmegp, pmeg_link); 1007 TAILQ_REMOVE(&pmeg_inactive_queue, pmegp, pmeg_link);
1008 pmegp->pmeg_qstate = PMEGQ_ACTIVE; 1008 pmegp->pmeg_qstate = PMEGQ_ACTIVE;
1009 TAILQ_INSERT_TAIL(&pmeg_active_queue, pmegp, pmeg_link); 1009 TAILQ_INSERT_TAIL(&pmeg_active_queue, pmegp, pmeg_link);
1010 1010
1011 return pmegp; 1011 return pmegp;
1012} 1012}
1013 1013
1014#ifdef PMAP_DEBUG 1014#ifdef PMAP_DEBUG
1015static void  1015static void
1016pmeg_verify_empty(vaddr_t va) 1016pmeg_verify_empty(vaddr_t va)
1017{ 1017{
1018 vaddr_t eva; 1018 vaddr_t eva;
1019 int pte; 1019 int pte;
1020 1020
1021 for (eva = va + NBSG; va < eva; va += PAGE_SIZE) { 1021 for (eva = va + NBSG; va < eva; va += PAGE_SIZE) {
1022 pte = get_pte(va); 1022 pte = get_pte(va);
1023 if (pte & PG_VALID) 1023 if (pte & PG_VALID)
1024 panic("pmeg_verify_empty"); 1024 panic("pmeg_verify_empty");
1025 } 1025 }
1026} 1026}
1027#endif /* PMAP_DEBUG */ 1027#endif /* PMAP_DEBUG */
1028 1028
1029 1029
1030/**************************************************************** 1030/****************************************************************
1031 * Physical-to-virtual lookup support 1031 * Physical-to-virtual lookup support
1032 * 1032 *
1033 * Need memory for the pv_alloc/pv_free list heads 1033 * Need memory for the pv_alloc/pv_free list heads
1034 * and elements. We know how many to allocate since 1034 * and elements. We know how many to allocate since
1035 * there is one list head for each physical page, and 1035 * there is one list head for each physical page, and
1036 * at most one element for each PMEG slot. 1036 * at most one element for each PMEG slot.
1037 */ 1037 */
1038static void  1038static void
1039pv_init(void) 1039pv_init(void)
1040{ 1040{
1041 int npp, nvp, sz; 1041 int npp, nvp, sz;
1042 pv_entry_t pv; 1042 pv_entry_t pv;
1043 char *p; 1043 char *p;
1044 1044
1045 /* total allocation size */ 1045 /* total allocation size */
1046 sz = 0; 1046 sz = 0;
1047 1047
1048 /* 1048 /*
1049 * Data for each physical page. 1049 * Data for each physical page.
1050 * Each "mod/ref" flag is a char. 1050 * Each "mod/ref" flag is a char.
1051 * Each PV head is a pointer. 1051 * Each PV head is a pointer.
1052 * Note physmem is in pages. 1052 * Note physmem is in pages.
1053 */ 1053 */
1054 npp = ALIGN(physmem); 1054 npp = ALIGN(physmem);
1055 sz += (npp * sizeof(*pv_flags_tbl)); 1055 sz += (npp * sizeof(*pv_flags_tbl));
1056 sz += (npp * sizeof(*pv_head_tbl)); 1056 sz += (npp * sizeof(*pv_head_tbl));
1057 1057
1058 /* 1058 /*
1059 * Data for each virtual page (all PMEGs). 1059 * Data for each virtual page (all PMEGs).
1060 * One pv_entry for each page frame. 1060 * One pv_entry for each page frame.
1061 */ 1061 */
1062 nvp = NPMEG * NPAGSEG; 1062 nvp = NPMEG * NPAGSEG;
1063 sz += (nvp * sizeof(*pv_free_list)); 1063 sz += (nvp * sizeof(*pv_free_list));
1064 1064
1065 /* Now allocate the whole thing. */ 1065 /* Now allocate the whole thing. */
1066 sz = m68k_round_page(sz); 1066 sz = m68k_round_page(sz);
1067 p = (char *)uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED); 1067 p = (char *)uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED);
1068 if (p == NULL) 1068 if (p == NULL)
1069 panic("pmap:pv_init: alloc failed"); 1069 panic("pmap:pv_init: alloc failed");
1070 memset(p, 0, sz); 1070 memset(p, 0, sz);
1071 1071
1072 /* Now divide up the space. */ 1072 /* Now divide up the space. */
1073 pv_flags_tbl = (void *) p; 1073 pv_flags_tbl = (void *) p;
1074 p += (npp * sizeof(*pv_flags_tbl)); 1074 p += (npp * sizeof(*pv_flags_tbl));
1075 pv_head_tbl = (void*) p; 1075 pv_head_tbl = (void*) p;
1076 p += (npp * sizeof(*pv_head_tbl)); 1076 p += (npp * sizeof(*pv_head_tbl));
1077 pv_free_list = (void *)p; 1077 pv_free_list = (void *)p;
1078 p += (nvp * sizeof(*pv_free_list)); 1078 p += (nvp * sizeof(*pv_free_list));
1079 1079
1080 /* Finally, make pv_free_list into a list. */ 1080 /* Finally, make pv_free_list into a list. */
1081 for (pv = pv_free_list; (char *)pv < p; pv++) 1081 for (pv = pv_free_list; (char *)pv < p; pv++)
1082 pv->pv_next = &pv[1]; 1082 pv->pv_next = &pv[1];
1083 pv[-1].pv_next = 0; 1083 pv[-1].pv_next = 0;
1084 1084
1085 pv_initialized++; 1085 pv_initialized++;
1086} 1086}
1087 1087
1088/* 1088/*
1089 * Set or clear bits in all PTEs mapping a page. 1089 * Set or clear bits in all PTEs mapping a page.
1090 * Also does the syncflags work while we are there... 1090 * Also does the syncflags work while we are there...
1091 */ 1091 */
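/*
 * For example, the VAC-handling callers toggle only the non-cache
 * bit: pv_link() marks every mapping of a page non-cached with
 * pv_changepte(pa, PG_NC, 0) when an alias appears, and pv_unlink()
 * re-enables caching with pv_changepte(pa, 0, PG_NC) once no
 * aliasing mappings remain.
 */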
1092static void  1092static void
1093pv_changepte(paddr_t pa, int set_bits, int clear_bits) 1093pv_changepte(paddr_t pa, int set_bits, int clear_bits)
1094{ 1094{
1095 pv_entry_t *head, pv; 1095 pv_entry_t *head, pv;
1096 u_char *pv_flags; 1096 u_char *pv_flags;
1097 pmap_t pmap; 1097 pmap_t pmap;
1098 vaddr_t va; 1098 vaddr_t va;
1099 int pte, sme; 1099 int pte, sme;
1100 int saved_ctx; 1100 int saved_ctx;
1101 bool in_ctx; 1101 bool in_ctx;
1102 u_int flags; 1102 u_int flags;
1103 1103
1104 pv_flags = pa_to_pvflags(pa); 1104 pv_flags = pa_to_pvflags(pa);
1105 head = pa_to_pvhead(pa); 1105 head = pa_to_pvhead(pa);
1106 1106
1107 /* If no mappings, no work to do. */ 1107 /* If no mappings, no work to do. */
1108 if (*head == NULL) 1108 if (*head == NULL)
1109 return; 1109 return;
1110 1110
1111#ifdef DIAGNOSTIC 1111#ifdef DIAGNOSTIC
1112 /* This function should only clear these bits: */ 1112 /* This function should only clear these bits: */
1113 if (clear_bits & ~(PG_WRITE | PG_NC | PG_REF | PG_MOD)) 1113 if (clear_bits & ~(PG_WRITE | PG_NC | PG_REF | PG_MOD))
1114 panic("pv_changepte: clear=0x%x", clear_bits); 1114 panic("pv_changepte: clear=0x%x", clear_bits);
1115#endif 1115#endif
1116 1116
1117 flags = 0; 1117 flags = 0;
1118 saved_ctx = get_context(); 1118 saved_ctx = get_context();
1119 for (pv = *head; pv != NULL; pv = pv->pv_next) { 1119 for (pv = *head; pv != NULL; pv = pv->pv_next) {
1120 pmap = pv->pv_pmap; 1120 pmap = pv->pv_pmap;
1121 va = pv->pv_va; 1121 va = pv->pv_va;
1122 1122
1123#ifdef DIAGNOSTIC 1123#ifdef DIAGNOSTIC
1124 if (pmap->pm_segmap == NULL) 1124 if (pmap->pm_segmap == NULL)
1125 panic("pv_changepte: null segmap"); 1125 panic("pv_changepte: null segmap");
1126#endif 1126#endif
1127 1127
1128 /* Is the PTE currently accessible in some context? */ 1128 /* Is the PTE currently accessible in some context? */
1129 in_ctx = false; 1129 in_ctx = false;
1130 sme = SEGINV; /* kill warning */ 1130 sme = SEGINV; /* kill warning */
1131 if (pmap == kernel_pmap) 1131 if (pmap == kernel_pmap)
1132 in_ctx = true; 1132 in_ctx = true;
1133 else if (has_context(pmap)) { 1133 else if (has_context(pmap)) {
1134 /* PMEG may be inactive. */ 1134 /* PMEG may be inactive. */
1135 set_context(pmap->pm_ctxnum); 1135 set_context(pmap->pm_ctxnum);
1136 sme = get_segmap(va); 1136 sme = get_segmap(va);
1137 if (sme != SEGINV) 1137 if (sme != SEGINV)
1138 in_ctx = true; 1138 in_ctx = true;
1139 } 1139 }
1140 1140
1141 if (in_ctx == true) { 1141 if (in_ctx == true) {
1142 /* 1142 /*
1143 * The PTE is in the current context. 1143 * The PTE is in the current context.
1144 * Make sure PTE is up-to-date with VAC. 1144 * Make sure PTE is up-to-date with VAC.
1145 */ 1145 */
1146#ifdef HAVECACHE 1146#ifdef HAVECACHE
1147 if (cache_size) 1147 if (cache_size)
1148 cache_flush_page(va); 1148 cache_flush_page(va);
1149#endif 1149#endif
1150 pte = get_pte(va); 1150 pte = get_pte(va);
1151 } else { 1151 } else {
1152 1152
1153 /* 1153 /*
1154 * The PTE is not in any context. 1154 * The PTE is not in any context.
1155 */ 1155 */
1156 1156
1157 sme = pmap->pm_segmap[VA_SEGNUM(va)]; 1157 sme = pmap->pm_segmap[VA_SEGNUM(va)];
1158#ifdef DIAGNOSTIC 1158#ifdef DIAGNOSTIC
1159 if (sme == SEGINV) 1159 if (sme == SEGINV)
1160 panic("pv_changepte: SEGINV"); 1160 panic("pv_changepte: SEGINV");
1161#endif 1161#endif
1162 pte = get_pte_pmeg(sme, VA_PTE_NUM(va)); 1162 pte = get_pte_pmeg(sme, VA_PTE_NUM(va));
1163 } 1163 }
1164 1164
1165#ifdef DIAGNOSTIC 1165#ifdef DIAGNOSTIC
1166 /* PV entries point only to valid mappings. */ 1166 /* PV entries point only to valid mappings. */
1167 if ((pte & PG_VALID) == 0) 1167 if ((pte & PG_VALID) == 0)
1168 panic("pv_changepte: not PG_VALID at va=0x%lx", va); 1168 panic("pv_changepte: not PG_VALID at va=0x%lx", va);
1169#endif 1169#endif
1170 /* Get these while it's easy. */ 1170 /* Get these while it's easy. */
1171 if (pte & PG_MODREF) { 1171 if (pte & PG_MODREF) {
1172 flags |= (pte & PG_MODREF); 1172 flags |= (pte & PG_MODREF);
1173 pte &= ~PG_MODREF; 1173 pte &= ~PG_MODREF;
1174 } 1174 }
1175 1175
1176 /* Finally, set and clear some bits. */ 1176 /* Finally, set and clear some bits. */
1177 pte |= set_bits; 1177 pte |= set_bits;
1178 pte &= ~clear_bits; 1178 pte &= ~clear_bits;
1179 1179
1180 if (in_ctx == true) { 1180 if (in_ctx == true) {
1181 /* Did cache flush above. */ 1181 /* Did cache flush above. */
1182 set_pte(va, pte); 1182 set_pte(va, pte);
1183 } else { 1183 } else {
1184 set_pte_pmeg(sme, VA_PTE_NUM(va), pte); 1184 set_pte_pmeg(sme, VA_PTE_NUM(va), pte);
1185 } 1185 }
1186 } 1186 }
1187 set_context(saved_ctx); 1187 set_context(saved_ctx);
1188 1188
1189 *pv_flags |= (flags >> PV_SHIFT); 1189 *pv_flags |= (flags >> PV_SHIFT);
1190} 1190}
1191 1191
1192/* 1192/*
1193 * Return ref and mod bits from pvlist, 1193 * Return ref and mod bits from pvlist,
1194 * and turn off the same in hardware PTEs. 1194 * and turn off the same in hardware PTEs.
1195 */ 1195 */
1196static u_int  1196static u_int
1197pv_syncflags(pv_entry_t pv) 1197pv_syncflags(pv_entry_t pv)
1198{ 1198{
1199 pmap_t pmap; 1199 pmap_t pmap;
1200 vaddr_t va; 1200 vaddr_t va;
1201 int pte, sme; 1201 int pte, sme;
1202 int saved_ctx; 1202 int saved_ctx;
1203 bool in_ctx; 1203 bool in_ctx;
1204 u_int flags; 1204 u_int flags;
1205 1205
1206 /* If no mappings, no work to do. */ 1206 /* If no mappings, no work to do. */
1207 if (pv == NULL) 1207 if (pv == NULL)
1208 return (0); 1208 return (0);
1209 1209
1210 flags = 0; 1210 flags = 0;
1211 saved_ctx = get_context(); 1211 saved_ctx = get_context();
1212 for (; pv != NULL; pv = pv->pv_next) { 1212 for (; pv != NULL; pv = pv->pv_next) {
1213 pmap = pv->pv_pmap; 1213 pmap = pv->pv_pmap;
1214 va = pv->pv_va; 1214 va = pv->pv_va;
1215 sme = SEGINV; 1215 sme = SEGINV;
1216 1216
1217#ifdef DIAGNOSTIC 1217#ifdef DIAGNOSTIC
1218 /* 1218 /*
1219 * Only the head may have a null pmap, and 1219 * Only the head may have a null pmap, and
1220 * we checked for that above. 1220 * we checked for that above.
1221 */ 1221 */
1222 if (pmap->pm_segmap == NULL) 1222 if (pmap->pm_segmap == NULL)
1223 panic("pv_syncflags: null segmap"); 1223 panic("pv_syncflags: null segmap");
1224#endif 1224#endif
1225 1225
1226 /* Is the PTE currently accessible in some context? */ 1226 /* Is the PTE currently accessible in some context? */
1227 in_ctx = false; 1227 in_ctx = false;
1228 if (pmap == kernel_pmap) 1228 if (pmap == kernel_pmap)
1229 in_ctx = true; 1229 in_ctx = true;
1230 else if (has_context(pmap)) { 1230 else if (has_context(pmap)) {
1231 /* PMEG may be inactive. */ 1231 /* PMEG may be inactive. */
1232 set_context(pmap->pm_ctxnum); 1232 set_context(pmap->pm_ctxnum);
1233 sme = get_segmap(va); 1233 sme = get_segmap(va);
1234 if (sme != SEGINV) 1234 if (sme != SEGINV)
1235 in_ctx = true; 1235 in_ctx = true;
1236 } 1236 }
1237 1237
1238 if (in_ctx == true) { 1238 if (in_ctx == true) {
1239 1239
1240 /* 1240 /*
1241 * The PTE is in the current context. 1241 * The PTE is in the current context.
1242 * Make sure PTE is up-to-date with VAC. 1242 * Make sure PTE is up-to-date with VAC.
1243 */ 1243 */
1244 1244
1245#ifdef HAVECACHE 1245#ifdef HAVECACHE
1246 if (cache_size) 1246 if (cache_size)
1247 cache_flush_page(va); 1247 cache_flush_page(va);
1248#endif 1248#endif
1249 pte = get_pte(va); 1249 pte = get_pte(va);
1250 } else { 1250 } else {
1251 1251
1252 /* 1252 /*
1253 * The PTE is not in any context. 1253 * The PTE is not in any context.
1254 */ 1254 */
1255 1255
1256 sme = pmap->pm_segmap[VA_SEGNUM(va)]; 1256 sme = pmap->pm_segmap[VA_SEGNUM(va)];
1257#ifdef DIAGNOSTIC 1257#ifdef DIAGNOSTIC
1258 if (sme == SEGINV) 1258 if (sme == SEGINV)
1259 panic("pv_syncflags: SEGINV"); 1259 panic("pv_syncflags: SEGINV");
1260#endif 1260#endif
1261 pte = get_pte_pmeg(sme, VA_PTE_NUM(va)); 1261 pte = get_pte_pmeg(sme, VA_PTE_NUM(va));
1262 } 1262 }
1263 1263
1264#ifdef DIAGNOSTIC 1264#ifdef DIAGNOSTIC
1265 /* PV entries point only to valid mappings. */ 1265 /* PV entries point only to valid mappings. */
1266 if ((pte & PG_VALID) == 0) 1266 if ((pte & PG_VALID) == 0)
1267 panic("pv_syncflags: not PG_VALID at va=0x%lx", va); 1267 panic("pv_syncflags: not PG_VALID at va=0x%lx", va);
1268#endif 1268#endif
1269 /* OK, do what we came here for... */ 1269 /* OK, do what we came here for... */
1270 if (pte & PG_MODREF) { 1270 if (pte & PG_MODREF) {
1271 flags |= (pte & PG_MODREF); 1271 flags |= (pte & PG_MODREF);
1272 pte &= ~PG_MODREF; 1272 pte &= ~PG_MODREF;
1273 } 1273 }
1274 1274
1275 if (in_ctx == true) { 1275 if (in_ctx == true) {
1276 /* Did cache flush above. */ 1276 /* Did cache flush above. */
1277 set_pte(va, pte); 1277 set_pte(va, pte);
1278 } else { 1278 } else {
1279 set_pte_pmeg(sme, VA_PTE_NUM(va), pte); 1279 set_pte_pmeg(sme, VA_PTE_NUM(va), pte);
1280 } 1280 }
1281 } 1281 }
1282 set_context(saved_ctx); 1282 set_context(saved_ctx);
1283 1283
1284 return (flags >> PV_SHIFT); 1284 return (flags >> PV_SHIFT);
1285} 1285}
1286 1286
1287/* Remove all mappings for the physical page. */ 1287/* Remove all mappings for the physical page. */
1288static void  1288static void
1289pv_remove_all(paddr_t pa) 1289pv_remove_all(paddr_t pa)
1290{ 1290{
1291 pv_entry_t *head, pv; 1291 pv_entry_t *head, pv;
1292 pmap_t pmap; 1292 pmap_t pmap;
1293 vaddr_t va; 1293 vaddr_t va;
1294 1294
1295 CHECK_SPL(); 1295 CHECK_SPL();
1296 1296
1297#ifdef PMAP_DEBUG 1297#ifdef PMAP_DEBUG
1298 if (pmap_debug & PMD_REMOVE) 1298 if (pmap_debug & PMD_REMOVE)
1299 printf("pv_remove_all(0x%lx)\n", pa); 1299 printf("pv_remove_all(0x%lx)\n", pa);
1300#endif 1300#endif
1301 1301
1302 head = pa_to_pvhead(pa); 1302 head = pa_to_pvhead(pa);
1303 while ((pv = *head) != NULL) { 1303 while ((pv = *head) != NULL) {
1304 pmap = pv->pv_pmap; 1304 pmap = pv->pv_pmap;
1305 va = pv->pv_va; 1305 va = pv->pv_va;
1306 pmap_remove1(pmap, va, va + PAGE_SIZE); 1306 pmap_remove1(pmap, va, va + PAGE_SIZE);
1307#ifdef PMAP_DEBUG 1307#ifdef PMAP_DEBUG
1308 /* Make sure it went away. */ 1308 /* Make sure it went away. */
1309 if (pv == *head) { 1309 if (pv == *head) {
1310 db_printf("pv_remove_all: " 1310 db_printf("pv_remove_all: "
1311 "head unchanged for pa=0x%lx\n", pa); 1311 "head unchanged for pa=0x%lx\n", pa);
1312 Debugger(); 1312 Debugger();
1313 } 1313 }
1314#endif 1314#endif
1315 } 1315 }
1316} 1316}
1317 1317
1318/* 1318/*
1319 * The pmap system is asked to look up all mappings that point to a 1319 * The pmap system is asked to look up all mappings that point to a
1320 * given physical memory address. This function adds a new element 1320 * given physical memory address. This function adds a new element
1321 * to the list of mappings maintained for the given physical address. 1321 * to the list of mappings maintained for the given physical address.
1322 * Returns PV_NC if the (new) pvlist says that the address cannot 1322 * Returns PV_NC if the (new) pvlist says that the address cannot
1323 * be cached. 1323 * be cached.
1324 */ 1324 */
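/*
 * The caller folds the returned bit back into the PTE it is about
 * to install; pmap_enter_kernel() below does, in effect:
 *
 *	if (pv_link(pmap, new_pte, pgva) & PV_NC)
 *		new_pte |= PG_NC;
 */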
1325static int  1325static int
1326pv_link(pmap_t pmap, int pte, vaddr_t va) 1326pv_link(pmap_t pmap, int pte, vaddr_t va)
1327{ 1327{
1328 paddr_t pa; 1328 paddr_t pa;
1329 pv_entry_t *head, pv; 1329 pv_entry_t *head, pv;
1330 u_char *pv_flags; 1330 u_char *pv_flags;
1331 int flags; 1331 int flags;
1332 1332
1333 if (!pv_initialized) 1333 if (!pv_initialized)
1334 return 0; 1334 return 0;
1335 1335
1336 CHECK_SPL(); 1336 CHECK_SPL();
1337 1337
1338 /* Only the non-cached bit is of interest here. */ 1338 /* Only the non-cached bit is of interest here. */
1339 flags = (pte & PG_NC) ? PV_NC : 0; 1339 flags = (pte & PG_NC) ? PV_NC : 0;
1340 pa = PG_PA(pte); 1340 pa = PG_PA(pte);
1341 1341
1342#ifdef PMAP_DEBUG 1342#ifdef PMAP_DEBUG
1343 if ((pmap_debug & PMD_LINK) || (va == pmap_db_watchva)) { 1343 if ((pmap_debug & PMD_LINK) || (va == pmap_db_watchva)) {
1344 printf("pv_link(%p, 0x%x, 0x%lx)\n", pmap, pte, va); 1344 printf("pv_link(%p, 0x%x, 0x%lx)\n", pmap, pte, va);
1345 /* pv_print(pa); */ 1345 /* pv_print(pa); */
1346 } 1346 }
1347#endif 1347#endif
1348 1348
1349 pv_flags = pa_to_pvflags(pa); 1349 pv_flags = pa_to_pvflags(pa);
1350 head = pa_to_pvhead(pa); 1350 head = pa_to_pvhead(pa);
1351 1351
1352#ifdef DIAGNOSTIC 1352#ifdef DIAGNOSTIC
1353 /* See if this mapping is already in the list. */ 1353 /* See if this mapping is already in the list. */
1354 for (pv = *head; pv != NULL; pv = pv->pv_next) { 1354 for (pv = *head; pv != NULL; pv = pv->pv_next) {
1355 if ((pv->pv_pmap == pmap) && (pv->pv_va == va)) 1355 if ((pv->pv_pmap == pmap) && (pv->pv_va == va))
1356 panic("pv_link: duplicate entry for PA=0x%lx", pa); 1356 panic("pv_link: duplicate entry for PA=0x%lx", pa);
1357 } 1357 }
1358#endif 1358#endif
1359#ifdef HAVECACHE 1359#ifdef HAVECACHE
1360 1360
1361 /* 1361 /*
1362 * Does this new mapping cause VAC alias problems? 1362 * Does this new mapping cause VAC alias problems?
1363 */ 1363 */
1364 1364
1365 *pv_flags |= flags; 1365 *pv_flags |= flags;
1366 if ((*pv_flags & PV_NC) == 0) { 1366 if ((*pv_flags & PV_NC) == 0) {
1367 for (pv = *head; pv != NULL; pv = pv->pv_next) { 1367 for (pv = *head; pv != NULL; pv = pv->pv_next) {
1368 if (BADALIAS(va, pv->pv_va)) { 1368 if (BADALIAS(va, pv->pv_va)) {
1369 *pv_flags |= PV_NC; 1369 *pv_flags |= PV_NC;
1370 pv_changepte(pa, PG_NC, 0); 1370 pv_changepte(pa, PG_NC, 0);
1371 pmap_stats.ps_vac_uncached++; 1371 pmap_stats.ps_vac_uncached++;
1372 break; 1372 break;
1373 } 1373 }
1374 } 1374 }
1375 } 1375 }
1376#endif 1376#endif
1377 1377
1378 /* Allocate a PV element (pv_alloc()). */ 1378 /* Allocate a PV element (pv_alloc()). */
1379 pv = pv_free_list; 1379 pv = pv_free_list;
1380 if (pv == NULL) 1380 if (pv == NULL)
1381 panic("pv_link: pv_alloc"); 1381 panic("pv_link: pv_alloc");
1382 pv_free_list = pv->pv_next; 1382 pv_free_list = pv->pv_next;
1383 pv->pv_next = 0; 1383 pv->pv_next = 0;
1384 1384
1385 /* Insert new entry at the head. */ 1385 /* Insert new entry at the head. */
1386 pv->pv_pmap = pmap; 1386 pv->pv_pmap = pmap;
1387 pv->pv_va = va; 1387 pv->pv_va = va;
1388 pv->pv_next = *head; 1388 pv->pv_next = *head;
1389 *head = pv; 1389 *head = pv;
1390 1390
1391 return (*pv_flags & PV_NC); 1391 return (*pv_flags & PV_NC);
1392} 1392}
1393 1393
1394/* 1394/*
1395 * pv_unlink is a helper function for pmap_remove. 1395 * pv_unlink is a helper function for pmap_remove.
1396 * It removes the appropriate (pmap, pa, va) entry. 1396 * It removes the appropriate (pmap, pa, va) entry.
1397 * 1397 *
1398 * Once the entry is removed, if the pv_table head has the cache 1398 * Once the entry is removed, if the pv_table head has the cache
1399 * inhibit bit set, see if we can turn that off; if so, walk the 1399 * inhibit bit set, see if we can turn that off; if so, walk the
1400 * pvlist and turn off PG_NC in each PTE. (The pvlist is by 1400 * pvlist and turn off PG_NC in each PTE. (The pvlist is by
1401 * definition nonempty, since it must have at least two elements 1401 * definition nonempty, since it must have at least two elements
1402 * in it to have PV_NC set, and we only remove one here.) 1402 * in it to have PV_NC set, and we only remove one here.)
1403 */ 1403 */
1404static void  1404static void
1405pv_unlink(pmap_t pmap, int pte, vaddr_t va) 1405pv_unlink(pmap_t pmap, int pte, vaddr_t va)
1406{ 1406{
1407 paddr_t pa; 1407 paddr_t pa;
1408 pv_entry_t *head, *ppv, pv; 1408 pv_entry_t *head, *ppv, pv;
1409 u_char *pv_flags; 1409 u_char *pv_flags;
1410 1410
1411 CHECK_SPL(); 1411 CHECK_SPL();
1412 1412
1413 pa = PG_PA(pte); 1413 pa = PG_PA(pte);
1414#ifdef PMAP_DEBUG 1414#ifdef PMAP_DEBUG
1415 if ((pmap_debug & PMD_LINK) || (va == pmap_db_watchva)) { 1415 if ((pmap_debug & PMD_LINK) || (va == pmap_db_watchva)) {
1416 printf("pv_unlink(%p, 0x%x, 0x%lx)\n", pmap, pte, va); 1416 printf("pv_unlink(%p, 0x%x, 0x%lx)\n", pmap, pte, va);
1417 /* pv_print(pa); */ 1417 /* pv_print(pa); */
1418 } 1418 }
1419#endif 1419#endif
1420 1420
1421 pv_flags = pa_to_pvflags(pa); 1421 pv_flags = pa_to_pvflags(pa);
1422 head = pa_to_pvhead(pa); 1422 head = pa_to_pvhead(pa);
1423 1423
1424 /* 1424 /*
1425 * Find the entry. 1425 * Find the entry.
1426 */ 1426 */
1427 ppv = head; 1427 ppv = head;
1428 pv = *ppv; 1428 pv = *ppv;
1429 while (pv) { 1429 while (pv) {
1430 if ((pv->pv_pmap == pmap) && (pv->pv_va == va)) 1430 if ((pv->pv_pmap == pmap) && (pv->pv_va == va))
1431 goto found; 1431 goto found;
1432 ppv = &pv->pv_next; 1432 ppv = &pv->pv_next;
1433 pv = pv->pv_next; 1433 pv = pv->pv_next;
1434 } 1434 }
1435#ifdef PMAP_DEBUG 1435#ifdef PMAP_DEBUG
1436 db_printf("pv_unlink: not found (pa=0x%lx,va=0x%lx)\n", pa, va); 1436 db_printf("pv_unlink: not found (pa=0x%lx,va=0x%lx)\n", pa, va);
1437 Debugger(); 1437 Debugger();
1438#endif 1438#endif
1439 return; 1439 return;
1440 1440
1441 found: 1441 found:
1442 /* Unlink this entry from the list and clear it. */ 1442 /* Unlink this entry from the list and clear it. */
1443 *ppv = pv->pv_next; 1443 *ppv = pv->pv_next;
1444 pv->pv_pmap = NULL; 1444 pv->pv_pmap = NULL;
1445 pv->pv_va = 0; 1445 pv->pv_va = 0;
1446 1446
1447 /* Insert it on the head of the free list. (pv_free()) */ 1447 /* Insert it on the head of the free list. (pv_free()) */
1448 pv->pv_next = pv_free_list; 1448 pv->pv_next = pv_free_list;
1449 pv_free_list = pv; 1449 pv_free_list = pv;
1450 pv = NULL; 1450 pv = NULL;
1451 1451
1452 /* Do any non-cached mappings remain? */ 1452 /* Do any non-cached mappings remain? */
1453 if ((*pv_flags & PV_NC) == 0) 1453 if ((*pv_flags & PV_NC) == 0)
1454 return; 1454 return;
1455 if ((pv = *head) == NULL) 1455 if ((pv = *head) == NULL)
1456 return; 1456 return;
1457 1457
1458 /* 1458 /*
1459 * Have non-cached mappings. See if we can fix that now. 1459 * Have non-cached mappings. See if we can fix that now.
1460 */ 1460 */
1461 va = pv->pv_va; 1461 va = pv->pv_va;
1462 for (pv = pv->pv_next; pv != NULL; pv = pv->pv_next) { 1462 for (pv = pv->pv_next; pv != NULL; pv = pv->pv_next) {
1463 /* If there is a DVMA mapping, leave it NC. */ 1463 /* If there is a DVMA mapping, leave it NC. */
1464 if (va >= DVMA_MAP_BASE) 1464 if (va >= DVMA_MAP_BASE)
1465 return; 1465 return;
1466 /* If there are VAC alias problems, leave NC. */ 1466 /* If there are VAC alias problems, leave NC. */
1467 if (BADALIAS(va, pv->pv_va)) 1467 if (BADALIAS(va, pv->pv_va))
1468 return; 1468 return;
1469 } 1469 }
1470 /* OK, there are no "problem" mappings. */ 1470 /* OK, there are no "problem" mappings. */
1471 *pv_flags &= ~PV_NC; 1471 *pv_flags &= ~PV_NC;
1472 pv_changepte(pa, 0, PG_NC); 1472 pv_changepte(pa, 0, PG_NC);
1473 pmap_stats.ps_vac_recached++; 1473 pmap_stats.ps_vac_recached++;
1474} 1474}
1475 1475
1476 1476
1477/**************************************************************** 1477/****************************************************************
1478 * Bootstrap and Initialization, etc. 1478 * Bootstrap and Initialization, etc.
1479 */ 1479 */
1480 1480
1481void  1481void
1482pmap_common_init(pmap_t pmap) 1482pmap_common_init(pmap_t pmap)
1483{ 1483{
1484 memset(pmap, 0, sizeof(struct pmap)); 1484 memset(pmap, 0, sizeof(struct pmap));
1485 pmap->pm_refcount = 1; 1485 pmap->pm_refcount = 1;
1486 pmap->pm_version = pmap_version++; 1486 pmap->pm_version = pmap_version++;
1487 pmap->pm_ctxnum = EMPTY_CONTEXT; 1487 pmap->pm_ctxnum = EMPTY_CONTEXT;
1488} 1488}
1489 1489
1490/* 1490/*
1491 * Prepare the kernel for VM operations. 1491 * Prepare the kernel for VM operations.
1492 * This is called by locore2.c:_vm_init() 1492 * This is called by locore2.c:_vm_init()
1493 * after the "start/end" globals are set. 1493 * after the "start/end" globals are set.
1494 * This function must NOT leave context zero. 1494 * This function must NOT leave context zero.
1495 */ 1495 */
1496void  1496void
1497pmap_bootstrap(vaddr_t nextva) 1497pmap_bootstrap(vaddr_t nextva)
1498{ 1498{
1499 struct sunromvec *rvec; 1499 struct sunromvec *rvec;
1500 vaddr_t va, eva; 1500 vaddr_t va, eva;
1501 int i, pte, sme; 1501 int i, pte, sme;
1502 extern char etext[]; 1502 extern char etext[];
1503 1503
1504 nextva = m68k_round_page(nextva); 1504 nextva = m68k_round_page(nextva);
1505 rvec = romVectorPtr; 1505 rvec = romVectorPtr;
1506 1506
1507 /* Steal some special-purpose, already mapped pages? */ 1507 /* Steal some special-purpose, already mapped pages? */
1508 1508
1509 /* 1509 /*
1510 * Determine the range of kernel virtual space available. 1510 * Determine the range of kernel virtual space available.
1511 * It is segment-aligned to simplify PMEG management. 1511 * It is segment-aligned to simplify PMEG management.
1512 */ 1512 */
1513 virtual_avail = sun3_round_seg(nextva); 1513 virtual_avail = sun3_round_seg(nextva);
1514 virtual_end = VM_MAX_KERNEL_ADDRESS; 1514 virtual_end = VM_MAX_KERNEL_ADDRESS;
1515 1515
1516 /* 1516 /*
1517 * Determine the range of physical memory available. 1517 * Determine the range of physical memory available.
1518 * Physical memory at zero was remapped to KERNBASE. 1518 * Physical memory at zero was remapped to KERNBASE.
1519 */ 1519 */
1520 avail_start = nextva - KERNBASE3; 1520 avail_start = nextva - KERNBASE3;
1521 if (rvec->romvecVersion < 1) { 1521 if (rvec->romvecVersion < 1) {
1522 mon_printf("Warning: ancient PROM version=%d\n", 1522 mon_printf("Warning: ancient PROM version=%d\n",
1523 rvec->romvecVersion); 1523 rvec->romvecVersion);
1524 /* Guess that PROM version 0.X used two pages. */ 1524 /* Guess that PROM version 0.X used two pages. */
1525 avail_end = *rvec->memorySize - (2*PAGE_SIZE); 1525 avail_end = *rvec->memorySize - (2*PAGE_SIZE);
1526 } else { 1526 } else {
1527 /* PROM version 1 or later. */ 1527 /* PROM version 1 or later. */
1528 avail_end = *rvec->memoryAvail; 1528 avail_end = *rvec->memoryAvail;
1529 } 1529 }
1530 avail_end = m68k_trunc_page(avail_end); 1530 avail_end = m68k_trunc_page(avail_end);
1531 1531
1532 /* 1532 /*
1533 * Report the actual amount of physical memory, 1533 * Report the actual amount of physical memory,
1534 * even though the PROM takes a few pages. 1534 * even though the PROM takes a few pages.
1535 */ 1535 */
1536 physmem = (btoc(avail_end) + 0xF) & ~0xF; 1536 physmem = (btoc(avail_end) + 0xF) & ~0xF;
1537 1537
1538 /* 1538 /*
1539 * On the Sun3/50, the video frame buffer is located at 1539 * On the Sun3/50, the video frame buffer is located at
1540 * physical address 1MB so we must step over it. 1540 * physical address 1MB so we must step over it.
1541 */ 1541 */
1542 if (cpu_machine_id == ID_SUN3_50) { 1542 if (cpu_machine_id == ID_SUN3_50) {
1543 hole_start = m68k_trunc_page(OBMEM_BW50_ADDR); 1543 hole_start = m68k_trunc_page(OBMEM_BW50_ADDR);
1544 hole_size = m68k_round_page(OBMEM_BW2_SIZE); 1544 hole_size = m68k_round_page(OBMEM_BW2_SIZE);
1545 if (avail_start > hole_start) { 1545 if (avail_start > hole_start) {
1546 mon_printf("kernel too large for Sun3/50\n"); 1546 mon_printf("kernel too large for Sun3/50\n");
1547 sunmon_abort(); 1547 sunmon_abort();
1548 } 1548 }
1549 } 1549 }
1550 1550
1551 /* 1551 /*
1552 * Done allocating PAGES of virtual space, so 1552 * Done allocating PAGES of virtual space, so
1553 * clean out the rest of the last used segment. 1553 * clean out the rest of the last used segment.
1554 */ 1554 */
1555 for (va = nextva; va < virtual_avail; va += PAGE_SIZE) 1555 for (va = nextva; va < virtual_avail; va += PAGE_SIZE)
1556 set_pte(va, PG_INVAL); 1556 set_pte(va, PG_INVAL);
1557 1557
1558 /* 1558 /*
1559 * Now that we are done stealing physical pages, etc. 1559 * Now that we are done stealing physical pages, etc.
1560 * figure out which PMEGs are used by those mappings 1560 * figure out which PMEGs are used by those mappings
1561 * and either reserve them or clear them out. 1561 * and either reserve them or clear them out.
1562 * -- but first, init PMEG management. 1562 * -- but first, init PMEG management.
1563 * This puts all PMEGs in the free list. 1563 * This puts all PMEGs in the free list.
1564 * We will allocate the in-use ones. 1564 * We will allocate the in-use ones.
1565 */ 1565 */
1566 pmeg_init(); 1566 pmeg_init();
1567 1567
1568 /* 1568 /*
1569 * Unmap user virtual segments. 1569 * Unmap user virtual segments.
1570 * VA range: [0 .. KERNBASE] 1570 * VA range: [0 .. KERNBASE]
1571 */ 1571 */
1572 for (va = 0; va < KERNBASE3; va += NBSG) 1572 for (va = 0; va < KERNBASE3; va += NBSG)
1573 set_segmap(va, SEGINV); 1573 set_segmap(va, SEGINV);
1574 1574
1575 /* 1575 /*
1576 * Reserve PMEGS for kernel text/data/bss 1576 * Reserve PMEGS for kernel text/data/bss
1577 * and the misc pages taken above. 1577 * and the misc pages taken above.
1578 * VA range: [KERNBASE .. virtual_avail] 1578 * VA range: [KERNBASE .. virtual_avail]
1579 */ 1579 */
1580 for ( ; va < virtual_avail; va += NBSG) { 1580 for ( ; va < virtual_avail; va += NBSG) {
1581 sme = get_segmap(va); 1581 sme = get_segmap(va);
1582 if (sme == SEGINV) { 1582 if (sme == SEGINV) {
1583 mon_printf("kernel text/data/bss not mapped\n"); 1583 mon_printf("kernel text/data/bss not mapped\n");
1584 sunmon_abort(); 1584 sunmon_abort();
1585 } 1585 }
1586 pmeg_reserve(sme); 1586 pmeg_reserve(sme);
1587 } 1587 }
1588 1588
1589 /* 1589 /*
1590 * Unmap kernel virtual space. Make sure to leave no valid 1590 * Unmap kernel virtual space. Make sure to leave no valid
1591 * segmap entries in the MMU unless pmeg_array records them. 1591 * segmap entries in the MMU unless pmeg_array records them.
1592 * VA range: [virtual_avail .. virtual_end] 1592 * VA range: [virtual_avail .. virtual_end]
1593 */ 1593 */
1594 for ( ; va < virtual_end; va += NBSG) 1594 for ( ; va < virtual_end; va += NBSG)
1595 set_segmap(va, SEGINV); 1595 set_segmap(va, SEGINV);
1596 1596
1597 /* 1597 /*
1598 * Reserve PMEGs used by the PROM monitor (device mappings). 1598 * Reserve PMEGs used by the PROM monitor (device mappings).
1599 * Free up any pmegs in this range which have no mappings. 1599 * Free up any pmegs in this range which have no mappings.
1600 * VA range: [0x0FE00000 .. 0x0FF00000] 1600 * VA range: [0x0FE00000 .. 0x0FF00000]
1601 */ 1601 */
1602 pmeg_mon_init(SUN3_MONSTART, SUN3_MONEND, true); 1602 pmeg_mon_init(SUN3_MONSTART, SUN3_MONEND, true);
1603 1603
1604 /* 1604 /*
1605 * Unmap any pmegs left in DVMA space by the PROM. 1605 * Unmap any pmegs left in DVMA space by the PROM.
1606 * DO NOT kill the last one! (owned by the PROM!) 1606 * DO NOT kill the last one! (owned by the PROM!)
1607 * VA range: [0x0FF00000 .. 0x0FFE0000] 1607 * VA range: [0x0FF00000 .. 0x0FFE0000]
1608 */ 1608 */
1609 pmeg_mon_init(SUN3_MONEND, SUN3_MONSHORTSEG, false); 1609 pmeg_mon_init(SUN3_MONEND, SUN3_MONSHORTSEG, false);
1610 1610
1611 /* 1611 /*
1612 * MONSHORTSEG contains MONSHORTPAGE which is a data page 1612 * MONSHORTSEG contains MONSHORTPAGE which is a data page
1613 * allocated by the PROM monitor. Reserve the segment, 1613 * allocated by the PROM monitor. Reserve the segment,
1614 * but clear out all but the last PTE inside it. 1614 * but clear out all but the last PTE inside it.
1615 * Note we use this for tmp_vpages. 1615 * Note we use this for tmp_vpages.
1616 */ 1616 */
1617 va = SUN3_MONSHORTSEG; 1617 va = SUN3_MONSHORTSEG;
1618 eva = SUN3_MONSHORTPAGE; 1618 eva = SUN3_MONSHORTPAGE;
1619 sme = get_segmap(va); 1619 sme = get_segmap(va);
1620 pmeg_reserve(sme); 1620 pmeg_reserve(sme);
1621 for ( ; va < eva; va += PAGE_SIZE) 1621 for ( ; va < eva; va += PAGE_SIZE)
1622 set_pte(va, PG_INVAL); 1622 set_pte(va, PG_INVAL);
1623 1623
1624 /* 1624 /*
1625 * Done reserving PMEGs and/or clearing out mappings. 1625 * Done reserving PMEGs and/or clearing out mappings.
1626 * 1626 *
1627 * Now verify the mapping protections and such for the 1627 * Now verify the mapping protections and such for the
1628 * important parts of the address space (in VA order). 1628 * important parts of the address space (in VA order).
1629 * Note that the Sun PROM usually leaves the memory 1629 * Note that the Sun PROM usually leaves the memory
1630 * mapped with everything non-cached... 1630 * mapped with everything non-cached...
1631 */ 1631 */
1632 1632
1633 /* 1633 /*
1634 * Map the message buffer page at a constant location 1634 * Map the message buffer page at a constant location
1635 * (physical address zero) so its contents will be 1635 * (physical address zero) so its contents will be
1636 * preserved through a reboot. 1636 * preserved through a reboot.
1637 */ 1637 */
1638 va = KERNBASE3; 1638 va = KERNBASE3;
1639 pte = get_pte(va); 1639 pte = get_pte(va);
1640 pte |= (PG_SYSTEM | PG_WRITE | PG_NC); 1640 pte |= (PG_SYSTEM | PG_WRITE | PG_NC);
1641 set_pte(va, pte); 1641 set_pte(va, pte);
1642 va += PAGE_SIZE; 1642 va += PAGE_SIZE;
1643 /* Initialize msgbufaddr later, in machdep.c */ 1643 /* Initialize msgbufaddr later, in machdep.c */
1644 1644
1645 /* Next is the tmpstack page. */ 1645 /* Next is the tmpstack page. */
1646 pte = get_pte(va); 1646 pte = get_pte(va);
1647 pte &= ~(PG_NC); 1647 pte &= ~(PG_NC);
1648 pte |= (PG_SYSTEM | PG_WRITE); 1648 pte |= (PG_SYSTEM | PG_WRITE);
1649 set_pte(va, pte); 1649 set_pte(va, pte);
1650 va += PAGE_SIZE; 1650 va += PAGE_SIZE;
1651 1651
1652 /* 1652 /*
1653 * Next is the kernel text. 1653 * Next is the kernel text.
1654 * 1654 *
1655 * Verify protection bits on kernel text/data/bss 1655 * Verify protection bits on kernel text/data/bss
1656 * All of kernel text, data, and bss are cached. 1656 * All of kernel text, data, and bss are cached.
1657 * Text is read-only (except in db_write_ktext). 1657 * Text is read-only (except in db_write_ktext).
1658 */ 1658 */
1659 eva = m68k_trunc_page(etext); 1659 eva = m68k_trunc_page(etext);
1660 while (va < eva) { 1660 while (va < eva) {
1661 pte = get_pte(va); 1661 pte = get_pte(va);
1662 if ((pte & (PG_VALID|PG_TYPE)) != PG_VALID) { 1662 if ((pte & (PG_VALID|PG_TYPE)) != PG_VALID) {
1663 mon_printf("invalid page at 0x%x\n", va); 1663 mon_printf("invalid page at 0x%x\n", va);
1664 } 1664 }
1665 pte &= ~(PG_WRITE|PG_NC); 1665 pte &= ~(PG_WRITE|PG_NC);
1666 /* Kernel text is read-only */ 1666 /* Kernel text is read-only */
1667 pte |= (PG_SYSTEM); 1667 pte |= (PG_SYSTEM);
1668 set_pte(va, pte); 1668 set_pte(va, pte);
1669 va += PAGE_SIZE; 1669 va += PAGE_SIZE;
1670 } 1670 }
1671 /* data, bss, etc. */ 1671 /* data, bss, etc. */
1672 while (va < nextva) { 1672 while (va < nextva) {
1673 pte = get_pte(va); 1673 pte = get_pte(va);
1674 if ((pte & (PG_VALID|PG_TYPE)) != PG_VALID) { 1674 if ((pte & (PG_VALID|PG_TYPE)) != PG_VALID) {
1675 mon_printf("invalid page at 0x%x\n", va); 1675 mon_printf("invalid page at 0x%x\n", va);
1676 } 1676 }
1677 pte &= ~(PG_NC); 1677 pte &= ~(PG_NC);
1678 pte |= (PG_SYSTEM | PG_WRITE); 1678 pte |= (PG_SYSTEM | PG_WRITE);
1679 set_pte(va, pte); 1679 set_pte(va, pte);
1680 va += PAGE_SIZE; 1680 va += PAGE_SIZE;
1681 } 1681 }
1682 1682
1683 /* 1683 /*
1684 * Duplicate all mappings in the current context into 1684 * Duplicate all mappings in the current context into
1685 * every other context. We have to let the PROM do the 1685 * every other context. We have to let the PROM do the
1686 * actual segmap manipulation because we can only switch 1686 * actual segmap manipulation because we can only switch
1687 * the MMU context after we are sure that the kernel is 1687 * the MMU context after we are sure that the kernel is
1688 * identically mapped in all contexts. The PROM can do 1688 * identically mapped in all contexts. The PROM can do
1689 * the job using hardware-dependent tricks... 1689 * the job using hardware-dependent tricks...
1690 */ 1690 */
1691#ifdef DIAGNOSTIC 1691#ifdef DIAGNOSTIC
1692 /* Note: PROM setcxsegmap function needs sfc=dfc=FC_CONTROL */ 1692 /* Note: PROM setcxsegmap function needs sfc=dfc=FC_CONTROL */
1693 if ((getsfc() != FC_CONTROL) || (getdfc() != FC_CONTROL)) { 1693 if ((getsfc() != FC_CONTROL) || (getdfc() != FC_CONTROL)) {
1694 mon_printf("pmap_bootstrap: bad dfc or sfc\n"); 1694 mon_printf("pmap_bootstrap: bad dfc or sfc\n");
1695 sunmon_abort(); 1695 sunmon_abort();
1696 } 1696 }
1697 /* Near the beginning of locore.s we set context zero. */ 1697 /* Near the beginning of locore.s we set context zero. */
1698 if (get_context() != 0) { 1698 if (get_context() != 0) {
1699 mon_printf("pmap_bootstrap: not in context zero?\n"); 1699 mon_printf("pmap_bootstrap: not in context zero?\n");
1700 sunmon_abort(); 1700 sunmon_abort();
1701 } 1701 }
1702#endif /* DIAGNOSTIC */ 1702#endif /* DIAGNOSTIC */
1703 for (va = 0; va < (vaddr_t) (NBSG * NSEGMAP); va += NBSG) { 1703 for (va = 0; va < (vaddr_t) (NBSG * NSEGMAP); va += NBSG) {
1704 /* Read the segmap entry from context zero... */ 1704 /* Read the segmap entry from context zero... */
1705 sme = get_segmap(va); 1705 sme = get_segmap(va);
1706 /* ... then copy it into all other contexts. */ 1706 /* ... then copy it into all other contexts. */
1707 for (i = 1; i < NCONTEXT; i++) { 1707 for (i = 1; i < NCONTEXT; i++) {
1708 (*rvec->setcxsegmap)(i, va, sme); 1708 (*rvec->setcxsegmap)(i, va, sme);
1709 } 1709 }
1710 } 1710 }
1711 1711
1712 /* 1712 /*
1713 * Reserve a segment for the kernel to use to access a pmeg 1713 * Reserve a segment for the kernel to use to access a pmeg
1714 * that is not currently mapped into any context/segmap. 1714 * that is not currently mapped into any context/segmap.
1715 * The kernel temporarily maps such a pmeg into this segment. 1715 * The kernel temporarily maps such a pmeg into this segment.
1716 * 1716 *
1717 * XXX: Now that context zero is reserved as kernel-only, 1717 * XXX: Now that context zero is reserved as kernel-only,
1718 * we could borrow context zero for these temporary uses. 1718 * we could borrow context zero for these temporary uses.
1719 */ 1719 */
1720 temp_seg_va = virtual_avail; 1720 temp_seg_va = virtual_avail;
1721 virtual_avail += NBSG; 1721 virtual_avail += NBSG;
1722#ifdef DIAGNOSTIC 1722#ifdef DIAGNOSTIC
1723 if (temp_seg_va & SEGOFSET) { 1723 if (temp_seg_va & SEGOFSET) {
1724 mon_printf("pmap_bootstrap: temp_seg_va\n"); 1724 mon_printf("pmap_bootstrap: temp_seg_va\n");
1725 sunmon_abort(); 1725 sunmon_abort();
1726 } 1726 }
1727#endif 1727#endif
1728 1728
1729 /* Initialization for pmap_next_page() */ 1729 /* Initialization for pmap_next_page() */
1730 avail_next = avail_start; 1730 avail_next = avail_start;
1731 1731
1732 uvmexp.pagesize = PAGE_SIZE; 1732 uvmexp.pagesize = PAGE_SIZE;
1733 uvm_setpagesize(); 1733 uvm_setpagesize();
1734 1734
1735 /* after setting up some structures */ 1735 /* after setting up some structures */
1736 1736
1737 pmap_common_init(kernel_pmap); 1737 pmap_common_init(kernel_pmap);
1738 pmap_kernel_init(kernel_pmap); 1738 pmap_kernel_init(kernel_pmap);
1739 1739
1740 context_init(); 1740 context_init();
1741 1741
1742 pmeg_clean_free(); 1742 pmeg_clean_free();
1743 1743
1744 pmap_page_upload(); 1744 pmap_page_upload();
1745} 1745}
1746 1746
1747/* 1747/*
1748 * Give the kernel pmap a segmap, just so there are not 1748 * Give the kernel pmap a segmap, just so there are not
1749 * so many special cases required. Maybe faster too, 1749 * so many special cases required. Maybe faster too,
1750 * because this lets pmap_remove() and pmap_protect() 1750 * because this lets pmap_remove() and pmap_protect()
1751 * use a S/W copy of the segmap to avoid function calls. 1751 * use a S/W copy of the segmap to avoid function calls.
1752 */ 1752 */
1753void  1753void
1754pmap_kernel_init(pmap_t pmap) 1754pmap_kernel_init(pmap_t pmap)
1755{ 1755{
1756 vaddr_t va; 1756 vaddr_t va;
1757 int i, sme; 1757 int i, sme;
1758 1758
1759 for (i=0, va=0; i < NSEGMAP; i++, va+=NBSG) { 1759 for (i=0, va=0; i < NSEGMAP; i++, va+=NBSG) {
1760 sme = get_segmap(va); 1760 sme = get_segmap(va);
1761 kernel_segmap[i] = sme; 1761 kernel_segmap[i] = sme;
1762 } 1762 }
1763 pmap->pm_segmap = kernel_segmap; 1763 pmap->pm_segmap = kernel_segmap;
1764} 1764}
1765 1765
1766 1766
1767/**************************************************************** 1767/****************************************************************
1768 * PMAP interface functions. 1768 * PMAP interface functions.
1769 */ 1769 */
1770 1770
1771/* 1771/*
1772 * Support functions for vm_page_bootstrap(). 1772 * Support functions for vm_page_bootstrap().
1773 */ 1773 */
1774 1774
1775/* 1775/*
1776 * How much virtual space does this kernel have? 1776 * How much virtual space does this kernel have?
1777 * (After mapping kernel text, data, etc.) 1777 * (After mapping kernel text, data, etc.)
1778 */ 1778 */
1779void  1779void
1780pmap_virtual_space(vaddr_t *v_start, vaddr_t *v_end) 1780pmap_virtual_space(vaddr_t *v_start, vaddr_t *v_end)
1781{ 1781{
1782 *v_start = virtual_avail; 1782 *v_start = virtual_avail;
1783 *v_end = virtual_end; 1783 *v_end = virtual_end;
1784} 1784}
1785 1785
1786/* Provide memory to the VM system. */ 1786/* Provide memory to the VM system. */
1787static void  1787static void
1788pmap_page_upload(void) 1788pmap_page_upload(void)
1789{ 1789{
1790 int a, b, c, d; 1790 int a, b, c, d;
1791 1791
1792 if (hole_size) { 1792 if (hole_size) {
1793 /* 1793 /*
1794 * Supply the memory in two segments so the 1794 * Supply the memory in two segments so the
1795 * reserved memory (3/50 video ram at 1MB) 1795 * reserved memory (3/50 video ram at 1MB)
1796 * can be carved from the front of the 2nd. 1796 * can be carved from the front of the 2nd.
1797 */ 1797 */
1798 a = atop(avail_start); 1798 a = atop(avail_start);
1799 b = atop(hole_start); 1799 b = atop(hole_start);
1800 uvm_page_physload(a, b, a, b, VM_FREELIST_DEFAULT); 1800 uvm_page_physload(a, b, a, b, VM_FREELIST_DEFAULT);
1801 c = atop(hole_start + hole_size); 1801 c = atop(hole_start + hole_size);
1802 d = atop(avail_end); 1802 d = atop(avail_end);
1803 uvm_page_physload(b, d, c, d, VM_FREELIST_DEFAULT); 1803 uvm_page_physload(b, d, c, d, VM_FREELIST_DEFAULT);
1804 } else { 1804 } else {
1805 a = atop(avail_start); 1805 a = atop(avail_start);
1806 d = atop(avail_end); 1806 d = atop(avail_end);
1807 uvm_page_physload(a, d, a, d, VM_FREELIST_DEFAULT); 1807 uvm_page_physload(a, d, a, d, VM_FREELIST_DEFAULT);
1808 } 1808 }
1809} 1809}
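/*
 * Concretely, in the 3/50 case the first call supplies the pages in
 * [avail_start .. hole_start) and the second supplies the segment
 * [hole_start .. avail_end) while offering only the pages from
 * hole_start + hole_size upward as free, so the frame buffer pages
 * never enter the free pool.
 */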
1810 1810
1811/* 1811/*
1812 * Initialize the pmap module. 1812 * Initialize the pmap module.
1813 * Called by vm_init, to initialize any structures that the pmap 1813 * Called by vm_init, to initialize any structures that the pmap
1814 * system needs to map virtual memory. 1814 * system needs to map virtual memory.
1815 */ 1815 */
1816void  1816void
1817pmap_init(void) 1817pmap_init(void)
1818{ 1818{
1819 pv_init(); 1819 pv_init();
1820 1820
1821 /* Initialize the pmap pool. */ 1821 /* Initialize the pmap pool. */
1822 pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl", 1822 pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
1823 &pool_allocator_nointr, IPL_NONE); 1823 &pool_allocator_nointr, IPL_NONE);
1824} 1824}
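/*
 * pmap_create() below draws pmaps from this pool with
 * pool_get(&pmap_pmap_pool, PR_WAITOK), and the final reference
 * dropped in pmap_destroy() returns them with pool_put().
 */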
1825 1825
1826/* 1826/*
1827 * Map a range of kernel virtual address space. 1827 * Map a range of kernel virtual address space.
1828 * This might be used for device mappings, or to 1828 * This might be used for device mappings, or to
1829 * record the mapping for kernel text/data/bss. 1829 * record the mapping for kernel text/data/bss.
1830 * Return VA following the mapped range. 1830 * Return VA following the mapped range.
1831 */ 1831 */
1832vaddr_t  1832vaddr_t
1833pmap_map(vaddr_t va, paddr_t pa, paddr_t endpa, int prot) 1833pmap_map(vaddr_t va, paddr_t pa, paddr_t endpa, int prot)
1834{ 1834{
1835 int sz; 1835 int sz;
1836 1836
1837 sz = endpa - pa; 1837 sz = endpa - pa;
1838 do { 1838 do {
1839 pmap_enter(kernel_pmap, va, pa, prot, 0); 1839 pmap_enter(kernel_pmap, va, pa, prot, 0);
1840 va += PAGE_SIZE; 1840 va += PAGE_SIZE;
1841 pa += PAGE_SIZE; 1841 pa += PAGE_SIZE;
1842 sz -= PAGE_SIZE; 1842 sz -= PAGE_SIZE;
1843 } while (sz > 0); 1843 } while (sz > 0);
1844 pmap_update(kernel_pmap); 1844 pmap_update(kernel_pmap);
1845 return(va); 1845 return(va);
1846} 1846}
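/*
 * A hypothetical use (illustration only; the real callers live in
 * machdep code), mapping a physical range read/write and getting
 * back the next free VA:
 *
 *	va = pmap_map(va, pa, pa + sz, VM_PROT_READ | VM_PROT_WRITE);
 */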
1847 1847
1848void  1848void
1849pmap_user_init(pmap_t pmap) 1849pmap_user_init(pmap_t pmap)
1850{ 1850{
1851 int i; 1851 int i;
1852 pmap->pm_segmap = malloc(sizeof(char)*NUSEG, M_VMPMAP, M_WAITOK); 1852 pmap->pm_segmap = kmem_alloc(sizeof(char)*NUSEG, KM_SLEEP);
1853 for (i = 0; i < NUSEG; i++) { 1853 for (i = 0; i < NUSEG; i++) {
1854 pmap->pm_segmap[i] = SEGINV; 1854 pmap->pm_segmap[i] = SEGINV;
1855 } 1855 }
1856} 1856}
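/*
 * Note on the malloc(9) -> kmem(9) conversion: kmem_free() must be
 * passed the size of the region being freed, where free(9) needed
 * only the malloc type, so this allocation and its release in
 * pmap_release() use the same size expression:
 *
 *	pmap->pm_segmap = kmem_alloc(sizeof(char)*NUSEG, KM_SLEEP);
 *	...
 *	kmem_free(pmap->pm_segmap, sizeof(char)*NUSEG);
 */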
1857 1857
1858/* 1858/*
1859 * Create and return a physical map. 1859 * Create and return a physical map.
1860 * 1860 *
1861 * If the size specified for the map 1861 * If the size specified for the map
1862 * is zero, the map is an actual physical 1862 * is zero, the map is an actual physical
1863 * map, and may be referenced by the 1863 * map, and may be referenced by the
1864 * hardware. 1864 * hardware.
1865 * 1865 *
1866 * If the size specified is non-zero, 1866 * If the size specified is non-zero,
1867 * the map will be used in software only, and 1867 * the map will be used in software only, and
1868 * is bounded by that size. 1868 * is bounded by that size.
1869 */ 1869 */
1870pmap_t  1870pmap_t
1871pmap_create(void) 1871pmap_create(void)
1872{ 1872{
1873 pmap_t pmap; 1873 pmap_t pmap;
1874 1874
1875 pmap = pool_get(&pmap_pmap_pool, PR_WAITOK); 1875 pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
1876 pmap_pinit(pmap); 1876 pmap_pinit(pmap);
1877 return pmap; 1877 return pmap;
1878} 1878}
1879 1879
1880/* 1880/*
1881 * Release any resources held by the given physical map. 1881 * Release any resources held by the given physical map.
1882 * Called when a pmap initialized by pmap_pinit is being released. 1882 * Called when a pmap initialized by pmap_pinit is being released.
1883 * Should only be called if the map contains no valid mappings. 1883 * Should only be called if the map contains no valid mappings.
1884 */ 1884 */
1885void  1885void
1886pmap_release(struct pmap *pmap) 1886pmap_release(struct pmap *pmap)
1887{ 1887{
1888 int s; 1888 int s;
1889 1889
1890 s = splvm(); 1890 s = splvm();
1891 1891
1892 if (pmap == kernel_pmap) 1892 if (pmap == kernel_pmap)
1893 panic("pmap_release: kernel_pmap!"); 1893 panic("pmap_release: kernel_pmap!");
1894 1894
1895 if (has_context(pmap)) { 1895 if (has_context(pmap)) {
1896#ifdef PMAP_DEBUG 1896#ifdef PMAP_DEBUG
1897 if (pmap_debug & PMD_CONTEXT) 1897 if (pmap_debug & PMD_CONTEXT)
1898 printf("pmap_release(%p): free ctx %d\n", 1898 printf("pmap_release(%p): free ctx %d\n",
1899 pmap, pmap->pm_ctxnum); 1899 pmap, pmap->pm_ctxnum);
1900#endif 1900#endif
1901 context_free(pmap); 1901 context_free(pmap);
1902 } 1902 }
1903 free(pmap->pm_segmap, M_VMPMAP); 1903 kmem_free(pmap->pm_segmap, sizeof(char)*NUSEG);
1904 pmap->pm_segmap = NULL; 1904 pmap->pm_segmap = NULL;
1905 1905
1906 splx(s); 1906 splx(s);
1907} 1907}
1908 1908
1909 1909
1910/* 1910/*
1911 * Retire the given physical map from service. 1911 * Retire the given physical map from service.
1912 * Should only be called if the map contains 1912 * Should only be called if the map contains
1913 * no valid mappings. 1913 * no valid mappings.
1914 */ 1914 */
1915void  1915void
1916pmap_destroy(pmap_t pmap) 1916pmap_destroy(pmap_t pmap)
1917{ 1917{
1918 int count; 1918 int count;
1919 1919
1920#ifdef PMAP_DEBUG 1920#ifdef PMAP_DEBUG
1921 if (pmap_debug & PMD_CREATE) 1921 if (pmap_debug & PMD_CREATE)
1922 printf("pmap_destroy(%p)\n", pmap); 1922 printf("pmap_destroy(%p)\n", pmap);
1923#endif 1923#endif
1924 if (pmap == kernel_pmap) 1924 if (pmap == kernel_pmap)
1925 panic("pmap_destroy: kernel_pmap!"); 1925 panic("pmap_destroy: kernel_pmap!");
1926 count = atomic_dec_uint_nv(&pmap->pm_refcount); 1926 count = atomic_dec_uint_nv(&pmap->pm_refcount);
1927 if (count == 0) { 1927 if (count == 0) {
1928 pmap_release(pmap); 1928 pmap_release(pmap);
1929 pool_put(&pmap_pmap_pool, pmap); 1929 pool_put(&pmap_pmap_pool, pmap);
1930 } 1930 }
1931} 1931}
1932 1932
1933/* 1933/*
1934 * Add a reference to the specified pmap. 1934 * Add a reference to the specified pmap.
1935 */ 1935 */
1936void  1936void
1937pmap_reference(pmap_t pmap) 1937pmap_reference(pmap_t pmap)
1938{ 1938{
1939 1939
1940 atomic_inc_uint(&pmap->pm_refcount); 1940 atomic_inc_uint(&pmap->pm_refcount);
1941} 1941}
1942 1942
1943 1943
1944/* 1944/*
1945 * Insert the given physical page (p) at 1945 * Insert the given physical page (p) at
1946 * the specified virtual address (v) in the 1946 * the specified virtual address (v) in the
1947 * target physical map with the protection requested. 1947 * target physical map with the protection requested.
1948 * 1948 *
1949 * The physical address is page aligned, but may have some 1949 * The physical address is page aligned, but may have some
1950 * low bits set indicating an OBIO or VME bus page, or just 1950 * low bits set indicating an OBIO or VME bus page, or just
1951 * that the non-cache bit should be set (i.e. PMAP_NC). 1951 * that the non-cache bit should be set (i.e. PMAP_NC).
1952 * 1952 *
1953 * If specified, the page will be wired down, meaning 1953 * If specified, the page will be wired down, meaning
1954 * that the related pte can not be reclaimed. 1954 * that the related pte can not be reclaimed.
1955 * 1955 *
1956 * NB: This is the only routine which MAY NOT lazy-evaluate 1956 * NB: This is the only routine which MAY NOT lazy-evaluate
1957 * or lose information. That is, this routine must actually 1957 * or lose information. That is, this routine must actually
1958 * insert this page into the given map NOW. 1958 * insert this page into the given map NOW.
1959 */ 1959 */
1960int  1960int
1961pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 1961pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1962{ 1962{
1963 int new_pte, s; 1963 int new_pte, s;
1964 bool wired = (flags & PMAP_WIRED) != 0; 1964 bool wired = (flags & PMAP_WIRED) != 0;
1965 1965
1966#ifdef PMAP_DEBUG 1966#ifdef PMAP_DEBUG
1967 if ((pmap_debug & PMD_ENTER) || 1967 if ((pmap_debug & PMD_ENTER) ||
1968 (va == pmap_db_watchva)) 1968 (va == pmap_db_watchva))
1969 printf("pmap_enter(%p, 0x%lx, 0x%lx, 0x%x, 0x%x)\n", 1969 printf("pmap_enter(%p, 0x%lx, 0x%lx, 0x%x, 0x%x)\n",
1970 pmap, va, pa, prot, wired); 1970 pmap, va, pa, prot, wired);
1971#endif 1971#endif
1972 1972
1973 /* Get page-type bits from low part of the PA... */ 1973 /* Get page-type bits from low part of the PA... */
1974 new_pte = (pa & PMAP_SPEC) << PG_MOD_SHIFT; 1974 new_pte = (pa & PMAP_SPEC) << PG_MOD_SHIFT;
1975 1975
1976 /* ...now the valid and writable bits... */ 1976 /* ...now the valid and writable bits... */
1977 new_pte |= PG_VALID; 1977 new_pte |= PG_VALID;
1978 if (prot & VM_PROT_WRITE) 1978 if (prot & VM_PROT_WRITE)
1979 new_pte |= PG_WRITE; 1979 new_pte |= PG_WRITE;
1980 if (flags & VM_PROT_ALL) { 1980 if (flags & VM_PROT_ALL) {
1981 new_pte |= PG_REF; 1981 new_pte |= PG_REF;
1982 if (flags & VM_PROT_WRITE) { 1982 if (flags & VM_PROT_WRITE) {
1983 new_pte |= PG_MOD; 1983 new_pte |= PG_MOD;
1984 } 1984 }
1985 } 1985 }
1986 1986
1987 /* ...and finally the page-frame number. */ 1987 /* ...and finally the page-frame number. */
1988 new_pte |= PA_PGNUM(pa); 1988 new_pte |= PA_PGNUM(pa);
1989 1989
1990 /* 1990 /*
1991 * treatment varies significantly: 1991 * treatment varies significantly:
1992 * kernel ptes are in all contexts, and are always in the mmu 1992 * kernel ptes are in all contexts, and are always in the mmu
1993 * user ptes may not necessarily be in the mmu; the pmap may not 1993 * user ptes may not necessarily be in the mmu; the pmap may not
1994 * be in the mmu either. 1994 * be in the mmu either.
1995 * 1995 *
1996 */ 1996 */
1997 s = splvm(); 1997 s = splvm();
1998 if (pmap == kernel_pmap) { 1998 if (pmap == kernel_pmap) {
1999 new_pte |= PG_SYSTEM; 1999 new_pte |= PG_SYSTEM;
2000 pmap_enter_kernel(va, new_pte, wired); 2000 pmap_enter_kernel(va, new_pte, wired);
2001 } else { 2001 } else {
2002 pmap_enter_user(pmap, va, new_pte, wired); 2002 pmap_enter_user(pmap, va, new_pte, wired);
2003 } 2003 }
2004 splx(s); 2004 splx(s);
2005 return 0; 2005 return 0;
2006} 2006}
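/*
 * For illustration (hypothetical call, not taken from this file):
 * a wired, non-cached kernel mapping of a device page could be
 * entered as
 *
 *	pmap_enter(kernel_pmap, va, pa | PMAP_NC,
 *	    VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
 */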
2007 2007
2008static void  2008static void
2009pmap_enter_kernel(vaddr_t pgva, int new_pte, bool wired) 2009pmap_enter_kernel(vaddr_t pgva, int new_pte, bool wired)
2010{ 2010{
2011 pmap_t pmap = kernel_pmap; 2011 pmap_t pmap = kernel_pmap;
2012 pmeg_t pmegp; 2012 pmeg_t pmegp;
2013 int do_pv, old_pte, sme; 2013 int do_pv, old_pte, sme;
2014 vaddr_t segva; 2014 vaddr_t segva;
2015 2015
2016 /* 2016 /*
2017 keep in hardware only, since it's mapped into all contexts anyway; 2017 keep in hardware only, since it's mapped into all contexts anyway;
2018 need to handle possibly allocating additional pmegs 2018 need to handle possibly allocating additional pmegs
2019 need to make sure they can't be stolen from the kernel; 2019 need to make sure they can't be stolen from the kernel;
2020 map any new pmegs into all contexts, make sure rest of pmeg is null; 2020 map any new pmegs into all contexts, make sure rest of pmeg is null;
2021 deal with pv_stuff; possibly caching problems; 2021 deal with pv_stuff; possibly caching problems;
2022 must also deal with changes too. 2022 must also deal with changes too.
2023 */ 2023 */
2024 2024
2025 /* 2025 /*
2026 * In detail: 2026 * In detail:
2027 * 2027 *
2028 * (a) lock pmap 2028 * (a) lock pmap
2029 * (b) Is the VA in an already mapped segment? If so, 2029 * (b) Is the VA in an already mapped segment? If so,
2030 * look to see if that VA address is "valid". If it is, then 2030 * look to see if that VA address is "valid". If it is, then
2031 * action is a change to an existing pte 2031 * action is a change to an existing pte
2032 * (c) if not mapped segment, need to allocate pmeg 2032 * (c) if not mapped segment, need to allocate pmeg
2033 * (d) if adding pte entry or changing physaddr of existing one, 2033 * (d) if adding pte entry or changing physaddr of existing one,
2034 * use pv_stuff, for change, pmap_remove() possibly. 2034 * use pv_stuff, for change, pmap_remove() possibly.
2035 * (e) change/add pte 2035 * (e) change/add pte
2036 */ 2036 */
2037 2037
2038#ifdef DIAGNOSTIC 2038#ifdef DIAGNOSTIC
2039 if ((pgva < virtual_avail) || (pgva >= DVMA_MAP_END)) 2039 if ((pgva < virtual_avail) || (pgva >= DVMA_MAP_END))
2040 panic("pmap_enter_kernel: bad va=0x%lx", pgva); 2040 panic("pmap_enter_kernel: bad va=0x%lx", pgva);
2041 if ((new_pte & (PG_VALID | PG_SYSTEM)) != (PG_VALID | PG_SYSTEM)) 2041 if ((new_pte & (PG_VALID | PG_SYSTEM)) != (PG_VALID | PG_SYSTEM))
2042 panic("pmap_enter_kernel: bad pte"); 2042 panic("pmap_enter_kernel: bad pte");
2043#endif 2043#endif
2044 2044
2045 if (pgva >= DVMA_MAP_BASE) { 2045 if (pgva >= DVMA_MAP_BASE) {
2046 /* This is DVMA space. Always want it non-cached. */ 2046 /* This is DVMA space. Always want it non-cached. */
2047 new_pte |= PG_NC; 2047 new_pte |= PG_NC;
2048 } 2048 }
2049 2049
2050 segva = sun3_trunc_seg(pgva); 2050 segva = sun3_trunc_seg(pgva);
2051 do_pv = true; 2051 do_pv = true;
2052 2052
2053 /* Do we have a PMEG? */ 2053 /* Do we have a PMEG? */
2054 sme = get_segmap(segva); 2054 sme = get_segmap(segva);
2055 if (sme != SEGINV) { 2055 if (sme != SEGINV) {
2056 /* Found a PMEG in the segmap. Cool. */ 2056 /* Found a PMEG in the segmap. Cool. */
2057 pmegp = pmeg_p(sme); 2057 pmegp = pmeg_p(sme);
2058#ifdef DIAGNOSTIC 2058#ifdef DIAGNOSTIC
2059 /* Make sure it is the right PMEG. */ 2059 /* Make sure it is the right PMEG. */
2060 if (sme != pmap->pm_segmap[VA_SEGNUM(segva)]) 2060 if (sme != pmap->pm_segmap[VA_SEGNUM(segva)])
2061 panic("pmap_enter_kernel: wrong sme at VA=0x%lx", 2061 panic("pmap_enter_kernel: wrong sme at VA=0x%lx",
2062 segva); 2062 segva);
2063 /* Make sure it is ours. */ 2063 /* Make sure it is ours. */
2064 if (pmegp->pmeg_owner != pmap) 2064 if (pmegp->pmeg_owner != pmap)
2065 panic("pmap_enter_kernel: MMU has bad pmeg 0x%x", sme); 2065 panic("pmap_enter_kernel: MMU has bad pmeg 0x%x", sme);
2066#endif 2066#endif
2067 } else { 2067 } else {
2068 /* No PMEG in the segmap. Have to allocate one. */ 2068 /* No PMEG in the segmap. Have to allocate one. */
2069 pmegp = pmeg_allocate(pmap, segva); 2069 pmegp = pmeg_allocate(pmap, segva);
2070 sme = pmegp->pmeg_index; 2070 sme = pmegp->pmeg_index;
2071 pmap->pm_segmap[VA_SEGNUM(segva)] = sme; 2071 pmap->pm_segmap[VA_SEGNUM(segva)] = sme;
2072 set_segmap_allctx(segva, sme); 2072 set_segmap_allctx(segva, sme);
2073#ifdef PMAP_DEBUG 2073#ifdef PMAP_DEBUG
2074 pmeg_verify_empty(segva); 2074 pmeg_verify_empty(segva);
2075 if (pmap_debug & PMD_SEGMAP) { 2075 if (pmap_debug & PMD_SEGMAP) {
2076 printf("pmap: set_segmap pmap=%p va=0x%lx sme=0x%x " 2076 printf("pmap: set_segmap pmap=%p va=0x%lx sme=0x%x "
2077 "(ek)\n", pmap, segva, sme); 2077 "(ek)\n", pmap, segva, sme);
2078 } 2078 }
2079#endif 2079#endif
2080 /* There are no existing mappings to deal with. */ 2080 /* There are no existing mappings to deal with. */
2081 old_pte = 0; 2081 old_pte = 0;
2082 goto add_pte; 2082 goto add_pte;
2083 } 2083 }
2084 2084
2085 /* 2085 /*
2086 * We have a PMEG. Is the VA already mapped to somewhere? 2086 * We have a PMEG. Is the VA already mapped to somewhere?
2087 * (a) if so, is it the same pa? (really a protection change) 2087 * (a) if so, is it the same pa? (really a protection change)
2088 * (b) if not the same pa, then we have to unlink from the old pa 2088 * (b) if not the same pa, then we have to unlink from the old pa
2089 */ 2089 */
2090 old_pte = get_pte(pgva); 2090 old_pte = get_pte(pgva);
2091 if ((old_pte & PG_VALID) == 0) 2091 if ((old_pte & PG_VALID) == 0)
2092 goto add_pte; 2092 goto add_pte;
2093 2093
2094 /* Have valid translation. Flush cache before changing it. */ 2094 /* Have valid translation. Flush cache before changing it. */
2095#ifdef HAVECACHE 2095#ifdef HAVECACHE
2096 if (cache_size) { 2096 if (cache_size) {
2097 cache_flush_page(pgva); 2097 cache_flush_page(pgva);
2098 /* Get fresh mod/ref bits from write-back. */ 2098 /* Get fresh mod/ref bits from write-back. */
2099 old_pte = get_pte(pgva); 2099 old_pte = get_pte(pgva);
2100 } 2100 }
2101#endif 2101#endif
2102 2102
2103 /* XXX - removing valid page here, way lame... -glass */ 2103 /* XXX - removing valid page here, way lame... -glass */
2104 pmegp->pmeg_vpages--; 2104 pmegp->pmeg_vpages--;
2105 2105
2106 if (!IS_MAIN_MEM(old_pte)) { 2106 if (!IS_MAIN_MEM(old_pte)) {
2107 /* Was not main memory, so no pv_entry for it. */ 2107 /* Was not main memory, so no pv_entry for it. */
2108 goto add_pte; 2108 goto add_pte;
2109 } 2109 }
2110 2110
2111 /* Old mapping was main memory. Save mod/ref bits. */ 2111 /* Old mapping was main memory. Save mod/ref bits. */
2112 save_modref_bits(old_pte); 2112 save_modref_bits(old_pte);
2113 2113
2114 /* 2114 /*
2115 * If not changing the type or pfnum then re-use pv_entry. 2115 * If not changing the type or pfnum then re-use pv_entry.
2116 * Note we get here only with old_pte having PGT_OBMEM. 2116 * Note we get here only with old_pte having PGT_OBMEM.
2117 */ 2117 */
2118 if ((old_pte & (PG_TYPE|PG_FRAME)) == (new_pte & (PG_TYPE|PG_FRAME))) { 2118 if ((old_pte & (PG_TYPE|PG_FRAME)) == (new_pte & (PG_TYPE|PG_FRAME))) {
2119 do_pv = false; /* re-use pv_entry */ 2119 do_pv = false; /* re-use pv_entry */
2120 new_pte |= (old_pte & PG_NC); 2120 new_pte |= (old_pte & PG_NC);
2121 goto add_pte; 2121 goto add_pte;
2122 } 2122 }
2123 2123
2124 /* OK, different type or PA, have to kill old pv_entry. */ 2124 /* OK, different type or PA, have to kill old pv_entry. */
2125 pv_unlink(pmap, old_pte, pgva); 2125 pv_unlink(pmap, old_pte, pgva);
2126 2126
2127add_pte: /* can be destructive */ 2127add_pte: /* can be destructive */
2128 pmeg_set_wiring(pmegp, pgva, wired); 2128 pmeg_set_wiring(pmegp, pgva, wired);
2129 2129
2130 /* Anything but MAIN_MEM is mapped non-cached. */ 2130 /* Anything but MAIN_MEM is mapped non-cached. */
2131 if (!IS_MAIN_MEM(new_pte)) { 2131 if (!IS_MAIN_MEM(new_pte)) {
2132 new_pte |= PG_NC; 2132 new_pte |= PG_NC;
2133 do_pv = false; 2133 do_pv = false;
2134 } 2134 }
2135 if (do_pv == true) { 2135 if (do_pv == true) {
2136 if (pv_link(pmap, new_pte, pgva) & PV_NC) 2136 if (pv_link(pmap, new_pte, pgva) & PV_NC)
2137 new_pte |= PG_NC; 2137 new_pte |= PG_NC;
2138 } 2138 }
2139#ifdef PMAP_DEBUG 2139#ifdef PMAP_DEBUG
2140 if ((pmap_debug & PMD_SETPTE) || (pgva == pmap_db_watchva)) { 2140 if ((pmap_debug & PMD_SETPTE) || (pgva == pmap_db_watchva)) {
2141 printf("pmap: set_pte pmap=%p va=0x%lx old=0x%x new=0x%x " 2141 printf("pmap: set_pte pmap=%p va=0x%lx old=0x%x new=0x%x "
2142 "(ek)\n", pmap, pgva, old_pte, new_pte); 2142 "(ek)\n", pmap, pgva, old_pte, new_pte);
2143 } 2143 }
2144#endif 2144#endif
2145 /* cache flush done above */ 2145 /* cache flush done above */
2146 set_pte(pgva, new_pte); 2146 set_pte(pgva, new_pte);
2147 pmegp->pmeg_vpages++; 2147 pmegp->pmeg_vpages++;
2148} 2148}
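
The pv_entry re-use test near the end of pmap_enter_kernel() keys only on
the page-type and page-frame fields of the old and new PTEs; when they
match, the change is really just a protection change and the existing
pv_entry (and its cacheability decision) is kept.  A minimal stand-alone
sketch of that test, with hypothetical mask values standing in for the
real <machine/pte.h> definitions:

#include <stdio.h>

#define PG_FRAME 0x0007ffffu    /* hypothetical page-frame field */
#define PG_TYPE  0x30000000u    /* hypothetical page-type field */
#define PG_NC    0x04000000u    /* hypothetical non-cached bit */

int
main(void)
{
        unsigned int old_pte = 0x30000123u | PG_NC; /* old mapping, non-cached */
        unsigned int new_pte = 0x30000123u;         /* same type and frame */

        if ((old_pte & (PG_TYPE | PG_FRAME)) ==
            (new_pte & (PG_TYPE | PG_FRAME))) {
                /* Same backing page: re-use the pv_entry, inherit PG_NC. */
                new_pte |= (old_pte & PG_NC);
                printf("re-use pv_entry, new_pte=0x%x\n", new_pte);
        } else {
                printf("different page: pv_unlink() the old entry first\n");
        }
        return 0;
}

Inheriting PG_NC here keeps a page that was forced non-cached by an
aliased mapping from silently becoming cacheable again.
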
2149 2149
2150 2150
2151static void  2151static void
2152pmap_enter_user(pmap_t pmap, vaddr_t pgva, int new_pte, bool wired) 2152pmap_enter_user(pmap_t pmap, vaddr_t pgva, int new_pte, bool wired)
2153{ 2153{
2154 int do_pv, old_pte, sme; 2154 int do_pv, old_pte, sme;
2155 vaddr_t segva; 2155 vaddr_t segva;
2156 pmeg_t pmegp; 2156 pmeg_t pmegp;
2157 2157
2158#ifdef DIAGNOSTIC 2158#ifdef DIAGNOSTIC
2159 if (pgva >= VM_MAXUSER_ADDRESS) 2159 if (pgva >= VM_MAXUSER_ADDRESS)
2160 panic("pmap_enter_user: bad va=0x%lx", pgva); 2160 panic("pmap_enter_user: bad va=0x%lx", pgva);
2161 if ((new_pte & (PG_VALID | PG_SYSTEM)) != PG_VALID) 2161 if ((new_pte & (PG_VALID | PG_SYSTEM)) != PG_VALID)
2162 panic("pmap_enter_user: bad pte"); 2162 panic("pmap_enter_user: bad pte");
2163#endif 2163#endif
2164#ifdef PMAP_DEBUG 2164#ifdef PMAP_DEBUG
2165 /* 2165 /*
2166 * Some user pages are wired here, and a later 2166 * Some user pages are wired here, and a later
2167 * call to pmap_unwire() will unwire them. 2167 * call to pmap_unwire() will unwire them.
2168 * XXX - Need a separate list for wired user pmegs 2168 * XXX - Need a separate list for wired user pmegs
2169 * so they can not be stolen from the active list. 2169 * so they can not be stolen from the active list.
2170 * XXX - Note: vm_fault.c assumes pmap_extract will 2170 * XXX - Note: vm_fault.c assumes pmap_extract will
2171 * work on wired mappings, so must preserve them... 2171 * work on wired mappings, so must preserve them...
2172 * XXX: Maybe keep a list of wired PMEGs? 2172 * XXX: Maybe keep a list of wired PMEGs?
2173 */ 2173 */
2174 if (wired && (pmap_debug & PMD_WIRING)) { 2174 if (wired && (pmap_debug & PMD_WIRING)) {
2175 db_printf("pmap_enter_user: attempt to wire user page, " 2175 db_printf("pmap_enter_user: attempt to wire user page, "
2176 "ignored\n"); 2176 "ignored\n");
2177 Debugger(); 2177 Debugger();
2178 } 2178 }
2179#endif 2179#endif
2180 2180
2181 /* Validate this assumption. */ 2181 /* Validate this assumption. */
2182 if (pmap != current_pmap()) { 2182 if (pmap != current_pmap()) {
2183#ifdef PMAP_DEBUG 2183#ifdef PMAP_DEBUG
2184 /* Apparently, this never happens. */ 2184 /* Apparently, this never happens. */
2185 db_printf("pmap_enter_user: not curlwp\n"); 2185 db_printf("pmap_enter_user: not curlwp\n");
2186 Debugger(); 2186 Debugger();
2187#endif 2187#endif
2188 /* Just throw it out (fault it in later). */ 2188 /* Just throw it out (fault it in later). */
2189 /* XXX: But must remember it if wired... */ 2189 /* XXX: But must remember it if wired... */
2190 return; 2190 return;
2191 } 2191 }
2192 2192
2193 segva = sun3_trunc_seg(pgva); 2193 segva = sun3_trunc_seg(pgva);
2194 do_pv = true; 2194 do_pv = true;
2195 2195
2196 /* 2196 /*
2197 * If this pmap was sharing the "empty" context, 2197 * If this pmap was sharing the "empty" context,
2198 * allocate a real context for its exclusive use. 2198 * allocate a real context for its exclusive use.
2199 */ 2199 */
2200 if (!has_context(pmap)) { 2200 if (!has_context(pmap)) {
2201 context_allocate(pmap); 2201 context_allocate(pmap);
2202#ifdef PMAP_DEBUG 2202#ifdef PMAP_DEBUG
2203 if (pmap_debug & PMD_CONTEXT) 2203 if (pmap_debug & PMD_CONTEXT)
2204 printf("pmap_enter(%p) got context %d\n", 2204 printf("pmap_enter(%p) got context %d\n",
2205 pmap, pmap->pm_ctxnum); 2205 pmap, pmap->pm_ctxnum);
2206#endif 2206#endif
2207 set_context(pmap->pm_ctxnum); 2207 set_context(pmap->pm_ctxnum);
2208 } else { 2208 } else {
2209#ifdef PMAP_DEBUG 2209#ifdef PMAP_DEBUG
2210 /* Make sure context is correct. */ 2210 /* Make sure context is correct. */
2211 if (pmap->pm_ctxnum != get_context()) { 2211 if (pmap->pm_ctxnum != get_context()) {
2212 db_printf("pmap_enter_user: wrong context\n"); 2212 db_printf("pmap_enter_user: wrong context\n");
2213 Debugger(); 2213 Debugger();
2214 /* XXX: OK to proceed? */ 2214 /* XXX: OK to proceed? */
2215 set_context(pmap->pm_ctxnum); 2215 set_context(pmap->pm_ctxnum);
2216 } 2216 }
2217#endif 2217#endif
2218 } 2218 }
2219 2219
2220 /* 2220 /*
2221 * We have a context. Do we have a PMEG? 2221 * We have a context. Do we have a PMEG?
2222 */ 2222 */
2223 sme = get_segmap(segva); 2223 sme = get_segmap(segva);
2224 if (sme != SEGINV) { 2224 if (sme != SEGINV) {
2225 /* Found a PMEG in the segmap. Cool. */ 2225 /* Found a PMEG in the segmap. Cool. */
2226 pmegp = pmeg_p(sme); 2226 pmegp = pmeg_p(sme);
2227#ifdef DIAGNOSTIC 2227#ifdef DIAGNOSTIC
2228 /* Make sure it is the right PMEG. */ 2228 /* Make sure it is the right PMEG. */
2229 if (sme != pmap->pm_segmap[VA_SEGNUM(segva)]) 2229 if (sme != pmap->pm_segmap[VA_SEGNUM(segva)])
2230 panic("pmap_enter_user: wrong sme at VA=0x%lx", segva); 2230 panic("pmap_enter_user: wrong sme at VA=0x%lx", segva);
2231 /* Make sure it is ours. */ 2231 /* Make sure it is ours. */
2232 if (pmegp->pmeg_owner != pmap) 2232 if (pmegp->pmeg_owner != pmap)
2233 panic("pmap_enter_user: MMU has bad pmeg 0x%x", sme); 2233 panic("pmap_enter_user: MMU has bad pmeg 0x%x", sme);
2234#endif 2234#endif
2235 } else { 2235 } else {
2236 /* Not in the segmap. Try the S/W cache. */ 2236 /* Not in the segmap. Try the S/W cache. */
2237 pmegp = pmeg_cache(pmap, segva); 2237 pmegp = pmeg_cache(pmap, segva);
2238 if (pmegp) { 2238 if (pmegp) {
2239 /* Found PMEG in cache. Just reload it. */ 2239 /* Found PMEG in cache. Just reload it. */
2240 sme = pmegp->pmeg_index; 2240 sme = pmegp->pmeg_index;
2241 set_segmap(segva, sme); 2241 set_segmap(segva, sme);
2242 } else { 2242 } else {
2243 /* PMEG not in cache, so allocate one. */ 2243 /* PMEG not in cache, so allocate one. */
2244 pmegp = pmeg_allocate(pmap, segva); 2244 pmegp = pmeg_allocate(pmap, segva);
2245 sme = pmegp->pmeg_index; 2245 sme = pmegp->pmeg_index;
2246 pmap->pm_segmap[VA_SEGNUM(segva)] = sme; 2246 pmap->pm_segmap[VA_SEGNUM(segva)] = sme;
2247 set_segmap(segva, sme); 2247 set_segmap(segva, sme);
2248#ifdef PMAP_DEBUG 2248#ifdef PMAP_DEBUG
2249 pmeg_verify_empty(segva); 2249 pmeg_verify_empty(segva);
2250#endif 2250#endif
2251 } 2251 }
2252#ifdef PMAP_DEBUG 2252#ifdef PMAP_DEBUG
2253 if (pmap_debug & PMD_SEGMAP) { 2253 if (pmap_debug & PMD_SEGMAP) {
2254 printf("pmap: set_segmap pmap=%p va=0x%lx sme=0x%x " 2254 printf("pmap: set_segmap pmap=%p va=0x%lx sme=0x%x "
2255 "(eu)\n", pmap, segva, sme); 2255 "(eu)\n", pmap, segva, sme);
2256 } 2256 }
2257#endif 2257#endif
2258 } 2258 }
2259 2259
2260 /* 2260 /*
2261 * We have a PMEG. Is the VA already mapped to somewhere? 2261 * We have a PMEG. Is the VA already mapped to somewhere?
2262 * (a) if so, is it same pa? (really a protection change) 2262 * (a) if so, is it same pa? (really a protection change)
2263 * (b) if not same pa, then we have to unlink from old pa 2263 * (b) if not same pa, then we have to unlink from old pa
2264 */ 2264 */
2265 old_pte = get_pte(pgva); 2265 old_pte = get_pte(pgva);
2266 if ((old_pte & PG_VALID) == 0) 2266 if ((old_pte & PG_VALID) == 0)
2267 goto add_pte; 2267 goto add_pte;
2268 2268
2269 /* Have valid translation. Flush cache before changing it. */ 2269 /* Have valid translation. Flush cache before changing it. */
2270#ifdef HAVECACHE 2270#ifdef HAVECACHE
2271 if (cache_size) { 2271 if (cache_size) {
2272 cache_flush_page(pgva); 2272 cache_flush_page(pgva);
2273 /* Get fresh mod/ref bits from write-back. */ 2273 /* Get fresh mod/ref bits from write-back. */
2274 old_pte = get_pte(pgva); 2274 old_pte = get_pte(pgva);
2275 } 2275 }
2276#endif 2276#endif
2277 2277
2278 /* XXX - removing valid page here, way lame... -glass */ 2278 /* XXX - removing valid page here, way lame... -glass */
2279 pmegp->pmeg_vpages--; 2279 pmegp->pmeg_vpages--;
2280 2280
2281 if (!IS_MAIN_MEM(old_pte)) { 2281 if (!IS_MAIN_MEM(old_pte)) {
2282 /* Was not main memory, so no pv_entry for it. */ 2282 /* Was not main memory, so no pv_entry for it. */
2283 goto add_pte; 2283 goto add_pte;
2284 } 2284 }
2285 2285
2286 /* Old mapping was main memory. Save mod/ref bits. */ 2286 /* Old mapping was main memory. Save mod/ref bits. */
2287 save_modref_bits(old_pte); 2287 save_modref_bits(old_pte);
2288 2288
2289 /* 2289 /*
2290 * If not changing the type or pfnum then re-use pv_entry. 2290 * If not changing the type or pfnum then re-use pv_entry.
2291 * Note we get here only with old_pte having PGT_OBMEM. 2291 * Note we get here only with old_pte having PGT_OBMEM.
2292 */ 2292 */
2293 if ((old_pte & (PG_TYPE|PG_FRAME)) == (new_pte & (PG_TYPE|PG_FRAME))) { 2293 if ((old_pte & (PG_TYPE|PG_FRAME)) == (new_pte & (PG_TYPE|PG_FRAME))) {
2294 do_pv = false; /* re-use pv_entry */ 2294 do_pv = false; /* re-use pv_entry */
2295 new_pte |= (old_pte & PG_NC); 2295 new_pte |= (old_pte & PG_NC);
2296 goto add_pte; 2296 goto add_pte;
2297 } 2297 }
2298 2298
2299 /* OK, different type or PA, have to kill old pv_entry. */ 2299 /* OK, different type or PA, have to kill old pv_entry. */
2300 pv_unlink(pmap, old_pte, pgva); 2300 pv_unlink(pmap, old_pte, pgva);
2301 2301
2302 add_pte: 2302 add_pte:
2303 /* XXX - Wiring changes on user pmaps? */ 2303 /* XXX - Wiring changes on user pmaps? */
2304 /* pmeg_set_wiring(pmegp, pgva, wired); */ 2304 /* pmeg_set_wiring(pmegp, pgva, wired); */
2305 2305
2306 /* Anything but MAIN_MEM is mapped non-cached. */ 2306 /* Anything but MAIN_MEM is mapped non-cached. */
2307 if (!IS_MAIN_MEM(new_pte)) { 2307 if (!IS_MAIN_MEM(new_pte)) {
2308 new_pte |= PG_NC; 2308 new_pte |= PG_NC;
2309 do_pv = false; 2309 do_pv = false;
2310 } 2310 }
2311 if (do_pv == true) { 2311 if (do_pv == true) {
2312 if (pv_link(pmap, new_pte, pgva) & PV_NC) 2312 if (pv_link(pmap, new_pte, pgva) & PV_NC)
2313 new_pte |= PG_NC; 2313 new_pte |= PG_NC;
2314 } 2314 }
2315#ifdef PMAP_DEBUG 2315#ifdef PMAP_DEBUG
2316 if ((pmap_debug & PMD_SETPTE) || (pgva == pmap_db_watchva)) { 2316 if ((pmap_debug & PMD_SETPTE) || (pgva == pmap_db_watchva)) {
2317 printf("pmap: set_pte pmap=%p va=0x%lx old=0x%x new=0x%x " 2317 printf("pmap: set_pte pmap=%p va=0x%lx old=0x%x new=0x%x "
2318 "(eu)\n", pmap, pgva, old_pte, new_pte); 2318 "(eu)\n", pmap, pgva, old_pte, new_pte);
2319 } 2319 }
2320#endif 2320#endif
2321 /* cache flush done above */ 2321 /* cache flush done above */
2322 set_pte(pgva, new_pte); 2322 set_pte(pgva, new_pte);
2323 pmegp->pmeg_vpages++; 2323 pmegp->pmeg_vpages++;
2324} 2324}
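
pmap_enter_user() finds its PMEG in one of three ways: already loaded in
the hardware segmap, parked in the software cache of inactive PMEGs, or
freshly allocated.  A toy user-space model of that lookup order follows;
the toy_* names, table sizes, and return values are hypothetical
stand-ins, not the kernel's pmeg_cache()/pmeg_allocate():

#include <stdio.h>

#define SEGINV 0xff

static unsigned char hw_segmap[16];     /* models the MMU segment map */
static unsigned char sw_cache[16];      /* models cached inactive PMEGs */

static int
toy_lookup(int seg)
{
        if (hw_segmap[seg] != SEGINV)
                return hw_segmap[seg];          /* (1) already in the segmap */
        if (sw_cache[seg] != SEGINV) {
                hw_segmap[seg] = sw_cache[seg]; /* (2) cheap reload */
                return hw_segmap[seg];
        }
        hw_segmap[seg] = (unsigned char)seg;    /* (3) "allocate" a fresh one */
        return hw_segmap[seg];
}

int
main(void)
{
        int i;

        for (i = 0; i < 16; i++)
                hw_segmap[i] = sw_cache[i] = SEGINV;
        sw_cache[3] = 7;        /* pretend segment 3 had a cached PMEG */
        printf("seg 3 -> pmeg %d (cache reload)\n", toy_lookup(3));
        printf("seg 5 -> pmeg %d (fresh allocation)\n", toy_lookup(5));
        return 0;
}
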
2325 2325
2326void  2326void
2327pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 2327pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
2328{ 2328{
2329 int new_pte, s; 2329 int new_pte, s;
2330 pmap_t pmap = kernel_pmap; 2330 pmap_t pmap = kernel_pmap;
2331 pmeg_t pmegp; 2331 pmeg_t pmegp;
2332 int sme; 2332 int sme;
2333 vaddr_t segva; 2333 vaddr_t segva;
2334 2334
2335#ifdef PMAP_DEBUG 2335#ifdef PMAP_DEBUG
2336 if ((pmap_debug & PMD_ENTER) || 2336 if ((pmap_debug & PMD_ENTER) ||
2337 (va == pmap_db_watchva)) 2337 (va == pmap_db_watchva))
2338 printf("pmap_kenter_pa(0x%lx, 0x%lx, 0x%x)\n", 2338 printf("pmap_kenter_pa(0x%lx, 0x%lx, 0x%x)\n",
2339 va, pa, prot); 2339 va, pa, prot);
2340#endif 2340#endif
2341 2341
2342 /* Get page-type bits from low part of the PA... */ 2342 /* Get page-type bits from low part of the PA... */
2343 new_pte = (pa & PMAP_SPEC) << PG_MOD_SHIFT; 2343 new_pte = (pa & PMAP_SPEC) << PG_MOD_SHIFT;
2344 2344
2345 /* ...now the valid and writable bits... */ 2345 /* ...now the valid and writable bits... */
2346 new_pte |= PG_SYSTEM|PG_VALID; 2346 new_pte |= PG_SYSTEM|PG_VALID;
2347 if (prot & VM_PROT_WRITE) 2347 if (prot & VM_PROT_WRITE)
2348 new_pte |= PG_WRITE; 2348 new_pte |= PG_WRITE;
2349 2349
2350 /* ...and finally the page-frame number. */ 2350 /* ...and finally the page-frame number. */
2351 new_pte |= PA_PGNUM(pa); 2351 new_pte |= PA_PGNUM(pa);
2352 2352
2353 /* 2353 /*
2354 * keep in hardware only, since it's mapped into all contexts anyway; 2354 * keep in hardware only, since it's mapped into all contexts anyway;
2355 * need to handle possibly allocating additional pmegs 2355 * need to handle possibly allocating additional pmegs
2356 * need to make sure they can't be stolen from the kernel; 2356 * need to make sure they can't be stolen from the kernel;
2357 * map any new pmegs into all contexts, make sure rest of pmeg is null; 2357 * map any new pmegs into all contexts, make sure rest of pmeg is null;
2358 * must also deal with changes. 2358 * must also deal with changes.
2359 */ 2359 */
2360 2360
2361 /* 2361 /*
2362 * In detail: 2362 * In detail:
2363 * 2363 *
2364 * (a) lock pmap 2364 * (a) lock pmap
2365 * (b) Is the VA in an already mapped segment, if so 2365 * (b) Is the VA in an already mapped segment, if so
2366 * look to see if that VA address is "valid". If it is, then 2366 * look to see if that VA address is "valid". If it is, then
2367 * action is a change to an existing pte 2367 * action is a change to an existing pte
2368 * (c) if not mapped segment, need to allocate pmeg 2368 * (c) if not mapped segment, need to allocate pmeg
2369 * (d) change/add pte 2369 * (d) change/add pte
2370 */ 2370 */
2371 2371
2372#ifdef DIAGNOSTIC 2372#ifdef DIAGNOSTIC
2373 if ((va < virtual_avail) || (va >= DVMA_MAP_END)) 2373 if ((va < virtual_avail) || (va >= DVMA_MAP_END))
2374 panic("pmap_kenter_pa: bad va=0x%lx", va); 2374 panic("pmap_kenter_pa: bad va=0x%lx", va);
2375#endif 2375#endif
2376 2376
2377 if (va >= DVMA_MAP_BASE) { 2377 if (va >= DVMA_MAP_BASE) {
2378 /* This is DVMA space. Always want it non-cached. */ 2378 /* This is DVMA space. Always want it non-cached. */
2379 new_pte |= PG_NC; 2379 new_pte |= PG_NC;
2380 } 2380 }
2381 2381
2382 segva = sun3_trunc_seg(va); 2382 segva = sun3_trunc_seg(va);
2383 2383
2384 s = splvm(); 2384 s = splvm();
2385 2385
2386 /* Do we have a PMEG? */ 2386 /* Do we have a PMEG? */
2387 sme = get_segmap(segva); 2387 sme = get_segmap(segva);
2388 if (sme != SEGINV) { 2388 if (sme != SEGINV) {
2389 KASSERT((get_pte(va) & PG_VALID) == 0); 2389 KASSERT((get_pte(va) & PG_VALID) == 0);
2390 2390
2391 /* Found a PMEG in the segmap. Cool. */ 2391 /* Found a PMEG in the segmap. Cool. */
2392 pmegp = pmeg_p(sme); 2392 pmegp = pmeg_p(sme);
2393#ifdef DIAGNOSTIC 2393#ifdef DIAGNOSTIC
2394 /* Make sure it is the right PMEG. */ 2394 /* Make sure it is the right PMEG. */
2395 if (sme != pmap->pm_segmap[VA_SEGNUM(segva)]) 2395 if (sme != pmap->pm_segmap[VA_SEGNUM(segva)])
2396 panic("pmap_kenter_pa: wrong sme at VA=0x%lx", segva); 2396 panic("pmap_kenter_pa: wrong sme at VA=0x%lx", segva);
2397 /* Make sure it is ours. */ 2397 /* Make sure it is ours. */
2398 if (pmegp->pmeg_owner != pmap) 2398 if (pmegp->pmeg_owner != pmap)
2399 panic("pmap_kenter_pa: MMU has bad pmeg 0x%x", sme); 2399 panic("pmap_kenter_pa: MMU has bad pmeg 0x%x", sme);
2400#endif 2400#endif
2401 } else { 2401 } else {
2402 2402
2403 /* No PMEG in the segmap. Have to allocate one. */ 2403 /* No PMEG in the segmap. Have to allocate one. */
2404 pmegp = pmeg_allocate(pmap, segva); 2404 pmegp = pmeg_allocate(pmap, segva);
2405 sme = pmegp->pmeg_index; 2405 sme = pmegp->pmeg_index;
2406 pmap->pm_segmap[VA_SEGNUM(segva)] = sme; 2406 pmap->pm_segmap[VA_SEGNUM(segva)] = sme;
2407 set_segmap_allctx(segva, sme); 2407 set_segmap_allctx(segva, sme);
2408#ifdef PMAP_DEBUG 2408#ifdef PMAP_DEBUG
2409 pmeg_verify_empty(segva); 2409 pmeg_verify_empty(segva);
2410 if (pmap_debug & PMD_SEGMAP) { 2410 if (pmap_debug & PMD_SEGMAP) {
2411 printf("pmap: set_segmap pmap=%p va=0x%lx sme=0x%x " 2411 printf("pmap: set_segmap pmap=%p va=0x%lx sme=0x%x "
2412 "(ek)\n", pmap, segva, sme); 2412 "(ek)\n", pmap, segva, sme);
2413 } 2413 }
2414#endif 2414#endif
2415 } 2415 }
2416 2416
2417 pmeg_set_wiring(pmegp, va, true); 2417 pmeg_set_wiring(pmegp, va, true);
2418 2418
2419 /* Anything but MAIN_MEM is mapped non-cached. */ 2419 /* Anything but MAIN_MEM is mapped non-cached. */
2420 if (!IS_MAIN_MEM(new_pte)) { 2420 if (!IS_MAIN_MEM(new_pte)) {
2421 new_pte |= PG_NC; 2421 new_pte |= PG_NC;
2422 } 2422 }
2423#ifdef PMAP_DEBUG 2423#ifdef PMAP_DEBUG
2424 if ((pmap_debug & PMD_SETPTE) || (va == pmap_db_watchva)) { 2424 if ((pmap_debug & PMD_SETPTE) || (va == pmap_db_watchva)) {
2425 printf("pmap: set_pte pmap=%p va=0x%lx new=0x%x " 2425 printf("pmap: set_pte pmap=%p va=0x%lx new=0x%x "
2426 "(ek)\n", pmap, va, new_pte); 2426 "(ek)\n", pmap, va, new_pte);
2427 } 2427 }
2428#endif 2428#endif
2429 /* cache flush done above */ 2429 /* cache flush done above */
2430 set_pte(va, new_pte); 2430 set_pte(va, new_pte);
2431 pmegp->pmeg_vpages++; 2431 pmegp->pmeg_vpages++;
2432 splx(s); 2432 splx(s);
2433} 2433}
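
The PTE built by pmap_kenter_pa() is a straight composition: page-type
bits carried in the low bits of the PA, the valid/system/write bits, and
the page-frame number.  A stand-alone sketch of that composition; the
page size (8 KB, PGSHIFT 13) matches the sun3, but PMAP_SPEC,
PG_MOD_SHIFT, and the PG_* values below are hypothetical:

#include <stdio.h>

#define PGSHIFT      13                 /* sun3 8 KB pages */
#define PMAP_SPEC    0x3UL              /* hypothetical low-PA flag bits */
#define PG_MOD_SHIFT 26                 /* hypothetical */
#define PG_SYSTEM    0x20000000u        /* hypothetical */
#define PG_VALID     0x80000000u        /* hypothetical */
#define PG_WRITE     0x40000000u        /* hypothetical */
#define PA_PGNUM(pa) ((unsigned int)((pa) >> PGSHIFT))

int
main(void)
{
        unsigned long pa = 0x00fe2000UL | 0x1UL; /* PA with one spec bit */
        int writable = 1;
        unsigned int pte;

        pte  = (unsigned int)((pa & PMAP_SPEC) << PG_MOD_SHIFT); /* page type */
        pte |= PG_SYSTEM | PG_VALID;
        if (writable)
                pte |= PG_WRITE;
        pte |= PA_PGNUM(pa);    /* frame number; the spec bits shift away */
        printf("pte = 0x%x\n", pte);
        return 0;
}
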
2434 2434
2435void  2435void
2436pmap_kremove(vaddr_t va, vsize_t len) 2436pmap_kremove(vaddr_t va, vsize_t len)
2437{ 2437{
2438 pmap_t pmap = kernel_pmap; 2438 pmap_t pmap = kernel_pmap;
2439 vaddr_t eva, neva, pgva, segva, segnum; 2439 vaddr_t eva, neva, pgva, segva, segnum;
2440 int pte, sme; 2440 int pte, sme;
2441 pmeg_t pmegp; 2441 pmeg_t pmegp;
2442#ifdef HAVECACHE 2442#ifdef HAVECACHE
2443 int flush_by_page = 0; 2443 int flush_by_page = 0;
2444#endif 2444#endif
2445 int s; 2445 int s;
2446 2446
2447 s = splvm(); 2447 s = splvm();
2448 segnum = VA_SEGNUM(va); 2448 segnum = VA_SEGNUM(va);
2449 for (eva = va + len; va < eva; va = neva, segnum++) { 2449 for (eva = va + len; va < eva; va = neva, segnum++) {
2450 neva = sun3_trunc_seg(va) + NBSG; 2450 neva = sun3_trunc_seg(va) + NBSG;
2451 if (neva > eva) { 2451 if (neva > eva) {
2452 neva = eva; 2452 neva = eva;
2453 } 2453 }
2454 if (pmap->pm_segmap[segnum] == SEGINV) { 2454 if (pmap->pm_segmap[segnum] == SEGINV) {
2455 continue; 2455 continue;
2456 } 2456 }
2457 2457
2458 segva = sun3_trunc_seg(va); 2458 segva = sun3_trunc_seg(va);
2459 sme = get_segmap(segva); 2459 sme = get_segmap(segva);
2460 pmegp = pmeg_p(sme); 2460 pmegp = pmeg_p(sme);
2461 2461
2462#ifdef HAVECACHE 2462#ifdef HAVECACHE
2463 if (cache_size) { 2463 if (cache_size) {
2464 2464
2465 /* 2465 /*
2466 * If the range to be removed is larger than the cache, 2466 * If the range to be removed is larger than the cache,
2467 * it will be cheaper to flush this segment entirely. 2467 * it will be cheaper to flush this segment entirely.
2468 */ 2468 */
2469 2469
2470 if (cache_size < (eva - va)) { 2470 if (cache_size < (eva - va)) {
2471 /* cheaper to flush whole segment */ 2471 /* cheaper to flush whole segment */
2472 cache_flush_segment(segva); 2472 cache_flush_segment(segva);
2473 } else { 2473 } else {
2474 flush_by_page = 1; 2474 flush_by_page = 1;
2475 } 2475 }
2476 } 2476 }
2477#endif 2477#endif
2478 2478
2479 /* Invalidate the PTEs in the given range. */ 2479 /* Invalidate the PTEs in the given range. */
2480 for (pgva = va; pgva < neva; pgva += PAGE_SIZE) { 2480 for (pgva = va; pgva < neva; pgva += PAGE_SIZE) {
2481 pte = get_pte(pgva); 2481 pte = get_pte(pgva);
2482 if (pte & PG_VALID) { 2482 if (pte & PG_VALID) {
2483#ifdef HAVECACHE 2483#ifdef HAVECACHE
2484 if (flush_by_page) { 2484 if (flush_by_page) {
2485 cache_flush_page(pgva); 2485 cache_flush_page(pgva);
2486 /* Get fresh mod/ref bits 2486 /* Get fresh mod/ref bits
2487 from write-back. */ 2487 from write-back. */
2488 pte = get_pte(pgva); 2488 pte = get_pte(pgva);
2489 } 2489 }
2490#endif 2490#endif
2491#ifdef PMAP_DEBUG 2491#ifdef PMAP_DEBUG
2492 if ((pmap_debug & PMD_SETPTE) || 2492 if ((pmap_debug & PMD_SETPTE) ||
2493 (pgva == pmap_db_watchva)) { 2493 (pgva == pmap_db_watchva)) {
2494 printf("pmap: set_pte pmap=%p va=0x%lx" 2494 printf("pmap: set_pte pmap=%p va=0x%lx"
2495 " old=0x%x new=0x%x (rrmmu)\n", 2495 " old=0x%x new=0x%x (rrmmu)\n",
2496 pmap, pgva, pte, PG_INVAL); 2496 pmap, pgva, pte, PG_INVAL);
2497 } 2497 }
2498#endif 2498#endif
2499 set_pte(pgva, PG_INVAL); 2499 set_pte(pgva, PG_INVAL);
2500 KASSERT(pmegp->pmeg_vpages > 0); 2500 KASSERT(pmegp->pmeg_vpages > 0);
2501 pmegp->pmeg_vpages--; 2501 pmegp->pmeg_vpages--;
2502 } 2502 }
2503 } 2503 }
2504 KASSERT(pmegp->pmeg_vpages >= 0); 2504 KASSERT(pmegp->pmeg_vpages >= 0);
2505 if (pmegp->pmeg_vpages == 0) { 2505 if (pmegp->pmeg_vpages == 0) {
2506 /* We are done with this pmeg. */ 2506 /* We are done with this pmeg. */
2507#ifdef PMAP_DEBUG 2507#ifdef PMAP_DEBUG
2508 if (is_pmeg_wired(pmegp)) { 2508 if (is_pmeg_wired(pmegp)) {
2509 if (pmap_debug & PMD_WIRING) { 2509 if (pmap_debug & PMD_WIRING) {
2510 db_printf("pmap: removing wired " 2510 db_printf("pmap: removing wired "
2511 "pmeg: %p\n", pmegp); 2511 "pmeg: %p\n", pmegp);
2512 Debugger(); 2512 Debugger();
2513 } 2513 }
2514 } 2514 }
2515 if (pmap_debug & PMD_SEGMAP) { 2515 if (pmap_debug & PMD_SEGMAP) {
2516 printf("pmap: set_segmap ctx=%d v=0x%lx " 2516 printf("pmap: set_segmap ctx=%d v=0x%lx "
2517 "old=0x%x new=ff (rm)\n", 2517 "old=0x%x new=ff (rm)\n",
2518 pmap->pm_ctxnum, segva, 2518 pmap->pm_ctxnum, segva,
2519 pmegp->pmeg_index); 2519 pmegp->pmeg_index);
2520 } 2520 }
2521 pmeg_verify_empty(segva); 2521 pmeg_verify_empty(segva);
2522#endif 2522#endif
2523 2523
2524 /* Remove it from the MMU. */ 2524 /* Remove it from the MMU. */
2525 set_segmap_allctx(segva, SEGINV); 2525 set_segmap_allctx(segva, SEGINV);
2526 pmap->pm_segmap[VA_SEGNUM(segva)] = SEGINV; 2526 pmap->pm_segmap[VA_SEGNUM(segva)] = SEGINV;
2527 2527
2528 /* Now, put it on the free list. */ 2528 /* Now, put it on the free list. */
2529 pmeg_free(pmegp); 2529 pmeg_free(pmegp);
2530 } 2530 }
2531 } 2531 }
2532 splx(s); 2532 splx(s);
2533} 2533}
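
The loop in pmap_kremove() advances one MMU segment per iteration and
clamps the final step to the end of the requested range.  A minimal
demonstration of that stepping, assuming the sun3 geometry of 128 KB
segments (NBSG = 0x20000):

#include <stdio.h>

#define NBSG 0x20000UL
#define sun3_trunc_seg(va) ((va) & ~(NBSG - 1))

int
main(void)
{
        unsigned long va = 0x12345000UL, len = 0x50000UL;
        unsigned long eva, neva;

        for (eva = va + len; va < eva; va = neva) {
                neva = sun3_trunc_seg(va) + NBSG;
                if (neva > eva)
                        neva = eva;
                printf("process [0x%lx, 0x%lx)\n", va, neva);
        }
        return 0;
}

The first and last chunks may be partial segments; everything in between
covers a full segment.
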
2534 2534
2535 2535
2536/* 2536/*
2537 * The trap handler calls this so we can try to resolve 2537 * The trap handler calls this so we can try to resolve
2538 * user-level faults by reloading a PMEG. 2538 * user-level faults by reloading a PMEG.
2539 * If that does not produce a valid mapping, 2539 * If that does not produce a valid mapping,
2540 * call uvm_fault as usual. 2540 * call uvm_fault as usual.
2541 * 2541 *
2542 * XXX: Merge this with the next function? 2542 * XXX: Merge this with the next function?
2543 */ 2543 */
2544int  2544int
2545_pmap_fault(struct vm_map *map, vaddr_t va, vm_prot_t ftype) 2545_pmap_fault(struct vm_map *map, vaddr_t va, vm_prot_t ftype)
2546{ 2546{
2547 pmap_t pmap; 2547 pmap_t pmap;
2548 int rv; 2548 int rv;
2549 2549
2550 pmap = vm_map_pmap(map); 2550 pmap = vm_map_pmap(map);
2551 if (map == kernel_map) { 2551 if (map == kernel_map) {
2552 /* Do not allow faults below the "managed" space. */ 2552 /* Do not allow faults below the "managed" space. */
2553 if (va < virtual_avail) { 2553 if (va < virtual_avail) {
2554 /* 2554 /*
2555 * Most pages below virtual_avail are read-only, 2555 * Most pages below virtual_avail are read-only,
2556 * so I will assume it is a protection failure. 2556 * so I will assume it is a protection failure.
2557 */ 2557 */
2558 return EACCES; 2558 return EACCES;
2559 } 2559 }
2560 } else { 2560 } else {
2561 /* User map. Try reload shortcut. */ 2561 /* User map. Try reload shortcut. */
2562 if (pmap_fault_reload(pmap, va, ftype)) 2562 if (pmap_fault_reload(pmap, va, ftype))
2563 return 0; 2563 return 0;
2564 } 2564 }
2565 rv = uvm_fault(map, va, ftype); 2565 rv = uvm_fault(map, va, ftype);
2566 2566
2567#ifdef PMAP_DEBUG 2567#ifdef PMAP_DEBUG
2568 if (pmap_debug & PMD_FAULT) { 2568 if (pmap_debug & PMD_FAULT) {
2569 printf("pmap_fault(%p, 0x%lx, 0x%x) -> 0x%x\n", 2569 printf("pmap_fault(%p, 0x%lx, 0x%x) -> 0x%x\n",
2570 map, va, ftype, rv); 2570 map, va, ftype, rv);
2571 } 2571 }
2572#endif 2572#endif
2573 2573
2574 return (rv); 2574 return (rv);
2575} 2575}
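
The dispatch above has a simple shape: kernel-map faults below
virtual_avail fail outright, user-map faults first try the cheap segmap
reload, and everything else falls through to uvm_fault().  A stand-alone
toy of that control flow; the toy_* functions are hypothetical
stand-ins, not the kernel entry points:

#include <stdio.h>

static int
toy_reload(int pmeg_cached)
{
        return pmeg_cached;     /* models pmap_fault_reload() */
}

static int
toy_fault(int is_kernel_map, int pmeg_cached)
{
        if (!is_kernel_map && toy_reload(pmeg_cached)) {
                printf("resolved by segmap reload\n");
                return 0;
        }
        printf("falling back to uvm_fault()\n");
        return 0;
}

int
main(void)
{
        toy_fault(0, 1);        /* user fault, PMEG cached */
        toy_fault(0, 0);        /* user fault, nothing cached */
        toy_fault(1, 0);        /* kernel fault */
        return 0;
}
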
2576 2576
2577/* 2577/*
2578 * This is a shortcut used by the trap handler to 2578 * This is a shortcut used by the trap handler to
2579 * reload PMEGs into a user segmap without calling 2579 * reload PMEGs into a user segmap without calling
2580 * the actual VM fault handler. Returns true if: 2580 * the actual VM fault handler. Returns true if:
2581 * the PMEG was reloaded, and 2581 * the PMEG was reloaded, and
2582 * it has a valid PTE at va. 2582 * it has a valid PTE at va.
2583 * Otherwise return zero and let VM code handle it. 2583 * Otherwise return zero and let VM code handle it.
2584 */ 2584 */
2585int  2585int
2586pmap_fault_reload(pmap_t pmap, vaddr_t pgva, vm_prot_t ftype) 2586pmap_fault_reload(pmap_t pmap, vaddr_t pgva, vm_prot_t ftype)
2587{ 2587{
2588 int rv, s, pte, chkpte, sme; 2588 int rv, s, pte, chkpte, sme;
2589 vaddr_t segva; 2589 vaddr_t segva;
2590 pmeg_t pmegp; 2590 pmeg_t pmegp;
2591 2591
2592 if (pgva >= VM_MAXUSER_ADDRESS) 2592 if (pgva >= VM_MAXUSER_ADDRESS)
2593 return (0); 2593 return (0);
2594 if (pmap->pm_segmap == NULL) { 2594 if (pmap->pm_segmap == NULL) {
2595#ifdef PMAP_DEBUG 2595#ifdef PMAP_DEBUG
2596 db_printf("pmap_fault_reload: null segmap\n"); 2596 db_printf("pmap_fault_reload: null segmap\n");
2597 Debugger(); 2597 Debugger();
2598#endif 2598#endif
2599 return (0); 2599 return (0);
2600 } 2600 }
2601 2601
2602 /* Short-cut using the S/W segmap. */ 2602 /* Short-cut using the S/W segmap. */
2603 if (pmap->pm_segmap[VA_SEGNUM(pgva)] == SEGINV) 2603 if (pmap->pm_segmap[VA_SEGNUM(pgva)] == SEGINV)
2604 return (0); 2604 return (0);
2605 2605
2606 segva = sun3_trunc_seg(pgva); 2606 segva = sun3_trunc_seg(pgva);
2607 chkpte = PG_VALID; 2607 chkpte = PG_VALID;
2608 if (ftype & VM_PROT_WRITE) 2608 if (ftype & VM_PROT_WRITE)
2609 chkpte |= PG_WRITE; 2609 chkpte |= PG_WRITE;
2610 rv = 0; 2610 rv = 0;
2611 2611
2612 s = splvm(); 2612 s = splvm();
2613 2613
2614 /* 2614 /*
2615 * Given that we faulted on a user-space address, we will 2615 * Given that we faulted on a user-space address, we will
2616 * probably need a context. Get a context now so we can 2616 * probably need a context. Get a context now so we can
2617 * try to resolve the fault with a segmap reload. 2617 * try to resolve the fault with a segmap reload.
2618 */ 2618 */
2619 if (!has_context(pmap)) { 2619 if (!has_context(pmap)) {
2620 context_allocate(pmap); 2620 context_allocate(pmap);
2621#ifdef PMAP_DEBUG 2621#ifdef PMAP_DEBUG
2622 if (pmap_debug & PMD_CONTEXT) 2622 if (pmap_debug & PMD_CONTEXT)
2623 printf("pmap_fault(%p) got context %d\n", 2623 printf("pmap_fault(%p) got context %d\n",
2624 pmap, pmap->pm_ctxnum); 2624 pmap, pmap->pm_ctxnum);
2625#endif 2625#endif
2626 set_context(pmap->pm_ctxnum); 2626 set_context(pmap->pm_ctxnum);
2627 } else { 2627 } else {
2628#ifdef PMAP_DEBUG 2628#ifdef PMAP_DEBUG
2629 /* Make sure context is correct. */ 2629 /* Make sure context is correct. */
2630 if (pmap->pm_ctxnum != get_context()) { 2630 if (pmap->pm_ctxnum != get_context()) {
2631 db_printf("pmap_fault_reload: wrong context\n"); 2631 db_printf("pmap_fault_reload: wrong context\n");
2632 Debugger(); 2632 Debugger();
2633 /* XXX: OK to proceed? */ 2633 /* XXX: OK to proceed? */
2634 set_context(pmap->pm_ctxnum); 2634 set_context(pmap->pm_ctxnum);
2635 } 2635 }
2636#endif 2636#endif
2637 } 2637 }
2638 2638
2639 sme = get_segmap(segva); 2639 sme = get_segmap(segva);
2640 if (sme == SEGINV) { 2640 if (sme == SEGINV) {
2641 /* See if there is something to reload. */ 2641 /* See if there is something to reload. */
2642 pmegp = pmeg_cache(pmap, segva); 2642 pmegp = pmeg_cache(pmap, segva);
2643 if (pmegp) { 2643 if (pmegp) {
2644 /* Found one! OK, reload it. */ 2644 /* Found one! OK, reload it. */
2645 pmap_stats.ps_pmeg_faultin++; 2645 pmap_stats.ps_pmeg_faultin++;
2646 sme = pmegp->pmeg_index; 2646 sme = pmegp->pmeg_index;
2647 set_segmap(segva, sme); 2647 set_segmap(segva, sme);
2648 pte = get_pte(pgva); 2648 pte = get_pte(pgva);
2649 if (pte & chkpte) 2649 if (pte & chkpte)
2650 rv = 1; 2650 rv = 1;
2651 } 2651 }
2652 } 2652 }
2653 2653
2654 splx(s); 2654 splx(s);
2655 return (rv); 2655 return (rv);
2656} 2656}
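
Before testing the reloaded PTE, pmap_fault_reload() builds the bit mask
the PTE must satisfy from the fault type.  A tiny sketch of that mask
construction, with hypothetical values for VM_PROT_WRITE and the PG_*
bits:

#include <stdio.h>

#define VM_PROT_WRITE 0x2u              /* hypothetical */
#define PG_VALID      0x80000000u       /* hypothetical */
#define PG_WRITE      0x40000000u       /* hypothetical */

int
main(void)
{
        unsigned int ftype = VM_PROT_WRITE;     /* a write fault */
        unsigned int chkpte = PG_VALID;

        if (ftype & VM_PROT_WRITE)
                chkpte |= PG_WRITE;
        printf("required PTE bits: 0x%x\n", chkpte);
        return 0;
}
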
2657 2657
2658 2658
2659/* 2659/*
2660 * Clear the modify bit for the given physical page. 2660 * Clear the modify bit for the given physical page.
2661 */ 2661 */
2662bool 2662bool
2663pmap_clear_modify(struct vm_page *pg) 2663pmap_clear_modify(struct vm_page *pg)
2664{ 2664{
2665 paddr_t pa = VM_PAGE_TO_PHYS(pg); 2665 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2666 pv_entry_t *head; 2666 pv_entry_t *head;
2667 u_char *pv_flags; 2667 u_char *pv_flags;
2668 int s; 2668 int s;
2669 bool rv; 2669 bool rv;
2670 2670
2671 pv_flags = pa_to_pvflags(pa); 2671 pv_flags = pa_to_pvflags(pa);
2672 head = pa_to_pvhead(pa); 2672 head = pa_to_pvhead(pa);
2673 2673
2674 s = splvm(); 2674 s = splvm();
2675 *pv_flags |= pv_syncflags(*head); 2675 *pv_flags |= pv_syncflags(*head);
2676 rv = *pv_flags & PV_MOD; 2676 rv = *pv_flags & PV_MOD;
2677 *pv_flags &= ~PV_MOD; 2677 *pv_flags &= ~PV_MOD;
2678 splx(s); 2678 splx(s);
2679 return rv; 2679 return rv;
2680} 2680}
2681 2681
2682/* 2682/*
2683 * Tell whether the given physical page has been modified. 2683 * Tell whether the given physical page has been modified.
2684 */ 2684 */
2685bool 2685bool
2686pmap_is_modified(struct vm_page *pg) 2686pmap_is_modified(struct vm_page *pg)
2687{ 2687{
2688 paddr_t pa = VM_PAGE_TO_PHYS(pg); 2688 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2689 pv_entry_t *head; 2689 pv_entry_t *head;
2690 u_char *pv_flags; 2690 u_char *pv_flags;
2691 int s; 2691 int s;
2692 bool rv; 2692 bool rv;
2693 2693
2694 pv_flags = pa_to_pvflags(pa); 2694 pv_flags = pa_to_pvflags(pa);
2695 head = pa_to_pvhead(pa); 2695 head = pa_to_pvhead(pa);
2696 2696
2697 s = splvm(); 2697 s = splvm();
2698 if ((*pv_flags & PV_MOD) == 0) 2698 if ((*pv_flags & PV_MOD) == 0)
2699 *pv_flags |= pv_syncflags(*head); 2699 *pv_flags |= pv_syncflags(*head);
2700 rv = (*pv_flags & PV_MOD); 2700 rv = (*pv_flags & PV_MOD);
2701 splx(s); 2701 splx(s);
2702 return (rv); 2702 return (rv);
2703} 2703}
2704 2704
2705/* 2705/*
2706 * Clear the reference bit for the given physical page. 2706 * Clear the reference bit for the given physical page.
2707 * It's OK to just remove mappings if that's easier. 2707 * It's OK to just remove mappings if that's easier.
2708 */ 2708 */
2709bool 2709bool
2710pmap_clear_reference(struct vm_page *pg) 2710pmap_clear_reference(struct vm_page *pg)
2711{ 2711{
2712 paddr_t pa = VM_PAGE_TO_PHYS(pg); 2712 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2713 pv_entry_t *head; 2713 pv_entry_t *head;
2714 u_char *pv_flags; 2714 u_char *pv_flags;
2715 int s; 2715 int s;
2716 bool rv; 2716 bool rv;
2717 2717
2718 pv_flags = pa_to_pvflags(pa); 2718 pv_flags = pa_to_pvflags(pa);
2719 head = pa_to_pvhead(pa); 2719 head = pa_to_pvhead(pa);
2720 2720
2721 s = splvm(); 2721 s = splvm();
2722 *pv_flags |= pv_syncflags(*head); 2722 *pv_flags |= pv_syncflags(*head);
2723 rv = *pv_flags & PV_REF; 2723 rv = *pv_flags & PV_REF;
2724 *pv_flags &= ~PV_REF; 2724 *pv_flags &= ~PV_REF;
2725 splx(s); 2725 splx(s);
2726 return rv; 2726 return rv;
2727} 2727}
2728 2728
2729/* 2729/*
2730 * Tell whether the given physical page has been referenced. 2730 * Tell whether the given physical page has been referenced.
2731 * It's OK to just return false if page is not mapped. 2731 * It's OK to just return false if page is not mapped.
2732 */ 2732 */
2733bool 2733bool
2734pmap_is_referenced(struct vm_page *pg) 2734pmap_is_referenced(struct vm_page *pg)
2735{ 2735{
2736 paddr_t pa = VM_PAGE_TO_PHYS(pg); 2736 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2737 pv_entry_t *head; 2737 pv_entry_t *head;
2738 u_char *pv_flags; 2738 u_char *pv_flags;
2739 int s; 2739 int s;
2740 bool rv; 2740 bool rv;
2741 2741
2742 pv_flags = pa_to_pvflags(pa); 2742 pv_flags = pa_to_pvflags(pa);
2743 head = pa_to_pvhead(pa); 2743 head = pa_to_pvhead(pa);
2744 2744
2745 s = splvm(); 2745 s = splvm();
2746 if ((*pv_flags & PV_REF) == 0) 2746 if ((*pv_flags & PV_REF) == 0)
2747 *pv_flags |= pv_syncflags(*head); 2747 *pv_flags |= pv_syncflags(*head);
2748 rv = (*pv_flags & PV_REF); 2748 rv = (*pv_flags & PV_REF);
2749 splx(s); 2749 splx(s);
2750 return (rv); 2750 return (rv);
2751} 2751}
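
All four mod/ref routines above share one pattern: the flags live in a
software pv_flags byte and are pulled in lazily from the hardware PTEs
by pv_syncflags() only when queried or cleared.  A toy model of that
pattern; toy_syncflags() and the one-byte "hardware" state are
hypothetical stand-ins for the real pv-list walk:

#include <stdio.h>

#define PV_MOD 0x01
#define PV_REF 0x02

static unsigned char hw_bits = PV_MOD | PV_REF; /* models PTE mod/ref */

static unsigned char
toy_syncflags(void)
{
        unsigned char bits = hw_bits;

        hw_bits = 0;            /* hardware bits are consumed on sync */
        return bits;
}

int
main(void)
{
        unsigned char pv_flags = 0;

        /* pmap_is_modified(): sync only if the cached bit is clear. */
        if ((pv_flags & PV_MOD) == 0)
                pv_flags |= toy_syncflags();
        printf("modified=%d\n", (pv_flags & PV_MOD) != 0);

        /* pmap_clear_modify(): sync, report, then clear. */
        pv_flags |= toy_syncflags();
        printf("was modified=%d\n", (pv_flags & PV_MOD) != 0);
        pv_flags &= ~PV_MOD;
        printf("now modified=%d\n", (pv_flags & PV_MOD) != 0);
        return 0;
}
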
2752 2752
2753 2753
2754/* 2754/*
2755 * This is called by locore.s:cpu_switch() when it is 2755 * This is called by locore.s:cpu_switch() when it is
2756 * switching to a new process. Load new translations. 2756 * switching to a new process. Load new translations.
2757 * Note: done in-line by locore.s unless PMAP_DEBUG 2757 * Note: done in-line by locore.s unless PMAP_DEBUG
2758 * 2758 *
2759 * Note that we do NOT allocate a context here, but 2759 * Note that we do NOT allocate a context here, but
2760 * share the "kernel only" context until we really 2760 * share the "kernel only" context until we really
2761 * need our own context for user-space mappings in 2761 * need our own context for user-space mappings in
2762 * pmap_enter_user(). 2762 * pmap_enter_user().
2763 */ 2763 */
2764void  2764void
2765_pmap_switch(pmap_t pmap) 2765_pmap_switch(pmap_t pmap)
2766{ 2766{
2767 set_context(pmap->pm_ctxnum); 2767 set_context(pmap->pm_ctxnum);
2768 ICIA(); 2768 ICIA();
2769} 2769}
2770 2770
2771/* 2771/*
2772 * Exported version of pmap_activate(). This is called from the 2772 * Exported version of pmap_activate(). This is called from the
2773 * machine-independent VM code when a process is given a new pmap. 2773 * machine-independent VM code when a process is given a new pmap.
2774 * If (p == curproc) do what cpu_switch would do; otherwise just 2774 * If (p == curproc) do what cpu_switch would do; otherwise just
2775 * take this as notification that the process has a new pmap. 2775 * take this as notification that the process has a new pmap.
2776 */ 2776 */
2777void  2777void
2778pmap_activate(struct lwp *l) 2778pmap_activate(struct lwp *l)
2779{ 2779{
2780 pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap; 2780 pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
2781 2781
2782 if (l->l_proc == curproc) { 2782 if (l->l_proc == curproc) {
2783 _pmap_switch(pmap); 2783 _pmap_switch(pmap);
2784 } 2784 }
2785} 2785}
2786 2786
2787/* 2787/*
2788 * Deactivate the address space of the specified process. 2788 * Deactivate the address space of the specified process.
2789 */ 2789 */
2790void  2790void
2791pmap_deactivate(struct lwp *l) 2791pmap_deactivate(struct lwp *l)
2792{ 2792{
2793 /* Nothing to do. */ 2793 /* Nothing to do. */
2794} 2794}
2795 2795
2796/* 2796/*
2797 * Routine: pmap_unwire 2797 * Routine: pmap_unwire
2798 * Function: Clear the wired attribute for a map/virtual-address 2798 * Function: Clear the wired attribute for a map/virtual-address
2799 * pair. 2799 * pair.
2800 * In/out conditions: 2800 * In/out conditions:
2801 * The mapping must already exist in the pmap. 2801 * The mapping must already exist in the pmap.
2802 */ 2802 */
2803void  2803void
2804pmap_unwire(pmap_t pmap, vaddr_t va) 2804pmap_unwire(pmap_t pmap, vaddr_t va)
2805{ 2805{
2806 int s, sme; 2806 int s, sme;
2807 int wiremask, ptenum; 2807 int wiremask, ptenum;
2808 pmeg_t pmegp; 2808 pmeg_t pmegp;
2809 2809
2810#ifdef PMAP_DEBUG 2810#ifdef PMAP_DEBUG
2811 if (pmap_debug & PMD_WIRING) 2811 if (pmap_debug & PMD_WIRING)
2812 printf("pmap_unwire(pmap=%p, va=0x%lx)\n", 2812 printf("pmap_unwire(pmap=%p, va=0x%lx)\n",
2813 pmap, va); 2813 pmap, va);
2814#endif 2814#endif
2815 /* 2815 /*
2816 * We are asked to unwire pages that were wired when 2816 * We are asked to unwire pages that were wired when
2817 * pmap_enter() was called and we ignored wiring. 2817 * pmap_enter() was called and we ignored wiring.
2818 * (VM code appears to wire a stack page during fork.) 2818 * (VM code appears to wire a stack page during fork.)
2819 */ 2819 */
2820 if (pmap != kernel_pmap) { 2820 if (pmap != kernel_pmap) {
2821#ifdef PMAP_DEBUG 2821#ifdef PMAP_DEBUG
2822 if (pmap_debug & PMD_WIRING) { 2822 if (pmap_debug & PMD_WIRING) {
2823 db_printf(" (user pmap -- ignored)\n"); 2823 db_printf(" (user pmap -- ignored)\n");
2824 Debugger(); 2824 Debugger();
2825 } 2825 }
2826#endif 2826#endif
2827 return; 2827 return;
2828 } 2828 }
2829 2829
2830 ptenum = VA_PTE_NUM(va); 2830 ptenum = VA_PTE_NUM(va);
2831 wiremask = 1 << ptenum; 2831 wiremask = 1 << ptenum;
2832 2832
2833 s = splvm(); 2833 s = splvm();
2834 sme = get_segmap(va); 2834 sme = get_segmap(va);
2835 pmegp = pmeg_p(sme); 2835 pmegp = pmeg_p(sme);
2836 pmegp->pmeg_wired &= ~wiremask; 2836 pmegp->pmeg_wired &= ~wiremask;
2837 splx(s); 2837 splx(s);
2838} 2838}
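
The wired bookkeeping pmap_unwire() updates is one bit per PTE slot in
the segment's PMEG.  A sketch of the mask arithmetic, assuming the sun3
geometry of 8 KB pages and 16 pages per 128 KB segment (so the mask fits
in 16 bits):

#include <stdio.h>

#define PGSHIFT  13
#define SEGOFSET 0x1ffffUL              /* offset within a 128 KB segment */
#define VA_PTE_NUM(va) (((va) & SEGOFSET) >> PGSHIFT)

int
main(void)
{
        unsigned long va = 0x12346000UL;
        unsigned int pmeg_wired = 0xffff;       /* all 16 pages wired */
        unsigned int wiremask = 1u << VA_PTE_NUM(va);

        pmeg_wired &= ~wiremask;                /* pmap_unwire() */
        printf("ptenum=%lu wired=0x%04x\n", VA_PTE_NUM(va), pmeg_wired);
        return 0;
}
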
2839 2839
2840/* 2840/*
2841 * Copy the range specified by src_addr/len 2841 * Copy the range specified by src_addr/len
2842 * from the source map to the range dst_addr/len 2842 * from the source map to the range dst_addr/len
2843 * in the destination map. 2843 * in the destination map.
2844 * 2844 *
2845 * This routine is only advisory and need not do anything. 2845 * This routine is only advisory and need not do anything.
2846 */ 2846 */
2847void 2847void
2848pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, vsize_t len, 2848pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, vsize_t len,
2849 vaddr_t src_addr) 2849 vaddr_t src_addr)
2850{ 2850{
2851} 2851}
2852 2852
2853/* 2853/*
2854 * Routine: pmap_extract 2854 * Routine: pmap_extract
2855 * Function: 2855 * Function:
2856 * Extract the physical page address associated 2856 * Extract the physical page address associated
2857 * with the given map/virtual_address pair. 2857 * with the given map/virtual_address pair.
2858 * Returns zero if VA not valid. 2858 * Returns zero if VA not valid.
2859 */ 2859 */
2860bool  2860bool
2861pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap) 2861pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
2862{ 2862{
2863 int s, sme, segnum, ptenum, pte; 2863 int s, sme, segnum, ptenum, pte;
2864 paddr_t pa; 2864 paddr_t pa;
2865 2865
2866 pte = 0; 2866 pte = 0;
2867 s = splvm(); 2867 s = splvm();
2868 if (pmap == kernel_pmap) { 2868 if (pmap == kernel_pmap) {
2869 sme = get_segmap(va); 2869 sme = get_segmap(va);
2870 if (sme != SEGINV) 2870 if (sme != SEGINV)
2871 pte = get_pte(va); 2871 pte = get_pte(va);
2872 } else { 2872 } else {
2873 /* This is rare, so do it the easy way. */ 2873 /* This is rare, so do it the easy way. */
2874 segnum = VA_SEGNUM(va); 2874 segnum = VA_SEGNUM(va);
2875 sme = pmap->pm_segmap[segnum]; 2875 sme = pmap->pm_segmap[segnum];
2876 if (sme != SEGINV) { 2876 if (sme != SEGINV) {
2877 ptenum = VA_PTE_NUM(va); 2877 ptenum = VA_PTE_NUM(va);
2878 pte = get_pte_pmeg(sme, ptenum); 2878 pte = get_pte_pmeg(sme, ptenum);
2879 } 2879 }
2880 } 2880 }
2881 splx(s); 2881 splx(s);
2882 2882
2883 if ((pte & PG_VALID) == 0) { 2883 if ((pte & PG_VALID) == 0) {
2884#ifdef PMAP_DEBUG 2884#ifdef PMAP_DEBUG
2885 db_printf("pmap_extract: invalid va=0x%lx\n", va); 2885 db_printf("pmap_extract: invalid va=0x%lx\n", va);
2886 Debugger(); 2886 Debugger();
2887#endif 2887#endif
2888 return (false); 2888 return (false);
2889 } 2889 }
2890 pa = PG_PA(pte); 2890 pa = PG_PA(pte);
2891#ifdef DIAGNOSTIC 2891#ifdef DIAGNOSTIC
2892 if (pte & PG_TYPE) { 2892 if (pte & PG_TYPE) {
2893 panic("pmap_extract: not main mem, va=0x%lx", va); 2893 panic("pmap_extract: not main mem, va=0x%lx", va);
2894 } 2894 }
2895#endif 2895#endif
2896 if (pap != NULL) 2896 if (pap != NULL)
2897 *pap = pa; 2897 *pap = pa;
2898 return (true); 2898 return (true);
2899} 2899}
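
pmap_extract() reduces to a validity check plus shifting the page-frame
field back into a physical address.  A stand-alone sketch, with
hypothetical masks in place of the real PG_VALID/PG_FRAME/PG_PA()
definitions:

#include <stdio.h>

#define PGSHIFT  13
#define PG_VALID 0x80000000u            /* hypothetical */
#define PG_FRAME 0x0007ffffu            /* hypothetical */
#define PG_PA(pte) ((unsigned long)((pte) & PG_FRAME) << PGSHIFT)

int
main(void)
{
        unsigned int pte = PG_VALID | 0x7f1;    /* frame number 0x7f1 */

        if ((pte & PG_VALID) == 0) {
                printf("no valid mapping\n");
                return 1;
        }
        printf("pa = 0x%lx\n", PG_PA(pte));
        return 0;
}
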
2900 2900
2901 2901
2902/* 2902/*