Wed Dec 31 06:54:33 2008 UTC (tsutsui)
Adjust comments (which were added by me) to sync with m68k/pmap_motorola.c.

diff -r1.139 -r1.140 src/sys/arch/amiga/amiga/pmap.c

cvs diff -r1.139 -r1.140 src/sys/arch/amiga/amiga/Attic/pmap.c

--- src/sys/arch/amiga/amiga/Attic/pmap.c 2008/12/19 18:49:38 1.139
+++ src/sys/arch/amiga/amiga/Attic/pmap.c 2008/12/31 06:54:33 1.140
@@ -1,1353 +1,1353 @@ @@ -1,1353 +1,1353 @@
1/* $NetBSD: pmap.c,v 1.139 2008/12/19 18:49:38 cegger Exp $ */ 1/* $NetBSD: pmap.c,v 1.140 2008/12/31 06:54:33 tsutsui Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1999 The NetBSD Foundation, Inc. 4 * Copyright (c) 1999 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe. 8 * by Jason R. Thorpe.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32/* 32/*
33 * Copyright (c) 1991 Regents of the University of California. 33 * Copyright (c) 1991 Regents of the University of California.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * This code is derived from software contributed to Berkeley by 36 * This code is derived from software contributed to Berkeley by
37 * the Systems Programming Group of the University of Utah Computer 37 * the Systems Programming Group of the University of Utah Computer
38 * Science Department. 38 * Science Department.
39 * 39 *
40 * Redistribution and use in source and binary forms, with or without 40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions 41 * modification, are permitted provided that the following conditions
42 * are met: 42 * are met:
43 * 1. Redistributions of source code must retain the above copyright 43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer. 44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright 45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the 46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution. 47 * documentation and/or other materials provided with the distribution.
48 * 3. Neither the name of the University nor the names of its contributors 48 * 3. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software 49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission. 50 * without specific prior written permission.
51 * 51 *
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE. 62 * SUCH DAMAGE.
63 * 63 *
64 * @(#)pmap.c 7.5 (Berkeley) 5/10/91 64 * @(#)pmap.c 7.5 (Berkeley) 5/10/91
65 */ 65 */
66 66
67/* 67/*
68 * AMIGA physical map management code. 68 * AMIGA physical map management code.
69 * For 68020/68030 machines with 68851, or 68030 MMUs 69 * For 68020/68030 machines with 68851, or 68030 MMUs
70 * Don't even pay lip service to multiprocessor support. 70 * Don't even pay lip service to multiprocessor support.
71 * 71 *
72 * right now because of the assumed one-to-one relationship of PT 72 * right now because of the assumed one-to-one relationship of PT
73 * pages to STEs. 73 * pages to STEs.
74 */ 74 */
75 75
76/* 76/*
77 * Manages physical address maps. 77 * Manages physical address maps.
78 * 78 *
79 * In addition to hardware address maps, this 79 * In addition to hardware address maps, this
80 * module is called upon to provide software-use-only 80 * module is called upon to provide software-use-only
81 * maps which may or may not be stored in the same 81 * maps which may or may not be stored in the same
82 * form as hardware maps. These pseudo-maps are 82 * form as hardware maps. These pseudo-maps are
83 * used to store intermediate results from copy 83 * used to store intermediate results from copy
84 * operations to and from address spaces. 84 * operations to and from address spaces.
85 * 85 *
86 * Since the information managed by this module is 86 * Since the information managed by this module is
87 * also stored by the logical address mapping module, 87 * also stored by the logical address mapping module,
88 * this module may throw away valid virtual-to-physical 88 * this module may throw away valid virtual-to-physical
89 * mappings at almost any time. However, invalidations 89 * mappings at almost any time. However, invalidations
90 * of virtual-to-physical mappings must be done as 90 * of virtual-to-physical mappings must be done as
91 * requested. 91 * requested.
92 * 92 *
93 * In order to cope with hardware architectures which 93 * In order to cope with hardware architectures which
94 * make virtual-to-physical map invalidates expensive, 94 * make virtual-to-physical map invalidates expensive,
95 * this module may delay invalidate or reduced protection 95 * this module may delay invalidate or reduced protection
96 * operations until such time as they are actually 96 * operations until such time as they are actually
97 * necessary. This module is given full information as 97 * necessary. This module is given full information as
98 * to which processors are currently using which maps, 98 * to which processors are currently using which maps,
99 * and to when physical maps must be made correct. 99 * and to when physical maps must be made correct.
100 */ 100 */
101 101
102#include <sys/cdefs.h> 102#include <sys/cdefs.h>
103__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.139 2008/12/19 18:49:38 cegger Exp $"); 103__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.140 2008/12/31 06:54:33 tsutsui Exp $");
104 104
105#include <sys/param.h> 105#include <sys/param.h>
106#include <sys/systm.h> 106#include <sys/systm.h>
107#include <sys/proc.h> 107#include <sys/proc.h>
108#include <sys/malloc.h> 108#include <sys/malloc.h>
109#include <sys/user.h> 109#include <sys/user.h>
110 110
111#include <uvm/uvm.h> 111#include <uvm/uvm.h>
112 112
113#include <machine/pte.h> 113#include <machine/pte.h>
114#include <machine/cpu.h> 114#include <machine/cpu.h>
115#include <machine/vmparam.h> 115#include <machine/vmparam.h>
116 116
117#include <m68k/cacheops.h> 117#include <m68k/cacheops.h>
118 118
119#include <amiga/amiga/memlist.h> 119#include <amiga/amiga/memlist.h>
120/* 120/*
121 * Allocate various and sundry SYSMAPs used in the days of old VM 121 * Allocate various and sundry SYSMAPs used in the days of old VM
122 * and not yet converted. XXX. 122 * and not yet converted. XXX.
123 */ 123 */
124 124
125#ifdef DEBUG 125#ifdef DEBUG
126struct kpt_stats { 126struct kpt_stats {
127 int collectscans; 127 int collectscans;
128 int collectpages; 128 int collectpages;
129 int kpttotal; 129 int kpttotal;
130 int kptinuse; 130 int kptinuse;
131 int kptmaxuse; 131 int kptmaxuse;
132}; 132};
133struct enter_stats { 133struct enter_stats {
134 int kernel; /* entering kernel mapping */ 134 int kernel; /* entering kernel mapping */
135 int user; /* entering user mapping */ 135 int user; /* entering user mapping */
136 int ptpneeded; /* needed to allocate a PT page */ 136 int ptpneeded; /* needed to allocate a PT page */
137 int pwchange; /* no mapping change, just wiring or protection */ 137 int pwchange; /* no mapping change, just wiring or protection */
138 int wchange; /* no mapping change, just wiring */ 138 int wchange; /* no mapping change, just wiring */
139 int mchange; /* was mapped but mapping to different page */ 139 int mchange; /* was mapped but mapping to different page */
140 int managed; /* a managed page */ 140 int managed; /* a managed page */
141 int firstpv; /* first mapping for this PA */ 141 int firstpv; /* first mapping for this PA */
142 int secondpv; /* second mapping for this PA */ 142 int secondpv; /* second mapping for this PA */
143 int ci; /* cache inhibited */ 143 int ci; /* cache inhibited */
144 int unmanaged; /* not a managed page */ 144 int unmanaged; /* not a managed page */
145 int flushes; /* cache flushes */ 145 int flushes; /* cache flushes */
146}; 146};
147struct remove_stats { 147struct remove_stats {
148 int calls; 148 int calls;
149 int removes; 149 int removes;
150 int pvfirst; 150 int pvfirst;
151 int pvsearch; 151 int pvsearch;
152 int ptinvalid; 152 int ptinvalid;
153 int uflushes; 153 int uflushes;
154 int sflushes; 154 int sflushes;
155}; 155};
156 156
157struct remove_stats remove_stats; 157struct remove_stats remove_stats;
158struct enter_stats enter_stats; 158struct enter_stats enter_stats;
159struct kpt_stats kpt_stats; 159struct kpt_stats kpt_stats;
160 160
161#define PDB_FOLLOW 0x0001 161#define PDB_FOLLOW 0x0001
162#define PDB_INIT 0x0002 162#define PDB_INIT 0x0002
163#define PDB_ENTER 0x0004 163#define PDB_ENTER 0x0004
164#define PDB_REMOVE 0x0008 164#define PDB_REMOVE 0x0008
165#define PDB_CREATE 0x0010 165#define PDB_CREATE 0x0010
166#define PDB_PTPAGE 0x0020 166#define PDB_PTPAGE 0x0020
167#define PDB_CACHE 0x0040 167#define PDB_CACHE 0x0040
168#define PDB_BITS 0x0080 168#define PDB_BITS 0x0080
169#define PDB_COLLECT 0x0100 169#define PDB_COLLECT 0x0100
170#define PDB_PROTECT 0x0200 170#define PDB_PROTECT 0x0200
171#define PDB_SEGTAB 0x0400 171#define PDB_SEGTAB 0x0400
172#define PDB_PARANOIA 0x2000 172#define PDB_PARANOIA 0x2000
173#define PDB_WIRING 0x4000 173#define PDB_WIRING 0x4000
174#define PDB_PVDUMP 0x8000 174#define PDB_PVDUMP 0x8000
175int debugmap = 0; 175int debugmap = 0;
176int pmapdebug = PDB_PARANOIA; 176int pmapdebug = PDB_PARANOIA;
177 177
178#define PMAP_DPRINTF(l, x) if (pmapdebug & (l)) printf x 178#define PMAP_DPRINTF(l, x) if (pmapdebug & (l)) printf x
179 179
180static void pmap_check_wiring(const char *, vaddr_t); 180static void pmap_check_wiring(const char *, vaddr_t);
181static void pmap_pvdump(paddr_t); 181static void pmap_pvdump(paddr_t);
182#else 182#else
183#define PMAP_DPRINTF(l, x) 183#define PMAP_DPRINTF(l, x)
184#endif 184#endif
185 185
186/* 186/*
187 * Get STEs and PTEs for user/kernel address space 187 * Get STEs and PTEs for user/kernel address space
188 */ 188 */
189#if defined(M68040) || defined(M68060) 189#if defined(M68040) || defined(M68060)
190#if defined(M68020) || defined(M68030) 190#if defined(M68020) || defined(M68030)
191#define pmap_ste(m, v) (&((m)->pm_stab[(vaddr_t)(v) \ 191#define pmap_ste(m, v) (&((m)->pm_stab[(vaddr_t)(v) \
192 >> (mmutype == MMU_68040 ? SG4_SHIFT1 : SG_ISHIFT)])) 192 >> (mmutype == MMU_68040 ? SG4_SHIFT1 : SG_ISHIFT)]))
193#else 193#else
194#define pmap_ste(m, v) (&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1])) 194#define pmap_ste(m, v) (&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1]))
195#endif 195#endif
196#define pmap_ste1(m, v) (&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1])) 196#define pmap_ste1(m, v) (&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1]))
197/* XXX assumes physically contiguous ST pages (if more than one) */ 197/* XXX assumes physically contiguous ST pages (if more than one) */
198#define pmap_ste2(m, v) \ 198#define pmap_ste2(m, v) \
199 (&((m)->pm_stab[(st_entry_t *)(*(u_int *)pmap_ste1(m,v) & SG4_ADDR1) \ 199 (&((m)->pm_stab[(st_entry_t *)(*(u_int *)pmap_ste1(m,v) & SG4_ADDR1) \
200 - (m)->pm_stpa + (((v) & SG4_MASK2) >> SG4_SHIFT2)])) 200 - (m)->pm_stpa + (((v) & SG4_MASK2) >> SG4_SHIFT2)]))
201#define pmap_ste_v(m, v) \ 201#define pmap_ste_v(m, v) \
202 (mmutype == MMU_68040 \ 202 (mmutype == MMU_68040 \
203 ? ((*pmap_ste1(m, v) & SG_V) && \ 203 ? ((*pmap_ste1(m, v) & SG_V) && \
204 (*pmap_ste2(m, v) & SG_V)) \ 204 (*pmap_ste2(m, v) & SG_V)) \
205 : (*pmap_ste(m, v) & SG_V)) 205 : (*pmap_ste(m, v) & SG_V))
206#else /* defined(M68040) || defined(M68060) */ 206#else /* defined(M68040) || defined(M68060) */
207#define pmap_ste(m, v) (&((m)->pm_stab[(vaddr_t)(v) >> SG_ISHIFT])) 207#define pmap_ste(m, v) (&((m)->pm_stab[(vaddr_t)(v) >> SG_ISHIFT]))
208#define pmap_ste_v(m, v) (*pmap_ste(m, v) & SG_V) 208#define pmap_ste_v(m, v) (*pmap_ste(m, v) & SG_V)
209#endif /* defined(M68040) || defined(M68060) */ 209#endif /* defined(M68040) || defined(M68060) */
210 210
211#define pmap_pte(m, v) (&((m)->pm_ptab[(vaddr_t)(v) >> PG_SHIFT])) 211#define pmap_pte(m, v) (&((m)->pm_ptab[(vaddr_t)(v) >> PG_SHIFT]))
212 212
213#define pmap_pte_pa(pte) (*(u_int *)(pte) & PG_FRAME) 213#define pmap_pte_pa(pte) (*(u_int *)(pte) & PG_FRAME)
214 214
215#define pmap_pte_w(pte) (*(u_int *)(pte) & PG_W) 215#define pmap_pte_w(pte) (*(u_int *)(pte) & PG_W)
216#define pmap_pte_ci(pte) (*(u_int *)(pte) & PG_CI) 216#define pmap_pte_ci(pte) (*(u_int *)(pte) & PG_CI)
217#define pmap_pte_m(pte) (*(u_int *)(pte) & PG_M) 217#define pmap_pte_m(pte) (*(u_int *)(pte) & PG_M)
218#define pmap_pte_u(pte) (*(u_int *)(pte) & PG_U) 218#define pmap_pte_u(pte) (*(u_int *)(pte) & PG_U)
219#define pmap_pte_prot(pte) (*(u_int *)(pte) & PG_PROT) 219#define pmap_pte_prot(pte) (*(u_int *)(pte) & PG_PROT)
220#define pmap_pte_v(pte) (*(u_int *)(pte) & PG_V) 220#define pmap_pte_v(pte) (*(u_int *)(pte) & PG_V)
221 221
222#define pmap_pte_set_w(pte, v) \ 222#define pmap_pte_set_w(pte, v) \
223 do { if (v) *(u_int *)(pte) |= PG_W; else *(u_int *)(pte) &= ~PG_W; \ 223 do { if (v) *(u_int *)(pte) |= PG_W; else *(u_int *)(pte) &= ~PG_W; \
224 } while (0) 224 } while (0)
225#define pmap_pte_set_prot(pte, v) \ 225#define pmap_pte_set_prot(pte, v) \
226 do { if (v) *(u_int *)(pte) |= PG_PROT; else *(u_int *)(pte) &= ~PG_PROT; \ 226 do { if (v) *(u_int *)(pte) |= PG_PROT; else *(u_int *)(pte) &= ~PG_PROT; \
227 } while (0) 227 } while (0)
228#define pmap_pte_w_chg(pte, nw) ((nw) ^ pmap_pte_w(pte)) 228#define pmap_pte_w_chg(pte, nw) ((nw) ^ pmap_pte_w(pte))
229#define pmap_pte_prot_chg(pte, np) ((np) ^ pmap_pte_prot(pte)) 229#define pmap_pte_prot_chg(pte, np) ((np) ^ pmap_pte_prot(pte))
230 230
231#define active_pmap(pm) \ 231#define active_pmap(pm) \
232 ((pm) == pmap_kernel() || (pm) == curproc->p_vmspace->vm_map.pmap) 232 ((pm) == pmap_kernel() || (pm) == curproc->p_vmspace->vm_map.pmap)
233 233
234/* 234/*
235 * Given a map and a machine independent protection code, 235 * Given a map and a machine independent protection code,
236 * convert to a vax protection code. 236 * convert to a vax protection code.
237 */ 237 */
238#define pte_prot(m, p) (protection_codes[p]) 238#define pte_prot(m, p) (protection_codes[p])
239int protection_codes[8]; 239int protection_codes[8];
240 240
241/* 241/*
242 * Kernel page table page management. 242 * Kernel page table page management.
243 * 243 *
244 * One additional page of KPT allows for 16 MB of virtual buffer cache. 244 * One additional page of KPT allows for 16 MB of virtual buffer cache.
245 * A GENERIC kernel allocates this for 2 MB of real buffer cache, 245 * A GENERIC kernel allocates this for 2 MB of real buffer cache,
246 * which in turn is allocated for 38 MB of RAM. 246 * which in turn is allocated for 38 MB of RAM.
247 * We add one per 16 MB of RAM to allow for tuning the machine-independent 247 * We add one per 16 MB of RAM to allow for tuning the machine-independent
248 * options. 248 * options.
249 */ 249 */
250#ifndef NKPTADDSHIFT 250#ifndef NKPTADDSHIFT
251#define NKPTADDSHIFT 24 251#define NKPTADDSHIFT 24
252#endif 252#endif
253 253
254struct kpt_page { 254struct kpt_page {
255 struct kpt_page *kpt_next; /* link on either used or free list */ 255 struct kpt_page *kpt_next; /* link on either used or free list */
256 vaddr_t kpt_va; /* always valid kernel VA */ 256 vaddr_t kpt_va; /* always valid kernel VA */
257 paddr_t kpt_pa; /* PA of this page (for speed) */ 257 paddr_t kpt_pa; /* PA of this page (for speed) */
258}; 258};
259struct kpt_page *kpt_free_list, *kpt_used_list; 259struct kpt_page *kpt_free_list, *kpt_used_list;
260struct kpt_page *kpt_pages; 260struct kpt_page *kpt_pages;
261 261
262/* 262/*
263 * Kernel segment/page table and page table map. 263 * Kernel segment/page table and page table map.
264 * The page table map gives us a level of indirection we need to dynamically 264 * The page table map gives us a level of indirection we need to dynamically
265 * expand the page table. It is essentially a copy of the segment table 265 * expand the page table. It is essentially a copy of the segment table
266 * with PTEs instead of STEs. All are initialized in locore at boot time. 266 * with PTEs instead of STEs. All are initialized in locore at boot time.
267 * Sysmap will initially contain VM_KERNEL_PT_PAGES pages of PTEs. 267 * Sysmap will initially contain VM_KERNEL_PT_PAGES pages of PTEs.
268 * Segtabzero is an empty segment table which all processes share til they 268 * Segtabzero is an empty segment table which all processes share til they
269 * reference something. 269 * reference something.
270 */ 270 */
271u_int *Sysseg, *Sysseg_pa; 271u_int *Sysseg, *Sysseg_pa;
272pt_entry_t *Sysmap, *Sysptmap; 272pt_entry_t *Sysmap, *Sysptmap;
273st_entry_t *Segtabzero, *Segtabzeropa; 273st_entry_t *Segtabzero, *Segtabzeropa;
274vsize_t Sysptsize = VM_KERNEL_PT_PAGES; 274vsize_t Sysptsize = VM_KERNEL_PT_PAGES;
275 275
276struct pv_entry *pv_table; /* array of entries, one per page */ 276struct pv_entry *pv_table; /* array of entries, one per page */
277 277
278struct pmap kernel_pmap_store; 278struct pmap kernel_pmap_store;
279struct pmap *const kernel_pmap_ptr = &kernel_pmap_store; 279struct pmap *const kernel_pmap_ptr = &kernel_pmap_store;
280struct vm_map *pt_map; 280struct vm_map *pt_map;
281struct vm_map_kernel pt_map_store; 281struct vm_map_kernel pt_map_store;
282 282
283vsize_t mem_size; /* memory size in bytes */ 283vsize_t mem_size; /* memory size in bytes */
284vaddr_t virtual_avail; /* VA of first avail page (after kernel bss)*/ 284vaddr_t virtual_avail; /* VA of first avail page (after kernel bss)*/
285vaddr_t virtual_end; /* VA of last avail page (end of kernel AS) */ 285vaddr_t virtual_end; /* VA of last avail page (end of kernel AS) */
286int page_cnt; /* number of pages managed by the VM system */ 286int page_cnt; /* number of pages managed by the VM system */
287bool pmap_initialized = false; /* Has pmap_init completed? */ 287bool pmap_initialized = false; /* Has pmap_init completed? */
288char *pmap_attributes; /* reference and modify bits */ 288char *pmap_attributes; /* reference and modify bits */
289TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist; 289TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist;
290int pv_nfree; 290int pv_nfree;
291#if defined(M68040) || defined(M68060) 291#if defined(M68040) || defined(M68060)
292int protostfree; /* prototype (default) free ST map */ 292int protostfree; /* prototype (default) free ST map */
293#endif 293#endif
294 294
295pt_entry_t *caddr1_pte; /* PTE for CADDR1 */ 295pt_entry_t *caddr1_pte; /* PTE for CADDR1 */
296pt_entry_t *caddr2_pte; /* PTE for CADDR2 */ 296pt_entry_t *caddr2_pte; /* PTE for CADDR2 */
297 297
298extern void * msgbufaddr; 298extern void * msgbufaddr;
299extern paddr_t msgbufpa; 299extern paddr_t msgbufpa;
300 300
301u_long noncontig_enable; 301u_long noncontig_enable;
302extern const vaddr_t amiga_uptbase; 302extern const vaddr_t amiga_uptbase;
303 303
304extern paddr_t z2mem_start; 304extern paddr_t z2mem_start;
305 305
306extern vaddr_t reserve_dumppages(vaddr_t); 306extern vaddr_t reserve_dumppages(vaddr_t);
307 307
308bool pmap_testbit(paddr_t, int); 308bool pmap_testbit(paddr_t, int);
309int pmap_enter_ptpage(pmap_t, vaddr_t, bool); 309int pmap_enter_ptpage(pmap_t, vaddr_t, bool);
310static void pmap_ptpage_addref(vaddr_t); 310static void pmap_ptpage_addref(vaddr_t);
311static int pmap_ptpage_delref(vaddr_t); 311static int pmap_ptpage_delref(vaddr_t);
312static void pmap_changebit(vaddr_t, int, bool); 312static void pmap_changebit(vaddr_t, int, bool);
313struct pv_entry * pmap_alloc_pv(void); 313struct pv_entry * pmap_alloc_pv(void);
314void pmap_free_pv(struct pv_entry *); 314void pmap_free_pv(struct pv_entry *);
315void pmap_pinit(pmap_t); 315void pmap_pinit(pmap_t);
316void pmap_release(pmap_t); 316void pmap_release(pmap_t);
317static void pmap_remove_mapping(pmap_t, vaddr_t, pt_entry_t *, int); 317static void pmap_remove_mapping(pmap_t, vaddr_t, pt_entry_t *, int);
318 318
319void pmap_collect1(pmap_t, paddr_t, paddr_t); 319void pmap_collect1(pmap_t, paddr_t, paddr_t);
320 320
321/* pmap_remove_mapping flags */ 321/* pmap_remove_mapping flags */
322#define PRM_TFLUSH 0x01 322#define PRM_TFLUSH 0x01
323#define PRM_CFLUSH 0x02 323#define PRM_CFLUSH 0x02
324#define PRM_KEEPPTPAGE 0x04 324#define PRM_KEEPPTPAGE 0x04
325 325
326 326
327 327
328 328
329 329
330/* 330/*
331 * All those kernel PT submaps that BSD is so fond of 331 * All those kernel PT submaps that BSD is so fond of
332 */ 332 */
333void *CADDR1, *CADDR2; 333void *CADDR1, *CADDR2;
334char *vmmap; 334char *vmmap;
335 335
336#define PAGE_IS_MANAGED(pa) (pmap_initialized \ 336#define PAGE_IS_MANAGED(pa) (pmap_initialized \
337 && vm_physseg_find(atop((pa)), NULL) != -1) 337 && vm_physseg_find(atop((pa)), NULL) != -1)
338 338
339static inline struct pv_entry *pa_to_pvh(paddr_t pa); 339static inline struct pv_entry *pa_to_pvh(paddr_t pa);
340static inline char *pa_to_attribute(paddr_t pa); 340static inline char *pa_to_attribute(paddr_t pa);
341 341
342static inline struct pv_entry * 342static inline struct pv_entry *
343pa_to_pvh(paddr_t pa) 343pa_to_pvh(paddr_t pa)
344{ 344{
345 int bank, pg = 0; /* gcc4 -Wunitialized */ 345 int bank, pg = 0; /* XXX gcc4 -Wuninitialized */
346 346
347 bank = vm_physseg_find(atop((pa)), &pg); 347 bank = vm_physseg_find(atop((pa)), &pg);
348 return &vm_physmem[bank].pmseg.pvent[pg]; 348 return &vm_physmem[bank].pmseg.pvent[pg];
349} 349}
350 350
351static inline char * 351static inline char *
352pa_to_attribute(paddr_t pa) 352pa_to_attribute(paddr_t pa)
353{ 353{
354 int bank, pg = 0; /* gcc4 -Wuninitialized */ 354 int bank, pg = 0; /* XXX gcc4 -Wuninitialized */
355 355
356 bank = vm_physseg_find(atop((pa)), &pg); 356 bank = vm_physseg_find(atop((pa)), &pg);
357 return &vm_physmem[bank].pmseg.attrs[pg]; 357 return &vm_physmem[bank].pmseg.attrs[pg];
358} 358}
359 359
360/* 360/*
361 * Initialize the pmap module. 361 * Initialize the pmap module.
362 * Called by vm_init, to initialize any structures that the pmap 362 * Called by vm_init, to initialize any structures that the pmap
363 * system needs to map virtual memory. 363 * system needs to map virtual memory.
364 */ 364 */
365void 365void
366pmap_init() 366pmap_init()
367{ 367{
368 vaddr_t addr, addr2; 368 vaddr_t addr, addr2;
369 vsize_t s; 369 vsize_t s;
370 u_int npg; 370 u_int npg;
371 struct pv_entry *pv; 371 struct pv_entry *pv;
372 char *attr; 372 char *attr;
373 int rv, bank; 373 int rv, bank;
374#if defined(M68060) 374#if defined(M68060)
375 struct kpt_page *kptp; 375 struct kpt_page *kptp;
376 paddr_t paddr; 376 paddr_t paddr;
377#endif 377#endif
378 378
379#ifdef DEBUG 379#ifdef DEBUG
380 if (pmapdebug & PDB_FOLLOW) 380 if (pmapdebug & PDB_FOLLOW)
381 printf("pmap_init()\n"); 381 printf("pmap_init()\n");
382#endif 382#endif
383 383
384 /* 384 /*
385 * Before we do anything else, initialize the PTE pointers 385 * Before we do anything else, initialize the PTE pointers
386 * used by pmap_zero_page() and pmap_copy_page(). 386 * used by pmap_zero_page() and pmap_copy_page().
387 */ 387 */
388 caddr1_pte = pmap_pte(pmap_kernel(), CADDR1); 388 caddr1_pte = pmap_pte(pmap_kernel(), CADDR1);
389 caddr2_pte = pmap_pte(pmap_kernel(), CADDR2); 389 caddr2_pte = pmap_pte(pmap_kernel(), CADDR2);
390 390
391#ifdef DEBUG 391#ifdef DEBUG
392 if (pmapdebug & PDB_INIT) { 392 if (pmapdebug & PDB_INIT) {
393 printf("pmap_init: Sysseg %p, Sysmap %p, Sysptmap %p\n", 393 printf("pmap_init: Sysseg %p, Sysmap %p, Sysptmap %p\n",
394 Sysseg, Sysmap, Sysptmap); 394 Sysseg, Sysmap, Sysptmap);
395 printf(" vstart %lx, vend %lx\n", virtual_avail, virtual_end); 395 printf(" vstart %lx, vend %lx\n", virtual_avail, virtual_end);
396 } 396 }
397#endif 397#endif
398 398
399 /* 399 /*
400 * Allocate memory for random pmap data structures. Includes the 400 * Allocate memory for random pmap data structures. Includes the
401 * initial segment table, pv_head_table and pmap_attributes. 401 * initial segment table, pv_head_table and pmap_attributes.
402 */ 402 */
403 for (page_cnt = 0, bank = 0; bank < vm_nphysseg; bank++) { 403 for (page_cnt = 0, bank = 0; bank < vm_nphysseg; bank++) {
404 page_cnt += vm_physmem[bank].end - vm_physmem[bank].start; 404 page_cnt += vm_physmem[bank].end - vm_physmem[bank].start;
405#ifdef DEBUG 405#ifdef DEBUG
406 printf("pmap_init: %2d: %08lx - %08lx (%10d)\n", bank, 406 printf("pmap_init: %2d: %08lx - %08lx (%10d)\n", bank,
407 vm_physmem[bank].start << PGSHIFT, 407 vm_physmem[bank].start << PGSHIFT,
408 vm_physmem[bank].end << PGSHIFT, page_cnt << PGSHIFT); 408 vm_physmem[bank].end << PGSHIFT, page_cnt << PGSHIFT);
409#endif 409#endif
410 } 410 }
411 s = M68K_STSIZE; /* Segtabzero */ 411 s = M68K_STSIZE; /* Segtabzero */
412 s += page_cnt * sizeof(struct pv_entry); /* pv table */ 412 s += page_cnt * sizeof(struct pv_entry); /* pv table */
413 s += page_cnt * sizeof(char); /* attribute table */ 413 s += page_cnt * sizeof(char); /* attribute table */
414 s = round_page(s); 414 s = round_page(s);
415 415
416 addr = uvm_km_alloc(kernel_map, s, 0, UVM_KMF_WIRED | UVM_KMF_ZERO); 416 addr = uvm_km_alloc(kernel_map, s, 0, UVM_KMF_WIRED | UVM_KMF_ZERO);
417 if (addr == 0) 417 if (addr == 0)
418 panic("pmap_init: can't allocate data structures"); 418 panic("pmap_init: can't allocate data structures");
419 Segtabzero = (u_int *) addr; 419 Segtabzero = (u_int *) addr;
420 (void) pmap_extract(pmap_kernel(), addr, (paddr_t *)(void *)&Segtabzeropa); 420 (void) pmap_extract(pmap_kernel(), addr, (paddr_t *)(void *)&Segtabzeropa);
421 addr += M68K_STSIZE; 421 addr += M68K_STSIZE;
422 422
423 pv_table = (struct pv_entry *) addr; 423 pv_table = (struct pv_entry *) addr;
424 addr += page_cnt * sizeof(struct pv_entry); 424 addr += page_cnt * sizeof(struct pv_entry);
425 425
426 pmap_attributes = (char *) addr; 426 pmap_attributes = (char *) addr;
427#ifdef DEBUG 427#ifdef DEBUG
428 if (pmapdebug & PDB_INIT) 428 if (pmapdebug & PDB_INIT)
429 printf("pmap_init: %lx bytes: page_cnt %x s0 %p(%p) " 429 printf("pmap_init: %lx bytes: page_cnt %x s0 %p(%p) "
430 "tbl %p atr %p\n", 430 "tbl %p atr %p\n",
431 s, page_cnt, Segtabzero, Segtabzeropa, 431 s, page_cnt, Segtabzero, Segtabzeropa,
432 pv_table, pmap_attributes); 432 pv_table, pmap_attributes);
433#endif 433#endif
434 434
435 /* 435 /*
436 * Now that the pv and attribute tables have been allocated, 436 * Now that the pv and attribute tables have been allocated,
437 * assign them to the memory segments. 437 * assign them to the memory segments.
438 */ 438 */
439 pv = pv_table; 439 pv = pv_table;
440 attr = pmap_attributes; 440 attr = pmap_attributes;
441 for (bank = 0; bank < vm_nphysseg; bank++) { 441 for (bank = 0; bank < vm_nphysseg; bank++) {
442 npg = vm_physmem[bank].end - vm_physmem[bank].start; 442 npg = vm_physmem[bank].end - vm_physmem[bank].start;
443 vm_physmem[bank].pmseg.pvent = pv; 443 vm_physmem[bank].pmseg.pvent = pv;
444 vm_physmem[bank].pmseg.attrs = attr; 444 vm_physmem[bank].pmseg.attrs = attr;
445 pv += npg; 445 pv += npg;
446 attr += npg; 446 attr += npg;
447 } 447 }
448 448
449 /* 449 /*
450 * Allocate physical memory for kernel PT pages and their management. 450 * Allocate physical memory for kernel PT pages and their management.
451 * we need enough pages to map the page tables for each process 451 * we need enough pages to map the page tables for each process
452 * plus some slop. 452 * plus some slop.
453 */ 453 */
454 npg = howmany(((maxproc + 16) * M68K_MAX_PTSIZE / NPTEPG), PAGE_SIZE); 454 npg = howmany(((maxproc + 16) * M68K_MAX_PTSIZE / NPTEPG), PAGE_SIZE);
455#ifdef NKPTADD 455#ifdef NKPTADD
456 npg += NKPTADD; 456 npg += NKPTADD;
457#else 457#else
458 npg += mem_size >> NKPTADDSHIFT; 458 npg += mem_size >> NKPTADDSHIFT;
459#endif 459#endif
460#if 1/*def DEBUG*/ 460#if 1/*def DEBUG*/
461 printf("Maxproc %d, mem_size %ld MB: allocating %d KPT pages\n", 461 printf("Maxproc %d, mem_size %ld MB: allocating %d KPT pages\n",
462 maxproc, mem_size>>20, npg); 462 maxproc, mem_size>>20, npg);
463#endif 463#endif
464 s = ptoa(npg) + round_page(npg * sizeof(struct kpt_page)); 464 s = ptoa(npg) + round_page(npg * sizeof(struct kpt_page));
465 465
466 /* 466 /*
467 * Verify that space will be allocated in region for which 467 * Verify that space will be allocated in region for which
468 * we already have kernel PT pages. 468 * we already have kernel PT pages.
469 */ 469 */
470 addr = 0; 470 addr = 0;
471 rv = uvm_map(kernel_map, &addr, s, NULL, UVM_UNKNOWN_OFFSET, 0, 471 rv = uvm_map(kernel_map, &addr, s, NULL, UVM_UNKNOWN_OFFSET, 0,
472 UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE, 472 UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
473 UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)); 473 UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
474 if (rv != 0 || (addr + s) >= (vaddr_t)Sysmap) 474 if (rv != 0 || (addr + s) >= (vaddr_t)Sysmap)
475 panic("pmap_init: kernel PT too small"); 475 panic("pmap_init: kernel PT too small");
476 uvm_unmap(kernel_map, addr, addr + s); 476 uvm_unmap(kernel_map, addr, addr + s);
477 477
478 /* 478 /*
479 * Now allocate the space and link the pages together to 479 * Now allocate the space and link the pages together to
480 * form the KPT free list. 480 * form the KPT free list.
481 */ 481 */
482 addr = uvm_km_alloc(kernel_map, s, 0, UVM_KMF_WIRED | UVM_KMF_ZERO); 482 addr = uvm_km_alloc(kernel_map, s, 0, UVM_KMF_WIRED | UVM_KMF_ZERO);
483 if (addr == 0) 483 if (addr == 0)
484 panic("pmap_init: cannot allocate KPT free list"); 484 panic("pmap_init: cannot allocate KPT free list");
485 s = ptoa(npg); 485 s = ptoa(npg);
486 addr2 = addr + s; 486 addr2 = addr + s;
487 kpt_pages = &((struct kpt_page *)addr2)[npg]; 487 kpt_pages = &((struct kpt_page *)addr2)[npg];
488 kpt_free_list = NULL; 488 kpt_free_list = NULL;
489 do { 489 do {
490 addr2 -= PAGE_SIZE; 490 addr2 -= PAGE_SIZE;
491 (--kpt_pages)->kpt_next = kpt_free_list; 491 (--kpt_pages)->kpt_next = kpt_free_list;
492 kpt_free_list = kpt_pages; 492 kpt_free_list = kpt_pages;
493 kpt_pages->kpt_va = addr2; 493 kpt_pages->kpt_va = addr2;
494 (void) pmap_extract(pmap_kernel(), addr2, 494 (void) pmap_extract(pmap_kernel(), addr2,
495 (paddr_t *)&kpt_pages->kpt_pa); 495 (paddr_t *)&kpt_pages->kpt_pa);
496 } while (addr != addr2); 496 } while (addr != addr2);
497 497
498#ifdef DEBUG 498#ifdef DEBUG
499 kpt_stats.kpttotal = atop(s); 499 kpt_stats.kpttotal = atop(s);
500 if (pmapdebug & PDB_INIT) 500 if (pmapdebug & PDB_INIT)
501 printf("pmap_init: KPT: %ld pages from %lx to %lx\n", 501 printf("pmap_init: KPT: %ld pages from %lx to %lx\n",
502 atop(s), addr, addr + s); 502 atop(s), addr, addr + s);
503#endif 503#endif
504 504
505 /* 505 /*
506 * Allocate the segment table map and the page table map. 506 * Allocate the segment table map and the page table map.
507 */ 507 */
508 addr = amiga_uptbase; 508 addr = amiga_uptbase;
509 if (M68K_PTMAXSIZE / M68K_MAX_PTSIZE < maxproc) { 509 if (M68K_PTMAXSIZE / M68K_MAX_PTSIZE < maxproc) {
510 s = M68K_PTMAXSIZE; 510 s = M68K_PTMAXSIZE;
511 511
512 /* 512 /*
513 * XXX We don't want to hang when we run out of page 513 * XXX We don't want to hang when we run out of page
514 * tables, so we lower maxproc so that fork will fail 514 * tables, so we lower maxproc so that fork will fail
515 * instead. Note that root could still raise this 515 * instead. Note that root could still raise this
516 * value through sysctl(3). 516 * value through sysctl(3).
517 */ 517 */
518 maxproc = M68K_PTMAXSIZE / M68K_MAX_PTSIZE; 518 maxproc = M68K_PTMAXSIZE / M68K_MAX_PTSIZE;
519 } else 519 } else
520 s = maxproc * M68K_MAX_PTSIZE; 520 s = maxproc * M68K_MAX_PTSIZE;
521 521
522 pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0, 522 pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0,
523 true, &pt_map_store); 523 true, &pt_map_store);
524 524
525#if defined(M68040) || defined(M68060) 525#if defined(M68040) || defined(M68060)
526 if (mmutype == MMU_68040) 526 if (mmutype == MMU_68040)
527 protostfree = ~1 & ~(-1 << MAXUL2SIZE); 527 protostfree = ~1 & ~(-1 << MAXUL2SIZE);
528#endif /* defined(M68040) || defined(M68060) */ 528#endif /* defined(M68040) || defined(M68060) */
529 529
530 /* 530 /*
531 * Now it is safe to enable pv_table recording. 531 * Now it is safe to enable pv_table recording.
532 */ 532 */
533 pmap_initialized = true; 533 pmap_initialized = true;
534 534
535 /* 535 /*
536 * Now that this is done, mark the pages shared with the 536 * Now that this is done, mark the pages shared with the
537 * hardware page table search as non-CCB (actually, as CI). 537 * hardware page table search as non-CCB (actually, as CI).
538 * 538 *
539 * XXX Hm. Given that this is in the kernel map, can't we just 539 * XXX Hm. Given that this is in the kernel map, can't we just
540 * use the va's? 540 * use the va's?
541 */ 541 */
542#ifdef M68060 542#ifdef M68060
543 if (cputype == CPU_68060) { 543 if (cputype == CPU_68060) {
544 kptp = kpt_free_list; 544 kptp = kpt_free_list;
545 while (kptp) { 545 while (kptp) {
546 pmap_changebit(kptp->kpt_pa, PG_CCB, 0); 546 pmap_changebit(kptp->kpt_pa, PG_CCB, 0);
547 pmap_changebit(kptp->kpt_pa, PG_CI, 1); 547 pmap_changebit(kptp->kpt_pa, PG_CI, 1);
548 kptp = kptp->kpt_next; 548 kptp = kptp->kpt_next;
549 } 549 }
550 550
551 paddr = (paddr_t)Segtabzeropa; 551 paddr = (paddr_t)Segtabzeropa;
552 while (paddr < (paddr_t)Segtabzeropa + M68K_STSIZE) { 552 while (paddr < (paddr_t)Segtabzeropa + M68K_STSIZE) {
553 pmap_changebit(paddr, PG_CCB, 0); 553 pmap_changebit(paddr, PG_CCB, 0);
554 pmap_changebit(paddr, PG_CI, 1); 554 pmap_changebit(paddr, PG_CI, 1);
555 paddr += PAGE_SIZE; 555 paddr += PAGE_SIZE;
556 } 556 }
557 557
558 DCIS(); 558 DCIS();
559 } 559 }
560#endif 560#endif
561} 561}
562 562
563struct pv_entry * 563struct pv_entry *
564pmap_alloc_pv() 564pmap_alloc_pv()
565{ 565{
566 struct pv_page *pvp; 566 struct pv_page *pvp;
567 struct pv_entry *pv; 567 struct pv_entry *pv;
568 int i; 568 int i;
569 569
570 if (pv_nfree == 0) { 570 if (pv_nfree == 0) {
571 pvp = (struct pv_page *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0, 571 pvp = (struct pv_page *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
572 UVM_KMF_WIRED | UVM_KMF_ZERO); 572 UVM_KMF_WIRED | UVM_KMF_ZERO);
573 if (pvp == 0) 573 if (pvp == 0)
574 panic("pmap_alloc_pv: uvm_km_zalloc() failed"); 574 panic("pmap_alloc_pv: uvm_km_zalloc() failed");
575 pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1]; 575 pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1];
576 for (i = NPVPPG - 2; i; i--, pv++) 576 for (i = NPVPPG - 2; i; i--, pv++)
577 pv->pv_next = pv + 1; 577 pv->pv_next = pv + 1;
578 pv->pv_next = 0; 578 pv->pv_next = 0;
579 pv_nfree += pvp->pvp_pgi.pgi_nfree = NPVPPG - 1; 579 pv_nfree += pvp->pvp_pgi.pgi_nfree = NPVPPG - 1;
580 TAILQ_INSERT_HEAD(&pv_page_freelist, pvp, pvp_pgi.pgi_list); 580 TAILQ_INSERT_HEAD(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
581 pv = &pvp->pvp_pv[0]; 581 pv = &pvp->pvp_pv[0];
582 } else { 582 } else {
583 --pv_nfree; 583 --pv_nfree;
584 pvp = pv_page_freelist.tqh_first; 584 pvp = pv_page_freelist.tqh_first;
585 if (--pvp->pvp_pgi.pgi_nfree == 0) { 585 if (--pvp->pvp_pgi.pgi_nfree == 0) {
586 TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list); 586 TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
587 } 587 }
588 pv = pvp->pvp_pgi.pgi_freelist; 588 pv = pvp->pvp_pgi.pgi_freelist;
589#ifdef DIAGNOSTIC 589#ifdef DIAGNOSTIC
590 if (pv == 0) 590 if (pv == 0)
591 panic("pmap_alloc_pv: pgi_nfree inconsistent"); 591 panic("pmap_alloc_pv: pgi_nfree inconsistent");
592#endif 592#endif
593 pvp->pvp_pgi.pgi_freelist = pv->pv_next; 593 pvp->pvp_pgi.pgi_freelist = pv->pv_next;
594 } 594 }
595 return pv; 595 return pv;
596} 596}
597 597
598void 598void
599pmap_free_pv(pv) 599pmap_free_pv(pv)
600 struct pv_entry *pv; 600 struct pv_entry *pv;
601{ 601{
602 struct pv_page *pvp; 602 struct pv_page *pvp;
603 603
604 pvp = (struct pv_page *) trunc_page((vaddr_t)pv); 604 pvp = (struct pv_page *) trunc_page((vaddr_t)pv);
605 switch (++pvp->pvp_pgi.pgi_nfree) { 605 switch (++pvp->pvp_pgi.pgi_nfree) {
606 case 1: 606 case 1:
607 TAILQ_INSERT_TAIL(&pv_page_freelist, pvp, pvp_pgi.pgi_list); 607 TAILQ_INSERT_TAIL(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
608 default: 608 default:
609 pv->pv_next = pvp->pvp_pgi.pgi_freelist; 609 pv->pv_next = pvp->pvp_pgi.pgi_freelist;
610 pvp->pvp_pgi.pgi_freelist = pv; 610 pvp->pvp_pgi.pgi_freelist = pv;
611 ++pv_nfree; 611 ++pv_nfree;
612 break; 612 break;
613 case NPVPPG: 613 case NPVPPG:
614 pv_nfree -= NPVPPG - 1; 614 pv_nfree -= NPVPPG - 1;
615 TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list); 615 TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
616 uvm_km_free(kernel_map, (vaddr_t)pvp, PAGE_SIZE, UVM_KMF_WIRED); 616 uvm_km_free(kernel_map, (vaddr_t)pvp, PAGE_SIZE, UVM_KMF_WIRED);
617 break; 617 break;
618 } 618 }
619} 619}
620 620
621 621
622/* 622/*
623 * Used to map a range of physical addresses into kernel 623 * Used to map a range of physical addresses into kernel
624 * virtual address space. 624 * virtual address space.
625 * 625 *
626 * For now, VM is already on, we only need to map the 626 * For now, VM is already on, we only need to map the
627 * specified memory. 627 * specified memory.
628 */ 628 */
629vaddr_t 629vaddr_t
630pmap_map(virt, start, end, prot) 630pmap_map(virt, start, end, prot)
631 vaddr_t virt; 631 vaddr_t virt;
632 paddr_t start; 632 paddr_t start;
633 paddr_t end; 633 paddr_t end;
634 int prot; 634 int prot;
635{ 635{
636#ifdef DEBUG 636#ifdef DEBUG
637 if (pmapdebug & PDB_FOLLOW) 637 if (pmapdebug & PDB_FOLLOW)
638 printf("pmap_map(%lx, %lx, %lx, %x)\n", virt, start, end, prot); 638 printf("pmap_map(%lx, %lx, %lx, %x)\n", virt, start, end, prot);
639#endif 639#endif
640 while (start < end) { 640 while (start < end) {
641 pmap_enter(pmap_kernel(), virt, start, prot, 0); 641 pmap_enter(pmap_kernel(), virt, start, prot, 0);
642 virt += PAGE_SIZE; 642 virt += PAGE_SIZE;
643 start += PAGE_SIZE; 643 start += PAGE_SIZE;
644 } 644 }
645 pmap_update(pmap_kernel()); 645 pmap_update(pmap_kernel());
646 return(virt); 646 return(virt);
647} 647}
648 648
649/* 649/*
650 * Create and return a physical map. 650 * Create and return a physical map.
651 * 651 *
652 * If the size specified for the map 652 * If the size specified for the map
653 * is zero, the map is an actual physical 653 * is zero, the map is an actual physical
654 * map, and may be referenced by the 654 * map, and may be referenced by the
655 * hardware. 655 * hardware.
656 * 656 *
657 * If the size specified is non-zero, 657 * If the size specified is non-zero,
658 * the map will be used in software only, and 658 * the map will be used in software only, and
659 * is bounded by that size. 659 * is bounded by that size.
660 */ 660 */
661pmap_t 661pmap_t
662pmap_create() 662pmap_create()
663{ 663{
664 pmap_t pmap; 664 pmap_t pmap;
665 665
666#ifdef DEBUG 666#ifdef DEBUG
667 if (pmapdebug & (PDB_FOLLOW|PDB_CREATE)) 667 if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
668 printf("pmap_create\n"); 668 printf("pmap_create\n");
669#endif 669#endif
670 670
671 pmap = malloc(sizeof *pmap, M_VMPMAP, M_WAITOK|M_ZERO); 671 pmap = malloc(sizeof *pmap, M_VMPMAP, M_WAITOK|M_ZERO);
672 pmap_pinit(pmap); 672 pmap_pinit(pmap);
673 return (pmap); 673 return (pmap);
674} 674}
675 675
676/* 676/*
677 * Initialize a preallocated and zeroed pmap structure, 677 * Initialize a preallocated and zeroed pmap structure,
678 * such as one in a vmspace structure. 678 * such as one in a vmspace structure.
679 */ 679 */
680void 680void
681pmap_pinit(pmap) 681pmap_pinit(pmap)
682 pmap_t pmap; 682 pmap_t pmap;
683{ 683{
684 684
685#ifdef DEBUG 685#ifdef DEBUG
686 if (pmapdebug & (PDB_FOLLOW|PDB_CREATE)) 686 if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
687 printf("pmap_pinit(%p)\n", pmap); 687 printf("pmap_pinit(%p)\n", pmap);
688#endif 688#endif
689 /* 689 /*
690 * No need to allocate page table space yet but we do need a 690 * No need to allocate page table space yet but we do need a
691 * valid segment table. Initially, we point everyone at the 691 * valid segment table. Initially, we point everyone at the
692 * "null" segment table. On the first pmap_enter, a real 692 * "null" segment table. On the first pmap_enter, a real
693 * segment table will be allocated. 693 * segment table will be allocated.
694 */ 694 */
695 pmap->pm_stab = Segtabzero; 695 pmap->pm_stab = Segtabzero;
696 pmap->pm_stpa = Segtabzeropa; 696 pmap->pm_stpa = Segtabzeropa;
697#if defined(M68040) || defined(M68060) 697#if defined(M68040) || defined(M68060)
698 if (mmutype == MMU_68040) 698 if (mmutype == MMU_68040)
699 pmap->pm_stfree = protostfree; 699 pmap->pm_stfree = protostfree;
700#endif 700#endif
701 pmap->pm_count = 1; 701 pmap->pm_count = 1;
702} 702}
703 703
704/* 704/*
705 * Retire the given physical map from service. 705 * Retire the given physical map from service.
706 * Should only be called if the map contains 706 * Should only be called if the map contains
707 * no valid mappings. 707 * no valid mappings.
708 */ 708 */
709void 709void
710pmap_destroy(pmap) 710pmap_destroy(pmap)
711 pmap_t pmap; 711 pmap_t pmap;
712{ 712{
713 int count; 713 int count;
714 714
715#ifdef DEBUG 715#ifdef DEBUG
716 if (pmapdebug & PDB_FOLLOW) 716 if (pmapdebug & PDB_FOLLOW)
717 printf("pmap_destroy(%p)\n", pmap); 717 printf("pmap_destroy(%p)\n", pmap);
718#endif 718#endif
719 count = --pmap->pm_count; 719 count = --pmap->pm_count;
720 if (count == 0) { 720 if (count == 0) {
721 pmap_release(pmap); 721 pmap_release(pmap);
722 free((void *)pmap, M_VMPMAP); 722 free((void *)pmap, M_VMPMAP);
723 } 723 }
724} 724}
725 725
726/* 726/*
727 * Release any resources held by the given physical map. 727 * Release any resources held by the given physical map.
728 * Called when a pmap initialized by pmap_pinit is being released. 728 * Called when a pmap initialized by pmap_pinit is being released.
729 * Should only be called if the map contains no valid mappings. 729 * Should only be called if the map contains no valid mappings.
730 */ 730 */
731void 731void
732pmap_release(pmap) 732pmap_release(pmap)
733 pmap_t pmap; 733 pmap_t pmap;
734{ 734{
735 735
736#ifdef DEBUG 736#ifdef DEBUG
737 if (pmapdebug & PDB_FOLLOW) 737 if (pmapdebug & PDB_FOLLOW)
738 printf("pmap_release(%p)\n", pmap); 738 printf("pmap_release(%p)\n", pmap);
739#endif 739#endif
740#ifdef notdef /* DIAGNOSTIC */ 740#ifdef notdef /* DIAGNOSTIC */
741 /* count would be 0 from pmap_destroy... */ 741 /* count would be 0 from pmap_destroy... */
742 if (pmap->pm_count != 1) 742 if (pmap->pm_count != 1)
743 panic("pmap_release count"); 743 panic("pmap_release count");
744#endif 744#endif
745 if (pmap->pm_ptab) { 745 if (pmap->pm_ptab) {
746 pmap_remove(pmap_kernel(), (vaddr_t)pmap->pm_ptab, 746 pmap_remove(pmap_kernel(), (vaddr_t)pmap->pm_ptab,
747 (vaddr_t)pmap->pm_ptab + M68K_MAX_PTSIZE); 747 (vaddr_t)pmap->pm_ptab + M68K_MAX_PTSIZE);
748 uvm_km_pgremove((vaddr_t)pmap->pm_ptab, 748 uvm_km_pgremove((vaddr_t)pmap->pm_ptab,
749 (vaddr_t)pmap->pm_ptab + M68K_MAX_PTSIZE); 749 (vaddr_t)pmap->pm_ptab + M68K_MAX_PTSIZE);
750 uvm_km_free(pt_map, (vaddr_t)pmap->pm_ptab, 750 uvm_km_free(pt_map, (vaddr_t)pmap->pm_ptab,
751 M68K_MAX_PTSIZE, UVM_KMF_VAONLY); 751 M68K_MAX_PTSIZE, UVM_KMF_VAONLY);
752 } 752 }
753 KASSERT(pmap->pm_stab == Segtabzero); 753 KASSERT(pmap->pm_stab == Segtabzero);
754} 754}
755 755
756/* 756/*
757 * Add a reference to the specified pmap. 757 * Add a reference to the specified pmap.
758 */ 758 */
759void 759void
760pmap_reference(pmap) 760pmap_reference(pmap)
761 pmap_t pmap; 761 pmap_t pmap;
762{ 762{
763#ifdef DEBUG 763#ifdef DEBUG
764 if (pmapdebug & PDB_FOLLOW) 764 if (pmapdebug & PDB_FOLLOW)
765 printf("pmap_reference(%p)\n", pmap); 765 printf("pmap_reference(%p)\n", pmap);
766#endif 766#endif
767 if (pmap != NULL) { 767 if (pmap != NULL) {
768 pmap->pm_count++; 768 pmap->pm_count++;
769 } 769 }
770} 770}
771 771
772/* 772/*
773 * Remove the given range of addresses from the specified map. 773 * Remove the given range of addresses from the specified map.
774 * 774 *
775 * It is assumed that the start and end are properly 775 * It is assumed that the start and end are properly
776 * rounded to the page size. 776 * rounded to the page size.
777 */ 777 */
778void 778void
779pmap_remove(pmap, sva, eva) 779pmap_remove(pmap, sva, eva)
780 pmap_t pmap; 780 pmap_t pmap;
781 vaddr_t sva, eva; 781 vaddr_t sva, eva;
782{ 782{
783 paddr_t pa; 783 paddr_t pa;
784 vaddr_t va; 784 vaddr_t va;
785 u_int *pte; 785 u_int *pte;
786 int flags; 786 int flags;
787 787
788#ifdef DEBUG 788#ifdef DEBUG
789 if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT)) 789 if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
790 printf("pmap_remove(%p, %lx, %lx)\n", pmap, sva, eva); 790 printf("pmap_remove(%p, %lx, %lx)\n", pmap, sva, eva);
791 remove_stats.calls++; 791 remove_stats.calls++;
792#endif 792#endif
793 flags = active_pmap(pmap) ? PRM_TFLUSH : 0; 793 flags = active_pmap(pmap) ? PRM_TFLUSH : 0;
794 for (va = sva; va < eva; va += PAGE_SIZE) { 794 for (va = sva; va < eva; va += PAGE_SIZE) {
795 /* 795 /*
796 * Weed out invalid mappings. 796 * Weed out invalid mappings.
797 * Note: we assume that the segment table is always allocated. 797 * Note: we assume that the segment table is always allocated.
798 */ 798 */
799 if (!pmap_ste_v(pmap, va)) { 799 if (!pmap_ste_v(pmap, va)) {
800 /* XXX: avoid address wrap around */ 800 /* XXX: avoid address wrap around */
801 if (va >= m68k_trunc_seg((vaddr_t)-1)) 801 if (va >= m68k_trunc_seg((vaddr_t)-1))
802 break; 802 break;
803 va = m68k_round_seg(va + PAGE_SIZE) - PAGE_SIZE; 803 va = m68k_round_seg(va + PAGE_SIZE) - PAGE_SIZE;
804 continue; 804 continue;
805 } 805 }
806 pte = pmap_pte(pmap, va); 806 pte = pmap_pte(pmap, va);
807 pa = pmap_pte_pa(pte); 807 pa = pmap_pte_pa(pte);
808 if (pa == 0) 808 if (pa == 0)
809 continue; 809 continue;
810 pmap_remove_mapping(pmap, va, pte, flags); 810 pmap_remove_mapping(pmap, va, pte, flags);
811 } 811 }
812} 812}
813 813
814/* 814/*
815 * pmap_page_protect: 815 * pmap_page_protect:
816 * 816 *
817 * Lower the permission for all mappings to a given page. 817 * Lower the permission for all mappings to a given page.
818 */ 818 */
819void 819void
820pmap_page_protect(pg, prot) 820pmap_page_protect(pg, prot)
821 struct vm_page *pg; 821 struct vm_page *pg;
822 vm_prot_t prot; 822 vm_prot_t prot;
823{ 823{
824 struct pv_entry *pv; 824 struct pv_entry *pv;
825 int s; 825 int s;
826 paddr_t pa = VM_PAGE_TO_PHYS(pg); 826 paddr_t pa = VM_PAGE_TO_PHYS(pg);
827 827
828#ifdef DEBUG 828#ifdef DEBUG
829 if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) || 829 if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
830 (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE))) 830 (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
831 printf("pmap_page_protect(%lx, %x)\n", pa, prot); 831 printf("pmap_page_protect(%lx, %x)\n", pa, prot);
832#endif 832#endif
833 switch (prot) { 833 switch (prot) {
834 case VM_PROT_ALL: 834 case VM_PROT_ALL:
835 break; 835 break;
836 /* copy_on_write */ 836 /* copy_on_write */
837 case VM_PROT_READ: 837 case VM_PROT_READ:
838 case VM_PROT_READ|VM_PROT_EXECUTE: 838 case VM_PROT_READ|VM_PROT_EXECUTE:
839 pmap_changebit(pa, PG_RO, true); 839 pmap_changebit(pa, PG_RO, true);
840 break; 840 break;
841 /* remove_all */ 841 /* remove_all */
842 default: 842 default:
843 pv = pa_to_pvh(pa); 843 pv = pa_to_pvh(pa);
844 s = splvm(); 844 s = splvm();
845 while (pv->pv_pmap != NULL) { 845 while (pv->pv_pmap != NULL) {
846 pt_entry_t *pte; 846 pt_entry_t *pte;
847 847
848 pte = pmap_pte(pv->pv_pmap, pv->pv_va); 848 pte = pmap_pte(pv->pv_pmap, pv->pv_va);
849#ifdef DEBUG 849#ifdef DEBUG
850 if (!pmap_ste_v(pv->pv_pmap,pv->pv_va) || 850 if (!pmap_ste_v(pv->pv_pmap,pv->pv_va) ||
851 pmap_pte_pa(pte) != pa) 851 pmap_pte_pa(pte) != pa)
852{ 852{
853 printf ("pmap_page_protect: va %lx, pmap_ste_v %d pmap_pte_pa %08x/%lx\n", 853 printf ("pmap_page_protect: va %lx, pmap_ste_v %d pmap_pte_pa %08x/%lx\n",
854 pv->pv_va, pmap_ste_v(pv->pv_pmap,pv->pv_va), 854 pv->pv_va, pmap_ste_v(pv->pv_pmap,pv->pv_va),
855 pmap_pte_pa(pmap_pte(pv->pv_pmap,pv->pv_va)),pa); 855 pmap_pte_pa(pmap_pte(pv->pv_pmap,pv->pv_va)),pa);
856 printf (" pvh %p pv %p pv_next %p\n", pa_to_pvh(pa), pv, pv->pv_next); 856 printf (" pvh %p pv %p pv_next %p\n", pa_to_pvh(pa), pv, pv->pv_next);
857 panic("pmap_page_protect: bad mapping"); 857 panic("pmap_page_protect: bad mapping");
858} 858}
859#endif 859#endif
860 pmap_remove_mapping(pv->pv_pmap, pv->pv_va, 860 pmap_remove_mapping(pv->pv_pmap, pv->pv_va,
861 pte, PRM_TFLUSH|PRM_CFLUSH); 861 pte, PRM_TFLUSH|PRM_CFLUSH);
862 } 862 }
863 splx(s); 863 splx(s);
864 break; 864 break;
865 } 865 }
866} 866}
867 867
868/* 868/*
869 * Set the physical protection on the 869 * Set the physical protection on the
870 * specified range of this map as requested. 870 * specified range of this map as requested.
871 */ 871 */
872void 872void
873pmap_protect(pmap, sva, eva, prot) 873pmap_protect(pmap, sva, eva, prot)
874 pmap_t pmap; 874 pmap_t pmap;
875 vaddr_t sva, eva; 875 vaddr_t sva, eva;
876 vm_prot_t prot; 876 vm_prot_t prot;
877{ 877{
878 u_int *pte; 878 u_int *pte;
879 vaddr_t va; 879 vaddr_t va;
880 bool needtflush; 880 bool needtflush;
881 int isro; 881 int isro;
882 882
883#ifdef DEBUG 883#ifdef DEBUG
884 if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) 884 if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
885 printf("pmap_protect(%p, %lx, %lx, %x)\n", pmap, sva, eva, prot); 885 printf("pmap_protect(%p, %lx, %lx, %x)\n", pmap, sva, eva, prot);
886#endif 886#endif
887 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 887 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
888 pmap_remove(pmap, sva, eva); 888 pmap_remove(pmap, sva, eva);
889 return; 889 return;
890 } 890 }
891 pte = pmap_pte(pmap, sva); 891 pte = pmap_pte(pmap, sva);
892 isro = pte_prot(pmap, prot) == PG_RO ? 1 : 0; 892 isro = pte_prot(pmap, prot) == PG_RO ? 1 : 0;
893 needtflush = active_pmap(pmap); 893 needtflush = active_pmap(pmap);
894 for (va = sva; va < eva; va += PAGE_SIZE) { 894 for (va = sva; va < eva; va += PAGE_SIZE) {
895 /* 895 /*
896 * Page table page is not allocated. 896 * Page table page is not allocated.
897 * Skip it, we don't want to force allocation 897 * Skip it, we don't want to force allocation
898 * of unnecessary PTE pages just to set the protection. 898 * of unnecessary PTE pages just to set the protection.
899 */ 899 */
900 if (!pmap_ste_v(pmap, va)) { 900 if (!pmap_ste_v(pmap, va)) {
901 /* XXX: avoid address wrap around */ 901 /* XXX: avoid address wrap around */
902 if (va >= m68k_trunc_seg((vaddr_t)-1)) 902 if (va >= m68k_trunc_seg((vaddr_t)-1))
903 break; 903 break;
904 va = m68k_round_seg(va + PAGE_SIZE) - PAGE_SIZE; 904 va = m68k_round_seg(va + PAGE_SIZE) - PAGE_SIZE;
905 pte = pmap_pte(pmap, va); 905 pte = pmap_pte(pmap, va);
906 pte++; 906 pte++;
907 continue; 907 continue;
908 } 908 }
909 /* 909 /*
910 * skip if page not valid or protection is same 910 * skip if page not valid or protection is same
911 */ 911 */
912 if (!pmap_pte_v(pte) || !pmap_pte_prot_chg(pte, isro)) { 912 if (!pmap_pte_v(pte) || !pmap_pte_prot_chg(pte, isro)) {
913 pte++; 913 pte++;
914 continue; 914 continue;
915 } 915 }
916#if defined(M68040) || defined(M68060) 916#if defined(M68040) || defined(M68060)
917 /* 917 /*
918 * Clear caches if making RO (see section 918 * Clear caches if making RO (see section
919 * "7.3 Cache Coherency" in the manual). 919 * "7.3 Cache Coherency" in the manual).
920 */ 920 */
921 if (isro && mmutype == MMU_68040) { 921 if (isro && mmutype == MMU_68040) {
922 paddr_t pa = pmap_pte_pa(pte); 922 paddr_t pa = pmap_pte_pa(pte);
923 923
924 DCFP(pa); 924 DCFP(pa);
925 ICPP(pa); 925 ICPP(pa);
926 } 926 }
927#endif 927#endif
928 pmap_pte_set_prot(pte, isro); 928 pmap_pte_set_prot(pte, isro);
929 if (needtflush) 929 if (needtflush)
930 TBIS(va); 930 TBIS(va);
931 pte++; 931 pte++;
932 } 932 }
933} 933}
934 934
935/* 935/*
936 * Insert the given physical page (p) at 936 * Insert the given physical page (p) at
937 * the specified virtual address (v) in the 937 * the specified virtual address (v) in the
938 * target physical map with the protection requested. 938 * target physical map with the protection requested.
939 * 939 *
940 * If specified, the page will be wired down, meaning 940 * If specified, the page will be wired down, meaning
941 * that the related pte can not be reclaimed. 941 * that the related pte can not be reclaimed.
942 * 942 *
943 * NB: This is the only routine which MAY NOT lazy-evaluate 943 * NB: This is the only routine which MAY NOT lazy-evaluate
944 * or lose information. That is, this routine must actually 944 * or lose information. That is, this routine must actually
945 * insert this page into the given map NOW. 945 * insert this page into the given map NOW.
946 */ 946 */
947extern int kernel_copyback; 947extern int kernel_copyback;
948 948
949int 949int
950pmap_enter(pmap, va, pa, prot, flags) 950pmap_enter(pmap, va, pa, prot, flags)
951 pmap_t pmap; 951 pmap_t pmap;
952 vaddr_t va; 952 vaddr_t va;
953 paddr_t pa; 953 paddr_t pa;
954 vm_prot_t prot; 954 vm_prot_t prot;
955 int flags; 955 int flags;
956{ 956{
957 u_int *pte; 957 u_int *pte;
958 int npte; 958 int npte;
959 paddr_t opa; 959 paddr_t opa;
960 bool cacheable = true; 960 bool cacheable = true;
961 bool checkpv = true; 961 bool checkpv = true;
962 bool wired = (flags & PMAP_WIRED) != 0; 962 bool wired = (flags & PMAP_WIRED) != 0;
963 bool can_fail = (flags & PMAP_CANFAIL) != 0; 963 bool can_fail = (flags & PMAP_CANFAIL) != 0;
964 964
965#ifdef DEBUG 965#ifdef DEBUG
966 if (pmapdebug & (PDB_FOLLOW|PDB_ENTER)) 966 if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
967 printf("pmap_enter(%p, %lx, %lx, %x, %x)\n", 967 printf("pmap_enter(%p, %lx, %lx, %x, %x)\n",
968 pmap, va, pa, prot, wired); 968 pmap, va, pa, prot, wired);
969 if (pmap == pmap_kernel()) 969 if (pmap == pmap_kernel())
970 enter_stats.kernel++; 970 enter_stats.kernel++;
971 else 971 else
972 enter_stats.user++; 972 enter_stats.user++;
973#endif 973#endif
974 /* 974 /*
975 * For user mapping, allocate kernel VM resources if necessary. 975 * For user mapping, allocate kernel VM resources if necessary.
976 */ 976 */
977 if (pmap->pm_ptab == NULL) 977 if (pmap->pm_ptab == NULL)
978 pmap->pm_ptab = (pt_entry_t *) 978 pmap->pm_ptab = (pt_entry_t *)
979 uvm_km_alloc(pt_map, M68K_MAX_PTSIZE, 0, 979 uvm_km_alloc(pt_map, M68K_MAX_PTSIZE, 0,
980 UVM_KMF_VAONLY |  980 UVM_KMF_VAONLY |
981 (can_fail ? UVM_KMF_NOWAIT : UVM_KMF_WAITVA)); 981 (can_fail ? UVM_KMF_NOWAIT : UVM_KMF_WAITVA));
982 if (pmap->pm_ptab == NULL) 982 if (pmap->pm_ptab == NULL)
983 return ENOMEM; 983 return ENOMEM;
984 984
985 /* 985 /*
986 * Segment table entry not valid, we need a new PT page 986 * Segment table entry not valid, we need a new PT page
987 */ 987 */
988 if (!pmap_ste_v(pmap, va)) { 988 if (!pmap_ste_v(pmap, va)) {
989 int err = pmap_enter_ptpage(pmap, va, can_fail); 989 int err = pmap_enter_ptpage(pmap, va, can_fail);
990 if (err) 990 if (err)
991 return err; 991 return err;
992 } 992 }
993 993
994 pte = pmap_pte(pmap, va); 994 pte = pmap_pte(pmap, va);
995 opa = pmap_pte_pa(pte); 995 opa = pmap_pte_pa(pte);
996#ifdef DEBUG 996#ifdef DEBUG
997 if (pmapdebug & PDB_ENTER) 997 if (pmapdebug & PDB_ENTER)
998 printf("enter: pte %p, *pte %x\n", pte, *(int *)pte); 998 printf("enter: pte %p, *pte %x\n", pte, *(int *)pte);
999#endif 999#endif
1000 1000
1001 /* 1001 /*
1002 * Mapping has not changed, must be protection or wiring change. 1002 * Mapping has not changed, must be protection or wiring change.
1003 */ 1003 */
1004 if (opa == pa) { 1004 if (opa == pa) {
1005#ifdef DEBUG 1005#ifdef DEBUG
1006 enter_stats.pwchange++; 1006 enter_stats.pwchange++;
1007#endif 1007#endif
1008 /* 1008 /*
1009 * Wiring change, just update stats. 1009 * Wiring change, just update stats.
1010 * We don't worry about wiring PT pages as they remain 1010 * We don't worry about wiring PT pages as they remain
1011 * resident as long as there are valid mappings in them. 1011 * resident as long as there are valid mappings in them.
1012 * Hence, if a user page is wired, the PT page will be also. 1012 * Hence, if a user page is wired, the PT page will be also.
1013 */ 1013 */
1014 if ((wired && !pmap_pte_w(pte)) || (!wired && pmap_pte_w(pte))){ 1014 if ((wired && !pmap_pte_w(pte)) || (!wired && pmap_pte_w(pte))){
1015#ifdef DEBUG 1015#ifdef DEBUG
1016 if (pmapdebug & PDB_ENTER) 1016 if (pmapdebug & PDB_ENTER)
1017 printf("enter: wiring change -> %x\n", wired); 1017 printf("enter: wiring change -> %x\n", wired);
1018#endif 1018#endif
1019 if (wired) 1019 if (wired)
1020 pmap->pm_stats.wired_count++; 1020 pmap->pm_stats.wired_count++;
1021 else 1021 else
1022 pmap->pm_stats.wired_count--; 1022 pmap->pm_stats.wired_count--;
1023#ifdef DEBUG 1023#ifdef DEBUG
1024 enter_stats.wchange++; 1024 enter_stats.wchange++;
1025#endif 1025#endif
1026 } 1026 }
1027 /* 1027 /*
1028 * Retain cache inhibition status 1028 * Retain cache inhibition status
1029 */ 1029 */
1030 checkpv = false; 1030 checkpv = false;
1031 if (pmap_pte_ci(pte)) 1031 if (pmap_pte_ci(pte))
1032 cacheable = false; 1032 cacheable = false;
1033 goto validate; 1033 goto validate;
1034 } 1034 }
1035 1035
1036 /* 1036 /*
1037 * Mapping has changed, invalidate old range and fall through to 1037 * Mapping has changed, invalidate old range and fall through to
1038 * handle validating new mapping. 1038 * handle validating new mapping.
1039 */ 1039 */
1040 if (opa) { 1040 if (opa) {
1041#ifdef DEBUG 1041#ifdef DEBUG
1042 if (pmapdebug & PDB_ENTER) 1042 if (pmapdebug & PDB_ENTER)
1043 printf("enter: removing old mapping %lx\n", va); 1043 printf("enter: removing old mapping %lx\n", va);
1044#endif 1044#endif
1045 pmap_remove_mapping(pmap, va, pte, 1045 pmap_remove_mapping(pmap, va, pte,
1046 PRM_TFLUSH|PRM_CFLUSH|PRM_KEEPPTPAGE); 1046 PRM_TFLUSH|PRM_CFLUSH|PRM_KEEPPTPAGE);
1047#ifdef DEBUG 1047#ifdef DEBUG
1048 enter_stats.mchange++; 1048 enter_stats.mchange++;
1049#endif 1049#endif
1050 } 1050 }
1051 1051
1052 /* 1052 /*
1053 * If this is a new user mapping, increment the wiring count 1053 * If this is a new user mapping, increment the wiring count
1054 * on this PT page. PT pages are wired down as long as there 1054 * on this PT page. PT pages are wired down as long as there
1055 * is a valid mapping in the page. 1055 * is a valid mapping in the page.
1056 */ 1056 */
1057 if (pmap != pmap_kernel()) 1057 if (pmap != pmap_kernel())
1058 pmap_ptpage_addref(trunc_page((vaddr_t)pte)); 1058 pmap_ptpage_addref(trunc_page((vaddr_t)pte));
1059 1059
1060 /* 1060 /*
1061 * Enter on the PV list if part of our managed memory 1061 * Enter on the PV list if part of our managed memory
1062 * Note that we raise IPL while manipulating pv_table 1062 * Note that we raise IPL while manipulating pv_table
1063 * since pmap_enter can be called at interrupt time. 1063 * since pmap_enter can be called at interrupt time.
1064 */ 1064 */
1065 if (PAGE_IS_MANAGED(pa)) { 1065 if (PAGE_IS_MANAGED(pa)) {
1066 struct pv_entry *pv, *npv; 1066 struct pv_entry *pv, *npv;
1067 int s; 1067 int s;
1068 1068
1069#ifdef DEBUG 1069#ifdef DEBUG
1070 enter_stats.managed++; 1070 enter_stats.managed++;
1071#endif 1071#endif
1072 pv = pa_to_pvh(pa); 1072 pv = pa_to_pvh(pa);
1073 s = splvm(); 1073 s = splvm();
1074#ifdef DEBUG 1074#ifdef DEBUG
1075 if (pmapdebug & PDB_ENTER) 1075 if (pmapdebug & PDB_ENTER)
1076 printf("enter: pv at %p: %lx/%p/%p\n", 1076 printf("enter: pv at %p: %lx/%p/%p\n",
1077 pv, pv->pv_va, pv->pv_pmap, pv->pv_next); 1077 pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
1078#endif 1078#endif
1079 /* 1079 /*
1080 * No entries yet, use header as the first entry 1080 * No entries yet, use header as the first entry
1081 */ 1081 */
1082 if (pv->pv_pmap == NULL) { 1082 if (pv->pv_pmap == NULL) {
1083#ifdef DEBUG 1083#ifdef DEBUG
1084 enter_stats.firstpv++; 1084 enter_stats.firstpv++;
1085#endif 1085#endif
1086 pv->pv_va = va; 1086 pv->pv_va = va;
1087 pv->pv_pmap = pmap; 1087 pv->pv_pmap = pmap;
1088 pv->pv_next = NULL; 1088 pv->pv_next = NULL;
1089 pv->pv_ptste = NULL; 1089 pv->pv_ptste = NULL;
1090 pv->pv_ptpmap = NULL; 1090 pv->pv_ptpmap = NULL;
1091 pv->pv_flags = 0; 1091 pv->pv_flags = 0;
1092 } 1092 }
1093 /* 1093 /*
1094 * There is at least one other VA mapping this page. 1094 * There is at least one other VA mapping this page.
1095 * Place this entry after the header. 1095 * Place this entry after the header.
1096 */ 1096 */
1097 else { 1097 else {
1098#ifdef DEBUG 1098#ifdef DEBUG
1099 for (npv = pv; npv; npv = npv->pv_next) 1099 for (npv = pv; npv; npv = npv->pv_next)
1100 if (pmap == npv->pv_pmap && va == npv->pv_va) 1100 if (pmap == npv->pv_pmap && va == npv->pv_va)
1101 panic("pmap_enter: already in pv_tab"); 1101 panic("pmap_enter: already in pv_tab");
1102#endif 1102#endif
1103 npv = pmap_alloc_pv(); 1103 npv = pmap_alloc_pv();
1104 npv->pv_va = va; 1104 npv->pv_va = va;
1105 npv->pv_pmap = pmap; 1105 npv->pv_pmap = pmap;
1106 npv->pv_next = pv->pv_next; 1106 npv->pv_next = pv->pv_next;
1107 npv->pv_ptste = NULL; 1107 npv->pv_ptste = NULL;
1108 npv->pv_ptpmap = NULL; 1108 npv->pv_ptpmap = NULL;
1109 pv->pv_next = npv; 1109 pv->pv_next = npv;
1110#ifdef DEBUG 1110#ifdef DEBUG
1111 if (!npv->pv_next) 1111 if (!npv->pv_next)
1112 enter_stats.secondpv++; 1112 enter_stats.secondpv++;
1113#endif 1113#endif
1114 } 1114 }
1115 splx(s); 1115 splx(s);
1116 } 1116 }
1117 /* 1117 /*
1118 * Assumption: if it is not part of our managed memory 1118 * Assumption: if it is not part of our managed memory
1119 * then it must be device memory which may be volitile. 1119 * then it must be device memory which may be volitile.
1120 */ 1120 */
1121 else if (pmap_initialized) { 1121 else if (pmap_initialized) {
1122 checkpv = cacheable = false; 1122 checkpv = cacheable = false;
1123#ifdef DEBUG 1123#ifdef DEBUG
1124 enter_stats.unmanaged++; 1124 enter_stats.unmanaged++;
1125#endif 1125#endif
1126 } 1126 }
1127 1127
1128 /* 1128 /*
1129 * Increment counters 1129 * Increment counters
1130 */ 1130 */
1131 pmap->pm_stats.resident_count++; 1131 pmap->pm_stats.resident_count++;
1132 if (wired) 1132 if (wired)
1133 pmap->pm_stats.wired_count++; 1133 pmap->pm_stats.wired_count++;
1134 1134
1135validate: 1135validate:
1136 /* 1136 /*
1137 * Now validate mapping with desired protection/wiring. 1137 * Now validate mapping with desired protection/wiring.
1138 * Assume uniform modified and referenced status for all 1138 * Assume uniform modified and referenced status for all
1139 * AMIGA pages in a MACH page. 1139 * AMIGA pages in a MACH page.
1140 */ 1140 */
1141#if defined(M68040) || defined(M68060) 1141#if defined(M68040) || defined(M68060)
1142#if DEBUG 1142#if DEBUG
1143 if (pmapdebug & 0x10000 && mmutype == MMU_68040 && 1143 if (pmapdebug & 0x10000 && mmutype == MMU_68040 &&
1144 pmap == pmap_kernel()) { 1144 pmap == pmap_kernel()) {
1145 const char *s; 1145 const char *s;
1146 struct proc *cp = curproc; 1146 struct proc *cp = curproc;
1147 if (va >= amiga_uptbase && 1147 if (va >= amiga_uptbase &&
1148 va < (amiga_uptbase + M68K_PTMAXSIZE)) 1148 va < (amiga_uptbase + M68K_PTMAXSIZE))
1149 s = "UPT"; 1149 s = "UPT";
1150 else if (va >= (u_int)Sysmap && 1150 else if (va >= (u_int)Sysmap &&
1151 va < ((u_int)Sysmap + M68K_MAX_KPTSIZE)) 1151 va < ((u_int)Sysmap + M68K_MAX_KPTSIZE))
1152 s = "KPT"; 1152 s = "KPT";
1153 else if (va >= (u_int)pmap->pm_stab && 1153 else if (va >= (u_int)pmap->pm_stab &&
1154 va < ((u_int)pmap->pm_stab + M68K_STSIZE)) 1154 va < ((u_int)pmap->pm_stab + M68K_STSIZE))
1155 s = "KST"; 1155 s = "KST";
1156 else if (cp && 1156 else if (cp &&
1157 va >= (u_int)cp->p_vmspace->vm_map.pmap->pm_stab && 1157 va >= (u_int)cp->p_vmspace->vm_map.pmap->pm_stab &&
1158 va < ((u_int)cp->p_vmspace->vm_map.pmap->pm_stab + 1158 va < ((u_int)cp->p_vmspace->vm_map.pmap->pm_stab +
1159 M68K_STSIZE)) 1159 M68K_STSIZE))
1160 s = "UST"; 1160 s = "UST";
1161 else 1161 else
1162 s = "other"; 1162 s = "other";
1163 printf("pmap_init: validating %s kernel page at %lx -> %lx\n", 1163 printf("pmap_init: validating %s kernel page at %lx -> %lx\n",
1164 s, va, pa); 1164 s, va, pa);
1165 1165
1166 } 1166 }
1167#endif 1167#endif
1168 if (mmutype == MMU_68040 && pmap == pmap_kernel() && ( 1168 if (mmutype == MMU_68040 && pmap == pmap_kernel() && (
1169 (va >= amiga_uptbase && va < (amiga_uptbase + M68K_PTMAXSIZE)) || 1169 (va >= amiga_uptbase && va < (amiga_uptbase + M68K_PTMAXSIZE)) ||
1170 (va >= (u_int)Sysmap && va < ((u_int)Sysmap + M68K_MAX_KPTSIZE)))) 1170 (va >= (u_int)Sysmap && va < ((u_int)Sysmap + M68K_MAX_KPTSIZE))))
1171 cacheable = false; /* don't cache user page tables */ 1171 cacheable = false; /* don't cache user page tables */
1172#endif 1172#endif
1173 npte = (pa & PG_FRAME) | pte_prot(pmap, prot) | PG_V; 1173 npte = (pa & PG_FRAME) | pte_prot(pmap, prot) | PG_V;
1174 npte |= (*(int *)pte & (PG_M|PG_U)); 1174 npte |= (*(int *)pte & (PG_M|PG_U));
1175 if (wired) 1175 if (wired)
1176 npte |= PG_W; 1176 npte |= PG_W;
1177 if (!checkpv && !cacheable) 1177 if (!checkpv && !cacheable)
1178#if defined(M68060) && defined(NO_SLOW_CIRRUS) 1178#if defined(M68060) && defined(NO_SLOW_CIRRUS)
1179#if defined(M68040) || defined(M68030) || defined(M68020) 1179#if defined(M68040) || defined(M68030) || defined(M68020)
1180 npte |= (cputype == CPU_68060 ? PG_CIN : PG_CI); 1180 npte |= (cputype == CPU_68060 ? PG_CIN : PG_CI);
1181#else 1181#else
1182 npte |= PG_CIN; 1182 npte |= PG_CIN;
1183#endif 1183#endif
1184#else 1184#else
1185 npte |= PG_CI; 1185 npte |= PG_CI;
1186#endif 1186#endif
1187#if defined(M68040) || defined(M68060) 1187#if defined(M68040) || defined(M68060)
1188 else if (mmutype == MMU_68040 && (npte & PG_PROT) == PG_RW && 1188 else if (mmutype == MMU_68040 && (npte & PG_PROT) == PG_RW &&
1189 (kernel_copyback || pmap != pmap_kernel())) 1189 (kernel_copyback || pmap != pmap_kernel()))
1190 npte |= PG_CCB; /* cache copyback */ 1190 npte |= PG_CCB; /* cache copyback */
1191#endif 1191#endif
1192 if (flags & VM_PROT_ALL) { 1192 if (flags & VM_PROT_ALL) {
1193 npte |= PG_U; 1193 npte |= PG_U;
1194 if (flags & VM_PROT_WRITE) 1194 if (flags & VM_PROT_WRITE)
1195 npte |= PG_M; 1195 npte |= PG_M;
1196 } 1196 }
1197 1197
1198 /* 1198 /*
1199 * Remember if this was a wiring-only change. 1199 * Remember if this was a wiring-only change.
1200 * If so, we need not flush the TLB and caches. 1200 * If so, we need not flush the TLB and caches.
1201 */ 1201 */
1202 wired = ((*(int *)pte ^ npte) == PG_W); 1202 wired = ((*(int *)pte ^ npte) == PG_W);
1203#if defined(M68040) || defined(M68060) 1203#if defined(M68040) || defined(M68060)
1204 if (mmutype == MMU_68040 && !wired) { 1204 if (mmutype == MMU_68040 && !wired) {
1205 DCFP(pa); 1205 DCFP(pa);
1206 ICPP(pa); 1206 ICPP(pa);
1207 } 1207 }
1208#endif 1208#endif
1209#ifdef DEBUG 1209#ifdef DEBUG
1210 if (pmapdebug & PDB_ENTER) 1210 if (pmapdebug & PDB_ENTER)
1211 printf("enter: new pte value %x\n", npte); 1211 printf("enter: new pte value %x\n", npte);
1212#endif 1212#endif
1213 *(int *)pte++ = npte; 1213 *(int *)pte++ = npte;
1214 if (!wired && active_pmap(pmap)) 1214 if (!wired && active_pmap(pmap))
1215 TBIS(va); 1215 TBIS(va);
1216#ifdef DEBUG 1216#ifdef DEBUG
1217 if ((pmapdebug & PDB_WIRING) && pmap != pmap_kernel()) { 1217 if ((pmapdebug & PDB_WIRING) && pmap != pmap_kernel()) {
1218 va -= PAGE_SIZE; 1218 va -= PAGE_SIZE;
1219 pmap_check_wiring("enter", trunc_page((vaddr_t) pmap_pte(pmap, va))); 1219 pmap_check_wiring("enter", trunc_page((vaddr_t) pmap_pte(pmap, va)));
1220 } 1220 }
1221#endif 1221#endif
1222 return 0; 1222 return 0;
1223} 1223}
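
One subtle point in pmap_enter() above: after npte is built, the wired variable is reused to mean "this was a wiring-only change". The XOR of the old and new PTE images equals PG_W exactly when the wired bit is the only bit that differs, and only in that case are the TLB flush and the 68040 cache pushes skipped. A small standalone sketch of the test follows; the PG_W value is an assumed bit position, not the real definition from the m68k pte headers.

	#include <stdio.h>

	#define PG_W 0x0100u	/* assumed wired bit, for illustration only */

	/* Returns 1 when the old and new PTE images differ only in the wired bit. */
	static int wiring_only_change(unsigned opte, unsigned npte)
	{
		return (opte ^ npte) == PG_W;
	}

	int main(void)
	{
		unsigned opte = 0x00012019u;	/* some PTE image without PG_W set */

		printf("%d\n", wiring_only_change(opte, opte | PG_W));		/* 1: only PG_W flipped */
		printf("%d\n", wiring_only_change(opte, opte | 0x0200u));	/* 0: another bit changed */
		return 0;
	}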
1224 1224
1225void 1225void
1226pmap_kenter_pa(va, pa, prot) 1226pmap_kenter_pa(va, pa, prot)
1227 vaddr_t va; 1227 vaddr_t va;
1228 paddr_t pa; 1228 paddr_t pa;
1229 vm_prot_t prot; 1229 vm_prot_t prot;
1230{ 1230{
1231 struct pmap *pmap = pmap_kernel(); 1231 struct pmap *pmap = pmap_kernel();
1232 pt_entry_t *pte; 1232 pt_entry_t *pte;
1233 int s, npte; 1233 int s, npte;
1234 1234
1235 PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER, 1235 PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER,
1236 ("pmap_kenter_pa(%lx, %lx, %x)\n", va, pa, prot)); 1236 ("pmap_kenter_pa(%lx, %lx, %x)\n", va, pa, prot));
1237 1237
1238 /* 1238 /*
1239 * Segment table entry not valid, we need a new PT page 1239 * Segment table entry not valid, we need a new PT page
1240 */ 1240 */
1241 1241
1242 if (!pmap_ste_v(pmap, va)) { 1242 if (!pmap_ste_v(pmap, va)) {
1243 s = splvm(); 1243 s = splvm();
1244 pmap_enter_ptpage(pmap, va, false); 1244 pmap_enter_ptpage(pmap, va, false);
1245 splx(s); 1245 splx(s);
1246 } 1246 }
1247 1247
1248 pa = m68k_trunc_page(pa); 1248 pa = m68k_trunc_page(pa);
1249 pte = pmap_pte(pmap, va); 1249 pte = pmap_pte(pmap, va);
1250 1250
1251 PMAP_DPRINTF(PDB_ENTER, ("enter: pte %p, *pte %x\n", pte, *pte)); 1251 PMAP_DPRINTF(PDB_ENTER, ("enter: pte %p, *pte %x\n", pte, *pte));
1252 KASSERT(!pmap_pte_v(pte)); 1252 KASSERT(!pmap_pte_v(pte));
1253 1253
1254 /* 1254 /*
1255 * Increment counters 1255 * Increment counters
1256 */ 1256 */
1257 1257
1258 pmap->pm_stats.resident_count++; 1258 pmap->pm_stats.resident_count++;
1259 pmap->pm_stats.wired_count++; 1259 pmap->pm_stats.wired_count++;
1260 1260
1261 /* 1261 /*
1262 * Build the new PTE. 1262 * Build the new PTE.
1263 */ 1263 */
1264 1264
1265 npte = pa | pte_prot(pmap, prot) | PG_V | PG_W; 1265 npte = pa | pte_prot(pmap, prot) | PG_V | PG_W;
1266#if defined(M68040) 1266#if defined(M68040)
1267 if (mmutype == MMU_68040 && (npte & (PG_PROT)) == PG_RW) 1267 if (mmutype == MMU_68040 && (npte & (PG_PROT)) == PG_RW)
1268 npte |= PG_CCB; 1268 npte |= PG_CCB;
1269#endif 1269#endif
1270 1270
1271 PMAP_DPRINTF(PDB_ENTER, ("enter: new pte value %x\n", npte)); 1271 PMAP_DPRINTF(PDB_ENTER, ("enter: new pte value %x\n", npte));
1272#if defined(M68040) 1272#if defined(M68040)
1273 if (mmutype == MMU_68040) { 1273 if (mmutype == MMU_68040) {
1274 DCFP(pa); 1274 DCFP(pa);
1275 ICPP(pa); 1275 ICPP(pa);
1276 } 1276 }
1277#endif 1277#endif
1278 *pte = npte; 1278 *pte = npte;
1279} 1279}
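
Unlike pmap_enter(), pmap_kenter_pa() above always produces a wired (PG_W), valid (PG_V) kernel mapping, never touches the PV lists, and on a 68040 gives a writable mapping copyback caching (PG_CCB). The toy composition below mirrors that bit assembly; all bit values are invented for illustration and do not match the real definitions in the m68k pte headers.

	#include <stdio.h>

	/* Invented bit positions, illustration only. */
	#define PG_V	0x0001u
	#define PG_RO	0x0004u
	#define PG_CCB	0x0020u
	#define PG_W	0x0100u
	#define PG_FRAME 0xfffff000u

	static unsigned kenter_pte(unsigned pa, int writable, int is_68040)
	{
		unsigned npte = (pa & PG_FRAME) | PG_V | PG_W;	/* always valid and wired */

		if (!writable)
			npte |= PG_RO;
		else if (is_68040)
			npte |= PG_CCB;		/* copyback-cache writable kernel pages */
		return npte;
	}

	int main(void)
	{
		/* a writable kernel page on an (assumed) 68040 */
		printf("%#x\n", kenter_pte(0x00345678u, 1, 1));
		return 0;
	}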
1280 1280
1281void 1281void
1282pmap_kremove(va, len) 1282pmap_kremove(va, len)
1283 vaddr_t va; 1283 vaddr_t va;
1284 vsize_t len; 1284 vsize_t len;
1285{ 1285{
1286 struct pmap *pmap = pmap_kernel(); 1286 struct pmap *pmap = pmap_kernel();
1287 vaddr_t sva, eva, nssva; 1287 vaddr_t sva, eva, nssva;
1288 pt_entry_t *pte; 1288 pt_entry_t *pte;
1289 1289
1290 PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT, 1290 PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
1291 ("pmap_kremove(%lx, %lx)\n", va, len)); 1291 ("pmap_kremove(%lx, %lx)\n", va, len));
1292 1292
1293 sva = va; 1293 sva = va;
1294 eva = va + len; 1294 eva = va + len;
1295 while (sva < eva) { 1295 while (sva < eva) {
1296 nssva = m68k_trunc_seg(sva) + NBSEG; 1296 nssva = m68k_trunc_seg(sva) + NBSEG;
1297 if (nssva == 0 || nssva > eva) 1297 if (nssva == 0 || nssva > eva)
1298 nssva = eva; 1298 nssva = eva;
1299 1299
1300 /* 1300 /*
1301 * If VA belongs to an unallocated segment, 1301 * If VA belongs to an unallocated segment,
1302 * skip to the next segment boundary. 1302 * skip to the next segment boundary.
1303 */ 1303 */
1304 1304
1305 if (!pmap_ste_v(pmap, sva)) { 1305 if (!pmap_ste_v(pmap, sva)) {
1306 sva = nssva; 1306 sva = nssva;
1307 continue; 1307 continue;
1308 } 1308 }
1309 1309
1310 /* 1310 /*
1311 * Invalidate every valid mapping within this segment. 1311 * Invalidate every valid mapping within this segment.
1312 */ 1312 */
1313 1313
1314 pte = pmap_pte(pmap, sva); 1314 pte = pmap_pte(pmap, sva);
1315 while (sva < nssva) { 1315 while (sva < nssva) {
1316 if (pmap_pte_v(pte)) { 1316 if (pmap_pte_v(pte)) {
1317#ifdef DEBUG 1317#ifdef DEBUG
1318 struct pv_entry *pv; 1318 struct pv_entry *pv;
1319 int s; 1319 int s;
1320 1320
1321 pv = pa_to_pvh(pmap_pte_pa(pte)); 1321 pv = pa_to_pvh(pmap_pte_pa(pte));
1322 s = splvm(); 1322 s = splvm();
1323 while (pv->pv_pmap != NULL) { 1323 while (pv->pv_pmap != NULL) {
1324 KASSERT(pv->pv_pmap != pmap_kernel() || 1324 KASSERT(pv->pv_pmap != pmap_kernel() ||
1325 pv->pv_va != sva); 1325 pv->pv_va != sva);
1326 pv = pv->pv_next; 1326 pv = pv->pv_next;
1327 if (pv == NULL) { 1327 if (pv == NULL) {
1328 break; 1328 break;
1329 } 1329 }
1330 } 1330 }
1331 splx(s); 1331 splx(s);
1332#endif 1332#endif
1333 /* 1333 /*
1334 * Update statistics 1334 * Update statistics
1335 */ 1335 */
1336 1336
1337 pmap->pm_stats.wired_count--; 1337 pmap->pm_stats.wired_count--;
1338 pmap->pm_stats.resident_count--; 1338 pmap->pm_stats.resident_count--;
1339 1339
1340 /* 1340 /*
1341 * Invalidate the PTE. 1341 * Invalidate the PTE.
1342 */ 1342 */
1343 1343
1344 *pte = PG_NV; 1344 *pte = PG_NV;
1345 TBIS(sva); 1345 TBIS(sva);
1346 } 1346 }
1347 pte++; 1347 pte++;
1348 sva += PAGE_SIZE; 1348 sva += PAGE_SIZE;
1349 } 1349 }
1350 } 1350 }
1351} 1351}
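
The segment walk in pmap_kremove() above guards against address wrap: for a va in the topmost segment, m68k_trunc_seg(sva) + NBSEG overflows to 0, so the code clamps the segment end to eva. A standalone sketch of that clamp follows, using an assumed 32-bit address space, a made-up NBSEG value, and a trunc_seg() helper standing in for the real macro.

	#include <stdio.h>
	#include <stdint.h>

	#define NBSEG 0x400000u		/* assumed 4 MB segment, illustration only */

	/* Stand-in for m68k_trunc_seg(): round down to the segment boundary. */
	static uint32_t trunc_seg(uint32_t va)
	{
		return va & ~(NBSEG - 1);
	}

	int main(void)
	{
		uint32_t sva = 0xffc01000u;	/* inside the topmost segment */
		uint32_t eva = 0xffffffffu;
		uint32_t nssva = trunc_seg(sva) + NBSEG;	/* wraps around to 0 */

		if (nssva == 0 || nssva > eva)
			nssva = eva;		/* clamp, as pmap_kremove() does */

		printf("segment end = %#x\n", nssva);		/* prints 0xffffffff */
		return 0;
	}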
1352 1352
1353/* 1353/*