Wed Dec 31 11:43:26 2008 UTC
- remove extern decls for unused msgbufaddr and msgbufpa
- remove noncontig_enable, which has been moved into pmap_bootstrap.c


(tsutsui)
diff -r1.144 -r1.145 src/sys/arch/amiga/amiga/pmap.c

cvs diff -r1.144 -r1.145 src/sys/arch/amiga/amiga/Attic/pmap.c

--- src/sys/arch/amiga/amiga/Attic/pmap.c 2008/12/31 11:37:20 1.144
+++ src/sys/arch/amiga/amiga/Attic/pmap.c 2008/12/31 11:43:26 1.145
@@ -1,1302 +1,1298 @@
1/* $NetBSD: pmap.c,v 1.144 2008/12/31 11:37:20 tsutsui Exp $ */ 1/* $NetBSD: pmap.c,v 1.145 2008/12/31 11:43:26 tsutsui Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1999 The NetBSD Foundation, Inc. 4 * Copyright (c) 1999 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe. 8 * by Jason R. Thorpe.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32/* 32/*
33 * Copyright (c) 1991 Regents of the University of California. 33 * Copyright (c) 1991 Regents of the University of California.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * This code is derived from software contributed to Berkeley by 36 * This code is derived from software contributed to Berkeley by
37 * the Systems Programming Group of the University of Utah Computer 37 * the Systems Programming Group of the University of Utah Computer
38 * Science Department. 38 * Science Department.
39 * 39 *
40 * Redistribution and use in source and binary forms, with or without 40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions 41 * modification, are permitted provided that the following conditions
42 * are met: 42 * are met:
43 * 1. Redistributions of source code must retain the above copyright 43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer. 44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright 45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the 46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution. 47 * documentation and/or other materials provided with the distribution.
48 * 3. Neither the name of the University nor the names of its contributors 48 * 3. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software 49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission. 50 * without specific prior written permission.
51 * 51 *
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE. 62 * SUCH DAMAGE.
63 * 63 *
64 * @(#)pmap.c 7.5 (Berkeley) 5/10/91 64 * @(#)pmap.c 7.5 (Berkeley) 5/10/91
65 */ 65 */
66 66
67/* 67/*
68 * AMIGA physical map management code. 68 * AMIGA physical map management code.
69 * For 68020/68030 machines with 68851, or 68030 MMUs 69 * For 68020/68030 machines with 68851, or 68030 MMUs
70 * Don't even pay lip service to multiprocessor support. 70 * Don't even pay lip service to multiprocessor support.
71 * 71 *
72 * right now because of the assumed one-to-one relationship of PT 72 * right now because of the assumed one-to-one relationship of PT
73 * pages to STEs. 73 * pages to STEs.
74 */ 74 */
75 75
76/* 76/*
77 * Manages physical address maps. 77 * Manages physical address maps.
78 * 78 *
79 * In addition to hardware address maps, this 79 * In addition to hardware address maps, this
80 * module is called upon to provide software-use-only 80 * module is called upon to provide software-use-only
81 * maps which may or may not be stored in the same 81 * maps which may or may not be stored in the same
82 * form as hardware maps. These pseudo-maps are 82 * form as hardware maps. These pseudo-maps are
83 * used to store intermediate results from copy 83 * used to store intermediate results from copy
84 * operations to and from address spaces. 84 * operations to and from address spaces.
85 * 85 *
86 * Since the information managed by this module is 86 * Since the information managed by this module is
87 * also stored by the logical address mapping module, 87 * also stored by the logical address mapping module,
88 * this module may throw away valid virtual-to-physical 88 * this module may throw away valid virtual-to-physical
89 * mappings at almost any time. However, invalidations 89 * mappings at almost any time. However, invalidations
90 * of virtual-to-physical mappings must be done as 90 * of virtual-to-physical mappings must be done as
91 * requested. 91 * requested.
92 * 92 *
93 * In order to cope with hardware architectures which 93 * In order to cope with hardware architectures which
94 * make virtual-to-physical map invalidates expensive, 94 * make virtual-to-physical map invalidates expensive,
95 * this module may delay invalidate or reduced protection 95 * this module may delay invalidate or reduced protection
96 * operations until such time as they are actually 96 * operations until such time as they are actually
97 * necessary. This module is given full information as 97 * necessary. This module is given full information as
98 * to which processors are currently using which maps, 98 * to which processors are currently using which maps,
99 * and to when physical maps must be made correct. 99 * and to when physical maps must be made correct.
100 */ 100 */
101 101
102#include <sys/cdefs.h> 102#include <sys/cdefs.h>
103__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.144 2008/12/31 11:37:20 tsutsui Exp $"); 103__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.145 2008/12/31 11:43:26 tsutsui Exp $");
104 104
105#include <sys/param.h> 105#include <sys/param.h>
106#include <sys/systm.h> 106#include <sys/systm.h>
107#include <sys/proc.h> 107#include <sys/proc.h>
108#include <sys/malloc.h> 108#include <sys/malloc.h>
109#include <sys/user.h> 109#include <sys/user.h>
110 110
111#include <uvm/uvm.h> 111#include <uvm/uvm.h>
112 112
113#include <machine/pte.h> 113#include <machine/pte.h>
114#include <machine/cpu.h> 114#include <machine/cpu.h>
115#include <machine/vmparam.h> 115#include <machine/vmparam.h>
116 116
117#include <m68k/cacheops.h> 117#include <m68k/cacheops.h>
118 118
119#include <amiga/amiga/memlist.h> 119#include <amiga/amiga/memlist.h>
120/* 120/*
121 * Allocate various and sundry SYSMAPs used in the days of old VM 121 * Allocate various and sundry SYSMAPs used in the days of old VM
122 * and not yet converted. XXX. 122 * and not yet converted. XXX.
123 */ 123 */
124 124
125#ifdef DEBUG 125#ifdef DEBUG
126struct kpt_stats { 126struct kpt_stats {
127 int collectscans; 127 int collectscans;
128 int collectpages; 128 int collectpages;
129 int kpttotal; 129 int kpttotal;
130 int kptinuse; 130 int kptinuse;
131 int kptmaxuse; 131 int kptmaxuse;
132}; 132};
133struct enter_stats { 133struct enter_stats {
134 int kernel; /* entering kernel mapping */ 134 int kernel; /* entering kernel mapping */
135 int user; /* entering user mapping */ 135 int user; /* entering user mapping */
136 int ptpneeded; /* needed to allocate a PT page */ 136 int ptpneeded; /* needed to allocate a PT page */
137 int pwchange; /* no mapping change, just wiring or protection */ 137 int pwchange; /* no mapping change, just wiring or protection */
138 int wchange; /* no mapping change, just wiring */ 138 int wchange; /* no mapping change, just wiring */
139 int mchange; /* was mapped but mapping to different page */ 139 int mchange; /* was mapped but mapping to different page */
140 int managed; /* a managed page */ 140 int managed; /* a managed page */
141 int firstpv; /* first mapping for this PA */ 141 int firstpv; /* first mapping for this PA */
142 int secondpv; /* second mapping for this PA */ 142 int secondpv; /* second mapping for this PA */
143 int ci; /* cache inhibited */ 143 int ci; /* cache inhibited */
144 int unmanaged; /* not a managed page */ 144 int unmanaged; /* not a managed page */
145 int flushes; /* cache flushes */ 145 int flushes; /* cache flushes */
146}; 146};
147struct remove_stats { 147struct remove_stats {
148 int calls; 148 int calls;
149 int removes; 149 int removes;
150 int pvfirst; 150 int pvfirst;
151 int pvsearch; 151 int pvsearch;
152 int ptinvalid; 152 int ptinvalid;
153 int uflushes; 153 int uflushes;
154 int sflushes; 154 int sflushes;
155}; 155};
156 156
157struct remove_stats remove_stats; 157struct remove_stats remove_stats;
158struct enter_stats enter_stats; 158struct enter_stats enter_stats;
159struct kpt_stats kpt_stats; 159struct kpt_stats kpt_stats;
160 160
161#define PDB_FOLLOW 0x0001 161#define PDB_FOLLOW 0x0001
162#define PDB_INIT 0x0002 162#define PDB_INIT 0x0002
163#define PDB_ENTER 0x0004 163#define PDB_ENTER 0x0004
164#define PDB_REMOVE 0x0008 164#define PDB_REMOVE 0x0008
165#define PDB_CREATE 0x0010 165#define PDB_CREATE 0x0010
166#define PDB_PTPAGE 0x0020 166#define PDB_PTPAGE 0x0020
167#define PDB_CACHE 0x0040 167#define PDB_CACHE 0x0040
168#define PDB_BITS 0x0080 168#define PDB_BITS 0x0080
169#define PDB_COLLECT 0x0100 169#define PDB_COLLECT 0x0100
170#define PDB_PROTECT 0x0200 170#define PDB_PROTECT 0x0200
171#define PDB_SEGTAB 0x0400 171#define PDB_SEGTAB 0x0400
172#define PDB_PARANOIA 0x2000 172#define PDB_PARANOIA 0x2000
173#define PDB_WIRING 0x4000 173#define PDB_WIRING 0x4000
174#define PDB_PVDUMP 0x8000 174#define PDB_PVDUMP 0x8000
175int debugmap = 0; 175int debugmap = 0;
176int pmapdebug = PDB_PARANOIA; 176int pmapdebug = PDB_PARANOIA;
177 177
178#define PMAP_DPRINTF(l, x) if (pmapdebug & (l)) printf x 178#define PMAP_DPRINTF(l, x) if (pmapdebug & (l)) printf x
179 179
180static void pmap_check_wiring(const char *, vaddr_t); 180static void pmap_check_wiring(const char *, vaddr_t);
181static void pmap_pvdump(paddr_t); 181static void pmap_pvdump(paddr_t);
182#else 182#else
183#define PMAP_DPRINTF(l, x) 183#define PMAP_DPRINTF(l, x)
184#endif 184#endif
185 185
186/* 186/*
187 * Get STEs and PTEs for user/kernel address space 187 * Get STEs and PTEs for user/kernel address space
188 */ 188 */
189#if defined(M68040) || defined(M68060) 189#if defined(M68040) || defined(M68060)
190#if defined(M68020) || defined(M68030) 190#if defined(M68020) || defined(M68030)
191#define pmap_ste(m, v) (&((m)->pm_stab[(vaddr_t)(v) \ 191#define pmap_ste(m, v) (&((m)->pm_stab[(vaddr_t)(v) \
192 >> (mmutype == MMU_68040 ? SG4_SHIFT1 : SG_ISHIFT)])) 192 >> (mmutype == MMU_68040 ? SG4_SHIFT1 : SG_ISHIFT)]))
193#else 193#else
194#define pmap_ste(m, v) (&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1])) 194#define pmap_ste(m, v) (&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1]))
195#endif 195#endif
196#define pmap_ste1(m, v) (&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1])) 196#define pmap_ste1(m, v) (&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1]))
197/* XXX assumes physically contiguous ST pages (if more than one) */ 197/* XXX assumes physically contiguous ST pages (if more than one) */
198#define pmap_ste2(m, v) \ 198#define pmap_ste2(m, v) \
199 (&((m)->pm_stab[(st_entry_t *)(*(u_int *)pmap_ste1(m,v) & SG4_ADDR1) \ 199 (&((m)->pm_stab[(st_entry_t *)(*(u_int *)pmap_ste1(m,v) & SG4_ADDR1) \
200 - (m)->pm_stpa + (((v) & SG4_MASK2) >> SG4_SHIFT2)])) 200 - (m)->pm_stpa + (((v) & SG4_MASK2) >> SG4_SHIFT2)]))
201#define pmap_ste_v(m, v) \ 201#define pmap_ste_v(m, v) \
202 (mmutype == MMU_68040 \ 202 (mmutype == MMU_68040 \
203 ? ((*pmap_ste1(m, v) & SG_V) && \ 203 ? ((*pmap_ste1(m, v) & SG_V) && \
204 (*pmap_ste2(m, v) & SG_V)) \ 204 (*pmap_ste2(m, v) & SG_V)) \
205 : (*pmap_ste(m, v) & SG_V)) 205 : (*pmap_ste(m, v) & SG_V))
206#else /* defined(M68040) || defined(M68060) */ 206#else /* defined(M68040) || defined(M68060) */
207#define pmap_ste(m, v) (&((m)->pm_stab[(vaddr_t)(v) >> SG_ISHIFT])) 207#define pmap_ste(m, v) (&((m)->pm_stab[(vaddr_t)(v) >> SG_ISHIFT]))
208#define pmap_ste_v(m, v) (*pmap_ste(m, v) & SG_V) 208#define pmap_ste_v(m, v) (*pmap_ste(m, v) & SG_V)
209#endif /* defined(M68040) || defined(M68060) */ 209#endif /* defined(M68040) || defined(M68060) */
210 210
211#define pmap_pte(m, v) (&((m)->pm_ptab[(vaddr_t)(v) >> PG_SHIFT])) 211#define pmap_pte(m, v) (&((m)->pm_ptab[(vaddr_t)(v) >> PG_SHIFT]))
212 212
213#define pmap_pte_pa(pte) (*(u_int *)(pte) & PG_FRAME) 213#define pmap_pte_pa(pte) (*(u_int *)(pte) & PG_FRAME)
214 214
215#define pmap_pte_w(pte) (*(u_int *)(pte) & PG_W) 215#define pmap_pte_w(pte) (*(u_int *)(pte) & PG_W)
216#define pmap_pte_ci(pte) (*(u_int *)(pte) & PG_CI) 216#define pmap_pte_ci(pte) (*(u_int *)(pte) & PG_CI)
217#define pmap_pte_m(pte) (*(u_int *)(pte) & PG_M) 217#define pmap_pte_m(pte) (*(u_int *)(pte) & PG_M)
218#define pmap_pte_u(pte) (*(u_int *)(pte) & PG_U) 218#define pmap_pte_u(pte) (*(u_int *)(pte) & PG_U)
219#define pmap_pte_prot(pte) (*(u_int *)(pte) & PG_PROT) 219#define pmap_pte_prot(pte) (*(u_int *)(pte) & PG_PROT)
220#define pmap_pte_v(pte) (*(u_int *)(pte) & PG_V) 220#define pmap_pte_v(pte) (*(u_int *)(pte) & PG_V)
221 221
222#define pmap_pte_set_w(pte, v) \ 222#define pmap_pte_set_w(pte, v) \
223 do { if (v) *(u_int *)(pte) |= PG_W; else *(u_int *)(pte) &= ~PG_W; \ 223 do { if (v) *(u_int *)(pte) |= PG_W; else *(u_int *)(pte) &= ~PG_W; \
224 } while (0) 224 } while (0)
225#define pmap_pte_set_prot(pte, v) \ 225#define pmap_pte_set_prot(pte, v) \
226 do { if (v) *(u_int *)(pte) |= PG_PROT; else *(u_int *)(pte) &= ~PG_PROT; \ 226 do { if (v) *(u_int *)(pte) |= PG_PROT; else *(u_int *)(pte) &= ~PG_PROT; \
227 } while (0) 227 } while (0)
228#define pmap_pte_w_chg(pte, nw) ((nw) ^ pmap_pte_w(pte)) 228#define pmap_pte_w_chg(pte, nw) ((nw) ^ pmap_pte_w(pte))
229#define pmap_pte_prot_chg(pte, np) ((np) ^ pmap_pte_prot(pte)) 229#define pmap_pte_prot_chg(pte, np) ((np) ^ pmap_pte_prot(pte))
230 230
231#define active_pmap(pm) \ 231#define active_pmap(pm) \
232 ((pm) == pmap_kernel() || (pm) == curproc->p_vmspace->vm_map.pmap) 232 ((pm) == pmap_kernel() || (pm) == curproc->p_vmspace->vm_map.pmap)
233 233
234/* 234/*
235 * Given a map and a machine independent protection code, 235 * Given a map and a machine independent protection code,
236 * convert to a vax protection code. 236 * convert to a vax protection code.
237 */ 237 */
238#define pte_prot(m, p) (protection_codes[p]) 238#define pte_prot(m, p) (protection_codes[p])
239int protection_codes[8]; 239int protection_codes[8];
240 240
241/* 241/*
242 * Kernel page table page management. 242 * Kernel page table page management.
243 * 243 *
244 * One additional page of KPT allows for 16 MB of virtual buffer cache. 244 * One additional page of KPT allows for 16 MB of virtual buffer cache.
245 * A GENERIC kernel allocates this for 2 MB of real buffer cache, 245 * A GENERIC kernel allocates this for 2 MB of real buffer cache,
246 * which in turn is allocated for 38 MB of RAM. 246 * which in turn is allocated for 38 MB of RAM.
247 * We add one per 16 MB of RAM to allow for tuning the machine-independent 247 * We add one per 16 MB of RAM to allow for tuning the machine-independent
248 * options. 248 * options.
249 */ 249 */
250#ifndef NKPTADDSHIFT 250#ifndef NKPTADDSHIFT
251#define NKPTADDSHIFT 24 251#define NKPTADDSHIFT 24
252#endif 252#endif
253 253
254struct kpt_page { 254struct kpt_page {
255 struct kpt_page *kpt_next; /* link on either used or free list */ 255 struct kpt_page *kpt_next; /* link on either used or free list */
256 vaddr_t kpt_va; /* always valid kernel VA */ 256 vaddr_t kpt_va; /* always valid kernel VA */
257 paddr_t kpt_pa; /* PA of this page (for speed) */ 257 paddr_t kpt_pa; /* PA of this page (for speed) */
258}; 258};
259struct kpt_page *kpt_free_list, *kpt_used_list; 259struct kpt_page *kpt_free_list, *kpt_used_list;
260struct kpt_page *kpt_pages; 260struct kpt_page *kpt_pages;
261 261
262/* 262/*
263 * Kernel segment/page table and page table map. 263 * Kernel segment/page table and page table map.
264 * The page table map gives us a level of indirection we need to dynamically 264 * The page table map gives us a level of indirection we need to dynamically
265 * expand the page table. It is essentially a copy of the segment table 265 * expand the page table. It is essentially a copy of the segment table
266 * with PTEs instead of STEs. All are initialized in locore at boot time. 266 * with PTEs instead of STEs. All are initialized in locore at boot time.
267 * Sysmap will initially contain VM_KERNEL_PT_PAGES pages of PTEs. 267 * Sysmap will initially contain VM_KERNEL_PT_PAGES pages of PTEs.
268 * Segtabzero is an empty segment table which all processes share til they 268 * Segtabzero is an empty segment table which all processes share til they
269 * reference something. 269 * reference something.
270 */ 270 */
271st_entry_t *Sysseg; 271st_entry_t *Sysseg;
272pt_entry_t *Sysmap, *Sysptmap; 272pt_entry_t *Sysmap, *Sysptmap;
273st_entry_t *Segtabzero, *Segtabzeropa; 273st_entry_t *Segtabzero, *Segtabzeropa;
274vsize_t Sysptsize = VM_KERNEL_PT_PAGES; 274vsize_t Sysptsize = VM_KERNEL_PT_PAGES;
275 275
276struct pv_entry *pv_table; /* array of entries, one per page */ 276struct pv_entry *pv_table; /* array of entries, one per page */
277 277
278static struct pmap kernel_pmap_store; 278static struct pmap kernel_pmap_store;
279struct pmap *const kernel_pmap_ptr = &kernel_pmap_store; 279struct pmap *const kernel_pmap_ptr = &kernel_pmap_store;
280struct vm_map *pt_map; 280struct vm_map *pt_map;
281struct vm_map_kernel pt_map_store; 281struct vm_map_kernel pt_map_store;
282 282
283paddr_t avail_start; /* PA of first available physical page */ 283paddr_t avail_start; /* PA of first available physical page */
284paddr_t avail_end; /* PA of last available physical page */ 284paddr_t avail_end; /* PA of last available physical page */
285vsize_t mem_size; /* memory size in bytes */ 285vsize_t mem_size; /* memory size in bytes */
286vaddr_t virtual_avail; /* VA of first avail page (after kernel bss)*/ 286vaddr_t virtual_avail; /* VA of first avail page (after kernel bss)*/
287vaddr_t virtual_end; /* VA of last avail page (end of kernel AS) */ 287vaddr_t virtual_end; /* VA of last avail page (end of kernel AS) */
288int page_cnt; /* number of pages managed by the VM system */ 288int page_cnt; /* number of pages managed by the VM system */
289bool pmap_initialized = false; /* Has pmap_init completed? */ 289bool pmap_initialized = false; /* Has pmap_init completed? */
290char *pmap_attributes; /* reference and modify bits */ 290char *pmap_attributes; /* reference and modify bits */
291TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist; 291TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist;
292int pv_nfree; 292int pv_nfree;
293#if defined(M68040) || defined(M68060) 293#if defined(M68040) || defined(M68060)
294int protostfree; /* prototype (default) free ST map */ 294int protostfree; /* prototype (default) free ST map */
295#endif 295#endif
296 296
297pt_entry_t *caddr1_pte; /* PTE for CADDR1 */ 297pt_entry_t *caddr1_pte; /* PTE for CADDR1 */
298pt_entry_t *caddr2_pte; /* PTE for CADDR2 */ 298pt_entry_t *caddr2_pte; /* PTE for CADDR2 */
299 299
300extern void * msgbufaddr; 
301extern paddr_t msgbufpa; 
302 
303u_long noncontig_enable; 
304extern const vaddr_t amiga_uptbase; 300extern const vaddr_t amiga_uptbase;
305 301
306extern paddr_t z2mem_start; 302extern paddr_t z2mem_start;
307 303
308extern vaddr_t reserve_dumppages(vaddr_t); 304extern vaddr_t reserve_dumppages(vaddr_t);
309 305
310bool pmap_testbit(paddr_t, int); 306bool pmap_testbit(paddr_t, int);
311int pmap_enter_ptpage(pmap_t, vaddr_t, bool); 307int pmap_enter_ptpage(pmap_t, vaddr_t, bool);
312static void pmap_ptpage_addref(vaddr_t); 308static void pmap_ptpage_addref(vaddr_t);
313static int pmap_ptpage_delref(vaddr_t); 309static int pmap_ptpage_delref(vaddr_t);
314static void pmap_changebit(vaddr_t, int, bool); 310static void pmap_changebit(vaddr_t, int, bool);
315struct pv_entry * pmap_alloc_pv(void); 311struct pv_entry * pmap_alloc_pv(void);
316void pmap_free_pv(struct pv_entry *); 312void pmap_free_pv(struct pv_entry *);
317void pmap_pinit(pmap_t); 313void pmap_pinit(pmap_t);
318void pmap_release(pmap_t); 314void pmap_release(pmap_t);
319static void pmap_remove_mapping(pmap_t, vaddr_t, pt_entry_t *, int); 315static void pmap_remove_mapping(pmap_t, vaddr_t, pt_entry_t *, int);
320 316
321void pmap_collect1(pmap_t, paddr_t, paddr_t); 317void pmap_collect1(pmap_t, paddr_t, paddr_t);
322 318
323/* pmap_remove_mapping flags */ 319/* pmap_remove_mapping flags */
324#define PRM_TFLUSH 0x01 320#define PRM_TFLUSH 0x01
325#define PRM_CFLUSH 0x02 321#define PRM_CFLUSH 0x02
326#define PRM_KEEPPTPAGE 0x04 322#define PRM_KEEPPTPAGE 0x04
327 323
328/* 324/*
329 * All those kernel PT submaps that BSD is so fond of 325 * All those kernel PT submaps that BSD is so fond of
330 */ 326 */
331void *CADDR1, *CADDR2; 327void *CADDR1, *CADDR2;
332char *vmmap; 328char *vmmap;
333 329
334#define PAGE_IS_MANAGED(pa) (pmap_initialized \ 330#define PAGE_IS_MANAGED(pa) (pmap_initialized \
335 && vm_physseg_find(atop((pa)), NULL) != -1) 331 && vm_physseg_find(atop((pa)), NULL) != -1)
336 332
337static inline struct pv_entry *pa_to_pvh(paddr_t pa); 333static inline struct pv_entry *pa_to_pvh(paddr_t pa);
338static inline char *pa_to_attribute(paddr_t pa); 334static inline char *pa_to_attribute(paddr_t pa);
339 335
340static inline struct pv_entry * 336static inline struct pv_entry *
341pa_to_pvh(paddr_t pa) 337pa_to_pvh(paddr_t pa)
342{ 338{
343 int bank, pg = 0; /* XXX gcc4 -Wuninitialized */ 339 int bank, pg = 0; /* XXX gcc4 -Wuninitialized */
344 340
345 bank = vm_physseg_find(atop((pa)), &pg); 341 bank = vm_physseg_find(atop((pa)), &pg);
346 return &vm_physmem[bank].pmseg.pvent[pg]; 342 return &vm_physmem[bank].pmseg.pvent[pg];
347} 343}
348 344
349static inline char * 345static inline char *
350pa_to_attribute(paddr_t pa) 346pa_to_attribute(paddr_t pa)
351{ 347{
352 int bank, pg = 0; /* XXX gcc4 -Wuninitialized */ 348 int bank, pg = 0; /* XXX gcc4 -Wuninitialized */
353 349
354 bank = vm_physseg_find(atop((pa)), &pg); 350 bank = vm_physseg_find(atop((pa)), &pg);
355 return &vm_physmem[bank].pmseg.attrs[pg]; 351 return &vm_physmem[bank].pmseg.attrs[pg];
356} 352}
357 353
358/* 354/*
359 * Initialize the pmap module. 355 * Initialize the pmap module.
360 * Called by vm_init, to initialize any structures that the pmap 356 * Called by vm_init, to initialize any structures that the pmap
361 * system needs to map virtual memory. 357 * system needs to map virtual memory.
362 */ 358 */
363void 359void
364pmap_init() 360pmap_init()
365{ 361{
366 vaddr_t addr, addr2; 362 vaddr_t addr, addr2;
367 vsize_t s; 363 vsize_t s;
368 u_int npg; 364 u_int npg;
369 struct pv_entry *pv; 365 struct pv_entry *pv;
370 char *attr; 366 char *attr;
371 int rv, bank; 367 int rv, bank;
372#if defined(M68060) 368#if defined(M68060)
373 struct kpt_page *kptp; 369 struct kpt_page *kptp;
374 paddr_t paddr; 370 paddr_t paddr;
375#endif 371#endif
376 372
377#ifdef DEBUG 373#ifdef DEBUG
378 if (pmapdebug & PDB_FOLLOW) 374 if (pmapdebug & PDB_FOLLOW)
379 printf("pmap_init()\n"); 375 printf("pmap_init()\n");
380#endif 376#endif
381 377
382 /* 378 /*
383 * Before we do anything else, initialize the PTE pointers 379 * Before we do anything else, initialize the PTE pointers
384 * used by pmap_zero_page() and pmap_copy_page(). 380 * used by pmap_zero_page() and pmap_copy_page().
385 */ 381 */
386 caddr1_pte = pmap_pte(pmap_kernel(), CADDR1); 382 caddr1_pte = pmap_pte(pmap_kernel(), CADDR1);
387 caddr2_pte = pmap_pte(pmap_kernel(), CADDR2); 383 caddr2_pte = pmap_pte(pmap_kernel(), CADDR2);
388 384
389#ifdef DEBUG 385#ifdef DEBUG
390 if (pmapdebug & PDB_INIT) { 386 if (pmapdebug & PDB_INIT) {
391 printf("pmap_init: Sysseg %p, Sysmap %p, Sysptmap %p\n", 387 printf("pmap_init: Sysseg %p, Sysmap %p, Sysptmap %p\n",
392 Sysseg, Sysmap, Sysptmap); 388 Sysseg, Sysmap, Sysptmap);
393 printf(" pstart %lx, pend %lx, vstart %lx, vend %lx\n", 389 printf(" pstart %lx, pend %lx, vstart %lx, vend %lx\n",
394 avail_start, avail_end, virtual_avail, virtual_end); 390 avail_start, avail_end, virtual_avail, virtual_end);
395 } 391 }
396#endif 392#endif
397 393
398 /* 394 /*
399 * Allocate memory for random pmap data structures. Includes the 395 * Allocate memory for random pmap data structures. Includes the
400 * initial segment table, pv_head_table and pmap_attributes. 396 * initial segment table, pv_head_table and pmap_attributes.
401 */ 397 */
402 for (page_cnt = 0, bank = 0; bank < vm_nphysseg; bank++) { 398 for (page_cnt = 0, bank = 0; bank < vm_nphysseg; bank++) {
403 page_cnt += vm_physmem[bank].end - vm_physmem[bank].start; 399 page_cnt += vm_physmem[bank].end - vm_physmem[bank].start;
404#ifdef DEBUG 400#ifdef DEBUG
405 printf("pmap_init: %2d: %08lx - %08lx (%10d)\n", bank, 401 printf("pmap_init: %2d: %08lx - %08lx (%10d)\n", bank,
406 vm_physmem[bank].start << PGSHIFT, 402 vm_physmem[bank].start << PGSHIFT,
407 vm_physmem[bank].end << PGSHIFT, page_cnt << PGSHIFT); 403 vm_physmem[bank].end << PGSHIFT, page_cnt << PGSHIFT);
408#endif 404#endif
409 } 405 }
410 s = M68K_STSIZE; /* Segtabzero */ 406 s = M68K_STSIZE; /* Segtabzero */
411 s += page_cnt * sizeof(struct pv_entry); /* pv table */ 407 s += page_cnt * sizeof(struct pv_entry); /* pv table */
412 s += page_cnt * sizeof(char); /* attribute table */ 408 s += page_cnt * sizeof(char); /* attribute table */
413 s = round_page(s); 409 s = round_page(s);
414 410
415 addr = uvm_km_alloc(kernel_map, s, 0, UVM_KMF_WIRED | UVM_KMF_ZERO); 411 addr = uvm_km_alloc(kernel_map, s, 0, UVM_KMF_WIRED | UVM_KMF_ZERO);
416 if (addr == 0) 412 if (addr == 0)
417 panic("pmap_init: can't allocate data structures"); 413 panic("pmap_init: can't allocate data structures");
418 Segtabzero = (u_int *) addr; 414 Segtabzero = (u_int *) addr;
419 (void) pmap_extract(pmap_kernel(), addr, (paddr_t *)(void *)&Segtabzeropa); 415 (void) pmap_extract(pmap_kernel(), addr, (paddr_t *)(void *)&Segtabzeropa);
420 addr += M68K_STSIZE; 416 addr += M68K_STSIZE;
421 417
422 pv_table = (struct pv_entry *) addr; 418 pv_table = (struct pv_entry *) addr;
423 addr += page_cnt * sizeof(struct pv_entry); 419 addr += page_cnt * sizeof(struct pv_entry);
424 420
425 pmap_attributes = (char *) addr; 421 pmap_attributes = (char *) addr;
426#ifdef DEBUG 422#ifdef DEBUG
427 if (pmapdebug & PDB_INIT) 423 if (pmapdebug & PDB_INIT)
428 printf("pmap_init: %lx bytes: page_cnt %x s0 %p(%p) " 424 printf("pmap_init: %lx bytes: page_cnt %x s0 %p(%p) "
429 "tbl %p atr %p\n", 425 "tbl %p atr %p\n",
430 s, page_cnt, Segtabzero, Segtabzeropa, 426 s, page_cnt, Segtabzero, Segtabzeropa,
431 pv_table, pmap_attributes); 427 pv_table, pmap_attributes);
432#endif 428#endif
433 429
434 /* 430 /*
435 * Now that the pv and attribute tables have been allocated, 431 * Now that the pv and attribute tables have been allocated,
436 * assign them to the memory segments. 432 * assign them to the memory segments.
437 */ 433 */
438 pv = pv_table; 434 pv = pv_table;
439 attr = pmap_attributes; 435 attr = pmap_attributes;
440 for (bank = 0; bank < vm_nphysseg; bank++) { 436 for (bank = 0; bank < vm_nphysseg; bank++) {
441 npg = vm_physmem[bank].end - vm_physmem[bank].start; 437 npg = vm_physmem[bank].end - vm_physmem[bank].start;
442 vm_physmem[bank].pmseg.pvent = pv; 438 vm_physmem[bank].pmseg.pvent = pv;
443 vm_physmem[bank].pmseg.attrs = attr; 439 vm_physmem[bank].pmseg.attrs = attr;
444 pv += npg; 440 pv += npg;
445 attr += npg; 441 attr += npg;
446 } 442 }
447 443
448 /* 444 /*
449 * Allocate physical memory for kernel PT pages and their management. 445 * Allocate physical memory for kernel PT pages and their management.
450 * we need enough pages to map the page tables for each process 446 * we need enough pages to map the page tables for each process
451 * plus some slop. 447 * plus some slop.
452 */ 448 */
453 npg = howmany(((maxproc + 16) * M68K_MAX_PTSIZE / NPTEPG), PAGE_SIZE); 449 npg = howmany(((maxproc + 16) * M68K_MAX_PTSIZE / NPTEPG), PAGE_SIZE);
454#ifdef NKPTADD 450#ifdef NKPTADD
455 npg += NKPTADD; 451 npg += NKPTADD;
456#else 452#else
457 npg += mem_size >> NKPTADDSHIFT; 453 npg += mem_size >> NKPTADDSHIFT;
458#endif 454#endif
459#if 1/*def DEBUG*/ 455#if 1/*def DEBUG*/
460 printf("Maxproc %d, mem_size %ld MB: allocating %d KPT pages\n", 456 printf("Maxproc %d, mem_size %ld MB: allocating %d KPT pages\n",
461 maxproc, mem_size>>20, npg); 457 maxproc, mem_size>>20, npg);
462#endif 458#endif
463 s = ptoa(npg) + round_page(npg * sizeof(struct kpt_page)); 459 s = ptoa(npg) + round_page(npg * sizeof(struct kpt_page));
464 460
465 /* 461 /*
466 * Verify that space will be allocated in region for which 462 * Verify that space will be allocated in region for which
467 * we already have kernel PT pages. 463 * we already have kernel PT pages.
468 */ 464 */
469 addr = 0; 465 addr = 0;
470 rv = uvm_map(kernel_map, &addr, s, NULL, UVM_UNKNOWN_OFFSET, 0, 466 rv = uvm_map(kernel_map, &addr, s, NULL, UVM_UNKNOWN_OFFSET, 0,
471 UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE, 467 UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
472 UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)); 468 UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
473 if (rv != 0 || (addr + s) >= (vaddr_t)Sysmap) 469 if (rv != 0 || (addr + s) >= (vaddr_t)Sysmap)
474 panic("pmap_init: kernel PT too small"); 470 panic("pmap_init: kernel PT too small");
475 uvm_unmap(kernel_map, addr, addr + s); 471 uvm_unmap(kernel_map, addr, addr + s);
476 472
477 /* 473 /*
478 * Now allocate the space and link the pages together to 474 * Now allocate the space and link the pages together to
479 * form the KPT free list. 475 * form the KPT free list.
480 */ 476 */
481 addr = uvm_km_alloc(kernel_map, s, 0, UVM_KMF_WIRED | UVM_KMF_ZERO); 477 addr = uvm_km_alloc(kernel_map, s, 0, UVM_KMF_WIRED | UVM_KMF_ZERO);
482 if (addr == 0) 478 if (addr == 0)
483 panic("pmap_init: cannot allocate KPT free list"); 479 panic("pmap_init: cannot allocate KPT free list");
484 s = ptoa(npg); 480 s = ptoa(npg);
485 addr2 = addr + s; 481 addr2 = addr + s;
486 kpt_pages = &((struct kpt_page *)addr2)[npg]; 482 kpt_pages = &((struct kpt_page *)addr2)[npg];
487 kpt_free_list = NULL; 483 kpt_free_list = NULL;
488 do { 484 do {
489 addr2 -= PAGE_SIZE; 485 addr2 -= PAGE_SIZE;
490 (--kpt_pages)->kpt_next = kpt_free_list; 486 (--kpt_pages)->kpt_next = kpt_free_list;
491 kpt_free_list = kpt_pages; 487 kpt_free_list = kpt_pages;
492 kpt_pages->kpt_va = addr2; 488 kpt_pages->kpt_va = addr2;
493 (void) pmap_extract(pmap_kernel(), addr2, 489 (void) pmap_extract(pmap_kernel(), addr2,
494 (paddr_t *)&kpt_pages->kpt_pa); 490 (paddr_t *)&kpt_pages->kpt_pa);
495 } while (addr != addr2); 491 } while (addr != addr2);
496 492
497#ifdef DEBUG 493#ifdef DEBUG
498 kpt_stats.kpttotal = atop(s); 494 kpt_stats.kpttotal = atop(s);
499 if (pmapdebug & PDB_INIT) 495 if (pmapdebug & PDB_INIT)
500 printf("pmap_init: KPT: %ld pages from %lx to %lx\n", 496 printf("pmap_init: KPT: %ld pages from %lx to %lx\n",
501 atop(s), addr, addr + s); 497 atop(s), addr, addr + s);
502#endif 498#endif
503 499
504 /* 500 /*
505 * Allocate the segment table map and the page table map. 501 * Allocate the segment table map and the page table map.
506 */ 502 */
507 addr = amiga_uptbase; 503 addr = amiga_uptbase;
508 if (M68K_PTMAXSIZE / M68K_MAX_PTSIZE < maxproc) { 504 if (M68K_PTMAXSIZE / M68K_MAX_PTSIZE < maxproc) {
509 s = M68K_PTMAXSIZE; 505 s = M68K_PTMAXSIZE;
510 506
511 /* 507 /*
512 * XXX We don't want to hang when we run out of page 508 * XXX We don't want to hang when we run out of page
513 * tables, so we lower maxproc so that fork will fail 509 * tables, so we lower maxproc so that fork will fail
514 * instead. Note that root could still raise this 510 * instead. Note that root could still raise this
515 * value through sysctl(3). 511 * value through sysctl(3).
516 */ 512 */
517 maxproc = M68K_PTMAXSIZE / M68K_MAX_PTSIZE; 513 maxproc = M68K_PTMAXSIZE / M68K_MAX_PTSIZE;
518 } else 514 } else
519 s = maxproc * M68K_MAX_PTSIZE; 515 s = maxproc * M68K_MAX_PTSIZE;
520 516
521 pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0, 517 pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0,
522 true, &pt_map_store); 518 true, &pt_map_store);
523 519
524#if defined(M68040) || defined(M68060) 520#if defined(M68040) || defined(M68060)
525 if (mmutype == MMU_68040) 521 if (mmutype == MMU_68040)
526 protostfree = ~1 & ~(-1 << MAXUL2SIZE); 522 protostfree = ~1 & ~(-1 << MAXUL2SIZE);
527#endif /* defined(M68040) || defined(M68060) */ 523#endif /* defined(M68040) || defined(M68060) */
528 524
529 /* 525 /*
530 * Now it is safe to enable pv_table recording. 526 * Now it is safe to enable pv_table recording.
531 */ 527 */
532 pmap_initialized = true; 528 pmap_initialized = true;
533 529
534 /* 530 /*
535 * Now that this is done, mark the pages shared with the 531 * Now that this is done, mark the pages shared with the
536 * hardware page table search as non-CCB (actually, as CI). 532 * hardware page table search as non-CCB (actually, as CI).
537 * 533 *
538 * XXX Hm. Given that this is in the kernel map, can't we just 534 * XXX Hm. Given that this is in the kernel map, can't we just
539 * use the va's? 535 * use the va's?
540 */ 536 */
541#ifdef M68060 537#ifdef M68060
542 if (cputype == CPU_68060) { 538 if (cputype == CPU_68060) {
543 kptp = kpt_free_list; 539 kptp = kpt_free_list;
544 while (kptp) { 540 while (kptp) {
545 pmap_changebit(kptp->kpt_pa, PG_CCB, 0); 541 pmap_changebit(kptp->kpt_pa, PG_CCB, 0);
546 pmap_changebit(kptp->kpt_pa, PG_CI, 1); 542 pmap_changebit(kptp->kpt_pa, PG_CI, 1);
547 kptp = kptp->kpt_next; 543 kptp = kptp->kpt_next;
548 } 544 }
549 545
550 paddr = (paddr_t)Segtabzeropa; 546 paddr = (paddr_t)Segtabzeropa;
551 while (paddr < (paddr_t)Segtabzeropa + M68K_STSIZE) { 547 while (paddr < (paddr_t)Segtabzeropa + M68K_STSIZE) {
552 pmap_changebit(paddr, PG_CCB, 0); 548 pmap_changebit(paddr, PG_CCB, 0);
553 pmap_changebit(paddr, PG_CI, 1); 549 pmap_changebit(paddr, PG_CI, 1);
554 paddr += PAGE_SIZE; 550 paddr += PAGE_SIZE;
555 } 551 }
556 552
557 DCIS(); 553 DCIS();
558 } 554 }
559#endif 555#endif
560} 556}
561 557
562struct pv_entry * 558struct pv_entry *
563pmap_alloc_pv() 559pmap_alloc_pv()
564{ 560{
565 struct pv_page *pvp; 561 struct pv_page *pvp;
566 struct pv_entry *pv; 562 struct pv_entry *pv;
567 int i; 563 int i;
568 564
569 if (pv_nfree == 0) { 565 if (pv_nfree == 0) {
570 pvp = (struct pv_page *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0, 566 pvp = (struct pv_page *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
571 UVM_KMF_WIRED | UVM_KMF_ZERO); 567 UVM_KMF_WIRED | UVM_KMF_ZERO);
572 if (pvp == 0) 568 if (pvp == 0)
573 panic("pmap_alloc_pv: uvm_km_zalloc() failed"); 569 panic("pmap_alloc_pv: uvm_km_zalloc() failed");
574 pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1]; 570 pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1];
575 for (i = NPVPPG - 2; i; i--, pv++) 571 for (i = NPVPPG - 2; i; i--, pv++)
576 pv->pv_next = pv + 1; 572 pv->pv_next = pv + 1;
577 pv->pv_next = 0; 573 pv->pv_next = 0;
578 pv_nfree += pvp->pvp_pgi.pgi_nfree = NPVPPG - 1; 574 pv_nfree += pvp->pvp_pgi.pgi_nfree = NPVPPG - 1;
579 TAILQ_INSERT_HEAD(&pv_page_freelist, pvp, pvp_pgi.pgi_list); 575 TAILQ_INSERT_HEAD(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
580 pv = &pvp->pvp_pv[0]; 576 pv = &pvp->pvp_pv[0];
581 } else { 577 } else {
582 --pv_nfree; 578 --pv_nfree;
583 pvp = pv_page_freelist.tqh_first; 579 pvp = pv_page_freelist.tqh_first;
584 if (--pvp->pvp_pgi.pgi_nfree == 0) { 580 if (--pvp->pvp_pgi.pgi_nfree == 0) {
585 TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list); 581 TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
586 } 582 }
587 pv = pvp->pvp_pgi.pgi_freelist; 583 pv = pvp->pvp_pgi.pgi_freelist;
588#ifdef DIAGNOSTIC 584#ifdef DIAGNOSTIC
589 if (pv == 0) 585 if (pv == 0)
590 panic("pmap_alloc_pv: pgi_nfree inconsistent"); 586 panic("pmap_alloc_pv: pgi_nfree inconsistent");
591#endif 587#endif
592 pvp->pvp_pgi.pgi_freelist = pv->pv_next; 588 pvp->pvp_pgi.pgi_freelist = pv->pv_next;
593 } 589 }
594 return pv; 590 return pv;
595} 591}
596 592
597void 593void
598pmap_free_pv(pv) 594pmap_free_pv(pv)
599 struct pv_entry *pv; 595 struct pv_entry *pv;
600{ 596{
601 struct pv_page *pvp; 597 struct pv_page *pvp;
602 598
603 pvp = (struct pv_page *) trunc_page((vaddr_t)pv); 599 pvp = (struct pv_page *) trunc_page((vaddr_t)pv);
604 switch (++pvp->pvp_pgi.pgi_nfree) { 600 switch (++pvp->pvp_pgi.pgi_nfree) {
605 case 1: 601 case 1:
606 TAILQ_INSERT_TAIL(&pv_page_freelist, pvp, pvp_pgi.pgi_list); 602 TAILQ_INSERT_TAIL(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
607 default: 603 default:
608 pv->pv_next = pvp->pvp_pgi.pgi_freelist; 604 pv->pv_next = pvp->pvp_pgi.pgi_freelist;
609 pvp->pvp_pgi.pgi_freelist = pv; 605 pvp->pvp_pgi.pgi_freelist = pv;
610 ++pv_nfree; 606 ++pv_nfree;
611 break; 607 break;
612 case NPVPPG: 608 case NPVPPG:
613 pv_nfree -= NPVPPG - 1; 609 pv_nfree -= NPVPPG - 1;
614 TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list); 610 TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
615 uvm_km_free(kernel_map, (vaddr_t)pvp, PAGE_SIZE, UVM_KMF_WIRED); 611 uvm_km_free(kernel_map, (vaddr_t)pvp, PAGE_SIZE, UVM_KMF_WIRED);
616 break; 612 break;
617 } 613 }
618} 614}
619 615
620 616
621/* 617/*
622 * Used to map a range of physical addresses into kernel 618 * Used to map a range of physical addresses into kernel
623 * virtual address space. 619 * virtual address space.
624 * 620 *
625 * For now, VM is already on, we only need to map the 621 * For now, VM is already on, we only need to map the
626 * specified memory. 622 * specified memory.
627 */ 623 */
628vaddr_t 624vaddr_t
629pmap_map(virt, start, end, prot) 625pmap_map(virt, start, end, prot)
630 vaddr_t virt; 626 vaddr_t virt;
631 paddr_t start; 627 paddr_t start;
632 paddr_t end; 628 paddr_t end;
633 int prot; 629 int prot;
634{ 630{
635#ifdef DEBUG 631#ifdef DEBUG
636 if (pmapdebug & PDB_FOLLOW) 632 if (pmapdebug & PDB_FOLLOW)
637 printf("pmap_map(%lx, %lx, %lx, %x)\n", virt, start, end, prot); 633 printf("pmap_map(%lx, %lx, %lx, %x)\n", virt, start, end, prot);
638#endif 634#endif
639 while (start < end) { 635 while (start < end) {
640 pmap_enter(pmap_kernel(), virt, start, prot, 0); 636 pmap_enter(pmap_kernel(), virt, start, prot, 0);
641 virt += PAGE_SIZE; 637 virt += PAGE_SIZE;
642 start += PAGE_SIZE; 638 start += PAGE_SIZE;
643 } 639 }
644 pmap_update(pmap_kernel()); 640 pmap_update(pmap_kernel());
645 return(virt); 641 return(virt);
646} 642}
647 643
648/* 644/*
649 * Create and return a physical map. 645 * Create and return a physical map.
650 * 646 *
651 * If the size specified for the map 647 * If the size specified for the map
652 * is zero, the map is an actual physical 648 * is zero, the map is an actual physical
653 * map, and may be referenced by the 649 * map, and may be referenced by the
654 * hardware. 650 * hardware.
655 * 651 *
656 * If the size specified is non-zero, 652 * If the size specified is non-zero,
657 * the map will be used in software only, and 653 * the map will be used in software only, and
658 * is bounded by that size. 654 * is bounded by that size.
659 */ 655 */
660pmap_t 656pmap_t
661pmap_create() 657pmap_create()
662{ 658{
663 pmap_t pmap; 659 pmap_t pmap;
664 660
665#ifdef DEBUG 661#ifdef DEBUG
666 if (pmapdebug & (PDB_FOLLOW|PDB_CREATE)) 662 if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
667 printf("pmap_create\n"); 663 printf("pmap_create\n");
668#endif 664#endif
669 665
670 pmap = malloc(sizeof(*pmap), M_VMPMAP, M_WAITOK|M_ZERO); 666 pmap = malloc(sizeof(*pmap), M_VMPMAP, M_WAITOK|M_ZERO);
671 pmap_pinit(pmap); 667 pmap_pinit(pmap);
672 return (pmap); 668 return (pmap);
673} 669}
674 670
675/* 671/*
676 * Initialize a preallocated and zeroed pmap structure, 672 * Initialize a preallocated and zeroed pmap structure,
677 * such as one in a vmspace structure. 673 * such as one in a vmspace structure.
678 */ 674 */
679void 675void
680pmap_pinit(pmap) 676pmap_pinit(pmap)
681 pmap_t pmap; 677 pmap_t pmap;
682{ 678{
683 679
684#ifdef DEBUG 680#ifdef DEBUG
685 if (pmapdebug & (PDB_FOLLOW|PDB_CREATE)) 681 if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
686 printf("pmap_pinit(%p)\n", pmap); 682 printf("pmap_pinit(%p)\n", pmap);
687#endif 683#endif
688 /* 684 /*
689 * No need to allocate page table space yet but we do need a 685 * No need to allocate page table space yet but we do need a
690 * valid segment table. Initially, we point everyone at the 686 * valid segment table. Initially, we point everyone at the
691 * "null" segment table. On the first pmap_enter, a real 687 * "null" segment table. On the first pmap_enter, a real
692 * segment table will be allocated. 688 * segment table will be allocated.
693 */ 689 */
694 pmap->pm_stab = Segtabzero; 690 pmap->pm_stab = Segtabzero;
695 pmap->pm_stpa = Segtabzeropa; 691 pmap->pm_stpa = Segtabzeropa;
696#if defined(M68040) || defined(M68060) 692#if defined(M68040) || defined(M68060)
697 if (mmutype == MMU_68040) 693 if (mmutype == MMU_68040)
698 pmap->pm_stfree = protostfree; 694 pmap->pm_stfree = protostfree;
699#endif 695#endif
700 pmap->pm_count = 1; 696 pmap->pm_count = 1;
701} 697}
702 698
703/* 699/*
704 * Retire the given physical map from service. 700 * Retire the given physical map from service.
705 * Should only be called if the map contains 701 * Should only be called if the map contains
706 * no valid mappings. 702 * no valid mappings.
707 */ 703 */
708void 704void
709pmap_destroy(pmap) 705pmap_destroy(pmap)
710 pmap_t pmap; 706 pmap_t pmap;
711{ 707{
712 int count; 708 int count;
713 709
714#ifdef DEBUG 710#ifdef DEBUG
715 if (pmapdebug & PDB_FOLLOW) 711 if (pmapdebug & PDB_FOLLOW)
716 printf("pmap_destroy(%p)\n", pmap); 712 printf("pmap_destroy(%p)\n", pmap);
717#endif 713#endif
718 count = --pmap->pm_count; 714 count = --pmap->pm_count;
719 if (count == 0) { 715 if (count == 0) {
720 pmap_release(pmap); 716 pmap_release(pmap);
721 free((void *)pmap, M_VMPMAP); 717 free((void *)pmap, M_VMPMAP);
722 } 718 }
723} 719}
724 720
725/* 721/*
726 * Release any resources held by the given physical map. 722 * Release any resources held by the given physical map.
727 * Called when a pmap initialized by pmap_pinit is being released. 723 * Called when a pmap initialized by pmap_pinit is being released.
728 * Should only be called if the map contains no valid mappings. 724 * Should only be called if the map contains no valid mappings.
729 */ 725 */
730void 726void
731pmap_release(pmap) 727pmap_release(pmap)
732 pmap_t pmap; 728 pmap_t pmap;
733{ 729{
734 730
735#ifdef DEBUG 731#ifdef DEBUG
736 if (pmapdebug & PDB_FOLLOW) 732 if (pmapdebug & PDB_FOLLOW)
737 printf("pmap_release(%p)\n", pmap); 733 printf("pmap_release(%p)\n", pmap);
738#endif 734#endif
739#ifdef notdef /* DIAGNOSTIC */ 735#ifdef notdef /* DIAGNOSTIC */
740 /* count would be 0 from pmap_destroy... */ 736 /* count would be 0 from pmap_destroy... */
741 if (pmap->pm_count != 1) 737 if (pmap->pm_count != 1)
742 panic("pmap_release count"); 738 panic("pmap_release count");
743#endif 739#endif
744 if (pmap->pm_ptab) { 740 if (pmap->pm_ptab) {
745 pmap_remove(pmap_kernel(), (vaddr_t)pmap->pm_ptab, 741 pmap_remove(pmap_kernel(), (vaddr_t)pmap->pm_ptab,
746 (vaddr_t)pmap->pm_ptab + M68K_MAX_PTSIZE); 742 (vaddr_t)pmap->pm_ptab + M68K_MAX_PTSIZE);
747 uvm_km_pgremove((vaddr_t)pmap->pm_ptab, 743 uvm_km_pgremove((vaddr_t)pmap->pm_ptab,
748 (vaddr_t)pmap->pm_ptab + M68K_MAX_PTSIZE); 744 (vaddr_t)pmap->pm_ptab + M68K_MAX_PTSIZE);
749 uvm_km_free(pt_map, (vaddr_t)pmap->pm_ptab, 745 uvm_km_free(pt_map, (vaddr_t)pmap->pm_ptab,
750 M68K_MAX_PTSIZE, UVM_KMF_VAONLY); 746 M68K_MAX_PTSIZE, UVM_KMF_VAONLY);
751 } 747 }
752 KASSERT(pmap->pm_stab == Segtabzero); 748 KASSERT(pmap->pm_stab == Segtabzero);
753} 749}
754 750
755/* 751/*
756 * Add a reference to the specified pmap. 752 * Add a reference to the specified pmap.
757 */ 753 */
758void 754void
759pmap_reference(pmap) 755pmap_reference(pmap)
760 pmap_t pmap; 756 pmap_t pmap;
761{ 757{
762#ifdef DEBUG 758#ifdef DEBUG
763 if (pmapdebug & PDB_FOLLOW) 759 if (pmapdebug & PDB_FOLLOW)
764 printf("pmap_reference(%p)\n", pmap); 760 printf("pmap_reference(%p)\n", pmap);
765#endif 761#endif
766 if (pmap != NULL) { 762 if (pmap != NULL) {
767 pmap->pm_count++; 763 pmap->pm_count++;
768 } 764 }
769} 765}
770 766
771/* 767/*
772 * Remove the given range of addresses from the specified map. 768 * Remove the given range of addresses from the specified map.
773 * 769 *
774 * It is assumed that the start and end are properly 770 * It is assumed that the start and end are properly
775 * rounded to the page size. 771 * rounded to the page size.
776 */ 772 */
777void 773void
778pmap_remove(pmap, sva, eva) 774pmap_remove(pmap, sva, eva)
779 pmap_t pmap; 775 pmap_t pmap;
780 vaddr_t sva, eva; 776 vaddr_t sva, eva;
781{ 777{
782 paddr_t pa; 778 paddr_t pa;
783 vaddr_t va; 779 vaddr_t va;
784 u_int *pte; 780 u_int *pte;
785 int flags; 781 int flags;
786 782
787#ifdef DEBUG 783#ifdef DEBUG
788 if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT)) 784 if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
789 printf("pmap_remove(%p, %lx, %lx)\n", pmap, sva, eva); 785 printf("pmap_remove(%p, %lx, %lx)\n", pmap, sva, eva);
790 remove_stats.calls++; 786 remove_stats.calls++;
791#endif 787#endif
792 flags = active_pmap(pmap) ? PRM_TFLUSH : 0; 788 flags = active_pmap(pmap) ? PRM_TFLUSH : 0;
793 for (va = sva; va < eva; va += PAGE_SIZE) { 789 for (va = sva; va < eva; va += PAGE_SIZE) {
794 /* 790 /*
795 * Weed out invalid mappings. 791 * Weed out invalid mappings.
796 * Note: we assume that the segment table is always allocated. 792 * Note: we assume that the segment table is always allocated.
797 */ 793 */
798 if (!pmap_ste_v(pmap, va)) { 794 if (!pmap_ste_v(pmap, va)) {
799 /* XXX: avoid address wrap around */ 795 /* XXX: avoid address wrap around */
800 if (va >= m68k_trunc_seg((vaddr_t)-1)) 796 if (va >= m68k_trunc_seg((vaddr_t)-1))
801 break; 797 break;
802 va = m68k_round_seg(va + PAGE_SIZE) - PAGE_SIZE; 798 va = m68k_round_seg(va + PAGE_SIZE) - PAGE_SIZE;
803 continue; 799 continue;
804 } 800 }
805 pte = pmap_pte(pmap, va); 801 pte = pmap_pte(pmap, va);
806 pa = pmap_pte_pa(pte); 802 pa = pmap_pte_pa(pte);
807 if (pa == 0) 803 if (pa == 0)
808 continue; 804 continue;
809 pmap_remove_mapping(pmap, va, pte, flags); 805 pmap_remove_mapping(pmap, va, pte, flags);
810 } 806 }
811} 807}
812 808
813/* 809/*
814 * pmap_page_protect: 810 * pmap_page_protect:
815 * 811 *
816 * Lower the permission for all mappings to a given page. 812 * Lower the permission for all mappings to a given page.
817 */ 813 */
818void 814void
819pmap_page_protect(pg, prot) 815pmap_page_protect(pg, prot)
820 struct vm_page *pg; 816 struct vm_page *pg;
821 vm_prot_t prot; 817 vm_prot_t prot;
822{ 818{
823 struct pv_entry *pv; 819 struct pv_entry *pv;
824 int s; 820 int s;
825 paddr_t pa = VM_PAGE_TO_PHYS(pg); 821 paddr_t pa = VM_PAGE_TO_PHYS(pg);
826 822
827#ifdef DEBUG 823#ifdef DEBUG
828 if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) || 824 if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
829 (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE))) 825 (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
830 printf("pmap_page_protect(%lx, %x)\n", pa, prot); 826 printf("pmap_page_protect(%lx, %x)\n", pa, prot);
831#endif 827#endif
832 switch (prot) { 828 switch (prot) {
833 case VM_PROT_ALL: 829 case VM_PROT_ALL:
834 break; 830 break;
835 /* copy_on_write */ 831 /* copy_on_write */
836 case VM_PROT_READ: 832 case VM_PROT_READ:
837 case VM_PROT_READ|VM_PROT_EXECUTE: 833 case VM_PROT_READ|VM_PROT_EXECUTE:
838 pmap_changebit(pa, PG_RO, true); 834 pmap_changebit(pa, PG_RO, true);
839 break; 835 break;
840 /* remove_all */ 836 /* remove_all */
841 default: 837 default:
842 pv = pa_to_pvh(pa); 838 pv = pa_to_pvh(pa);
843 s = splvm(); 839 s = splvm();
844 while (pv->pv_pmap != NULL) { 840 while (pv->pv_pmap != NULL) {
845 pt_entry_t *pte; 841 pt_entry_t *pte;
846 842
847 pte = pmap_pte(pv->pv_pmap, pv->pv_va); 843 pte = pmap_pte(pv->pv_pmap, pv->pv_va);
848#ifdef DEBUG 844#ifdef DEBUG
849 if (!pmap_ste_v(pv->pv_pmap,pv->pv_va) || 845 if (!pmap_ste_v(pv->pv_pmap,pv->pv_va) ||
850 pmap_pte_pa(pte) != pa) 846 pmap_pte_pa(pte) != pa)
851{ 847{
852 printf ("pmap_page_protect: va %lx, pmap_ste_v %d pmap_pte_pa %08x/%lx\n", 848 printf ("pmap_page_protect: va %lx, pmap_ste_v %d pmap_pte_pa %08x/%lx\n",
853 pv->pv_va, pmap_ste_v(pv->pv_pmap,pv->pv_va), 849 pv->pv_va, pmap_ste_v(pv->pv_pmap,pv->pv_va),
854 pmap_pte_pa(pmap_pte(pv->pv_pmap,pv->pv_va)),pa); 850 pmap_pte_pa(pmap_pte(pv->pv_pmap,pv->pv_va)),pa);
855 printf (" pvh %p pv %p pv_next %p\n", pa_to_pvh(pa), pv, pv->pv_next); 851 printf (" pvh %p pv %p pv_next %p\n", pa_to_pvh(pa), pv, pv->pv_next);
856 panic("pmap_page_protect: bad mapping"); 852 panic("pmap_page_protect: bad mapping");
857} 853}
858#endif 854#endif
859 pmap_remove_mapping(pv->pv_pmap, pv->pv_va, 855 pmap_remove_mapping(pv->pv_pmap, pv->pv_va,
860 pte, PRM_TFLUSH|PRM_CFLUSH); 856 pte, PRM_TFLUSH|PRM_CFLUSH);
861 } 857 }
862 splx(s); 858 splx(s);
863 break; 859 break;
864 } 860 }
865} 861}
866 862
867/* 863/*
868 * Set the physical protection on the 864 * Set the physical protection on the
869 * specified range of this map as requested. 865 * specified range of this map as requested.
870 */ 866 */
871void 867void
872pmap_protect(pmap, sva, eva, prot) 868pmap_protect(pmap, sva, eva, prot)
873 pmap_t pmap; 869 pmap_t pmap;
874 vaddr_t sva, eva; 870 vaddr_t sva, eva;
875 vm_prot_t prot; 871 vm_prot_t prot;
876{ 872{
877 u_int *pte; 873 u_int *pte;
878 vaddr_t va; 874 vaddr_t va;
879 bool needtflush; 875 bool needtflush;
880 int isro; 876 int isro;
881 877
882#ifdef DEBUG 878#ifdef DEBUG
883 if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) 879 if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
884 printf("pmap_protect(%p, %lx, %lx, %x)\n", pmap, sva, eva, prot); 880 printf("pmap_protect(%p, %lx, %lx, %x)\n", pmap, sva, eva, prot);
885#endif 881#endif
886 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 882 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
887 pmap_remove(pmap, sva, eva); 883 pmap_remove(pmap, sva, eva);
888 return; 884 return;
889 } 885 }
890 pte = pmap_pte(pmap, sva); 886 pte = pmap_pte(pmap, sva);
891 isro = pte_prot(pmap, prot) == PG_RO ? 1 : 0; 887 isro = pte_prot(pmap, prot) == PG_RO ? 1 : 0;
892 needtflush = active_pmap(pmap); 888 needtflush = active_pmap(pmap);
893 for (va = sva; va < eva; va += PAGE_SIZE) { 889 for (va = sva; va < eva; va += PAGE_SIZE) {
894 /* 890 /*
895 * Page table page is not allocated. 891 * Page table page is not allocated.
896 * Skip it, we don't want to force allocation 892 * Skip it, we don't want to force allocation
897 * of unnecessary PTE pages just to set the protection. 893 * of unnecessary PTE pages just to set the protection.
898 */ 894 */
899 if (!pmap_ste_v(pmap, va)) { 895 if (!pmap_ste_v(pmap, va)) {
900 /* XXX: avoid address wrap around */ 896 /* XXX: avoid address wrap around */
901 if (va >= m68k_trunc_seg((vaddr_t)-1)) 897 if (va >= m68k_trunc_seg((vaddr_t)-1))
902 break; 898 break;
903 va = m68k_round_seg(va + PAGE_SIZE) - PAGE_SIZE; 899 va = m68k_round_seg(va + PAGE_SIZE) - PAGE_SIZE;
904 pte = pmap_pte(pmap, va); 900 pte = pmap_pte(pmap, va);
905 pte++; 901 pte++;
906 continue; 902 continue;
907 } 903 }
908 /* 904 /*
909 * skip if page not valid or protection is same 905 * skip if page not valid or protection is same
910 */ 906 */
911 if (!pmap_pte_v(pte) || !pmap_pte_prot_chg(pte, isro)) { 907 if (!pmap_pte_v(pte) || !pmap_pte_prot_chg(pte, isro)) {
912 pte++; 908 pte++;
913 continue; 909 continue;
914 } 910 }
915#if defined(M68040) || defined(M68060) 911#if defined(M68040) || defined(M68060)
916 /* 912 /*
917 * Clear caches if making RO (see section 913 * Clear caches if making RO (see section
918 * "7.3 Cache Coherency" in the manual). 914 * "7.3 Cache Coherency" in the manual).
919 */ 915 */
920 if (isro && mmutype == MMU_68040) { 916 if (isro && mmutype == MMU_68040) {
921 paddr_t pa = pmap_pte_pa(pte); 917 paddr_t pa = pmap_pte_pa(pte);
922 918
923 DCFP(pa); 919 DCFP(pa);
924 ICPP(pa); 920 ICPP(pa);
925 } 921 }
926#endif 922#endif
927 pmap_pte_set_prot(pte, isro); 923 pmap_pte_set_prot(pte, isro);
928 if (needtflush) 924 if (needtflush)
929 TBIS(va); 925 TBIS(va);
930 pte++; 926 pte++;
931 } 927 }
932} 928}
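For illustration only (an assumption, not code from this revision), a minimal caller of pmap_protect() follows; the helper name example_write_protect() and its arguments are hypothetical.

/*
 * Sketch (assumption): write-protect a page-aligned range, as a
 * copy-on-write path would.  pmap_protect() itself skips segments
 * whose page-table pages were never allocated, as shown above.
 */
static void
example_write_protect(pmap_t pm, vaddr_t start, vsize_t len)
{
	vaddr_t sva = m68k_trunc_page(start);
	vaddr_t eva = m68k_round_page(start + len);

	pmap_protect(pm, sva, eva, VM_PROT_READ);
	pmap_update(pm);	/* flush any deferred MMU updates */
}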
933 929
934/* 930/*
935 * Insert the given physical page (p) at 931 * Insert the given physical page (p) at
936 * the specified virtual address (v) in the 932 * the specified virtual address (v) in the
937 * target physical map with the protection requested. 933 * target physical map with the protection requested.
938 * 934 *
939 * If specified, the page will be wired down, meaning 935 * If specified, the page will be wired down, meaning
940 * that the related pte can not be reclaimed. 936 * that the related pte can not be reclaimed.
941 * 937 *
942 * NB: This is the only routine which MAY NOT lazy-evaluate 938 * NB: This is the only routine which MAY NOT lazy-evaluate
943 * or lose information. That is, this routine must actually 939 * or lose information. That is, this routine must actually
944 * insert this page into the given map NOW. 940 * insert this page into the given map NOW.
945 */ 941 */
946extern int kernel_copyback; 942extern int kernel_copyback;
947 943
948int 944int
949pmap_enter(pmap, va, pa, prot, flags) 945pmap_enter(pmap, va, pa, prot, flags)
950 pmap_t pmap; 946 pmap_t pmap;
951 vaddr_t va; 947 vaddr_t va;
952 paddr_t pa; 948 paddr_t pa;
953 vm_prot_t prot; 949 vm_prot_t prot;
954 int flags; 950 int flags;
955{ 951{
956 u_int *pte; 952 u_int *pte;
957 int npte; 953 int npte;
958 paddr_t opa; 954 paddr_t opa;
959 bool cacheable = true; 955 bool cacheable = true;
960 bool checkpv = true; 956 bool checkpv = true;
961 bool wired = (flags & PMAP_WIRED) != 0; 957 bool wired = (flags & PMAP_WIRED) != 0;
962 bool can_fail = (flags & PMAP_CANFAIL) != 0; 958 bool can_fail = (flags & PMAP_CANFAIL) != 0;
963 959
964#ifdef DEBUG 960#ifdef DEBUG
965 if (pmapdebug & (PDB_FOLLOW|PDB_ENTER)) 961 if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
966 printf("pmap_enter(%p, %lx, %lx, %x, %x)\n", 962 printf("pmap_enter(%p, %lx, %lx, %x, %x)\n",
967 pmap, va, pa, prot, wired); 963 pmap, va, pa, prot, wired);
968 if (pmap == pmap_kernel()) 964 if (pmap == pmap_kernel())
969 enter_stats.kernel++; 965 enter_stats.kernel++;
970 else 966 else
971 enter_stats.user++; 967 enter_stats.user++;
972#endif 968#endif
973 /* 969 /*
974 * For user mapping, allocate kernel VM resources if necessary. 970 * For user mapping, allocate kernel VM resources if necessary.
975 */ 971 */
976 if (pmap->pm_ptab == NULL) 972 if (pmap->pm_ptab == NULL)
977 pmap->pm_ptab = (pt_entry_t *) 973 pmap->pm_ptab = (pt_entry_t *)
978 uvm_km_alloc(pt_map, M68K_MAX_PTSIZE, 0, 974 uvm_km_alloc(pt_map, M68K_MAX_PTSIZE, 0,
979 UVM_KMF_VAONLY |  975 UVM_KMF_VAONLY |
980 (can_fail ? UVM_KMF_NOWAIT : UVM_KMF_WAITVA)); 976 (can_fail ? UVM_KMF_NOWAIT : UVM_KMF_WAITVA));
981 if (pmap->pm_ptab == NULL) 977 if (pmap->pm_ptab == NULL)
982 return ENOMEM; 978 return ENOMEM;
983 979
984 /* 980 /*
985 * Segment table entry not valid, we need a new PT page 981 * Segment table entry not valid, we need a new PT page
986 */ 982 */
987 if (!pmap_ste_v(pmap, va)) { 983 if (!pmap_ste_v(pmap, va)) {
988 int err = pmap_enter_ptpage(pmap, va, can_fail); 984 int err = pmap_enter_ptpage(pmap, va, can_fail);
989 if (err) 985 if (err)
990 return err; 986 return err;
991 } 987 }
992 988
993 pte = pmap_pte(pmap, va); 989 pte = pmap_pte(pmap, va);
994 opa = pmap_pte_pa(pte); 990 opa = pmap_pte_pa(pte);
995#ifdef DEBUG 991#ifdef DEBUG
996 if (pmapdebug & PDB_ENTER) 992 if (pmapdebug & PDB_ENTER)
997 printf("enter: pte %p, *pte %x\n", pte, *(int *)pte); 993 printf("enter: pte %p, *pte %x\n", pte, *(int *)pte);
998#endif 994#endif
999 995
1000 /* 996 /*
1001 * Mapping has not changed, must be protection or wiring change. 997 * Mapping has not changed, must be protection or wiring change.
1002 */ 998 */
1003 if (opa == pa) { 999 if (opa == pa) {
1004#ifdef DEBUG 1000#ifdef DEBUG
1005 enter_stats.pwchange++; 1001 enter_stats.pwchange++;
1006#endif 1002#endif
1007 /* 1003 /*
1008 * Wiring change, just update stats. 1004 * Wiring change, just update stats.
1009 * We don't worry about wiring PT pages as they remain 1005 * We don't worry about wiring PT pages as they remain
1010 * resident as long as there are valid mappings in them. 1006 * resident as long as there are valid mappings in them.
1011 * Hence, if a user page is wired, the PT page will be also. 1007 * Hence, if a user page is wired, the PT page will be also.
1012 */ 1008 */
1013 if ((wired && !pmap_pte_w(pte)) || (!wired && pmap_pte_w(pte))){ 1009 if ((wired && !pmap_pte_w(pte)) || (!wired && pmap_pte_w(pte))){
1014#ifdef DEBUG 1010#ifdef DEBUG
1015 if (pmapdebug & PDB_ENTER) 1011 if (pmapdebug & PDB_ENTER)
1016 printf("enter: wiring change -> %x\n", wired); 1012 printf("enter: wiring change -> %x\n", wired);
1017#endif 1013#endif
1018 if (wired) 1014 if (wired)
1019 pmap->pm_stats.wired_count++; 1015 pmap->pm_stats.wired_count++;
1020 else 1016 else
1021 pmap->pm_stats.wired_count--; 1017 pmap->pm_stats.wired_count--;
1022#ifdef DEBUG 1018#ifdef DEBUG
1023 enter_stats.wchange++; 1019 enter_stats.wchange++;
1024#endif 1020#endif
1025 } 1021 }
1026 /* 1022 /*
1027 * Retain cache inhibition status 1023 * Retain cache inhibition status
1028 */ 1024 */
1029 checkpv = false; 1025 checkpv = false;
1030 if (pmap_pte_ci(pte)) 1026 if (pmap_pte_ci(pte))
1031 cacheable = false; 1027 cacheable = false;
1032 goto validate; 1028 goto validate;
1033 } 1029 }
1034 1030
1035 /* 1031 /*
1036 * Mapping has changed, invalidate old range and fall through to 1032 * Mapping has changed, invalidate old range and fall through to
1037 * handle validating new mapping. 1033 * handle validating new mapping.
1038 */ 1034 */
1039 if (opa) { 1035 if (opa) {
1040#ifdef DEBUG 1036#ifdef DEBUG
1041 if (pmapdebug & PDB_ENTER) 1037 if (pmapdebug & PDB_ENTER)
1042 printf("enter: removing old mapping %lx\n", va); 1038 printf("enter: removing old mapping %lx\n", va);
1043#endif 1039#endif
1044 pmap_remove_mapping(pmap, va, pte, 1040 pmap_remove_mapping(pmap, va, pte,
1045 PRM_TFLUSH|PRM_CFLUSH|PRM_KEEPPTPAGE); 1041 PRM_TFLUSH|PRM_CFLUSH|PRM_KEEPPTPAGE);
1046#ifdef DEBUG 1042#ifdef DEBUG
1047 enter_stats.mchange++; 1043 enter_stats.mchange++;
1048#endif 1044#endif
1049 } 1045 }
1050 1046
1051 /* 1047 /*
1052 * If this is a new user mapping, increment the wiring count 1048 * If this is a new user mapping, increment the wiring count
1053 * on this PT page. PT pages are wired down as long as there 1049 * on this PT page. PT pages are wired down as long as there
1054 * is a valid mapping in the page. 1050 * is a valid mapping in the page.
1055 */ 1051 */
1056 if (pmap != pmap_kernel()) 1052 if (pmap != pmap_kernel())
1057 pmap_ptpage_addref(trunc_page((vaddr_t)pte)); 1053 pmap_ptpage_addref(trunc_page((vaddr_t)pte));
1058 1054
1059 /* 1055 /*
1060 * Enter on the PV list if part of our managed memory 1056 * Enter on the PV list if part of our managed memory
1061 * Note that we raise IPL while manipulating pv_table 1057 * Note that we raise IPL while manipulating pv_table
1062 * since pmap_enter can be called at interrupt time. 1058 * since pmap_enter can be called at interrupt time.
1063 */ 1059 */
1064 if (PAGE_IS_MANAGED(pa)) { 1060 if (PAGE_IS_MANAGED(pa)) {
1065 struct pv_entry *pv, *npv; 1061 struct pv_entry *pv, *npv;
1066 int s; 1062 int s;
1067 1063
1068#ifdef DEBUG 1064#ifdef DEBUG
1069 enter_stats.managed++; 1065 enter_stats.managed++;
1070#endif 1066#endif
1071 pv = pa_to_pvh(pa); 1067 pv = pa_to_pvh(pa);
1072 s = splvm(); 1068 s = splvm();
1073#ifdef DEBUG 1069#ifdef DEBUG
1074 if (pmapdebug & PDB_ENTER) 1070 if (pmapdebug & PDB_ENTER)
1075 printf("enter: pv at %p: %lx/%p/%p\n", 1071 printf("enter: pv at %p: %lx/%p/%p\n",
1076 pv, pv->pv_va, pv->pv_pmap, pv->pv_next); 1072 pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
1077#endif 1073#endif
1078 /* 1074 /*
1079 * No entries yet, use header as the first entry 1075 * No entries yet, use header as the first entry
1080 */ 1076 */
1081 if (pv->pv_pmap == NULL) { 1077 if (pv->pv_pmap == NULL) {
1082#ifdef DEBUG 1078#ifdef DEBUG
1083 enter_stats.firstpv++; 1079 enter_stats.firstpv++;
1084#endif 1080#endif
1085 pv->pv_va = va; 1081 pv->pv_va = va;
1086 pv->pv_pmap = pmap; 1082 pv->pv_pmap = pmap;
1087 pv->pv_next = NULL; 1083 pv->pv_next = NULL;
1088 pv->pv_ptste = NULL; 1084 pv->pv_ptste = NULL;
1089 pv->pv_ptpmap = NULL; 1085 pv->pv_ptpmap = NULL;
1090 pv->pv_flags = 0; 1086 pv->pv_flags = 0;
1091 } 1087 }
1092 /* 1088 /*
1093 * There is at least one other VA mapping this page. 1089 * There is at least one other VA mapping this page.
1094 * Place this entry after the header. 1090 * Place this entry after the header.
1095 */ 1091 */
1096 else { 1092 else {
1097#ifdef DEBUG 1093#ifdef DEBUG
1098 for (npv = pv; npv; npv = npv->pv_next) 1094 for (npv = pv; npv; npv = npv->pv_next)
1099 if (pmap == npv->pv_pmap && va == npv->pv_va) 1095 if (pmap == npv->pv_pmap && va == npv->pv_va)
1100 panic("pmap_enter: already in pv_tab"); 1096 panic("pmap_enter: already in pv_tab");
1101#endif 1097#endif
1102 npv = pmap_alloc_pv(); 1098 npv = pmap_alloc_pv();
1103 npv->pv_va = va; 1099 npv->pv_va = va;
1104 npv->pv_pmap = pmap; 1100 npv->pv_pmap = pmap;
1105 npv->pv_next = pv->pv_next; 1101 npv->pv_next = pv->pv_next;
1106 npv->pv_ptste = NULL; 1102 npv->pv_ptste = NULL;
1107 npv->pv_ptpmap = NULL; 1103 npv->pv_ptpmap = NULL;
1108 pv->pv_next = npv; 1104 pv->pv_next = npv;
1109#ifdef DEBUG 1105#ifdef DEBUG
1110 if (!npv->pv_next) 1106 if (!npv->pv_next)
1111 enter_stats.secondpv++; 1107 enter_stats.secondpv++;
1112#endif 1108#endif
1113 } 1109 }
1114 splx(s); 1110 splx(s);
1115 } 1111 }
1116 /* 1112 /*
1117 * Assumption: if it is not part of our managed memory 1113 * Assumption: if it is not part of our managed memory
1118 * then it must be device memory which may be volatile. 1114 * then it must be device memory which may be volatile.
1119 */ 1115 */
1120 else if (pmap_initialized) { 1116 else if (pmap_initialized) {
1121 checkpv = cacheable = false; 1117 checkpv = cacheable = false;
1122#ifdef DEBUG 1118#ifdef DEBUG
1123 enter_stats.unmanaged++; 1119 enter_stats.unmanaged++;
1124#endif 1120#endif
1125 } 1121 }
1126 1122
1127 /* 1123 /*
1128 * Increment counters 1124 * Increment counters
1129 */ 1125 */
1130 pmap->pm_stats.resident_count++; 1126 pmap->pm_stats.resident_count++;
1131 if (wired) 1127 if (wired)
1132 pmap->pm_stats.wired_count++; 1128 pmap->pm_stats.wired_count++;
1133 1129
1134validate: 1130validate:
1135 /* 1131 /*
1136 * Now validate mapping with desired protection/wiring. 1132 * Now validate mapping with desired protection/wiring.
1137 * Assume uniform modified and referenced status for all 1133 * Assume uniform modified and referenced status for all
1138 * AMIGA pages in a MACH page. 1134 * AMIGA pages in a MACH page.
1139 */ 1135 */
1140#if defined(M68040) || defined(M68060) 1136#if defined(M68040) || defined(M68060)
1141#if DEBUG 1137#if DEBUG
1142 if (pmapdebug & 0x10000 && mmutype == MMU_68040 && 1138 if (pmapdebug & 0x10000 && mmutype == MMU_68040 &&
1143 pmap == pmap_kernel()) { 1139 pmap == pmap_kernel()) {
1144 const char *s; 1140 const char *s;
1145 struct proc *cp = curproc; 1141 struct proc *cp = curproc;
1146 if (va >= amiga_uptbase && 1142 if (va >= amiga_uptbase &&
1147 va < (amiga_uptbase + M68K_PTMAXSIZE)) 1143 va < (amiga_uptbase + M68K_PTMAXSIZE))
1148 s = "UPT"; 1144 s = "UPT";
1149 else if (va >= (u_int)Sysmap && 1145 else if (va >= (u_int)Sysmap &&
1150 va < ((u_int)Sysmap + M68K_MAX_KPTSIZE)) 1146 va < ((u_int)Sysmap + M68K_MAX_KPTSIZE))
1151 s = "KPT"; 1147 s = "KPT";
1152 else if (va >= (u_int)pmap->pm_stab && 1148 else if (va >= (u_int)pmap->pm_stab &&
1153 va < ((u_int)pmap->pm_stab + M68K_STSIZE)) 1149 va < ((u_int)pmap->pm_stab + M68K_STSIZE))
1154 s = "KST"; 1150 s = "KST";
1155 else if (cp && 1151 else if (cp &&
1156 va >= (u_int)cp->p_vmspace->vm_map.pmap->pm_stab && 1152 va >= (u_int)cp->p_vmspace->vm_map.pmap->pm_stab &&
1157 va < ((u_int)cp->p_vmspace->vm_map.pmap->pm_stab + 1153 va < ((u_int)cp->p_vmspace->vm_map.pmap->pm_stab +
1158 M68K_STSIZE)) 1154 M68K_STSIZE))
1159 s = "UST"; 1155 s = "UST";
1160 else 1156 else
1161 s = "other"; 1157 s = "other";
1162 printf("pmap_init: validating %s kernel page at %lx -> %lx\n", 1158 printf("pmap_init: validating %s kernel page at %lx -> %lx\n",
1163 s, va, pa); 1159 s, va, pa);
1164 1160
1165 } 1161 }
1166#endif 1162#endif
1167 if (mmutype == MMU_68040 && pmap == pmap_kernel() && ( 1163 if (mmutype == MMU_68040 && pmap == pmap_kernel() && (
1168 (va >= amiga_uptbase && va < (amiga_uptbase + M68K_PTMAXSIZE)) || 1164 (va >= amiga_uptbase && va < (amiga_uptbase + M68K_PTMAXSIZE)) ||
1169 (va >= (u_int)Sysmap && va < ((u_int)Sysmap + M68K_MAX_KPTSIZE)))) 1165 (va >= (u_int)Sysmap && va < ((u_int)Sysmap + M68K_MAX_KPTSIZE))))
1170 cacheable = false; /* don't cache user page tables */ 1166 cacheable = false; /* don't cache user page tables */
1171#endif 1167#endif
1172 npte = (pa & PG_FRAME) | pte_prot(pmap, prot) | PG_V; 1168 npte = (pa & PG_FRAME) | pte_prot(pmap, prot) | PG_V;
1173 npte |= (*(int *)pte & (PG_M|PG_U)); 1169 npte |= (*(int *)pte & (PG_M|PG_U));
1174 if (wired) 1170 if (wired)
1175 npte |= PG_W; 1171 npte |= PG_W;
1176 if (!checkpv && !cacheable) 1172 if (!checkpv && !cacheable)
1177#if defined(M68060) && defined(NO_SLOW_CIRRUS) 1173#if defined(M68060) && defined(NO_SLOW_CIRRUS)
1178#if defined(M68040) || defined(M68030) || defined(M68020) 1174#if defined(M68040) || defined(M68030) || defined(M68020)
1179 npte |= (cputype == CPU_68060 ? PG_CIN : PG_CI); 1175 npte |= (cputype == CPU_68060 ? PG_CIN : PG_CI);
1180#else 1176#else
1181 npte |= PG_CIN; 1177 npte |= PG_CIN;
1182#endif 1178#endif
1183#else 1179#else
1184 npte |= PG_CI; 1180 npte |= PG_CI;
1185#endif 1181#endif
1186#if defined(M68040) || defined(M68060) 1182#if defined(M68040) || defined(M68060)
1187 else if (mmutype == MMU_68040 && (npte & PG_PROT) == PG_RW && 1183 else if (mmutype == MMU_68040 && (npte & PG_PROT) == PG_RW &&
1188 (kernel_copyback || pmap != pmap_kernel())) 1184 (kernel_copyback || pmap != pmap_kernel()))
1189 npte |= PG_CCB; /* cache copyback */ 1185 npte |= PG_CCB; /* cache copyback */
1190#endif 1186#endif
1191 if (flags & VM_PROT_ALL) { 1187 if (flags & VM_PROT_ALL) {
1192 npte |= PG_U; 1188 npte |= PG_U;
1193 if (flags & VM_PROT_WRITE) 1189 if (flags & VM_PROT_WRITE)
1194 npte |= PG_M; 1190 npte |= PG_M;
1195 } 1191 }
1196 1192
1197 /* 1193 /*
1198 * Remember if this was a wiring-only change. 1194 * Remember if this was a wiring-only change.
1199 * If so, we need not flush the TLB and caches. 1195 * If so, we need not flush the TLB and caches.
1200 */ 1196 */
1201 wired = ((*(int *)pte ^ npte) == PG_W); 1197 wired = ((*(int *)pte ^ npte) == PG_W);
1202#if defined(M68040) || defined(M68060) 1198#if defined(M68040) || defined(M68060)
1203 if (mmutype == MMU_68040 && !wired) { 1199 if (mmutype == MMU_68040 && !wired) {
1204 DCFP(pa); 1200 DCFP(pa);
1205 ICPP(pa); 1201 ICPP(pa);
1206 } 1202 }
1207#endif 1203#endif
1208#ifdef DEBUG 1204#ifdef DEBUG
1209 if (pmapdebug & PDB_ENTER) 1205 if (pmapdebug & PDB_ENTER)
1210 printf("enter: new pte value %x\n", npte); 1206 printf("enter: new pte value %x\n", npte);
1211#endif 1207#endif
1212 *(int *)pte++ = npte; 1208 *(int *)pte++ = npte;
1213 if (!wired && active_pmap(pmap)) 1209 if (!wired && active_pmap(pmap))
1214 TBIS(va); 1210 TBIS(va);
1215#ifdef DEBUG 1211#ifdef DEBUG
1216 if ((pmapdebug & PDB_WIRING) && pmap != pmap_kernel()) { 1212 if ((pmapdebug & PDB_WIRING) && pmap != pmap_kernel()) {
1217 va -= PAGE_SIZE; 1213 va -= PAGE_SIZE;
1218 pmap_check_wiring("enter", 1214 pmap_check_wiring("enter",
1219 trunc_page((vaddr_t) pmap_pte(pmap, va))); 1215 trunc_page((vaddr_t) pmap_pte(pmap, va)));
1220 } 1216 }
1221#endif 1217#endif
1222 return 0; 1218 return 0;
1223} 1219}
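For illustration only (an assumption, not part of either revision), here is how a fault-handling caller might use the PMAP_CANFAIL path that pmap_enter() honors above; the helper name example_enter() is hypothetical.

/*
 * Sketch (assumption): enter a read/write mapping, passing the access
 * type bits and PMAP_CANFAIL in the flags so that a page-table page
 * shortage returns ENOMEM instead of sleeping.
 */
static int
example_enter(pmap_t pm, vaddr_t va, paddr_t pa)
{
	int error;

	error = pmap_enter(pm, va, pa, VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE | PMAP_CANFAIL);
	if (error == ENOMEM)
		return error;	/* caller may wait for memory and retry */
	pmap_update(pm);
	return 0;
}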
1224 1220
1225void 1221void
1226pmap_kenter_pa(va, pa, prot) 1222pmap_kenter_pa(va, pa, prot)
1227 vaddr_t va; 1223 vaddr_t va;
1228 paddr_t pa; 1224 paddr_t pa;
1229 vm_prot_t prot; 1225 vm_prot_t prot;
1230{ 1226{
1231 struct pmap *pmap = pmap_kernel(); 1227 struct pmap *pmap = pmap_kernel();
1232 pt_entry_t *pte; 1228 pt_entry_t *pte;
1233 int s, npte; 1229 int s, npte;
1234 1230
1235 PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER, 1231 PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER,
1236 ("pmap_kenter_pa(%lx, %lx, %x)\n", va, pa, prot)); 1232 ("pmap_kenter_pa(%lx, %lx, %x)\n", va, pa, prot));
1237 1233
1238 /* 1234 /*
1239 * Segment table entry not valid, we need a new PT page 1235 * Segment table entry not valid, we need a new PT page
1240 */ 1236 */
1241 1237
1242 if (!pmap_ste_v(pmap, va)) { 1238 if (!pmap_ste_v(pmap, va)) {
1243 s = splvm(); 1239 s = splvm();
1244 pmap_enter_ptpage(pmap, va, false); 1240 pmap_enter_ptpage(pmap, va, false);
1245 splx(s); 1241 splx(s);
1246 } 1242 }
1247 1243
1248 pa = m68k_trunc_page(pa); 1244 pa = m68k_trunc_page(pa);
1249 pte = pmap_pte(pmap, va); 1245 pte = pmap_pte(pmap, va);
1250 1246
1251 PMAP_DPRINTF(PDB_ENTER, ("enter: pte %p, *pte %x\n", pte, *pte)); 1247 PMAP_DPRINTF(PDB_ENTER, ("enter: pte %p, *pte %x\n", pte, *pte));
1252 KASSERT(!pmap_pte_v(pte)); 1248 KASSERT(!pmap_pte_v(pte));
1253 1249
1254 /* 1250 /*
1255 * Increment counters 1251 * Increment counters
1256 */ 1252 */
1257 1253
1258 pmap->pm_stats.resident_count++; 1254 pmap->pm_stats.resident_count++;
1259 pmap->pm_stats.wired_count++; 1255 pmap->pm_stats.wired_count++;
1260 1256
1261 /* 1257 /*
1262 * Build the new PTE. 1258 * Build the new PTE.
1263 */ 1259 */
1264 1260
1265 npte = pa | pte_prot(pmap, prot) | PG_V | PG_W; 1261 npte = pa | pte_prot(pmap, prot) | PG_V | PG_W;
1266#if defined(M68040) 1262#if defined(M68040)
1267 if (mmutype == MMU_68040 && (npte & (PG_PROT)) == PG_RW) 1263 if (mmutype == MMU_68040 && (npte & (PG_PROT)) == PG_RW)
1268 npte |= PG_CCB; 1264 npte |= PG_CCB;
1269#endif 1265#endif
1270 1266
1271 PMAP_DPRINTF(PDB_ENTER, ("enter: new pte value %x\n", npte)); 1267 PMAP_DPRINTF(PDB_ENTER, ("enter: new pte value %x\n", npte));
1272#if defined(M68040) 1268#if defined(M68040)
1273 if (mmutype == MMU_68040) { 1269 if (mmutype == MMU_68040) {
1274 DCFP(pa); 1270 DCFP(pa);
1275 ICPP(pa); 1271 ICPP(pa);
1276 } 1272 }
1277#endif 1273#endif
1278 *pte = npte; 1274 *pte = npte;
1279} 1275}
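An editorial sketch (assumption, not from this revision) of the usual pairing for these unmanaged, wired kernel mappings; note that this revision's pmap_kenter_pa() takes no flags argument, and the helper name example_kenter() is hypothetical.

/*
 * Sketch (assumption): map a page into kernel VA with pmap_kenter_pa(),
 * use it, then tear the wired mapping down again with pmap_kremove().
 */
static void
example_kenter(vaddr_t va, paddr_t pa)
{
	pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
	pmap_update(pmap_kernel());

	/* ... access the mapping ... */

	pmap_kremove(va, PAGE_SIZE);
	pmap_update(pmap_kernel());
}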
1280 1276
1281void 1277void
1282pmap_kremove(va, len) 1278pmap_kremove(va, len)
1283 vaddr_t va; 1279 vaddr_t va;
1284 vsize_t len; 1280 vsize_t len;
1285{ 1281{
1286 struct pmap *pmap = pmap_kernel(); 1282 struct pmap *pmap = pmap_kernel();
1287 vaddr_t sva, eva, nssva; 1283 vaddr_t sva, eva, nssva;
1288 pt_entry_t *pte; 1284 pt_entry_t *pte;
1289 1285
1290 PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT, 1286 PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
1291 ("pmap_kremove(%lx, %lx)\n", va, len)); 1287 ("pmap_kremove(%lx, %lx)\n", va, len));
1292 1288
1293 sva = va; 1289 sva = va;
1294 eva = va + len; 1290 eva = va + len;
1295 while (sva < eva) { 1291 while (sva < eva) {
1296 nssva = m68k_trunc_seg(sva) + NBSEG; 1292 nssva = m68k_trunc_seg(sva) + NBSEG;
1297 if (nssva == 0 || nssva > eva) 1293 if (nssva == 0 || nssva > eva)
1298 nssva = eva; 1294 nssva = eva;
1299 1295
1300 /* 1296 /*
1301 * If VA belongs to an unallocated segment, 1297 * If VA belongs to an unallocated segment,
1302 * skip to the next segment boundary. 1298 * skip to the next segment boundary.