| @@ -1,1087 +1,1087 @@ | | | @@ -1,1087 +1,1087 @@ |
1 | /* $NetBSD: pmap.c,v 1.106 2020/04/15 08:14:22 skrll Exp $ */ | | 1 | /* $NetBSD: pmap.c,v 1.107 2020/04/15 09:41:09 skrll Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc. | | 4 | * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation | | 7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Matthew Fredette. | | 8 | * by Matthew Fredette. |
9 | * | | 9 | * |
10 | * Redistribution and use in source and binary forms, with or without | | 10 | * Redistribution and use in source and binary forms, with or without |
11 | * modification, are permitted provided that the following conditions | | 11 | * modification, are permitted provided that the following conditions |
12 | * are met: | | 12 | * are met: |
13 | * 1. Redistributions of source code must retain the above copyright | | 13 | * 1. Redistributions of source code must retain the above copyright |
14 | * notice, this list of conditions and the following disclaimer. | | 14 | * notice, this list of conditions and the following disclaimer. |
15 | * 2. Redistributions in binary form must reproduce the above copyright | | 15 | * 2. Redistributions in binary form must reproduce the above copyright |
16 | * notice, this list of conditions and the following disclaimer in the | | 16 | * notice, this list of conditions and the following disclaimer in the |
17 | * documentation and/or other materials provided with the distribution. | | 17 | * documentation and/or other materials provided with the distribution. |
18 | * | | 18 | * |
19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
29 | * POSSIBILITY OF SUCH DAMAGE. | | 29 | * POSSIBILITY OF SUCH DAMAGE. |
30 | */ | | 30 | */ |
31 | | | 31 | |
32 | /* $OpenBSD: pmap.c,v 1.132 2008/04/18 06:42:21 djm Exp $ */ | | 32 | /* $OpenBSD: pmap.c,v 1.132 2008/04/18 06:42:21 djm Exp $ */ |
33 | | | 33 | |
34 | /* | | 34 | /* |
35 | * Copyright (c) 1998-2004 Michael Shalayeff | | 35 | * Copyright (c) 1998-2004 Michael Shalayeff |
36 | * All rights reserved. | | 36 | * All rights reserved. |
37 | * | | 37 | * |
38 | * Redistribution and use in source and binary forms, with or without | | 38 | * Redistribution and use in source and binary forms, with or without |
39 | * modification, are permitted provided that the following conditions | | 39 | * modification, are permitted provided that the following conditions |
40 | * are met: | | 40 | * are met: |
41 | * 1. Redistributions of source code must retain the above copyright | | 41 | * 1. Redistributions of source code must retain the above copyright |
42 | * notice, this list of conditions and the following disclaimer. | | 42 | * notice, this list of conditions and the following disclaimer. |
43 | * 2. Redistributions in binary form must reproduce the above copyright | | 43 | * 2. Redistributions in binary form must reproduce the above copyright |
44 | * notice, this list of conditions and the following disclaimer in the | | 44 | * notice, this list of conditions and the following disclaimer in the |
45 | * documentation and/or other materials provided with the distribution. | | 45 | * documentation and/or other materials provided with the distribution. |
46 | * | | 46 | * |
47 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | | 47 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
48 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | | 48 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
49 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | | 49 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
50 | * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT, | | 50 | * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT, |
51 | * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | | 51 | * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
52 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR | | 52 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR |
53 | * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | | 53 | * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
54 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | | 54 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, |
55 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | | 55 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING |
56 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF | | 56 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF |
57 | * THE POSSIBILITY OF SUCH DAMAGE. | | 57 | * THE POSSIBILITY OF SUCH DAMAGE. |
58 | */ | | 58 | */ |
59 | /* | | 59 | /* |
60 | * References: | | 60 | * References: |
61 | * 1. PA7100LC ERS, Hewlett-Packard, March 30 1999, Public version 1.0 | | 61 | * 1. PA7100LC ERS, Hewlett-Packard, March 30 1999, Public version 1.0 |
62 | * 2. PA7300LC ERS, Hewlett-Packard, March 18 1996, Version 1.0 | | 62 | * 2. PA7300LC ERS, Hewlett-Packard, March 18 1996, Version 1.0 |
63 | * 3. PA-RISC 1.1 Architecture and Instruction Set Reference Manual, | | 63 | * 3. PA-RISC 1.1 Architecture and Instruction Set Reference Manual, |
64 | * Hewlett-Packard, February 1994, Third Edition | | 64 | * Hewlett-Packard, February 1994, Third Edition |
65 | */ | | 65 | */ |
66 | | | 66 | |
67 | #include <sys/cdefs.h> | | 67 | #include <sys/cdefs.h> |
68 | __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.106 2020/04/15 08:14:22 skrll Exp $"); | | 68 | __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.107 2020/04/15 09:41:09 skrll Exp $"); |
69 | | | 69 | |
70 | #include "opt_cputype.h" | | 70 | #include "opt_cputype.h" |
71 | | | 71 | |
72 | #include <sys/param.h> | | 72 | #include <sys/param.h> |
73 | #include <sys/systm.h> | | | |
74 | #include <sys/proc.h> | | | |
75 | #include <sys/mutex.h> | | 73 | #include <sys/mutex.h> |
| | | 74 | #include <sys/proc.h> |
76 | #include <sys/rwlock.h> | | 75 | #include <sys/rwlock.h> |
| | | 76 | #include <sys/systm.h> |
77 | | | 77 | |
78 | #include <uvm/uvm.h> | | 78 | #include <uvm/uvm.h> |
79 | #include <uvm/uvm_page_array.h> | | 79 | #include <uvm/uvm_page_array.h> |
80 | | | 80 | |
81 | #include <machine/reg.h> | | | |
82 | #include <machine/psl.h> | | | |
83 | #include <machine/cpu.h> | | 81 | #include <machine/cpu.h> |
| | | 82 | #include <machine/cpufunc.h> |
| | | 83 | #include <machine/iomod.h> |
84 | #include <machine/pcb.h> | | 84 | #include <machine/pcb.h> |
85 | #include <machine/pmap.h> | | 85 | #include <machine/pmap.h> |
86 | #include <machine/pte.h> | | 86 | #include <machine/pte.h> |
87 | #include <machine/cpufunc.h> | | 87 | #include <machine/psl.h> |
88 | #include <machine/iomod.h> | | 88 | #include <machine/reg.h> |
89 | | | 89 | |
90 | #include <hppa/hppa/hpt.h> | | 90 | #include <hppa/hppa/hpt.h> |
91 | #include <hppa/hppa/machdep.h> | | 91 | #include <hppa/hppa/machdep.h> |
92 | | | 92 | |
93 | #if defined(DDB) | | 93 | #if defined(DDB) |
94 | #include <ddb/db_output.h> | | 94 | #include <ddb/db_output.h> |
95 | #endif | | 95 | #endif |
96 | | | 96 | |
97 | #ifdef PMAPDEBUG | | 97 | #ifdef PMAPDEBUG |
98 | | | 98 | |
99 | #define static /**/ | | 99 | #define static /**/ |
100 | #define inline /**/ | | 100 | #define inline /**/ |
101 | | | 101 | |
102 | #define DPRINTF(l,s) do { \ | | 102 | #define DPRINTF(l,s) do { \ |
103 | if ((pmapdebug & (l)) == (l)) \ | | 103 | if ((pmapdebug & (l)) == (l)) \ |
104 | printf s; \ | | 104 | printf s; \ |
105 | } while(0) | | 105 | } while(0) |
106 | | | 106 | |
107 | #define PDB_FOLLOW 0x00000001 | | 107 | #define PDB_FOLLOW 0x00000001 |
108 | #define PDB_INIT 0x00000002 | | 108 | #define PDB_INIT 0x00000002 |
109 | #define PDB_ENTER 0x00000004 | | 109 | #define PDB_ENTER 0x00000004 |
110 | #define PDB_REMOVE 0x00000008 | | 110 | #define PDB_REMOVE 0x00000008 |
111 | #define PDB_CREATE 0x00000010 | | 111 | #define PDB_CREATE 0x00000010 |
112 | #define PDB_PTPAGE 0x00000020 | | 112 | #define PDB_PTPAGE 0x00000020 |
113 | #define PDB_CACHE 0x00000040 | | 113 | #define PDB_CACHE 0x00000040 |
114 | #define PDB_BITS 0x00000080 | | 114 | #define PDB_BITS 0x00000080 |
115 | #define PDB_COLLECT 0x00000100 | | 115 | #define PDB_COLLECT 0x00000100 |
116 | #define PDB_PROTECT 0x00000200 | | 116 | #define PDB_PROTECT 0x00000200 |
117 | #define PDB_EXTRACT 0x00000400 | | 117 | #define PDB_EXTRACT 0x00000400 |
118 | #define PDB_VP 0x00000800 | | 118 | #define PDB_VP 0x00000800 |
119 | #define PDB_PV 0x00001000 | | 119 | #define PDB_PV 0x00001000 |
120 | #define PDB_PARANOIA 0x00002000 | | 120 | #define PDB_PARANOIA 0x00002000 |
121 | #define PDB_WIRING 0x00004000 | | 121 | #define PDB_WIRING 0x00004000 |
122 | #define PDB_PMAP 0x00008000 | | 122 | #define PDB_PMAP 0x00008000 |
123 | #define PDB_STEAL 0x00010000 | | 123 | #define PDB_STEAL 0x00010000 |
124 | #define PDB_PHYS 0x00020000 | | 124 | #define PDB_PHYS 0x00020000 |
125 | #define PDB_POOL 0x00040000 | | 125 | #define PDB_POOL 0x00040000 |
126 | #define PDB_ALIAS 0x00080000 | | 126 | #define PDB_ALIAS 0x00080000 |
127 | int pmapdebug = 0 | | 127 | int pmapdebug = 0 |
128 | | PDB_INIT | | 128 | | PDB_INIT |
129 | | PDB_FOLLOW | | 129 | | PDB_FOLLOW |
130 | | PDB_VP | | 130 | | PDB_VP |
131 | | PDB_PV | | 131 | | PDB_PV |
132 | | PDB_ENTER | | 132 | | PDB_ENTER |
133 | | PDB_REMOVE | | 133 | | PDB_REMOVE |
134 | | PDB_STEAL | | 134 | | PDB_STEAL |
135 | | PDB_PROTECT | | 135 | | PDB_PROTECT |
136 | | PDB_PHYS | | 136 | | PDB_PHYS |
137 | | PDB_ALIAS | | 137 | | PDB_ALIAS |
138 | ; | | 138 | ; |
139 | #else | | 139 | #else |
140 | #define DPRINTF(l,s) /* */ | | 140 | #define DPRINTF(l,s) /* */ |
141 | #endif | | 141 | #endif |
142 | | | 142 | |
143 | int pmap_hptsize = 16 * PAGE_SIZE; /* patchable */ | | 143 | int pmap_hptsize = 16 * PAGE_SIZE; /* patchable */ |
144 | vaddr_t pmap_hpt; | | 144 | vaddr_t pmap_hpt; |
145 | | | 145 | |
146 | static struct pmap kernel_pmap_store; | | 146 | static struct pmap kernel_pmap_store; |
147 | struct pmap *const kernel_pmap_ptr = &kernel_pmap_store; | | 147 | struct pmap *const kernel_pmap_ptr = &kernel_pmap_store; |
148 | | | 148 | |
149 | int hppa_sid_max = HPPA_SID_MAX; | | 149 | int hppa_sid_max = HPPA_SID_MAX; |
150 | struct pool pmap_pool; | | 150 | struct pool pmap_pool; |
151 | struct pool pmap_pv_pool; | | 151 | struct pool pmap_pv_pool; |
152 | int pmap_pvlowat = 252; | | 152 | int pmap_pvlowat = 252; |
153 | bool pmap_initialized = false; | | 153 | bool pmap_initialized = false; |
154 | | | 154 | |
155 | static kmutex_t pmaps_lock; | | 155 | static kmutex_t pmaps_lock; |
156 | | | 156 | |
157 | u_int hppa_prot[8]; | | 157 | u_int hppa_prot[8]; |
158 | u_int sid_counter; | | 158 | u_int sid_counter; |
159 | | | 159 | |
160 | static const struct uvm_pagerops pmap_pager = { | | 160 | static const struct uvm_pagerops pmap_pager = { |
161 | /* nothing */ | | 161 | /* nothing */ |
162 | }; | | 162 | }; |
163 | | | 163 | |
164 | /* | | 164 | /* |
165 | * Page 3-6 of the "PA-RISC 1.1 Architecture and Instruction Set | | 165 | * Page 3-6 of the "PA-RISC 1.1 Architecture and Instruction Set |
166 | * Reference Manual" (HP part number 09740-90039) defines equivalent | | 166 | * Reference Manual" (HP part number 09740-90039) defines equivalent |
167 | * and non-equivalent virtual addresses in the cache. | | 167 | * and non-equivalent virtual addresses in the cache. |
168 | * | | 168 | * |
169 | * This macro evaluates to true iff the two space/virtual address | | 169 | * This macro evaluates to true iff the two space/virtual address |
170 | * combinations are non-equivalent aliases, and therefore will find | | 170 | * combinations are non-equivalent aliases, and therefore will find |
171 | * two different locations in the cache. | | 171 | * two different locations in the cache. |
172 | * | | 172 | * |
173 | * NB: currently, the CPU-specific desidhash() functions disable the | | 173 | * NB: currently, the CPU-specific desidhash() functions disable the |
174 | * use of the space in all cache hashing functions. This means that | | 174 | * use of the space in all cache hashing functions. This means that |
175 | * this macro definition is stricter than it has to be (because it | | 175 | * this macro definition is stricter than it has to be (because it |
176 | * takes space into account), but one day cache space hashing should | | 176 | * takes space into account), but one day cache space hashing should |
177 | * be re-enabled. Cache space hashing should yield better performance | | 177 | * be re-enabled. Cache space hashing should yield better performance |
178 | * through better utilization of the cache, assuming that most aliasing | | 178 | * through better utilization of the cache, assuming that most aliasing |
179 | * is the read-only kind, which we do allow in the cache. | | 179 | * is the read-only kind, which we do allow in the cache. |
180 | */ | | 180 | */ |
181 | #define NON_EQUIVALENT_ALIAS(sp1, va1, sp2, va2) \ | | 181 | #define NON_EQUIVALENT_ALIAS(sp1, va1, sp2, va2) \ |
182 | (((((va1) ^ (va2)) & ~HPPA_PGAMASK) != 0) || \ | | 182 | (((((va1) ^ (va2)) & ~HPPA_PGAMASK) != 0) || \ |
183 | ((((sp1) ^ (sp2)) & ~HPPA_SPAMASK) != 0)) | | 183 | ((((sp1) ^ (sp2)) & ~HPPA_SPAMASK) != 0)) |
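[Editor's note, not part of the revision: a minimal standalone sketch of how this predicate behaves. The HPPA_PGAMASK/HPPA_SPAMASK values below are assumptions for illustration; the real definitions live in the machine headers and may differ. Two mappings become non-equivalent aliases, and so land in different cache locations, as soon as their virtual addresses disagree below the alias granularity.]

	/* Hypothetical demo; mask values are assumed, not NetBSD's. */
	#include <stdio.h>
	#include <stdint.h>

	#define HPPA_PGAMASK	0xfff00000u	/* assumed 1 MiB alias granularity */
	#define HPPA_SPAMASK	0x0000ffffu	/* assumed */

	#define NON_EQUIVALENT_ALIAS(sp1, va1, sp2, va2) \
		(((((va1) ^ (va2)) & ~HPPA_PGAMASK) != 0) || \
		 ((((sp1) ^ (sp2)) & ~HPPA_SPAMASK) != 0))

	int
	main(void)
	{
		/* Same offset below the granularity; space bits fall inside
		 * the assumed SPAMASK: reported equivalent (prints 0). */
		printf("%d\n", NON_EQUIVALENT_ALIAS(1u, 0x00100000u, 2u, 0x00500000u));
		/* Offsets differ by one page below the granularity:
		 * non-equivalent alias (prints 1). */
		printf("%d\n", NON_EQUIVALENT_ALIAS(1u, 0x00100000u, 1u, 0x00101000u));
		return 0;
	}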
184 | | | 184 | |
185 | /* Prototypes. */ | | 185 | /* Prototypes. */ |
186 | struct vm_page *pmap_pagealloc(struct uvm_object *, voff_t); | | 186 | struct vm_page *pmap_pagealloc(struct uvm_object *, voff_t); |
187 | void pmap_pagefree(struct vm_page *); | | 187 | void pmap_pagefree(struct vm_page *); |
188 | | | 188 | |
189 | static inline void pmap_sdir_set(pa_space_t, volatile uint32_t *); | | 189 | static inline void pmap_sdir_set(pa_space_t, volatile uint32_t *); |
190 | static inline uint32_t *pmap_sdir_get(pa_space_t); | | 190 | static inline uint32_t *pmap_sdir_get(pa_space_t); |
191 | | | 191 | |
192 | static inline volatile pt_entry_t *pmap_pde_get(volatile uint32_t *, vaddr_t); | | 192 | static inline volatile pt_entry_t *pmap_pde_get(volatile uint32_t *, vaddr_t); |
193 | static inline void pmap_pde_set(pmap_t, vaddr_t, paddr_t); | | 193 | static inline void pmap_pde_set(pmap_t, vaddr_t, paddr_t); |
194 | static inline pt_entry_t *pmap_pde_alloc(pmap_t, vaddr_t, struct vm_page **); | | 194 | static inline pt_entry_t *pmap_pde_alloc(pmap_t, vaddr_t, struct vm_page **); |
195 | static inline struct vm_page *pmap_pde_ptp(pmap_t, volatile pt_entry_t *); | | 195 | static inline struct vm_page *pmap_pde_ptp(pmap_t, volatile pt_entry_t *); |
196 | static inline void pmap_pde_release(pmap_t, vaddr_t, struct vm_page *); | | 196 | static inline void pmap_pde_release(pmap_t, vaddr_t, struct vm_page *); |
197 | | | 197 | |
198 | static inline volatile pt_entry_t *pmap_pde_get(volatile uint32_t *, vaddr_t); | | 198 | static inline volatile pt_entry_t *pmap_pde_get(volatile uint32_t *, vaddr_t); |
199 | static inline void pmap_pde_set(pmap_t, vaddr_t, paddr_t); | | 199 | static inline void pmap_pde_set(pmap_t, vaddr_t, paddr_t); |
200 | | | 200 | |
201 | void pmap_pte_flush(pmap_t, vaddr_t, pt_entry_t); | | 201 | void pmap_pte_flush(pmap_t, vaddr_t, pt_entry_t); |
202 | | | 202 | |
203 | static inline pt_entry_t pmap_pte_get(volatile pt_entry_t *, vaddr_t); | | 203 | static inline pt_entry_t pmap_pte_get(volatile pt_entry_t *, vaddr_t); |
204 | static inline void pmap_pte_set(volatile pt_entry_t *, vaddr_t, pt_entry_t); | | 204 | static inline void pmap_pte_set(volatile pt_entry_t *, vaddr_t, pt_entry_t); |
205 | | | 205 | |
206 | static inline pt_entry_t pmap_vp_find(pmap_t, vaddr_t); | | 206 | static inline pt_entry_t pmap_vp_find(pmap_t, vaddr_t); |
207 | | | 207 | |
208 | static inline struct pv_entry *pmap_pv_alloc(void); | | 208 | static inline struct pv_entry *pmap_pv_alloc(void); |
209 | static inline void pmap_pv_free(struct pv_entry *); | | 209 | static inline void pmap_pv_free(struct pv_entry *); |
210 | static inline void pmap_pv_enter(struct vm_page *, struct pv_entry *, pmap_t, | | 210 | static inline void pmap_pv_enter(struct vm_page *, struct pv_entry *, pmap_t, |
211 | vaddr_t , struct vm_page *, u_int); | | 211 | vaddr_t , struct vm_page *, u_int); |
212 | static inline struct pv_entry *pmap_pv_remove(struct vm_page *, pmap_t, | | 212 | static inline struct pv_entry *pmap_pv_remove(struct vm_page *, pmap_t, |
213 | vaddr_t); | | 213 | vaddr_t); |
214 | | | 214 | |
215 | static inline void pmap_flush_page(struct vm_page *, bool); | | 215 | static inline void pmap_flush_page(struct vm_page *, bool); |
216 | static int pmap_check_alias(struct vm_page *, vaddr_t, pt_entry_t); | | 216 | static int pmap_check_alias(struct vm_page *, vaddr_t, pt_entry_t); |
217 | static void pmap_syncicache_page(struct vm_page *, pmap_t, vaddr_t); | | 217 | static void pmap_syncicache_page(struct vm_page *, pmap_t, vaddr_t); |
218 | | | 218 | |
219 | static void pmap_page_physload(paddr_t, paddr_t); | | 219 | static void pmap_page_physload(paddr_t, paddr_t); |
220 | | | 220 | |
221 | void pmap_copy_page(paddr_t, paddr_t); | | 221 | void pmap_copy_page(paddr_t, paddr_t); |
222 | | | 222 | |
223 | #ifdef USE_HPT | | 223 | #ifdef USE_HPT |
224 | static inline struct hpt_entry *pmap_hash(pmap_t, vaddr_t); | | 224 | static inline struct hpt_entry *pmap_hash(pmap_t, vaddr_t); |
225 | static inline uint32_t pmap_vtag(pmap_t, vaddr_t); | | 225 | static inline uint32_t pmap_vtag(pmap_t, vaddr_t); |
226 | | | 226 | |
227 | #ifdef DDB | | 227 | #ifdef DDB |
228 | void pmap_hptdump(void); | | 228 | void pmap_hptdump(void); |
229 | #endif | | 229 | #endif |
230 | #endif | | 230 | #endif |
231 | | | 231 | |
232 | #ifdef DDB | | 232 | #ifdef DDB |
233 | void pmap_dump_table(pa_space_t, vaddr_t); | | 233 | void pmap_dump_table(pa_space_t, vaddr_t); |
234 | void pmap_dump_pv(paddr_t); | | 234 | void pmap_dump_pv(paddr_t); |
235 | #endif | | 235 | #endif |
236 | | | 236 | |
237 | #define IS_IOPAGE_P(pa) ((pa) >= HPPA_IOBEGIN) | | 237 | #define IS_IOPAGE_P(pa) ((pa) >= HPPA_IOBEGIN) |
238 | #define IS_PVFEXEC_P(f) (((f) & PVF_EXEC) != 0) | | 238 | #define IS_PVFEXEC_P(f) (((f) & PVF_EXEC) != 0) |
239 | | | 239 | |
240 | /* un-invert PVF_REF */ | | 240 | /* un-invert PVF_REF */ |
241 | #define pmap_pvh_attrs(a) \ | | 241 | #define pmap_pvh_attrs(a) \ |
242 | (((a) & (PVF_MOD|PVF_REF)) ^ PVF_REF) | | 242 | (((a) & (PVF_MOD|PVF_REF)) ^ PVF_REF) |
243 | | | 243 | |
244 | #define PMAP_LOCK(pm) \ | | 244 | #define PMAP_LOCK(pm) \ |
245 | do { \ | | 245 | do { \ |
246 | if ((pm) != pmap_kernel()) \ | | 246 | if ((pm) != pmap_kernel()) \ |
247 | rw_enter((pm)->pm_lock, RW_WRITER); \ | | 247 | rw_enter((pm)->pm_lock, RW_WRITER); \ |
248 | } while (/*CONSTCOND*/0) | | 248 | } while (/*CONSTCOND*/0) |
249 | | | 249 | |
250 | #define PMAP_UNLOCK(pm) \ | | 250 | #define PMAP_UNLOCK(pm) \ |
251 | do { \ | | 251 | do { \ |
252 | if ((pm) != pmap_kernel()) \ | | 252 | if ((pm) != pmap_kernel()) \ |
253 | rw_exit((pm)->pm_lock); \ | | 253 | rw_exit((pm)->pm_lock); \ |
254 | } while (/*CONSTCOND*/0) | | 254 | } while (/*CONSTCOND*/0) |
255 | | | 255 | |
256 | struct vm_page * | | 256 | struct vm_page * |
257 | pmap_pagealloc(struct uvm_object *obj, voff_t off) | | 257 | pmap_pagealloc(struct uvm_object *obj, voff_t off) |
258 | { | | 258 | { |
259 | struct vm_page *pg; | | 259 | struct vm_page *pg; |
260 | | | 260 | |
261 | if ((pg = uvm_pagealloc(obj, off, NULL, | | 261 | if ((pg = uvm_pagealloc(obj, off, NULL, |
262 | UVM_PGA_USERESERVE | UVM_PGA_ZERO)) == NULL) | | 262 | UVM_PGA_USERESERVE | UVM_PGA_ZERO)) == NULL) |
263 | printf("pmap_pagealloc fail\n"); | | 263 | printf("pmap_pagealloc fail\n"); |
264 | | | 264 | |
265 | return (pg); | | 265 | return (pg); |
266 | } | | 266 | } |
267 | | | 267 | |
268 | void | | 268 | void |
269 | pmap_pagefree(struct vm_page *pg) | | 269 | pmap_pagefree(struct vm_page *pg) |
270 | { | | 270 | { |
271 | paddr_t pa = VM_PAGE_TO_PHYS(pg); | | 271 | paddr_t pa = VM_PAGE_TO_PHYS(pg); |
272 | pdcache(HPPA_SID_KERNEL, pa, PAGE_SIZE); | | 272 | pdcache(HPPA_SID_KERNEL, pa, PAGE_SIZE); |
273 | | | 273 | |
274 | #if defined(HP8000_CPU) || defined(HP8200_CPU) || \ | | 274 | #if defined(HP8000_CPU) || defined(HP8200_CPU) || \ |
275 | defined(HP8500_CPU) || defined(HP8600_CPU) | | 275 | defined(HP8500_CPU) || defined(HP8600_CPU) |
276 | pdtlb(HPPA_SID_KERNEL, pa); | | 276 | pdtlb(HPPA_SID_KERNEL, pa); |
277 | pitlb(HPPA_SID_KERNEL, pa); | | 277 | pitlb(HPPA_SID_KERNEL, pa); |
278 | #endif | | 278 | #endif |
279 | uvm_pagefree(pg); | | 279 | uvm_pagefree(pg); |
280 | } | | 280 | } |
281 | | | 281 | |
282 | #ifdef USE_HPT | | 282 | #ifdef USE_HPT |
283 | /* | | 283 | /* |
284 | * This hash function is the one used by the hardware TLB walker on the 7100LC. | | 284 | * This hash function is the one used by the hardware TLB walker on the 7100LC. |
285 | */ | | 285 | */ |
286 | static inline struct hpt_entry * | | 286 | static inline struct hpt_entry * |
287 | pmap_hash(pmap_t pmap, vaddr_t va) | | 287 | pmap_hash(pmap_t pmap, vaddr_t va) |
288 | { | | 288 | { |
289 | | | 289 | |
290 | return (struct hpt_entry *)(pmap_hpt + | | 290 | return (struct hpt_entry *)(pmap_hpt + |
291 | (((va >> 8) ^ (pmap->pm_space << 9)) & (pmap_hptsize - 1))); | | 291 | (((va >> 8) ^ (pmap->pm_space << 9)) & (pmap_hptsize - 1))); |
292 | } | | 292 | } |
293 | | | 293 | |
294 | static inline uint32_t | | 294 | static inline uint32_t |
295 | pmap_vtag(pmap_t pmap, vaddr_t va) | | 295 | pmap_vtag(pmap_t pmap, vaddr_t va) |
296 | { | | 296 | { |
297 | | | 297 | |
298 | return (0x80000000 | (pmap->pm_space & 0xffff) | | | 298 | return (0x80000000 | (pmap->pm_space & 0xffff) | |
299 | ((va >> 1) & 0x7fff0000)); | | 299 | ((va >> 1) & 0x7fff0000)); |
300 | } | | 300 | } |
301 | #endif | | 301 | #endif |
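[Editor's note, not part of the revision: to make the 7100LC walker's hash concrete, a hedged standalone sketch. PAGE_SIZE and the 16-byte sizeof(struct hpt_entry) are assumptions taken from the surrounding code; the sample space/va pair is hypothetical. With the default pmap_hptsize of 16 pages the table holds hptsize / 16 entries, and the slot is chosen by folding the space ID into the per-page VPN bits.]

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SIZE	4096	/* assumed */
	#define HPT_ENTRY_SIZE	16	/* assumed sizeof(struct hpt_entry) */

	int
	main(void)
	{
		uint32_t hptsize = 16 * PAGE_SIZE;		/* pmap_hptsize default */
		uint32_t space = 0x0005, va = 0x00123000;	/* hypothetical mapping */

		/* Same fold as pmap_hash(): VPN bits xor'd with the space. */
		uint32_t off = ((va >> 8) ^ (space << 9)) & (hptsize - 1);

		/* Page-aligned va keeps off a multiple of the entry size. */
		printf("byte offset 0x%x -> entry %u of %u\n",
		    off, off / HPT_ENTRY_SIZE, hptsize / HPT_ENTRY_SIZE);
		return 0;
	}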
302 | | | 302 | |
303 | static inline void | | 303 | static inline void |
304 | pmap_sdir_set(pa_space_t space, volatile uint32_t *pd) | | 304 | pmap_sdir_set(pa_space_t space, volatile uint32_t *pd) |
305 | { | | 305 | { |
306 | volatile uint32_t *vtop; | | 306 | volatile uint32_t *vtop; |
307 | | | 307 | |
308 | mfctl(CR_VTOP, vtop); | | 308 | mfctl(CR_VTOP, vtop); |
309 | | | 309 | |
310 | KASSERT(vtop != NULL); | | 310 | KASSERT(vtop != NULL); |
311 | | | 311 | |
312 | vtop[space] = (uint32_t)pd; | | 312 | vtop[space] = (uint32_t)pd; |
313 | } | | 313 | } |
314 | | | 314 | |
315 | static inline uint32_t * | | 315 | static inline uint32_t * |
316 | pmap_sdir_get(pa_space_t space) | | 316 | pmap_sdir_get(pa_space_t space) |
317 | { | | 317 | { |
318 | uint32_t *vtop; | | 318 | uint32_t *vtop; |
319 | | | 319 | |
320 | mfctl(CR_VTOP, vtop); | | 320 | mfctl(CR_VTOP, vtop); |
321 | return ((uint32_t *)vtop[space]); | | 321 | return ((uint32_t *)vtop[space]); |
322 | } | | 322 | } |
323 | | | 323 | |
324 | static inline volatile pt_entry_t * | | 324 | static inline volatile pt_entry_t * |
325 | pmap_pde_get(volatile uint32_t *pd, vaddr_t va) | | 325 | pmap_pde_get(volatile uint32_t *pd, vaddr_t va) |
326 | { | | 326 | { |
327 | | | 327 | |
328 | return ((pt_entry_t *)pd[va >> 22]); | | 328 | return ((pt_entry_t *)pd[va >> 22]); |
329 | } | | 329 | } |
330 | | | 330 | |
331 | static inline void | | 331 | static inline void |
332 | pmap_pde_set(pmap_t pm, vaddr_t va, paddr_t ptp) | | 332 | pmap_pde_set(pmap_t pm, vaddr_t va, paddr_t ptp) |
333 | { | | 333 | { |
334 | | | 334 | |
335 | DPRINTF(PDB_FOLLOW|PDB_VP, | | 335 | DPRINTF(PDB_FOLLOW|PDB_VP, |
336 | ("%s(%p, 0x%lx, 0x%lx)\n", __func__, pm, va, ptp)); | | 336 | ("%s(%p, 0x%lx, 0x%lx)\n", __func__, pm, va, ptp)); |
337 | | | 337 | |
338 | KASSERT((ptp & PGOFSET) == 0); | | 338 | KASSERT((ptp & PGOFSET) == 0); |
339 | | | 339 | |
340 | pm->pm_pdir[va >> 22] = ptp; | | 340 | pm->pm_pdir[va >> 22] = ptp; |
341 | } | | 341 | } |
342 | | | 342 | |
343 | static inline pt_entry_t * | | 343 | static inline pt_entry_t * |
344 | pmap_pde_alloc(pmap_t pm, vaddr_t va, struct vm_page **pdep) | | 344 | pmap_pde_alloc(pmap_t pm, vaddr_t va, struct vm_page **pdep) |
345 | { | | 345 | { |
346 | struct vm_page *pg; | | 346 | struct vm_page *pg; |
347 | paddr_t pa; | | 347 | paddr_t pa; |
348 | | | 348 | |
349 | DPRINTF(PDB_FOLLOW|PDB_VP, | | 349 | DPRINTF(PDB_FOLLOW|PDB_VP, |
350 | ("%s(%p, 0x%lx, %p)\n", __func__, pm, va, pdep)); | | 350 | ("%s(%p, 0x%lx, %p)\n", __func__, pm, va, pdep)); |
351 | | | 351 | |
352 | KASSERT(pm != pmap_kernel()); | | 352 | KASSERT(pm != pmap_kernel()); |
353 | KASSERT(rw_write_held(pm->pm_lock)); | | 353 | KASSERT(rw_write_held(pm->pm_lock)); |
354 | | | 354 | |
355 | pg = pmap_pagealloc(&pm->pm_obj, va); | | 355 | pg = pmap_pagealloc(&pm->pm_obj, va); |
356 | | | 356 | |
357 | if (pg == NULL) | | 357 | if (pg == NULL) |
358 | return NULL; | | 358 | return NULL; |
359 | | | 359 | |
360 | pa = VM_PAGE_TO_PHYS(pg); | | 360 | pa = VM_PAGE_TO_PHYS(pg); |
361 | | | 361 | |
362 | DPRINTF(PDB_FOLLOW|PDB_VP, ("%s: pde %lx\n", __func__, pa)); | | 362 | DPRINTF(PDB_FOLLOW|PDB_VP, ("%s: pde %lx\n", __func__, pa)); |
363 | | | 363 | |
364 | pg->flags &= ~PG_BUSY; /* never busy */ | | 364 | pg->flags &= ~PG_BUSY; /* never busy */ |
365 | pg->wire_count = 1; /* no mappings yet */ | | 365 | pg->wire_count = 1; /* no mappings yet */ |
366 | pmap_pde_set(pm, va, pa); | | 366 | pmap_pde_set(pm, va, pa); |
367 | pm->pm_stats.resident_count++; /* count PTP as resident */ | | 367 | pm->pm_stats.resident_count++; /* count PTP as resident */ |
368 | pm->pm_ptphint = pg; | | 368 | pm->pm_ptphint = pg; |
369 | if (pdep) | | 369 | if (pdep) |
370 | *pdep = pg; | | 370 | *pdep = pg; |
371 | return ((pt_entry_t *)pa); | | 371 | return ((pt_entry_t *)pa); |
372 | } | | 372 | } |
373 | | | 373 | |
374 | static inline struct vm_page * | | 374 | static inline struct vm_page * |
375 | pmap_pde_ptp(pmap_t pm, volatile pt_entry_t *pde) | | 375 | pmap_pde_ptp(pmap_t pm, volatile pt_entry_t *pde) |
376 | { | | 376 | { |
377 | paddr_t pa = (paddr_t)pde; | | 377 | paddr_t pa = (paddr_t)pde; |
378 | | | 378 | |
379 | DPRINTF(PDB_FOLLOW|PDB_PV, ("%s(%p, %p)\n", __func__, pm, pde)); | | 379 | DPRINTF(PDB_FOLLOW|PDB_PV, ("%s(%p, %p)\n", __func__, pm, pde)); |
380 | | | 380 | |
381 | if (pm->pm_ptphint && VM_PAGE_TO_PHYS(pm->pm_ptphint) == pa) | | 381 | if (pm->pm_ptphint && VM_PAGE_TO_PHYS(pm->pm_ptphint) == pa) |
382 | return (pm->pm_ptphint); | | 382 | return (pm->pm_ptphint); |
383 | | | 383 | |
384 | DPRINTF(PDB_FOLLOW|PDB_PV, ("%s: lookup 0x%lx\n", __func__, pa)); | | 384 | DPRINTF(PDB_FOLLOW|PDB_PV, ("%s: lookup 0x%lx\n", __func__, pa)); |
385 | | | 385 | |
386 | return (PHYS_TO_VM_PAGE(pa)); | | 386 | return (PHYS_TO_VM_PAGE(pa)); |
387 | } | | 387 | } |
388 | | | 388 | |
389 | static inline void | | 389 | static inline void |
390 | pmap_pde_release(pmap_t pmap, vaddr_t va, struct vm_page *ptp) | | 390 | pmap_pde_release(pmap_t pmap, vaddr_t va, struct vm_page *ptp) |
391 | { | | 391 | { |
392 | | | 392 | |
393 | DPRINTF(PDB_FOLLOW|PDB_PV, | | 393 | DPRINTF(PDB_FOLLOW|PDB_PV, |
394 | ("%s(%p, 0x%lx, %p)\n", __func__, pmap, va, ptp)); | | 394 | ("%s(%p, 0x%lx, %p)\n", __func__, pmap, va, ptp)); |
395 | | | 395 | |
396 | KASSERT(pmap != pmap_kernel()); | | 396 | KASSERT(pmap != pmap_kernel()); |
397 | if (--ptp->wire_count <= 1) { | | 397 | if (--ptp->wire_count <= 1) { |
398 | DPRINTF(PDB_FOLLOW|PDB_PV, | | 398 | DPRINTF(PDB_FOLLOW|PDB_PV, |
399 | ("%s: disposing ptp %p\n", __func__, ptp)); | | 399 | ("%s: disposing ptp %p\n", __func__, ptp)); |
400 | pmap_pde_set(pmap, va, 0); | | 400 | pmap_pde_set(pmap, va, 0); |
401 | pmap->pm_stats.resident_count--; | | 401 | pmap->pm_stats.resident_count--; |
402 | if (pmap->pm_ptphint == ptp) | | 402 | if (pmap->pm_ptphint == ptp) |
403 | pmap->pm_ptphint = NULL; | | 403 | pmap->pm_ptphint = NULL; |
404 | ptp->wire_count = 0; | | 404 | ptp->wire_count = 0; |
405 | | | 405 | |
406 | KASSERT((ptp->flags & PG_BUSY) == 0); | | 406 | KASSERT((ptp->flags & PG_BUSY) == 0); |
407 | | | 407 | |
408 | pmap_pagefree(ptp); | | 408 | pmap_pagefree(ptp); |
409 | } | | 409 | } |
410 | } | | 410 | } |
411 | | | 411 | |
412 | static inline pt_entry_t | | 412 | static inline pt_entry_t |
413 | pmap_pte_get(volatile pt_entry_t *pde, vaddr_t va) | | 413 | pmap_pte_get(volatile pt_entry_t *pde, vaddr_t va) |
414 | { | | 414 | { |
415 | | | 415 | |
416 | return (pde[(va >> 12) & 0x3ff]); | | 416 | return (pde[(va >> 12) & 0x3ff]); |
417 | } | | 417 | } |
418 | | | 418 | |
419 | static inline void | | 419 | static inline void |
420 | pmap_pte_set(volatile pt_entry_t *pde, vaddr_t va, pt_entry_t pte) | | 420 | pmap_pte_set(volatile pt_entry_t *pde, vaddr_t va, pt_entry_t pte) |
421 | { | | 421 | { |
422 | | | 422 | |
423 | DPRINTF(PDB_FOLLOW|PDB_VP, ("%s(%p, 0x%lx, 0x%x)\n", | | 423 | DPRINTF(PDB_FOLLOW|PDB_VP, ("%s(%p, 0x%lx, 0x%x)\n", |
424 | __func__, pde, va, pte)); | | 424 | __func__, pde, va, pte)); |
425 | | | 425 | |
426 | KASSERT(pde != NULL); | | 426 | KASSERT(pde != NULL); |
427 | KASSERT(((paddr_t)pde & PGOFSET) == 0); | | 427 | KASSERT(((paddr_t)pde & PGOFSET) == 0); |
428 | | | 428 | |
429 | pde[(va >> 12) & 0x3ff] = pte; | | 429 | pde[(va >> 12) & 0x3ff] = pte; |
430 | } | | 430 | } |
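[Editor's note, not part of the revision: pmap_pde_get() and pmap_pte_get() together decode a 32-bit virtual address as a 10/10/12 split: the top 10 bits index the 1024-entry page directory (one PDE covers 4 MiB), the next 10 bits index the page of PTEs, and the low 12 bits are the page offset (PAGE_SIZE of 4096 assumed). A small sketch of that decomposition; the sample address is hypothetical.]

	#include <stdio.h>
	#include <stdint.h>

	int
	main(void)
	{
		uint32_t va = 0xc0123456;		/* hypothetical VA */

		uint32_t pde_idx = va >> 22;		/* pd[va >> 22] */
		uint32_t pte_idx = (va >> 12) & 0x3ff;	/* pde[(va >> 12) & 0x3ff] */
		uint32_t offset  = va & 0xfff;		/* page offset */

		/* Prints: pde 768 pte 291 off 0x456 */
		printf("pde %u pte %u off 0x%x\n", pde_idx, pte_idx, offset);
		return 0;
	}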
431 | | | 431 | |
432 | void | | 432 | void |
433 | pmap_pte_flush(pmap_t pmap, vaddr_t va, pt_entry_t pte) | | 433 | pmap_pte_flush(pmap_t pmap, vaddr_t va, pt_entry_t pte) |
434 | { | | 434 | { |
435 | | | 435 | |
436 | fdcache(pmap->pm_space, va, PAGE_SIZE); | | 436 | fdcache(pmap->pm_space, va, PAGE_SIZE); |
437 | if (pte & PTE_PROT(TLB_EXECUTE)) { | | 437 | if (pte & PTE_PROT(TLB_EXECUTE)) { |
438 | ficache(pmap->pm_space, va, PAGE_SIZE); | | 438 | ficache(pmap->pm_space, va, PAGE_SIZE); |
439 | pitlb(pmap->pm_space, va); | | 439 | pitlb(pmap->pm_space, va); |
440 | } | | 440 | } |
441 | pdtlb(pmap->pm_space, va); | | 441 | pdtlb(pmap->pm_space, va); |
442 | #ifdef USE_HPT | | 442 | #ifdef USE_HPT |
443 | if (pmap_hpt) { | | 443 | if (pmap_hpt) { |
444 | struct hpt_entry *hpt; | | 444 | struct hpt_entry *hpt; |
445 | hpt = pmap_hash(pmap, va); | | 445 | hpt = pmap_hash(pmap, va); |
446 | if (hpt->hpt_valid && | | 446 | if (hpt->hpt_valid && |
447 | hpt->hpt_space == pmap->pm_space && | | 447 | hpt->hpt_space == pmap->pm_space && |
448 | hpt->hpt_vpn == ((va >> 1) & 0x7fff0000)) | | 448 | hpt->hpt_vpn == ((va >> 1) & 0x7fff0000)) |
449 | hpt->hpt_space = 0xffff; | | 449 | hpt->hpt_space = 0xffff; |
450 | } | | 450 | } |
451 | #endif | | 451 | #endif |
452 | } | | 452 | } |
453 | | | 453 | |
454 | static inline pt_entry_t | | 454 | static inline pt_entry_t |
455 | pmap_vp_find(pmap_t pm, vaddr_t va) | | 455 | pmap_vp_find(pmap_t pm, vaddr_t va) |
456 | { | | 456 | { |
457 | volatile pt_entry_t *pde; | | 457 | volatile pt_entry_t *pde; |
458 | | | 458 | |
459 | if (!(pde = pmap_pde_get(pm->pm_pdir, va))) | | 459 | if (!(pde = pmap_pde_get(pm->pm_pdir, va))) |
460 | return (0); | | 460 | return (0); |
461 | | | 461 | |
462 | return (pmap_pte_get(pde, va)); | | 462 | return (pmap_pte_get(pde, va)); |
463 | } | | 463 | } |
464 | | | 464 | |
465 | #ifdef DDB | | 465 | #ifdef DDB |
466 | void | | 466 | void |
467 | pmap_dump_table(pa_space_t space, vaddr_t sva) | | 467 | pmap_dump_table(pa_space_t space, vaddr_t sva) |
468 | { | | 468 | { |
469 | char buf[64]; | | 469 | char buf[64]; |
470 | volatile pt_entry_t *pde = NULL; | | 470 | volatile pt_entry_t *pde = NULL; |
471 | vaddr_t va = sva; | | 471 | vaddr_t va = sva; |
472 | vaddr_t pdemask = 1; | | 472 | vaddr_t pdemask = 1; |
473 | pt_entry_t pte; | | 473 | pt_entry_t pte; |
474 | uint32_t *pd; | | 474 | uint32_t *pd; |
475 | | | 475 | |
476 | if (space > hppa_sid_max) | | 476 | if (space > hppa_sid_max) |
477 | return; | | 477 | return; |
478 | | | 478 | |
479 | pd = pmap_sdir_get(space); | | 479 | pd = pmap_sdir_get(space); |
480 | if (!pd) | | 480 | if (!pd) |
481 | return; | | 481 | return; |
482 | | | 482 | |
483 | do { | | 483 | do { |
484 | if (pdemask != (va & PDE_MASK)) { | | 484 | if (pdemask != (va & PDE_MASK)) { |
485 | pdemask = va & PDE_MASK; | | 485 | pdemask = va & PDE_MASK; |
486 | pde = pmap_pde_get(pd, va); | | 486 | pde = pmap_pde_get(pd, va); |
487 | if (!pde) { | | 487 | if (!pde) { |
488 | va = pdemask + PDE_SIZE; | | 488 | va = pdemask + PDE_SIZE; |
489 | continue; | | 489 | continue; |
490 | } | | 490 | } |
491 | db_printf("%x:%8p:\n", space, pde); | | 491 | db_printf("%x:%8p:\n", space, pde); |
492 | } | | 492 | } |
493 | | | 493 | |
494 | pte = pmap_pte_get(pde, va); | | 494 | pte = pmap_pte_get(pde, va); |
495 | if (pte) { | | 495 | if (pte) { |
496 | snprintb(buf, sizeof(buf), TLB_BITS, | | 496 | snprintb(buf, sizeof(buf), TLB_BITS, |
497 | TLB_PROT(pte & PAGE_MASK)); | | 497 | TLB_PROT(pte & PAGE_MASK)); |
498 | db_printf("0x%08lx-0x%08x:%s\n", va, pte & ~PAGE_MASK, | | 498 | db_printf("0x%08lx-0x%08x:%s\n", va, pte & ~PAGE_MASK, |
499 | buf); | | 499 | buf); |
500 | } | | 500 | } |
501 | va += PAGE_SIZE; | | 501 | va += PAGE_SIZE; |
502 | } while (va != 0); | | 502 | } while (va != 0); |
503 | } | | 503 | } |
504 | | | 504 | |
505 | void | | 505 | void |
506 | pmap_dump_pv(paddr_t pa) | | 506 | pmap_dump_pv(paddr_t pa) |
507 | { | | 507 | { |
508 | struct vm_page *pg; | | 508 | struct vm_page *pg; |
509 | struct vm_page_md *md; | | 509 | struct vm_page_md *md; |
510 | struct pv_entry *pve; | | 510 | struct pv_entry *pve; |
511 | | | 511 | |
512 | pg = PHYS_TO_VM_PAGE(pa); | | 512 | pg = PHYS_TO_VM_PAGE(pa); |
513 | if (pg == NULL) | | 513 | if (pg == NULL) |
514 | return; | | 514 | return; |
515 | | | 515 | |
516 | md = VM_PAGE_TO_MD(pg); | | 516 | md = VM_PAGE_TO_MD(pg); |
517 | db_printf("pg %p attr 0x%08x\n", pg, md->pvh_attrs); | | 517 | db_printf("pg %p attr 0x%08x\n", pg, md->pvh_attrs); |
518 | for (pve = md->pvh_list; pve; pve = pve->pv_next) | | 518 | for (pve = md->pvh_list; pve; pve = pve->pv_next) |
519 | db_printf("%x:%lx\n", pve->pv_pmap->pm_space, | | 519 | db_printf("%x:%lx\n", pve->pv_pmap->pm_space, |
520 | pve->pv_va & PV_VAMASK); | | 520 | pve->pv_va & PV_VAMASK); |
521 | } | | 521 | } |
522 | #endif | | 522 | #endif |
523 | | | 523 | |
524 | static int | | 524 | static int |
525 | pmap_check_alias(struct vm_page *pg, vaddr_t va, pt_entry_t pte) | | 525 | pmap_check_alias(struct vm_page *pg, vaddr_t va, pt_entry_t pte) |
526 | { | | 526 | { |
527 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); | | 527 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); |
528 | struct pv_entry *pve; | | 528 | struct pv_entry *pve; |
529 | int ret = 0; | | 529 | int ret = 0; |
530 | | | 530 | |
531 | /* check for non-equ aliased mappings */ | | 531 | /* check for non-equ aliased mappings */ |
532 | for (pve = md->pvh_list; pve; pve = pve->pv_next) { | | 532 | for (pve = md->pvh_list; pve; pve = pve->pv_next) { |
533 | vaddr_t pva = pve->pv_va & PV_VAMASK; | | 533 | vaddr_t pva = pve->pv_va & PV_VAMASK; |
534 | | | 534 | |
535 | pte |= pmap_vp_find(pve->pv_pmap, pva); | | 535 | pte |= pmap_vp_find(pve->pv_pmap, pva); |
536 | if ((va & HPPA_PGAOFF) != (pva & HPPA_PGAOFF) && | | 536 | if ((va & HPPA_PGAOFF) != (pva & HPPA_PGAOFF) && |
537 | (pte & PTE_PROT(TLB_WRITE))) { | | 537 | (pte & PTE_PROT(TLB_WRITE))) { |
538 | | | 538 | |
539 | DPRINTF(PDB_FOLLOW|PDB_ALIAS, | | 539 | DPRINTF(PDB_FOLLOW|PDB_ALIAS, |
540 | ("%s: aliased writable mapping 0x%x:0x%lx\n", | | 540 | ("%s: aliased writable mapping 0x%x:0x%lx\n", |
541 | __func__, pve->pv_pmap->pm_space, pve->pv_va)); | | 541 | __func__, pve->pv_pmap->pm_space, pve->pv_va)); |
542 | ret++; | | 542 | ret++; |
543 | } | | 543 | } |
544 | } | | 544 | } |
545 | | | 545 | |
546 | return (ret); | | 546 | return (ret); |
547 | } | | 547 | } |
548 | | | 548 | |
549 | /* | | 549 | /* |
550 | * This allocates and returns a new struct pv_entry. | | 550 | * This allocates and returns a new struct pv_entry. |
551 | */ | | 551 | */ |
552 | static inline struct pv_entry * | | 552 | static inline struct pv_entry * |
553 | pmap_pv_alloc(void) | | 553 | pmap_pv_alloc(void) |
554 | { | | 554 | { |
555 | struct pv_entry *pv; | | 555 | struct pv_entry *pv; |
556 | | | 556 | |
557 | DPRINTF(PDB_FOLLOW|PDB_PV, ("%s()\n", __func__)); | | 557 | DPRINTF(PDB_FOLLOW|PDB_PV, ("%s()\n", __func__)); |
558 | | | 558 | |
559 | pv = pool_get(&pmap_pv_pool, PR_NOWAIT); | | 559 | pv = pool_get(&pmap_pv_pool, PR_NOWAIT); |
560 | | | 560 | |
561 | DPRINTF(PDB_FOLLOW|PDB_PV, ("%s: %p\n", __func__, pv)); | | 561 | DPRINTF(PDB_FOLLOW|PDB_PV, ("%s: %p\n", __func__, pv)); |
562 | | | 562 | |
563 | return (pv); | | 563 | return (pv); |
564 | } | | 564 | } |
565 | | | 565 | |
566 | static inline void | | 566 | static inline void |
567 | pmap_pv_free(struct pv_entry *pv) | | 567 | pmap_pv_free(struct pv_entry *pv) |
568 | { | | 568 | { |
569 | | | 569 | |
570 | if (pv->pv_ptp) | | 570 | if (pv->pv_ptp) |
571 | pmap_pde_release(pv->pv_pmap, pv->pv_va & PV_VAMASK, | | 571 | pmap_pde_release(pv->pv_pmap, pv->pv_va & PV_VAMASK, |
572 | pv->pv_ptp); | | 572 | pv->pv_ptp); |
573 | | | 573 | |
574 | pool_put(&pmap_pv_pool, pv); | | 574 | pool_put(&pmap_pv_pool, pv); |
575 | } | | 575 | } |
576 | | | 576 | |
577 | static inline void | | 577 | static inline void |
578 | pmap_pv_enter(struct vm_page *pg, struct pv_entry *pve, pmap_t pm, | | 578 | pmap_pv_enter(struct vm_page *pg, struct pv_entry *pve, pmap_t pm, |
579 | vaddr_t va, struct vm_page *pdep, u_int flags) | | 579 | vaddr_t va, struct vm_page *pdep, u_int flags) |
580 | { | | 580 | { |
581 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); | | 581 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); |
582 | | | 582 | |
583 | DPRINTF(PDB_FOLLOW|PDB_PV, ("%s(%p, %p, %p, 0x%lx, %p, 0x%x)\n", | | 583 | DPRINTF(PDB_FOLLOW|PDB_PV, ("%s(%p, %p, %p, 0x%lx, %p, 0x%x)\n", |
584 | __func__, pg, pve, pm, va, pdep, flags)); | | 584 | __func__, pg, pve, pm, va, pdep, flags)); |
585 | | | 585 | |
586 | KASSERT(pm == pmap_kernel() || uvm_page_owner_locked_p(pg, true)); | | 586 | KASSERT(pm == pmap_kernel() || uvm_page_owner_locked_p(pg, true)); |
587 | | | 587 | |
588 | pve->pv_pmap = pm; | | 588 | pve->pv_pmap = pm; |
589 | pve->pv_va = va | flags; | | 589 | pve->pv_va = va | flags; |
590 | pve->pv_ptp = pdep; | | 590 | pve->pv_ptp = pdep; |
591 | pve->pv_next = md->pvh_list; | | 591 | pve->pv_next = md->pvh_list; |
592 | md->pvh_list = pve; | | 592 | md->pvh_list = pve; |
593 | } | | 593 | } |
594 | | | 594 | |
595 | static inline struct pv_entry * | | 595 | static inline struct pv_entry * |
596 | pmap_pv_remove(struct vm_page *pg, pmap_t pmap, vaddr_t va) | | 596 | pmap_pv_remove(struct vm_page *pg, pmap_t pmap, vaddr_t va) |
597 | { | | 597 | { |
598 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); | | 598 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); |
599 | struct pv_entry **pve, *pv; | | 599 | struct pv_entry **pve, *pv; |
600 | | | 600 | |
601 | KASSERT(pmap == pmap_kernel() || uvm_page_owner_locked_p(pg, true)); | | 601 | KASSERT(pmap == pmap_kernel() || uvm_page_owner_locked_p(pg, true)); |
602 | | | 602 | |
603 | for (pv = *(pve = &md->pvh_list); | | 603 | for (pv = *(pve = &md->pvh_list); |
604 | pv; pv = *(pve = &(*pve)->pv_next)) { | | 604 | pv; pv = *(pve = &(*pve)->pv_next)) { |
605 | if (pv->pv_pmap == pmap && (pv->pv_va & PV_VAMASK) == va) { | | 605 | if (pv->pv_pmap == pmap && (pv->pv_va & PV_VAMASK) == va) { |
606 | *pve = pv->pv_next; | | 606 | *pve = pv->pv_next; |
607 | break; | | 607 | break; |
608 | } | | 608 | } |
609 | } | | 609 | } |
610 | | | 610 | |
611 | if (IS_PVFEXEC_P(md->pvh_attrs)) { | | 611 | if (IS_PVFEXEC_P(md->pvh_attrs)) { |
612 | if (md->pvh_list == NULL) { | | 612 | if (md->pvh_list == NULL) { |
613 | md->pvh_attrs &= ~PVF_EXEC; | | 613 | md->pvh_attrs &= ~PVF_EXEC; |
614 | } else { | | 614 | } else { |
615 | pmap_syncicache_page(pg, pmap, va); | | 615 | pmap_syncicache_page(pg, pmap, va); |
616 | } | | 616 | } |
617 | } | | 617 | } |
618 | | | 618 | |
619 | return (pv); | | 619 | return (pv); |
620 | } | | 620 | } |
621 | | | 621 | |
622 | #define FIRST_16M atop(16 * 1024 * 1024) | | 622 | #define FIRST_16M atop(16 * 1024 * 1024) |
623 | | | 623 | |
624 | static void | | 624 | static void |
625 | pmap_page_physload(paddr_t spa, paddr_t epa) | | 625 | pmap_page_physload(paddr_t spa, paddr_t epa) |
626 | { | | 626 | { |
627 | | | 627 | |
628 | if (spa < FIRST_16M && epa <= FIRST_16M) { | | 628 | if (spa < FIRST_16M && epa <= FIRST_16M) { |
629 | DPRINTF(PDB_INIT, ("%s: phys segment 0x%05lx 0x%05lx\n", | | 629 | DPRINTF(PDB_INIT, ("%s: phys segment 0x%05lx 0x%05lx\n", |
630 | __func__, spa, epa)); | | 630 | __func__, spa, epa)); |
631 | | | 631 | |
632 | uvm_page_physload(spa, epa, spa, epa, VM_FREELIST_ISADMA); | | 632 | uvm_page_physload(spa, epa, spa, epa, VM_FREELIST_ISADMA); |
633 | } else if (spa < FIRST_16M && epa > FIRST_16M) { | | 633 | } else if (spa < FIRST_16M && epa > FIRST_16M) { |
634 | DPRINTF(PDB_INIT, ("%s: phys segment 0x%05lx 0x%05lx\n", | | 634 | DPRINTF(PDB_INIT, ("%s: phys segment 0x%05lx 0x%05lx\n", |
635 | __func__, spa, FIRST_16M)); | | 635 | __func__, spa, FIRST_16M)); |
636 | | | 636 | |
637 | uvm_page_physload(spa, FIRST_16M, spa, FIRST_16M, | | 637 | uvm_page_physload(spa, FIRST_16M, spa, FIRST_16M, |
638 | VM_FREELIST_ISADMA); | | 638 | VM_FREELIST_ISADMA); |
639 | | | 639 | |
640 | DPRINTF(PDB_INIT, ("%s: phys segment 0x%05lx 0x%05lx\n", | | 640 | DPRINTF(PDB_INIT, ("%s: phys segment 0x%05lx 0x%05lx\n", |
641 | __func__, FIRST_16M, epa)); | | 641 | __func__, FIRST_16M, epa)); |
642 | | | 642 | |
643 | uvm_page_physload(FIRST_16M, epa, FIRST_16M, epa, | | 643 | uvm_page_physload(FIRST_16M, epa, FIRST_16M, epa, |
644 | VM_FREELIST_DEFAULT); | | 644 | VM_FREELIST_DEFAULT); |
645 | } else { | | 645 | } else { |
646 | DPRINTF(PDB_INIT, ("%s: phys segment 0x%05lx 0x%05lx\n", | | 646 | DPRINTF(PDB_INIT, ("%s: phys segment 0x%05lx 0x%05lx\n", |
647 | __func__, spa, epa)); | | 647 | __func__, spa, epa)); |
648 | | | 648 | |
649 | uvm_page_physload(spa, epa, spa, epa, VM_FREELIST_DEFAULT); | | 649 | uvm_page_physload(spa, epa, spa, epa, VM_FREELIST_DEFAULT); |
650 | } | | 650 | } |
651 | | | 651 | |
652 | availphysmem += epa - spa; | | 652 | availphysmem += epa - spa; |
653 | } | | 653 | } |
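[Editor's note, not part of the revision: a worked example of the split above, on the reading that spa/epa here are page frame numbers, since FIRST_16M is atop(16 MB), i.e. page 0x1000 with the assumed 4 KiB pages. A hypothetical segment of pages [0x0a00, 0x2000) straddles FIRST_16M, so it is loaded as two segments: pages [0x0a00, 0x1000) on VM_FREELIST_ISADMA and pages [0x1000, 0x2000) on VM_FREELIST_DEFAULT; either way availphysmem grows by the full epa - spa = 0x1600 pages (22 MiB).]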
654 | | | 654 | |
655 | /* | | 655 | /* |
656 | * Bootstrap the system enough to run with virtual memory. | | 656 | * Bootstrap the system enough to run with virtual memory. |
657 | * Map the kernel's code, data and bss, and allocate the system page table. | | 657 | * Map the kernel's code, data and bss, and allocate the system page table. |
658 | * Called with mapping OFF. | | 658 | * Called with mapping OFF. |
659 | * | | 659 | * |
660 | * Parameters: | | 660 | * Parameters: |
661 | * vstart PA of first available physical page | | 661 | * vstart PA of first available physical page |
662 | */ | | 662 | */ |
663 | void | | 663 | void |
664 | pmap_bootstrap(vaddr_t vstart) | | 664 | pmap_bootstrap(vaddr_t vstart) |
665 | { | | 665 | { |
666 | vaddr_t va, addr; | | 666 | vaddr_t va, addr; |
667 | vsize_t size; | | 667 | vsize_t size; |
668 | extern paddr_t hppa_vtop; | | 668 | extern paddr_t hppa_vtop; |
669 | pmap_t kpm; | | 669 | pmap_t kpm; |
670 | int npdes, nkpdes; | | 670 | int npdes, nkpdes; |
671 | extern int resvphysmem; | | 671 | extern int resvphysmem; |
672 | vsize_t btlb_entry_min, btlb_entry_max, btlb_entry_got; | | 672 | vsize_t btlb_entry_min, btlb_entry_max, btlb_entry_got; |
673 | paddr_t ksrx, kerx, ksro, kero, ksrw, kerw; | | 673 | paddr_t ksrx, kerx, ksro, kero, ksrw, kerw; |
674 | extern int usebtlb; | | 674 | extern int usebtlb; |
675 | | | 675 | |
676 | /* Provided by the linker script */ | | 676 | /* Provided by the linker script */ |
677 | extern int kernel_text, etext; | | 677 | extern int kernel_text, etext; |
678 | extern int __rodata_start, __rodata_end; | | 678 | extern int __rodata_start, __rodata_end; |
679 | extern int __data_start; | | 679 | extern int __data_start; |
680 | | | 680 | |
681 | DPRINTF(PDB_FOLLOW|PDB_INIT, ("%s(0x%lx)\n", __func__, vstart)); | | 681 | DPRINTF(PDB_FOLLOW|PDB_INIT, ("%s(0x%lx)\n", __func__, vstart)); |
682 | | | 682 | |
683 | uvm_md_init(); | | 683 | uvm_md_init(); |
684 | | | 684 | |
685 | hppa_prot[UVM_PROT_NONE] = TLB_AR_NA; | | 685 | hppa_prot[UVM_PROT_NONE] = TLB_AR_NA; |
686 | hppa_prot[UVM_PROT_READ] = TLB_AR_R; | | 686 | hppa_prot[UVM_PROT_READ] = TLB_AR_R; |
687 | hppa_prot[UVM_PROT_WRITE] = TLB_AR_RW; | | 687 | hppa_prot[UVM_PROT_WRITE] = TLB_AR_RW; |
688 | hppa_prot[UVM_PROT_RW] = TLB_AR_RW; | | 688 | hppa_prot[UVM_PROT_RW] = TLB_AR_RW; |
689 | hppa_prot[UVM_PROT_EXEC] = TLB_AR_RX; | | 689 | hppa_prot[UVM_PROT_EXEC] = TLB_AR_RX; |
690 | hppa_prot[UVM_PROT_RX] = TLB_AR_RX; | | 690 | hppa_prot[UVM_PROT_RX] = TLB_AR_RX; |
691 | hppa_prot[UVM_PROT_WX] = TLB_AR_RWX; | | 691 | hppa_prot[UVM_PROT_WX] = TLB_AR_RWX; |
692 | hppa_prot[UVM_PROT_RWX] = TLB_AR_RWX; | | 692 | hppa_prot[UVM_PROT_RWX] = TLB_AR_RWX; |
693 | | | 693 | |
694 | /* | | 694 | /* |
695 | * Initialize kernel pmap | | 695 | * Initialize kernel pmap |
696 | */ | | 696 | */ |
697 | addr = round_page(vstart); | | 697 | addr = round_page(vstart); |
698 | kpm = pmap_kernel(); | | 698 | kpm = pmap_kernel(); |
699 | memset(kpm, 0, sizeof(*kpm)); | | 699 | memset(kpm, 0, sizeof(*kpm)); |
700 | | | 700 | |
701 | rw_init(&kpm->pm_obj_lock); | | 701 | rw_init(&kpm->pm_obj_lock); |
702 | uvm_obj_init(&kpm->pm_obj, &pmap_pager, false, 1); | | 702 | uvm_obj_init(&kpm->pm_obj, &pmap_pager, false, 1); |
703 | uvm_obj_setlock(&kpm->pm_obj, &kpm->pm_obj_lock); | | 703 | uvm_obj_setlock(&kpm->pm_obj, &kpm->pm_obj_lock); |
704 | | | 704 | |
705 | kpm->pm_space = HPPA_SID_KERNEL; | | 705 | kpm->pm_space = HPPA_SID_KERNEL; |
706 | kpm->pm_pid = HPPA_PID_KERNEL; | | 706 | kpm->pm_pid = HPPA_PID_KERNEL; |
707 | kpm->pm_pdir_pg = NULL; | | 707 | kpm->pm_pdir_pg = NULL; |
708 | kpm->pm_pdir = (uint32_t *)addr; | | 708 | kpm->pm_pdir = (uint32_t *)addr; |
709 | | | 709 | |
710 | memset((void *)addr, 0, PAGE_SIZE); | | 710 | memset((void *)addr, 0, PAGE_SIZE); |
711 | fdcache(HPPA_SID_KERNEL, addr, PAGE_SIZE); | | 711 | fdcache(HPPA_SID_KERNEL, addr, PAGE_SIZE); |
712 | addr += PAGE_SIZE; | | 712 | addr += PAGE_SIZE; |
713 | | | 713 | |
714 | /* | | 714 | /* |
715 | * Allocate various tables and structures. | | 715 | * Allocate various tables and structures. |
716 | */ | | 716 | */ |
717 | mtctl(addr, CR_VTOP); | | 717 | mtctl(addr, CR_VTOP); |
718 | hppa_vtop = addr; | | 718 | hppa_vtop = addr; |
719 | size = round_page((hppa_sid_max + 1) * 4); | | 719 | size = round_page((hppa_sid_max + 1) * 4); |
720 | memset((void *)addr, 0, size); | | 720 | memset((void *)addr, 0, size); |
721 | fdcache(HPPA_SID_KERNEL, addr, size); | | 721 | fdcache(HPPA_SID_KERNEL, addr, size); |
722 | DPRINTF(PDB_INIT, ("%s: vtop 0x%lx @ 0x%lx\n", __func__, size, | | 722 | DPRINTF(PDB_INIT, ("%s: vtop 0x%lx @ 0x%lx\n", __func__, size, |
723 | addr)); | | 723 | addr)); |
724 | | | 724 | |
725 | addr += size; | | 725 | addr += size; |
726 | pmap_sdir_set(HPPA_SID_KERNEL, kpm->pm_pdir); | | 726 | pmap_sdir_set(HPPA_SID_KERNEL, kpm->pm_pdir); |
727 | | | 727 | |
728 | /* | | 728 | /* |
729 | * cpuid() found out how big the HPT should be, so align addr to | | 729 | * cpuid() found out how big the HPT should be, so align addr to |
730 | * what will be its beginning. We don't waste the pages skipped | | 730 | * what will be its beginning. We don't waste the pages skipped |
731 | * for the alignment. | | 731 | * for the alignment. |
732 | */ | | 732 | */ |
733 | #ifdef USE_HPT | | 733 | #ifdef USE_HPT |
734 | if (pmap_hptsize) { | | 734 | if (pmap_hptsize) { |
735 | struct hpt_entry *hptp; | | 735 | struct hpt_entry *hptp; |
736 | int i, error; | | 736 | int i, error; |
737 | | | 737 | |
738 | if (addr & (pmap_hptsize - 1)) | | 738 | if (addr & (pmap_hptsize - 1)) |
739 | addr += pmap_hptsize; | | 739 | addr += pmap_hptsize; |
740 | addr &= ~(pmap_hptsize - 1); | | 740 | addr &= ~(pmap_hptsize - 1); |
741 | | | 741 | |
742 | memset((void *)addr, 0, pmap_hptsize); | | 742 | memset((void *)addr, 0, pmap_hptsize); |
743 | hptp = (struct hpt_entry *)addr; | | 743 | hptp = (struct hpt_entry *)addr; |
744 | for (i = pmap_hptsize / sizeof(struct hpt_entry); i--; ) { | | 744 | for (i = pmap_hptsize / sizeof(struct hpt_entry); i--; ) { |
745 | hptp[i].hpt_valid = 0; | | 745 | hptp[i].hpt_valid = 0; |
746 | hptp[i].hpt_space = 0xffff; | | 746 | hptp[i].hpt_space = 0xffff; |
747 | hptp[i].hpt_vpn = 0; | | 747 | hptp[i].hpt_vpn = 0; |
748 | } | | 748 | } |
749 | pmap_hpt = addr; | | 749 | pmap_hpt = addr; |
750 | addr += pmap_hptsize; | | 750 | addr += pmap_hptsize; |
751 | | | 751 | |
752 | DPRINTF(PDB_INIT, ("%s: hpt_table 0x%x @ 0x%lx\n", __func__, | | 752 | DPRINTF(PDB_INIT, ("%s: hpt_table 0x%x @ 0x%lx\n", __func__, |
753 | pmap_hptsize, addr)); | | 753 | pmap_hptsize, addr)); |
754 | | | 754 | |
755 | if ((error = (cpu_hpt_init)(pmap_hpt, pmap_hptsize)) < 0) { | | 755 | if ((error = (cpu_hpt_init)(pmap_hpt, pmap_hptsize)) < 0) { |
756 | printf("WARNING: HPT init error %d -- DISABLED\n", | | 756 | printf("WARNING: HPT init error %d -- DISABLED\n", |
757 | error); | | 757 | error); |
758 | pmap_hpt = 0; | | 758 | pmap_hpt = 0; |
759 | } else | | 759 | } else |
760 | DPRINTF(PDB_INIT, | | 760 | DPRINTF(PDB_INIT, |
761 | ("%s: HPT installed for %ld entries @ 0x%lx\n", | | 761 | ("%s: HPT installed for %ld entries @ 0x%lx\n", |
762 | __func__, pmap_hptsize / sizeof(struct hpt_entry), | | 762 | __func__, pmap_hptsize / sizeof(struct hpt_entry), |
763 | addr)); | | 763 | addr)); |
764 | } | | 764 | } |
765 | #endif | | 765 | #endif |
766 | | | 766 | |
767 | /* Setup vtop in lwp0 trapframe. */ | | 767 | /* Setup vtop in lwp0 trapframe. */ |
768 | lwp0.l_md.md_regs->tf_vtop = hppa_vtop; | | 768 | lwp0.l_md.md_regs->tf_vtop = hppa_vtop; |
769 | | | 769 | |
770 | /* Pre-allocate PDEs for kernel virtual */ | | 770 | /* Pre-allocate PDEs for kernel virtual */ |
771 | nkpdes = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / PDE_SIZE; | | 771 | nkpdes = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / PDE_SIZE; |
772 | /* ... and io space too */ | | 772 | /* ... and io space too */ |
773 | nkpdes += HPPA_IOLEN / PDE_SIZE; | | 773 | nkpdes += HPPA_IOLEN / PDE_SIZE; |
774 | /* ... and all physmem (VA == PA) */ | | 774 | /* ... and all physmem (VA == PA) */ |
775 | npdes = nkpdes + (physmem + atop(PDE_SIZE) - 1) / atop(PDE_SIZE); | | 775 | npdes = nkpdes + (physmem + atop(PDE_SIZE) - 1) / atop(PDE_SIZE); |
776 | | | 776 | |
777 | DPRINTF(PDB_INIT, ("%s: npdes %d\n", __func__, npdes)); | | 777 | DPRINTF(PDB_INIT, ("%s: npdes %d\n", __func__, npdes)); |
778 | | | 778 | |
779 | /* map the pdes */ | | 779 | /* map the pdes */ |
780 | for (va = 0; npdes--; va += PDE_SIZE, addr += PAGE_SIZE) { | | 780 | for (va = 0; npdes--; va += PDE_SIZE, addr += PAGE_SIZE) { |
781 | /* last nkpdes are for the kernel virtual */ | | 781 | /* last nkpdes are for the kernel virtual */ |
782 | if (npdes == nkpdes - 1) | | 782 | if (npdes == nkpdes - 1) |
783 | va = SYSCALLGATE; | | 783 | va = SYSCALLGATE; |
784 | if (npdes == HPPA_IOLEN / PDE_SIZE - 1) | | 784 | if (npdes == HPPA_IOLEN / PDE_SIZE - 1) |
785 | va = HPPA_IOBEGIN; | | 785 | va = HPPA_IOBEGIN; |
786 | /* now map the pde for the physmem */ | | 786 | /* now map the pde for the physmem */ |
787 | memset((void *)addr, 0, PAGE_SIZE); | | 787 | memset((void *)addr, 0, PAGE_SIZE); |
788 | DPRINTF(PDB_INIT|PDB_VP, | | 788 | DPRINTF(PDB_INIT|PDB_VP, |
789 | ("%s: pde premap 0x%08lx 0x%08lx\n", __func__, va, | | 789 | ("%s: pde premap 0x%08lx 0x%08lx\n", __func__, va, |
790 | addr)); | | 790 | addr)); |
791 | pmap_pde_set(kpm, va, addr); | | 791 | pmap_pde_set(kpm, va, addr); |
792 | kpm->pm_stats.resident_count++; /* count PTP as resident */ | | 792 | kpm->pm_stats.resident_count++; /* count PTP as resident */ |
793 | } | | 793 | } |
794 | | | 794 | |
795 | /* | | 795 | /* |
796 | * At this point we've finished reserving memory for the kernel. | | 796 | * At this point we've finished reserving memory for the kernel. |
797 | */ | | 797 | */ |
798 | /* XXXNH */ | | 798 | /* XXXNH */ |
799 | resvphysmem = atop(addr); | | 799 | resvphysmem = atop(addr); |
800 | | | 800 | |
801 | ksrx = (paddr_t) &kernel_text; | | 801 | ksrx = (paddr_t) &kernel_text; |
802 | kerx = (paddr_t) &etext; | | 802 | kerx = (paddr_t) &etext; |
803 | ksro = (paddr_t) &__rodata_start; | | 803 | ksro = (paddr_t) &__rodata_start; |
804 | kero = (paddr_t) &__rodata_end; | | 804 | kero = (paddr_t) &__rodata_end; |
805 | ksrw = (paddr_t) &__data_start; | | 805 | ksrw = (paddr_t) &__data_start; |
806 | kerw = addr; | | 806 | kerw = addr; |
807 | | | 807 | |
808 | /* | | 808 | /* |
809 | * The kernel text, data, and bss must be direct-mapped, | | 809 | * The kernel text, data, and bss must be direct-mapped, |
810 | * because the kernel often runs in physical mode, and | | 810 | * because the kernel often runs in physical mode, and |
811 | * anyways the loader loaded the kernel into physical | | 811 | * anyways the loader loaded the kernel into physical |
812 | * memory exactly where it was linked. | | 812 | * memory exactly where it was linked. |
813 | * | | 813 | * |
814 | * All memory already allocated after bss, either by | | 814 | * All memory already allocated after bss, either by |
815 | * our caller or by this function itself, must also be | | 815 | * our caller or by this function itself, must also be |
816 | * direct-mapped, because it's completely unmanaged | | 816 | * direct-mapped, because it's completely unmanaged |
817 | * and was allocated in physical mode. | | 817 | * and was allocated in physical mode. |
818 | * | | 818 | * |
819 | * BTLB entries are used to do this direct mapping. | | 819 | * BTLB entries are used to do this direct mapping. |
820 | * BTLB entries have a minimum and maximum possible size, | | 820 | * BTLB entries have a minimum and maximum possible size, |
821 | * and MD code gives us these sizes in units of pages. | | 821 | * and MD code gives us these sizes in units of pages. |
822 | */ | | 822 | */ |
823 | | | 823 | |
824 | btlb_entry_min = (vsize_t) hppa_btlb_size_min * PAGE_SIZE; | | 824 | btlb_entry_min = (vsize_t) hppa_btlb_size_min * PAGE_SIZE; |
825 | btlb_entry_max = (vsize_t) hppa_btlb_size_max * PAGE_SIZE; | | 825 | btlb_entry_max = (vsize_t) hppa_btlb_size_max * PAGE_SIZE; |

	/*
	 * To try to conserve BTLB entries, take a hint from how the
	 * kernel was linked: take the kernel text start as our
	 * effective minimum BTLB entry size, assuming that the data
	 * segment was also aligned to that size.
	 *
	 * In practice, linking the kernel at 2MB, and aligning the data
	 * segment to a 2MB boundary, should control well how much of
	 * the BTLB the pmap uses.  However, this code should not rely
	 * on this 2MB magic number, nor should it rely on the data
	 * segment being aligned at all.  This is to allow (smaller)
	 * kernels (linked lower) to work fine.
	 */
	btlb_entry_min = (vaddr_t) &kernel_text;
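	/*
	 * Note that this overrides the MD minimum computed above: a
	 * kernel linked at, say, 2MB will now build its text mappings
	 * from 2MB entries upward rather than from the hardware
	 * minimum.
	 */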

	if (usebtlb) {
#define BTLB_SET_SIZE 16
		vaddr_t btlb_entry_start[BTLB_SET_SIZE];
		vsize_t btlb_entry_size[BTLB_SET_SIZE];
		int btlb_entry_vm_prot[BTLB_SET_SIZE];
		int btlb_i;
		int btlb_j;

		/*
		 * Now make BTLB entries to direct-map the kernel text
		 * read- and execute-only as much as possible.  Note that
		 * if the data segment isn't nicely aligned, the last
		 * BTLB entry for the kernel text may also cover some of
		 * the data segment, meaning it will have to allow writing.
		 */
		addr = ksrx;

		DPRINTF(PDB_INIT,
		    ("%s: BTLB mapping text and rodata @ %p - %p\n", __func__,
		    (void *)addr, (void *)kero));

		btlb_j = 0;
		while (addr < (vaddr_t) kero) {

			/* Set up the next BTLB entry. */
			KASSERT(btlb_j < BTLB_SET_SIZE);
			btlb_entry_start[btlb_j] = addr;
			btlb_entry_size[btlb_j] = btlb_entry_min;
			btlb_entry_vm_prot[btlb_j] =
			    VM_PROT_READ | VM_PROT_EXECUTE;
			if (addr + btlb_entry_min > kero)
				btlb_entry_vm_prot[btlb_j] |= VM_PROT_WRITE;

			/* Coalesce BTLB entries whenever possible. */
			while (btlb_j > 0 &&
			    btlb_entry_vm_prot[btlb_j] ==
			    btlb_entry_vm_prot[btlb_j - 1] &&
			    btlb_entry_size[btlb_j] ==
			    btlb_entry_size[btlb_j - 1] &&
			    !(btlb_entry_start[btlb_j - 1] &
			    ((btlb_entry_size[btlb_j - 1] << 1) - 1)) &&
			    (btlb_entry_size[btlb_j - 1] << 1) <=
			    btlb_entry_max)
				btlb_entry_size[--btlb_j] <<= 1;
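
			/*
			 * Worked example (illustrative addresses): with a
			 * 2MB btlb_entry_min, two adjacent 2MB entries at
			 * 0x800000 and 0xa00000 with equal protection
			 * merge into one 4MB entry at 0x800000, which is
			 * 4MB-aligned; the merged entry may merge again
			 * with later neighbours, up to btlb_entry_max.
			 */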

			/* Move on. */
			addr =
			    btlb_entry_start[btlb_j] + btlb_entry_size[btlb_j];
			btlb_j++;
		}

		/*
		 * Now make BTLB entries to direct-map the kernel data,
		 * bss, and all of the preallocated space read-write.
		 *
		 * Note that, unlike above, we're not concerned with
		 * making these BTLB entries such that they finish as
		 * close as possible to the end of the space we need
		 * them to map.  Instead, to minimize the number of BTLB
		 * entries we need, we make them as large as possible.
		 * The only thing this wastes is kernel virtual space,
		 * which is plentiful.
		 */

		DPRINTF(PDB_INIT, ("%s: mapping data, bss, etc @ %p - %p\n",
		    __func__, (void *)addr, (void *)kerw));

		while (addr < kerw) {

			/* Make the next BTLB entry. */
			KASSERT(btlb_j < BTLB_SET_SIZE);
			size = btlb_entry_min;
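			/*
			 * Grow the entry in place instead of coalescing:
			 * keep doubling size for as long as the doubled
			 * entry stays naturally aligned at addr, stays
			 * below btlb_entry_max, and still has more of the
			 * region left to cover.
			 */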
			while ((addr + size) < kerw &&
			    (size << 1) < btlb_entry_max &&
			    !(addr & ((size << 1) - 1)))
				size <<= 1;
			btlb_entry_start[btlb_j] = addr;
			btlb_entry_size[btlb_j] = size;
			btlb_entry_vm_prot[btlb_j] =
			    VM_PROT_READ | VM_PROT_WRITE;

			/* Move on. */
			addr =
			    btlb_entry_start[btlb_j] + btlb_entry_size[btlb_j];
			btlb_j++;
		}

		/* Now insert all of the BTLB entries. */
		for (btlb_i = 0; btlb_i < btlb_j; btlb_i++) {
			int error;
			int prot;

			btlb_entry_got = btlb_entry_size[btlb_i];
			prot = btlb_entry_vm_prot[btlb_i];

			error = hppa_btlb_insert(kpm->pm_space,
			    btlb_entry_start[btlb_i], btlb_entry_start[btlb_i],
			    &btlb_entry_got,
			    kpm->pm_pid | pmap_prot(kpm, prot));

			if (error)
				panic("%s: cannot insert BTLB entry",
				    __func__);
			if (btlb_entry_got != btlb_entry_size[btlb_i])
				panic("%s: BTLB entry mapped wrong amount",
				    __func__);
		}

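		/*
		 * Round the end of the read-write region up to the end of
		 * the last BTLB entry: everything the final entry covers
		 * is direct-mapped, whether we asked for it or not.
		 */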
		kerw =
		    btlb_entry_start[btlb_j - 1] + btlb_entry_size[btlb_j - 1];
	}

	/*
	 * We now know the exact beginning of managed kernel virtual space.
	 *
	 * Finally, load physical pages into UVM.  There are three segments
	 * of pages.
	 */

	availphysmem = 0;

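	/*
	 * The three segments, reading the bounds off the calls below:
	 * from the end of the reserved region to the start of the
	 * kernel text, from the end of rodata to the start of the data
	 * segment, and from the end of the kernel's early allocations
	 * to the end of physical memory.
	 */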
	pmap_page_physload(resvmem, atop(ksrx));
	pmap_page_physload(atop(kero), atop(ksrw));
	pmap_page_physload(atop(kerw), physmem);

	mutex_init(&pmaps_lock, MUTEX_DEFAULT, IPL_NONE);

	/* TODO optimize/inline the kenter */
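	/*
	 * Direct-map (VA == PA) every physical page below the top of
	 * RAM, picking the protection from the region each page falls
	 * in: reserved memory and kernel text are read-execute, rodata
	 * is read-only, and everything else is read-write.  Under
	 * DIAGNOSTIC, the last page of lwp0's u-area is left unmapped,
	 * presumably as a stack red zone.
	 */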
	for (va = PAGE_SIZE; va < ptoa(physmem); va += PAGE_SIZE) {
		vm_prot_t prot = UVM_PROT_RW;

		if (va < resvmem)
			prot = UVM_PROT_RX;
		else if (va >= ksrx && va < kerx)
			prot = UVM_PROT_RX;
		else if (va >= ksro && va < kero)
			prot = UVM_PROT_R;
#ifdef DIAGNOSTIC
		else if (va == uvm_lwp_getuarea(&lwp0) + USPACE - PAGE_SIZE)
			prot = UVM_PROT_NONE;
#endif
		pmap_kenter_pa(va, va, prot, 0);
	}

	/* XXXNH update */
	DPRINTF(PDB_INIT, ("%s: mapped 0x%lx - 0x%lx\n", __func__, ksro,
	    kero));
	DPRINTF(PDB_INIT, ("%s: mapped 0x%lx - 0x%lx\n", __func__, ksrw,
	    kerw));

}

/*
 * Finishes the initialization of the pmap module.
 * This procedure is called from uvm_init() in uvm/uvm_init.c
 * to initialize any remaining data structures that the pmap module
 * needs to map virtual memory (VM is already ON).
 */
void
pmap_init(void)
{
	extern void gateway_page(void);
	volatile pt_entry_t *pde;

	DPRINTF(PDB_FOLLOW|PDB_INIT, ("%s()\n", __func__));

	sid_counter = HPPA_SID_KERNEL;

	pool_init(&pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pmappv",
	    &pool_allocator_nointr, IPL_NONE);

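	/*
	 * Keep the pv entry pool stocked: the low watermark makes the
	 * pool try to keep at least pmap_pvlowat free items on hand,
	 * and the high watermark caps how many idle items it retains.
	 */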
	pool_setlowat(&pmap_pv_pool, pmap_pvlowat);
	pool_sethiwat(&pmap_pv_pool, pmap_pvlowat * 32);

	/*
	 * Map the syscall gateway page once for everybody.
	 * NB: we'll have to remap the physical memory if we have any
	 * at the SYSCALLGATE address.
	 *
	 * No spls needed, since there are no interrupts yet.
	 */
	if (!(pde = pmap_pde_get(pmap_kernel()->pm_pdir, SYSCALLGATE)) &&
	    !(pde = pmap_pde_alloc(pmap_kernel(), SYSCALLGATE, NULL)))
		panic("pmap_init: cannot allocate pde");

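	/*
	 * On PA-RISC, a gateway page is what lets user code raise its
	 * privilege level to make a system call: TLB_GATE_PROT marks
	 * the page so that a gateway branch into it promotes the
	 * privilege of the executing thread.
	 */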
	pmap_pte_set(pde, SYSCALLGATE, (paddr_t)&gateway_page |
	    PTE_PROT(TLB_GATE_PROT));

	pmap_initialized = true;

	DPRINTF(PDB_FOLLOW|PDB_INIT, ("%s(): done\n", __func__));
}

/*
 * How much virtual space does this kernel have?
 */
void
pmap_virtual_space(vaddr_t *startp, vaddr_t *endp)
{

	*startp = SYSCALLGATE + PAGE_SIZE;
	*endp = VM_MAX_KERNEL_ADDRESS;
}

/*
 * pmap_create()
 *
 * Create and return a physical map.
 * The map is an actual physical map, and may be referenced by the hardware.
 */
pmap_t
pmap_create(void)
{
	pmap_t pmap;
	pa_space_t space;

	pmap = pool_get(&pmap_pool, PR_WAITOK);

	DPRINTF(PDB_FOLLOW|PDB_PMAP, ("%s: pmap = %p\n", __func__, pmap));

	rw_init(&pmap->pm_obj_lock);
	uvm_obj_init(&pmap->pm_obj, &pmap_pager, false, 1);
	uvm_obj_setlock(&pmap->pm_obj, &pmap->pm_obj_lock);

	mutex_enter(&pmaps_lock);

	/*
	 * Allocate space IDs for the pmap; we get the protection ID from
	 * this.  If all are allocated, there is nothing we can do.
	 */
	/* XXXNH can't this loop forever??? */
	for (space = sid_counter; pmap_sdir_get(space);
	    space = (space + 1) % hppa_sid_max)
		;

	if ((pmap->pm_pdir_pg = pmap_pagealloc(NULL, 0)) == NULL)
		panic("pmap_create: no pages");
	pmap->pm_ptphint = NULL;
	pmap->pm_pdir = (uint32_t *)VM_PAGE_TO_PHYS(pmap->pm_pdir_pg);
	pmap_sdir_set(space, pmap->pm_pdir);

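	/*
	 * The protection ID is derived from the space ID.  The shift
	 * left by one is presumably because the low bit of a PA-RISC
	 * protection ID register is the write-disable bit, leaving the
	 * ID proper in the upper bits.
	 */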
	pmap->pm_space = space;
	pmap->pm_pid = (space + 1) << 1;
