| @@ -1,1203 +1,1203 @@ | | | @@ -1,1203 +1,1203 @@ |
1 | /* $NetBSD: pmap.c,v 1.74 2011/11/27 21:33:19 reinoud Exp $ */ | | 1 | /* $NetBSD: pmap.c,v 1.75 2011/12/13 11:11:03 reinoud Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 2011 Reinoud Zandijk <reinoud@NetBSD.org> | | 4 | * Copyright (c) 2011 Reinoud Zandijk <reinoud@NetBSD.org> |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * Redistribution and use in source and binary forms, with or without | | 7 | * Redistribution and use in source and binary forms, with or without |
8 | * modification, are permitted provided that the following conditions | | 8 | * modification, are permitted provided that the following conditions |
9 | * are met: | | 9 | * are met: |
10 | * 1. Redistributions of source code must retain the above copyright | | 10 | * 1. Redistributions of source code must retain the above copyright |
11 | * notice, this list of conditions and the following disclaimer. | | 11 | * notice, this list of conditions and the following disclaimer. |
12 | * 2. Redistributions in binary form must reproduce the above copyright | | 12 | * 2. Redistributions in binary form must reproduce the above copyright |
13 | * notice, this list of conditions and the following disclaimer in the | | 13 | * notice, this list of conditions and the following disclaimer in the |
14 | * documentation and/or other materials provided with the distribution. | | 14 | * documentation and/or other materials provided with the distribution. |
15 | * | | 15 | * |
16 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 16 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
17 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 17 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
18 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 18 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
19 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 19 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
20 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 20 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
21 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 21 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
22 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 22 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
23 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 23 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
24 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 24 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
25 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 25 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
26 | * POSSIBILITY OF SUCH DAMAGE. | | 26 | * POSSIBILITY OF SUCH DAMAGE. |
27 | */ | | 27 | */ |
28 | | | 28 | |
29 | #include <sys/cdefs.h> | | 29 | #include <sys/cdefs.h> |
30 | __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.74 2011/11/27 21:33:19 reinoud Exp $"); | | 30 | __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.75 2011/12/13 11:11:03 reinoud Exp $"); |
31 | | | 31 | |
32 | #include "opt_memsize.h" | | 32 | #include "opt_memsize.h" |
33 | #include "opt_kmempages.h" | | 33 | #include "opt_kmempages.h" |
34 | | | 34 | |
35 | #include <sys/types.h> | | 35 | #include <sys/types.h> |
36 | #include <sys/param.h> | | 36 | #include <sys/param.h> |
37 | #include <sys/mutex.h> | | 37 | #include <sys/mutex.h> |
38 | #include <sys/buf.h> | | 38 | #include <sys/buf.h> |
39 | #include <sys/malloc.h> | | 39 | #include <sys/malloc.h> |
40 | #include <sys/pool.h> | | 40 | #include <sys/pool.h> |
41 | #include <machine/thunk.h> | | 41 | #include <machine/thunk.h> |
42 | | | 42 | |
43 | #include <uvm/uvm.h> | | 43 | #include <uvm/uvm.h> |
44 | | | 44 | |
45 | struct pv_entry { | | 45 | struct pv_entry { |
46 | struct pv_entry *pv_next; | | 46 | struct pv_entry *pv_next; |
47 | pmap_t pv_pmap; | | 47 | pmap_t pv_pmap; |
48 | uintptr_t pv_ppn; /* physical page number */ | | 48 | uintptr_t pv_ppn; /* physical page number */ |
49 | uintptr_t pv_lpn; /* logical page number */ | | 49 | uintptr_t pv_lpn; /* logical page number */ |
50 | vm_prot_t pv_prot; /* logical protection */ | | 50 | vm_prot_t pv_prot; /* logical protection */ |
51 | int pv_mmap_ppl; /* programmed protection */ | | 51 | int pv_mmap_ppl; /* programmed protection */ |
52 | uint8_t pv_vflags; /* per mapping flags */ | | 52 | uint8_t pv_vflags; /* per mapping flags */ |
53 | #define PV_WIRED 0x01 /* wired mapping */ | | 53 | #define PV_WIRED 0x01 /* wired mapping */ |
54 | #define PV_UNMANAGED 0x02 /* entered by pmap_kenter_ */ | | 54 | #define PV_UNMANAGED 0x02 /* entered by pmap_kenter_ */ |
55 | #define PV_MAPPEDIN 0x04 /* is actually mapped */ | | 55 | #define PV_MAPPEDIN 0x04 /* is actually mapped */ |
56 | uint8_t pv_pflags; /* per phys page flags */ | | 56 | uint8_t pv_pflags; /* per phys page flags */ |
57 | #define PV_REFERENCED 0x01 | | 57 | #define PV_REFERENCED 0x01 |
58 | #define PV_MODIFIED 0x02 | | 58 | #define PV_MODIFIED 0x02 |
59 | }; | | 59 | }; |
60 | | | 60 | |
61 | struct pmap { | | 61 | struct pmap { |
62 | int pm_count; | | 62 | int pm_count; |
63 | int pm_flags; | | 63 | int pm_flags; |
64 | #define PM_ACTIVE 0x01 | | 64 | #define PM_ACTIVE 0x01 |
65 | struct pmap_statistics pm_stats; | | 65 | struct pmap_statistics pm_stats; |
66 | struct pv_entry **pm_entries; | | 66 | struct pv_entry **pm_entries; |
67 | }; | | 67 | }; |
68 | | | 68 | |
69 | static struct pv_entry *pv_table; | | 69 | static struct pv_entry *pv_table; |
70 | static struct pmap pmap_kernel_store; | | 70 | static struct pmap pmap_kernel_store; |
71 | struct pmap * const kernel_pmap_ptr = &pmap_kernel_store; | | 71 | struct pmap * const kernel_pmap_ptr = &pmap_kernel_store; |
72 | | | 72 | |
73 | static pmap_t active_pmap = NULL; | | 73 | static pmap_t active_pmap = NULL; |
74 | | | 74 | |
75 | static char mem_name[20] = ""; | | 75 | static char mem_name[20] = ""; |
76 | static int mem_fh; | | 76 | static int mem_fh; |
77 | static void *mem_uvm; /* keeps all memory managed by UVM */ | | 77 | static void *mem_uvm; /* keeps all memory managed by UVM */ |
78 | | | 78 | |
79 | static int phys_npages = 0; | | 79 | static int phys_npages = 0; |
80 | static int pm_nentries = 0; | | 80 | static int pm_nentries = 0; |
81 | static uint64_t pm_entries_size = 0; | | 81 | static uint64_t pm_entries_size = 0; |
82 | | | 82 | |
83 | static struct pool pmap_pool; | | 83 | static struct pool pmap_pool; |
84 | | | 84 | |
85 | /* forwards */ | | 85 | /* forwards */ |
86 | void pmap_bootstrap(void); | | 86 | void pmap_bootstrap(void); |
87 | static void pmap_page_activate(struct pv_entry *pv); | | 87 | static void pmap_page_activate(struct pv_entry *pv); |
88 | static void pmap_page_deactivate(struct pv_entry *pv); | | 88 | static void pmap_page_deactivate(struct pv_entry *pv); |
89 | static void pv_update(struct pv_entry *pv); | | 89 | static void pv_update(struct pv_entry *pv); |
90 | static void pmap_update_page(uintptr_t ppn); | | 90 | static void pmap_update_page(uintptr_t ppn); |
91 | bool pmap_fault(pmap_t pmap, vaddr_t va, vm_prot_t *atype); | | 91 | bool pmap_fault(pmap_t pmap, vaddr_t va, vm_prot_t *atype); |
92 | | | 92 | |
93 | static struct pv_entry *pv_get(pmap_t pmap, uintptr_t ppn, uintptr_t lpn); | | 93 | static struct pv_entry *pv_get(pmap_t pmap, uintptr_t ppn, uintptr_t lpn); |
94 | static struct pv_entry *pv_alloc(void); | | 94 | static struct pv_entry *pv_alloc(void); |
95 | static void pv_free(struct pv_entry *pv); | | 95 | static void pv_free(struct pv_entry *pv); |
96 | static void pmap_deferred_init(void); | | 96 | static void pmap_deferred_init(void); |
97 | | | 97 | |
98 | extern void setup_signal_handlers(void); | | 98 | extern void setup_signal_handlers(void); |
99 | | | 99 | |
100 | /* exposed (e.g. to the signal handler) */ | | 100 | /* exposed (e.g. to the signal handler) */ |
101 | vaddr_t kmem_k_start, kmem_k_end; | | 101 | vaddr_t kmem_k_start, kmem_k_end; |
102 | vaddr_t kmem_ext_start, kmem_ext_end; | | 102 | vaddr_t kmem_ext_start, kmem_ext_end; |
103 | vaddr_t kmem_user_start, kmem_user_end; | | 103 | vaddr_t kmem_user_start, kmem_user_end; |
104 | vaddr_t kmem_ext_cur_start, kmem_ext_cur_end; | | 104 | vaddr_t kmem_ext_cur_start, kmem_ext_cur_end; |
105 | | | 105 | |
106 | /* amount of physical memory */ | | 106 | /* amount of physical memory */ |
107 | int physmem; | | 107 | int physmem; |
108 | int num_pv_entries = 0; | | 108 | int num_pv_entries = 0; |
109 | | | 109 | |
110 | #define SPARSE_MEMFILE | | 110 | #define SPARSE_MEMFILE |
111 | | | 111 | |
112 | static uint8_t mem_kvm[KVMSIZE + 2*PAGE_SIZE]; | | 112 | static uint8_t mem_kvm[KVMSIZE + 2*PAGE_SIZE]; |
113 | | | 113 | |
114 | void | | 114 | void |
115 | pmap_bootstrap(void) | | 115 | pmap_bootstrap(void) |
116 | { | | 116 | { |
117 | struct pmap *pmap; | | 117 | struct pmap *pmap; |
118 | paddr_t totmem_len; | | 118 | paddr_t totmem_len; |
119 | paddr_t fpos, file_len; | | 119 | paddr_t fpos, file_len; |
120 | paddr_t pv_fpos, pm_fpos; | | 120 | paddr_t pv_fpos, pm_fpos; |
121 | paddr_t wlen; | | 121 | paddr_t wlen; |
122 | paddr_t user_len, barrier_len; | | 122 | paddr_t user_len, barrier_len; |
123 | paddr_t pv_table_size; | | 123 | paddr_t pv_table_size; |
124 | vaddr_t free_start, free_end; | | 124 | vaddr_t free_start, free_end; |
125 | vaddr_t mpos; | | 125 | vaddr_t mpos; |
126 | paddr_t pa; | | 126 | paddr_t pa; |
127 | vaddr_t va; | | 127 | vaddr_t va; |
128 | uintptr_t pg; | | 128 | uintptr_t pg; |
129 | void *addr; | | 129 | void *addr; |
130 | int err; | | 130 | int err; |
131 | | | 131 | |
132 | extern void _start(void); /* start of kernel */ | | 132 | extern void _start(void); /* start of kernel */ |
133 | extern int etext; /* end of the kernel */ | | 133 | extern int etext; /* end of the kernel */ |
134 | extern int edata; /* end of the init. data segment */ | | 134 | extern int edata; /* end of the init. data segment */ |
135 | extern int end; /* end of bss */ | | 135 | extern int end; /* end of bss */ |
136 | vaddr_t vm_min_addr; | | 136 | vaddr_t vm_min_addr; |
137 | | | 137 | |
138 | vm_min_addr = thunk_get_vm_min_address(); | | 138 | vm_min_addr = thunk_get_vm_min_address(); |
139 | vm_min_addr = vm_min_addr < PAGE_SIZE ? PAGE_SIZE : vm_min_addr; | | 139 | vm_min_addr = vm_min_addr < PAGE_SIZE ? PAGE_SIZE : vm_min_addr; |
140 | | | 140 | |
141 | dprintf_debug("Information retrieved from system and elf image\n"); | | 141 | dprintf_debug("Information retrieved from system and elf image\n"); |
142 | dprintf_debug("min VM address at %p\n", (void *) vm_min_addr); | | 142 | dprintf_debug("min VM address at %p\n", (void *) vm_min_addr); |
143 | dprintf_debug("start kernel at %p\n", _start); | | 143 | dprintf_debug("start kernel at %p\n", _start); |
144 | dprintf_debug(" end kernel at %p\n", &etext); | | 144 | dprintf_debug(" end kernel at %p\n", &etext); |
145 | dprintf_debug(" end of init. data at %p\n", &edata); | | 145 | dprintf_debug(" end of init. data at %p\n", &edata); |
146 | dprintf_debug("1st end of data at %p\n", &end); | | 146 | dprintf_debug("1st end of data at %p\n", &end); |
147 | dprintf_debug("CUR end data at %p\n", thunk_sbrk(0)); | | 147 | dprintf_debug("CUR end data at %p\n", thunk_sbrk(0)); |
148 | | | 148 | |
149 | /* calculate kernel section (R-X) */ | | 149 | /* calculate kernel section (R-X) */ |
150 | kmem_k_start = (vaddr_t) PAGE_SIZE * (atop(_start) ); | | 150 | kmem_k_start = (vaddr_t) PAGE_SIZE * (atop(_start) ); |
151 | kmem_k_end = (vaddr_t) PAGE_SIZE * (atop(&etext) + 1); | | 151 | kmem_k_end = (vaddr_t) PAGE_SIZE * (atop(&etext) + 1); |
152 | | | 152 | |
153 | /* calculate total available memory space */ | | 153 | /* calculate total available memory space */ |
154 | totmem_len = (vaddr_t) mem_kvm + KVMSIZE; | | 154 | totmem_len = (vaddr_t) mem_kvm + KVMSIZE; |
155 | | | 155 | |
156 | /* calculate the number of available pages */ | | 156 | /* calculate the number of available pages */ |
157 | physmem = totmem_len / PAGE_SIZE; | | 157 | physmem = totmem_len / PAGE_SIZE; |
158 | | | 158 | |
159 | /* calculate memory lengths */ | | 159 | /* calculate memory lengths */ |
160 | barrier_len = 2 * 1024 * 1024; | | 160 | barrier_len = 2 * 1024 * 1024; |
161 | user_len = kmem_k_start - vm_min_addr - barrier_len; | | 161 | user_len = kmem_k_start - vm_min_addr - barrier_len; |
162 | | | 162 | |
163 | /* divide memory */ | | 163 | /* divide memory */ |
164 | mem_uvm = (void *) vm_min_addr; | | 164 | mem_uvm = (void *) vm_min_addr; |
165 | mpos = vm_min_addr; | | 165 | mpos = vm_min_addr; |
166 | | | 166 | |
167 | /* claim an area for userland (---/R--/RW-/RWX) */ | | 167 | /* claim an area for userland (---/R--/RW-/RWX) */ |
168 | kmem_user_start = mpos; | | 168 | kmem_user_start = mpos; |
169 | mpos += user_len; | | 169 | mpos += user_len; |
170 | kmem_user_end = mpos; | | 170 | kmem_user_end = mpos; |
171 | | | 171 | |
172 | /* calculate KVM section (RW-) */ | | 172 | /* calculate KVM section (RW-) */ |
173 | kmem_ext_start = round_page((vaddr_t) mem_kvm); | | 173 | kmem_ext_start = round_page((vaddr_t) mem_kvm); |
174 | mpos += KVMSIZE; | | 174 | mpos += KVMSIZE; |
175 | kmem_ext_end = mpos; | | 175 | kmem_ext_end = mpos; |
176 | | | 176 | |
177 | /* print summary */ | | 177 | /* print summary */ |
178 | aprint_verbose("\nMemory summary\n"); | | 178 | aprint_verbose("\nMemory summary\n"); |
179 | aprint_verbose("\tkmem_k_start\t%p\n", (void *) kmem_k_start); | | 179 | aprint_verbose("\tkmem_k_start\t%p\n", (void *) kmem_k_start); |
180 | aprint_verbose("\tkmem_k_end\t%p\n", (void *) kmem_k_end); | | 180 | aprint_verbose("\tkmem_k_end\t%p\n", (void *) kmem_k_end); |
181 | aprint_verbose("\tkmem_ext_start\t%p\n", (void *) kmem_ext_start); | | 181 | aprint_verbose("\tkmem_ext_start\t%p\n", (void *) kmem_ext_start); |
182 | aprint_verbose("\tkmem_ext_end\t%p\n", (void *) kmem_ext_end); | | 182 | aprint_verbose("\tkmem_ext_end\t%p\n", (void *) kmem_ext_end); |
183 | aprint_verbose("\tkmem_user_start\t%p\n", (void *) kmem_user_start); | | 183 | aprint_verbose("\tkmem_user_start\t%p\n", (void *) kmem_user_start); |
184 | aprint_verbose("\tkmem_user_end\t%p\n", (void *) kmem_user_end); | | 184 | aprint_verbose("\tkmem_user_end\t%p\n", (void *) kmem_user_end); |
185 | | | 185 | |
186 | aprint_verbose("\ttotmem_len\t%10d\n", (int) totmem_len); | | 186 | aprint_verbose("\ttotmem_len\t%10d\n", (int) totmem_len); |
187 | aprint_verbose("\tkvmsize\t\t%10d\n", (int) KVMSIZE); | | 187 | aprint_verbose("\tkvmsize\t\t%10d\n", (int) KVMSIZE); |
188 | aprint_verbose("\tuser_len\t%10d\n", (int) user_len); | | 188 | aprint_verbose("\tuser_len\t%10d\n", (int) user_len); |
189 | | | 189 | |
190 | aprint_verbose("\n\n"); | | 190 | aprint_verbose("\n\n"); |
191 | | | 191 | |
192 | /* protect user memory UVM area (---) */ | | 192 | /* protect user memory UVM area (---) */ |
193 | err = thunk_munmap(mem_uvm, kmem_user_end - vm_min_addr); | | 193 | err = thunk_munmap(mem_uvm, kmem_user_end - vm_min_addr); |
194 | if (err) | | 194 | if (err) |
195 | panic("pmap_bootstrap: userland uvm space protection " | | 195 | panic("pmap_bootstrap: userland uvm space protection " |
196 | "failed (%d)\n", thunk_geterrno()); | | 196 | "failed (%d)\n", thunk_geterrno()); |
197 | | | 197 | |
198 | /* protect kvm UVM area (---) */ | | 198 | /* protect kvm UVM area (---) */ |
199 | err = thunk_munmap((void *) kmem_ext_start, KVMSIZE); | | 199 | err = thunk_munmap((void *) kmem_ext_start, KVMSIZE); |
200 | if (err) | | 200 | if (err) |
201 | panic("pmap_bootstrap: kvm uvm space protection " | | 201 | panic("pmap_bootstrap: kvm uvm space protection " |
202 | "failed (%d)\n", thunk_geterrno()); | | 202 | "failed (%d)\n", thunk_geterrno()); |
203 | | | 203 | |
204 | dprintf_debug("Creating memory mapped backend\n"); | | 204 | dprintf_debug("Creating memory mapped backend\n"); |
205 | | | 205 | |
206 | /* create memory file since mmap/maccess can only operate on files */ | | 206 | /* create memory file since mmap/maccess can only operate on files */ |
207 | strlcpy(mem_name, "/tmp/netbsd.XXXXXX", sizeof(mem_name)); | | 207 | strlcpy(mem_name, "/tmp/netbsd.XXXXXX", sizeof(mem_name)); |
208 | mem_fh = thunk_mkstemp(mem_name); | | 208 | mem_fh = thunk_mkstemp(mem_name); |
209 | if (mem_fh < 0) | | 209 | if (mem_fh < 0) |
210 | panic("pmap_bootstrap: can't create memory file\n"); | | 210 | panic("pmap_bootstrap: can't create memory file\n"); |
211 | /* unlink the file so space is freed when we quit */ | | 211 | /* unlink the file so space is freed when we quit */ |
212 | if (thunk_unlink(mem_name) == -1) | | 212 | if (thunk_unlink(mem_name) == -1) |
213 | panic("pmap_bootstrap: can't unlink %s", mem_name); | | 213 | panic("pmap_bootstrap: can't unlink %s", mem_name); |
214 | | | 214 | |
215 | /* file_len is the backing store length, nothing to do with placement */ | | 215 | /* file_len is the backing store length, nothing to do with placement */ |
216 | file_len = totmem_len; | | 216 | file_len = totmem_len; |
217 | | | 217 | |
218 | #ifdef SPARSE_MEMFILE | | 218 | #ifdef SPARSE_MEMFILE |
219 | { | | 219 | { |
220 | char dummy; | | 220 | char dummy; |
221 | | | 221 | |
222 | wlen = thunk_pwrite(mem_fh, &dummy, 1, file_len - 1); | | 222 | wlen = thunk_pwrite(mem_fh, &dummy, 1, file_len - 1); |
223 | if (wlen != 1) | | 223 | if (wlen != 1) |
224 | panic("pmap_bootstrap: can't grow file\n"); | | 224 | panic("pmap_bootstrap: can't grow file\n"); |
225 | } | | 225 | } |
226 | #else | | 226 | #else |
227 | { | | 227 | { |
228 | void *block; | | 228 | void *block; |
229 | | | 229 | |
230 | printf("Creating memory file\r"); | | 230 | printf("Creating memory file\r"); |
231 | block = thunk_malloc(PAGE_SIZE); | | 231 | block = thunk_malloc(PAGE_SIZE); |
232 | if (!block) | | 232 | if (!block) |
233 | panic("pmap_bootstrap: can't malloc writeout block"); | | 233 | panic("pmap_bootstrap: can't malloc writeout block"); |
234 | | | 234 | |
235 | for (pg = 0; pg < file_len; pg += PAGE_SIZE) { | | 235 | for (pg = 0; pg < file_len; pg += PAGE_SIZE) { |
236 | wlen = thunk_pwrite(mem_fh, block, PAGE_SIZE, pg); | | 236 | wlen = thunk_pwrite(mem_fh, block, PAGE_SIZE, pg); |
237 | if (wlen != PAGE_SIZE) | | 237 | if (wlen != PAGE_SIZE) |
238 | panic("pmap_bootstrap: write fails, disc full?"); | | 238 | panic("pmap_bootstrap: write fails, disc full?"); |
239 | } | | 239 | } |
240 | thunk_free(block); | | 240 | thunk_free(block); |
241 | } | | 241 | } |
242 | #endif | | 242 | #endif |
243 | | | 243 | |
244 | /* protect the current kernel section */ | | 244 | /* protect the current kernel section */ |
245 | #if 0 | | 245 | #if 0 |
246 | int err; | | 246 | int err; |
247 | err = thunk_mprotect((void *) kmem_k_start, kmem_k_end - kmem_k_start, | | 247 | err = thunk_mprotect((void *) kmem_k_start, kmem_k_end - kmem_k_start, |
248 | THUNK_PROT_READ | THUNK_PROT_EXEC); | | 248 | THUNK_PROT_READ | THUNK_PROT_EXEC); |
249 | assert(err == 0); | | 249 | assert(err == 0); |
250 | #endif | | 250 | #endif |
251 | | | 251 | |
252 | /* set up pv_table; bootstrap problem! */ | | 252 | /* set up pv_table; bootstrap problem! */ |
253 | fpos = 0; | | 253 | fpos = 0; |
254 | free_start = fpos; /* in physical space ! */ | | 254 | free_start = fpos; /* in physical space ! */ |
255 | free_end = file_len; /* in physical space ! */ | | 255 | free_end = file_len; /* in physical space ! */ |
256 | | | 256 | |
257 | phys_npages = (free_end - free_start) / PAGE_SIZE; | | 257 | phys_npages = (free_end - free_start) / PAGE_SIZE; |
258 | pv_table_size = round_page(phys_npages * sizeof(struct pv_entry)); | | 258 | pv_table_size = round_page(phys_npages * sizeof(struct pv_entry)); |
259 | | | 259 | |
260 | dprintf_debug("claiming %"PRIu64" KB of pv_table for " | | 260 | dprintf_debug("claiming %"PRIu64" KB of pv_table for " |
261 | "%"PRIdPTR" pages of physical memory\n", | | 261 | "%"PRIdPTR" pages of physical memory\n", |
262 | (uint64_t) pv_table_size/1024, (uintptr_t) phys_npages); | | 262 | (uint64_t) pv_table_size/1024, (uintptr_t) phys_npages); |
263 | | | 263 | |
264 | kmem_ext_cur_start = kmem_ext_start; | | 264 | kmem_ext_cur_start = kmem_ext_start; |
265 | pv_fpos = fpos; | | 265 | pv_fpos = fpos; |
266 | pv_table = (struct pv_entry *) kmem_ext_cur_start; | | 266 | pv_table = (struct pv_entry *) kmem_ext_cur_start; |
267 | addr = thunk_mmap(pv_table, pv_table_size, | | 267 | addr = thunk_mmap(pv_table, pv_table_size, |
268 | THUNK_PROT_READ | THUNK_PROT_WRITE, | | 268 | THUNK_PROT_READ | THUNK_PROT_WRITE, |
269 | THUNK_MAP_FILE | THUNK_MAP_FIXED | THUNK_MAP_SHARED, | | 269 | THUNK_MAP_FILE | THUNK_MAP_FIXED | THUNK_MAP_SHARED, |
270 | mem_fh, pv_fpos); | | 270 | mem_fh, pv_fpos); |
271 | if (addr != (void *) kmem_ext_start) | | 271 | if (addr != (void *) kmem_ext_start) |
272 | panic("pmap_bootstrap: can't map in pv table\n"); | | 272 | panic("pmap_bootstrap: can't map in pv table\n"); |
273 | | | 273 | |
274 | memset(pv_table, 0, pv_table_size); /* test and clear */ | | 274 | memset(pv_table, 0, pv_table_size); /* test and clear */ |
275 | | | 275 | |
276 | dprintf_debug("pv_table initialiased correctly, mmap works\n"); | | 276 | dprintf_debug("pv_table initialiased correctly, mmap works\n"); |
277 | | | 277 | |
278 | /* advance */ | | 278 | /* advance */ |
279 | kmem_ext_cur_start += pv_table_size; | | 279 | kmem_ext_cur_start += pv_table_size; |
280 | fpos += pv_table_size; | | 280 | fpos += pv_table_size; |
281 | | | 281 | |
282 | /* set up kernel pmap */ | | 282 | /* set up kernel pmap */ |
283 | pm_nentries = (VM_MAX_ADDRESS - VM_MIN_ADDRESS) / PAGE_SIZE; | | 283 | pm_nentries = (VM_MAX_ADDRESS - VM_MIN_ADDRESS) / PAGE_SIZE; |
284 | pm_entries_size = round_page(pm_nentries * sizeof(struct pv_entry *)); | | 284 | pm_entries_size = round_page(pm_nentries * sizeof(struct pv_entry *)); |
285 | dprintf_debug("pmap va->pa lookup table is %"PRIu64" KB for %d logical pages\n", | | 285 | dprintf_debug("pmap va->pa lookup table is %"PRIu64" KB for %d logical pages\n", |
286 | pm_entries_size/1024, pm_nentries); | | 286 | pm_entries_size/1024, pm_nentries); |
287 | | | 287 | |
288 | pmap = pmap_kernel(); | | 288 | pmap = pmap_kernel(); |
289 | memset(pmap, 0, sizeof(*pmap)); | | 289 | memset(pmap, 0, sizeof(*pmap)); |
290 | pmap->pm_count = 1; /* reference */ | | 290 | pmap->pm_count = 1; /* reference */ |
291 | pmap->pm_flags = PM_ACTIVE; /* kernel pmap is allways active */ | | 291 | pmap->pm_flags = PM_ACTIVE; /* kernel pmap is allways active */ |
292 | pmap->pm_entries = (struct pv_entry **) kmem_ext_cur_start; | | 292 | pmap->pm_entries = (struct pv_entry **) kmem_ext_cur_start; |
293 | | | 293 | |
294 | pm_fpos = fpos; | | 294 | pm_fpos = fpos; |
295 | addr = thunk_mmap(pmap->pm_entries, pm_entries_size, | | 295 | addr = thunk_mmap(pmap->pm_entries, pm_entries_size, |
296 | THUNK_PROT_READ | THUNK_PROT_WRITE, | | 296 | THUNK_PROT_READ | THUNK_PROT_WRITE, |
297 | THUNK_MAP_FILE | THUNK_MAP_FIXED | THUNK_MAP_SHARED, | | 297 | THUNK_MAP_FILE | THUNK_MAP_FIXED | THUNK_MAP_SHARED, |
298 | mem_fh, pm_fpos); | | 298 | mem_fh, pm_fpos); |
299 | if (addr != (void *) pmap->pm_entries) | | 299 | if (addr != (void *) pmap->pm_entries) |
300 | panic("pmap_bootstrap: can't map in pmap entries\n"); | | 300 | panic("pmap_bootstrap: can't map in pmap entries\n"); |
301 | | | 301 | |
302 | memset(pmap->pm_entries, 0, pm_entries_size); /* test and clear */ | | 302 | memset(pmap->pm_entries, 0, pm_entries_size); /* test and clear */ |
303 | | | 303 | |
304 | dprintf_debug("kernel pmap entries initialiased correctly\n"); | | 304 | dprintf_debug("kernel pmap entries initialiased correctly\n"); |
305 | | | 305 | |
306 | /* advance */ | | 306 | /* advance */ |
307 | kmem_ext_cur_start += pm_entries_size; | | 307 | kmem_ext_cur_start += pm_entries_size; |
308 | fpos += pm_entries_size; | | 308 | fpos += pm_entries_size; |
309 | | | 309 | |
310 | /* kmem used [kmem_ext_start - kmem_ext_cur_start] */ | | 310 | /* kmem used [kmem_ext_start - kmem_ext_cur_start] */ |
311 | kmem_ext_cur_end = kmem_ext_cur_start; | | 311 | kmem_ext_cur_end = kmem_ext_cur_start; |
312 | | | 312 | |
313 | /* manually enter the mappings into the kernel map */ | | 313 | /* manually enter the mappings into the kernel map */ |
314 | for (pg = 0; pg < pv_table_size; pg += PAGE_SIZE) { | | 314 | for (pg = 0; pg < pv_table_size; pg += PAGE_SIZE) { |
315 | pa = pv_fpos + pg; | | 315 | pa = pv_fpos + pg; |
316 | va = (vaddr_t) pv_table + pg; | | 316 | va = (vaddr_t) pv_table + pg; |
317 | pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0); | | 317 | pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0); |
318 | } | | 318 | } |
319 | dprintf_debug("pv_table mem added to the kernel pmap\n"); | | 319 | dprintf_debug("pv_table mem added to the kernel pmap\n"); |
320 | for (pg = 0; pg < pm_entries_size; pg += PAGE_SIZE) { | | 320 | for (pg = 0; pg < pm_entries_size; pg += PAGE_SIZE) { |
321 | pa = pm_fpos + pg; | | 321 | pa = pm_fpos + pg; |
322 | va = (vaddr_t) pmap->pm_entries + pg; | | 322 | va = (vaddr_t) pmap->pm_entries + pg; |
323 | pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0); | | 323 | pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0); |
324 | } | | 324 | } |
325 | dprintf_debug("kernel pmap entries mem added to the kernel pmap\n"); | | 325 | dprintf_debug("kernel pmap entries mem added to the kernel pmap\n"); |
326 | | | 326 | |
327 | /* add file space to uvm's FREELIST */ | | 327 | /* add file space to uvm's FREELIST */ |
328 | /* XXX really from 0? or from fpos to have better stats */ | | 328 | /* XXX really from 0? or from fpos to have better stats */ |
329 | uvm_page_physload(atop(0), | | 329 | uvm_page_physload(atop(0), |
330 | atop(free_end), | | 330 | atop(free_end), |
331 | atop(free_start + fpos), /* mark used till fpos */ | | 331 | atop(free_start + fpos), /* mark used till fpos */ |
332 | atop(free_end), | | 332 | atop(free_end), |
333 | VM_FREELIST_DEFAULT); | | 333 | VM_FREELIST_DEFAULT); |
334 | | | 334 | |
335 | aprint_verbose("leaving pmap_bootstrap:\n"); | | 335 | aprint_verbose("leaving pmap_bootstrap:\n"); |
336 | aprint_verbose("\t%"PRIu64" MB of physical pages left\n", | | 336 | aprint_verbose("\t%"PRIu64" MB of physical pages left\n", |
337 | (uint64_t) (free_end - (free_start + fpos))/1024/1024); | | 337 | (uint64_t) (free_end - (free_start + fpos))/1024/1024); |
338 | aprint_verbose("\t%"PRIu64" MB of kmem left\n", | | 338 | aprint_verbose("\t%"PRIu64" MB of kmem left\n", |
339 | (uint64_t) (kmem_ext_end - kmem_ext_cur_end)/1024/1024); | | 339 | (uint64_t) (kmem_ext_end - kmem_ext_cur_end)/1024/1024); |
340 | | | 340 | |
341 | setup_signal_handlers(); | | 341 | setup_signal_handlers(); |
342 | } | | 342 | } |
343 | | | 343 | |
344 | void | | 344 | void |
345 | pmap_init(void) | | 345 | pmap_init(void) |
346 | { | | 346 | { |
347 | /* All deferred to pmap_create, because malloc() is nice. */ | | 347 | /* All deferred to pmap_create, because malloc() is nice. */ |
348 | } | | 348 | } |
349 | | | 349 | |
350 | /* return kernel space start and end (including growth) */ | | 350 | /* return kernel space start and end (including growth) */ |
351 | void | | 351 | void |
352 | pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp) | | 352 | pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp) |
353 | { | | 353 | { |
354 | *vstartp = kmem_ext_cur_start; /* min to map in */ | | 354 | *vstartp = kmem_ext_cur_start; /* min to map in */ |
355 | *vendp = kmem_ext_end; /* max available */ | | 355 | *vendp = kmem_ext_end; /* max available */ |
356 | } | | 356 | } |
357 | | | 357 | |
358 | static void | | 358 | static void |
359 | pmap_deferred_init(void) | | 359 | pmap_deferred_init(void) |
360 | { | | 360 | { |
361 | /* XXX we COULD realloc our pv_table etc with malloc() but for what? */ | | 361 | /* XXX we COULD realloc our pv_table etc with malloc() but for what? */ |
362 | | | 362 | |
363 | /* create pmap pool */ | | 363 | /* create pmap pool */ |
364 | pool_init(&pmap_pool, sizeof(struct pmap), 0, 0, 0, | | 364 | pool_init(&pmap_pool, sizeof(struct pmap), 0, 0, 0, |
365 | "pmappool", NULL, IPL_NONE); | | 365 | "pmappool", NULL, IPL_NONE); |
366 | } | | 366 | } |
367 | | | 367 | |
368 | pmap_t | | 368 | pmap_t |
369 | pmap_create(void) | | 369 | pmap_create(void) |
370 | { | | 370 | { |
371 | static int pmap_initialised = 0; | | 371 | static int pmap_initialised = 0; |
372 | struct pmap *pmap; | | 372 | struct pmap *pmap; |
373 | | | 373 | |
374 | if (!pmap_initialised) { | | 374 | if (!pmap_initialised) { |
375 | pmap_deferred_init(); | | 375 | pmap_deferred_init(); |
376 | pmap_initialised = 1; | | 376 | pmap_initialised = 1; |
377 | } | | 377 | } |
378 | | | 378 | |
379 | dprintf_debug("pmap_create\n"); | | 379 | dprintf_debug("pmap_create\n"); |
380 | pmap = pool_get(&pmap_pool, PR_WAITOK); | | 380 | pmap = pool_get(&pmap_pool, PR_WAITOK); |
381 | memset(pmap, 0, sizeof(*pmap)); | | 381 | memset(pmap, 0, sizeof(*pmap)); |
382 | | | 382 | |
383 | pmap->pm_count = 1; | | 383 | pmap->pm_count = 1; |
384 | pmap->pm_flags = 0; | | 384 | pmap->pm_flags = 0; |
385 | pmap->pm_entries = (struct pv_entry **) malloc( | | 385 | pmap->pm_entries = (struct pv_entry **) malloc( |
386 | pm_entries_size, M_VMPMAP, | | 386 | pm_entries_size, M_VMPMAP, |
387 | M_WAITOK | M_ZERO); | | 387 | M_WAITOK | M_ZERO); |
388 | dprintf_debug("\tpmap %p\n", pmap); | | 388 | dprintf_debug("\tpmap %p\n", pmap); |
389 | | | 389 | |
390 | return pmap; | | 390 | return pmap; |
391 | } | | 391 | } |
392 | | | 392 | |
393 | void | | 393 | void |
394 | pmap_destroy(pmap_t pmap) | | 394 | pmap_destroy(pmap_t pmap) |
395 | { | | 395 | { |
396 | int i; | | 396 | int i; |
397 | | | 397 | |
398 | /* if multiple references exist just remove a reference */ | | 398 | /* if multiple references exist just remove a reference */ |
399 | dprintf_debug("pmap_destroy %p\n", pmap); | | 399 | dprintf_debug("pmap_destroy %p\n", pmap); |
400 | if (--pmap->pm_count > 0) | | 400 | if (--pmap->pm_count > 0) |
401 | return; | | 401 | return; |
402 | | | 402 | |
403 | /* safeguard against silly errors */ | | 403 | /* safeguard against silly errors */ |
404 | KASSERT((pmap->pm_flags & PM_ACTIVE) == 0); | | 404 | KASSERT((pmap->pm_flags & PM_ACTIVE) == 0); |
405 | KASSERT(pmap->pm_stats.resident_count == 0); | | 405 | KASSERT(pmap->pm_stats.resident_count == 0); |
406 | KASSERT(pmap->pm_stats.wired_count == 0); | | 406 | KASSERT(pmap->pm_stats.wired_count == 0); |
407 | #ifdef DIAGNOSTIC | | 407 | #ifdef DIAGNOSTIC |
408 | for (i = 0; i < pm_nentries; i++) | | 408 | for (i = 0; i < pm_nentries; i++) |
409 | if (pmap->pm_entries[i] != NULL) | | 409 | if (pmap->pm_entries[i] != NULL) |
410 | panic("pmap_destroy: pmap isn't empty"); | | 410 | panic("pmap_destroy: pmap isn't empty"); |
411 | #endif | | 411 | #endif |
412 | free((void *)pmap->pm_entries, M_VMPMAP); | | 412 | free((void *)pmap->pm_entries, M_VMPMAP); |
413 | pool_put(&pmap_pool, pmap); | | 413 | pool_put(&pmap_pool, pmap); |
414 | } | | 414 | } |
415 | | | 415 | |
416 | void | | 416 | void |
417 | pmap_reference(pmap_t pmap) | | 417 | pmap_reference(pmap_t pmap) |
418 | { | | 418 | { |
419 | dprintf_debug("pmap_reference %p\n", (void *) pmap); | | 419 | dprintf_debug("pmap_reference %p\n", (void *) pmap); |
420 | pmap->pm_count++; | | 420 | pmap->pm_count++; |
421 | } | | 421 | } |
422 | | | 422 | |
423 | long | | 423 | long |
424 | pmap_resident_count(pmap_t pmap) | | 424 | pmap_resident_count(pmap_t pmap) |
425 | { | | 425 | { |
426 | return pmap->pm_stats.resident_count; | | 426 | return pmap->pm_stats.resident_count; |
427 | } | | 427 | } |
428 | | | 428 | |
429 | long | | 429 | long |
430 | pmap_wired_count(pmap_t pmap) | | 430 | pmap_wired_count(pmap_t pmap) |
431 | { | | 431 | { |
432 | return pmap->pm_stats.wired_count; | | 432 | return pmap->pm_stats.wired_count; |
433 | } | | 433 | } |
434 | | | 434 | |
435 | static struct pv_entry * | | 435 | static struct pv_entry * |
436 | pv_alloc(void) | | 436 | pv_alloc(void) |
437 | { | | 437 | { |
438 | num_pv_entries++; | | 438 | num_pv_entries++; |
439 | return malloc(sizeof(struct pv_entry), M_VMPMAP, M_NOWAIT | M_ZERO); | | 439 | return malloc(sizeof(struct pv_entry), M_VMPMAP, M_NOWAIT | M_ZERO); |
440 | } | | 440 | } |
441 | | | 441 | |
442 | static void | | 442 | static void |
443 | pv_free(struct pv_entry *pv) | | 443 | pv_free(struct pv_entry *pv) |
444 | { | | 444 | { |
445 | num_pv_entries--; | | 445 | num_pv_entries--; |
446 | free(pv, M_VMPMAP); | | 446 | free(pv, M_VMPMAP); |
447 | } | | 447 | } |
448 | | | 448 | |
449 | static struct pv_entry * | | 449 | static struct pv_entry * |
450 | pv_get(pmap_t pmap, uintptr_t ppn, uintptr_t lpn) | | 450 | pv_get(pmap_t pmap, uintptr_t ppn, uintptr_t lpn) |
451 | { | | 451 | { |
452 | struct pv_entry *pv; | | 452 | struct pv_entry *pv; |
453 | | | 453 | |
454 | /* If the head entry's free, use that. */ | | 454 | /* If the head entry's free, use that. */ |
455 | pv = &pv_table[ppn]; | | 455 | pv = &pv_table[ppn]; |
456 | if (pv->pv_pmap == NULL) { | | 456 | if (pv->pv_pmap == NULL) { |
457 | pmap->pm_stats.resident_count++; | | 457 | pmap->pm_stats.resident_count++; |
458 | return pv; | | 458 | return pv; |
459 | } | | 459 | } |
460 | /* If this mapping exists already, use that. */ | | 460 | /* If this mapping exists already, use that. */ |
461 | for (pv = pv; pv != NULL; pv = pv->pv_next) { | | 461 | for (pv = pv; pv != NULL; pv = pv->pv_next) { |
462 | if ((pv->pv_pmap == pmap) && (pv->pv_lpn == lpn)) { | | 462 | if ((pv->pv_pmap == pmap) && (pv->pv_lpn == lpn)) { |
463 | return pv; | | 463 | return pv; |
464 | } | | 464 | } |
465 | } | | 465 | } |
466 | /* Otherwise, allocate a new entry and link it in after the head. */ | | 466 | /* Otherwise, allocate a new entry and link it in after the head. */ |
467 | dprintf_debug("pv_get: multiple mapped page ppn %"PRIdPTR", " | | 467 | dprintf_debug("pv_get: multiple mapped page ppn %"PRIdPTR", " |
468 | "lpn %"PRIdPTR"\n", ppn, lpn); | | 468 | "lpn %"PRIdPTR"\n", ppn, lpn); |
469 | | | 469 | |
470 | /* extra sanity */ | | 470 | /* extra sanity */ |
471 | assert(ppn < phys_npages); | | 471 | assert(ppn < phys_npages); |
472 | assert(ppn >= 0); | | 472 | assert(ppn >= 0); |
473 | | | 473 | |
474 | pv = pv_alloc(); | | 474 | pv = pv_alloc(); |
475 | if (pv == NULL) | | 475 | if (pv == NULL) |
476 | return NULL; | | 476 | return NULL; |
477 | | | 477 | |
478 | pv->pv_next = pv_table[ppn].pv_next; | | 478 | pv->pv_next = pv_table[ppn].pv_next; |
479 | pv_table[ppn].pv_next = pv; | | 479 | pv_table[ppn].pv_next = pv; |
480 | pmap->pm_stats.resident_count++; | | 480 | pmap->pm_stats.resident_count++; |
481 | | | 481 | |
482 | return pv; | | 482 | return pv; |
483 | } | | 483 | } |
484 | | | 484 | |
485 | /* | | 485 | /* |
486 | * Check if the given page fault was our reference / modified emulation fault; | | 486 | * Check if the given page fault was our reference / modified emulation fault; |
487 | * if so return true otherwise return false and let uvm handle it | | 487 | * if so return true otherwise return false and let uvm handle it |
488 | */ | | 488 | */ |
489 | bool | | 489 | bool |
490 | pmap_fault(pmap_t pmap, vaddr_t va, vm_prot_t *atype) | | 490 | pmap_fault(pmap_t pmap, vaddr_t va, vm_prot_t *atype) |
491 | { | | 491 | { |
492 | struct pv_entry *pv, *ppv; | | 492 | struct pv_entry *pv, *ppv; |
493 | uintptr_t lpn, ppn; | | 493 | uintptr_t lpn, ppn; |
494 | int prot, cur_prot, diff; | | 494 | int prot, cur_prot, diff; |
495 | | | 495 | |
496 | dprintf_debug("pmap_fault pmap %p, va %p\n", pmap, (void *) va); | | 496 | dprintf_debug("pmap_fault pmap %p, va %p\n", pmap, (void *) va); |
497 | | | 497 | |
498 | /* get logical page from vaddr */ | | 498 | /* get logical page from vaddr */ |
499 | lpn = atop(va - VM_MIN_ADDRESS); /* V->L */ | | 499 | lpn = atop(va - VM_MIN_ADDRESS); /* V->L */ |
500 | pv = pmap->pm_entries[lpn]; | | 500 | pv = pmap->pm_entries[lpn]; |
501 | | | 501 | |
502 | /* not known! then it must be UVM's work */ | | 502 | /* not known! then it must be UVM's work */ |
503 | if (pv == NULL) { | | 503 | if (pv == NULL) { |
504 | dprintf_debug("%s: no mapping yet\n", __func__); | | 504 | dprintf_debug("%s: no mapping yet\n", __func__); |
505 | *atype = VM_PROT_READ; /* assume it was a read */ | | 505 | *atype = VM_PROT_READ; /* assume it was a read */ |
506 | return false; | | 506 | return false; |
507 | } | | 507 | } |
508 | | | 508 | |
509 | /* determine physical address and lookup 'root' pv_entry */ | | 509 | /* determine physical address and lookup 'root' pv_entry */ |
510 | ppn = pv->pv_ppn; | | 510 | ppn = pv->pv_ppn; |
511 | ppv = &pv_table[ppn]; | | 511 | ppv = &pv_table[ppn]; |
512 | | | 512 | |
513 | /* if unmanaged we just make sure it is there! */ | | 513 | /* if unmanaged we just make sure it is there! */ |
514 | if (ppv->pv_vflags & PV_UNMANAGED) { | | 514 | if (ppv->pv_vflags & PV_UNMANAGED) { |
515 | printf("%s: oops warning unmanaged page %"PRIiPTR" faulted\n", | | 515 | printf("%s: oops warning unmanaged page %"PRIiPTR" faulted\n", |
516 | __func__, ppn); | | 516 | __func__, ppn); |
517 | /* atype not set */ | | 517 | /* atype not set */ |
518 | pmap_page_activate(pv); | | 518 | pmap_page_activate(pv); |
519 | return true; | | 519 | return true; |
520 | } | | 520 | } |
521 | | | 521 | |
522 | /* if its not mapped in, we have a TBL fault */ | | 522 | /* if its not mapped in, we have a TLB fault */ |
523 | if ((pv->pv_vflags & PV_MAPPEDIN) == 0) { | | 523 | if ((pv->pv_vflags & PV_MAPPEDIN) == 0) { |
524 | if (pv->pv_mmap_ppl != THUNK_PROT_NONE) { | | 524 | if (pv->pv_mmap_ppl != THUNK_PROT_NONE) { |
525 | dprintf_debug("%s: tlb fault page lpn %"PRIiPTR"\n", | | 525 | dprintf_debug("%s: tlb fault page lpn %"PRIiPTR"\n", |
526 | __func__, pv->pv_lpn); | | 526 | __func__, pv->pv_lpn); |
527 | pmap_page_activate(pv); | | 527 | pmap_page_activate(pv); |
528 | return true; | | 528 | return true; |
529 | } | | 529 | } |
530 | } | | 530 | } |
531 | | | 531 | |
532 | /* determine pmap access type (mmap doesn't need to be 1:1 on VM_PROT_) */ | | 532 | /* determine pmap access type (mmap doesn't need to be 1:1 on VM_PROT_) */ |
533 | prot = pv->pv_prot; | | 533 | prot = pv->pv_prot; |
534 | cur_prot = VM_PROT_NONE; | | 534 | cur_prot = VM_PROT_NONE; |
535 | if (pv->pv_mmap_ppl & THUNK_PROT_READ) | | 535 | if (pv->pv_mmap_ppl & THUNK_PROT_READ) |
536 | cur_prot |= VM_PROT_READ; | | 536 | cur_prot |= VM_PROT_READ; |
537 | if (pv->pv_mmap_ppl & THUNK_PROT_WRITE) | | 537 | if (pv->pv_mmap_ppl & THUNK_PROT_WRITE) |
538 | cur_prot |= VM_PROT_WRITE; | | 538 | cur_prot |= VM_PROT_WRITE; |
539 | if (pv->pv_mmap_ppl & THUNK_PROT_EXEC) | | 539 | if (pv->pv_mmap_ppl & THUNK_PROT_EXEC) |
540 | cur_prot |= VM_PROT_EXECUTE; | | 540 | cur_prot |= VM_PROT_EXECUTE; |
541 | | | 541 | |
542 | diff = prot & (prot ^ cur_prot); | | 542 | diff = prot & (prot ^ cur_prot); |
543 | | | 543 | |
544 | dprintf_debug("%s: prot = %d, cur_prot = %d, diff = %d\n", | | 544 | dprintf_debug("%s: prot = %d, cur_prot = %d, diff = %d\n", |
545 | __func__, prot, cur_prot, diff); | | 545 | __func__, prot, cur_prot, diff); |
546 | *atype = VM_PROT_READ; /* assume it's a read error */ | | 546 | *atype = VM_PROT_READ; /* assume it's a read error */ |
547 | if (diff & VM_PROT_READ) { | | 547 | if (diff & VM_PROT_READ) { |
548 | if ((ppv->pv_pflags & PV_REFERENCED) == 0) { | | 548 | if ((ppv->pv_pflags & PV_REFERENCED) == 0) { |
549 | ppv->pv_pflags |= PV_REFERENCED; | | 549 | ppv->pv_pflags |= PV_REFERENCED; |
550 | pmap_update_page(ppn); | | 550 | pmap_update_page(ppn); |
551 | return true; | | 551 | return true; |
552 | } | | 552 | } |
553 | panic("pmap: page not readable but marked referenced?"); | | 553 | panic("pmap: page not readable but marked referenced?"); |
554 | return false; | | 554 | return false; |
555 | } | | 555 | } |
556 | | | 556 | |
557 | #if 0 | | 557 | #if 0 |
558 | /* this might be questionable */ | | 558 | /* this might be questionable */ |
559 | if (diff & VM_PROT_EXECUTE) { | | 559 | if (diff & VM_PROT_EXECUTE) { |
560 | *atype = VM_PROT_EXECUTE; /* assume it was executing */ | | 560 | *atype = VM_PROT_EXECUTE; /* assume it was executing */ |
561 | if (prot & VM_PROT_EXECUTE) { | | 561 | if (prot & VM_PROT_EXECUTE) { |
562 | if ((ppv->pv_pflags & PV_REFERENCED) == 0) { | | 562 | if ((ppv->pv_pflags & PV_REFERENCED) == 0) { |
563 | ppv->pv_pflags |= PV_REFERENCED; | | 563 | ppv->pv_pflags |= PV_REFERENCED; |
564 | pmap_update_page(ppn); | | 564 | pmap_update_page(ppn); |
565 | return true; | | 565 | return true; |
566 | } | | 566 | } |
567 | } | | 567 | } |
568 | return false; | | 568 | return false; |
569 | } | | 569 | } |
570 | #endif | | 570 | #endif |
571 | | | 571 | |
572 | *atype = VM_PROT_WRITE; /* assume it's a write error */ | | 572 | *atype = VM_PROT_WRITE; /* assume it's a write error */ |
573 | if (diff & VM_PROT_WRITE) { | | 573 | if (diff & VM_PROT_WRITE) { |
574 | if (prot & VM_PROT_WRITE) { | | 574 | if (prot & VM_PROT_WRITE) { |
575 | /* should be allowed to write */ | | 575 | /* should be allowed to write */ |
576 | if ((ppv->pv_pflags & PV_MODIFIED) == 0) { | | 576 | if ((ppv->pv_pflags & PV_MODIFIED) == 0) { |
577 | /* was marked unmodified */ | | 577 | /* was marked unmodified */ |
578 | ppv->pv_pflags |= PV_MODIFIED; | | 578 | ppv->pv_pflags |= PV_MODIFIED; |
579 | pmap_update_page(ppn); | | 579 | pmap_update_page(ppn); |
580 | return true; | | 580 | return true; |
581 | } | | 581 | } |
582 | } | | 582 | } |
583 | panic("pmap: page not writable but marked modified?"); | | 583 | panic("pmap: page not writable but marked modified?"); |
584 | return false; | | 584 | return false; |
585 | } | | 585 | } |
586 | | | 586 | |
587 | /* not due to our r/m handling, let uvm handle it! */ | | 587 | /* not due to our r/m handling, let uvm handle it! */ |
588 | return false; | | 588 | return false; |
589 | } | | 589 | } |
590 | | | 590 | |
591 | | | 591 | |
592 | static void | | 592 | static void |
593 | pmap_page_activate(struct pv_entry *pv) | | 593 | pmap_page_activate(struct pv_entry *pv) |
594 | { | | 594 | { |
595 | paddr_t pa = pv->pv_ppn * PAGE_SIZE; | | 595 | paddr_t pa = pv->pv_ppn * PAGE_SIZE; |
596 | vaddr_t va = pv->pv_lpn * PAGE_SIZE + VM_MIN_ADDRESS; /* L->V */ | | 596 | vaddr_t va = pv->pv_lpn * PAGE_SIZE + VM_MIN_ADDRESS; /* L->V */ |
597 | void *addr; | | 597 | void *addr; |
598 | | | 598 | |
599 | addr = thunk_mmap((void *) va, PAGE_SIZE, pv->pv_mmap_ppl, | | 599 | addr = thunk_mmap((void *) va, PAGE_SIZE, pv->pv_mmap_ppl, |
600 | THUNK_MAP_FILE | THUNK_MAP_FIXED | THUNK_MAP_SHARED, | | 600 | THUNK_MAP_FILE | THUNK_MAP_FIXED | THUNK_MAP_SHARED, |
601 | mem_fh, pa); | | 601 | mem_fh, pa); |
602 | dprintf_debug("page_activate: (va %p, pa %p, prot %d, ppl %d) -> %p\n", | | 602 | dprintf_debug("page_activate: (va %p, pa %p, prot %d, ppl %d) -> %p\n", |
603 | (void *) va, (void *) pa, pv->pv_prot, pv->pv_mmap_ppl, | | 603 | (void *) va, (void *) pa, pv->pv_prot, pv->pv_mmap_ppl, |
604 | (void *) addr); | | 604 | (void *) addr); |
605 | if (addr != (void *) va) | | 605 | if (addr != (void *) va) |
606 | panic("pmap_page_activate: mmap failed (expected %p got %p): %d", | | 606 | panic("pmap_page_activate: mmap failed (expected %p got %p): %d", |
607 | (void *)va, addr, thunk_geterrno()); | | 607 | (void *)va, addr, thunk_geterrno()); |
608 | | | 608 | |
609 | pv->pv_vflags &= ~PV_MAPPEDIN; | | 609 | pv->pv_vflags &= ~PV_MAPPEDIN; |
610 | if (pv->pv_mmap_ppl != THUNK_PROT_NONE) | | 610 | if (pv->pv_mmap_ppl != THUNK_PROT_NONE) |
611 | pv->pv_vflags |= PV_MAPPEDIN; | | 611 | pv->pv_vflags |= PV_MAPPEDIN; |
612 | } | | 612 | } |
613 | | | 613 | |
614 | static void | | 614 | static void |
615 | pmap_page_deactivate(struct pv_entry *pv) | | 615 | pmap_page_deactivate(struct pv_entry *pv) |
616 | { | | 616 | { |
617 | paddr_t pa = pv->pv_ppn * PAGE_SIZE; | | 617 | paddr_t pa = pv->pv_ppn * PAGE_SIZE; |
618 | vaddr_t va = pv->pv_lpn * PAGE_SIZE + VM_MIN_ADDRESS; /* L->V */ | | 618 | vaddr_t va = pv->pv_lpn * PAGE_SIZE + VM_MIN_ADDRESS; /* L->V */ |
619 | void *addr; | | 619 | void *addr; |
620 | | | 620 | |
621 | addr = thunk_mmap((void *) va, PAGE_SIZE, THUNK_PROT_NONE, | | 621 | addr = thunk_mmap((void *) va, PAGE_SIZE, THUNK_PROT_NONE, |
622 | THUNK_MAP_FILE | THUNK_MAP_FIXED | THUNK_MAP_SHARED, | | 622 | THUNK_MAP_FILE | THUNK_MAP_FIXED | THUNK_MAP_SHARED, |
623 | mem_fh, pa); | | 623 | mem_fh, pa); |
624 | dprintf_debug("page_deactivate: (va %p, pa %p, ppl %d) -> %p\n", | | 624 | dprintf_debug("page_deactivate: (va %p, pa %p, ppl %d) -> %p\n", |
625 | (void *) va, (void *) pa, pv->pv_mmap_ppl, (void *) addr); | | 625 | (void *) va, (void *) pa, pv->pv_mmap_ppl, (void *) addr); |
626 | if (addr != (void *) va) | | 626 | if (addr != (void *) va) |
627 | panic("pmap_page_deactivate: mmap failed"); | | 627 | panic("pmap_page_deactivate: mmap failed"); |
628 | pv->pv_vflags &= ~PV_MAPPEDIN; | | 628 | pv->pv_vflags &= ~PV_MAPPEDIN; |
629 | } | | 629 | } |
630 | | | 630 | |
631 | static void | | 631 | static void |
632 | pv_update(struct pv_entry *pv) | | 632 | pv_update(struct pv_entry *pv) |
633 | { | | 633 | { |
634 | int pflags, vflags; | | 634 | int pflags, vflags; |
635 | int mmap_ppl; | | 635 | int mmap_ppl; |
636 | | | 636 | |
637 | /* get our per-physical-page flags */ | | 637 | /* get our per-physical-page flags */ |
638 | pflags = pv_table[pv->pv_ppn].pv_pflags; | | 638 | pflags = pv_table[pv->pv_ppn].pv_pflags; |
639 | vflags = pv_table[pv->pv_ppn].pv_vflags; | | 639 | vflags = pv_table[pv->pv_ppn].pv_vflags; |
640 | | | 640 | |
641 | KASSERT(THUNK_PROT_READ == VM_PROT_READ); | | 641 | KASSERT(THUNK_PROT_READ == VM_PROT_READ); |
642 | KASSERT(THUNK_PROT_WRITE == VM_PROT_WRITE); | | 642 | KASSERT(THUNK_PROT_WRITE == VM_PROT_WRITE); |
643 | KASSERT(THUNK_PROT_EXEC == VM_PROT_EXECUTE); | | 643 | KASSERT(THUNK_PROT_EXEC == VM_PROT_EXECUTE); |
644 | | | 644 | |
645 | /* create referenced/modified emulation */ | | 645 | /* create referenced/modified emulation */ |
646 | if ((pv->pv_prot & VM_PROT_WRITE) && | | 646 | if ((pv->pv_prot & VM_PROT_WRITE) && |
647 | (pflags & PV_REFERENCED) && (pflags & PV_MODIFIED)) { | | 647 | (pflags & PV_REFERENCED) && (pflags & PV_MODIFIED)) { |
648 | mmap_ppl = THUNK_PROT_READ | THUNK_PROT_WRITE; | | 648 | mmap_ppl = THUNK_PROT_READ | THUNK_PROT_WRITE; |
649 | } else if ((pv->pv_prot & (VM_PROT_READ | VM_PROT_EXECUTE)) && | | 649 | } else if ((pv->pv_prot & (VM_PROT_READ | VM_PROT_EXECUTE)) && |
650 | (pflags & PV_REFERENCED)) { | | 650 | (pflags & PV_REFERENCED)) { |
651 | mmap_ppl = THUNK_PROT_READ; | | 651 | mmap_ppl = THUNK_PROT_READ; |
652 | if (pv->pv_prot & VM_PROT_EXECUTE) | | 652 | if (pv->pv_prot & VM_PROT_EXECUTE) |
653 | mmap_ppl |= THUNK_PROT_EXEC; | | 653 | mmap_ppl |= THUNK_PROT_EXEC; |
654 | } else { | | 654 | } else { |
655 | mmap_ppl = THUNK_PROT_NONE; | | 655 | mmap_ppl = THUNK_PROT_NONE; |
656 | } | | 656 | } |
657 | | | 657 | |
658 | /* unmanaged pages are special; they don't track r/m */ | | 658 | /* unmanaged pages are special; they don't track r/m */ |
659 | if (vflags & PV_UNMANAGED) | | 659 | if (vflags & PV_UNMANAGED) |
660 | mmap_ppl = THUNK_PROT_READ | THUNK_PROT_WRITE; | | 660 | mmap_ppl = THUNK_PROT_READ | THUNK_PROT_WRITE; |
661 | | | 661 | |
662 | pv->pv_mmap_ppl = mmap_ppl; | | 662 | pv->pv_mmap_ppl = mmap_ppl; |
663 | } | | 663 | } |
664 | | | 664 | |
665 | /* update mapping of a physical page */ | | 665 | /* update mapping of a physical page */ |
666 | static void | | 666 | static void |
667 | pmap_update_page(uintptr_t ppn) | | 667 | pmap_update_page(uintptr_t ppn) |
668 | { | | 668 | { |
669 | struct pv_entry *pv; | | 669 | struct pv_entry *pv; |
670 | | | 670 | |
671 | for (pv = &pv_table[ppn]; pv != NULL; pv = pv->pv_next) { | | 671 | for (pv = &pv_table[ppn]; pv != NULL; pv = pv->pv_next) { |
672 | dprintf_debug("pmap_update_page: ppn %"PRIdPTR", pv->pv_map = %p\n", | | 672 | dprintf_debug("pmap_update_page: ppn %"PRIdPTR", pv->pv_map = %p\n", |
673 | ppn, pv->pv_pmap); | | 673 | ppn, pv->pv_pmap); |
674 | if (pv->pv_pmap != NULL) { | | 674 | if (pv->pv_pmap != NULL) { |
675 | pv_update(pv); | | 675 | pv_update(pv); |
676 | if (pv->pv_pmap->pm_flags & PM_ACTIVE) | | 676 | if (pv->pv_pmap->pm_flags & PM_ACTIVE) |
677 | pmap_page_activate(pv); | | 677 | pmap_page_activate(pv); |
678 | else | | 678 | else |
679 | pmap_page_deactivate(pv) | | 679 | pmap_page_deactivate(pv) |
680 | ; | | 680 | ; |
681 | } | | 681 | } |
682 | } | | 682 | } |
683 | } | | 683 | } |
684 | | | 684 | |
685 | static int | | 685 | static int |
686 | pmap_do_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, uint flags, int unmanaged) | | 686 | pmap_do_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, uint flags, int unmanaged) |
687 | { | | 687 | { |
688 | struct pv_entry *pv, *ppv; | | 688 | struct pv_entry *pv, *ppv; |
689 | uintptr_t ppn, lpn; | | 689 | uintptr_t ppn, lpn; |
690 | int s; | | 690 | int s; |
691 | | | 691 | |
692 | /* to page numbers */ | | 692 | /* to page numbers */ |
693 | ppn = atop(pa); | | 693 | ppn = atop(pa); |
694 | lpn = atop(va - VM_MIN_ADDRESS); /* V->L */ | | 694 | lpn = atop(va - VM_MIN_ADDRESS); /* V->L */ |
695 | #ifdef DIAGNOSTIC | | 695 | #ifdef DIAGNOSTIC |
696 | if ((va < VM_MIN_ADDRESS) || (va > VM_MAX_ADDRESS)) | | 696 | if ((va < VM_MIN_ADDRESS) || (va > VM_MAX_ADDRESS)) |
697 | panic("pmap_do_enter: invalid va isued\n"); | | 697 | panic("pmap_do_enter: invalid va isued\n"); |
698 | #endif | | 698 | #endif |
699 | | | 699 | |
700 | /* raise interrupt level */ | | 700 | /* raise interrupt level */ |
701 | s = splvm(); | | 701 | s = splvm(); |
702 | | | 702 | |
703 | /* remove existing mapping at this lpn */ | | 703 | /* remove existing mapping at this lpn */ |
704 | if (pmap->pm_entries[lpn] && | | 704 | if (pmap->pm_entries[lpn] && |
705 | pmap->pm_entries[lpn]->pv_ppn != ppn) | | 705 | pmap->pm_entries[lpn]->pv_ppn != ppn) |
706 | pmap_remove(pmap, va, va + PAGE_SIZE); | | 706 | pmap_remove(pmap, va, va + PAGE_SIZE); |
707 | | | 707 | |
708 | /* get our entry */ | | 708 | /* get our entry */ |
709 | ppv = &pv_table[ppn]; | | 709 | ppv = &pv_table[ppn]; |
710 | pv = pv_get(pmap, ppn, lpn); /* get our (copy) of pv entry */ | | 710 | pv = pv_get(pmap, ppn, lpn); /* get our (copy) of pv entry */ |
711 | | | 711 | |
712 | /* and adjust stats */ | | 712 | /* and adjust stats */ |
713 | if (pv == NULL) | | 713 | if (pv == NULL) |
714 | panic("pamp_do_enter: didn't find pv entry!"); | | 714 | panic("pamp_do_enter: didn't find pv entry!"); |
715 | if (pv->pv_vflags & PV_WIRED) | | 715 | if (pv->pv_vflags & PV_WIRED) |
716 | pmap->pm_stats.wired_count--; | | 716 | pmap->pm_stats.wired_count--; |
717 | | | 717 | |
718 | /* enter our details */ | | 718 | /* enter our details */ |
719 | pv->pv_pmap = pmap; | | 719 | pv->pv_pmap = pmap; |
720 | pv->pv_ppn = ppn; | | 720 | pv->pv_ppn = ppn; |
721 | pv->pv_lpn = lpn; | | 721 | pv->pv_lpn = lpn; |
722 | pv->pv_prot = prot; | | 722 | pv->pv_prot = prot; |
723 | pv->pv_vflags = 0; | | 723 | pv->pv_vflags = 0; |
724 | /* pv->pv_next = NULL; */ /* might confuse linked list? */ | | 724 | /* pv->pv_next = NULL; */ /* might confuse linked list? */ |
725 | if (flags & PMAP_WIRED) | | 725 | if (flags & PMAP_WIRED) |
726 | pv->pv_vflags |= PV_WIRED; | | 726 | pv->pv_vflags |= PV_WIRED; |
727 | | | 727 | |
728 | if (unmanaged) { | | 728 | if (unmanaged) { |
729 | /* don't track r/m */ | | 729 | /* don't track r/m */ |
730 | pv->pv_vflags |= PV_UNMANAGED; | | 730 | pv->pv_vflags |= PV_UNMANAGED; |
731 | } else { | | 731 | } else { |
732 | if (flags & VM_PROT_WRITE) | | 732 | if (flags & VM_PROT_WRITE) |
733 | ppv->pv_pflags |= PV_REFERENCED | PV_MODIFIED; | | 733 | ppv->pv_pflags |= PV_REFERENCED | PV_MODIFIED; |
734 | else if (flags & (VM_PROT_ALL)) | | 734 | else if (flags & (VM_PROT_ALL)) |
735 | ppv->pv_pflags |= PV_REFERENCED; | | 735 | ppv->pv_pflags |= PV_REFERENCED; |
736 | } | | 736 | } |
737 | | | 737 | |
738 | /* map it in */ | | 738 | /* map it in */ |
739 | pmap_update_page(ppn); | | 739 | pmap_update_page(ppn); |
740 | pmap->pm_entries[lpn] = pv; | | 740 | pmap->pm_entries[lpn] = pv; |
741 | | | 741 | |
742 | /* adjust stats */ | | 742 | /* adjust stats */ |
743 | if (pv->pv_vflags & PV_WIRED) | | 743 | if (pv->pv_vflags & PV_WIRED) |
744 | pmap->pm_stats.wired_count++; | | 744 | pmap->pm_stats.wired_count++; |
745 | | | 745 | |
746 | splx(s); | | 746 | splx(s); |
747 | | | 747 | |
748 | /* activate page directly when on active pmap */ | | 748 | /* activate page directly when on active pmap */ |
749 | if (pmap->pm_flags & PM_ACTIVE) | | 749 | if (pmap->pm_flags & PM_ACTIVE) |
750 | pmap_page_activate(pv); | | 750 | pmap_page_activate(pv); |
751 | | | 751 | |
752 | return 0; | | 752 | return 0; |
753 | } | | 753 | } |
754 | | | 754 | |
755 | int | | 755 | int |
756 | pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) | | 756 | pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) |
757 | { | | 757 | { |
758 | dprintf_debug("pmap_enter %p : v %p, p %p, prot %d, flags %d\n", | | 758 | dprintf_debug("pmap_enter %p : v %p, p %p, prot %d, flags %d\n", |
759 | (void *) pmap, (void *) va, (void *) pa, (int) prot, (int) flags); | | 759 | (void *) pmap, (void *) va, (void *) pa, (int) prot, (int) flags); |
760 | return pmap_do_enter(pmap, va, pa, prot, flags, 0); | | 760 | return pmap_do_enter(pmap, va, pa, prot, flags, 0); |
761 | } | | 761 | } |
762 | | | 762 | |
763 | /* release the pv_entry for a mapping. Code also derived from the hp300 pmap */ | | 763 | /* release the pv_entry for a mapping. Code also derived from the hp300 pmap */ |
764 | static void | | 764 | static void |
765 | pv_release(pmap_t pmap, uintptr_t ppn, uintptr_t lpn) | | 765 | pv_release(pmap_t pmap, uintptr_t ppn, uintptr_t lpn) |
766 | { | | 766 | { |
767 | struct pv_entry *pv, *npv; | | 767 | struct pv_entry *pv, *npv; |
768 | | | 768 | |
769 | dprintf_debug("pv_release ppn %"PRIdPTR", lpn %"PRIdPTR"\n", ppn, lpn); | | 769 | dprintf_debug("pv_release ppn %"PRIdPTR", lpn %"PRIdPTR"\n", ppn, lpn); |
770 | pv = &pv_table[ppn]; | | 770 | pv = &pv_table[ppn]; |
771 | /* | | 771 | /* |
772 | * If it is the first entry on the list, it is actually | | 772 | * If it is the first entry on the list, it is actually |
773 | * in the header and we must copy the following entry up | | 773 | * in the header and we must copy the following entry up |
774 | * to the header. Otherwise we must search the list for | | 774 | * to the header. Otherwise we must search the list for |
775 | * the entry. In either case we free the now unused entry. | | 775 | * the entry. In either case we free the now unused entry. |
776 | */ | | 776 | */ |
777 | if ((pmap == pv->pv_pmap) && (lpn == pv->pv_lpn)) { | | 777 | if ((pmap == pv->pv_pmap) && (lpn == pv->pv_lpn)) { |
778 | npv = pv->pv_next; | | 778 | npv = pv->pv_next; |
779 | if (npv) { | | 779 | if (npv) { |
780 | /* Pull up first entry from chain. */ | | 780 | /* Pull up first entry from chain. */ |
781 | memcpy(pv, npv, offsetof(struct pv_entry, pv_pflags)); | | 781 | memcpy(pv, npv, offsetof(struct pv_entry, pv_pflags)); |
782 | pv->pv_pmap->pm_entries[pv->pv_lpn] = pv; | | 782 | pv->pv_pmap->pm_entries[pv->pv_lpn] = pv; |
783 | pv_free(npv); | | 783 | pv_free(npv); |
784 | } else { | | 784 | } else { |
785 | memset(pv, 0, offsetof(struct pv_entry, pv_pflags)); | | 785 | memset(pv, 0, offsetof(struct pv_entry, pv_pflags)); |
786 | } | | 786 | } |
787 | } else { | | 787 | } else { |
788 | for (npv = pv->pv_next; npv; npv = npv->pv_next) { | | 788 | for (npv = pv->pv_next; npv; npv = npv->pv_next) { |
789 | if ((pmap == npv->pv_pmap) && (lpn == npv->pv_lpn)) | | 789 | if ((pmap == npv->pv_pmap) && (lpn == npv->pv_lpn)) |
790 | break; | | 790 | break; |
791 | pv = npv; | | 791 | pv = npv; |
792 | } | | 792 | } |
793 | KASSERT(npv != NULL); | | 793 | KASSERT(npv != NULL); |
794 | pv->pv_next = npv->pv_next; | | 794 | pv->pv_next = npv->pv_next; |
795 | pv_free(npv); | | 795 | pv_free(npv); |
796 | } | | 796 | } |
797 | pmap->pm_entries[lpn] = NULL; | | 797 | pmap->pm_entries[lpn] = NULL; |
798 | pmap->pm_stats.resident_count--; | | 798 | pmap->pm_stats.resident_count--; |
799 | } | | 799 | } |
800 | | | 800 | |
801 | void | | 801 | void |
802 | pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva) | | 802 | pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva) |
803 | { | | 803 | { |
804 | uintptr_t slpn, elpn, lpn; | | 804 | uintptr_t slpn, elpn, lpn; |
805 | struct pv_entry *pv; | | 805 | struct pv_entry *pv; |
806 | int s; | | 806 | int s; |
807 | | | 807 | |
808 | slpn = atop(sva - VM_MIN_ADDRESS); /* V->L */ | | 808 | slpn = atop(sva - VM_MIN_ADDRESS); /* V->L */ |
809 | elpn = atop(eva - VM_MIN_ADDRESS); /* V->L */ | | 809 | elpn = atop(eva - VM_MIN_ADDRESS); /* V->L */ |
810 | | | 810 | |
811 | dprintf_debug("pmap_remove() called from " | | 811 | dprintf_debug("pmap_remove() called from " |
812 | "lpn %"PRIdPTR" to lpn %"PRIdPTR"\n", slpn, elpn); | | 812 | "lpn %"PRIdPTR" to lpn %"PRIdPTR"\n", slpn, elpn); |
813 | | | 813 | |
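 | /* walk the linear page range: deactivate live mappings, adjust wiring stats and release the pv entries */ | |  | /* walk the linear page range: deactivate live mappings, adjust wiring stats and release the pv entries */ |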
814 | s = splvm(); | | 814 | s = splvm(); |
815 | for (lpn = slpn; lpn < elpn; lpn++) { | | 815 | for (lpn = slpn; lpn < elpn; lpn++) { |
816 | pv = pmap->pm_entries[lpn]; | | 816 | pv = pmap->pm_entries[lpn]; |
817 | if (pv != NULL) { | | 817 | if (pv != NULL) { |
818 | if (pmap->pm_flags & PM_ACTIVE) { | | 818 | if (pmap->pm_flags & PM_ACTIVE) { |
819 | pmap_page_deactivate(pv); | | 819 | pmap_page_deactivate(pv); |
820 | // MEMC_WRITE(pv->pv_deactivate); | | 820 | // MEMC_WRITE(pv->pv_deactivate); |
821 | // cpu_cache_flush(); | | 821 | // cpu_cache_flush(); |
822 | } | | 822 | } |
823 | pmap->pm_entries[lpn] = NULL; | | 823 | pmap->pm_entries[lpn] = NULL; |
824 | if (pv->pv_vflags & PV_WIRED) | | 824 | if (pv->pv_vflags & PV_WIRED) |
825 | pmap->pm_stats.wired_count--; | | 825 | pmap->pm_stats.wired_count--; |
826 | pv_release(pmap, pv->pv_ppn, lpn); | | 826 | pv_release(pmap, pv->pv_ppn, lpn); |
827 | } | | 827 | } |
828 | } | | 828 | } |
829 | splx(s); | | 829 | splx(s); |
830 | } | | 830 | } |
831 | | | 831 | |
832 | void | | 832 | void |
833 | pmap_remove_all(pmap_t pmap) | | 833 | pmap_remove_all(pmap_t pmap) |
834 | { | | 834 | { |
835 | /* just a hint that all the entries are to be removed */ | | 835 | /* just a hint that all the entries are to be removed */ |
836 | dprintf_debug("pmap_remove_all() dummy called\n"); | | 836 | dprintf_debug("pmap_remove_all() dummy called\n"); |
837 | | | 837 | |
838 | /* we don't do anything with the kernel pmap */ | | 838 | /* we don't do anything with the kernel pmap */ |
839 | if (pmap == pmap_kernel()) | | 839 | if (pmap == pmap_kernel()) |
840 | return; | | 840 | return; |
841 | | | 841 | |
842 | #if 0 | | 842 | #if 0 |
843 | /* remove all mappings in one go; not needed */ | | 843 | /* remove all mappings in one go; not needed */ |
844 | pmap_remove(pmap, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS); | | 844 | pmap_remove(pmap, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS); |
845 | thunk_munmap((void *) VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS - VM_MIN_ADDRESS); | | 845 | thunk_munmap((void *) VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS - VM_MIN_ADDRESS); |
846 | #endif | | 846 | #endif |
847 | #if 0 | | 847 | #if 0 |
848 | /* remove all cached info from the pages */ | | 848 | /* remove all cached info from the pages */ |
849 | thunk_msync(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS - VM_MIN_ADDRESS, | | 849 | thunk_msync(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS - VM_MIN_ADDRESS, |
850 | THUNK_MS_SYNC | THUNK_MS_INVALIDATE); | | 850 | THUNK_MS_SYNC | THUNK_MS_INVALIDATE); |
851 | #endif | | 851 | #endif |
852 | } | | 852 | } |
853 | | | 853 | |
854 | void | | 854 | void |
855 | pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot) | | 855 | pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot) |
856 | { | | 856 | { |
857 | struct pv_entry *pv; | | 857 | struct pv_entry *pv; |
858 | intptr_t slpn, elpn, lpn; | | 858 | intptr_t slpn, elpn, lpn; |
859 | int s; | | 859 | int s; |
860 | | | 860 | |
861 | if (prot == VM_PROT_NONE) { | | 861 | if (prot == VM_PROT_NONE) { |
862 | pmap_remove(pmap, sva, eva); | | 862 | pmap_remove(pmap, sva, eva); |
863 | return; | | 863 | return; |
864 | } | | 864 | } |
865 | if (prot & VM_PROT_WRITE) | | 865 | if (prot & VM_PROT_WRITE) |
866 | return; /* apparently we're meant to do nothing here */ | | 866 | return; /* apparently we're meant to do nothing here */ |
867 | if (pmap == pmap_kernel()) | | 867 | if (pmap == pmap_kernel()) |
868 | return; /* can't restrict kernel w/o unmapping. */ | | 868 | return; /* can't restrict kernel w/o unmapping. */ |
869 | | | 869 | |
870 | slpn = atop(sva - VM_MIN_ADDRESS); /* V->L */ | | 870 | slpn = atop(sva - VM_MIN_ADDRESS); /* V->L */ |
871 | elpn = atop(eva - VM_MIN_ADDRESS); /* V->L */ | | 871 | elpn = atop(eva - VM_MIN_ADDRESS); /* V->L */ |
872 | | | 872 | |
873 | dprintf_debug("pmap_protect() called from " | | 873 | dprintf_debug("pmap_protect() called from " |
874 | "lpn %"PRIdPTR" to lpn %"PRIdPTR"\n", slpn, elpn); | | 874 | "lpn %"PRIdPTR" to lpn %"PRIdPTR"\n", slpn, elpn); |
875 | | | 875 | |
876 | s = splvm(); | | 876 | s = splvm(); |
877 | for (lpn = slpn; lpn < elpn; lpn++) { | | 877 | for (lpn = slpn; lpn < elpn; lpn++) { |
878 | pv = pmap->pm_entries[lpn]; | | 878 | pv = pmap->pm_entries[lpn]; |
879 | if (pv != NULL) { | | 879 | if (pv != NULL) { |
880 | pv->pv_prot &= prot; | | 880 | pv->pv_prot &= prot; |
881 | pv_update(pv); | | 881 | pv_update(pv); |
882 | if (pv->pv_pmap->pm_flags & PM_ACTIVE) | | 882 | if (pv->pv_pmap->pm_flags & PM_ACTIVE) |
883 | pmap_page_activate(pv); | | 883 | pmap_page_activate(pv); |
884 | } | | 884 | } |
885 | } | | 885 | } |
886 | splx(s); | | 886 | splx(s); |
887 | } | | 887 | } |
888 | | | 888 | |
889 | void | | 889 | void |
890 | pmap_unwire(pmap_t pmap, vaddr_t va) | | 890 | pmap_unwire(pmap_t pmap, vaddr_t va) |
891 | { | | 891 | { |
892 | struct pv_entry *pv; | | 892 | struct pv_entry *pv; |
893 | intptr_t lpn; | | 893 | intptr_t lpn; |
894 | | | 894 | |
895 | dprintf_debug("pmap_unwire called\n'"); | | 895 | dprintf_debug("pmap_unwire called\n'"); |
896 | if (pmap == NULL) | | 896 | if (pmap == NULL) |
897 | return; | | 897 | return; |
898 | | | 898 | |
899 | lpn = atop(va - VM_MIN_ADDRESS); /* V->L */ | | 899 | lpn = atop(va - VM_MIN_ADDRESS); /* V->L */ |
900 | pv = pmap->pm_entries[lpn]; | | 900 | pv = pmap->pm_entries[lpn]; |
901 | if (pv == NULL) | | 901 | if (pv == NULL) |
902 | return; | | 902 | return; |
903 | /* but is it wired? */ | | 903 | /* but is it wired? */ |
904 | if ((pv->pv_vflags & PV_WIRED) == 0) | | 904 | if ((pv->pv_vflags & PV_WIRED) == 0) |
905 | return; | | 905 | return; |
906 | pmap->pm_stats.wired_count--; | | 906 | pmap->pm_stats.wired_count--; |
907 | pv->pv_vflags &= ~PV_WIRED; | | 907 | pv->pv_vflags &= ~PV_WIRED; |
908 | } | | 908 | } |
909 | | | 909 | |
910 | bool | | 910 | bool |
911 | pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *ppa) | | 911 | pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *ppa) |
912 | { | | 912 | { |
913 | struct pv_entry *pv; | | 913 | struct pv_entry *pv; |
914 | intptr_t lpn; | | 914 | intptr_t lpn; |
915 | | | 915 | |
916 | dprintf_debug("pmap_extract: extracting va %p\n", (void *) va); | | 916 | dprintf_debug("pmap_extract: extracting va %p\n", (void *) va); |
917 | #ifdef DIAGNOSTIC | | 917 | #ifdef DIAGNOSTIC |
918 | if ((va < VM_MIN_ADDRESS) || (va > VM_MAX_ADDRESS)) | | 918 | if ((va < VM_MIN_ADDRESS) || (va > VM_MAX_ADDRESS)) |
919 | panic("pmap_extract: invalid va isued\n"); | | 919 | panic("pmap_extract: invalid va isued\n"); |
920 | #endif | | 920 | #endif |
921 | lpn = atop(va - VM_MIN_ADDRESS); /* V->L */ | | 921 | lpn = atop(va - VM_MIN_ADDRESS); /* V->L */ |
922 | pv = pmap->pm_entries[lpn]; | | 922 | pv = pmap->pm_entries[lpn]; |
923 | | | 923 | |
924 | if (pv == NULL) | | 924 | if (pv == NULL) |
925 | return false; | | 925 | return false; |
926 | if (ppa) | | 926 | if (ppa) |
927 | *ppa = ptoa(pv->pv_ppn); | | 927 | *ppa = ptoa(pv->pv_ppn); |
928 | return true; | | 928 | return true; |
929 | } | | 929 | } |
930 | | | 930 | |
931 | /* | | 931 | /* |
932 | * Enter an unmanaged, `wired' kernel mapping. | | 932 | * Enter an unmanaged, `wired' kernel mapping. |
933 | * Only to be removed by pmap_kremove() | | 933 | * Only to be removed by pmap_kremove() |
934 | */ | | 934 | */ |
935 | void | | 935 | void |
936 | pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) | | 936 | pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) |
937 | { | | 937 | { |
938 | dprintf_debug("pmap_kenter_pa : v %p, p %p, prot %d, flags %d\n", | | 938 | dprintf_debug("pmap_kenter_pa : v %p, p %p, prot %d, flags %d\n", |
939 | (void *) va, (void *) pa, (int) prot, (int) flags); | | 939 | (void *) va, (void *) pa, (int) prot, (int) flags); |
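 | /* the trailing 1 (0 in pmap_enter()) presumably marks this as an unmanaged kernel mapping */ | |  | /* the trailing 1 (0 in pmap_enter()) presumably marks this as an unmanaged kernel mapping */ |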
940 | pmap_do_enter(pmap_kernel(), va, pa, prot, prot | PMAP_WIRED, 1); | | 940 | pmap_do_enter(pmap_kernel(), va, pa, prot, prot | PMAP_WIRED, 1); |
941 | } | | 941 | } |
942 | | | 942 | |
943 | void | | 943 | void |
944 | pmap_kremove(vaddr_t va, vsize_t size) | | 944 | pmap_kremove(vaddr_t va, vsize_t size) |
945 | { | | 945 | { |
946 | pmap_remove(pmap_kernel(), va, va + size); | | 946 | pmap_remove(pmap_kernel(), va, va + size); |
947 | } | | 947 | } |
948 | | | 948 | |
949 | void | | 949 | void |
950 | pmap_copy(pmap_t dst_map, pmap_t src_map, vaddr_t dst_addr, vsize_t len, | | 950 | pmap_copy(pmap_t dst_map, pmap_t src_map, vaddr_t dst_addr, vsize_t len, |
951 | vaddr_t src_addr) | | 951 | vaddr_t src_addr) |
952 | { | | 952 | { |
953 | dprintf_debug("pmap_copy (dummy)\n"); | | 953 | dprintf_debug("pmap_copy (dummy)\n"); |
954 | } | | 954 | } |
955 | | | 955 | |
956 | void | | 956 | void |
957 | pmap_update(pmap_t pmap) | | 957 | pmap_update(pmap_t pmap) |
958 | { | | 958 | { |
959 | dprintf_debug("pmap_update (dummy)\n"); | | 959 | dprintf_debug("pmap_update (dummy)\n"); |
960 | } | | 960 | } |
961 | | | 961 | |
962 | void | | 962 | void |
963 | pmap_activate(struct lwp *l) | | 963 | pmap_activate(struct lwp *l) |
964 | { | | 964 | { |
965 | struct proc *p = l->l_proc; | | 965 | struct proc *p = l->l_proc; |
966 | pmap_t pmap; | | 966 | pmap_t pmap; |
967 | | | 967 | |
968 | pmap = p->p_vmspace->vm_map.pmap; | | 968 | pmap = p->p_vmspace->vm_map.pmap; |
969 | dprintf_debug("pmap_activate for lwp %p, pmap = %p\n", l, pmap); | | 969 | dprintf_debug("pmap_activate for lwp %p, pmap = %p\n", l, pmap); |
970 | | | 970 | |
971 | if (pmap == pmap_kernel()) | | 971 | if (pmap == pmap_kernel()) |
972 | return; /* kernel pmap is always active */ | | 972 | return; /* kernel pmap is always active */ |
973 | | | 973 | |
974 | KASSERT(active_pmap == NULL); | | 974 | KASSERT(active_pmap == NULL); |
975 | KASSERT((pmap->pm_flags & PM_ACTIVE) == 0); | | 975 | KASSERT((pmap->pm_flags & PM_ACTIVE) == 0); |
976 | | | 976 | |
977 | active_pmap = pmap; | | 977 | active_pmap = pmap; |
978 | pmap->pm_flags |= PM_ACTIVE; | | 978 | pmap->pm_flags |= PM_ACTIVE; |
979 | } | | 979 | } |
980 | | | 980 | |
981 | void | | 981 | void |
982 | pmap_deactivate(struct lwp *l) | | 982 | pmap_deactivate(struct lwp *l) |
983 | { | | 983 | { |
984 | struct proc *p = l->l_proc; | | 984 | struct proc *p = l->l_proc; |
985 | pmap_t pmap; | | 985 | pmap_t pmap; |
986 | int i; | | 986 | int i; |
987 | | | 987 | |
988 | pmap = p->p_vmspace->vm_map.pmap; | | 988 | pmap = p->p_vmspace->vm_map.pmap; |
989 | dprintf_debug("pmap_DEactivate for lwp %p, pmap = %p\n", l, pmap); | | 989 | dprintf_debug("pmap_DEactivate for lwp %p, pmap = %p\n", l, pmap); |
990 | | | 990 | |
991 | if (pmap == pmap_kernel()) | | 991 | if (pmap == pmap_kernel()) |
992 | return; /* kernel pmap is always active */ | | 992 | return; /* kernel pmap is always active */ |
993 | | | 993 | |
994 | KASSERT(pmap == active_pmap); | | 994 | KASSERT(pmap == active_pmap); |
995 | KASSERT(pmap->pm_flags & PM_ACTIVE); | | 995 | KASSERT(pmap->pm_flags & PM_ACTIVE); |
996 | | | 996 | |
997 | active_pmap = NULL; | | 997 | active_pmap = NULL; |
998 | pmap->pm_flags &=~ PM_ACTIVE; | | 998 | pmap->pm_flags &=~ PM_ACTIVE; |
999 | for (i = 0; i < pm_nentries; i++) { | | 999 | for (i = 0; i < pm_nentries; i++) { |
1000 | if (pmap->pm_entries[i] != NULL) { | | 1000 | if (pmap->pm_entries[i] != NULL) { |
1001 | pmap_page_deactivate(pmap->pm_entries[i]); | | 1001 | pmap_page_deactivate(pmap->pm_entries[i]); |
1002 | // MEMC_WRITE(pmap->pm_entries[i]->pv_deactivate); | | 1002 | // MEMC_WRITE(pmap->pm_entries[i]->pv_deactivate); |
1003 | } | | 1003 | } |
1004 | } | | 1004 | } |
1005 | /* dummy */ | | 1005 | /* dummy */ |
1006 | // cpu_cache_flush(); | | 1006 | // cpu_cache_flush(); |
1007 | } | | 1007 | } |
1008 | | | 1008 | |
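 | /* zero a physical page by temporarily mapping it through mem_fh and clearing it */ | |  | /* zero a physical page by temporarily mapping it through mem_fh and clearing it */ |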
1009 | void | | 1009 | void |
1010 | pmap_zero_page(paddr_t pa) | | 1010 | pmap_zero_page(paddr_t pa) |
1011 | { | | 1011 | { |
1012 | char *blob; | | 1012 | char *blob; |
1013 | | | 1013 | |
1014 | dprintf_debug("pmap_zero_page: pa %p\n", (void *) pa); | | 1014 | dprintf_debug("pmap_zero_page: pa %p\n", (void *) pa); |
1015 | | | 1015 | |
1016 | if (pa & (PAGE_SIZE-1)) | | 1016 | if (pa & (PAGE_SIZE-1)) |
1017 | panic("%s: unaligned address passed : %p\n", __func__, (void *) pa); | | 1017 | panic("%s: unaligned address passed : %p\n", __func__, (void *) pa); |
1018 | | | 1018 | |
1019 | blob = thunk_mmap(NULL, PAGE_SIZE, | | 1019 | blob = thunk_mmap(NULL, PAGE_SIZE, |
1020 | THUNK_PROT_READ | THUNK_PROT_WRITE, | | 1020 | THUNK_PROT_READ | THUNK_PROT_WRITE, |
1021 | THUNK_MAP_FILE | THUNK_MAP_SHARED, | | 1021 | THUNK_MAP_FILE | THUNK_MAP_SHARED, |
1022 | mem_fh, pa); | | 1022 | mem_fh, pa); |
1023 | if (!blob) | | 1023 | if (!blob) |
1024 | panic("%s: couldn't get mapping", __func__); | | 1024 | panic("%s: couldn't get mapping", __func__); |
1025 | | | 1025 | |
1026 | memset(blob, 0, PAGE_SIZE); | | 1026 | memset(blob, 0, PAGE_SIZE); |
1027 | | | 1027 | |
1028 | thunk_munmap(blob, PAGE_SIZE); | | 1028 | thunk_munmap(blob, PAGE_SIZE); |
1029 | } | | 1029 | } |
1030 | | | 1030 | |
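 | /* copy a physical page via two temporary mappings of mem_fh: source read-only, destination read/write */ | |  | /* copy a physical page via two temporary mappings of mem_fh: source read-only, destination read/write */ |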
1031 | void | | 1031 | void |
1032 | pmap_copy_page(paddr_t src_pa, paddr_t dst_pa) | | 1032 | pmap_copy_page(paddr_t src_pa, paddr_t dst_pa) |
1033 | { | | 1033 | { |
1034 | char *sblob, *dblob; | | 1034 | char *sblob, *dblob; |
1035 | | | 1035 | |
1036 | if (src_pa & (PAGE_SIZE-1)) | | 1036 | if (src_pa & (PAGE_SIZE-1)) |
1037 | panic("%s: unaligned address passed : %p\n", __func__, (void *) src_pa); | | 1037 | panic("%s: unaligned address passed : %p\n", __func__, (void *) src_pa); |
1038 | if (dst_pa & (PAGE_SIZE-1)) | | 1038 | if (dst_pa & (PAGE_SIZE-1)) |
1039 | panic("%s: unaligned address passed : %p\n", __func__, (void *) dst_pa); | | 1039 | panic("%s: unaligned address passed : %p\n", __func__, (void *) dst_pa); |
1040 | | | 1040 | |
1041 | dprintf_debug("pmap_copy_page: pa src %p, pa dst %p\n", | | 1041 | dprintf_debug("pmap_copy_page: pa src %p, pa dst %p\n", |
1042 | (void *) src_pa, (void *) dst_pa); | | 1042 | (void *) src_pa, (void *) dst_pa); |
1043 | | | 1043 | |
1044 | sblob = thunk_mmap(NULL, PAGE_SIZE, | | 1044 | sblob = thunk_mmap(NULL, PAGE_SIZE, |
1045 | THUNK_PROT_READ, | | 1045 | THUNK_PROT_READ, |
1046 | THUNK_MAP_FILE | THUNK_MAP_SHARED, | | 1046 | THUNK_MAP_FILE | THUNK_MAP_SHARED, |
1047 | mem_fh, src_pa); | | 1047 | mem_fh, src_pa); |
1048 | if (!sblob) | | 1048 | if (!sblob) |
1049 | panic("%s: couldn't get src mapping", __func__); | | 1049 | panic("%s: couldn't get src mapping", __func__); |
1050 | | | 1050 | |
1051 | dblob = thunk_mmap(NULL, PAGE_SIZE, | | 1051 | dblob = thunk_mmap(NULL, PAGE_SIZE, |
1052 | THUNK_PROT_READ | THUNK_PROT_WRITE, | | 1052 | THUNK_PROT_READ | THUNK_PROT_WRITE, |
1053 | THUNK_MAP_FILE | THUNK_MAP_SHARED, | | 1053 | THUNK_MAP_FILE | THUNK_MAP_SHARED, |
1054 | mem_fh, dst_pa); | | 1054 | mem_fh, dst_pa); |
1055 | if (!dblob) | | 1055 | if (!dblob) |
1056 | panic("%s: couldn't get dst mapping", __func__); | | 1056 | panic("%s: couldn't get dst mapping", __func__); |
1057 | | | 1057 | |
1058 | memcpy(dblob, sblob, PAGE_SIZE); | | 1058 | memcpy(dblob, sblob, PAGE_SIZE); |
1059 | | | 1059 | |
1060 | thunk_munmap(sblob, PAGE_SIZE); | | 1060 | thunk_munmap(sblob, PAGE_SIZE); |
1061 | thunk_munmap(dblob, PAGE_SIZE); | | 1061 | thunk_munmap(dblob, PAGE_SIZE); |
1062 | } | | 1062 | } |
1063 | | | 1063 | |
1064 | /* change access permissions on a given physical page */ | | 1064 | /* change access permissions on a given physical page */ |
1065 | void | | 1065 | void |
1066 | pmap_page_protect(struct vm_page *page, vm_prot_t prot) | | 1066 | pmap_page_protect(struct vm_page *page, vm_prot_t prot) |
1067 | { | | 1067 | { |
1068 | intptr_t ppn; | | 1068 | intptr_t ppn; |
1069 | struct pv_entry *pv, *npv; | | 1069 | struct pv_entry *pv, *npv; |
1070 | | | 1070 | |
1071 | ppn = atop(VM_PAGE_TO_PHYS(page)); | | 1071 | ppn = atop(VM_PAGE_TO_PHYS(page)); |
1072 | dprintf_debug("pmap_page_protect page %"PRIiPTR" to prot %d\n", ppn, prot); | | 1072 | dprintf_debug("pmap_page_protect page %"PRIiPTR" to prot %d\n", ppn, prot); |
1073 | | | 1073 | |
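 | /* VM_PROT_NONE removes all managed mappings of the page; any weaker protection just masks pv_prot and refreshes mappings in the active pmap */ | |  | /* VM_PROT_NONE removes all managed mappings of the page; any weaker protection just masks pv_prot and refreshes mappings in the active pmap */ |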
1074 | if (prot == VM_PROT_NONE) { | | 1074 | if (prot == VM_PROT_NONE) { |
1075 | /* visit all mappings */ | | 1075 | /* visit all mappings */ |
1076 | npv = pv = &pv_table[ppn]; | | 1076 | npv = pv = &pv_table[ppn]; |
1077 | while ((pv != NULL) && (pv->pv_pmap != NULL)) { | | 1077 | while ((pv != NULL) && (pv->pv_pmap != NULL)) { |
1078 | /* skip unmanaged entries */ | | 1078 | /* skip unmanaged entries */ |
1079 | if (pv->pv_vflags & PV_UNMANAGED) { | | 1079 | if (pv->pv_vflags & PV_UNMANAGED) { |
1080 | pv = pv->pv_next; | | 1080 | pv = pv->pv_next; |
1081 | continue; | | 1081 | continue; |
1082 | } | | 1082 | } |
1083 | | | 1083 | |
1084 | /* if in an active pmap deactivate */ | | 1084 | /* if in an active pmap deactivate */ |
1085 | if (pv->pv_pmap->pm_flags & PM_ACTIVE) | | 1085 | if (pv->pv_pmap->pm_flags & PM_ACTIVE) |
1086 | pmap_page_deactivate(pv); | | 1086 | pmap_page_deactivate(pv); |
1087 | | | 1087 | |
1088 | /* if not on the head, remember our next */ | | 1088 | /* if not on the head, remember our next */ |
1089 | if (pv != &pv_table[ppn]) | | 1089 | if (pv != &pv_table[ppn]) |
1090 | npv = pv->pv_next; | | 1090 | npv = pv->pv_next; |
1091 | | | 1091 | |
1092 | /* remove from pmap */ | | 1092 | /* remove from pmap */ |
1093 | pv->pv_pmap->pm_entries[pv->pv_lpn] = NULL; | | 1093 | pv->pv_pmap->pm_entries[pv->pv_lpn] = NULL; |
1094 | if (pv->pv_vflags & PV_WIRED) | | 1094 | if (pv->pv_vflags & PV_WIRED) |
1095 | pv->pv_pmap->pm_stats.wired_count--; | | 1095 | pv->pv_pmap->pm_stats.wired_count--; |
1096 | pv_release(pv->pv_pmap, ppn, pv->pv_lpn); | | 1096 | pv_release(pv->pv_pmap, ppn, pv->pv_lpn); |
1097 | | | 1097 | |
1098 | pv = npv; | | 1098 | pv = npv; |
1099 | } | | 1099 | } |
1100 | } else if (prot != VM_PROT_ALL) { | | 1100 | } else if (prot != VM_PROT_ALL) { |
1101 | /* visit all mappings */ | | 1101 | /* visit all mappings */ |
1102 | for (pv = &pv_table[ppn]; pv != NULL; pv = pv->pv_next) { | | 1102 | for (pv = &pv_table[ppn]; pv != NULL; pv = pv->pv_next) { |
1103 | /* if managed and in a pmap restrict access */ | | 1103 | /* if managed and in a pmap restrict access */ |
1104 | if ((pv->pv_pmap != NULL) && | | 1104 | if ((pv->pv_pmap != NULL) && |
1105 | ((pv->pv_vflags & PV_UNMANAGED) == 0)) { | | 1105 | ((pv->pv_vflags & PV_UNMANAGED) == 0)) { |
1106 | pv->pv_prot &= prot; | | 1106 | pv->pv_prot &= prot; |
1107 | pv_update(pv); | | 1107 | pv_update(pv); |
1108 | /* if in active pmap (re)activate page */ | | 1108 | /* if in active pmap (re)activate page */ |
1109 | if (pv->pv_pmap->pm_flags & PM_ACTIVE) | | 1109 | if (pv->pv_pmap->pm_flags & PM_ACTIVE) |
1110 | pmap_page_activate(pv); | | 1110 | pmap_page_activate(pv); |
1111 | } | | 1111 | } |
1112 | } | | 1112 | } |
1113 | } | | 1113 | } |
1114 | } | | 1114 | } |
1115 | | | 1115 | |
1116 | bool | | 1116 | bool |
1117 | pmap_clear_modify(struct vm_page *page) | | 1117 | pmap_clear_modify(struct vm_page *page) |
1118 | { | | 1118 | { |
1119 | struct pv_entry *pv; | | 1119 | struct pv_entry *pv; |
1120 | uintptr_t ppn; | | 1120 | uintptr_t ppn; |
1121 | bool rv; | | 1121 | bool rv; |
1122 | | | 1122 | |
1123 | ppn = atop(VM_PAGE_TO_PHYS(page)); | | 1123 | ppn = atop(VM_PAGE_TO_PHYS(page)); |
1124 | rv = pmap_is_modified(page); | | 1124 | rv = pmap_is_modified(page); |
1125 | | | 1125 | |
1126 | dprintf_debug("pmap_clear_modify page %"PRIiPTR"\n", ppn); | | 1126 | dprintf_debug("pmap_clear_modify page %"PRIiPTR"\n", ppn); |
1127 | | | 1127 | |
1128 | /* if marked modified, clear it in all the pmaps referencing it */ | | 1128 | /* if marked modified, clear it in all the pmaps referencing it */ |
1129 | if (rv) { | | 1129 | if (rv) { |
1130 | /* if it's marked modified in a kernel mapping, don't clear it */ | | 1130 | /* if it's marked modified in a kernel mapping, don't clear it */ |
1131 | for (pv = &pv_table[ppn]; pv != NULL; pv = pv->pv_next) | | 1131 | for (pv = &pv_table[ppn]; pv != NULL; pv = pv->pv_next) |
1132 | if (pv->pv_pmap == pmap_kernel() && | | 1132 | if (pv->pv_pmap == pmap_kernel() && |
1133 | (pv->pv_prot & VM_PROT_WRITE)) | | 1133 | (pv->pv_prot & VM_PROT_WRITE)) |
1134 | return rv; | | 1134 | return rv; |
1135 | /* clear it */ | | 1135 | /* clear it */ |
1136 | pv_table[ppn].pv_pflags &= ~PV_MODIFIED; | | 1136 | pv_table[ppn].pv_pflags &= ~PV_MODIFIED; |
1137 | pmap_update_page(ppn); | | 1137 | pmap_update_page(ppn); |
1138 | } | | 1138 | } |
1139 | return rv; | | 1139 | return rv; |
1140 | } | | 1140 | } |
1141 | | | 1141 | |
1142 | bool | | 1142 | bool |
1143 | pmap_clear_reference(struct vm_page *page) | | 1143 | pmap_clear_reference(struct vm_page *page) |
1144 | { | | 1144 | { |
1145 | uintptr_t ppn; | | 1145 | uintptr_t ppn; |
1146 | bool rv; | | 1146 | bool rv; |
1147 | | | 1147 | |
1148 | ppn = atop(VM_PAGE_TO_PHYS(page)); | | 1148 | ppn = atop(VM_PAGE_TO_PHYS(page)); |
1149 | rv = pmap_is_referenced(page); | | 1149 | rv = pmap_is_referenced(page); |
1150 | | | 1150 | |
1151 | dprintf_debug("pmap_clear_reference page %"PRIiPTR"\n", ppn); | | 1151 | dprintf_debug("pmap_clear_reference page %"PRIiPTR"\n", ppn); |
1152 | | | 1152 | |
1153 | if (rv) { | | 1153 | if (rv) { |
1154 | pv_table[ppn].pv_pflags &= ~PV_REFERENCED; | | 1154 | pv_table[ppn].pv_pflags &= ~PV_REFERENCED; |
1155 | pmap_update_page(ppn); | | 1155 | pmap_update_page(ppn); |
1156 | } | | 1156 | } |
1157 | return rv; | | 1157 | return rv; |
1158 | } | | 1158 | } |
1159 | | | 1159 | |
1160 | bool | | 1160 | bool |
1161 | pmap_is_modified(struct vm_page *page) | | 1161 | pmap_is_modified(struct vm_page *page) |
1162 | { | | 1162 | { |
1163 | intptr_t ppn; | | 1163 | intptr_t ppn; |
1164 | bool rv; | | 1164 | bool rv; |
1165 | | | 1165 | |
1166 | ppn = atop(VM_PAGE_TO_PHYS(page)); | | 1166 | ppn = atop(VM_PAGE_TO_PHYS(page)); |
1167 | rv = (pv_table[ppn].pv_pflags & PV_MODIFIED) != 0; | | 1167 | rv = (pv_table[ppn].pv_pflags & PV_MODIFIED) != 0; |
1168 | | | 1168 | |
1169 | dprintf_debug("pmap_is_modified page %"PRIiPTR" : %s\n", ppn, rv?"yes":"no"); | | 1169 | dprintf_debug("pmap_is_modified page %"PRIiPTR" : %s\n", ppn, rv?"yes":"no"); |
1170 | | | 1170 | |
1171 | return rv; | | 1171 | return rv; |
1172 | } | | 1172 | } |
1173 | | | 1173 | |
1174 | bool | | 1174 | bool |
1175 | pmap_is_referenced(struct vm_page *page) | | 1175 | pmap_is_referenced(struct vm_page *page) |
1176 | { | | 1176 | { |
1177 | intptr_t ppn; | | 1177 | intptr_t ppn; |
1178 | | | 1178 | |
1179 | ppn = atop(VM_PAGE_TO_PHYS(page)); | | 1179 | ppn = atop(VM_PAGE_TO_PHYS(page)); |
1180 | dprintf_debug("pmap_is_referenced page %"PRIiPTR"\n", ppn); | | 1180 | dprintf_debug("pmap_is_referenced page %"PRIiPTR"\n", ppn); |
1181 | | | 1181 | |
1182 | return (pv_table[ppn].pv_pflags & PV_REFERENCED) != 0; | | 1182 | return (pv_table[ppn].pv_pflags & PV_REFERENCED) != 0; |
1183 | } | | 1183 | } |
1184 | | | 1184 | |
1185 | paddr_t | | 1185 | paddr_t |
1186 | pmap_phys_address(paddr_t cookie) | | 1186 | pmap_phys_address(paddr_t cookie) |
1187 | { | | 1187 | { |
1188 | panic("pmap_phys_address not implemented\n"); | | 1188 | panic("pmap_phys_address not implemented\n"); |
1189 | return ptoa(cookie); | | 1189 | return ptoa(cookie); |
1190 | } | | 1190 | } |
1191 | | | 1191 | |
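 | /* only bookkeeping here: advance kmem_ext_cur_end, clamped to kmem_ext_end */ | |  | /* only bookkeeping here: advance kmem_ext_cur_end, clamped to kmem_ext_end */ |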
1192 | vaddr_t | | 1192 | vaddr_t |
1193 | pmap_growkernel(vaddr_t maxkvaddr) | | 1193 | pmap_growkernel(vaddr_t maxkvaddr) |
1194 | { | | 1194 | { |
1195 | dprintf_debug("pmap_growkernel: till %p (adding %"PRIu64" KB)\n", | | 1195 | dprintf_debug("pmap_growkernel: till %p (adding %"PRIu64" KB)\n", |
1196 | (void *) maxkvaddr, | | 1196 | (void *) maxkvaddr, |
1197 | (uint64_t) (maxkvaddr - kmem_ext_cur_end)/1024); | | 1197 | (uint64_t) (maxkvaddr - kmem_ext_cur_end)/1024); |
1198 | if (maxkvaddr > kmem_ext_end) | | 1198 | if (maxkvaddr > kmem_ext_end) |
1199 | return kmem_ext_end; | | 1199 | return kmem_ext_end; |
1200 | kmem_ext_cur_end = maxkvaddr; | | 1200 | kmem_ext_cur_end = maxkvaddr; |
1201 | return kmem_ext_cur_end; | | 1201 | return kmem_ext_cur_end; |
1202 | } | | 1202 | } |
1203 | | | 1203 | |