| @@ -1,14 +1,14 @@ | | | @@ -1,14 +1,14 @@ |
1 | /* $NetBSD: pmap.c,v 1.119 2022/05/03 20:52:31 andvar Exp $ */ | | 1 | /* $NetBSD: pmap.c,v 1.120 2022/05/04 07:48:34 andvar Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc. | | 4 | * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation | | 7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Jeremy Cooper. | | 8 | * by Jeremy Cooper. |
9 | * | | 9 | * |
10 | * Redistribution and use in source and binary forms, with or without | | 10 | * Redistribution and use in source and binary forms, with or without |
11 | * modification, are permitted provided that the following conditions | | 11 | * modification, are permitted provided that the following conditions |
12 | * are met: | | 12 | * are met: |
13 | * 1. Redistributions of source code must retain the above copyright | | 13 | * 1. Redistributions of source code must retain the above copyright |
14 | * notice, this list of conditions and the following disclaimer. | | 14 | * notice, this list of conditions and the following disclaimer. |
| @@ -95,27 +95,27 @@ | | | @@ -95,27 +95,27 @@ |
95 | * affected by the change. These instances are documented in the code at | | 95 | * affected by the change. These instances are documented in the code at |
96 | * various points. | | 96 | * various points. |
97 | */ | | 97 | */ |
98 | /*** A Note About the Note About the 68851 Address Translation Cache | | 98 | /*** A Note About the Note About the 68851 Address Translation Cache |
99 | * 4 months into this code I discovered that the sun3x does not have | | 99 | * 4 months into this code I discovered that the sun3x does not have |
100 | * a MC68851 chip. Instead, it has a version of this MMU that is part of the | | 100 | * a MC68851 chip. Instead, it has a version of this MMU that is part of the |
101 | * the 68030 CPU. | | 101 | * 68030 CPU. |
102 | * All though it behaves very similarly to the 68851, it only has 1 task | | 102 | * Although it behaves very similarly to the 68851, it only has 1 task |
103 | * alias and a 22 entry cache. So sadly (or happily), the first paragraph | | 103 | * alias and a 22 entry cache. So sadly (or happily), the first paragraph |
104 | * of the previous note does not apply to the sun3x pmap. | | 104 | * of the previous note does not apply to the sun3x pmap. |
105 | */ | | 105 | */ |
106 | | | 106 | |
107 | #include <sys/cdefs.h> | | 107 | #include <sys/cdefs.h> |
108 | __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.119 2022/05/03 20:52:31 andvar Exp $"); | | 108 | __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.120 2022/05/04 07:48:34 andvar Exp $"); |
109 | | | 109 | |
110 | #include "opt_ddb.h" | | 110 | #include "opt_ddb.h" |
111 | #include "opt_pmap_debug.h" | | 111 | #include "opt_pmap_debug.h" |
112 | | | 112 | |
113 | #include <sys/param.h> | | 113 | #include <sys/param.h> |
114 | #include <sys/systm.h> | | 114 | #include <sys/systm.h> |
115 | #include <sys/proc.h> | | 115 | #include <sys/proc.h> |
116 | #include <sys/malloc.h> | | 116 | #include <sys/malloc.h> |
117 | #include <sys/pool.h> | | 117 | #include <sys/pool.h> |
118 | #include <sys/queue.h> | | 118 | #include <sys/queue.h> |
119 | #include <sys/kcore.h> | | 119 | #include <sys/kcore.h> |
120 | #include <sys/atomic.h> | | 120 | #include <sys/atomic.h> |
121 | | | 121 | |
| @@ -212,27 +212,27 @@ int pmap_debug = 0; | | | @@ -212,27 +212,27 @@ int pmap_debug = 0; |
212 | * tmgrCbase -> +-------------------------------------------------------+ | | 212 | * tmgrCbase -> +-------------------------------------------------------+ |
213 | * | TMGR C level table structures | | | 213 | * | TMGR C level table structures | |
214 | * pvbase -> +-------------------------------------------------------+ | | 214 | * pvbase -> +-------------------------------------------------------+ |
215 | * | Physical to Virtual mapping table (list heads) | | | 215 | * | Physical to Virtual mapping table (list heads) | |
216 | * pvebase -> +-------------------------------------------------------+ | | 216 | * pvebase -> +-------------------------------------------------------+ |
217 | * | Physical to Virtual mapping table (list elements) | | | 217 | * | Physical to Virtual mapping table (list elements) | |
218 | * | | | | 218 | * | | |
219 | * +-------------------------------------------------------+ | | 219 | * +-------------------------------------------------------+ |
220 | * towards higher memory | | 220 | * towards higher memory |
221 | * | | 221 | * |
222 | * For every A table in the MMU A area, there will be a corresponding | | 222 | * For every A table in the MMU A area, there will be a corresponding |
223 | * a_tmgr structure in the TMGR A area. The same will be true for | | 223 | * a_tmgr structure in the TMGR A area. The same will be true for |
224 | * the B and C tables. This arrangement will make it easy to find the | | 224 | * the B and C tables. This arrangement will make it easy to find the |
225 | * controling tmgr structure for any table in the system by use of | | 225 | * controlling tmgr structure for any table in the system by use of |
226 | * (relatively) simple macros. | | 226 | * (relatively) simple macros. |
227 | */ | | 227 | */ |
228 | | | 228 | |
229 | /* | | 229 | /* |
230 | * Global variables for storing the base addresses for the areas | | 230 | * Global variables for storing the base addresses for the areas |
231 | * labeled above. | | 231 | * labeled above. |
232 | */ | | 232 | */ |
233 | static vaddr_t kernAphys; | | 233 | static vaddr_t kernAphys; |
234 | static mmu_long_dte_t *kernAbase; | | 234 | static mmu_long_dte_t *kernAbase; |
235 | static mmu_short_dte_t *kernBbase; | | 235 | static mmu_short_dte_t *kernBbase; |
236 | static mmu_short_pte_t *kernCbase; | | 236 | static mmu_short_pte_t *kernCbase; |
237 | static mmu_short_pte_t *mmuCbase; | | 237 | static mmu_short_pte_t *mmuCbase; |
238 | static mmu_short_dte_t *mmuBbase; | | 238 | static mmu_short_dte_t *mmuBbase; |
| @@ -283,27 +283,27 @@ vaddr_t virtual_contig_end; | | | @@ -283,27 +283,27 @@ vaddr_t virtual_contig_end; |
283 | paddr_t avail_next; | | 283 | paddr_t avail_next; |
284 | | | 284 | |
285 | /* These are used by pmap_copy_page(), etc. */ | | 285 | /* These are used by pmap_copy_page(), etc. */ |
286 | vaddr_t tmp_vpages[2]; | | 286 | vaddr_t tmp_vpages[2]; |
287 | | | 287 | |
288 | /* memory pool for pmap structures */ | | 288 | /* memory pool for pmap structures */ |
289 | struct pool pmap_pmap_pool; | | 289 | struct pool pmap_pmap_pool; |
290 | | | 290 | |
291 | /* | | 291 | /* |
292 | * The 3/80 is the only member of the sun3x family that has non-contiguous | | 292 | * The 3/80 is the only member of the sun3x family that has non-contiguous |
293 | * physical memory. Memory is divided into 4 banks which are physically | | 293 | * physical memory. Memory is divided into 4 banks which are physically |
294 | * locatable on the system board. Although the size of these banks varies | | 294 | * locatable on the system board. Although the size of these banks varies |
295 | * with the size of memory they contain, their base addresses are | | 295 | * with the size of memory they contain, their base addresses are |
296 | * permenently fixed. The following structure, which describes these | | 296 | * permanently fixed. The following structure, which describes these |
297 | * banks, is initialized by pmap_bootstrap() after it reads from a similar | | 297 | * banks, is initialized by pmap_bootstrap() after it reads from a similar |
298 | * structure provided by the ROM Monitor. | | 298 | * structure provided by the ROM Monitor. |
299 | * | | 299 | * |
300 | * For the other machines in the sun3x architecture which do have contiguous | | 300 | * For the other machines in the sun3x architecture which do have contiguous |
301 | * RAM, this list will have only one entry, which will describe the entire | | 301 | * RAM, this list will have only one entry, which will describe the entire |
302 | * range of available memory. | | 302 | * range of available memory. |
303 | */ | | 303 | */ |
304 | struct pmap_physmem_struct avail_mem[SUN3X_NPHYS_RAM_SEGS]; | | 304 | struct pmap_physmem_struct avail_mem[SUN3X_NPHYS_RAM_SEGS]; |
305 | u_int total_phys_mem; | | 305 | u_int total_phys_mem; |
306 | | | 306 | |
307 | /*************************************************************************/ | | 307 | /*************************************************************************/ |
308 | | | 308 | |
309 | /* | | 309 | /* |
| @@ -1208,27 +1208,27 @@ pmap_init_pv(void) | | | @@ -1208,27 +1208,27 @@ pmap_init_pv(void) |
1208 | pvbase[i].pv_flags = 0; /* Zero out page flags */ | | 1208 | pvbase[i].pv_flags = 0; /* Zero out page flags */ |
1209 | } | | 1209 | } |
1210 | } | | 1210 | } |
1211 | | | 1211 | |
1212 | /* is_managed INTERNAL | | 1212 | /* is_managed INTERNAL |
1213 | ** | | 1213 | ** |
1214 | * Determine if the given physical address is managed by the PV system. | | 1214 | * Determine if the given physical address is managed by the PV system. |
1215 | * Note that this logic assumes that no one will ask for the status of | | 1215 | * Note that this logic assumes that no one will ask for the status of |
1216 | * addresses which lie in-between the memory banks on the 3/80. If they | | 1216 | * addresses which lie in-between the memory banks on the 3/80. If they |
1217 | * do so, it will falsely report that it is managed. | | 1217 | * do so, it will falsely report that it is managed. |
1218 | * | | 1218 | * |
1219 | * Note: A "managed" address is one that was reported to the VM system as | | 1219 | * Note: A "managed" address is one that was reported to the VM system as |
1220 | * a "usable page" during system startup. As such, the VM system expects the | | 1220 | * a "usable page" during system startup. As such, the VM system expects the |
1221 | * pmap module to keep an accurate track of the useage of those pages. | | 1221 | * pmap module to keep an accurate track of the usage of those pages. |
1222 | * Any page not given to the VM system at startup does not exist (as far as | | 1222 | * Any page not given to the VM system at startup does not exist (as far as |
1223 | * the VM system is concerned) and is therefore "unmanaged." Examples are | | 1223 | * the VM system is concerned) and is therefore "unmanaged." Examples are |
1224 | * those pages which belong to the ROM monitor and the memory allocated before | | 1224 | * those pages which belong to the ROM monitor and the memory allocated before |
1225 | * the VM system was started. | | 1225 | * the VM system was started. |
1226 | */ | | 1226 | */ |
1227 | static INLINE bool | | 1227 | static INLINE bool |
1228 | is_managed(paddr_t pa) | | 1228 | is_managed(paddr_t pa) |
1229 | { | | 1229 | { |
1230 | if (pa >= avail_start && pa < avail_end) | | 1230 | if (pa >= avail_start && pa < avail_end) |
1231 | return true; | | 1231 | return true; |
1232 | else | | 1232 | else |
1233 | return false; | | 1233 | return false; |
1234 | } | | 1234 | } |
| @@ -1694,27 +1694,27 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd | | | @@ -1694,27 +1694,27 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd |
1694 | * system, parts of the architecture-dependent section of the sun3x | | 1694 | * system, parts of the architecture-dependent section of the sun3x |
1695 | * kernel pass their own flags in the lower, unused bits of the | | 1695 | * kernel pass their own flags in the lower, unused bits of the |
1696 | * physical address supplied to this function. These flags are | | 1696 | * physical address supplied to this function. These flags are |
1697 | * extracted and stored in the temporary variable 'mapflags'. | | 1697 | * extracted and stored in the temporary variable 'mapflags'. |
1698 | * | | 1698 | * |
1699 | * Extract sun3x specific flags from the physical address. | | 1699 | * Extract sun3x specific flags from the physical address. |
1700 | */ | | 1700 | */ |
1701 | mapflags = (pa & ~MMU_PAGE_MASK); | | 1701 | mapflags = (pa & ~MMU_PAGE_MASK); |
1702 | pa &= MMU_PAGE_MASK; | | 1702 | pa &= MMU_PAGE_MASK; |
1703 | | | 1703 | |
1704 | /* | | 1704 | /* |
1705 | * Determine if the physical address being mapped is on-board RAM. | | 1705 | * Determine if the physical address being mapped is on-board RAM. |
1706 | * Any other area of the address space is likely to belong to a | | 1706 | * Any other area of the address space is likely to belong to a |
1707 | * device and hence it would be disasterous to cache its contents. | | 1707 | * device and hence it would be disastrous to cache its contents. |
1708 | */ | | 1708 | */ |
1709 | if ((managed = is_managed(pa)) == false) | | 1709 | if ((managed = is_managed(pa)) == false) |
1710 | mapflags |= PMAP_NC; | | 1710 | mapflags |= PMAP_NC; |
1711 | | | 1711 | |
1712 | /* | | 1712 | /* |
1713 | * For user mappings we walk along the MMU tables of the given | | 1713 | * For user mappings we walk along the MMU tables of the given |
1714 | * pmap, reaching a PTE which describes the virtual page being | | 1714 | * pmap, reaching a PTE which describes the virtual page being |
1715 | * mapped or changed. If any level of the walk ends in an invalid | | 1715 | * mapped or changed. If any level of the walk ends in an invalid |
1716 | * entry, a table must be allocated and the entry must be updated | | 1716 | * entry, a table must be allocated and the entry must be updated |
1717 | * to point to it. | | 1717 | * to point to it. |
1718 | * There is a bit of confusion as to whether this code must be | | 1718 | * There is a bit of confusion as to whether this code must be |
1719 | * re-entrant. For now we will assume it is. To support | | 1719 | * re-entrant. For now we will assume it is. To support |
1720 | * re-entrancy we must unlink tables from the table pool before | | 1720 | * re-entrancy we must unlink tables from the table pool before |
| @@ -2054,27 +2054,27 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd | | | @@ -2054,27 +2054,27 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd |
2054 | } | | 2054 | } |
2055 | | | 2055 | |
2056 | /* pmap_enter_kernel INTERNAL | | 2056 | /* pmap_enter_kernel INTERNAL |
2057 | ** | | 2057 | ** |
2058 | * Map the given virtual address to the given physical address within the | | 2058 | * Map the given virtual address to the given physical address within the |
2059 | * kernel address space. This function exists because the kernel map does | | 2059 | * kernel address space. This function exists because the kernel map does |
2060 | * not do dynamic table allocation. It consists of a contiguous array of ptes | | 2060 | * not do dynamic table allocation. It consists of a contiguous array of ptes |
2061 | * and can be edited directly without the need to walk through any tables. | | 2061 | * and can be edited directly without the need to walk through any tables. |
2062 | * | | 2062 | * |
2063 | * XXX: "Danger, Will Robinson!" | | 2063 | * XXX: "Danger, Will Robinson!" |
2064 | * Note that the kernel should never take a fault on any page | | 2064 | * Note that the kernel should never take a fault on any page |
2065 | * between [ KERNBASE .. virtual_avail ] and this is checked in | | 2065 | * between [ KERNBASE .. virtual_avail ] and this is checked in |
2066 | * trap.c for kernel-mode MMU faults. This means that mappings | | 2066 | * trap.c for kernel-mode MMU faults. This means that mappings |
2067 | * created in that range must be implicily wired. -gwr | | 2067 | * created in that range must be implicitly wired. -gwr |
2068 | */ | | 2068 | */ |
2069 | void | | 2069 | void |
2070 | pmap_enter_kernel(vaddr_t va, paddr_t pa, vm_prot_t prot) | | 2070 | pmap_enter_kernel(vaddr_t va, paddr_t pa, vm_prot_t prot) |
2071 | { | | 2071 | { |
2072 | bool was_valid, insert; | | 2072 | bool was_valid, insert; |
2073 | u_short pte_idx; | | 2073 | u_short pte_idx; |
2074 | int flags; | | 2074 | int flags; |
2075 | mmu_short_pte_t *pte; | | 2075 | mmu_short_pte_t *pte; |
2076 | pv_t *pv; | | 2076 | pv_t *pv; |
2077 | paddr_t old_pa; | | 2077 | paddr_t old_pa; |
2078 | | | 2078 | |
2079 | flags = (pa & ~MMU_PAGE_MASK); | | 2079 | flags = (pa & ~MMU_PAGE_MASK); |
2080 | pa &= MMU_PAGE_MASK; | | 2080 | pa &= MMU_PAGE_MASK; |
| @@ -3016,27 +3016,27 @@ pmap_remove(pmap_t pmap, vaddr_t sva, va | | | @@ -3016,27 +3016,27 @@ pmap_remove(pmap_t pmap, vaddr_t sva, va |
3016 | if (kernel_crp.rp_addr == pmap->pm_a_phys) { | | 3016 | if (kernel_crp.rp_addr == pmap->pm_a_phys) { |
3017 | kernel_crp.rp_addr = kernAphys; | | 3017 | kernel_crp.rp_addr = kernAphys; |
3018 | loadcrp(&kernel_crp); | | 3018 | loadcrp(&kernel_crp); |
3019 | /* will do TLB flush below */ | | 3019 | /* will do TLB flush below */ |
3020 | } | | 3020 | } |
3021 | pmap->pm_a_tmgr = NULL; | | 3021 | pmap->pm_a_tmgr = NULL; |
3022 | pmap->pm_a_phys = kernAphys; | | 3022 | pmap->pm_a_phys = kernAphys; |
3023 | } | | 3023 | } |
3024 | | | 3024 | |
3025 | /* | | 3025 | /* |
3026 | * If we just modified the current address space, | | 3026 | * If we just modified the current address space, |
3027 | * make sure to flush the MMU cache. | | 3027 | * make sure to flush the MMU cache. |
3028 | * | | 3028 | * |
3029 | * XXX - this could be an unecessarily large flush. | | 3029 | * XXX - this could be an unnecessarily large flush. |
3030 | * XXX - Could decide, based on the size of the VA range | | 3030 | * XXX - Could decide, based on the size of the VA range |
3031 | * to be removed, whether to flush "by pages" or "all". | | 3031 | * to be removed, whether to flush "by pages" or "all". |
3032 | */ | | 3032 | */ |
3033 | if (pmap == current_pmap()) | | 3033 | if (pmap == current_pmap()) |
3034 | TBIAU(); | | 3034 | TBIAU(); |
3035 | } | | 3035 | } |
3036 | | | 3036 | |
3037 | /* pmap_remove_a INTERNAL | | 3037 | /* pmap_remove_a INTERNAL |
3038 | ** | | 3038 | ** |
3039 | * This is function number one in a set of three that removes a range | | 3039 | * This is function number one in a set of three that removes a range |
3040 | * of memory in the most efficient manner by removing the highest possible | | 3040 | * of memory in the most efficient manner by removing the highest possible |
3041 | * tables from the memory space. This particular function attempts to remove | | 3041 | * tables from the memory space. This particular function attempts to remove |
3042 | * as many B tables as it can, delegating the remaining fragmented ranges to | | 3042 | * as many B tables as it can, delegating the remaining fragmented ranges to |
| @@ -3050,44 +3050,44 @@ pmap_remove(pmap_t pmap, vaddr_t sva, va | | | @@ -3050,44 +3050,44 @@ pmap_remove(pmap_t pmap, vaddr_t sva, va |
3050 | bool | | 3050 | bool |
3051 | pmap_remove_a(a_tmgr_t *a_tbl, vaddr_t sva, vaddr_t eva) | | 3051 | pmap_remove_a(a_tmgr_t *a_tbl, vaddr_t sva, vaddr_t eva) |
3052 | { | | 3052 | { |
3053 | bool empty; | | 3053 | bool empty; |
3054 | int idx; | | 3054 | int idx; |
3055 | vaddr_t nstart, nend; | | 3055 | vaddr_t nstart, nend; |
3056 | b_tmgr_t *b_tbl; | | 3056 | b_tmgr_t *b_tbl; |
3057 | mmu_long_dte_t *a_dte; | | 3057 | mmu_long_dte_t *a_dte; |
3058 | mmu_short_dte_t *b_dte; | | 3058 | mmu_short_dte_t *b_dte; |
3059 | uint8_t at_wired, bt_wired; | | 3059 | uint8_t at_wired, bt_wired; |
3060 | | | 3060 | |
3061 | /* | | 3061 | /* |
3062 | * The following code works with what I call a 'granularity | | 3062 | * The following code works with what I call a 'granularity |
3063 | * reduction algorithim'. A range of addresses will always have | | 3063 | * reduction algorithm'. A range of addresses will always have |
3064 | * the following properties, which are classified according to | | 3064 | * the following properties, which are classified according to |
3065 | * how the range relates to the size of the current granularity | | 3065 | * how the range relates to the size of the current granularity |
3066 | * - an A table entry: | | 3066 | * - an A table entry: |
3067 | * | | 3067 | * |
3068 | * 1 2 3 4 | | 3068 | * 1 2 3 4 |
3069 | * -+---+---+---+---+---+---+---+- | | 3069 | * -+---+---+---+---+---+---+---+- |
3070 | * -+---+---+---+---+---+---+---+- | | 3070 | * -+---+---+---+---+---+---+---+- |
3071 | * | | 3071 | * |
3072 | * A range will always start on a granularity boundary, illustrated | | 3072 | * A range will always start on a granularity boundary, illustrated |
3073 | * by '+' signs in the table above, or it will start at some point | | 3073 | * by '+' signs in the table above, or it will start at some point |
3074 | * inbetween a granularity boundary, as illustrated by point 1. | | 3074 | * in-between a granularity boundary, as illustrated by point 1. |
3075 | * The first step in removing a range of addresses is to remove the | | 3075 | * The first step in removing a range of addresses is to remove the |
3076 | * range between 1 and 2, the nearest granularity boundary. This | | 3076 | * range between 1 and 2, the nearest granularity boundary. This |
3077 | * job is handled by the section of code governed by the | | 3077 | * job is handled by the section of code governed by the |
3078 | * 'if (start < nstart)' statement. | | 3078 | * 'if (start < nstart)' statement. |
3079 | * | | 3079 | * |
3080 | * A range will always encompass zero or more intergral granules, | | 3080 | * A range will always encompass zero or more integral granules, |
3081 | * illustrated by points 2 and 3. Integral granules are easy to | | 3081 | * illustrated by points 2 and 3. Integral granules are easy to |
3082 | * remove. The removal of these granules is the second step, and | | 3082 | * remove. The removal of these granules is the second step, and |
3083 | * is handled by the code block 'if (nstart < nend)'. | | 3083 | * is handled by the code block 'if (nstart < nend)'. |
3084 | * | | 3084 | * |
3085 | * Lastly, a range will always end on a granularity boundary, | | 3085 | * Lastly, a range will always end on a granularity boundary, |
3086 | * ill. by point 3, or it will fall just beyond one, ill. by point | | 3086 | * ill. by point 3, or it will fall just beyond one, ill. by point |
3087 | * 4. The last step involves removing this range and is handled by | | 3087 | * 4. The last step involves removing this range and is handled by |
3088 | * the code block 'if (nend < end)'. | | 3088 | * the code block 'if (nend < end)'. |
3089 | */ | | 3089 | */ |
3090 | nstart = MMU_ROUND_UP_A(sva); | | 3090 | nstart = MMU_ROUND_UP_A(sva); |
3091 | nend = MMU_ROUND_A(eva); | | 3091 | nend = MMU_ROUND_A(eva); |
3092 | | | 3092 | |
3093 | at_wired = a_tbl->at_wcnt; | | 3093 | at_wired = a_tbl->at_wcnt; |
| @@ -3582,27 +3582,27 @@ pmap_kcore_hdr(struct sun3x_kcore_hdr *s | | | @@ -3582,27 +3582,27 @@ pmap_kcore_hdr(struct sun3x_kcore_hdr *s |
3582 | spa = avail_mem[i].pmem_start; | | 3582 | spa = avail_mem[i].pmem_start; |
3583 | spa = m68k_trunc_page(spa); | | 3583 | spa = m68k_trunc_page(spa); |
3584 | len = avail_mem[i].pmem_end - spa; | | 3584 | len = avail_mem[i].pmem_end - spa; |
3585 | len = m68k_round_page(len); | | 3585 | len = m68k_round_page(len); |
3586 | sh->ram_segs[i].start = spa; | | 3586 | sh->ram_segs[i].start = spa; |
3587 | sh->ram_segs[i].size = len; | | 3587 | sh->ram_segs[i].size = len; |
3588 | } | | 3588 | } |
3589 | } | | 3589 | } |
3590 | | | 3590 | |
3591 | | | 3591 | |
3592 | /* pmap_virtual_space INTERFACE | | 3592 | /* pmap_virtual_space INTERFACE |
3593 | ** | | 3593 | ** |
3594 | * Return the current available range of virtual addresses in the | | 3594 | * Return the current available range of virtual addresses in the |
3595 | * arguuments provided. Only really called once. | | 3595 | * arguments provided. Only really called once. |
3596 | */ | | 3596 | */ |
3597 | void | | 3597 | void |
3598 | pmap_virtual_space(vaddr_t *vstart, vaddr_t *vend) | | 3598 | pmap_virtual_space(vaddr_t *vstart, vaddr_t *vend) |
3599 | { | | 3599 | { |
3600 | | | 3600 | |
3601 | *vstart = virtual_avail; | | 3601 | *vstart = virtual_avail; |
3602 | *vend = virtual_end; | | 3602 | *vend = virtual_end; |
3603 | } | | 3603 | } |
3604 | | | 3604 | |
3605 | /* | | 3605 | /* |
3606 | * Provide memory to the VM system. | | 3606 | * Provide memory to the VM system. |
3607 | * | | 3607 | * |
3608 | * Assume avail_start is always in the | | 3608 | * Assume avail_start is always in the |
| @@ -3639,27 +3639,27 @@ pmap_page_upload(void) | | | @@ -3639,27 +3639,27 @@ pmap_page_upload(void) |
3639 | * map statistics. | | 3639 | * map statistics. |
3640 | */ | | 3640 | */ |
3641 | segsz_t | | 3641 | segsz_t |
3642 | pmap_count(pmap_t pmap, int type) | | 3642 | pmap_count(pmap_t pmap, int type) |
3643 | { | | 3643 | { |
3644 | u_int count; | | 3644 | u_int count; |
3645 | int a_idx, b_idx; | | 3645 | int a_idx, b_idx; |
3646 | a_tmgr_t *a_tbl; | | 3646 | a_tmgr_t *a_tbl; |
3647 | b_tmgr_t *b_tbl; | | 3647 | b_tmgr_t *b_tbl; |
3648 | c_tmgr_t *c_tbl; | | 3648 | c_tmgr_t *c_tbl; |
3649 | | | 3649 | |
3650 | /* | | 3650 | /* |
3651 | * If the pmap does not have its own A table manager, it has no | | 3651 | * If the pmap does not have its own A table manager, it has no |
3652 | * valid entires. | | 3652 | * valid entries. |
3653 | */ | | 3653 | */ |
3654 | if (pmap->pm_a_tmgr == NULL) | | 3654 | if (pmap->pm_a_tmgr == NULL) |
3655 | return 0; | | 3655 | return 0; |
3656 | | | 3656 | |
3657 | a_tbl = pmap->pm_a_tmgr; | | 3657 | a_tbl = pmap->pm_a_tmgr; |
3658 | | | 3658 | |
3659 | count = 0; | | 3659 | count = 0; |
3660 | for (a_idx = 0; a_idx < MMU_TIA(KERNBASE3X); a_idx++) { | | 3660 | for (a_idx = 0; a_idx < MMU_TIA(KERNBASE3X); a_idx++) { |
3661 | if (MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) { | | 3661 | if (MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) { |
3662 | b_tbl = mmuB2tmgr(mmu_ptov(a_tbl->at_dtbl[a_idx].addr.raw)); | | 3662 | b_tbl = mmuB2tmgr(mmu_ptov(a_tbl->at_dtbl[a_idx].addr.raw)); |
3663 | for (b_idx = 0; b_idx < MMU_B_TBL_SIZE; b_idx++) { | | 3663 | for (b_idx = 0; b_idx < MMU_B_TBL_SIZE; b_idx++) { |
3664 | if (MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) { | | 3664 | if (MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) { |
3665 | c_tbl = mmuC2tmgr( | | 3665 | c_tbl = mmuC2tmgr( |