Tue Jan 26 21:19:25 2010 UTC
Revamp pmap.  Add exec page caching logic from the powerpc oea pmap.  Shrink
struct vm_page by embedding the first pv_entry in it.  Remove pv_flags since
nothing really needed it.  Add pmap event counters.  Rework the virtual cache
alias logic.  Allow pmap_copy_page and pmap_zero_page to deal with pages that
cannot be mapped through KSEG0.
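
The struct vm_page change replaces the old pointer-plus-global-pv_table
arrangement: the first pv_entry for a page now lives in that page's
machine-dependent data, and the separate pv_table[] array (dropped in the
pmap.c hunk below) goes away.  Condensed from the vmparam.h hunk below,
uniprocessor shape only:

	typedef struct pv_entry {
		struct pv_entry	*pv_next;	/* next mapping of the page */
		struct pmap	*pv_pmap;	/* pmap the mapping belongs to */
		vaddr_t		pv_va;		/* VA of the mapping */
	} *pv_entry_t;

	struct vm_page_md {
		struct pv_entry	pvh_first;	/* first mapping, embedded */
		u_int		pvh_attrs;	/* PG_MD_* attribute bits */
	};

Per VM_MDPAGE_INIT, a page with no mappings has pvh_first.pv_pmap == NULL.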


(matt)
diff -r1.50.24.3 -r1.50.24.4 src/sys/arch/mips/conf/Makefile.mips
diff -r1.9.96.1 -r1.9.96.2 src/sys/arch/mips/include/cache.h
diff -r1.90.16.14 -r1.90.16.15 src/sys/arch/mips/include/cpu.h
diff -r1.23.38.5 -r1.23.38.6 src/sys/arch/mips/include/mips3_pte.h
diff -r1.54.26.7 -r1.54.26.8 src/sys/arch/mips/include/pmap.h
diff -r1.19.18.1 -r1.19.18.2 src/sys/arch/mips/include/pte.h
diff -r1.41.28.9 -r1.41.28.10 src/sys/arch/mips/include/vmparam.h
diff -r1.33.96.2 -r1.33.96.3 src/sys/arch/mips/mips/cache.c
diff -r1.35.38.4 -r1.35.38.5 src/sys/arch/mips/mips/mem.c
diff -r1.179.16.12 -r1.179.16.13 src/sys/arch/mips/mips/pmap.c
diff -r1.1.2.3 -r1.1.2.4 src/sys/arch/mips/mips/pmap_segtab.c

cvs diff -r1.50.24.3 -r1.50.24.4 src/sys/arch/mips/conf/Makefile.mips

--- src/sys/arch/mips/conf/Makefile.mips 2009/09/16 03:22:03 1.50.24.3
+++ src/sys/arch/mips/conf/Makefile.mips 2010/01/26 21:19:25 1.50.24.4
@@ -1,14 +1,14 @@
1# $NetBSD: Makefile.mips,v 1.50.24.3 2009/09/16 03:22:03 matt Exp $ 1# $NetBSD: Makefile.mips,v 1.50.24.4 2010/01/26 21:19:25 matt Exp $
2 2
3# Makefile for NetBSD 3# Makefile for NetBSD
4# 4#
5# This makefile is constructed from a machine description: 5# This makefile is constructed from a machine description:
6# config machineid 6# config machineid
7# Most changes should be made in the machine description 7# Most changes should be made in the machine description
8# /sys/arch/<machine>/conf/``machineid'' 8# /sys/arch/<machine>/conf/``machineid''
9# after which you should do 9# after which you should do
10# config machineid 10# config machineid
11# Machine generic makefile changes should be made in 11# Machine generic makefile changes should be made in
12# /sys/arch/mips/conf/Makefile.mips 12# /sys/arch/mips/conf/Makefile.mips
13# after which config should be rerun for all machines of that type. 13# after which config should be rerun for all machines of that type.
14# 14#
@@ -53,28 +53,28 @@ CFLAGS+= -msym32 -mabi=64
53AFLAGS+= -msym32 -mabi=64 53AFLAGS+= -msym32 -mabi=64
54.endif 54.endif
55.if ${MACHINE_ARCH} == "mips64eb" 55.if ${MACHINE_ARCH} == "mips64eb"
56LDFLAGS+= -m elf64btsmip 56LDFLAGS+= -m elf64btsmip
57LINKFORMAT+= -m elf64btsmip 57LINKFORMAT+= -m elf64btsmip
58SYSTEM_LD_TAIL_EXTRA+= \ 58SYSTEM_LD_TAIL_EXTRA+= \
59 ;echo ${OBJCOPY} -O elf32-nbigmips $@ $@.elf32; \ 59 ;echo ${OBJCOPY} -O elf32-nbigmips $@ $@.elf32; \
60 ${OBJCOPY} -O elf32-nbigmips $@ $@.elf32 60 ${OBJCOPY} -O elf32-nbigmips $@ $@.elf32
61.endif 61.endif
62.if ${MACHINE_ARCH} == "mips64el" 62.if ${MACHINE_ARCH} == "mips64el"
63LDFLAGS+= -m elf64ltsmip 63LDFLAGS+= -m elf64ltsmip
64LINKFORMAT+= -m elf64ltsmip 64LINKFORMAT+= -m elf64ltsmip
65SYSTEM_LD_TAIL_EXTRA+= \ 65SYSTEM_LD_TAIL_EXTRA+= \
66 ;echo ${OBJCOPY} -O elf32-nlittlemips $@ $@.elf32; \ 66 ;echo ${OBJCOPY} -O elf32-nlittlemips $@ $@.elf32; \
67 ${OBJCOPY} -O elf32-nlittlemips $@ $@.elf32 67 ${OBJCOPY} -O elf32-nlittlemips $@ $@.elf32
68.endif 68.endif
69.endif # LP64=yes 69.endif # LP64=yes
70AFLAGS+= -mno-abicalls -x assembler-with-cpp -traditional-cpp ${AOPTS} 70AFLAGS+= -mno-abicalls -x assembler-with-cpp -traditional-cpp ${AOPTS}
71 71
72## 72##
73## (3) libkern and compat 73## (3) libkern and compat
74## 74##
75.if !empty(IDENT:M-DLKM) 75.if !empty(IDENT:M-DLKM)
76KERN_AS= obj 76KERN_AS= obj
77.endif 77.endif
78 78
79## 79##
80## (4) local objects, compile rules, and dependencies 80## (4) local objects, compile rules, and dependencies

cvs diff -r1.9.96.1 -r1.9.96.2 src/sys/arch/mips/include/cache.h

--- src/sys/arch/mips/include/cache.h 2010/01/20 06:58:35 1.9.96.1
+++ src/sys/arch/mips/include/cache.h 2010/01/26 21:19:25 1.9.96.2
@@ -1,14 +1,14 @@
1/* $NetBSD: cache.h,v 1.9.96.1 2010/01/20 06:58:35 matt Exp $ */ 1/* $NetBSD: cache.h,v 1.9.96.2 2010/01/26 21:19:25 matt Exp $ */
2 2
3/* 3/*
4 * Copyright 2001 Wasabi Systems, Inc. 4 * Copyright 2001 Wasabi Systems, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc. 7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
@@ -196,37 +196,50 @@ struct mips_cache_info {
196 196
197 /* 197 /*
198 * These two variables inform the rest of the kernel about the 198 * These two variables inform the rest of the kernel about the
199 * size of the largest D-cache line present in the system. The 199 * size of the largest D-cache line present in the system. The
200 * mask can be used to determine if a region of memory is cache 200 * mask can be used to determine if a region of memory is cache
201 * line size aligned. 201 * line size aligned.
202 * 202 *
203 * Whenever any code updates a data cache line size, it should 203 * Whenever any code updates a data cache line size, it should
204 * call mips_dcache_compute_align() to recompute these values. 204 * call mips_dcache_compute_align() to recompute these values.
205 */ 205 */
206 u_int mci_dcache_align; 206 u_int mci_dcache_align;
207 u_int mci_dcache_align_mask; 207 u_int mci_dcache_align_mask;
208 208
209 u_int mci_cache_alias_mask; 
210 u_int mci_cache_prefer_mask; 209 u_int mci_cache_prefer_mask;
 210#if defined(MIPS2) || defined(MIPS3) || defined(MIPS3_5900)
 211 u_int mci_cache_alias_mask;
211 212
212 bool mci_cache_virtual_alias; 213 bool mci_cache_virtual_alias;
 214
 215#define MIPS_CACHE_ALIAS_MASK mips_cache_info.mci_cache_alias_mask
 216#define MIPS_CACHE_VIRTUAL_ALIAS mips_cache_info.mci_cache_virtual_alias
 217#elif defined(MIPS1) || defined(MIPS32) || defined(MIPS64)
 218#define MIPS_CACHE_ALIAS_MASK 0
 219#define MIPS_CACHE_VIRTUAL_ALIAS false
 220#else
 221#error mci_cache screw up
 222#endif
213}; 223};
214 224
215extern struct mips_cache_info mips_cache_info; 225extern struct mips_cache_info mips_cache_info;
 226
 227
216/* 228/*
217 * XXX XXX XXX THIS SHOULD NOT EXIST XXX XXX XXX 229 * XXX XXX XXX THIS SHOULD NOT EXIST XXX XXX XXX
218 */ 230 */
219#define mips_cache_indexof(x) (((vaddr_t)(x)) & mips_cache_info.mci_cache_alias_mask) 231#define mips_cache_indexof(x) (((vaddr_t)(x)) & MIPS_CACHE_ALIAS_MASK)
 232#define mips_cache_badalias(x,y) (((vaddr_t)(x)^(vaddr_t)(y)) & MIPS_CACHE_ALIAS_MASK)
220 233
221#define __mco_noargs(prefix, x) \ 234#define __mco_noargs(prefix, x) \
222do { \ 235do { \
223 (*mips_cache_ops.mco_ ## prefix ## p ## x )(); \ 236 (*mips_cache_ops.mco_ ## prefix ## p ## x )(); \
224 if (*mips_cache_ops.mco_ ## prefix ## s ## x ) \ 237 if (*mips_cache_ops.mco_ ## prefix ## s ## x ) \
225 (*mips_cache_ops.mco_ ## prefix ## s ## x )(); \ 238 (*mips_cache_ops.mco_ ## prefix ## s ## x )(); \
226} while (/*CONSTCOND*/0) 239} while (/*CONSTCOND*/0)
227 240
228#define __mco_2args(prefix, x, a, b) \ 241#define __mco_2args(prefix, x, a, b) \
229do { \ 242do { \
230 (*mips_cache_ops.mco_ ## prefix ## p ## x )((a), (b)); \ 243 (*mips_cache_ops.mco_ ## prefix ## p ## x )((a), (b)); \
231 if (*mips_cache_ops.mco_ ## prefix ## s ## x ) \ 244 if (*mips_cache_ops.mco_ ## prefix ## s ## x ) \
232 (*mips_cache_ops.mco_ ## prefix ## s ## x )((a), (b)); \ 245 (*mips_cache_ops.mco_ ## prefix ## s ## x )((a), (b)); \
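
The new MIPS_CACHE_ALIAS_MASK / MIPS_CACHE_VIRTUAL_ALIAS macros and
mips_cache_badalias() let callers ask the alias question without poking at
mips_cache_info directly, and they become constant 0/false on kernels built
only for MIPS1/MIPS32/MIPS64, so the whole test compiles away there.  A
hedged usage sketch (va_old and va_new are invented names, not from this
change):

	/*
	 * Write back and invalidate the old mapping only when the two
	 * virtual addresses of the same physical page would land on
	 * different cache colors.
	 */
	if (MIPS_CACHE_VIRTUAL_ALIAS && mips_cache_badalias(va_old, va_new))
		mips_dcache_wbinv_range(va_old, PAGE_SIZE);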

cvs diff -r1.90.16.14 -r1.90.16.15 src/sys/arch/mips/include/cpu.h

--- src/sys/arch/mips/include/cpu.h 2010/01/20 09:04:34 1.90.16.14
+++ src/sys/arch/mips/include/cpu.h 2010/01/26 21:19:25 1.90.16.15
@@ -1,14 +1,14 @@
1/* $NetBSD: cpu.h,v 1.90.16.14 2010/01/20 09:04:34 matt Exp $ */ 1/* $NetBSD: cpu.h,v 1.90.16.15 2010/01/26 21:19:25 matt Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1992, 1993 4 * Copyright (c) 1992, 1993
5 * The Regents of the University of California. All rights reserved. 5 * The Regents of the University of California. All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to Berkeley by 7 * This code is derived from software contributed to Berkeley by
8 * Ralph Campbell and Rick Macklem. 8 * Ralph Campbell and Rick Macklem.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -132,26 +132,28 @@ struct cpu_info {
132 struct lwp *ci_fpcurlwp; /* the current FPU owner */ 132 struct lwp *ci_fpcurlwp; /* the current FPU owner */
133 int ci_want_resched; /* user preemption pending */ 133 int ci_want_resched; /* user preemption pending */
134 int ci_mtx_count; /* negative count of held mutexes */ 134 int ci_mtx_count; /* negative count of held mutexes */
135 int ci_mtx_oldspl; /* saved SPL value */ 135 int ci_mtx_oldspl; /* saved SPL value */
136 int ci_idepth; /* hardware interrupt depth */ 136 int ci_idepth; /* hardware interrupt depth */
137 device_t ci_dev; /* owning device */ 137 device_t ci_dev; /* owning device */
138 vaddr_t ci_ebase; /* VA of exception base */ 138 vaddr_t ci_ebase; /* VA of exception base */
139 paddr_t ci_ebase_pa; /* PA of exception base */ 139 paddr_t ci_ebase_pa; /* PA of exception base */
140 u_long ci_cctr_freq; /* cycle counter frequency */ 140 u_long ci_cctr_freq; /* cycle counter frequency */
141 /* 141 /*
142 * Per-cpu pmap information 142 * Per-cpu pmap information
143 */ 143 */
144 struct segtab *ci_pmap_segbase; 144 struct segtab *ci_pmap_segbase;
 145 vaddr_t ci_pmap_srcbase; /* starting VA of ephemeral src space */
 146 vaddr_t ci_pmap_dstbase; /* starting VA of ephemeral dst space */
145 uint32_t ci_pmap_asid_next; /* next asid to be assigned */ 147 uint32_t ci_pmap_asid_next; /* next asid to be assigned */
146 uint32_t ci_pmap_asid_generation; /* current asid generation */ 148 uint32_t ci_pmap_asid_generation; /* current asid generation */
147 uint32_t ci_pmap_asid_reserved; /* base of ASID space */ 149 uint32_t ci_pmap_asid_reserved; /* base of ASID space */
148 uint32_t ci_pmap_asid_max; /* max (exclusive) assignable asid */ 150 uint32_t ci_pmap_asid_max; /* max (exclusive) assignable asid */
149}; 151};
150 152
151#define CPU_INFO_ITERATOR int 153#define CPU_INFO_ITERATOR int
152#define CPU_INFO_FOREACH(cii, ci) \ 154#define CPU_INFO_FOREACH(cii, ci) \
153 (void)(cii), ci = &cpu_info_store; ci != NULL; ci = ci->ci_next 155 (void)(cii), ci = &cpu_info_store; ci != NULL; ci = ci->ci_next
154 156
155#endif /* !_LOCORE */ 157#endif /* !_LOCORE */
156#endif /* _KERNEL */ 158#endif /* _KERNEL */
157 159

cvs diff -r1.23.38.5 -r1.23.38.6 src/sys/arch/mips/include/mips3_pte.h

--- src/sys/arch/mips/include/mips3_pte.h 2010/01/20 06:58:35 1.23.38.5
+++ src/sys/arch/mips/include/mips3_pte.h 2010/01/26 21:19:25 1.23.38.6
@@ -1,14 +1,14 @@
1/* $NetBSD: mips3_pte.h,v 1.23.38.5 2010/01/20 06:58:35 matt Exp $ */ 1/* $NetBSD: mips3_pte.h,v 1.23.38.6 2010/01/26 21:19:25 matt Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1992, 1993 4 * Copyright (c) 1992, 1993
5 * The Regents of the University of California. All rights reserved. 5 * The Regents of the University of California. All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to Berkeley by 7 * This code is derived from software contributed to Berkeley by
8 * the Systems Programming Group of the University of Utah Computer 8 * the Systems Programming Group of the University of Utah Computer
9 * Science Department and Ralph Campbell. 9 * Science Department and Ralph Campbell.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * are met: 13 * are met:
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
@@ -162,27 +162,27 @@ unsigned int pg_g:1, /* HW: ignore as
162 162
163/* pte accessor macros */ 163/* pte accessor macros */
164 164
165#define mips3_pfn_is_ext(x) ((x) & 0x3c000000) 165#define mips3_pfn_is_ext(x) ((x) & 0x3c000000)
166#define mips3_paddr_to_tlbpfn(x) \ 166#define mips3_paddr_to_tlbpfn(x) \
167 (((paddr_t)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME) 167 (((paddr_t)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME)
168#define mips3_tlbpfn_to_paddr(x) \ 168#define mips3_tlbpfn_to_paddr(x) \
169 ((paddr_t)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT) 169 ((paddr_t)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT)
170#define mips3_vad_to_vpn(x) ((vaddr_t)(x) & MIPS3_PG_SVPN) 170#define mips3_vad_to_vpn(x) ((vaddr_t)(x) & MIPS3_PG_SVPN)
171#define mips3_vpn_to_vad(x) ((x) & MIPS3_PG_SVPN) 171#define mips3_vpn_to_vad(x) ((x) & MIPS3_PG_SVPN)
172 172
173#define MIPS3_PTE_TO_PADDR(pte) (mips3_tlbpfn_to_paddr(pte)) 173#define MIPS3_PTE_TO_PADDR(pte) (mips3_tlbpfn_to_paddr(pte))
174#define MIPS3_PAGE_IS_RDONLY(pte,va) \ 174#define MIPS3_PAGE_IS_RDONLY(pte,va) \
175 (pmap_is_page_ro(pmap_kernel(), mips_trunc_page(va), (pte))) 175 (pmap_is_page_ro_p(pmap_kernel(), mips_trunc_page(va), (pte)))
176 176
177 177
178#define MIPS3_PG_SIZE_4K 0x00000000 178#define MIPS3_PG_SIZE_4K 0x00000000
179#define MIPS3_PG_SIZE_16K 0x00006000 179#define MIPS3_PG_SIZE_16K 0x00006000
180#define MIPS3_PG_SIZE_64K 0x0001e000 180#define MIPS3_PG_SIZE_64K 0x0001e000
181#define MIPS3_PG_SIZE_256K 0x0007e000 181#define MIPS3_PG_SIZE_256K 0x0007e000
182#define MIPS3_PG_SIZE_1M 0x001fe000 182#define MIPS3_PG_SIZE_1M 0x001fe000
183#define MIPS3_PG_SIZE_4M 0x007fe000 183#define MIPS3_PG_SIZE_4M 0x007fe000
184#define MIPS3_PG_SIZE_16M 0x01ffe000 184#define MIPS3_PG_SIZE_16M 0x01ffe000
185#define MIPS3_PG_SIZE_64M 0x07ffe000 185#define MIPS3_PG_SIZE_64M 0x07ffe000
186#define MIPS3_PG_SIZE_256M 0x1fffe000 186#define MIPS3_PG_SIZE_256M 0x1fffe000
187 187
188#define MIPS3_PG_SIZE_MASK_TO_SIZE(pg_mask) \ 188#define MIPS3_PG_SIZE_MASK_TO_SIZE(pg_mask) \

cvs diff -r1.54.26.7 -r1.54.26.8 src/sys/arch/mips/include/pmap.h

--- src/sys/arch/mips/include/pmap.h 2010/01/22 07:41:10 1.54.26.7
+++ src/sys/arch/mips/include/pmap.h 2010/01/26 21:19:25 1.54.26.8
@@ -1,14 +1,14 @@
1/* $NetBSD: pmap.h,v 1.54.26.7 2010/01/22 07:41:10 matt Exp $ */ 1/* $NetBSD: pmap.h,v 1.54.26.8 2010/01/26 21:19:25 matt Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1992, 1993 4 * Copyright (c) 1992, 1993
5 * The Regents of the University of California. All rights reserved. 5 * The Regents of the University of California. All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to Berkeley by 7 * This code is derived from software contributed to Berkeley by
8 * Ralph Campbell. 8 * Ralph Campbell.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -159,43 +159,26 @@ struct pmap_asid_info {
159 * Machine dependent pmap structure. 159 * Machine dependent pmap structure.
160 */ 160 */
161typedef struct pmap { 161typedef struct pmap {
162 kmutex_t pm_lock; /* lock on pmap */ 162 kmutex_t pm_lock; /* lock on pmap */
163 struct segtab *pm_segtab; /* pointers to pages of PTEs */ 163 struct segtab *pm_segtab; /* pointers to pages of PTEs */
164#ifdef MULTIPROCESSOR 164#ifdef MULTIPROCESSOR
165 uint32_t pm_cpus; /* pmap was active on ... */ 165 uint32_t pm_cpus; /* pmap was active on ... */
166#endif 166#endif
167 int pm_count; /* pmap reference count */ 167 int pm_count; /* pmap reference count */
168 struct pmap_statistics pm_stats; /* pmap statistics */ 168 struct pmap_statistics pm_stats; /* pmap statistics */
169 struct pmap_asid_info pm_pai[1]; 169 struct pmap_asid_info pm_pai[1];
170} *pmap_t; 170} *pmap_t;
171 171
172/* 
173 * For each struct vm_page, there is a list of all currently valid virtual 
174 * mappings of that page. An entry is a pv_entry_t, the list is pv_table. 
175 * XXX really should do this as a part of the higher level code. 
176 */ 
177typedef struct pv_entry { 
178 struct pv_entry *pv_next; /* next pv_entry */ 
179 struct pmap *pv_pmap; /* pmap where mapping lies */ 
180 vaddr_t pv_va; /* virtual address for mapping */ 
181 int pv_flags; /* some flags for the mapping */ 
182} *pv_entry_t; 
183 
184#define PV_UNCACHED 0x0001 /* page is mapped uncached */ 
185#define PV_MODIFIED 0x0002 /* page has been modified */ 
186#define PV_REFERENCED 0x0004 /* page has been recently referenced */ 
187 
188 
189#ifdef _KERNEL 172#ifdef _KERNEL
190 173
191struct pmap_kernel { 174struct pmap_kernel {
192 struct pmap kernel_pmap; 175 struct pmap kernel_pmap;
193#ifdef MULTIPROCESSOR 176#ifdef MULTIPROCESSOR
194 struct pmap_asid_info kernel_pai[MAXCPUS-1]; 177 struct pmap_asid_info kernel_pai[MAXCPUS-1];
195#endif 178#endif
196}; 179};
197 180
198extern struct pmap_kernel kernel_pmap_store; 181extern struct pmap_kernel kernel_pmap_store;
199extern paddr_t mips_avail_start; 182extern paddr_t mips_avail_start;
200extern paddr_t mips_avail_end; 183extern paddr_t mips_avail_end;
201extern vaddr_t mips_virtual_end; 184extern vaddr_t mips_virtual_end;
@@ -221,28 +204,28 @@ void pmap_set_modified(paddr_t);
221void pmap_procwr(struct proc *, vaddr_t, size_t); 204void pmap_procwr(struct proc *, vaddr_t, size_t);
222#define PMAP_NEED_PROCWR 205#define PMAP_NEED_PROCWR
223 206
224uint32_t pmap_tlb_asid_alloc(pmap_t pmap, struct cpu_info *ci); 207uint32_t pmap_tlb_asid_alloc(pmap_t pmap, struct cpu_info *ci);
225void pmap_tlb_invalidate_asid(pmap_t pmap); 208void pmap_tlb_invalidate_asid(pmap_t pmap);
226int pmap_tlb_update(pmap_t pmap, vaddr_t, uint32_t); 209int pmap_tlb_update(pmap_t pmap, vaddr_t, uint32_t);
227void pmap_tlb_invalidate_addr(pmap_t pmap, vaddr_t); 210void pmap_tlb_invalidate_addr(pmap_t pmap, vaddr_t);
228 211
229/* 212/*
230 * pmap_prefer() helps reduce virtual-coherency exceptions in 213 * pmap_prefer() helps reduce virtual-coherency exceptions in
231 * the virtually-indexed cache on mips3 CPUs. 214 * the virtually-indexed cache on mips3 CPUs.
232 */ 215 */
233#ifdef MIPS3_PLUS 216#ifdef MIPS3_PLUS
234#define PMAP_PREFER(pa, va, sz, td) pmap_prefer((pa), (va), (td)) 217#define PMAP_PREFER(pa, va, sz, td) pmap_prefer((pa), (va), (sz), (td))
235void pmap_prefer(vaddr_t, vaddr_t *, int); 218void pmap_prefer(vaddr_t, vaddr_t *, vsize_t, int);
236#endif /* MIPS3_PLUS */ 219#endif /* MIPS3_PLUS */
237 220
238#define PMAP_STEAL_MEMORY /* enable pmap_steal_memory() */ 221#define PMAP_STEAL_MEMORY /* enable pmap_steal_memory() */
239 222
240/* 223/*
241 * Alternate mapping hooks for pool pages. Avoids thrashing the TLB. 224 * Alternate mapping hooks for pool pages. Avoids thrashing the TLB.
242 */ 225 */
243vaddr_t mips_pmap_map_poolpage(paddr_t); 226vaddr_t mips_pmap_map_poolpage(paddr_t);
244paddr_t mips_pmap_unmap_poolpage(vaddr_t); 227paddr_t mips_pmap_unmap_poolpage(vaddr_t);
245struct vm_page *mips_pmap_alloc_poolpage(int); 228struct vm_page *mips_pmap_alloc_poolpage(int);
246#define PMAP_ALLOC_POOLPAGE(flags) mips_pmap_alloc_poolpage(flags) 229#define PMAP_ALLOC_POOLPAGE(flags) mips_pmap_alloc_poolpage(flags)
247#define PMAP_MAP_POOLPAGE(pa) mips_pmap_map_poolpage(pa) 230#define PMAP_MAP_POOLPAGE(pa) mips_pmap_map_poolpage(pa)
248#define PMAP_UNMAP_POOLPAGE(va) mips_pmap_unmap_poolpage(va) 231#define PMAP_UNMAP_POOLPAGE(va) mips_pmap_unmap_poolpage(va)
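
PMAP_PREFER()/pmap_prefer() now take the mapping size as well, presumably so
the top-down case can account for the object's length when picking a color.
The real body lives in pmap.c and is not part of this diff; as a rough sketch
of what a color-matching hint does (prefer_color is an invented helper, and
using mci_cache_prefer_mask as the color mask is an assumption):

	static vaddr_t
	prefer_color(vaddr_t hint, vaddr_t va)
	{
		const vsize_t mask = mips_cache_info.mci_cache_prefer_mask;

		/* Move va up (never down) until its color matches the hint. */
		return va + ((hint - va) & mask);
	}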

cvs diff -r1.19.18.1 -r1.19.18.2 src/sys/arch/mips/include/pte.h

--- src/sys/arch/mips/include/pte.h 2009/12/30 04:51:26 1.19.18.1
+++ src/sys/arch/mips/include/pte.h 2010/01/26 21:19:25 1.19.18.2
@@ -1,14 +1,14 @@
1/* $NetBSD: pte.h,v 1.19.18.1 2009/12/30 04:51:26 matt Exp $ */ 1/* $NetBSD: pte.h,v 1.19.18.2 2010/01/26 21:19:25 matt Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1997 The NetBSD Foundation, Inc. 4 * Copyright (c) 1997 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center. 9 * NASA Ames Research Center.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * are met: 13 * are met:
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
@@ -57,27 +57,27 @@
57typedef union pt_entry { 57typedef union pt_entry {
58 unsigned int pt_entry; /* for copying, etc. */ 58 unsigned int pt_entry; /* for copying, etc. */
59 struct mips1_pte pt_mips1_pte; /* for getting to bits by name */ 59 struct mips1_pte pt_mips1_pte; /* for getting to bits by name */
60 struct mips3_pte pt_mips3_pte; 60 struct mips3_pte pt_mips3_pte;
61} pt_entry_t; 61} pt_entry_t;
62 62
63/* 63/*
64 * Macros/inline functions to hide PTE format differences. 64 * Macros/inline functions to hide PTE format differences.
65 */ 65 */
66 66
67#define mips_pg_nv_bit() (MIPS1_PG_NV) /* same on mips1 and mips3 */ 67#define mips_pg_nv_bit() (MIPS1_PG_NV) /* same on mips1 and mips3 */
68 68
69 69
70int pmap_is_page_ro(struct pmap *pmap, vaddr_t, int); 70bool pmap_is_page_ro_p(struct pmap *pmap, vaddr_t, uint32_t);
71 71
72 72
73/* MIPS1-only */ 73/* MIPS1-only */
74#if defined(MIPS1) && !defined(MIPS3_PLUS) 74#if defined(MIPS1) && !defined(MIPS3_PLUS)
75#define mips_pg_v(entry) ((entry) & MIPS1_PG_V) 75#define mips_pg_v(entry) ((entry) & MIPS1_PG_V)
76#define mips_pg_wired(entry) ((entry) & MIPS1_PG_WIRED) 76#define mips_pg_wired(entry) ((entry) & MIPS1_PG_WIRED)
77 77
78#define mips_pg_m_bit() (MIPS1_PG_D) 78#define mips_pg_m_bit() (MIPS1_PG_D)
79#define mips_pg_rw_bit() (MIPS1_PG_RW) /* no RW bits for mips1 */ 79#define mips_pg_rw_bit() (MIPS1_PG_RW) /* no RW bits for mips1 */
80#define mips_pg_ro_bit() (MIPS1_PG_RO) 80#define mips_pg_ro_bit() (MIPS1_PG_RO)
81#define mips_pg_ropage_bit() (MIPS1_PG_RO) /* XXX not MIPS1_PG_ROPAGE? */ 81#define mips_pg_ropage_bit() (MIPS1_PG_RO) /* XXX not MIPS1_PG_ROPAGE? */
82#define mips_pg_rwpage_bit() (MIPS1_PG_RWPAGE) 82#define mips_pg_rwpage_bit() (MIPS1_PG_RWPAGE)
83#define mips_pg_rwncpage_bit() (MIPS1_PG_RWNCPAGE) 83#define mips_pg_rwncpage_bit() (MIPS1_PG_RWNCPAGE)
@@ -110,63 +110,61 @@ int pmap_is_page_ro(struct pmap *pmap, v
110#define mips_pg_global_bit() (MIPS3_PG_G) 110#define mips_pg_global_bit() (MIPS3_PG_G)
111#define mips_pg_wired_bit() (MIPS3_PG_WIRED) 111#define mips_pg_wired_bit() (MIPS3_PG_WIRED)
112 112
113#define PTE_TO_PADDR(pte) MIPS3_PTE_TO_PADDR((pte)) 113#define PTE_TO_PADDR(pte) MIPS3_PTE_TO_PADDR((pte))
114#define PAGE_IS_RDONLY(pte, va) MIPS3_PAGE_IS_RDONLY((pte), (va)) 114#define PAGE_IS_RDONLY(pte, va) MIPS3_PAGE_IS_RDONLY((pte), (va))
115 115
116#define mips_tlbpfn_to_paddr(x) mips3_tlbpfn_to_paddr((vaddr_t)(x)) 116#define mips_tlbpfn_to_paddr(x) mips3_tlbpfn_to_paddr((vaddr_t)(x))
117#define mips_paddr_to_tlbpfn(x) mips3_paddr_to_tlbpfn((x)) 117#define mips_paddr_to_tlbpfn(x) mips3_paddr_to_tlbpfn((x))
118#endif /* mips3 */ 118#endif /* mips3 */
119 119
120/* MIPS1 and MIPS3 (or greater) */ 120/* MIPS1 and MIPS3 (or greater) */
121#if defined(MIPS1) && defined(MIPS3_PLUS) 121#if defined(MIPS1) && defined(MIPS3_PLUS)
122 122
123static __inline int 123static __inline bool
124 mips_pg_v(unsigned int entry), 124 mips_pg_v(uint32_t entry),
125 mips_pg_wired(unsigned int entry), 125 mips_pg_wired(uint32_t entry),
126 PAGE_IS_RDONLY(unsigned int pte, vaddr_t va); 126 PAGE_IS_RDONLY(uint32_t pte, vaddr_t va);
127 127
128static __inline unsigned int 128static __inline uint32_t
129 mips_pg_wired_bit(void), mips_pg_m_bit(void), 129 mips_pg_wired_bit(void), mips_pg_m_bit(void),
130 mips_pg_ro_bit(void), mips_pg_rw_bit(void), 130 mips_pg_ro_bit(void), mips_pg_rw_bit(void),
131 mips_pg_ropage_bit(void), 131 mips_pg_ropage_bit(void),
132 mips_pg_cwpage_bit(void), 132 mips_pg_cwpage_bit(void),
133 mips_pg_rwpage_bit(void), 133 mips_pg_rwpage_bit(void),
134 mips_pg_global_bit(void); 134 mips_pg_global_bit(void);
135static __inline paddr_t PTE_TO_PADDR(unsigned int pte); 135static __inline paddr_t PTE_TO_PADDR(uint32_t pte);
136 136
137static __inline paddr_t mips_tlbpfn_to_paddr(unsigned int pfn); 137static __inline paddr_t mips_tlbpfn_to_paddr(uint32_t pfn);
138static __inline unsigned int mips_paddr_to_tlbpfn(paddr_t pa); 138static __inline uint32_t mips_paddr_to_tlbpfn(paddr_t pa);
139 139
140 140
141static __inline int 141static __inline bool
142mips_pg_v(entry) 142mips_pg_v(uint32_t entry)
143 unsigned int entry; 
144{ 143{
145 if (MIPS_HAS_R4K_MMU) 144 if (MIPS_HAS_R4K_MMU)
146 return (entry & MIPS3_PG_V); 145 return (entry & MIPS3_PG_V) != 0;
147 return (entry & MIPS1_PG_V); 146 return (entry & MIPS1_PG_V) != 0;
148} 147}
149 148
150static __inline int 149static __inline bool
151mips_pg_wired(entry) 150mips_pg_wired(uint32_t entry)
152 unsigned int entry; 
153{ 151{
154 if (MIPS_HAS_R4K_MMU) 152 if (MIPS_HAS_R4K_MMU)
155 return (entry & MIPS3_PG_WIRED); 153 return (entry & MIPS3_PG_WIRED) != 0;
156 return (entry & MIPS1_PG_WIRED); 154 return (entry & MIPS1_PG_WIRED) != 0;
157} 155}
158 156
159static __inline unsigned int 157static __inline uint32_t
160mips_pg_m_bit(void) 158mips_pg_m_bit(void)
161{ 159{
162 if (MIPS_HAS_R4K_MMU) 160 if (MIPS_HAS_R4K_MMU)
163 return (MIPS3_PG_D); 161 return (MIPS3_PG_D);
164 return (MIPS1_PG_D); 162 return (MIPS1_PG_D);
165} 163}
166 164
167static __inline unsigned int 165static __inline unsigned int
168mips_pg_ro_bit(void) 166mips_pg_ro_bit(void)
169{ 167{
170 if (MIPS_HAS_R4K_MMU) 168 if (MIPS_HAS_R4K_MMU)
171 return (MIPS3_PG_RO); 169 return (MIPS3_PG_RO);
172 return (MIPS1_PG_RO); 170 return (MIPS1_PG_RO);
@@ -212,56 +210,51 @@ mips_pg_global_bit(void)
212 return (MIPS3_PG_G); 210 return (MIPS3_PG_G);
213 return (MIPS1_PG_G); 211 return (MIPS1_PG_G);
214} 212}
215 213
216static __inline unsigned int 214static __inline unsigned int
217mips_pg_wired_bit(void) 215mips_pg_wired_bit(void)
218{ 216{
219 if (MIPS_HAS_R4K_MMU) 217 if (MIPS_HAS_R4K_MMU)
220 return (MIPS3_PG_WIRED); 218 return (MIPS3_PG_WIRED);
221 return (MIPS1_PG_WIRED); 219 return (MIPS1_PG_WIRED);
222} 220}
223 221
224static __inline paddr_t 222static __inline paddr_t
225PTE_TO_PADDR(pte) 223PTE_TO_PADDR(uint32_t pte)
226 unsigned int pte; 
227{ 224{
228 if (MIPS_HAS_R4K_MMU) 225 if (MIPS_HAS_R4K_MMU)
229 return (MIPS3_PTE_TO_PADDR(pte)); 226 return (MIPS3_PTE_TO_PADDR(pte));
230 return (MIPS1_PTE_TO_PADDR(pte)); 227 return (MIPS1_PTE_TO_PADDR(pte));
231} 228}
232 229
233static __inline int 230static __inline bool
234PAGE_IS_RDONLY(pte, va) 231PAGE_IS_RDONLY(uint32_t pte, vaddr_t va)
235 unsigned int pte; 
236 vaddr_t va; 
237{ 232{
238 if (MIPS_HAS_R4K_MMU) 233 if (MIPS_HAS_R4K_MMU)
239 return (MIPS3_PAGE_IS_RDONLY(pte, va)); 234 return (MIPS3_PAGE_IS_RDONLY(pte, va));
240 return (MIPS1_PAGE_IS_RDONLY(pte, va)); 235 return (MIPS1_PAGE_IS_RDONLY(pte, va));
241} 236}
242 237
243static __inline paddr_t 238static __inline paddr_t
244mips_tlbpfn_to_paddr(pfn) 239mips_tlbpfn_to_paddr(uint32_t pfn)
245 unsigned int pfn; 
246{ 240{
247 if (MIPS_HAS_R4K_MMU) 241 if (MIPS_HAS_R4K_MMU)
248 return (mips3_tlbpfn_to_paddr(pfn)); 242 return (mips3_tlbpfn_to_paddr(pfn));
249 return (mips1_tlbpfn_to_paddr(pfn)); 243 return (mips1_tlbpfn_to_paddr(pfn));
250} 244}
251 245
252static __inline unsigned int 246static __inline uint32_t
253mips_paddr_to_tlbpfn(pa) 247mips_paddr_to_tlbpfn(paddr_t pa)
254 paddr_t pa; 
255{ 248{
256 if (MIPS_HAS_R4K_MMU) 249 if (MIPS_HAS_R4K_MMU)
257 return (mips3_paddr_to_tlbpfn(pa)); 250 return (mips3_paddr_to_tlbpfn(pa));
258 return (mips1_paddr_to_tlbpfn(pa)); 251 return (mips1_paddr_to_tlbpfn(pa));
259} 252}
260#endif 253#endif
261 254
262#endif /* ! _LOCORE */ 255#endif /* ! _LOCORE */
263 256
264#if defined(_KERNEL) && !defined(_LOCORE) 257#if defined(_KERNEL) && !defined(_LOCORE)
265/* 258/*
266 * Kernel virtual address to page table entry and visa versa. 259 * Kernel virtual address to page table entry and visa versa.
267 */ 260 */

cvs diff -r1.41.28.9 -r1.41.28.10 src/sys/arch/mips/include/vmparam.h

--- src/sys/arch/mips/include/vmparam.h 2010/01/20 06:58:35 1.41.28.9
+++ src/sys/arch/mips/include/vmparam.h 2010/01/26 21:19:25 1.41.28.10
@@ -1,14 +1,14 @@
1/* $NetBSD: vmparam.h,v 1.41.28.9 2010/01/20 06:58:35 matt Exp $ */ 1/* $NetBSD: vmparam.h,v 1.41.28.10 2010/01/26 21:19:25 matt Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1992, 1993 4 * Copyright (c) 1992, 1993
5 * The Regents of the University of California. All rights reserved. 5 * The Regents of the University of California. All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to Berkeley by 7 * This code is derived from software contributed to Berkeley by
8 * the Systems Programming Group of the University of Utah Computer 8 * the Systems Programming Group of the University of Utah Computer
9 * Science Department and Ralph Campbell. 9 * Science Department and Ralph Campbell.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * are met: 13 * are met:
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
@@ -199,40 +199,66 @@
199 199
200/* virtual sizes (bytes) for various kernel submaps */ 200/* virtual sizes (bytes) for various kernel submaps */
201#define VM_PHYS_SIZE (USRIOSIZE*PAGE_SIZE) 201#define VM_PHYS_SIZE (USRIOSIZE*PAGE_SIZE)
202 202
203/* VM_PHYSSEG_MAX defined by platform-dependent code. */ 203/* VM_PHYSSEG_MAX defined by platform-dependent code. */
204#define VM_PHYSSEG_STRAT VM_PSTRAT_BSEARCH 204#define VM_PHYSSEG_STRAT VM_PSTRAT_BSEARCH
205#define VM_PHYSSEG_NOADD /* can add RAM after vm_mem_init */ 205#define VM_PHYSSEG_NOADD /* can add RAM after vm_mem_init */
206 206
207#define __HAVE_VM_PAGE_MD 207#define __HAVE_VM_PAGE_MD
208 208
209/* 209/*
210 * pmap-specific data stored in the vm_page structure. 210 * pmap-specific data stored in the vm_page structure.
211 */ 211 */
 212/*
 213 * For each struct vm_page, there is a list of all currently valid virtual
 214 * mappings of that page. An entry is a pv_entry_t, the list is pv_table.
 215 * XXX really should do this as a part of the higher level code.
 216 */
 217typedef struct pv_entry {
 218 struct pv_entry *pv_next; /* next pv_entry */
 219 struct pmap *pv_pmap; /* pmap where mapping lies */
 220 vaddr_t pv_va; /* virtual address for mapping */
 221} *pv_entry_t;
 222
 223#define PG_MD_UNCACHED 0x0001 /* page is mapped uncached */
 224#define PG_MD_MODIFIED 0x0002 /* page has been modified */
 225#define PG_MD_REFERENCED 0x0004 /* page has been recently referenced */
 226#define PG_MD_POOLPAGE 0x0008 /* page is used as a poolpage */
 227#define PG_MD_EXECPAGE 0x0010 /* page is exec mapped */
 228
 229#define PG_MD_CACHED_P(pg) (((pg)->mdpage.pvh_attrs & PG_MD_UNCACHED) == 0)
 230#define PG_MD_UNCACHED_P(pg) (((pg)->mdpage.pvh_attrs & PG_MD_UNCACHED) != 0)
 231#define PG_MD_MODIFIED_P(pg) (((pg)->mdpage.pvh_attrs & PG_MD_MODIFIED) != 0)
 232#define PG_MD_REFERENCED_P(pg) (((pg)->mdpage.pvh_attrs & PG_MD_REFERENCED) != 0)
 233#define PG_MD_POOLPAGE_P(pg) (((pg)->mdpage.pvh_attrs & PG_MD_POOLPAGE) != 0)
 234#define PG_MD_EXECPAGE_P(pg) (((pg)->mdpage.pvh_attrs & PG_MD_EXECPAGE) != 0)
 235
212struct vm_page_md { 236struct vm_page_md {
213 struct pv_entry *pvh_list; /* pv_entry list */ 237 struct pv_entry pvh_first; /* pv_entry first */
214#ifdef MULTIPROCESSOR 238#ifdef MULTIPROCESSOR
215 volatile u_int pvh_attrs; /* page attributes */ 239 volatile u_int pvh_attrs; /* page attributes */
216 __cpu_simple_lock_t pvh_slock; /* pv list lock */ 240 __cpu_simple_lock_t pvh_slock; /* pv list lock */
217#define VM_MDPAGE_SLOCK_INIT(pg) \ 241#define VM_MDPAGE_SLOCK_INIT(pg) \
218 __cpu_simple_lock_clear(&(pg)->mdpage.pvh_slock) 242 __cpu_simple_lock_clear(&(pg)->mdpage.pvh_slock)
219#else 243#else
220 u_int pvh_attrs; /* page attributes */ 244 u_int pvh_attrs; /* page attributes */
221#define VM_MDPAGE_SLOCK_INIT(pg) do { } while (/*CONSTCOND*/ 0) 245#define VM_MDPAGE_SLOCK_INIT(pg) do { } while (/*CONSTCOND*/ 0)
222#endif 246#endif
223}; 247};
224 248
225#define VM_MDPAGE_INIT(pg) \ 249#define VM_MDPAGE_INIT(pg) \
226do { \ 250do { \
227 (pg)->mdpage.pvh_list = NULL; \ 251 (pg)->mdpage.pvh_first.pv_next = NULL; \
 252 (pg)->mdpage.pvh_first.pv_pmap = NULL; \
 253 (pg)->mdpage.pvh_first.pv_va = (pg)->phys_addr; \
228 VM_MDPAGE_SLOCK_INIT(pg); \ 254 VM_MDPAGE_SLOCK_INIT(pg); \
229 (pg)->mdpage.pvh_attrs = 0; \ 255 (pg)->mdpage.pvh_attrs = 0; \
230} while (/* CONSTCOND */ 0) 256} while (/* CONSTCOND */ 0)
231 257
232#ifndef VM_NFREELIST 258#ifndef VM_NFREELIST
233#define VM_NFREELIST 16 /* 16 distinct memory segments */ 259#define VM_NFREELIST 16 /* 16 distinct memory segments */
234#define VM_FREELIST_DEFAULT 0 260#define VM_FREELIST_DEFAULT 0
235#define VM_FREELIST_MAX 1 261#define VM_FREELIST_MAX 1
236#endif 262#endif
237 263
238#endif /* ! _MIPS_VMPARAM_H_ */ 264#endif /* ! _MIPS_VMPARAM_H_ */
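
PG_MD_EXECPAGE is the attribute that carries the powerpc-oea-style exec page
state mentioned in the commit message: a page only needs its icache synced if
it has actually been mapped executable and is currently cached.  A hedged
sketch of that idea (maybe_sync_exec_page is an invented name; the real logic
is in pmap.c):

	static void
	maybe_sync_exec_page(struct vm_page *pg, vaddr_t va)
	{
		/* Skip pages never mapped executable or mapped uncached. */
		if (!PG_MD_EXECPAGE_P(pg) || PG_MD_UNCACHED_P(pg))
			return;
		mips_icache_sync_range(va, PAGE_SIZE);
	}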

cvs diff -r1.33.96.2 -r1.33.96.3 src/sys/arch/mips/mips/cache.c

--- src/sys/arch/mips/mips/cache.c 2010/01/20 09:04:34 1.33.96.2
+++ src/sys/arch/mips/mips/cache.c 2010/01/26 21:19:25 1.33.96.3
@@ -1,14 +1,14 @@
1/* $NetBSD: cache.c,v 1.33.96.2 2010/01/20 09:04:34 matt Exp $ */ 1/* $NetBSD: cache.c,v 1.33.96.3 2010/01/26 21:19:25 matt Exp $ */
2 2
3/* 3/*
4 * Copyright 2001, 2002 Wasabi Systems, Inc. 4 * Copyright 2001, 2002 Wasabi Systems, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc. 7 * Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
@@ -58,27 +58,27 @@
58 * WARRANTIES, INCLUDING BUT NOT LIMITED TO, ANY IMPLIED WARRANTIES OF 58 * WARRANTIES, INCLUDING BUT NOT LIMITED TO, ANY IMPLIED WARRANTIES OF
59 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR 59 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
60 * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM BE LIABLE 60 * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM BE LIABLE
61 * FOR ANY DAMAGES WHATSOEVER, AND IN PARTICULAR, BROADCOM SHALL NOT BE 61 * FOR ANY DAMAGES WHATSOEVER, AND IN PARTICULAR, BROADCOM SHALL NOT BE
62 * LIABLE FOR DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 62 * LIABLE FOR DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
63 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 63 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
64 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 64 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
65 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 65 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
66 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE 66 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
67 * OR OTHERWISE), EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 67 * OR OTHERWISE), EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
68 */ 68 */
69 69
70#include <sys/cdefs.h> 70#include <sys/cdefs.h>
71__KERNEL_RCSID(0, "$NetBSD: cache.c,v 1.33.96.2 2010/01/20 09:04:34 matt Exp $"); 71__KERNEL_RCSID(0, "$NetBSD: cache.c,v 1.33.96.3 2010/01/26 21:19:25 matt Exp $");
72 72
73#include "opt_cputype.h" 73#include "opt_cputype.h"
74#include "opt_mips_cache.h" 74#include "opt_mips_cache.h"
75 75
76#include <sys/param.h> 76#include <sys/param.h>
77 77
78#include <uvm/uvm_extern.h> 78#include <uvm/uvm_extern.h>
79 79
80#include <mips/cache.h> 80#include <mips/cache.h>
81#include <mips/locore.h> 81#include <mips/locore.h>
82 82
83#ifdef MIPS1 83#ifdef MIPS1
84#include <mips/cache_r3k.h> 84#include <mips/cache_r3k.h>
@@ -951,31 +951,32 @@ mips_config_cache_modern(void)
951 case MIPSNN_CFG1_DL_RSVD: 951 case MIPSNN_CFG1_DL_RSVD:
952 panic("reserved MIPS32/64 Dcache line size"); 952 panic("reserved MIPS32/64 Dcache line size");
953 break; 953 break;
954 default: 954 default:
955 if (MIPSNN_GET(CFG1_DS, cfg1) == MIPSNN_CFG1_DS_RSVD) 955 if (MIPSNN_GET(CFG1_DS, cfg1) == MIPSNN_CFG1_DS_RSVD)
956 panic("reserved MIPS32/64 Dcache sets per way"); 956 panic("reserved MIPS32/64 Dcache sets per way");
957 mci->mci_pdcache_line_size = MIPSNN_CFG1_DL(cfg1); 957 mci->mci_pdcache_line_size = MIPSNN_CFG1_DL(cfg1);
958 mci->mci_pdcache_way_size = 958 mci->mci_pdcache_way_size =
959 mci->mci_pdcache_line_size * MIPSNN_CFG1_DS(cfg1); 959 mci->mci_pdcache_line_size * MIPSNN_CFG1_DS(cfg1);
960 mci->mci_pdcache_ways = MIPSNN_CFG1_DA(cfg1) + 1; 960 mci->mci_pdcache_ways = MIPSNN_CFG1_DA(cfg1) + 1;
961 961
962 /* 962 /*
963 * Compute the total size and "way mask" for the 963 * Compute the total size and "way mask" for the
964 * primary Icache. 964 * primary Dcache.
965 */ 965 */
966 mci->mci_pdcache_size = 966 mci->mci_pdcache_size =
967 mci->mci_pdcache_way_size * mci->mci_pdcache_ways; 967 mci->mci_pdcache_way_size * mci->mci_pdcache_ways;
968 mci->mci_pdcache_way_mask = mci->mci_pdcache_way_size - 1; 968 mci->mci_pdcache_way_mask = mci->mci_pdcache_way_size - 1;
 969 uvmexp.ncolors = atop(mci->mci_pdcache_size) / mci->mci_pdcache_ways;
969 break; 970 break;
970 } 971 }
971 972
972 /* figure out Icache params. */ 973 /* figure out Icache params. */
973 switch (MIPSNN_GET(CFG1_IL, cfg1)) { 974 switch (MIPSNN_GET(CFG1_IL, cfg1)) {
974 case MIPSNN_CFG1_IL_NONE: 975 case MIPSNN_CFG1_IL_NONE:
975 mci->mci_picache_line_size = mci->mci_picache_way_size = 976 mci->mci_picache_line_size = mci->mci_picache_way_size =
976 mci->mci_picache_ways = 0; 977 mci->mci_picache_ways = 0;
977 break; 978 break;
978 case MIPSNN_CFG1_IL_RSVD: 979 case MIPSNN_CFG1_IL_RSVD:
979 panic("reserved MIPS32/64 Icache line size"); 980 panic("reserved MIPS32/64 Icache line size");
980 break; 981 break;
981 default: 982 default:
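
The added line derives the number of VM page colors from the primary D-cache
geometry: uvmexp.ncolors = atop(mci_pdcache_size) / mci_pdcache_ways, i.e.
pages per cache way.  Worked example for an assumed (common) configuration of
a 32 KB, 4-way primary D-cache with 4 KB pages:

	way size = 32 KB / 4 ways        = 8 KB
	ncolors  = atop(32 KB) / 4 ways  = 8 / 4 = 2

so uvm tracks two page colors, matching the two 4 KB pages that fit in one
8 KB cache way.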

cvs diff -r1.35.38.4 -r1.35.38.5 src/sys/arch/mips/mips/Attic/mem.c

--- src/sys/arch/mips/mips/Attic/mem.c 2010/01/20 06:58:36 1.35.38.4
+++ src/sys/arch/mips/mips/Attic/mem.c 2010/01/26 21:19:25 1.35.38.5
@@ -1,14 +1,14 @@
1/* $NetBSD: mem.c,v 1.35.38.4 2010/01/20 06:58:36 matt Exp $ */ 1/* $NetBSD: mem.c,v 1.35.38.5 2010/01/26 21:19:25 matt Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1982, 1986, 1990, 1993 4 * Copyright (c) 1982, 1986, 1990, 1993
5 * The Regents of the University of California. All rights reserved. 5 * The Regents of the University of California. All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to Berkeley by 7 * This code is derived from software contributed to Berkeley by
8 * the Systems Programming Group of the University of Utah Computer 8 * the Systems Programming Group of the University of Utah Computer
9 * Science Department and Ralph Campbell. 9 * Science Department and Ralph Campbell.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * are met: 13 * are met:
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
@@ -70,27 +70,27 @@
70 * SUCH DAMAGE. 70 * SUCH DAMAGE.
71 * 71 *
72 * @(#)mem.c 8.3 (Berkeley) 1/12/94 72 * @(#)mem.c 8.3 (Berkeley) 1/12/94
73 */ 73 */
74 74
75/* 75/*
76 * Memory special file 76 * Memory special file
77 */ 77 */
78 78
79#include "opt_cputype.h" 79#include "opt_cputype.h"
80#include "opt_mips_cache.h" 80#include "opt_mips_cache.h"
81 81
82#include <sys/cdefs.h> 82#include <sys/cdefs.h>
83__KERNEL_RCSID(0, "$NetBSD: mem.c,v 1.35.38.4 2010/01/20 06:58:36 matt Exp $"); 83__KERNEL_RCSID(0, "$NetBSD: mem.c,v 1.35.38.5 2010/01/26 21:19:25 matt Exp $");
84 84
85#include <sys/param.h> 85#include <sys/param.h>
86#include <sys/conf.h> 86#include <sys/conf.h>
87#include <sys/buf.h> 87#include <sys/buf.h>
88#include <sys/systm.h> 88#include <sys/systm.h>
89#include <sys/uio.h> 89#include <sys/uio.h>
90#include <sys/malloc.h> 90#include <sys/malloc.h>
91#include <sys/msgbuf.h> 91#include <sys/msgbuf.h>
92#include <sys/event.h> 92#include <sys/event.h>
93 93
94#include <machine/cpu.h> 94#include <machine/cpu.h>
95 95
96#include <mips/cache.h> 96#include <mips/cache.h>
@@ -145,46 +145,46 @@ mmrw(dev, uio, flags)
145 c = iov->iov_len; 145 c = iov->iov_len;
146 /* 146 /*
147 * XXX Broken; assumes contiguous physical memory. 147 * XXX Broken; assumes contiguous physical memory.
148 */ 148 */
149 if (v + c > ctob(physmem)) 149 if (v + c > ctob(physmem))
150 return (EFAULT); 150 return (EFAULT);
151#ifdef _LP64 151#ifdef _LP64
152 v = MIPS_PHYS_TO_XKPHYS_CACHED(v); 152 v = MIPS_PHYS_TO_XKPHYS_CACHED(v);
153#else 153#else
154 v = MIPS_PHYS_TO_KSEG0(v); 154 v = MIPS_PHYS_TO_KSEG0(v);
155#endif 155#endif
156 error = uiomove((void *)v, c, uio); 156 error = uiomove((void *)v, c, uio);
157#if defined(MIPS3_PLUS) 157#if defined(MIPS3_PLUS)
158 if (mips_cache_info.mci_cache_virtual_alias) 158 if (MIPS_CACHE_VIRTUAL_ALIAS)
159 mips_dcache_wbinv_range(v, c); 159 mips_dcache_wbinv_range(v, c);
160#endif 160#endif
161 continue; 161 continue;
162 162
163 case DEV_KMEM: 163 case DEV_KMEM:
164 v = uio->uio_offset; 164 v = uio->uio_offset;
165 c = min(iov->iov_len, MAXPHYS); 165 c = min(iov->iov_len, MAXPHYS);
166 if (v < MIPS_KSEG0_START) 166 if (v < MIPS_KSEG0_START)
167 if (v < MIPS_KSEG0_START) 167 if (v < MIPS_KSEG0_START)
168 return (EFAULT); 168 return (EFAULT);
169 if (v > MIPS_PHYS_TO_KSEG0(mips_avail_end + 169 if (v > MIPS_PHYS_TO_KSEG0(mips_avail_end +
170 mips_round_page(MSGBUFSIZE) - c) && 170 mips_round_page(MSGBUFSIZE) - c) &&
171 (v < MIPS_KSEG2_START || 171 (v < MIPS_KSEG2_START ||
172 !uvm_kernacc((void *)v, c, 172 !uvm_kernacc((void *)v, c,
173 uio->uio_rw == UIO_READ ? B_READ : B_WRITE))) 173 uio->uio_rw == UIO_READ ? B_READ : B_WRITE)))
174 return (EFAULT); 174 return (EFAULT);
175 error = uiomove((void *)v, c, uio); 175 error = uiomove((void *)v, c, uio);
176#if defined(MIPS3_PLUS) 176#if defined(MIPS3_PLUS)
177 if (mips_cache_info.mci_cache_virtual_alias) 177 if (MIPS_CACHE_VIRTUAL_ALIAS)
178 mips_dcache_wbinv_range(v, c); 178 mips_dcache_wbinv_range(v, c);
179#endif 179#endif
180 continue; 180 continue;
181 181
182 case DEV_NULL: 182 case DEV_NULL:
183 if (uio->uio_rw == UIO_WRITE) 183 if (uio->uio_rw == UIO_WRITE)
184 uio->uio_resid = 0; 184 uio->uio_resid = 0;
185 return (0); 185 return (0);
186 186
187 case DEV_ZERO: 187 case DEV_ZERO:
188 if (uio->uio_rw == UIO_WRITE) { 188 if (uio->uio_rw == UIO_WRITE) {
189 c = iov->iov_len; 189 c = iov->iov_len;
190 break; 190 break;

cvs diff -r1.179.16.12 -r1.179.16.13 src/sys/arch/mips/mips/Attic/pmap.c

--- src/sys/arch/mips/mips/Attic/pmap.c 2010/01/22 07:41:10 1.179.16.12
+++ src/sys/arch/mips/mips/Attic/pmap.c 2010/01/26 21:19:25 1.179.16.13
@@ -1,14 +1,14 @@
1/* $NetBSD: pmap.c,v 1.179.16.12 2010/01/22 07:41:10 matt Exp $ */ 1/* $NetBSD: pmap.c,v 1.179.16.13 2010/01/26 21:19:25 matt Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc. 4 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center and by Chris G. Demetriou. 9 * NASA Ames Research Center and by Chris G. Demetriou.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * are met: 13 * are met:
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
@@ -57,27 +57,27 @@
57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 * SUCH DAMAGE. 63 * SUCH DAMAGE.
64 * 64 *
65 * @(#)pmap.c 8.4 (Berkeley) 1/26/94 65 * @(#)pmap.c 8.4 (Berkeley) 1/26/94
66 */ 66 */
67 67
68#include <sys/cdefs.h> 68#include <sys/cdefs.h>
69 69
70__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.179.16.12 2010/01/22 07:41:10 matt Exp $"); 70__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.179.16.13 2010/01/26 21:19:25 matt Exp $");
71 71
72/* 72/*
73 * Manages physical address maps. 73 * Manages physical address maps.
74 * 74 *
75 * In addition to hardware address maps, this 75 * In addition to hardware address maps, this
76 * module is called upon to provide software-use-only 76 * module is called upon to provide software-use-only
77 * maps which may or may not be stored in the same 77 * maps which may or may not be stored in the same
78 * form as hardware maps. These pseudo-maps are 78 * form as hardware maps. These pseudo-maps are
79 * used to store intermediate results from copy 79 * used to store intermediate results from copy
80 * operations to and from address spaces. 80 * operations to and from address spaces.
81 * 81 *
82 * Since the information managed by this module is 82 * Since the information managed by this module is
83 * also stored by the logical address mapping module, 83 * also stored by the logical address mapping module,
@@ -141,212 +141,303 @@ CTASSERT((intptr_t)MIPS_PHYS_TO_KSEG0(0x
141CTASSERT(MIPS_KSEG1_START < 0); 141CTASSERT(MIPS_KSEG1_START < 0);
142CTASSERT((intptr_t)MIPS_PHYS_TO_KSEG1(0x1000) < 0); 142CTASSERT((intptr_t)MIPS_PHYS_TO_KSEG1(0x1000) < 0);
143CTASSERT(MIPS_KSEG2_START < 0); 143CTASSERT(MIPS_KSEG2_START < 0);
144CTASSERT(MIPS_MAX_MEM_ADDR < 0); 144CTASSERT(MIPS_MAX_MEM_ADDR < 0);
145CTASSERT(MIPS_RESERVED_ADDR < 0); 145CTASSERT(MIPS_RESERVED_ADDR < 0);
146CTASSERT((uint32_t)MIPS_KSEG0_START == 0x80000000); 146CTASSERT((uint32_t)MIPS_KSEG0_START == 0x80000000);
147CTASSERT((uint32_t)MIPS_KSEG1_START == 0xa0000000); 147CTASSERT((uint32_t)MIPS_KSEG1_START == 0xa0000000);
148CTASSERT((uint32_t)MIPS_KSEG2_START == 0xc0000000); 148CTASSERT((uint32_t)MIPS_KSEG2_START == 0xc0000000);
149CTASSERT((uint32_t)MIPS_MAX_MEM_ADDR == 0xbe000000); 149CTASSERT((uint32_t)MIPS_MAX_MEM_ADDR == 0xbe000000);
150CTASSERT((uint32_t)MIPS_RESERVED_ADDR == 0xbfc80000); 150CTASSERT((uint32_t)MIPS_RESERVED_ADDR == 0xbfc80000);
151CTASSERT(MIPS_KSEG0_P(MIPS_PHYS_TO_KSEG0(0))); 151CTASSERT(MIPS_KSEG0_P(MIPS_PHYS_TO_KSEG0(0)));
152CTASSERT(MIPS_KSEG1_P(MIPS_PHYS_TO_KSEG1(0))); 152CTASSERT(MIPS_KSEG1_P(MIPS_PHYS_TO_KSEG1(0)));
153 153
154#ifdef DEBUG 154#define PMAP_COUNT(name) (pmap_evcnt_##name.ev_count++ + 0)
155struct { 155#define PMAP_COUNTER(name, desc) \
156 int kernel; /* entering kernel mapping */ 156static struct evcnt pmap_evcnt_##name = \
157 int user; /* entering user mapping */ 157 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", desc); \
158 int ptpneeded; /* needed to allocate a PT page */ 158EVCNT_ATTACH_STATIC(pmap_evcnt_##name)
159 int pwchange; /* no mapping change, just wiring or protection */ 159
160 int wchange; /* no mapping change, just wiring */ 160PMAP_COUNTER(remove_kernel_calls, "remove kernel calls");
161 int mchange; /* was mapped but mapping to different page */ 161PMAP_COUNTER(remove_kernel_pages, "kernel pages unmapped");
162 int managed; /* a managed page */ 162PMAP_COUNTER(remove_user_calls, "remove user calls");
163 int firstpv; /* first mapping for this PA */ 163PMAP_COUNTER(remove_user_pages, "user pages unmapped");
164 int secondpv; /* second mapping for this PA */ 164PMAP_COUNTER(remove_flushes, "remove cache flushes");
165 int ci; /* cache inhibited */ 165PMAP_COUNTER(remove_tlb_ops, "remove tlb ops");
166 int unmanaged; /* not a managed page */ 166PMAP_COUNTER(remove_pvfirst, "remove pv first");
167 int flushes; /* cache flushes */ 167PMAP_COUNTER(remove_pvsearch, "remove pv search");
168 int cachehit; /* new entry forced valid entry out */ 168
169} enter_stats; 169PMAP_COUNTER(prefer_requests, "prefer requests");
170struct { 170PMAP_COUNTER(prefer_adjustments, "prefer adjustments");
171 int calls; 171
172 int removes; 172PMAP_COUNTER(idlezeroed_pages, "pages idle zeroed");
173 int flushes; 173PMAP_COUNTER(zeroed_pages, "pages zeroed");
174 int pidflushes; /* HW pid stolen */ 174PMAP_COUNTER(copied_pages, "pages copied");
175 int pvfirst; 175
176 int pvsearch; 176PMAP_COUNTER(kenter_pa, "kernel fast mapped pages");
177} remove_stats; 177PMAP_COUNTER(kenter_pa_bad, "kernel fast mapped pages (bad color)");
 178PMAP_COUNTER(kenter_pa_unmanaged, "kernel fast mapped unmanaged pages");
 179PMAP_COUNTER(kremove_pages, "kernel fast unmapped pages");
 180
 181PMAP_COUNTER(page_cache_evictions, "pages changed to uncacheable");
 182PMAP_COUNTER(page_cache_restorations, "pages changed to cacheable");
 183
 184PMAP_COUNTER(kernel_mappings_bad, "kernel pages mapped (bad color)");
 185PMAP_COUNTER(user_mappings_bad, "user pages mapped (bad color)");
 186PMAP_COUNTER(kernel_mappings, "kernel pages mapped");
 187PMAP_COUNTER(user_mappings, "user pages mapped");
 188PMAP_COUNTER(user_mappings_changed, "user mapping changed");
 189PMAP_COUNTER(kernel_mappings_changed, "kernel mapping changed");
 190PMAP_COUNTER(uncached_mappings, "uncached pages mapped");
 191PMAP_COUNTER(unmanaged_mappings, "unmanaged pages mapped");
 192PMAP_COUNTER(managed_mappings, "managed pages mapped");
 193PMAP_COUNTER(mappings, "pages mapped");
 194PMAP_COUNTER(remappings, "pages remapped");
 195PMAP_COUNTER(unmappings, "pages unmapped");
 196PMAP_COUNTER(primary_mappings, "page initial mappings");
 197PMAP_COUNTER(primary_unmappings, "page final unmappings");
 198PMAP_COUNTER(tlb_hit, "page mapping");
 199
 200PMAP_COUNTER(exec_mappings, "exec pages mapped");
 201PMAP_COUNTER(exec_synced_mappings, "exec pages synced");
 202PMAP_COUNTER(exec_synced_remove, "exec pages synced (PR)");
 203PMAP_COUNTER(exec_synced_clear_modify, "exec pages synced (CM)");
 204PMAP_COUNTER(exec_synced_page_protect, "exec pages synced (PP)");
 205PMAP_COUNTER(exec_synced_protect, "exec pages synced (P)");
 206PMAP_COUNTER(exec_uncached_page_protect, "exec pages uncached (PP)");
 207PMAP_COUNTER(exec_uncached_clear_modify, "exec pages uncached (CM)");
 208PMAP_COUNTER(exec_uncached_zero_page, "exec pages uncached (ZP)");
 209PMAP_COUNTER(exec_uncached_copy_page, "exec pages uncached (CP)");
 210PMAP_COUNTER(exec_uncached_remove, "exec pages uncached (PR)");
 211
 212PMAP_COUNTER(create, "creates");
 213PMAP_COUNTER(reference, "references");
 214PMAP_COUNTER(dereference, "dereferences");
 215PMAP_COUNTER(destroy, "destroyed");
 216PMAP_COUNTER(activate, "activations");
 217PMAP_COUNTER(deactivate, "deactivations");
 218PMAP_COUNTER(update, "updates");
 219PMAP_COUNTER(unwire, "unwires");
 220PMAP_COUNTER(copy, "copies");
 221PMAP_COUNTER(collect, "collects");
 222PMAP_COUNTER(clear_modify, "clear_modifies");
 223PMAP_COUNTER(protect, "protects");
 224PMAP_COUNTER(page_protect, "page_protects");
178 225
179#define PDB_FOLLOW 0x0001 226#define PDB_FOLLOW 0x0001
180#define PDB_INIT 0x0002 227#define PDB_INIT 0x0002
181#define PDB_ENTER 0x0004 228#define PDB_ENTER 0x0004
182#define PDB_REMOVE 0x0008 229#define PDB_REMOVE 0x0008
183#define PDB_CREATE 0x0010 230#define PDB_CREATE 0x0010
184#define PDB_PTPAGE 0x0020 231#define PDB_PTPAGE 0x0020
185#define PDB_PVENTRY 0x0040 232#define PDB_PVENTRY 0x0040
186#define PDB_BITS 0x0080 233#define PDB_BITS 0x0080
187#define PDB_COLLECT 0x0100 234#define PDB_COLLECT 0x0100
188#define PDB_PROTECT 0x0200 235#define PDB_PROTECT 0x0200
189#define PDB_TLBPID 0x0400 236#define PDB_TLBPID 0x0400
190#define PDB_PARANOIA 0x2000 237#define PDB_PARANOIA 0x2000
191#define PDB_WIRING 0x4000 238#define PDB_WIRING 0x4000
192#define PDB_PVDUMP 0x8000 239#define PDB_PVDUMP 0x8000
193int pmapdebug = 0; 240int pmapdebug = 0;
194 241
195#endif 
196 
197#define PMAP_ASID_RESERVED 0 242#define PMAP_ASID_RESERVED 0
198 243
199CTASSERT(PMAP_ASID_RESERVED == 0); 244CTASSERT(PMAP_ASID_RESERVED == 0);
200/* 245/*
201 * Initialize the kernel pmap. 246 * Initialize the kernel pmap.
202 */ 247 */
203#ifdef MULTIPROCESSOR 248#ifdef MULTIPROCESSOR
204#define PMAP_SIZE offsetof(struct pmap, pm_pai[MAXCPUS]) 249#define PMAP_SIZE offsetof(struct pmap, pm_pai[MAXCPUS])
205#else 250#else
206#define PMAP_SIZE sizeof(struct pmap) 251#define PMAP_SIZE sizeof(struct pmap)
207#endif 252#endif
208struct pmap_kernel kernel_pmap_store = { 253struct pmap_kernel kernel_pmap_store = {
209 .kernel_pmap = { 254 .kernel_pmap = {
210 .pm_count = 1, 255 .pm_count = 1,
211 .pm_segtab = (void *)(MIPS_KSEG2_START + 0x1eadbeef), 256 .pm_segtab = (void *)(MIPS_KSEG2_START + 0x1eadbeef),
212 }, 257 },
213}; 258};
214 259
215paddr_t mips_avail_start; /* PA of first available physical page */ 260paddr_t mips_avail_start; /* PA of first available physical page */
216paddr_t mips_avail_end; /* PA of last available physical page */ 261paddr_t mips_avail_end; /* PA of last available physical page */
217vaddr_t mips_virtual_end; /* VA of last avail page (end of kernel AS) */ 262vaddr_t mips_virtual_end; /* VA of last avail page (end of kernel AS) */
218 263
219struct pv_entry *pv_table; 
220int pv_table_npages; 
221 
222pt_entry_t *Sysmap; /* kernel pte table */ 264pt_entry_t *Sysmap; /* kernel pte table */
223unsigned int Sysmapsize; /* number of pte's in Sysmap */ 265unsigned int Sysmapsize; /* number of pte's in Sysmap */
224 266
225/* 267/*
226 * The pools from which pmap structures and sub-structures are allocated. 268 * The pools from which pmap structures and sub-structures are allocated.
227 */ 269 */
228struct pool pmap_pmap_pool; 270struct pool pmap_pmap_pool;
229struct pool pmap_pv_pool; 271struct pool pmap_pv_pool;
230 272
231#ifndef PMAP_PV_LOWAT 273#ifndef PMAP_PV_LOWAT
232#define PMAP_PV_LOWAT 16 274#define PMAP_PV_LOWAT 16
233#endif 275#endif
234int pmap_pv_lowat = PMAP_PV_LOWAT; 276int pmap_pv_lowat = PMAP_PV_LOWAT;
235 277
236bool pmap_initialized = false; 278bool pmap_initialized = false;
 279#define PMAP_PAGE_COLOROK_P(a, b) \
 280 ((((int)(a) ^ (int)(b)) & pmap_page_colormask) == 0)
 281u_int pmap_page_colormask;
237 282
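pmap_page_colormask is initialized outside this hunk; PMAP_PAGE_COLOROK_P() only assumes it holds the cache-color bits of an address, presumably derived from uvmexp.ncolors once the cache geometry is known, e.g.:

	/* assumed initialization, once uvmexp.ncolors is known */
	pmap_page_colormask = (uvmexp.ncolors - 1) << PAGE_SHIFT;

Two addresses then have the same primary-cache color exactly when PMAP_PAGE_COLOROK_P() is true for them.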
238#define PAGE_IS_MANAGED(pa) \ 283#define PAGE_IS_MANAGED(pa) \
239 (pmap_initialized == true && vm_physseg_find(atop(pa), NULL) != -1) 284 (pmap_initialized == true && vm_physseg_find(atop(pa), NULL) != -1)
240 285
241#define PMAP_IS_ACTIVE(pm) \ 286#define PMAP_IS_ACTIVE(pm) \
242 ((pm) == pmap_kernel() || \ 287 ((pm) == pmap_kernel() || \
243 (pm) == curlwp->l_proc->p_vmspace->vm_map.pmap) 288 (pm) == curlwp->l_proc->p_vmspace->vm_map.pmap)
244 289
245/* Forward function declarations */ 290/* Forward function declarations */
246void pmap_remove_pv(pmap_t, vaddr_t, struct vm_page *); 291void pmap_remove_pv(pmap_t, vaddr_t, struct vm_page *, bool);
247void pmap_enter_pv(pmap_t, vaddr_t, struct vm_page *, u_int *); 292void pmap_enter_pv(pmap_t, vaddr_t, struct vm_page *, u_int *);
248pt_entry_t *pmap_pte(pmap_t, vaddr_t); 293pt_entry_t *pmap_pte(pmap_t, vaddr_t);
249 294
250/* 295/*
251 * PV table management functions. 296 * PV table management functions.
252 */ 297 */
253void *pmap_pv_page_alloc(struct pool *, int); 298void *pmap_pv_page_alloc(struct pool *, int);
254void pmap_pv_page_free(struct pool *, void *); 299void pmap_pv_page_free(struct pool *, void *);
255 300
256struct pool_allocator pmap_pv_page_allocator = { 301struct pool_allocator pmap_pv_page_allocator = {
257 pmap_pv_page_alloc, pmap_pv_page_free, 0, 302 pmap_pv_page_alloc, pmap_pv_page_free, 0,
258}; 303};
259 304
260#define pmap_pv_alloc() pool_get(&pmap_pv_pool, PR_NOWAIT) 305#define pmap_pv_alloc() pool_get(&pmap_pv_pool, PR_NOWAIT)
261#define pmap_pv_free(pv) pool_put(&pmap_pv_pool, (pv)) 306#define pmap_pv_free(pv) pool_put(&pmap_pv_pool, (pv))
262 307
263/* 308/*
264 * Misc. functions. 309 * Misc. functions.
265 */ 310 */
266 311
267static inline bool 312static inline bool
268pmap_clear_page_attributes(struct vm_page *pg, u_int attributes) 313pmap_clear_page_attributes(struct vm_page *pg, u_int clear_attributes)
269{ 314{
270 volatile u_int * const attrp = &pg->mdpage.pvh_attrs; 315 volatile u_int * const attrp = &pg->mdpage.pvh_attrs;
271#ifdef MULTIPROCESSOR 316#ifdef MULTIPROCESSOR
272 for (;;) { 317 for (;;) {
273 u_int old_attr = *attrp; 318 u_int old_attr = *attrp;
274 if ((old_attr & attributes) == 0) 319 if ((old_attr & clear_attributes) == 0)
275 return false; 320 return false;
276 u_int new_attr = old_attr & ~attributes; 321 u_int new_attr = old_attr & ~clear_attributes;
277 if (old_attr == atomic_cas_uint(attrp, old_attr, new_attr)) 322 if (old_attr == atomic_cas_uint(attrp, old_attr, new_attr))
278 return true; 323 return true;
279 } 324 }
280#else 325#else
281 u_int old_attr = *attrp; 326 u_int old_attr = *attrp;
282 if ((old_attr & attributes) == 0) 327 if ((old_attr & clear_attributes) == 0)
283 return false; 328 return false;
284 *attrp &= ~attributes; 329 *attrp &= ~clear_attributes;
285 return true; 330 return true;
286#endif 331#endif
287} 332}
288 333
289static inline void 334static inline void
290pmap_set_page_attributes(struct vm_page *pg, u_int attributes) 335pmap_set_page_attributes(struct vm_page *pg, u_int set_attributes)
291{ 336{
292#ifdef MULTIPROCESSOR 337#ifdef MULTIPROCESSOR
293 atomic_or_uint(&pg->mdpage.pvh_attrs, attributes); 338 atomic_or_uint(&pg->mdpage.pvh_attrs, set_attributes);
294#else 339#else
295 pg->mdpage.pvh_attrs |= attributes; 340 pg->mdpage.pvh_attrs |= set_attributes;
296#endif 341#endif
297} 342}
298 343
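The PG_MD_* attribute bits and predicates used below belong to the reworked per-page metadata (the new pvh_first/pvh_attrs fields, defined elsewhere in this change and not shown here); an assumed shape, with bit values chosen purely for illustration:

/* assumed per-page attribute bits and predicates (values illustrative) */
#define	PG_MD_MODIFIED		0x01	/* page has been modified */
#define	PG_MD_REFERENCED	0x02	/* page has been referenced */
#define	PG_MD_EXECPAGE		0x04	/* page has executable mappings */
#define	PG_MD_UNCACHED		0x08	/* mappings are currently uncached */

#define	PG_MD_MODIFIED_P(pg)	(((pg)->mdpage.pvh_attrs & PG_MD_MODIFIED) != 0)
#define	PG_MD_EXECPAGE_P(pg)	(((pg)->mdpage.pvh_attrs & PG_MD_EXECPAGE) != 0)
#define	PG_MD_UNCACHED_P(pg)	(((pg)->mdpage.pvh_attrs & PG_MD_UNCACHED) != 0)
#define	PG_MD_CACHED_P(pg)	(!PG_MD_UNCACHED_P(pg))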
299#if defined(MIPS3_PLUS) /* XXX mmu XXX */ 344static inline void
300void mips_dump_segtab(struct proc *); 345pmap_page_syncicache(struct vm_page *pg)
301static void mips_flushcache_allpvh(paddr_t); 346{
 347 if (MIPS_HAS_R4K_MMU) {
 348 if (PG_MD_CACHED_P(pg)) {
 349 mips_icache_sync_range_index(
 350 pg->mdpage.pvh_first.pv_va, PAGE_SIZE);
 351 }
 352 } else {
 353 mips_icache_sync_range(MIPS_PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(pg)),
 354 PAGE_SIZE);
 355 }
 356}
302 357
303/* 358static vaddr_t
304 * Flush virtual addresses associated with a given physical address 359pmap_map_ephemeral_page(struct vm_page *pg, int prot, pt_entry_t *old_pt_entry_p)
305 */ 
306static void 
307mips_flushcache_allpvh(paddr_t pa) 
308{ 360{
309 struct vm_page *pg; 361 const paddr_t pa = VM_PAGE_TO_PHYS(pg);
310 struct pv_entry *pv; 362 pv_entry_t pv = &pg->mdpage.pvh_first;
311 363
312 pg = PHYS_TO_VM_PAGE(pa); 364#ifdef _LP64
313 if (pg == NULL) { 365 vaddr_t va = MIPS_PHYS_TO_XKPHYS_CACHED(pa);
314 /* page is unmanaged */ 366#else
315#ifdef DIAGNOSTIC 367 vaddr_t va;
316 printf("mips_flushcache_allpvh(): unmanaged pa = %#"PRIxPADDR"\n", 368 if (pa <= MIPS_PHYS_MASK) {
317 pa); 369 va = MIPS_PHYS_TO_KSEG0(pa);
318#endif 370 } else {
319 return; 371 KASSERT(pmap_initialized);
320 } 372 /*
 373 * Make sure to use a congruent mapping to the last mapped
 374 * address so we don't have to worry about virtual aliases.
 375 */
 376 kpreempt_disable();
 377 struct cpu_info * const ci = curcpu();
321 378
322 pv = pg->mdpage.pvh_list; 379 va = (prot & VM_PROT_WRITE ? ci->ci_pmap_dstbase : ci->ci_pmap_srcbase)
 380 + mips_cache_indexof(MIPS_CACHE_VIRTUAL_ALIAS ? pv->pv_va : pa);
 381 *old_pt_entry_p = *kvtopte(va);
 382 pmap_kenter_pa(va, pa, prot);
 383 }
 384#endif /* _LP64 */
 385 if (MIPS_CACHE_VIRTUAL_ALIAS) {
 386 /*
 387 * If we are forced to use an incompatible alias, flush the
 388 * page from the cache so we will copy the correct contents.
 389 */
 390 if (PG_MD_CACHED_P(pg)
 391 && mips_cache_badalias(pv->pv_va, va))
 392 mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);
 393 if (pv->pv_pmap == NULL)
 394 pv->pv_va = va;
 395 }
323 396
324#if defined(MIPS3_NO_PV_UNCACHED) 397 return va;
325 /* No current mapping. Cache was flushed by pmap_remove_pv() */ 398}
326 if (pv->pv_pmap == NULL) 
327 return; 
328 399
329 /* Only one index is allowed at a time */ 400static void
330 if (mips_cache_indexof(pa) != mips_cache_indexof(pv->pv_va)) 401pmap_unmap_ephemeral_page(struct vm_page *pg, vaddr_t va,
331 mips_dcache_wbinv_range_index(pv->pv_va, NBPG); 402 pt_entry_t old_pt_entry)
332#else 403{
333 while (pv) { 404 pv_entry_t pv = &pg->mdpage.pvh_first;
334 mips_dcache_wbinv_range_index(pv->pv_va, NBPG); 405
335 pv = pv->pv_next; 406 if (MIPS_CACHE_VIRTUAL_ALIAS
 407 && (PG_MD_UNCACHED_P(pg)
 408 || (pv->pv_pmap != NULL
 409 && mips_cache_badalias(pv->pv_va, va)))) {
 410 /*
 411 * If this page was previously uncached or we had to use an
 412 * incompatible alias and it has a valid mapping, flush it
 413 * from the cache.
 414 */
 415 mips_dcache_wbinv_range(va, PAGE_SIZE);
 416 }
 417#ifndef _LP64
 418 /*
 419 * If we had to map using a page table entry, unmap it now.
 420 */
 421 if (va >= VM_MIN_KERNEL_ADDRESS) {
 422 pmap_kremove(va, PAGE_SIZE);
 423 if (mips_pg_v(old_pt_entry.pt_entry)) {
 424 *kvtopte(va) = old_pt_entry;
 425 pmap_tlb_update(pmap_kernel(), va, old_pt_entry.pt_entry);
 426 }
 427 kpreempt_enable();
336 } 428 }
337#endif 429#endif
338} 430}
339#endif /* MIPS3_PLUS */ 
340 431
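pmap_map_ephemeral_page()/pmap_unmap_ephemeral_page() above are intended to bracket short kernel accesses to a page that may not be KSEG0-mappable, as the pmap_zero_page() change further down does; a minimal usage sketch (function and variable names are illustrative only):

static void
pmap_fill_page_example(struct vm_page *pg, int pattern)
{
	pt_entry_t saved_pte;
	vaddr_t va;

	/* map the page at a congruent (or KSEG0/XKPHYS) address */
	va = pmap_map_ephemeral_page(pg, VM_PROT_READ | VM_PROT_WRITE,
	    &saved_pte);

	memset((void *)va, pattern, PAGE_SIZE);

	/* flush aliases as needed and restore any borrowed kernel PTE */
	pmap_unmap_ephemeral_page(pg, va, saved_pte);
}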
341/* 432/*
342 * Bootstrap the system enough to run with virtual memory. 433 * Bootstrap the system enough to run with virtual memory.
343 * firstaddr is the first unused kseg0 address (not page aligned). 434 * firstaddr is the first unused kseg0 address (not page aligned).
344 */ 435 */
345void 436void
346pmap_bootstrap(void) 437pmap_bootstrap(void)
347{ 438{
348 vsize_t bufsz; 439 vsize_t bufsz;
349 440
350 /* 441 /*
351 * Compute the number of pages kmem_map will have. 442 * Compute the number of pages kmem_map will have.
352 */ 443 */
@@ -390,39 +481,26 @@ pmap_bootstrap(void) @@ -390,39 +481,26 @@ pmap_bootstrap(void)
390 Sysmapsize = 481 Sysmapsize =
391 (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / NBPG; 482 (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / NBPG;
392 } 483 }
393#endif 484#endif
394 485
395 /* 486 /*
396 * Now actually allocate the kernel PTE array (must be done 487 * Now actually allocate the kernel PTE array (must be done
397 * after virtual_end is initialized). 488 * after virtual_end is initialized).
398 */ 489 */
399 Sysmap = (pt_entry_t *) 490 Sysmap = (pt_entry_t *)
400 uvm_pageboot_alloc(sizeof(pt_entry_t) * Sysmapsize); 491 uvm_pageboot_alloc(sizeof(pt_entry_t) * Sysmapsize);
401 492
402 /* 493 /*
403 * Allocate memory for the pv_heads. (A few more of the latter 
404 * are allocated than are needed.) 
405 * 
406 * We could do this in pmap_init when we know the actual 
407 * managed page pool size, but its better to use kseg0 
408 * addresses rather than kernel virtual addresses mapped 
409 * through the TLB. 
410 */ 
411 pv_table_npages = physmem; 
412 pv_table = (struct pv_entry *) 
413 uvm_pageboot_alloc(sizeof(struct pv_entry) * pv_table_npages); 
414 
415 /* 
416 * Initialize the pools. 494 * Initialize the pools.
417 */ 495 */
418 pool_init(&pmap_pmap_pool, PMAP_SIZE, 0, 0, 0, "pmappl", 496 pool_init(&pmap_pmap_pool, PMAP_SIZE, 0, 0, 0, "pmappl",
419 &pool_allocator_nointr, IPL_NONE); 497 &pool_allocator_nointr, IPL_NONE);
420 pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl", 498 pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl",
421 &pmap_pv_page_allocator, IPL_NONE); 499 &pmap_pv_page_allocator, IPL_NONE);
422 500
423 tlb_set_asid(0); 501 tlb_set_asid(0);
424 502
425#ifdef MIPS3_PLUS /* XXX mmu XXX */ 503#ifdef MIPS3_PLUS /* XXX mmu XXX */
426 /* 504 /*
427 * The R4?00 stores only one copy of the Global bit in the 505 * The R4?00 stores only one copy of the Global bit in the
428 * translation lookaside buffer for each 2 page entry. 506 * translation lookaside buffer for each 2 page entry.
@@ -540,59 +618,58 @@ pmap_steal_memory(vsize_t size, vaddr_t  @@ -540,59 +618,58 @@ pmap_steal_memory(vsize_t size, vaddr_t
540 * If we got here, there was no memory left. 618 * If we got here, there was no memory left.
541 */ 619 */
542 panic("pmap_steal_memory: no memory to steal"); 620 panic("pmap_steal_memory: no memory to steal");
543} 621}
544 622
545/* 623/*
546 * Initialize the pmap module. 624 * Initialize the pmap module.
547 * Called by vm_init, to initialize any structures that the pmap 625 * Called by vm_init, to initialize any structures that the pmap
548 * system needs to map virtual memory. 626 * system needs to map virtual memory.
549 */ 627 */
550void 628void
551pmap_init(void) 629pmap_init(void)
552{ 630{
553 vsize_t s; 
554 int bank, i; 
555 pv_entry_t pv; 
556 
557#ifdef DEBUG 631#ifdef DEBUG
558 if (pmapdebug & (PDB_FOLLOW|PDB_INIT)) 632 if (pmapdebug & (PDB_FOLLOW|PDB_INIT))
559 printf("pmap_init()\n"); 633 printf("pmap_init()\n");
560#endif 634#endif
561 635
562 /* 636 /*
563 * Memory for the pv entry heads has 
564 * already been allocated. Initialize the physical memory 
565 * segments. 
566 */ 
567 pv = pv_table; 
568 for (bank = 0; bank < vm_nphysseg; bank++) { 
569 s = vm_physmem[bank].end - vm_physmem[bank].start; 
570 for (i = 0; i < s; i++) 
571 vm_physmem[bank].pgs[i].mdpage.pvh_list = pv++; 
572 } 
573 
574 /* 
575 * Set a low water mark on the pv_entry pool, so that we are 637 * Set a low water mark on the pv_entry pool, so that we are
576 * more likely to have these around even in extreme memory 638 * more likely to have these around even in extreme memory
577 * starvation. 639 * starvation.
578 */ 640 */
579 pool_setlowat(&pmap_pv_pool, pmap_pv_lowat); 641 pool_setlowat(&pmap_pv_pool, pmap_pv_lowat);
580 642
581 /* 643 /*
582 * Now it is safe to enable pv entry recording. 644 * Now it is safe to enable pv entry recording.
583 */ 645 */
584 pmap_initialized = true; 646 pmap_initialized = true;
585 647
 648#ifndef _LP64
 649 /*
 650 * If we have more memory than can be mapped by KSEG0, we need to allocate
 651 * enough VA so we can map pages with the right color (to avoid cache
 652 * alias problems).
 653 */
 654 if (mips_avail_end > MIPS_KSEG1_START - MIPS_KSEG0_START) {
 655 curcpu()->ci_pmap_dstbase = uvm_km_alloc(kernel_map,
 656 uvmexp.ncolors * PAGE_SIZE, 0, UVM_KMF_VAONLY);
 657 curcpu()->ci_pmap_srcbase = uvm_km_alloc(kernel_map,
 658 uvmexp.ncolors * PAGE_SIZE, 0, UVM_KMF_VAONLY);
 659 }
 660#endif
 661
 662
586#ifdef MIPS3 663#ifdef MIPS3
587 if (MIPS_HAS_R4K_MMU) { 664 if (MIPS_HAS_R4K_MMU) {
588 /* 665 /*
589 * XXX 666 * XXX
590 * Disable sosend_loan() in src/sys/kern/uipc_socket.c 667 * Disable sosend_loan() in src/sys/kern/uipc_socket.c
591 * on MIPS3 CPUs to avoid possible virtual cache aliases 668 * on MIPS3 CPUs to avoid possible virtual cache aliases
592 * and uncached mappings in pmap_enter_pv(). 669 * and uncached mappings in pmap_enter_pv().
593 *  670 *
594 * Ideally, read only shared mapping won't cause aliases 671 * Ideally, read only shared mapping won't cause aliases
595 * so pmap_enter_pv() should handle any shared read only 672 * so pmap_enter_pv() should handle any shared read only
596 * mappings without uncached ops like ARM pmap. 673 * mappings without uncached ops like ARM pmap.
597 *  674 *
598 * On the other hand, R4000 and R4400 have the virtual 675 * On the other hand, R4000 and R4400 have the virtual
@@ -612,109 +689,115 @@ pmap_init(void) @@ -612,109 +689,115 @@ pmap_init(void)
612 * is zero, the map is an actual physical 689 * is zero, the map is an actual physical
613 * map, and may be referenced by the 690 * map, and may be referenced by the
614 * hardware. 691 * hardware.
615 * 692 *
616 * If the size specified is non-zero, 693 * If the size specified is non-zero,
617 * the map will be used in software only, and 694 * the map will be used in software only, and
618 * is bounded by that size. 695 * is bounded by that size.
619 */ 696 */
620pmap_t 697pmap_t
621pmap_create(void) 698pmap_create(void)
622{ 699{
623 pmap_t pmap; 700 pmap_t pmap;
624 701
 702 PMAP_COUNT(create);
625#ifdef DEBUG 703#ifdef DEBUG
626 if (pmapdebug & (PDB_FOLLOW|PDB_CREATE)) 704 if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
627 printf("pmap_create()\n"); 705 printf("pmap_create()\n");
628#endif 706#endif
629 707
630 pmap = pool_get(&pmap_pmap_pool, PR_WAITOK); 708 pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
631 memset(pmap, 0, PMAP_SIZE); 709 memset(pmap, 0, PMAP_SIZE);
632 710
633 pmap->pm_count = 1; 711 pmap->pm_count = 1;
634 712
635 pmap_segtab_alloc(pmap); 713 pmap_segtab_alloc(pmap);
636 714
637 return pmap; 715 return pmap;
638} 716}
639 717
640/* 718/*
641 * Retire the given physical map from service. 719 * Retire the given physical map from service.
642 * Should only be called if the map contains 720 * Should only be called if the map contains
643 * no valid mappings. 721 * no valid mappings.
644 */ 722 */
645void 723void
646pmap_destroy(pmap_t pmap) 724pmap_destroy(pmap_t pmap)
647{ 725{
648 int count; 
649 
650#ifdef DEBUG 726#ifdef DEBUG
651 if (pmapdebug & (PDB_FOLLOW|PDB_CREATE)) 727 if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
652 printf("pmap_destroy(%p)\n", pmap); 728 printf("pmap_destroy(%p)\n", pmap);
653#endif 729#endif
654 count = --pmap->pm_count; 730 if (--pmap->pm_count) {
655 if (count > 0) 731 PMAP_COUNT(dereference);
656 return; 732 return;
 733 }
657 734
 735 PMAP_COUNT(destroy);
658 pmap_segtab_free(pmap); 736 pmap_segtab_free(pmap);
659 737
660 pool_put(&pmap_pmap_pool, pmap); 738 pool_put(&pmap_pmap_pool, pmap);
661} 739}
662 740
663/* 741/*
664 * Add a reference to the specified pmap. 742 * Add a reference to the specified pmap.
665 */ 743 */
666void 744void
667pmap_reference(pmap_t pmap) 745pmap_reference(pmap_t pmap)
668{ 746{
669 747
670#ifdef DEBUG 748#ifdef DEBUG
671 if (pmapdebug & PDB_FOLLOW) 749 if (pmapdebug & PDB_FOLLOW)
672 printf("pmap_reference(%p)\n", pmap); 750 printf("pmap_reference(%p)\n", pmap);
673#endif 751#endif
674 if (pmap != NULL) { 752 if (pmap != NULL) {
675 pmap->pm_count++; 753 pmap->pm_count++;
676 } 754 }
 755 PMAP_COUNT(reference);
677} 756}
678 757
679/* 758/*
680 * Make a new pmap (vmspace) active for the given process. 759 * Make a new pmap (vmspace) active for the given process.
681 */ 760 */
682void 761void
683pmap_activate(struct lwp *l) 762pmap_activate(struct lwp *l)
684{ 763{
685 pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap; 764 pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
686 unsigned int asid; 765 uint32_t asid;
 766
 767 PMAP_COUNT(activate);
687 768
688 asid = pmap_tlb_asid_alloc(pmap, l->l_cpu); 769 asid = pmap_tlb_asid_alloc(pmap, l->l_cpu);
689 if (l == curlwp) { 770 if (l == curlwp) {
690 pmap_segtab_activate(l); 771 pmap_segtab_activate(l);
691 tlb_set_asid(asid); 772 tlb_set_asid(asid);
692 } 773 }
693} 774}
694 775
695/* 776/*
696 * Make a previously active pmap (vmspace) inactive. 777 * Make a previously active pmap (vmspace) inactive.
697 */ 778 */
698void 779void
699pmap_deactivate(struct lwp *l) 780pmap_deactivate(struct lwp *l)
700{ 781{
 782 PMAP_COUNT(deactivate);
701 783
702 /* Nothing to do. */ 784 /* Nothing to do. */
703} 785}
704 786
705void 787void
706pmap_update(struct pmap *pmap) 788pmap_update(struct pmap *pmap)
707{ 789{
 790 PMAP_COUNT(update);
708#if 0 791#if 0
709 __asm __volatile( 792 __asm __volatile(
710 "mtc0\t$ra,$%0; nop; eret" 793 "mtc0\t$ra,$%0; nop; eret"
711 : 794 :
712 : "n"(MIPS_COP_0_ERROR_PC)); 795 : "n"(MIPS_COP_0_ERROR_PC));
713#endif 796#endif
714} 797}
715 798
716/* 799/*
717 * Remove the given range of addresses from the specified map. 800 * Remove the given range of addresses from the specified map.
718 * 801 *
719 * It is assumed that the start and end are properly 802 * It is assumed that the start and end are properly
720 * rounded to the page size. 803 * rounded to the page size.
@@ -726,85 +809,88 @@ pmap_pte_remove(pmap_t pmap, vaddr_t sva @@ -726,85 +809,88 @@ pmap_pte_remove(pmap_t pmap, vaddr_t sva
726{ 809{
727#ifdef DEBUG 810#ifdef DEBUG
728 if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT)) { 811 if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT)) {
729 printf("%s: %p, %"PRIxVADDR", %"PRIxVADDR", %p, %"PRIxPTR"\n", 812 printf("%s: %p, %"PRIxVADDR", %"PRIxVADDR", %p, %"PRIxPTR"\n",
730 __func__, pmap, sva, eva, pte, flags); 813 __func__, pmap, sva, eva, pte, flags);
731 } 814 }
732#endif 815#endif
733 816
734 for (; sva < eva; sva += NBPG, pte++) { 817 for (; sva < eva; sva += NBPG, pte++) {
735 struct vm_page *pg; 818 struct vm_page *pg;
736 uint32_t pt_entry = pte->pt_entry; 819 uint32_t pt_entry = pte->pt_entry;
737 if (!mips_pg_v(pt_entry)) 820 if (!mips_pg_v(pt_entry))
738 continue; 821 continue;
 822 PMAP_COUNT(remove_user_pages);
739 if (mips_pg_wired(pt_entry)) 823 if (mips_pg_wired(pt_entry))
740 pmap->pm_stats.wired_count--; 824 pmap->pm_stats.wired_count--;
741 pmap->pm_stats.resident_count--; 825 pmap->pm_stats.resident_count--;
742 pg = PHYS_TO_VM_PAGE(mips_tlbpfn_to_paddr(pt_entry)); 826 pg = PHYS_TO_VM_PAGE(mips_tlbpfn_to_paddr(pt_entry));
743 if (pg) 827 if (pg) {
744 pmap_remove_pv(pmap, sva, pg); 828 pmap_remove_pv(pmap, sva, pg,
 829 pt_entry & mips_pg_m_bit());
 830 }
745 pte->pt_entry = mips_pg_nv_bit(); 831 pte->pt_entry = mips_pg_nv_bit();
746 /* 832 /*
747 * Flush the TLB for the given address. 833 * Flush the TLB for the given address.
748 */ 834 */
749 pmap_tlb_invalidate_addr(pmap, sva); 835 pmap_tlb_invalidate_addr(pmap, sva);
750#ifdef DEBUG 
751 remove_stats.flushes++; 
752#endif 
753 } 836 }
754 return false; 837 return false;
755} 838}
756 839
757void 840void
758pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva) 841pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
759{ 842{
760 struct vm_page *pg; 843 struct vm_page *pg;
761 844
762#ifdef DEBUG 845#ifdef DEBUG
763 if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT)) 846 if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
764 printf("pmap_remove(%p, %#"PRIxVADDR", %#"PRIxVADDR")\n", pmap, sva, eva); 847 printf("pmap_remove(%p, %#"PRIxVADDR", %#"PRIxVADDR")\n", pmap, sva, eva);
765 remove_stats.calls++; 
766#endif 848#endif
 849
767 if (pmap == pmap_kernel()) { 850 if (pmap == pmap_kernel()) {
768 /* remove entries from kernel pmap */ 851 /* remove entries from kernel pmap */
 852 PMAP_COUNT(remove_kernel_calls);
769#ifdef PARANOIADIAG 853#ifdef PARANOIADIAG
770 if (sva < VM_MIN_KERNEL_ADDRESS || eva >= virtual_end) 854 if (sva < VM_MIN_KERNEL_ADDRESS || eva >= mips_virtual_end)
771 panic("pmap_remove: kva not in range"); 855 panic("pmap_remove: kva not in range");
772#endif 856#endif
773 pt_entry_t *pte = kvtopte(sva); 857 pt_entry_t *pte = kvtopte(sva);
774 for (; sva < eva; sva += NBPG, pte++) { 858 for (; sva < eva; sva += NBPG, pte++) {
775 uint32_t pt_entry = pte->pt_entry; 859 uint32_t pt_entry = pte->pt_entry;
776 if (!mips_pg_v(pt_entry)) 860 if (!mips_pg_v(pt_entry))
777 continue; 861 continue;
 862 PMAP_COUNT(remove_kernel_pages);
778 if (mips_pg_wired(pt_entry)) 863 if (mips_pg_wired(pt_entry))
779 pmap->pm_stats.wired_count--; 864 pmap->pm_stats.wired_count--;
780 pmap->pm_stats.resident_count--; 865 pmap->pm_stats.resident_count--;
781 pg = PHYS_TO_VM_PAGE(mips_tlbpfn_to_paddr(pt_entry)); 866 pg = PHYS_TO_VM_PAGE(mips_tlbpfn_to_paddr(pt_entry));
782 if (pg) 867 if (pg)
783 pmap_remove_pv(pmap, sva, pg); 868 pmap_remove_pv(pmap, sva, pg, false);
784 if (MIPS_HAS_R4K_MMU) 869 if (MIPS_HAS_R4K_MMU)
785 /* See above about G bit */ 870 /* See above about G bit */
786 pte->pt_entry = MIPS3_PG_NV | MIPS3_PG_G; 871 pte->pt_entry = MIPS3_PG_NV | MIPS3_PG_G;
787 else 872 else
788 pte->pt_entry = MIPS1_PG_NV; 873 pte->pt_entry = MIPS1_PG_NV;
789 874
790 /* 875 /*
791 * Flush the TLB for the given address. 876 * Flush the TLB for the given address.
792 */ 877 */
793 pmap_tlb_invalidate_addr(pmap, sva); 878 pmap_tlb_invalidate_addr(pmap, sva);
794 } 879 }
795 return; 880 return;
796 } 881 }
797 882
 883 PMAP_COUNT(remove_user_calls);
798#ifdef PARANOIADIAG 884#ifdef PARANOIADIAG
799 if (eva > VM_MAXUSER_ADDRESS) 885 if (eva > VM_MAXUSER_ADDRESS)
800 panic("pmap_remove: uva not in range"); 886 panic("pmap_remove: uva not in range");
801 if (PMAP_IS_ACTIVE(pmap)) { 887 if (PMAP_IS_ACTIVE(pmap)) {
802 struct pmap_asid_info * const pai = PMAP_PAI(pmap, curcpu()); 888 struct pmap_asid_info * const pai = PMAP_PAI(pmap, curcpu());
803 uint32_t asid; 889 uint32_t asid;
804 890
805 __asm volatile("mfc0 %0,$10; nop" : "=r"(asid)); 891 __asm volatile("mfc0 %0,$10; nop" : "=r"(asid));
806 asid = (MIPS_HAS_R4K_MMU) ? (asid & 0xff) : (asid & 0xfc0) >> 6; 892 asid = (MIPS_HAS_R4K_MMU) ? (asid & 0xff) : (asid & 0xfc0) >> 6;
807 if (asid != pai->pai_asid) { 893 if (asid != pai->pai_asid) {
808 panic("inconsistency for active TLB flush: %d <-> %d", 894 panic("inconsistency for active TLB flush: %d <-> %d",
809 asid, pai->pai_asid); 895 asid, pai->pai_asid);
810 } 896 }
@@ -814,125 +900,146 @@ pmap_remove(pmap_t pmap, vaddr_t sva, va @@ -814,125 +900,146 @@ pmap_remove(pmap_t pmap, vaddr_t sva, va
814} 900}
815 901
816/* 902/*
817 * pmap_page_protect: 903 * pmap_page_protect:
818 * 904 *
819 * Lower the permission for all mappings to a given page. 905 * Lower the permission for all mappings to a given page.
820 */ 906 */
821void 907void
822pmap_page_protect(struct vm_page *pg, vm_prot_t prot) 908pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
823{ 909{
824 pv_entry_t pv; 910 pv_entry_t pv;
825 vaddr_t va; 911 vaddr_t va;
826 912
 913 PMAP_COUNT(page_protect);
827#ifdef DEBUG 914#ifdef DEBUG
828 if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) || 915 if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
829 (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE))) 916 (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
830 printf("pmap_page_protect(%#"PRIxPADDR", %x)\n", 917 printf("pmap_page_protect(%#"PRIxPADDR", %x)\n",
831 VM_PAGE_TO_PHYS(pg), prot); 918 VM_PAGE_TO_PHYS(pg), prot);
832#endif 919#endif
833 switch (prot) { 920 switch (prot) {
834 case VM_PROT_READ|VM_PROT_WRITE: 921 case VM_PROT_READ|VM_PROT_WRITE:
835 case VM_PROT_ALL: 922 case VM_PROT_ALL:
836 break; 923 break;
837 924
838 /* copy_on_write */ 925 /* copy_on_write */
839 case VM_PROT_READ: 926 case VM_PROT_READ:
840 case VM_PROT_READ|VM_PROT_EXECUTE: 927 case VM_PROT_READ|VM_PROT_EXECUTE:
841 pv = pg->mdpage.pvh_list; 928 pv = &pg->mdpage.pvh_first;
842 /* 929 /*
843 * Loop over all current mappings setting/clearing as appropos. 930 * Loop over all current mappings setting/clearing as appropos.
844 */ 931 */
845 if (pv->pv_pmap != NULL) { 932 if (pv->pv_pmap != NULL) {
846 for (; pv; pv = pv->pv_next) { 933 for (; pv; pv = pv->pv_next) {
847 va = pv->pv_va; 934 va = pv->pv_va;
848 pmap_protect(pv->pv_pmap, va, va + PAGE_SIZE, 935 pmap_protect(pv->pv_pmap, va, va + PAGE_SIZE,
849 prot); 936 prot);
850 pmap_update(pv->pv_pmap); 937 pmap_update(pv->pv_pmap);
851 } 938 }
852 } 939 }
853 break; 940 break;
854 941
855 /* remove_all */ 942 /* remove_all */
856 default: 943 default:
857 pv = pg->mdpage.pvh_list; 944 /*
 945 * Do this first so that for each unmapping, pmap_remove_pv
 946 * won't try to sync the icache.
 947 */
 948 if (pmap_clear_page_attributes(pg, PG_MD_EXECPAGE)) {
 949 PMAP_COUNT(exec_uncached_page_protect);
 950 }
 951 pv = &pg->mdpage.pvh_first;
858 while (pv->pv_pmap != NULL) { 952 while (pv->pv_pmap != NULL) {
859 pmap_remove(pv->pv_pmap, pv->pv_va, 953 va = pv->pv_va;
860 pv->pv_va + PAGE_SIZE); 954 pmap_remove(pv->pv_pmap, va, va + PAGE_SIZE);
 955 pmap_update(pv->pv_pmap);
861 } 956 }
862 pmap_update(pv->pv_pmap); 
863 } 957 }
864} 958}
865 959
866static bool 960static bool
867pmap_pte_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *pte, 961pmap_pte_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *pte,
868 uintptr_t flags) 962 uintptr_t flags)
869{ 963{
870 const uint32_t pg_mask = ~(mips_pg_m_bit() | mips_pg_ro_bit()); 964 const uint32_t pg_mask = ~(mips_pg_m_bit() | mips_pg_ro_bit());
871 const uint32_t p = flags; 965 const uint32_t p = (flags & VM_PROT_WRITE) ? mips_pg_rw_bit() : mips_pg_ro_bit();
872 
873 /* 966 /*
874 * Change protection on every valid mapping within this segment. 967 * Change protection on every valid mapping within this segment.
875 */ 968 */
876 for (; sva < eva; sva += NBPG, pte++) { 969 for (; sva < eva; sva += NBPG, pte++) {
877 uint32_t pt_entry = pte->pt_entry; 970 uint32_t pt_entry = pte->pt_entry;
878 if (!mips_pg_v(pt_entry)) 971 if (!mips_pg_v(pt_entry))
879 continue; 972 continue;
880 if (MIPS_HAS_R4K_MMU && (pt_entry & mips_pg_m_bit())) 973 struct vm_page *pg;
881 mips_dcache_wbinv_range_index(sva, PAGE_SIZE); 974 pg = PHYS_TO_VM_PAGE(mips_tlbpfn_to_paddr(pt_entry));
 975 if (pg && (pt_entry & mips_pg_m_bit())) {
 976 if (MIPS_HAS_R4K_MMU
 977 && MIPS_CACHE_VIRTUAL_ALIAS
 978 && PG_MD_CACHED_P(pg))
 979 mips_dcache_wbinv_range_index(sva, PAGE_SIZE);
 980 if (PG_MD_EXECPAGE_P(pg)) {
 981 KASSERT(pg->mdpage.pvh_first.pv_pmap != NULL);
 982 if (PG_MD_CACHED_P(pg)) {
 983 pmap_page_syncicache(pg);
 984 PMAP_COUNT(exec_synced_protect);
 985 }
 986 }
 987 }
882 pt_entry = (pt_entry & pg_mask) | p; 988 pt_entry = (pt_entry & pg_mask) | p;
883 pte->pt_entry = pt_entry; 989 pte->pt_entry = pt_entry;
884 /* 990 /*
885 * Update the TLB if needed. 991 * Update the TLB if needed.
886 */ 992 */
887 pmap_tlb_update(pmap, sva, pt_entry); 993 pmap_tlb_update(pmap, sva, pt_entry);
888 } 994 }
889 return false; 995 return false;
890} 996}
891 997
892/* 998/*
893 * Set the physical protection on the 999 * Set the physical protection on the
894 * specified range of this map as requested. 1000 * specified range of this map as requested.
895 */ 1001 */
896void 1002void
897pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot) 1003pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
898{ 1004{
899 const uint32_t pg_mask = ~(mips_pg_m_bit() | mips_pg_ro_bit()); 1005 const uint32_t pg_mask = ~(mips_pg_m_bit() | mips_pg_ro_bit());
900 pt_entry_t *pte; 1006 pt_entry_t *pte;
901 u_int p; 1007 u_int p;
902 1008
 1009 PMAP_COUNT(protect);
903#ifdef DEBUG 1010#ifdef DEBUG
904 if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) 1011 if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
905 printf("pmap_protect(%p, %#"PRIxVADDR", %#"PRIxVADDR", %x)\n", 1012 printf("pmap_protect(%p, %#"PRIxVADDR", %#"PRIxVADDR", %x)\n",
906 pmap, sva, eva, prot); 1013 pmap, sva, eva, prot);
907#endif 1014#endif
908 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1015 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
909 pmap_remove(pmap, sva, eva); 1016 pmap_remove(pmap, sva, eva);
910 return; 1017 return;
911 } 1018 }
912 1019
913 p = (prot & VM_PROT_WRITE) ? mips_pg_rw_bit() : mips_pg_ro_bit(); 1020 p = (prot & VM_PROT_WRITE) ? mips_pg_rw_bit() : mips_pg_ro_bit();
914 1021
915 if (pmap == pmap_kernel()) { 1022 if (pmap == pmap_kernel()) {
916 /* 1023 /*
917 * Change entries in kernel pmap. 1024 * Change entries in kernel pmap.
918 * This will trap if the page is writable (in order to set 1025 * This will trap if the page is writable (in order to set
919 * the dirty bit) even if the dirty bit is already set. The 1026 * the dirty bit) even if the dirty bit is already set. The
920 * optimization isn't worth the effort since this code isn't 1027 * optimization isn't worth the effort since this code isn't
921 * executed much. The common case is to make a user page 1028 * executed much. The common case is to make a user page
922 * read-only. 1029 * read-only.
923 */ 1030 */
924#ifdef PARANOIADIAG 1031#ifdef PARANOIADIAG
925 if (sva < VM_MIN_KERNEL_ADDRESS || eva >= virtual_end) 1032 if (sva < VM_MIN_KERNEL_ADDRESS || eva >= mips_virtual_end)
926 panic("pmap_protect: kva not in range"); 1033 panic("pmap_protect: kva not in range");
927#endif 1034#endif
928 pte = kvtopte(sva); 1035 pte = kvtopte(sva);
929 for (; sva < eva; sva += NBPG, pte++) { 1036 for (; sva < eva; sva += NBPG, pte++) {
930 uint32_t pt_entry = pte->pt_entry; 1037 uint32_t pt_entry = pte->pt_entry;
931 if (!mips_pg_v(pt_entry)) 1038 if (!mips_pg_v(pt_entry))
932 continue; 1039 continue;
933 if (MIPS_HAS_R4K_MMU && (pt_entry & mips_pg_m_bit())) 1040 if (MIPS_HAS_R4K_MMU && (pt_entry & mips_pg_m_bit()))
934 mips_dcache_wb_range(sva, PAGE_SIZE); 1041 mips_dcache_wb_range(sva, PAGE_SIZE);
935 pt_entry &= (pt_entry & pg_mask) | p; 1042 pt_entry &= (pt_entry & pg_mask) | p;
936 pte->pt_entry = pt_entry; 1043 pte->pt_entry = pt_entry;
937 pmap_tlb_update(pmap, sva, pt_entry); 1044 pmap_tlb_update(pmap, sva, pt_entry);
938 } 1045 }
@@ -1006,237 +1113,250 @@ pmap_procwr(struct proc *p, vaddr_t va,  @@ -1006,237 +1113,250 @@ pmap_procwr(struct proc *p, vaddr_t va,
1006 * XXXJRT need to loop. 1113 * XXXJRT need to loop.
1007 */ 1114 */
1008 mips_icache_sync_range( 1115 mips_icache_sync_range(
1009 MIPS_PHYS_TO_KSEG0(mips1_tlbpfn_to_paddr(entry) 1116 MIPS_PHYS_TO_KSEG0(mips1_tlbpfn_to_paddr(entry)
1010 + (va & PGOFSET)), 1117 + (va & PGOFSET)),
1011 len); 1118 len);
1012#endif /* MIPS1 */ 1119#endif /* MIPS1 */
1013 } 1120 }
1014} 1121}
1015 1122
1016/* 1123/*
1017 * Return RO protection of page. 1124 * Return RO protection of page.
1018 */ 1125 */
1019int 1126bool
1020pmap_is_page_ro(pmap_t pmap, vaddr_t va, int entry) 1127pmap_is_page_ro_p(pmap_t pmap, vaddr_t va, uint32_t entry)
1021{ 1128{
1022 1129
1023 return entry & mips_pg_ro_bit(); 1130 return (entry & mips_pg_ro_bit()) != 0;
1024} 1131}
1025 1132
1026#if defined(MIPS3_PLUS) && !defined(MIPS3_NO_PV_UNCACHED) /* XXX mmu XXX */ 1133#if defined(MIPS3_PLUS) && !defined(MIPS3_NO_PV_UNCACHED) /* XXX mmu XXX */
1027/* 1134/*
1028 * pmap_page_cache: 1135 * pmap_page_cache:
1029 * 1136 *
1030 * Change all mappings of a managed page to cached/uncached. 1137 * Change all mappings of a managed page to cached/uncached.
1031 */ 1138 */
1032static void 1139static void
1033pmap_page_cache(struct vm_page *pg, int mode) 1140pmap_page_cache(struct vm_page *pg, bool cached)
1034{ 1141{
1035 pv_entry_t pv; 1142 const uint32_t newmode = cached ? MIPS3_PG_CACHED : MIPS3_PG_UNCACHED;
1036 unsigned newmode; 
1037 1143
1038#ifdef DEBUG 1144#ifdef DEBUG
1039 if (pmapdebug & (PDB_FOLLOW|PDB_ENTER)) 1145 if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
1040 printf("pmap_page_uncache(%#"PRIxPADDR")\n", VM_PAGE_TO_PHYS(pg)); 1146 printf("pmap_page_uncache(%#"PRIxPADDR")\n", VM_PAGE_TO_PHYS(pg));
1041#endif 1147#endif
1042 newmode = mode & PV_UNCACHED ? MIPS3_PG_UNCACHED : MIPS3_PG_CACHED; 1148
1043 pv = pg->mdpage.pvh_list; 1149 if (cached) {
 1150 pmap_clear_page_attributes(pg, PG_MD_UNCACHED);
 1151 PMAP_COUNT(page_cache_restorations);
 1152 } else {
 1153 pmap_set_page_attributes(pg, PG_MD_UNCACHED);
 1154 PMAP_COUNT(page_cache_evictions);
 1155 }
1044 1156
1045 while (pv) { 1157 for (pv_entry_t pv = &pg->mdpage.pvh_first;
 1158 pv != NULL;
 1159 pv = pv->pv_next) {
1046 pmap_t pmap = pv->pv_pmap; 1160 pmap_t pmap = pv->pv_pmap;
1047 pt_entry_t *pte; 1161 pt_entry_t *pte;
1048 uint32_t entry; 1162 uint32_t pt_entry;
1049 1163
1050 pv->pv_flags = (pv->pv_flags & ~PV_UNCACHED) | mode; 1164 KASSERT(pv->pv_pmap != NULL);
1051 if (pmap == pmap_kernel()) { 1165 if (pmap == pmap_kernel()) {
1052 /* 1166 /*
1053 * Change entries in kernel pmap. 1167 * Change entries in kernel pmap.
1054 */ 1168 */
1055 pte = kvtopte(pv->pv_va); 1169 pte = kvtopte(pv->pv_va);
1056 } else { 1170 } else {
1057 pte = pmap_pte_lookup(pmap, pv->pv_va); 1171 pte = pmap_pte_lookup(pmap, pv->pv_va);
1058 if (pte == NULL) 1172 if (pte == NULL)
1059 continue; 1173 continue;
1060 } 1174 }
1061 entry = pte->pt_entry; 1175 pt_entry = pte->pt_entry;
1062 if (entry & MIPS3_PG_V) { 1176 if (pt_entry & MIPS3_PG_V) {
1063 entry = (entry & ~MIPS3_PG_CACHEMODE) | newmode; 1177 pt_entry = (pt_entry & ~MIPS3_PG_CACHEMODE) | newmode;
1064 pte->pt_entry = entry; 1178 pte->pt_entry = pt_entry;
1065 pmap_tlb_update(pv->pv_pmap, pv->pv_va, entry); 1179 pmap_tlb_update(pv->pv_pmap, pv->pv_va, pt_entry);
1066 } 1180 }
1067 pv = pv->pv_next; 
1068 } 1181 }
1069} 1182}
1070#endif /* MIPS3_PLUS && !MIPS3_NO_PV_UNCACHED */ 1183#endif /* MIPS3_PLUS && !MIPS3_NO_PV_UNCACHED */
1071 1184
1072/* 1185/*
1073 * Insert the given physical page (p) at 1186 * Insert the given physical page (p) at
1074 * the specified virtual address (v) in the 1187 * the specified virtual address (v) in the
1075 * target physical map with the protection requested. 1188 * target physical map with the protection requested.
1076 * 1189 *
1077 * If specified, the page will be wired down, meaning 1190 * If specified, the page will be wired down, meaning
1078 * that the related pte can not be reclaimed. 1191 * that the related pte can not be reclaimed.
1079 * 1192 *
1080 * NB: This is the only routine which MAY NOT lazy-evaluate 1193 * NB: This is the only routine which MAY NOT lazy-evaluate
1081 * or lose information. That is, this routine must actually 1194 * or lose information. That is, this routine must actually
1082 * insert this page into the given map NOW. 1195 * insert this page into the given map NOW.
1083 */ 1196 */
1084int 1197int
1085pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags) 1198pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
1086{ 1199{
1087 pt_entry_t *pte; 1200 pt_entry_t *pte;
1088 u_int npte; 1201 u_int npte;
1089 struct vm_page *pg; 1202 struct vm_page *pg;
1090 unsigned asid; 
1091#if defined(_MIPS_PADDR_T_64BIT) || defined(_LP64) 1203#if defined(_MIPS_PADDR_T_64BIT) || defined(_LP64)
1092 int cached = 1; 1204 bool cached = true;
1093#endif 1205#endif
1094 bool wired = (flags & PMAP_WIRED) != 0; 1206 bool wired = (flags & PMAP_WIRED) != 0;
1095 1207
1096#ifdef DEBUG 1208#ifdef DEBUG
1097 if (pmapdebug & (PDB_FOLLOW|PDB_ENTER)) 1209 if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
1098 printf("pmap_enter(%p, %#"PRIxVADDR", %#"PRIxPADDR", %x, %x)\n", 1210 printf("pmap_enter(%p, %#"PRIxVADDR", %#"PRIxPADDR", %x, %x)\n",
1099 pmap, va, pa, prot, wired); 1211 pmap, va, pa, prot, wired);
1100#endif 1212#endif
1101#if defined(DEBUG) || defined(DIAGNOSTIC) || defined(PARANOIADIAG) 1213 const bool good_color = PMAP_PAGE_COLOROK_P(pa, va);
1102 if (pmap == pmap_kernel()) { 1214 if (pmap == pmap_kernel()) {
1103#ifdef DEBUG 1215 PMAP_COUNT(kernel_mappings);
1104 enter_stats.kernel++; 1216 if (!good_color)
1105#endif 1217 PMAP_COUNT(kernel_mappings_bad);
 1218#if defined(DEBUG) || defined(DIAGNOSTIC) || defined(PARANOIADIAG)
1106 if (va < VM_MIN_KERNEL_ADDRESS || va >= mips_virtual_end) 1219 if (va < VM_MIN_KERNEL_ADDRESS || va >= mips_virtual_end)
1107 panic("pmap_enter: kva too big"); 1220 panic("pmap_enter: kva %#"PRIxVADDR" too big", va);
1108 } else { 
1109#ifdef DEBUG 
1110 enter_stats.user++; 
1111#endif 1221#endif
 1222 } else {
 1223 PMAP_COUNT(user_mappings);
 1224 if (!good_color)
 1225 PMAP_COUNT(user_mappings_bad);
 1226#if defined(DEBUG) || defined(DIAGNOSTIC) || defined(PARANOIADIAG)
1112 if (va >= VM_MAXUSER_ADDRESS) 1227 if (va >= VM_MAXUSER_ADDRESS)
1113 panic("pmap_enter: uva too big"); 1228 panic("pmap_enter: uva %#"PRIxVADDR" too big", va);
1114 } 
1115#endif 1229#endif
 1230 }
1116#ifdef PARANOIADIAG 1231#ifdef PARANOIADIAG
1117#if defined(cobalt) || defined(newsmips) || defined(pmax) /* otherwise ok */ 1232#if defined(cobalt) || defined(newsmips) || defined(pmax) /* otherwise ok */
1118 if (pa & 0x80000000) /* this is not error in general. */ 1233 if (pa & 0x80000000) /* this is not error in general. */
1119 panic("pmap_enter: pa"); 1234 panic("pmap_enter: pa");
1120#endif 1235#endif
1121 1236
1122#if defined(_MIPS_PADDR_T_64BIT) || defined(_LP64) 1237#if defined(_MIPS_PADDR_T_64BIT) || defined(_LP64)
1123 if (pa & PMAP_NOCACHE) { 1238 if (pa & PMAP_NOCACHE) {
1124 cached = 0; 1239 cached = false;
1125 pa &= ~PMAP_NOCACHE; 1240 pa &= ~PMAP_NOCACHE;
1126 } 1241 }
1127#endif 1242#endif
1128 1243
1129 if (!(prot & VM_PROT_READ)) 1244 if (!(prot & VM_PROT_READ))
1130 panic("pmap_enter: prot"); 1245 panic("pmap_enter: prot");
1131#endif 1246#endif
1132 pg = PHYS_TO_VM_PAGE(pa); 1247 pg = PHYS_TO_VM_PAGE(pa);
1133 1248
1134 if (pg) { 1249 if (pg) {
1135 /* Set page referenced/modified status based on flags */ 1250 /* Set page referenced/modified status based on flags */
1136 if (flags & VM_PROT_WRITE) 1251 if (flags & VM_PROT_WRITE)
1137 pmap_set_page_attributes(pg, PV_MODIFIED|PV_REFERENCED); 1252 pmap_set_page_attributes(pg, PG_MD_MODIFIED|PG_MD_REFERENCED);
1138 else if (flags & VM_PROT_ALL) 1253 else if (flags & VM_PROT_ALL)
1139 pmap_set_page_attributes(pg, PV_REFERENCED); 1254 pmap_set_page_attributes(pg, PG_MD_REFERENCED);
1140 if (!(prot & VM_PROT_WRITE)) 1255 if (!(prot & VM_PROT_WRITE))
1141 /* 1256 /*
1142 * If page is not yet referenced, we could emulate this 1257 * If page is not yet referenced, we could emulate this
1143 * by not setting the page valid, and setting the 1258 * by not setting the page valid, and setting the
1144 * referenced status in the TLB fault handler, similar 1259 * referenced status in the TLB fault handler, similar
1145 * to how page modified status is done for UTLBmod 1260 * to how page modified status is done for UTLBmod
1146 * exceptions. 1261 * exceptions.
1147 */ 1262 */
1148 npte = mips_pg_ropage_bit(); 1263 npte = mips_pg_ropage_bit();
1149 else { 1264 else {
1150#if defined(_MIPS_PADDR_T_64BIT) || defined(_LP64) 1265#if defined(_MIPS_PADDR_T_64BIT) || defined(_LP64)
1151 if (cached == 0) { 1266 if (cached == false) {
1152 if (pg->mdpage.pvh_attrs & PV_MODIFIED) { 1267 if (PG_MD_MODIFIED_P(pg)) {
1153 npte = mips_pg_rwncpage_bit(); 1268 npte = mips_pg_rwncpage_bit();
1154 } else { 1269 } else {
1155 npte = mips_pg_cwncpage_bit(); 1270 npte = mips_pg_cwncpage_bit();
1156 } 1271 }
 1272 PMAP_COUNT(uncached_mappings);
1157 } else { 1273 } else {
1158#endif 1274#endif
1159 if (pg->mdpage.pvh_attrs & PV_MODIFIED) { 1275 if (PG_MD_MODIFIED_P(pg)) {
1160 npte = mips_pg_rwpage_bit(); 1276 npte = mips_pg_rwpage_bit();
1161 } else { 1277 } else {
1162 npte = mips_pg_cwpage_bit(); 1278 npte = mips_pg_cwpage_bit();
1163 } 1279 }
1164#if defined(_MIPS_PADDR_T_64BIT) || defined(_LP64) 1280#if defined(_MIPS_PADDR_T_64BIT) || defined(_LP64)
1165 } 1281 }
1166#endif 1282#endif
1167 } 1283 }
1168#ifdef DEBUG 1284 PMAP_COUNT(managed_mappings);
1169 enter_stats.managed++; 
1170#endif 
1171 } else { 1285 } else {
1172 /* 1286 /*
1173 * Assumption: if it is not part of our managed memory 1287 * Assumption: if it is not part of our managed memory
1174 * then it must be device memory which may be volatile. 1288 * then it must be device memory which may be volatile.
1175 */ 1289 */
1176#ifdef DEBUG 
1177 enter_stats.unmanaged++; 
1178#endif 
1179 if (MIPS_HAS_R4K_MMU) { 1290 if (MIPS_HAS_R4K_MMU) {
1180 npte = MIPS3_PG_IOPAGE(PMAP_CCA_FOR_PA(pa)) & 1291 npte = MIPS3_PG_IOPAGE(PMAP_CCA_FOR_PA(pa)) &
1181 ~MIPS3_PG_G; 1292 ~MIPS3_PG_G;
1182 if ((prot & VM_PROT_WRITE) == 0) { 1293 if ((prot & VM_PROT_WRITE) == 0) {
1183 npte |= MIPS3_PG_RO; 1294 npte |= MIPS3_PG_RO;
1184 npte &= ~MIPS3_PG_D; 1295 npte &= ~MIPS3_PG_D;
1185 } 1296 }
1186 } else { 1297 } else {
1187 npte = (prot & VM_PROT_WRITE) ? 1298 npte = (prot & VM_PROT_WRITE) ?
1188 (MIPS1_PG_D | MIPS1_PG_N) : 1299 (MIPS1_PG_D | MIPS1_PG_N) :
1189 (MIPS1_PG_RO | MIPS1_PG_N); 1300 (MIPS1_PG_RO | MIPS1_PG_N);
1190 } 1301 }
 1302 PMAP_COUNT(unmanaged_mappings);
1191 } 1303 }
1192 1304
 1305#if 0
1193 /* 1306 /*
1194 * The only time we need to flush the cache is if we 1307 * The only time we need to flush the cache is if we
1195 * execute from a physical address and then change the data. 1308 * execute from a physical address and then change the data.
1196 * This is the best place to do this. 1309 * This is the best place to do this.
1197 * pmap_protect() and pmap_remove() are mostly used to switch 1310 * pmap_protect() and pmap_remove() are mostly used to switch
1198 * between R/W and R/O pages. 1311 * between R/W and R/O pages.
1199 * NOTE: we only support cache flush for read only text. 1312 * NOTE: we only support cache flush for read only text.
1200 */ 1313 */
1201#ifdef MIPS1 1314#ifdef MIPS1
1202 if ((!MIPS_HAS_R4K_MMU) && prot == (VM_PROT_READ | VM_PROT_EXECUTE)) { 1315 if (!MIPS_HAS_R4K_MMU
1203 mips_icache_sync_range(MIPS_PHYS_TO_KSEG0(pa), PAGE_SIZE); 1316 && pg != NULL
 1317 && prot == (VM_PROT_READ | VM_PROT_EXECUTE)) {
 1318 PMAP_COUNT(enter_exec_mapping);
 1319 if (!PG_MD_EXECPAGE_P(pg)) {
 1320 mips_icache_sync_range(MIPS_PHYS_TO_KSEG0(pa),
 1321 PAGE_SIZE);
 1322 pmap_set_page_attributes(pg, PG_MD_EXECPAGE);
 1323 PMAP_COUNT(exec_syncicache_entry);
 1324 }
1204 } 1325 }
1205#endif 1326#endif
 1327#endif
1206 1328
1207 if (pmap == pmap_kernel()) { 1329 if (pmap == pmap_kernel()) {
1208 if (pg) 1330 if (pg)
1209 pmap_enter_pv(pmap, va, pg, &npte); 1331 pmap_enter_pv(pmap, va, pg, &npte);
1210 1332
1211 /* enter entries into kernel pmap */ 1333 /* enter entries into kernel pmap */
1212 pte = kvtopte(va); 1334 pte = kvtopte(va);
1213 1335
1214 if (MIPS_HAS_R4K_MMU) 1336 if (MIPS_HAS_R4K_MMU)
1215 npte |= mips3_paddr_to_tlbpfn(pa) | MIPS3_PG_G; 1337 npte |= mips3_paddr_to_tlbpfn(pa) | MIPS3_PG_G;
1216 else 1338 else
1217 npte |= mips1_paddr_to_tlbpfn(pa) | 1339 npte |= mips1_paddr_to_tlbpfn(pa) |
1218 MIPS1_PG_V | MIPS1_PG_G; 1340 MIPS1_PG_V | MIPS1_PG_G;
1219 1341
1220 if (wired) { 1342 if (wired) {
1221 pmap->pm_stats.wired_count++; 1343 pmap->pm_stats.wired_count++;
1222 npte |= mips_pg_wired_bit(); 1344 npte |= mips_pg_wired_bit();
1223 } 1345 }
1224 if (mips_pg_v(pte->pt_entry) 1346 if (mips_pg_v(pte->pt_entry)
1225 && mips_tlbpfn_to_paddr(pte->pt_entry) != pa) { 1347 && mips_tlbpfn_to_paddr(pte->pt_entry) != pa) {
1226 pmap_remove(pmap, va, va + NBPG); 1348 pmap_remove(pmap, va, va + NBPG);
1227#ifdef DEBUG 1349 PMAP_COUNT(kernel_mappings_changed);
1228 enter_stats.mchange++; 
1229#endif 
1230 } 1350 }
1231 if (!mips_pg_v(pte->pt_entry)) 1351 if (!mips_pg_v(pte->pt_entry))
1232 pmap->pm_stats.resident_count++; 1352 pmap->pm_stats.resident_count++;
1233 pte->pt_entry = npte; 1353 pte->pt_entry = npte;
1234 1354
1235 /* 1355 /*
1236 * Update the same virtual address entry. 1356 * Update the same virtual address entry.
1237 */ 1357 */
1238 1358
1239 pmap_tlb_update(pmap, va, npte); 1359 pmap_tlb_update(pmap, va, npte);
1240 return 0; 1360 return 0;
1241 } 1361 }
1242 1362
@@ -1254,169 +1374,161 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd @@ -1254,169 +1374,161 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
1254 * Assume uniform modified and referenced status for all 1374 * Assume uniform modified and referenced status for all
1255 * MIPS pages in a MACH page. 1375 * MIPS pages in a MACH page.
1256 */ 1376 */
1257 1377
1258 if (MIPS_HAS_R4K_MMU) 1378 if (MIPS_HAS_R4K_MMU)
1259 npte |= mips3_paddr_to_tlbpfn(pa); 1379 npte |= mips3_paddr_to_tlbpfn(pa);
1260 else 1380 else
1261 npte |= mips1_paddr_to_tlbpfn(pa) | MIPS1_PG_V; 1381 npte |= mips1_paddr_to_tlbpfn(pa) | MIPS1_PG_V;
1262 1382
1263 if (wired) { 1383 if (wired) {
1264 pmap->pm_stats.wired_count++; 1384 pmap->pm_stats.wired_count++;
1265 npte |= mips_pg_wired_bit(); 1385 npte |= mips_pg_wired_bit();
1266 } 1386 }
1267 struct pmap_asid_info * const pai = PMAP_PAI(pmap, curcpu()); 
1268 bool needsupdate = PMAP_PAI_ASIDVALID_P(pai, curcpu()); 
1269#if defined(DEBUG) 1387#if defined(DEBUG)
1270 if (pmapdebug & PDB_ENTER) { 1388 if (pmapdebug & PDB_ENTER) {
1271 printf("pmap_enter: %p: %#"PRIxVADDR": new pte %#x (pa %#"PRIxPADDR")", pmap, va, npte, pa); 1389 printf("pmap_enter: %p: %#"PRIxVADDR": new pte %#x (pa %#"PRIxPADDR")", pmap, va, npte, pa);
1272 if (needsupdate) 
1273 printf(" asid %u (%#x)", pai->pai_asid, pai->pai_asid); 
1274 printf("\n"); 1390 printf("\n");
1275 } 1391 }
1276#endif 1392#endif
1277 1393
1278#ifdef PARANOIADIAG 1394#ifdef PARANOIADIAG
1279 if (PMAP_IS_ACTIVE(pmap)) { 1395 if (PMAP_IS_ACTIVE(pmap)) {
1280 uint32_t asid; 1396 uint32_t asid;
1281 1397
1282 __asm volatile("mfc0 %0,$10; nop" : "=r"(asid)); 1398 __asm volatile("mfc0 %0,$10; nop" : "=r"(asid));
1283 asid = (MIPS_HAS_R4K_MMU) ? (asid & 0xff) : (asid & 0xfc0) >> 6; 1399 asid = (MIPS_HAS_R4K_MMU) ? (asid & 0xff) : (asid & 0xfc0) >> 6;
1284 if (asid != pai->pai_asid) { 1400 if (asid != pai->pai_asid) {
1285 panic("inconsistency for active TLB update: %u <-> %u", 1401 panic("inconsistency for active TLB update: %u <-> %u",
1286 asid, pai->pai_asid); 1402 asid, pai->pai_asid);
1287 } 1403 }
1288 } 1404 }
1289#endif 1405#endif
1290 1406
1291 asid = pai->pai_asid << MIPS_TLB_PID_SHIFT; 
1292 if (mips_pg_v(pte->pt_entry) && 1407 if (mips_pg_v(pte->pt_entry) &&
1293 mips_tlbpfn_to_paddr(pte->pt_entry) != pa) { 1408 mips_tlbpfn_to_paddr(pte->pt_entry) != pa) {
1294 pmap_remove(pmap, va, va + NBPG); 1409 pmap_remove(pmap, va, va + NBPG);
1295#ifdef DEBUG 1410 PMAP_COUNT(user_mappings_changed);
1296 enter_stats.mchange++; 
1297#endif 
1298 } 1411 }
1299 1412
1300 if (!mips_pg_v(pte->pt_entry)) 1413 if (!mips_pg_v(pte->pt_entry))
1301 pmap->pm_stats.resident_count++; 1414 pmap->pm_stats.resident_count++;
1302 pte->pt_entry = npte; 1415 pte->pt_entry = npte;
1303 1416
1304 if (needsupdate) 1417 pmap_tlb_update(pmap, va, npte);
1305 pmap_tlb_update(pmap, va, npte); 
1306 1418
1307#ifdef MIPS3_PLUS /* XXX mmu XXX */ 1419 if (pg != NULL && (prot == (VM_PROT_READ | VM_PROT_EXECUTE))) {
1308 if (MIPS_HAS_R4K_MMU && (prot == (VM_PROT_READ | VM_PROT_EXECUTE))) { 
1309#ifdef DEBUG 1420#ifdef DEBUG
1310 if (pmapdebug & PDB_ENTER) 1421 if (pmapdebug & PDB_ENTER)
1311 printf("pmap_enter: flush I cache va %#"PRIxVADDR" (%#"PRIxPADDR")\n", 1422 printf("pmap_enter: flush I cache va %#"PRIxVADDR" (%#"PRIxPADDR")\n",
1312 va - NBPG, pa); 1423 va - NBPG, pa);
1313#endif 1424#endif
1314 /* XXXJRT */ 1425 PMAP_COUNT(exec_mappings);
1315 mips_icache_sync_range_index(va, PAGE_SIZE); 1426 if (!PG_MD_EXECPAGE_P(pg) && PG_MD_CACHED_P(pg)) {
 1427 pmap_page_syncicache(pg);
 1428 pmap_set_page_attributes(pg, PG_MD_EXECPAGE);
 1429 PMAP_COUNT(exec_synced_mappings);
 1430 }
1316 } 1431 }
1317#endif 
1318 1432
1319 return 0; 1433 return 0;
1320} 1434}
1321 1435
1322void 1436void
1323pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot) 1437pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
1324{ 1438{
 1439 const bool managed = PAGE_IS_MANAGED(pa);
1325 pt_entry_t *pte; 1440 pt_entry_t *pte;
1326 u_int npte; 1441 u_int npte;
1327 bool managed = PAGE_IS_MANAGED(pa); 
1328 1442
1329#ifdef DEBUG 1443#ifdef DEBUG
1330 if (pmapdebug & (PDB_FOLLOW|PDB_ENTER)) 1444 if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
1331 printf("pmap_kenter_pa(%#"PRIxVADDR", %#"PRIxPADDR", %x)\n", va, pa, prot); 1445 printf("pmap_kenter_pa(%#"PRIxVADDR", %#"PRIxPADDR", %x)\n", va, pa, prot);
1332#endif 1446#endif
 1447 PMAP_COUNT(kenter_pa);
 1448 if (!PMAP_PAGE_COLOROK_P(pa, va) && managed)
 1449 PMAP_COUNT(kenter_pa_bad);
 1450
 1451 if (!managed)
 1452 PMAP_COUNT(kenter_pa_unmanaged);
1333 1453
1334 if (MIPS_HAS_R4K_MMU) { 1454 if (MIPS_HAS_R4K_MMU) {
1335 npte = mips3_paddr_to_tlbpfn(pa) | MIPS3_PG_WIRED; 1455 npte = mips3_paddr_to_tlbpfn(pa)
1336 if (prot & VM_PROT_WRITE) { 1456 | ((prot & VM_PROT_WRITE) ? MIPS3_PG_D : MIPS3_PG_RO)
1337 npte |= MIPS3_PG_D; 1457 | (managed ? MIPS3_PG_CACHED : MIPS3_PG_UNCACHED)
1338 } else { 1458 | MIPS3_PG_WIRED | MIPS3_PG_V | MIPS3_PG_G;
1339 npte |= MIPS3_PG_RO; 
1340 } 
1341 if (managed) { 
1342 npte |= MIPS3_PG_CACHED; 
1343 } else { 
1344 npte |= MIPS3_PG_UNCACHED; 
1345 } 
1346 npte |= MIPS3_PG_V | MIPS3_PG_G; 
1347 } else { 1459 } else {
1348 npte = mips1_paddr_to_tlbpfn(pa) | MIPS1_PG_WIRED; 1460 npte = mips1_paddr_to_tlbpfn(pa)
1349 if (prot & VM_PROT_WRITE) { 1461 | ((prot & VM_PROT_WRITE) ? MIPS1_PG_D : MIPS1_PG_RO)
1350 npte |= MIPS1_PG_D; 1462 | (managed ? 0 : MIPS1_PG_N)
1351 } else { 1463 | MIPS1_PG_WIRED | MIPS1_PG_V | MIPS1_PG_G;
1352 npte |= MIPS1_PG_RO; 
1353 } 
1354 if (managed) { 
1355 npte |= 0; 
1356 } else { 
1357 npte |= MIPS1_PG_N; 
1358 } 
1359 npte |= MIPS1_PG_V | MIPS1_PG_G; 
1360 } 1464 }
1361 pte = kvtopte(va); 1465 pte = kvtopte(va);
1362 KASSERT(!mips_pg_v(pte->pt_entry)); 1466 KASSERT(!mips_pg_v(pte->pt_entry));
1363 pte->pt_entry = npte; 1467 pte->pt_entry = npte;
1364 pmap_tlb_update(pmap_kernel(), va, npte); 1468 pmap_tlb_update(pmap_kernel(), va, npte);
1365} 1469}
1366 1470
1367void 1471void
1368pmap_kremove(vaddr_t va, vsize_t len) 1472pmap_kremove(vaddr_t va, vsize_t len)
1369{ 1473{
1370 pt_entry_t *pte; 
1371 vaddr_t eva; 
1372 u_int entry; 
1373 
1374#ifdef DEBUG 1474#ifdef DEBUG
1375 if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE)) 1475 if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE))
1376 printf("pmap_kremove(%#"PRIxVADDR", %#"PRIxVSIZE")\n", va, len); 1476 printf("pmap_kremove(%#"PRIxVADDR", %#"PRIxVSIZE")\n", va, len);
1377#endif 1477#endif
1378 1478
1379 pte = kvtopte(va); 1479 const uint32_t new_pt_entry =
1380 eva = va + len; 1480 (MIPS_HAS_R4K_MMU ? MIPS3_PG_NV | MIPS3_PG_G : MIPS1_PG_NV);
1381 for (; va < eva; va += PAGE_SIZE, pte++) { 1481
1382 entry = pte->pt_entry; 1482 pt_entry_t *pte = kvtopte(va);
1383 if (!mips_pg_v(entry)) { 1483 for (vaddr_t eva = va + len; va < eva; va += PAGE_SIZE, pte++) {
 1484 uint32_t pt_entry = pte->pt_entry;
 1485 if (!mips_pg_v(pt_entry)) {
1384 continue; 1486 continue;
1385 } 1487 }
1386 if (MIPS_HAS_R4K_MMU) { 1488
1387#ifndef sbmips /* XXX XXX if (dcache_is_virtual) - should also check icache virtual && EXEC mapping */ 1489 PMAP_COUNT(kremove_pages);
1388 mips_dcache_wbinv_range(va, PAGE_SIZE); 1490 if (MIPS_HAS_R4K_MMU && MIPS_CACHE_VIRTUAL_ALIAS) {
1389#endif 1491 struct vm_page *pg =
1390 pte->pt_entry = MIPS3_PG_NV | MIPS3_PG_G; 1492 PHYS_TO_VM_PAGE(mips_tlbpfn_to_paddr(pt_entry));
1391 } else { 1493 if (pg != NULL) {
1392 pte->pt_entry = MIPS1_PG_NV; 1494 pv_entry_t pv = &pg->mdpage.pvh_first;
 1495 if (pv->pv_pmap == NULL) {
 1496 pv->pv_va = va;
 1497 } else if (PG_MD_CACHED_P(pg)
 1498 && mips_cache_badalias(pv->pv_va, va)) {
 1499 mips_dcache_wbinv_range(va, PAGE_SIZE);
 1500 }
 1501 }
1393 } 1502 }
 1503
 1504 pte->pt_entry = new_pt_entry;
1394 pmap_tlb_invalidate_addr(pmap_kernel(), va); 1505 pmap_tlb_invalidate_addr(pmap_kernel(), va);
1395 } 1506 }
1396} 1507}
1397 1508
1398/* 1509/*
1399 * Routine: pmap_unwire 1510 * Routine: pmap_unwire
1400 * Function: Clear the wired attribute for a map/virtual-address 1511 * Function: Clear the wired attribute for a map/virtual-address
1401 * pair. 1512 * pair.
1402 * In/out conditions: 1513 * In/out conditions:
1403 * The mapping must already exist in the pmap. 1514 * The mapping must already exist in the pmap.
1404 */ 1515 */
1405void 1516void
1406pmap_unwire(pmap_t pmap, vaddr_t va) 1517pmap_unwire(pmap_t pmap, vaddr_t va)
1407{ 1518{
1408 pt_entry_t *pte; 1519 pt_entry_t *pte;
1409 1520
 1521 PMAP_COUNT(unwire);
1410#ifdef DEBUG 1522#ifdef DEBUG
1411 if (pmapdebug & (PDB_FOLLOW|PDB_WIRING)) 1523 if (pmapdebug & (PDB_FOLLOW|PDB_WIRING))
1412 printf("pmap_unwire(%p, %#"PRIxVADDR")\n", pmap, va); 1524 printf("pmap_unwire(%p, %#"PRIxVADDR")\n", pmap, va);
1413#endif 1525#endif
1414 /* 1526 /*
1415 * Don't need to flush the TLB since PG_WIRED is only in software. 1527 * Don't need to flush the TLB since PG_WIRED is only in software.
1416 */ 1528 */
1417 if (pmap == pmap_kernel()) { 1529 if (pmap == pmap_kernel()) {
1418 /* change entries in kernel pmap */ 1530 /* change entries in kernel pmap */
1419#ifdef PARANOIADIAG 1531#ifdef PARANOIADIAG
1420 if (va < VM_MIN_KERNEL_ADDRESS || va >= virtual_end) 1532 if (va < VM_MIN_KERNEL_ADDRESS || va >= virtual_end)
1421 panic("pmap_unwire"); 1533 panic("pmap_unwire");
1422#endif 1534#endif
@@ -1511,222 +1623,159 @@ done: @@ -1511,222 +1623,159 @@ done:
1511 1623
1512/* 1624/*
1513 * Copy the range specified by src_addr/len 1625 * Copy the range specified by src_addr/len
1514 * from the source map to the range dst_addr/len 1626 * from the source map to the range dst_addr/len
1515 * in the destination map. 1627 * in the destination map.
1516 * 1628 *
1517 * This routine is only advisory and need not do anything. 1629 * This routine is only advisory and need not do anything.
1518 */ 1630 */
1519void 1631void
1520pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, vsize_t len, 1632pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, vsize_t len,
1521 vaddr_t src_addr) 1633 vaddr_t src_addr)
1522{ 1634{
1523 1635
 1636 PMAP_COUNT(copy);
1524#ifdef DEBUG 1637#ifdef DEBUG
1525 if (pmapdebug & PDB_FOLLOW) 1638 if (pmapdebug & PDB_FOLLOW)
1526 printf("pmap_copy(%p, %p, %#"PRIxVADDR", %#"PRIxVSIZE", %#"PRIxVADDR")\n", 1639 printf("pmap_copy(%p, %p, %#"PRIxVADDR", %#"PRIxVSIZE", %#"PRIxVADDR")\n",
1527 dst_pmap, src_pmap, dst_addr, len, src_addr); 1640 dst_pmap, src_pmap, dst_addr, len, src_addr);
1528#endif 1641#endif
1529} 1642}
1530 1643
1531/* 1644/*
1532 * Routine: pmap_collect 1645 * Routine: pmap_collect
1533 * Function: 1646 * Function:
1534 * Garbage collects the physical map system for 1647 * Garbage collects the physical map system for
1535 * pages which are no longer used. 1648 * pages which are no longer used.
1536 * Success need not be guaranteed -- that is, there 1649 * Success need not be guaranteed -- that is, there
1537 * may well be pages which are not referenced, but 1650 * may well be pages which are not referenced, but
1538 * others may be collected. 1651 * others may be collected.
1539 * Usage: 1652 * Usage:
1540 * Called by the pageout daemon when pages are scarce. 1653 * Called by the pageout daemon when pages are scarce.
1541 */ 1654 */
1542void 1655void
1543pmap_collect(pmap_t pmap) 1656pmap_collect(pmap_t pmap)
1544{ 1657{
1545 1658
 1659 PMAP_COUNT(collect);
1546#ifdef DEBUG 1660#ifdef DEBUG
1547 if (pmapdebug & PDB_FOLLOW) 1661 if (pmapdebug & PDB_FOLLOW)
1548 printf("pmap_collect(%p)\n", pmap); 1662 printf("pmap_collect(%p)\n", pmap);
1549#endif 1663#endif
1550} 1664}
1551 1665
1552/* 1666/*
1553 * pmap_zero_page zeros the specified page. 1667 * pmap_zero_page zeros the specified page.
1554 */ 1668 */
1555void 1669void
1556pmap_zero_page(paddr_t phys) 1670pmap_zero_page(paddr_t dst_pa)
1557{ 1671{
1558 vaddr_t va; 1672 vaddr_t dst_va;
1559#if defined(MIPS3_PLUS) 1673 pt_entry_t dst_tmp;
1560 struct vm_page *pg; 
1561 pv_entry_t pv; 
1562#endif 
1563 1674
1564#ifdef DEBUG 1675#ifdef DEBUG
1565 if (pmapdebug & PDB_FOLLOW) 1676 if (pmapdebug & PDB_FOLLOW)
1566 printf("pmap_zero_page(%#"PRIxPADDR")\n", phys); 1677 printf("pmap_zero_page(%#"PRIxPADDR")\n", dst_pa);
1567#endif 
1568#ifdef PARANOIADIAG 
1569 if (!(phys < MIPS_MAX_MEM_ADDR)) 
1570 printf("pmap_zero_page(%#"PRIxPADDR") nonphys\n", phys); 
1571#endif 
1572#ifdef _LP64 
1573 KASSERT(mips_options.mips3_xkphys_cached); 
1574 va = MIPS_PHYS_TO_XKPHYS_CACHED(phys); 
1575#else 
1576 va = MIPS_PHYS_TO_KSEG0(phys); 
1577#endif 1678#endif
 1679 PMAP_COUNT(zeroed_pages);
1578 1680
1579#if defined(MIPS3_PLUS) /* XXX mmu XXX */ 1681 struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst_pa);
1580 pg = PHYS_TO_VM_PAGE(phys); 
1581 if (mips_cache_info.mci_cache_virtual_alias) { 
1582 pv = pg->mdpage.pvh_list; 
1583 if ((pv->pv_flags & PV_UNCACHED) == 0 && 
1584 mips_cache_indexof(pv->pv_va) != mips_cache_indexof(va)) 
1585 mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE); 
1586 } 
1587#endif 
1588 1682
1589 mips_pagezero((void *)va); 1683 dst_va = pmap_map_ephemeral_page(dst_pg, VM_PROT_READ|VM_PROT_WRITE, &dst_tmp);
1590 1684
1591#if defined(MIPS3_PLUS) /* XXX mmu XXX */ 1685 mips_pagezero((void *)dst_va);
1592 /* 1686
1593 * If we have a virtually-indexed, physically-tagged WB cache, 1687 pmap_unmap_ephemeral_page(dst_pg, dst_va, dst_tmp);
1594 * and no L2 cache to warn of aliased mappings, we must force a 
1595 * writeback of the destination out of the L1 cache. If we don't, 
1596 * later reads (from virtual addresses mapped to the destination PA) 
1597 * might read old stale DRAM footprint, not the just-written data. 
1598 * 
1599 * XXXJRT This is totally disgusting. 
1600 */ 
1601 if (MIPS_HAS_R4K_MMU) /* XXX VCED on kernel stack is not allowed */ 
1602 mips_dcache_wbinv_range(va, PAGE_SIZE); 
1603#endif /* MIPS3_PLUS */ 
1604} 1688}
1605 1689
1606/* 1690/*
1607 * pmap_copy_page copies the specified page. 1691 * pmap_copy_page copies the specified page.
1608 */ 1692 */
1609void 1693void
1610pmap_copy_page(paddr_t src, paddr_t dst) 1694pmap_copy_page(paddr_t src_pa, paddr_t dst_pa)
1611{ 1695{
1612 vaddr_t src_va, dst_va; 1696 vaddr_t src_va, dst_va;
 1697 pt_entry_t src_tmp, dst_tmp;
1613#ifdef DEBUG 1698#ifdef DEBUG
1614 if (pmapdebug & PDB_FOLLOW) 1699 if (pmapdebug & PDB_FOLLOW)
1615 printf("pmap_copy_page(%#"PRIxPADDR", %#"PRIxPADDR")\n", src, dst); 1700 printf("pmap_copy_page(%#"PRIxPADDR", %#"PRIxPADDR")\n", src_pa, dst_pa);
1616#endif 
1617#ifdef _LP64 
1618 KASSERT(mips_options.mips3_xkphys_cached); 
1619 src_va = MIPS_PHYS_TO_XKPHYS_CACHED(src); 
1620 dst_va = MIPS_PHYS_TO_XKPHYS_CACHED(dst); 
1621#else 
1622 src_va = MIPS_PHYS_TO_KSEG0(src); 
1623 dst_va = MIPS_PHYS_TO_KSEG0(dst); 
1624#endif 
1625#if !defined(_LP64) && defined(PARANOIADIAG) 
1626 if (!(src < MIPS_MAX_MEM_ADDR)) 
1627 printf("pmap_copy_page(%#"PRIxPADDR") src nonphys\n", src); 
1628 if (!(dst < MIPS_MAX_MEM_ADDR)) 
1629 printf("pmap_copy_page(%#"PRIxPADDR") dst nonphys\n", dst); 
1630#endif 1701#endif
 1702 struct vm_page *src_pg = PHYS_TO_VM_PAGE(src_pa);
 1703 struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst_pa);
1631 1704
1632#if defined(MIPS3_PLUS) /* XXX mmu XXX */ 1705 PMAP_COUNT(copied_pages);
1633 /* 1706
1634 * If we have a virtually-indexed, physically-tagged cache, 1707 src_va = pmap_map_ephemeral_page(src_pg, VM_PROT_READ, &src_tmp);
1635 * and no L2 cache to warn of aliased mappings, we must force an 1708 dst_va = pmap_map_ephemeral_page(dst_pg, VM_PROT_READ|VM_PROT_WRITE, &dst_tmp);
1636 * write-back of all L1 cache lines of the source physical address, 
1637 * irrespective of their virtual address (cache indexes). 
1638 * If we don't, our copy loop might read and copy stale DRAM 
1639 * footprint instead of the fresh (but dirty) data in a WB cache. 
1640 * XXX invalidate any cached lines of the destination PA 
1641 * here also? 
1642 * 
1643 * It would probably be better to map the destination as a 
1644 * write-through no allocate to reduce cache thrash. 
1645 */ 
1646 if (mips_cache_info.mci_cache_virtual_alias) { 
1647 /*XXX FIXME Not very sophisticated */ 
1648 mips_flushcache_allpvh(src); 
1649#if 0 
1650 mips_flushcache_allpvh(dst); 
1651#endif 
1652 } 
1653#endif /* MIPS3_PLUS */ 
1654 1709
1655 mips_pagecopy((void *)dst_va, (void *)src_va); 1710 mips_pagecopy((void *)dst_va, (void *)src_va);
1656 1711
1657#if defined(MIPS3_PLUS) /* XXX mmu XXX */ 1712 pmap_unmap_ephemeral_page(dst_pg, dst_va, dst_tmp);
1658 /* 1713 pmap_unmap_ephemeral_page(src_pg, src_va, src_tmp);
1659 * If we have a virtually-indexed, physically-tagged WB cache, 
1660 * and no L2 cache to warn of aliased mappings, we must force a 
1661 * writeback of the destination out of the L1 cache. If we don't, 
1662 * later reads (from virtual addresses mapped to the destination PA) 
1663 * might read old stale DRAM footprint, not the just-written data. 
1664 * XXX Do we need to also invalidate any cache lines matching 
1665 * the destination as well? 
1666 * 
1667 * XXXJRT -- This is totally disgusting. 
1668 */ 
1669 if (mips_cache_info.mci_cache_virtual_alias) { 
1670 mips_dcache_wbinv_range(src_va, PAGE_SIZE); 
1671 mips_dcache_wbinv_range(dst_va, PAGE_SIZE); 
1672 } 
1673#endif /* MIPS3_PLUS */ 
1674} 1714}
1675 1715
1676/* 1716/*
1677 * pmap_clear_reference: 1717 * pmap_clear_reference:
1678 * 1718 *
1679 * Clear the reference bit on the specified physical page. 1719 * Clear the reference bit on the specified physical page.
1680 */ 1720 */
1681bool 1721bool
1682pmap_clear_reference(struct vm_page *pg) 1722pmap_clear_reference(struct vm_page *pg)
1683{ 1723{
1684#ifdef DEBUG 1724#ifdef DEBUG
1685 if (pmapdebug & PDB_FOLLOW) 1725 if (pmapdebug & PDB_FOLLOW)
1686 printf("pmap_clear_reference(%#"PRIxPADDR")\n", 1726 printf("pmap_clear_reference(%#"PRIxPADDR")\n",
1687 VM_PAGE_TO_PHYS(pg)); 1727 VM_PAGE_TO_PHYS(pg));
1688#endif 1728#endif
1689 return pmap_clear_page_attributes(pg, PV_REFERENCED); 1729 return pmap_clear_page_attributes(pg, PG_MD_REFERENCED);
1690} 1730}
1691 1731
1692/* 1732/*
1693 * pmap_is_referenced: 1733 * pmap_is_referenced:
1694 * 1734 *
1695 * Return whether or not the specified physical page is referenced 1735 * Return whether or not the specified physical page is referenced
1696 * by any physical maps. 1736 * by any physical maps.
1697 */ 1737 */
1698bool 1738bool
1699pmap_is_referenced(struct vm_page *pg) 1739pmap_is_referenced(struct vm_page *pg)
1700{ 1740{
1701 1741
1702 return pg->mdpage.pvh_attrs & PV_REFERENCED; 1742 return PG_MD_REFERENCED_P(pg);
1703} 1743}
1704 1744
1705/* 1745/*
1706 * Clear the modify bits on the specified physical page. 1746 * Clear the modify bits on the specified physical page.
1707 */ 1747 */
1708bool 1748bool
1709pmap_clear_modify(struct vm_page *pg) 1749pmap_clear_modify(struct vm_page *pg)
1710{ 1750{
1711 struct pv_entry *pv; 1751 struct pv_entry *pv = &pg->mdpage.pvh_first;
1712 1752
 1753 PMAP_COUNT(clear_modify);
1713#ifdef DEBUG 1754#ifdef DEBUG
1714 if (pmapdebug & PDB_FOLLOW) 1755 if (pmapdebug & PDB_FOLLOW)
1715 printf("pmap_clear_modify(%#"PRIxPADDR")\n", VM_PAGE_TO_PHYS(pg)); 1756 printf("pmap_clear_modify(%#"PRIxPADDR")\n", VM_PAGE_TO_PHYS(pg));
1716#endif 1757#endif
1717 if (!pmap_clear_page_attributes(pg, PV_MODIFIED)) 1758 if (PG_MD_EXECPAGE_P(pg)) {
 1759 if (pv->pv_pmap == NULL) {
 1760 pmap_clear_page_attributes(pg, PG_MD_EXECPAGE);
 1761 PMAP_COUNT(exec_uncached_clear_modify);
 1762 } else {
 1763 pmap_page_syncicache(pg);
 1764 PMAP_COUNT(exec_synced_clear_modify);
 1765 }
 1766 }
 1767 if (!pmap_clear_page_attributes(pg, PG_MD_MODIFIED))
1718 return false; 1768 return false;
1719 pv = pg->mdpage.pvh_list; 
1720 if (pv->pv_pmap == NULL) { 1769 if (pv->pv_pmap == NULL) {
1721 return true; 1770 return true;
1722 } 1771 }
1723 1772
1724 /* 1773 /*
1725 * remove write access from any pages that are dirty 1774 * remove write access from any pages that are dirty
1726 * so we can tell if they are written to again later. 1775 * so we can tell if they are written to again later.
1727 * flush the VAC first if there is one. 1776 * flush the VAC first if there is one.
1728 */ 1777 */
1729 for (; pv; pv = pv->pv_next) { 1778 for (; pv; pv = pv->pv_next) {
1730 pmap_t pmap = pv->pv_pmap; 1779 pmap_t pmap = pv->pv_pmap;
1731 vaddr_t va = pv->pv_va; 1780 vaddr_t va = pv->pv_va;
1732 pt_entry_t *pte; 1781 pt_entry_t *pte;
@@ -1736,442 +1785,450 @@ pmap_clear_modify(struct vm_page *pg) @@ -1736,442 +1785,450 @@ pmap_clear_modify(struct vm_page *pg)
1736 } else { 1785 } else {
1737 pte = pmap_pte_lookup(pmap, va); 1786 pte = pmap_pte_lookup(pmap, va);
1738 KASSERT(pte); 1787 KASSERT(pte);
1739 } 1788 }
1740 pt_entry = pte->pt_entry & ~mips_pg_m_bit(); 1789 pt_entry = pte->pt_entry & ~mips_pg_m_bit();
1741 if (pte->pt_entry == pt_entry) { 1790 if (pte->pt_entry == pt_entry) {
1742 continue; 1791 continue;
1743 } 1792 }
1744 KASSERT(pt_entry & MIPS3_PG_V); 1793 KASSERT(pt_entry & MIPS3_PG_V);
1745 /* 1794 /*
1746 * Why? Why? 1795 * Why? Why?
1747 */ 1796 */
1748 if (MIPS_HAS_R4K_MMU 1797 if (MIPS_HAS_R4K_MMU
1749 && mips_cache_info.mci_cache_virtual_alias) { 1798 && MIPS_CACHE_VIRTUAL_ALIAS) {
1750 if (PMAP_IS_ACTIVE(pmap)) { 1799 if (PMAP_IS_ACTIVE(pmap)) {
1751 mips_dcache_wbinv_range(va, PAGE_SIZE); 1800 mips_dcache_wbinv_range(va, PAGE_SIZE);
1752 } else { 1801 } else {
1753 mips_dcache_wbinv_range_index(va, PAGE_SIZE); 1802 mips_dcache_wbinv_range_index(va, PAGE_SIZE);
1754 } 1803 }
1755 } 1804 }
1756 pte->pt_entry = pt_entry; 1805 pte->pt_entry = pt_entry;
1757 pmap_tlb_invalidate_addr(pmap, va); 1806 pmap_tlb_invalidate_addr(pmap, va);
1758 } 1807 }
1759 return true; 1808 return true;
1760} 1809}
1761 1810
1762/* 1811/*
1763 * pmap_is_modified: 1812 * pmap_is_modified:
1764 * 1813 *
1765 * Return whether or not the specified physical page is modified 1814 * Return whether or not the specified physical page is modified
1766 * by any physical maps. 1815 * by any physical maps.
1767 */ 1816 */
1768bool 1817bool
1769pmap_is_modified(struct vm_page *pg) 1818pmap_is_modified(struct vm_page *pg)
1770{ 1819{
1771 1820
1772 return pg->mdpage.pvh_attrs & PV_MODIFIED; 1821 return PG_MD_MODIFIED_P(pg);
1773} 1822}
1774 1823
1775/* 1824/*
1776 * pmap_set_modified: 1825 * pmap_set_modified:
1777 * 1826 *
1778 * Sets the page modified reference bit for the specified page. 1827 * Sets the page modified reference bit for the specified page.
1779 */ 1828 */
1780void 1829void
1781pmap_set_modified(paddr_t pa) 1830pmap_set_modified(paddr_t pa)
1782{ 1831{
1783 struct vm_page *pg = PHYS_TO_VM_PAGE(pa); 1832 struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
1784 pmap_set_page_attributes(pg, PV_MODIFIED | PV_REFERENCED); 1833 pmap_set_page_attributes(pg, PG_MD_MODIFIED | PG_MD_REFERENCED);
1785} 1834}
1786 1835
1787/******************** pv_entry management ********************/ 1836/******************** pv_entry management ********************/
1788 1837
1789/* 1838/*
1790 * Enter the pmap and virtual address into the 1839 * Enter the pmap and virtual address into the
1791 * physical to virtual map table. 1840 * physical to virtual map table.
1792 */ 1841 */
1793void 1842void
1794pmap_enter_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg, u_int *npte) 1843pmap_enter_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg, u_int *npte)
1795{ 1844{
1796 pv_entry_t pv, npv; 1845 pv_entry_t pv, npv;
1797 1846
1798 pv = pg->mdpage.pvh_list; 1847 pv = &pg->mdpage.pvh_first;
1799#ifdef DEBUG 1848#ifdef DEBUG
1800 if (pmapdebug & PDB_ENTER) 1849 if (pmapdebug & PDB_ENTER)
1801 printf("pmap_enter: pv %p: was %#"PRIxVADDR"/%p/%p\n", 1850 printf("pmap_enter: pv %p: was %#"PRIxVADDR"/%p/%p\n",
1802 pv, pv->pv_va, pv->pv_pmap, pv->pv_next); 1851 pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
1803#endif 1852#endif
1804#if defined(MIPS3_NO_PV_UNCACHED) 1853#if defined(MIPS3_NO_PV_UNCACHED)
1805again: 1854again:
1806#endif 1855#endif
1807 if (pv->pv_pmap == NULL) { 1856 if (pv->pv_pmap == NULL) {
1808 1857 KASSERT(pv->pv_next == NULL);
1809 /* 1858 /*
1810 * No entries yet, use header as the first entry 1859 * No entries yet, use header as the first entry
1811 */ 1860 */
1812 
1813#ifdef DEBUG 1861#ifdef DEBUG
1814 if (pmapdebug & PDB_PVENTRY) 1862 if (pmapdebug & PDB_PVENTRY)
1815 printf("pmap_enter: first pv: pmap %p va %#"PRIxVADDR"\n", 1863 printf("pmap_enter: first pv: pmap %p va %#"PRIxVADDR"\n",
1816 pmap, va); 1864 pmap, va);
1817 enter_stats.firstpv++; 
1818#endif 1865#endif
 1866 PMAP_COUNT(primary_mappings);
 1867 PMAP_COUNT(mappings);
 1868 pmap_clear_page_attributes(pg, PG_MD_UNCACHED);
1819 pv->pv_va = va; 1869 pv->pv_va = va;
1820 pv->pv_flags &= ~PV_UNCACHED; 
1821 pv->pv_pmap = pmap; 1870 pv->pv_pmap = pmap;
1822 pv->pv_next = NULL; 1871 pv->pv_next = NULL;
1823 } else { 1872 } else {
1824#if defined(MIPS3_PLUS) /* XXX mmu XXX */ 1873#if defined(MIPS3_PLUS) /* XXX mmu XXX */
1825 if (mips_cache_info.mci_cache_virtual_alias) { 1874 if (MIPS_CACHE_VIRTUAL_ALIAS) {
1826 /* 1875 /*
1827 * There is at least one other VA mapping this page. 1876 * There is at least one other VA mapping this page.
1828 * Check if they are cache index compatible. 1877 * Check if they are cache index compatible.
1829 */ 1878 */
1830 1879
1831#if defined(MIPS3_NO_PV_UNCACHED) 1880#if defined(MIPS3_NO_PV_UNCACHED)
1832 1881
1833 /* 1882 /*
1834 * Instead of mapping uncached, which some platforms 1883 * Instead of mapping uncached, which some platforms
1835 * cannot support, remove the mapping from the pmap. 1884 * cannot support, remove the mapping from the pmap.
1836 * When this address is touched again, the uvm will 1885 * When this address is touched again, the uvm will
1837 * fault it in. Because of this, each page will only 1886 * fault it in. Because of this, each page will only
1838 * be mapped with one index at any given time. 1887 * be mapped with one index at any given time.
1839 */ 1888 */
1840 1889
1841 for (npv = pv; npv; npv = npv->pv_next) { 1890 if (mips_cache_badalias(pv->pv_va, va)) {
1842 if (mips_cache_indexof(npv->pv_va) != 1891 for (npv = pv; npv; npv = npv->pv_next) {
1843 mips_cache_indexof(va)) { 
1844 pmap_remove(npv->pv_pmap, npv->pv_va, 1892 pmap_remove(npv->pv_pmap, npv->pv_va,
1845 npv->pv_va + PAGE_SIZE); 1893 npv->pv_va + PAGE_SIZE);
1846 pmap_update(npv->pv_pmap); 1894 pmap_update(npv->pv_pmap);
1847 goto again; 1895 goto again;
1848 } 1896 }
1849 } 1897 }
1850#else /* !MIPS3_NO_PV_UNCACHED */ 1898#else /* !MIPS3_NO_PV_UNCACHED */
1851 if (!(pv->pv_flags & PV_UNCACHED)) { 1899 if (PG_MD_CACHED_P(pg)) {
1852 for (npv = pv; npv; npv = npv->pv_next) { 1900 /*
1853 1901 * If this page is cached, then all mappings
1854 /* 1902 * have the same cache alias so we only need
1855 * Check cache aliasing incompatibility. 1903 * to check the first page to see if it's
1856 * If one exists, re-map this page 1904 * incompatible with the new mapping.
1857 * uncached until all mappings have 1905 *
1858 * the same index again. 1906 * If the mappings are incompatible, map this
1859 */ 1907 * page as uncached and re-map all the current
1860 if (mips_cache_indexof(npv->pv_va) != 1908 * mapping as uncached until all pages can
1861 mips_cache_indexof(va)) { 1909 * share the same cache index again.
1862 pmap_page_cache(pg,PV_UNCACHED); 1910 */
1863 mips_dcache_wbinv_range_index( 1911 if (mips_cache_badalias(pv->pv_va, va)) {
1864 pv->pv_va, PAGE_SIZE); 1912 pmap_page_cache(pg, false);
1865 *npte = (*npte & 1913 mips_dcache_wbinv_range_index(
1866 ~MIPS3_PG_CACHEMODE) | 1914 pv->pv_va, PAGE_SIZE);
1867 MIPS3_PG_UNCACHED; 1915 *npte = (*npte &
1868#ifdef DEBUG 1916 ~MIPS3_PG_CACHEMODE) |
1869 enter_stats.ci++; 1917 MIPS3_PG_UNCACHED;
1870#endif 1918 PMAP_COUNT(page_cache_evictions);
1871 break; 
1872 } 
1873 } 1919 }
1874 } else { 1920 } else {
1875 *npte = (*npte & ~MIPS3_PG_CACHEMODE) | 1921 *npte = (*npte & ~MIPS3_PG_CACHEMODE) |
1876 MIPS3_PG_UNCACHED; 1922 MIPS3_PG_UNCACHED;
 1923 PMAP_COUNT(page_cache_evictions);
1877 } 1924 }
1878#endif /* !MIPS3_NO_PV_UNCACHED */ 1925#endif /* !MIPS3_NO_PV_UNCACHED */
1879 } 1926 }
1880#endif /* MIPS3_PLUS */ 1927#endif /* MIPS3_PLUS */
1881 1928
1882 /* 1929 /*
1883 * There is at least one other VA mapping this page. 1930 * There is at least one other VA mapping this page.
1884 * Place this entry after the header. 1931 * Place this entry after the header.
1885 * 1932 *
1886 * Note: the entry may already be in the table if 1933 * Note: the entry may already be in the table if
1887 * we are only changing the protection bits. 1934 * we are only changing the protection bits.
1888 */ 1935 */
1889 1936
1890 for (npv = pv; npv; npv = npv->pv_next) { 1937 for (npv = pv; npv; npv = npv->pv_next) {
1891 if (pmap == npv->pv_pmap && va == npv->pv_va) { 1938 if (pmap == npv->pv_pmap && va == npv->pv_va) {
1892#ifdef PARANOIADIAG 1939#ifdef PARANOIADIAG
1893 pt_entry_t *pte; 1940 pt_entry_t *pte;
1894 unsigned pt_entry; 1941 uint32_t pt_entry;
1895 1942
1896 if (pmap == pmap_kernel()) { 1943 if (pmap == pmap_kernel()) {
1897 pt_entry = kvtopte(va)->pt_entry; 1944 pt_entry = kvtopte(va)->pt_entry;
1898 } else { 1945 } else {
1899 pte = pmap_pte_lookup(pmap, va); 1946 pte = pmap_pte_lookup(pmap, va);
1900 if (pte) { 1947 if (pte) {
1901 pt_entry = pte->pt_entry; 1948 pt_entry = pte->pt_entry;
1902 } else 1949 } else
1903 pt_entry = 0; 1950 pt_entry = 0;
1904 } 1951 }
1905 if (!mips_pg_v(pt_entry) || 1952 if (!mips_pg_v(pt_entry) ||
1906 mips_tlbpfn_to_paddr(pt_entry) != 1953 mips_tlbpfn_to_paddr(pt_entry) !=
1907 VM_PAGE_TO_PHYS(pg)) 1954 VM_PAGE_TO_PHYS(pg))
1908 printf( 1955 printf(
1909 "pmap_enter: found va %#"PRIxVADDR" pa %#"PRIxPADDR" in pv_table but != %x\n", 1956 "pmap_enter: found va %#"PRIxVADDR" pa %#"PRIxPADDR" in pv_table but != %x\n",
1910 va, VM_PAGE_TO_PHYS(pg), 1957 va, VM_PAGE_TO_PHYS(pg),
1911 pt_entry); 1958 pt_entry);
1912#endif 1959#endif
 1960 PMAP_COUNT(remappings);
1913 return; 1961 return;
1914 } 1962 }
1915 } 1963 }
1916#ifdef DEBUG 1964#ifdef DEBUG
1917 if (pmapdebug & PDB_PVENTRY) 1965 if (pmapdebug & PDB_PVENTRY)
1918 printf("pmap_enter: new pv: pmap %p va %#"PRIxVADDR"\n", 1966 printf("pmap_enter: new pv: pmap %p va %#"PRIxVADDR"\n",
1919 pmap, va); 1967 pmap, va);
1920#endif 1968#endif
1921 npv = (pv_entry_t)pmap_pv_alloc(); 1969 npv = (pv_entry_t)pmap_pv_alloc();
1922 if (npv == NULL) 1970 if (npv == NULL)
1923 panic("pmap_enter_pv: pmap_pv_alloc() failed"); 1971 panic("pmap_enter_pv: pmap_pv_alloc() failed");
1924 npv->pv_va = va; 1972 npv->pv_va = va;
1925 npv->pv_pmap = pmap; 1973 npv->pv_pmap = pmap;
1926 npv->pv_flags = pv->pv_flags; 
1927 npv->pv_next = pv->pv_next; 1974 npv->pv_next = pv->pv_next;
1928 pv->pv_next = npv; 1975 pv->pv_next = npv;
1929#ifdef DEBUG 1976 PMAP_COUNT(mappings);
1930 if (!npv->pv_next) 
1931 enter_stats.secondpv++; 
1932#endif 
1933 } 1977 }
1934} 1978}
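
When pmap_enter_pv() detects a cache-index conflict (or finds the page already marked uncached), it enters the new mapping uncached by rewriting the cache-attribute field of the prospective PTE: mask out MIPS3_PG_CACHEMODE, then or in MIPS3_PG_UNCACHED. A small standalone illustration of that bit manipulation; the field width and encodings below are placeholders standing in for the mips3_pte.h definitions:

        #include <stdint.h>

        /* Placeholder encodings for the MIPS3 PTE cache-coherency field. */
        #define EX_PG_CACHEMODE 0x00000038u     /* assumed 3-bit C field */
        #define EX_PG_CACHED    0x00000018u     /* assumed cacheable, noncoherent */
        #define EX_PG_UNCACHED  0x00000010u     /* assumed uncached */

        static uint32_t
        ex_pte_make_uncached(uint32_t pte)
        {
                /* keep PFN, valid, dirty, etc.; replace only the cache mode */
                return (pte & ~EX_PG_CACHEMODE) | EX_PG_UNCACHED;
        }
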
1935 1979
1936/* 1980/*
1937 * Remove a physical to virtual address translation. 1981 * Remove a physical to virtual address translation.
1938 * If cache was inhibited on this page, and there are no more cache 1982 * If cache was inhibited on this page, and there are no more cache
1939 * conflicts, restore caching. 1983 * conflicts, restore caching.
1940 * Flush the cache if the last page is removed (should always be cached 1984 * Flush the cache if the last page is removed (should always be cached
1941 * at this point). 1985 * at this point).
1942 */ 1986 */
1943void 1987void
1944pmap_remove_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg) 1988pmap_remove_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg, bool dirty)
1945{ 1989{
1946 pv_entry_t pv, npv; 1990 pv_entry_t pv, npv;
1947 int last; 1991 bool last;
1948 1992
1949#ifdef DEBUG 1993#ifdef DEBUG
1950 if (pmapdebug & (PDB_FOLLOW|PDB_PVENTRY)) 1994 if (pmapdebug & (PDB_FOLLOW|PDB_PVENTRY))
1951 printf("pmap_remove_pv(%p, %#"PRIxVADDR", %#"PRIxPADDR")\n", pmap, va, 1995 printf("pmap_remove_pv(%p, %#"PRIxVADDR", %#"PRIxPADDR")\n", pmap, va,
1952 VM_PAGE_TO_PHYS(pg)); 1996 VM_PAGE_TO_PHYS(pg));
1953#endif 1997#endif
1954 1998
1955 pv = pg->mdpage.pvh_list; 1999 pv = &pg->mdpage.pvh_first;
1956 2000
1957 /* 2001 /*
1958 * If it is the first entry on the list, it is actually 2002 * If it is the first entry on the list, it is actually
1959 * in the header and we must copy the following entry up 2003 * in the header and we must copy the following entry up
1960 * to the header. Otherwise we must search the list for 2004 * to the header. Otherwise we must search the list for
1961 * the entry. In either case we free the now unused entry. 2005 * the entry. In either case we free the now unused entry.
1962 */ 2006 */
1963 2007
1964 last = 0; 2008 last = false;
1965 if (pmap == pv->pv_pmap && va == pv->pv_va) { 2009 if (pmap == pv->pv_pmap && va == pv->pv_va) {
1966 npv = pv->pv_next; 2010 npv = pv->pv_next;
1967 if (npv) { 2011 if (npv) {
1968 
1969 /* 
1970 * Copy current modified and referenced status to 
1971 * the following entry before copying. 
1972 */ 
1973 npv->pv_flags |= 
1974 pv->pv_flags & (PV_MODIFIED | PV_REFERENCED); 
1975 *pv = *npv; 2012 *pv = *npv;
1976 pmap_pv_free(npv); 2013 pmap_pv_free(npv);
1977 } else { 2014 } else {
 2015 pmap_clear_page_attributes(pg, PG_MD_UNCACHED);
1978 pv->pv_pmap = NULL; 2016 pv->pv_pmap = NULL;
1979 last = 1; /* Last mapping removed */ 2017 last = true; /* Last mapping removed */
1980 } 2018 }
1981#ifdef DEBUG 2019 PMAP_COUNT(remove_pvfirst);
1982 remove_stats.pvfirst++; 
1983#endif 
1984 } else { 2020 } else {
1985 for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) { 2021 for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
1986#ifdef DEBUG 2022 PMAP_COUNT(remove_pvsearch);
1987 remove_stats.pvsearch++; 
1988#endif 
1989 if (pmap == npv->pv_pmap && va == npv->pv_va) 2023 if (pmap == npv->pv_pmap && va == npv->pv_va)
1990 break; 2024 break;
1991 } 2025 }
1992 if (npv) { 2026 if (npv) {
1993 pv->pv_next = npv->pv_next; 2027 pv->pv_next = npv->pv_next;
1994 pmap_pv_free(npv); 2028 pmap_pv_free(npv);
1995 } 2029 }
1996 } 2030 }
 2031 if (PG_MD_EXECPAGE_P(pg) && dirty) {
 2032 if (last) {
 2033 /*
 2034 * If this was the page's last mapping, we no longer
 2035 * care about its execness.
 2036 */
 2037 pmap_clear_page_attributes(pg, PG_MD_EXECPAGE);
 2038 PMAP_COUNT(exec_uncached_remove);
 2039 } else {
 2040 /*
 2041 * Someone still has it mapped as an executable page
 2042 * so we must sync it.
 2043 */
 2044 pmap_page_syncicache(pg);
 2045 PMAP_COUNT(exec_synced_remove);
 2046 }
 2047 }
1997#ifdef MIPS3_PLUS /* XXX mmu XXX */ 2048#ifdef MIPS3_PLUS /* XXX mmu XXX */
1998#if !defined(MIPS3_NO_PV_UNCACHED) 2049#ifndef MIPS3_NO_PV_UNCACHED
1999 if (MIPS_HAS_R4K_MMU && pv->pv_flags & PV_UNCACHED) { 2050 if (MIPS_HAS_R4K_MMU && PG_MD_UNCACHED_P(pg)) {
2000 2051
2001 /* 2052 /*
2002 * Page is currently uncached, check if alias mapping has been 2053 * Page is currently uncached, check if alias mapping has been
2003 * removed. If it was, then reenable caching. 2054 * removed. If it was, then reenable caching.
2004 */ 2055 */
2005 2056
2006 pv = pg->mdpage.pvh_list; 2057 pv = &pg->mdpage.pvh_first;
2007 for (npv = pv->pv_next; npv; npv = npv->pv_next) { 2058 for (npv = pv->pv_next; npv; npv = npv->pv_next) {
2008 if (mips_cache_indexof(pv->pv_va ^ npv->pv_va)) 2059 if (mips_cache_badalias(pv->pv_va, npv->pv_va))
2009 break; 2060 break;
2010 } 2061 }
2011 if (npv == NULL) 2062 if (npv == NULL)
2012 pmap_page_cache(pg, 0); 2063 pmap_page_cache(pg, true);
2013 } 2064 }
2014#endif 2065#endif
2015 if (MIPS_HAS_R4K_MMU && last != 0) 2066 if (MIPS_HAS_R4K_MMU && last) /* XXX why */
2016 mips_dcache_wbinv_range_index(va, PAGE_SIZE); 2067 mips_dcache_wbinv_range_index(va, PAGE_SIZE);
2017#endif /* MIPS3_PLUS */ 2068#endif /* MIPS3_PLUS */
2018} 2069}
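
When MIPS3_NO_PV_UNCACHED is not defined, pmap_remove_pv() above restores caching with pmap_page_cache(pg, true) once no surviving mapping aliases the list head; comparing each survivor against the head is sufficient, because two mappings that both match the head's cache color necessarily match each other. A standalone sketch of that scan, reusing the illustrative alias test from earlier:

        #include <stdbool.h>
        #include <stddef.h>
        #include <stdint.h>

        #define EX_COLOR_MASK (16384u - 4096u)  /* placeholder, as above */

        static inline bool
        ex_cache_badalias(uintptr_t va1, uintptr_t va2)
        {
                return ((va1 ^ va2) & EX_COLOR_MASK) != 0;
        }

        /* May caching be restored, given the head VA and the remaining VAs? */
        static bool
        ex_can_recache(uintptr_t head_va, const uintptr_t *vas, size_t n)
        {
                for (size_t i = 0; i < n; i++) {
                        if (ex_cache_badalias(head_va, vas[i]))
                                return false;   /* an alias conflict remains */
                }
                return true;
        }
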
2019 2070
2020/* 2071/*
2021 * pmap_pv_page_alloc: 2072 * pmap_pv_page_alloc:
2022 * 2073 *
2023 * Allocate a page for the pv_entry pool. 2074 * Allocate a page for the pv_entry pool.
2024 */ 2075 */
2025void * 2076void *
2026pmap_pv_page_alloc(struct pool *pp, int flags) 2077pmap_pv_page_alloc(struct pool *pp, int flags)
2027{ 2078{
2028 const struct vm_page *pg = PMAP_ALLOC_POOLPAGE(UVM_PGA_USERESERVE); 2079 struct vm_page *pg = PMAP_ALLOC_POOLPAGE(UVM_PGA_USERESERVE);
2029 if (pg == NULL) 2080 if (pg == NULL)
2030 return NULL; 2081 return NULL;
2031 2082
2032 const paddr_t pa = VM_PAGE_TO_PHYS(pg); 2083 return (void *)mips_pmap_map_poolpage(VM_PAGE_TO_PHYS(pg));
2033#ifdef _LP64 
2034 KASSERT(mips_options.mips3_xkphys_cached); 
2035 const vaddr_t va = MIPS_PHYS_TO_XKPHYS_CACHED(pa); 
2036#else 
2037 const vaddr_t va = MIPS_PHYS_TO_KSEG0(pa); 
2038#endif 
2039#if defined(MIPS3_PLUS) 
2040 if (mips_cache_info.mci_cache_virtual_alias) { 
2041 pv_entry_t pv = pg->mdpage.pvh_list; 
2042 if ((pv->pv_flags & PV_UNCACHED) == 0 && 
2043 mips_cache_indexof(pv->pv_va) != mips_cache_indexof(va)) 
2044 mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE); 
2045 } 
2046#endif 
2047 return (void *)va; 
2048} 2084}
2049 2085
2050/* 2086/*
2051 * pmap_pv_page_free: 2087 * pmap_pv_page_free:
2052 * 2088 *
2053 * Free a pv_entry pool page. 2089 * Free a pv_entry pool page.
2054 */ 2090 */
2055void 2091void
2056pmap_pv_page_free(struct pool *pp, void *v) 2092pmap_pv_page_free(struct pool *pp, void *v)
2057{ 2093{
2058 paddr_t phys; 2094 vaddr_t va = (vaddr_t)v;
 2095 paddr_t pa;
2059 2096
2060#ifdef MIPS3_PLUS 
2061 if (mips_cache_info.mci_cache_virtual_alias) 
2062 mips_dcache_inv_range((vaddr_t)v, PAGE_SIZE); 
2063#endif 
2064#ifdef _LP64 2097#ifdef _LP64
2065 KASSERT(MIPS_XKPHYS_P(v)); 2098 KASSERT(MIPS_XKPHYS_P(va));
2066 phys = MIPS_XKPHYS_TO_PHYS((vaddr_t)v); 2099 pa = MIPS_XKPHYS_TO_PHYS(va);
2067#else 2100#else
2068 phys = MIPS_KSEG0_TO_PHYS((vaddr_t)v); 2101 KASSERT(MIPS_KSEG0_P(va));
 2102 pa = MIPS_KSEG0_TO_PHYS(va);
2069#endif 2103#endif
2070 uvm_pagefree(PHYS_TO_VM_PAGE(phys)); 2104#ifdef MIPS3_PLUS
 2105 if (MIPS_CACHE_VIRTUAL_ALIAS)
 2106 mips_dcache_inv_range(va, PAGE_SIZE);
 2107#endif
 2108 struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
 2109 pmap_clear_page_attributes(pg, PG_MD_POOLPAGE);
 2110 uvm_pagefree(pg);
2071} 2111}
2072 2112
2073pt_entry_t * 2113pt_entry_t *
2074pmap_pte(pmap_t pmap, vaddr_t va) 2114pmap_pte(pmap_t pmap, vaddr_t va)
2075{ 2115{
2076 pt_entry_t *pte; 2116 pt_entry_t *pte;
2077 2117
2078 if (pmap == pmap_kernel()) 2118 if (pmap == pmap_kernel())
2079 pte = kvtopte(va); 2119 pte = kvtopte(va);
2080 else 2120 else
2081 pte = pmap_pte_lookup(pmap, va); 2121 pte = pmap_pte_lookup(pmap, va);
2082 return pte; 2122 return pte;
2083} 2123}
2084 2124
2085#ifdef MIPS3_PLUS /* XXX mmu XXX */ 2125#ifdef MIPS3_PLUS /* XXX mmu XXX */
2086/* 2126/*
2087 * Find first virtual address >= *vap that doesn't cause 2127 * Find first virtual address >= *vap that doesn't cause
2088 * a cache alias conflict. 2128 * a cache alias conflict.
2089 */ 2129 */
2090void 2130void
2091pmap_prefer(vaddr_t foff, vaddr_t *vap, int td) 2131pmap_prefer(vaddr_t foff, vaddr_t *vap, vsize_t sz, int td)
2092{ 2132{
2093 const struct mips_cache_info * const mci = &mips_cache_info; 2133 const struct mips_cache_info * const mci = &mips_cache_info;
2094 vaddr_t va; 2134 vaddr_t va;
2095 vsize_t d; 2135 vsize_t d;
 2136 vsize_t prefer_mask = ptoa(uvmexp.colormask);
 2137
 2138 PMAP_COUNT(prefer_requests);
2096 2139
2097 if (MIPS_HAS_R4K_MMU) { 2140 if (MIPS_HAS_R4K_MMU) {
 2141 prefer_mask |= mci->mci_cache_prefer_mask;
 2142 }
 2143
 2144 if (prefer_mask) {
2098 va = *vap; 2145 va = *vap;
2099 2146
2100 d = foff - va; 2147 d = foff - va;
2101 d &= mci->mci_cache_prefer_mask; 2148 d &= prefer_mask;
2102 if (td && d) 2149 if (d) {
2103 d = -((-d) & mci->mci_cache_prefer_mask); 2150 if (td)
2104 *vap = va + d; 2151 *vap = trunc_page(va -((-d) & prefer_mask));
 2152 else
 2153 *vap = round_page(va + d);
 2154 PMAP_COUNT(prefer_adjustments);
 2155 }
2105 } 2156 }
2106} 2157}
2107#endif /* MIPS3_PLUS */ 2158#endif /* MIPS3_PLUS */
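
pmap_prefer() now folds the UVM page-color mask (ptoa(uvmexp.colormask)) into the cache prefer mask and, when the hint needs adjusting, moves it down for top-down allocations and up otherwise, so that the chosen address shares its low index bits with the file offset. A standalone worked example of the arithmetic; the mask value is a placeholder:

        #include <stdint.h>
        #include <stdio.h>

        #define EX_PAGE_MASK   0xfffu
        #define EX_PREFER_MASK 0x7fffu  /* placeholder color + cache mask */

        static uintptr_t
        ex_prefer(uintptr_t foff, uintptr_t va, int topdown)
        {
                uintptr_t d = (foff - va) & EX_PREFER_MASK;

                if (d == 0)
                        return va;
                if (topdown)    /* trunc_page(va - ((-d) & mask)) */
                        return (va - ((-d) & EX_PREFER_MASK)) & ~(uintptr_t)EX_PAGE_MASK;
                /* round_page(va + d) */
                return (va + d + EX_PAGE_MASK) & ~(uintptr_t)EX_PAGE_MASK;
        }

        int
        main(void)
        {
                /* e.g. place file offset 0x3000 near the hint 0x10000 */
                printf("%#lx\n", (unsigned long)ex_prefer(0x3000, 0x10000, 0));
                printf("%#lx\n", (unsigned long)ex_prefer(0x3000, 0x10000, 1));
                return 0;
        }

With these values the bottom-up case yields 0x13000 and the top-down case 0xb000; both agree with the offset 0x3000 in the low fifteen bits, so either hint lands on the same cache color as the file data.
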
2108 2159
2109struct vm_page * 2160struct vm_page *
2110mips_pmap_alloc_poolpage(int flags) 2161mips_pmap_alloc_poolpage(int flags)
2111{ 2162{
2112 /* 2163 /*
2113 * On 32bit kernels, we must make sure that we only allocate pages that 2164 * On 32bit kernels, we must make sure that we only allocate pages that
 2114 * can be mapped via KSEG0. On 64bit kernels, try to allocated from 2165 * can be mapped via KSEG0. On 64bit kernels, try to allocated from
 2115 * the first 4G. If all memory is in KSEG0/4G, then we can just 2166 * the first 4G. If all memory is in KSEG0/4G, then we can just
 2116 * use the default freelist otherwise we must use the pool page list. 2167 * use the default freelist otherwise we must use the pool page list.
 2117 */ 2168 */
 2118 if (mips_poolpage_vmfreelist != VM_FREELIST_DEFAULT) 2169 if (mips_poolpage_vmfreelist != VM_FREELIST_DEFAULT)
 2119 return uvm_pagealloc_strat(NULL, 0, NULL, flags, 2170 return uvm_pagealloc_strat(NULL, 0, NULL, flags,
 2120 UVM_PGA_STRAT_ONLY, mips_poolpage_vmfreelist); 2171 UVM_PGA_STRAT_ONLY, mips_poolpage_vmfreelist);
 2121 2172
 2122 return uvm_pagealloc(NULL, 0, NULL, flags); 2173 return uvm_pagealloc(NULL, 0, NULL, flags);
 2123 } 2174 }
2124 2175
2125vaddr_t 2176vaddr_t
2126mips_pmap_map_poolpage(paddr_t pa) 2177mips_pmap_map_poolpage(paddr_t pa)
2127{ 2178{
2128 vaddr_t va; 2179 vaddr_t va;
2129#if defined(MIPS3_PLUS) 
2130 struct vm_page *pg; 
2131 pv_entry_t pv; 
2132#endif 
2133 2180
2134#ifdef _LP64 2181#ifdef _LP64
2135 KASSERT(mips_options.mips3_xkphys_cached); 2182 KASSERT(mips_options.mips3_xkphys_cached);
2136 va = MIPS_PHYS_TO_XKPHYS_CACHED(pa); 2183 va = MIPS_PHYS_TO_XKPHYS_CACHED(pa);
2137#else 2184#else
2138 if (pa > MIPS_PHYS_MASK) 2185 if (pa > MIPS_PHYS_MASK)
2139 panic("mips_pmap_map_poolpage: " 2186 panic("mips_pmap_map_poolpage: "
2140 "pa #%"PRIxPADDR" can not be mapped into KSEG0", pa); 2187 "pa #%"PRIxPADDR" can not be mapped into KSEG0", pa);
2141 2188
2142 va = MIPS_PHYS_TO_KSEG0(pa); 2189 va = MIPS_PHYS_TO_KSEG0(pa);
2143#endif 2190#endif
 2191 struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
 2192 KASSERT(pg);
 2193 pmap_set_page_attributes(pg, PG_MD_POOLPAGE);
2144#if defined(MIPS3_PLUS) 2194#if defined(MIPS3_PLUS)
2145 if (mips_cache_info.mci_cache_virtual_alias) { 2195 if (MIPS_CACHE_VIRTUAL_ALIAS) {
2146 pg = PHYS_TO_VM_PAGE(pa); 2196 /*
2147 pv = pg->mdpage.pvh_list; 2197 * If this page was last mapped with an address that might
2148 if ((pv->pv_flags & PV_UNCACHED) == 0 && 2198 * cause aliases, flush the page from the cache.
2149 mips_cache_indexof(pv->pv_va) != mips_cache_indexof(va)) 2199 */
2150 mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE); 2200 pv_entry_t pv = &pg->mdpage.pvh_first;
 2201 KASSERT(pv->pv_pmap == NULL);
 2202 if (PG_MD_CACHED_P(pg) && mips_cache_badalias(pv->pv_va, va))
 2203 mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);
 2204 pv->pv_va = va;
2151 } 2205 }
2152#endif 2206#endif
2153 return va; 2207 return va;
2154} 2208}
2155 2209
2156paddr_t 2210paddr_t
2157mips_pmap_unmap_poolpage(vaddr_t va) 2211mips_pmap_unmap_poolpage(vaddr_t va)
2158{ 2212{
2159 paddr_t pa; 2213 paddr_t pa;
2160 2214
2161#ifdef _LP64 2215#ifdef _LP64
2162 KASSERT(MIPS_XKPHYS_P(va)); 2216 KASSERT(MIPS_XKPHYS_P(va));
2163 pa = MIPS_XKPHYS_TO_PHYS(va); 2217 pa = MIPS_XKPHYS_TO_PHYS(va);
2164#else 2218#else
 2219 KASSERT(MIPS_KSEG0_P(va));
2165 pa = MIPS_KSEG0_TO_PHYS(va); 2220 pa = MIPS_KSEG0_TO_PHYS(va);
2166#endif 2221#endif
 2222 struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
 2223 pmap_clear_page_attributes(pg, PG_MD_POOLPAGE);
2167#if defined(MIPS3_PLUS) 2224#if defined(MIPS3_PLUS)
2168 if (mips_cache_info.mci_cache_virtual_alias) { 2225 if (MIPS_CACHE_VIRTUAL_ALIAS) {
2169 mips_dcache_inv_range(va, PAGE_SIZE); 2226 mips_dcache_inv_range(va, PAGE_SIZE);
2170 } 2227 }
2171#endif 2228#endif
2172 return pa; 2229 return pa;
2173} 2230}
2174 2231
2175/******************** page table page management ********************/ 2232/******************** page table page management ********************/
2176 2233
2177/* TO BE DONE */ 2234/* TO BE DONE */

cvs diff -r1.1.2.3 -r1.1.2.4 src/sys/arch/mips/mips/Attic/pmap_segtab.c (expand / switch to unified diff)

--- src/sys/arch/mips/mips/Attic/pmap_segtab.c 2010/01/20 06:58:37 1.1.2.3
+++ src/sys/arch/mips/mips/Attic/pmap_segtab.c 2010/01/26 21:19:25 1.1.2.4
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: pmap_segtab.c,v 1.1.2.3 2010/01/20 06:58:37 matt Exp $ */ 1/* $NetBSD: pmap_segtab.c,v 1.1.2.4 2010/01/26 21:19:25 matt Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc. 4 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center and by Chris G. Demetriou. 9 * NASA Ames Research Center and by Chris G. Demetriou.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * are met: 13 * are met:
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
@@ -57,27 +57,27 @@ @@ -57,27 +57,27 @@
57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 * SUCH DAMAGE. 63 * SUCH DAMAGE.
64 * 64 *
65 * @(#)pmap.c 8.4 (Berkeley) 1/26/94 65 * @(#)pmap.c 8.4 (Berkeley) 1/26/94
66 */ 66 */
67 67
68#include <sys/cdefs.h> 68#include <sys/cdefs.h>
69 69
70__KERNEL_RCSID(0, "$NetBSD: pmap_segtab.c,v 1.1.2.3 2010/01/20 06:58:37 matt Exp $"); 70__KERNEL_RCSID(0, "$NetBSD: pmap_segtab.c,v 1.1.2.4 2010/01/26 21:19:25 matt Exp $");
71 71
72/* 72/*
73 * Manages physical address maps. 73 * Manages physical address maps.
74 * 74 *
75 * In addition to hardware address maps, this 75 * In addition to hardware address maps, this
76 * module is called upon to provide software-use-only 76 * module is called upon to provide software-use-only
77 * maps which may or may not be stored in the same 77 * maps which may or may not be stored in the same
78 * form as hardware maps. These pseudo-maps are 78 * form as hardware maps. These pseudo-maps are
79 * used to store intermediate results from copy 79 * used to store intermediate results from copy
80 * operations to and from address spaces. 80 * operations to and from address spaces.
81 * 81 *
82 * Since the information managed by this module is 82 * Since the information managed by this module is
83 * also stored by the logical address mapping module, 83 * also stored by the logical address mapping module,
@@ -267,27 +267,27 @@ pmap_segtab_free(pmap_t pmap) @@ -267,27 +267,27 @@ pmap_segtab_free(pmap_t pmap)
267 panic("pmap_destroy: segmap not empty"); 267 panic("pmap_destroy: segmap not empty");
268 } 268 }
269#endif 269#endif
270 270
271#ifdef MIPS3_PLUS /* XXX mmu XXX */ 271#ifdef MIPS3_PLUS /* XXX mmu XXX */
272 /* 272 /*
273 * The pica pmap.c flushed the segmap pages here. I'm 273 * The pica pmap.c flushed the segmap pages here. I'm
274 * not sure why, but I suspect it's because the page(s) 274 * not sure why, but I suspect it's because the page(s)
275 * were being accessed by KSEG0 (cached) addresses and 275 * were being accessed by KSEG0 (cached) addresses and
276 * may cause cache coherency problems when the page 276 * may cause cache coherency problems when the page
277 * is reused with KSEG2 (mapped) addresses. This may 277 * is reused with KSEG2 (mapped) addresses. This may
278 * cause problems on machines without VCED/VCEI. 278 * cause problems on machines without VCED/VCEI.
279 */ 279 */
280 if (mips_cache_info.mci_cache_virtual_alias) 280 if (MIPS_CACHE_VIRTUAL_ALIAS)
281 mips_dcache_inv_range((vaddr_t)pte, PAGE_SIZE); 281 mips_dcache_inv_range((vaddr_t)pte, PAGE_SIZE);
282#endif /* MIPS3_PLUS */ 282#endif /* MIPS3_PLUS */
283#ifdef _LP64 283#ifdef _LP64
284 KASSERT(MIPS_XKPHYS_P(pte)); 284 KASSERT(MIPS_XKPHYS_P(pte));
285 pa = MIPS_XKPHYS_TO_PHYS(pte); 285 pa = MIPS_XKPHYS_TO_PHYS(pte);
286#else 286#else
287 pa = MIPS_KSEG0_TO_PHYS(pte); 287 pa = MIPS_KSEG0_TO_PHYS(pte);
288#endif 288#endif
289 uvm_pagefree(PHYS_TO_VM_PAGE(pa)); 289 uvm_pagefree(PHYS_TO_VM_PAGE(pa));
290 290
291 stp->seg_tab[i] = NULL; 291 stp->seg_tab[i] = NULL;
292 } 292 }
293 293