| @@ -1,14 +1,14 @@ | | | @@ -1,14 +1,14 @@ |
1 | /* $NetBSD: pmap.c,v 1.374 2019/09/25 16:37:54 skrll Exp $ */ | | 1 | /* $NetBSD: pmap.c,v 1.375 2019/12/31 18:09:21 skrll Exp $ */ |
2 | | | 2 | |
3 | /* | | 3 | /* |
4 | * Copyright 2003 Wasabi Systems, Inc. | | 4 | * Copyright 2003 Wasabi Systems, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * Written by Steve C. Woodford for Wasabi Systems, Inc. | | 7 | * Written by Steve C. Woodford for Wasabi Systems, Inc. |
8 | * | | 8 | * |
9 | * Redistribution and use in source and binary forms, with or without | | 9 | * Redistribution and use in source and binary forms, with or without |
10 | * modification, are permitted provided that the following conditions | | 10 | * modification, are permitted provided that the following conditions |
11 | * are met: | | 11 | * are met: |
12 | * 1. Redistributions of source code must retain the above copyright | | 12 | * 1. Redistributions of source code must retain the above copyright |
13 | * notice, this list of conditions and the following disclaimer. | | 13 | * notice, this list of conditions and the following disclaimer. |
14 | * 2. Redistributions in binary form must reproduce the above copyright | | 14 | * 2. Redistributions in binary form must reproduce the above copyright |
| @@ -211,27 +211,27 @@ | | | @@ -211,27 +211,27 @@ |
211 | #include <sys/bus.h> | | 211 | #include <sys/bus.h> |
212 | #include <sys/atomic.h> | | 212 | #include <sys/atomic.h> |
213 | #include <sys/kernhist.h> | | 213 | #include <sys/kernhist.h> |
214 | | | 214 | |
215 | #include <uvm/uvm.h> | | 215 | #include <uvm/uvm.h> |
216 | #include <uvm/pmap/pmap_pvt.h> | | 216 | #include <uvm/pmap/pmap_pvt.h> |
217 | | | 217 | |
218 | #include <arm/locore.h> | | 218 | #include <arm/locore.h> |
219 | | | 219 | |
220 | #ifdef DDB | | 220 | #ifdef DDB |
221 | #include <arm/db_machdep.h> | | 221 | #include <arm/db_machdep.h> |
222 | #endif | | 222 | #endif |
223 | | | 223 | |
224 | __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.374 2019/09/25 16:37:54 skrll Exp $"); | | 224 | __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.375 2019/12/31 18:09:21 skrll Exp $"); |
225 | | | 225 | |
226 | //#define PMAP_DEBUG | | 226 | //#define PMAP_DEBUG |
227 | #ifdef PMAP_DEBUG | | 227 | #ifdef PMAP_DEBUG |
228 | | | 228 | |
229 | /* XXX need to get rid of all refs to this */ | | 229 | /* XXX need to get rid of all refs to this */ |
230 | int pmap_debug_level = 0; | | 230 | int pmap_debug_level = 0; |
231 | | | 231 | |
232 | /* | | 232 | /* |
233 | * for switching to potentially finer grained debugging | | 233 | * for switching to potentially finer grained debugging |
234 | */ | | 234 | */ |
235 | #define PDB_FOLLOW 0x0001 | | 235 | #define PDB_FOLLOW 0x0001 |
236 | #define PDB_INIT 0x0002 | | 236 | #define PDB_INIT 0x0002 |
237 | #define PDB_ENTER 0x0004 | | 237 | #define PDB_ENTER 0x0004 |
| @@ -3623,28 +3623,28 @@ pmap_kremove_pg(struct vm_page *pg, vadd | | | @@ -3623,28 +3623,28 @@ pmap_kremove_pg(struct vm_page *pg, vadd |
3623 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 3623 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
3624 | paddr_t pa = VM_PAGE_TO_PHYS(pg); | | 3624 | paddr_t pa = VM_PAGE_TO_PHYS(pg); |
3625 | struct pv_entry *pv; | | 3625 | struct pv_entry *pv; |
3626 | | | 3626 | |
3627 | KASSERT(arm_cache_prefer_mask == 0 || md->pvh_attrs & (PVF_COLORED|PVF_NC)); | | 3627 | KASSERT(arm_cache_prefer_mask == 0 || md->pvh_attrs & (PVF_COLORED|PVF_NC)); |
3628 | KASSERT((md->pvh_attrs & PVF_KMPAGE) == 0); | | 3628 | KASSERT((md->pvh_attrs & PVF_KMPAGE) == 0); |
3629 | KASSERT(pmap_page_locked_p(md)); | | 3629 | KASSERT(pmap_page_locked_p(md)); |
3630 | | | 3630 | |
3631 | pv = pmap_remove_pv(md, pa, pmap_kernel(), va); | | 3631 | pv = pmap_remove_pv(md, pa, pmap_kernel(), va); |
3632 | KASSERTMSG(pv, "pg %p (pa #%lx) va %#lx", pg, pa, va); | | 3632 | KASSERTMSG(pv, "pg %p (pa #%lx) va %#lx", pg, pa, va); |
3633 | KASSERT(PV_IS_KENTRY_P(pv->pv_flags)); | | 3633 | KASSERT(PV_IS_KENTRY_P(pv->pv_flags)); |
3634 | | | 3634 | |
3635 | /* | | 3635 | /* |
3636 |  * If we are removing a writeable mapping to a cached exec page, |	|  3636 |  * We are removing a writeable mapping to a cached exec page; if |
3637 |  * if it's the last mapping then clear it execness other sync |	|  3637 |  * it's the last mapping then clear its execness, otherwise sync |
3638 | * the page to the icache. | | 3638 | * the page to the icache. |
3639 | */ | | 3639 | */ |
3640 | if ((md->pvh_attrs & (PVF_NC|PVF_EXEC)) == PVF_EXEC | | 3640 | if ((md->pvh_attrs & (PVF_NC|PVF_EXEC)) == PVF_EXEC |
3641 | && (pv->pv_flags & PVF_WRITE) != 0) { | | 3641 | && (pv->pv_flags & PVF_WRITE) != 0) { |
3642 | if (SLIST_EMPTY(&md->pvh_list)) { | | 3642 | if (SLIST_EMPTY(&md->pvh_list)) { |
3643 | md->pvh_attrs &= ~PVF_EXEC; | | 3643 | md->pvh_attrs &= ~PVF_EXEC; |
3644 | PMAPCOUNT(exec_discarded_kremove); | | 3644 | PMAPCOUNT(exec_discarded_kremove); |
3645 | } else { | | 3645 | } else { |
3646 | pmap_syncicache_page(md, pa); | | 3646 | pmap_syncicache_page(md, pa); |
3647 | PMAPCOUNT(exec_synced_kremove); | | 3647 | PMAPCOUNT(exec_synced_kremove); |
3648 | } | | 3648 | } |
3649 | } | | 3649 | } |
3650 | pmap_vac_me_harder(md, pa, pmap_kernel(), 0); | | 3650 | pmap_vac_me_harder(md, pa, pmap_kernel(), 0); |