Wed Feb 16 23:31:13 2022 UTC ()
powerpc: Implement pv-tracking for unmanaged pages.

Needed for drm.


(riastradh)
diff -r1.98 -r1.99 src/sys/arch/powerpc/conf/files.powerpc
diff -r1.40 -r1.41 src/sys/arch/powerpc/include/pmap.h
diff -r1.35 -r1.36 src/sys/arch/powerpc/include/oea/pmap.h
diff -r1.107 -r1.108 src/sys/arch/powerpc/oea/pmap.c
diff -r1.12 -r1.13 src/sys/arch/powerpc/oea/pmap_kernel.c

cvs diff -r1.98 -r1.99 src/sys/arch/powerpc/conf/files.powerpc (expand / switch to unified diff)

--- src/sys/arch/powerpc/conf/files.powerpc 2021/03/05 06:06:34 1.98
+++ src/sys/arch/powerpc/conf/files.powerpc 2022/02/16 23:31:13 1.99
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1# $NetBSD: files.powerpc,v 1.98 2021/03/05 06:06:34 rin Exp $ 1# $NetBSD: files.powerpc,v 1.99 2022/02/16 23:31:13 riastradh Exp $
2 2
3defflag opt_altivec.h ALTIVEC K_ALTIVEC PPC_HAVE_SPE 3defflag opt_altivec.h ALTIVEC K_ALTIVEC PPC_HAVE_SPE
4defflag opt_openpic.h OPENPIC_DISTRIBUTE 4defflag opt_openpic.h OPENPIC_DISTRIBUTE
5defparam opt_ppcparam.h L2CR_CONFIG L3CR_CONFIG INTSTK CLOCKBASE VERBOSE_INITPPC PPC_CPU_FREQ 5defparam opt_ppcparam.h L2CR_CONFIG L3CR_CONFIG INTSTK CLOCKBASE VERBOSE_INITPPC PPC_CPU_FREQ
6defflag opt_ppcarch.h PPC_OEA PPC_OEA601 PPC_OEA64 PPC_OEA64_BRIDGE PPC_MPC8XX PPC_IBM4XX PPC_IBM403 PPC_IBM440 PPC_BOOKE 6defflag opt_ppcarch.h PPC_OEA PPC_OEA601 PPC_OEA64 PPC_OEA64_BRIDGE PPC_MPC8XX PPC_IBM4XX PPC_IBM403 PPC_IBM440 PPC_BOOKE
7defflag opt_ppccache.h CACHE_PROTO_MEI 7defflag opt_ppccache.h CACHE_PROTO_MEI
8defflag opt_pmap.h PMAPDEBUG PMAPCHECK PMAPCOUNTERS PMAP_MINIMALTLB PMAP_TLBDEBUG 8defflag opt_pmap.h PMAPDEBUG PMAPCHECK PMAPCOUNTERS PMAP_MINIMALTLB PMAP_TLBDEBUG
9defparam opt_pmap.h PTEGCOUNT PMAP_MEMLIMIT 9defparam opt_pmap.h PTEGCOUNT PMAP_MEMLIMIT
10 10
11file arch/powerpc/powerpc/core_machdep.c coredump 11file arch/powerpc/powerpc/core_machdep.c coredump
12file arch/powerpc/powerpc/fixup.c 12file arch/powerpc/powerpc/fixup.c
13file arch/powerpc/powerpc/kgdb_machdep.c kgdb 13file arch/powerpc/powerpc/kgdb_machdep.c kgdb
14file arch/powerpc/powerpc/kobj_machdep.c modular 14file arch/powerpc/powerpc/kobj_machdep.c modular
@@ -41,26 +41,27 @@ file arch/powerpc/ibm4xx/copyoutstr.c p @@ -41,26 +41,27 @@ file arch/powerpc/ibm4xx/copyoutstr.c p
41 41
42# MPC (Motorola PowerPC) 6xx Family files (60x,74x,75x,74xx), IBM 970 family (bridge mode??) 42# MPC (Motorola PowerPC) 6xx Family files (60x,74x,75x,74xx), IBM 970 family (bridge mode??)
43# 43#
44defparam opt_oea.h EXTMAP_RANGES=20 44defparam opt_oea.h EXTMAP_RANGES=20
45file arch/powerpc/oea/altivec.c (ppc_oea |ppc_oea64 | ppc_oea64_bridge) & altivec 45file arch/powerpc/oea/altivec.c (ppc_oea |ppc_oea64 | ppc_oea64_bridge) & altivec
46file arch/powerpc/oea/cpu_subr.c ppc_oea | ppc_oea64 | ppc_oea64_bridge | ppc_oea601 46file arch/powerpc/oea/cpu_subr.c ppc_oea | ppc_oea64 | ppc_oea64_bridge | ppc_oea601
47file arch/powerpc/oea/cpu_speedctl.c ppc_oea64 | ppc_oea64_bridge 47file arch/powerpc/oea/cpu_speedctl.c ppc_oea64 | ppc_oea64_bridge
48file arch/powerpc/oea/oea_machdep.c ppc_oea | ppc_oea64 | ppc_oea64_bridge | ppc_oea601 48file arch/powerpc/oea/oea_machdep.c ppc_oea | ppc_oea64 | ppc_oea64_bridge | ppc_oea601
49file arch/powerpc/oea/pmap.c ppc_oea | ppc_oea601 49file arch/powerpc/oea/pmap.c ppc_oea | ppc_oea601
50file arch/powerpc/oea/pmap64.c ppc_oea64 50file arch/powerpc/oea/pmap64.c ppc_oea64
51file arch/powerpc/oea/pmap64_bridge.c ppc_oea64_bridge 51file arch/powerpc/oea/pmap64_bridge.c ppc_oea64_bridge
52file arch/powerpc/oea/pmap_kernel.c ppc_oea | ppc_oea64 | ppc_oea64_bridge | ppc_oea601 52file arch/powerpc/oea/pmap_kernel.c ppc_oea | ppc_oea64 | ppc_oea64_bridge | ppc_oea601
53file arch/powerpc/powerpc/trap.c ppc_oea | ppc_oea64 | ppc_oea64_bridge | ppc_oea601 53file arch/powerpc/powerpc/trap.c ppc_oea | ppc_oea64 | ppc_oea64_bridge | ppc_oea601
 54file uvm/pmap/pmap_pvt.c ppc_oea | ppc_oea601 | ppc_oea64
54 55
55# PPC BookE (MPC85xx) Family files 56# PPC BookE (MPC85xx) Family files
56file arch/powerpc/booke/booke_machdep.c ppc_booke 57file arch/powerpc/booke/booke_machdep.c ppc_booke
57file arch/powerpc/booke/booke_cache.c ppc_booke 58file arch/powerpc/booke/booke_cache.c ppc_booke
58file arch/powerpc/booke/booke_pmap.c ppc_booke 59file arch/powerpc/booke/booke_pmap.c ppc_booke
59file arch/powerpc/booke/booke_stubs.c ppc_booke 60file arch/powerpc/booke/booke_stubs.c ppc_booke
60file arch/powerpc/booke/copyin.c ppc_booke 61file arch/powerpc/booke/copyin.c ppc_booke
61file arch/powerpc/booke/copyout.c ppc_booke 62file arch/powerpc/booke/copyout.c ppc_booke
62file arch/powerpc/booke/kcopy.c ppc_booke 63file arch/powerpc/booke/kcopy.c ppc_booke
63file arch/powerpc/booke/spe.c ppc_booke 64file arch/powerpc/booke/spe.c ppc_booke
64file arch/powerpc/booke/trap.c ppc_booke 65file arch/powerpc/booke/trap.c ppc_booke
65 66
66# MPC8xx (Power QUICC 1) Family files 67# MPC8xx (Power QUICC 1) Family files

cvs diff -r1.40 -r1.41 src/sys/arch/powerpc/include/pmap.h (expand / switch to unified diff)

--- src/sys/arch/powerpc/include/pmap.h 2020/07/06 08:17:01 1.40
+++ src/sys/arch/powerpc/include/pmap.h 2022/02/16 23:31:13 1.41
@@ -1,37 +1,39 @@ @@ -1,37 +1,39 @@
1/* $NetBSD: pmap.h,v 1.40 2020/07/06 08:17:01 rin Exp $ */ 1/* $NetBSD: pmap.h,v 1.41 2022/02/16 23:31:13 riastradh Exp $ */
2 2
3#ifndef _POWERPC_PMAP_H_ 3#ifndef _POWERPC_PMAP_H_
4#define _POWERPC_PMAP_H_ 4#define _POWERPC_PMAP_H_
5 5
6#ifdef _KERNEL_OPT 6#ifdef _KERNEL_OPT
7#include "opt_ppcarch.h" 7#include "opt_ppcarch.h"
8#include "opt_modular.h" 8#include "opt_modular.h"
9#endif 9#endif
10 10
11#if !defined(_MODULE) 11#if !defined(_MODULE)
12 12
13#if defined(PPC_BOOKE) 13#if defined(PPC_BOOKE)
14#include <powerpc/booke/pmap.h> 14#include <powerpc/booke/pmap.h>
15#elif defined(PPC_IBM4XX) 15#elif defined(PPC_IBM4XX)
16#include <powerpc/ibm4xx/pmap.h> 16#include <powerpc/ibm4xx/pmap.h>
17#elif defined(PPC_OEA) || defined (PPC_OEA64) || defined (PPC_OEA64_BRIDGE) 17#elif defined(PPC_OEA) || defined (PPC_OEA64) || defined (PPC_OEA64_BRIDGE)
18#include <powerpc/oea/pmap.h> 18#include <powerpc/oea/pmap.h>
19#elif defined(_KERNEL) 19#elif defined(_KERNEL)
20#error unknown PPC variant 20#error unknown PPC variant
21#endif 21#endif
22 22
23#endif /* !_MODULE */ 23#endif /* !_MODULE */
24 24
 25#include <uvm/pmap/pmap_pvt.h>
 26
25#if !defined(_LOCORE) && (defined(MODULAR) || defined(_MODULE)) 27#if !defined(_LOCORE) && (defined(MODULAR) || defined(_MODULE))
26/* 28/*
27 * Both BOOKE and OEA use __HAVE_VM_PAGE_MD but IBM4XX doesn't so define 29 * Both BOOKE and OEA use __HAVE_VM_PAGE_MD but IBM4XX doesn't so define
28 * a compatible vm_page_md so that struct vm_page is the same size for all 30 * a compatible vm_page_md so that struct vm_page is the same size for all
29 * PPC variants. 31 * PPC variants.
30 */ 32 */
31#ifndef __HAVE_VM_PAGE_MD 33#ifndef __HAVE_VM_PAGE_MD
32#define __HAVE_VM_PAGE_MD 34#define __HAVE_VM_PAGE_MD
33#define VM_MDPAGE_INIT(pg) __nothing 35#define VM_MDPAGE_INIT(pg) __nothing
34 36
35struct vm_page_md { 37struct vm_page_md {
36 uintptr_t mdpg_dummy[5]; 38 uintptr_t mdpg_dummy[5];
37}; 39};

cvs diff -r1.35 -r1.36 src/sys/arch/powerpc/include/oea/pmap.h (expand / switch to unified diff)

--- src/sys/arch/powerpc/include/oea/pmap.h 2021/03/12 04:57:42 1.35
+++ src/sys/arch/powerpc/include/oea/pmap.h 2022/02/16 23:31:13 1.36
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: pmap.h,v 1.35 2021/03/12 04:57:42 thorpej Exp $ */ 1/* $NetBSD: pmap.h,v 1.36 2022/02/16 23:31:13 riastradh Exp $ */
2 2
3/*- 3/*-
4 * Copyright (C) 1995, 1996 Wolfgang Solfrank. 4 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
5 * Copyright (C) 1995, 1996 TooLs GmbH. 5 * Copyright (C) 1995, 1996 TooLs GmbH.
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions 9 * modification, are permitted provided that the following conditions
10 * are met: 10 * are met:
11 * 1. Redistributions of source code must retain the above copyright 11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer. 12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright 13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the 14 * notice, this list of conditions and the following disclaimer in the
@@ -80,26 +80,27 @@ struct pmap_ops { @@ -80,26 +80,27 @@ struct pmap_ops {
80 void (*pmapop_reference)(pmap_t); 80 void (*pmapop_reference)(pmap_t);
81 void (*pmapop_destroy)(pmap_t); 81 void (*pmapop_destroy)(pmap_t);
82 void (*pmapop_copy)(pmap_t, pmap_t, vaddr_t, vsize_t, vaddr_t); 82 void (*pmapop_copy)(pmap_t, pmap_t, vaddr_t, vsize_t, vaddr_t);
83 void (*pmapop_update)(pmap_t); 83 void (*pmapop_update)(pmap_t);
84 int (*pmapop_enter)(pmap_t, vaddr_t, paddr_t, vm_prot_t, u_int); 84 int (*pmapop_enter)(pmap_t, vaddr_t, paddr_t, vm_prot_t, u_int);
85 void (*pmapop_remove)(pmap_t, vaddr_t, vaddr_t); 85 void (*pmapop_remove)(pmap_t, vaddr_t, vaddr_t);
86 void (*pmapop_kenter_pa)(vaddr_t, paddr_t, vm_prot_t, u_int); 86 void (*pmapop_kenter_pa)(vaddr_t, paddr_t, vm_prot_t, u_int);
87 void (*pmapop_kremove)(vaddr_t, vsize_t); 87 void (*pmapop_kremove)(vaddr_t, vsize_t);
88 bool (*pmapop_extract)(pmap_t, vaddr_t, paddr_t *); 88 bool (*pmapop_extract)(pmap_t, vaddr_t, paddr_t *);
89 89
90 void (*pmapop_protect)(pmap_t, vaddr_t, vaddr_t, vm_prot_t); 90 void (*pmapop_protect)(pmap_t, vaddr_t, vaddr_t, vm_prot_t);
91 void (*pmapop_unwire)(pmap_t, vaddr_t); 91 void (*pmapop_unwire)(pmap_t, vaddr_t);
92 void (*pmapop_page_protect)(struct vm_page *, vm_prot_t); 92 void (*pmapop_page_protect)(struct vm_page *, vm_prot_t);
 93 void (*pmapop_pv_protect)(paddr_t, vm_prot_t);
93 bool (*pmapop_query_bit)(struct vm_page *, int); 94 bool (*pmapop_query_bit)(struct vm_page *, int);
94 bool (*pmapop_clear_bit)(struct vm_page *, int); 95 bool (*pmapop_clear_bit)(struct vm_page *, int);
95 96
96 void (*pmapop_activate)(struct lwp *); 97 void (*pmapop_activate)(struct lwp *);
97 void (*pmapop_deactivate)(struct lwp *); 98 void (*pmapop_deactivate)(struct lwp *);
98 99
99 void (*pmapop_pinit)(pmap_t); 100 void (*pmapop_pinit)(pmap_t);
100 void (*pmapop_procwr)(struct proc *, vaddr_t, size_t); 101 void (*pmapop_procwr)(struct proc *, vaddr_t, size_t);
101 102
102 void (*pmapop_pte_print)(volatile struct pte *); 103 void (*pmapop_pte_print)(volatile struct pte *);
103 void (*pmapop_pteg_check)(void); 104 void (*pmapop_pteg_check)(void);
104 void (*pmapop_print_mmuregs)(void); 105 void (*pmapop_print_mmuregs)(void);
105 void (*pmapop_print_pte)(pmap_t, vaddr_t); 106 void (*pmapop_print_pte)(pmap_t, vaddr_t);
@@ -237,30 +238,39 @@ int pmap_setup_segment0_map(int use_larg @@ -237,30 +238,39 @@ int pmap_setup_segment0_map(int use_larg
237#endif 238#endif
238 239
239#define PMAP_MD_PREFETCHABLE 0x2000000 240#define PMAP_MD_PREFETCHABLE 0x2000000
240#define PMAP_STEAL_MEMORY 241#define PMAP_STEAL_MEMORY
241#define PMAP_NEED_PROCWR 242#define PMAP_NEED_PROCWR
242 243
243void pmap_zero_page(paddr_t); 244void pmap_zero_page(paddr_t);
244void pmap_copy_page(paddr_t, paddr_t); 245void pmap_copy_page(paddr_t, paddr_t);
245 246
246LIST_HEAD(pvo_head, pvo_entry); 247LIST_HEAD(pvo_head, pvo_entry);
247 248
248#define __HAVE_VM_PAGE_MD 249#define __HAVE_VM_PAGE_MD
249 250
 251struct pmap_page {
 252 unsigned int pp_attrs;
 253 struct pvo_head pp_pvoh;
 254#ifdef MODULAR
 255 uintptr_t pp_dummy[3];
 256#endif
 257};
 258
250struct vm_page_md { 259struct vm_page_md {
251 unsigned int mdpg_attrs;  260 struct pmap_page mdpg_pp;
252 struct pvo_head mdpg_pvoh; 261#define mdpg_attrs mdpg_pp.pp_attrs
 262#define mdpg_pvoh mdpg_pp.pp_pvoh
253#ifdef MODULAR 263#ifdef MODULAR
254 uintptr_t mdpg_dummy[3]; 264#define mdpg_dummy mdpg_pp.pp_dummy
255#endif 265#endif
256}; 266};
257 267
258#define VM_MDPAGE_INIT(pg) do { \ 268#define VM_MDPAGE_INIT(pg) do { \
259 (pg)->mdpage.mdpg_attrs = 0; \ 269 (pg)->mdpage.mdpg_attrs = 0; \
260 LIST_INIT(&(pg)->mdpage.mdpg_pvoh); \ 270 LIST_INIT(&(pg)->mdpage.mdpg_pvoh); \
261} while (/*CONSTCOND*/0) 271} while (/*CONSTCOND*/0)
262 272
263__END_DECLS 273__END_DECLS
264#endif /* _KERNEL */ 274#endif /* _KERNEL */
265 275
266#endif /* _POWERPC_OEA_PMAP_H_ */ 276#endif /* _POWERPC_OEA_PMAP_H_ */

cvs diff -r1.107 -r1.108 src/sys/arch/powerpc/oea/pmap.c (expand / switch to unified diff)

--- src/sys/arch/powerpc/oea/pmap.c 2021/07/19 14:49:45 1.107
+++ src/sys/arch/powerpc/oea/pmap.c 2022/02/16 23:31:13 1.108
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: pmap.c,v 1.107 2021/07/19 14:49:45 chs Exp $ */ 1/* $NetBSD: pmap.c,v 1.108 2022/02/16 23:31:13 riastradh Exp $ */
2/*- 2/*-
3 * Copyright (c) 2001 The NetBSD Foundation, Inc. 3 * Copyright (c) 2001 The NetBSD Foundation, Inc.
4 * All rights reserved. 4 * All rights reserved.
5 * 5 *
6 * This code is derived from software contributed to The NetBSD Foundation 6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc. 7 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
8 * 8 *
9 * Support for PPC64 Bridge mode added by Sanjay Lal <sanjayl@kymasys.com> 9 * Support for PPC64 Bridge mode added by Sanjay Lal <sanjayl@kymasys.com>
10 * of Kyma Systems LLC. 10 * of Kyma Systems LLC.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions 13 * modification, are permitted provided that the following conditions
14 * are met: 14 * are met:
@@ -53,27 +53,27 @@ @@ -53,27 +53,27 @@
53 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR 53 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
54 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 54 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
55 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 55 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
56 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 56 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
58 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 58 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
59 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 59 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
60 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 60 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
61 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 61 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
62 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 62 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 */ 63 */
64 64
65#include <sys/cdefs.h> 65#include <sys/cdefs.h>
66__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.107 2021/07/19 14:49:45 chs Exp $"); 66__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.108 2022/02/16 23:31:13 riastradh Exp $");
67 67
68#define PMAP_NOOPNAMES 68#define PMAP_NOOPNAMES
69 69
70#ifdef _KERNEL_OPT 70#ifdef _KERNEL_OPT
71#include "opt_altivec.h" 71#include "opt_altivec.h"
72#include "opt_multiprocessor.h" 72#include "opt_multiprocessor.h"
73#include "opt_pmap.h" 73#include "opt_pmap.h"
74#include "opt_ppcarch.h" 74#include "opt_ppcarch.h"
75#endif 75#endif
76 76
77#include <sys/param.h> 77#include <sys/param.h>
78#include <sys/proc.h> 78#include <sys/proc.h>
79#include <sys/pool.h> 79#include <sys/pool.h>
@@ -204,26 +204,27 @@ STATIC pmap_t pmap_create(void); @@ -204,26 +204,27 @@ STATIC pmap_t pmap_create(void);
204STATIC void pmap_reference(pmap_t); 204STATIC void pmap_reference(pmap_t);
205STATIC void pmap_destroy(pmap_t); 205STATIC void pmap_destroy(pmap_t);
206STATIC void pmap_copy(pmap_t, pmap_t, vaddr_t, vsize_t, vaddr_t); 206STATIC void pmap_copy(pmap_t, pmap_t, vaddr_t, vsize_t, vaddr_t);
207STATIC void pmap_update(pmap_t); 207STATIC void pmap_update(pmap_t);
208STATIC int pmap_enter(pmap_t, vaddr_t, paddr_t, vm_prot_t, u_int); 208STATIC int pmap_enter(pmap_t, vaddr_t, paddr_t, vm_prot_t, u_int);
209STATIC void pmap_remove(pmap_t, vaddr_t, vaddr_t); 209STATIC void pmap_remove(pmap_t, vaddr_t, vaddr_t);
210STATIC void pmap_kenter_pa(vaddr_t, paddr_t, vm_prot_t, u_int); 210STATIC void pmap_kenter_pa(vaddr_t, paddr_t, vm_prot_t, u_int);
211STATIC void pmap_kremove(vaddr_t, vsize_t); 211STATIC void pmap_kremove(vaddr_t, vsize_t);
212STATIC bool pmap_extract(pmap_t, vaddr_t, paddr_t *); 212STATIC bool pmap_extract(pmap_t, vaddr_t, paddr_t *);
213 213
214STATIC void pmap_protect(pmap_t, vaddr_t, vaddr_t, vm_prot_t); 214STATIC void pmap_protect(pmap_t, vaddr_t, vaddr_t, vm_prot_t);
215STATIC void pmap_unwire(pmap_t, vaddr_t); 215STATIC void pmap_unwire(pmap_t, vaddr_t);
216STATIC void pmap_page_protect(struct vm_page *, vm_prot_t); 216STATIC void pmap_page_protect(struct vm_page *, vm_prot_t);
 217STATIC void pmap_pv_protect(paddr_t, vm_prot_t);
217STATIC bool pmap_query_bit(struct vm_page *, int); 218STATIC bool pmap_query_bit(struct vm_page *, int);
218STATIC bool pmap_clear_bit(struct vm_page *, int); 219STATIC bool pmap_clear_bit(struct vm_page *, int);
219 220
220STATIC void pmap_activate(struct lwp *); 221STATIC void pmap_activate(struct lwp *);
221STATIC void pmap_deactivate(struct lwp *); 222STATIC void pmap_deactivate(struct lwp *);
222 223
223STATIC void pmap_pinit(pmap_t pm); 224STATIC void pmap_pinit(pmap_t pm);
224STATIC void pmap_procwr(struct proc *, vaddr_t, size_t); 225STATIC void pmap_procwr(struct proc *, vaddr_t, size_t);
225 226
226#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB) 227#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
227STATIC void pmap_pte_print(volatile struct pte *); 228STATIC void pmap_pte_print(volatile struct pte *);
228STATIC void pmap_pteg_check(void); 229STATIC void pmap_pteg_check(void);
229STATIC void pmap_print_mmuregs(void); 230STATIC void pmap_print_mmuregs(void);
@@ -635,59 +636,76 @@ pmap_pte_to_va(volatile const struct pte @@ -635,59 +636,76 @@ pmap_pte_to_va(volatile const struct pte
635 /* PPC Bits 0-3 */ 636 /* PPC Bits 0-3 */
636 va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT; 637 va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT;
637#endif 638#endif
638 639
639 return va; 640 return va;
640} 641}
641#endif 642#endif
642 643
643static inline struct pvo_head * 644static inline struct pvo_head *
644pa_to_pvoh(paddr_t pa, struct vm_page **pg_p) 645pa_to_pvoh(paddr_t pa, struct vm_page **pg_p)
645{ 646{
646 struct vm_page *pg; 647 struct vm_page *pg;
647 struct vm_page_md *md; 648 struct vm_page_md *md;
 649 struct pmap_page *pp;
648 650
649 pg = PHYS_TO_VM_PAGE(pa); 651 pg = PHYS_TO_VM_PAGE(pa);
650 if (pg_p != NULL) 652 if (pg_p != NULL)
651 *pg_p = pg; 653 *pg_p = pg;
652 if (pg == NULL) 654 if (pg == NULL) {
 655 if ((pp = pmap_pv_tracked(pa)) != NULL)
 656 return &pp->pp_pvoh;
653 return NULL; 657 return NULL;
 658 }
654 md = VM_PAGE_TO_MD(pg); 659 md = VM_PAGE_TO_MD(pg);
655 return &md->mdpg_pvoh; 660 return &md->mdpg_pvoh;
656} 661}
657 662
658static inline struct pvo_head * 663static inline struct pvo_head *
659vm_page_to_pvoh(struct vm_page *pg) 664vm_page_to_pvoh(struct vm_page *pg)
660{ 665{
661 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 666 struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
662 667
663 return &md->mdpg_pvoh; 668 return &md->mdpg_pvoh;
664} 669}
665 670
 671static inline void
 672pmap_pp_attr_clear(struct pmap_page *pp, int ptebit)
 673{
 674
 675	pp->pp_attrs &= ~ptebit;
 676}
666 677
667static inline void 678static inline void
668pmap_attr_clear(struct vm_page *pg, int ptebit) 679pmap_attr_clear(struct vm_page *pg, int ptebit)
669{ 680{
670 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 681 struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
671 682
672 md->mdpg_attrs &= ~ptebit; 683 pmap_pp_attr_clear(&md->mdpg_pp, ptebit);
 684}
 685
 686static inline int
 687pmap_pp_attr_fetch(struct pmap_page *pp)
 688{
 689
 690 return pp->pp_attrs;
673} 691}
674 692
675static inline int 693static inline int
676pmap_attr_fetch(struct vm_page *pg) 694pmap_attr_fetch(struct vm_page *pg)
677{ 695{
678 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 696 struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
679 697
680 return md->mdpg_attrs; 698 return pmap_pp_attr_fetch(&md->mdpg_pp);
681} 699}
682 700
683static inline void 701static inline void
684pmap_attr_save(struct vm_page *pg, int ptebit) 702pmap_attr_save(struct vm_page *pg, int ptebit)
685{ 703{
686 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 704 struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
687 705
688 md->mdpg_attrs |= ptebit; 706 md->mdpg_attrs |= ptebit;
689} 707}
690 708
691static inline int 709static inline int
692pmap_pte_compare(const volatile struct pte *pt, const struct pte *pvo_pt) 710pmap_pte_compare(const volatile struct pte *pt, const struct pte *pvo_pt)
693{ 711{
@@ -2264,58 +2282,55 @@ pmap_unwire(pmap_t pm, vaddr_t va) @@ -2264,58 +2282,55 @@ pmap_unwire(pmap_t pm, vaddr_t va)
2264 msr = pmap_interrupts_off(); 2282 msr = pmap_interrupts_off();
2265 pvo = pmap_pvo_find_va(pm, va, NULL); 2283 pvo = pmap_pvo_find_va(pm, va, NULL);
2266 if (pvo != NULL) { 2284 if (pvo != NULL) {
2267 if (PVO_WIRED_P(pvo)) { 2285 if (PVO_WIRED_P(pvo)) {
2268 pvo->pvo_vaddr &= ~PVO_WIRED; 2286 pvo->pvo_vaddr &= ~PVO_WIRED;
2269 pm->pm_stats.wired_count--; 2287 pm->pm_stats.wired_count--;
2270 } 2288 }
2271 PMAP_PVO_CHECK(pvo); /* sanity check */ 2289 PMAP_PVO_CHECK(pvo); /* sanity check */
2272 } 2290 }
2273 pmap_interrupts_restore(msr); 2291 pmap_interrupts_restore(msr);
2274 PMAP_UNLOCK(); 2292 PMAP_UNLOCK();
2275} 2293}
2276 2294
2277/* 2295static void
2278 * Lower the protection on the specified physical page. 2296pmap_pp_protect(struct pmap_page *pp, paddr_t pa, vm_prot_t prot)
2279 */ 
2280void 
2281pmap_page_protect(struct vm_page *pg, vm_prot_t prot) 
2282{ 2297{
2283 struct pvo_head *pvo_head, pvol; 2298 struct pvo_head *pvo_head, pvol;
2284 struct pvo_entry *pvo, *next_pvo; 2299 struct pvo_entry *pvo, *next_pvo;
2285 volatile struct pte *pt; 2300 volatile struct pte *pt;
2286 register_t msr; 2301 register_t msr;
2287 2302
2288 PMAP_LOCK(); 2303 PMAP_LOCK();
2289 2304
2290 KASSERT(prot != VM_PROT_ALL); 2305 KASSERT(prot != VM_PROT_ALL);
2291 LIST_INIT(&pvol); 2306 LIST_INIT(&pvol);
2292 msr = pmap_interrupts_off(); 2307 msr = pmap_interrupts_off();
2293 2308
2294 /* 2309 /*
2295 * When UVM reuses a page, it does a pmap_page_protect with 2310 * When UVM reuses a page, it does a pmap_page_protect with
2296 * VM_PROT_NONE. At that point, we can clear the exec flag 2311 * VM_PROT_NONE. At that point, we can clear the exec flag
2297 * since we know the page will have different contents. 2312 * since we know the page will have different contents.
2298 */ 2313 */
2299 if ((prot & VM_PROT_READ) == 0) { 2314 if ((prot & VM_PROT_READ) == 0) {
2300 DPRINTFN(EXEC, "[pmap_page_protect: %#" _PRIxpa ": clear-exec]\n", 2315 DPRINTFN(EXEC, "[pmap_page_protect: %#" _PRIxpa ": clear-exec]\n",
2301 VM_PAGE_TO_PHYS(pg)); 2316 pa);
2302 if (pmap_attr_fetch(pg) & PTE_EXEC) { 2317 if (pmap_pp_attr_fetch(pp) & PTE_EXEC) {
2303 PMAPCOUNT(exec_uncached_page_protect); 2318 PMAPCOUNT(exec_uncached_page_protect);
2304 pmap_attr_clear(pg, PTE_EXEC); 2319 pmap_pp_attr_clear(pp, PTE_EXEC);
2305 } 2320 }
2306 } 2321 }
2307 2322
2308 pvo_head = vm_page_to_pvoh(pg); 2323 pvo_head = &pp->pp_pvoh;
2309 for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) { 2324 for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
2310 next_pvo = LIST_NEXT(pvo, pvo_vlink); 2325 next_pvo = LIST_NEXT(pvo, pvo_vlink);
2311 PMAP_PVO_CHECK(pvo); /* sanity check */ 2326 PMAP_PVO_CHECK(pvo); /* sanity check */
2312 2327
2313 /* 2328 /*
2314 * Downgrading to no mapping at all, we just remove the entry. 2329 * Downgrading to no mapping at all, we just remove the entry.
2315 */ 2330 */
2316 if ((prot & VM_PROT_READ) == 0) { 2331 if ((prot & VM_PROT_READ) == 0) {
2317 pmap_pvo_remove(pvo, -1, &pvol); 2332 pmap_pvo_remove(pvo, -1, &pvol);
2318 continue; 2333 continue;
2319 }  2334 }
2320 2335
2321 /* 2336 /*
@@ -2346,26 +2361,52 @@ pmap_page_protect(struct vm_page *pg, vm @@ -2346,26 +2361,52 @@ pmap_page_protect(struct vm_page *pg, vm
2346 pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 2361 pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
2347 PVO_WHERE(pvo, PMAP_PAGE_PROTECT); 2362 PVO_WHERE(pvo, PMAP_PAGE_PROTECT);
2348 PMAPCOUNT(ptes_changed); 2363 PMAPCOUNT(ptes_changed);
2349 } 2364 }
2350 PMAP_PVO_CHECK(pvo); /* sanity check */ 2365 PMAP_PVO_CHECK(pvo); /* sanity check */
2351 } 2366 }
2352 pmap_interrupts_restore(msr); 2367 pmap_interrupts_restore(msr);
2353 pmap_pvo_free_list(&pvol); 2368 pmap_pvo_free_list(&pvol);
2354 2369
2355 PMAP_UNLOCK(); 2370 PMAP_UNLOCK();
2356} 2371}
2357 2372
2358/* 2373/*
 2374 * Lower the protection on the specified physical page.
 2375 */
 2376void
 2377pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
 2378{
 2379 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
 2380
 2381 pmap_pp_protect(&md->mdpg_pp, VM_PAGE_TO_PHYS(pg), prot);
 2382}
 2383
 2384/*
 2385 * Lower the protection on the physical page at the specified physical
 2386 * address, which may not be managed and so may not have a struct
 2387 * vm_page.
 2388 */
 2389void
 2390pmap_pv_protect(paddr_t pa, vm_prot_t prot)
 2391{
 2392 struct pmap_page *pp;
 2393
 2394 if ((pp = pmap_pv_tracked(pa)) == NULL)
 2395 return;
 2396 pmap_pp_protect(pp, pa, prot);
 2397}
 2398
 2399/*
2359 * Activate the address space for the specified process. If the process 2400 * Activate the address space for the specified process. If the process
2360 * is the current process, load the new MMU context. 2401 * is the current process, load the new MMU context.
2361 */ 2402 */
2362void 2403void
2363pmap_activate(struct lwp *l) 2404pmap_activate(struct lwp *l)
2364{ 2405{
2365 struct pcb *pcb = lwp_getpcb(l); 2406 struct pcb *pcb = lwp_getpcb(l);
2366 pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap; 2407 pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
2367 2408
2368 DPRINTFN(ACTIVATE, 2409 DPRINTFN(ACTIVATE,
2369 "pmap_activate: lwp %p (curlwp %p)\n", l, curlwp); 2410 "pmap_activate: lwp %p (curlwp %p)\n", l, curlwp);
2370 2411
2371 /* 2412 /*

cvs diff -r1.12 -r1.13 src/sys/arch/powerpc/oea/pmap_kernel.c (expand / switch to unified diff)

--- src/sys/arch/powerpc/oea/pmap_kernel.c 2021/03/02 01:47:44 1.12
+++ src/sys/arch/powerpc/oea/pmap_kernel.c 2022/02/16 23:31:13 1.13
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: pmap_kernel.c,v 1.12 2021/03/02 01:47:44 thorpej Exp $ */ 1/* $NetBSD: pmap_kernel.c,v 1.13 2022/02/16 23:31:13 riastradh Exp $ */
2/*- 2/*-
3 * Copyright (c) 2011 The NetBSD Foundation, Inc. 3 * Copyright (c) 2011 The NetBSD Foundation, Inc.
4 * All rights reserved. 4 * All rights reserved.
5 * 5 *
6 * This code is derived from software contributed to The NetBSD Foundation 6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Matt Thomas of 3am Software Foundry. 7 * by Matt Thomas of 3am Software Foundry.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
@@ -20,27 +20,27 @@ @@ -20,27 +20,27 @@
20 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 20 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 21 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE. 28 * POSSIBILITY OF SUCH DAMAGE.
29 */ 29 */
30 30
31#include <sys/cdefs.h> 31#include <sys/cdefs.h>
32 32
33__KERNEL_RCSID(1, "$NetBSD: pmap_kernel.c,v 1.12 2021/03/02 01:47:44 thorpej Exp $"); 33__KERNEL_RCSID(1, "$NetBSD: pmap_kernel.c,v 1.13 2022/02/16 23:31:13 riastradh Exp $");
34 34
35#ifdef _KERNEL_OPT 35#ifdef _KERNEL_OPT
36#include "opt_altivec.h" 36#include "opt_altivec.h"
37#include "opt_ddb.h" 37#include "opt_ddb.h"
38#include "opt_pmap.h" 38#include "opt_pmap.h"
39#endif 39#endif
40 40
41#include <sys/param.h> 41#include <sys/param.h>
42#include <uvm/uvm_extern.h> 42#include <uvm/uvm_extern.h>
43 43
44#ifdef ALTIVEC 44#ifdef ALTIVEC
45int pmap_use_altivec; 45int pmap_use_altivec;
46#endif 46#endif
@@ -205,26 +205,32 @@ pmap_protect(pmap_t pm, vaddr_t start, v @@ -205,26 +205,32 @@ pmap_protect(pmap_t pm, vaddr_t start, v
205 205
206void 206void
207pmap_unwire(pmap_t pm, vaddr_t va) 207pmap_unwire(pmap_t pm, vaddr_t va)
208{ 208{
209 (*pmapops->pmapop_unwire)(pm, va); 209 (*pmapops->pmapop_unwire)(pm, va);
210} 210}
211 211
212void 212void
213pmap_page_protect(struct vm_page *pg, vm_prot_t prot) 213pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
214{ 214{
215 (*pmapops->pmapop_page_protect)(pg, prot); 215 (*pmapops->pmapop_page_protect)(pg, prot);
216} 216}
217 217
 218void
 219pmap_pv_protect(paddr_t pa, vm_prot_t prot)
 220{
 221 (*pmapops->pmapop_pv_protect)(pa, prot);
 222}
 223
218bool 224bool
219pmap_query_bit(struct vm_page *pg, int ptebit) 225pmap_query_bit(struct vm_page *pg, int ptebit)
220{ 226{
221 return (*pmapops->pmapop_query_bit)(pg, ptebit); 227 return (*pmapops->pmapop_query_bit)(pg, ptebit);
222} 228}
223 229
224bool 230bool
225pmap_clear_bit(struct vm_page *pg, int ptebit) 231pmap_clear_bit(struct vm_page *pg, int ptebit)
226{ 232{
227 return (*pmapops->pmapop_clear_bit)(pg, ptebit); 233 return (*pmapops->pmapop_clear_bit)(pg, ptebit);
228} 234}
229 235
230void 236void