Wed Apr 15 15:50:16 2020 UTC
Convert PMAPDEBUG to UVMHIST


(skrll)
diff -r1.4 -r1.5 src/sys/arch/hppa/gsc/if_ie_gsc.c
diff -r1.12 -r1.13 src/sys/arch/hppa/hppa/machdep.c
diff -r1.108 -r1.109 src/sys/arch/hppa/hppa/pmap.c
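
For context, this change replaces the compile-time PMAPDEBUG/DPRINTF tracing with UVM's kernel history (UVMHIST) ring buffer. Below is a minimal sketch of the conversion pattern using the same macros that appear in the pmap.c diff; the format string and argument list are illustrative, not a verbatim call site:

    /* Before: conditional printf tracing, compiled in only with PMAPDEBUG. */
    #ifdef PMAPDEBUG
    #define DPRINTF(l,s) do { \
            if ((pmapdebug & (l)) == (l)) \
                    printf s; \
    } while(0)
    #else
    #define DPRINTF(l,s) /* */
    #endif

            DPRINTF(PDB_FOLLOW|PDB_VP,
                ("%s(%p, 0x%lx, 0x%lx)\n", __func__, pm, va, ptp));

    /* After: record into the UVM "maphist" history; the UVMHIST macros are
     * picked up through the existing <uvm/uvm.h> include. */
            UVMHIST_FUNC(__func__);
            UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx ptp %#jx", (uintptr_t)pm,
                va, ptp, 0);
            /* ... function body ... */
            UVMHIST_LOG(maphist, "<--- done", 0, 0, 0, 0);

UVMHIST_CALLARGS and UVMHIST_LOG take a format string plus exactly four integer arguments (padded with 0), with pointers cast to uintptr_t and printed via %#jx. Because the history is collected in a ring buffer rather than printed to the console, the pmapdebug save/restore hacks in if_ie_gsc.c and machdep.c (which temporarily zeroed pmapdebug to silence output during attach and cpu_startup) are no longer needed and are removed.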

cvs diff -r1.4 -r1.5 src/sys/arch/hppa/gsc/if_ie_gsc.c

--- src/sys/arch/hppa/gsc/if_ie_gsc.c 2019/04/25 10:08:45 1.4
+++ src/sys/arch/hppa/gsc/if_ie_gsc.c 2020/04/15 15:50:15 1.5
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: if_ie_gsc.c,v 1.4 2019/04/25 10:08:45 msaitoh Exp $ */ 1/* $NetBSD: if_ie_gsc.c,v 1.5 2020/04/15 15:50:15 skrll Exp $ */
2 2
3/* $OpenBSD: if_ie_gsc.c,v 1.6 2001/01/12 22:57:04 mickey Exp $ */ 3/* $OpenBSD: if_ie_gsc.c,v 1.6 2001/01/12 22:57:04 mickey Exp $ */
4 4
5/* 5/*
6 * Copyright (c) 1998-2004 Michael Shalayeff 6 * Copyright (c) 1998-2004 Michael Shalayeff
7 * All rights reserved. 7 * All rights reserved.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
@@ -28,27 +28,27 @@ @@ -28,27 +28,27 @@
28 * THE POSSIBILITY OF SUCH DAMAGE. 28 * THE POSSIBILITY OF SUCH DAMAGE.
29 */ 29 */
30 30
31/* 31/*
32 * Referencies: 32 * Referencies:
33 * 1. 82596DX and 82596SX High-Performance 32-bit Local Area Network Coprocessor 33 * 1. 82596DX and 82596SX High-Performance 32-bit Local Area Network Coprocessor
34 * Intel Corporation, November 1996, Order Number: 290219-006 34 * Intel Corporation, November 1996, Order Number: 290219-006
35 * 35 *
36 * 2. 712 I/O Subsystem ERS Rev 1.0 36 * 2. 712 I/O Subsystem ERS Rev 1.0
37 * Hewlett-Packard, June 17 1992, Dwg No. A-A2263-66510-31 37 * Hewlett-Packard, June 17 1992, Dwg No. A-A2263-66510-31
38 */ 38 */
39 39
40#include <sys/cdefs.h> 40#include <sys/cdefs.h>
41__KERNEL_RCSID(0, "$NetBSD: if_ie_gsc.c,v 1.4 2019/04/25 10:08:45 msaitoh Exp $"); 41__KERNEL_RCSID(0, "$NetBSD: if_ie_gsc.c,v 1.5 2020/04/15 15:50:15 skrll Exp $");
42 42
43#include <sys/param.h> 43#include <sys/param.h>
44#include <sys/systm.h> 44#include <sys/systm.h>
45#include <sys/device.h> 45#include <sys/device.h>
46#include <sys/socket.h> 46#include <sys/socket.h>
47#include <sys/sockio.h> 47#include <sys/sockio.h>
48 48
49#include <uvm/uvm_extern.h> 49#include <uvm/uvm_extern.h>
50 50
51#include <net/if.h> 51#include <net/if.h>
52#include <net/if_dl.h> 52#include <net/if_dl.h>
53#include <net/if_ether.h> 53#include <net/if_ether.h>
54#include <net/if_types.h> 54#include <net/if_types.h>
@@ -368,31 +368,26 @@ ie_gsc_probe(device_t parent, cfdata_t m @@ -368,31 +368,26 @@ ie_gsc_probe(device_t parent, cfdata_t m
368 return 1; 368 return 1;
369} 369}
370 370
371void 371void
372ie_gsc_attach(device_t parent, device_t self, void *aux) 372ie_gsc_attach(device_t parent, device_t self, void *aux)
373{ 373{
374 struct ie_gsc_softc *gsc = device_private(self); 374 struct ie_gsc_softc *gsc = device_private(self);
375 struct ie_softc *sc = &gsc->ie; 375 struct ie_softc *sc = &gsc->ie;
376 struct gsc_attach_args *ga = aux; 376 struct gsc_attach_args *ga = aux;
377 bus_dma_segment_t seg; 377 bus_dma_segment_t seg;
378 int rseg; 378 int rseg;
379 int rv; 379 int rv;
380 uint8_t myaddr[ETHER_ADDR_LEN]; 380 uint8_t myaddr[ETHER_ADDR_LEN];
381#ifdef PMAPDEBUG 
382 extern int pmapdebug; 
383 int opmapdebug = pmapdebug; 
384 pmapdebug = 0; 
385#endif 
386 381
387 if (ga->ga_type.iodc_sv_model == HPPA_FIO_GLAN) 382 if (ga->ga_type.iodc_sv_model == HPPA_FIO_GLAN)
388 gsc->flags |= IEGSC_GECKO; 383 gsc->flags |= IEGSC_GECKO;
389 384
390 /* Map the GSC registers. */ 385 /* Map the GSC registers. */
391 if (bus_space_map(ga->ga_iot, ga->ga_hpa, 386 if (bus_space_map(ga->ga_iot, ga->ga_hpa,
392 IE_GSC_BANK_SZ, 0, &gsc->ioh)) { 387 IE_GSC_BANK_SZ, 0, &gsc->ioh)) {
393 printf(": can't map i/o space\n"); 388 printf(": can't map i/o space\n");
394 return; 389 return;
395 } 390 }
396 391
397 /* Set up some initial glue. */ 392 /* Set up some initial glue. */
398 sc->sc_dev = self; 393 sc->sc_dev = self;
@@ -496,29 +491,26 @@ ie_gsc_attach(device_t parent, device_t  @@ -496,29 +491,26 @@ ie_gsc_attach(device_t parent, device_t
496 /* The remainder of the memory is for buffers. */ 491 /* The remainder of the memory is for buffers. */
497 sc->buf_area = IE_GSC_ALIGN(sc->scb + IE_SCB_SZ); 492 sc->buf_area = IE_GSC_ALIGN(sc->scb + IE_SCB_SZ);
498 sc->buf_area_sz = sc->sc_msize - sc->buf_area; 493 sc->buf_area_sz = sc->sc_msize - sc->buf_area;
499 494
500 /* Finally, we can probe the chip. */ 495 /* Finally, we can probe the chip. */
501 rv = i82596_probe(sc); 496 rv = i82596_probe(sc);
502 if (!rv) { 497 if (!rv) {
503 bus_dmamap_destroy(gsc->iemt, sc->sc_dmamap); 498 bus_dmamap_destroy(gsc->iemt, sc->sc_dmamap);
504 bus_dmamem_unmap(gsc->iemt, 499 bus_dmamem_unmap(gsc->iemt,
505 (void *)sc->sc_maddr, sc->sc_msize); 500 (void *)sc->sc_maddr, sc->sc_msize);
506 bus_dmamem_free(gsc->iemt, &seg, rseg); 501 bus_dmamem_free(gsc->iemt, &seg, rseg);
507 return; 502 return;
508 } 503 }
509#ifdef PMAPDEBUG 
510 pmapdebug = opmapdebug; 
511#endif 
512 if (!rv) 504 if (!rv)
513 return; 505 return;
514 506
515 /* Get our Ethernet address. */ 507 /* Get our Ethernet address. */
516 memcpy(myaddr, ga->ga_ether_address, ETHER_ADDR_LEN); 508 memcpy(myaddr, ga->ga_ether_address, ETHER_ADDR_LEN);
517 509
518 /* Set up the SCP. */ 510 /* Set up the SCP. */
519 sc->ie_bus_write16(sc, IE_SCP_BUS_USE(sc->scp), IE_GSC_SYSBUS); 511 sc->ie_bus_write16(sc, IE_SCP_BUS_USE(sc->scp), IE_GSC_SYSBUS);
520 sc->ie_bus_write24(sc, IE_SCP_ISCP(sc->scp), sc->iscp); 512 sc->ie_bus_write24(sc, IE_SCP_ISCP(sc->scp), sc->iscp);
521 513
522 /* Set up the ISCP. */ 514 /* Set up the ISCP. */
523 sc->ie_bus_write16(sc, IE_ISCP_SCB(sc->iscp), sc->scb); 515 sc->ie_bus_write16(sc, IE_ISCP_SCB(sc->iscp), sc->scb);
524 sc->ie_bus_write24(sc, IE_ISCP_BASE(sc->iscp), 0); 516 sc->ie_bus_write24(sc, IE_ISCP_BASE(sc->iscp), 0);

cvs diff -r1.12 -r1.13 src/sys/arch/hppa/hppa/machdep.c

--- src/sys/arch/hppa/hppa/machdep.c 2019/12/31 13:07:10 1.12
+++ src/sys/arch/hppa/hppa/machdep.c 2020/04/15 15:50:15 1.13
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: machdep.c,v 1.12 2019/12/31 13:07:10 ad Exp $ */ 1/* $NetBSD: machdep.c,v 1.13 2020/04/15 15:50:15 skrll Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc. 4 * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Matthew Fredette. 8 * by Matthew Fredette.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -48,27 +48,27 @@ @@ -48,27 +48,27 @@
48 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 48 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
49 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 49 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
50 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT, 50 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
51 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 51 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
52 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 52 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
53 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 53 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
55 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 55 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
56 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 56 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
57 * THE POSSIBILITY OF SUCH DAMAGE. 57 * THE POSSIBILITY OF SUCH DAMAGE.
58 */ 58 */
59 59
60#include <sys/cdefs.h> 60#include <sys/cdefs.h>
61__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.12 2019/12/31 13:07:10 ad Exp $"); 61__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.13 2020/04/15 15:50:15 skrll Exp $");
62 62
63#include "opt_cputype.h" 63#include "opt_cputype.h"
64#include "opt_ddb.h" 64#include "opt_ddb.h"
65#include "opt_kgdb.h" 65#include "opt_kgdb.h"
66#include "opt_modular.h" 66#include "opt_modular.h"
67#include "opt_useleds.h" 67#include "opt_useleds.h"
68 68
69#include <sys/param.h> 69#include <sys/param.h>
70#include <sys/systm.h> 70#include <sys/systm.h>
71#include <sys/signalvar.h> 71#include <sys/signalvar.h>
72#include <sys/kernel.h> 72#include <sys/kernel.h>
73#include <sys/proc.h> 73#include <sys/proc.h>
74#include <sys/buf.h> 74#include <sys/buf.h>
@@ -116,30 +116,26 @@ __KERNEL_RCSID(0, "$NetBSD: machdep.c,v  @@ -116,30 +116,26 @@ __KERNEL_RCSID(0, "$NetBSD: machdep.c,v
116#endif 116#endif
117 117
118#ifdef DDB 118#ifdef DDB
119#include <machine/db_machdep.h> 119#include <machine/db_machdep.h>
120#include <ddb/db_access.h> 120#include <ddb/db_access.h>
121#include <ddb/db_sym.h> 121#include <ddb/db_sym.h>
122#include <ddb/db_extern.h> 122#include <ddb/db_extern.h>
123#endif 123#endif
124 124
125#include <hppa/hppa/machdep.h> 125#include <hppa/hppa/machdep.h>
126#include <hppa/hppa/pim.h> 126#include <hppa/hppa/pim.h>
127#include <hppa/dev/cpudevs.h> 127#include <hppa/dev/cpudevs.h>
128 128
129#ifdef PMAPDEBUG 
130#include <hppa/hppa/hpt.h> 
131#endif 
132 
133#include "ksyms.h" 129#include "ksyms.h"
134#include "lcd.h" 130#include "lcd.h"
135 131
136#ifdef MACHDEPDEBUG 132#ifdef MACHDEPDEBUG
137 133
138#define DPRINTF(s) do { \ 134#define DPRINTF(s) do { \
139 if (machdepdebug) \ 135 if (machdepdebug) \
140 printf s; \ 136 printf s; \
141} while(0) 137} while(0)
142 138
143#define DPRINTFN(l,s) do { \ 139#define DPRINTFN(l,s) do { \
144 if (machdepdebug >= (1)) \ 140 if (machdepdebug >= (1)) \
145 printf s; \ 141 printf s; \
@@ -888,32 +884,27 @@ cpu_model_cpuid(int modelno) @@ -888,32 +884,27 @@ cpu_model_cpuid(int modelno)
888 case HPPA_BOARD_HP725_50: 884 case HPPA_BOARD_HP725_50:
889 case HPPA_BOARD_HP725_75: 885 case HPPA_BOARD_HP725_75:
890 case HPPA_BOARD_HP725_99: 886 case HPPA_BOARD_HP725_99:
891 return hpcxt; 887 return hpcxt;
892 } 888 }
893 return hpc_unknown; 889 return hpc_unknown;
894} 890}
895 891
896void 892void
897cpu_startup(void) 893cpu_startup(void)
898{ 894{
899 vaddr_t minaddr, maxaddr; 895 vaddr_t minaddr, maxaddr;
900 char pbuf[3][9]; 896 char pbuf[3][9];
901#ifdef PMAPDEBUG 
902 extern int pmapdebug; 
903 int opmapdebug = pmapdebug; 
904 897
905 pmapdebug = 0; 
906#endif 
907 /* Initialize the message buffer. */ 898 /* Initialize the message buffer. */
908 initmsgbuf(msgbufaddr, MSGBUFSIZE); 899 initmsgbuf(msgbufaddr, MSGBUFSIZE);
909 900
910 /* 901 /*
911 * i won't understand a friend of mine, 902 * i won't understand a friend of mine,
912 * who sat in a room full of artificial ice, 903 * who sat in a room full of artificial ice,
913 * fogging the air w/ humid cries -- 904 * fogging the air w/ humid cries --
914 * WELCOME TO SUMMER! 905 * WELCOME TO SUMMER!
915 */ 906 */
916 printf("%s%s", copyright, version); 907 printf("%s%s", copyright, version);
917 908
918 /* identify system type */ 909 /* identify system type */
919 printf("%s\n", cpu_getmodel()); 910 printf("%s\n", cpu_getmodel());
@@ -930,29 +921,26 @@ cpu_startup(void) @@ -930,29 +921,26 @@ cpu_startup(void)
930 format_bytes(pbuf[0], sizeof(pbuf[0]), ptoa(totalphysmem - physmem)); 921 format_bytes(pbuf[0], sizeof(pbuf[0]), ptoa(totalphysmem - physmem));
931 DPRINTF(("lost mem = %s\n", pbuf[0])); 922 DPRINTF(("lost mem = %s\n", pbuf[0]));
932 } 923 }
933#endif 924#endif
934 925
935 minaddr = 0; 926 minaddr = 0;
936 927
937 /* 928 /*
938 * Allocate a submap for physio 929 * Allocate a submap for physio
939 */ 930 */
940 phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr, 931 phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
941 VM_PHYS_SIZE, 0, false, NULL); 932 VM_PHYS_SIZE, 0, false, NULL);
942 933
943#ifdef PMAPDEBUG 
944 pmapdebug = opmapdebug; 
945#endif 
946 format_bytes(pbuf[0], sizeof(pbuf[0]), ptoa(uvm_availmem())); 934 format_bytes(pbuf[0], sizeof(pbuf[0]), ptoa(uvm_availmem()));
947 printf("avail mem = %s\n", pbuf[0]); 935 printf("avail mem = %s\n", pbuf[0]);
948} 936}
949 937
950/* 938/*
951 * compute CPU clock ratio such as: 939 * compute CPU clock ratio such as:
952 * cpu_ticksnum / cpu_ticksdenom = t + delta 940 * cpu_ticksnum / cpu_ticksdenom = t + delta
953 * delta -> 0 941 * delta -> 0
954 */ 942 */
955void 943void
956delay_init(void) 944delay_init(void)
957{ 945{
958 u_int num, denom, delta, mdelta; 946 u_int num, denom, delta, mdelta;

cvs diff -r1.108 -r1.109 src/sys/arch/hppa/hppa/pmap.c

--- src/sys/arch/hppa/hppa/pmap.c 2020/04/15 15:22:37 1.108
+++ src/sys/arch/hppa/hppa/pmap.c 2020/04/15 15:50:15 1.109
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: pmap.c,v 1.108 2020/04/15 15:22:37 skrll Exp $ */ 1/* $NetBSD: pmap.c,v 1.109 2020/04/15 15:50:15 skrll Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc. 4 * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Matthew Fredette. 8 * by Matthew Fredette.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -55,27 +55,27 @@ @@ -55,27 +55,27 @@
55 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 55 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
56 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 56 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
57 * THE POSSIBILITY OF SUCH DAMAGE. 57 * THE POSSIBILITY OF SUCH DAMAGE.
58 */ 58 */
59/* 59/*
60 * References: 60 * References:
61 * 1. PA7100LC ERS, Hewlett-Packard, March 30 1999, Public version 1.0 61 * 1. PA7100LC ERS, Hewlett-Packard, March 30 1999, Public version 1.0
62 * 2. PA7300LC ERS, Hewlett-Packard, March 18 1996, Version 1.0 62 * 2. PA7300LC ERS, Hewlett-Packard, March 18 1996, Version 1.0
63 * 3. PA-RISC 1.1 Architecture and Instruction Set Reference Manual, 63 * 3. PA-RISC 1.1 Architecture and Instruction Set Reference Manual,
64 * Hewlett-Packard, February 1994, Third Edition 64 * Hewlett-Packard, February 1994, Third Edition
65 */ 65 */
66 66
67#include <sys/cdefs.h> 67#include <sys/cdefs.h>
68__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.108 2020/04/15 15:22:37 skrll Exp $"); 68__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.109 2020/04/15 15:50:15 skrll Exp $");
69 69
70#include "opt_cputype.h" 70#include "opt_cputype.h"
71 71
72#include <sys/param.h> 72#include <sys/param.h>
73#include <sys/mutex.h> 73#include <sys/mutex.h>
74#include <sys/proc.h> 74#include <sys/proc.h>
75#include <sys/rwlock.h> 75#include <sys/rwlock.h>
76#include <sys/systm.h> 76#include <sys/systm.h>
77 77
78#include <uvm/uvm.h> 78#include <uvm/uvm.h>
79#include <uvm/uvm_page_array.h> 79#include <uvm/uvm_page_array.h>
80 80
81#include <machine/cpu.h> 81#include <machine/cpu.h>
@@ -84,72 +84,26 @@ __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.1 @@ -84,72 +84,26 @@ __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.1
84#include <machine/pcb.h> 84#include <machine/pcb.h>
85#include <machine/pmap.h> 85#include <machine/pmap.h>
86#include <machine/pte.h> 86#include <machine/pte.h>
87#include <machine/psl.h> 87#include <machine/psl.h>
88#include <machine/reg.h> 88#include <machine/reg.h>
89 89
90#include <hppa/hppa/hpt.h> 90#include <hppa/hppa/hpt.h>
91#include <hppa/hppa/machdep.h> 91#include <hppa/hppa/machdep.h>
92 92
93#if defined(DDB) 93#if defined(DDB)
94#include <ddb/db_output.h> 94#include <ddb/db_output.h>
95#endif 95#endif
96 96
97#ifdef PMAPDEBUG 
98 
99#define static /**/ 
100#define inline /**/ 
101 
102#define DPRINTF(l,s) do { \ 
103 if ((pmapdebug & (l)) == (l)) \ 
104 printf s; \ 
105} while(0) 
106 
107#define PDB_FOLLOW 0x00000001 
108#define PDB_INIT 0x00000002 
109#define PDB_ENTER 0x00000004 
110#define PDB_REMOVE 0x00000008 
111#define PDB_CREATE 0x00000010 
112#define PDB_PTPAGE 0x00000020 
113#define PDB_CACHE 0x00000040 
114#define PDB_BITS 0x00000080 
115#define PDB_COLLECT 0x00000100 
116#define PDB_PROTECT 0x00000200 
117#define PDB_EXTRACT 0x00000400 
118#define PDB_VP 0x00000800 
119#define PDB_PV 0x00001000 
120#define PDB_PARANOIA 0x00002000 
121#define PDB_WIRING 0x00004000 
122#define PDB_PMAP 0x00008000 
123#define PDB_STEAL 0x00010000 
124#define PDB_PHYS 0x00020000 
125#define PDB_POOL 0x00040000 
126#define PDB_ALIAS 0x00080000 
127int pmapdebug = 0 
128 | PDB_INIT 
129 | PDB_FOLLOW 
130 | PDB_VP 
131 | PDB_PV 
132 | PDB_ENTER 
133 | PDB_REMOVE 
134 | PDB_STEAL 
135 | PDB_PROTECT 
136 | PDB_PHYS 
137 | PDB_ALIAS 
138 ; 
139#else 
140#define DPRINTF(l,s) /* */ 
141#endif 
142 
143int pmap_hptsize = 16 * PAGE_SIZE; /* patchable */ 97int pmap_hptsize = 16 * PAGE_SIZE; /* patchable */
144vaddr_t pmap_hpt; 98vaddr_t pmap_hpt;
145 99
146static struct pmap kernel_pmap_store; 100static struct pmap kernel_pmap_store;
147struct pmap *const kernel_pmap_ptr = &kernel_pmap_store; 101struct pmap *const kernel_pmap_ptr = &kernel_pmap_store;
148 102
149int hppa_sid_max = HPPA_SID_MAX; 103int hppa_sid_max = HPPA_SID_MAX;
150struct pool pmap_pool; 104struct pool pmap_pool;
151struct pool pmap_pv_pool; 105struct pool pmap_pv_pool;
152int pmap_pvlowat = 252; 106int pmap_pvlowat = 252;
153bool pmap_initialized = false; 107bool pmap_initialized = false;
154 108
155static kmutex_t pmaps_lock; 109static kmutex_t pmaps_lock;
@@ -321,128 +275,142 @@ pmap_sdir_get(pa_space_t space) @@ -321,128 +275,142 @@ pmap_sdir_get(pa_space_t space)
321 return ((uint32_t *)vtop[space]); 275 return ((uint32_t *)vtop[space]);
322} 276}
323 277
324static inline volatile pt_entry_t * 278static inline volatile pt_entry_t *
325pmap_pde_get(volatile uint32_t *pd, vaddr_t va) 279pmap_pde_get(volatile uint32_t *pd, vaddr_t va)
326{ 280{
327 281
328 return ((pt_entry_t *)pd[va >> 22]); 282 return ((pt_entry_t *)pd[va >> 22]);
329} 283}
330 284
331static inline void 285static inline void
332pmap_pde_set(pmap_t pm, vaddr_t va, paddr_t ptp) 286pmap_pde_set(pmap_t pm, vaddr_t va, paddr_t ptp)
333{ 287{
334 288 UVMHIST_FUNC(__func__);
335 DPRINTF(PDB_FOLLOW|PDB_VP, 289 UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx ptp %#jx", (uintptr_t)pm,
336 ("%s(%p, 0x%lx, 0x%lx)\n", __func__, pm, va, ptp)); 290 va, ptp, 0);
337 291
338 KASSERT((ptp & PGOFSET) == 0); 292 KASSERT((ptp & PGOFSET) == 0);
339 293
340 pm->pm_pdir[va >> 22] = ptp; 294 pm->pm_pdir[va >> 22] = ptp;
341} 295}
342 296
343static inline pt_entry_t * 297static inline pt_entry_t *
344pmap_pde_alloc(pmap_t pm, vaddr_t va, struct vm_page **pdep) 298pmap_pde_alloc(pmap_t pm, vaddr_t va, struct vm_page **pdep)
345{ 299{
346 struct vm_page *pg; 300 struct vm_page *pg;
347 paddr_t pa; 301 paddr_t pa;
348 302
349 DPRINTF(PDB_FOLLOW|PDB_VP, 303 UVMHIST_FUNC(__func__);
350 ("%s(%p, 0x%lx, %p)\n", __func__, pm, va, pdep)); 304 UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx pdep %#jx", (uintptr_t)pm,
 305 va, (uintptr_t)pdep, 0);
351 306
352 KASSERT(pm != pmap_kernel()); 307 KASSERT(pm != pmap_kernel());
353 KASSERT(rw_write_held(pm->pm_lock)); 308 KASSERT(rw_write_held(pm->pm_lock));
354 309
355 pg = pmap_pagealloc(&pm->pm_obj, va); 310 pg = pmap_pagealloc(&pm->pm_obj, va);
356 311
357 if (pg == NULL) 312 if (pg == NULL)
358 return NULL; 313 return NULL;
359 314
360 pa = VM_PAGE_TO_PHYS(pg); 315 pa = VM_PAGE_TO_PHYS(pg);
361 316
362 DPRINTF(PDB_FOLLOW|PDB_VP, ("%s: pde %lx\n", __func__, pa)); 317 UVMHIST_LOG(maphist, "pde %#jx", pa, 0, 0, 0);
363 318
364 pg->flags &= ~PG_BUSY; /* never busy */ 319 pg->flags &= ~PG_BUSY; /* never busy */
365 pg->wire_count = 1; /* no mappings yet */ 320 pg->wire_count = 1; /* no mappings yet */
366 pmap_pde_set(pm, va, pa); 321 pmap_pde_set(pm, va, pa);
367 pm->pm_stats.resident_count++; /* count PTP as resident */ 322 pm->pm_stats.resident_count++; /* count PTP as resident */
368 pm->pm_ptphint = pg; 323 pm->pm_ptphint = pg;
369 if (pdep) 324 if (pdep)
370 *pdep = pg; 325 *pdep = pg;
371 return ((pt_entry_t *)pa); 326 return ((pt_entry_t *)pa);
372} 327}
373 328
374static inline struct vm_page * 329static inline struct vm_page *
375pmap_pde_ptp(pmap_t pm, volatile pt_entry_t *pde) 330pmap_pde_ptp(pmap_t pm, volatile pt_entry_t *pde)
376{ 331{
377 paddr_t pa = (paddr_t)pde; 332 paddr_t pa = (paddr_t)pde;
378 333
379 DPRINTF(PDB_FOLLOW|PDB_PV, ("%s(%p, %p)\n", __func__, pm, pde)); 334 UVMHIST_FUNC(__func__);
 335 UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx pdep %#jx", (uintptr_t)pm,
 336 (uintptr_t)pde, 0, 0);
380 337
381 if (pm->pm_ptphint && VM_PAGE_TO_PHYS(pm->pm_ptphint) == pa) 338 if (pm->pm_ptphint && VM_PAGE_TO_PHYS(pm->pm_ptphint) == pa)
382 return (pm->pm_ptphint); 339 return (pm->pm_ptphint);
383 340
384 DPRINTF(PDB_FOLLOW|PDB_PV, ("%s: lookup 0x%lx\n", __func__, pa)); 341 UVMHIST_LOG(maphist, "<--- done (%#jx)",
 342 (uintptr_t)PHYS_TO_VM_PAGE(pa), 0, 0, 0);
385 343
386 return (PHYS_TO_VM_PAGE(pa)); 344 return (PHYS_TO_VM_PAGE(pa));
387} 345}
388 346
389static inline void 347static inline void
390pmap_pde_release(pmap_t pmap, vaddr_t va, struct vm_page *ptp) 348pmap_pde_release(pmap_t pmap, vaddr_t va, struct vm_page *ptp)
391{ 349{
392 350 UVMHIST_FUNC(__func__);
393 DPRINTF(PDB_FOLLOW|PDB_PV, 351 UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx ptp %#jx", (uintptr_t)pmap,
394 ("%s(%p, 0x%lx, %p)\n", __func__, pmap, va, ptp)); 352 va, (uintptr_t)ptp, 0);
395 353
396 KASSERT(pmap != pmap_kernel()); 354 KASSERT(pmap != pmap_kernel());
397 if (--ptp->wire_count <= 1) { 355 if (--ptp->wire_count <= 1) {
398 DPRINTF(PDB_FOLLOW|PDB_PV, 356 UVMHIST_LOG(maphist, "disposing ptp %#jx", (uintptr_t)ptp, 0,
399 ("%s: disposing ptp %p\n", __func__, ptp)); 357 0, 0);
400 pmap_pde_set(pmap, va, 0); 358 pmap_pde_set(pmap, va, 0);
401 pmap->pm_stats.resident_count--; 359 pmap->pm_stats.resident_count--;
402 if (pmap->pm_ptphint == ptp) 360 if (pmap->pm_ptphint == ptp)
403 pmap->pm_ptphint = NULL; 361 pmap->pm_ptphint = NULL;
404 ptp->wire_count = 0; 362 ptp->wire_count = 0;
405 363
406 KASSERT((ptp->flags & PG_BUSY) == 0); 364 KASSERT((ptp->flags & PG_BUSY) == 0);
407 365
408 pmap_pagefree(ptp); 366 pmap_pagefree(ptp);
409 } 367 }
410} 368}
411 369
412static inline pt_entry_t 370static inline pt_entry_t
413pmap_pte_get(volatile pt_entry_t *pde, vaddr_t va) 371pmap_pte_get(volatile pt_entry_t *pde, vaddr_t va)
414{ 372{
415 373
416 return (pde[(va >> 12) & 0x3ff]); 374 return (pde[(va >> 12) & 0x3ff]);
417} 375}
418 376
419static inline void 377static inline void
420pmap_pte_set(volatile pt_entry_t *pde, vaddr_t va, pt_entry_t pte) 378pmap_pte_set(volatile pt_entry_t *pde, vaddr_t va, pt_entry_t pte)
421{ 379{
422 380
423 DPRINTF(PDB_FOLLOW|PDB_VP, ("%s(%p, 0x%lx, 0x%x)\n", 381 /* too verbose due to hppa_pagezero_{,un}map */
424 __func__, pde, va, pte)); 382#if 0
 383 UVMHIST_FUNC(__func__);
 384 UVMHIST_CALLARGS(maphist, "pdep %#jx va %#jx pte %#jx", (uintptr_t)pde,
 385 va, pte, 0);
 386#endif
425 387
426 KASSERT(pde != NULL); 388 KASSERT(pde != NULL);
427 KASSERT(((paddr_t)pde & PGOFSET) == 0); 389 KASSERT(((paddr_t)pde & PGOFSET) == 0);
428 390
429 pde[(va >> 12) & 0x3ff] = pte; 391 pde[(va >> 12) & 0x3ff] = pte;
430} 392}
431 393
432void 394void
433pmap_pte_flush(pmap_t pmap, vaddr_t va, pt_entry_t pte) 395pmap_pte_flush(pmap_t pmap, vaddr_t va, pt_entry_t pte)
434{ 396{
435 397
 398 UVMHIST_FUNC(__func__);
 399 if (pmap != pmap_kernel() && va != 0) {
 400 UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx pte %#jx",
 401 (uintptr_t)pmap, va, (uintptr_t)pte, 0);
 402 }
 403
436 fdcache(pmap->pm_space, va, PAGE_SIZE); 404 fdcache(pmap->pm_space, va, PAGE_SIZE);
437 if (pte & PTE_PROT(TLB_EXECUTE)) { 405 if (pte & PTE_PROT(TLB_EXECUTE)) {
438 ficache(pmap->pm_space, va, PAGE_SIZE); 406 ficache(pmap->pm_space, va, PAGE_SIZE);
439 pitlb(pmap->pm_space, va); 407 pitlb(pmap->pm_space, va);
440 } 408 }
441 pdtlb(pmap->pm_space, va); 409 pdtlb(pmap->pm_space, va);
442#ifdef USE_HPT 410#ifdef USE_HPT
443 if (pmap_hpt) { 411 if (pmap_hpt) {
444 struct hpt_entry *hpt; 412 struct hpt_entry *hpt;
445 hpt = pmap_hash(pmap, va); 413 hpt = pmap_hash(pmap, va);
446 if (hpt->hpt_valid && 414 if (hpt->hpt_valid &&
447 hpt->hpt_space == pmap->pm_space && 415 hpt->hpt_space == pmap->pm_space &&
448 hpt->hpt_vpn == ((va >> 1) & 0x7fff0000)) 416 hpt->hpt_vpn == ((va >> 1) & 0x7fff0000))
@@ -518,93 +486,105 @@ pmap_dump_pv(paddr_t pa) @@ -518,93 +486,105 @@ pmap_dump_pv(paddr_t pa)
518 for (pve = md->pvh_list; pve; pve = pve->pv_next) 486 for (pve = md->pvh_list; pve; pve = pve->pv_next)
519 db_printf("%x:%lx\n", pve->pv_pmap->pm_space, 487 db_printf("%x:%lx\n", pve->pv_pmap->pm_space,
520 pve->pv_va & PV_VAMASK); 488 pve->pv_va & PV_VAMASK);
521} 489}
522#endif 490#endif
523 491
524static int 492static int
525pmap_check_alias(struct vm_page *pg, vaddr_t va, pt_entry_t pte) 493pmap_check_alias(struct vm_page *pg, vaddr_t va, pt_entry_t pte)
526{ 494{
527 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 495 struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
528 struct pv_entry *pve; 496 struct pv_entry *pve;
529 int ret = 0; 497 int ret = 0;
530 498
 499 UVMHIST_FUNC(__func__);
 500 UVMHIST_CALLARGS(maphist, "pg %#jx va %#jx pte %#jx", (uintptr_t)pg,
 501 va, pte, 0);
 502
531 /* check for non-equ aliased mappings */ 503 /* check for non-equ aliased mappings */
532 for (pve = md->pvh_list; pve; pve = pve->pv_next) { 504 for (pve = md->pvh_list; pve; pve = pve->pv_next) {
533 vaddr_t pva = pve->pv_va & PV_VAMASK; 505 vaddr_t pva = pve->pv_va & PV_VAMASK;
534 506
 507 UVMHIST_LOG(maphist, "... pm %#jx va %#jx",
 508 (uintptr_t)pve->pv_pmap, pva, 0, 0);
 509
535 pte |= pmap_vp_find(pve->pv_pmap, pva); 510 pte |= pmap_vp_find(pve->pv_pmap, pva);
536 if ((va & HPPA_PGAOFF) != (pva & HPPA_PGAOFF) && 511 if ((va & HPPA_PGAOFF) != (pva & HPPA_PGAOFF) &&
537 (pte & PTE_PROT(TLB_WRITE))) { 512 (pte & PTE_PROT(TLB_WRITE))) {
 513 UVMHIST_LOG(maphist,
 514 "aliased writable mapping %#jx:%#jx",
 515 pve->pv_pmap->pm_space, pve->pv_va, 0, 0);
538 516
539 DPRINTF(PDB_FOLLOW|PDB_ALIAS, 
540 ("%s: aliased writable mapping 0x%x:0x%lx\n", 
541 __func__, pve->pv_pmap->pm_space, pve->pv_va)); 
542 ret++; 517 ret++;
543 } 518 }
544 } 519 }
545 520
 521 UVMHIST_LOG(maphist, "<--- done (%jd)", ret, 0, 0, 0);
 522
546 return (ret); 523 return (ret);
547} 524}
548 525
549/* 526/*
550 * This allocates and returns a new struct pv_entry. 527 * This allocates and returns a new struct pv_entry.
551 */ 528 */
552static inline struct pv_entry * 529static inline struct pv_entry *
553pmap_pv_alloc(void) 530pmap_pv_alloc(void)
554{ 531{
555 struct pv_entry *pv; 532 struct pv_entry *pv;
556 533
557 DPRINTF(PDB_FOLLOW|PDB_PV, ("%s()\n", __func__)); 
558 
559 pv = pool_get(&pmap_pv_pool, PR_NOWAIT); 534 pv = pool_get(&pmap_pv_pool, PR_NOWAIT);
560 535
561 DPRINTF(PDB_FOLLOW|PDB_PV, ("%s: %p\n", __func__, pv)); 
562 
563 return (pv); 536 return (pv);
564} 537}
565 538
566static inline void 539static inline void
567pmap_pv_free(struct pv_entry *pv) 540pmap_pv_free(struct pv_entry *pv)
568{ 541{
569 542
570 if (pv->pv_ptp) 543 if (pv->pv_ptp)
571 pmap_pde_release(pv->pv_pmap, pv->pv_va & PV_VAMASK, 544 pmap_pde_release(pv->pv_pmap, pv->pv_va & PV_VAMASK,
572 pv->pv_ptp); 545 pv->pv_ptp);
573 546
574 pool_put(&pmap_pv_pool, pv); 547 pool_put(&pmap_pv_pool, pv);
575} 548}
576 549
577static inline void 550static inline void
578pmap_pv_enter(struct vm_page *pg, struct pv_entry *pve, pmap_t pm, 551pmap_pv_enter(struct vm_page *pg, struct pv_entry *pve, pmap_t pm,
579 vaddr_t va, struct vm_page *pdep, u_int flags) 552 vaddr_t va, struct vm_page *pdep, u_int flags)
580{ 553{
581 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 554 struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
582 555
583 DPRINTF(PDB_FOLLOW|PDB_PV, ("%s(%p, %p, %p, 0x%lx, %p, 0x%x)\n", 556 UVMHIST_FUNC(__func__);
584 __func__, pg, pve, pm, va, pdep, flags)); 557 UVMHIST_CALLARGS(maphist, "pg %#jx pve %#jx pm %#jx va %#jx",
 558 (uintptr_t)pg, (uintptr_t)pve, (uintptr_t)pm, va);
 559 UVMHIST_LOG(maphist, "...pdep %#jx flags %#jx",
 560 (uintptr_t)pdep, flags, 0, 0);
585 561
586 KASSERT(pm == pmap_kernel() || uvm_page_owner_locked_p(pg, true)); 562 KASSERT(pm == pmap_kernel() || uvm_page_owner_locked_p(pg, true));
587 563
588 pve->pv_pmap = pm; 564 pve->pv_pmap = pm;
589 pve->pv_va = va | flags; 565 pve->pv_va = va | flags;
590 pve->pv_ptp = pdep; 566 pve->pv_ptp = pdep;
591 pve->pv_next = md->pvh_list; 567 pve->pv_next = md->pvh_list;
592 md->pvh_list = pve; 568 md->pvh_list = pve;
593} 569}
594 570
595static inline struct pv_entry * 571static inline struct pv_entry *
596pmap_pv_remove(struct vm_page *pg, pmap_t pmap, vaddr_t va) 572pmap_pv_remove(struct vm_page *pg, pmap_t pmap, vaddr_t va)
597{ 573{
 574 UVMHIST_FUNC(__func__);
 575 UVMHIST_CALLARGS(maphist, "pg %#jx pm %#jx va %#jx",
 576 (uintptr_t)pg, (uintptr_t)pmap, va, 0);
 577
598 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 578 struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
599 struct pv_entry **pve, *pv; 579 struct pv_entry **pve, *pv;
600 580
601 KASSERT(pmap == pmap_kernel() || uvm_page_owner_locked_p(pg, true)); 581 KASSERT(pmap == pmap_kernel() || uvm_page_owner_locked_p(pg, true));
602 582
603 for (pv = *(pve = &md->pvh_list); 583 for (pv = *(pve = &md->pvh_list);
604 pv; pv = *(pve = &(*pve)->pv_next)) { 584 pv; pv = *(pve = &(*pve)->pv_next)) {
605 if (pv->pv_pmap == pmap && (pv->pv_va & PV_VAMASK) == va) { 585 if (pv->pv_pmap == pmap && (pv->pv_va & PV_VAMASK) == va) {
606 *pve = pv->pv_next; 586 *pve = pv->pv_next;
607 break; 587 break;
608 } 588 }
609 } 589 }
610 590
@@ -616,80 +596,68 @@ pmap_pv_remove(struct vm_page *pg, pmap_ @@ -616,80 +596,68 @@ pmap_pv_remove(struct vm_page *pg, pmap_
616 } 596 }
617 } 597 }
618 598
619 return (pv); 599 return (pv);
620} 600}
621 601
622#define FIRST_16M atop(16 * 1024 * 1024) 602#define FIRST_16M atop(16 * 1024 * 1024)
623 603
624static void 604static void
625pmap_page_physload(paddr_t spa, paddr_t epa) 605pmap_page_physload(paddr_t spa, paddr_t epa)
626{ 606{
627 607
628 if (spa < FIRST_16M && epa <= FIRST_16M) { 608 if (spa < FIRST_16M && epa <= FIRST_16M) {
629 DPRINTF(PDB_INIT, ("%s: phys segment 0x%05lx 0x%05lx\n", 
630 __func__, spa, epa)); 
631 
632 uvm_page_physload(spa, epa, spa, epa, VM_FREELIST_ISADMA); 609 uvm_page_physload(spa, epa, spa, epa, VM_FREELIST_ISADMA);
633 } else if (spa < FIRST_16M && epa > FIRST_16M) { 610 } else if (spa < FIRST_16M && epa > FIRST_16M) {
634 DPRINTF(PDB_INIT, ("%s: phys segment 0x%05lx 0x%05lx\n", 
635 __func__, spa, FIRST_16M)); 
636 
637 uvm_page_physload(spa, FIRST_16M, spa, FIRST_16M, 611 uvm_page_physload(spa, FIRST_16M, spa, FIRST_16M,
638 VM_FREELIST_ISADMA); 612 VM_FREELIST_ISADMA);
639 
640 DPRINTF(PDB_INIT, ("%s: phys segment 0x%05lx 0x%05lx\n", 
641 __func__, FIRST_16M, epa)); 
642 
643 uvm_page_physload(FIRST_16M, epa, FIRST_16M, epa, 613 uvm_page_physload(FIRST_16M, epa, FIRST_16M, epa,
644 VM_FREELIST_DEFAULT); 614 VM_FREELIST_DEFAULT);
645 } else { 615 } else {
646 DPRINTF(PDB_INIT, ("%s: phys segment 0x%05lx 0x%05lx\n", 
647 __func__, spa, epa)); 
648 
649 uvm_page_physload(spa, epa, spa, epa, VM_FREELIST_DEFAULT); 616 uvm_page_physload(spa, epa, spa, epa, VM_FREELIST_DEFAULT);
650 } 617 }
651 618
652 availphysmem += epa - spa; 619 availphysmem += epa - spa;
653} 620}
654 621
655/* 622/*
656 * Bootstrap the system enough to run with virtual memory. 623 * Bootstrap the system enough to run with virtual memory.
657 * Map the kernel's code, data and bss, and allocate the system page table. 624 * Map the kernel's code, data and bss, and allocate the system page table.
658 * Called with mapping OFF. 625 * Called with mapping OFF.
659 * 626 *
660 * Parameters: 627 * Parameters:
661 * vstart PA of first available physical page 628 * vstart PA of first available physical page
662 */ 629 */
663void 630void
664pmap_bootstrap(vaddr_t vstart) 631pmap_bootstrap(vaddr_t vstart)
665{ 632{
 633 UVMHIST_FUNC(__func__);
 634 UVMHIST_CALLED(maphist);
 635
666 vaddr_t va, addr; 636 vaddr_t va, addr;
667 vsize_t size; 637 vsize_t size;
668 extern paddr_t hppa_vtop; 638 extern paddr_t hppa_vtop;
669 pmap_t kpm; 639 pmap_t kpm;
670 int npdes, nkpdes; 640 int npdes, nkpdes;
671 extern int resvphysmem; 641 extern int resvphysmem;
672 vsize_t btlb_entry_min, btlb_entry_max, btlb_entry_got; 642 vsize_t btlb_entry_min, btlb_entry_max, btlb_entry_got;
673 paddr_t ksrx, kerx, ksro, kero, ksrw, kerw; 643 paddr_t ksrx, kerx, ksro, kero, ksrw, kerw;
674 extern int usebtlb; 644 extern int usebtlb;
675 645
676 /* Provided by the linker script */ 646 /* Provided by the linker script */
677 extern int kernel_text, etext; 647 extern int kernel_text, etext;
678 extern int __rodata_start, __rodata_end; 648 extern int __rodata_start, __rodata_end;
679 extern int __data_start; 649 extern int __data_start;
680 650
681 DPRINTF(PDB_FOLLOW|PDB_INIT, ("%s(0x%lx)\n", __func__, vstart)); 
682 
683 uvm_md_init(); 651 uvm_md_init();
684 652
685 hppa_prot[UVM_PROT_NONE] = TLB_AR_NA; 653 hppa_prot[UVM_PROT_NONE] = TLB_AR_NA;
686 hppa_prot[UVM_PROT_READ] = TLB_AR_R; 654 hppa_prot[UVM_PROT_READ] = TLB_AR_R;
687 hppa_prot[UVM_PROT_WRITE] = TLB_AR_RW; 655 hppa_prot[UVM_PROT_WRITE] = TLB_AR_RW;
688 hppa_prot[UVM_PROT_RW] = TLB_AR_RW; 656 hppa_prot[UVM_PROT_RW] = TLB_AR_RW;
689 hppa_prot[UVM_PROT_EXEC] = TLB_AR_RX; 657 hppa_prot[UVM_PROT_EXEC] = TLB_AR_RX;
690 hppa_prot[UVM_PROT_RX] = TLB_AR_RX; 658 hppa_prot[UVM_PROT_RX] = TLB_AR_RX;
691 hppa_prot[UVM_PROT_WX] = TLB_AR_RWX; 659 hppa_prot[UVM_PROT_WX] = TLB_AR_RWX;
692 hppa_prot[UVM_PROT_RWX] = TLB_AR_RWX; 660 hppa_prot[UVM_PROT_RWX] = TLB_AR_RWX;
693 661
694 /* 662 /*
695 * Initialize kernel pmap 663 * Initialize kernel pmap
@@ -709,28 +677,26 @@ pmap_bootstrap(vaddr_t vstart) @@ -709,28 +677,26 @@ pmap_bootstrap(vaddr_t vstart)
709 677
710 memset((void *)addr, 0, PAGE_SIZE); 678 memset((void *)addr, 0, PAGE_SIZE);
711 fdcache(HPPA_SID_KERNEL, addr, PAGE_SIZE); 679 fdcache(HPPA_SID_KERNEL, addr, PAGE_SIZE);
712 addr += PAGE_SIZE; 680 addr += PAGE_SIZE;
713 681
714 /* 682 /*
715 * Allocate various tables and structures. 683 * Allocate various tables and structures.
716 */ 684 */
717 mtctl(addr, CR_VTOP); 685 mtctl(addr, CR_VTOP);
718 hppa_vtop = addr; 686 hppa_vtop = addr;
719 size = round_page((hppa_sid_max + 1) * 4); 687 size = round_page((hppa_sid_max + 1) * 4);
720 memset((void *)addr, 0, size); 688 memset((void *)addr, 0, size);
721 fdcache(HPPA_SID_KERNEL, addr, size); 689 fdcache(HPPA_SID_KERNEL, addr, size);
722 DPRINTF(PDB_INIT, ("%s: vtop 0x%lx @ 0x%lx\n", __func__, size, 
723 addr)); 
724 690
725 addr += size; 691 addr += size;
726 pmap_sdir_set(HPPA_SID_KERNEL, kpm->pm_pdir); 692 pmap_sdir_set(HPPA_SID_KERNEL, kpm->pm_pdir);
727 693
728 /* 694 /*
729 * cpuid() found out how big the HPT should be, so align addr to 695 * cpuid() found out how big the HPT should be, so align addr to
730 * what will be its beginning. We don't waste the pages skipped 696 * what will be its beginning. We don't waste the pages skipped
731 * for the alignment. 697 * for the alignment.
732 */ 698 */
733#ifdef USE_HPT 699#ifdef USE_HPT
734 if (pmap_hptsize) { 700 if (pmap_hptsize) {
735 struct hpt_entry *hptp; 701 struct hpt_entry *hptp;
736 int i, error; 702 int i, error;
@@ -739,65 +705,66 @@ pmap_bootstrap(vaddr_t vstart) @@ -739,65 +705,66 @@ pmap_bootstrap(vaddr_t vstart)
739 addr += pmap_hptsize; 705 addr += pmap_hptsize;
740 addr &= ~(pmap_hptsize - 1); 706 addr &= ~(pmap_hptsize - 1);
741 707
742 memset((void *)addr, 0, pmap_hptsize); 708 memset((void *)addr, 0, pmap_hptsize);
743 hptp = (struct hpt_entry *)addr; 709 hptp = (struct hpt_entry *)addr;
744 for (i = pmap_hptsize / sizeof(struct hpt_entry); i--; ) { 710 for (i = pmap_hptsize / sizeof(struct hpt_entry); i--; ) {
745 hptp[i].hpt_valid = 0; 711 hptp[i].hpt_valid = 0;
746 hptp[i].hpt_space = 0xffff; 712 hptp[i].hpt_space = 0xffff;
747 hptp[i].hpt_vpn = 0; 713 hptp[i].hpt_vpn = 0;
748 } 714 }
749 pmap_hpt = addr; 715 pmap_hpt = addr;
750 addr += pmap_hptsize; 716 addr += pmap_hptsize;
751 717
752 DPRINTF(PDB_INIT, ("%s: hpt_table 0x%x @ 0x%lx\n", __func__, 718 UVMHIST_LOG(maphist, "hpt_table %#jx @ %#jx\n",
753 pmap_hptsize, addr)); 719 pmap_hptsize, addr, 0, 0);
754 720
755 if ((error = (cpu_hpt_init)(pmap_hpt, pmap_hptsize)) < 0) { 721 if ((error = (cpu_hpt_init)(pmap_hpt, pmap_hptsize)) < 0) {
756 printf("WARNING: HPT init error %d -- DISABLED\n", 722 printf("WARNING: HPT init error %d -- DISABLED\n",
757 error); 723 error);
758 pmap_hpt = 0; 724 pmap_hpt = 0;
759 } else 725 } else {
760 DPRINTF(PDB_INIT, 726 UVMHIST_LOG(maphist,
761 ("%s: HPT installed for %ld entries @ 0x%lx\n", 727 "HPT installed for %jd entries @ %#jx",
762 __func__, pmap_hptsize / sizeof(struct hpt_entry), 728 pmap_hptsize / sizeof(struct hpt_entry), addr, 0,
763 addr)); 729 0);
 730 }
764 } 731 }
765#endif 732#endif
766 733
767 /* Setup vtop in lwp0 trapframe. */ 734 /* Setup vtop in lwp0 trapframe. */
768 lwp0.l_md.md_regs->tf_vtop = hppa_vtop; 735 lwp0.l_md.md_regs->tf_vtop = hppa_vtop;
769 736
770 /* Pre-allocate PDEs for kernel virtual */ 737 /* Pre-allocate PDEs for kernel virtual */
771 nkpdes = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / PDE_SIZE; 738 nkpdes = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / PDE_SIZE;
772 /* ... and io space too */ 739 /* ... and io space too */
773 nkpdes += HPPA_IOLEN / PDE_SIZE; 740 nkpdes += HPPA_IOLEN / PDE_SIZE;
774 /* ... and all physmem (VA == PA) */ 741 /* ... and all physmem (VA == PA) */
775 npdes = nkpdes + (physmem + atop(PDE_SIZE) - 1) / atop(PDE_SIZE); 742 npdes = nkpdes + (physmem + atop(PDE_SIZE) - 1) / atop(PDE_SIZE);
776 743
777 DPRINTF(PDB_INIT, ("%s: npdes %d\n", __func__, npdes)); 744 UVMHIST_LOG(maphist, "npdes %jd", npdes, 0, 0, 0);
778 745
779 /* map the pdes */ 746 /* map the pdes */
780 for (va = 0; npdes--; va += PDE_SIZE, addr += PAGE_SIZE) { 747 for (va = 0; npdes--; va += PDE_SIZE, addr += PAGE_SIZE) {
781 /* last nkpdes are for the kernel virtual */ 748 /* last nkpdes are for the kernel virtual */
782 if (npdes == nkpdes - 1) 749 if (npdes == nkpdes - 1)
783 va = SYSCALLGATE; 750 va = SYSCALLGATE;
784 if (npdes == HPPA_IOLEN / PDE_SIZE - 1) 751 if (npdes == HPPA_IOLEN / PDE_SIZE - 1)
785 va = HPPA_IOBEGIN; 752 va = HPPA_IOBEGIN;
786 /* now map the pde for the physmem */ 753 /* now map the pde for the physmem */
787 memset((void *)addr, 0, PAGE_SIZE); 754 memset((void *)addr, 0, PAGE_SIZE);
788 DPRINTF(PDB_INIT|PDB_VP, 755
789 ("%s: pde premap 0x%08lx 0x%08lx\n", __func__, va, 756 UVMHIST_LOG(maphist, "pde premap 0x%08jx 0x%08jx", va,
790 addr)); 757 addr, 0, 0);
791 pmap_pde_set(kpm, va, addr); 758 pmap_pde_set(kpm, va, addr);
792 kpm->pm_stats.resident_count++; /* count PTP as resident */ 759 kpm->pm_stats.resident_count++; /* count PTP as resident */
793 } 760 }
794 761
795 /* 762 /*
796 * At this point we've finished reserving memory for the kernel. 763 * At this point we've finished reserving memory for the kernel.
797 */ 764 */
798 /* XXXNH */ 765 /* XXXNH */
799 resvphysmem = atop(addr); 766 resvphysmem = atop(addr);
800 767
801 ksrx = (paddr_t) &kernel_text; 768 ksrx = (paddr_t) &kernel_text;
802 kerx = (paddr_t) &etext; 769 kerx = (paddr_t) &etext;
803 ksro = (paddr_t) &__rodata_start; 770 ksro = (paddr_t) &__rodata_start;
@@ -846,29 +813,29 @@ pmap_bootstrap(vaddr_t vstart) @@ -846,29 +813,29 @@ pmap_bootstrap(vaddr_t vstart)
846 int btlb_entry_vm_prot[BTLB_SET_SIZE]; 813 int btlb_entry_vm_prot[BTLB_SET_SIZE];
847 int btlb_i; 814 int btlb_i;
848 int btlb_j; 815 int btlb_j;
849 816
850 /* 817 /*
851 * Now make BTLB entries to direct-map the kernel text 818 * Now make BTLB entries to direct-map the kernel text
852 * read- and execute-only as much as possible. Note that 819 * read- and execute-only as much as possible. Note that
853 * if the data segment isn't nicely aligned, the last 820 * if the data segment isn't nicely aligned, the last
854 * BTLB entry for the kernel text may also cover some of 821 * BTLB entry for the kernel text may also cover some of
855 * the data segment, meaning it will have to allow writing. 822 * the data segment, meaning it will have to allow writing.
856 */ 823 */
857 addr = ksrx; 824 addr = ksrx;
858 825
859 DPRINTF(PDB_INIT, 826 UVMHIST_LOG(maphist,
860 ("%s: BTLB mapping text and rodata @ %p - %p\n", __func__, 827 "BTLB mapping text and rodata @ %#jx - %#jx", addr, kero,
861 (void *)addr, (void *)kero)); 828 0, 0);
862 829
863 btlb_j = 0; 830 btlb_j = 0;
864 while (addr < (vaddr_t) kero) { 831 while (addr < (vaddr_t) kero) {
865 832
866 /* Set up the next BTLB entry. */ 833 /* Set up the next BTLB entry. */
867 KASSERT(btlb_j < BTLB_SET_SIZE); 834 KASSERT(btlb_j < BTLB_SET_SIZE);
868 btlb_entry_start[btlb_j] = addr; 835 btlb_entry_start[btlb_j] = addr;
869 btlb_entry_size[btlb_j] = btlb_entry_min; 836 btlb_entry_size[btlb_j] = btlb_entry_min;
870 btlb_entry_vm_prot[btlb_j] = 837 btlb_entry_vm_prot[btlb_j] =
871 VM_PROT_READ | VM_PROT_EXECUTE; 838 VM_PROT_READ | VM_PROT_EXECUTE;
872 if (addr + btlb_entry_min > kero) 839 if (addr + btlb_entry_min > kero)
873 btlb_entry_vm_prot[btlb_j] |= VM_PROT_WRITE; 840 btlb_entry_vm_prot[btlb_j] |= VM_PROT_WRITE;
874 841
@@ -893,28 +860,28 @@ pmap_bootstrap(vaddr_t vstart) @@ -893,28 +860,28 @@ pmap_bootstrap(vaddr_t vstart)
893 /* 860 /*
894 * Now make BTLB entries to direct-map the kernel data, 861 * Now make BTLB entries to direct-map the kernel data,
895 * bss, and all of the preallocated space read-write. 862 * bss, and all of the preallocated space read-write.
896 * 863 *
897 * Note that, unlike above, we're not concerned with 864 * Note that, unlike above, we're not concerned with
898 * making these BTLB entries such that they finish as 865 * making these BTLB entries such that they finish as
899 * close as possible to the end of the space we need 866 * close as possible to the end of the space we need
900 * them to map. Instead, to minimize the number of BTLB 867 * them to map. Instead, to minimize the number of BTLB
901 * entries we need, we make them as large as possible. 868 * entries we need, we make them as large as possible.
902 * The only thing this wastes is kernel virtual space, 869 * The only thing this wastes is kernel virtual space,
903 * which is plentiful. 870 * which is plentiful.
904 */ 871 */
905 872
906 DPRINTF(PDB_INIT, ("%s: mapping data, bss, etc @ %p - %p\n", 873 UVMHIST_LOG(maphist, "mapping data, bss, etc @ %#jx - %#jx",
907 __func__, (void *)addr, (void *)kerw)); 874 addr, kerw, 0, 0);
908 875
909 while (addr < kerw) { 876 while (addr < kerw) {
910 877
911 /* Make the next BTLB entry. */ 878 /* Make the next BTLB entry. */
912 KASSERT(btlb_j < BTLB_SET_SIZE); 879 KASSERT(btlb_j < BTLB_SET_SIZE);
913 size = btlb_entry_min; 880 size = btlb_entry_min;
914 while ((addr + size) < kerw && 881 while ((addr + size) < kerw &&
915 (size << 1) < btlb_entry_max && 882 (size << 1) < btlb_entry_max &&
916 !(addr & ((size << 1) - 1))) 883 !(addr & ((size << 1) - 1)))
917 size <<= 1; 884 size <<= 1;
918 btlb_entry_start[btlb_j] = addr; 885 btlb_entry_start[btlb_j] = addr;
919 btlb_entry_size[btlb_j] = size; 886 btlb_entry_size[btlb_j] = size;
920 btlb_entry_vm_prot[btlb_j] = 887 btlb_entry_vm_prot[btlb_j] =
@@ -974,46 +941,45 @@ pmap_bootstrap(vaddr_t vstart) @@ -974,46 +941,45 @@ pmap_bootstrap(vaddr_t vstart)
974 prot = UVM_PROT_RX; 941 prot = UVM_PROT_RX;
975 else if (va >= ksrx && va < kerx) 942 else if (va >= ksrx && va < kerx)
976 prot = UVM_PROT_RX; 943 prot = UVM_PROT_RX;
977 else if (va >= ksro && va < kero) 944 else if (va >= ksro && va < kero)
978 prot = UVM_PROT_R; 945 prot = UVM_PROT_R;
979#ifdef DIAGNOSTIC 946#ifdef DIAGNOSTIC
980 else if (va == uvm_lwp_getuarea(&lwp0) + USPACE - PAGE_SIZE) 947 else if (va == uvm_lwp_getuarea(&lwp0) + USPACE - PAGE_SIZE)
981 prot = UVM_PROT_NONE; 948 prot = UVM_PROT_NONE;
982#endif 949#endif
983 pmap_kenter_pa(va, va, prot, 0); 950 pmap_kenter_pa(va, va, prot, 0);
984 } 951 }
985 952
986 /* XXXNH update */ 953 /* XXXNH update */
987 DPRINTF(PDB_INIT, ("%s: mapped 0x%lx - 0x%lx\n", __func__, ksro, 954 UVMHIST_LOG(maphist, "mapped %#jx - %#jx", ksro, kero, 0, 0);
988 kero)); 955 UVMHIST_LOG(maphist, "mapped %#jx - %#jx", ksrw, kerw, 0, 0);
989 DPRINTF(PDB_INIT, ("%s: mapped 0x%lx - 0x%lx\n", __func__, ksrw, 
990 kerw)); 
991 956
992} 957}
993 958
994/* 959/*
995 * Finishes the initialization of the pmap module. 960 * Finishes the initialization of the pmap module.
996 * This procedure is called from uvm_init() in uvm/uvm_init.c 961 * This procedure is called from uvm_init() in uvm/uvm_init.c
997 * to initialize any remaining data structures that the pmap module 962 * to initialize any remaining data structures that the pmap module
998 * needs to map virtual memory (VM is already ON). 963 * needs to map virtual memory (VM is already ON).
999 */ 964 */
1000void 965void
1001pmap_init(void) 966pmap_init(void)
1002{ 967{
1003 extern void gateway_page(void); 968 extern void gateway_page(void);
1004 volatile pt_entry_t *pde; 969 volatile pt_entry_t *pde;
1005 970
1006 DPRINTF(PDB_FOLLOW|PDB_INIT, ("%s()\n", __func__)); 971 UVMHIST_FUNC(__func__)
 972 UVMHIST_CALLED(maphist);
1007 973
1008 sid_counter = HPPA_SID_KERNEL; 974 sid_counter = HPPA_SID_KERNEL;
1009 975
1010 pool_init(&pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl", 976 pool_init(&pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
1011 &pool_allocator_nointr, IPL_NONE); 977 &pool_allocator_nointr, IPL_NONE);
1012 pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pmappv", 978 pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pmappv",
1013 &pool_allocator_nointr, IPL_NONE); 979 &pool_allocator_nointr, IPL_NONE);
1014 980
1015 pool_setlowat(&pmap_pv_pool, pmap_pvlowat); 981 pool_setlowat(&pmap_pv_pool, pmap_pvlowat);
1016 pool_sethiwat(&pmap_pv_pool, pmap_pvlowat * 32); 982 pool_sethiwat(&pmap_pv_pool, pmap_pvlowat * 32);
1017 983
1018 /* 984 /*
1019 * map SysCall gateway page once for everybody 985 * map SysCall gateway page once for everybody
@@ -1021,55 +987,58 @@ pmap_init(void) @@ -1021,55 +987,58 @@ pmap_init(void)
1021 * if we have any at SYSCALLGATE address (; 987 * if we have any at SYSCALLGATE address (;
1022 * 988 *
1023 * no spls since no interrupts 989 * no spls since no interrupts
1024 */ 990 */
1025 if (!(pde = pmap_pde_get(pmap_kernel()->pm_pdir, SYSCALLGATE)) && 991 if (!(pde = pmap_pde_get(pmap_kernel()->pm_pdir, SYSCALLGATE)) &&
1026 !(pde = pmap_pde_alloc(pmap_kernel(), SYSCALLGATE, NULL))) 992 !(pde = pmap_pde_alloc(pmap_kernel(), SYSCALLGATE, NULL)))
1027 panic("pmap_init: cannot allocate pde"); 993 panic("pmap_init: cannot allocate pde");
1028 994
1029 pmap_pte_set(pde, SYSCALLGATE, (paddr_t)&gateway_page | 995 pmap_pte_set(pde, SYSCALLGATE, (paddr_t)&gateway_page |
1030 PTE_PROT(TLB_GATE_PROT)); 996 PTE_PROT(TLB_GATE_PROT));
1031 997
1032 pmap_initialized = true; 998 pmap_initialized = true;
1033 999
1034 DPRINTF(PDB_FOLLOW|PDB_INIT, ("%s(): done\n", __func__)); 1000 UVMHIST_LOG(maphist, "<--- done", 0, 0, 0, 0);
1035} 1001}
1036 1002
1037/* 1003/*
1038 * How much virtual space does this kernel have? 1004 * How much virtual space does this kernel have?
1039 */ 1005 */
1040void 1006void
1041pmap_virtual_space(vaddr_t *startp, vaddr_t *endp) 1007pmap_virtual_space(vaddr_t *startp, vaddr_t *endp)
1042{ 1008{
1043 1009
1044 *startp = SYSCALLGATE + PAGE_SIZE; 1010 *startp = SYSCALLGATE + PAGE_SIZE;
1045 *endp = VM_MAX_KERNEL_ADDRESS; 1011 *endp = VM_MAX_KERNEL_ADDRESS;
1046} 1012}
1047 1013
1048/* 1014/*
1049 * pmap_create() 1015 * pmap_create()
1050 * 1016 *
1051 * Create and return a physical map. 1017 * Create and return a physical map.
1052 * The map is an actual physical map, and may be referenced by the hardware. 1018 * The map is an actual physical map, and may be referenced by the hardware.
1053 */ 1019 */
1054pmap_t 1020pmap_t
1055pmap_create(void) 1021pmap_create(void)
1056{ 1022{
1057 pmap_t pmap; 1023 pmap_t pmap;
1058 pa_space_t space; 1024 pa_space_t space;
1059 1025
 1026 UVMHIST_FUNC(__func__)
 1027 UVMHIST_CALLED(maphist);
 1028
1060 pmap = pool_get(&pmap_pool, PR_WAITOK); 1029 pmap = pool_get(&pmap_pool, PR_WAITOK);
1061 1030
1062 DPRINTF(PDB_FOLLOW|PDB_PMAP, ("%s: pmap = %p\n", __func__, pmap)); 1031 UVMHIST_LOG(maphist, "pm %#jx", (uintptr_t)pmap, 0, 0, 0);
1063 1032
1064 rw_init(&pmap->pm_obj_lock); 1033 rw_init(&pmap->pm_obj_lock);
1065 uvm_obj_init(&pmap->pm_obj, &pmap_pager, false, 1); 1034 uvm_obj_init(&pmap->pm_obj, &pmap_pager, false, 1);
1066 uvm_obj_setlock(&pmap->pm_obj, &pmap->pm_obj_lock); 1035 uvm_obj_setlock(&pmap->pm_obj, &pmap->pm_obj_lock);
1067 1036
1068 mutex_enter(&pmaps_lock); 1037 mutex_enter(&pmaps_lock);
1069 1038
1070 /* 1039 /*
1071 * Allocate space IDs for the pmap; we get the protection ID from this. 1040 * Allocate space IDs for the pmap; we get the protection ID from this.
1072 * If all are allocated, there is nothing we can do. 1041 * If all are allocated, there is nothing we can do.
1073 */ 1042 */
1074 /* XXXNH can't this loop forever??? */ 1043 /* XXXNH can't this loop forever??? */
1075 for (space = sid_counter; pmap_sdir_get(space); 1044 for (space = sid_counter; pmap_sdir_get(space);
@@ -1080,122 +1049,122 @@ pmap_create(void) @@ -1080,122 +1049,122 @@ pmap_create(void)
1080 panic("pmap_create: no pages"); 1049 panic("pmap_create: no pages");
1081 pmap->pm_ptphint = NULL; 1050 pmap->pm_ptphint = NULL;
1082 pmap->pm_pdir = (uint32_t *)VM_PAGE_TO_PHYS(pmap->pm_pdir_pg); 1051 pmap->pm_pdir = (uint32_t *)VM_PAGE_TO_PHYS(pmap->pm_pdir_pg);
1083 pmap_sdir_set(space, pmap->pm_pdir); 1052 pmap_sdir_set(space, pmap->pm_pdir);
1084 1053
1085 pmap->pm_space = space; 1054 pmap->pm_space = space;
1086 pmap->pm_pid = (space + 1) << 1; 1055 pmap->pm_pid = (space + 1) << 1;
1087 1056
1088 pmap->pm_stats.resident_count = 1; 1057 pmap->pm_stats.resident_count = 1;
1089 pmap->pm_stats.wired_count = 0; 1058 pmap->pm_stats.wired_count = 0;
1090 1059
1091 mutex_exit(&pmaps_lock); 1060 mutex_exit(&pmaps_lock);
1092 1061
1093 DPRINTF(PDB_FOLLOW|PDB_PMAP, ("%s: pm = %p, space = %d, pid = %d\n", 1062 UVMHIST_LOG(maphist, "pm %#jx, space %jd, pid %jd",
1094 __func__, pmap, space, pmap->pm_pid)); 1063 (uintptr_t)pmap, space, pmap->pm_pid, 0);
1095 1064
1096 return (pmap); 1065 return (pmap);
1097} 1066}
1098 1067
1099/* 1068/*
1100 * pmap_destroy(pmap) 1069 * pmap_destroy(pmap)
1101 * Gives up a reference to the specified pmap. When the reference count 1070 * Gives up a reference to the specified pmap. When the reference count
1102 * reaches zero the pmap structure is added to the pmap free list. 1071 * reaches zero the pmap structure is added to the pmap free list.
1103 * Should only be called if the map contains no valid mappings. 1072 * Should only be called if the map contains no valid mappings.
1104 */ 1073 */
1105void 1074void
1106pmap_destroy(pmap_t pmap) 1075pmap_destroy(pmap_t pmap)
1107{ 1076{
 1077 UVMHIST_FUNC(__func__)
 1078 UVMHIST_CALLARGS(maphist, "pm %#jx", (uintptr_t)pmap, 0, 0, 0);
 1079
1108#ifdef DIAGNOSTIC 1080#ifdef DIAGNOSTIC
1109 struct uvm_page_array a; 1081 struct uvm_page_array a;
1110 struct vm_page *pg; 1082 struct vm_page *pg;
1111 off_t off; 1083 off_t off;
1112#endif 1084#endif
1113 int refs; 1085 int refs;
1114 1086
1115 DPRINTF(PDB_FOLLOW|PDB_PMAP, ("%s(%p)\n", __func__, pmap)); 
1116 
1117 rw_enter(pmap->pm_lock, RW_WRITER); 1087 rw_enter(pmap->pm_lock, RW_WRITER);
1118 refs = --pmap->pm_obj.uo_refs; 1088 refs = --pmap->pm_obj.uo_refs;
1119 rw_exit(pmap->pm_lock); 1089 rw_exit(pmap->pm_lock);
1120 1090
1121 if (refs > 0) 1091 if (refs > 0)
1122 return; 1092 return;
1123 1093
1124#ifdef DIAGNOSTIC 1094#ifdef DIAGNOSTIC
1125 uvm_page_array_init(&a); 1095 uvm_page_array_init(&a);
1126 off = 0; 1096 off = 0;
1127 rw_enter(pmap->pm_lock, RW_WRITER); 1097 rw_enter(pmap->pm_lock, RW_WRITER);
1128 while ((pg = uvm_page_array_fill_and_peek(&a, &pmap->pm_obj, off, 0, 0)) 1098 while ((pg = uvm_page_array_fill_and_peek(&a, &pmap->pm_obj, off, 0, 0))
1129 != NULL) { 1099 != NULL) {
1130 pt_entry_t *pde, *epde; 1100 pt_entry_t *pde, *epde;
1131 struct vm_page *spg; 1101 struct vm_page *spg;
1132 struct pv_entry *pv, *npv; 1102 struct pv_entry *pv, *npv;
1133 paddr_t pa; 1103 paddr_t pa;
1134 1104
1135 off = pg->offset + PAGE_SIZE; 1105 off = pg->offset + PAGE_SIZE;
1136 uvm_page_array_advance(&a); 1106 uvm_page_array_advance(&a);
1137 KASSERT(pg != pmap->pm_pdir_pg); 1107 KASSERT(pg != pmap->pm_pdir_pg);
1138 pa = VM_PAGE_TO_PHYS(pg); 1108 pa = VM_PAGE_TO_PHYS(pg);
1139 1109
1140 DPRINTF(PDB_FOLLOW, ("%s(%p): stray ptp " 1110 UVMHIST_LOG(maphist, "pm %#jx: stray ptp %#jx w/ %jd entries:",
1141 "0x%lx w/ %d ents:", __func__, pmap, pa, 1111 (uintptr_t)pmap, pa, pg->wire_count - 1, 0);
1142 pg->wire_count - 1)); 
1143 1112
1144 pde = (pt_entry_t *)pa; 1113 pde = (pt_entry_t *)pa;
1145 epde = (pt_entry_t *)(pa + PAGE_SIZE); 1114 epde = (pt_entry_t *)(pa + PAGE_SIZE);
1146 for (; pde < epde; pde++) { 1115 for (; pde < epde; pde++) {
1147 if (*pde == 0) 1116 if (*pde == 0)
1148 continue; 1117 continue;
1149 1118
1150 spg = PHYS_TO_VM_PAGE(PTE_PAGE(*pde)); 1119 spg = PHYS_TO_VM_PAGE(PTE_PAGE(*pde));
1151 if (spg == NULL) 1120 if (spg == NULL)
1152 continue; 1121 continue;
1153 1122
1154 struct vm_page_md * const md = VM_PAGE_TO_MD(spg); 1123 struct vm_page_md * const md = VM_PAGE_TO_MD(spg);
1155 for (pv = md->pvh_list; pv != NULL; pv = npv) { 1124 for (pv = md->pvh_list; pv != NULL; pv = npv) {
1156 npv = pv->pv_next; 1125 npv = pv->pv_next;
1157 if (pv->pv_pmap != pmap) 1126 if (pv->pv_pmap != pmap)
1158 continue; 1127 continue;
1159 1128
1160 DPRINTF(PDB_FOLLOW, (" 0x%lx", pv->pv_va)); 1129 UVMHIST_LOG(maphist, " %#jx", pv->pv_va, 0, 0,
 1130 0);
1161 1131
1162 pmap_remove(pmap, pv->pv_va & PV_VAMASK, 1132 pmap_remove(pmap, pv->pv_va & PV_VAMASK,
1163 (pv->pv_va & PV_VAMASK) + PAGE_SIZE); 1133 (pv->pv_va & PV_VAMASK) + PAGE_SIZE);
1164 } 1134 }
1165 } 1135 }
1166 DPRINTF(PDB_FOLLOW, ("\n")); 
1167 } 1136 }
1168 rw_exit(pmap->pm_lock); 1137 rw_exit(pmap->pm_lock);
1169 uvm_page_array_fini(&a); 1138 uvm_page_array_fini(&a);
1170#endif 1139#endif
1171 pmap_sdir_set(pmap->pm_space, 0); 1140 pmap_sdir_set(pmap->pm_space, 0);
1172 rw_enter(pmap->pm_lock, RW_WRITER); 1141 rw_enter(pmap->pm_lock, RW_WRITER);
1173 pmap_pagefree(pmap->pm_pdir_pg); 1142 pmap_pagefree(pmap->pm_pdir_pg);
1174 rw_exit(pmap->pm_lock); 1143 rw_exit(pmap->pm_lock);
1175 1144
1176 uvm_obj_destroy(&pmap->pm_obj, false); 1145 uvm_obj_destroy(&pmap->pm_obj, false);
1177 rw_destroy(&pmap->pm_obj_lock); 1146 rw_destroy(&pmap->pm_obj_lock);
1178 pool_put(&pmap_pool, pmap); 1147 pool_put(&pmap_pool, pmap);
1179} 1148}
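The DIAGNOSTIC block in pmap_destroy() above walks every page still attached
to the pmap's uvm_object to report stray page-table pages before the pmap is
freed.  A minimal sketch of that uvm_page_array iteration idiom, assuming
<uvm/uvm.h> is included; walk_object_pages() and visit_page() are hypothetical
names, not part of pmap.c, and the object lock must be held across the walk
just as pmap_destroy() holds pm_lock:

	/* Sketch only; visit_page() is a hypothetical per-page callback. */
	static void
	walk_object_pages(struct uvm_object *obj)
	{
		struct uvm_page_array a;
		struct vm_page *pg;
		off_t off = 0;

		uvm_page_array_init(&a);
		while ((pg = uvm_page_array_fill_and_peek(&a, obj, off,
		    0, 0)) != NULL) {
			/* remember the resume point before moving on */
			off = pg->offset + PAGE_SIZE;
			uvm_page_array_advance(&a);
			visit_page(pg);
		}
		uvm_page_array_fini(&a);
	}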
1180 1149
1181/* 1150/*
1182 * Add a reference to the specified pmap. 1151 * Add a reference to the specified pmap.
1183 */ 1152 */
1184void 1153void
1185pmap_reference(pmap_t pmap) 1154pmap_reference(pmap_t pmap)
1186{ 1155{
1187 1156 UVMHIST_FUNC(__func__);
1188 DPRINTF(PDB_FOLLOW|PDB_PMAP, ("%s(%p)\n", __func__, pmap)); 1157 UVMHIST_CALLARGS(maphist, "pm %#jx", (uintptr_t)pmap, 0, 0, 0);
1189 1158
1190 rw_enter(pmap->pm_lock, RW_WRITER); 1159 rw_enter(pmap->pm_lock, RW_WRITER);
1191 pmap->pm_obj.uo_refs++; 1160 pmap->pm_obj.uo_refs++;
1192 rw_exit(pmap->pm_lock); 1161 rw_exit(pmap->pm_lock);
1193} 1162}
1194 1163
1195 1164
1196void 1165void
1197pmap_syncicache_page(struct vm_page *pg, pmap_t pm, vaddr_t va) 1166pmap_syncicache_page(struct vm_page *pg, pmap_t pm, vaddr_t va)
1198{ 1167{
1199 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 1168 struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1200 struct pv_entry *pve = md->pvh_list; 1169 struct pv_entry *pve = md->pvh_list;
1201 1170
@@ -1223,72 +1192,70 @@ pmap_syncicache_page(struct vm_page *pg, @@ -1223,72 +1192,70 @@ pmap_syncicache_page(struct vm_page *pg,
1223 * address (pa) in the pmap with the protection requested. If the 1192 * address (pa) in the pmap with the protection requested. If the
1224 * translation is wired then we can not allow a page fault to occur 1193 * translation is wired then we can not allow a page fault to occur
1225 * for this mapping. 1194 * for this mapping.
1226 */ 1195 */
1227int 1196int
1228pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 1197pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1229{ 1198{
1230 volatile pt_entry_t *pde; 1199 volatile pt_entry_t *pde;
1231 pt_entry_t pte; 1200 pt_entry_t pte;
1232 struct vm_page *pg = NULL, *ptp = NULL; 1201 struct vm_page *pg = NULL, *ptp = NULL;
1233 struct pv_entry *pve = NULL; 1202 struct pv_entry *pve = NULL;
1234 bool wired = (flags & PMAP_WIRED) != 0; 1203 bool wired = (flags & PMAP_WIRED) != 0;
1235 1204
1236 DPRINTF(PDB_FOLLOW|PDB_ENTER, 1205 UVMHIST_FUNC(__func__);
1237 ("%s(%p, 0x%lx, 0x%lx, 0x%x, 0x%x)\n", __func__, pmap, va, pa, 1206 UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx pa %#jx prot %#jx",
1238 prot, flags)); 1207 (uintptr_t)pmap, va, pa, prot);
 1208 UVMHIST_LOG(maphist, "...flags %#jx", flags, 0, 0, 0);
1239 1209
1240 PMAP_LOCK(pmap); 1210 PMAP_LOCK(pmap);
1241 1211
1242 if (!(pde = pmap_pde_get(pmap->pm_pdir, va)) && 1212 if (!(pde = pmap_pde_get(pmap->pm_pdir, va)) &&
1243 !(pde = pmap_pde_alloc(pmap, va, &ptp))) { 1213 !(pde = pmap_pde_alloc(pmap, va, &ptp))) {
1244 if (flags & PMAP_CANFAIL) { 1214 if (flags & PMAP_CANFAIL) {
1245 PMAP_UNLOCK(pmap); 1215 PMAP_UNLOCK(pmap);
1246 return (ENOMEM); 1216 return (ENOMEM);
1247 } 1217 }
1248 1218
1249 panic("pmap_enter: cannot allocate pde"); 1219 panic("pmap_enter: cannot allocate pde");
1250 } 1220 }
1251 1221
1252 if (!ptp) 1222 if (!ptp)
1253 ptp = pmap_pde_ptp(pmap, pde); 1223 ptp = pmap_pde_ptp(pmap, pde);
1254 1224
1255 if ((pte = pmap_pte_get(pde, va))) { 1225 if ((pte = pmap_pte_get(pde, va))) {
1256 1226 UVMHIST_LOG(maphist, "remapping %#jx -> %#jx", pte, pa, 0, 0);
1257 DPRINTF(PDB_ENTER, 
1258 ("%s: remapping 0x%x -> 0x%lx\n", __func__, pte, pa)); 
1259 1227
1260 pmap_pte_flush(pmap, va, pte); 1228 pmap_pte_flush(pmap, va, pte);
1261 if (wired && !(pte & PTE_PROT(TLB_WIRED))) 1229 if (wired && !(pte & PTE_PROT(TLB_WIRED)))
1262 pmap->pm_stats.wired_count++; 1230 pmap->pm_stats.wired_count++;
1263 else if (!wired && (pte & PTE_PROT(TLB_WIRED))) 1231 else if (!wired && (pte & PTE_PROT(TLB_WIRED)))
1264 pmap->pm_stats.wired_count--; 1232 pmap->pm_stats.wired_count--;
1265 1233
1266 pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte)); 1234 pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte));
1267 if (PTE_PAGE(pte) == pa) { 1235 if (PTE_PAGE(pte) == pa) {
1268 DPRINTF(PDB_FOLLOW|PDB_ENTER, 1236 UVMHIST_LOG(maphist, "same page", 0, 0, 0, 0);
1269 ("%s: same page\n", __func__)); 
1270 goto enter; 1237 goto enter;
1271 } 1238 }
1272 1239
1273 if (pg != NULL) { 1240 if (pg != NULL) {
1274 pve = pmap_pv_remove(pg, pmap, va); 1241 pve = pmap_pv_remove(pg, pmap, va);
1275 1242
1276 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 1243 struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1277 md->pvh_attrs |= pmap_pvh_attrs(pte); 1244 md->pvh_attrs |= pmap_pvh_attrs(pte);
1278 } 1245 }
1279 } else { 1246 } else {
1280 DPRINTF(PDB_ENTER, ("%s: new mapping 0x%lx -> 0x%lx\n", 1247 UVMHIST_LOG(maphist, "new mapping %#jx -> %#jx",
1281 __func__, va, pa)); 1248 va, pa, 0, 0);
1282 pte = PTE_PROT(TLB_REFTRAP); 1249 pte = PTE_PROT(TLB_REFTRAP);
1283 pmap->pm_stats.resident_count++; 1250 pmap->pm_stats.resident_count++;
1284 if (wired) 1251 if (wired)
1285 pmap->pm_stats.wired_count++; 1252 pmap->pm_stats.wired_count++;
1286 if (ptp) 1253 if (ptp)
1287 ptp->wire_count++; 1254 ptp->wire_count++;
1288 } 1255 }
1289 1256
1290 if (pmap_initialized && (pg = PHYS_TO_VM_PAGE(pa))) { 1257 if (pmap_initialized && (pg = PHYS_TO_VM_PAGE(pa))) {
1291 if (!pve && !(pve = pmap_pv_alloc())) { 1258 if (!pve && !(pve = pmap_pv_alloc())) {
1292 if (flags & PMAP_CANFAIL) { 1259 if (flags & PMAP_CANFAIL) {
1293 PMAP_UNLOCK(pmap); 1260 PMAP_UNLOCK(pmap);
1294 return (ENOMEM); 1261 return (ENOMEM);
@@ -1316,51 +1283,52 @@ enter: @@ -1316,51 +1283,52 @@ enter:
1316 pmap_syncicache_page(pg, pmap, va); 1283 pmap_syncicache_page(pg, pmap, va);
1317 md->pvh_attrs |= PVF_EXEC; 1284 md->pvh_attrs |= PVF_EXEC;
1318 } 1285 }
1319 } 1286 }
1320 1287
1321 if (IS_IOPAGE_P(pa)) 1288 if (IS_IOPAGE_P(pa))
1322 pte |= PTE_PROT(TLB_UNCACHEABLE); 1289 pte |= PTE_PROT(TLB_UNCACHEABLE);
1323 if (wired) 1290 if (wired)
1324 pte |= PTE_PROT(TLB_WIRED); 1291 pte |= PTE_PROT(TLB_WIRED);
1325 pmap_pte_set(pde, va, pte); 1292 pmap_pte_set(pde, va, pte);
1326 1293
1327 PMAP_UNLOCK(pmap); 1294 PMAP_UNLOCK(pmap);
1328 1295
1329 DPRINTF(PDB_FOLLOW|PDB_ENTER, ("%s: leaving\n", __func__)); 1296 UVMHIST_LOG(maphist, "<--- done (0)", 0, 0, 0, 0);
1330 1297
1331 return (0); 1298 return (0);
1332} 1299}
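The conversion in pmap_enter() above shows the pattern repeated throughout
this diff: the PMAPDEBUG DPRINTF() at function entry becomes UVMHIST_FUNC()
plus UVMHIST_CALLARGS(), interior DPRINTF()s become UVMHIST_LOG(), pointers
are cast to uintptr_t, values are formatted with %j-style conversions, and
each record carries exactly four argument slots with unused ones passed as 0.
A hedged before/after sketch; the flags and arguments shown are illustrative
rather than quoted from the source:

	/* before: PMAPDEBUG-style tracing */
	DPRINTF(PDB_FOLLOW|PDB_ENTER,
	    ("%s(%p, 0x%lx)\n", __func__, pmap, va));

	/* after: UVMHIST tracing, four argument slots per record */
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx",
	    (uintptr_t)pmap, va, 0, 0);
	/* ... */
	UVMHIST_LOG(maphist, "<--- done (0)", 0, 0, 0, 0);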
1333 1300
1334/* 1301/*
1335 * pmap_remove(pmap, sva, eva) 1302 * pmap_remove(pmap, sva, eva)
1336 * unmaps all virtual addresses in the virtual address 1303 * unmaps all virtual addresses in the virtual address
1337 * range determined by [sva, eva) and pmap. 1304 * range determined by [sva, eva) and pmap.
1338 * sva and eva must be on machine independent page boundaries and 1305 * sva and eva must be on machine independent page boundaries and
1339 * sva must be less than or equal to eva. 1306 * sva must be less than or equal to eva.
1340 */ 1307 */
1341void 1308void
1342pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva) 1309pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
1343{ 1310{
 1311
 1312 UVMHIST_FUNC(__func__);
 1313 UVMHIST_CALLARGS(maphist, "sva %#jx eva %#jx", sva, eva, 0, 0);
 1314
1344 struct pv_entry *pve; 1315 struct pv_entry *pve;
1345 volatile pt_entry_t *pde = NULL; 1316 volatile pt_entry_t *pde = NULL;
1346 pt_entry_t pte; 1317 pt_entry_t pte;
1347 struct vm_page *pg, *ptp; 1318 struct vm_page *pg, *ptp;
1348 vaddr_t pdemask; 1319 vaddr_t pdemask;
1349 int batch; 1320 int batch;
1350 1321
1351 DPRINTF(PDB_FOLLOW|PDB_REMOVE, 
1352 ("%s(%p, 0x%lx, 0x%lx)\n", __func__, pmap, sva, eva)); 
1353 
1354 PMAP_LOCK(pmap); 1322 PMAP_LOCK(pmap);
1355 1323
1356 for (batch = 0; sva < eva; sva += PAGE_SIZE) { 1324 for (batch = 0; sva < eva; sva += PAGE_SIZE) {
1357 pdemask = sva & PDE_MASK; 1325 pdemask = sva & PDE_MASK;
1358 if (!(pde = pmap_pde_get(pmap->pm_pdir, sva))) { 1326 if (!(pde = pmap_pde_get(pmap->pm_pdir, sva))) {
1359 sva = pdemask + PDE_SIZE - PAGE_SIZE; 1327 sva = pdemask + PDE_SIZE - PAGE_SIZE;
1360 continue; 1328 continue;
1361 } 1329 }
1362 batch = pdemask == sva && sva + PDE_SIZE <= eva; 1330 batch = pdemask == sva && sva + PDE_SIZE <= eva;
1363 1331
1364 if ((pte = pmap_pte_get(pde, sva))) { 1332 if ((pte = pmap_pte_get(pde, sva))) {
1365 1333
1366 /* TODO measure here the speed tradeoff 1334 /* TODO measure here the speed tradeoff
@@ -1389,57 +1357,57 @@ pmap_remove(pmap_t pmap, vaddr_t sva, va @@ -1389,57 +1357,57 @@ pmap_remove(pmap_t pmap, vaddr_t sva, va
1389 } else { 1357 } else {
1390 if (IS_IOPAGE_P(PTE_PAGE(pte))) { 1358 if (IS_IOPAGE_P(PTE_PAGE(pte))) {
1391 ptp = pmap_pde_ptp(pmap, pde); 1359 ptp = pmap_pde_ptp(pmap, pde);
1392 if (ptp != NULL) 1360 if (ptp != NULL)
1393 pmap_pde_release(pmap, sva, 1361 pmap_pde_release(pmap, sva,
1394 ptp); 1362 ptp);
1395 } 1363 }
1396 } 1364 }
1397 } 1365 }
1398 } 1366 }
1399 1367
1400 PMAP_UNLOCK(pmap); 1368 PMAP_UNLOCK(pmap);
1401 1369
1402 DPRINTF(PDB_FOLLOW|PDB_REMOVE, ("%s: leaving\n", __func__)); 1370 UVMHIST_LOG(maphist, "<--- done", 0, 0, 0, 0);
1403} 1371}
1404 1372
1405void 1373void
1406pmap_write_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot) 1374pmap_write_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
1407{ 1375{
 1376 UVMHIST_FUNC(__func__);
 1377 UVMHIST_CALLARGS(maphist, "pm %#jx sva %#jx eva %#jx prot %#jx",
 1378 (uintptr_t)pmap, sva, eva, prot);
 1379
1408 struct vm_page *pg; 1380 struct vm_page *pg;
1409 volatile pt_entry_t *pde = NULL; 1381 volatile pt_entry_t *pde = NULL;
1410 pt_entry_t pte; 1382 pt_entry_t pte;
1411 u_int pteprot, pdemask; 1383 u_int pteprot, pdemask;
1412 1384
1413 DPRINTF(PDB_FOLLOW|PDB_PMAP, 
1414 ("%s(%p, %lx, %lx, %x)\n", __func__, pmap, sva, eva, prot)); 
1415 
1416 sva = trunc_page(sva); 1385 sva = trunc_page(sva);
1417 pteprot = PTE_PROT(pmap_prot(pmap, prot)); 1386 pteprot = PTE_PROT(pmap_prot(pmap, prot));
1418 1387
1419 PMAP_LOCK(pmap); 1388 PMAP_LOCK(pmap);
1420 1389
1421 for (pdemask = 1; sva < eva; sva += PAGE_SIZE) { 1390 for (pdemask = 1; sva < eva; sva += PAGE_SIZE) {
1422 if (pdemask != (sva & PDE_MASK)) { 1391 if (pdemask != (sva & PDE_MASK)) {
1423 pdemask = sva & PDE_MASK; 1392 pdemask = sva & PDE_MASK;
1424 if (!(pde = pmap_pde_get(pmap->pm_pdir, sva))) { 1393 if (!(pde = pmap_pde_get(pmap->pm_pdir, sva))) {
1425 sva = pdemask + PDE_SIZE - PAGE_SIZE; 1394 sva = pdemask + PDE_SIZE - PAGE_SIZE;
1426 continue; 1395 continue;
1427 } 1396 }
1428 } 1397 }
1429 if ((pte = pmap_pte_get(pde, sva))) { 1398 if ((pte = pmap_pte_get(pde, sva))) {
1430 1399 UVMHIST_LOG(maphist, "va% #jx pte %#jx", sva, pte,
1431 DPRINTF(PDB_PMAP, 1400 0, 0);
1432 ("%s: va=0x%lx pte=0x%x\n", __func__, sva, pte)); 
1433 /* 1401 /*
1434 * Determine if mapping is changing. 1402 * Determine if mapping is changing.
1435 * If not, nothing to do. 1403 * If not, nothing to do.
1436 */ 1404 */
1437 if ((pte & PTE_PROT(TLB_AR_MASK)) == pteprot) 1405 if ((pte & PTE_PROT(TLB_AR_MASK)) == pteprot)
1438 continue; 1406 continue;
1439 1407
1440 pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte)); 1408 pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte));
1441 if (pg != NULL) { 1409 if (pg != NULL) {
1442 struct vm_page_md * const md = 1410 struct vm_page_md * const md =
1443 VM_PAGE_TO_MD(pg); 1411 VM_PAGE_TO_MD(pg);
1444 1412
1445 md->pvh_attrs |= pmap_pvh_attrs(pte); 1413 md->pvh_attrs |= pmap_pvh_attrs(pte);
@@ -1449,31 +1417,32 @@ pmap_write_protect(pmap_t pmap, vaddr_t  @@ -1449,31 +1417,32 @@ pmap_write_protect(pmap_t pmap, vaddr_t
1449 pmap_pte_flush(pmap, sva, pte); 1417 pmap_pte_flush(pmap, sva, pte);
1450 pte &= ~PTE_PROT(TLB_AR_MASK); 1418 pte &= ~PTE_PROT(TLB_AR_MASK);
1451 pte |= pteprot; 1419 pte |= pteprot;
1452 pmap_pte_set(pde, sva, pte); 1420 pmap_pte_set(pde, sva, pte);
1453 } 1421 }
1454 } 1422 }
1455 1423
1456 PMAP_UNLOCK(pmap); 1424 PMAP_UNLOCK(pmap);
1457} 1425}
1458 1426
1459void 1427void
1460pmap_page_remove(struct vm_page *pg) 1428pmap_page_remove(struct vm_page *pg)
1461{ 1429{
 1430 UVMHIST_FUNC(__func__);
 1431 UVMHIST_CALLARGS(maphist, "pg %#jx", (uintptr_t)pg, 0, 0, 0);
 1432
1462 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 1433 struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1463 struct pv_entry *pve, *npve, **pvp; 1434 struct pv_entry *pve, *npve, **pvp;
1464 1435
1465 DPRINTF(PDB_FOLLOW|PDB_PV, ("%s(%p)\n", __func__, pg)); 
1466 
1467 if (md->pvh_list == NULL) { 1436 if (md->pvh_list == NULL) {
1468 KASSERT((md->pvh_attrs & PVF_EXEC) == 0); 1437 KASSERT((md->pvh_attrs & PVF_EXEC) == 0);
1469 return; 1438 return;
1470 } 1439 }
1471 1440
1472 pvp = &md->pvh_list; 1441 pvp = &md->pvh_list;
1473 for (pve = md->pvh_list; pve; pve = npve) { 1442 for (pve = md->pvh_list; pve; pve = npve) {
1474 pmap_t pmap = pve->pv_pmap; 1443 pmap_t pmap = pve->pv_pmap;
1475 vaddr_t va = pve->pv_va & PV_VAMASK; 1444 vaddr_t va = pve->pv_va & PV_VAMASK;
1476 volatile pt_entry_t *pde; 1445 volatile pt_entry_t *pde;
1477 pt_entry_t pte; 1446 pt_entry_t pte;
1478 1447
1479 PMAP_LOCK(pmap); 1448 PMAP_LOCK(pmap);
@@ -1496,154 +1465,158 @@ pmap_page_remove(struct vm_page *pg) @@ -1496,154 +1465,158 @@ pmap_page_remove(struct vm_page *pg)
1496 if (pte & PTE_PROT(TLB_WIRED)) 1465 if (pte & PTE_PROT(TLB_WIRED))
1497 pmap->pm_stats.wired_count--; 1466 pmap->pm_stats.wired_count--;
1498 pmap->pm_stats.resident_count--; 1467 pmap->pm_stats.resident_count--;
1499 1468
1500 if (!(pve->pv_va & PV_KENTER)) { 1469 if (!(pve->pv_va & PV_KENTER)) {
1501 pmap_pte_set(pde, va, 0); 1470 pmap_pte_set(pde, va, 0);
1502 pmap_pv_free(pve); 1471 pmap_pv_free(pve);
1503 } 1472 }
1504 PMAP_UNLOCK(pmap); 1473 PMAP_UNLOCK(pmap);
1505 } 1474 }
1506 md->pvh_attrs &= ~PVF_EXEC; 1475 md->pvh_attrs &= ~PVF_EXEC;
1507 *pvp = NULL; 1476 *pvp = NULL;
1508 1477
1509 DPRINTF(PDB_FOLLOW|PDB_PV, ("%s: leaving\n", __func__)); 1478 UVMHIST_LOG(maphist, "<--- done", 0, 0, 0, 0);
1510} 1479}
1511 1480
1512/* 1481/*
1513 * Routine: pmap_unwire 1482 * Routine: pmap_unwire
1514 * Function: Change the wiring attribute for a map/virtual-address 1483 * Function: Change the wiring attribute for a map/virtual-address
1515 * pair. 1484 * pair.
1516 * In/out conditions: 1485 * In/out conditions:
1517 * The mapping must already exist in the pmap. 1486 * The mapping must already exist in the pmap.
1518 * 1487 *
1519 * Change the wiring for a given virtual page. This routine currently is 1488 * Change the wiring for a given virtual page. This routine currently is
1520 * only used to unwire pages and hence the mapping entry will exist. 1489 * only used to unwire pages and hence the mapping entry will exist.
1521 */ 1490 */
1522void 1491void
1523pmap_unwire(pmap_t pmap, vaddr_t va) 1492pmap_unwire(pmap_t pmap, vaddr_t va)
1524{ 1493{
 1494 UVMHIST_FUNC(__func__);
 1495 UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx", (uintptr_t)pmap, va, 0, 0);
 1496
1525 volatile pt_entry_t *pde; 1497 volatile pt_entry_t *pde;
1526 pt_entry_t pte = 0; 1498 pt_entry_t pte = 0;
1527 1499
1528 DPRINTF(PDB_FOLLOW|PDB_PMAP, ("%s(%p, 0x%lx)\n", __func__, pmap, va)); 
1529 
1530 PMAP_LOCK(pmap); 1500 PMAP_LOCK(pmap);
1531 if ((pde = pmap_pde_get(pmap->pm_pdir, va))) { 1501 if ((pde = pmap_pde_get(pmap->pm_pdir, va))) {
1532 pte = pmap_pte_get(pde, va); 1502 pte = pmap_pte_get(pde, va);
1533 1503
1534 KASSERT(pte); 1504 KASSERT(pte);
1535 1505
1536 if (pte & PTE_PROT(TLB_WIRED)) { 1506 if (pte & PTE_PROT(TLB_WIRED)) {
1537 pte &= ~PTE_PROT(TLB_WIRED); 1507 pte &= ~PTE_PROT(TLB_WIRED);
1538 pmap->pm_stats.wired_count--; 1508 pmap->pm_stats.wired_count--;
1539 pmap_pte_set(pde, va, pte); 1509 pmap_pte_set(pde, va, pte);
1540 } 1510 }
1541 } 1511 }
1542 PMAP_UNLOCK(pmap); 1512 PMAP_UNLOCK(pmap);
1543 1513
1544 DPRINTF(PDB_FOLLOW|PDB_PMAP, ("%s: leaving\n", __func__)); 1514 UVMHIST_LOG(maphist, "<--- done", 0, 0, 0, 0);
1545} 1515}
1546 1516
1547bool 1517bool
1548pmap_changebit(struct vm_page *pg, u_int set, u_int clear) 1518pmap_changebit(struct vm_page *pg, u_int set, u_int clear)
1549{ 1519{
 1520 UVMHIST_FUNC(__func__);
 1521 UVMHIST_CALLARGS(maphist, "pg %#jx (md %#jx) set %#jx clear %#jx",
 1522 (uintptr_t)pg, (uintptr_t)VM_PAGE_TO_MD(pg), set, clear);
 1523
1550 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 1524 struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1551 struct pv_entry *pve; 1525 struct pv_entry *pve;
1552 int res; 1526 int res;
1553 1527
1554 DPRINTF(PDB_FOLLOW|PDB_BITS, 
1555 ("%s(%p, %x, %x)\n", __func__, pg, set, clear)); 
1556 
1557 KASSERT((set & clear) == 0); 1528 KASSERT((set & clear) == 0);
1558 KASSERT((set & ~(PVF_REF|PVF_UNCACHEABLE)) == 0); 1529 KASSERT((set & ~(PVF_REF|PVF_UNCACHEABLE)) == 0);
1559 KASSERT((clear & ~(PVF_MOD|PVF_WRITE|PVF_UNCACHEABLE)) == 0); 1530 KASSERT((clear & ~(PVF_MOD|PVF_WRITE|PVF_UNCACHEABLE)) == 0);
1560 1531
1561 /* preserve other bits */ 1532 /* preserve other bits */
1562 res = md->pvh_attrs & (set | clear); 1533 res = md->pvh_attrs & (set | clear);
1563 md->pvh_attrs ^= res; 1534 md->pvh_attrs ^= res;
1564 1535
1565 for (pve = md->pvh_list; pve; pve = pve->pv_next) { 1536 for (pve = md->pvh_list; pve; pve = pve->pv_next) {
1566 pmap_t pmap = pve->pv_pmap; 1537 pmap_t pmap = pve->pv_pmap;
1567 vaddr_t va = pve->pv_va & PV_VAMASK; 1538 vaddr_t va = pve->pv_va & PV_VAMASK;
1568 volatile pt_entry_t *pde; 1539 volatile pt_entry_t *pde;
1569 pt_entry_t opte, pte; 1540 pt_entry_t opte, pte;
1570 1541
1571 if ((pde = pmap_pde_get(pmap->pm_pdir, va))) { 1542 if ((pde = pmap_pde_get(pmap->pm_pdir, va))) {
1572 opte = pte = pmap_pte_get(pde, va); 1543 opte = pte = pmap_pte_get(pde, va);
1573#ifdef PMAPDEBUG 1544#ifdef DEBUG
1574 if (!pte) { 1545 if (!pte) {
1575 DPRINTF(PDB_FOLLOW|PDB_BITS, 1546 UVMHIST_LOG(maphist, "zero pte for %#jx",
1576 ("%s: zero pte for 0x%lx\n", __func__, 1547 va, 0, 0, 0);
1577 va)); 
1578 continue; 1548 continue;
1579 } 1549 }
1580#endif 1550#endif
1581 pte &= ~clear; 1551 pte &= ~clear;
1582 pte |= set; 1552 pte |= set;
1583 1553
1584 if (!(pve->pv_va & PV_KENTER)) { 1554 if (!(pve->pv_va & PV_KENTER)) {
1585 md->pvh_attrs |= pmap_pvh_attrs(pte); 1555 md->pvh_attrs |= pmap_pvh_attrs(pte);
1586 res |= pmap_pvh_attrs(opte); 1556 res |= pmap_pvh_attrs(opte);
1587 } 1557 }
1588 1558
1589 if (opte != pte) { 1559 if (opte != pte) {
1590 pmap_pte_flush(pmap, va, opte); 1560 pmap_pte_flush(pmap, va, opte);
1591 pmap_pte_set(pde, va, pte); 1561 pmap_pte_set(pde, va, pte);
1592 } 1562 }
1593 } 1563 }
1594 } 1564 }
1595 1565
1596 return ((res & (clear | set)) != 0); 1566 return ((res & (clear | set)) != 0);
1597} 1567}
1598 1568
1599bool 1569bool
1600pmap_testbit(struct vm_page *pg, u_int bit) 1570pmap_testbit(struct vm_page *pg, u_int bit)
1601{ 1571{
 1572 UVMHIST_FUNC(__func__);
 1573 UVMHIST_CALLARGS(maphist, "pg %#jx (md %#jx) bit %#jx",
 1574 (uintptr_t)pg, (uintptr_t)VM_PAGE_TO_MD(pg), bit, 0);
 1575
1602 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 1576 struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1603 struct pv_entry *pve; 1577 struct pv_entry *pve;
1604 pt_entry_t pte; 1578 pt_entry_t pte;
1605 int ret; 1579 int ret;
1606 1580
1607 DPRINTF(PDB_FOLLOW|PDB_BITS, ("%s(%p, %x)\n", __func__, pg, bit)); 
1608 
1609 for (pve = md->pvh_list; !(md->pvh_attrs & bit) && pve; 1581 for (pve = md->pvh_list; !(md->pvh_attrs & bit) && pve;
1610 pve = pve->pv_next) { 1582 pve = pve->pv_next) {
1611 pmap_t pm = pve->pv_pmap; 1583 pmap_t pm = pve->pv_pmap;
1612 1584
1613 pte = pmap_vp_find(pm, pve->pv_va & PV_VAMASK); 1585 pte = pmap_vp_find(pm, pve->pv_va & PV_VAMASK);
1614 if (pve->pv_va & PV_KENTER) 1586 if (pve->pv_va & PV_KENTER)
1615 continue; 1587 continue;
1616 1588
1617 md->pvh_attrs |= pmap_pvh_attrs(pte); 1589 md->pvh_attrs |= pmap_pvh_attrs(pte);
1618 } 1590 }
1619 ret = ((md->pvh_attrs & bit) != 0); 1591 ret = ((md->pvh_attrs & bit) != 0);
1620 1592
1621 return ret; 1593 return ret;
1622} 1594}
1623 1595
1624/* 1596/*
1625 * pmap_extract(pmap, va, pap) 1597 * pmap_extract(pmap, va, pap)
1626 * fills in the physical address corresponding to the 1598 * fills in the physical address corresponding to the
1627 * virtual address specified by pmap and va into the 1599 * virtual address specified by pmap and va into the
1628 * storage pointed to by pap and returns true if the 1600 * storage pointed to by pap and returns true if the
1629 * virtual address is mapped. returns false if not mapped. 1601 * virtual address is mapped. returns false if not mapped.
1630 */ 1602 */
1631bool 1603bool
1632pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap) 1604pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
1633{ 1605{
1634 pt_entry_t pte; 1606 UVMHIST_FUNC(__func__);
 1607 UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx", (uintptr_t)pmap, va, 0, 0);
1635 1608
1636 DPRINTF(PDB_FOLLOW|PDB_EXTRACT, ("%s(%p, %lx)\n", __func__, pmap, va)); 1609 pt_entry_t pte;
1637 1610
1638 PMAP_LOCK(pmap); 1611 PMAP_LOCK(pmap);
1639 pte = pmap_vp_find(pmap, va); 1612 pte = pmap_vp_find(pmap, va);
1640 PMAP_UNLOCK(pmap); 1613 PMAP_UNLOCK(pmap);
1641 1614
1642 if (pte) { 1615 if (pte) {
1643 if (pap) 1616 if (pap)
1644 *pap = (pte & ~PGOFSET) | (va & PGOFSET); 1617 *pap = (pte & ~PGOFSET) | (va & PGOFSET);
1645 return true; 1618 return true;
1646 } 1619 }
1647 1620
1648 return false; 1621 return false;
1649} 1622}
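As the comment above states, pmap_extract() composes the physical address
from the PTE frame bits plus the page offset of va and returns true only if
the address is mapped.  A small usage sketch, assuming a kernel-pmap lookup;
the printf() reporting is purely illustrative:

	paddr_t pa;

	if (pmap_extract(pmap_kernel(), va, &pa))
		printf("va %#lx -> pa %#lx\n", (u_long)va, (u_long)pa);
	else
		printf("va %#lx is not mapped\n", (u_long)va);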
@@ -1672,31 +1645,33 @@ pmap_activate(struct lwp *l) @@ -1672,31 +1645,33 @@ pmap_activate(struct lwp *l)
1672 1645
1673void 1646void
1674pmap_procwr(struct proc *p, vaddr_t va, size_t len) 1647pmap_procwr(struct proc *p, vaddr_t va, size_t len)
1675{ 1648{
1676 pmap_t pmap = p->p_vmspace->vm_map.pmap; 1649 pmap_t pmap = p->p_vmspace->vm_map.pmap;
1677 1650
1678 fdcache(pmap->pm_space, va, len); 1651 fdcache(pmap->pm_space, va, len);
1679 ficache(pmap->pm_space, va, len); 1652 ficache(pmap->pm_space, va, len);
1680} 1653}
1681 1654
1682static inline void 1655static inline void
1683pmap_flush_page(struct vm_page *pg, bool purge) 1656pmap_flush_page(struct vm_page *pg, bool purge)
1684{ 1657{
 1658 UVMHIST_FUNC(__func__);
 1659 UVMHIST_CALLARGS(maphist, "pg %#jx (md %#jx) purge %jd",
 1660 (uintptr_t)pg, (uintptr_t)VM_PAGE_TO_MD(pg), purge, 0);
 1661
1685 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 1662 struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1686 struct pv_entry *pve; 1663 struct pv_entry *pve;
1687 1664
1688 DPRINTF(PDB_FOLLOW|PDB_CACHE, ("%s(%p, %d)\n", __func__, pg, purge)); 
1689 
1690 /* purge cache for all possible mappings for the pa */ 1665 /* purge cache for all possible mappings for the pa */
1691 for (pve = md->pvh_list; pve; pve = pve->pv_next) { 1666 for (pve = md->pvh_list; pve; pve = pve->pv_next) {
1692 vaddr_t va = pve->pv_va & PV_VAMASK; 1667 vaddr_t va = pve->pv_va & PV_VAMASK;
1693 pa_space_t sp = pve->pv_pmap->pm_space; 1668 pa_space_t sp = pve->pv_pmap->pm_space;
1694 1669
1695 if (purge) 1670 if (purge)
1696 pdcache(sp, va, PAGE_SIZE); 1671 pdcache(sp, va, PAGE_SIZE);
1697 else 1672 else
1698 fdcache(sp, va, PAGE_SIZE); 1673 fdcache(sp, va, PAGE_SIZE);
1699#if defined(HP8000_CPU) || defined(HP8200_CPU) || \ 1674#if defined(HP8000_CPU) || defined(HP8200_CPU) || \
1700 defined(HP8500_CPU) || defined(HP8600_CPU) 1675 defined(HP8500_CPU) || defined(HP8600_CPU)
1701 ficache(sp, va, PAGE_SIZE); 1676 ficache(sp, va, PAGE_SIZE);
1702 pdtlb(sp, va); 1677 pdtlb(sp, va);
@@ -1704,205 +1679,190 @@ pmap_flush_page(struct vm_page *pg, bool @@ -1704,205 +1679,190 @@ pmap_flush_page(struct vm_page *pg, bool
1704#endif 1679#endif
1705 } 1680 }
1706} 1681}
1707 1682
1708/* 1683/*
1709 * pmap_zero_page(pa) 1684 * pmap_zero_page(pa)
1710 * 1685 *
1711 * Zeros the specified page. 1686 * Zeros the specified page.
1712 */ 1687 */
1713void 1688void
1714pmap_zero_page(paddr_t pa) 1689pmap_zero_page(paddr_t pa)
1715{ 1690{
1716 1691
1717 DPRINTF(PDB_FOLLOW|PDB_PHYS, ("%s(%lx)\n", __func__, pa)); 1692 UVMHIST_FUNC(__func__);
 1693 UVMHIST_CALLARGS(maphist, "pa %#jx (pg %#jx)", pa,
 1694 (uintptr_t)PHYS_TO_VM_PAGE(pa), 0, 0);
1718 1695
1719 KASSERT(VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(pa))->pvh_list == NULL); 1696 KASSERT(VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(pa))->pvh_list == NULL);
1720 KASSERT((VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(pa))->pvh_attrs & PVF_EXEC) == 0); 1697 KASSERT((VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(pa))->pvh_attrs & PVF_EXEC) == 0);
1721 1698
1722 memset((void *)pa, 0, PAGE_SIZE); 1699 memset((void *)pa, 0, PAGE_SIZE);
1723 fdcache(HPPA_SID_KERNEL, pa, PAGE_SIZE); 1700 fdcache(HPPA_SID_KERNEL, pa, PAGE_SIZE);
1724 1701
1725#if defined(HP8000_CPU) || defined(HP8200_CPU) || \ 1702#if defined(HP8000_CPU) || defined(HP8200_CPU) || \
1726 defined(HP8500_CPU) || defined(HP8600_CPU) 1703 defined(HP8500_CPU) || defined(HP8600_CPU)
1727 ficache(HPPA_SID_KERNEL, pa, PAGE_SIZE); 1704 ficache(HPPA_SID_KERNEL, pa, PAGE_SIZE);
1728 pdtlb(HPPA_SID_KERNEL, pa); 1705 pdtlb(HPPA_SID_KERNEL, pa);
1729 pitlb(HPPA_SID_KERNEL, pa); 1706 pitlb(HPPA_SID_KERNEL, pa);
1730#endif 1707#endif
1731} 1708}
1732 1709
1733/* 1710/*
1734 * pmap_copy_page(src, dst) 1711 * pmap_copy_page(src, dst)
1735 * 1712 *
1736 * pmap_copy_page copies the source page to the destination page. 1713 * pmap_copy_page copies the source page to the destination page.
1737 */ 1714 */
1738void 1715void
1739pmap_copy_page(paddr_t spa, paddr_t dpa) 1716pmap_copy_page(paddr_t spa, paddr_t dpa)
1740{ 1717{
1741 struct vm_page *srcpg = PHYS_TO_VM_PAGE(spa); 1718 UVMHIST_FUNC(__func__);
 1719 UVMHIST_CALLARGS(maphist, "spa %#jx (pg %#jx) dpa %#jx (pg %#jx)",
 1720 spa, (uintptr_t)PHYS_TO_VM_PAGE(spa),
 1721 dpa, (uintptr_t)PHYS_TO_VM_PAGE(dpa));
1742 1722
1743 DPRINTF(PDB_FOLLOW|PDB_PHYS, ("%s(%lx, %lx)\n", __func__, spa, dpa)); 1723 struct vm_page *srcpg = PHYS_TO_VM_PAGE(spa);
1744 1724
1745 KASSERT(VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(dpa))->pvh_list == NULL); 1725 KASSERT(VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(dpa))->pvh_list == NULL);
1746 KASSERT((VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(dpa))->pvh_attrs & PVF_EXEC) == 0); 1726 KASSERT((VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(dpa))->pvh_attrs & PVF_EXEC) == 0);
1747 1727
1748 pmap_flush_page(srcpg, false); 1728 pmap_flush_page(srcpg, false);
1749 1729
1750 memcpy((void *)dpa, (void *)spa, PAGE_SIZE); 1730 memcpy((void *)dpa, (void *)spa, PAGE_SIZE);
1751 1731
1752 pdcache(HPPA_SID_KERNEL, spa, PAGE_SIZE); 1732 pdcache(HPPA_SID_KERNEL, spa, PAGE_SIZE);
1753 fdcache(HPPA_SID_KERNEL, dpa, PAGE_SIZE); 1733 fdcache(HPPA_SID_KERNEL, dpa, PAGE_SIZE);
1754#if defined(HP8000_CPU) || defined(HP8200_CPU) || \ 1734#if defined(HP8000_CPU) || defined(HP8200_CPU) || \
1755 defined(HP8500_CPU) || defined(HP8600_CPU) 1735 defined(HP8500_CPU) || defined(HP8600_CPU)
1756 ficache(HPPA_SID_KERNEL, spa, PAGE_SIZE); 1736 ficache(HPPA_SID_KERNEL, spa, PAGE_SIZE);
1757 ficache(HPPA_SID_KERNEL, dpa, PAGE_SIZE); 1737 ficache(HPPA_SID_KERNEL, dpa, PAGE_SIZE);
1758 pdtlb(HPPA_SID_KERNEL, spa); 1738 pdtlb(HPPA_SID_KERNEL, spa);
1759 pdtlb(HPPA_SID_KERNEL, dpa); 1739 pdtlb(HPPA_SID_KERNEL, dpa);
1760 pitlb(HPPA_SID_KERNEL, spa); 1740 pitlb(HPPA_SID_KERNEL, spa);
1761 pitlb(HPPA_SID_KERNEL, dpa); 1741 pitlb(HPPA_SID_KERNEL, dpa);
1762#endif 1742#endif
1763} 1743}
1764 1744
1765void 1745void
1766pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 1746pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1767{ 1747{
 1748 UVMHIST_FUNC(__func__);
 1749 if (va != 0) {
 1750 UVMHIST_CALLARGS(maphist, "va %#jx pa %#jx prot %#jx flags %#jx",
 1751 va, pa, prot, flags);
 1752 }
 1753
1768 volatile pt_entry_t *pde; 1754 volatile pt_entry_t *pde;
1769 pt_entry_t pte, opte; 1755 pt_entry_t pte, opte;
1770 struct vm_page *pg; 1756 struct vm_page *pg;
1771 1757
1772#ifdef PMAPDEBUG 
1773 int opmapdebug = pmapdebug; 
1774 
1775 /* 
1776 * If we're being told to map page zero, we can't call printf() at all, 
1777 * because doing so would lead to an infinite recursion on this call. 
1778 * (printf requires page zero to be mapped). 
1779 */ 
1780 if (va == 0) 
1781 pmapdebug = 0; 
1782#endif /* PMAPDEBUG */ 
1783 
1784 DPRINTF(PDB_FOLLOW|PDB_ENTER, 
1785 ("%s(%lx, %lx, %x)\n", __func__, va, pa, prot)); 
1786 
1787 if (!(pde = pmap_pde_get(pmap_kernel()->pm_pdir, va)) && 1758 if (!(pde = pmap_pde_get(pmap_kernel()->pm_pdir, va)) &&
1788 !(pde = pmap_pde_alloc(pmap_kernel(), va, NULL))) 1759 !(pde = pmap_pde_alloc(pmap_kernel(), va, NULL)))
1789 panic("pmap_kenter_pa: cannot allocate pde for va=0x%lx", va); 1760 panic("pmap_kenter_pa: cannot allocate pde for va=0x%lx", va);
1790 opte = pmap_pte_get(pde, va); 1761 opte = pmap_pte_get(pde, va);
1791 pte = pa | PTE_PROT(TLB_WIRED | TLB_REFTRAP | 1762 pte = pa | PTE_PROT(TLB_WIRED | TLB_REFTRAP |
1792 pmap_prot(pmap_kernel(), prot & VM_PROT_ALL)); 1763 pmap_prot(pmap_kernel(), prot & VM_PROT_ALL));
1793 if (IS_IOPAGE_P(pa) || (flags & PMAP_NOCACHE)) 1764 if (IS_IOPAGE_P(pa) || (flags & PMAP_NOCACHE))
1794 pte |= PTE_PROT(TLB_UNCACHEABLE); 1765 pte |= PTE_PROT(TLB_UNCACHEABLE);
1795 pmap_kernel()->pm_stats.wired_count++; 1766 pmap_kernel()->pm_stats.wired_count++;
1796 pmap_kernel()->pm_stats.resident_count++; 1767 pmap_kernel()->pm_stats.resident_count++;
1797 if (opte) 1768 if (opte)
1798 pmap_pte_flush(pmap_kernel(), va, opte); 1769 pmap_pte_flush(pmap_kernel(), va, opte);
1799 1770
1800 pg = pmap_initialized ? PHYS_TO_VM_PAGE(PTE_PAGE(pte)) : NULL; 1771 pg = pmap_initialized ? PHYS_TO_VM_PAGE(PTE_PAGE(pte)) : NULL;
1801 if (pg != NULL) { 1772 if (pg != NULL) {
1802 KASSERT(pa < HPPA_IOBEGIN); 1773 KASSERT(pa < HPPA_IOBEGIN);
1803 1774
1804 struct pv_entry *pve; 1775 struct pv_entry *pve;
1805 1776
1806 pve = pmap_pv_alloc(); 1777 pve = pmap_pv_alloc();
1807 if (!pve) 1778 if (!pve)
1808 panic("%s: no pv entries available", __func__); 1779 panic("%s: no pv entries available", __func__);
1809 DPRINTF(PDB_FOLLOW|PDB_ENTER, ("%s(%lx, %lx, %x) TLB_KENTER\n", 1780 UVMHIST_LOG(maphist, "va %#jx pa %#jx pte %#jx TLB_KENTER",
1810 __func__, va, pa, pte)); 1781 va, pa, pte, 0);
1811 1782
1812 if (pmap_check_alias(pg, va, pte)) 1783 if (pmap_check_alias(pg, va, pte))
1813 pmap_page_remove(pg); 1784 pmap_page_remove(pg);
1814 1785
1815 pmap_pv_enter(pg, pve, pmap_kernel(), va, NULL, PV_KENTER); 1786 pmap_pv_enter(pg, pve, pmap_kernel(), va, NULL, PV_KENTER);
1816 } 1787 }
1817 pmap_pte_set(pde, va, pte); 1788 pmap_pte_set(pde, va, pte);
1818 1789
1819 DPRINTF(PDB_FOLLOW|PDB_ENTER, ("%s: leaving\n", __func__)); 1790 if (va != 0) {
 1791 UVMHIST_LOG(maphist, "<--- done", 0, 0, 0, 0);
 1792 }
1820 1793
1821#ifdef PMAPDEBUG 
1822 pmapdebug = opmapdebug; 
1823#endif /* PMAPDEBUG */ 
1824} 1794}
1825 1795
1826void 1796void
1827pmap_kremove(vaddr_t va, vsize_t size) 1797pmap_kremove(vaddr_t va, vsize_t size)
1828{ 1798{
 1799 UVMHIST_FUNC(__func__);
 1800 bool pzero = false;
 1801 if (va != 0) {
 1802 UVMHIST_CALLARGS(maphist, "va %#jx...%#jx", va, va + size, 0,
 1803 0);
 1804 pzero = true;
 1805 }
 1806
1829 struct pv_entry *pve; 1807 struct pv_entry *pve;
1830 vaddr_t eva, pdemask; 1808 vaddr_t eva, pdemask;
1831 volatile pt_entry_t *pde = NULL; 1809 volatile pt_entry_t *pde = NULL;
1832 pt_entry_t pte; 1810 pt_entry_t pte;
1833 struct vm_page *pg; 1811 struct vm_page *pg;
1834 pmap_t pmap = pmap_kernel(); 1812 pmap_t pmap = pmap_kernel();
1835#ifdef PMAPDEBUG 
1836 int opmapdebug = pmapdebug; 
1837 
1838 /* 
1839 * If we're being told to unmap page zero, we can't call printf() at 
1840 * all as printf requires page zero to be mapped. 
1841 */ 
1842 if (va == 0) 
1843 pmapdebug = 0; 
1844#endif /* PMAPDEBUG */ 
1845 
1846 DPRINTF(PDB_FOLLOW|PDB_REMOVE, 
1847 ("%s(%lx, %lx)\n", __func__, va, size)); 
1848#ifdef PMAPDEBUG 
1849 1813
 1814#ifdef DEBUG
1850 /* 1815 /*
1851 * Don't allow the VA == PA mappings, apart from page zero, to be 1816 * Don't allow the VA == PA mappings, apart from page zero, to be
1852 * removed. Page zero is given special treatment so that we get TLB 1817 * removed. Page zero is given special treatment so that we get TLB
1853 * faults when the kernel tries to de-reference NULL or anything else 1818 * faults when the kernel tries to de-reference NULL or anything else
1854 * in the first page when it shouldn't. 1819 * in the first page when it shouldn't.
1855 */ 1820 */
1856 if (va != 0 && va < ptoa(physmem)) { 1821 if (va != 0 && va < ptoa(physmem)) {
1857 DPRINTF(PDB_FOLLOW|PDB_REMOVE, 1822 UVMHIST_LOG(maphist, "va %#jx size %#jx: unmapping physmem", va,
1858 ("%s(%lx, %lx): unmapping physmem\n", __func__, va, 1823 size, 0, 0);
1859 size)); 
1860 pmapdebug = opmapdebug; 
1861 return; 1824 return;
1862 } 1825 }
1863#endif 1826#endif
1864 1827
1865 for (pdemask = 1, eva = va + size; va < eva; va += PAGE_SIZE) { 1828 for (pdemask = 1, eva = va + size; va < eva; va += PAGE_SIZE) {
1866 if (pdemask != (va & PDE_MASK)) { 1829 if (pdemask != (va & PDE_MASK)) {
1867 pdemask = va & PDE_MASK; 1830 pdemask = va & PDE_MASK;
1868 if (!(pde = pmap_pde_get(pmap->pm_pdir, va))) { 1831 if (!(pde = pmap_pde_get(pmap->pm_pdir, va))) {
1869 va = pdemask + PDE_SIZE - PAGE_SIZE; 1832 va = pdemask + PDE_SIZE - PAGE_SIZE;
1870 continue; 1833 continue;
1871 } 1834 }
1872 } 1835 }
1873 if (!(pte = pmap_pte_get(pde, va))) { 1836 if (!(pte = pmap_pte_get(pde, va))) {
1874 DPRINTF(PDB_FOLLOW|PDB_REMOVE, 1837 UVMHIST_LOG(maphist, "unmapping unmapped %#jx",
1875 ("%s: unmapping unmapped 0x%lx\n", __func__, 1838 va, 0, 0, 0);
1876 va)); 
1877 continue; 1839 continue;
1878 } 1840 }
1879 1841
1880 pmap_pte_flush(pmap, va, pte); 1842 pmap_pte_flush(pmap, va, pte);
1881 pmap_pte_set(pde, va, 0); 1843 pmap_pte_set(pde, va, 0);
1882 1844
1883 pg = pmap_initialized ? PHYS_TO_VM_PAGE(PTE_PAGE(pte)) : NULL; 1845 pg = pmap_initialized ? PHYS_TO_VM_PAGE(PTE_PAGE(pte)) : NULL;
1884 if (pg != NULL) { 1846 if (pg != NULL) {
1885 pve = pmap_pv_remove(pg, pmap, va); 1847 pve = pmap_pv_remove(pg, pmap, va);
1886 1848
1887 if (pve != NULL) 1849 if (pve != NULL)
1888 pmap_pv_free(pve); 1850 pmap_pv_free(pve);
1889 } 1851 }
1890 } 1852 }
1891 DPRINTF(PDB_FOLLOW|PDB_REMOVE, ("%s: leaving\n", __func__)); 1853 if (pzero) {
1892 1854 UVMHIST_LOG(maphist, "<--- done", 0, 0, 0, 0);
1893#ifdef PMAPDEBUG 1855 }
1894 pmapdebug = opmapdebug; 
1895#endif /* PMAPDEBUG */ 
1896} 1856}
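pmap_kenter_pa() and pmap_kremove() above need special care for page zero:
the old code muted pmapdebug around the call because DPRINTF() goes through
printf(), which itself needs page zero mapped, while the new code simply
suppresses the entry and exit history records when va == 0.  A sketch of
that guard pattern, with example_kenter() as a hypothetical stand-in for the
real functions:

	/* Sketch only; example_kenter() is not part of pmap.c. */
	void
	example_kenter(vaddr_t va)
	{
		UVMHIST_FUNC(__func__);
		if (va != 0) {
			UVMHIST_CALLARGS(maphist, "va %#jx", va, 0, 0, 0);
		}

		/* ... the actual mapping work goes here ... */

		if (va != 0) {
			UVMHIST_LOG(maphist, "<--- done", 0, 0, 0, 0);
		}
	}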
1897 1857
1898#if defined(USE_HPT) 1858#if defined(USE_HPT)
1899#if defined(DDB) 1859#if defined(DDB)
1900/* 1860/*
1901 * prints whole va->pa (aka HPT or HVT) 1861 * prints whole va->pa (aka HPT or HVT)
1902 */ 1862 */
1903void 1863void
1904pmap_hptdump(void) 1864pmap_hptdump(void)
1905{ 1865{
1906 struct hpt_entry *hpt, *ehpt; 1866 struct hpt_entry *hpt, *ehpt;
1907 1867
1908 hpt = (struct hpt_entry *)pmap_hpt; 1868 hpt = (struct hpt_entry *)pmap_hpt;