Sun May 30 19:50:23 2021 UTC
Fix DEBUG build.


(thorpej)
diff -r1.291 -r1.292 src/sys/arch/alpha/alpha/pmap.c

cvs diff -r1.291 -r1.292 src/sys/arch/alpha/alpha/pmap.c

--- src/sys/arch/alpha/alpha/pmap.c 2021/05/30 19:46:21 1.291
+++ src/sys/arch/alpha/alpha/pmap.c 2021/05/30 19:50:23 1.292
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: pmap.c,v 1.291 2021/05/30 19:46:21 thorpej Exp $ */ 1/* $NetBSD: pmap.c,v 1.292 2021/05/30 19:50:23 thorpej Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1998, 1999, 2000, 2001, 2007, 2008, 2020 4 * Copyright (c) 1998, 1999, 2000, 2001, 2007, 2008, 2020
5 * The NetBSD Foundation, Inc. 5 * The NetBSD Foundation, Inc.
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * This code is derived from software contributed to The NetBSD Foundation 8 * This code is derived from software contributed to The NetBSD Foundation
9 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 9 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
10 * NASA Ames Research Center, by Andrew Doran and Mindaugas Rasiukevicius, 10 * NASA Ames Research Center, by Andrew Doran and Mindaugas Rasiukevicius,
11 * and by Chris G. Demetriou. 11 * and by Chris G. Demetriou.
12 * 12 *
13 * Redistribution and use in source and binary forms, with or without 13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions 14 * modification, are permitted provided that the following conditions
@@ -125,27 +125,27 @@ @@ -125,27 +125,27 @@
125 * this module may delay invalidate or reduced protection 125 * this module may delay invalidate or reduced protection
126 * operations until such time as they are actually 126 * operations until such time as they are actually
127 * necessary. This module is given full information as 127 * necessary. This module is given full information as
128 * to which processors are currently using which maps, 128 * to which processors are currently using which maps,
129 * and to when physical maps must be made correct. 129 * and to when physical maps must be made correct.
130 */ 130 */
131 131
132#include "opt_lockdebug.h" 132#include "opt_lockdebug.h"
133#include "opt_sysv.h" 133#include "opt_sysv.h"
134#include "opt_multiprocessor.h" 134#include "opt_multiprocessor.h"
135 135
136#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */ 136#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
137 137
138__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.291 2021/05/30 19:46:21 thorpej Exp $"); 138__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.292 2021/05/30 19:50:23 thorpej Exp $");
139 139
140#include <sys/param.h> 140#include <sys/param.h>
141#include <sys/systm.h> 141#include <sys/systm.h>
142#include <sys/kernel.h> 142#include <sys/kernel.h>
143#include <sys/proc.h> 143#include <sys/proc.h>
144#include <sys/malloc.h> 144#include <sys/malloc.h>
145#include <sys/pool.h> 145#include <sys/pool.h>
146#include <sys/buf.h> 146#include <sys/buf.h>
147#include <sys/evcnt.h> 147#include <sys/evcnt.h>
148#include <sys/atomic.h> 148#include <sys/atomic.h>
149#include <sys/cpu.h> 149#include <sys/cpu.h>
150 150
151#include <uvm/uvm.h> 151#include <uvm/uvm.h>
@@ -1923,29 +1923,26 @@ pmap_remove_all(pmap_t pmap) @@ -1923,29 +1923,26 @@ pmap_remove_all(pmap_t pmap)
1923 * 1923 *
1924 * Lower the permission for all mappings to a given page to 1924 * Lower the permission for all mappings to a given page to
1925 * the permissions specified. 1925 * the permissions specified.
1926 */ 1926 */
1927void 1927void
1928pmap_page_protect(struct vm_page *pg, vm_prot_t prot) 1928pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
1929{ 1929{
1930 pv_entry_t pv, nextpv; 1930 pv_entry_t pv, nextpv;
1931 pt_entry_t opte; 1931 pt_entry_t opte;
1932 kmutex_t *lock; 1932 kmutex_t *lock;
1933 struct pmap_tlb_context tlbctx; 1933 struct pmap_tlb_context tlbctx;
1934 1934
1935#ifdef DEBUG 1935#ifdef DEBUG
1936 paddr_t pa = VM_PAGE_TO_PHYS(pg); 
1937 
1938 
1939 if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) || 1936 if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
1940 (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE))) 1937 (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
1941 printf("pmap_page_protect(%p, %x)\n", pg, prot); 1938 printf("pmap_page_protect(%p, %x)\n", pg, prot);
1942#endif 1939#endif
1943 1940
1944 pmap_tlb_context_init(&tlbctx, TLB_CTX_F_PV); 1941 pmap_tlb_context_init(&tlbctx, TLB_CTX_F_PV);
1945 1942
1946 switch (prot) { 1943 switch (prot) {
1947 case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE: 1944 case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE:
1948 case VM_PROT_READ|VM_PROT_WRITE: 1945 case VM_PROT_READ|VM_PROT_WRITE:
1949 return; 1946 return;
1950 1947
1951 /* copy_on_write */ 1948 /* copy_on_write */
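
A minimal standalone sketch of why the two deleted lines in the hunk above plausibly broke DEBUG kernels (this is a hypothetical illustration, not code from the NetBSD tree): the DEBUG-only local pa was initialized but never read by the printf that follows, and kernel builds typically compile with -Werror, so the unused-variable warning becomes a hard error in builds that define DEBUG while the normal build never even sees the block.

/* unused_debug_local.c -- hypothetical illustration, not from pmap.c */
#include <stdio.h>

#define DEBUG
static unsigned int pmapdebug_example = 1;	/* stands in for pmapdebug */

static void
page_protect_sketch(void *pg)
{
#ifdef DEBUG
	/*
	 * The deleted lines in the hunk above amounted to something like:
	 *
	 *	paddr_t pa = VM_PAGE_TO_PHYS(pg);
	 *
	 * with no later use of pa in this block.  Under -Werror that is
	 * a compile error, so only the DEBUG build failed.  The fix is
	 * simply to drop the dead local, as the diff does.
	 */
	if (pmapdebug_example)
		printf("page_protect_sketch(%p)\n", pg);
#endif
}

int
main(void)
{
	page_protect_sketch(NULL);
	return 0;
}
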
@@ -2566,27 +2563,27 @@ pmap_extract(pmap_t pmap, vaddr_t va, pa @@ -2566,27 +2563,27 @@ pmap_extract(pmap_t pmap, vaddr_t va, pa
2566#ifdef DEBUG 2563#ifdef DEBUG
2567 if (pmapdebug & PDB_FOLLOW) 2564 if (pmapdebug & PDB_FOLLOW)
2568 printf("pmap_extract(%p, %lx) -> ", pmap, va); 2565 printf("pmap_extract(%p, %lx) -> ", pmap, va);
2569#endif 2566#endif
2570 2567
2571 /* 2568 /*
2572 * Take a faster path for the kernel pmap. Avoids locking, 2569 * Take a faster path for the kernel pmap. Avoids locking,
2573 * handles K0SEG. 2570 * handles K0SEG.
2574 */ 2571 */
2575 if (__predict_true(pmap == pmap_kernel())) { 2572 if (__predict_true(pmap == pmap_kernel())) {
2576 if (__predict_true(vtophys_internal(va, pap))) { 2573 if (__predict_true(vtophys_internal(va, pap))) {
2577#ifdef DEBUG 2574#ifdef DEBUG
2578 if (pmapdebug & PDB_FOLLOW) 2575 if (pmapdebug & PDB_FOLLOW)
2579 printf("0x%lx (kernel vtophys)\n", pa); 2576 printf("0x%lx (kernel vtophys)\n", *pap);
2580#endif 2577#endif
2581 return true; 2578 return true;
2582 } 2579 }
2583#ifdef DEBUG 2580#ifdef DEBUG
2584 if (pmapdebug & PDB_FOLLOW) 2581 if (pmapdebug & PDB_FOLLOW)
2585 printf("failed (kernel vtophys)\n"); 2582 printf("failed (kernel vtophys)\n");
2586#endif 2583#endif
2587 return false; 2584 return false;
2588 } 2585 }
2589 2586
2590 pt_entry_t * const lev1map = pmap_lev1map(pmap); 2587 pt_entry_t * const lev1map = pmap_lev1map(pmap);
2591 2588
2592 PMAP_LOCK(pmap); 2589 PMAP_LOCK(pmap);
@@ -3268,27 +3265,27 @@ void @@ -3268,27 +3265,27 @@ void
3268pmap_pv_dump(paddr_t pa) 3265pmap_pv_dump(paddr_t pa)
3269{ 3266{
3270 struct vm_page *pg; 3267 struct vm_page *pg;
3271 struct vm_page_md *md; 3268 struct vm_page_md *md;
3272 pv_entry_t pv; 3269 pv_entry_t pv;
3273 kmutex_t *lock; 3270 kmutex_t *lock;
3274 3271
3275 pg = PHYS_TO_VM_PAGE(pa); 3272 pg = PHYS_TO_VM_PAGE(pa);
3276 md = VM_PAGE_TO_MD(pg); 3273 md = VM_PAGE_TO_MD(pg);
3277 3274
3278 lock = pmap_pvh_lock(pg); 3275 lock = pmap_pvh_lock(pg);
3279 mutex_enter(lock); 3276 mutex_enter(lock);
3280 3277
3281 printf("pa 0x%lx (attrs = 0x%x):\n", pa, md->pvh_listx & PGA_ATTRS); 3278 printf("pa 0x%lx (attrs = 0x%lx):\n", pa, md->pvh_listx & PGA_ATTRS);
3282 for (pv = VM_MDPAGE_PVS(pg); pv != NULL; pv = pv->pv_next) 3279 for (pv = VM_MDPAGE_PVS(pg); pv != NULL; pv = pv->pv_next)
3283 printf(" pmap %p, va 0x%lx\n", 3280 printf(" pmap %p, va 0x%lx\n",
3284 pv->pv_pmap, pv->pv_va); 3281 pv->pv_pmap, pv->pv_va);
3285 printf("\n"); 3282 printf("\n");
3286 3283
3287 mutex_exit(lock); 3284 mutex_exit(lock);
3288} 3285}
3289#endif 3286#endif
3290 3287
3291/* 3288/*
3292 * vtophys: 3289 * vtophys:
3293 * 3290 *
3294 * Return the physical address corresponding to the K0SEG or 3291 * Return the physical address corresponding to the K0SEG or
@@ -3994,27 +3991,27 @@ pmap_asn_alloc(pmap_t const pmap, struct @@ -3994,27 +3991,27 @@ pmap_asn_alloc(pmap_t const pmap, struct
3994 * are not currently in use by this processor. 3991 * are not currently in use by this processor.
3995 * 3992 *
3996 * However... considering that we're using an unsigned 64-bit 3993 * However... considering that we're using an unsigned 64-bit
3997 * integer for generation numbers, on non-ASN CPUs, we won't 3994 * integer for generation numbers, on non-ASN CPUs, we won't
3998 * wrap for approximately 75 billion years on a 128-ASN CPU 3995 * wrap for approximately 75 billion years on a 128-ASN CPU
3999 * (assuming 1000 switch * operations per second). 3996 * (assuming 1000 switch * operations per second).
4000 * 3997 *
4001 * So, we don't bother. 3998 * So, we don't bother.
4002 */ 3999 */
4003 KASSERT(ci->ci_asn_gen != PMAP_ASNGEN_INVALID); 4000 KASSERT(ci->ci_asn_gen != PMAP_ASNGEN_INVALID);
4004#ifdef DEBUG 4001#ifdef DEBUG
4005 if (pmapdebug & PDB_ASN) 4002 if (pmapdebug & PDB_ASN)
4006 printf("pmap_asn_alloc: generation bumped to %lu\n", 4003 printf("pmap_asn_alloc: generation bumped to %lu\n",
4007 ci->ci_asn_ge); 4004 ci->ci_asn_gen);
4008#endif 4005#endif
4009 } 4006 }
4010 4007
4011 /* 4008 /*
4012 * Assign the new ASN and validate the generation number. 4009 * Assign the new ASN and validate the generation number.
4013 */ 4010 */
4014 pmc->pmc_asn = ci->ci_next_asn++; 4011 pmc->pmc_asn = ci->ci_next_asn++;
4015 pmc->pmc_asngen = ci->ci_asn_gen; 4012 pmc->pmc_asngen = ci->ci_asn_gen;
4016 TLB_COUNT(asn_assign); 4013 TLB_COUNT(asn_assign);
4017 4014
4018 /* 4015 /*
4019 * We have a new ASN, so we can skip any pending I-stream sync 4016 * We have a new ASN, so we can skip any pending I-stream sync
4020 * on the way back out to user space. 4017 * on the way back out to user space.
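
As a back-of-envelope check on the "approximately 75 billion years" figure in the comment above (my own arithmetic, not part of the commit): with 128 ASNs per generation, the 64-bit generation counter is bumped once per 128 context switches, so at the assumed 1000 switches per second it takes about 2^64 * 128 / 1000 seconds, roughly 7.5e10 years, to wrap.

/* asn_wrap_check.c -- back-of-envelope check, not part of the NetBSD tree */
#include <stdio.h>

int
main(void)
{
	const double generations = 18446744073709551616.0;	/* 2^64 */
	const double asns_per_generation = 128.0;		/* "128-ASN CPU" */
	const double switches_per_second = 1000.0;		/* comment's assumption */
	const double seconds_per_year = 365.25 * 24.0 * 60.0 * 60.0;

	/* One generation bump every 128 context switches. */
	const double years = (generations * asns_per_generation /
	    switches_per_second) / seconds_per_year;

	printf("generation counter wraps after ~%.0f billion years\n",
	    years / 1e9);
	return 0;
}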