Fri Sep 11 03:54:14 2020 UTC ()
Fix shift typo in a comment.


(simonb)
diff -r1.272 -r1.273 src/sys/arch/alpha/alpha/pmap.c

cvs diff -r1.272 -r1.273 src/sys/arch/alpha/alpha/pmap.c (expand / switch to unified diff)

--- src/sys/arch/alpha/alpha/pmap.c 2020/09/08 21:41:37 1.272
+++ src/sys/arch/alpha/alpha/pmap.c 2020/09/11 03:54:14 1.273
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: pmap.c,v 1.272 2020/09/08 21:41:37 riastradh Exp $ */ 1/* $NetBSD: pmap.c,v 1.273 2020/09/11 03:54:14 simonb Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1998, 1999, 2000, 2001, 2007, 2008, 2020 4 * Copyright (c) 1998, 1999, 2000, 2001, 2007, 2008, 2020
5 * The NetBSD Foundation, Inc. 5 * The NetBSD Foundation, Inc.
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * This code is derived from software contributed to The NetBSD Foundation 8 * This code is derived from software contributed to The NetBSD Foundation
9 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 9 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
10 * NASA Ames Research Center, by Andrew Doran and Mindaugas Rasiukevicius, 10 * NASA Ames Research Center, by Andrew Doran and Mindaugas Rasiukevicius,
11 * and by Chris G. Demetriou. 11 * and by Chris G. Demetriou.
12 * 12 *
13 * Redistribution and use in source and binary forms, with or without 13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions 14 * modification, are permitted provided that the following conditions
@@ -125,27 +125,27 @@ @@ -125,27 +125,27 @@
125 * this module may delay invalidate or reduced protection 125 * this module may delay invalidate or reduced protection
126 * operations until such time as they are actually 126 * operations until such time as they are actually
127 * necessary. This module is given full information as 127 * necessary. This module is given full information as
128 * to which processors are currently using which maps, 128 * to which processors are currently using which maps,
129 * and to when physical maps must be made correct. 129 * and to when physical maps must be made correct.
130 */ 130 */
131 131
132#include "opt_lockdebug.h" 132#include "opt_lockdebug.h"
133#include "opt_sysv.h" 133#include "opt_sysv.h"
134#include "opt_multiprocessor.h" 134#include "opt_multiprocessor.h"
135 135
136#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */ 136#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
137 137
138__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.272 2020/09/08 21:41:37 riastradh Exp $"); 138__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.273 2020/09/11 03:54:14 simonb Exp $");
139 139
140#include <sys/param.h> 140#include <sys/param.h>
141#include <sys/systm.h> 141#include <sys/systm.h>
142#include <sys/kernel.h> 142#include <sys/kernel.h>
143#include <sys/proc.h> 143#include <sys/proc.h>
144#include <sys/malloc.h> 144#include <sys/malloc.h>
145#include <sys/pool.h> 145#include <sys/pool.h>
146#include <sys/buf.h> 146#include <sys/buf.h>
147#include <sys/evcnt.h> 147#include <sys/evcnt.h>
148#include <sys/atomic.h> 148#include <sys/atomic.h>
149#include <sys/cpu.h> 149#include <sys/cpu.h>
150 150
151#include <uvm/uvm.h> 151#include <uvm/uvm.h>
@@ -429,27 +429,27 @@ pmap_activation_lock(pmap_t const pmap) @@ -429,27 +429,27 @@ pmap_activation_lock(pmap_t const pmap)
429#define pmap_all_cpus() cpus_running 429#define pmap_all_cpus() cpus_running
430#else 430#else
431#define pmap_all_cpus() ~0UL 431#define pmap_all_cpus() ~0UL
432#endif /* MULTIPROCESSOR */ 432#endif /* MULTIPROCESSOR */
433 433
434/* 434/*
435 * TLB management. 435 * TLB management.
436 * 436 *
437 * TLB invalidations need to be performed on local and remote CPUs 437 * TLB invalidations need to be performed on local and remote CPUs
438 * whenever parts of the PTE that the hardware or PALcode understands 438 * whenever parts of the PTE that the hardware or PALcode understands
439 * changes. In order amortize the cost of these operations, we will 439 * changes. In order amortize the cost of these operations, we will
440 * queue up to 8 addresses to invalidate in a batch. Any more than 440 * queue up to 8 addresses to invalidate in a batch. Any more than
441 * that, and we will hit the entire TLB. 441 * that, and we will hit the entire TLB.
442 8 442 *
443 * Some things that add complexity: 443 * Some things that add complexity:
444 * 444 *
445 * ==> ASNs. A CPU may have valid TLB entries for other than the current 445 * ==> ASNs. A CPU may have valid TLB entries for other than the current
446 * address spaace. We can only invalidate TLB entries for the current 446 * address spaace. We can only invalidate TLB entries for the current
447 * address space, so when asked to invalidate a VA for the non-current 447 * address space, so when asked to invalidate a VA for the non-current
448 * pmap on a given CPU, we simply invalidate the ASN for that pmap,CPU 448 * pmap on a given CPU, we simply invalidate the ASN for that pmap,CPU
449 * tuple so that new one is allocated on the next activation on that 449 * tuple so that new one is allocated on the next activation on that
450 * CPU. N.B. that for CPUs that don't implement ASNs, SWPCTX does all 450 * CPU. N.B. that for CPUs that don't implement ASNs, SWPCTX does all
451 * the work necessary, so we can skip some work in the pmap module 451 * the work necessary, so we can skip some work in the pmap module
452 * itself. 452 * itself.
453 * 453 *
454 * When a pmap is activated on a given CPU, we set a corresponding 454 * When a pmap is activated on a given CPU, we set a corresponding
455 * bit in pmap::pm_cpus, indicating that it potentially has valid 455 * bit in pmap::pm_cpus, indicating that it potentially has valid