Sun Mar 8 15:01:50 2020 UTC
Don't zap the non-pdpolicy bits in pg->pqflags.


(ad)
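The gist of the change below: the pdpolicy code used to rewrite pg->pqflags wholesale (keeping only PQ_INTENT_QUEUED), which also wiped flag bits that do not belong to the page-replacement policy; it now clears only the policy bits it owns before setting the new queue state. A minimal user-space sketch of the before/after semantics follows. The flag names mirror the ones in the diff, but the numeric values and PQ_OTHER_BIT are invented for illustration; only the shape of the expressions matches the real change.

/*
 * Illustrative sketch only, not kernel code.  PQ_OTHER_BIT stands in for
 * any non-pdpolicy bit kept in pqflags; all values here are made up.
 */
#include <stdio.h>

#define PQ_INTENT_SET		0x01	/* pdpolicy: intent pending */
#define PQ_INTENT_QUEUED	0x02	/* pdpolicy: intent queued */
#define PQ_ACTIVE		0x04	/* pdpolicy: on active queue */
#define PQ_INACTIVE		0x08	/* pdpolicy: on inactive queue */
#define PQ_OTHER_BIT		0x10	/* hypothetical non-pdpolicy bit */

int
main(void)
{
	unsigned int pqflags = PQ_ACTIVE | PQ_INTENT_SET | PQ_OTHER_BIT;
	unsigned int old_style, new_style;

	/*
	 * Old form (deactivate path): the assignment keeps only
	 * PQ_INTENT_QUEUED, so PQ_OTHER_BIT is zapped along with the
	 * bits the code actually meant to clear.
	 */
	old_style = (pqflags & PQ_INTENT_QUEUED) | PQ_INACTIVE;

	/*
	 * New form: clear exactly the pdpolicy bits being retired,
	 * then set the new state; unrelated bits survive.
	 */
	new_style = pqflags;
	new_style &= ~(PQ_ACTIVE | PQ_INTENT_SET);
	new_style |= PQ_INACTIVE;

	printf("old style: %#x (PQ_OTHER_BIT lost)\n", old_style);
	printf("new style: %#x (PQ_OTHER_BIT kept)\n", new_style);
	return 0;
}

Compiled standalone, the old-style expression yields 0x8 (PQ_OTHER_BIT gone) while the masked clear yields 0x18 (PQ_OTHER_BIT preserved), which is the behaviour the hunks below change in uvmpdpol_pagedeactivate_locked(), uvmpdpol_pageactivate_locked() and uvmpdpol_pagedequeue_locked().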
diff -r1.33 -r1.34 src/sys/uvm/uvm_pdpolicy_clock.c

cvs diff -r1.33 -r1.34 src/sys/uvm/uvm_pdpolicy_clock.c

--- src/sys/uvm/uvm_pdpolicy_clock.c 2020/02/23 15:46:43 1.33
+++ src/sys/uvm/uvm_pdpolicy_clock.c 2020/03/08 15:01:50 1.34
@@ -1,18 +1,18 @@
-/*	$NetBSD: uvm_pdpolicy_clock.c,v 1.33 2020/02/23 15:46:43 ad Exp $	*/
+/*	$NetBSD: uvm_pdpolicy_clock.c,v 1.34 2020/03/08 15:01:50 ad Exp $	*/
 /*	NetBSD: uvm_pdaemon.c,v 1.72 2006/01/05 10:47:33 yamt Exp $	*/
 
 /*-
- * Copyright (c) 2019 The NetBSD Foundation, Inc.
+ * Copyright (c) 2019, 2020 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Andrew Doran.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
@@ -88,27 +88,27 @@
  * Pittsburgh PA 15213-3890
  *
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  */
 
 #if defined(PDSIM)
 
 #include "pdsim.h"
 
 #else /* defined(PDSIM) */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clock.c,v 1.33 2020/02/23 15:46:43 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clock.c,v 1.34 2020/03/08 15:01:50 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/proc.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
 #include <sys/kmem.h>
 #include <sys/atomic.h>
 
 #include <uvm/uvm.h>
 #include <uvm/uvm_pdpolicy.h>
 #include <uvm/uvm_pdpolicy_impl.h>
 #include <uvm/uvm_stat.h>
 
@@ -445,27 +445,28 @@ uvmpdpol_pagedeactivate_locked(struct vm
 	KASSERT((pg->pqflags & (PQ_INTENT_MASK | PQ_INTENT_SET)) !=
 	    (PQ_INTENT_D | PQ_INTENT_SET));
 
 	if (pg->pqflags & PQ_ACTIVE) {
 		TAILQ_REMOVE(&pdpol_state.s_activeq, pg, pdqueue);
 		KASSERT(pdpol_state.s_active > 0);
 		pdpol_state.s_active--;
 	}
 	if ((pg->pqflags & PQ_INACTIVE) == 0) {
 		KASSERT(pg->wire_count == 0);
 		TAILQ_INSERT_TAIL(&pdpol_state.s_inactiveq, pg, pdqueue);
 		pdpol_state.s_inactive++;
 	}
-	pg->pqflags = (pg->pqflags & PQ_INTENT_QUEUED) | PQ_INACTIVE;
+	pg->pqflags &= ~(PQ_ACTIVE | PQ_INTENT_SET);
+	pg->pqflags |= PQ_INACTIVE;
 }
 
 void
 uvmpdpol_pagedeactivate(struct vm_page *pg)
 {
 
 	KASSERT(uvm_page_owner_locked_p(pg, true));
 	KASSERT(mutex_owned(&pg->interlock));
 
 	/*
 	 * we have to clear the reference bit now, as when it comes time to
 	 * realize the intent we won't have the object locked any more.
 	 */
@@ -476,27 +477,28 @@ uvmpdpol_pagedeactivate(struct vm_page *
 static void
 uvmpdpol_pageactivate_locked(struct vm_page *pg)
 {
 	struct uvmpdpol_globalstate *s __diagused = &pdpol_state;
 
 	KASSERT(mutex_owned(&s->lock));
 	KASSERT(mutex_owned(&pg->interlock));
 	KASSERT((pg->pqflags & (PQ_INTENT_MASK | PQ_INTENT_SET)) !=
 	    (PQ_INTENT_D | PQ_INTENT_SET));
 
 	uvmpdpol_pagedequeue_locked(pg);
 	TAILQ_INSERT_TAIL(&pdpol_state.s_activeq, pg, pdqueue);
 	pdpol_state.s_active++;
-	pg->pqflags = (pg->pqflags & PQ_INTENT_QUEUED) | PQ_ACTIVE;
+	pg->pqflags &= ~(PQ_INACTIVE | PQ_INTENT_SET);
+	pg->pqflags |= PQ_ACTIVE;
 }
 
 void
 uvmpdpol_pageactivate(struct vm_page *pg)
 {
 
 	KASSERT(uvm_page_owner_locked_p(pg, true));
 	KASSERT(mutex_owned(&pg->interlock));
 
 	uvmpdpol_set_intent(pg, PQ_INTENT_A);
 }
 
 static void
@@ -507,27 +509,27 @@ uvmpdpol_pagedequeue_locked(struct vm_pa
 	KASSERT(mutex_owned(&s->lock));
 	KASSERT(mutex_owned(&pg->interlock));
 
 	if (pg->pqflags & PQ_ACTIVE) {
 		TAILQ_REMOVE(&pdpol_state.s_activeq, pg, pdqueue);
 		KASSERT((pg->pqflags & PQ_INACTIVE) == 0);
 		KASSERT(pdpol_state.s_active > 0);
 		pdpol_state.s_active--;
 	} else if (pg->pqflags & PQ_INACTIVE) {
 		TAILQ_REMOVE(&pdpol_state.s_inactiveq, pg, pdqueue);
 		KASSERT(pdpol_state.s_inactive > 0);
 		pdpol_state.s_inactive--;
 	}
-	pg->pqflags &= PQ_INTENT_QUEUED;
+	pg->pqflags &= ~(PQ_ACTIVE | PQ_INACTIVE | PQ_INTENT_SET);
 }
 
 void
 uvmpdpol_pagedequeue(struct vm_page *pg)
 {
 
 	KASSERT(uvm_page_owner_locked_p(pg, true));
 	KASSERT(mutex_owned(&pg->interlock));
 
 	uvmpdpol_set_intent(pg, PQ_INTENT_D);
 }
 
 void