Thu Nov 11 14:50:54 2010 UTC
Minor clean up: refer to vm_physmem[] entries through the VM_PHYSMEM_PTR()
accessor, replace the open-coded structure copies with VM_PHYSMEM_PTR_SWAP(),
and alias the segment count vm_nphysseg as vm_nphysmem.

(uebayasi)
cvs diff -r1.157 -r1.158 src/sys/uvm/uvm_page.c
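
For orientation before the hunks: this diff mechanically converts uvm_page.c
from indexing the global vm_physmem[] array directly to going through accessor
macros, and from the name vm_nphysseg to the alias vm_nphysmem that the first
code hunk defines. The accessor definitions themselves live in the uvm headers
and are not part of this diff; what follows is a minimal hypothetical sketch of
shapes that would satisfy every use below (the macro names are from the diff,
the bodies are assumed):

    /* Sketch only -- not the committed header definitions. */
    #define VM_PHYSMEM_PTR(i)       (&vm_physmem[i])

    /*
     * "Swap" entries i and j.  While vm_physmem[] still holds the
     * segment structures themselves, this can simply reproduce the
     * structure copy it replaces; the name anticipates an indirection
     * (an array of pointers) where it becomes a real pointer swap.
     */
    #define VM_PHYSMEM_PTR_SWAP(i, j)                                \
            do { vm_physmem[(i)] = vm_physmem[(j)]; } while (0)

Under these assumptions the change is behavior-preserving: every old
expression vm_physmem[lcv].field becomes VM_PHYSMEM_PTR(lcv)->field, and
vm_nphysmem expands to the same vm_nphysseg counter.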

--- src/sys/uvm/uvm_page.c 2010/11/06 15:42:43 1.157
+++ src/sys/uvm/uvm_page.c 2010/11/11 14:50:54 1.158
@@ -1,14 +1,14 @@
-/* $NetBSD: uvm_page.c,v 1.157 2010/11/06 15:42:43 uebayasi Exp $ */
+/* $NetBSD: uvm_page.c,v 1.158 2010/11/11 14:50:54 uebayasi Exp $ */
 
 /*
  * Copyright (c) 2010 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
@@ -87,27 +87,27 @@
  * School of Computer Science
  * Carnegie Mellon University
  * Pittsburgh PA 15213-3890
  *
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  */
 
 /*
  * uvm_page.c: page ops.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.157 2010/11/06 15:42:43 uebayasi Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.158 2010/11/11 14:50:54 uebayasi Exp $");
 
 #include "opt_ddb.h"
 #include "opt_uvmhist.h"
 #include "opt_readahead.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/malloc.h>
 #include <sys/sched.h>
 #include <sys/kernel.h>
 #include <sys/vnode.h>
 #include <sys/proc.h>
 #include <sys/atomic.h>
@@ -117,26 +117,27 @@ __KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v
 #include <uvm/uvm_ddb.h>
 #include <uvm/uvm_pdpolicy.h>
 
 /*
  * global vars... XXXCDC: move to uvm. structure.
  */
 
 /*
  * physical memory config is stored in vm_physmem.
  */
 
 struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];   /* XXXCDC: uvm.physmem */
 int vm_nphysseg = 0;                            /* XXXCDC: uvm.nphysseg */
+#define vm_nphysmem vm_nphysseg
 
 /*
  * Some supported CPUs in a given architecture don't support all
  * of the things necessary to do idle page zero'ing efficiently.
  * We therefore provide a way to enable it from machdep code here.
  */
 bool vm_page_zero_enable = false;
 
 /*
  * number of pages per-CPU to reserve for the kernel.
  */
 int vm_page_reserve_kernel = 5;
 
@@ -357,26 +358,27 @@ uvm_page_init_buckets(struct pgfreelist
 /*
  * uvm_page_init: init the page system. called from uvm_init().
  *
  * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
  */
 
 void
 uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
 {
         static struct uvm_cpu boot_cpu;
         psize_t freepages, pagecount, bucketcount, n;
         struct pgflbucket *bucketarray, *cpuarray;
         struct vm_page *pagearray;
+        struct vm_physseg *seg;
         int lcv;
         u_int i;
         paddr_t paddr;
 
         KASSERT(ncpu <= 1);
         CTASSERT(sizeof(pagearray->offset) >= sizeof(struct uvm_cpu *));
 
         /*
          * init the page queues and page queue locks, except the free
          * list; we allocate that later (with the initial vm_page
          * structures).
          */
 
@@ -388,40 +390,42 @@ uvm_page_init(vaddr_t *kvm_startp, vaddr
         mutex_init(&uvm_fpageqlock, MUTEX_DRIVER, IPL_VM);
 
         /*
          * allocate vm_page structures.
          */
 
         /*
          * sanity check:
          * before calling this function the MD code is expected to register
          * some free RAM with the uvm_page_physload() function. our job
          * now is to allocate vm_page structures for this memory.
          */
 
-        if (vm_nphysseg == 0)
+        if (vm_nphysmem == 0)
                 panic("uvm_page_bootstrap: no memory pre-allocated");
 
         /*
          * first calculate the number of free pages...
          *
          * note that we use start/end rather than avail_start/avail_end.
          * this allows us to allocate extra vm_page structures in case we
          * want to return some memory to the pool after booting.
          */
 
         freepages = 0;
-        for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
-                freepages += (vm_physmem[lcv].end - vm_physmem[lcv].start);
+        for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
+                seg = VM_PHYSMEM_PTR(lcv);
+                freepages += (seg->end - seg->start);
+        }
 
         /*
          * Let MD code initialize the number of colors, or default
          * to 1 color if MD code doesn't care.
          */
         if (uvmexp.ncolors == 0)
                 uvmexp.ncolors = 1;
         uvmexp.colormask = uvmexp.ncolors - 1;
 
         /*
          * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
          * use. for each page of memory we use we need a vm_page structure.
          * thus, the total number of pages we can use is the total size of
@@ -445,47 +449,48 @@ uvm_page_init(vaddr_t *kvm_startp, vaddr
                 uvm.page_free[lcv].pgfl_buckets =
                     (bucketarray + (lcv * uvmexp.ncolors));
                 uvm_page_init_buckets(&uvm.page_free[lcv]);
                 uvm.cpus[0]->page_free[lcv].pgfl_buckets =
                     (cpuarray + (lcv * uvmexp.ncolors));
                 uvm_page_init_buckets(&uvm.cpus[0]->page_free[lcv]);
         }
         memset(pagearray, 0, pagecount * sizeof(struct vm_page));
 
         /*
          * init the vm_page structures and put them in the correct place.
          */
 
-        for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
-                n = vm_physmem[lcv].end - vm_physmem[lcv].start;
+        for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
+                seg = VM_PHYSMEM_PTR(lcv);
+                n = seg->end - seg->start;
 
                 /* set up page array pointers */
-                vm_physmem[lcv].pgs = pagearray;
+                seg->pgs = pagearray;
                 pagearray += n;
                 pagecount -= n;
-                vm_physmem[lcv].lastpg = vm_physmem[lcv].pgs + (n - 1);
+                seg->lastpg = seg->pgs + (n - 1);
 
                 /* init and free vm_pages (we've already zeroed them) */
-                paddr = ctob(vm_physmem[lcv].start);
+                paddr = ctob(seg->start);
                 for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
-                        vm_physmem[lcv].pgs[i].phys_addr = paddr;
+                        seg->pgs[i].phys_addr = paddr;
 #ifdef __HAVE_VM_PAGE_MD
-                        VM_MDPAGE_INIT(&vm_physmem[lcv].pgs[i]);
+                        VM_MDPAGE_INIT(&seg->pgs[i]);
 #endif
-                        if (atop(paddr) >= vm_physmem[lcv].avail_start &&
-                            atop(paddr) <= vm_physmem[lcv].avail_end) {
+                        if (atop(paddr) >= seg->avail_start &&
+                            atop(paddr) <= seg->avail_end) {
                                 uvmexp.npages++;
                                 /* add page to free pool */
-                                uvm_pagefree(&vm_physmem[lcv].pgs[i]);
+                                uvm_pagefree(&seg->pgs[i]);
                         }
                 }
         }
 
         /*
          * pass up the values of virtual_space_start and
          * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
          * layers of the VM.
          */
 
         *kvm_startp = round_page(virtual_space_start);
         *kvm_endp = trunc_page(virtual_space_end);
 #ifdef DEBUG
@@ -643,102 +648,102 @@ uvm_pageboot_alloc(vsize_t size)
  * => return false if out of memory.
  */
 
 /* subroutine: try to allocate from memory chunks on the specified freelist */
 static bool uvm_page_physget_freelist(paddr_t *, int);
 
 static bool
 uvm_page_physget_freelist(paddr_t *paddrp, int freelist)
 {
         int lcv, x;
 
         /* pass 1: try allocating from a matching end */
 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
-        for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
+        for (lcv = vm_nphysmem - 1 ; lcv >= 0 ; lcv--)
 #else
-        for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
+        for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
 #endif
         {
 
                 if (uvm.page_init_done == true)
                         panic("uvm_page_physget: called _after_ bootstrap");
 
                 if (vm_physmem[lcv].free_list != freelist)
                         continue;
 
                 /* try from front */
                 if (vm_physmem[lcv].avail_start == vm_physmem[lcv].start &&
                     vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
                         *paddrp = ctob(vm_physmem[lcv].avail_start);
                         vm_physmem[lcv].avail_start++;
                         vm_physmem[lcv].start++;
                         /* nothing left? nuke it */
                         if (vm_physmem[lcv].avail_start ==
                             vm_physmem[lcv].end) {
-                                if (vm_nphysseg == 1)
+                                if (vm_nphysmem == 1)
                                         panic("uvm_page_physget: out of memory!");
-                                vm_nphysseg--;
-                                for (x = lcv ; x < vm_nphysseg ; x++)
+                                vm_nphysmem--;
+                                for (x = lcv ; x < vm_nphysmem ; x++)
                                         /* structure copy */
-                                        vm_physmem[x] = vm_physmem[x+1];
+                                        VM_PHYSMEM_PTR_SWAP(x, x + 1);
                         }
                         return (true);
                 }
 
                 /* try from rear */
                 if (vm_physmem[lcv].avail_end == vm_physmem[lcv].end &&
                     vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
                         *paddrp = ctob(vm_physmem[lcv].avail_end - 1);
                         vm_physmem[lcv].avail_end--;
                         vm_physmem[lcv].end--;
                         /* nothing left? nuke it */
                         if (vm_physmem[lcv].avail_end ==
                             vm_physmem[lcv].start) {
-                                if (vm_nphysseg == 1)
+                                if (vm_nphysmem == 1)
                                         panic("uvm_page_physget: out of memory!");
-                                vm_nphysseg--;
-                                for (x = lcv ; x < vm_nphysseg ; x++)
+                                vm_nphysmem--;
+                                for (x = lcv ; x < vm_nphysmem ; x++)
                                         /* structure copy */
-                                        vm_physmem[x] = vm_physmem[x+1];
+                                        VM_PHYSMEM_PTR_SWAP(x, x + 1);
                         }
                         return (true);
                 }
         }
 
         /* pass2: forget about matching ends, just allocate something */
 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
-        for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
+        for (lcv = vm_nphysmem - 1 ; lcv >= 0 ; lcv--)
 #else
-        for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
+        for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
 #endif
         {
 
                 /* any room in this bank? */
                 if (vm_physmem[lcv].avail_start >= vm_physmem[lcv].avail_end)
                         continue; /* nope */
 
                 *paddrp = ctob(vm_physmem[lcv].avail_start);
                 vm_physmem[lcv].avail_start++;
                 /* truncate! */
                 vm_physmem[lcv].start = vm_physmem[lcv].avail_start;
 
                 /* nothing left? nuke it */
                 if (vm_physmem[lcv].avail_start == vm_physmem[lcv].end) {
-                        if (vm_nphysseg == 1)
+                        if (vm_nphysmem == 1)
                                 panic("uvm_page_physget: out of memory!");
-                        vm_nphysseg--;
-                        for (x = lcv ; x < vm_nphysseg ; x++)
+                        vm_nphysmem--;
+                        for (x = lcv ; x < vm_nphysmem ; x++)
                                 /* structure copy */
-                                vm_physmem[x] = vm_physmem[x+1];
+                                VM_PHYSMEM_PTR_SWAP(x, x + 1);
                 }
                 return (true);
         }
 
         return (false); /* whoops! */
 }
 
 bool
 uvm_page_physget(paddr_t *paddrp)
 {
         int i;
 
         /* try in the order of freelist preference */
@@ -768,107 +773,107 @@ uvm_page_physload(paddr_t start, paddr_t
         struct vm_physseg *ps;
 
         if (uvmexp.pagesize == 0)
                 panic("uvm_page_physload: page size not set!");
         if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
                 panic("uvm_page_physload: bad free list %d", free_list);
         if (start >= end)
                 panic("uvm_page_physload: start >= end");
 
         /*
          * do we have room?
          */
 
-        if (vm_nphysseg == VM_PHYSSEG_MAX) {
+        if (vm_nphysmem == VM_PHYSSEG_MAX) {
                 printf("uvm_page_physload: unable to load physical memory "
                     "segment\n");
                 printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
                     VM_PHYSSEG_MAX, (long long)start, (long long)end);
                 printf("\tincrease VM_PHYSSEG_MAX\n");
                 return;
         }
 
         /*
          * check to see if this is a "preload" (i.e. uvm_mem_init hasn't been
          * called yet, so malloc is not available).
          */
 
-        for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
+        for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
                 if (vm_physmem[lcv].pgs)
                         break;
         }
-        preload = (lcv == vm_nphysseg);
+        preload = (lcv == vm_nphysmem);
 
         /*
          * if VM is already running, attempt to malloc() vm_page structures
          */
 
         if (!preload) {
                 panic("uvm_page_physload: tried to add RAM after vm_mem_init");
         } else {
                 pgs = NULL;
                 npages = 0;
         }
 
         /*
          * now insert us in the proper place in vm_physmem[]
          */
 
 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
         /* random: put it at the end (easy!) */
-        ps = &vm_physmem[vm_nphysseg];
+        ps = &vm_physmem[vm_nphysmem];
 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
         {
                 int x;
                 /* sort by address for binary search */
-                for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
+                for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
                         if (start < vm_physmem[lcv].start)
                                 break;
                 ps = &vm_physmem[lcv];
                 /* move back other entries, if necessary ... */
-                for (x = vm_nphysseg ; x > lcv ; x--)
+                for (x = vm_nphysmem ; x > lcv ; x--)
                         /* structure copy */
-                        vm_physmem[x] = vm_physmem[x - 1];
+                        VM_PHYSMEM_PTR_SWAP(x, x - 1);
         }
 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
         {
                 int x;
                 /* sort by largest segment first */
-                for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
+                for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
                         if ((end - start) >
                             (vm_physmem[lcv].end - vm_physmem[lcv].start))
                                 break;
                 ps = &vm_physmem[lcv];
                 /* move back other entries, if necessary ... */
-                for (x = vm_nphysseg ; x > lcv ; x--)
+                for (x = vm_nphysmem ; x > lcv ; x--)
                         /* structure copy */
-                        vm_physmem[x] = vm_physmem[x - 1];
+                        VM_PHYSMEM_PTR_SWAP(x, x - 1);
         }
 #else
         panic("uvm_page_physload: unknown physseg strategy selected!");
 #endif
 
         ps->start = start;
         ps->end = end;
         ps->avail_start = avail_start;
         ps->avail_end = avail_end;
         if (preload) {
                 ps->pgs = NULL;
         } else {
                 ps->pgs = pgs;
                 ps->lastpg = pgs + npages - 1;
         }
         ps->free_list = free_list;
-        vm_nphysseg++;
+        vm_nphysmem++;
 
         if (!preload) {
                 uvmpdpol_reinit();
         }
 }
 
 /*
  * uvm_page_recolor: Recolor the pages if the new bucket count is
  * larger than the old one.
  */
 
 void
 uvm_page_recolor(int newncolors)
@@ -1970,27 +1975,27 @@ uvm_page_printit(struct vm_page *pg, boo
  */
 
 void
 uvm_page_printall(void (*pr)(const char *, ...))
 {
         unsigned i;
         struct vm_page *pg;
 
         (*pr)("%18s %4s %4s %18s %18s"
 #ifdef UVM_PAGE_TRKOWN
             " OWNER"
 #endif
             "\n", "PAGE", "FLAG", "PQ", "UOBJECT", "UANON");
-        for (i = 0; i < vm_nphysseg; i++) {
+        for (i = 0; i < vm_nphysmem; i++) {
                 for (pg = vm_physmem[i].pgs; pg <= vm_physmem[i].lastpg; pg++) {
                         (*pr)("%18p %04x %04x %18p %18p",
                             pg, pg->flags, pg->pqflags, pg->uobject,
                             pg->uanon);
 #ifdef UVM_PAGE_TRKOWN
                         if (pg->flags & PG_BUSY)
                                 (*pr)(" %d [%s]", pg->owner, pg->owner_tag);
 #endif
                         (*pr)("\n");
                 }
         }
 }
 
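
A note on the traversal idiom the diff establishes: converted loops now fetch a
struct vm_physseg pointer once per iteration instead of repeating the array
subscript. The freepages computation from uvm_page_init() above shows the
pattern in its smallest form (excerpted, assuming the usual uvm headers):

    psize_t freepages = 0;
    struct vm_physseg *seg;
    int lcv;

    for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
            seg = VM_PHYSMEM_PTR(lcv);              /* one lookup per segment */
            freepages += (seg->end - seg->start);   /* start/end are page frames */
    }

Keeping callers on the accessor means the backing representation of
vm_physmem[] can change later without touching these loops again.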