Wed Jul 20 12:38:44 2016 UTC
Introduce uvm_km_protect.


(maxv)
diff -r1.197 -r1.198 src/sys/uvm/uvm_extern.h
diff -r1.139 -r1.140 src/sys/uvm/uvm_km.c

cvs diff -u -r1.197 -r1.198 src/sys/uvm/uvm_extern.h

--- src/sys/uvm/uvm_extern.h 2016/05/25 17:43:58 1.197
+++ src/sys/uvm/uvm_extern.h 2016/07/20 12:38:43 1.198
@@ -1,14 +1,14 @@
-/*	$NetBSD: uvm_extern.h,v 1.197 2016/05/25 17:43:58 christos Exp $	*/
+/*	$NetBSD: uvm_extern.h,v 1.198 2016/07/20 12:38:43 maxv Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
@@ -617,26 +617,28 @@ int uvm_vslock(struct vmspace *, void
 void		uvm_vsunlock(struct vmspace *, void *, size_t);
 void		uvm_cpu_attach(struct cpu_info *);
 
 
 /* uvm_init.c */
 void		uvm_init(void);
 
 /* uvm_io.c */
 int		uvm_io(struct vm_map *, struct uio *, int);
 
 /* uvm_km.c */
 vaddr_t		uvm_km_alloc(struct vm_map *, vsize_t, vsize_t,
 		    uvm_flag_t);
+int		uvm_km_protect(struct vm_map *, vaddr_t, vsize_t,
+		    vm_prot_t);
 void		uvm_km_free(struct vm_map *, vaddr_t, vsize_t,
 		    uvm_flag_t);
 
 struct vm_map	*uvm_km_suballoc(struct vm_map *, vaddr_t *,
 		    vaddr_t *, vsize_t, int, bool,
 		    struct vm_map *);
 #ifdef _KERNEL
 int		uvm_km_kmem_alloc(vmem_t *, vmem_size_t, vm_flag_t,
 		    vmem_addr_t *);
 void		uvm_km_kmem_free(vmem_t *, vmem_addr_t, vmem_size_t);
 bool		uvm_km_va_starved_p(void);
 #endif
 
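The prototype above is the whole of the user-visible interface. For reference, a minimal usage sketch, not part of this commit: allocate a wired, zeroed page from kernel_map, initialize it, then drop write permission. The variable names and error handling here are illustrative assumptions.

	/*
	 * Hypothetical caller: make a kernel page read-only once it
	 * has been initialized.  Returns 0 on success, errno on error.
	 */
	vaddr_t va;
	int error;

	va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO);
	if (va == 0)
		return ENOMEM;

	/* ... fill the page ... */

	error = uvm_km_protect(kernel_map, va, PAGE_SIZE, VM_PROT_READ);
	if (error) {
		uvm_km_free(kernel_map, va, PAGE_SIZE, UVM_KMF_WIRED);
		return error;
	}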

cvs diff -u -r1.139 -r1.140 src/sys/uvm/uvm_km.c

--- src/sys/uvm/uvm_km.c 2015/02/06 18:19:22 1.139
+++ src/sys/uvm/uvm_km.c 2016/07/20 12:38:43 1.140
@@ -1,14 +1,14 @@
-/*	$NetBSD: uvm_km.c,v 1.139 2015/02/06 18:19:22 maxv Exp $	*/
+/*	$NetBSD: uvm_km.c,v 1.140 2016/07/20 12:38:43 maxv Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
  * Copyright (c) 1991, 1993, The Regents of the University of California.
  *
  * All rights reserved.
  *
  * This code is derived from software contributed to Berkeley by
  * The Mach Operating System project at Carnegie-Mellon University.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
@@ -142,27 +142,27 @@
  * kmem_meta_arena:
  *	Imports from kmem_va_meta_arena. Allocations from this arena are
  *	backed with the pages.
  *
  * Arena stacking:
  *
  *	kmem_arena
  *		kmem_va_arena
  *			kmem_va_meta_arena
  *				kmem_meta_arena
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.139 2015/02/06 18:19:22 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.140 2016/07/20 12:38:43 maxv Exp $");
 
 #include "opt_uvmhist.h"
 
 #include "opt_kmempages.h"
 
 #ifndef NKMEMPAGES
 #define NKMEMPAGES 0
 #endif
 
 /*
  * Defaults for lower and upper-bounds for the kmem_arena page count.
  * Can be overridden by kernel config options.
  */
@@ -698,26 +698,36 @@ uvm_km_alloc(struct vm_map *map, vsize_t
 		    prot, PMAP_KMPAGE);
 		loopva += PAGE_SIZE;
 		offset += PAGE_SIZE;
 		loopsize -= PAGE_SIZE;
 	}
 
 	pmap_update(pmap_kernel());
 
 	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
 	return(kva);
 }
 
 /*
+ * uvm_km_protect: change the protection of an allocated area
+ */
+
+int
+uvm_km_protect(struct vm_map *map, vaddr_t addr, vsize_t size, vm_prot_t prot)
+{
+	return uvm_map_protect(map, addr, addr + round_page(size), prot, false);
+}
+
+/*
  * uvm_km_free: free an area of kernel memory
  */
 
 void
 uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size, uvm_flag_t flags)
 {
 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
 	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
 	    (flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
 	    (flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
 	KASSERT((addr & PAGE_MASK) == 0);
 	KASSERT(vm_map_pmap(map) == pmap_kernel());
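Note on the implementation: uvm_km_protect is deliberately a thin wrapper. It rounds size up to a page boundary with round_page() and delegates to uvm_map_protect() with its final (set-maximum-protection) argument false, so only the current protection of the mapping changes, not its maximum protection. As a sketch of the equivalence, assuming a page-aligned addr:

	/* This call... */
	uvm_km_protect(kernel_map, addr, size, VM_PROT_READ);

	/* ...is equivalent to the open-coded form: */
	uvm_map_protect(kernel_map, addr, addr + round_page(size),
	    VM_PROT_READ, false);

which is exactly what the body of the new function does.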