| @@ -1,14 +1,14 @@ | | | @@ -1,14 +1,14 @@ |
1 | /* $NetBSD: vmalloc.h,v 1.11 2021/12/19 12:07:55 riastradh Exp $ */ | | 1 | /* $NetBSD: vmalloc.h,v 1.12 2022/02/26 15:57:22 rillig Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 2013, 2018 The NetBSD Foundation, Inc. | | 4 | * Copyright (c) 2013, 2018 The NetBSD Foundation, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation | | 7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Taylor R. Campbell. | | 8 | * by Taylor R. Campbell. |
9 | * | | 9 | * |
10 | * Redistribution and use in source and binary forms, with or without | | 10 | * Redistribution and use in source and binary forms, with or without |
11 | * modification, are permitted provided that the following conditions | | 11 | * modification, are permitted provided that the following conditions |
12 | * are met: | | 12 | * are met: |
13 | * 1. Redistributions of source code must retain the above copyright | | 13 | * 1. Redistributions of source code must retain the above copyright |
14 | * notice, this list of conditions and the following disclaimer. | | 14 | * notice, this list of conditions and the following disclaimer. |
| @@ -66,27 +66,27 @@ vmalloc_user(unsigned long size) | | | @@ -66,27 +66,27 @@ vmalloc_user(unsigned long size) |
66 | { | | 66 | { |
67 | return kzalloc(size, GFP_KERNEL); | | 67 | return kzalloc(size, GFP_KERNEL); |
68 | } | | 68 | } |
69 | | | 69 | |
70 | static inline void * | | 70 | static inline void * |
71 | vzalloc(unsigned long size) | | 71 | vzalloc(unsigned long size) |
72 | { | | 72 | { |
73 | return kzalloc(size, GFP_KERNEL); | | 73 | return kzalloc(size, GFP_KERNEL); |
74 | } | | 74 | } |
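In this Linux-compatibility shim, vmalloc_user() and vzalloc() both reduce to a zeroed kzalloc(..., GFP_KERNEL), so there is no separate vmalloc arena and memory obtained from either helper is released with vfree() just below. A hypothetical usage sketch under that assumption; the function and variable names here are made up for illustration and are not part of the header:

	/* Hypothetical caller, illustration only. */
	static int
	example_alloc(uint32_t **bufp, size_t nelem)
	{
		uint32_t *buf = vzalloc(nelem * sizeof(*buf));	/* zero-filled, may sleep */

		if (buf == NULL)
			return -ENOMEM;		/* Linux-style error return */
		*bufp = buf;
		return 0;			/* caller later releases with vfree(*bufp) */
	}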
75 | | | 75 | |
76 | static inline void | | 76 | static inline void |
77 | vfree(void *ptr) | | 77 | vfree(void *ptr) |
78 | { | | 78 | { |
79 | return kfree(ptr); | | 79 | kfree(ptr); |
80 | } | | 80 | } |
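The functional change in this hunk is dropping the return in vfree(): kfree() has return type void, and ISO C (C99 6.8.6.4) does not allow a return statement with an expression, even a void-typed one, in a function that itself returns void, so the call is now made as a plain statement. A minimal sketch of the corrected pattern, using a hypothetical wrapper name:

	static void
	release(void *ptr)
	{
		/* "return kfree(ptr);" is flagged by lint and by compilers in
		 * strict ISO C mode: a void function may not return a void
		 * expression.  Call the void function as a plain statement. */
		kfree(ptr);
	}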
81 | | | 81 | |
82 | #define PAGE_KERNEL UVM_PROT_RW | | 82 | #define PAGE_KERNEL UVM_PROT_RW |
83 | | | 83 | |
84 | /* | | 84 | /* |
85 | * vmap(pages, npages, flags, prot) | | 85 | * vmap(pages, npages, flags, prot) |
86 | * | | 86 | * |
87 | * Map pages[0], pages[1], ..., pages[npages-1] into contiguous | | 87 | * Map pages[0], pages[1], ..., pages[npages-1] into contiguous |
88 | * kernel virtual address space with the specified protection, and | | 88 | * kernel virtual address space with the specified protection, and |
89 | * return a KVA pointer to the start. | | 89 | * return a KVA pointer to the start. |
90 | * | | 90 | * |
91 | * prot may be a bitwise ior of UVM_PROT_READ/WRITE/EXEC and | | 91 | * prot may be a bitwise ior of UVM_PROT_READ/WRITE/EXEC and |
92 | * PMAP_* cache flags accepted by pmap_enter(). | | 92 | * PMAP_* cache flags accepted by pmap_enter(). |