Sat Feb 26 15:57:22 2022 UTC
drm2: do not try to return a value from a void function

lint complained:
vmalloc.h(79): error: void function vfree cannot return value [213]

No functional change.


(rillig)
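For context: ISO C (C99 6.8.6.4) forbids a return statement with an expression in a function whose return type is void, even when the expression itself has type void, so "return kfree(ptr);" is a constraint violation, which lint reports as error 213. Compilers commonly accept the form as an extension (C++ permits it outright), which is how it tends to slip in. A minimal standalone sketch of the pattern and the fix, using hypothetical wrapper names rather than the drm2 code itself:

	#include <stdlib.h>

	/*
	 * Before: a return statement with an expression in a void
	 * function, even though free() itself returns void.  This is
	 * the construct lint rejects with error 213.
	 */
	void
	my_free_bad(void *ptr)
	{
		return free(ptr);
	}

	/* After: call the function and simply fall off the end. */
	void
	my_free_good(void *ptr)
	{
		free(ptr);
	}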
cvs diff -r1.11 -r1.12 src/sys/external/bsd/drm2/include/linux/vmalloc.h

--- src/sys/external/bsd/drm2/include/linux/vmalloc.h 2021/12/19 12:07:55 1.11
+++ src/sys/external/bsd/drm2/include/linux/vmalloc.h 2022/02/26 15:57:22 1.12
@@ -1,157 +1,157 @@
-/*	$NetBSD: vmalloc.h,v 1.11 2021/12/19 12:07:55 riastradh Exp $	*/
+/*	$NetBSD: vmalloc.h,v 1.12 2022/02/26 15:57:22 rillig Exp $	*/
 
 /*-
  * Copyright (c) 2013, 2018 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Taylor R. Campbell.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifndef _LINUX_VMALLOC_H_
 #define _LINUX_VMALLOC_H_
 
 #include <uvm/uvm_extern.h>
 
 #include <linux/mm.h>
 #include <linux/mm_types.h>
 #include <linux/overflow.h>
 #include <linux/slab.h>
 
 #include <asm/page.h>
 
 struct notifier_block;
 
 /*
  * XXX vmalloc and kvmalloc both use kmalloc.  If you change that, be
  * sure to update this so kvfree in <linux/mm.h> still works on vmalloc
  * addresses.
  */
 
 static inline bool
 is_vmalloc_addr(void *addr)
 {
 	return true;
 }
 
 static inline void *
 vmalloc(unsigned long size)
 {
 	return kmalloc(size, GFP_KERNEL);
 }
 
 static inline void *
 vmalloc_user(unsigned long size)
 {
 	return kzalloc(size, GFP_KERNEL);
 }
 
 static inline void *
 vzalloc(unsigned long size)
 {
 	return kzalloc(size, GFP_KERNEL);
 }
 
 static inline void
 vfree(void *ptr)
 {
-	return kfree(ptr);
+	kfree(ptr);
 }
 
 #define	PAGE_KERNEL	UVM_PROT_RW
 
 /*
  * vmap(pages, npages, flags, prot)
  *
  *	Map pages[0], pages[1], ..., pages[npages-1] into contiguous
  *	kernel virtual address space with the specified protection, and
  *	return a KVA pointer to the start.
  *
  *	prot may be a bitwise ior of UVM_PROT_READ/WRITE/EXEC and
  *	PMAP_* cache flags accepted by pmap_enter().
  */
 static inline void *
 vmap(struct page **pages, unsigned npages, unsigned long flags,
     pgprot_t protflags)
 {
 	vm_prot_t justprot = protflags & UVM_PROT_ALL;
 	vaddr_t va;
 	unsigned i;
 
 	/* Allocate some KVA, or return NULL if we can't.  */
 	va = uvm_km_alloc(kernel_map, (vsize_t)npages << PAGE_SHIFT, PAGE_SIZE,
 	    UVM_KMF_VAONLY|UVM_KMF_NOWAIT);
 	if (va == 0)
 		return NULL;
 
 	/* Ask pmap to map the KVA to the specified page addresses.  */
 	for (i = 0; i < npages; i++) {
 		pmap_kenter_pa(va + i*PAGE_SIZE, page_to_phys(pages[i]),
 		    justprot, protflags);
 	}
 
 	/* Commit the pmap updates.  */
 	pmap_update(pmap_kernel());
 
 	return (void *)va;
 }
 
 /*
  * vunmap(ptr, npages)
  *
  *	Unmap the KVA pages starting at ptr that were mapped by a call
  *	to vmap with the same npages parameter.
  */
 static inline void
 vunmap(void *ptr, unsigned npages)
 {
 	vaddr_t va = (vaddr_t)ptr;
 
 	/* Ask pmap to unmap the KVA.  */
 	pmap_kremove(va, (vsize_t)npages << PAGE_SHIFT);
 
 	/* Commit the pmap updates.  */
 	pmap_update(pmap_kernel());
 
 	/*
 	 * Now that the pmap is no longer mapping the KVA we allocated
 	 * on any CPU, it is safe to free the KVA.
 	 */
 	uvm_km_free(kernel_map, va, (vsize_t)npages << PAGE_SHIFT,
 	    UVM_KMF_VAONLY);
 }
 
 static inline int
 register_vmap_purge_notifier(struct notifier_block *nb __unused)
 {
 	return 0;
 }
 
 static inline int
 unregister_vmap_purge_notifier(struct notifier_block *nb __unused)
 {
 	return 0;
 }
 
 #endif	/* _LINUX_VMALLOC_H_ */
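For reference, a hypothetical caller of the vmap/vunmap shims in this header might look like the sketch below. The function name, the page array, the negative-errno convention, and the errno/string headers are assumptions for illustration, not part of this change:

	#include <linux/errno.h>
	#include <linux/string.h>
	#include <linux/vmalloc.h>

	/*
	 * Map an existing page array into contiguous KVA, zero the pages
	 * through the mapping, then tear the mapping down again.  The
	 * pages themselves stay allocated; vunmap only releases the KVA.
	 */
	static int
	zero_pages(struct page **pages, unsigned npages)
	{
		void *kva;

		/* flags is unused by this vmap; PAGE_KERNEL is UVM_PROT_RW. */
		kva = vmap(pages, npages, 0, PAGE_KERNEL);
		if (kva == NULL)
			return -ENOMEM;

		memset(kva, 0, (size_t)npages << PAGE_SHIFT);

		vunmap(kva, npages);
		return 0;
	}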