Sat Dec 28 08:25:33 2019 UTC ()
Add missing sys/param.h include (for COHERENCY_UNIT, now needed in uvm headers)


(martin)
diff -r1.6 -r1.7 src/sys/uvm/pmap/pmap_pvt.c

cvs diff -r1.6 -r1.7 src/sys/uvm/pmap/pmap_pvt.c (switch to unified diff)

--- src/sys/uvm/pmap/pmap_pvt.c 2019/12/18 11:27:56 1.6
+++ src/sys/uvm/pmap/pmap_pvt.c 2019/12/28 08:25:33 1.7
@@ -1,173 +1,174 @@ @@ -1,173 +1,174 @@
1/* $NetBSD: pmap_pvt.c,v 1.6 2019/12/18 11:27:56 skrll Exp $ */ 1/* $NetBSD: pmap_pvt.c,v 1.7 2019/12/28 08:25:33 martin Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2014 The NetBSD Foundation, Inc. 4 * Copyright (c) 2014 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Taylor R. Campbell. 8 * by Taylor R. Campbell.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32#include <sys/cdefs.h> 32#include <sys/cdefs.h>
33__RCSID("$NetBSD: pmap_pvt.c,v 1.6 2019/12/18 11:27:56 skrll Exp $"); 33__RCSID("$NetBSD: pmap_pvt.c,v 1.7 2019/12/28 08:25:33 martin Exp $");
34 34
 35#include <sys/param.h>
35#include <sys/atomic.h> 36#include <sys/atomic.h>
36#include <sys/kmem.h> 37#include <sys/kmem.h>
37#include <sys/pserialize.h> 38#include <sys/pserialize.h>
38 39
39#include <uvm/uvm.h> 40#include <uvm/uvm.h>
40#include <uvm/pmap/pmap_pvt.h> 41#include <uvm/pmap/pmap_pvt.h>
41 42
42/* 43/*
43 * unmanaged pv-tracked ranges 44 * unmanaged pv-tracked ranges
44 * 45 *
45 * This is a linear list for now because the only user are the DRM 46 * This is a linear list for now because the only user are the DRM
46 * graphics drivers, with a single tracked range per device, for the 47 * graphics drivers, with a single tracked range per device, for the
47 * graphics aperture, so there are expected to be few of them. 48 * graphics aperture, so there are expected to be few of them.
48 * 49 *
49 * This is used only after the VM system is initialized well enough 50 * This is used only after the VM system is initialized well enough
50 * that we can use kmem_alloc. 51 * that we can use kmem_alloc.
51 */ 52 */
52 53
53struct pv_track { 54struct pv_track {
54 paddr_t pvt_start; 55 paddr_t pvt_start;
55 psize_t pvt_size; 56 psize_t pvt_size;
56 struct pv_track *pvt_next; 57 struct pv_track *pvt_next;
57 struct pmap_page pvt_pages[]; 58 struct pmap_page pvt_pages[];
58}; 59};
59 60
60static struct { 61static struct {
61 kmutex_t lock; 62 kmutex_t lock;
62 pserialize_t psz; 63 pserialize_t psz;
63 struct pv_track *list; 64 struct pv_track *list;
64} pv_unmanaged __cacheline_aligned; 65} pv_unmanaged __cacheline_aligned;
65 66
66void 67void
67pmap_pv_init(void) 68pmap_pv_init(void)
68{ 69{
69 70
70 mutex_init(&pv_unmanaged.lock, MUTEX_DEFAULT, IPL_NONE); 71 mutex_init(&pv_unmanaged.lock, MUTEX_DEFAULT, IPL_NONE);
71 pv_unmanaged.psz = pserialize_create(); 72 pv_unmanaged.psz = pserialize_create();
72 pv_unmanaged.list = NULL; 73 pv_unmanaged.list = NULL;
73} 74}
74 75
75void 76void
76pmap_pv_track(paddr_t start, psize_t size) 77pmap_pv_track(paddr_t start, psize_t size)
77{ 78{
78 struct pv_track *pvt; 79 struct pv_track *pvt;
79 size_t npages; 80 size_t npages;
80 81
81 KASSERT(start == trunc_page(start)); 82 KASSERT(start == trunc_page(start));
82 KASSERT(size == trunc_page(size)); 83 KASSERT(size == trunc_page(size));
83 84
84 /* We may sleep for allocation. */ 85 /* We may sleep for allocation. */
85 ASSERT_SLEEPABLE(); 86 ASSERT_SLEEPABLE();
86 87
87 npages = size >> PAGE_SHIFT; 88 npages = size >> PAGE_SHIFT;
88 pvt = kmem_zalloc(offsetof(struct pv_track, pvt_pages[npages]), 89 pvt = kmem_zalloc(offsetof(struct pv_track, pvt_pages[npages]),
89 KM_SLEEP); 90 KM_SLEEP);
90 pvt->pvt_start = start; 91 pvt->pvt_start = start;
91 pvt->pvt_size = size; 92 pvt->pvt_size = size;
92 93
93 mutex_enter(&pv_unmanaged.lock); 94 mutex_enter(&pv_unmanaged.lock);
94 pvt->pvt_next = pv_unmanaged.list; 95 pvt->pvt_next = pv_unmanaged.list;
95 atomic_store_release(&pv_unmanaged.list, pvt); 96 atomic_store_release(&pv_unmanaged.list, pvt);
96 mutex_exit(&pv_unmanaged.lock); 97 mutex_exit(&pv_unmanaged.lock);
97} 98}
98 99
99void 100void
100pmap_pv_untrack(paddr_t start, psize_t size) 101pmap_pv_untrack(paddr_t start, psize_t size)
101{ 102{
102 struct pv_track **pvtp, *pvt; 103 struct pv_track **pvtp, *pvt;
103 size_t npages; 104 size_t npages;
104 105
105 KASSERT(start == trunc_page(start)); 106 KASSERT(start == trunc_page(start));
106 KASSERT(size == trunc_page(size)); 107 KASSERT(size == trunc_page(size));
107 108
108 /* We may sleep for pserialize_perform. */ 109 /* We may sleep for pserialize_perform. */
109 ASSERT_SLEEPABLE(); 110 ASSERT_SLEEPABLE();
110 111
111 mutex_enter(&pv_unmanaged.lock); 112 mutex_enter(&pv_unmanaged.lock);
112 for (pvtp = &pv_unmanaged.list; 113 for (pvtp = &pv_unmanaged.list;
113 (pvt = *pvtp) != NULL; 114 (pvt = *pvtp) != NULL;
114 pvtp = &pvt->pvt_next) { 115 pvtp = &pvt->pvt_next) {
115 if (pvt->pvt_start != start) 116 if (pvt->pvt_start != start)
116 continue; 117 continue;
117 if (pvt->pvt_size != size) 118 if (pvt->pvt_size != size)
118 panic("pmap_pv_untrack: pv-tracking at 0x%"PRIxPADDR 119 panic("pmap_pv_untrack: pv-tracking at 0x%"PRIxPADDR
119 ": 0x%"PRIxPSIZE" bytes, not 0x%"PRIxPSIZE" bytes", 120 ": 0x%"PRIxPSIZE" bytes, not 0x%"PRIxPSIZE" bytes",
120 pvt->pvt_start, pvt->pvt_size, size); 121 pvt->pvt_start, pvt->pvt_size, size);
121 122
122 /* 123 /*
123 * Remove from list. Readers can safely see the old 124 * Remove from list. Readers can safely see the old
124 * and new states of the list. 125 * and new states of the list.
125 */ 126 */
126 atomic_store_relaxed(pvtp, pvt->pvt_next); 127 atomic_store_relaxed(pvtp, pvt->pvt_next);
127 128
128 /* Wait for readers who can see the old state to finish. */ 129 /* Wait for readers who can see the old state to finish. */
129 pserialize_perform(pv_unmanaged.psz); 130 pserialize_perform(pv_unmanaged.psz);
130 131
131 /* 132 /*
132 * We now have exclusive access to pvt and can destroy 133 * We now have exclusive access to pvt and can destroy
133 * it. Poison it to catch bugs. 134 * it. Poison it to catch bugs.
134 */ 135 */
135 explicit_memset(&pvt->pvt_next, 0x1a, sizeof pvt->pvt_next); 136 explicit_memset(&pvt->pvt_next, 0x1a, sizeof pvt->pvt_next);
136 goto out; 137 goto out;
137 } 138 }
138 panic("pmap_pv_untrack: pages not pv-tracked at 0x%"PRIxPADDR 139 panic("pmap_pv_untrack: pages not pv-tracked at 0x%"PRIxPADDR
139 " (0x%"PRIxPSIZE" bytes)", 140 " (0x%"PRIxPSIZE" bytes)",
140 start, size); 141 start, size);
141out: mutex_exit(&pv_unmanaged.lock); 142out: mutex_exit(&pv_unmanaged.lock);
142 143
143 npages = size >> PAGE_SHIFT; 144 npages = size >> PAGE_SHIFT;
144 kmem_free(pvt, offsetof(struct pv_track, pvt_pages[npages])); 145 kmem_free(pvt, offsetof(struct pv_track, pvt_pages[npages]));
145} 146}
146 147
147struct pmap_page * 148struct pmap_page *
148pmap_pv_tracked(paddr_t pa) 149pmap_pv_tracked(paddr_t pa)
149{ 150{
150 struct pv_track *pvt; 151 struct pv_track *pvt;
151 size_t pgno; 152 size_t pgno;
152 int s; 153 int s;
153 154
154 KASSERT(pa == trunc_page(pa)); 155 KASSERT(pa == trunc_page(pa));
155 156
156 s = pserialize_read_enter(); 157 s = pserialize_read_enter();
157 for (pvt = atomic_load_consume(&pv_unmanaged.list); 158 for (pvt = atomic_load_consume(&pv_unmanaged.list);
158 pvt != NULL; 159 pvt != NULL;
159 pvt = pvt->pvt_next) { 160 pvt = pvt->pvt_next) {
160 if ((pvt->pvt_start <= pa) && 161 if ((pvt->pvt_start <= pa) &&
161 ((pa - pvt->pvt_start) < pvt->pvt_size)) 162 ((pa - pvt->pvt_start) < pvt->pvt_size))
162 break; 163 break;
163 } 164 }
164 pserialize_read_exit(s); 165 pserialize_read_exit(s);
165 166
166 if (pvt == NULL) 167 if (pvt == NULL)
167 return NULL; 168 return NULL;
168 KASSERT(pvt->pvt_start <= pa); 169 KASSERT(pvt->pvt_start <= pa);
169 KASSERT((pa - pvt->pvt_start) < pvt->pvt_size); 170 KASSERT((pa - pvt->pvt_start) < pvt->pvt_size);
170 pgno = (pa - pvt->pvt_start) >> PAGE_SHIFT; 171 pgno = (pa - pvt->pvt_start) >> PAGE_SHIFT;
171 return &pvt->pvt_pages[pgno]; 172 return &pvt->pvt_pages[pgno];
172} 173}
173 174