Sun Jan 29 16:24:01 2012 UTC (para)
adapt to recent changes in uvm
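The interesting part of the change is in dvma_init(): the map for phys_map is no longer obtained from uvm_map_create(), presumably because that interface went away in the recent UVM rework; instead the struct vm_map is allocated with kmem_alloc() and initialized in place with uvm_map_setup(), with the pmap pointer set by hand. A minimal sketch of the new idiom, distilled from the diff below (the helper name and the va_min/va_max parameters are made up for illustration; the real code does this inline with DVMA_MAP_BASE and DVMA_MAP_END and keeps its existing NULL check):

    /*
     * Illustrative sketch only, not part of the commit: hand-rolling a
     * kernel submap without uvm_map_create().  Assumes the usual kernel
     * headers, as in dvma.c itself.
     */
    #include <sys/param.h>
    #include <sys/kmem.h>
    #include <uvm/uvm.h>

    static struct vm_map *
    create_submap(vaddr_t va_min, vaddr_t va_max)
    {
            struct vm_map *map;

            /* Allocate the map structure; KM_SLEEP blocks until memory is available. */
            map = kmem_alloc(sizeof(struct vm_map), KM_SLEEP);

            /* Initialize the map over the given kernel VA range (no special flags). */
            uvm_map_setup(map, va_min, va_max, 0);

            /* A kernel submap uses the kernel pmap. */
            map->pmap = pmap_kernel();

            return map;
    }

The previous single call, uvm_map_create(pmap_kernel(), DVMA_MAP_BASE, DVMA_MAP_END, 0), did all three steps at once.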


cvs diff -r1.37 -r1.38 src/sys/arch/sun3/sun3/dvma.c

--- src/sys/arch/sun3/sun3/dvma.c 2012/01/27 18:53:03 1.37
+++ src/sys/arch/sun3/sun3/dvma.c 2012/01/29 16:24:01 1.38
@@ -1,384 +1,386 @@
-/*	$NetBSD: dvma.c,v 1.37 2012/01/27 18:53:03 para Exp $	*/
+/*	$NetBSD: dvma.c,v 1.38 2012/01/29 16:24:01 para Exp $	*/

 /*-
  * Copyright (c) 1996 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Gordon W. Ross.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */

 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: dvma.c,v 1.37 2012/01/27 18:53:03 para Exp $");
+__KERNEL_RCSID(0, "$NetBSD: dvma.c,v 1.38 2012/01/29 16:24:01 para Exp $");

 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/device.h>
 #include <sys/proc.h>
 #include <sys/malloc.h>
 #include <sys/extent.h>
 #include <sys/buf.h>
 #include <sys/vnode.h>
 #include <sys/core.h>
 #include <sys/exec.h>

 #include <uvm/uvm.h> /* XXX: not _extern ... need uvm_map_create */

 #define _SUN68K_BUS_DMA_PRIVATE
 #include <machine/autoconf.h>
 #include <machine/bus.h>
 #include <machine/cpu.h>
 #include <machine/dvma.h>
 #include <machine/pmap.h>
 #include <machine/pte.h>

 #include <sun3/sun3/control.h>
 #include <sun3/sun3/machdep.h>

 /* DVMA is the last 1MB, but the PROM owns the last page. */
 #define DVMA_MAP_END	(DVMA_MAP_BASE + DVMA_MAP_AVAIL)

 /* Extent map used by dvma_mapin/dvma_mapout */
 struct extent *dvma_extent;

 /* XXX: Might need to tune this... */
 vsize_t dvma_segmap_size = 6 * NBSG;

 /* Using phys_map to manage DVMA scratch-memory pages. */
 /* Note: Could use separate pagemap for obio if needed. */

 void
 dvma_init(void)
 {
 	vaddr_t segmap_addr;

 	/*
 	 * Create phys_map covering the entire DVMA space,
 	 * then allocate the segment pool from that.  The
 	 * remainder will be used as the DVMA page pool.
 	 *
 	 * Note that no INTRSAFE is needed here because the
 	 * dvma_extent manages things handled in interrupt
 	 * context.
 	 */
-	phys_map = uvm_map_create(pmap_kernel(),
-	    DVMA_MAP_BASE, DVMA_MAP_END, 0);
+	phys_map = kmem_alloc(sizeof(struct vm_map), KM_SLEEP);
 	if (phys_map == NULL)
 		panic("unable to create DVMA map");

+	uvm_map_setup(phys_map, DVMA_MAP_BASE, DVMA_MAP_END, 0);
+	phys_map->pmap = pmap_kernel();
+
 	/*
 	 * Reserve the DVMA space used for segment remapping.
 	 * The remainder of phys_map is used for DVMA scratch
 	 * memory pages (i.e. driver control blocks, etc.)
 	 */
 	segmap_addr = uvm_km_alloc(phys_map, dvma_segmap_size, 0,
 	    UVM_KMF_VAONLY | UVM_KMF_WAITVA);
 	if (segmap_addr != DVMA_MAP_BASE)
 		panic("dvma_init: unable to allocate DVMA segments");

 	/*
 	 * Create the VM pool used for mapping whole segments
 	 * into DVMA space for the purpose of data transfer.
 	 */
 	dvma_extent = extent_create("dvma", segmap_addr,
 	    segmap_addr + (dvma_segmap_size - 1),
 	    NULL, 0, EX_NOCOALESCE|EX_NOWAIT);
 }

 /*
  * Allocate actual memory pages in DVMA space.
  * (idea for implementation borrowed from Chris Torek.)
  */
 void *
 dvma_malloc(size_t bytes)
 {
 	void *new_mem;
 	vsize_t new_size;

 	if (bytes == 0)
 		return NULL;
 	new_size = m68k_round_page(bytes);
 	new_mem = (void *)uvm_km_alloc(phys_map, new_size, 0, UVM_KMF_WIRED);
 	if (new_mem == 0)
 		panic("dvma_malloc: no space in phys_map");
 	/* The pmap code always makes DVMA pages non-cached. */
 	return new_mem;
 }

 /*
  * Free pages from dvma_malloc()
  */
 void
 dvma_free(void *addr, size_t size)
 {
 	vsize_t sz = m68k_round_page(size);

 	uvm_km_free(phys_map, (vaddr_t)addr, sz, UVM_KMF_WIRED);
 }

 /*
  * Given a DVMA address, return the physical address that
  * would be used by some OTHER bus-master besides the CPU.
  * (Examples: on-board ie/le, VME xy board).
  */
 u_long
 dvma_kvtopa(void *kva, int bustype)
 {
 	u_long addr, mask;

 	addr = (u_long)kva;
 	if ((addr & DVMA_MAP_BASE) != DVMA_MAP_BASE)
 		panic("dvma_kvtopa: bad dmva addr=0x%lx", addr);

 	switch (bustype) {
 	case BUS_OBIO:
 	case BUS_OBMEM:
 		mask = DVMA_OBIO_SLAVE_MASK;
 		break;
 	default:	/* VME bus device. */
 		mask = DVMA_VME_SLAVE_MASK;
 		break;
 	}

 	return addr & mask;
 }

 /*
  * Given a range of kernel virtual space, remap all the
  * pages found there into the DVMA space (dup mappings).
  * This IS safe to call at interrupt time.
  * (Typically called at SPLBIO)
  */
 void *
 dvma_mapin(void *kva, int len, int canwait /* ignored */)
 {
 	vaddr_t seg_kva, seg_dma;
 	vsize_t seg_len, seg_off;
 	vaddr_t v, x;
 	int s, sme, error;

 	/* Get seg-aligned address and length. */
 	seg_kva = (vaddr_t)kva;
 	seg_len = (vsize_t)len;
 	seg_off = seg_kva & SEGOFSET;
 	seg_kva -= seg_off;
 	seg_len = sun3_round_seg(seg_len + seg_off);

 	s = splvm();

 	/* Allocate the DVMA segment(s) */

 	error = extent_alloc(dvma_extent, seg_len, NBSG, 0,
 	    EX_FAST | EX_NOWAIT | EX_MALLOCOK, &seg_dma);
 	if (error) {
 		splx(s);
 		return NULL;
 	}

 #ifdef DIAGNOSTIC
 	if (seg_dma & SEGOFSET)
 		panic("dvma_mapin: seg not aligned");
 #endif

 	/* Duplicate the mappings into DMA space. */
 	v = seg_kva;
 	x = seg_dma;
 	while (seg_len > 0) {
 		sme = get_segmap(v);
 #ifdef DIAGNOSTIC
 		if (sme == SEGINV)
 			panic("dvma_mapin: seg not mapped");
 #endif
 #ifdef HAVECACHE
 		/* flush write-back on old mappings */
 		if (cache_size)
 			cache_flush_segment(v);
 #endif
 		set_segmap_allctx(x, sme);
 		v += NBSG;
 		x += NBSG;
 		seg_len -= NBSG;
 	}
 	seg_dma += seg_off;

 	splx(s);
 	return (void *)seg_dma;
 }

 /*
  * Free some DVMA space allocated by the above.
  * This IS safe to call at interrupt time.
  * (Typically called at SPLBIO)
  */
 void
 dvma_mapout(void *dma, int len)
 {
 	vaddr_t seg_dma;
 	vsize_t seg_len, seg_off;
 	vaddr_t v, x;
 	int sme;
 	int s;

 	/* Get seg-aligned address and length. */
 	seg_dma = (vaddr_t)dma;
 	seg_len = (vsize_t)len;
 	seg_off = seg_dma & SEGOFSET;
 	seg_dma -= seg_off;
 	seg_len = sun3_round_seg(seg_len + seg_off);

 	s = splvm();

 	/* Flush cache and remove DVMA mappings. */
 	v = seg_dma;
 	x = v + seg_len;
 	while (v < x) {
 		sme = get_segmap(v);
 #ifdef DIAGNOSTIC
 		if (sme == SEGINV)
 			panic("dvma_mapout: seg not mapped");
 #endif
 #ifdef HAVECACHE
 		/* flush write-back on the DVMA mappings */
 		if (cache_size)
 			cache_flush_segment(v);
 #endif
 		set_segmap_allctx(v, SEGINV);
 		v += NBSG;
 	}

 	if (extent_free(dvma_extent, seg_dma, seg_len,
 	    EX_NOWAIT | EX_MALLOCOK))
 		panic("dvma_mapout: unable to free 0x%lx,0x%lx",
 		    seg_dma, seg_len);
 	splx(s);
 }

 int
 _bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
     int nsegs, bus_size_t size, int flags)
 {

 	panic("_bus_dmamap_load_raw(): not implemented yet.");
 }

 int
 _bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
     bus_size_t buflen, struct proc *p, int flags)
 {
 	vaddr_t kva, dva;
 	vsize_t off, sgsize;
 	paddr_t pa;
 	pmap_t pmap;
 	int error, rv, s;

 	/*
 	 * Make sure that on error condition we return "no valid mappings".
 	 */
 	map->dm_nsegs = 0;
 	map->dm_mapsize = 0;

 	if (buflen > map->_dm_size)
 		return EINVAL;

 	kva = (vaddr_t)buf;
 	off = kva & PGOFSET;
 	sgsize = round_page(off + buflen);

 	/* Try to allocate DVMA space. */
 	s = splvm();
 	error = extent_alloc(dvma_extent, sgsize, PAGE_SIZE, 0,
 	    EX_FAST | ((flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT),
 	    &dva);
 	splx(s);
 	if (error)
 		return ENOMEM;

 	/* Fill in the segment. */
 	map->dm_segs[0].ds_addr = dva + off;
 	map->dm_segs[0].ds_len = buflen;
 	map->dm_segs[0]._ds_va = dva;
 	map->dm_segs[0]._ds_sgsize = sgsize;

 	/*
 	 * Now map the DVMA addresses we allocated to point to the
 	 * pages of the caller's buffer.
 	 */
 	if (p != NULL)
 		pmap = p->p_vmspace->vm_map.pmap;
 	else
 		pmap = pmap_kernel();

 	while (sgsize > 0) {
 		rv = pmap_extract(pmap, kva, &pa);
 #ifdef DIAGNOSTIC
 		if (rv == false)
 			panic("%s: unmapped VA", __func__);
 #endif
 		pmap_enter(pmap_kernel(), dva, pa | PMAP_NC,
 		    VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
 		kva += PAGE_SIZE;
 		dva += PAGE_SIZE;
 		sgsize -= PAGE_SIZE;
 	}

 	map->dm_nsegs = 1;
 	map->dm_mapsize = map->dm_segs[0].ds_len;

 	return 0;
 }

 void
 _bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
 {
 	bus_dma_segment_t *segs;
 	vaddr_t dva;
 	vsize_t sgsize;
 	int error, s;

 #ifdef DIAGNOSTIC
 	if (map->dm_nsegs != 1)
 		panic("%s: invalid nsegs = %d", __func__, map->dm_nsegs);
 #endif

 	segs = map->dm_segs;
 	dva = segs[0]._ds_va & ~PGOFSET;
 	sgsize = segs[0]._ds_sgsize;

 	/* Unmap the DVMA addresses. */
 	pmap_remove(pmap_kernel(), dva, dva + sgsize);
 	pmap_update(pmap_kernel());

 	/* Free the DVMA addresses. */
 	s = splvm();
 	error = extent_free(dvma_extent, dva, sgsize, EX_NOWAIT);
 	splx(s);
 #ifdef DIAGNOSTIC
 	if (error)
 		panic("%s: unable to free DVMA region", __func__);
 #endif

 	/* Mark the mappings as invalid. */
 	map->dm_mapsize = 0;
 	map->dm_nsegs = 0;
 }