Tue Sep 18 14:42:19 2012 UTC

Remove bounce buffer support, since there is now generic bounce buffer
support.

(matt)
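The deleted code staged DMA through "bounce pages" in PCI-visible memory: on
BUS_DMASYNC_PREWRITE the caller's buffer was copied into the bounce pages and
the cache written back, and on BUS_DMASYNC_POSTREAD the bounce pages were
copied back out to the caller. A condensed sketch of that pattern, taken from
the removed ID_BUFTYPE_LINEAR case of integrator_bus_dmamap_sync() in the diff
below (the standalone function and its name are illustrative only; error
handling and the mbuf case are omitted):

	/*
	 * Illustrative sketch only: the copy-in/copy-out pattern the
	 * removed code implemented for linear buffers.  "cookie" is the
	 * removed struct integrator_dma_cookie.
	 */
	static void
	bounce_linear_sync(struct integrator_dma_cookie *cookie,
	    bus_addr_t offset, bus_size_t len, int ops)
	{
		if (ops & BUS_DMASYNC_PREWRITE) {
			/* Device will read: stage caller's data in bounce pages. */
			memcpy((uint8_t *)cookie->id_bouncebuf + offset,
			    (uint8_t *)cookie->id_origbuf + offset, len);
			cpu_dcache_wbinv_range(
			    (vaddr_t)cookie->id_bouncebuf + offset, len);
		}
		if (ops & BUS_DMASYNC_PREREAD) {
			/* Device will write: purge cache lines over bounce pages. */
			cpu_dcache_wbinv_range(
			    (vaddr_t)cookie->id_bouncebuf + offset, len);
		}
		if (ops & BUS_DMASYNC_POSTREAD) {
			/* Device wrote: copy bounce pages back to the caller. */
			memcpy((uint8_t *)cookie->id_origbuf + offset,
			    (uint8_t *)cookie->id_bouncebuf + offset, len);
		}
		/* Nothing to do for POSTWRITE. */
	}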
cvs diff -r1.17 -r1.18 src/sys/arch/evbarm/integrator/int_bus_dma.c

--- src/sys/arch/evbarm/integrator/int_bus_dma.c 2010/11/04 12:16:15 1.17
+++ src/sys/arch/evbarm/integrator/int_bus_dma.c 2012/09/18 14:42:19 1.18
@@ -1,14 +1,14 @@
-/*	$NetBSD: int_bus_dma.c,v 1.17 2010/11/04 12:16:15 uebayasi Exp $	*/
+/*	$NetBSD: int_bus_dma.c,v 1.18 2012/09/18 14:42:19 matt Exp $	*/
 
 /*
  * Copyright (c) 2002 Wasabi Systems, Inc.
  * All rights reserved.
  *
  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
@@ -32,636 +32,76 @@
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 /*
  * PCI DMA support for the ARM Integrator.
  */
 
 #define _ARM32_BUS_DMA_PRIVATE
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: int_bus_dma.c,v 1.17 2010/11/04 12:16:15 uebayasi Exp $");
+__KERNEL_RCSID(0, "$NetBSD: int_bus_dma.c,v 1.18 2012/09/18 14:42:19 matt Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/device.h>
 #include <sys/malloc.h>
 #include <sys/mbuf.h>
 
 #include <uvm/uvm_extern.h>
 
 #include <machine/bootconfig.h>
 
 #include <evbarm/integrator/int_bus_dma.h>
 
-struct integrator_dma_cookie {
-	int	id_flags;		/* flags; see below */
-
-	/*
-	 * Information about the original buffer used during
-	 * DMA map syncs.  Note that origbuflen is only used
-	 * for ID_BUFTYPE_LINEAR.
-	 */
-	void	*id_origbuf;		/* pointer to orig buffer if
-					   bouncing */
-	bus_size_t id_origbuflen;	/* ...and size */
-	int	id_buftype;		/* type of buffer */
-
-	void	*id_bouncebuf;		/* pointer to the bounce buffer */
-	bus_size_t id_bouncebuflen;	/* ...and size */
-	int	id_nbouncesegs;		/* number of valid bounce segs */
-	bus_dma_segment_t id_bouncesegs[0]; /* array of bounce buffer
-					       physical memory segments */
-};
-/* id_flags */
-#define	ID_MIGHT_NEED_BOUNCE	0x01	/* map could need bounce buffers */
-#define	ID_HAS_BOUNCE		0x02	/* map currently has bounce buffers */
-#define	ID_IS_BOUNCING		0x04	/* map is bouncing current xfer */
-
-/* id_buftype */
-#define	ID_BUFTYPE_INVALID	0
-#define	ID_BUFTYPE_LINEAR	1
-#define	ID_BUFTYPE_MBUF		2
-#define	ID_BUFTYPE_UIO		3
-#define	ID_BUFTYPE_RAW		4
-
 #undef DEBUG
 #define DEBUG(x)
 
 static struct arm32_dma_range integrator_dma_ranges[DRAM_BLOCKS];
 
 extern BootConfig bootconfig;
 
-static int integrator_bus_dmamap_create(bus_dma_tag_t, bus_size_t, int,
-    bus_size_t, bus_size_t, int, bus_dmamap_t *);
-static void integrator_bus_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
-static int integrator_bus_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
-    bus_size_t, struct proc *, int);
-static int integrator_bus_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t,
-    struct mbuf *, int);
-static int integrator_bus_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t,
-    struct uio *, int);
-static int integrator_bus_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
-    bus_dma_segment_t *, int, bus_size_t, int);
-static void integrator_bus_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
-static void integrator_bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t,
-    bus_addr_t, bus_size_t, int);
-static int integrator_bus_dmamem_alloc(bus_dma_tag_t, bus_size_t,
-    bus_size_t, bus_size_t, bus_dma_segment_t *, int, int *, int);
-static int integrator_dma_alloc_bouncebuf(bus_dma_tag_t, bus_dmamap_t,
-    bus_size_t, int);
-static void integrator_dma_free_bouncebuf(bus_dma_tag_t, bus_dmamap_t);
-
-
-/*
- * Create an Integrator DMA map.
- */
-static int
-integrator_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
-    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
-{
-	struct integrator_dma_cookie *cookie;
-	bus_dmamap_t map;
-	int error, cookieflags;
-	void *cookiestore;
-	size_t cookiesize;
-
-	DEBUG(printf("I_bus_dmamap_create(tag %x, size %x, nseg %d, max %x,"
-	    " boundary %x, flags %x, dmamap %p)\n", (unsigned) t,
-	    (unsigned) size, nsegments, (unsigned) maxsegsz,
-	    (unsigned)boundary, flags, dmamp));
-
-	/* Call common function to create the basic map. */
-	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
-	    flags, dmamp);
-	if (error)
-		return (error);
-
-	map = *dmamp;
-	map->_dm_cookie = NULL;
-
-	cookiesize = sizeof(struct integrator_dma_cookie);
-
-	/*
-	 * Some CM boards have private memory which is significantly
-	 * faster than the normal memory stick.  To support this
-	 * memory we have to bounce any DMA transfers.
-	 *
-	 * In order to DMA to arbitrary buffers, we use "bounce
-	 * buffers" - pages in in the main PCI visible memory.  On DMA
-	 * reads, DMA happens to the bounce buffers, and is copied
-	 * into the caller's buffer.  On writes, data is copied into
-	 * but bounce buffer, and the DMA happens from those pages.
-	 * To software using the DMA mapping interface, this looks
-	 * simply like a data cache.
-	 *
-	 * If we have private RAM in the system, we may need bounce
-	 * buffers.  We check and remember that here.
-	 */
-#if 0
-	cookieflags = ID_MIGHT_NEED_BOUNCE;
-#else
-	cookieflags = 0;
-#endif
-	cookiesize += (sizeof(bus_dma_segment_t) * map->_dm_segcnt);
-
-	/*
-	 * Allocate our cookie.
-	 */
-	if ((cookiestore = malloc(cookiesize, M_DMAMAP,
-	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) {
-		error = ENOMEM;
-		goto out;
-	}
-	memset(cookiestore, 0, cookiesize);
-	cookie = (struct integrator_dma_cookie *)cookiestore;
-	cookie->id_flags = cookieflags;
-	map->_dm_cookie = cookie;
-
-	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
-		/*
-		 * Allocate the bounce pages now if the caller
-		 * wishes us to do so.
-		 */
-		if ((flags & BUS_DMA_ALLOCNOW) == 0)
-			goto out;
-
-		DEBUG(printf("I_bus_dmamap_create bouncebuf alloc\n"));
-		error = integrator_dma_alloc_bouncebuf(t, map, size, flags);
-	}
-
- out:
-	if (error) {
-		if (map->_dm_cookie != NULL)
-			free(map->_dm_cookie, M_DMAMAP);
-		_bus_dmamap_destroy(t, map);
-		printf("I_bus_dmamap_create failed (%d)\n", error);
-	}
-	return (error);
-}
-
-/*
- * Destroy an ISA DMA map.
- */
-static void
-integrator_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
-{
-	struct integrator_dma_cookie *cookie = map->_dm_cookie;
-
-	DEBUG(printf("I_bus_dmamap_destroy (tag %x, map %x)\n", (unsigned) t,
-	    (unsigned) map));
-	/*
-	 * Free any bounce pages this map might hold.
-	 */
-	if (cookie->id_flags & ID_HAS_BOUNCE) {
-		DEBUG(printf("I_bus_dmamap_destroy bouncebuf\n"));
-		integrator_dma_free_bouncebuf(t, map);
-	}
-
-	free(cookie, M_DMAMAP);
-	_bus_dmamap_destroy(t, map);
-}
-
-/*
- * Load an Integrator DMA map with a linear buffer.
- */
-static int
-integrator_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
-    bus_size_t buflen, struct proc *p, int flags)
-{
-	struct integrator_dma_cookie *cookie = map->_dm_cookie;
-	int error;
-
-	DEBUG(printf("I_bus_dmamap_load (tag %x, map %x, buf %p, len %u,"
-	    " proc %p, flags %d)\n", (unsigned) t, (unsigned) map, buf,
-	    (unsigned) buflen, p, flags));
-	/*
-	 * Make sure that on error condition we return "no valid mappings."
-	 */
-	map->dm_mapsize = 0;
-	map->dm_nsegs = 0;
-
-	/*
-	 * Try to load the map the normal way.  If this errors out,
-	 * and we can bounce, we will.
-	 */
-	error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
-	if (error == 0 ||
-	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
-		return (error);
-
-	/*
-	 * First attempt failed; bounce it.
-	 */
-
-	/*
-	 * Allocate bounce pages, if necessary.
-	 */
-	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
-		DEBUG(printf("I_bus_dmamap_load alloc bouncebuf\n"));
-		error = integrator_dma_alloc_bouncebuf(t, map, buflen, flags);
-		if (error)
-			return (error);
-	}
-
-	/*
-	 * Cache a pointer to the caller's buffer and load the DMA map
-	 * with the bounce buffer.
-	 */
-	cookie->id_origbuf = buf;
-	cookie->id_origbuflen = buflen;
-	cookie->id_buftype = ID_BUFTYPE_LINEAR;
-	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf, buflen,
-	    NULL, flags);
-	if (error) {
-		/*
-		 * Free the bounce pages, unless our resources
-		 * are reserved for our exclusive use.
-		 */
-		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
-			integrator_dma_free_bouncebuf(t, map);
-		return (error);
-	}
-
-	/* ...so integrator_bus_dmamap_sync() knows we're bouncing */
-	cookie->id_flags |= ID_IS_BOUNCING;
-	return (0);
-}
-
-/*
- * Like integrator_bus_dmamap_load(), but for mbufs.
- */
-static int
-integrator_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
-    struct mbuf *m0, int flags)
-{
-	struct integrator_dma_cookie *cookie = map->_dm_cookie;
-	int error;
-
-	/*
-	 * Make sure that on error condition we return "no valid mappings."
-	 */
-	map->dm_mapsize = 0;
-	map->dm_nsegs = 0;
-
-#ifdef DIAGNOSTIC
-	if ((m0->m_flags & M_PKTHDR) == 0)
-		panic("integrator_bus_dmamap_load_mbuf: no packet header");
-#endif
-
-	if (m0->m_pkthdr.len > map->_dm_size)
-		return (EINVAL);
-
-	/*
-	 * Try to load the map the normal way.  If this errors out,
-	 * and we can bounce, we will.
-	 */
-	error = _bus_dmamap_load_mbuf(t, map, m0, flags);
-	if (error == 0 ||
-	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
-		return (error);
-
-	/*
-	 * First attempt failed; bounce it.
-	 *
-	 * Allocate bounce pages, if necessary.
-	 */
-	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
-		error = integrator_dma_alloc_bouncebuf(t, map,
-		    m0->m_pkthdr.len, flags);
-		if (error)
-			return (error);
-	}
-
-	/*
-	 * Cache a pointer to the caller's buffer and load the DMA map
-	 * with the bounce buffer.
-	 */
-	cookie->id_origbuf = m0;
-	cookie->id_origbuflen = m0->m_pkthdr.len;	/* not really used */
-	cookie->id_buftype = ID_BUFTYPE_MBUF;
-	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
-	    m0->m_pkthdr.len, NULL, flags);
-	if (error) {
-		/*
-		 * Free the bounce pages, unless our resources
-		 * are reserved for our exclusive use.
-		 */
-		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
-			integrator_dma_free_bouncebuf(t, map);
-		return (error);
-	}
-
-	/* ...so integrator_bus_dmamap_sync() knows we're bouncing */
-	cookie->id_flags |= ID_IS_BOUNCING;
-	return (0);
-}
-
-/*
- * Like integrator_bus_dmamap_load(), but for uios.
- */
-static int
-integrator_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
-    struct uio *uio, int flags)
-{
-
-	panic("integrator_bus_dmamap_load_uio: not implemented");
-}
-
-/*
- * Like intgrator_bus_dmamap_load(), but for raw memory allocated with
- * bus_dmamem_alloc().
- */
-static int
-integrator_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
-    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
-{
-
-	panic("integrator_bus_dmamap_load_raw: not implemented");
-}
-
-/*
- * Unload an Integrator DMA map.
- */
-static void
-integrator_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
-{
-	struct integrator_dma_cookie *cookie = map->_dm_cookie;
-
-	/*
-	 * If we have bounce pages, free them, unless they're
-	 * reserved for our exclusive use.
-	 */
-	if ((cookie->id_flags & ID_HAS_BOUNCE) &&
-	    (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
-		integrator_dma_free_bouncebuf(t, map);
-
-	cookie->id_flags &= ~ID_IS_BOUNCING;
-	cookie->id_buftype = ID_BUFTYPE_INVALID;
-
-	/*
-	 * Do the generic bits of the unload.
-	 */
-	_bus_dmamap_unload(t, map);
-}
-
-/*
- * Synchronize an Integrator DMA map.
- */
-static void
-integrator_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map,
-    bus_addr_t offset, bus_size_t len, int ops)
-{
-	struct integrator_dma_cookie *cookie = map->_dm_cookie;
-
-	DEBUG(printf("I_bus_dmamap_sync (tag %x, map %x, offset %x, size %u,"
-	    " ops %d\n", (unsigned)t, (unsigned)map, (unsigned)offset ,
-	    (unsigned)len, ops));
-	/*
-	 * Mixing PRE and POST operations is not allowed.
-	 */
-	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
-	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
-		panic("integrator_bus_dmamap_sync: mix PRE and POST");
-
-#ifdef DIAGNOSTIC
-	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
-		if (offset >= map->dm_mapsize)
-			panic("integrator_bus_dmamap_sync: bad offset");
-		if (len == 0 || (offset + len) > map->dm_mapsize)
-			panic("integrator_bus_dmamap_sync: bad length");
-	}
-#endif
-
-	/*
-	 * If we're not bouncing then use the standard code.
-	 */
-	if ((cookie->id_flags & ID_IS_BOUNCING) == 0) {
-		_bus_dmamap_sync(t, map, offset, len, ops);
-		return;
-	}
-
-	DEBUG(printf("dmamap_sync(");
-	if (ops & BUS_DMASYNC_PREREAD)
-		printf("preread ");
-	if (ops & BUS_DMASYNC_PREWRITE)
-		printf("prewrite ");
-	if (ops & BUS_DMASYNC_POSTREAD)
-		printf("postread ");
-	if (ops & BUS_DMASYNC_POSTWRITE)
-		printf("postwrite ");)
-
-	switch (cookie->id_buftype) {
-	case ID_BUFTYPE_LINEAR:
-		if (ops & BUS_DMASYNC_PREWRITE) {
-			/*
-			 * Copy the caller's buffer to the bounce buffer.
-			 */
-			memcpy((uint8_t *)cookie->id_bouncebuf + offset,
-			    (uint8_t *)cookie->id_origbuf + offset, len);
-			cpu_dcache_wbinv_range((vaddr_t)cookie->id_bouncebuf +
-			    offset, len);
-		}
-		if (ops & BUS_DMASYNC_PREREAD) {
-			cpu_dcache_wbinv_range((vaddr_t)cookie->id_bouncebuf +
-			    offset, len);
-		}
-		if (ops & BUS_DMASYNC_POSTREAD) {
-			/*
-			 * Copy the bounce buffer to the caller's buffer.
-			 */
-			memcpy((uint8_t *)cookie->id_origbuf + offset,
-			    (uint8_t *)cookie->id_bouncebuf + offset, len);
-		}
-
-		/*
-		 * Nothing to do for post-write.
-		 */
-		break;
-
-	case ID_BUFTYPE_MBUF:
-	    {
-		struct mbuf *m, *m0 = cookie->id_origbuf;
-		bus_size_t minlen, moff;
-
-		if (ops & BUS_DMASYNC_PREWRITE) {
-			/*
-			 * Copy the caller's buffer to the bounce buffer.
-			 */
-			m_copydata(m0, offset, len,
-			    (uint8_t *)cookie->id_bouncebuf + offset);
-			cpu_dcache_wb_range((vaddr_t)cookie->id_bouncebuf +
-			    offset, len);
-		}
-		if (ops & BUS_DMASYNC_PREREAD) {
-			cpu_dcache_wbinv_range ((vaddr_t)cookie->id_bouncebuf +
-			    offset, len);
-		}
-		if (ops & BUS_DMASYNC_POSTREAD) {
-			/*
-			 * Copy the bounce buffer to the caller's buffer.
-			 */
-			for (moff = offset, m = m0; m != NULL && len != 0;
-			    m = m->m_next) {
-				/* Find the beginning mbuf. */
-				if (moff >= m->m_len) {
-					moff -= m->m_len;
-					continue;
-				}
-
-				/*
-				 * Now at the first mbuf to sync; nail
-				 * each one until we have exhausted the
-				 * length.
-				 */
-				minlen = len < m->m_len - moff ?
-				    len : m->m_len - moff;
-
-				memcpy(mtod(m, uint8_t *) + moff,
-				    (uint8_t *)cookie->id_bouncebuf + offset,
-				    minlen);
-
-				moff = 0;
-				len -= minlen;
-				offset += minlen;
-			}
-		}
-		/*
-		 * Nothing to do for post-write.
-		 */
-		break;
-	    }
-
-	case ID_BUFTYPE_UIO:
-		panic("integrator_bus_dmamap_sync: ID_BUFTYPE_UIO");
-		break;
-
-	case ID_BUFTYPE_RAW:
-		panic("integrator_bus_dmamap_sync: ID_BUFTYPE_RAW");
-		break;
-
-	case ID_BUFTYPE_INVALID:
-		panic("integrator_bus_dmamap_sync: ID_BUFTYPE_INVALID");
-		break;
-
-	default:
-		printf("unknown buffer type %d\n", cookie->id_buftype);
-		panic("integrator_bus_dmamap_sync");
-	}
-}
-
-/*
- * Allocate memory safe for Integrator DMA.
- */
-static int
-integrator_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size,
-    bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
-    int nsegs, int *rsegs, int flags)
-{
-
-	if (t->_ranges == NULL)
-		return (ENOMEM);
-
-	/* _bus_dmamem_alloc() does the range checks for us. */
-	return (_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs,
-	    rsegs, flags));
-}
-
-/**********************************************************************
- * Integrator DMA utility functions
- **********************************************************************/
-
-static int
-integrator_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
-    bus_size_t size, int flags)
-{
-	struct integrator_dma_cookie *cookie = map->_dm_cookie;
-	int error = 0;
-
-	DEBUG(printf("Alloc bouncebuf\n"));
-	cookie->id_bouncebuflen = round_page(size);
-	error = integrator_bus_dmamem_alloc(t, cookie->id_bouncebuflen,
-	    NBPG, map->_dm_boundary, cookie->id_bouncesegs,
-	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
-	if (error)
-		goto out;
-	{
-		int seg;
-
-		for (seg = 0; seg < cookie->id_nbouncesegs; seg++)
-			DEBUG(printf("Seg %d @ PA 0x%08x+0x%x\n", seg,
-			    (unsigned) cookie->id_bouncesegs[seg].ds_addr,
-			    (unsigned) cookie->id_bouncesegs[seg].ds_len));
-	}
-	error = _bus_dmamem_map(t, cookie->id_bouncesegs,
-	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
-	    (void **)&cookie->id_bouncebuf, flags);
-
- out:
-	if (error) {
-		_bus_dmamem_free(t, cookie->id_bouncesegs,
-		    cookie->id_nbouncesegs);
-		cookie->id_bouncebuflen = 0;
-		cookie->id_nbouncesegs = 0;
-	} else {
-		DEBUG(printf("Alloc bouncebuf OK\n"));
-		cookie->id_flags |= ID_HAS_BOUNCE;
-	}
-
-	return (error);
-}
-
-static void
-integrator_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
-{
-	struct integrator_dma_cookie *cookie = map->_dm_cookie;
-
-	_bus_dmamem_unmap(t, cookie->id_bouncebuf,
-	    cookie->id_bouncebuflen);
-	_bus_dmamem_free(t, cookie->id_bouncesegs,
-	    cookie->id_nbouncesegs);
-	cookie->id_bouncebuflen = 0;
-	cookie->id_nbouncesegs = 0;
-	cookie->id_flags &= ~ID_HAS_BOUNCE;
-}
-
 void
 integrator_pci_dma_init(bus_dma_tag_t dmat)
 {
 	struct arm32_dma_range *dr = integrator_dma_ranges;
 	int i;
 	int nranges = 0;
-	
-	for (i = 0; i < bootconfig.dramblocks; i++)
+
+	for (i = 0; i < bootconfig.dramblocks; i++) {
 		if (bootconfig.dram[i].flags & BOOT_DRAM_CAN_DMA) {
 			dr[nranges].dr_sysbase = bootconfig.dram[i].address;
-			dr[nranges].dr_busbase = 
+			dr[nranges].dr_busbase =
 			    LOCAL_TO_CM_ALIAS(dr[nranges].dr_sysbase);
 			dr[nranges].dr_len = bootconfig.dram[i].pages * NBPG;
 			nranges++;
 		}
+	}
 
 	if (nranges == 0)
-		panic ("integrator_pci_dma_init: No DMA capable memory"); 
+		panic ("integrator_pci_dma_init: No DMA capable memory");
 
 	dmat->_ranges = dr;
 	dmat->_nranges = nranges;
 
-	dmat->_dmamap_create = integrator_bus_dmamap_create;
-	dmat->_dmamap_destroy = integrator_bus_dmamap_destroy;
-	dmat->_dmamap_load = integrator_bus_dmamap_load;
-	dmat->_dmamap_load_mbuf = integrator_bus_dmamap_load_mbuf;
-	dmat->_dmamap_load_uio = integrator_bus_dmamap_load_uio;
-	dmat->_dmamap_load_raw = integrator_bus_dmamap_load_raw;
-	dmat->_dmamap_unload = integrator_bus_dmamap_unload;
-	dmat->_dmamap_sync_pre = integrator_bus_dmamap_sync;
-	dmat->_dmamap_sync_post = integrator_bus_dmamap_sync;
+	dmat->_dmamap_create = _bus_dmamap_create;
+	dmat->_dmamap_destroy = _bus_dmamap_destroy;
+	dmat->_dmamap_load = _bus_dmamap_load;
+	dmat->_dmamap_load_mbuf = _bus_dmamap_load_mbuf;
+	dmat->_dmamap_load_uio = _bus_dmamap_load_uio;
+	dmat->_dmamap_load_raw = _bus_dmamap_load_raw;
+	dmat->_dmamap_unload = _bus_dmamap_unload;
+	dmat->_dmamap_sync_pre = _bus_dmamap_sync;
+	dmat->_dmamap_sync_post = _bus_dmamap_sync;
 
-	dmat->_dmamem_alloc = integrator_bus_dmamem_alloc;
+	dmat->_dmamem_alloc = _bus_dmamem_alloc;
 	dmat->_dmamem_free = _bus_dmamem_free;
 	dmat->_dmamem_map = _bus_dmamem_map;
 	dmat->_dmamem_unmap = _bus_dmamem_unmap;
 	dmat->_dmamem_mmap = _bus_dmamem_mmap;
+
+	dmat->_dmatag_subregion = _bus_dmatag_subregion;
+	dmat->_dmatag_destroy = _bus_dmatag_destroy;
 }
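
With the bounce machinery gone, the machine-dependent pieces left in this file
are the DMA-able range table and the LOCAL_TO_CM_ALIAS translation between
local and CM-alias addresses; everything else is delegated to the shared arm32
_bus_* implementations. Purely as an illustration of what such a range table
expresses (the helper below is hypothetical and not part of the committed
code; the real lookup lives in the common arm32 bus_dma code):

	/*
	 * Illustrative only: translate a CPU physical address to a bus
	 * address using an arm32_dma_range table like the one built in
	 * integrator_pci_dma_init() above.
	 */
	static bus_addr_t
	dma_range_translate(const struct arm32_dma_range *dr, int nranges,
	    paddr_t pa)
	{
		int i;

		for (i = 0; i < nranges; i++) {
			/* Inside this window: rebase onto the bus address. */
			if (pa >= dr[i].dr_sysbase &&
			    pa < dr[i].dr_sysbase + dr[i].dr_len)
				return (pa - dr[i].dr_sysbase) +
				    dr[i].dr_busbase;
		}
		return pa;	/* outside every range; assume identity */
	}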