Wed Feb 16 23:30:52 2022 UTC
powerpc: Implement bus_dmamap_load_raw.

Can probably delete some of the round-trips between bus addresses and
physical addresses -- they were done only to copy the logic already in
_bus_dmamap_load_buffer.


(riastradh)
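For reference, the consumer pattern this enables is roughly the following
(a hypothetical sketch, not code from the tree; "foo_alloc_dma" and its
arguments are made-up names and error unwinding is elided): allocate
DMA-safe pages with bus_dmamem_alloc(9) and hand the resulting raw
segments straight to bus_dmamap_load_raw(9), without ever mapping them
into kernel virtual address space.

#include <sys/param.h>
#include <sys/bus.h>

/*
 * Hypothetical driver helper: allocate a physically contiguous DMA
 * buffer and load it into a map with bus_dmamap_load_raw(), which this
 * commit implements for powerpc (it previously panicked).
 */
static int
foo_alloc_dma(bus_dma_tag_t dmat, bus_size_t size,
    bus_dma_segment_t *seg, bus_dmamap_t *mapp)
{
	int rseg, error;

	/* DMA-safe physical pages; no kernel virtual mapping needed. */
	error = bus_dmamem_alloc(dmat, size, PAGE_SIZE, 0,
	    seg, 1, &rseg, BUS_DMA_WAITOK);
	if (error)
		return error;

	error = bus_dmamap_create(dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK, mapp);
	if (error)
		return error;

	/* Load the raw segments returned by bus_dmamem_alloc(). */
	return bus_dmamap_load_raw(dmat, *mapp, seg, rseg, size,
	    BUS_DMA_WAITOK);
}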
diff -r1.52 -r1.53 src/sys/arch/powerpc/powerpc/bus_dma.c

cvs diff -r1.52 -r1.53 src/sys/arch/powerpc/powerpc/bus_dma.c

--- src/sys/arch/powerpc/powerpc/bus_dma.c 2020/07/06 10:31:24 1.52
+++ src/sys/arch/powerpc/powerpc/bus_dma.c 2022/02/16 23:30:52 1.53
@@ -1,798 +1,884 @@
1/* $NetBSD: bus_dma.c,v 1.52 2020/07/06 10:31:24 rin Exp $ */ 1/* $NetBSD: bus_dma.c,v 1.53 2022/02/16 23:30:52 riastradh Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc. 4 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center. 9 * NASA Ames Research Center.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * are met: 13 * are met:
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer. 15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright 16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the 17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution. 18 * documentation and/or other materials provided with the distribution.
19 * 19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE. 30 * POSSIBILITY OF SUCH DAMAGE.
31 */ 31 */
32 32
33#define _POWERPC_BUS_DMA_PRIVATE 33#define _POWERPC_BUS_DMA_PRIVATE
34 34
35#include <sys/cdefs.h> 35#include <sys/cdefs.h>
36__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.52 2020/07/06 10:31:24 rin Exp $"); 36__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.53 2022/02/16 23:30:52 riastradh Exp $");
37 37
38#ifdef _KERNEL_OPT 38#ifdef _KERNEL_OPT
39#include "opt_ppcarch.h" 39#include "opt_ppcarch.h"
40#endif 40#endif
41 41
42#include <sys/param.h> 42#include <sys/param.h>
43#include <sys/systm.h> 43#include <sys/systm.h>
44#include <sys/kernel.h> 44#include <sys/kernel.h>
45#include <sys/device.h> 45#include <sys/device.h>
46#include <sys/kmem.h> 46#include <sys/kmem.h>
47#include <sys/proc.h> 47#include <sys/proc.h>
48#include <sys/mbuf.h> 48#include <sys/mbuf.h>
49#include <sys/bus.h> 49#include <sys/bus.h>
50#include <sys/intr.h> 50#include <sys/intr.h>
51 51
52#include <uvm/uvm.h> 52#include <uvm/uvm.h>
53#include <uvm/uvm_physseg.h> 53#include <uvm/uvm_physseg.h>
54 54
55#if defined(PPC_BOOKE) 55#if defined(PPC_BOOKE)
56#define EIEIO __asm volatile("mbar\t0") 56#define EIEIO __asm volatile("mbar\t0")
57#define SYNC __asm volatile("msync") 57#define SYNC __asm volatile("msync")
58#elif defined(PPC_IBM4XX) && !defined(PPC_IBM440) 58#elif defined(PPC_IBM4XX) && !defined(PPC_IBM440)
59/* eieio is implemented as sync */ 59/* eieio is implemented as sync */
60#define EIEIO __asm volatile("eieio") 60#define EIEIO __asm volatile("eieio")
61#define SYNC /* nothing */ 61#define SYNC /* nothing */
62#else 62#else
63#define EIEIO __asm volatile("eieio") 63#define EIEIO __asm volatile("eieio")
64#define SYNC __asm volatile("sync") 64#define SYNC __asm volatile("sync")
65#endif 65#endif
66 66
67int _bus_dmamap_load_buffer (bus_dma_tag_t, bus_dmamap_t, void *, 67int _bus_dmamap_load_buffer (bus_dma_tag_t, bus_dmamap_t, void *,
68 bus_size_t, struct vmspace *, int, paddr_t *, int *, int); 68 bus_size_t, struct vmspace *, int, paddr_t *, int *, int);
69 69
70static inline void 70static inline void
71dcbst(paddr_t pa, long len, int dcache_line_size) 71dcbst(paddr_t pa, long len, int dcache_line_size)
72{ 72{
73 paddr_t epa; 73 paddr_t epa;
74 for (epa = pa + len; pa < epa; pa += dcache_line_size) 74 for (epa = pa + len; pa < epa; pa += dcache_line_size)
75 __asm volatile("dcbst 0,%0" :: "r"(pa)); 75 __asm volatile("dcbst 0,%0" :: "r"(pa));
76} 76}
77 77
78static inline void 78static inline void
79dcbi(paddr_t pa, long len, int dcache_line_size) 79dcbi(paddr_t pa, long len, int dcache_line_size)
80{ 80{
81 paddr_t epa; 81 paddr_t epa;
82 for (epa = pa + len; pa < epa; pa += dcache_line_size) 82 for (epa = pa + len; pa < epa; pa += dcache_line_size)
83 __asm volatile("dcbi 0,%0" :: "r"(pa)); 83 __asm volatile("dcbi 0,%0" :: "r"(pa));
84} 84}
85 85
86static inline void 86static inline void
87dcbf(paddr_t pa, long len, int dcache_line_size) 87dcbf(paddr_t pa, long len, int dcache_line_size)
88{ 88{
89 paddr_t epa; 89 paddr_t epa;
90 for (epa = pa + len; pa < epa; pa += dcache_line_size) 90 for (epa = pa + len; pa < epa; pa += dcache_line_size)
91 __asm volatile("dcbf 0,%0" :: "r"(pa)); 91 __asm volatile("dcbf 0,%0" :: "r"(pa));
92} 92}
93 93
94/* 94/*
95 * Common function for DMA map creation. May be called by bus-specific 95 * Common function for DMA map creation. May be called by bus-specific
96 * DMA map creation functions. 96 * DMA map creation functions.
97 */ 97 */
98int 98int
99_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments, bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp) 99_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments, bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
100{ 100{
101 struct powerpc_bus_dmamap *map; 101 struct powerpc_bus_dmamap *map;
102 void *mapstore; 102 void *mapstore;
103 size_t mapsize; 103 size_t mapsize;
104 104
105 /* 105 /*
106 * Allocate and initialize the DMA map. The end of the map 106 * Allocate and initialize the DMA map. The end of the map
107 * is a variable-sized array of segments, so we allocate enough 107 * is a variable-sized array of segments, so we allocate enough
108 * room for them in one shot. 108 * room for them in one shot.
109 * 109 *
110 * Note we don't preserve the WAITOK or NOWAIT flags. Preservation 110 * Note we don't preserve the WAITOK or NOWAIT flags. Preservation
111 * of ALLOCNOW notifies others that we've reserved these resources, 111 * of ALLOCNOW notifies others that we've reserved these resources,
112 * and they are not to be freed. 112 * and they are not to be freed.
113 * 113 *
114 * The bus_dmamap_t includes one bus_dma_segment_t, hence 114 * The bus_dmamap_t includes one bus_dma_segment_t, hence
115 * the (nsegments - 1). 115 * the (nsegments - 1).
116 */ 116 */
117 mapsize = sizeof(*map) + sizeof(bus_dma_segment_t [nsegments - 1]); 117 mapsize = sizeof(*map) + sizeof(bus_dma_segment_t [nsegments - 1]);
118 if ((mapstore = kmem_intr_alloc(mapsize, 118 if ((mapstore = kmem_intr_alloc(mapsize,
119 (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL) 119 (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL)
120 return (ENOMEM); 120 return (ENOMEM);
121 121
122 memset(mapstore, 0, mapsize); 122 memset(mapstore, 0, mapsize);
123 map = (struct powerpc_bus_dmamap *)mapstore; 123 map = (struct powerpc_bus_dmamap *)mapstore;
124 map->_dm_size = size; 124 map->_dm_size = size;
125 map->_dm_segcnt = nsegments; 125 map->_dm_segcnt = nsegments;
126 map->_dm_maxmaxsegsz = maxsegsz; 126 map->_dm_maxmaxsegsz = maxsegsz;
127 map->_dm_boundary = boundary; 127 map->_dm_boundary = boundary;
128 map->_dm_bounce_thresh = t->_bounce_thresh; 128 map->_dm_bounce_thresh = t->_bounce_thresh;
129 map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT); 129 map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
130 map->dm_maxsegsz = maxsegsz; 130 map->dm_maxsegsz = maxsegsz;
131 map->dm_mapsize = 0; /* no valid mappings */ 131 map->dm_mapsize = 0; /* no valid mappings */
132 map->dm_nsegs = 0; 132 map->dm_nsegs = 0;
133 133
134 *dmamp = map; 134 *dmamp = map;
135 return (0); 135 return (0);
136} 136}
137 137
138/* 138/*
139 * Common function for DMA map destruction. May be called by bus-specific 139 * Common function for DMA map destruction. May be called by bus-specific
140 * DMA map destruction functions. 140 * DMA map destruction functions.
141 */ 141 */
142void 142void
143_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map) 143_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
144{ 144{
145 145
146 size_t mapsize = sizeof(*map) 146 size_t mapsize = sizeof(*map)
147 + sizeof(bus_dma_segment_t [map->_dm_segcnt - 1]); 147 + sizeof(bus_dma_segment_t [map->_dm_segcnt - 1]);
148 kmem_intr_free(map, mapsize); 148 kmem_intr_free(map, mapsize);
149} 149}
150 150
151/* 151/*
152 * Utility function to load a linear buffer. lastaddrp holds state 152 * Utility function to load a linear buffer. lastaddrp holds state
153 * between invocations (for multiple-buffer loads). segp contains 153 * between invocations (for multiple-buffer loads). segp contains
154 * the starting segment on entrance, and the ending segment on exit. 154 * the starting segment on entrance, and the ending segment on exit.
155 * first indicates if this is the first invocation of this function. 155 * first indicates if this is the first invocation of this function.
156 */ 156 */
157int 157int
158_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf, bus_size_t buflen, struct vmspace *vm, int flags, paddr_t *lastaddrp, int *segp, int first) 158_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf, bus_size_t buflen, struct vmspace *vm, int flags, paddr_t *lastaddrp, int *segp, int first)
159{ 159{
160 bus_size_t sgsize; 160 bus_size_t sgsize;
161 bus_addr_t curaddr, lastaddr, baddr, bmask; 161 bus_addr_t curaddr, lastaddr, baddr, bmask;
162 vaddr_t vaddr = (vaddr_t)buf; 162 vaddr_t vaddr = (vaddr_t)buf;
163 int seg; 163 int seg;
164 164
165// printf("%s(%p,%p,%p,%u,%p,%#x,%p,%p,%u)\n", __func__, 165// printf("%s(%p,%p,%p,%u,%p,%#x,%p,%p,%u)\n", __func__,
166// t, map, buf, buflen, vm, flags, lastaddrp, segp, first); 166// t, map, buf, buflen, vm, flags, lastaddrp, segp, first);
167 167
168 lastaddr = *lastaddrp; 168 lastaddr = *lastaddrp;
169 bmask = ~(map->_dm_boundary - 1); 169 bmask = ~(map->_dm_boundary - 1);
170 170
171 for (seg = *segp; buflen > 0 ; ) { 171 for (seg = *segp; buflen > 0 ; ) {
172 /* 172 /*
173 * Get the physical address for this segment. 173 * Get the physical address for this segment.
174 */ 174 */
175 if (!VMSPACE_IS_KERNEL_P(vm)) 175 if (!VMSPACE_IS_KERNEL_P(vm))
176 (void) pmap_extract(vm_map_pmap(&vm->vm_map), 176 (void) pmap_extract(vm_map_pmap(&vm->vm_map),
177 vaddr, (void *)&curaddr); 177 vaddr, (void *)&curaddr);
178 else 178 else
179 curaddr = vtophys(vaddr); 179 curaddr = vtophys(vaddr);
180 180
181 /* 181 /*
182 * If we're beyond the bounce threshold, notify 182 * If we're beyond the bounce threshold, notify
183 * the caller. 183 * the caller.
184 */ 184 */
185 if (map->_dm_bounce_thresh != 0 && 185 if (map->_dm_bounce_thresh != 0 &&
186 curaddr >= map->_dm_bounce_thresh) 186 curaddr >= map->_dm_bounce_thresh)
187 return (EINVAL); 187 return (EINVAL);
188 188
189 /* 189 /*
190 * Compute the segment size, and adjust counts. 190 * Compute the segment size, and adjust counts.
191 */ 191 */
192 sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET); 192 sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
193 if (buflen < sgsize) 193 if (buflen < sgsize)
194 sgsize = buflen; 194 sgsize = buflen;
195 sgsize = uimin(sgsize, map->dm_maxsegsz); 195 sgsize = uimin(sgsize, map->dm_maxsegsz);
196 196
197 /* 197 /*
198 * Make sure we don't cross any boundaries. 198 * Make sure we don't cross any boundaries.
199 */ 199 */
200 if (map->_dm_boundary > 0) { 200 if (map->_dm_boundary > 0) {
201 baddr = (curaddr + map->_dm_boundary) & bmask; 201 baddr = (curaddr + map->_dm_boundary) & bmask;
202 if (sgsize > (baddr - curaddr)) 202 if (sgsize > (baddr - curaddr))
203 sgsize = (baddr - curaddr); 203 sgsize = (baddr - curaddr);
204 } 204 }
205 205
206 /* 206 /*
207 * Insert chunk into a segment, coalescing with 207 * Insert chunk into a segment, coalescing with
208 * the previous segment if possible. 208 * the previous segment if possible.
209 */ 209 */
210 if (first) { 210 if (first) {
211 map->dm_segs[seg].ds_addr = PHYS_TO_BUS_MEM(t, curaddr); 211 map->dm_segs[seg].ds_addr = PHYS_TO_BUS_MEM(t, curaddr);
212 map->dm_segs[seg].ds_len = sgsize; 212 map->dm_segs[seg].ds_len = sgsize;
213 first = 0; 213 first = 0;
214 } else { 214 } else {
215 if (curaddr == lastaddr && 215 if (curaddr == lastaddr &&
216 (map->dm_segs[seg].ds_len + sgsize) <= 216 (map->dm_segs[seg].ds_len + sgsize) <=
217 map->dm_maxsegsz && 217 map->dm_maxsegsz &&
218 (map->_dm_boundary == 0 || 218 (map->_dm_boundary == 0 ||
219 (map->dm_segs[seg].ds_addr & bmask) == 219 (map->dm_segs[seg].ds_addr & bmask) ==
220 (PHYS_TO_BUS_MEM(t, curaddr) & bmask))) 220 (PHYS_TO_BUS_MEM(t, curaddr) & bmask)))
221 map->dm_segs[seg].ds_len += sgsize; 221 map->dm_segs[seg].ds_len += sgsize;
222 else { 222 else {
223 if (++seg >= map->_dm_segcnt) 223 if (++seg >= map->_dm_segcnt)
224 break; 224 break;
225 map->dm_segs[seg].ds_addr = 225 map->dm_segs[seg].ds_addr =
226 PHYS_TO_BUS_MEM(t, curaddr); 226 PHYS_TO_BUS_MEM(t, curaddr);
227 map->dm_segs[seg].ds_len = sgsize; 227 map->dm_segs[seg].ds_len = sgsize;
228 } 228 }
229 } 229 }
230 230
231 lastaddr = curaddr + sgsize; 231 lastaddr = curaddr + sgsize;
232 vaddr += sgsize; 232 vaddr += sgsize;
233 buflen -= sgsize; 233 buflen -= sgsize;
234 } 234 }
235 235
236 *segp = seg; 236 *segp = seg;
237 *lastaddrp = lastaddr; 237 *lastaddrp = lastaddr;
238 238
239 /* 239 /*
240 * Did we fit? 240 * Did we fit?
241 */ 241 */
242 if (buflen != 0) 242 if (buflen != 0)
243 return (EFBIG); /* XXX better return value here? */ 243 return (EFBIG); /* XXX better return value here? */
244 244
245 return (0); 245 return (0);
246} 246}
247 247
248/* 248/*
249 * Common function for loading a DMA map with a linear buffer. May 249 * Common function for loading a DMA map with a linear buffer. May
250 * be called by bus-specific DMA map load functions. 250 * be called by bus-specific DMA map load functions.
251 */ 251 */
252int 252int
253_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf, bus_size_t buflen, struct proc *p, int flags) 253_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf, bus_size_t buflen, struct proc *p, int flags)
254{ 254{
255 paddr_t lastaddr = 0; 255 paddr_t lastaddr = 0;
256 int seg, error; 256 int seg, error;
257 struct vmspace *vm; 257 struct vmspace *vm;
258 258
259 /* 259 /*
260 * Make sure that on error condition we return "no valid mappings". 260 * Make sure that on error condition we return "no valid mappings".
261 */ 261 */
262 map->dm_mapsize = 0; 262 map->dm_mapsize = 0;
263 map->dm_nsegs = 0; 263 map->dm_nsegs = 0;
264 KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz); 264 KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
265 265
266 if (buflen > map->_dm_size) 266 if (buflen > map->_dm_size)
267 return (EINVAL); 267 return (EINVAL);
268 268
269 if (p != NULL) { 269 if (p != NULL) {
270 vm = p->p_vmspace; 270 vm = p->p_vmspace;
271 } else { 271 } else {
272 vm = vmspace_kernel(); 272 vm = vmspace_kernel();
273 } 273 }
274 274
275 seg = 0; 275 seg = 0;
276 error = _bus_dmamap_load_buffer(t, map, buf, buflen, vm, flags, 276 error = _bus_dmamap_load_buffer(t, map, buf, buflen, vm, flags,
277 &lastaddr, &seg, 1); 277 &lastaddr, &seg, 1);
278 if (error == 0) { 278 if (error == 0) {
279 map->dm_mapsize = buflen; 279 map->dm_mapsize = buflen;
280 map->dm_nsegs = seg + 1; 280 map->dm_nsegs = seg + 1;
281 } 281 }
282 return (error); 282 return (error);
283} 283}
284 284
285/* 285/*
286 * Like _bus_dmamap_load(), but for mbufs. 286 * Like _bus_dmamap_load(), but for mbufs.
287 */ 287 */
288int 288int
289_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0, int flags) 289_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0, int flags)
290{ 290{
291 paddr_t lastaddr = 0; 291 paddr_t lastaddr = 0;
292 int seg, error, first; 292 int seg, error, first;
293 struct mbuf *m; 293 struct mbuf *m;
294 294
295 /* 295 /*
296 * Make sure that on error condition we return "no valid mappings." 296 * Make sure that on error condition we return "no valid mappings."
297 */ 297 */
298 map->dm_mapsize = 0; 298 map->dm_mapsize = 0;
299 map->dm_nsegs = 0; 299 map->dm_nsegs = 0;
300 KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz); 300 KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
301 301
302#ifdef DIAGNOSTIC 302#ifdef DIAGNOSTIC
303 if ((m0->m_flags & M_PKTHDR) == 0) 303 if ((m0->m_flags & M_PKTHDR) == 0)
304 panic("_bus_dmamap_load_mbuf: no packet header"); 304 panic("_bus_dmamap_load_mbuf: no packet header");
305#endif 305#endif
306 306
307 if (m0->m_pkthdr.len > map->_dm_size) 307 if (m0->m_pkthdr.len > map->_dm_size)
308 return (EINVAL); 308 return (EINVAL);
309 309
310 first = 1; 310 first = 1;
311 seg = 0; 311 seg = 0;
312 error = 0; 312 error = 0;
313 for (m = m0; m != NULL && error == 0; m = m->m_next, first = 0) { 313 for (m = m0; m != NULL && error == 0; m = m->m_next, first = 0) {
314 if (m->m_len == 0) 314 if (m->m_len == 0)
315 continue; 315 continue;
316#ifdef POOL_VTOPHYS 316#ifdef POOL_VTOPHYS
317 /* XXX Could be better about coalescing. */ 317 /* XXX Could be better about coalescing. */
318 /* XXX Doesn't check boundaries. */ 318 /* XXX Doesn't check boundaries. */
319 switch (m->m_flags & (M_EXT|M_EXT_CLUSTER)) { 319 switch (m->m_flags & (M_EXT|M_EXT_CLUSTER)) {
320 case M_EXT|M_EXT_CLUSTER: 320 case M_EXT|M_EXT_CLUSTER:
321 /* XXX KDASSERT */ 321 /* XXX KDASSERT */
322 KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID); 322 KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
323 lastaddr = m->m_ext.ext_paddr + 323 lastaddr = m->m_ext.ext_paddr +
324 (m->m_data - m->m_ext.ext_buf); 324 (m->m_data - m->m_ext.ext_buf);
325 have_addr: 325 have_addr:
326 if (first == 0 && ++seg >= map->_dm_segcnt) { 326 if (first == 0 && ++seg >= map->_dm_segcnt) {
327 error = EFBIG; 327 error = EFBIG;
328 continue; 328 continue;
329 } 329 }
330 map->dm_segs[seg].ds_addr = 330 map->dm_segs[seg].ds_addr =
331 PHYS_TO_BUS_MEM(t, lastaddr); 331 PHYS_TO_BUS_MEM(t, lastaddr);
332 map->dm_segs[seg].ds_len = m->m_len; 332 map->dm_segs[seg].ds_len = m->m_len;
333 lastaddr += m->m_len; 333 lastaddr += m->m_len;
334 continue; 334 continue;
335 335
336 case 0: 336 case 0:
337 lastaddr = m->m_paddr + M_BUFOFFSET(m) + 337 lastaddr = m->m_paddr + M_BUFOFFSET(m) +
338 (m->m_data - M_BUFADDR(m)); 338 (m->m_data - M_BUFADDR(m));
339 goto have_addr; 339 goto have_addr;
340 340
341 default: 341 default:
342 break; 342 break;
343 } 343 }
344#endif 344#endif
345 error = _bus_dmamap_load_buffer(t, map, m->m_data, 345 error = _bus_dmamap_load_buffer(t, map, m->m_data,
346 m->m_len, vmspace_kernel(), flags, &lastaddr, &seg, first); 346 m->m_len, vmspace_kernel(), flags, &lastaddr, &seg, first);
347 } 347 }
348 if (error == 0) { 348 if (error == 0) {
349 map->dm_mapsize = m0->m_pkthdr.len; 349 map->dm_mapsize = m0->m_pkthdr.len;
350 map->dm_nsegs = seg + 1; 350 map->dm_nsegs = seg + 1;
351 } 351 }
352 return (error); 352 return (error);
353} 353}
354 354
355/* 355/*
356 * Like _bus_dmamap_load(), but for uios. 356 * Like _bus_dmamap_load(), but for uios.
357 */ 357 */
358int 358int
359_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio, int flags) 359_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio, int flags)
360{ 360{
361 paddr_t lastaddr = 0; 361 paddr_t lastaddr = 0;
362 int seg, i, error, first; 362 int seg, i, error, first;
363 bus_size_t minlen, resid; 363 bus_size_t minlen, resid;
364 struct iovec *iov; 364 struct iovec *iov;
365 void *addr; 365 void *addr;
366 366
367 /* 367 /*
368 * Make sure that on error condition we return "no valid mappings." 368 * Make sure that on error condition we return "no valid mappings."
369 */ 369 */
370 map->dm_mapsize = 0; 370 map->dm_mapsize = 0;
371 map->dm_nsegs = 0; 371 map->dm_nsegs = 0;
372 KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz); 372 KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
373 373
374 resid = uio->uio_resid; 374 resid = uio->uio_resid;
375 iov = uio->uio_iov; 375 iov = uio->uio_iov;
376 376
377 first = 1; 377 first = 1;
378 seg = 0; 378 seg = 0;
379 error = 0; 379 error = 0;
380 for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) { 380 for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
381 /* 381 /*
382 * Now at the first iovec to load. Load each iovec 382 * Now at the first iovec to load. Load each iovec
383 * until we have exhausted the residual count. 383 * until we have exhausted the residual count.
384 */ 384 */
385 minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len; 385 minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
386 addr = (void *)iov[i].iov_base; 386 addr = (void *)iov[i].iov_base;
387 387
388 error = _bus_dmamap_load_buffer(t, map, addr, minlen, 388 error = _bus_dmamap_load_buffer(t, map, addr, minlen,
389 uio->uio_vmspace, flags, &lastaddr, &seg, first); 389 uio->uio_vmspace, flags, &lastaddr, &seg, first);
390 first = 0; 390 first = 0;
391 391
392 resid -= minlen; 392 resid -= minlen;
393 } 393 }
394 if (error == 0) { 394 if (error == 0) {
395 map->dm_mapsize = uio->uio_resid; 395 map->dm_mapsize = uio->uio_resid;
396 map->dm_nsegs = seg + 1; 396 map->dm_nsegs = seg + 1;
397 } 397 }
398 return (error); 398 return (error);
399} 399}
400 400
401/* 401/*
402 * Like _bus_dmamap_load(), but for raw memory allocated with 402 * Like _bus_dmamap_load(), but for raw memory allocated with
403 * bus_dmamem_alloc(). 403 * bus_dmamem_alloc().
 404 *
 405 * XXX This is too much copypasta of _bus_dmamap_load_buffer.
404 */ 406 */
405int 407int
406_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags) 408_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
 409 bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
407{ 410{
 411 bus_size_t sgsize, isgsize;
 412 bus_size_t busaddr, curaddr, lastaddr, baddr, bmask;
 413 int seg, iseg, first;
 414
 415 if (size == 0)
 416 return 0;
 417
 418 lastaddr = 0;
 419 bmask = ~(map->_dm_boundary - 1);
 420
 421 first = 0;
 422 iseg = 0;
 423 busaddr = segs[iseg].ds_addr;
 424 isgsize = segs[iseg].ds_len;
 425 for (seg = 0; size > 0;) {
 426 /*
 427 * Get the physical address for this segment.
 428 */
 429 curaddr = BUS_MEM_TO_PHYS(t, busaddr);
 430
 431 /*
 432 * If we're beyond the bounce threshold, notify
 433 * the caller.
 434 */
 435 if (map->_dm_bounce_thresh != 0 &&
 436 curaddr >= map->_dm_bounce_thresh)
 437 return EINVAL;
 438
 439 /*
 440 * Compute the segment size, and adjust counts.
 441 */
 442 sgsize = PAGE_SIZE - ((u_long)curaddr & PGOFSET);
 443 sgsize = MIN(sgsize, isgsize);
 444 sgsize = MIN(sgsize, size);
 445 sgsize = MIN(sgsize, map->dm_maxsegsz);
 446
 447 /*
 448 * Make sure we don't cross any boundaries.
 449 */
 450 if (map->_dm_boundary > 0) {
 451 baddr = (curaddr + map->_dm_boundary) & bmask;
 452 if (sgsize > (baddr - curaddr))
 453 sgsize = (baddr - curaddr);
 454 }
 455
 456 /*
 457 * Insert chunk into a segment, coalescing with
 458 * the previous segment if possible.
 459 */
 460 if (first) {
 461 map->dm_segs[seg].ds_addr =
 462 PHYS_TO_BUS_MEM(t, curaddr);
 463 map->dm_segs[seg].ds_len = sgsize;
 464 first = 0;
 465 } else {
 466 if (curaddr == lastaddr &&
 467 (map->dm_segs[seg].ds_len + sgsize) <=
 468 map->dm_maxsegsz &&
 469 (map->_dm_boundary == 0 ||
 470 (map->dm_segs[seg].ds_addr & bmask) ==
 471 (PHYS_TO_BUS_MEM(t, curaddr) & bmask)))
 472 map->dm_segs[seg].ds_len += sgsize;
 473 else {
 474 if (++seg >= map->_dm_segcnt)
 475 break;
 476 map->dm_segs[seg].ds_addr =
 477 PHYS_TO_BUS_MEM(t, curaddr);
 478 map->dm_segs[seg].ds_len = sgsize;
 479 }
 480 }
 481
 482 lastaddr = curaddr + sgsize;
 483 size -= sgsize;
 484 if ((isgsize -= sgsize) == 0) {
 485 iseg++;
 486 KASSERT(iseg < nsegs);
 487 busaddr = segs[iseg].ds_addr;
 488 isgsize = segs[iseg].ds_len;
 489 }
 490 }
 491
 492 if (size > 0)
 493 return EFBIG;
408 494
409 panic("_bus_dmamap_load_raw: not implemented"); 495 return 0;
410} 496}
411 497
412/* 498/*
413 * Common function for unloading a DMA map. May be called by 499 * Common function for unloading a DMA map. May be called by
414 * chipset-specific DMA map unload functions. 500 * chipset-specific DMA map unload functions.
415 */ 501 */
416void 502void
417_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map) 503_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
418{ 504{
419 505
420 /* 506 /*
421 * No resources to free; just mark the mappings as 507 * No resources to free; just mark the mappings as
422 * invalid. 508 * invalid.
423 */ 509 */
424 map->dm_maxsegsz = map->_dm_maxmaxsegsz; 510 map->dm_maxsegsz = map->_dm_maxmaxsegsz;
425 map->dm_mapsize = 0; 511 map->dm_mapsize = 0;
426 map->dm_nsegs = 0; 512 map->dm_nsegs = 0;
427} 513}
428 514
429/* 515/*
430 * Common function for DMA map synchronization. May be called 516 * Common function for DMA map synchronization. May be called
431 * by chipset-specific DMA map synchronization functions. 517 * by chipset-specific DMA map synchronization functions.
432 */ 518 */
433void 519void
434_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset, bus_size_t len, int ops) 520_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset, bus_size_t len, int ops)
435{ 521{
436 const int dcache_line_size = curcpu()->ci_ci.dcache_line_size; 522 const int dcache_line_size = curcpu()->ci_ci.dcache_line_size;
437 const bus_dma_segment_t *ds = map->dm_segs; 523 const bus_dma_segment_t *ds = map->dm_segs;
438 524
439// printf("%s(%p,%p,%#x,%u,%#x) from %p\n", __func__, 525// printf("%s(%p,%p,%#x,%u,%#x) from %p\n", __func__,
440// t, map, offset, len, ops, __builtin_return_address(0)); 526// t, map, offset, len, ops, __builtin_return_address(0));
441 527
442 if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 && 528 if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
443 (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0) 529 (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
444 panic("_bus_dmamap_sync: invalid ops %#x", ops); 530 panic("_bus_dmamap_sync: invalid ops %#x", ops);
445 531
446#ifdef DIAGNOSTIC 532#ifdef DIAGNOSTIC
447 if (offset + len > map->dm_mapsize) 533 if (offset + len > map->dm_mapsize)
448 panic("%s: ops %#x mapsize %u: bad offset (%u) and/or length (%u)", __func__, ops, map->dm_mapsize, offset, len); 534 panic("%s: ops %#x mapsize %u: bad offset (%u) and/or length (%u)", __func__, ops, map->dm_mapsize, offset, len);
449#endif 535#endif
450 536
451 /* 537 /*
452 * Skip leading amount 538 * Skip leading amount
453 */ 539 */
454 while (offset >= ds->ds_len) { 540 while (offset >= ds->ds_len) {
455 offset -= ds->ds_len; 541 offset -= ds->ds_len;
456 ds++; 542 ds++;
457 } 543 }
458 EIEIO; 544 EIEIO;
459 for (; len > 0; ds++, offset = 0) { 545 for (; len > 0; ds++, offset = 0) {
460 bus_size_t seglen = ds->ds_len - offset; 546 bus_size_t seglen = ds->ds_len - offset;
461 bus_addr_t addr = BUS_MEM_TO_PHYS(t, ds->ds_addr) + offset; 547 bus_addr_t addr = BUS_MEM_TO_PHYS(t, ds->ds_addr) + offset;
462 if (seglen > len) 548 if (seglen > len)
463 seglen = len; 549 seglen = len;
464 len -= seglen; 550 len -= seglen;
465 KASSERT(ds < &map->dm_segs[map->dm_nsegs]); 551 KASSERT(ds < &map->dm_segs[map->dm_nsegs]);
466 /* 552 /*
467 * Readjust things to start on cacheline boundarys 553 * Readjust things to start on cacheline boundarys
468 */ 554 */
469 offset = (addr & (dcache_line_size-1)); 555 offset = (addr & (dcache_line_size-1));
470 seglen += offset; 556 seglen += offset;
471 addr -= offset; 557 addr -= offset;
472 /* 558 /*
473 * Now do the appropriate thing. 559 * Now do the appropriate thing.
474 */ 560 */
475 switch (ops) { 561 switch (ops) {
476 case BUS_DMASYNC_PREWRITE: 562 case BUS_DMASYNC_PREWRITE:
477 /* 563 /*
478 * Make sure cache contents are in memory for the DMA. 564 * Make sure cache contents are in memory for the DMA.
479 */ 565 */
480 dcbst(addr, seglen, dcache_line_size); 566 dcbst(addr, seglen, dcache_line_size);
481 break; 567 break;
482 case BUS_DMASYNC_PREREAD: 568 case BUS_DMASYNC_PREREAD:
483 /* 569 /*
484 * If the region to be invalidated doesn't fall on 570 * If the region to be invalidated doesn't fall on
485 * cacheline boundary, flush that cacheline so we 571 * cacheline boundary, flush that cacheline so we
486 * preserve the leading content. 572 * preserve the leading content.
487 */ 573 */
488 if (offset) { 574 if (offset) {
489 dcbf(addr, 1, 1); 575 dcbf(addr, 1, 1);
490 /* 576 /*
491 * If we are doing <= one cache line, stop now. 577 * If we are doing <= one cache line, stop now.
492 */ 578 */
493 if (seglen <= dcache_line_size) 579 if (seglen <= dcache_line_size)
494 break; 580 break;
495 /* 581 /*
496 * Advance one cache line since we've flushed 582 * Advance one cache line since we've flushed
497 * this one. 583 * this one.
498 */ 584 */
499 addr += dcache_line_size; 585 addr += dcache_line_size;
500 seglen -= dcache_line_size; 586 seglen -= dcache_line_size;
501 } 587 }
502 /* 588 /*
503 * If the byte after the region to be invalidated 589 * If the byte after the region to be invalidated
504 * doesn't fall on cacheline boundary, flush that 590 * doesn't fall on cacheline boundary, flush that
505 * cacheline so we preserve the trailing content. 591 * cacheline so we preserve the trailing content.
506 */ 592 */
507 if (seglen & (dcache_line_size-1)) { 593 if (seglen & (dcache_line_size-1)) {
508 dcbf(addr + seglen, 1, 1); 594 dcbf(addr + seglen, 1, 1);
509 if (seglen <= dcache_line_size) 595 if (seglen <= dcache_line_size)
510 break; 596 break;
511 /* 597 /*
512 * Truncate the length to a multiple of a 598 * Truncate the length to a multiple of a
513 * dcache line size. No reason to flush 599 * dcache line size. No reason to flush
514 * the last entry again. 600 * the last entry again.
515 */ 601 */
516 seglen &= ~(dcache_line_size - 1); 602 seglen &= ~(dcache_line_size - 1);
517 } 603 }
518 SYNC; /* is this needed? */ 604 SYNC; /* is this needed? */
519 EIEIO; /* is this needed? */ 605 EIEIO; /* is this needed? */
520 /* FALLTHROUGH */ 606 /* FALLTHROUGH */
521 case BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE: 607 case BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE:
522 case BUS_DMASYNC_POSTREAD: 608 case BUS_DMASYNC_POSTREAD:
523 /* 609 /*
524 * The contents will have changed, make sure to remove 610 * The contents will have changed, make sure to remove
525 * them from the cache. Note: some implementation 611 * them from the cache. Note: some implementation
526 * implement dcbi identically to dcbf. Thus if the 612 * implement dcbi identically to dcbf. Thus if the
527 * cacheline has data, it will be written to memory. 613 * cacheline has data, it will be written to memory.
528 * If the DMA is updating the same cacheline at the 614 * If the DMA is updating the same cacheline at the
529 * time, bad things can happen. 615 * time, bad things can happen.
530 */ 616 */
531 dcbi(addr, seglen, dcache_line_size); 617 dcbi(addr, seglen, dcache_line_size);
532 break; 618 break;
533 case BUS_DMASYNC_POSTWRITE: 619 case BUS_DMASYNC_POSTWRITE:
534 /* 620 /*
535 * Do nothing. 621 * Do nothing.
536 */ 622 */
537 break; 623 break;
538 case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE: 624 case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
539 /* 625 /*
540 * Force it to memory and remove from cache. 626 * Force it to memory and remove from cache.
541 */ 627 */
542 dcbf(addr, seglen, dcache_line_size); 628 dcbf(addr, seglen, dcache_line_size);
543 break; 629 break;
544 } 630 }
545 } 631 }
546 __asm volatile("sync"); 632 __asm volatile("sync");
547} 633}
548 634
549/* 635/*
550 * Common function for DMA-safe memory allocation. May be called 636 * Common function for DMA-safe memory allocation. May be called
551 * by bus-specific DMA memory allocation functions. 637 * by bus-specific DMA memory allocation functions.
552 */ 638 */
553int 639int
554_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags) 640_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags)
555{ 641{
556 paddr_t start = 0xffffffff, end = 0; 642 paddr_t start = 0xffffffff, end = 0;
557 uvm_physseg_t bank; 643 uvm_physseg_t bank;
558 644
559 for (bank = uvm_physseg_get_first(); 645 for (bank = uvm_physseg_get_first();
560 uvm_physseg_valid_p(bank); 646 uvm_physseg_valid_p(bank);
561 bank = uvm_physseg_get_next(bank)) { 647 bank = uvm_physseg_get_next(bank)) {
562 if (start > ptoa(uvm_physseg_get_avail_start(bank))) 648 if (start > ptoa(uvm_physseg_get_avail_start(bank)))
563 start = ptoa(uvm_physseg_get_avail_start(bank)); 649 start = ptoa(uvm_physseg_get_avail_start(bank));
564 if (end < ptoa(uvm_physseg_get_avail_end(bank))) 650 if (end < ptoa(uvm_physseg_get_avail_end(bank)))
565 end = ptoa(uvm_physseg_get_avail_end(bank)); 651 end = ptoa(uvm_physseg_get_avail_end(bank));
566 } 652 }
567 653
568 return _bus_dmamem_alloc_range(t, size, alignment, boundary, segs, 654 return _bus_dmamem_alloc_range(t, size, alignment, boundary, segs,
569 nsegs, rsegs, flags, start, end - PAGE_SIZE); 655 nsegs, rsegs, flags, start, end - PAGE_SIZE);
570} 656}
571 657
572/* 658/*
573 * Common function for freeing DMA-safe memory. May be called by 659 * Common function for freeing DMA-safe memory. May be called by
574 * bus-specific DMA memory free functions. 660 * bus-specific DMA memory free functions.
575 */ 661 */
576void 662void
577_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs) 663_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
578{ 664{
579 struct vm_page *m; 665 struct vm_page *m;
580 bus_addr_t addr; 666 bus_addr_t addr;
581 struct pglist mlist; 667 struct pglist mlist;
582 int curseg; 668 int curseg;
583 669
584 /* 670 /*
585 * Build a list of pages to free back to the VM system. 671 * Build a list of pages to free back to the VM system.
586 */ 672 */
587 TAILQ_INIT(&mlist); 673 TAILQ_INIT(&mlist);
588 for (curseg = 0; curseg < nsegs; curseg++) { 674 for (curseg = 0; curseg < nsegs; curseg++) {
589 for (addr = BUS_MEM_TO_PHYS(t, segs[curseg].ds_addr); 675 for (addr = BUS_MEM_TO_PHYS(t, segs[curseg].ds_addr);
590 addr < (BUS_MEM_TO_PHYS(t, segs[curseg].ds_addr) 676 addr < (BUS_MEM_TO_PHYS(t, segs[curseg].ds_addr)
591 + segs[curseg].ds_len); 677 + segs[curseg].ds_len);
592 addr += PAGE_SIZE) { 678 addr += PAGE_SIZE) {
593 m = PHYS_TO_VM_PAGE(addr); 679 m = PHYS_TO_VM_PAGE(addr);
594 TAILQ_INSERT_TAIL(&mlist, m, pageq.queue); 680 TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
595 } 681 }
596 } 682 }
597 683
598 uvm_pglistfree(&mlist); 684 uvm_pglistfree(&mlist);
599} 685}
600 686
601/* 687/*
602 * Common function for mapping DMA-safe memory. May be called by 688 * Common function for mapping DMA-safe memory. May be called by
603 * bus-specific DMA memory map functions. 689 * bus-specific DMA memory map functions.
604 */ 690 */
605int 691int
606_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size, void **kvap, int flags) 692_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size, void **kvap, int flags)
607{ 693{
608 vaddr_t va; 694 vaddr_t va;
609 bus_addr_t addr; 695 bus_addr_t addr;
610 int curseg; 696 int curseg;
611 const uvm_flag_t kmflags = 697 const uvm_flag_t kmflags =
612 (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0; 698 (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;
613 699
614 size = round_page(size); 700 size = round_page(size);
615 701
616#ifdef PMAP_MAP_POOLPAGE 702#ifdef PMAP_MAP_POOLPAGE
617 /* 703 /*
618 * If we are mapping a cacheable physically contiguous segment, treat 704 * If we are mapping a cacheable physically contiguous segment, treat
619 * it as if we are mapping a poolpage and avoid consuming any KVAs. 705 * it as if we are mapping a poolpage and avoid consuming any KVAs.
620 */ 706 */
621 if (nsegs == 1 && (flags & BUS_DMA_DONTCACHE) == 0) { 707 if (nsegs == 1 && (flags & BUS_DMA_DONTCACHE) == 0) {
622 KASSERT(size == segs->ds_len); 708 KASSERT(size == segs->ds_len);
623 addr = BUS_MEM_TO_PHYS(t, segs->ds_addr); 709 addr = BUS_MEM_TO_PHYS(t, segs->ds_addr);
624 *kvap = (void *)PMAP_MAP_POOLPAGE(addr); 710 *kvap = (void *)PMAP_MAP_POOLPAGE(addr);
625 return 0; 711 return 0;
626 } 712 }
627#endif 713#endif
628 714
629 va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags); 715 va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
630 716
631 if (va == 0) 717 if (va == 0)
632 return (ENOMEM); 718 return (ENOMEM);
633 719
634 *kvap = (void *)va; 720 *kvap = (void *)va;
635 721
636 for (curseg = 0; curseg < nsegs; curseg++) { 722 for (curseg = 0; curseg < nsegs; curseg++) {
637 for (addr = BUS_MEM_TO_PHYS(t, segs[curseg].ds_addr); 723 for (addr = BUS_MEM_TO_PHYS(t, segs[curseg].ds_addr);
638 addr < (BUS_MEM_TO_PHYS(t, segs[curseg].ds_addr) 724 addr < (BUS_MEM_TO_PHYS(t, segs[curseg].ds_addr)
639 + segs[curseg].ds_len); 725 + segs[curseg].ds_len);
640 addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) { 726 addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
641 if (size == 0) 727 if (size == 0)
642 panic("_bus_dmamem_map: size botch"); 728 panic("_bus_dmamem_map: size botch");
643 /* 729 /*
644 * If we are mapping nocache, flush the page from 730 * If we are mapping nocache, flush the page from
645 * cache before we map it. 731 * cache before we map it.
646 */ 732 */
647 if (flags & BUS_DMA_DONTCACHE) 733 if (flags & BUS_DMA_DONTCACHE)
648 dcbf(addr, PAGE_SIZE, 734 dcbf(addr, PAGE_SIZE,
649 curcpu()->ci_ci.dcache_line_size); 735 curcpu()->ci_ci.dcache_line_size);
650 pmap_kenter_pa(va, addr, 736 pmap_kenter_pa(va, addr,
651 VM_PROT_READ | VM_PROT_WRITE, 737 VM_PROT_READ | VM_PROT_WRITE,
652 PMAP_WIRED | 738 PMAP_WIRED |
653 ((flags & BUS_DMA_DONTCACHE) ? PMAP_NOCACHE : 0)); 739 ((flags & BUS_DMA_DONTCACHE) ? PMAP_NOCACHE : 0));
654 } 740 }
655 } 741 }
656 742
657 return (0); 743 return (0);
658} 744}
659 745
660/* 746/*
661 * Common function for unmapping DMA-safe memory. May be called by 747 * Common function for unmapping DMA-safe memory. May be called by
662 * bus-specific DMA memory unmapping functions. 748 * bus-specific DMA memory unmapping functions.
663 */ 749 */
664void 750void
665_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size) 751_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
666{ 752{
667 vaddr_t va = (vaddr_t) kva; 753 vaddr_t va = (vaddr_t) kva;
668 754
669#ifdef DIAGNOSTIC 755#ifdef DIAGNOSTIC
670 if (va & PGOFSET) 756 if (va & PGOFSET)
671 panic("_bus_dmamem_unmap"); 757 panic("_bus_dmamem_unmap");
672#endif 758#endif
673 759
674 if (va >= VM_MIN_KERNEL_ADDRESS && va < VM_MAX_KERNEL_ADDRESS) { 760 if (va >= VM_MIN_KERNEL_ADDRESS && va < VM_MAX_KERNEL_ADDRESS) {
675 size = round_page(size); 761 size = round_page(size);
676 pmap_kremove(va, size); 762 pmap_kremove(va, size);
677 uvm_km_free(kernel_map, va, size, UVM_KMF_VAONLY); 763 uvm_km_free(kernel_map, va, size, UVM_KMF_VAONLY);
678 } 764 }
679} 765}
680 766
681/* 767/*
682 * Common functin for mmap(2)'ing DMA-safe memory. May be called by 768 * Common functin for mmap(2)'ing DMA-safe memory. May be called by
683 * bus-specific DMA mmap(2)'ing functions. 769 * bus-specific DMA mmap(2)'ing functions.
684 */ 770 */
685paddr_t 771paddr_t
686_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, off_t off, int prot, int flags) 772_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, off_t off, int prot, int flags)
687{ 773{
688 int i; 774 int i;
689 775
690 for (i = 0; i < nsegs; i++) { 776 for (i = 0; i < nsegs; i++) {
691#ifdef DIAGNOSTIC 777#ifdef DIAGNOSTIC
692 if (off & PGOFSET) 778 if (off & PGOFSET)
693 panic("_bus_dmamem_mmap: offset unaligned"); 779 panic("_bus_dmamem_mmap: offset unaligned");
694 if (BUS_MEM_TO_PHYS(t, segs[i].ds_addr) & PGOFSET) 780 if (BUS_MEM_TO_PHYS(t, segs[i].ds_addr) & PGOFSET)
695 panic("_bus_dmamem_mmap: segment unaligned"); 781 panic("_bus_dmamem_mmap: segment unaligned");
696 if (segs[i].ds_len & PGOFSET) 782 if (segs[i].ds_len & PGOFSET)
697 panic("_bus_dmamem_mmap: segment size not multiple" 783 panic("_bus_dmamem_mmap: segment size not multiple"
698 " of page size"); 784 " of page size");
699#endif 785#endif
700 if (off >= segs[i].ds_len) { 786 if (off >= segs[i].ds_len) {
701 off -= segs[i].ds_len; 787 off -= segs[i].ds_len;
702 continue; 788 continue;
703 } 789 }
704 790
705 return (BUS_MEM_TO_PHYS(t, segs[i].ds_addr) + off); 791 return (BUS_MEM_TO_PHYS(t, segs[i].ds_addr) + off);
706 } 792 }
707 793
708 /* Page not found. */ 794 /* Page not found. */
709 return (-1); 795 return (-1);
710} 796}
711 797
712/* 798/*
713 * Allocate physical memory from the given physical address range. 799 * Allocate physical memory from the given physical address range.
714 * Called by DMA-safe memory allocation methods. 800 * Called by DMA-safe memory allocation methods.
715 */ 801 */
716int 802int
717_bus_dmamem_alloc_range( 803_bus_dmamem_alloc_range(
718 bus_dma_tag_t t, 804 bus_dma_tag_t t,
719 bus_size_t size, 805 bus_size_t size,
720 bus_size_t alignment, 806 bus_size_t alignment,
721 bus_size_t boundary, 807 bus_size_t boundary,
722 bus_dma_segment_t *segs, 808 bus_dma_segment_t *segs,
723 int nsegs, 809 int nsegs,
724 int *rsegs, 810 int *rsegs,
725 int flags, 811 int flags,
726 paddr_t low, 812 paddr_t low,
727 paddr_t high) 813 paddr_t high)
728{ 814{
729 paddr_t curaddr, lastaddr; 815 paddr_t curaddr, lastaddr;
730 struct vm_page *m; 816 struct vm_page *m;
731 struct pglist mlist; 817 struct pglist mlist;
732 int curseg, error; 818 int curseg, error;
733 819
734 /* Always round the size. */ 820 /* Always round the size. */
735 size = round_page(size); 821 size = round_page(size);
736 822
737 /* 823 /*
738 * Allocate pages from the VM system. 824 * Allocate pages from the VM system.
739 */ 825 */
740 error = uvm_pglistalloc(size, low, high, alignment, boundary, 826 error = uvm_pglistalloc(size, low, high, alignment, boundary,
741 &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0); 827 &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
742 if (error) 828 if (error)
743 return (error); 829 return (error);
744 830
745 /* 831 /*
746 * Compute the location, size, and number of segments actually 832 * Compute the location, size, and number of segments actually
747 * returned by the VM code. 833 * returned by the VM code.
748 */ 834 */
749 m = mlist.tqh_first; 835 m = mlist.tqh_first;
750 curseg = 0; 836 curseg = 0;
751 lastaddr = VM_PAGE_TO_PHYS(m); 837 lastaddr = VM_PAGE_TO_PHYS(m);
752 segs[curseg].ds_addr = PHYS_TO_BUS_MEM(t, lastaddr); 838 segs[curseg].ds_addr = PHYS_TO_BUS_MEM(t, lastaddr);
753 segs[curseg].ds_len = PAGE_SIZE; 839 segs[curseg].ds_len = PAGE_SIZE;
754 m = m->pageq.queue.tqe_next; 840 m = m->pageq.queue.tqe_next;
755 841
756 for (; m != NULL; m = m->pageq.queue.tqe_next) { 842 for (; m != NULL; m = m->pageq.queue.tqe_next) {
757 curaddr = VM_PAGE_TO_PHYS(m); 843 curaddr = VM_PAGE_TO_PHYS(m);
758#ifdef DIAGNOSTIC 844#ifdef DIAGNOSTIC
759 if (curaddr < low || curaddr >= high) { 845 if (curaddr < low || curaddr >= high) {
760 printf("vm_page_alloc_memory returned non-sensical" 846 printf("vm_page_alloc_memory returned non-sensical"
761 " address 0x%lx\n", curaddr); 847 " address 0x%lx\n", curaddr);
762 panic("_bus_dmamem_alloc_range"); 848 panic("_bus_dmamem_alloc_range");
763 } 849 }
764#endif 850#endif
765 if (curaddr == (lastaddr + PAGE_SIZE)) 851 if (curaddr == (lastaddr + PAGE_SIZE))
766 segs[curseg].ds_len += PAGE_SIZE; 852 segs[curseg].ds_len += PAGE_SIZE;
767 else { 853 else {
768 curseg++; 854 curseg++;
769 segs[curseg].ds_addr = PHYS_TO_BUS_MEM(t, curaddr); 855 segs[curseg].ds_addr = PHYS_TO_BUS_MEM(t, curaddr);
770 segs[curseg].ds_len = PAGE_SIZE; 856 segs[curseg].ds_len = PAGE_SIZE;
771 } 857 }
772 lastaddr = curaddr; 858 lastaddr = curaddr;
773 } 859 }
774 860
775 *rsegs = curseg + 1; 861 *rsegs = curseg + 1;
776 862
777 return (0); 863 return (0);
778} 864}
779 865
780/* 866/*
781 * Generic form of PHYS_TO_BUS_MEM(). 867 * Generic form of PHYS_TO_BUS_MEM().
782 */ 868 */
783bus_addr_t 869bus_addr_t
784_bus_dma_phys_to_bus_mem_generic(bus_dma_tag_t t, bus_addr_t addr) 870_bus_dma_phys_to_bus_mem_generic(bus_dma_tag_t t, bus_addr_t addr)
785{ 871{
786 872
787 return (addr); 873 return (addr);
788} 874}
789 875
790/* 876/*
791 * Generic form of BUS_MEM_TO_PHYS(). 877 * Generic form of BUS_MEM_TO_PHYS().
792 */ 878 */
793bus_addr_t 879bus_addr_t
794_bus_dma_bus_mem_to_phys_generic(bus_dma_tag_t t, bus_addr_t addr) 880_bus_dma_bus_mem_to_phys_generic(bus_dma_tag_t t, bus_addr_t addr)
795{ 881{
796 882
797 return (addr); 883 return (addr);
798} 884}