Wed Feb 16 23:30:52 2022 UTC
powerpc: Implement bus_dmamap_load_raw.

We can probably delete some of the round-trips between bus addresses and
physical addresses; they are there only to mirror the logic already in
_bus_dmamap_load_buffer.


(riastradh)
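For context, here is a minimal driver-side sketch (not part of this commit) of
the call sequence that bus_dmamap_load_raw() serves on NetBSD: allocate
DMA-safe memory with bus_dmamem_alloc(), create a map, and load the raw
segments.  The helper name example_alloc_dma and the error-unwinding layout
are illustrative assumptions; only the bus_dma(9) calls themselves come from
the documented API.

/*
 * Illustrative sketch only: allocate DMA-safe memory and load it with
 * bus_dmamap_load_raw(), which this change implements for powerpc.
 * The helper name and error unwinding are hypothetical.
 */
#include <sys/param.h>
#include <sys/bus.h>

static int
example_alloc_dma(bus_dma_tag_t dmat, bus_size_t size,
    bus_dma_segment_t *seg, bus_dmamap_t *mapp)
{
	int nsegs, error;

	/* Allocate DMA-safe pages; ask for a single segment. */
	error = bus_dmamem_alloc(dmat, size, PAGE_SIZE, 0,
	    seg, 1, &nsegs, BUS_DMA_WAITOK);
	if (error)
		return error;

	/* Create a map big enough for the allocation. */
	error = bus_dmamap_create(dmat, size, nsegs, size, 0,
	    BUS_DMA_WAITOK, mapp);
	if (error)
		goto free;

	/*
	 * Load the raw segments into the map; before this commit the
	 * powerpc back end panicked with "not implemented" here.
	 */
	error = bus_dmamap_load_raw(dmat, *mapp, seg, nsegs, size,
	    BUS_DMA_WAITOK);
	if (error)
		goto destroy;

	return 0;

destroy:
	bus_dmamap_destroy(dmat, *mapp);
free:
	bus_dmamem_free(dmat, seg, nsegs);
	return error;
}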
diff -r1.52 -r1.53 src/sys/arch/powerpc/powerpc/bus_dma.c

cvs diff -r1.52 -r1.53 src/sys/arch/powerpc/powerpc/bus_dma.c

--- src/sys/arch/powerpc/powerpc/bus_dma.c 2020/07/06 10:31:24 1.52
+++ src/sys/arch/powerpc/powerpc/bus_dma.c 2022/02/16 23:30:52 1.53
@@ -1,14 +1,14 @@
-/*	$NetBSD: bus_dma.c,v 1.52 2020/07/06 10:31:24 rin Exp $	*/
+/*	$NetBSD: bus_dma.c,v 1.53 2022/02/16 23:30:52 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
  * NASA Ames Research Center.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
@@ -23,27 +23,27 @@
  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 #define _POWERPC_BUS_DMA_PRIVATE
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.52 2020/07/06 10:31:24 rin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.53 2022/02/16 23:30:52 riastradh Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_ppcarch.h"
 #endif
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
 #include <sys/device.h>
 #include <sys/kmem.h>
 #include <sys/proc.h>
 #include <sys/mbuf.h>
 #include <sys/bus.h>
@@ -391,32 +391,118 @@
 
 		resid -= minlen;
 	}
 	if (error == 0) {
 		map->dm_mapsize = uio->uio_resid;
 		map->dm_nsegs = seg + 1;
 	}
 	return (error);
 }
 
 /*
  * Like _bus_dmamap_load(), but for raw memory allocated with
  * bus_dmamem_alloc().
+ *
+ * XXX This is too much copypasta of _bus_dmamap_load_buffer.
  */
 int
-_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
+_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
+    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
 {
+	bus_size_t sgsize, isgsize;
+	bus_size_t busaddr, curaddr, lastaddr, baddr, bmask;
+	int seg, iseg, first;
+
+	if (size == 0)
+		return 0;
+
+	lastaddr = 0;
+	bmask = ~(map->_dm_boundary - 1);
+
+	first = 0;
+	iseg = 0;
+	busaddr = segs[iseg].ds_addr;
+	isgsize = segs[iseg].ds_len;
+	for (seg = 0; size > 0;) {
+		/*
+		 * Get the physical address for this segment.
+		 */
+		curaddr = BUS_MEM_TO_PHYS(t, busaddr);
+
+		/*
+		 * If we're beyond the bounce threshold, notify
+		 * the caller.
+		 */
+		if (map->_dm_bounce_thresh != 0 &&
+		    curaddr >= map->_dm_bounce_thresh)
+			return EINVAL;
+
+		/*
+		 * Compute the segment size, and adjust counts.
+		 */
+		sgsize = PAGE_SIZE - ((u_long)curaddr & PGOFSET);
+		sgsize = MIN(sgsize, isgsize);
+		sgsize = MIN(sgsize, size);
+		sgsize = MIN(sgsize, map->dm_maxsegsz);
+
+		/*
+		 * Make sure we don't cross any boundaries.
+		 */
+		if (map->_dm_boundary > 0) {
+			baddr = (curaddr + map->_dm_boundary) & bmask;
+			if (sgsize > (baddr - curaddr))
+				sgsize = (baddr - curaddr);
+		}
+
+		/*
+		 * Insert chunk into a segment, coalescing with
+		 * the previous segment if possible.
+		 */
+		if (first) {
+			map->dm_segs[seg].ds_addr =
+			    PHYS_TO_BUS_MEM(t, curaddr);
+			map->dm_segs[seg].ds_len = sgsize;
+			first = 0;
+		} else {
+			if (curaddr == lastaddr &&
+			    (map->dm_segs[seg].ds_len + sgsize) <=
+			    map->dm_maxsegsz &&
+			    (map->_dm_boundary == 0 ||
+			    (map->dm_segs[seg].ds_addr & bmask) ==
+			    (PHYS_TO_BUS_MEM(t, curaddr) & bmask)))
+				map->dm_segs[seg].ds_len += sgsize;
+			else {
+				if (++seg >= map->_dm_segcnt)
+					break;
+				map->dm_segs[seg].ds_addr =
+				    PHYS_TO_BUS_MEM(t, curaddr);
+				map->dm_segs[seg].ds_len = sgsize;
+			}
+		}
+
+		lastaddr = curaddr + sgsize;
+		size -= sgsize;
+		if ((isgsize -= sgsize) == 0) {
+			iseg++;
+			KASSERT(iseg < nsegs);
+			busaddr = segs[iseg].ds_addr;
+			isgsize = segs[iseg].ds_len;
+		}
+	}
+
+	if (size > 0)
+		return EFBIG;
 
-	panic("_bus_dmamap_load_raw: not implemented");
+	return 0;
 }
 
 /*
  * Common function for unloading a DMA map.  May be called by
  * chipset-specific DMA map unload functions.
  */
 void
 _bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
 {
 
 	/*
 	 * No resources to free; just mark the mappings as
 	 * invalid.
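
The boundary clamp in the new code (baddr = (curaddr + boundary) & bmask) can
be illustrated with concrete numbers.  The following standalone sketch is not
part of the commit; the 64 KiB boundary and the sample address are assumed
values chosen only to show how a candidate segment is trimmed so it does not
cross a boundary-aligned address.

/*
 * Standalone illustration (userland, not kernel code) of the boundary
 * clamp used in _bus_dmamap_load_raw above: a segment must not cross a
 * boundary-aligned address, so its size is trimmed to end exactly at
 * the next boundary.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t boundary = 0x10000;	/* example: 64 KiB DMA boundary */
	uint64_t bmask = ~(boundary - 1);
	uint64_t curaddr = 0x2fff0;	/* example address, 16 bytes below a boundary */
	uint64_t sgsize = 0x1000;	/* candidate segment size: 4 KiB */
	uint64_t baddr;

	/* Same computation as in the diff above. */
	baddr = (curaddr + boundary) & bmask;	/* next boundary: 0x30000 */
	if (sgsize > baddr - curaddr)
		sgsize = baddr - curaddr;	/* clamped to 0x10 bytes */

	printf("segment trimmed to %#llx bytes\n",
	    (unsigned long long)sgsize);
	return 0;
}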