Sun Jul 18 05:12:27 2021 UTC
Allow an SGMAP implementation to specify a minimum alignment for
SGMAP DMA segments.  If none is specified, PAGE_SIZE is used, as before.


(thorpej)
diff -r1.28 -r1.29 src/sys/arch/alpha/common/sgmap_common.c
diff -r1.42 -r1.43 src/sys/arch/alpha/common/sgmap_typedep.c
diff -r1.5 -r1.6 src/sys/arch/alpha/include/bus_defs.h
diff -r1.14 -r1.15 src/sys/arch/alpha/tc/tc_dma.c
diff -r1.23 -r1.24 src/sys/arch/alpha/tc/tc_dma_3000_500.c
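
As an editorial illustration (not part of this commit), here is a minimal
sketch of how a platform's SGMAP setup code can use the new hook: seed the
tag's _sgmap_minalign field before calling alpha_sgmap_init().  The field
name and the alpha_sgmap_init() signature are taken from the diffs below;
the xx_* names, the softc layout, and the 32K value are hypothetical.

    /*
     * Hypothetical chipset attach path.  A controller whose SGMAP
     * window wants segments aligned to 32K records that minimum
     * before initializing the sgmap; leaving the field at 0 keeps
     * the old PAGE_SIZE behavior.
     */
    static void
    xx_dma_sgmap_setup(struct xx_softc *sc)
    {
            bus_dma_tag_t t = &sc->sc_dmat_sgmap;

            /* New in this change: minimum SGVA alignment for the tag. */
            t->_sgmap_minalign = 32 * 1024;

            alpha_sgmap_init(t, &sc->sc_sgmap, "xx_sgmap",
                sc->sc_wbase,       /* window base */
                sc->sc_sgvabase,    /* SGVA base */
                sc->sc_sgvasize,    /* SGVA size */
                sizeof(uint64_t),   /* PTE size (8-byte PTEs assumed) */
                NULL,               /* no bridge-resident page table */
                0);                 /* no minimum page-table alignment */
    }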

cvs diff -r1.28 -r1.29 src/sys/arch/alpha/common/sgmap_common.c

--- src/sys/arch/alpha/common/sgmap_common.c 2021/07/04 22:42:35 1.28
+++ src/sys/arch/alpha/common/sgmap_common.c 2021/07/18 05:12:27 1.29
@@ -1,173 +1,181 @@
1/* $NetBSD: sgmap_common.c,v 1.28 2021/07/04 22:42:35 thorpej Exp $ */ 1/* $NetBSD: sgmap_common.c,v 1.29 2021/07/18 05:12:27 thorpej Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1997, 1998, 2001 The NetBSD Foundation, Inc. 4 * Copyright (c) 1997, 1998, 2001 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center. 9 * NASA Ames Research Center.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * are met: 13 * are met:
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer. 15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright 16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the 17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution. 18 * documentation and/or other materials provided with the distribution.
19 * 19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE. 30 * POSSIBILITY OF SUCH DAMAGE.
31 */ 31 */
32 32
33#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */ 33#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
34 34
35__KERNEL_RCSID(0, "$NetBSD: sgmap_common.c,v 1.28 2021/07/04 22:42:35 thorpej Exp $"); 35__KERNEL_RCSID(0, "$NetBSD: sgmap_common.c,v 1.29 2021/07/18 05:12:27 thorpej Exp $");
36 36
37#include <sys/param.h> 37#include <sys/param.h>
38#include <sys/systm.h> 38#include <sys/systm.h>
39#include <sys/kernel.h> 39#include <sys/kernel.h>
40#include <sys/proc.h> 40#include <sys/proc.h>
41 41
42#include <uvm/uvm_extern.h> 42#include <uvm/uvm_extern.h>
43 43
44#define _ALPHA_BUS_DMA_PRIVATE 44#define _ALPHA_BUS_DMA_PRIVATE
45#include <sys/bus.h> 45#include <sys/bus.h>
46 46
47#include <alpha/common/sgmapvar.h> 47#include <alpha/common/sgmapvar.h>
48 48
49/* 49/*
50 * Some systems will prefetch the next page during a memory -> device DMA. 50 * Some systems will prefetch the next page during a memory -> device DMA.
51 * This can cause machine checks if there is not a spill page after the 51 * This can cause machine checks if there is not a spill page after the
52 * last page of the DMA (thus avoiding hitting an invalid SGMAP PTE). 52 * last page of the DMA (thus avoiding hitting an invalid SGMAP PTE).
53 */ 53 */
54vaddr_t alpha_sgmap_prefetch_spill_page_va; 54vaddr_t alpha_sgmap_prefetch_spill_page_va;
55bus_addr_t alpha_sgmap_prefetch_spill_page_pa; 55bus_addr_t alpha_sgmap_prefetch_spill_page_pa;
56 56
57void 57void
58alpha_sgmap_init(bus_dma_tag_t t, struct alpha_sgmap *sgmap, const char *name, 58alpha_sgmap_init(bus_dma_tag_t t, struct alpha_sgmap *sgmap, const char *name,
59 bus_addr_t wbase, bus_addr_t sgvabase, bus_size_t sgvasize, size_t ptesize, 59 bus_addr_t wbase, bus_addr_t sgvabase, bus_size_t sgvasize, size_t ptesize,
60 void *ptva, bus_size_t minptalign) 60 void *ptva, bus_size_t minptalign)
61{ 61{
62 bus_dma_segment_t seg; 62 bus_dma_segment_t seg;
63 size_t ptsize; 63 size_t ptsize;
64 int rseg; 64 int rseg;
65 65
66 if (sgvasize & PGOFSET) { 66 if (sgvasize & PGOFSET) {
67 printf("size botch for sgmap `%s'\n", name); 67 printf("size botch for sgmap `%s'\n", name);
68 goto die; 68 goto die;
69 } 69 }
70 70
 71 /*
 72 * If we don't yet have a minimum SGVA alignment, default
 73 * to the system page size.
 74 */
 75 if (t->_sgmap_minalign < PAGE_SIZE) {
 76 t->_sgmap_minalign = PAGE_SIZE;
 77 }
 78
71 sgmap->aps_wbase = wbase; 79 sgmap->aps_wbase = wbase;
72 sgmap->aps_sgvabase = sgvabase; 80 sgmap->aps_sgvabase = sgvabase;
73 sgmap->aps_sgvasize = sgvasize; 81 sgmap->aps_sgvasize = sgvasize;
74 82
75 if (ptva != NULL) { 83 if (ptva != NULL) {
76 /* 84 /*
77 * We already have a page table; this may be a system 85 * We already have a page table; this may be a system
78 * where the page table resides in bridge-resident SRAM. 86 * where the page table resides in bridge-resident SRAM.
79 */ 87 */
80 sgmap->aps_pt = ptva; 88 sgmap->aps_pt = ptva;
81 sgmap->aps_ptpa = 0; 89 sgmap->aps_ptpa = 0;
82 } else { 90 } else {
83 /* 91 /*
84 * Compute the page table size and allocate it. At minimum, 92 * Compute the page table size and allocate it. At minimum,
85 * this must be aligned to the page table size. However, 93 * this must be aligned to the page table size. However,
 86 * some platforms have more strict alignment requirements. 94 * some platforms have more strict alignment requirements.
87 */ 95 */
88 ptsize = (sgvasize / PAGE_SIZE) * ptesize; 96 ptsize = (sgvasize / PAGE_SIZE) * ptesize;
89 if (minptalign != 0) { 97 if (minptalign != 0) {
90 if (minptalign < ptsize) 98 if (minptalign < ptsize)
91 minptalign = ptsize; 99 minptalign = ptsize;
92 } else 100 } else
93 minptalign = ptsize; 101 minptalign = ptsize;
94 if (bus_dmamem_alloc(t, ptsize, minptalign, 0, &seg, 1, &rseg, 102 if (bus_dmamem_alloc(t, ptsize, minptalign, 0, &seg, 1, &rseg,
95 BUS_DMA_NOWAIT)) { 103 BUS_DMA_NOWAIT)) {
96 panic("unable to allocate page table for sgmap `%s'", 104 panic("unable to allocate page table for sgmap `%s'",
97 name); 105 name);
98 goto die; 106 goto die;
99 } 107 }
100 sgmap->aps_ptpa = seg.ds_addr; 108 sgmap->aps_ptpa = seg.ds_addr;
101 sgmap->aps_pt = (void *)ALPHA_PHYS_TO_K0SEG(sgmap->aps_ptpa); 109 sgmap->aps_pt = (void *)ALPHA_PHYS_TO_K0SEG(sgmap->aps_ptpa);
102 } 110 }
103 111
104 /* 112 /*
105 * Create the arena used to manage the virtual address 113 * Create the arena used to manage the virtual address
106 * space. 114 * space.
107 * 115 *
108 * XXX Consider using a quantum cache up to MAXPHYS+PAGE_SIZE 116 * XXX Consider using a quantum cache up to MAXPHYS+PAGE_SIZE
109 * XXX (extra page to handle the spill page). For now, we don't, 117 * XXX (extra page to handle the spill page). For now, we don't,
110 * XXX because we are using constrained allocations everywhere. 118 * XXX because we are using constrained allocations everywhere.
111 */ 119 */
112 sgmap->aps_arena = vmem_create(name, sgvabase, sgvasize, 120 sgmap->aps_arena = vmem_create(name, sgvabase, sgvasize,
113 PAGE_SIZE, /* quantum */ 121 PAGE_SIZE, /* quantum */
114 NULL, /* importfn */ 122 NULL, /* importfn */
115 NULL, /* releasefn */ 123 NULL, /* releasefn */
116 NULL, /* source */ 124 NULL, /* source */
117 0, /* qcache_max */ 125 0, /* qcache_max */
118 VM_SLEEP, 126 VM_SLEEP,
119 IPL_VM); 127 IPL_VM);
120 KASSERT(sgmap->aps_arena != NULL); 128 KASSERT(sgmap->aps_arena != NULL);
121 129
122 /* 130 /*
123 * Allocate a spill page if that hasn't already been done. 131 * Allocate a spill page if that hasn't already been done.
124 */ 132 */
125 if (alpha_sgmap_prefetch_spill_page_va == 0) { 133 if (alpha_sgmap_prefetch_spill_page_va == 0) {
126 if (bus_dmamem_alloc(t, PAGE_SIZE, 0, 0, &seg, 1, &rseg, 134 if (bus_dmamem_alloc(t, PAGE_SIZE, 0, 0, &seg, 1, &rseg,
127 BUS_DMA_NOWAIT)) { 135 BUS_DMA_NOWAIT)) {
128 printf("unable to allocate spill page for sgmap `%s'\n", 136 printf("unable to allocate spill page for sgmap `%s'\n",
129 name); 137 name);
130 goto die; 138 goto die;
131 } 139 }
132 alpha_sgmap_prefetch_spill_page_pa = seg.ds_addr; 140 alpha_sgmap_prefetch_spill_page_pa = seg.ds_addr;
133 alpha_sgmap_prefetch_spill_page_va = 141 alpha_sgmap_prefetch_spill_page_va =
134 ALPHA_PHYS_TO_K0SEG(alpha_sgmap_prefetch_spill_page_pa); 142 ALPHA_PHYS_TO_K0SEG(alpha_sgmap_prefetch_spill_page_pa);
135 memset((void *)alpha_sgmap_prefetch_spill_page_va, 0, 143 memset((void *)alpha_sgmap_prefetch_spill_page_va, 0,
136 PAGE_SIZE); 144 PAGE_SIZE);
137 } 145 }
138  146
139 return; 147 return;
140 die: 148 die:
141 panic("alpha_sgmap_init"); 149 panic("alpha_sgmap_init");
142} 150}
143 151
144int 152int
145alpha_sgmap_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments, 153alpha_sgmap_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
146 bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp) 154 bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
147{ 155{
148 bus_dmamap_t map; 156 bus_dmamap_t map;
149 int error; 157 int error;
150 158
151 error = _bus_dmamap_create(t, size, nsegments, maxsegsz, 159 error = _bus_dmamap_create(t, size, nsegments, maxsegsz,
152 boundary, flags, &map); 160 boundary, flags, &map);
153 if (error) 161 if (error)
154 return (error); 162 return (error);
155 163
156 /* XXX BUS_DMA_ALLOCNOW */ 164 /* XXX BUS_DMA_ALLOCNOW */
157 165
158 if (error == 0) 166 if (error == 0)
159 *dmamp = map; 167 *dmamp = map;
160 else 168 else
161 alpha_sgmap_dmamap_destroy(t, map); 169 alpha_sgmap_dmamap_destroy(t, map);
162 170
163 return (error); 171 return (error);
164} 172}
165 173
166void 174void
167alpha_sgmap_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map) 175alpha_sgmap_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
168{ 176{
169 177
170 KASSERT(map->dm_mapsize == 0); 178 KASSERT(map->dm_mapsize == 0);
171 179
172 _bus_dmamap_destroy(t, map); 180 _bus_dmamap_destroy(t, map);
173} 181}
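
The key hunk above is the new block in alpha_sgmap_init() that raises the
tag's minimum SGVA alignment to PAGE_SIZE when the platform left it at 0
(or at anything smaller), so existing callers that never touch the field
keep their old behavior, while the load path can simply assert that the
field is initialized.  The rule, distilled into a stand-alone sketch
(illustrative code, not from the tree; alpha's 8K PAGE_SIZE assumed in
the worked values):

    /* Sketch of the defaulting rule applied by alpha_sgmap_init(). */
    static bus_size_t
    effective_minalign(bus_size_t requested)
    {
            return (requested < PAGE_SIZE) ? PAGE_SIZE : requested;
    }

    /*
     * effective_minalign(0)     == PAGE_SIZE  (old behavior kept)
     * effective_minalign(4096)  == PAGE_SIZE  (below the 8K page size)
     * effective_minalign(32768) == 32K        (new capability)
     */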

cvs diff -r1.42 -r1.43 src/sys/arch/alpha/common/sgmap_typedep.c

--- src/sys/arch/alpha/common/sgmap_typedep.c 2021/06/24 16:41:16 1.42
+++ src/sys/arch/alpha/common/sgmap_typedep.c 2021/07/18 05:12:27 1.43
@@ -1,599 +1,603 @@
1/* $NetBSD: sgmap_typedep.c,v 1.42 2021/06/24 16:41:16 thorpej Exp $ */ 1/* $NetBSD: sgmap_typedep.c,v 1.43 2021/07/18 05:12:27 thorpej Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1997, 1998, 2001 The NetBSD Foundation, Inc. 4 * Copyright (c) 1997, 1998, 2001 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center. 9 * NASA Ames Research Center.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * are met: 13 * are met:
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer. 15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright 16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the 17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution. 18 * documentation and/or other materials provided with the distribution.
19 * 19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE. 30 * POSSIBILITY OF SUCH DAMAGE.
31 */ 31 */
32 32
33#include <sys/cdefs.h> 33#include <sys/cdefs.h>
34__KERNEL_RCSID(1, "$NetBSD: sgmap_typedep.c,v 1.42 2021/06/24 16:41:16 thorpej Exp $"); 34__KERNEL_RCSID(1, "$NetBSD: sgmap_typedep.c,v 1.43 2021/07/18 05:12:27 thorpej Exp $");
35 35
36#include "opt_ddb.h" 36#include "opt_ddb.h"
37 37
38#include <sys/evcnt.h> 38#include <sys/evcnt.h>
39#include <uvm/uvm_extern.h> 39#include <uvm/uvm_extern.h>
40 40
41#define DMA_COUNT_DECL(cnt) _DMA_COUNT_DECL(dma_sgmap, cnt) 41#define DMA_COUNT_DECL(cnt) _DMA_COUNT_DECL(dma_sgmap, cnt)
42#define DMA_COUNT(cnt) _DMA_COUNT(dma_sgmap, cnt) 42#define DMA_COUNT(cnt) _DMA_COUNT(dma_sgmap, cnt)
43 43
44#ifdef SGMAP_DEBUG 44#ifdef SGMAP_DEBUG
45int __C(SGMAP_TYPE,_debug) = 0; 45int __C(SGMAP_TYPE,_debug) = 0;
46#endif 46#endif
47 47
48SGMAP_PTE_TYPE __C(SGMAP_TYPE,_prefetch_spill_page_pte); 48SGMAP_PTE_TYPE __C(SGMAP_TYPE,_prefetch_spill_page_pte);
49 49
50static void __C(SGMAP_TYPE,_do_unload)(bus_dma_tag_t, bus_dmamap_t, 50static void __C(SGMAP_TYPE,_do_unload)(bus_dma_tag_t, bus_dmamap_t,
51 struct alpha_sgmap *); 51 struct alpha_sgmap *);
52 52
53void 53void
54__C(SGMAP_TYPE,_init_spill_page_pte)(void) 54__C(SGMAP_TYPE,_init_spill_page_pte)(void)
55{ 55{
56 56
57 __C(SGMAP_TYPE,_prefetch_spill_page_pte) = 57 __C(SGMAP_TYPE,_prefetch_spill_page_pte) =
58 (alpha_sgmap_prefetch_spill_page_pa >> 58 (alpha_sgmap_prefetch_spill_page_pa >>
59 SGPTE_PGADDR_SHIFT) | SGPTE_VALID; 59 SGPTE_PGADDR_SHIFT) | SGPTE_VALID;
60} 60}
61 61
62DMA_COUNT_DECL(spill_page); 62DMA_COUNT_DECL(spill_page);
63DMA_COUNT_DECL(extra_segment); 63DMA_COUNT_DECL(extra_segment);
64DMA_COUNT_DECL(extra_segment_and_spill); 64DMA_COUNT_DECL(extra_segment_and_spill);
65 65
66static int 66static int
67__C(SGMAP_TYPE,_load_buffer)(bus_dma_tag_t t, bus_dmamap_t map, void *buf, 67__C(SGMAP_TYPE,_load_buffer)(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
68 size_t buflen, struct vmspace *vm, int flags, int * const segp, 68 size_t buflen, struct vmspace *vm, int flags, int * const segp,
69 struct alpha_sgmap *sgmap) 69 struct alpha_sgmap *sgmap)
70{ 70{
71 vaddr_t endva, va = (vaddr_t)buf; 71 vaddr_t endva, va = (vaddr_t)buf;
72 paddr_t pa; 72 paddr_t pa;
73 bus_addr_t dmaoffset, sgva, extra_sgva; 73 bus_addr_t dmaoffset, sgva, extra_sgva;
74 bus_size_t sgvalen, extra_sgvalen, boundary, alignment; 74 bus_size_t sgvalen, extra_sgvalen, boundary, alignment;
75 SGMAP_PTE_TYPE *pte, *page_table = sgmap->aps_pt; 75 SGMAP_PTE_TYPE *pte, *page_table = sgmap->aps_pt;
76 int pteidx, error, spill, seg = *segp; 76 int pteidx, error, spill, seg = *segp;
77 77
78 /* Initialize the spill page PTE if it hasn't been already. */ 78 /* Initialize the spill page PTE if it hasn't been already. */
79 if (__C(SGMAP_TYPE,_prefetch_spill_page_pte) == 0) 79 if (__C(SGMAP_TYPE,_prefetch_spill_page_pte) == 0)
80 __C(SGMAP_TYPE,_init_spill_page_pte)(); 80 __C(SGMAP_TYPE,_init_spill_page_pte)();
81 81
82 if (seg == map->_dm_segcnt) { 82 if (seg == map->_dm_segcnt) {
83 /* Ran of segments. */ 83 /* Ran of segments. */
84 return EFBIG; 84 return EFBIG;
85 } 85 }
86 KASSERT(seg < map->_dm_segcnt); 86 KASSERT(seg < map->_dm_segcnt);
87 87
88 /* 88 /*
89 * Remember the offset into the first page and the total 89 * Remember the offset into the first page and the total
90 * transfer length. 90 * transfer length.
91 */ 91 */
92 dmaoffset = ((u_long)buf) & PGOFSET; 92 dmaoffset = ((u_long)buf) & PGOFSET;
93 93
94#ifdef SGMAP_DEBUG 94#ifdef SGMAP_DEBUG
95 if (__C(SGMAP_TYPE,_debug)) { 95 if (__C(SGMAP_TYPE,_debug)) {
96 printf("sgmap_load: ----- buf = %p -----\n", buf); 96 printf("sgmap_load: ----- buf = %p -----\n", buf);
97 printf("sgmap_load: dmaoffset = 0x%lx, buflen = 0x%lx\n", 97 printf("sgmap_load: dmaoffset = 0x%lx, buflen = 0x%lx\n",
98 dmaoffset, buflen); 98 dmaoffset, buflen);
99 } 99 }
100#endif 100#endif
101 101
102 /* 102 /*
103 * Allocate the necessary virtual address space for the 103 * Allocate the necessary virtual address space for the
104 * mapping. Round the size, since we deal with whole pages. 104 * mapping. Round the size, since we deal with whole pages.
105 */ 105 */
106 106
107 /* 107 /*
108 * XXX Always allocate a spill page for now. Note 108 * XXX Always allocate a spill page for now. Note
109 * the spill page is not needed for an in-bound-only 109 * the spill page is not needed for an in-bound-only
110 * transfer. 110 * transfer.
111 */ 111 */
112 if ((flags & BUS_DMA_READ) == 0) 112 if ((flags & BUS_DMA_READ) == 0)
113 spill = 1; 113 spill = 1;
114 else 114 else
115 spill = 0; 115 spill = 0;
116 116
117 boundary = map->_dm_boundary; 117 boundary = map->_dm_boundary;
118 118
119 /* 119 /*
120 * Caller's mistake if the requested length is larger than 120 * Caller's mistake if the requested length is larger than
121 * their own boundary constraint. 121 * their own boundary constraint.
122 */ 122 */
123 if (__predict_false(boundary != 0 && buflen > boundary)) { 123 if (__predict_false(boundary != 0 && buflen > boundary)) {
124 return EINVAL; 124 return EINVAL;
125 } 125 }
126 126
127 endva = round_page(va + buflen); 127 endva = round_page(va + buflen);
128 va = trunc_page(va); 128 va = trunc_page(va);
129 129
130 const vm_flag_t vmflags = VM_INSTANTFIT | 130 const vm_flag_t vmflags = VM_INSTANTFIT |
131 ((flags & BUS_DMA_NOWAIT) ? VM_NOSLEEP : VM_SLEEP); 131 ((flags & BUS_DMA_NOWAIT) ? VM_NOSLEEP : VM_SLEEP);
132 132
133 alignment = PAGE_SIZE; 133 KASSERT(t->_sgmap_minalign != 0);
 134 alignment = t->_sgmap_minalign;
134 sgvalen = (endva - va); 135 sgvalen = (endva - va);
135 136
136 SGMAP_PTE_TYPE spill_pte_v = __C(SGMAP_TYPE,_prefetch_spill_page_pte); 137 SGMAP_PTE_TYPE spill_pte_v = __C(SGMAP_TYPE,_prefetch_spill_page_pte);
137 138
138 /* 139 /*
139 * If we have a boundary constraint, it's possible to end up in 140 * If we have a boundary constraint, it's possible to end up in
140 * a situation where sgvalen > boundary if the caller's buffer 141 * a situation where sgvalen > boundary if the caller's buffer
141 * is not page aligned. In this case, we will have to allocate 142 * is not page aligned. In this case, we will have to allocate
142 * an extra SG segment and split the buffer. 143 * an extra SG segment and split the buffer.
143 */ 144 */
144 if (__predict_false(boundary != 0 && boundary < sgvalen)) { 145 if (__predict_false(boundary != 0 && boundary < sgvalen)) {
145#ifdef SGMAP_DEBUG 146#ifdef SGMAP_DEBUG
146 if (__C(SGMAP_TYPE,_debug)) { 147 if (__C(SGMAP_TYPE,_debug)) {
147 printf("sgmap_load: extra segment needed\n"); 148 printf("sgmap_load: extra segment needed\n");
148 } 149 }
149#endif 150#endif
150 DMA_COUNT(extra_segment); 151 DMA_COUNT(extra_segment);
151 152
152 /* This should only ever happen for unaligned buffers. */ 153 /* This should only ever happen for unaligned buffers. */
153 KASSERT(dmaoffset != 0); 154 KASSERT(dmaoffset != 0);
154 155
155 extra_sgvalen = sgvalen - boundary; 156 extra_sgvalen = sgvalen - boundary;
156 KASSERT(extra_sgvalen == PAGE_SIZE); 157 KASSERT(extra_sgvalen == PAGE_SIZE);
157 158
158 /* 159 /*
159 * Adjust the lengths of the first segment. The length 160 * Adjust the lengths of the first segment. The length
160 * of the second segment will be dmaoffset. 161 * of the second segment will be dmaoffset.
161 */ 162 */
162 sgvalen -= extra_sgvalen; 163 sgvalen -= extra_sgvalen;
163 endva -= extra_sgvalen; 164 endva -= extra_sgvalen;
164 buflen -= dmaoffset; 165 buflen -= dmaoffset;
165 166
166 if (spill) { 167 if (spill) {
167 DMA_COUNT(extra_segment_and_spill); 168 DMA_COUNT(extra_segment_and_spill);
168 extra_sgvalen += PAGE_SIZE; 169 extra_sgvalen += PAGE_SIZE;
169 } 170 }
170 171
171 error = vmem_xalloc(sgmap->aps_arena, extra_sgvalen, 172 error = vmem_xalloc(sgmap->aps_arena, extra_sgvalen,
172 alignment, /* alignment */ 173 alignment, /* alignment */
173 0, /* phase */ 174 0, /* phase */
174 boundary, /* nocross */ 175 boundary, /* nocross */
175 VMEM_ADDR_MIN, /* minaddr */ 176 VMEM_ADDR_MIN, /* minaddr */
176 VMEM_ADDR_MAX, /* maxaddr */ 177 VMEM_ADDR_MAX, /* maxaddr */
177 vmflags, 178 vmflags,
178 &extra_sgva); 179 &extra_sgva);
179 if (error) { 180 if (error) {
180 return error; 181 return error;
181 } 182 }
182 } else { 183 } else {
183 extra_sgvalen = 0; 184 extra_sgvalen = 0;
184 extra_sgva = 0; 185 extra_sgva = 0;
185 } 186 }
186 187
187 188
188 if (spill) { 189 if (spill) {
189 DMA_COUNT(spill_page); 190 DMA_COUNT(spill_page);
190 sgvalen += PAGE_SIZE; 191 sgvalen += PAGE_SIZE;
191 192
192 /* 193 /*
193 * ARGH! If the addition of the spill page bumped us 194 * ARGH! If the addition of the spill page bumped us
194 * over our boundary, we have to 2x the boundary limit. 195 * over our boundary, we have to 2x the boundary limit.
195 * To compensate (and enforce the original boundary 196 * To compensate (and enforce the original boundary
196 * constraint), we force our alignment to be the previous 197 * constraint), we force our alignment to be at least the
197 * boundary, thus ensuring that the only boundary violation 198 * previous boundary, thus ensuring that the only boundary
198 * is the pre-fetch that the SGMAP controller performs that 199 * violation is the pre-fetch that the SGMAP controller
199 * necessitates the spill page in the first place. 200 * performs that necessitates the spill page in the first
 201 * place.
200 */ 202 */
201 if (boundary && boundary < sgvalen) { 203 if (boundary && boundary < sgvalen) {
202 alignment = boundary; 204 if (alignment < boundary) {
 205 alignment = boundary;
 206 }
203 do { 207 do {
204 boundary <<= 1; 208 boundary <<= 1;
205 } while (boundary < sgvalen); 209 } while (boundary < sgvalen);
206 } 210 }
207 } 211 }
208 212
209#ifdef SGMAP_DEBUG 213#ifdef SGMAP_DEBUG
210 if (__C(SGMAP_TYPE,_debug)) { 214 if (__C(SGMAP_TYPE,_debug)) {
211 printf("sgmap_load: va:endva = 0x%lx:0x%lx\n", va, endva); 215 printf("sgmap_load: va:endva = 0x%lx:0x%lx\n", va, endva);
212 printf("sgmap_load: sgvalen = 0x%lx, boundary = 0x%lx\n", 216 printf("sgmap_load: sgvalen = 0x%lx, boundary = 0x%lx\n",
213 sgvalen, boundary); 217 sgvalen, boundary);
214 } 218 }
215#endif 219#endif
216 220
217 error = vmem_xalloc(sgmap->aps_arena, sgvalen, 221 error = vmem_xalloc(sgmap->aps_arena, sgvalen,
218 alignment, /* alignment */ 222 alignment, /* alignment */
219 0, /* phase */ 223 0, /* phase */
220 boundary, /* nocross */ 224 boundary, /* nocross */
221 VMEM_ADDR_MIN, /* minaddr */ 225 VMEM_ADDR_MIN, /* minaddr */
222 VMEM_ADDR_MAX, /* maxaddr */ 226 VMEM_ADDR_MAX, /* maxaddr */
223 vmflags, 227 vmflags,
224 &sgva); 228 &sgva);
225 if (error) { 229 if (error) {
226 if (extra_sgvalen != 0) { 230 if (extra_sgvalen != 0) {
227 vmem_xfree(sgmap->aps_arena, extra_sgva, extra_sgvalen); 231 vmem_xfree(sgmap->aps_arena, extra_sgva, extra_sgvalen);
228 } 232 }
229 return error; 233 return error;
230 } 234 }
231 235
232 pteidx = sgva >> SGMAP_ADDR_PTEIDX_SHIFT; 236 pteidx = sgva >> SGMAP_ADDR_PTEIDX_SHIFT;
233 pte = &page_table[pteidx * SGMAP_PTE_SPACING]; 237 pte = &page_table[pteidx * SGMAP_PTE_SPACING];
234 238
235#ifdef SGMAP_DEBUG 239#ifdef SGMAP_DEBUG
236 if (__C(SGMAP_TYPE,_debug)) 240 if (__C(SGMAP_TYPE,_debug))
237 printf("sgmap_load: sgva = 0x%lx, pteidx = %d, " 241 printf("sgmap_load: sgva = 0x%lx, pteidx = %d, "
238 "pte = %p (pt = %p)\n", sgva, pteidx, pte, 242 "pte = %p (pt = %p)\n", sgva, pteidx, pte,
239 page_table); 243 page_table);
240#endif 244#endif
241 245
242 /* Generate the DMA address. */ 246 /* Generate the DMA address. */
243 map->dm_segs[seg].ds_addr = sgmap->aps_wbase | sgva | dmaoffset; 247 map->dm_segs[seg].ds_addr = sgmap->aps_wbase | sgva | dmaoffset;
244 map->dm_segs[seg].ds_len = buflen; 248 map->dm_segs[seg].ds_len = buflen;
245 if (__predict_false(extra_sgvalen != 0)) { 249 if (__predict_false(extra_sgvalen != 0)) {
246 if (++seg == map->_dm_segcnt) { 250 if (++seg == map->_dm_segcnt) {
247 /* Boo! Ran out of segments! */ 251 /* Boo! Ran out of segments! */
248 vmem_xfree(sgmap->aps_arena, extra_sgva, extra_sgvalen); 252 vmem_xfree(sgmap->aps_arena, extra_sgva, extra_sgvalen);
249 vmem_xfree(sgmap->aps_arena, sgva, sgvalen); 253 vmem_xfree(sgmap->aps_arena, sgva, sgvalen);
250 return EFBIG; 254 return EFBIG;
251 } 255 }
252 map->dm_segs[seg].ds_addr = sgmap->aps_wbase | extra_sgva; 256 map->dm_segs[seg].ds_addr = sgmap->aps_wbase | extra_sgva;
253 map->dm_segs[seg].ds_len = dmaoffset; 257 map->dm_segs[seg].ds_len = dmaoffset;
254 *segp = seg; 258 *segp = seg;
255 } 259 }
256 260
257#ifdef SGMAP_DEBUG 261#ifdef SGMAP_DEBUG
258 if (__C(SGMAP_TYPE,_debug)) 262 if (__C(SGMAP_TYPE,_debug))
259 printf("sgmap_load: wbase = 0x%lx, vpage = 0x%lx, " 263 printf("sgmap_load: wbase = 0x%lx, vpage = 0x%lx, "
260 "DMA addr = 0x%lx\n", sgmap->aps_wbase, (uint64_t)sgva, 264 "DMA addr = 0x%lx\n", sgmap->aps_wbase, (uint64_t)sgva,
261 map->dm_segs[seg].ds_addr); 265 map->dm_segs[seg].ds_addr);
262#endif 266#endif
263 267
264 for (; va < endva; va += PAGE_SIZE, pteidx++, 268 for (; va < endva; va += PAGE_SIZE, pteidx++,
265 pte = &page_table[pteidx * SGMAP_PTE_SPACING]) { 269 pte = &page_table[pteidx * SGMAP_PTE_SPACING]) {
266 /* Get the physical address for this segment. */ 270 /* Get the physical address for this segment. */
267 if (!VMSPACE_IS_KERNEL_P(vm)) 271 if (!VMSPACE_IS_KERNEL_P(vm))
268 (void) pmap_extract(vm->vm_map.pmap, va, &pa); 272 (void) pmap_extract(vm->vm_map.pmap, va, &pa);
269 else 273 else
270 pa = vtophys(va); 274 pa = vtophys(va);
271 275
272 /* Load the current PTE with this page. */ 276 /* Load the current PTE with this page. */
273 *pte = (pa >> SGPTE_PGADDR_SHIFT) | SGPTE_VALID; 277 *pte = (pa >> SGPTE_PGADDR_SHIFT) | SGPTE_VALID;
274#ifdef SGMAP_DEBUG 278#ifdef SGMAP_DEBUG
275 if (__C(SGMAP_TYPE,_debug)) 279 if (__C(SGMAP_TYPE,_debug))
276 printf("sgmap_load: pa = 0x%lx, pte = %p, " 280 printf("sgmap_load: pa = 0x%lx, pte = %p, "
277 "*pte = 0x%lx\n", pa, pte, (u_long)(*pte)); 281 "*pte = 0x%lx\n", pa, pte, (u_long)(*pte));
278#endif 282#endif
279 } 283 }
280 284
281 if (__predict_false(extra_sgvalen != 0)) { 285 if (__predict_false(extra_sgvalen != 0)) {
282 int extra_pteidx = extra_sgva >> SGMAP_ADDR_PTEIDX_SHIFT; 286 int extra_pteidx = extra_sgva >> SGMAP_ADDR_PTEIDX_SHIFT;
283 SGMAP_PTE_TYPE *extra_pte = 287 SGMAP_PTE_TYPE *extra_pte =
284 &page_table[extra_pteidx * SGMAP_PTE_SPACING]; 288 &page_table[extra_pteidx * SGMAP_PTE_SPACING];
285 289
286 /* va == endva == address of extra page */ 290 /* va == endva == address of extra page */
287 KASSERT(va == endva); 291 KASSERT(va == endva);
288 if (!VMSPACE_IS_KERNEL_P(vm)) 292 if (!VMSPACE_IS_KERNEL_P(vm))
289 (void) pmap_extract(vm->vm_map.pmap, va, &pa); 293 (void) pmap_extract(vm->vm_map.pmap, va, &pa);
290 else 294 else
291 pa = vtophys(va); 295 pa = vtophys(va);
292 296
293 /* 297 /*
294 * If a spill page is needed, the previous segment will 298 * If a spill page is needed, the previous segment will
295 * need to use this PTE value for it. 299 * need to use this PTE value for it.
296 */ 300 */
297 spill_pte_v = (pa >> SGPTE_PGADDR_SHIFT) | SGPTE_VALID; 301 spill_pte_v = (pa >> SGPTE_PGADDR_SHIFT) | SGPTE_VALID;
298 *extra_pte = spill_pte_v; 302 *extra_pte = spill_pte_v;
299 303
300 /* ...but the extra segment uses the real spill PTE. */ 304 /* ...but the extra segment uses the real spill PTE. */
301 if (spill) { 305 if (spill) {
302 extra_pteidx++; 306 extra_pteidx++;
303 extra_pte = 307 extra_pte =
304 &page_table[extra_pteidx * SGMAP_PTE_SPACING]; 308 &page_table[extra_pteidx * SGMAP_PTE_SPACING];
305 *extra_pte = __C(SGMAP_TYPE,_prefetch_spill_page_pte); 309 *extra_pte = __C(SGMAP_TYPE,_prefetch_spill_page_pte);
306 } 310 }
307 } 311 }
308 312
309 if (spill) { 313 if (spill) {
310 /* ...and the prefetch-spill page. */ 314 /* ...and the prefetch-spill page. */
311 *pte = spill_pte_v; 315 *pte = spill_pte_v;
312#ifdef SGMAP_DEBUG 316#ifdef SGMAP_DEBUG
313 if (__C(SGMAP_TYPE,_debug)) { 317 if (__C(SGMAP_TYPE,_debug)) {
314 printf("sgmap_load: spill page, pte = %p, " 318 printf("sgmap_load: spill page, pte = %p, "
315 "*pte = 0x%lx\n", pte, (uint64_t)*pte); 319 "*pte = 0x%lx\n", pte, (uint64_t)*pte);
316 } 320 }
317#endif 321#endif
318 } 322 }
319 323
320 return (0); 324 return (0);
321} 325}
322 326
323DMA_COUNT_DECL(load); 327DMA_COUNT_DECL(load);
324DMA_COUNT_DECL(load_next_window); 328DMA_COUNT_DECL(load_next_window);
325 329
326int 330int
327__C(SGMAP_TYPE,_load)(bus_dma_tag_t t, bus_dmamap_t map, void *buf, 331__C(SGMAP_TYPE,_load)(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
328 bus_size_t buflen, struct proc *p, int flags, struct alpha_sgmap *sgmap) 332 bus_size_t buflen, struct proc *p, int flags, struct alpha_sgmap *sgmap)
329{ 333{
330 int seg, error; 334 int seg, error;
331 struct vmspace *vm; 335 struct vmspace *vm;
332 336
333 /* 337 /*
334 * Make sure that on error condition we return "no valid mappings". 338 * Make sure that on error condition we return "no valid mappings".
335 */ 339 */
336 map->dm_mapsize = 0; 340 map->dm_mapsize = 0;
337 map->dm_nsegs = 0; 341 map->dm_nsegs = 0;
338 342
339 if (buflen > map->_dm_size) 343 if (buflen > map->_dm_size)
340 return (EINVAL); 344 return (EINVAL);
341 345
342 KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0); 346 KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);
343 KASSERT((flags & (BUS_DMA_READ|BUS_DMA_WRITE)) != 347 KASSERT((flags & (BUS_DMA_READ|BUS_DMA_WRITE)) !=
344 (BUS_DMA_READ|BUS_DMA_WRITE)); 348 (BUS_DMA_READ|BUS_DMA_WRITE));
345 349
346 map->_dm_flags |= flags & (BUS_DMA_READ|BUS_DMA_WRITE); 350 map->_dm_flags |= flags & (BUS_DMA_READ|BUS_DMA_WRITE);
347 351
348 if (p != NULL) { 352 if (p != NULL) {
349 vm = p->p_vmspace; 353 vm = p->p_vmspace;
350 } else { 354 } else {
351 vm = vmspace_kernel(); 355 vm = vmspace_kernel();
352 } 356 }
353 seg = 0; 357 seg = 0;
354 error = __C(SGMAP_TYPE,_load_buffer)(t, map, buf, buflen, vm, 358 error = __C(SGMAP_TYPE,_load_buffer)(t, map, buf, buflen, vm,
355 flags, &seg, sgmap); 359 flags, &seg, sgmap);
356 360
357 alpha_mb(); 361 alpha_mb();
358 362
359#if defined(SGMAP_DEBUG) && defined(DDB) 363#if defined(SGMAP_DEBUG) && defined(DDB)
360 if (__C(SGMAP_TYPE,_debug) > 1) 364 if (__C(SGMAP_TYPE,_debug) > 1)
361 Debugger(); 365 Debugger();
362#endif 366#endif
363 367
364 if (error == 0) { 368 if (error == 0) {
365 DMA_COUNT(load); 369 DMA_COUNT(load);
366 map->dm_mapsize = buflen; 370 map->dm_mapsize = buflen;
367 map->dm_nsegs = seg + 1; 371 map->dm_nsegs = seg + 1;
368 map->_dm_window = t; 372 map->_dm_window = t;
369 } else { 373 } else {
370 map->_dm_flags &= ~(BUS_DMA_READ|BUS_DMA_WRITE); 374 map->_dm_flags &= ~(BUS_DMA_READ|BUS_DMA_WRITE);
371 if (t->_next_window != NULL) { 375 if (t->_next_window != NULL) {
372 /* Give the next window a chance. */ 376 /* Give the next window a chance. */
373 DMA_COUNT(load_next_window); 377 DMA_COUNT(load_next_window);
374 error = bus_dmamap_load(t->_next_window, map, buf, 378 error = bus_dmamap_load(t->_next_window, map, buf,
375 buflen, p, flags); 379 buflen, p, flags);
376 } 380 }
377 } 381 }
378 return (error); 382 return (error);
379} 383}
380 384
381DMA_COUNT_DECL(load_mbuf); 385DMA_COUNT_DECL(load_mbuf);
382DMA_COUNT_DECL(load_mbuf_next_window); 386DMA_COUNT_DECL(load_mbuf_next_window);
383 387
384int 388int
385__C(SGMAP_TYPE,_load_mbuf)(bus_dma_tag_t t, bus_dmamap_t map, 389__C(SGMAP_TYPE,_load_mbuf)(bus_dma_tag_t t, bus_dmamap_t map,
386 struct mbuf *m0, int flags, struct alpha_sgmap *sgmap) 390 struct mbuf *m0, int flags, struct alpha_sgmap *sgmap)
387{ 391{
388 struct mbuf *m; 392 struct mbuf *m;
389 int seg, error; 393 int seg, error;
390 394
391 /* 395 /*
392 * Make sure that on error condition we return "no valid mappings". 396 * Make sure that on error condition we return "no valid mappings".
393 */ 397 */
394 map->dm_mapsize = 0; 398 map->dm_mapsize = 0;
395 map->dm_nsegs = 0; 399 map->dm_nsegs = 0;
396 400
397#ifdef DIAGNOSTIC 401#ifdef DIAGNOSTIC
398 if ((m0->m_flags & M_PKTHDR) == 0) 402 if ((m0->m_flags & M_PKTHDR) == 0)
399 panic(__S(__C(SGMAP_TYPE,_load_mbuf)) ": no packet header"); 403 panic(__S(__C(SGMAP_TYPE,_load_mbuf)) ": no packet header");
400#endif 404#endif
401 405
402 if (m0->m_pkthdr.len > map->_dm_size) 406 if (m0->m_pkthdr.len > map->_dm_size)
403 return (EINVAL); 407 return (EINVAL);
404 408
405 KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0); 409 KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);
406 KASSERT((flags & (BUS_DMA_READ|BUS_DMA_WRITE)) != 410 KASSERT((flags & (BUS_DMA_READ|BUS_DMA_WRITE)) !=
407 (BUS_DMA_READ|BUS_DMA_WRITE)); 411 (BUS_DMA_READ|BUS_DMA_WRITE));
408 412
409 map->_dm_flags |= flags & (BUS_DMA_READ|BUS_DMA_WRITE); 413 map->_dm_flags |= flags & (BUS_DMA_READ|BUS_DMA_WRITE);
410 414
411 seg = 0; 415 seg = 0;
412 error = 0; 416 error = 0;
413 for (m = m0; m != NULL && error == 0; m = m->m_next) { 417 for (m = m0; m != NULL && error == 0; m = m->m_next) {
414 if (m->m_len == 0) 418 if (m->m_len == 0)
415 continue; 419 continue;
416 error = __C(SGMAP_TYPE,_load_buffer)(t, map, 420 error = __C(SGMAP_TYPE,_load_buffer)(t, map,
417 m->m_data, m->m_len, vmspace_kernel(), flags, &seg, sgmap); 421 m->m_data, m->m_len, vmspace_kernel(), flags, &seg, sgmap);
418 seg++; 422 seg++;
419 } 423 }
420 424
421 alpha_mb(); 425 alpha_mb();
422 426
423#if defined(SGMAP_DEBUG) && defined(DDB) 427#if defined(SGMAP_DEBUG) && defined(DDB)
424 if (__C(SGMAP_TYPE,_debug) > 1) 428 if (__C(SGMAP_TYPE,_debug) > 1)
425 Debugger(); 429 Debugger();
426#endif 430#endif
427 431
428 if (error == 0) { 432 if (error == 0) {
429 DMA_COUNT(load_mbuf); 433 DMA_COUNT(load_mbuf);
430 map->dm_mapsize = m0->m_pkthdr.len; 434 map->dm_mapsize = m0->m_pkthdr.len;
431 map->dm_nsegs = seg; 435 map->dm_nsegs = seg;
432 map->_dm_window = t; 436 map->_dm_window = t;
433 } else { 437 } else {
434 /* Need to back out what we've done so far. */ 438 /* Need to back out what we've done so far. */
435 map->dm_nsegs = seg - 1; 439 map->dm_nsegs = seg - 1;
436 __C(SGMAP_TYPE,_do_unload)(t, map, sgmap); 440 __C(SGMAP_TYPE,_do_unload)(t, map, sgmap);
437 map->_dm_flags &= ~(BUS_DMA_READ|BUS_DMA_WRITE); 441 map->_dm_flags &= ~(BUS_DMA_READ|BUS_DMA_WRITE);
438 if (t->_next_window != NULL) { 442 if (t->_next_window != NULL) {
439 /* Give the next window a chance. */ 443 /* Give the next window a chance. */
440 DMA_COUNT(load_mbuf_next_window); 444 DMA_COUNT(load_mbuf_next_window);
441 error = bus_dmamap_load_mbuf(t->_next_window, map, 445 error = bus_dmamap_load_mbuf(t->_next_window, map,
442 m0, flags); 446 m0, flags);
443 } 447 }
444 } 448 }
445 449
446 return (error); 450 return (error);
447} 451}
448 452
449DMA_COUNT_DECL(load_uio); 453DMA_COUNT_DECL(load_uio);
450DMA_COUNT_DECL(load_uio_next_window); 454DMA_COUNT_DECL(load_uio_next_window);
451 455
452int 456int
453__C(SGMAP_TYPE,_load_uio)(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio, 457__C(SGMAP_TYPE,_load_uio)(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
454 int flags, struct alpha_sgmap *sgmap) 458 int flags, struct alpha_sgmap *sgmap)
455{ 459{
456 bus_size_t minlen, resid; 460 bus_size_t minlen, resid;
457 struct vmspace *vm; 461 struct vmspace *vm;
458 struct iovec *iov; 462 struct iovec *iov;
459 void *addr; 463 void *addr;
460 int i, seg, error; 464 int i, seg, error;
461 465
462 /* 466 /*
463 * Make sure that on error condition we return "no valid mappings". 467 * Make sure that on error condition we return "no valid mappings".
464 */ 468 */
465 map->dm_mapsize = 0; 469 map->dm_mapsize = 0;
466 map->dm_nsegs = 0; 470 map->dm_nsegs = 0;
467 471
468 KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0); 472 KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);
469 KASSERT((flags & (BUS_DMA_READ|BUS_DMA_WRITE)) != 473 KASSERT((flags & (BUS_DMA_READ|BUS_DMA_WRITE)) !=
470 (BUS_DMA_READ|BUS_DMA_WRITE)); 474 (BUS_DMA_READ|BUS_DMA_WRITE));
471 475
472 map->_dm_flags |= flags & (BUS_DMA_READ|BUS_DMA_WRITE); 476 map->_dm_flags |= flags & (BUS_DMA_READ|BUS_DMA_WRITE);
473 477
474 resid = uio->uio_resid; 478 resid = uio->uio_resid;
475 iov = uio->uio_iov; 479 iov = uio->uio_iov;
476 480
477 vm = uio->uio_vmspace; 481 vm = uio->uio_vmspace;
478 482
479 seg = 0; 483 seg = 0;
480 error = 0; 484 error = 0;
481 for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) { 485 for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
482 /* 486 /*
483 * Now at the first iovec to load. Load each iovec 487 * Now at the first iovec to load. Load each iovec
484 * until we have exhausted the residual count. 488 * until we have exhausted the residual count.
485 */ 489 */
486 minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len; 490 minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
487 addr = (void *)iov[i].iov_base; 491 addr = (void *)iov[i].iov_base;
488 492
489 error = __C(SGMAP_TYPE,_load_buffer)(t, map, 493 error = __C(SGMAP_TYPE,_load_buffer)(t, map,
490 addr, minlen, vm, flags, &seg, sgmap); 494 addr, minlen, vm, flags, &seg, sgmap);
491 seg++; 495 seg++;
492 496
493 resid -= minlen; 497 resid -= minlen;
494 } 498 }
495 499
496 alpha_mb(); 500 alpha_mb();
497 501
498#if defined(SGMAP_DEBUG) && defined(DDB) 502#if defined(SGMAP_DEBUG) && defined(DDB)
499 if (__C(SGMAP_TYPE,_debug) > 1) 503 if (__C(SGMAP_TYPE,_debug) > 1)
500 Debugger(); 504 Debugger();
501#endif 505#endif
502 506
503 if (error == 0) { 507 if (error == 0) {
504 DMA_COUNT(load_uio); 508 DMA_COUNT(load_uio);
505 map->dm_mapsize = uio->uio_resid; 509 map->dm_mapsize = uio->uio_resid;
506 map->dm_nsegs = seg; 510 map->dm_nsegs = seg;
507 map->_dm_window = t; 511 map->_dm_window = t;
508 } else { 512 } else {
509 /* Need to back out what we've done so far. */ 513 /* Need to back out what we've done so far. */
510 map->dm_nsegs = seg - 1; 514 map->dm_nsegs = seg - 1;
511 __C(SGMAP_TYPE,_do_unload)(t, map, sgmap); 515 __C(SGMAP_TYPE,_do_unload)(t, map, sgmap);
512 map->_dm_flags &= ~(BUS_DMA_READ|BUS_DMA_WRITE); 516 map->_dm_flags &= ~(BUS_DMA_READ|BUS_DMA_WRITE);
513 if (t->_next_window != NULL) { 517 if (t->_next_window != NULL) {
514 /* Give the next window a chance. */ 518 /* Give the next window a chance. */
515 DMA_COUNT(load_uio_next_window); 519 DMA_COUNT(load_uio_next_window);
516 error = bus_dmamap_load_uio(t->_next_window, map, 520 error = bus_dmamap_load_uio(t->_next_window, map,
517 uio, flags); 521 uio, flags);
518 } 522 }
519 } 523 }
520 524
521 return (error); 525 return (error);
522} 526}
523 527
524int 528int
525__C(SGMAP_TYPE,_load_raw)(bus_dma_tag_t t, bus_dmamap_t map, 529__C(SGMAP_TYPE,_load_raw)(bus_dma_tag_t t, bus_dmamap_t map,
526 bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags, 530 bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags,
527 struct alpha_sgmap *sgmap) 531 struct alpha_sgmap *sgmap)
528{ 532{
529 533
530 KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0); 534 KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);
531 KASSERT((flags & (BUS_DMA_READ|BUS_DMA_WRITE)) != 535 KASSERT((flags & (BUS_DMA_READ|BUS_DMA_WRITE)) !=
532 (BUS_DMA_READ|BUS_DMA_WRITE)); 536 (BUS_DMA_READ|BUS_DMA_WRITE));
533 537
534 panic(__S(__C(SGMAP_TYPE,_load_raw)) ": not implemented"); 538 panic(__S(__C(SGMAP_TYPE,_load_raw)) ": not implemented");
535} 539}
536 540
537static void 541static void
538__C(SGMAP_TYPE,_do_unload)(bus_dma_tag_t t, bus_dmamap_t map, 542__C(SGMAP_TYPE,_do_unload)(bus_dma_tag_t t, bus_dmamap_t map,
539 struct alpha_sgmap *sgmap) 543 struct alpha_sgmap *sgmap)
540{ 544{
541 SGMAP_PTE_TYPE *pte, *page_table = sgmap->aps_pt; 545 SGMAP_PTE_TYPE *pte, *page_table = sgmap->aps_pt;
542 bus_addr_t osgva, sgva, esgva; 546 bus_addr_t osgva, sgva, esgva;
543 int spill, seg, pteidx; 547 int spill, seg, pteidx;
544 548
545 for (seg = 0; seg < map->dm_nsegs; seg++) { 549 for (seg = 0; seg < map->dm_nsegs; seg++) {
546 /* 550 /*
547 * XXX Always allocate a spill page for now. Note 551 * XXX Always allocate a spill page for now. Note
548 * the spill page is not needed for an in-bound-only 552 * the spill page is not needed for an in-bound-only
549 * transfer. 553 * transfer.
550 */ 554 */
551 if ((map->_dm_flags & BUS_DMA_READ) == 0) 555 if ((map->_dm_flags & BUS_DMA_READ) == 0)
552 spill = 1; 556 spill = 1;
553 else 557 else
554 spill = 0; 558 spill = 0;
555 559
556 sgva = map->dm_segs[seg].ds_addr & ~sgmap->aps_wbase; 560 sgva = map->dm_segs[seg].ds_addr & ~sgmap->aps_wbase;
557 561
558 esgva = round_page(sgva + map->dm_segs[seg].ds_len); 562 esgva = round_page(sgva + map->dm_segs[seg].ds_len);
559 osgva = sgva = trunc_page(sgva); 563 osgva = sgva = trunc_page(sgva);
560 564
561 if (spill) 565 if (spill)
562 esgva += PAGE_SIZE; 566 esgva += PAGE_SIZE;
563 567
564 /* Invalidate the PTEs for the mapping. */ 568 /* Invalidate the PTEs for the mapping. */
565 for (pteidx = sgva >> SGMAP_ADDR_PTEIDX_SHIFT; 569 for (pteidx = sgva >> SGMAP_ADDR_PTEIDX_SHIFT;
566 sgva < esgva; sgva += PAGE_SIZE, pteidx++) { 570 sgva < esgva; sgva += PAGE_SIZE, pteidx++) {
567 pte = &page_table[pteidx * SGMAP_PTE_SPACING]; 571 pte = &page_table[pteidx * SGMAP_PTE_SPACING];
568#ifdef SGMAP_DEBUG 572#ifdef SGMAP_DEBUG
569 if (__C(SGMAP_TYPE,_debug)) 573 if (__C(SGMAP_TYPE,_debug))
570 printf("sgmap_unload: pte = %p, " 574 printf("sgmap_unload: pte = %p, "
571 "*pte = 0x%lx\n", pte, (u_long)(*pte)); 575 "*pte = 0x%lx\n", pte, (u_long)(*pte));
572#endif 576#endif
573 *pte = 0; 577 *pte = 0;
574 } 578 }
575 579
576 alpha_mb(); 580 alpha_mb();
577 581
578 /* Free the virtual address space used by the mapping. */ 582 /* Free the virtual address space used by the mapping. */
579 vmem_xfree(sgmap->aps_arena, osgva, (esgva - osgva)); 583 vmem_xfree(sgmap->aps_arena, osgva, (esgva - osgva));
580 } 584 }
581 585
582 map->_dm_flags &= ~(BUS_DMA_READ|BUS_DMA_WRITE); 586 map->_dm_flags &= ~(BUS_DMA_READ|BUS_DMA_WRITE);
583 587
584 /* Mark the mapping invalid. */ 588 /* Mark the mapping invalid. */
585 map->dm_mapsize = 0; 589 map->dm_mapsize = 0;
586 map->dm_nsegs = 0; 590 map->dm_nsegs = 0;
587 map->_dm_window = NULL; 591 map->_dm_window = NULL;
588} 592}
589 593
590DMA_COUNT_DECL(unload); 594DMA_COUNT_DECL(unload);
591 595
592void 596void
593__C(SGMAP_TYPE,_unload)(bus_dma_tag_t t, bus_dmamap_t map, 597__C(SGMAP_TYPE,_unload)(bus_dma_tag_t t, bus_dmamap_t map,
594 struct alpha_sgmap *sgmap) 598 struct alpha_sgmap *sgmap)
595{ 599{
596 KASSERT(map->_dm_window == t); 600 KASSERT(map->_dm_window == t);
597 DMA_COUNT(unload); 601 DMA_COUNT(unload);
598 __C(SGMAP_TYPE,_do_unload)(t, map, sgmap); 602 __C(SGMAP_TYPE,_do_unload)(t, map, sgmap);
599} 603}
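
Two related changes above are worth calling out.  First, the per-transfer
path now starts from the tag's minimum (alignment = t->_sgmap_minalign,
with a KASSERT that alpha_sgmap_init() has filled it in) rather than a
hard-coded PAGE_SIZE.  Second, the spill-page fixup no longer overwrites
that alignment with the boundary; it only raises it.  Distilled into a
stand-alone helper (a sketch of the inline logic, not code from the tree):

    /*
     * Choose the vmem_xalloc() alignment for a mapping whose spill
     * page pushed sgvalen past the caller's boundary: the boundary
     * is doubled until it covers sgvalen, and the alignment becomes
     * the *larger* of the tag minimum and the original boundary, so
     * the caller's boundary constraint still holds for the payload.
     */
    static bus_size_t
    choose_sgva_alignment(bus_size_t minalign, bus_size_t *boundaryp,
        bus_size_t sgvalen)
    {
            bus_size_t alignment = minalign;
            bus_size_t boundary = *boundaryp;

            if (boundary != 0 && boundary < sgvalen) {
                    if (alignment < boundary)
                            alignment = boundary;   /* was unconditional */
                    do {
                            boundary <<= 1;
                    } while (boundary < sgvalen);
                    *boundaryp = boundary;
            }
            return alignment;
    }

With the old unconditional assignment, a tag minimum larger than the
boundary would have been silently discarded at this point; the new form
keeps whichever constraint is stronger.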

cvs diff -r1.5 -r1.6 src/sys/arch/alpha/include/bus_defs.h

--- src/sys/arch/alpha/include/bus_defs.h 2019/09/23 16:17:54 1.5
+++ src/sys/arch/alpha/include/bus_defs.h 2021/07/18 05:12:27 1.6
@@ -1,453 +1,459 @@
1/* $NetBSD: bus_defs.h,v 1.5 2019/09/23 16:17:54 skrll Exp $ */ 1/* $NetBSD: bus_defs.h,v 1.6 2021/07/18 05:12:27 thorpej Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1997, 1998, 2000, 2001 The NetBSD Foundation, Inc. 4 * Copyright (c) 1997, 1998, 2000, 2001 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center. 9 * NASA Ames Research Center.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * are met: 13 * are met:
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer. 15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright 16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the 17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution. 18 * documentation and/or other materials provided with the distribution.
19 * 19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE. 30 * POSSIBILITY OF SUCH DAMAGE.
31 */ 31 */
32 32
33/* 33/*
34 * Copyright (c) 1996 Carnegie-Mellon University. 34 * Copyright (c) 1996 Carnegie-Mellon University.
35 * All rights reserved. 35 * All rights reserved.
36 * 36 *
37 * Author: Chris G. Demetriou 37 * Author: Chris G. Demetriou
38 * 38 *
39 * Permission to use, copy, modify and distribute this software and 39 * Permission to use, copy, modify and distribute this software and
40 * its documentation is hereby granted, provided that both the copyright 40 * its documentation is hereby granted, provided that both the copyright
41 * notice and this permission notice appear in all copies of the 41 * notice and this permission notice appear in all copies of the
42 * software, derivative works or modified versions, and any portions 42 * software, derivative works or modified versions, and any portions
43 * thereof, and that both notices appear in supporting documentation. 43 * thereof, and that both notices appear in supporting documentation.
44 * 44 *
45 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 45 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
46 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 46 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
47 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 47 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
48 * 48 *
49 * Carnegie Mellon requests users of this software to return to 49 * Carnegie Mellon requests users of this software to return to
50 * 50 *
51 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 51 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
52 * School of Computer Science 52 * School of Computer Science
53 * Carnegie Mellon University 53 * Carnegie Mellon University
54 * Pittsburgh PA 15213-3890 54 * Pittsburgh PA 15213-3890
55 * 55 *
56 * any improvements or extensions that they make and grant Carnegie the 56 * any improvements or extensions that they make and grant Carnegie the
57 * rights to redistribute these changes. 57 * rights to redistribute these changes.
58 */ 58 */
59 59
60#ifndef _ALPHA_BUS_DEFS_H_ 60#ifndef _ALPHA_BUS_DEFS_H_
61#define _ALPHA_BUS_DEFS_H_ 61#define _ALPHA_BUS_DEFS_H_
62 62
63#include <sys/types.h> 63#include <sys/types.h>
64#include <machine/bus_user.h> 64#include <machine/bus_user.h>
65 65
66#if !defined(_KERNEL) && !defined(_STANDALONE) 66#if !defined(_KERNEL) && !defined(_STANDALONE)
67#include <stdbool.h> 67#include <stdbool.h>
68#endif 68#endif
69#include <sys/stdint.h> 69#include <sys/stdint.h>
70 70
71#ifdef _KERNEL 71#ifdef _KERNEL
72 72
73/* 73/*
74 * Turn on BUS_SPACE_DEBUG if the global DEBUG option is enabled. 74 * Turn on BUS_SPACE_DEBUG if the global DEBUG option is enabled.
75 */ 75 */
76#if defined(DEBUG) && !defined(BUS_SPACE_DEBUG) 76#if defined(DEBUG) && !defined(BUS_SPACE_DEBUG)
77#define BUS_SPACE_DEBUG 77#define BUS_SPACE_DEBUG
78#endif 78#endif
79 79
80#ifdef BUS_SPACE_DEBUG 80#ifdef BUS_SPACE_DEBUG
81#include <sys/systm.h> /* for printf() prototype */ 81#include <sys/systm.h> /* for printf() prototype */
82/* 82/*
83 * Macros for checking the aligned-ness of pointers passed to bus 83 * Macros for checking the aligned-ness of pointers passed to bus
84 * space ops. Strict alignment is required by the Alpha architecture, 84 * space ops. Strict alignment is required by the Alpha architecture,
85 * and a trap will occur if unaligned access is performed. These 85 * and a trap will occur if unaligned access is performed. These
86 * may aid in the debugging of a broken device driver by displaying 86 * may aid in the debugging of a broken device driver by displaying
87 * useful information about the problem. 87 * useful information about the problem.
88 */ 88 */
89#define __BUS_SPACE_ALIGNED_ADDRESS(p, t) \ 89#define __BUS_SPACE_ALIGNED_ADDRESS(p, t) \
90 ((((u_long)(p)) & (sizeof(t)-1)) == 0) 90 ((((u_long)(p)) & (sizeof(t)-1)) == 0)
91 91
92#define __BUS_SPACE_ADDRESS_SANITY(p, t, d) \ 92#define __BUS_SPACE_ADDRESS_SANITY(p, t, d) \
93({ \ 93({ \
94 if (__BUS_SPACE_ALIGNED_ADDRESS((p), t) == 0) { \ 94 if (__BUS_SPACE_ALIGNED_ADDRESS((p), t) == 0) { \
95 printf("%s 0x%lx not aligned to %lu bytes %s:%d\n", \ 95 printf("%s 0x%lx not aligned to %lu bytes %s:%d\n", \
96 d, (u_long)(p), sizeof(t), __FILE__, __LINE__); \ 96 d, (u_long)(p), sizeof(t), __FILE__, __LINE__); \
97 } \ 97 } \
98 (void) 0; \ 98 (void) 0; \
99}) 99})
100 100
101#define BUS_SPACE_ALIGNED_POINTER(p, t) __BUS_SPACE_ALIGNED_ADDRESS(p, t) 101#define BUS_SPACE_ALIGNED_POINTER(p, t) __BUS_SPACE_ALIGNED_ADDRESS(p, t)
102#else 102#else
103#define __BUS_SPACE_ADDRESS_SANITY(p, t, d) (void) 0 103#define __BUS_SPACE_ADDRESS_SANITY(p, t, d) (void) 0
104#define BUS_SPACE_ALIGNED_POINTER(p, t) ALIGNED_POINTER(p, t) 104#define BUS_SPACE_ALIGNED_POINTER(p, t) ALIGNED_POINTER(p, t)
105#endif /* BUS_SPACE_DEBUG */ 105#endif /* BUS_SPACE_DEBUG */
106#endif /* _KERNEL */ 106#endif /* _KERNEL */
107 107
108struct alpha_bus_space_translation; 108struct alpha_bus_space_translation;
109 109
110/* 110/*
111 * Access methods for bus space. 111 * Access methods for bus space.
112 */ 112 */
113typedef struct alpha_bus_space *bus_space_tag_t; 113typedef struct alpha_bus_space *bus_space_tag_t;
114typedef u_long bus_space_handle_t; 114typedef u_long bus_space_handle_t;
115 115
116#define PRIxBSH "lx" 116#define PRIxBSH "lx"
117 117
118struct alpha_bus_space { 118struct alpha_bus_space {
119 /* cookie */ 119 /* cookie */
120 void *abs_cookie; 120 void *abs_cookie;
121 121
122 /* mapping/unmapping */ 122 /* mapping/unmapping */
123 int (*abs_map)(void *, bus_addr_t, bus_size_t, 123 int (*abs_map)(void *, bus_addr_t, bus_size_t,
124 int, bus_space_handle_t *, int); 124 int, bus_space_handle_t *, int);
125 void (*abs_unmap)(void *, bus_space_handle_t, 125 void (*abs_unmap)(void *, bus_space_handle_t,
126 bus_size_t, int); 126 bus_size_t, int);
127 int (*abs_subregion)(void *, bus_space_handle_t, 127 int (*abs_subregion)(void *, bus_space_handle_t,
128 bus_size_t, bus_size_t, bus_space_handle_t *); 128 bus_size_t, bus_size_t, bus_space_handle_t *);
129 129
130 /* ALPHA SPECIFIC MAPPING METHOD */ 130 /* ALPHA SPECIFIC MAPPING METHOD */
131 int (*abs_translate)(void *, bus_addr_t, bus_size_t, 131 int (*abs_translate)(void *, bus_addr_t, bus_size_t,
132 int, struct alpha_bus_space_translation *); 132 int, struct alpha_bus_space_translation *);
133 int (*abs_get_window)(void *, int, 133 int (*abs_get_window)(void *, int,
134 struct alpha_bus_space_translation *); 134 struct alpha_bus_space_translation *);
135 135
136 /* allocation/deallocation */ 136 /* allocation/deallocation */
137 int (*abs_alloc)(void *, bus_addr_t, bus_addr_t, 137 int (*abs_alloc)(void *, bus_addr_t, bus_addr_t,
138 bus_size_t, bus_size_t, bus_size_t, int, 138 bus_size_t, bus_size_t, bus_size_t, int,
139 bus_addr_t *, bus_space_handle_t *); 139 bus_addr_t *, bus_space_handle_t *);
140 void (*abs_free)(void *, bus_space_handle_t, 140 void (*abs_free)(void *, bus_space_handle_t,
141 bus_size_t); 141 bus_size_t);
142 142
143 /* get kernel virtual address */ 143 /* get kernel virtual address */
144 void * (*abs_vaddr)(void *, bus_space_handle_t); 144 void * (*abs_vaddr)(void *, bus_space_handle_t);
145 145
146 /* mmap bus space for user */ 146 /* mmap bus space for user */
147 paddr_t (*abs_mmap)(void *, bus_addr_t, off_t, int, int); 147 paddr_t (*abs_mmap)(void *, bus_addr_t, off_t, int, int);
148 148
149 /* barrier */ 149 /* barrier */
150 void (*abs_barrier)(void *, bus_space_handle_t, 150 void (*abs_barrier)(void *, bus_space_handle_t,
151 bus_size_t, bus_size_t, int); 151 bus_size_t, bus_size_t, int);
152 152
153 /* read (single) */ 153 /* read (single) */
154 uint8_t (*abs_r_1)(void *, bus_space_handle_t, 154 uint8_t (*abs_r_1)(void *, bus_space_handle_t,
155 bus_size_t); 155 bus_size_t);
156 uint16_t (*abs_r_2)(void *, bus_space_handle_t, 156 uint16_t (*abs_r_2)(void *, bus_space_handle_t,
157 bus_size_t); 157 bus_size_t);
158 uint32_t (*abs_r_4)(void *, bus_space_handle_t, 158 uint32_t (*abs_r_4)(void *, bus_space_handle_t,
159 bus_size_t); 159 bus_size_t);
160 uint64_t (*abs_r_8)(void *, bus_space_handle_t, 160 uint64_t (*abs_r_8)(void *, bus_space_handle_t,
161 bus_size_t); 161 bus_size_t);
162 162
163 /* read multiple */ 163 /* read multiple */
164 void (*abs_rm_1)(void *, bus_space_handle_t, 164 void (*abs_rm_1)(void *, bus_space_handle_t,
165 bus_size_t, uint8_t *, bus_size_t); 165 bus_size_t, uint8_t *, bus_size_t);
166 void (*abs_rm_2)(void *, bus_space_handle_t, 166 void (*abs_rm_2)(void *, bus_space_handle_t,
167 bus_size_t, uint16_t *, bus_size_t); 167 bus_size_t, uint16_t *, bus_size_t);
168 void (*abs_rm_4)(void *, bus_space_handle_t, 168 void (*abs_rm_4)(void *, bus_space_handle_t,
169 bus_size_t, uint32_t *, bus_size_t); 169 bus_size_t, uint32_t *, bus_size_t);
170 void (*abs_rm_8)(void *, bus_space_handle_t, 170 void (*abs_rm_8)(void *, bus_space_handle_t,
171 bus_size_t, uint64_t *, bus_size_t); 171 bus_size_t, uint64_t *, bus_size_t);
172  172
173 /* read region */ 173 /* read region */
174 void (*abs_rr_1)(void *, bus_space_handle_t, 174 void (*abs_rr_1)(void *, bus_space_handle_t,
175 bus_size_t, uint8_t *, bus_size_t); 175 bus_size_t, uint8_t *, bus_size_t);
176 void (*abs_rr_2)(void *, bus_space_handle_t, 176 void (*abs_rr_2)(void *, bus_space_handle_t,
177 bus_size_t, uint16_t *, bus_size_t); 177 bus_size_t, uint16_t *, bus_size_t);
178 void (*abs_rr_4)(void *, bus_space_handle_t, 178 void (*abs_rr_4)(void *, bus_space_handle_t,
179 bus_size_t, uint32_t *, bus_size_t); 179 bus_size_t, uint32_t *, bus_size_t);
180 void (*abs_rr_8)(void *, bus_space_handle_t, 180 void (*abs_rr_8)(void *, bus_space_handle_t,
181 bus_size_t, uint64_t *, bus_size_t); 181 bus_size_t, uint64_t *, bus_size_t);
182  182
183 /* write (single) */ 183 /* write (single) */
184 void (*abs_w_1)(void *, bus_space_handle_t, 184 void (*abs_w_1)(void *, bus_space_handle_t,
185 bus_size_t, uint8_t); 185 bus_size_t, uint8_t);
186 void (*abs_w_2)(void *, bus_space_handle_t, 186 void (*abs_w_2)(void *, bus_space_handle_t,
187 bus_size_t, uint16_t); 187 bus_size_t, uint16_t);
188 void (*abs_w_4)(void *, bus_space_handle_t, 188 void (*abs_w_4)(void *, bus_space_handle_t,
189 bus_size_t, uint32_t); 189 bus_size_t, uint32_t);
190 void (*abs_w_8)(void *, bus_space_handle_t, 190 void (*abs_w_8)(void *, bus_space_handle_t,
191 bus_size_t, uint64_t); 191 bus_size_t, uint64_t);
192 192
193 /* write multiple */ 193 /* write multiple */
194 void (*abs_wm_1)(void *, bus_space_handle_t, 194 void (*abs_wm_1)(void *, bus_space_handle_t,
195 bus_size_t, const uint8_t *, bus_size_t); 195 bus_size_t, const uint8_t *, bus_size_t);
196 void (*abs_wm_2)(void *, bus_space_handle_t, 196 void (*abs_wm_2)(void *, bus_space_handle_t,
197 bus_size_t, const uint16_t *, bus_size_t); 197 bus_size_t, const uint16_t *, bus_size_t);
198 void (*abs_wm_4)(void *, bus_space_handle_t, 198 void (*abs_wm_4)(void *, bus_space_handle_t,
199 bus_size_t, const uint32_t *, bus_size_t); 199 bus_size_t, const uint32_t *, bus_size_t);
200 void (*abs_wm_8)(void *, bus_space_handle_t, 200 void (*abs_wm_8)(void *, bus_space_handle_t,
201 bus_size_t, const uint64_t *, bus_size_t); 201 bus_size_t, const uint64_t *, bus_size_t);
202  202
203 /* write region */ 203 /* write region */
204 void (*abs_wr_1)(void *, bus_space_handle_t, 204 void (*abs_wr_1)(void *, bus_space_handle_t,
205 bus_size_t, const uint8_t *, bus_size_t); 205 bus_size_t, const uint8_t *, bus_size_t);
206 void (*abs_wr_2)(void *, bus_space_handle_t, 206 void (*abs_wr_2)(void *, bus_space_handle_t,
207 bus_size_t, const uint16_t *, bus_size_t); 207 bus_size_t, const uint16_t *, bus_size_t);
208 void (*abs_wr_4)(void *, bus_space_handle_t, 208 void (*abs_wr_4)(void *, bus_space_handle_t,
209 bus_size_t, const uint32_t *, bus_size_t); 209 bus_size_t, const uint32_t *, bus_size_t);
210 void (*abs_wr_8)(void *, bus_space_handle_t, 210 void (*abs_wr_8)(void *, bus_space_handle_t,
211 bus_size_t, const uint64_t *, bus_size_t); 211 bus_size_t, const uint64_t *, bus_size_t);
212 212
213 /* set multiple */ 213 /* set multiple */
214 void (*abs_sm_1)(void *, bus_space_handle_t, 214 void (*abs_sm_1)(void *, bus_space_handle_t,
215 bus_size_t, uint8_t, bus_size_t); 215 bus_size_t, uint8_t, bus_size_t);
216 void (*abs_sm_2)(void *, bus_space_handle_t, 216 void (*abs_sm_2)(void *, bus_space_handle_t,
217 bus_size_t, uint16_t, bus_size_t); 217 bus_size_t, uint16_t, bus_size_t);
218 void (*abs_sm_4)(void *, bus_space_handle_t, 218 void (*abs_sm_4)(void *, bus_space_handle_t,
219 bus_size_t, uint32_t, bus_size_t); 219 bus_size_t, uint32_t, bus_size_t);
220 void (*abs_sm_8)(void *, bus_space_handle_t, 220 void (*abs_sm_8)(void *, bus_space_handle_t,
221 bus_size_t, uint64_t, bus_size_t); 221 bus_size_t, uint64_t, bus_size_t);
222 222
223 /* set region */ 223 /* set region */
224 void (*abs_sr_1)(void *, bus_space_handle_t, 224 void (*abs_sr_1)(void *, bus_space_handle_t,
225 bus_size_t, uint8_t, bus_size_t); 225 bus_size_t, uint8_t, bus_size_t);
226 void (*abs_sr_2)(void *, bus_space_handle_t, 226 void (*abs_sr_2)(void *, bus_space_handle_t,
227 bus_size_t, uint16_t, bus_size_t); 227 bus_size_t, uint16_t, bus_size_t);
228 void (*abs_sr_4)(void *, bus_space_handle_t, 228 void (*abs_sr_4)(void *, bus_space_handle_t,
229 bus_size_t, uint32_t, bus_size_t); 229 bus_size_t, uint32_t, bus_size_t);
230 void (*abs_sr_8)(void *, bus_space_handle_t, 230 void (*abs_sr_8)(void *, bus_space_handle_t,
231 bus_size_t, uint64_t, bus_size_t); 231 bus_size_t, uint64_t, bus_size_t);
232 232
233 /* copy */ 233 /* copy */
234 void (*abs_c_1)(void *, bus_space_handle_t, bus_size_t, 234 void (*abs_c_1)(void *, bus_space_handle_t, bus_size_t,
235 bus_space_handle_t, bus_size_t, bus_size_t); 235 bus_space_handle_t, bus_size_t, bus_size_t);
236 void (*abs_c_2)(void *, bus_space_handle_t, bus_size_t, 236 void (*abs_c_2)(void *, bus_space_handle_t, bus_size_t,
237 bus_space_handle_t, bus_size_t, bus_size_t); 237 bus_space_handle_t, bus_size_t, bus_size_t);
238 void (*abs_c_4)(void *, bus_space_handle_t, bus_size_t, 238 void (*abs_c_4)(void *, bus_space_handle_t, bus_size_t,
239 bus_space_handle_t, bus_size_t, bus_size_t); 239 bus_space_handle_t, bus_size_t, bus_size_t);
240 void (*abs_c_8)(void *, bus_space_handle_t, bus_size_t, 240 void (*abs_c_8)(void *, bus_space_handle_t, bus_size_t,
241 bus_space_handle_t, bus_size_t, bus_size_t); 241 bus_space_handle_t, bus_size_t, bus_size_t);
242}; 242};
243 243
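The method table above is what the machine-independent bus_space wrappers dispatch through. A short usage sketch, assuming "t", "h" and the hypothetical register offset MY_CSR come from the driver's attach code:

	uint32_t csr;

	csr = bus_space_read_4(t, h, MY_CSR);		/* calls abs_r_4 */
	bus_space_barrier(t, h, MY_CSR, 4,
	    BUS_SPACE_BARRIER_READ);			/* calls abs_barrier */
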
244#define BUS_SPACE_MAP_CACHEABLE 0x01 244#define BUS_SPACE_MAP_CACHEABLE 0x01
245 245
246#ifdef _KERNEL 246#ifdef _KERNEL
247 247
248#define BUS_SPACE_BARRIER_READ 0x01 248#define BUS_SPACE_BARRIER_READ 0x01
249#define BUS_SPACE_BARRIER_WRITE 0x02 249#define BUS_SPACE_BARRIER_WRITE 0x02
250/* 250/*
251 * Bus stream operations--defined in terms of non-stream counterparts 251 * Bus stream operations--defined in terms of non-stream counterparts
252 */ 252 */
253#define __BUS_SPACE_HAS_STREAM_METHODS 1 253#define __BUS_SPACE_HAS_STREAM_METHODS 1
254 254
255/* 255/*
256 * Flags used in various bus DMA methods. 256 * Flags used in various bus DMA methods.
257 */ 257 */
258#define BUS_DMA_WAITOK 0x000 /* safe to sleep (pseudo-flag) */ 258#define BUS_DMA_WAITOK 0x000 /* safe to sleep (pseudo-flag) */
259#define BUS_DMA_NOWAIT 0x001 /* not safe to sleep */ 259#define BUS_DMA_NOWAIT 0x001 /* not safe to sleep */
260#define BUS_DMA_ALLOCNOW 0x002 /* perform resource allocation now */ 260#define BUS_DMA_ALLOCNOW 0x002 /* perform resource allocation now */
261#define BUS_DMA_COHERENT 0x004 /* hint: map memory DMA coherent */ 261#define BUS_DMA_COHERENT 0x004 /* hint: map memory DMA coherent */
262#define BUS_DMA_STREAMING 0x008 /* hint: sequential, unidirectional */ 262#define BUS_DMA_STREAMING 0x008 /* hint: sequential, unidirectional */
263#define BUS_DMA_BUS1 0x010 /* placeholders for bus functions... */ 263#define BUS_DMA_BUS1 0x010 /* placeholders for bus functions... */
264#define BUS_DMA_BUS2 0x020 264#define BUS_DMA_BUS2 0x020
265#define BUS_DMA_BUS3 0x040 265#define BUS_DMA_BUS3 0x040
266#define BUS_DMA_BUS4 0x080 266#define BUS_DMA_BUS4 0x080
267#define BUS_DMA_READ 0x100 /* mapping is device -> memory only */ 267#define BUS_DMA_READ 0x100 /* mapping is device -> memory only */
268#define BUS_DMA_WRITE 0x200 /* mapping is memory -> device only */ 268#define BUS_DMA_WRITE 0x200 /* mapping is memory -> device only */
269#define BUS_DMA_NOCACHE 0x400 /* hint: map non-cached memory */ 269#define BUS_DMA_NOCACHE 0x400 /* hint: map non-cached memory */
270 270
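A usage sketch for the flags, assuming a valid tag "dmat": a map sized for one MAXPHYS transfer that may sleep for resources and reserves them at creation time:

	bus_dmamap_t map;
	int error;

	error = bus_dmamap_create(dmat, MAXPHYS, 1, MAXPHYS, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &map);
	if (error)
		return (error);
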
271/* 271/*
272 * Private flags stored in the DMA map. 272 * Private flags stored in the DMA map.
273 */ 273 */
274#define DMAMAP_NO_COALESCE 0x40000000 /* don't coalesce adjacent 274#define DMAMAP_NO_COALESCE 0x40000000 /* don't coalesce adjacent
275 segments */ 275 segments */
276 276
277/* Forwards needed by prototypes below. */ 277/* Forwards needed by prototypes below. */
278struct mbuf; 278struct mbuf;
279struct uio; 279struct uio;
280struct alpha_sgmap; 280struct alpha_sgmap;
281 281
282/* 282/*
283 * Operations performed by bus_dmamap_sync(). 283 * Operations performed by bus_dmamap_sync().
284 */ 284 */
285#define BUS_DMASYNC_PREREAD 0x01 /* pre-read synchronization */ 285#define BUS_DMASYNC_PREREAD 0x01 /* pre-read synchronization */
286#define BUS_DMASYNC_POSTREAD 0x02 /* post-read synchronization */ 286#define BUS_DMASYNC_POSTREAD 0x02 /* post-read synchronization */
287#define BUS_DMASYNC_PREWRITE 0x04 /* pre-write synchronization */ 287#define BUS_DMASYNC_PREWRITE 0x04 /* pre-write synchronization */
288#define BUS_DMASYNC_POSTWRITE 0x08 /* post-write synchronization */ 288#define BUS_DMASYNC_POSTWRITE 0x08 /* post-write synchronization */
289 289
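A sketch of the usual bracketing for a device-to-memory transfer, assuming "dmat" and an already-loaded "map":

	bus_dmamap_sync(dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);	/* before starting the DMA */

	/* device runs the transfer; completion interrupt fires */

	bus_dmamap_sync(dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);	/* before the CPU touches the data */
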
290/* 290/*
291 * alpha_bus_t 291 * alpha_bus_t
292 * 292 *
293 * Busses supported by NetBSD/alpha, used by internal 293 * Busses supported by NetBSD/alpha, used by internal
294 * utility functions. NOT TO BE USED BY MACHINE-INDEPENDENT 294 * utility functions. NOT TO BE USED BY MACHINE-INDEPENDENT
295 * CODE! 295 * CODE!
296 */ 296 */
297typedef enum { 297typedef enum {
298 ALPHA_BUS_TURBOCHANNEL, 298 ALPHA_BUS_TURBOCHANNEL,
299 ALPHA_BUS_PCI, 299 ALPHA_BUS_PCI,
300 ALPHA_BUS_EISA, 300 ALPHA_BUS_EISA,
301 ALPHA_BUS_ISA, 301 ALPHA_BUS_ISA,
302 ALPHA_BUS_TLSB, 302 ALPHA_BUS_TLSB,
303} alpha_bus_t; 303} alpha_bus_t;
304 304
305typedef struct alpha_bus_dma_tag *bus_dma_tag_t; 305typedef struct alpha_bus_dma_tag *bus_dma_tag_t;
306typedef struct alpha_bus_dmamap *bus_dmamap_t; 306typedef struct alpha_bus_dmamap *bus_dmamap_t;
307 307
308#define BUS_DMA_TAG_VALID(t) ((t) != (bus_dma_tag_t)0) 308#define BUS_DMA_TAG_VALID(t) ((t) != (bus_dma_tag_t)0)
309 309
310/* 310/*
311 * bus_dma_segment_t 311 * bus_dma_segment_t
312 * 312 *
313 * Describes a single contiguous DMA transaction. Values 313 * Describes a single contiguous DMA transaction. Values
314 * are suitable for programming into DMA registers. 314 * are suitable for programming into DMA registers.
315 */ 315 */
316struct alpha_bus_dma_segment { 316struct alpha_bus_dma_segment {
317 bus_addr_t ds_addr; /* DMA address */ 317 bus_addr_t ds_addr; /* DMA address */
318 bus_size_t ds_len; /* length of transfer */ 318 bus_size_t ds_len; /* length of transfer */
319}; 319};
320typedef struct alpha_bus_dma_segment bus_dma_segment_t; 320typedef struct alpha_bus_dma_segment bus_dma_segment_t;
321 321
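Because the values are ready for DMA registers, a driver typically walks the loaded map and hands each segment to the device; the DEV_SET_* register accessors here are hypothetical:

	for (int i = 0; i < map->dm_nsegs; i++) {
		DEV_SET_DMA_ADDR(sc, map->dm_segs[i].ds_addr);
		DEV_SET_DMA_LEN(sc, map->dm_segs[i].ds_len);
	}
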
322/* 322/*
323 * bus_dma_tag_t 323 * bus_dma_tag_t
324 * 324 *
325 * A machine-dependent opaque type describing the implementation of 325 * A machine-dependent opaque type describing the implementation of
326 * DMA for a given bus. 326 * DMA for a given bus.
327 */ 327 */
328struct alpha_bus_dma_tag { 328struct alpha_bus_dma_tag {
329 void *_cookie; /* cookie used in the guts */ 329 void *_cookie; /* cookie used in the guts */
330 bus_addr_t _wbase; /* DMA window base */ 330 bus_addr_t _wbase; /* DMA window base */
331 331
332 /* 332 /*
333 * The following two members are used to chain DMA windows 333 * The following two members are used to chain DMA windows
334 * together. If, during the course of a map load, the 334 * together. If, during the course of a map load, the
335 * resulting physical memory address is too large to 335 * resulting physical memory address is too large to
336 * be addressed by the window, the next window will be 336 * be addressed by the window, the next window will be
337 * attempted. These would be chained together like so: 337 * attempted. These would be chained together like so:
338 * 338 *
339 * direct -> sgmap -> NULL 339 * direct -> sgmap -> NULL
340 * or 340 * or
341 * sgmap -> NULL 341 * sgmap -> NULL
342 * or 342 * or
343 * direct -> NULL 343 * direct -> NULL
344 * 344 *
345 * If the window size is 0, it will not be checked (e.g. 345 * If the window size is 0, it will not be checked (e.g.
346 * TURBOchannel DMA). 346 * TURBOchannel DMA).
347 */ 347 */
348 bus_size_t _wsize; 348 bus_size_t _wsize;
349 struct alpha_bus_dma_tag *_next_window; 349 struct alpha_bus_dma_tag *_next_window;
350 350
351 /* 351 /*
352 * Some chipsets have a built-in boundary constraint, independent 352 * Some chipsets have a built-in boundary constraint, independent
353 * of what the device requests. This allows that boundary to 353 * of what the device requests. This allows that boundary to
354 * be specified. If the device has a more restrictive constraint, 354 * be specified. If the device has a more restrictive constraint,
355 * the map will use that, otherwise this boundary will be used. 355 * the map will use that, otherwise this boundary will be used.
356 * This value is ignored if 0. 356 * This value is ignored if 0.
357 */ 357 */
358 bus_size_t _boundary; 358 bus_size_t _boundary;
359 359
360 /* 360 /*
361 * A chipset may have more than one SGMAP window, so SGMAP 361 * A chipset may have more than one SGMAP window, so SGMAP
362 * windows also get a pointer to their SGMAP state. 362 * windows also get a pointer to their SGMAP state.
363 */ 363 */
364 struct alpha_sgmap *_sgmap; 364 struct alpha_sgmap *_sgmap;
365 365
366 /* 366 /*
 367 * Some chipsets may want to enforce a minimum alignment
 368 * constraint for SGMAP DMA addresses.
 369 */
 370 bus_size_t _sgmap_minalign;
 371
 372 /*
367 * The SGMAP MMU implements a prefetch FIFO to keep data 373 * The SGMAP MMU implements a prefetch FIFO to keep data
368 * moving down the pipe, when doing host->bus DMA writes. 374 * moving down the pipe, when doing host->bus DMA writes.
369 * The threshold (distance until the next page) used to 375 * The threshold (distance until the next page) used to
 370 * trigger the prefetch is different on different chipsets, 376 * trigger the prefetch is different on different chipsets,

371 * and we need to know what it is in order to know whether 377 * and we need to know what it is in order to know whether
372 * or not to allocate a spill page. 378 * or not to allocate a spill page.
373 */ 379 */
374 bus_size_t _pfthresh; 380 bus_size_t _pfthresh;
375 381
376 /* 382 /*
377 * Internal-use only utility methods. NOT TO BE USED BY 383 * Internal-use only utility methods. NOT TO BE USED BY
378 * MACHINE-INDEPENDENT CODE! 384 * MACHINE-INDEPENDENT CODE!
379 */ 385 */
380 bus_dma_tag_t (*_get_tag)(bus_dma_tag_t, alpha_bus_t); 386 bus_dma_tag_t (*_get_tag)(bus_dma_tag_t, alpha_bus_t);
381 387
382 /* 388 /*
383 * DMA mapping methods. 389 * DMA mapping methods.
384 */ 390 */
385 int (*_dmamap_create)(bus_dma_tag_t, bus_size_t, int, 391 int (*_dmamap_create)(bus_dma_tag_t, bus_size_t, int,
386 bus_size_t, bus_size_t, int, bus_dmamap_t *); 392 bus_size_t, bus_size_t, int, bus_dmamap_t *);
387 void (*_dmamap_destroy)(bus_dma_tag_t, bus_dmamap_t); 393 void (*_dmamap_destroy)(bus_dma_tag_t, bus_dmamap_t);
388 int (*_dmamap_load)(bus_dma_tag_t, bus_dmamap_t, void *, 394 int (*_dmamap_load)(bus_dma_tag_t, bus_dmamap_t, void *,
389 bus_size_t, struct proc *, int); 395 bus_size_t, struct proc *, int);
390 int (*_dmamap_load_mbuf)(bus_dma_tag_t, bus_dmamap_t, 396 int (*_dmamap_load_mbuf)(bus_dma_tag_t, bus_dmamap_t,
391 struct mbuf *, int); 397 struct mbuf *, int);
392 int (*_dmamap_load_uio)(bus_dma_tag_t, bus_dmamap_t, 398 int (*_dmamap_load_uio)(bus_dma_tag_t, bus_dmamap_t,
393 struct uio *, int); 399 struct uio *, int);
394 int (*_dmamap_load_raw)(bus_dma_tag_t, bus_dmamap_t, 400 int (*_dmamap_load_raw)(bus_dma_tag_t, bus_dmamap_t,
395 bus_dma_segment_t *, int, bus_size_t, int); 401 bus_dma_segment_t *, int, bus_size_t, int);
396 void (*_dmamap_unload)(bus_dma_tag_t, bus_dmamap_t); 402 void (*_dmamap_unload)(bus_dma_tag_t, bus_dmamap_t);
397 void (*_dmamap_sync)(bus_dma_tag_t, bus_dmamap_t, 403 void (*_dmamap_sync)(bus_dma_tag_t, bus_dmamap_t,
398 bus_addr_t, bus_size_t, int); 404 bus_addr_t, bus_size_t, int);
399 405
400 /* 406 /*
401 * DMA memory utility functions. 407 * DMA memory utility functions.
402 */ 408 */
403 int (*_dmamem_alloc)(bus_dma_tag_t, bus_size_t, bus_size_t, 409 int (*_dmamem_alloc)(bus_dma_tag_t, bus_size_t, bus_size_t,
404 bus_size_t, bus_dma_segment_t *, int, int *, int); 410 bus_size_t, bus_dma_segment_t *, int, int *, int);
405 void (*_dmamem_free)(bus_dma_tag_t, 411 void (*_dmamem_free)(bus_dma_tag_t,
406 bus_dma_segment_t *, int); 412 bus_dma_segment_t *, int);
407 int (*_dmamem_map)(bus_dma_tag_t, bus_dma_segment_t *, 413 int (*_dmamem_map)(bus_dma_tag_t, bus_dma_segment_t *,
408 int, size_t, void **, int); 414 int, size_t, void **, int);
409 void (*_dmamem_unmap)(bus_dma_tag_t, void *, size_t); 415 void (*_dmamem_unmap)(bus_dma_tag_t, void *, size_t);
410 paddr_t (*_dmamem_mmap)(bus_dma_tag_t, bus_dma_segment_t *, 416 paddr_t (*_dmamem_mmap)(bus_dma_tag_t, bus_dma_segment_t *,
411 int, off_t, int, int); 417 int, off_t, int, int);
412}; 418};
413 419
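Two illustrative fragments, not the literal sgmap_typedep.c code, showing the intent of the members above. First, per the commit message, an SGMAP load path rounds its alignment up to _sgmap_minalign, falling back to PAGE_SIZE when the field is left at 0; second, the window chaining described earlier, where a load falls over to _next_window when the address is out of range:

	bus_size_t align = t->_sgmap_minalign;
	if (align < PAGE_SIZE)
		align = PAGE_SIZE;	/* unspecified: PAGE_SIZE, as before */

	if (t->_wsize != 0 && pa >= t->_wsize && t->_next_window != NULL)
		t = t->_next_window;	/* try the next chained window */
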
414#define alphabus_dma_get_tag(t, b) \ 420#define alphabus_dma_get_tag(t, b) \
415 (*(t)->_get_tag)(t, b) 421 (*(t)->_get_tag)(t, b)
416 422
417/* 423/*
418 * bus_dmamap_t 424 * bus_dmamap_t
419 * 425 *
420 * Describes a DMA mapping. 426 * Describes a DMA mapping.
421 */ 427 */
422struct alpha_bus_dmamap { 428struct alpha_bus_dmamap {
423 /* 429 /*
 424 * PRIVATE MEMBERS: not for use by machine-independent code. 430 * PRIVATE MEMBERS: not for use by machine-independent code.
425 */ 431 */
426 bus_size_t _dm_size; /* largest DMA transfer mappable */ 432 bus_size_t _dm_size; /* largest DMA transfer mappable */
427 int _dm_segcnt; /* number of segs this map can map */ 433 int _dm_segcnt; /* number of segs this map can map */
428 bus_size_t _dm_maxmaxsegsz; /* fixed largest possible segment */ 434 bus_size_t _dm_maxmaxsegsz; /* fixed largest possible segment */
429 bus_size_t _dm_boundary; /* don't cross this */ 435 bus_size_t _dm_boundary; /* don't cross this */
430 int _dm_flags; /* misc. flags */ 436 int _dm_flags; /* misc. flags */
431 437
432 /* 438 /*
433 * Private cookie to be used by the DMA back-end. 439 * Private cookie to be used by the DMA back-end.
434 */ 440 */
435 void *_dm_cookie; 441 void *_dm_cookie;
436 442
437 /* 443 /*
438 * The DMA window that we ended up being mapped in. 444 * The DMA window that we ended up being mapped in.
439 */ 445 */
440 bus_dma_tag_t _dm_window; 446 bus_dma_tag_t _dm_window;
441 447
442 /* 448 /*
443 * PUBLIC MEMBERS: these are used by machine-independent code. 449 * PUBLIC MEMBERS: these are used by machine-independent code.
444 */ 450 */
445 bus_size_t dm_maxsegsz; /* largest possible segment */ 451 bus_size_t dm_maxsegsz; /* largest possible segment */
446 bus_size_t dm_mapsize; /* size of the mapping */ 452 bus_size_t dm_mapsize; /* size of the mapping */
447 int dm_nsegs; /* # valid segments in mapping */ 453 int dm_nsegs; /* # valid segments in mapping */
448 bus_dma_segment_t dm_segs[1]; /* segments; variable length */ 454 bus_dma_segment_t dm_segs[1]; /* segments; variable length */
449}; 455};
450 456
451#endif /* _KERNEL */ 457#endif /* _KERNEL */
452 458
453#endif /* _ALPHA_BUS_DEFS_H_ */ 459#endif /* _ALPHA_BUS_DEFS_H_ */

cvs diff -r1.14 -r1.15 src/sys/arch/alpha/tc/tc_dma.c

--- src/sys/arch/alpha/tc/tc_dma.c 2020/10/10 21:59:03 1.14
+++ src/sys/arch/alpha/tc/tc_dma.c 2021/07/18 05:12:27 1.15
@@ -1,73 +1,74 @@ @@ -1,73 +1,74 @@
1/* $NetBSD: tc_dma.c,v 1.14 2020/10/10 21:59:03 thorpej Exp $ */ 1/* $NetBSD: tc_dma.c,v 1.15 2021/07/18 05:12:27 thorpej Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc. 4 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center. 9 * NASA Ames Research Center.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * are met: 13 * are met:
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer. 15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright 16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the 17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution. 18 * documentation and/or other materials provided with the distribution.
19 * 19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE. 30 * POSSIBILITY OF SUCH DAMAGE.
31 */ 31 */
32 32
33#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */ 33#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
34 34
35__KERNEL_RCSID(0, "$NetBSD: tc_dma.c,v 1.14 2020/10/10 21:59:03 thorpej Exp $"); 35__KERNEL_RCSID(0, "$NetBSD: tc_dma.c,v 1.15 2021/07/18 05:12:27 thorpej Exp $");
36 36
37#include <sys/param.h> 37#include <sys/param.h>
38#include <sys/device.h> 38#include <sys/device.h>
39 39
40#define _ALPHA_BUS_DMA_PRIVATE 40#define _ALPHA_BUS_DMA_PRIVATE
41#include <sys/bus.h> 41#include <sys/bus.h>
42 42
43#include <dev/tc/tcvar.h> 43#include <dev/tc/tcvar.h>
44 44
45struct alpha_bus_dma_tag tc_dmat_direct = { 45struct alpha_bus_dma_tag tc_dmat_direct = {
46 NULL, /* _cookie */ 46 NULL, /* _cookie */
47 0, /* _wbase */ 47 0, /* _wbase */
48 0, /* _wsize */ 48 0, /* _wsize */
49 NULL, /* _next_window */ 49 NULL, /* _next_window */
50 0, /* _boundary */ 50 0, /* _boundary */
51 NULL, /* _sgmap */ 51 NULL, /* _sgmap */
 52 0, /* _sgmap_minalign */
52 0, /* _pfthresh */ 53 0, /* _pfthresh */
53 NULL, /* _get_tag */ 54 NULL, /* _get_tag */
54 _bus_dmamap_create, 55 _bus_dmamap_create,
55 _bus_dmamap_destroy, 56 _bus_dmamap_destroy,
56 _bus_dmamap_load_direct, 57 _bus_dmamap_load_direct,
57 _bus_dmamap_load_mbuf_direct, 58 _bus_dmamap_load_mbuf_direct,
58 _bus_dmamap_load_uio_direct, 59 _bus_dmamap_load_uio_direct,
59 _bus_dmamap_load_raw_direct, 60 _bus_dmamap_load_raw_direct,
60 _bus_dmamap_unload, 61 _bus_dmamap_unload,
61 _bus_dmamap_sync, 62 _bus_dmamap_sync,
62 _bus_dmamem_alloc, 63 _bus_dmamem_alloc,
63 _bus_dmamem_free, 64 _bus_dmamem_free,
64 _bus_dmamem_map, 65 _bus_dmamem_map,
65 _bus_dmamem_unmap, 66 _bus_dmamem_unmap,
66 _bus_dmamem_mmap, 67 _bus_dmamem_mmap,
67}; 68};
68 69
69void 70void
70tc_dma_init(void) 71tc_dma_init(void)
71{ 72{
72 /* Nada. */ 73 /* Nada. */
73} 74}

cvs diff -r1.23 -r1.24 src/sys/arch/alpha/tc/tc_dma_3000_500.c

--- src/sys/arch/alpha/tc/tc_dma_3000_500.c 2020/11/18 02:04:30 1.23
+++ src/sys/arch/alpha/tc/tc_dma_3000_500.c 2021/07/18 05:12:27 1.24
@@ -1,210 +1,211 @@ @@ -1,210 +1,211 @@
1/* $NetBSD: tc_dma_3000_500.c,v 1.23 2020/11/18 02:04:30 thorpej Exp $ */ 1/* $NetBSD: tc_dma_3000_500.c,v 1.24 2021/07/18 05:12:27 thorpej Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc. 4 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center. 9 * NASA Ames Research Center.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * are met: 13 * are met:
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer. 15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright 16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the 17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution. 18 * documentation and/or other materials provided with the distribution.
19 * 19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE. 30 * POSSIBILITY OF SUCH DAMAGE.
31 */ 31 */
32 32
33#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */ 33#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
34 34
35__KERNEL_RCSID(0, "$NetBSD: tc_dma_3000_500.c,v 1.23 2020/11/18 02:04:30 thorpej Exp $"); 35__KERNEL_RCSID(0, "$NetBSD: tc_dma_3000_500.c,v 1.24 2021/07/18 05:12:27 thorpej Exp $");
36 36
37#include <sys/param.h> 37#include <sys/param.h>
38#include <sys/systm.h> 38#include <sys/systm.h>
39#include <sys/device.h> 39#include <sys/device.h>
40#include <sys/kernel.h> 40#include <sys/kernel.h>
41#include <sys/kmem.h> 41#include <sys/kmem.h>
42 42
43#define _ALPHA_BUS_DMA_PRIVATE 43#define _ALPHA_BUS_DMA_PRIVATE
44#include <sys/bus.h> 44#include <sys/bus.h>
45 45
46#include <dev/tc/tcvar.h> 46#include <dev/tc/tcvar.h>
47#include <alpha/tc/tc_sgmap.h> 47#include <alpha/tc/tc_sgmap.h>
48#include <alpha/tc/tc_dma_3000_500.h> 48#include <alpha/tc/tc_dma_3000_500.h>
49 49
50struct alpha_bus_dma_tag tc_dmat_sgmap = { 50struct alpha_bus_dma_tag tc_dmat_sgmap = {
51 NULL, /* _cookie */ 51 NULL, /* _cookie */
52 0, /* _wbase */ 52 0, /* _wbase */
53 0, /* _wsize */ 53 0, /* _wsize */
54 NULL, /* _next_window */ 54 NULL, /* _next_window */
55 0, /* _boundary */ 55 0, /* _boundary */
56 NULL, /* _sgmap */ 56 NULL, /* _sgmap */
 57 PAGE_SIZE, /* _sgmap_minalign */
57 0, /* _pfthresh */ 58 0, /* _pfthresh */
58 NULL, /* _get_tag */ 59 NULL, /* _get_tag */
59 tc_bus_dmamap_create_sgmap, 60 tc_bus_dmamap_create_sgmap,
60 tc_bus_dmamap_destroy_sgmap, 61 tc_bus_dmamap_destroy_sgmap,
61 tc_bus_dmamap_load_sgmap, 62 tc_bus_dmamap_load_sgmap,
62 tc_bus_dmamap_load_mbuf_sgmap, 63 tc_bus_dmamap_load_mbuf_sgmap,
63 tc_bus_dmamap_load_uio_sgmap, 64 tc_bus_dmamap_load_uio_sgmap,
64 tc_bus_dmamap_load_raw_sgmap, 65 tc_bus_dmamap_load_raw_sgmap,
65 tc_bus_dmamap_unload_sgmap, 66 tc_bus_dmamap_unload_sgmap,
66 _bus_dmamap_sync, 67 _bus_dmamap_sync,
67 _bus_dmamem_alloc, 68 _bus_dmamem_alloc,
68 _bus_dmamem_free, 69 _bus_dmamem_free,
69 _bus_dmamem_map, 70 _bus_dmamem_map,
70 _bus_dmamem_unmap, 71 _bus_dmamem_unmap,
71 _bus_dmamem_mmap, 72 _bus_dmamem_mmap,
72}; 73};
73 74
74struct tc_dma_slot_info { 75struct tc_dma_slot_info {
75 struct alpha_sgmap tdsi_sgmap; /* sgmap for slot */ 76 struct alpha_sgmap tdsi_sgmap; /* sgmap for slot */
76 struct alpha_bus_dma_tag tdsi_dmat; /* dma tag for slot */ 77 struct alpha_bus_dma_tag tdsi_dmat; /* dma tag for slot */
77}; 78};
78struct tc_dma_slot_info *tc_dma_slot_info; 79struct tc_dma_slot_info *tc_dma_slot_info;
79 80
80void 81void
81tc_dma_init_3000_500(int nslots) 82tc_dma_init_3000_500(int nslots)
82{ 83{
83 extern struct alpha_bus_dma_tag tc_dmat_direct; 84 extern struct alpha_bus_dma_tag tc_dmat_direct;
84 size_t sisize; 85 size_t sisize;
85 int i; 86 int i;
86 87
87 /* Allocate per-slot DMA info. */ 88 /* Allocate per-slot DMA info. */
88 sisize = nslots * sizeof(struct tc_dma_slot_info); 89 sisize = nslots * sizeof(struct tc_dma_slot_info);
89 tc_dma_slot_info = kmem_zalloc(sisize, KM_SLEEP); 90 tc_dma_slot_info = kmem_zalloc(sisize, KM_SLEEP);
90 91
91 /* Default all slots to direct-mapped. */ 92 /* Default all slots to direct-mapped. */
92 for (i = 0; i < nslots; i++) 93 for (i = 0; i < nslots; i++)
93 memcpy(&tc_dma_slot_info[i].tdsi_dmat, &tc_dmat_direct, 94 memcpy(&tc_dma_slot_info[i].tdsi_dmat, &tc_dmat_direct,
94 sizeof(tc_dma_slot_info[i].tdsi_dmat)); 95 sizeof(tc_dma_slot_info[i].tdsi_dmat));
95} 96}
96 97
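All slots start out direct-mapped; a hypothetical sketch (the real switch-over is done elsewhere) of pointing one slot's tag at the SGMAP methods, wiring the slot info in as the cookie that the load routines below fetch from t->_cookie:

	struct tc_dma_slot_info *tdsi = &tc_dma_slot_info[slot];

	memcpy(&tdsi->tdsi_dmat, &tc_dmat_sgmap, sizeof(tdsi->tdsi_dmat));
	tdsi->tdsi_dmat._cookie = tdsi;
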
97/* 98/*
98 * Return the DMA tag for the given slot. 99 * Return the DMA tag for the given slot.
99 */ 100 */
100bus_dma_tag_t 101bus_dma_tag_t
101tc_dma_get_tag_3000_500(int slot) 102tc_dma_get_tag_3000_500(int slot)
102{ 103{
103 104
104 return (&tc_dma_slot_info[slot].tdsi_dmat); 105 return (&tc_dma_slot_info[slot].tdsi_dmat);
105} 106}
106 107
107/* 108/*
108 * Create a TURBOchannel SGMAP-mapped DMA map. 109 * Create a TURBOchannel SGMAP-mapped DMA map.
109 */ 110 */
110int 111int
111tc_bus_dmamap_create_sgmap( 112tc_bus_dmamap_create_sgmap(
112 bus_dma_tag_t t, 113 bus_dma_tag_t t,
113 bus_size_t size, 114 bus_size_t size,
114 int nsegments, 115 int nsegments,
115 bus_size_t maxsegsz, 116 bus_size_t maxsegsz,
116 bus_size_t boundary, 117 bus_size_t boundary,
117 int flags, 118 int flags,
118 bus_dmamap_t *dmamp) 119 bus_dmamap_t *dmamp)
119{ 120{
120 int error; 121 int error;
121 122
122 error = _bus_dmamap_create(t, size, nsegments, maxsegsz, 123 error = _bus_dmamap_create(t, size, nsegments, maxsegsz,
123 boundary, flags, dmamp); 124 boundary, flags, dmamp);
124 if (error) 125 if (error)
125 return (error); 126 return (error);
126 127
127 (void)*dmamp; 128 (void)*dmamp;
128 129
129 /* XXX BUS_DMA_ALLOCNOW */ 130 /* XXX BUS_DMA_ALLOCNOW */
130 131
131 return (error); 132 return (error);
132} 133}
133 134
134/* 135/*
135 * Destroy a TURBOchannel SGMAP-mapped DMA map. 136 * Destroy a TURBOchannel SGMAP-mapped DMA map.
136 */ 137 */
137void 138void
138tc_bus_dmamap_destroy_sgmap(bus_dma_tag_t t, bus_dmamap_t map) 139tc_bus_dmamap_destroy_sgmap(bus_dma_tag_t t, bus_dmamap_t map)
139{ 140{
140 141
141 KASSERT(map->dm_mapsize == 0); 142 KASSERT(map->dm_mapsize == 0);
142 143
143 _bus_dmamap_destroy(t, map); 144 _bus_dmamap_destroy(t, map);
144} 145}
145 146
146/* 147/*
147 * Load a TURBOchannel SGMAP-mapped DMA map with a linear buffer. 148 * Load a TURBOchannel SGMAP-mapped DMA map with a linear buffer.
148 */ 149 */
149int 150int
150tc_bus_dmamap_load_sgmap(bus_dma_tag_t t, bus_dmamap_t map, void *buf, bus_size_t buflen, struct proc *p, int flags) 151tc_bus_dmamap_load_sgmap(bus_dma_tag_t t, bus_dmamap_t map, void *buf, bus_size_t buflen, struct proc *p, int flags)
151{ 152{
152 struct tc_dma_slot_info *tdsi = t->_cookie; 153 struct tc_dma_slot_info *tdsi = t->_cookie;
153 154
154 return (tc_sgmap_load(t, map, buf, buflen, p, flags, 155 return (tc_sgmap_load(t, map, buf, buflen, p, flags,
155 &tdsi->tdsi_sgmap)); 156 &tdsi->tdsi_sgmap));
156} 157}
157 158
158/* 159/*
159 * Load a TURBOchannel SGMAP-mapped DMA map with an mbuf chain. 160 * Load a TURBOchannel SGMAP-mapped DMA map with an mbuf chain.
160 */ 161 */
161int 162int
162tc_bus_dmamap_load_mbuf_sgmap(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m, int flags) 163tc_bus_dmamap_load_mbuf_sgmap(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m, int flags)
163{ 164{
164 struct tc_dma_slot_info *tdsi = t->_cookie; 165 struct tc_dma_slot_info *tdsi = t->_cookie;
165 166
166 return (tc_sgmap_load_mbuf(t, map, m, flags, &tdsi->tdsi_sgmap)); 167 return (tc_sgmap_load_mbuf(t, map, m, flags, &tdsi->tdsi_sgmap));
167} 168}
168 169
169/* 170/*
170 * Load a TURBOchannel SGMAP-mapped DMA map with a uio. 171 * Load a TURBOchannel SGMAP-mapped DMA map with a uio.
171 */ 172 */
172int 173int
173tc_bus_dmamap_load_uio_sgmap(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio, int flags) 174tc_bus_dmamap_load_uio_sgmap(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio, int flags)
174{ 175{
175 struct tc_dma_slot_info *tdsi = t->_cookie; 176 struct tc_dma_slot_info *tdsi = t->_cookie;
176 177
177 return (tc_sgmap_load_uio(t, map, uio, flags, &tdsi->tdsi_sgmap)); 178 return (tc_sgmap_load_uio(t, map, uio, flags, &tdsi->tdsi_sgmap));
178} 179}
179 180
180/* 181/*
181 * Load a TURBOchannel SGMAP-mapped DMA map with raw memory. 182 * Load a TURBOchannel SGMAP-mapped DMA map with raw memory.
182 */ 183 */
183int 184int
184tc_bus_dmamap_load_raw_sgmap(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags) 185tc_bus_dmamap_load_raw_sgmap(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
185{ 186{
186 struct tc_dma_slot_info *tdsi = t->_cookie; 187 struct tc_dma_slot_info *tdsi = t->_cookie;
187 188
188 return (tc_sgmap_load_raw(t, map, segs, nsegs, size, flags, 189 return (tc_sgmap_load_raw(t, map, segs, nsegs, size, flags,
189 &tdsi->tdsi_sgmap)); 190 &tdsi->tdsi_sgmap));
190} 191}
191 192
192/* 193/*
193 * Unload a TURBOchannel SGMAP-mapped DMA map. 194 * Unload a TURBOchannel SGMAP-mapped DMA map.
194 */ 195 */
195void 196void
196tc_bus_dmamap_unload_sgmap(bus_dma_tag_t t, bus_dmamap_t map) 197tc_bus_dmamap_unload_sgmap(bus_dma_tag_t t, bus_dmamap_t map)
197{ 198{
198 struct tc_dma_slot_info *tdsi = t->_cookie; 199 struct tc_dma_slot_info *tdsi = t->_cookie;
199 200
200 /* 201 /*
201 * Invalidate any SGMAP page table entries used by this 202 * Invalidate any SGMAP page table entries used by this
202 * mapping. 203 * mapping.
203 */ 204 */
204 tc_sgmap_unload(t, map, &tdsi->tdsi_sgmap); 205 tc_sgmap_unload(t, map, &tdsi->tdsi_sgmap);
205 206
206 /* 207 /*
207 * Do the generic bits of the unload. 208 * Do the generic bits of the unload.
208 */ 209 */
209 _bus_dmamap_unload_common(t, map); 210 _bus_dmamap_unload_common(t, map);
210} 211}