| @@ -1,883 +1,1040 @@ | | | @@ -1,883 +1,1040 @@ |
1 | /* $NetBSD: bus.c,v 1.59 2018/01/20 17:37:15 tsutsui Exp $ */ | | 1 | /* $NetBSD: bus.c,v 1.60 2018/03/10 03:44:43 tsutsui Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 1998 The NetBSD Foundation, Inc. | | 4 | * Copyright (c) 1998 The NetBSD Foundation, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation | | 7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, | | 8 | * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, |
9 | * NASA Ames Research Center and by Chris G. Demetriou. | | 9 | * NASA Ames Research Center and by Chris G. Demetriou. |
10 | * | | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | | 11 | * Redistribution and use in source and binary forms, with or without |
12 | * modification, are permitted provided that the following conditions | | 12 | * modification, are permitted provided that the following conditions |
13 | * are met: | | 13 | * are met: |
14 | * 1. Redistributions of source code must retain the above copyright | | 14 | * 1. Redistributions of source code must retain the above copyright |
15 | * notice, this list of conditions and the following disclaimer. | | 15 | * notice, this list of conditions and the following disclaimer. |
16 | * 2. Redistributions in binary form must reproduce the above copyright | | 16 | * 2. Redistributions in binary form must reproduce the above copyright |
17 | * notice, this list of conditions and the following disclaimer in the | | 17 | * notice, this list of conditions and the following disclaimer in the |
18 | * documentation and/or other materials provided with the distribution. | | 18 | * documentation and/or other materials provided with the distribution. |
19 | * | | 19 | * |
20 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 20 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
21 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 21 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
22 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 22 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
23 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 23 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
24 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 24 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
25 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 25 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
26 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 26 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
27 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 27 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
28 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 28 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
29 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 29 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
30 | * POSSIBILITY OF SUCH DAMAGE. | | 30 | * POSSIBILITY OF SUCH DAMAGE. |
31 | */ | | 31 | */ |
32 | | | 32 | |
33 | #include "opt_m68k_arch.h" | | 33 | #include "opt_m68k_arch.h" |
34 | | | 34 | |
35 | #include <sys/cdefs.h> | | 35 | #include <sys/cdefs.h> |
36 | __KERNEL_RCSID(0, "$NetBSD: bus.c,v 1.59 2018/01/20 17:37:15 tsutsui Exp $"); | | 36 | __KERNEL_RCSID(0, "$NetBSD: bus.c,v 1.60 2018/03/10 03:44:43 tsutsui Exp $"); |
37 | | | 37 | |
38 | #include <sys/param.h> | | 38 | #include <sys/param.h> |
39 | #include <sys/systm.h> | | 39 | #include <sys/systm.h> |
40 | #include <sys/extent.h> | | 40 | #include <sys/extent.h> |
41 | #include <sys/malloc.h> | | 41 | #include <sys/malloc.h> |
42 | #include <sys/mbuf.h> | | 42 | #include <sys/mbuf.h> |
43 | #include <sys/proc.h> | | 43 | #include <sys/proc.h> |
44 | | | 44 | |
45 | #include <uvm/uvm.h> | | 45 | #include <uvm/uvm.h> |
46 | | | 46 | |
47 | #include <machine/cpu.h> | | 47 | #include <machine/cpu.h> |
48 | #include <m68k/cacheops.h> | | 48 | #include <m68k/cacheops.h> |
49 | #define _ATARI_BUS_DMA_PRIVATE | | 49 | #define _ATARI_BUS_DMA_PRIVATE |
50 | #include <sys/bus.h> | | 50 | #include <sys/bus.h> |
51 | | | 51 | |
52 | int bus_dmamem_alloc_range(bus_dma_tag_t tag, bus_size_t size, | | 52 | int bus_dmamem_alloc_range(bus_dma_tag_t tag, bus_size_t size, |
53 | bus_size_t alignment, bus_size_t boundary, | | 53 | bus_size_t alignment, bus_size_t boundary, |
54 | bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags, | | 54 | bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags, |
55 | paddr_t low, paddr_t high); | | 55 | paddr_t low, paddr_t high); |
56 | static int _bus_dmamap_load_buffer(bus_dma_tag_t tag, bus_dmamap_t, | | 56 | static int _bus_dmamap_load_buffer(bus_dma_tag_t tag, bus_dmamap_t, |
57 | void *, bus_size_t, struct vmspace *, int, paddr_t *, | | 57 | void *, bus_size_t, struct vmspace *, int, paddr_t *, |
58 | int *, int); | | 58 | int *, int); |
59 | static int bus_mem_add_mapping(bus_space_tag_t t, bus_addr_t bpa, | | 59 | static int bus_mem_add_mapping(bus_space_tag_t t, bus_addr_t bpa, |
60 | bus_size_t size, int flags, bus_space_handle_t *bsph); | | 60 | bus_size_t size, int flags, bus_space_handle_t *bsph); |
61 | | | 61 | |
62 | extern struct extent *iomem_ex; | | 62 | extern struct extent *iomem_ex; |
63 | extern int iomem_malloc_safe; | | 63 | extern int iomem_malloc_safe; |
64 | | | 64 | |
65 | extern paddr_t avail_end; | | 65 | extern paddr_t avail_end; |
66 | | | 66 | |
67 | /* | | 67 | /* |
68 | * We need these for the early memory allocator. The idea is this: | | 68 | * We need these for the early memory allocator. The idea is this: |
69 | * Allocate VA-space through ptextra (atari_init.c:startc()). When | | 69 | * Allocate VA-space through ptextra (atari_init.c:startc()). When |
70 | * the VA & size of this space are known, call bootm_init(). | | 70 | * the VA & size of this space are known, call bootm_init(). |
71 | * Until the VM-system is up, bus_mem_add_mapping() allocates its virtual | | 71 | * Until the VM-system is up, bus_mem_add_mapping() allocates its virtual |
72 | * addresses from this extent-map. | | 72 | * addresses from this extent-map. |
73 | * | | 73 | * |
74 | * This allows the console code to use the bus_space interface at a | | 74 | * This allows the console code to use the bus_space interface at a |
75 | * very early stage of the system configuration. | | 75 | * very early stage of the system configuration. |
76 | */ | | 76 | */ |
77 | static pt_entry_t *bootm_ptep; | | 77 | static pt_entry_t *bootm_ptep; |
78 | static long bootm_ex_storage[EXTENT_FIXED_STORAGE_SIZE(32) / | | 78 | static long bootm_ex_storage[EXTENT_FIXED_STORAGE_SIZE(32) / |
79 | sizeof(long)]; | | 79 | sizeof(long)]; |
80 | static struct extent *bootm_ex; | | 80 | static struct extent *bootm_ex; |
81 | | | 81 | |
82 | void bootm_init(vaddr_t, pt_entry_t *, u_long); | | 82 | void bootm_init(vaddr_t, pt_entry_t *, u_long); |
83 | static vaddr_t bootm_alloc(paddr_t pa, u_long size, int flags); | | 83 | static vaddr_t bootm_alloc(paddr_t pa, u_long size, int flags); |
84 | static int bootm_free(vaddr_t va, u_long size); | | 84 | static int bootm_free(vaddr_t va, u_long size); |
85 | | | 85 | |
86 | void | | 86 | void |
87 | bootm_init(vaddr_t va, pt_entry_t *ptep, u_long size) | | 87 | bootm_init(vaddr_t va, pt_entry_t *ptep, u_long size) |
88 | { | | 88 | { |
89 | bootm_ex = extent_create("bootmem", va, va + size, | | 89 | bootm_ex = extent_create("bootmem", va, va + size, |
90 | (void *)bootm_ex_storage, sizeof(bootm_ex_storage), | | 90 | (void *)bootm_ex_storage, sizeof(bootm_ex_storage), |
91 | EX_NOCOALESCE|EX_NOWAIT); | | 91 | EX_NOCOALESCE|EX_NOWAIT); |
92 | bootm_ptep = ptep; | | 92 | bootm_ptep = ptep; |
93 | } | | 93 | } |
94 | | | 94 | |
95 | vaddr_t | | 95 | vaddr_t |
96 | bootm_alloc(paddr_t pa, u_long size, int flags) | | 96 | bootm_alloc(paddr_t pa, u_long size, int flags) |
97 | { | | 97 | { |
98 | pt_entry_t *pg, *epg; | | 98 | pt_entry_t *pg, *epg; |
99 | pt_entry_t pg_proto; | | 99 | pt_entry_t pg_proto; |
100 | vaddr_t va, rva; | | 100 | vaddr_t va, rva; |
101 | | | 101 | |
102 | if (extent_alloc(bootm_ex, size, PAGE_SIZE, 0, EX_NOWAIT, &rva)) { | | 102 | if (extent_alloc(bootm_ex, size, PAGE_SIZE, 0, EX_NOWAIT, &rva)) { |
103 | printf("bootm_alloc fails! Not enough fixed extents?\n"); | | 103 | printf("bootm_alloc fails! Not enough fixed extents?\n"); |
104 | printf("Requested extent: pa=%lx, size=%lx\n", | | 104 | printf("Requested extent: pa=%lx, size=%lx\n", |
105 | (u_long)pa, size); | | 105 | (u_long)pa, size); |
106 | return 0; | | 106 | return 0; |
107 | } | | 107 | } |
108 | | | 108 | |
109 | pg = &bootm_ptep[btoc(rva - bootm_ex->ex_start)]; | | 109 | pg = &bootm_ptep[btoc(rva - bootm_ex->ex_start)]; |
110 | epg = &pg[btoc(size)]; | | 110 | epg = &pg[btoc(size)]; |
111 | va = rva; | | 111 | va = rva; |
112 | pg_proto = pa | PG_RW | PG_V; | | 112 | pg_proto = pa | PG_RW | PG_V; |
113 | if (!(flags & BUS_SPACE_MAP_CACHEABLE)) | | 113 | if (!(flags & BUS_SPACE_MAP_CACHEABLE)) |
114 | pg_proto |= PG_CI; | | 114 | pg_proto |= PG_CI; |
115 | while (pg < epg) { | | 115 | while (pg < epg) { |
116 | *pg++ = pg_proto; | | 116 | *pg++ = pg_proto; |
117 | pg_proto += PAGE_SIZE; | | 117 | pg_proto += PAGE_SIZE; |
118 | #if defined(M68040) || defined(M68060) | | 118 | #if defined(M68040) || defined(M68060) |
119 | if (mmutype == MMU_68040) { | | 119 | if (mmutype == MMU_68040) { |
120 | DCFP(pa); | | 120 | DCFP(pa); |
121 | pa += PAGE_SIZE; | | 121 | pa += PAGE_SIZE; |
122 | } | | 122 | } |
123 | #endif | | 123 | #endif |
124 | TBIS(va); | | 124 | TBIS(va); |
125 | va += PAGE_SIZE; | | 125 | va += PAGE_SIZE; |
126 | } | | 126 | } |
127 | return rva; | | 127 | return rva; |
128 | } | | 128 | } |
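
For reference, a minimal user-space sketch of the PTE fill done by bootm_alloc() above: one prototype entry is written per page and the prototype is advanced by PAGE_SIZE each time. The PG_* bit values, page size, and addresses below are illustrative assumptions, not the real m68k definitions.

/* sketch: fill a small PTE array for a physically contiguous,
 * cache-inhibited device mapping (illustrative constants only) */
#include <stdio.h>

#define PAGE_SIZE 8192UL
#define PG_V  0x1UL		/* valid (made-up encoding) */
#define PG_RW 0x2UL		/* writable (made-up encoding) */
#define PG_CI 0x40UL		/* cache inhibit (made-up encoding) */

int main(void)
{
	unsigned long ptes[4];
	unsigned long pa = 0xff8000UL;		/* device physical base (made up) */
	unsigned long pg_proto = pa | PG_RW | PG_V | PG_CI;
	int i;

	for (i = 0; i < 4; i++) {
		ptes[i] = pg_proto;		/* map one page */
		pg_proto += PAGE_SIZE;		/* next physical page */
	}
	for (i = 0; i < 4; i++)
		printf("pte[%d] = 0x%lx\n", i, ptes[i]);
	return 0;
}
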
129 | | | 129 | |
130 | int | | 130 | int |
131 | bootm_free(vaddr_t va, u_long size) | | 131 | bootm_free(vaddr_t va, u_long size) |
132 | { | | 132 | { |
133 | | | 133 | |
134 | if ((va < bootm_ex->ex_start) || ((va + size) > bootm_ex->ex_end)) | | 134 | if ((va < bootm_ex->ex_start) || ((va + size) > bootm_ex->ex_end)) |
135 | return 0; /* Not for us! */ | | 135 | return 0; /* Not for us! */ |
136 | extent_free(bootm_ex, va, size, EX_NOWAIT); | | 136 | extent_free(bootm_ex, va, size, EX_NOWAIT); |
137 | return 1; | | 137 | return 1; |
138 | } | | 138 | } |
139 | | | 139 | |
140 | int | | 140 | int |
141 | bus_space_map(bus_space_tag_t t, bus_addr_t bpa, bus_size_t size, int flags, | | 141 | bus_space_map(bus_space_tag_t t, bus_addr_t bpa, bus_size_t size, int flags, |
142 | bus_space_handle_t *mhp) | | 142 | bus_space_handle_t *mhp) |
143 | { | | 143 | { |
144 | int error; | | 144 | int error; |
145 | | | 145 | |
146 | /* | | 146 | /* |
147 | * Before we go any further, let's make sure that this | | 147 | * Before we go any further, let's make sure that this |
148 | * region is available. | | 148 | * region is available. |
149 | */ | | 149 | */ |
150 | error = extent_alloc_region(iomem_ex, bpa + t->base, size, | | 150 | error = extent_alloc_region(iomem_ex, bpa + t->base, size, |
151 | EX_NOWAIT | (iomem_malloc_safe ? EX_MALLOCOK : 0)); | | 151 | EX_NOWAIT | (iomem_malloc_safe ? EX_MALLOCOK : 0)); |
152 | | | 152 | |
153 | if (error) | | 153 | if (error) |
154 | return error; | | 154 | return error; |
155 | | | 155 | |
156 | error = bus_mem_add_mapping(t, bpa, size, flags, mhp); | | 156 | error = bus_mem_add_mapping(t, bpa, size, flags, mhp); |
157 | if (error) { | | 157 | if (error) { |
158 | if (extent_free(iomem_ex, bpa + t->base, size, EX_NOWAIT | | | 158 | if (extent_free(iomem_ex, bpa + t->base, size, EX_NOWAIT | |
159 | (iomem_malloc_safe ? EX_MALLOCOK : 0))) { | | 159 | (iomem_malloc_safe ? EX_MALLOCOK : 0))) { |
160 | printf("bus_space_map: pa 0x%lx, size 0x%lx\n", | | 160 | printf("bus_space_map: pa 0x%lx, size 0x%lx\n", |
161 | bpa, size); | | 161 | bpa, size); |
162 | printf("bus_space_map: can't free region\n"); | | 162 | printf("bus_space_map: can't free region\n"); |
163 | } | | 163 | } |
164 | } | | 164 | } |
165 | return error; | | 165 | return error; |
166 | } | | 166 | } |
167 | | | 167 | |
168 | int | | 168 | int |
169 | bus_space_alloc(bus_space_tag_t t, bus_addr_t rstart, bus_addr_t rend, | | 169 | bus_space_alloc(bus_space_tag_t t, bus_addr_t rstart, bus_addr_t rend, |
170 | bus_size_t size, bus_size_t alignment, bus_size_t boundary, int flags, | | 170 | bus_size_t size, bus_size_t alignment, bus_size_t boundary, int flags, |
171 | bus_addr_t *bpap, bus_space_handle_t *bshp) | | 171 | bus_addr_t *bpap, bus_space_handle_t *bshp) |
172 | { | | 172 | { |
173 | u_long bpa; | | 173 | u_long bpa; |
174 | int error; | | 174 | int error; |
175 | | | 175 | |
176 | #ifdef DIAGNOSTIC | | 176 | #ifdef DIAGNOSTIC |
177 | /* | | 177 | /* |
178 | * Sanity check the allocation against the extent's boundaries. | | 178 | * Sanity check the allocation against the extent's boundaries. |
179 | * XXX: Since we manage the whole of memory in a single map, | | 179 | * XXX: Since we manage the whole of memory in a single map, |
180 | * this is nonsense for now! Bracket it with DIAGNOSTIC.... | | 180 | * this is nonsense for now! Bracket it with DIAGNOSTIC.... |
181 | */ | | 181 | */ |
182 | if ((rstart + t->base) < iomem_ex->ex_start || | | 182 | if ((rstart + t->base) < iomem_ex->ex_start || |
183 | (rend + t->base) > iomem_ex->ex_end) | | 183 | (rend + t->base) > iomem_ex->ex_end) |
184 | panic("bus_space_alloc: bad region start/end"); | | 184 | panic("bus_space_alloc: bad region start/end"); |
185 | #endif /* DIAGNOSTIC */ | | 185 | #endif /* DIAGNOSTIC */ |
186 | | | 186 | |
187 | /* | | 187 | /* |
188 | * Do the requested allocation. | | 188 | * Do the requested allocation. |
189 | */ | | 189 | */ |
190 | error = extent_alloc_subregion(iomem_ex, rstart + t->base, | | 190 | error = extent_alloc_subregion(iomem_ex, rstart + t->base, |
191 | rend + t->base, size, alignment, boundary, | | 191 | rend + t->base, size, alignment, boundary, |
192 | EX_FAST | EX_NOWAIT | (iomem_malloc_safe ? EX_MALLOCOK : 0), | | 192 | EX_FAST | EX_NOWAIT | (iomem_malloc_safe ? EX_MALLOCOK : 0), |
193 | &bpa); | | 193 | &bpa); |
194 | | | 194 | |
195 | if (error) | | 195 | if (error) |
196 | return error; | | 196 | return error; |
197 | | | 197 | |
198 | /* | | 198 | /* |
199 | * Map the bus physical address to a kernel virtual address. | | 199 | * Map the bus physical address to a kernel virtual address. |
200 | */ | | 200 | */ |
201 | error = bus_mem_add_mapping(t, bpa, size, flags, bshp); | | 201 | error = bus_mem_add_mapping(t, bpa, size, flags, bshp); |
202 | if (error) { | | 202 | if (error) { |
203 | if (extent_free(iomem_ex, bpa, size, EX_NOWAIT | | | 203 | if (extent_free(iomem_ex, bpa, size, EX_NOWAIT | |
204 | (iomem_malloc_safe ? EX_MALLOCOK : 0))) { | | 204 | (iomem_malloc_safe ? EX_MALLOCOK : 0))) { |
205 | printf("bus_space_alloc: pa 0x%lx, size 0x%lx\n", | | 205 | printf("bus_space_alloc: pa 0x%lx, size 0x%lx\n", |
206 | bpa, size); | | 206 | bpa, size); |
207 | printf("bus_space_alloc: can't free region\n"); | | 207 | printf("bus_space_alloc: can't free region\n"); |
208 | } | | 208 | } |
209 | } | | 209 | } |
210 | | | 210 | |
211 | *bpap = bpa; | | 211 | *bpap = bpa; |
212 | | | 212 | |
213 | return error; | | 213 | return error; |
214 | } | | 214 | } |
215 | | | 215 | |
216 | static int | | 216 | static int |
217 | bus_mem_add_mapping(bus_space_tag_t t, bus_addr_t bpa, bus_size_t size, | | 217 | bus_mem_add_mapping(bus_space_tag_t t, bus_addr_t bpa, bus_size_t size, |
218 | int flags, bus_space_handle_t *bshp) | | 218 | int flags, bus_space_handle_t *bshp) |
219 | { | | 219 | { |
220 | vaddr_t va; | | 220 | vaddr_t va; |
221 | paddr_t pa, endpa; | | 221 | paddr_t pa, endpa; |
222 | | | 222 | |
223 | pa = m68k_trunc_page(bpa + t->base); | | 223 | pa = m68k_trunc_page(bpa + t->base); |
224 | endpa = m68k_round_page((bpa + t->base + size) - 1); | | 224 | endpa = m68k_round_page((bpa + t->base + size) - 1); |
225 | | | 225 | |
226 | #ifdef DIAGNOSTIC | | 226 | #ifdef DIAGNOSTIC |
227 | if (endpa <= pa) | | 227 | if (endpa <= pa) |
228 | panic("bus_mem_add_mapping: overflow"); | | 228 | panic("bus_mem_add_mapping: overflow"); |
229 | #endif | | 229 | #endif |
230 | | | 230 | |
231 | if (kernel_map == NULL) { | | 231 | if (kernel_map == NULL) { |
232 | /* | | 232 | /* |
233 | * The VM-system is not yet operational, allocate from | | 233 | * The VM-system is not yet operational, allocate from |
234 | * a special pool. | | 234 | * a special pool. |
235 | */ | | 235 | */ |
236 | va = bootm_alloc(pa, endpa - pa, flags); | | 236 | va = bootm_alloc(pa, endpa - pa, flags); |
237 | if (va == 0) | | 237 | if (va == 0) |
238 | return ENOMEM; | | 238 | return ENOMEM; |
239 | *bshp = va + (bpa & PGOFSET); | | 239 | *bshp = va + (bpa & PGOFSET); |
240 | return 0; | | 240 | return 0; |
241 | } | | 241 | } |
242 | | | 242 | |
243 | va = uvm_km_alloc(kernel_map, endpa - pa, 0, | | 243 | va = uvm_km_alloc(kernel_map, endpa - pa, 0, |
244 | UVM_KMF_VAONLY | UVM_KMF_NOWAIT); | | 244 | UVM_KMF_VAONLY | UVM_KMF_NOWAIT); |
245 | if (va == 0) | | 245 | if (va == 0) |
246 | return ENOMEM; | | 246 | return ENOMEM; |
247 | | | 247 | |
248 | *bshp = va + (bpa & PGOFSET); | | 248 | *bshp = va + (bpa & PGOFSET); |
249 | | | 249 | |
250 | for (; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) { | | 250 | for (; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) { |
251 | u_int *ptep, npte; | | 251 | u_int *ptep, npte; |
252 | | | 252 | |
253 | pmap_enter(pmap_kernel(), (vaddr_t)va, pa, | | 253 | pmap_enter(pmap_kernel(), (vaddr_t)va, pa, |
254 | VM_PROT_READ|VM_PROT_WRITE, VM_PROT_READ|VM_PROT_WRITE); | | 254 | VM_PROT_READ|VM_PROT_WRITE, VM_PROT_READ|VM_PROT_WRITE); |
255 | | | 255 | |
256 | ptep = kvtopte(va); | | 256 | ptep = kvtopte(va); |
257 | npte = *ptep & ~PG_CMASK; | | 257 | npte = *ptep & ~PG_CMASK; |
258 | | | 258 | |
259 | if ((flags & BUS_SPACE_MAP_CACHEABLE) == 0) | | 259 | if ((flags & BUS_SPACE_MAP_CACHEABLE) == 0) |
260 | npte |= PG_CI; | | 260 | npte |= PG_CI; |
261 | else if (mmutype == MMU_68040) | | 261 | else if (mmutype == MMU_68040) |
262 | npte |= PG_CCB; | | 262 | npte |= PG_CCB; |
263 | | | 263 | |
264 | *ptep = npte; | | 264 | *ptep = npte; |
265 | } | | 265 | } |
266 | pmap_update(pmap_kernel()); | | 266 | pmap_update(pmap_kernel()); |
267 | TBIAS(); | | 267 | TBIAS(); |
268 | return 0; | | 268 | return 0; |
269 | } | | 269 | } |
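
For reference, a standalone illustration of the page-rounding arithmetic in bus_mem_add_mapping() above: the mapping always covers whole pages, and the returned handle keeps the sub-page offset of the bus address. The 8 KB page size, tag base, and addresses are assumptions for illustration only.

/* sketch: compute the page-aligned mapping range and the handle offset */
#include <stdio.h>

#define PAGE_SIZE 8192UL
#define PGOFSET   (PAGE_SIZE - 1)
#define trunc_page(x) ((x) & ~PGOFSET)
#define round_page(x) (((x) + PGOFSET) & ~PGOFSET)

int main(void)
{
	unsigned long base = 0xff000000UL;	/* bus_space tag base (made up) */
	unsigned long bpa  = 0x8a41UL;		/* unaligned bus address (made up) */
	unsigned long size = 0x23UL;		/* region size (made up) */

	unsigned long pa    = trunc_page(bpa + base);
	unsigned long endpa = round_page((bpa + base + size) - 1);
	unsigned long va    = 0xd0000000UL;	/* pretend uvm_km_alloc() result */
	unsigned long bsh   = va + (bpa & PGOFSET);

	printf("map [0x%lx, 0x%lx): %lu page(s), handle 0x%lx\n",
	    pa, endpa, (endpa - pa) / PAGE_SIZE, bsh);
	return 0;
}
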
270 | | | 270 | |
271 | void | | 271 | void |
272 | bus_space_unmap(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t size) | | 272 | bus_space_unmap(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t size) |
273 | { | | 273 | { |
274 | vaddr_t va, endva; | | 274 | vaddr_t va, endva; |
275 | paddr_t bpa; | | 275 | paddr_t bpa; |
276 | | | 276 | |
277 | va = m68k_trunc_page(bsh); | | 277 | va = m68k_trunc_page(bsh); |
278 | endva = m68k_round_page(((char *)bsh + size) - 1); | | 278 | endva = m68k_round_page(((char *)bsh + size) - 1); |
279 | #ifdef DIAGNOSTIC | | 279 | #ifdef DIAGNOSTIC |
280 | if (endva < va) | | 280 | if (endva < va) |
281 | panic("unmap_iospace: overflow"); | | 281 | panic("unmap_iospace: overflow"); |
282 | #endif | | 282 | #endif |
283 | | | 283 | |
284 | (void)pmap_extract(pmap_kernel(), va, &bpa); | | 284 | (void)pmap_extract(pmap_kernel(), va, &bpa); |
285 | bpa += ((u_long)bsh & PGOFSET); | | 285 | bpa += ((u_long)bsh & PGOFSET); |
286 | | | 286 | |
287 | /* | | 287 | /* |
288 | * Free the kernel virtual mapping. | | 288 | * Free the kernel virtual mapping. |
289 | */ | | 289 | */ |
290 | if (!bootm_free(va, endva - va)) { | | 290 | if (!bootm_free(va, endva - va)) { |
291 | pmap_remove(pmap_kernel(), va, endva); | | 291 | pmap_remove(pmap_kernel(), va, endva); |
292 | pmap_update(pmap_kernel()); | | 292 | pmap_update(pmap_kernel()); |
293 | uvm_km_free(kernel_map, va, endva - va, UVM_KMF_VAONLY); | | 293 | uvm_km_free(kernel_map, va, endva - va, UVM_KMF_VAONLY); |
294 | } | | 294 | } |
295 | | | 295 | |
296 | /* | | 296 | /* |
297 | * Mark as free in the extent map. | | 297 | * Mark as free in the extent map. |
298 | */ | | 298 | */ |
299 | if (extent_free(iomem_ex, bpa, size, | | 299 | if (extent_free(iomem_ex, bpa, size, |
300 | EX_NOWAIT | (iomem_malloc_safe ? EX_MALLOCOK : 0))) { | | 300 | EX_NOWAIT | (iomem_malloc_safe ? EX_MALLOCOK : 0))) { |
301 | printf("bus_space_unmap: pa 0x%lx, size 0x%lx\n", bpa, size); | | 301 | printf("bus_space_unmap: pa 0x%lx, size 0x%lx\n", bpa, size); |
302 | printf("bus_space_unmap: can't free region\n"); | | 302 | printf("bus_space_unmap: can't free region\n"); |
303 | } | | 303 | } |
304 | } | | 304 | } |
305 | | | 305 | |
306 | /* | | 306 | /* |
307 | * Get a new handle for a subregion of an already-mapped area of bus space. | | 307 | * Get a new handle for a subregion of an already-mapped area of bus space. |
308 | */ | | 308 | */ |
309 | int | | 309 | int |
310 | bus_space_subregion(bus_space_tag_t t, bus_space_handle_t memh, | | 310 | bus_space_subregion(bus_space_tag_t t, bus_space_handle_t memh, |
311 | bus_size_t off, bus_size_t sz, bus_space_handle_t *mhp) | | 311 | bus_size_t off, bus_size_t sz, bus_space_handle_t *mhp) |
312 | { | | 312 | { |
313 | | | 313 | |
314 | *mhp = memh + off; | | 314 | *mhp = memh + off; |
315 | return 0; | | 315 | return 0; |
316 | } | | 316 | } |
317 | | | 317 | |
318 | paddr_t | | 318 | paddr_t |
319 | bus_space_mmap(bus_space_tag_t t, bus_addr_t addr, off_t off, int prot, | | 319 | bus_space_mmap(bus_space_tag_t t, bus_addr_t addr, off_t off, int prot, |
320 | int flags) | | 320 | int flags) |
321 | { | | 321 | { |
322 | | | 322 | |
323 | /* | | 323 | /* |
324 | * "addr" is the base address of the device we're mapping. | | 324 | * "addr" is the base address of the device we're mapping. |
325 | * "off" is the offset into that device. | | 325 | * "off" is the offset into that device. |
326 | * | | 326 | * |
327 | * Note we are called for each "page" in the device that | | 327 | * Note we are called for each "page" in the device that |
328 | * the upper layers want to map. | | 328 | * the upper layers want to map. |
329 | */ | | 329 | */ |
330 | return m68k_btop(addr + off); | | 330 | return m68k_btop(addr + off); |
331 | } | | 331 | } |
332 | | | 332 | |
333 | /* | | 333 | /* |
334 | * Common function for DMA map creation. May be called by bus-specific | | 334 | * Common function for DMA map creation. May be called by bus-specific |
335 | * DMA map creation functions. | | 335 | * DMA map creation functions. |
336 | */ | | 336 | */ |
337 | int | | 337 | int |
338 | _bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments, | | 338 | _bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments, |
339 | bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp) | | 339 | bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp) |
340 | { | | 340 | { |
341 | struct atari_bus_dmamap *map; | | 341 | struct atari_bus_dmamap *map; |
342 | void *mapstore; | | 342 | void *mapstore; |
343 | size_t mapsize; | | 343 | size_t mapsize; |
344 | | | 344 | |
345 | /* | | 345 | /* |
346 | * Allocate and initialize the DMA map. The end of the map | | 346 | * Allocate and initialize the DMA map. The end of the map |
347 | * is a variable-sized array of segments, so we allocate enough | | 347 | * is a variable-sized array of segments, so we allocate enough |
348 | * room for them in one shot. | | 348 | * room for them in one shot. |
349 | * | | 349 | * |
350 | * Note we don't preserve the WAITOK or NOWAIT flags. Preservation | | 350 | * Note we don't preserve the WAITOK or NOWAIT flags. Preservation |
351 | * of ALLOCNOW notifies others that we've reserved these resources, | | 351 | * of ALLOCNOW notifies others that we've reserved these resources, |
352 | * and they are not to be freed. | | 352 | * and they are not to be freed. |
353 | * | | 353 | * |
354 | * The bus_dmamap_t includes one bus_dma_segment_t, hence | | 354 | * The bus_dmamap_t includes one bus_dma_segment_t, hence |
355 | * the (nsegments - 1). | | 355 | * the (nsegments - 1). |
356 | */ | | 356 | */ |
357 | mapsize = sizeof(struct atari_bus_dmamap) + | | 357 | mapsize = sizeof(struct atari_bus_dmamap) + |
358 | (sizeof(bus_dma_segment_t) * (nsegments - 1)); | | 358 | (sizeof(bus_dma_segment_t) * (nsegments - 1)); |
359 | if ((mapstore = malloc(mapsize, M_DMAMAP, | | 359 | if ((mapstore = malloc(mapsize, M_DMAMAP, |
360 | (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) | | 360 | (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) |
361 | return ENOMEM; | | 361 | return ENOMEM; |
362 | | | 362 | |
363 | memset(mapstore, 0, mapsize); | | 363 | memset(mapstore, 0, mapsize); |
364 | map = (struct atari_bus_dmamap *)mapstore; | | 364 | map = (struct atari_bus_dmamap *)mapstore; |
365 | map->_dm_size = size; | | 365 | map->_dm_size = size; |
366 | map->_dm_segcnt = nsegments; | | 366 | map->_dm_segcnt = nsegments; |
367 | map->_dm_maxmaxsegsz = maxsegsz; | | 367 | map->_dm_maxmaxsegsz = maxsegsz; |
368 | map->_dm_boundary = boundary; | | 368 | map->_dm_boundary = boundary; |
369 | map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT); | | 369 | map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT); |
370 | map->dm_maxsegsz = maxsegsz; | | 370 | map->dm_maxsegsz = maxsegsz; |
371 | map->dm_mapsize = 0; /* no valid mappings */ | | 371 | map->dm_mapsize = 0; /* no valid mappings */ |
372 | map->dm_nsegs = 0; | | 372 | map->dm_nsegs = 0; |
373 | | | 373 | |
374 | *dmamp = map; | | 374 | *dmamp = map; |
375 | return 0; | | 375 | return 0; |
376 | } | | 376 | } |
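
For reference, a standalone sketch of the sizing idiom described in the comment above: the map structure declares a single trailing segment, so (nsegments - 1) additional segments are appended to the same allocation. The struct layout here is simplified and not the real struct atari_bus_dmamap.

/* sketch: one malloc() covers the header plus the trailing segment array */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct seg { unsigned long ds_addr, ds_len; };
struct map {
	int _dm_segcnt;
	struct seg dm_segs[1];		/* really _dm_segcnt entries */
};

int main(void)
{
	int nsegments = 8;
	size_t mapsize = sizeof(struct map) +
	    sizeof(struct seg) * (nsegments - 1);
	struct map *m = malloc(mapsize);

	if (m == NULL)
		return 1;
	memset(m, 0, mapsize);
	m->_dm_segcnt = nsegments;
	printf("map with %d segments needs %zu bytes\n", m->_dm_segcnt, mapsize);
	free(m);
	return 0;
}
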
377 | | | 377 | |
378 | /* | | 378 | /* |
379 | * Common function for DMA map destruction. May be called by bus-specific | | 379 | * Common function for DMA map destruction. May be called by bus-specific |
380 | * DMA map destruction functions. | | 380 | * DMA map destruction functions. |
381 | */ | | 381 | */ |
382 | void | | 382 | void |
383 | _bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map) | | 383 | _bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map) |
384 | { | | 384 | { |
385 | | | 385 | |
386 | free(map, M_DMAMAP); | | 386 | free(map, M_DMAMAP); |
387 | } | | 387 | } |
388 | | | 388 | |
389 | /* | | 389 | /* |
390 | * Common function for loading a DMA map with a linear buffer. May | | 390 | * Common function for loading a DMA map with a linear buffer. May |
391 | * be called by bus-specific DMA map load functions. | | 391 | * be called by bus-specific DMA map load functions. |
392 | */ | | 392 | */ |
393 | int | | 393 | int |
394 | _bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf, | | 394 | _bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf, |
395 | bus_size_t buflen, struct proc *p, int flags) | | 395 | bus_size_t buflen, struct proc *p, int flags) |
396 | { | | 396 | { |
397 | paddr_t lastaddr; | | 397 | paddr_t lastaddr; |
398 | int seg, error; | | 398 | int seg, error; |
399 | struct vmspace *vm; | | 399 | struct vmspace *vm; |
400 | | | 400 | |
401 | /* | | 401 | /* |
402 | * Make sure that on error condition we return "no valid mappings". | | 402 | * Make sure that on error condition we return "no valid mappings". |
403 | */ | | 403 | */ |
404 | map->dm_mapsize = 0; | | 404 | map->dm_mapsize = 0; |
405 | map->dm_nsegs = 0; | | 405 | map->dm_nsegs = 0; |
406 | KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz); | | 406 | KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz); |
407 | | | 407 | |
408 | if (buflen > map->_dm_size) | | 408 | if (buflen > map->_dm_size) |
409 | return EINVAL; | | 409 | return EINVAL; |
410 | | | 410 | |
411 | if (p != NULL) { | | 411 | if (p != NULL) { |
412 | vm = p->p_vmspace; | | 412 | vm = p->p_vmspace; |
413 | } else { | | 413 | } else { |
414 | vm = vmspace_kernel(); | | 414 | vm = vmspace_kernel(); |
415 | } | | 415 | } |
416 | | | 416 | |
417 | seg = 0; | | 417 | seg = 0; |
418 | error = _bus_dmamap_load_buffer(t, map, buf, buflen, vm, flags, | | 418 | error = _bus_dmamap_load_buffer(t, map, buf, buflen, vm, flags, |
419 | &lastaddr, &seg, 1); | | 419 | &lastaddr, &seg, 1); |
420 | if (error == 0) { | | 420 | if (error == 0) { |
421 | map->dm_mapsize = buflen; | | 421 | map->dm_mapsize = buflen; |
422 | map->dm_nsegs = seg + 1; | | 422 | map->dm_nsegs = seg + 1; |
423 | } | | 423 | } |
424 | return error; | | 424 | return error; |
425 | } | | 425 | } |
426 | | | 426 | |
427 | /* | | 427 | /* |
428 | * Like _bus_dmamap_load(), but for mbufs. | | 428 | * Like _bus_dmamap_load(), but for mbufs. |
429 | */ | | 429 | */ |
430 | int | | 430 | int |
431 | _bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0, | | 431 | _bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0, |
432 | int flags) | | 432 | int flags) |
433 | { | | 433 | { |
434 | paddr_t lastaddr; | | 434 | paddr_t lastaddr; |
435 | int seg, error, first; | | 435 | int seg, error, first; |
436 | struct mbuf *m; | | 436 | struct mbuf *m; |
437 | | | 437 | |
438 | /* | | 438 | /* |
439 | * Make sure that on error condition we return "no valid mappings." | | 439 | * Make sure that on error condition we return "no valid mappings." |
440 | */ | | 440 | */ |
441 | map->dm_mapsize = 0; | | 441 | map->dm_mapsize = 0; |
442 | map->dm_nsegs = 0; | | 442 | map->dm_nsegs = 0; |
443 | KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz); | | 443 | KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz); |
444 | | | 444 | |
445 | #ifdef DIAGNOSTIC | | 445 | #ifdef DIAGNOSTIC |
446 | if ((m0->m_flags & M_PKTHDR) == 0) | | 446 | if ((m0->m_flags & M_PKTHDR) == 0) |
447 | panic("_bus_dmamap_load_mbuf: no packet header"); | | 447 | panic("_bus_dmamap_load_mbuf: no packet header"); |
448 | #endif | | 448 | #endif |
449 | | | 449 | |
450 | if (m0->m_pkthdr.len > map->_dm_size) | | 450 | if (m0->m_pkthdr.len > map->_dm_size) |
451 | return EINVAL; | | 451 | return EINVAL; |
452 | | | 452 | |
453 | first = 1; | | 453 | first = 1; |
454 | seg = 0; | | 454 | seg = 0; |
455 | error = 0; | | 455 | error = 0; |
456 | for (m = m0; m != NULL && error == 0; m = m->m_next) { | | 456 | for (m = m0; m != NULL && error == 0; m = m->m_next) { |
457 | if (m->m_len == 0) | | 457 | if (m->m_len == 0) |
458 | continue; | | 458 | continue; |
459 | error = _bus_dmamap_load_buffer(t, map, m->m_data, m->m_len, | | 459 | error = _bus_dmamap_load_buffer(t, map, m->m_data, m->m_len, |
460 | vmspace_kernel(), flags, &lastaddr, &seg, first); | | 460 | vmspace_kernel(), flags, &lastaddr, &seg, first); |
461 | first = 0; | | 461 | first = 0; |
462 | } | | 462 | } |
463 | if (error == 0) { | | 463 | if (error == 0) { |
464 | map->dm_mapsize = m0->m_pkthdr.len; | | 464 | map->dm_mapsize = m0->m_pkthdr.len; |
465 | map->dm_nsegs = seg + 1; | | 465 | map->dm_nsegs = seg + 1; |
466 | } | | 466 | } |
467 | return error; | | 467 | return error; |
468 | } | | 468 | } |
469 | | | 469 | |
470 | /* | | 470 | /* |
471 | * Like _bus_dmamap_load(), but for uios. | | 471 | * Like _bus_dmamap_load(), but for uios. |
472 | */ | | 472 | */ |
473 | int | | 473 | int |
474 | _bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio, | | 474 | _bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio, |
475 | int flags) | | 475 | int flags) |
476 | { | | 476 | { |
477 | paddr_t lastaddr; | | 477 | paddr_t lastaddr; |
478 | int seg, i, error, first; | | 478 | int seg, i, error, first; |
479 | bus_size_t minlen, resid; | | 479 | bus_size_t minlen, resid; |
480 | struct iovec *iov; | | 480 | struct iovec *iov; |
481 | void *addr; | | 481 | void *addr; |
482 | | | 482 | |
483 | /* | | 483 | /* |
484 | * Make sure that on error condition we return "no valid mappings." | | 484 | * Make sure that on error condition we return "no valid mappings." |
485 | */ | | 485 | */ |
486 | map->dm_mapsize = 0; | | 486 | map->dm_mapsize = 0; |
487 | map->dm_nsegs = 0; | | 487 | map->dm_nsegs = 0; |
488 | KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz); | | 488 | KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz); |
489 | | | 489 | |
490 | resid = uio->uio_resid; | | 490 | resid = uio->uio_resid; |
491 | iov = uio->uio_iov; | | 491 | iov = uio->uio_iov; |
492 | | | 492 | |
493 | first = 1; | | 493 | first = 1; |
494 | seg = 0; | | 494 | seg = 0; |
495 | error = 0; | | 495 | error = 0; |
496 | for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) { | | 496 | for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) { |
497 | /* | | 497 | /* |
498 | * Now at the first iovec to load. Load each iovec | | 498 | * Now at the first iovec to load. Load each iovec |
499 | * until we have exhausted the residual count. | | 499 | * until we have exhausted the residual count. |
500 | */ | | 500 | */ |
501 | minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len; | | 501 | minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len; |
502 | addr = (void *)iov[i].iov_base; | | 502 | addr = (void *)iov[i].iov_base; |
503 | | | 503 | |
504 | error = _bus_dmamap_load_buffer(t, map, addr, minlen, | | 504 | error = _bus_dmamap_load_buffer(t, map, addr, minlen, |
505 | uio->uio_vmspace, flags, &lastaddr, &seg, first); | | 505 | uio->uio_vmspace, flags, &lastaddr, &seg, first); |
506 | first = 0; | | 506 | first = 0; |
507 | | | 507 | |
508 | resid -= minlen; | | 508 | resid -= minlen; |
509 | } | | 509 | } |
510 | if (error == 0) { | | 510 | if (error == 0) { |
511 | map->dm_mapsize = uio->uio_resid; | | 511 | map->dm_mapsize = uio->uio_resid; |
512 | map->dm_nsegs = seg + 1; | | 512 | map->dm_nsegs = seg + 1; |
513 | } | | 513 | } |
514 | return error; | | 514 | return error; |
515 | } | | 515 | } |
516 | | | 516 | |
517 | /* | | 517 | /* |
518 | * Like _bus_dmamap_load(), but for raw memory allocated with | | 518 | * Like _bus_dmamap_load(), but for raw memory allocated with |
519 | * bus_dmamem_alloc(). | | 519 | * bus_dmamem_alloc(). |
520 | */ | | 520 | */ |
521 | int | | 521 | int |
522 | _bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, | | 522 | _bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, |
523 | bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags) | | 523 | bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags) |
524 | { | | 524 | { |
525 | | | 525 | |
526 | panic("bus_dmamap_load_raw: not implemented"); | | 526 | panic("bus_dmamap_load_raw: not implemented"); |
527 | } | | 527 | } |
528 | | | 528 | |
529 | /* | | 529 | /* |
530 | * Common function for unloading a DMA map. May be called by | | 530 | * Common function for unloading a DMA map. May be called by |
531 | * bus-specific DMA map unload functions. | | 531 | * bus-specific DMA map unload functions. |
532 | */ | | 532 | */ |
533 | void | | 533 | void |
534 | _bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map) | | 534 | _bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map) |
535 | { | | 535 | { |
536 | | | 536 | |
537 | /* | | 537 | /* |
538 | * No resources to free; just mark the mappings as | | 538 | * No resources to free; just mark the mappings as |
539 | * invalid. | | 539 | * invalid. |
540 | */ | | 540 | */ |
541 | map->dm_maxsegsz = map->_dm_maxmaxsegsz; | | 541 | map->dm_maxsegsz = map->_dm_maxmaxsegsz; |
542 | map->dm_mapsize = 0; | | 542 | map->dm_mapsize = 0; |
543 | map->dm_nsegs = 0; | | 543 | map->dm_nsegs = 0; |
544 | } | | 544 | } |
545 | | | 545 | |
546 | /* | | 546 | /* |
547 | * Common function for DMA map synchronization. May be called | | 547 | * Common function for DMA map synchronization. May be called |
548 | * by bus-specific DMA map synchronization functions. | | 548 | * by bus-specific DMA map synchronization functions. |
549 | */ | | 549 | */ |
550 | void | | 550 | void |
551 | _bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t off, | | 551 | _bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset, |
552 | bus_size_t len, int ops) | | 552 | bus_size_t len, int ops) |
553 | { | | 553 | { |
554 | #if defined(M68040) || defined(M68060) | | 554 | #if defined(M68040) || defined(M68060) |
555 | int i, pa_off, inc, seglen; | | 555 | bus_addr_t p, e, ps, pe; |
556 | u_long pa, end_pa; | | 556 | bus_size_t seglen; |
| | | 557 | bus_dma_segment_t *seg; |
| | | 558 | int i; |
| | | 559 | #endif |
557 | | | 560 | |
558 | pa_off = t->_displacement; | | 561 | #if defined(M68020) || defined(M68030) |
| | | 562 | #if defined(M68040) || defined(M68060) |
| | | 563 | if (cputype == CPU_68020 || cputype == CPU_68030) |
| | | 564 | #endif |
| | | 565 | /* assume no L2 physical cache */ |
| | | 566 | return; |
| | | 567 | #endif |
559 | | | 568 | |
560 | /* Flush granularity */ | | 569 | #if defined(M68040) || defined(M68060) |
561 | inc = (len > 1024) ? PAGE_SIZE : 16; | | 570 | /* If the whole DMA map is uncached, do nothing. */ |
| | | 571 | if ((map->_dm_flags & BUS_DMA_COHERENT) != 0) |
| | | 572 | return; |
| | | 573 | |
| | | 574 | /* Short-circuit for unsupported `ops' */ |
| | | 575 | if ((ops & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) == 0) |
| | | 576 | return; |
562 | | | 577 | |
563 | for (i = 0; i < map->dm_nsegs && len > 0; i++) { | | 578 | /* |
564 | if (map->dm_segs[i].ds_len <= off) { | | 579 | * flush/purge the cache. |
| | | 580 | */ |
| | | 581 | for (i = 0; i < map->dm_nsegs && len != 0; i++) { |
| | | 582 | seg = &map->dm_segs[i]; |
| | | 583 | if (seg->ds_len <= offset) { |
565 | /* Segment irrelevant - before requested offset */ | | 584 | /* Segment irrelevant - before requested offset */ |
566 | off -= map->dm_segs[i].ds_len; | | 585 | offset -= seg->ds_len; |
567 | continue; | | 586 | continue; |
568 | } | | 587 | } |
569 | seglen = map->dm_segs[i].ds_len - off; | | 588 | |
| | | 589 | /* |
| | | 590 | * Now at the first segment to sync; nail |
| | | 591 | * each segment until we have exhausted the |
| | | 592 | * length. |
| | | 593 | */ |
| | | 594 | seglen = seg->ds_len - offset; |
570 | if (seglen > len) | | 595 | if (seglen > len) |
571 | seglen = len; | | 596 | seglen = len; |
572 | len -= seglen; | | | |
573 | pa = map->dm_segs[i].ds_addr + off - pa_off; | | | |
574 | end_pa = pa + seglen; | | | |
575 | | | 597 | |
576 | if (inc == 16) { | | 598 | ps = seg->ds_addr + offset; |
577 | pa &= ~15; | | 599 | pe = ps + seglen; |
578 | while (pa < end_pa) { | | 600 | |
579 | DCFL(pa); | | 601 | if (ops & BUS_DMASYNC_PREWRITE) { |
580 | pa += 16; | | 602 | p = ps & ~CACHELINE_MASK; |
| | | 603 | e = (pe + CACHELINE_MASK) & ~CACHELINE_MASK; |
| | | 604 | |
| | | 605 | /* flush cacheline */ |
| | | 606 | while ((p < e) && (p & (CACHELINE_SIZE * 8 - 1)) != 0) { |
| | | 607 | DCFL(p); |
| | | 608 | p += CACHELINE_SIZE; |
581 | } | | 609 | } |
582 | } else { | | 610 | |
583 | pa &= ~PGOFSET; | | 611 | /* flush cachelines per 128bytes */ |
584 | while (pa < end_pa) { | | 612 | while ((p < e) && (p & PAGE_MASK) != 0) { |
585 | DCFP(pa); | | 613 | DCFL(p); |
586 | pa += PAGE_SIZE; | | 614 | p += CACHELINE_SIZE; |
| | | 615 | DCFL(p); |
| | | 616 | p += CACHELINE_SIZE; |
| | | 617 | DCFL(p); |
| | | 618 | p += CACHELINE_SIZE; |
| | | 619 | DCFL(p); |
| | | 620 | p += CACHELINE_SIZE; |
| | | 621 | DCFL(p); |
| | | 622 | p += CACHELINE_SIZE; |
| | | 623 | DCFL(p); |
| | | 624 | p += CACHELINE_SIZE; |
| | | 625 | DCFL(p); |
| | | 626 | p += CACHELINE_SIZE; |
| | | 627 | DCFL(p); |
| | | 628 | p += CACHELINE_SIZE; |
| | | 629 | } |
| | | 630 | |
| | | 631 | /* flush page */ |
| | | 632 | while (p + PAGE_SIZE <= e) { |
| | | 633 | DCFP(p); |
| | | 634 | p += PAGE_SIZE; |
| | | 635 | } |
| | | 636 | |
| | | 637 | /* flush cachelines per 128bytes */ |
| | | 638 | while (p + CACHELINE_SIZE * 8 <= e) { |
| | | 639 | DCFL(p); |
| | | 640 | p += CACHELINE_SIZE; |
| | | 641 | DCFL(p); |
| | | 642 | p += CACHELINE_SIZE; |
| | | 643 | DCFL(p); |
| | | 644 | p += CACHELINE_SIZE; |
| | | 645 | DCFL(p); |
| | | 646 | p += CACHELINE_SIZE; |
| | | 647 | DCFL(p); |
| | | 648 | p += CACHELINE_SIZE; |
| | | 649 | DCFL(p); |
| | | 650 | p += CACHELINE_SIZE; |
| | | 651 | DCFL(p); |
| | | 652 | p += CACHELINE_SIZE; |
| | | 653 | DCFL(p); |
| | | 654 | p += CACHELINE_SIZE; |
| | | 655 | } |
| | | 656 | |
| | | 657 | /* flush cacheline */ |
| | | 658 | while (p < e) { |
| | | 659 | DCFL(p); |
| | | 660 | p += CACHELINE_SIZE; |
587 | } | | 661 | } |
588 | } | | 662 | } |
| | | 663 | |
| | | 664 | /* |
| | | 665 | * Normally, the `PREREAD' flag instructs us to purge the |
| | | 666 | * cache for the specified offset and length. However, if |
| | | 667 | * the offset/length is not aligned to a cacheline boundary, |
| | | 668 | * we may end up purging some legitimate data from the |
| | | 669 | * start/end of the cache. In such a case, *flush* the |
| | | 670 | * cachelines at the start and end of the required region. |
| | | 671 | */ |
| | | 672 | else if (ops & BUS_DMASYNC_PREREAD) { |
| | | 673 | /* flush cacheline on start boundary */ |
| | | 674 | if (ps & CACHELINE_MASK) { |
| | | 675 | DCFL(ps & ~CACHELINE_MASK); |
| | | 676 | } |
| | | 677 | |
| | | 678 | p = (ps + CACHELINE_MASK) & ~CACHELINE_MASK; |
| | | 679 | e = pe & ~CACHELINE_MASK; |
| | | 680 | |
| | | 681 | /* purge cacheline */ |
| | | 682 | while ((p < e) && (p & (CACHELINE_SIZE * 8 - 1)) != 0) { |
| | | 683 | DCPL(p); |
| | | 684 | p += CACHELINE_SIZE; |
| | | 685 | } |
| | | 686 | |
| | | 687 | /* purge cachelines per 128bytes */ |
| | | 688 | while ((p < e) && (p & PAGE_MASK) != 0) { |
| | | 689 | DCPL(p); |
| | | 690 | p += CACHELINE_SIZE; |
| | | 691 | DCPL(p); |
| | | 692 | p += CACHELINE_SIZE; |
| | | 693 | DCPL(p); |
| | | 694 | p += CACHELINE_SIZE; |
| | | 695 | DCPL(p); |
| | | 696 | p += CACHELINE_SIZE; |
| | | 697 | DCPL(p); |
| | | 698 | p += CACHELINE_SIZE; |
| | | 699 | DCPL(p); |
| | | 700 | p += CACHELINE_SIZE; |
| | | 701 | DCPL(p); |
| | | 702 | p += CACHELINE_SIZE; |
| | | 703 | DCPL(p); |
| | | 704 | p += CACHELINE_SIZE; |
| | | 705 | } |
| | | 706 | |
| | | 707 | /* purge page */ |
| | | 708 | while (p + PAGE_SIZE <= e) { |
| | | 709 | DCPP(p); |
| | | 710 | p += PAGE_SIZE; |
| | | 711 | } |
| | | 712 | |
| | | 713 | /* purge cachelines per 128bytes */ |
| | | 714 | while (p + CACHELINE_SIZE * 8 <= e) { |
| | | 715 | DCPL(p); |
| | | 716 | p += CACHELINE_SIZE; |
| | | 717 | DCPL(p); |
| | | 718 | p += CACHELINE_SIZE; |
| | | 719 | DCPL(p); |
| | | 720 | p += CACHELINE_SIZE; |
| | | 721 | DCPL(p); |
| | | 722 | p += CACHELINE_SIZE; |
| | | 723 | DCPL(p); |
| | | 724 | p += CACHELINE_SIZE; |
| | | 725 | DCPL(p); |
| | | 726 | p += CACHELINE_SIZE; |
| | | 727 | DCPL(p); |
| | | 728 | p += CACHELINE_SIZE; |
| | | 729 | DCPL(p); |
| | | 730 | p += CACHELINE_SIZE; |
| | | 731 | } |
| | | 732 | |
| | | 733 | /* purge cacheline */ |
| | | 734 | while (p < e) { |
| | | 735 | DCPL(p); |
| | | 736 | p += CACHELINE_SIZE; |
| | | 737 | } |
| | | 738 | |
| | | 739 | /* flush cacheline on end boundary */ |
| | | 740 | if (p < pe) { |
| | | 741 | DCFL(p); |
| | | 742 | } |
| | | 743 | } |
| | | 744 | offset = 0; |
| | | 745 | len -= seglen; |
589 | } | | 746 | } |
590 | #endif | | 747 | #endif /* defined(M68040) || defined(M68060) */ |
591 | } | | 748 | } |
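
For reference, a standalone user-space sketch of the range splitting used by the new BUS_DMASYNC_PREWRITE path above: single cache lines up to a 128-byte boundary, unrolled 128-byte groups up to a page boundary, whole pages, then the tail. DCFL()/DCFP() are stubbed with printf, and the 16-byte cache line and 8 KB page are assumptions for illustration.

/* sketch: order of cache-flush operations over a physical range [ps, pe) */
#include <stdio.h>
#include <stdint.h>

#define CACHELINE_SIZE 16UL
#define CACHELINE_MASK (CACHELINE_SIZE - 1)
#define PAGE_SIZE 8192UL
#define PAGE_MASK (PAGE_SIZE - 1)

static void DCFL(uintptr_t pa) { printf("flush line 0x%lx\n", (unsigned long)pa); }
static void DCFP(uintptr_t pa) { printf("flush page 0x%lx\n", (unsigned long)pa); }

static void flush_range(uintptr_t ps, uintptr_t pe)
{
	uintptr_t p = ps & ~(uintptr_t)CACHELINE_MASK;
	uintptr_t e = (pe + CACHELINE_MASK) & ~(uintptr_t)CACHELINE_MASK;
	int i;

	/* single lines up to the next 128-byte boundary */
	while (p < e && (p & (CACHELINE_SIZE * 8 - 1)) != 0) {
		DCFL(p);
		p += CACHELINE_SIZE;
	}
	/* unrolled 128-byte groups up to the next page boundary
	 * (as in the kernel code, a group may run a little past e;
	 * the extra flushes are harmless for a writeback flush) */
	while (p < e && (p & PAGE_MASK) != 0)
		for (i = 0; i < 8; i++) {
			DCFL(p);
			p += CACHELINE_SIZE;
		}
	/* whole pages */
	while (p + PAGE_SIZE <= e) {
		DCFP(p);
		p += PAGE_SIZE;
	}
	/* trailing unrolled 128-byte groups */
	while (p + CACHELINE_SIZE * 8 <= e)
		for (i = 0; i < 8; i++) {
			DCFL(p);
			p += CACHELINE_SIZE;
		}
	/* remaining single lines */
	while (p < e) {
		DCFL(p);
		p += CACHELINE_SIZE;
	}
}

int main(void)
{
	flush_range(0x10038, 0x10238);
	return 0;
}
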
592 | | | 749 | |
593 | /* | | 750 | /* |
594 | * Common function for DMA-safe memory allocation. May be called | | 751 | * Common function for DMA-safe memory allocation. May be called |
595 | * by bus-specific DMA memory allocation functions. | | 752 | * by bus-specific DMA memory allocation functions. |
596 | */ | | 753 | */ |
597 | int | | 754 | int |
598 | bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment, | | 755 | bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment, |
599 | bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs, | | 756 | bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs, |
600 | int flags) | | 757 | int flags) |
601 | { | | 758 | { |
602 | | | 759 | |
603 | return bus_dmamem_alloc_range(t, size, alignment, boundary, | | 760 | return bus_dmamem_alloc_range(t, size, alignment, boundary, |
604 | segs, nsegs, rsegs, flags, 0, trunc_page(avail_end)); | | 761 | segs, nsegs, rsegs, flags, 0, trunc_page(avail_end)); |
605 | } | | 762 | } |
606 | | | 763 | |
607 | /* | | 764 | /* |
608 | * Common function for freeing DMA-safe memory. May be called by | | 765 | * Common function for freeing DMA-safe memory. May be called by |
609 | * bus-specific DMA memory free functions. | | 766 | * bus-specific DMA memory free functions. |
610 | */ | | 767 | */ |
611 | void | | 768 | void |
612 | bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs) | | 769 | bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs) |
613 | { | | 770 | { |
614 | struct vm_page *m; | | 771 | struct vm_page *m; |
615 | bus_addr_t addr, offset; | | 772 | bus_addr_t addr, offset; |
616 | struct pglist mlist; | | 773 | struct pglist mlist; |
617 | int curseg; | | 774 | int curseg; |
618 | | | 775 | |
619 | offset = t->_displacement; | | 776 | offset = t->_displacement; |
620 | | | 777 | |
621 | /* | | 778 | /* |
622 | * Build a list of pages to free back to the VM system. | | 779 | * Build a list of pages to free back to the VM system. |
623 | */ | | 780 | */ |
624 | TAILQ_INIT(&mlist); | | 781 | TAILQ_INIT(&mlist); |
625 | for (curseg = 0; curseg < nsegs; curseg++) { | | 782 | for (curseg = 0; curseg < nsegs; curseg++) { |
626 | for (addr = segs[curseg].ds_addr; | | 783 | for (addr = segs[curseg].ds_addr; |
627 | addr < (segs[curseg].ds_addr + segs[curseg].ds_len); | | 784 | addr < (segs[curseg].ds_addr + segs[curseg].ds_len); |
628 | addr += PAGE_SIZE) { | | 785 | addr += PAGE_SIZE) { |
629 | m = PHYS_TO_VM_PAGE(addr - offset); | | 786 | m = PHYS_TO_VM_PAGE(addr - offset); |
630 | TAILQ_INSERT_TAIL(&mlist, m, pageq.queue); | | 787 | TAILQ_INSERT_TAIL(&mlist, m, pageq.queue); |
631 | } | | 788 | } |
632 | } | | 789 | } |
633 | | | 790 | |
634 | uvm_pglistfree(&mlist); | | 791 | uvm_pglistfree(&mlist); |
635 | } | | 792 | } |
636 | | | 793 | |
637 | /* | | 794 | /* |
638 | * Common function for mapping DMA-safe memory. May be called by | | 795 | * Common function for mapping DMA-safe memory. May be called by |
639 | * bus-specific DMA memory map functions. | | 796 | * bus-specific DMA memory map functions. |
640 | */ | | 797 | */ |
641 | int | | 798 | int |
642 | bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, | | 799 | bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, |
643 | size_t size, void **kvap, int flags) | | 800 | size_t size, void **kvap, int flags) |
644 | { | | 801 | { |
645 | vaddr_t va; | | 802 | vaddr_t va; |
646 | bus_addr_t addr, offset; | | 803 | bus_addr_t addr, offset; |
647 | int curseg; | | 804 | int curseg; |
648 | const uvm_flag_t kmflags = | | 805 | const uvm_flag_t kmflags = |
649 | (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0; | | 806 | (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0; |
650 | | | 807 | |
651 | offset = t->_displacement; | | 808 | offset = t->_displacement; |
652 | | | 809 | |
653 | size = round_page(size); | | 810 | size = round_page(size); |
654 | | | 811 | |
655 | va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags); | | 812 | va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags); |
656 | | | 813 | |
657 | if (va == 0) | | 814 | if (va == 0) |
658 | return ENOMEM; | | 815 | return ENOMEM; |
659 | | | 816 | |
660 | *kvap = (void *)va; | | 817 | *kvap = (void *)va; |
661 | | | 818 | |
662 | for (curseg = 0; curseg < nsegs; curseg++) { | | 819 | for (curseg = 0; curseg < nsegs; curseg++) { |
663 | for (addr = segs[curseg].ds_addr; | | 820 | for (addr = segs[curseg].ds_addr; |
664 | addr < (segs[curseg].ds_addr + segs[curseg].ds_len); | | 821 | addr < (segs[curseg].ds_addr + segs[curseg].ds_len); |
665 | addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) { | | 822 | addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) { |
666 | if (size == 0) | | 823 | if (size == 0) |
667 | panic("_bus_dmamem_map: size botch"); | | 824 | panic("_bus_dmamem_map: size botch"); |
668 | pmap_enter(pmap_kernel(), va, addr - offset, | | 825 | pmap_enter(pmap_kernel(), va, addr - offset, |
669 | VM_PROT_READ | VM_PROT_WRITE, | | 826 | VM_PROT_READ | VM_PROT_WRITE, |
670 | VM_PROT_READ | VM_PROT_WRITE); | | 827 | VM_PROT_READ | VM_PROT_WRITE); |
671 | } | | 828 | } |
672 | } | | 829 | } |
673 | pmap_update(pmap_kernel()); | | 830 | pmap_update(pmap_kernel()); |
674 | | | 831 | |
675 | return 0; | | 832 | return 0; |
676 | } | | 833 | } |
677 | | | 834 | |
678 | /* | | 835 | /* |
679 | * Common function for unmapping DMA-safe memory. May be called by | | 836 | * Common function for unmapping DMA-safe memory. May be called by |
680 | * bus-specific DMA memory unmapping functions. | | 837 | * bus-specific DMA memory unmapping functions. |
681 | */ | | 838 | */ |
682 | void | | 839 | void |
683 | bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size) | | 840 | bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size) |
684 | { | | 841 | { |
685 | | | 842 | |
686 | #ifdef DIAGNOSTIC | | 843 | #ifdef DIAGNOSTIC |
687 | if ((u_long)kva & PGOFSET) | | 844 | if ((u_long)kva & PGOFSET) |
688 | panic("_bus_dmamem_unmap"); | | 845 | panic("_bus_dmamem_unmap"); |
689 | #endif | | 846 | #endif |
690 | | | 847 | |
691 | size = round_page(size); | | 848 | size = round_page(size); |
692 | | | 849 | |
693 | pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size); | | 850 | pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size); |
694 | pmap_update(pmap_kernel()); | | 851 | pmap_update(pmap_kernel()); |
695 | uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY); | | 852 | uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY); |
696 | } | | 853 | } |
697 | | | 854 | |
698 | /* | | 855 | /* |
699 | * Common function for mmap(2)'ing DMA-safe memory. May be called by | | 856 | * Common function for mmap(2)'ing DMA-safe memory. May be called by |
700 | * bus-specific DMA mmap(2)'ing functions. | | 857 | * bus-specific DMA mmap(2)'ing functions. |
701 | */ | | 858 | */ |
702 | paddr_t | | 859 | paddr_t |
703 | bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, off_t off, | | 860 | bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, off_t off, |
704 | int prot, int flags) | | 861 | int prot, int flags) |
705 | { | | 862 | { |
706 | int i, offset; | | 863 | int i, offset; |
707 | | | 864 | |
708 | offset = t->_displacement; | | 865 | offset = t->_displacement; |
709 | | | 866 | |
710 | for (i = 0; i < nsegs; i++) { | | 867 | for (i = 0; i < nsegs; i++) { |
711 | #ifdef DIAGNOSTIC | | 868 | #ifdef DIAGNOSTIC |
712 | if (off & PGOFSET) | | 869 | if (off & PGOFSET) |
713 | panic("_bus_dmamem_mmap: offset unaligned"); | | 870 | panic("_bus_dmamem_mmap: offset unaligned"); |
714 | if (segs[i].ds_addr & PGOFSET) | | 871 | if (segs[i].ds_addr & PGOFSET) |
715 | panic("_bus_dmamem_mmap: segment unaligned"); | | 872 | panic("_bus_dmamem_mmap: segment unaligned"); |
716 | if (segs[i].ds_len & PGOFSET) | | 873 | if (segs[i].ds_len & PGOFSET) |
717 | panic("_bus_dmamem_mmap: segment size not multiple" | | 874 | panic("_bus_dmamem_mmap: segment size not multiple" |
718 | " of page size"); | | 875 | " of page size"); |
719 | #endif | | 876 | #endif |
720 | if (off >= segs[i].ds_len) { | | 877 | if (off >= segs[i].ds_len) { |
721 | off -= segs[i].ds_len; | | 878 | off -= segs[i].ds_len; |
722 | continue; | | 879 | continue; |
723 | } | | 880 | } |
724 | | | 881 | |
725 | return (m68k_btop((char *)segs[i].ds_addr - offset + off)); | | 882 | return (m68k_btop((char *)segs[i].ds_addr - offset + off)); |
726 | } | | 883 | } |
727 | | | 884 | |
728 | /* Page not found. */ | | 885 | /* Page not found. */ |
729 | return -1; | | 886 | return -1; |
730 | } | | 887 | } |
731 | | | 888 | |
732 | /********************************************************************** | | 889 | /********************************************************************** |
733 | * DMA utility functions | | 890 | * DMA utility functions |
734 | **********************************************************************/ | | 891 | **********************************************************************/ |
735 | | | 892 | |
736 | /* | | 893 | /* |
737 | * Utility function to load a linear buffer. lastaddrp holds state | | 894 | * Utility function to load a linear buffer. lastaddrp holds state |
738 | * between invocations (for multiple-buffer loads). segp contains | | 895 | * between invocations (for multiple-buffer loads). segp contains |
739 | * the starting segment on entrance, and the ending segment on exit. | | 896 | * the starting segment on entrance, and the ending segment on exit. |
740 | * first indicates if this is the first invocation of this function. | | 897 | * first indicates if this is the first invocation of this function. |
741 | */ | | 898 | */ |
742 | static int | | 899 | static int |
743 | _bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf, | | 900 | _bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf, |
744 | bus_size_t buflen, struct vmspace *vm, int flags, paddr_t *lastaddrp, | | 901 | bus_size_t buflen, struct vmspace *vm, int flags, paddr_t *lastaddrp, |
745 | int *segp, int first) | | 902 | int *segp, int first) |
746 | { | | 903 | { |
747 | bus_size_t sgsize; | | 904 | bus_size_t sgsize; |
748 | bus_addr_t curaddr, lastaddr, offset, baddr, bmask; | | 905 | bus_addr_t curaddr, lastaddr, offset, baddr, bmask; |
749 | vaddr_t vaddr = (vaddr_t)buf; | | 906 | vaddr_t vaddr = (vaddr_t)buf; |
750 | int seg; | | 907 | int seg; |
751 | pmap_t pmap; | | 908 | pmap_t pmap; |
752 | | | 909 | |
753 | offset = t->_displacement; | | 910 | offset = t->_displacement; |
754 | | | 911 | |
755 | pmap = vm_map_pmap(&vm->vm_map); | | 912 | pmap = vm_map_pmap(&vm->vm_map); |
756 | | | 913 | |
757 | lastaddr = *lastaddrp; | | 914 | lastaddr = *lastaddrp; |
758 | bmask = ~(map->_dm_boundary - 1); | | 915 | bmask = ~(map->_dm_boundary - 1); |
759 | | | 916 | |
760 | for (seg = *segp; buflen > 0 ; ) { | | 917 | for (seg = *segp; buflen > 0 ; ) { |
761 | /* | | 918 | /* |
762 | * Get the physical address for this segment. | | 919 | * Get the physical address for this segment. |
763 | */ | | 920 | */ |
764 | (void) pmap_extract(pmap, vaddr, &curaddr); | | 921 | (void) pmap_extract(pmap, vaddr, &curaddr); |
765 | | | 922 | |
766 | /* | | 923 | /* |
767 | * Compute the segment size, and adjust counts. | | 924 | * Compute the segment size, and adjust counts. |
768 | */ | | 925 | */ |
769 | sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET); | | 926 | sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET); |
770 | if (buflen < sgsize) | | 927 | if (buflen < sgsize) |
771 | sgsize = buflen; | | 928 | sgsize = buflen; |
772 | | | 929 | |
773 | /* | | 930 | /* |
774 | * Make sure we don't cross any boundaries. | | 931 | * Make sure we don't cross any boundaries. |
775 | */ | | 932 | */ |
776 | if (map->_dm_boundary > 0) { | | 933 | if (map->_dm_boundary > 0) { |
777 | baddr = (curaddr + map->_dm_boundary) & bmask; | | 934 | baddr = (curaddr + map->_dm_boundary) & bmask; |
778 | if (sgsize > (baddr - curaddr)) | | 935 | if (sgsize > (baddr - curaddr)) |
779 | sgsize = (baddr - curaddr); | | 936 | sgsize = (baddr - curaddr); |
780 | } | | 937 | } |
781 | | | 938 | |
782 | /* | | 939 | /* |
783 | * Insert chunk into a segment, coalescing with | | 940 | * Insert chunk into a segment, coalescing with |
784 | * previous segment if possible. | | 941 | * previous segment if possible. |
785 | */ | | 942 | */ |
786 | if (first) { | | 943 | if (first) { |
787 | map->dm_segs[seg].ds_addr = curaddr + offset; | | 944 | map->dm_segs[seg].ds_addr = curaddr + offset; |
788 | map->dm_segs[seg].ds_len = sgsize; | | 945 | map->dm_segs[seg].ds_len = sgsize; |
789 | first = 0; | | 946 | first = 0; |
790 | } else { | | 947 | } else { |
791 | if (curaddr == lastaddr && | | 948 | if (curaddr == lastaddr && |
792 | (map->dm_segs[seg].ds_len + sgsize) <= | | 949 | (map->dm_segs[seg].ds_len + sgsize) <= |
793 | map->dm_maxsegsz && | | 950 | map->dm_maxsegsz && |
794 | (map->_dm_boundary == 0 || | | 951 | (map->_dm_boundary == 0 || |
795 | (map->dm_segs[seg].ds_addr & bmask) == | | 952 | (map->dm_segs[seg].ds_addr & bmask) == |
796 | (curaddr & bmask))) | | 953 | (curaddr & bmask))) |
797 | map->dm_segs[seg].ds_len += sgsize; | | 954 | map->dm_segs[seg].ds_len += sgsize; |
798 | else { | | 955 | else { |
799 | if (++seg >= map->_dm_segcnt) | | 956 | if (++seg >= map->_dm_segcnt) |
800 | break; | | 957 | break; |
801 | map->dm_segs[seg].ds_addr = curaddr + offset; | | 958 | map->dm_segs[seg].ds_addr = curaddr + offset; |
802 | map->dm_segs[seg].ds_len = sgsize; | | 959 | map->dm_segs[seg].ds_len = sgsize; |
803 | } | | 960 | } |
804 | } | | 961 | } |
805 | | | 962 | |
806 | lastaddr = curaddr + sgsize; | | 963 | lastaddr = curaddr + sgsize; |
807 | vaddr += sgsize; | | 964 | vaddr += sgsize; |
808 | buflen -= sgsize; | | 965 | buflen -= sgsize; |
809 | } | | 966 | } |
810 | | | 967 | |
811 | *segp = seg; | | 968 | *segp = seg; |
812 | *lastaddrp = lastaddr; | | 969 | *lastaddrp = lastaddr; |
813 | | | 970 | |
814 | /* | | 971 | /* |
815 | * Did we fit? | | 972 | * Did we fit? |
816 | */ | | 973 | */ |
817 | if (buflen != 0) | | 974 | if (buflen != 0) |
818 | return EFBIG; /* XXX better return value here? */ | | 975 | return EFBIG; /* XXX better return value here? */ |
819 | return 0; | | 976 | return 0; |
820 | } | | 977 | } |
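
For reference, a worked standalone example of the boundary clamp inside _bus_dmamap_load_buffer() above: a chunk is trimmed so a segment never crosses a _dm_boundary-aligned address. All values are made up for illustration.

/* sketch: clamp a proposed chunk so it stops at the next DMA boundary */
#include <stdio.h>

int main(void)
{
	unsigned long boundary = 0x10000UL;	/* 64 KB boundary (made up) */
	unsigned long bmask    = ~(boundary - 1);
	unsigned long curaddr  = 0x2fc00UL;	/* current physical address */
	unsigned long sgsize   = 0x2000UL;	/* proposed chunk: 8 KB */

	unsigned long baddr = (curaddr + boundary) & bmask;	/* 0x30000 */
	if (sgsize > baddr - curaddr)
		sgsize = baddr - curaddr;			/* clamped to 0x400 */

	printf("next boundary 0x%lx, clamped sgsize 0x%lx\n", baddr, sgsize);
	return 0;
}
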
821 | | | 978 | |
822 | /* | | 979 | /* |
823 | * Allocate physical memory from the given physical address range. | | 980 | * Allocate physical memory from the given physical address range. |
824 | * Called by DMA-safe memory allocation methods. | | 981 | * Called by DMA-safe memory allocation methods. |
825 | */ | | 982 | */ |
826 | int | | 983 | int |
827 | bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment, | | 984 | bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment, |
828 | bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs, | | 985 | bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs, |
829 | int flags, paddr_t low, paddr_t high) | | 986 | int flags, paddr_t low, paddr_t high) |
830 | { | | 987 | { |
831 | paddr_t curaddr, lastaddr; | | 988 | paddr_t curaddr, lastaddr; |
832 | bus_addr_t offset; | | 989 | bus_addr_t offset; |
833 | struct vm_page *m; | | 990 | struct vm_page *m; |
834 | struct pglist mlist; | | 991 | struct pglist mlist; |
835 | int curseg, error; | | 992 | int curseg, error; |
836 | | | 993 | |
837 | offset = t->_displacement; | | 994 | offset = t->_displacement; |
838 | | | 995 | |
839 | /* Always round the size. */ | | 996 | /* Always round the size. */ |
840 | size = round_page(size); | | 997 | size = round_page(size); |
841 | | | 998 | |
842 | /* | | 999 | /* |
843 | * Allocate pages from the VM system. | | 1000 | * Allocate pages from the VM system. |
844 | */ | | 1001 | */ |
845 | error = uvm_pglistalloc(size, low, high, alignment, boundary, | | 1002 | error = uvm_pglistalloc(size, low, high, alignment, boundary, |
846 | &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0); | | 1003 | &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0); |
847 | if (error) | | 1004 | if (error) |
848 | return error; | | 1005 | return error; |
849 | | | 1006 | |
850 | /* | | 1007 | /* |
851 | * Compute the location, size, and number of segments actually | | 1008 | * Compute the location, size, and number of segments actually |
852 | * returned by the VM code. | | 1009 | * returned by the VM code. |
853 | */ | | 1010 | */ |
854 | m = mlist.tqh_first; | | 1011 | m = mlist.tqh_first; |
855 | curseg = 0; | | 1012 | curseg = 0; |
856 | lastaddr = VM_PAGE_TO_PHYS(m); | | 1013 | lastaddr = VM_PAGE_TO_PHYS(m); |
857 | segs[curseg].ds_addr = lastaddr + offset; | | 1014 | segs[curseg].ds_addr = lastaddr + offset; |
858 | segs[curseg].ds_len = PAGE_SIZE; | | 1015 | segs[curseg].ds_len = PAGE_SIZE; |
859 | m = m->pageq.queue.tqe_next; | | 1016 | m = m->pageq.queue.tqe_next; |
860 | | | 1017 | |
861 | for (; m != NULL; m = m->pageq.queue.tqe_next) { | | 1018 | for (; m != NULL; m = m->pageq.queue.tqe_next) { |
862 | curaddr = VM_PAGE_TO_PHYS(m); | | 1019 | curaddr = VM_PAGE_TO_PHYS(m); |
863 | #ifdef DIAGNOSTIC | | 1020 | #ifdef DIAGNOSTIC |
864 | if (curaddr < low || curaddr >= high) { | | 1021 | if (curaddr < low || curaddr >= high) { |
865 | printf("uvm_pglistalloc returned non-sensical" | | 1022 | printf("uvm_pglistalloc returned non-sensical" |
866 | " address 0x%lx\n", curaddr); | | 1023 | " address 0x%lx\n", curaddr); |
867 | panic("_bus_dmamem_alloc_range"); | | 1024 | panic("_bus_dmamem_alloc_range"); |
868 | } | | 1025 | } |
869 | #endif | | 1026 | #endif |
870 | if (curaddr == (lastaddr + PAGE_SIZE)) | | 1027 | if (curaddr == (lastaddr + PAGE_SIZE)) |
871 | segs[curseg].ds_len += PAGE_SIZE; | | 1028 | segs[curseg].ds_len += PAGE_SIZE; |
872 | else { | | 1029 | else { |
873 | curseg++; | | 1030 | curseg++; |
874 | segs[curseg].ds_addr = curaddr + offset; | | 1031 | segs[curseg].ds_addr = curaddr + offset; |
875 | segs[curseg].ds_len = PAGE_SIZE; | | 1032 | segs[curseg].ds_len = PAGE_SIZE; |
876 | } | | 1033 | } |
877 | lastaddr = curaddr; | | 1034 | lastaddr = curaddr; |
878 | } | | 1035 | } |
879 | | | 1036 | |
880 | *rsegs = curseg + 1; | | 1037 | *rsegs = curseg + 1; |
881 | | | 1038 | |
882 | return 0; | | 1039 | return 0; |
883 | } | | 1040 | } |
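
For reference, a standalone sketch of the page-coalescing loop at the end of bus_dmamem_alloc_range() above: physically contiguous pages collapse into one segment and a gap starts a new one. The page addresses and 8 KB page size are assumptions for illustration.

/* sketch: merge a sorted list of page addresses into DMA segments */
#include <stdio.h>

#define PAGE_SIZE 8192UL

struct seg { unsigned long ds_addr, ds_len; };

int main(void)
{
	unsigned long pages[] =
	    { 0x100000UL, 0x102000UL, 0x104000UL, 0x300000UL, 0x302000UL };
	int npages = sizeof(pages) / sizeof(pages[0]);
	struct seg segs[5];
	int curseg = 0, i;
	unsigned long lastaddr = pages[0];

	segs[0].ds_addr = pages[0];
	segs[0].ds_len = PAGE_SIZE;
	for (i = 1; i < npages; i++) {
		if (pages[i] == lastaddr + PAGE_SIZE)
			segs[curseg].ds_len += PAGE_SIZE;	/* contiguous */
		else {
			curseg++;				/* gap: new segment */
			segs[curseg].ds_addr = pages[i];
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = pages[i];
	}
	for (i = 0; i <= curseg; i++)
		printf("seg %d: addr 0x%lx len 0x%lx\n",
		    i, segs[i].ds_addr, segs[i].ds_len);
	return 0;
}
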