Thu Jan 30 06:25:46 2020 UTC
Fix typo


(martin)
diff -r1.53 -r1.54 src/sys/arch/sgimips/hpc/if_sq.c

cvs diff -r1.53 -r1.54 src/sys/arch/sgimips/hpc/if_sq.c

--- src/sys/arch/sgimips/hpc/if_sq.c 2020/01/29 05:37:08 1.53
+++ src/sys/arch/sgimips/hpc/if_sq.c 2020/01/30 06:25:46 1.54
@@ -1,1332 +1,1332 @@
-/* $NetBSD: if_sq.c,v 1.53 2020/01/29 05:37:08 thorpej Exp $ */
+/* $NetBSD: if_sq.c,v 1.54 2020/01/30 06:25:46 martin Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2001 Rafal K. Boni 4 * Copyright (c) 2001 Rafal K. Boni
5 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc. 5 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Portions of this code are derived from software contributed to The 8 * Portions of this code are derived from software contributed to The
9 * NetBSD Foundation by Jason R. Thorpe of the Numerical Aerospace 9 * NetBSD Foundation by Jason R. Thorpe of the Numerical Aerospace
10 * Simulation Facility, NASA Ames Research Center. 10 * Simulation Facility, NASA Ames Research Center.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions 13 * modification, are permitted provided that the following conditions
14 * are met: 14 * are met:
15 * 1. Redistributions of source code must retain the above copyright 15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer. 16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright 17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the 18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution. 19 * documentation and/or other materials provided with the distribution.
20 * 3. The name of the author may not be used to endorse or promote products 20 * 3. The name of the author may not be used to endorse or promote products
21 * derived from this software without specific prior written permission. 21 * derived from this software without specific prior written permission.
22 * 22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */ 33 */
34 34
35#include <sys/cdefs.h> 35#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_sq.c,v 1.53 2020/01/29 05:37:08 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_sq.c,v 1.54 2020/01/30 06:25:46 martin Exp $");
37 37
38 38
39#include <sys/param.h> 39#include <sys/param.h>
40#include <sys/systm.h> 40#include <sys/systm.h>
41#include <sys/device.h> 41#include <sys/device.h>
42#include <sys/callout.h> 42#include <sys/callout.h>
43#include <sys/mbuf.h> 43#include <sys/mbuf.h>
44#include <sys/malloc.h> 44#include <sys/malloc.h>
45#include <sys/kernel.h> 45#include <sys/kernel.h>
46#include <sys/socket.h> 46#include <sys/socket.h>
47#include <sys/ioctl.h> 47#include <sys/ioctl.h>
48#include <sys/errno.h> 48#include <sys/errno.h>
49#include <sys/syslog.h> 49#include <sys/syslog.h>
50 50
51#include <uvm/uvm_extern.h> 51#include <uvm/uvm_extern.h>
52 52
53#include <machine/endian.h> 53#include <machine/endian.h>
54 54
55#include <net/if.h> 55#include <net/if.h>
56#include <net/if_dl.h> 56#include <net/if_dl.h>
57#include <net/if_media.h> 57#include <net/if_media.h>
58#include <net/if_ether.h> 58#include <net/if_ether.h>
59 59
60#include <net/bpf.h> 60#include <net/bpf.h>
61 61
62#include <sys/bus.h> 62#include <sys/bus.h>
63#include <machine/intr.h> 63#include <machine/intr.h>
64#include <machine/sysconf.h> 64#include <machine/sysconf.h>
65 65
66#include <dev/ic/seeq8003reg.h> 66#include <dev/ic/seeq8003reg.h>
67 67
68#include <sgimips/hpc/sqvar.h> 68#include <sgimips/hpc/sqvar.h>
69#include <sgimips/hpc/hpcvar.h> 69#include <sgimips/hpc/hpcvar.h>
70#include <sgimips/hpc/hpcreg.h> 70#include <sgimips/hpc/hpcreg.h>
71 71
72#include <dev/arcbios/arcbios.h> 72#include <dev/arcbios/arcbios.h>
73#include <dev/arcbios/arcbiosvar.h> 73#include <dev/arcbios/arcbiosvar.h>
74 74
75#define static 75#define static
76 76
77/* 77/*
78 * Short TODO list: 78 * Short TODO list:
79 * (1) Do counters for bad-RX packets. 79 * (1) Do counters for bad-RX packets.
80 * (2) Allow multi-segment transmits, instead of copying to a single, 80 * (2) Allow multi-segment transmits, instead of copying to a single,
81 * contiguous mbuf. 81 * contiguous mbuf.
82 * (3) Verify sq_stop() turns off enough stuff; I was still getting 82 * (3) Verify sq_stop() turns off enough stuff; I was still getting
83 * seeq interrupts after sq_stop(). 83 * seeq interrupts after sq_stop().
84 * (4) Implement EDLC modes: especially packet auto-pad and simplex 84 * (4) Implement EDLC modes: especially packet auto-pad and simplex
85 * mode. 85 * mode.
86 * (5) Should the driver filter out its own transmissions in non-EDLC 86 * (5) Should the driver filter out its own transmissions in non-EDLC
87 * mode? 87 * mode?
88 * (6) Multicast support -- multicast filter, address management, ... 88 * (6) Multicast support -- multicast filter, address management, ...
89 * (7) Deal with RB0 (recv buffer overflow) on reception. Will need 89 * (7) Deal with RB0 (recv buffer overflow) on reception. Will need
90 * to figure out if RB0 is read-only as stated in one spot in the 90 * to figure out if RB0 is read-only as stated in one spot in the
91 * HPC spec or read-write (ie, is the 'write a one to clear it') 91 * HPC spec or read-write (ie, is the 'write a one to clear it')
92 * the correct thing? 92 * the correct thing?
93 */ 93 */
94 94
95#if defined(SQ_DEBUG) 95#if defined(SQ_DEBUG)
96 int sq_debug = 0; 96 int sq_debug = 0;
97 #define SQ_DPRINTF(x) if (sq_debug) printf x 97 #define SQ_DPRINTF(x) if (sq_debug) printf x
98#else 98#else
99 #define SQ_DPRINTF(x) 99 #define SQ_DPRINTF(x)
100#endif 100#endif
101 101
102static int sq_match(device_t, cfdata_t, void *); 102static int sq_match(device_t, cfdata_t, void *);
103static void sq_attach(device_t, device_t, void *); 103static void sq_attach(device_t, device_t, void *);
104static int sq_init(struct ifnet *); 104static int sq_init(struct ifnet *);
105static void sq_start(struct ifnet *); 105static void sq_start(struct ifnet *);
106static void sq_stop(struct ifnet *, int); 106static void sq_stop(struct ifnet *, int);
107static void sq_watchdog(struct ifnet *); 107static void sq_watchdog(struct ifnet *);
108static int sq_ioctl(struct ifnet *, u_long, void *); 108static int sq_ioctl(struct ifnet *, u_long, void *);
109 109
110static void sq_set_filter(struct sq_softc *); 110static void sq_set_filter(struct sq_softc *);
111static int sq_intr(void *); 111static int sq_intr(void *);
112static int sq_rxintr(struct sq_softc *); 112static int sq_rxintr(struct sq_softc *);
113static int sq_txintr(struct sq_softc *); 113static int sq_txintr(struct sq_softc *);
114static void sq_txring_hpc1(struct sq_softc *); 114static void sq_txring_hpc1(struct sq_softc *);
115static void sq_txring_hpc3(struct sq_softc *); 115static void sq_txring_hpc3(struct sq_softc *);
116static void sq_reset(struct sq_softc *); 116static void sq_reset(struct sq_softc *);
117static int sq_add_rxbuf(struct sq_softc *, int); 117static int sq_add_rxbuf(struct sq_softc *, int);
118static void sq_dump_buffer(paddr_t, psize_t); 118static void sq_dump_buffer(paddr_t, psize_t);
119static void sq_trace_dump(struct sq_softc *); 119static void sq_trace_dump(struct sq_softc *);
120 120
121CFATTACH_DECL_NEW(sq, sizeof(struct sq_softc), 121CFATTACH_DECL_NEW(sq, sizeof(struct sq_softc),
122 sq_match, sq_attach, NULL, NULL); 122 sq_match, sq_attach, NULL, NULL);
123 123
124#define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN) 124#define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
125 125
126#define sq_seeq_read(sc, off) \ 126#define sq_seeq_read(sc, off) \
127 bus_space_read_1(sc->sc_regt, sc->sc_regh, (off << 2) + 3) 127 bus_space_read_1(sc->sc_regt, sc->sc_regh, (off << 2) + 3)
128#define sq_seeq_write(sc, off, val) \ 128#define sq_seeq_write(sc, off, val) \
129 bus_space_write_1(sc->sc_regt, sc->sc_regh, (off << 2) + 3, val) 129 bus_space_write_1(sc->sc_regt, sc->sc_regh, (off << 2) + 3, val)
130 130
131#define sq_hpc_read(sc, off) \ 131#define sq_hpc_read(sc, off) \
132 bus_space_read_4(sc->sc_hpct, sc->sc_hpch, off) 132 bus_space_read_4(sc->sc_hpct, sc->sc_hpch, off)
133#define sq_hpc_write(sc, off, val) \ 133#define sq_hpc_write(sc, off, val) \
134 bus_space_write_4(sc->sc_hpct, sc->sc_hpch, off, val) 134 bus_space_write_4(sc->sc_hpct, sc->sc_hpch, off, val)
135 135
136/* MAC address offset for non-onboard implementations */ 136/* MAC address offset for non-onboard implementations */
137#define SQ_HPC_EEPROM_ENADDR 250 137#define SQ_HPC_EEPROM_ENADDR 250
138 138
139#define SGI_OUI_0 0x08 139#define SGI_OUI_0 0x08
140#define SGI_OUI_1 0x00 140#define SGI_OUI_1 0x00
141#define SGI_OUI_2 0x69 141#define SGI_OUI_2 0x69
142 142
143static int 143static int
144sq_match(device_t parent, cfdata_t cf, void *aux) 144sq_match(device_t parent, cfdata_t cf, void *aux)
145{ 145{
146 struct hpc_attach_args *ha = aux; 146 struct hpc_attach_args *ha = aux;
147 147
148 if (strcmp(ha->ha_name, cf->cf_name) == 0) { 148 if (strcmp(ha->ha_name, cf->cf_name) == 0) {
149 vaddr_t reset, txstat; 149 vaddr_t reset, txstat;
150 150
151 reset = MIPS_PHYS_TO_KSEG1(ha->ha_sh + 151 reset = MIPS_PHYS_TO_KSEG1(ha->ha_sh +
152 ha->ha_dmaoff + ha->hpc_regs->enetr_reset); 152 ha->ha_dmaoff + ha->hpc_regs->enetr_reset);
153 txstat = MIPS_PHYS_TO_KSEG1(ha->ha_sh + 153 txstat = MIPS_PHYS_TO_KSEG1(ha->ha_sh +
154 ha->ha_devoff + (SEEQ_TXSTAT << 2)); 154 ha->ha_devoff + (SEEQ_TXSTAT << 2));
155 155
156 if (platform.badaddr((void *)reset, sizeof(reset))) 156 if (platform.badaddr((void *)reset, sizeof(reset)))
157 return 0; 157 return 0;
158 158
159 *(volatile uint32_t *)reset = 0x1; 159 *(volatile uint32_t *)reset = 0x1;
160 delay(20); 160 delay(20);
161 *(volatile uint32_t *)reset = 0x0; 161 *(volatile uint32_t *)reset = 0x0;
162 162
163 if (platform.badaddr((void *)txstat, sizeof(txstat))) 163 if (platform.badaddr((void *)txstat, sizeof(txstat)))
164 return 0; 164 return 0;
165 165
166 if ((*(volatile uint32_t *)txstat & 0xff) == TXSTAT_OLDNEW) 166 if ((*(volatile uint32_t *)txstat & 0xff) == TXSTAT_OLDNEW)
167 return 1; 167 return 1;
168 } 168 }
169 169
170 return 0; 170 return 0;
171} 171}
172 172
173static void 173static void
174sq_attach(device_t parent, device_t self, void *aux) 174sq_attach(device_t parent, device_t self, void *aux)
175{ 175{
176 int i, err; 176 int i, err;
177 const char* macaddr; 177 const char* macaddr;
178 struct sq_softc *sc = device_private(self); 178 struct sq_softc *sc = device_private(self);
179 struct hpc_attach_args *haa = aux; 179 struct hpc_attach_args *haa = aux;
180 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 180 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
181 181
182 sc->sc_dev = self; 182 sc->sc_dev = self;
183 sc->sc_hpct = haa->ha_st; 183 sc->sc_hpct = haa->ha_st;
184 sc->hpc_regs = haa->hpc_regs; /* HPC register definitions */ 184 sc->hpc_regs = haa->hpc_regs; /* HPC register definitions */
185 185
186 if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh, 186 if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
187 haa->ha_dmaoff, sc->hpc_regs->enet_regs_size, 187 haa->ha_dmaoff, sc->hpc_regs->enet_regs_size,
188 &sc->sc_hpch)) != 0) { 188 &sc->sc_hpch)) != 0) {
189 printf(": unable to map HPC DMA registers, error = %d\n", err); 189 printf(": unable to map HPC DMA registers, error = %d\n", err);
190 goto fail_0; 190 goto fail_0;
191 } 191 }
192 192
193 sc->sc_regt = haa->ha_st; 193 sc->sc_regt = haa->ha_st;
194 if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh, 194 if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
195 haa->ha_devoff, sc->hpc_regs->enet_devregs_size, 195 haa->ha_devoff, sc->hpc_regs->enet_devregs_size,
196 &sc->sc_regh)) != 0) { 196 &sc->sc_regh)) != 0) {
197 printf(": unable to map Seeq registers, error = %d\n", err); 197 printf(": unable to map Seeq registers, error = %d\n", err);
198 goto fail_0; 198 goto fail_0;
199 } 199 }
200 200
201 sc->sc_dmat = haa->ha_dmat; 201 sc->sc_dmat = haa->ha_dmat;
202 202
203 if ((err = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sq_control), 203 if ((err = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sq_control),
204 PAGE_SIZE, PAGE_SIZE, &sc->sc_cdseg, 1, &sc->sc_ncdseg, 204 PAGE_SIZE, PAGE_SIZE, &sc->sc_cdseg, 1, &sc->sc_ncdseg,
205 BUS_DMA_NOWAIT)) != 0) { 205 BUS_DMA_NOWAIT)) != 0) {
206 printf(": unable to allocate control data, error = %d\n", err); 206 printf(": unable to allocate control data, error = %d\n", err);
207 goto fail_0; 207 goto fail_0;
208 } 208 }
209 209
210 if ((err = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg, 210 if ((err = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg,
211 sizeof(struct sq_control), (void **)&sc->sc_control, 211 sizeof(struct sq_control), (void **)&sc->sc_control,
212 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) { 212 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
213 printf(": unable to map control data, error = %d\n", err); 213 printf(": unable to map control data, error = %d\n", err);
214 goto fail_1; 214 goto fail_1;
215 } 215 }
216 216
217 if ((err = bus_dmamap_create(sc->sc_dmat, 217 if ((err = bus_dmamap_create(sc->sc_dmat,
218 sizeof(struct sq_control), 1, sizeof(struct sq_control), PAGE_SIZE, 218 sizeof(struct sq_control), 1, sizeof(struct sq_control), PAGE_SIZE,
219 BUS_DMA_NOWAIT, &sc->sc_cdmap)) != 0) { 219 BUS_DMA_NOWAIT, &sc->sc_cdmap)) != 0) {
220 printf(": unable to create DMA map for control data, error " 220 printf(": unable to create DMA map for control data, error "
221 "= %d\n", err); 221 "= %d\n", err);
222 goto fail_2; 222 goto fail_2;
223 } 223 }
224 224
225 if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cdmap, 225 if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cdmap,
226 sc->sc_control, sizeof(struct sq_control), NULL, 226 sc->sc_control, sizeof(struct sq_control), NULL,
227 BUS_DMA_NOWAIT)) != 0) { 227 BUS_DMA_NOWAIT)) != 0) {
228 printf(": unable to load DMA map for control data, error " 228 printf(": unable to load DMA map for control data, error "
229 "= %d\n", err); 229 "= %d\n", err);
230 goto fail_3; 230 goto fail_3;
231 } 231 }
232 232
233 memset(sc->sc_control, 0, sizeof(struct sq_control)); 233 memset(sc->sc_control, 0, sizeof(struct sq_control));
234 234
235 /* Create transmit buffer DMA maps */ 235 /* Create transmit buffer DMA maps */
236 for (i = 0; i < SQ_NTXDESC; i++) { 236 for (i = 0; i < SQ_NTXDESC; i++) {
237 if ((err = bus_dmamap_create(sc->sc_dmat, 237 if ((err = bus_dmamap_create(sc->sc_dmat,
238 MCLBYTES, 1, MCLBYTES, 0, 238 MCLBYTES, 1, MCLBYTES, 0,
239 BUS_DMA_NOWAIT, &sc->sc_txmap[i])) != 0) { 239 BUS_DMA_NOWAIT, &sc->sc_txmap[i])) != 0) {
240 printf(": unable to create tx DMA map %d, error = %d\n", 240 printf(": unable to create tx DMA map %d, error = %d\n",
241 i, err); 241 i, err);
242 goto fail_4; 242 goto fail_4;
243 } 243 }
244 } 244 }
245 245
246 /* Create receive buffer DMA maps */ 246 /* Create receive buffer DMA maps */
247 for (i = 0; i < SQ_NRXDESC; i++) { 247 for (i = 0; i < SQ_NRXDESC; i++) {
248 if ((err = bus_dmamap_create(sc->sc_dmat, 248 if ((err = bus_dmamap_create(sc->sc_dmat,
249 MCLBYTES, 1, MCLBYTES, 0, 249 MCLBYTES, 1, MCLBYTES, 0,
250 BUS_DMA_NOWAIT, &sc->sc_rxmap[i])) != 0) { 250 BUS_DMA_NOWAIT, &sc->sc_rxmap[i])) != 0) {
251 printf(": unable to create rx DMA map %d, error = %d\n", 251 printf(": unable to create rx DMA map %d, error = %d\n",
252 i, err); 252 i, err);
253 goto fail_5; 253 goto fail_5;
254 } 254 }
255 } 255 }
256 256
257 /* Pre-allocate the receive buffers. */ 257 /* Pre-allocate the receive buffers. */
258 for (i = 0; i < SQ_NRXDESC; i++) { 258 for (i = 0; i < SQ_NRXDESC; i++) {
259 if ((err = sq_add_rxbuf(sc, i)) != 0) { 259 if ((err = sq_add_rxbuf(sc, i)) != 0) {
260 printf(": unable to allocate or map rx buffer %d\n," 260 printf(": unable to allocate or map rx buffer %d\n,"
261 " error = %d\n", i, err); 261 " error = %d\n", i, err);
262 goto fail_6; 262 goto fail_6;
263 } 263 }
264 } 264 }
265 265
266 memcpy(sc->sc_enaddr, &haa->hpc_eeprom[SQ_HPC_EEPROM_ENADDR], 266 memcpy(sc->sc_enaddr, &haa->hpc_eeprom[SQ_HPC_EEPROM_ENADDR],
267 ETHER_ADDR_LEN); 267 ETHER_ADDR_LEN);
268 268
269 /* 269 /*
270 * If our mac address is bogus, obtain it from ARCBIOS. This will 270 * If our mac address is bogus, obtain it from ARCBIOS. This will
271 * be true of the onboard HPC3 on IP22, since there is no eeprom, 271 * be true of the onboard HPC3 on IP22, since there is no eeprom,
272 * but rather the DS1386 RTC's battery-backed ram is used. 272 * but rather the DS1386 RTC's battery-backed ram is used.
273 */ 273 */
274 if (sc->sc_enaddr[0] != SGI_OUI_0 || 274 if (sc->sc_enaddr[0] != SGI_OUI_0 ||
275 sc->sc_enaddr[1] != SGI_OUI_1 || 275 sc->sc_enaddr[1] != SGI_OUI_1 ||
276 sc->sc_enaddr[2] != SGI_OUI_2) { 276 sc->sc_enaddr[2] != SGI_OUI_2) {
277 macaddr = arcbios_GetEnvironmentVariable("eaddr"); 277 macaddr = arcbios_GetEnvironmentVariable("eaddr");
278 if (macaddr == NULL) { 278 if (macaddr == NULL) {
279 printf(": unable to get MAC address!\n"); 279 printf(": unable to get MAC address!\n");
280 goto fail_6; 280 goto fail_6;
281 } 281 }
282 ether_aton_r(sc->sc_enaddr, sizeof(sc->sc_enaddr), macaddr); 282 ether_aton_r(sc->sc_enaddr, sizeof(sc->sc_enaddr), macaddr);
283 } 283 }
284 284
285 evcnt_attach_dynamic(&sc->sq_intrcnt, EVCNT_TYPE_INTR, NULL, 285 evcnt_attach_dynamic(&sc->sq_intrcnt, EVCNT_TYPE_INTR, NULL,
286 device_xname(self), "intr"); 286 device_xname(self), "intr");
287 287
288 if ((cpu_intr_establish(haa->ha_irq, IPL_NET, sq_intr, sc)) == NULL) { 288 if ((cpu_intr_establish(haa->ha_irq, IPL_NET, sq_intr, sc)) == NULL) {
289 printf(": unable to establish interrupt!\n"); 289 printf(": unable to establish interrupt!\n");
290 goto fail_6; 290 goto fail_6;
291 } 291 }
292 292
293 /* Reset the chip to a known state. */ 293 /* Reset the chip to a known state. */
294 sq_reset(sc); 294 sq_reset(sc);
295 295
296 /* 296 /*
297 * Determine if we're an 8003 or 80c03 by setting the first 297 * Determine if we're an 8003 or 80c03 by setting the first
298 * MAC address register to non-zero, and then reading it back. 298 * MAC address register to non-zero, and then reading it back.
299 * If it's zero, we have an 80c03, because we will have read 299 * If it's zero, we have an 80c03, because we will have read
300 * the TxCollLSB register. 300 * the TxCollLSB register.
301 */ 301 */
302 sq_seeq_write(sc, SEEQ_TXCOLLS0, 0xa5); 302 sq_seeq_write(sc, SEEQ_TXCOLLS0, 0xa5);
303 if (sq_seeq_read(sc, SEEQ_TXCOLLS0) == 0) 303 if (sq_seeq_read(sc, SEEQ_TXCOLLS0) == 0)
304 sc->sc_type = SQ_TYPE_80C03; 304 sc->sc_type = SQ_TYPE_80C03;
305 else 305 else
306 sc->sc_type = SQ_TYPE_8003; 306 sc->sc_type = SQ_TYPE_8003;
307 sq_seeq_write(sc, SEEQ_TXCOLLS0, 0x00); 307 sq_seeq_write(sc, SEEQ_TXCOLLS0, 0x00);
308 308
309 printf(": SGI Seeq %s\n", 309 printf(": SGI Seeq %s\n",
310 sc->sc_type == SQ_TYPE_80C03 ? "80c03" : "8003"); 310 sc->sc_type == SQ_TYPE_80C03 ? "80c03" : "8003");
311 311
312 printf("%s: Ethernet address %s\n", 312 printf("%s: Ethernet address %s\n",
313 device_xname(self), ether_sprintf(sc->sc_enaddr)); 313 device_xname(self), ether_sprintf(sc->sc_enaddr));
314 314
315 strcpy(ifp->if_xname, device_xname(self)); 315 strcpy(ifp->if_xname, device_xname(self));
316 ifp->if_softc = sc; 316 ifp->if_softc = sc;
317 ifp->if_mtu = ETHERMTU; 317 ifp->if_mtu = ETHERMTU;
318 ifp->if_init = sq_init; 318 ifp->if_init = sq_init;
319 ifp->if_stop = sq_stop; 319 ifp->if_stop = sq_stop;
320 ifp->if_start = sq_start; 320 ifp->if_start = sq_start;
321 ifp->if_ioctl = sq_ioctl; 321 ifp->if_ioctl = sq_ioctl;
322 ifp->if_watchdog = sq_watchdog; 322 ifp->if_watchdog = sq_watchdog;
323 ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST; 323 ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
324 IFQ_SET_READY(&ifp->if_snd); 324 IFQ_SET_READY(&ifp->if_snd);
325 325
326 if_attach(ifp); 326 if_attach(ifp);
327 if_deferred_start_init(ifp, NULL); 327 if_deferred_start_init(ifp, NULL);
328 ether_ifattach(ifp, sc->sc_enaddr); 328 ether_ifattach(ifp, sc->sc_enaddr);
329 329
330 memset(&sc->sq_trace, 0, sizeof(sc->sq_trace)); 330 memset(&sc->sq_trace, 0, sizeof(sc->sq_trace));
331 /* Done! */ 331 /* Done! */
332 return; 332 return;
333 333
334 /* 334 /*
335 * Free any resources we've allocated during the failed attach 335 * Free any resources we've allocated during the failed attach
336 * attempt. Do this in reverse order and fall through. 336 * attempt. Do this in reverse order and fall through.
337 */ 337 */
338 fail_6: 338 fail_6:
339 for (i = 0; i < SQ_NRXDESC; i++) { 339 for (i = 0; i < SQ_NRXDESC; i++) {
340 if (sc->sc_rxmbuf[i] != NULL) { 340 if (sc->sc_rxmbuf[i] != NULL) {
341 bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[i]); 341 bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[i]);
342 m_freem(sc->sc_rxmbuf[i]); 342 m_freem(sc->sc_rxmbuf[i]);
343 } 343 }
344 } 344 }
345 fail_5: 345 fail_5:
346 for (i = 0; i < SQ_NRXDESC; i++) { 346 for (i = 0; i < SQ_NRXDESC; i++) {
347 if (sc->sc_rxmap[i] != NULL) 347 if (sc->sc_rxmap[i] != NULL)
348 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap[i]); 348 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap[i]);
349 } 349 }
350 fail_4: 350 fail_4:
351 for (i = 0; i < SQ_NTXDESC; i++) { 351 for (i = 0; i < SQ_NTXDESC; i++) {
352 if (sc->sc_txmap[i] != NULL) 352 if (sc->sc_txmap[i] != NULL)
353 bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap[i]); 353 bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap[i]);
354 } 354 }
355 bus_dmamap_unload(sc->sc_dmat, sc->sc_cdmap); 355 bus_dmamap_unload(sc->sc_dmat, sc->sc_cdmap);
356 fail_3: 356 fail_3:
357 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdmap); 357 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdmap);
358 fail_2: 358 fail_2:
359 bus_dmamem_unmap(sc->sc_dmat, 359 bus_dmamem_unmap(sc->sc_dmat,
360 (void *)sc->sc_control, sizeof(struct sq_control)); 360 (void *)sc->sc_control, sizeof(struct sq_control));
361 fail_1: 361 fail_1:
362 bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg); 362 bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg);
363 fail_0: 363 fail_0:
364 return; 364 return;
365} 365}
366 366
367/* Set up data to get the interface up and running. */ 367/* Set up data to get the interface up and running. */
368int 368int
369sq_init(struct ifnet *ifp) 369sq_init(struct ifnet *ifp)
370{ 370{
371 int i; 371 int i;
372 struct sq_softc *sc = ifp->if_softc; 372 struct sq_softc *sc = ifp->if_softc;
373 373
374 /* Cancel any in-progress I/O */ 374 /* Cancel any in-progress I/O */
375 sq_stop(ifp, 0); 375 sq_stop(ifp, 0);
376 376
377 sc->sc_nextrx = 0; 377 sc->sc_nextrx = 0;
378 378
379 sc->sc_nfreetx = SQ_NTXDESC; 379 sc->sc_nfreetx = SQ_NTXDESC;
380 sc->sc_nexttx = sc->sc_prevtx = 0; 380 sc->sc_nexttx = sc->sc_prevtx = 0;
381 381
382 SQ_TRACE(SQ_RESET, sc, 0, 0); 382 SQ_TRACE(SQ_RESET, sc, 0, 0);
383 383
384 /* Set into 8003 mode, bank 0 to program ethernet address */ 384 /* Set into 8003 mode, bank 0 to program ethernet address */
385 sq_seeq_write(sc, SEEQ_TXCMD, TXCMD_BANK0); 385 sq_seeq_write(sc, SEEQ_TXCMD, TXCMD_BANK0);
386 386
387 /* Now write the address */ 387 /* Now write the address */
388 for (i = 0; i < ETHER_ADDR_LEN; i++) 388 for (i = 0; i < ETHER_ADDR_LEN; i++)
389 sq_seeq_write(sc, i, sc->sc_enaddr[i]); 389 sq_seeq_write(sc, i, sc->sc_enaddr[i]);
390 390
391 sc->sc_rxcmd = 391 sc->sc_rxcmd =
392 RXCMD_IE_CRC | 392 RXCMD_IE_CRC |
393 RXCMD_IE_DRIB | 393 RXCMD_IE_DRIB |
394 RXCMD_IE_SHORT | 394 RXCMD_IE_SHORT |
395 RXCMD_IE_END | 395 RXCMD_IE_END |
396 RXCMD_IE_GOOD; 396 RXCMD_IE_GOOD;
397 397
398 /* 398 /*
399 * Set the receive filter -- this will add some bits to the 399 * Set the receive filter -- this will add some bits to the
400 * prototype RXCMD register. Do this before setting the 400 * prototype RXCMD register. Do this before setting the
401 * transmit config register, since we might need to switch 401 * transmit config register, since we might need to switch
402 * banks. 402 * banks.
403 */ 403 */
404 sq_set_filter(sc); 404 sq_set_filter(sc);
405 405
406 /* Set up Seeq transmit command register */ 406 /* Set up Seeq transmit command register */
407 sq_seeq_write(sc, SEEQ_TXCMD, 407 sq_seeq_write(sc, SEEQ_TXCMD,
408 TXCMD_IE_UFLOW | 408 TXCMD_IE_UFLOW |
409 TXCMD_IE_COLL | 409 TXCMD_IE_COLL |
410 TXCMD_IE_16COLL | 410 TXCMD_IE_16COLL |
411 TXCMD_IE_GOOD); 411 TXCMD_IE_GOOD);
412 412
413 /* Now write the receive command register. */ 413 /* Now write the receive command register. */
414 sq_seeq_write(sc, SEEQ_RXCMD, sc->sc_rxcmd); 414 sq_seeq_write(sc, SEEQ_RXCMD, sc->sc_rxcmd);
415 415
416 /* 416 /*
417 * Set up HPC ethernet PIO and DMA configurations. 417 * Set up HPC ethernet PIO and DMA configurations.
418 * 418 *
419 * The PROM appears to do most of this for the onboard HPC3, but 419 * The PROM appears to do most of this for the onboard HPC3, but
420 * not for the Challenge S's IOPLUS chip. We copy how the onboard 420 * not for the Challenge S's IOPLUS chip. We copy how the onboard
421 * chip is configured and assume that it's correct for both. 421 * chip is configured and assume that it's correct for both.
422 */ 422 */
423 if (sc->hpc_regs->revision == 3) { 423 if (sc->hpc_regs->revision == 3) {
424 uint32_t dmareg, pioreg; 424 uint32_t dmareg, pioreg;
425 425
426 pioreg = 426 pioreg =
427 HPC3_ENETR_PIOCFG_P1(1) | 427 HPC3_ENETR_PIOCFG_P1(1) |
428 HPC3_ENETR_PIOCFG_P2(6) | 428 HPC3_ENETR_PIOCFG_P2(6) |
429 HPC3_ENETR_PIOCFG_P3(1); 429 HPC3_ENETR_PIOCFG_P3(1);
430 430
431 dmareg = 431 dmareg =
432 HPC3_ENETR_DMACFG_D1(6) | 432 HPC3_ENETR_DMACFG_D1(6) |
433 HPC3_ENETR_DMACFG_D2(2) | 433 HPC3_ENETR_DMACFG_D2(2) |
434 HPC3_ENETR_DMACFG_D3(0) | 434 HPC3_ENETR_DMACFG_D3(0) |
435 HPC3_ENETR_DMACFG_FIX_RXDC | 435 HPC3_ENETR_DMACFG_FIX_RXDC |
436 HPC3_ENETR_DMACFG_FIX_INTR | 436 HPC3_ENETR_DMACFG_FIX_INTR |
437 HPC3_ENETR_DMACFG_FIX_EOP | 437 HPC3_ENETR_DMACFG_FIX_EOP |
438 HPC3_ENETR_DMACFG_TIMEOUT; 438 HPC3_ENETR_DMACFG_TIMEOUT;
439 439
440 sq_hpc_write(sc, HPC3_ENETR_PIOCFG, pioreg); 440 sq_hpc_write(sc, HPC3_ENETR_PIOCFG, pioreg);
441 sq_hpc_write(sc, HPC3_ENETR_DMACFG, dmareg); 441 sq_hpc_write(sc, HPC3_ENETR_DMACFG, dmareg);
442 } 442 }
443 443
444 /* Pass the start of the receive ring to the HPC */ 444 /* Pass the start of the receive ring to the HPC */
445 sq_hpc_write(sc, sc->hpc_regs->enetr_ndbp, SQ_CDRXADDR(sc, 0)); 445 sq_hpc_write(sc, sc->hpc_regs->enetr_ndbp, SQ_CDRXADDR(sc, 0));
446 446
447 /* And turn on the HPC ethernet receive channel */ 447 /* And turn on the HPC ethernet receive channel */
448 sq_hpc_write(sc, sc->hpc_regs->enetr_ctl, 448 sq_hpc_write(sc, sc->hpc_regs->enetr_ctl,
449 sc->hpc_regs->enetr_ctl_active); 449 sc->hpc_regs->enetr_ctl_active);
450 450
451 /* 451 /*
452 * Turn off delayed receive interrupts on HPC1. 452 * Turn off delayed receive interrupts on HPC1.
453 * (see Hollywood HPC Specification 2.1.4.3) 453 * (see Hollywood HPC Specification 2.1.4.3)
454 */ 454 */
455 if (sc->hpc_regs->revision != 3) 455 if (sc->hpc_regs->revision != 3)
456 sq_hpc_write(sc, HPC1_ENET_INTDELAY, HPC1_ENET_INTDELAY_OFF); 456 sq_hpc_write(sc, HPC1_ENET_INTDELAY, HPC1_ENET_INTDELAY_OFF);
457 457
458 ifp->if_flags |= IFF_RUNNING; 458 ifp->if_flags |= IFF_RUNNING;
459 ifp->if_flags &= ~IFF_OACTIVE; 459 ifp->if_flags &= ~IFF_OACTIVE;
460 460
461 return 0; 461 return 0;
462} 462}
463 463
464static void 464static void
465sq_set_filter(struct sq_softc *sc) 465sq_set_filter(struct sq_softc *sc)
466{ 466{
467 struct ethercom *ec = &sc->sc_ethercom; 467 struct ethercom *ec = &sc->sc_ethercom;
468 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 468 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
469 struct ether_multi *enm; 469 struct ether_multi *enm;
470 struct ether_multistep step; 470 struct ether_multistep step;
471 471
472 /* 472 /*
473 * Check for promiscuous mode. Also implies 473 * Check for promiscuous mode. Also implies
474 * all-multicast. 474 * all-multicast.
475 */ 475 */
476 if (ifp->if_flags & IFF_PROMISC) { 476 if (ifp->if_flags & IFF_PROMISC) {
477 sc->sc_rxcmd |= RXCMD_REC_ALL; 477 sc->sc_rxcmd |= RXCMD_REC_ALL;
478 ifp->if_flags |= IFF_ALLMULTI; 478 ifp->if_flags |= IFF_ALLMULTI;
479 return; 479 return;
480 } 480 }
481 481
482 /* 482 /*
483 * The 8003 has no hash table. If we have any multicast 483 * The 8003 has no hash table. If we have any multicast
484 * addresses on the list, enable reception of all multicast 484 * addresses on the list, enable reception of all multicast
485 * frames. 485 * frames.
486 * 486 *
487 * XXX The 80c03 has a hash table. We should use it. 487 * XXX The 80c03 has a hash table. We should use it.
488 */ 488 */
489 489
490 ETHER_FIRST_MULTI(step, ec, enm); 490 ETHER_FIRST_MULTI(step, ec, enm);
491 491
492 if (enm == NULL) { 492 if (enm == NULL) {
493 sc->sc_rxcmd &= ~RXCMD_REC_MASK; 493 sc->sc_rxcmd &= ~RXCMD_REC_MASK;
494 sc->sc_rxcmd |= RXCMD_REC_BROAD; 494 sc->sc_rxcmd |= RXCMD_REC_BROAD;
495 495
496 ifp->if_flags &= ~IFF_ALLMULTI; 496 ifp->if_flags &= ~IFF_ALLMULTI;
497 return; 497 return;
498 } 498 }
499 499
500 sc->sc_rxcmd |= RXCMD_REC_MULTI; 500 sc->sc_rxcmd |= RXCMD_REC_MULTI;
501 ifp->if_flags |= IFF_ALLMULTI; 501 ifp->if_flags |= IFF_ALLMULTI;
502} 502}
503 503
504int 504int
505sq_ioctl(struct ifnet *ifp, u_long cmd, void *data) 505sq_ioctl(struct ifnet *ifp, u_long cmd, void *data)
506{ 506{
507 int s, error = 0; 507 int s, error = 0;
508 508
509 SQ_TRACE(SQ_IOCTL, (struct sq_softc *)ifp->if_softc, 0, 0); 509 SQ_TRACE(SQ_IOCTL, (struct sq_softc *)ifp->if_softc, 0, 0);
510 510
511 s = splnet(); 511 s = splnet();
512 512
513 error = ether_ioctl(ifp, cmd, data); 513 error = ether_ioctl(ifp, cmd, data);
514 if (error == ENETRESET) { 514 if (error == ENETRESET) {
515 /* 515 /*
516 * Multicast list has changed; set the hardware filter 516 * Multicast list has changed; set the hardware filter
517 * accordingly. 517 * accordingly.
518 */ 518 */
519 if (ifp->if_flags & IFF_RUNNING) 519 if (ifp->if_flags & IFF_RUNNING)
520 error = sq_init(ifp); 520 error = sq_init(ifp);
521 else 521 else
522 error = 0; 522 error = 0;
523 } 523 }
524 524
525 splx(s); 525 splx(s);
526 return error; 526 return error;
527} 527}
528 528
529void 529void
530sq_start(struct ifnet *ifp) 530sq_start(struct ifnet *ifp)
531{ 531{
532 struct sq_softc *sc = ifp->if_softc; 532 struct sq_softc *sc = ifp->if_softc;
533 uint32_t status; 533 uint32_t status;
534 struct mbuf *m0, *m; 534 struct mbuf *m0, *m;
535 bus_dmamap_t dmamap; 535 bus_dmamap_t dmamap;
536 int err, totlen, nexttx, firsttx, lasttx = -1, ofree, seg; 536 int err, totlen, nexttx, firsttx, lasttx = -1, ofree, seg;
537 537
538 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 538 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
539 return; 539 return;
540 540
541 /* 541 /*
542 * Remember the previous number of free descriptors and 542 * Remember the previous number of free descriptors and
543 * the first descriptor we'll use. 543 * the first descriptor we'll use.
544 */ 544 */
545 ofree = sc->sc_nfreetx; 545 ofree = sc->sc_nfreetx;
546 firsttx = sc->sc_nexttx; 546 firsttx = sc->sc_nexttx;
547 547
548 /* 548 /*
549 * Loop through the send queue, setting up transmit descriptors 549 * Loop through the send queue, setting up transmit descriptors
550 * until we drain the queue, or use up all available transmit 550 * until we drain the queue, or use up all available transmit
551 * descriptors. 551 * descriptors.
552 */ 552 */
553 while (sc->sc_nfreetx != 0) { 553 while (sc->sc_nfreetx != 0) {
554 /* 554 /*
555 * Grab a packet off the queue. 555 * Grab a packet off the queue.
556 */ 556 */
557 IFQ_POLL(&ifp->if_snd, m0); 557 IFQ_POLL(&ifp->if_snd, m0);
558 if (m0 == NULL) 558 if (m0 == NULL)
559 break; 559 break;
560 m = NULL; 560 m = NULL;
561 561
562 dmamap = sc->sc_txmap[sc->sc_nexttx]; 562 dmamap = sc->sc_txmap[sc->sc_nexttx];
563 563
564 /* 564 /*
565 * Load the DMA map. If this fails, the packet either 565 * Load the DMA map. If this fails, the packet either
566 * didn't fit in the alloted number of segments, or we were 566 * didn't fit in the alloted number of segments, or we were
567 * short on resources. In this case, we'll copy and try 567 * short on resources. In this case, we'll copy and try
568 * again. 568 * again.
569 * Also copy it if we need to pad, so that we are sure there 569 * Also copy it if we need to pad, so that we are sure there
570 * is room for the pad buffer. 570 * is room for the pad buffer.
571 * XXX the right way of doing this is to use a static buffer 571 * XXX the right way of doing this is to use a static buffer
572 * for padding and adding it to the transmit descriptor (see 572 * for padding and adding it to the transmit descriptor (see
573 * sys/dev/pci/if_tl.c for example). We can't do this here yet 573 * sys/dev/pci/if_tl.c for example). We can't do this here yet
574 * because we can't send packets with more than one fragment. 574 * because we can't send packets with more than one fragment.
575 */ 575 */
576 if (m0->m_pkthdr.len < ETHER_PAD_LEN || 576 if (m0->m_pkthdr.len < ETHER_PAD_LEN ||
577 bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, 577 bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
578 BUS_DMA_NOWAIT) != 0) { 578 BUS_DMA_NOWAIT) != 0) {
579 MGETHDR(m, M_DONTWAIT, MT_DATA); 579 MGETHDR(m, M_DONTWAIT, MT_DATA);
580 if (m == NULL) { 580 if (m == NULL) {
581 printf("%s: unable to allocate Tx mbuf\n", 581 printf("%s: unable to allocate Tx mbuf\n",
582 device_xname(sc->sc_dev)); 582 device_xname(sc->sc_dev));
583 break; 583 break;
584 } 584 }
585 if (m0->m_pkthdr.len > MHLEN) { 585 if (m0->m_pkthdr.len > MHLEN) {
586 MCLGET(m, M_DONTWAIT); 586 MCLGET(m, M_DONTWAIT);
587 if ((m->m_flags & M_EXT) == 0) { 587 if ((m->m_flags & M_EXT) == 0) {
588 printf("%s: unable to allocate Tx " 588 printf("%s: unable to allocate Tx "
589 "cluster\n", 589 "cluster\n",
590 device_xname(sc->sc_dev)); 590 device_xname(sc->sc_dev));
591 m_freem(m); 591 m_freem(m);
592 break; 592 break;
593 } 593 }
594 } 594 }
595 595
596 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *)); 596 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
597 if (m0->m_pkthdr.len < ETHER_PAD_LEN) { 597 if (m0->m_pkthdr.len < ETHER_PAD_LEN) {
598 memset(mtod(m, char *) + m0->m_pkthdr.len, 0, 598 memset(mtod(m, char *) + m0->m_pkthdr.len, 0,
599 ETHER_PAD_LEN - m0->m_pkthdr.len); 599 ETHER_PAD_LEN - m0->m_pkthdr.len);
600 m->m_pkthdr.len = m->m_len = ETHER_PAD_LEN; 600 m->m_pkthdr.len = m->m_len = ETHER_PAD_LEN;
601 } else 601 } else
602 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len; 602 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
603 603
604 if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, 604 if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
605 m, BUS_DMA_NOWAIT)) != 0) { 605 m, BUS_DMA_NOWAIT)) != 0) {
606 printf("%s: unable to load Tx buffer, " 606 printf("%s: unable to load Tx buffer, "
607 "error = %d\n", 607 "error = %d\n",
608 device_xname(sc->sc_dev), err); 608 device_xname(sc->sc_dev), err);
609 break; 609 break;
610 } 610 }
611 } 611 }
612 612
613 /* 613 /*
614 * Ensure we have enough descriptors free to describe 614 * Ensure we have enough descriptors free to describe
615 * the packet. 615 * the packet.
616 */ 616 */
617 if (dmamap->dm_nsegs > sc->sc_nfreetx) { 617 if (dmamap->dm_nsegs > sc->sc_nfreetx) {
618 /* 618 /*
619 * Not enough free descriptors to transmit this 619 * Not enough free descriptors to transmit this
620 * packet. We haven't committed to anything yet, 620 * packet. We haven't committed to anything yet,
621 * so just unload the DMA map, put the packet 621 * so just unload the DMA map, put the packet
622 * back on the queue, and punt. Notify the upper 622 * back on the queue, and punt. Notify the upper
623 * layer that there are no more slots left. 623 * layer that there are no more slots left.
624 * 624 *
625 * XXX We could allocate an mbuf and copy, but 625 * XXX We could allocate an mbuf and copy, but
626 * XXX it is worth it? 626 * XXX it is worth it?
627 */ 627 */
628 ifp->if_flags |= IFF_OACTIVE; 628 ifp->if_flags |= IFF_OACTIVE;
629 bus_dmamap_unload(sc->sc_dmat, dmamap); 629 bus_dmamap_unload(sc->sc_dmat, dmamap);
630 if (m != NULL) 630 if (m != NULL)
631 m_freem(m); 631 m_freem(m);
632 break; 632 break;
633 } 633 }
634 634
635 IFQ_DEQUEUE(&ifp->if_snd, m0); 635 IFQ_DEQUEUE(&ifp->if_snd, m0);
636 /* 636 /*
637 * Pass the packet to any BPF listeners. 637 * Pass the packet to any BPF listeners.
638 */ 638 */
639 bpf_mtap(ifp, m0, BPF_D_OUT); 639 bpf_mtap(ifp, m0, BPF_D_OUT);
640 if (m != NULL) { 640 if (m != NULL) {
641 m_freem(m0); 641 m_freem(m0);
642 m0 = m; 642 m0 = m;
643 } 643 }
644 644
645 /* 645 /*
646 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. 646 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
647 */ 647 */
648 648
649 SQ_TRACE(SQ_ENQUEUE, sc, sc->sc_nexttx, 0); 649 SQ_TRACE(SQ_ENQUEUE, sc, sc->sc_nexttx, 0);
650 650
651 /* Sync the DMA map. */ 651 /* Sync the DMA map. */
652 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, 652 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
653 BUS_DMASYNC_PREWRITE); 653 BUS_DMASYNC_PREWRITE);
654 654
655 /* 655 /*
656 * Initialize the transmit descriptors. 656 * Initialize the transmit descriptors.
657 */ 657 */
658 for (nexttx = sc->sc_nexttx, seg = 0, totlen = 0; 658 for (nexttx = sc->sc_nexttx, seg = 0, totlen = 0;
659 seg < dmamap->dm_nsegs; 659 seg < dmamap->dm_nsegs;
660 seg++, nexttx = SQ_NEXTTX(nexttx)) { 660 seg++, nexttx = SQ_NEXTTX(nexttx)) {
661 if (sc->hpc_regs->revision == 3) { 661 if (sc->hpc_regs->revision == 3) {
662 sc->sc_txdesc[nexttx].hpc3_hdd_bufptr = 662 sc->sc_txdesc[nexttx].hpc3_hdd_bufptr =
663 dmamap->dm_segs[seg].ds_addr; 663 dmamap->dm_segs[seg].ds_addr;
664 sc->sc_txdesc[nexttx].hpc3_hdd_ctl = 664 sc->sc_txdesc[nexttx].hpc3_hdd_ctl =
665 dmamap->dm_segs[seg].ds_len; 665 dmamap->dm_segs[seg].ds_len;
666 } else { 666 } else {
667 sc->sc_txdesc[nexttx].hpc1_hdd_bufptr = 667 sc->sc_txdesc[nexttx].hpc1_hdd_bufptr =
668 dmamap->dm_segs[seg].ds_addr; 668 dmamap->dm_segs[seg].ds_addr;
669 sc->sc_txdesc[nexttx].hpc1_hdd_ctl = 669 sc->sc_txdesc[nexttx].hpc1_hdd_ctl =
670 dmamap->dm_segs[seg].ds_len; 670 dmamap->dm_segs[seg].ds_len;
671 } 671 }
672 sc->sc_txdesc[nexttx].hdd_descptr = 672 sc->sc_txdesc[nexttx].hdd_descptr =
673 SQ_CDTXADDR(sc, SQ_NEXTTX(nexttx)); 673 SQ_CDTXADDR(sc, SQ_NEXTTX(nexttx));
674 lasttx = nexttx; 674 lasttx = nexttx;
675 totlen += dmamap->dm_segs[seg].ds_len; 675 totlen += dmamap->dm_segs[seg].ds_len;
676 } 676 }
677 677
678 /* Last descriptor gets end-of-packet */ 678 /* Last descriptor gets end-of-packet */
679 KASSERT(lasttx != -1); 679 KASSERT(lasttx != -1);
680 if (sc->hpc_regs->revision == 3) 680 if (sc->hpc_regs->revision == 3)
681 sc->sc_txdesc[lasttx].hpc3_hdd_ctl |= 681 sc->sc_txdesc[lasttx].hpc3_hdd_ctl |=
682 HPC3_HDD_CTL_EOPACKET; 682 HPC3_HDD_CTL_EOPACKET;
683 else 683 else
684 sc->sc_txdesc[lasttx].hpc1_hdd_ctl |= 684 sc->sc_txdesc[lasttx].hpc1_hdd_ctl |=
685 HPC1_HDD_CTL_EOPACKET; 685 HPC1_HDD_CTL_EOPACKET;
686 686
687 SQ_DPRINTF(("%s: transmit %d-%d, len %d\n", 687 SQ_DPRINTF(("%s: transmit %d-%d, len %d\n",
688 device_xname(sc->sc_dev), sc->sc_nexttx, lasttx, totlen)); 688 device_xname(sc->sc_dev), sc->sc_nexttx, lasttx, totlen));
689 689
690 if (ifp->if_flags & IFF_DEBUG) { 690 if (ifp->if_flags & IFF_DEBUG) {
691 printf(" transmit chain:\n"); 691 printf(" transmit chain:\n");
692 for (seg = sc->sc_nexttx;; seg = SQ_NEXTTX(seg)) { 692 for (seg = sc->sc_nexttx;; seg = SQ_NEXTTX(seg)) {
693 printf(" descriptor %d:\n", seg); 693 printf(" descriptor %d:\n", seg);
694 printf(" hdd_bufptr: 0x%08x\n", 694 printf(" hdd_bufptr: 0x%08x\n",
695 (sc->hpc_regs->revision == 3) ? 695 (sc->hpc_regs->revision == 3) ?
696 sc->sc_txdesc[seg].hpc3_hdd_bufptr : 696 sc->sc_txdesc[seg].hpc3_hdd_bufptr :
697 sc->sc_txdesc[seg].hpc1_hdd_bufptr); 697 sc->sc_txdesc[seg].hpc1_hdd_bufptr);
698 printf(" hdd_ctl: 0x%08x\n", 698 printf(" hdd_ctl: 0x%08x\n",
699 (sc->hpc_regs->revision == 3) ? 699 (sc->hpc_regs->revision == 3) ?
700 sc->sc_txdesc[seg].hpc3_hdd_ctl: 700 sc->sc_txdesc[seg].hpc3_hdd_ctl:
701 sc->sc_txdesc[seg].hpc1_hdd_ctl); 701 sc->sc_txdesc[seg].hpc1_hdd_ctl);
702 printf(" hdd_descptr: 0x%08x\n", 702 printf(" hdd_descptr: 0x%08x\n",
703 sc->sc_txdesc[seg].hdd_descptr); 703 sc->sc_txdesc[seg].hdd_descptr);
704 704
705 if (seg == lasttx) 705 if (seg == lasttx)
706 break; 706 break;
707 } 707 }
708 } 708 }
709 709
710 /* Sync the descriptors we're using. */ 710 /* Sync the descriptors we're using. */
711 SQ_CDTXSYNC(sc, sc->sc_nexttx, dmamap->dm_nsegs, 711 SQ_CDTXSYNC(sc, sc->sc_nexttx, dmamap->dm_nsegs,
712 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 712 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
713 713
714 /* Store a pointer to the packet so we can free it later */ 714 /* Store a pointer to the packet so we can free it later */
715 sc->sc_txmbuf[sc->sc_nexttx] = m0; 715 sc->sc_txmbuf[sc->sc_nexttx] = m0;
716 716
717 /* Advance the tx pointer. */ 717 /* Advance the tx pointer. */
718 sc->sc_nfreetx -= dmamap->dm_nsegs; 718 sc->sc_nfreetx -= dmamap->dm_nsegs;
719 sc->sc_nexttx = nexttx; 719 sc->sc_nexttx = nexttx;
720 } 720 }
721 721
722 /* All transmit descriptors used up, let upper layers know */ 722 /* All transmit descriptors used up, let upper layers know */
723 if (sc->sc_nfreetx == 0) 723 if (sc->sc_nfreetx == 0)
724 ifp->if_flags |= IFF_OACTIVE; 724 ifp->if_flags |= IFF_OACTIVE;
725 725
726 if (sc->sc_nfreetx != ofree) { 726 if (sc->sc_nfreetx != ofree) {
727 SQ_DPRINTF(("%s: %d packets enqueued, first %d, INTR on %d\n", 727 SQ_DPRINTF(("%s: %d packets enqueued, first %d, INTR on %d\n",
728 device_xname(sc->sc_dev), lasttx - firsttx + 1, 728 device_xname(sc->sc_dev), lasttx - firsttx + 1,
729 firsttx, lasttx)); 729 firsttx, lasttx));
730 730
731 /* 731 /*
732 * Cause a transmit interrupt to happen on the 732 * Cause a transmit interrupt to happen on the
733 * last packet we enqueued, mark it as the last 733 * last packet we enqueued, mark it as the last
734 * descriptor. 734 * descriptor.
735 * 735 *
736 * HPC1_HDD_CTL_INTR will generate an interrupt on 736 * HPC1_HDD_CTL_INTR will generate an interrupt on
737 * HPC1. HPC3 requires HPC3_HDD_CTL_EOPACKET in 737 * HPC1. HPC3 requires HPC3_HDD_CTL_EOPACKET in
738 * addition to HPC3_HDD_CTL_INTR to interrupt. 738 * addition to HPC3_HDD_CTL_INTR to interrupt.
739 */ 739 */
740 KASSERT(lasttx != -1); 740 KASSERT(lasttx != -1);
741 if (sc->hpc_regs->revision == 3) { 741 if (sc->hpc_regs->revision == 3) {
742 sc->sc_txdesc[lasttx].hpc3_hdd_ctl |= 742 sc->sc_txdesc[lasttx].hpc3_hdd_ctl |=
743 HPC3_HDD_CTL_INTR | HPC3_HDD_CTL_EOCHAIN; 743 HPC3_HDD_CTL_INTR | HPC3_HDD_CTL_EOCHAIN;
744 } else { 744 } else {
745 sc->sc_txdesc[lasttx].hpc1_hdd_ctl |= HPC1_HDD_CTL_INTR; 745 sc->sc_txdesc[lasttx].hpc1_hdd_ctl |= HPC1_HDD_CTL_INTR;
746 sc->sc_txdesc[lasttx].hpc1_hdd_bufptr |= 746 sc->sc_txdesc[lasttx].hpc1_hdd_bufptr |=
747 HPC1_HDD_CTL_EOCHAIN; 747 HPC1_HDD_CTL_EOCHAIN;
748 } 748 }
749 749
750 SQ_CDTXSYNC(sc, lasttx, 1, 750 SQ_CDTXSYNC(sc, lasttx, 1,
751 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 751 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
752 752
753 /* 753 /*
754 * There is a potential race condition here if the HPC 754 * There is a potential race condition here if the HPC
755 * DMA channel is active and we try and either update 755 * DMA channel is active and we try and either update
756 * the 'next descriptor' pointer in the HPC PIO space 756 * the 'next descriptor' pointer in the HPC PIO space
757 * or the 'next descriptor' pointer in a previous desc- 757 * or the 'next descriptor' pointer in a previous desc-
758 * riptor. 758 * riptor.
759 * 759 *
760 * To avoid this, if the channel is active, we rely on 760 * To avoid this, if the channel is active, we rely on
761 * the transmit interrupt routine noticing that there 761 * the transmit interrupt routine noticing that there
762 * are more packets to send and restarting the HPC DMA 762 * are more packets to send and restarting the HPC DMA
763 * engine, rather than mucking with the DMA state here. 763 * engine, rather than mucking with the DMA state here.
764 */ 764 */
765 status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl); 765 status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl);
766 766
767 if ((status & sc->hpc_regs->enetx_ctl_active) != 0) { 767 if ((status & sc->hpc_regs->enetx_ctl_active) != 0) {
768 SQ_TRACE(SQ_ADD_TO_DMA, sc, firsttx, status); 768 SQ_TRACE(SQ_ADD_TO_DMA, sc, firsttx, status);
769 769
770 /* 770 /*
771 * NB: hpc3_hdd_ctl == hpc1_hdd_bufptr, and 771 * NB: hpc3_hdd_ctl == hpc1_hdd_bufptr, and
772 * HPC1_HDD_CTL_EOCHAIN == HPC3_HDD_CTL_EOCHAIN 772 * HPC1_HDD_CTL_EOCHAIN == HPC3_HDD_CTL_EOCHAIN
773 */ 773 */
774 sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc3_hdd_ctl &= 774 sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc3_hdd_ctl &=
775 ~HPC3_HDD_CTL_EOCHAIN; 775 ~HPC3_HDD_CTL_EOCHAIN;
776 776
777 if (sc->hpc_regs->revision != 3) 777 if (sc->hpc_regs->revision != 3)
778 sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc1_hdd_ctl 778 sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc1_hdd_ctl
779 &= ~HPC1_HDD_CTL_INTR; 779 &= ~HPC1_HDD_CTL_INTR;
780 780
781 SQ_CDTXSYNC(sc, SQ_PREVTX(firsttx), 1, 781 SQ_CDTXSYNC(sc, SQ_PREVTX(firsttx), 1,
782 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 782 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
783 } else if (sc->hpc_regs->revision == 3) { 783 } else if (sc->hpc_regs->revision == 3) {
784 SQ_TRACE(SQ_START_DMA, sc, firsttx, status); 784 SQ_TRACE(SQ_START_DMA, sc, firsttx, status);
785 785
786 sq_hpc_write(sc, HPC3_ENETX_NDBP, SQ_CDTXADDR(sc, 786 sq_hpc_write(sc, HPC3_ENETX_NDBP, SQ_CDTXADDR(sc,
787 firsttx)); 787 firsttx));
788 788
789 /* Kick DMA channel into life */ 789 /* Kick DMA channel into life */
790 sq_hpc_write(sc, HPC3_ENETX_CTL, HPC3_ENETX_CTL_ACTIVE); 790 sq_hpc_write(sc, HPC3_ENETX_CTL, HPC3_ENETX_CTL_ACTIVE);
791 } else { 791 } else {
792 /* 792 /*
793 * In the HPC1 case where transmit DMA is 793 * In the HPC1 case where transmit DMA is
794 * inactive, we can either kick off if 794 * inactive, we can either kick off if
795 * the ring was previously empty, or call 795 * the ring was previously empty, or call
796 * our transmit interrupt handler to 796 * our transmit interrupt handler to
797 * figure out if the ring stopped short 797 * figure out if the ring stopped short
798 * and restart at the right place. 798 * and restart at the right place.
799 */ 799 */
800 if (ofree == SQ_NTXDESC) { 800 if (ofree == SQ_NTXDESC) {
801 SQ_TRACE(SQ_START_DMA, sc, firsttx, status); 801 SQ_TRACE(SQ_START_DMA, sc, firsttx, status);
802 802
803 sq_hpc_write(sc, HPC1_ENETX_NDBP, 803 sq_hpc_write(sc, HPC1_ENETX_NDBP,
804 SQ_CDTXADDR(sc, firsttx)); 804 SQ_CDTXADDR(sc, firsttx));
805 sq_hpc_write(sc, HPC1_ENETX_CFXBP, 805 sq_hpc_write(sc, HPC1_ENETX_CFXBP,
806 SQ_CDTXADDR(sc, firsttx)); 806 SQ_CDTXADDR(sc, firsttx));
807 sq_hpc_write(sc, HPC1_ENETX_CBP, 807 sq_hpc_write(sc, HPC1_ENETX_CBP,
808 SQ_CDTXADDR(sc, firsttx)); 808 SQ_CDTXADDR(sc, firsttx));
809 809
810 /* Kick DMA channel into life */ 810 /* Kick DMA channel into life */
811 sq_hpc_write(sc, HPC1_ENETX_CTL, 811 sq_hpc_write(sc, HPC1_ENETX_CTL,
812 HPC1_ENETX_CTL_ACTIVE); 812 HPC1_ENETX_CTL_ACTIVE);
813 } else 813 } else
814 sq_txring_hpc1(sc); 814 sq_txring_hpc1(sc);
815 } 815 }
816 816
817 /* Set a watchdog timer in case the chip flakes out. */ 817 /* Set a watchdog timer in case the chip flakes out. */
818 ifp->if_timer = 5; 818 ifp->if_timer = 5;
819 } 819 }
820} 820}
821 821
822void 822void
823sq_stop(struct ifnet *ifp, int disable) 823sq_stop(struct ifnet *ifp, int disable)
824{ 824{
825 int i; 825 int i;
826 struct sq_softc *sc = ifp->if_softc; 826 struct sq_softc *sc = ifp->if_softc;
827 827
828 for (i = 0; i < SQ_NTXDESC; i++) { 828 for (i = 0; i < SQ_NTXDESC; i++) {
829 if (sc->sc_txmbuf[i] != NULL) { 829 if (sc->sc_txmbuf[i] != NULL) {
830 bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]); 830 bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
831 m_freem(sc->sc_txmbuf[i]); 831 m_freem(sc->sc_txmbuf[i]);
832 sc->sc_txmbuf[i] = NULL; 832 sc->sc_txmbuf[i] = NULL;
833 } 833 }
834 } 834 }
835 835
836 /* Clear Seeq transmit/receive command registers */ 836 /* Clear Seeq transmit/receive command registers */
837 sq_seeq_write(sc, SEEQ_TXCMD, 0); 837 sq_seeq_write(sc, SEEQ_TXCMD, 0);
838 sq_seeq_write(sc, SEEQ_RXCMD, 0); 838 sq_seeq_write(sc, SEEQ_RXCMD, 0);
839 839
840 sq_reset(sc); 840 sq_reset(sc);
841 841
842 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 842 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
843 ifp->if_timer = 0; 843 ifp->if_timer = 0;
844} 844}
845 845
846/* Device timeout/watchdog routine. */ 846/* Device timeout/watchdog routine. */
847void 847void
848sq_watchdog(struct ifnet *ifp) 848sq_watchdog(struct ifnet *ifp)
849{ 849{
850 uint32_t status; 850 uint32_t status;
851 struct sq_softc *sc = ifp->if_softc; 851 struct sq_softc *sc = ifp->if_softc;
852 852
853 status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl); 853 status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl);
854 log(LOG_ERR, "%s: device timeout (prev %d, next %d, free %d, " 854 log(LOG_ERR, "%s: device timeout (prev %d, next %d, free %d, "
855 "status %08x)\n", device_xname(sc->sc_dev), sc->sc_prevtx, 855 "status %08x)\n", device_xname(sc->sc_dev), sc->sc_prevtx,
856 sc->sc_nexttx, sc->sc_nfreetx, status); 856 sc->sc_nexttx, sc->sc_nfreetx, status);
857 857
858 sq_trace_dump(sc); 858 sq_trace_dump(sc);
859 859
860 memset(&sc->sq_trace, 0, sizeof(sc->sq_trace)); 860 memset(&sc->sq_trace, 0, sizeof(sc->sq_trace));
861 sc->sq_trace_idx = 0; 861 sc->sq_trace_idx = 0;
862 862
863 if_statinc(ifp, if_oerrors); 863 if_statinc(ifp, if_oerrors);
864 864
865 sq_init(ifp); 865 sq_init(ifp);
866} 866}
867 867
868static void 868static void
869sq_trace_dump(struct sq_softc *sc) 869sq_trace_dump(struct sq_softc *sc)
870{ 870{
871 int i; 871 int i;
872 const char *act; 872 const char *act;
873 873
874 for (i = 0; i < sc->sq_trace_idx; i++) { 874 for (i = 0; i < sc->sq_trace_idx; i++) {
875 switch (sc->sq_trace[i].action) { 875 switch (sc->sq_trace[i].action) {
876 case SQ_RESET: act = "SQ_RESET"; break; 876 case SQ_RESET: act = "SQ_RESET"; break;
877 case SQ_ADD_TO_DMA: act = "SQ_ADD_TO_DMA"; break; 877 case SQ_ADD_TO_DMA: act = "SQ_ADD_TO_DMA"; break;
878 case SQ_START_DMA: act = "SQ_START_DMA"; break; 878 case SQ_START_DMA: act = "SQ_START_DMA"; break;
879 case SQ_DONE_DMA: act = "SQ_DONE_DMA"; break; 879 case SQ_DONE_DMA: act = "SQ_DONE_DMA"; break;
880 case SQ_RESTART_DMA: act = "SQ_RESTART_DMA"; break; 880 case SQ_RESTART_DMA: act = "SQ_RESTART_DMA"; break;
881 case SQ_TXINTR_ENTER: act = "SQ_TXINTR_ENTER"; break; 881 case SQ_TXINTR_ENTER: act = "SQ_TXINTR_ENTER"; break;
882 case SQ_TXINTR_EXIT: act = "SQ_TXINTR_EXIT"; break; 882 case SQ_TXINTR_EXIT: act = "SQ_TXINTR_EXIT"; break;
883 case SQ_TXINTR_BUSY: act = "SQ_TXINTR_BUSY"; break; 883 case SQ_TXINTR_BUSY: act = "SQ_TXINTR_BUSY"; break;
884 case SQ_IOCTL: act = "SQ_IOCTL"; break; 884 case SQ_IOCTL: act = "SQ_IOCTL"; break;
885 case SQ_ENQUEUE: act = "SQ_ENQUEUE"; break; 885 case SQ_ENQUEUE: act = "SQ_ENQUEUE"; break;
886 default: act = "UNKNOWN"; 886 default: act = "UNKNOWN";
887 } 887 }
888 888
889 printf("%s: [%03d] action %-16s buf %03d free %03d " 889 printf("%s: [%03d] action %-16s buf %03d free %03d "
890 "status %08x line %d\n", device_xname(sc->sc_dev), i, act, 890 "status %08x line %d\n", device_xname(sc->sc_dev), i, act,
891 sc->sq_trace[i].bufno, sc->sq_trace[i].freebuf, 891 sc->sq_trace[i].bufno, sc->sq_trace[i].freebuf,
892 sc->sq_trace[i].status, sc->sq_trace[i].line); 892 sc->sq_trace[i].status, sc->sq_trace[i].line);
893 } 893 }
894} 894}
895 895
896static int 896static int
897sq_intr(void *arg) 897sq_intr(void *arg)
898{ 898{
899 struct sq_softc *sc = arg; 899 struct sq_softc *sc = arg;
900 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 900 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
901 int handled = 0; 901 int handled = 0;
902 uint32_t stat; 902 uint32_t stat;
903 903
904 stat = sq_hpc_read(sc, sc->hpc_regs->enetr_reset); 904 stat = sq_hpc_read(sc, sc->hpc_regs->enetr_reset);
905 905
906 if ((stat & 2) == 0) 906 if ((stat & 2) == 0)
907 SQ_DPRINTF(("%s: Unexpected interrupt!\n", 907 SQ_DPRINTF(("%s: Unexpected interrupt!\n",
908 device_xname(sc->sc_dev))); 908 device_xname(sc->sc_dev)));
909 else 909 else
910 sq_hpc_write(sc, sc->hpc_regs->enetr_reset, (stat | 2)); 910 sq_hpc_write(sc, sc->hpc_regs->enetr_reset, (stat | 2));
911 911
912 /* 912 /*
913 * If the interface isn't running, the interrupt couldn't 913 * If the interface isn't running, the interrupt couldn't
914 * possibly have come from us. 914 * possibly have come from us.
915 */ 915 */
916 if ((ifp->if_flags & IFF_RUNNING) == 0) 916 if ((ifp->if_flags & IFF_RUNNING) == 0)
917 return 0; 917 return 0;
918 918
919 sc->sq_intrcnt.ev_count++; 919 sc->sq_intrcnt.ev_count++;
920 920
921 /* Always check for received packets */ 921 /* Always check for received packets */
922 if (sq_rxintr(sc) != 0) 922 if (sq_rxintr(sc) != 0)
923 handled++; 923 handled++;
924 924
925 /* Only handle transmit interrupts if we actually sent something */ 925 /* Only handle transmit interrupts if we actually sent something */
926 if (sc->sc_nfreetx < SQ_NTXDESC) { 926 if (sc->sc_nfreetx < SQ_NTXDESC) {
927 sq_txintr(sc); 927 sq_txintr(sc);
928 handled++; 928 handled++;
929 } 929 }
930 930
931 if (handled) 931 if (handled)
932 rnd_add_uint32(&sc->rnd_source, stat); 932 rnd_add_uint32(&sc->rnd_source, stat);
933 return handled; 933 return handled;
934} 934}
935 935
936static int 936static int
937sq_rxintr(struct sq_softc *sc) 937sq_rxintr(struct sq_softc *sc)
938{ 938{
939 int count = 0; 939 int count = 0;
940 struct mbuf* m; 940 struct mbuf* m;
941 int i, framelen; 941 int i, framelen;
942 uint8_t pktstat; 942 uint8_t pktstat;
943 uint32_t status; 943 uint32_t status;
944 uint32_t ctl_reg; 944 uint32_t ctl_reg;
945 int new_end, orig_end; 945 int new_end, orig_end;
946 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 946 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
947 947
948 for (i = sc->sc_nextrx;; i = SQ_NEXTRX(i)) { 948 for (i = sc->sc_nextrx;; i = SQ_NEXTRX(i)) {
949 SQ_CDRXSYNC(sc, i, 949 SQ_CDRXSYNC(sc, i,
950 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 950 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
951 951
952 /* 952 /*
953 * If this is a CPU-owned buffer, we're at the end of the list. 953 * If this is a CPU-owned buffer, we're at the end of the list.
954 */ 954 */
955 if (sc->hpc_regs->revision == 3) 955 if (sc->hpc_regs->revision == 3)
956 ctl_reg = 956 ctl_reg =
957 sc->sc_rxdesc[i].hpc3_hdd_ctl & HPC3_HDD_CTL_OWN; 957 sc->sc_rxdesc[i].hpc3_hdd_ctl & HPC3_HDD_CTL_OWN;
958 else 958 else
959 ctl_reg = 959 ctl_reg =
960 sc->sc_rxdesc[i].hpc1_hdd_ctl & HPC1_HDD_CTL_OWN; 960 sc->sc_rxdesc[i].hpc1_hdd_ctl & HPC1_HDD_CTL_OWN;
961 961
962 if (ctl_reg) { 962 if (ctl_reg) {
963#if defined(SQ_DEBUG) 963#if defined(SQ_DEBUG)
964 uint32_t reg; 964 uint32_t reg;
965 965
966 reg = sq_hpc_read(sc, sc->hpc_regs->enetr_ctl); 966 reg = sq_hpc_read(sc, sc->hpc_regs->enetr_ctl);
967 SQ_DPRINTF(("%s: rxintr: done at %d (ctl %08x)\n", 967 SQ_DPRINTF(("%s: rxintr: done at %d (ctl %08x)\n",
968 device_xname(sc->sc_dev), i, reg)); 968 device_xname(sc->sc_dev), i, reg));
969#endif 969#endif
970 break; 970 break;
971 } 971 }
972 972
973 count++; 973 count++;
974 974
975 m = sc->sc_rxmbuf[i]; 975 m = sc->sc_rxmbuf[i];
976 framelen = m->m_ext.ext_size - 3; 976 framelen = m->m_ext.ext_size - 3;
977 if (sc->hpc_regs->revision == 3) 977 if (sc->hpc_regs->revision == 3)
978 framelen -= 978 framelen -=
979 HPC3_HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc3_hdd_ctl); 979 HPC3_HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc3_hdd_ctl);
980 else 980 else
981 framelen -= 981 framelen -=
982 HPC1_HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc1_hdd_ctl); 982 HPC1_HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc1_hdd_ctl);
983 983
984 /* Now sync the actual packet data */ 984 /* Now sync the actual packet data */
985 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0, 985 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
986 sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_POSTREAD); 986 sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_POSTREAD);
987 987
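		/*
		 * The chip leaves a receive status byte just past the frame.
		 * framelen = ext_size - 3 - (unused bytes) implies the buffer
		 * holds a 2-byte front pad, the frame, and one trailing
		 * status byte, so that byte sits at offset framelen + 2.
		 */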
988 pktstat = *((uint8_t *)m->m_data + framelen + 2); 988 pktstat = *((uint8_t *)m->m_data + framelen + 2);
989 989
990 if ((pktstat & RXSTAT_GOOD) == 0) { 990 if ((pktstat & RXSTAT_GOOD) == 0) {
991 if_statinc(ifp, if_ierrors); 991 if_statinc(ifp, if_ierrors);
992 992
993 if (pktstat & RXSTAT_OFLOW) 993 if (pktstat & RXSTAT_OFLOW)
994 printf("%s: receive FIFO overflow\n", 994 printf("%s: receive FIFO overflow\n",
995 device_xname(sc->sc_dev)); 995 device_xname(sc->sc_dev));
996 996
997 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0, 997 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
998 sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD); 998 sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);
999 SQ_INIT_RXDESC(sc, i); 999 SQ_INIT_RXDESC(sc, i);
1000 SQ_DPRINTF(("%s: sq_rxintr: buf %d no RXSTAT_GOOD\n", 1000 SQ_DPRINTF(("%s: sq_rxintr: buf %d no RXSTAT_GOOD\n",
1001 device_xname(sc->sc_dev), i)); 1001 device_xname(sc->sc_dev), i));
1002 continue; 1002 continue;
1003 } 1003 }
1004 1004
1005 if (sq_add_rxbuf(sc, i) != 0) { 1005 if (sq_add_rxbuf(sc, i) != 0) {
1006 if_statinc(ifp, if_ierrors); 1006 if_statinc(ifp, if_ierrors);
1007 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0, 1007 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
1008 sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD); 1008 sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);
1009 SQ_INIT_RXDESC(sc, i); 1009 SQ_INIT_RXDESC(sc, i);
1010 SQ_DPRINTF(("%s: sq_rxintr: buf %d sq_add_rxbuf() " 1010 SQ_DPRINTF(("%s: sq_rxintr: buf %d sq_add_rxbuf() "
1011 "failed\n", device_xname(sc->sc_dev), i)); 1011 "failed\n", device_xname(sc->sc_dev), i));
1012 continue; 1012 continue;
1013 } 1013 }
1014 1014
1015 1015
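		/*
		 * Skip the 2-byte front pad so the IP header ends up 4-byte
		 * aligned behind the 14-byte Ethernet header.
		 */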
1016 m->m_data += 2; 1016 m->m_data += 2;
1017 m_set_rcvif(m, ifp); 1017 m_set_rcvif(m, ifp);
1018 m->m_pkthdr.len = m->m_len = framelen; 1018 m->m_pkthdr.len = m->m_len = framelen;
1019 1019
1020 SQ_DPRINTF(("%s: sq_rxintr: buf %d len %d\n", 1020 SQ_DPRINTF(("%s: sq_rxintr: buf %d len %d\n",
1021 device_xname(sc->sc_dev), i, framelen)); 1021 device_xname(sc->sc_dev), i, framelen));
1022 1022
1023 if_percpuq_enqueue(ifp->if_percpuq, m); 1023 if_percpuq_enqueue(ifp->if_percpuq, m);
1024 } 1024 }
1025 1025
1026 1026
1027 /* If anything happened, move ring start/end pointers to new spot */ 1027 /* If anything happened, move ring start/end pointers to new spot */
1028 if (i != sc->sc_nextrx) { 1028 if (i != sc->sc_nextrx) {
1029 /* 1029 /*
1030 * NB: hpc3_hdd_ctl == hpc1_hdd_bufptr, and 1030 * NB: hpc3_hdd_ctl == hpc1_hdd_bufptr, and
1031 * HPC1_HDD_CTL_EOCHAIN == HPC3_HDD_CTL_EOCHAIN 1031 * HPC1_HDD_CTL_EOCHAIN == HPC3_HDD_CTL_EOCHAIN
1032 */ 1032 */
1033 1033
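		/*
		 * Mark the descriptor just before the new head as end of
		 * chain so the DMA engine stops there, then clear the end
		 * of chain bit on the old tail so reception can continue
		 * into the recycled descriptors.
		 */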
1034 new_end = SQ_PREVRX(i); 1034 new_end = SQ_PREVRX(i);
1035 sc->sc_rxdesc[new_end].hpc3_hdd_ctl |= HPC3_HDD_CTL_EOCHAIN; 1035 sc->sc_rxdesc[new_end].hpc3_hdd_ctl |= HPC3_HDD_CTL_EOCHAIN;
1036 SQ_CDRXSYNC(sc, new_end, 1036 SQ_CDRXSYNC(sc, new_end,
1037 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1037 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1038 1038
1039 orig_end = SQ_PREVRX(sc->sc_nextrx); 1039 orig_end = SQ_PREVRX(sc->sc_nextrx);
1040 sc->sc_rxdesc[orig_end].hpc3_hdd_ctl &= ~HPC3_HDD_CTL_EOCHAIN; 1040 sc->sc_rxdesc[orig_end].hpc3_hdd_ctl &= ~HPC3_HDD_CTL_EOCHAIN;
1041 SQ_CDRXSYNC(sc, orig_end, 1041 SQ_CDRXSYNC(sc, orig_end,
1042 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1042 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1043 1043
1044 sc->sc_nextrx = i; 1044 sc->sc_nextrx = i;
1045 } 1045 }
1046 1046
1047 status = sq_hpc_read(sc, sc->hpc_regs->enetr_ctl); 1047 status = sq_hpc_read(sc, sc->hpc_regs->enetr_ctl);
1048 1048
1049 /* If receive channel is stopped, restart it... */ 1049 /* If receive channel is stopped, restart it... */
1050 if ((status & sc->hpc_regs->enetr_ctl_active) == 0) { 1050 if ((status & sc->hpc_regs->enetr_ctl_active) == 0) {
1051 /* Pass the start of the receive ring to the HPC */ 1051 /* Pass the start of the receive ring to the HPC */
1052 sq_hpc_write(sc, sc->hpc_regs->enetr_ndbp, 1052 sq_hpc_write(sc, sc->hpc_regs->enetr_ndbp,
1053 SQ_CDRXADDR(sc, sc->sc_nextrx)); 1053 SQ_CDRXADDR(sc, sc->sc_nextrx));
1054 1054
1055 /* And turn on the HPC ethernet receive channel */ 1055 /* And turn on the HPC ethernet receive channel */
1056 sq_hpc_write(sc, sc->hpc_regs->enetr_ctl, 1056 sq_hpc_write(sc, sc->hpc_regs->enetr_ctl,
1057 sc->hpc_regs->enetr_ctl_active); 1057 sc->hpc_regs->enetr_ctl_active);
1058 } 1058 }
1059 1059
1060 return count; 1060 return count;
1061} 1061}
1062 1062
1063static int 1063static int
1064sq_txintr(struct sq_softc *sc) 1064sq_txintr(struct sq_softc *sc)
1065{ 1065{
1066 int shift = 0; 1066 int shift = 0;
1067 uint32_t status, tmp; 1067 uint32_t status, tmp;
1068 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1068 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1069 1069
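	/*
	 * On HPC1 the transmit status bits appear to live in the upper
	 * halfword of the control register; shift them down so the
	 * TXSTAT_* tests below work for both HPC revisions.
	 */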
1070 if (sc->hpc_regs->revision != 3) 1070 if (sc->hpc_regs->revision != 3)
1071 shift = 16; 1071 shift = 16;
1072 1072
1073 status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl) >> shift; 1073 status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl) >> shift;
1074 1074
1075 SQ_TRACE(SQ_TXINTR_ENTER, sc, sc->sc_prevtx, status); 1075 SQ_TRACE(SQ_TXINTR_ENTER, sc, sc->sc_prevtx, status);
1076 1076
1077 net_stats_ref_t nsr = IF_STAT_GETREF(ifp); 1077 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
1078 tmp = (sc->hpc_regs->enetx_ctl_active >> shift) | TXSTAT_GOOD; 1078 tmp = (sc->hpc_regs->enetx_ctl_active >> shift) | TXSTAT_GOOD;
1079 if ((status & tmp) == 0) { 1079 if ((status & tmp) == 0) {
1080 if (status & TXSTAT_COLL) 1080 if (status & TXSTAT_COLL)
1081 if_statinc_ref(nsr, if_collisions); 1081 if_statinc_ref(nsr, if_collisions);
1082 1082
1083 if (status & TXSTAT_UFLOW) { 1083 if (status & TXSTAT_UFLOW) {
1084 printf("%s: transmit underflow\n", 1084 printf("%s: transmit underflow\n",
1085 device_xname(sc->sc_dev)); 1085 device_xname(sc->sc_dev));
1086 if_statinc_ref(nsr, if_oerrors); 1086 if_statinc_ref(nsr, if_oerrors);
1087 } 1087 }
1088 1088
1089 if (status & TXSTAT_16COLL) { 1089 if (status & TXSTAT_16COLL) {
1090 printf("%s: max collisions reached\n", 1090 printf("%s: max collisions reached\n",
1091 device_xname(sc->sc_dev)); 1091 device_xname(sc->sc_dev));
1092 if_statinc_ref(nsr, if_oerrors); 1092 if_statinc_ref(nsr, if_oerrors);
1093 if_statadd_ref(nsr, if_collisions, 16); 1093 if_statadd_ref(nsr, if_collisions, 16);
1094 } 1094 }
1095 } 1095 }
1096 IF_STAT_PUTREF(ifp); 1096 IF_STAT_PUTREF(ifp);
1097 1097
1098 /* prevtx now points to next xmit packet not yet finished */ 1098 /* prevtx now points to next xmit packet not yet finished */
1099 if (sc->hpc_regs->revision == 3) 1099 if (sc->hpc_regs->revision == 3)
1100 sq_txring_hpc3(sc); 1100 sq_txring_hpc3(sc);
1101 else 1101 else
1102 sq_txring_hpc1(sc); 1102 sq_txring_hpc1(sc);
1103 1103
1104 /* If we have buffers free, let upper layers know */ 1104 /* If we have buffers free, let upper layers know */
1105 if (sc->sc_nfreetx > 0) 1105 if (sc->sc_nfreetx > 0)
1106 ifp->if_flags &= ~IFF_OACTIVE; 1106 ifp->if_flags &= ~IFF_OACTIVE;
1107 1107
1108 /* If all packets have left the coop, cancel watchdog */ 1108 /* If all packets have left the coop, cancel watchdog */
1109 if (sc->sc_nfreetx == SQ_NTXDESC) 1109 if (sc->sc_nfreetx == SQ_NTXDESC)
1110 ifp->if_timer = 0; 1110 ifp->if_timer = 0;
1111 1111
1112 SQ_TRACE(SQ_TXINTR_EXIT, sc, sc->sc_prevtx, status); 1112 SQ_TRACE(SQ_TXINTR_EXIT, sc, sc->sc_prevtx, status);
1113 if_schedule_deferred_start(ifp); 1113 if_schedule_deferred_start(ifp);
1114 1114
1115 return 1; 1115 return 1;
1116} 1116}
1117 1117
1118/* 1118/*
1119 * Reclaim used transmit descriptors and restart the transmit DMA 1119 * Reclaim used transmit descriptors and restart the transmit DMA
1120 * engine if necessary. 1120 * engine if necessary.
1121 */ 1121 */
1122static void 1122static void
1123sq_txring_hpc1(struct sq_softc *sc) 1123sq_txring_hpc1(struct sq_softc *sc)
1124{ 1124{
1125 /* 1125 /*
1126 * HPC1 doesn't tag transmitted descriptors, however, 1126 * HPC1 doesn't tag transmitted descriptors, however,
1127 * the NDBP register points to the next descriptor that 1127 * the NDBP register points to the next descriptor that
1128 * has not yet been processed. If DMA is not in progress, 1128 * has not yet been processed. If DMA is not in progress,
1129 * we can safely reclaim all descriptors up to NDBP, and, 1129 * we can safely reclaim all descriptors up to NDBP, and,
1130 * if necessary, restart DMA at NDBP. Otherwise, if DMA 1130 * if necessary, restart DMA at NDBP. Otherwise, if DMA
1131 * is active, we can only safely reclaim up to CBP. 1131 * is active, we can only safely reclaim up to CBP.
1132 * 1132 *
1133 * For now, we'll only reclaim on inactive DMA and assume 1133 * For now, we'll only reclaim on inactive DMA and assume
1134 * that a sufficiently large ring keeps us out of trouble. 1134 * that a sufficiently large ring keeps us out of trouble.
1135 */ 1135 */
1136 uint32_t reclaimto, status; 1136 uint32_t reclaimto, status;
1137 int reclaimall, i = sc->sc_prevtx; 1137 int reclaimall, i = sc->sc_prevtx;
1138 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1138 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1139 1139
1140 status = sq_hpc_read(sc, HPC1_ENETX_CTL); 1140 status = sq_hpc_read(sc, HPC1_ENETX_CTL);
1141 if (status & HPC1_ENETX_CTL_ACTIVE) { 1141 if (status & HPC1_ENETX_CTL_ACTIVE) {
1142 SQ_TRACE(SQ_TXINTR_BUSY, sc, i, status); 1142 SQ_TRACE(SQ_TXINTR_BUSY, sc, i, status);
1143 return; 1143 return;
1144 } else 1144 } else
1145 reclaimto = sq_hpc_read(sc, HPC1_ENETX_NDBP); 1145 reclaimto = sq_hpc_read(sc, HPC1_ENETX_NDBP);
1146 1146
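	/*
	 * If no descriptors are free and NDBP already points at the
	 * first in-flight descriptor, the whole ring has completed;
	 * reclaimall keeps the loop below from bailing out immediately
	 * on that same address.
	 */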
1147 if (sc->sc_nfreetx == 0 && SQ_CDTXADDR(sc, i) == reclaimto) 1147 if (sc->sc_nfreetx == 0 && SQ_CDTXADDR(sc, i) == reclaimto)
1148 reclaimall = 1; 1148 reclaimall = 1;
1149 else 1149 else
1150 reclaimall = 0; 1150 reclaimall = 0;
1151 1151
1152 while (sc->sc_nfreetx < SQ_NTXDESC) { 1152 while (sc->sc_nfreetx < SQ_NTXDESC) {
1153 if (SQ_CDTXADDR(sc, i) == reclaimto && !reclaimall) 1153 if (SQ_CDTXADDR(sc, i) == reclaimto && !reclaimall)
1154 break; 1154 break;
1155 1155
1156 SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs, 1156 SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
1157 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1157 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1158 1158
1159 /* Sync the packet data, unload DMA map, free mbuf */ 1159 /* Sync the packet data, unload DMA map, free mbuf */
1160 bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 1160 bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i],
1161 0, sc->sc_txmap[i]->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1161 0, sc->sc_txmap[i]->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1162 bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]); 1162 bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
1163 m_freem(sc->sc_txmbuf[i]); 1163 m_freem(sc->sc_txmbuf[i]);
1164 sc->sc_txmbuf[i] = NULL; 1164 sc->sc_txmbuf[i] = NULL;
1165 1165
1166 if_statinc(ifp, if_opackets); 1166 if_statinc(ifp, if_opackets);
1167 sc->sc_nfreetx++; 1167 sc->sc_nfreetx++;
1168 1168
1169 SQ_TRACE(SQ_DONE_DMA, sc, i, status); 1169 SQ_TRACE(SQ_DONE_DMA, sc, i, status);
1170 1170
1171 i = SQ_NEXTTX(i); 1171 i = SQ_NEXTTX(i);
1172 } 1172 }
1173 1173
1174 if (sc->sc_nfreetx < SQ_NTXDESC) { 1174 if (sc->sc_nfreetx < SQ_NTXDESC) {
1175 SQ_TRACE(SQ_RESTART_DMA, sc, i, status); 1175 SQ_TRACE(SQ_RESTART_DMA, sc, i, status);
1176 1176
1177 KASSERT(reclaimto == SQ_CDTXADDR(sc, i)); 1177 KASSERT(reclaimto == SQ_CDTXADDR(sc, i));
1178 1178
1179 sq_hpc_write(sc, HPC1_ENETX_CFXBP, reclaimto); 1179 sq_hpc_write(sc, HPC1_ENETX_CFXBP, reclaimto);
1180 sq_hpc_write(sc, HPC1_ENETX_CBP, reclaimto); 1180 sq_hpc_write(sc, HPC1_ENETX_CBP, reclaimto);
1181 1181
1182 /* Kick DMA channel into life */ 1182 /* Kick DMA channel into life */
1183 sq_hpc_write(sc, HPC1_ENETX_CTL, HPC1_ENETX_CTL_ACTIVE); 1183 sq_hpc_write(sc, HPC1_ENETX_CTL, HPC1_ENETX_CTL_ACTIVE);
1184 1184
1185 /* 1185 /*
1186 * Set a watchdog timer in case the chip 1186 * Set a watchdog timer in case the chip
1187 * flakes out. 1187 * flakes out.
1188 */ 1188 */
1189 ifp->if_timer = 5; 1189 ifp->if_timer = 5;
1190 } 1190 }
1191 1191
1192 sc->sc_prevtx = i; 1192 sc->sc_prevtx = i;
1193} 1193}
1194 1194
1195/* 1195/*
1196 * Reclaim used transmit descriptors and restart the transmit DMA 1196 * Reclaim used transmit descriptors and restart the transmit DMA
1197 * engine if necessary. 1197 * engine if necessary.
1198 */ 1198 */
1199static void 1199static void
1200sq_txring_hpc3(struct sq_softc *sc) 1200sq_txring_hpc3(struct sq_softc *sc)
1201{ 1201{
1202 /* 1202 /*
1203 * HPC3 tags descriptors with a bit once they've been 1203 * HPC3 tags descriptors with a bit once they've been
1204 * transmitted. We need only free each XMITDONE'd 1204 * transmitted. We need only free each XMITDONE'd
1205 * descriptor, and restart the DMA engine if any 1205 * descriptor, and restart the DMA engine if any
1206 * descriptors are left over. 1206 * descriptors are left over.
1207 */ 1207 */
1208 int i; 1208 int i;
1209 uint32_t status = 0; 1209 uint32_t status = 0;
1210 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1210 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1211 1211
1212 i = sc->sc_prevtx; 1212 i = sc->sc_prevtx;
1213 while (sc->sc_nfreetx < SQ_NTXDESC) { 1213 while (sc->sc_nfreetx < SQ_NTXDESC) {
1214 /* 1214 /*
1215 * Check status first so we don't end up with a case of 1215 * Check status first so we don't end up with a case of
1216 * the buffer not being finished while the DMA channel 1216 * the buffer not being finished while the DMA channel
1217 * has gone idle. 1217 * has gone idle.
1218 */ 1218 */
1219 status = sq_hpc_read(sc, HPC3_ENETX_CTL); 1219 status = sq_hpc_read(sc, HPC3_ENETX_CTL);
1220 1220
1221 SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs, 1221 SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
1222 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1222 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1223 1223
1224 /* Check for used descriptor and restart DMA chain if needed */ 1224 /* Check for used descriptor and restart DMA chain if needed */
1225 if ((sc->sc_txdesc[i].hpc3_hdd_ctl & 1225 if ((sc->sc_txdesc[i].hpc3_hdd_ctl &
1226 HPC3_HDD_CTL_XMITDONE) == 0) { 1226 HPC3_HDD_CTL_XMITDONE) == 0) {
1227 if ((status & HPC3_ENETX_CTL_ACTIVE) == 0) { 1227 if ((status & HPC3_ENETX_CTL_ACTIVE) == 0) {
1228 SQ_TRACE(SQ_RESTART_DMA, sc, i, status); 1228 SQ_TRACE(SQ_RESTART_DMA, sc, i, status);
1229 1229
1230 sq_hpc_write(sc, HPC3_ENETX_NDBP, 1230 sq_hpc_write(sc, HPC3_ENETX_NDBP,
1231 SQ_CDTXADDR(sc, i)); 1231 SQ_CDTXADDR(sc, i));
1232 1232
1233 /* Kick DMA channel into life */ 1233 /* Kick DMA channel into life */
1234 sq_hpc_write(sc, HPC3_ENETX_CTL, 1234 sq_hpc_write(sc, HPC3_ENETX_CTL,
1235 HPC3_ENETX_CTL_ACTIVE); 1235 HPC3_ENETX_CTL_ACTIVE);
1236 1236
1237 /* 1237 /*
1238 * Set a watchdog timer in case the chip 1238 * Set a watchdog timer in case the chip
1239 * flakes out. 1239 * flakes out.
1240 */ 1240 */
1241 ifp->if_timer = 5; 1241 ifp->if_timer = 5;
1242 } else 1242 } else
1243 SQ_TRACE(SQ_TXINTR_BUSY, sc, i, status); 1243 SQ_TRACE(SQ_TXINTR_BUSY, sc, i, status);
1244 break; 1244 break;
1245 } 1245 }
1246 1246
1247 /* Sync the packet data, unload DMA map, free mbuf */ 1247 /* Sync the packet data, unload DMA map, free mbuf */
1248 bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 1248 bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i],
1249 0, sc->sc_txmap[i]->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1249 0, sc->sc_txmap[i]->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1250 bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]); 1250 bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
1251 m_freem(sc->sc_txmbuf[i]); 1251 m_freem(sc->sc_txmbuf[i]);
1252 sc->sc_txmbuf[i] = NULL; 1252 sc->sc_txmbuf[i] = NULL;
1253 1253
1254 if_statinc(ifp, if_opackets); 1254 if_statinc(ifp, if_opackets);
1255 sc->sc_nfreetx++; 1255 sc->sc_nfreetx++;
1256 1256
1257 SQ_TRACE(SQ_DONE_DMA, sc, i, status); 1257 SQ_TRACE(SQ_DONE_DMA, sc, i, status);
1258 i = SQ_NEXTTX(i); 1258 i = SQ_NEXTTX(i);
1259 } 1259 }
1260 1260
1261 sc->sc_prevtx = i; 1261 sc->sc_prevtx = i;
1262} 1262}
1263 1263
1264void 1264void
1265sq_reset(struct sq_softc *sc) 1265sq_reset(struct sq_softc *sc)
1266{ 1266{
1267 1267
1268 	/* Stop HPC DMA channels */ 1268 	/* Stop HPC DMA channels */
1269 sq_hpc_write(sc, sc->hpc_regs->enetr_ctl, 0); 1269 sq_hpc_write(sc, sc->hpc_regs->enetr_ctl, 0);
1270 sq_hpc_write(sc, sc->hpc_regs->enetx_ctl, 0); 1270 sq_hpc_write(sc, sc->hpc_regs->enetx_ctl, 0);
1271 1271
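	/*
	 * Writing 3 presumably asserts the channel reset and clears any
	 * latched interrupt; hold it briefly, then release the reset.
	 */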
1272 sq_hpc_write(sc, sc->hpc_regs->enetr_reset, 3); 1272 sq_hpc_write(sc, sc->hpc_regs->enetr_reset, 3);
1273 delay(20); 1273 delay(20);
1274 sq_hpc_write(sc, sc->hpc_regs->enetr_reset, 0); 1274 sq_hpc_write(sc, sc->hpc_regs->enetr_reset, 0);
1275} 1275}
1276 1276
1277/* sq_add_rxbuf: Add a receive buffer to the indicated descriptor. */ 1277/* sq_add_rxbuf: Add a receive buffer to the indicated descriptor. */
1278int 1278int
1279sq_add_rxbuf(struct sq_softc *sc, int idx) 1279sq_add_rxbuf(struct sq_softc *sc, int idx)
1280{ 1280{
1281 int err; 1281 int err;
1282 struct mbuf *m; 1282 struct mbuf *m;
1283 1283
1284 MGETHDR(m, M_DONTWAIT, MT_DATA); 1284 MGETHDR(m, M_DONTWAIT, MT_DATA);
1285 if (m == NULL) 1285 if (m == NULL)
1286 return ENOBUFS; 1286 return ENOBUFS;
1287 1287
1288 MCLGET(m, M_DONTWAIT); 1288 MCLGET(m, M_DONTWAIT);
1289 if ((m->m_flags & M_EXT) == 0) { 1289 if ((m->m_flags & M_EXT) == 0) {
1290 m_freem(m); 1290 m_freem(m);
1291 return ENOBUFS; 1291 return ENOBUFS;
1292 } 1292 }
1293 1293
1294 if (sc->sc_rxmbuf[idx] != NULL) 1294 if (sc->sc_rxmbuf[idx] != NULL)
1295 bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[idx]); 1295 bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[idx]);
1296 1296
1297 sc->sc_rxmbuf[idx] = m; 1297 sc->sc_rxmbuf[idx] = m;
1298 1298
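	/*
	 * Load the whole cluster into the DMA map; SQ_INIT_RXDESC below
	 * then points the receive descriptor at it.
	 */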
1299 if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_rxmap[idx], 1299 if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_rxmap[idx],
1300 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT)) != 0) { 1300 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT)) != 0) {
1301 printf("%s: can't load rx DMA map %d, error = %d\n", 1301 printf("%s: can't load rx DMA map %d, error = %d\n",
1302 device_xname(sc->sc_dev), idx, err); 1302 device_xname(sc->sc_dev), idx, err);
1303 panic("sq_add_rxbuf"); /* XXX */ 1303 panic("sq_add_rxbuf"); /* XXX */
1304 } 1304 }
1305 1305
1306 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[idx], 1306 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[idx],
1307 0, sc->sc_rxmap[idx]->dm_mapsize, BUS_DMASYNC_PREREAD); 1307 0, sc->sc_rxmap[idx]->dm_mapsize, BUS_DMASYNC_PREREAD);
1308 1308
1309 SQ_INIT_RXDESC(sc, idx); 1309 SQ_INIT_RXDESC(sc, idx);
1310 1310
1311 return 0; 1311 return 0;
1312} 1312}
1313 1313
1314void 1314void
1315sq_dump_buffer(paddr_t addr, psize_t len) 1315sq_dump_buffer(paddr_t addr, psize_t len)
1316{ 1316{
1317 u_int i; 1317 u_int i;
1318 uint8_t *physaddr = (uint8_t *)MIPS_PHYS_TO_KSEG1(addr); 1318 uint8_t *physaddr = (uint8_t *)MIPS_PHYS_TO_KSEG1(addr);
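	/*
	 * KSEG1 is the uncached kernel window, so the dump shows the
	 * buffer exactly as the DMA engine left it in memory.
	 */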
1319 1319
1320 if (len == 0) 1320 if (len == 0)
1321 return; 1321 return;
1322 1322
1323 printf("%p: ", physaddr); 1323 printf("%p: ", physaddr);
1324 1324
1325 for (i = 0; i < len; i++) { 1325 for (i = 0; i < len; i++) {
1326 printf("%02x ", *(physaddr + i) & 0xff); 1326 printf("%02x ", *(physaddr + i) & 0xff);
1327 if ((i % 16) == 15 && i != len - 1) 1327 if ((i % 16) == 15 && i != len - 1)
1328 printf("\n%p: ", physaddr + i); 1328 printf("\n%p: ", physaddr + i);
1329 } 1329 }
1330 1330
1331 printf("\n"); 1331 printf("\n");
1332} 1332}