Mon Mar 16 12:02:00 2009 UTC
Fix a bug in the calculation of the checksum deduction:
- To get a 16-bit one's complement value from a uint32_t variable,
  the upper 16 bits must be ignored.
- RFC 1624 describes methods to recalculate the checksum field in
  headers, i.e. the one's complement of the one's complement sum,
  which could be 0x0000; but we don't have to use that strategy when
  deducting the one's complement sum itself, which is never 0x0000
  but comes out as 0xffff instead.
(A minimal sketch of these two points follows below.)
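A sketch of the arithmetic, for illustration only (hypothetical helper
names in plain C; this is not the committed gem(4)/mec(4) code, and the
folding loop is the standard technique, assumed here rather than quoted
from the fix):

    #include <stdint.h>

    /*
     * One standard way to reduce a 32-bit accumulator to a 16-bit
     * one's complement value: the upper 16 bits are end-around
     * carries, which are added back into the lower 16 bits until
     * they are zero, rather than kept in the final value.
     */
    static uint16_t
    csum_fold(uint32_t sum)
    {
            while ((sum >> 16) != 0)
                    sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)sum;
    }

    /*
     * Deduct a partial one's complement sum from an accumulated sum:
     * subtraction is addition of the complement.  Since the one's
     * complement sum of nonzero data is represented as 0xffff rather
     * than 0x0000, the RFC 1624 handling of a 0x0000 checksum field
     * is not needed for this case.
     */
    static uint16_t
    csum_deduct(uint16_t sum, uint16_t partial)
    {
            return csum_fold((uint32_t)sum + (uint16_t)~partial);
    }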

Found while debugging mec(4) on sgimips O2.


(tsutsui)
diff -r1.82 -r1.83 src/sys/dev/ic/gem.c
diff -r1.72 -r1.73 src/sys/dev/ic/hme.c

cvs diff -r1.82 -r1.83 src/sys/dev/ic/gem.c

--- src/sys/dev/ic/gem.c 2009/03/14 21:04:19 1.82
+++ src/sys/dev/ic/gem.c 2009/03/16 12:02:00 1.83
@@ -1,2642 +1,2641 @@ @@ -1,2642 +1,2641 @@
1/* $NetBSD: gem.c,v 1.82 2009/03/14 21:04:19 dsl Exp $ */ 1/* $NetBSD: gem.c,v 1.83 2009/03/16 12:02:00 tsutsui Exp $ */
2 2
3/* 3/*
4 * 4 *
5 * Copyright (C) 2001 Eduardo Horvath. 5 * Copyright (C) 2001 Eduardo Horvath.
6 * Copyright (c) 2001-2003 Thomas Moestl 6 * Copyright (c) 2001-2003 Thomas Moestl
7 * All rights reserved. 7 * All rights reserved.
8 * 8 *
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE. 29 * SUCH DAMAGE.
30 * 30 *
31 */ 31 */
32 32
33/* 33/*
34 * Driver for Apple GMAC, Sun ERI and Sun GEM Ethernet controllers 34 * Driver for Apple GMAC, Sun ERI and Sun GEM Ethernet controllers
35 * See `GEM Gigabit Ethernet ASIC Specification' 35 * See `GEM Gigabit Ethernet ASIC Specification'
36 * http://www.sun.com/processors/manuals/ge.pdf 36 * http://www.sun.com/processors/manuals/ge.pdf
37 */ 37 */
38 38
39#include <sys/cdefs.h> 39#include <sys/cdefs.h>
40__KERNEL_RCSID(0, "$NetBSD: gem.c,v 1.82 2009/03/14 21:04:19 dsl Exp $"); 40__KERNEL_RCSID(0, "$NetBSD: gem.c,v 1.83 2009/03/16 12:02:00 tsutsui Exp $");
41 41
42#include "opt_inet.h" 42#include "opt_inet.h"
43#include "bpfilter.h" 43#include "bpfilter.h"
44 44
45#include <sys/param.h> 45#include <sys/param.h>
46#include <sys/systm.h> 46#include <sys/systm.h>
47#include <sys/callout.h> 47#include <sys/callout.h>
48#include <sys/mbuf.h> 48#include <sys/mbuf.h>
49#include <sys/syslog.h> 49#include <sys/syslog.h>
50#include <sys/malloc.h> 50#include <sys/malloc.h>
51#include <sys/kernel.h> 51#include <sys/kernel.h>
52#include <sys/socket.h> 52#include <sys/socket.h>
53#include <sys/ioctl.h> 53#include <sys/ioctl.h>
54#include <sys/errno.h> 54#include <sys/errno.h>
55#include <sys/device.h> 55#include <sys/device.h>
56 56
57#include <machine/endian.h> 57#include <machine/endian.h>
58 58
59#include <uvm/uvm_extern.h> 59#include <uvm/uvm_extern.h>
60 60
61#include <net/if.h> 61#include <net/if.h>
62#include <net/if_dl.h> 62#include <net/if_dl.h>
63#include <net/if_media.h> 63#include <net/if_media.h>
64#include <net/if_ether.h> 64#include <net/if_ether.h>
65 65
66#ifdef INET 66#ifdef INET
67#include <netinet/in.h> 67#include <netinet/in.h>
68#include <netinet/in_systm.h> 68#include <netinet/in_systm.h>
69#include <netinet/in_var.h> 69#include <netinet/in_var.h>
70#include <netinet/ip.h> 70#include <netinet/ip.h>
71#include <netinet/tcp.h> 71#include <netinet/tcp.h>
72#include <netinet/udp.h> 72#include <netinet/udp.h>
73#endif 73#endif
74 74
75#if NBPFILTER > 0 75#if NBPFILTER > 0
76#include <net/bpf.h> 76#include <net/bpf.h>
77#endif 77#endif
78 78
79#include <sys/bus.h> 79#include <sys/bus.h>
80#include <sys/intr.h> 80#include <sys/intr.h>
81 81
82#include <dev/mii/mii.h> 82#include <dev/mii/mii.h>
83#include <dev/mii/miivar.h> 83#include <dev/mii/miivar.h>
84#include <dev/mii/mii_bitbang.h> 84#include <dev/mii/mii_bitbang.h>
85 85
86#include <dev/ic/gemreg.h> 86#include <dev/ic/gemreg.h>
87#include <dev/ic/gemvar.h> 87#include <dev/ic/gemvar.h>
88 88
89#define TRIES 10000 89#define TRIES 10000
90 90
91static void gem_start(struct ifnet *); 91static void gem_start(struct ifnet *);
92static void gem_stop(struct ifnet *, int); 92static void gem_stop(struct ifnet *, int);
93int gem_ioctl(struct ifnet *, u_long, void *); 93int gem_ioctl(struct ifnet *, u_long, void *);
94void gem_tick(void *); 94void gem_tick(void *);
95void gem_watchdog(struct ifnet *); 95void gem_watchdog(struct ifnet *);
96void gem_shutdown(void *); 96void gem_shutdown(void *);
97void gem_pcs_start(struct gem_softc *sc); 97void gem_pcs_start(struct gem_softc *sc);
98void gem_pcs_stop(struct gem_softc *sc, int); 98void gem_pcs_stop(struct gem_softc *sc, int);
99int gem_init(struct ifnet *); 99int gem_init(struct ifnet *);
100void gem_init_regs(struct gem_softc *sc); 100void gem_init_regs(struct gem_softc *sc);
101static int gem_ringsize(int sz); 101static int gem_ringsize(int sz);
102static int gem_meminit(struct gem_softc *); 102static int gem_meminit(struct gem_softc *);
103void gem_mifinit(struct gem_softc *); 103void gem_mifinit(struct gem_softc *);
104static int gem_bitwait(struct gem_softc *sc, bus_space_handle_t, int, 104static int gem_bitwait(struct gem_softc *sc, bus_space_handle_t, int,
105 u_int32_t, u_int32_t); 105 u_int32_t, u_int32_t);
106void gem_reset(struct gem_softc *); 106void gem_reset(struct gem_softc *);
107int gem_reset_rx(struct gem_softc *sc); 107int gem_reset_rx(struct gem_softc *sc);
108static void gem_reset_rxdma(struct gem_softc *sc); 108static void gem_reset_rxdma(struct gem_softc *sc);
109static void gem_rx_common(struct gem_softc *sc); 109static void gem_rx_common(struct gem_softc *sc);
110int gem_reset_tx(struct gem_softc *sc); 110int gem_reset_tx(struct gem_softc *sc);
111int gem_disable_rx(struct gem_softc *sc); 111int gem_disable_rx(struct gem_softc *sc);
112int gem_disable_tx(struct gem_softc *sc); 112int gem_disable_tx(struct gem_softc *sc);
113static void gem_rxdrain(struct gem_softc *sc); 113static void gem_rxdrain(struct gem_softc *sc);
114int gem_add_rxbuf(struct gem_softc *sc, int idx); 114int gem_add_rxbuf(struct gem_softc *sc, int idx);
115void gem_setladrf(struct gem_softc *); 115void gem_setladrf(struct gem_softc *);
116 116
117/* MII methods & callbacks */ 117/* MII methods & callbacks */
118static int gem_mii_readreg(struct device *, int, int); 118static int gem_mii_readreg(struct device *, int, int);
119static void gem_mii_writereg(struct device *, int, int, int); 119static void gem_mii_writereg(struct device *, int, int, int);
120static void gem_mii_statchg(struct device *); 120static void gem_mii_statchg(struct device *);
121 121
122static int gem_ifflags_cb(struct ethercom *); 122static int gem_ifflags_cb(struct ethercom *);
123 123
124void gem_statuschange(struct gem_softc *); 124void gem_statuschange(struct gem_softc *);
125 125
126int gem_ser_mediachange(struct ifnet *); 126int gem_ser_mediachange(struct ifnet *);
127void gem_ser_mediastatus(struct ifnet *, struct ifmediareq *); 127void gem_ser_mediastatus(struct ifnet *, struct ifmediareq *);
128 128
129struct mbuf *gem_get(struct gem_softc *, int, int); 129struct mbuf *gem_get(struct gem_softc *, int, int);
130int gem_put(struct gem_softc *, int, struct mbuf *); 130int gem_put(struct gem_softc *, int, struct mbuf *);
131void gem_read(struct gem_softc *, int, int); 131void gem_read(struct gem_softc *, int, int);
132int gem_pint(struct gem_softc *); 132int gem_pint(struct gem_softc *);
133int gem_eint(struct gem_softc *, u_int); 133int gem_eint(struct gem_softc *, u_int);
134int gem_rint(struct gem_softc *); 134int gem_rint(struct gem_softc *);
135int gem_tint(struct gem_softc *); 135int gem_tint(struct gem_softc *);
136void gem_power(int, void *); 136void gem_power(int, void *);
137 137
138#ifdef GEM_DEBUG 138#ifdef GEM_DEBUG
139static void gem_txsoft_print(const struct gem_softc *, int, int); 139static void gem_txsoft_print(const struct gem_softc *, int, int);
140#define DPRINTF(sc, x) if ((sc)->sc_ethercom.ec_if.if_flags & IFF_DEBUG) \ 140#define DPRINTF(sc, x) if ((sc)->sc_ethercom.ec_if.if_flags & IFF_DEBUG) \
141 printf x 141 printf x
142#else 142#else
143#define DPRINTF(sc, x) /* nothing */ 143#define DPRINTF(sc, x) /* nothing */
144#endif 144#endif
145 145
146#define ETHER_MIN_TX (ETHERMIN + sizeof(struct ether_header)) 146#define ETHER_MIN_TX (ETHERMIN + sizeof(struct ether_header))
147 147
148 148
149/* 149/*
150 * gem_attach: 150 * gem_attach:
151 * 151 *
152 * Attach a Gem interface to the system. 152 * Attach a Gem interface to the system.
153 */ 153 */
154void 154void
155gem_attach(struct gem_softc *sc, const uint8_t *enaddr) 155gem_attach(struct gem_softc *sc, const uint8_t *enaddr)
156{ 156{
157 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 157 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
158 struct mii_data *mii = &sc->sc_mii; 158 struct mii_data *mii = &sc->sc_mii;
159 bus_space_tag_t t = sc->sc_bustag; 159 bus_space_tag_t t = sc->sc_bustag;
160 bus_space_handle_t h = sc->sc_h1; 160 bus_space_handle_t h = sc->sc_h1;
161 struct ifmedia_entry *ifm; 161 struct ifmedia_entry *ifm;
162 int i, error; 162 int i, error;
163 u_int32_t v; 163 u_int32_t v;
164 char *nullbuf; 164 char *nullbuf;
165 165
166 /* Make sure the chip is stopped. */ 166 /* Make sure the chip is stopped. */
167 ifp->if_softc = sc; 167 ifp->if_softc = sc;
168 gem_reset(sc); 168 gem_reset(sc);
169 169
170 /* 170 /*
171 * Allocate the control data structures, and create and load the 171 * Allocate the control data structures, and create and load the
172 * DMA map for it. gem_control_data is 9216 bytes, we have space for 172 * DMA map for it. gem_control_data is 9216 bytes, we have space for
173 * the padding buffer in the bus_dmamem_alloc()'d memory. 173 * the padding buffer in the bus_dmamem_alloc()'d memory.
174 */ 174 */
175 if ((error = bus_dmamem_alloc(sc->sc_dmatag, 175 if ((error = bus_dmamem_alloc(sc->sc_dmatag,
176 sizeof(struct gem_control_data) + ETHER_MIN_TX, PAGE_SIZE, 176 sizeof(struct gem_control_data) + ETHER_MIN_TX, PAGE_SIZE,
177 0, &sc->sc_cdseg, 1, &sc->sc_cdnseg, 0)) != 0) { 177 0, &sc->sc_cdseg, 1, &sc->sc_cdnseg, 0)) != 0) {
178 aprint_error_dev(&sc->sc_dev, 178 aprint_error_dev(&sc->sc_dev,
179 "unable to allocate control data, error = %d\n", 179 "unable to allocate control data, error = %d\n",
180 error); 180 error);
181 goto fail_0; 181 goto fail_0;
182 } 182 }
183 183
184 /* XXX should map this in with correct endianness */ 184 /* XXX should map this in with correct endianness */
185 if ((error = bus_dmamem_map(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg, 185 if ((error = bus_dmamem_map(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg,
186 sizeof(struct gem_control_data), (void **)&sc->sc_control_data, 186 sizeof(struct gem_control_data), (void **)&sc->sc_control_data,
187 BUS_DMA_COHERENT)) != 0) { 187 BUS_DMA_COHERENT)) != 0) {
188 aprint_error_dev(&sc->sc_dev, "unable to map control data, error = %d\n", 188 aprint_error_dev(&sc->sc_dev, "unable to map control data, error = %d\n",
189 error); 189 error);
190 goto fail_1; 190 goto fail_1;
191 } 191 }
192 192
193 nullbuf = 193 nullbuf =
194 (char *)sc->sc_control_data + sizeof(struct gem_control_data); 194 (char *)sc->sc_control_data + sizeof(struct gem_control_data);
195 195
196 if ((error = bus_dmamap_create(sc->sc_dmatag, 196 if ((error = bus_dmamap_create(sc->sc_dmatag,
197 sizeof(struct gem_control_data), 1, 197 sizeof(struct gem_control_data), 1,
198 sizeof(struct gem_control_data), 0, 0, &sc->sc_cddmamap)) != 0) { 198 sizeof(struct gem_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
199 aprint_error_dev(&sc->sc_dev, "unable to create control data DMA map, " 199 aprint_error_dev(&sc->sc_dev, "unable to create control data DMA map, "
200 "error = %d\n", error); 200 "error = %d\n", error);
201 goto fail_2; 201 goto fail_2;
202 } 202 }
203 203
204 if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_cddmamap, 204 if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_cddmamap,
205 sc->sc_control_data, sizeof(struct gem_control_data), NULL, 205 sc->sc_control_data, sizeof(struct gem_control_data), NULL,
206 0)) != 0) { 206 0)) != 0) {
207 aprint_error_dev(&sc->sc_dev, 207 aprint_error_dev(&sc->sc_dev,
208 "unable to load control data DMA map, error = %d\n", 208 "unable to load control data DMA map, error = %d\n",
209 error); 209 error);
210 goto fail_3; 210 goto fail_3;
211 } 211 }
212 212
213 memset(nullbuf, 0, ETHER_MIN_TX); 213 memset(nullbuf, 0, ETHER_MIN_TX);
214 if ((error = bus_dmamap_create(sc->sc_dmatag, 214 if ((error = bus_dmamap_create(sc->sc_dmatag,
215 ETHER_MIN_TX, 1, ETHER_MIN_TX, 0, 0, &sc->sc_nulldmamap)) != 0) { 215 ETHER_MIN_TX, 1, ETHER_MIN_TX, 0, 0, &sc->sc_nulldmamap)) != 0) {
216 aprint_error_dev(&sc->sc_dev, "unable to create padding DMA map, " 216 aprint_error_dev(&sc->sc_dev, "unable to create padding DMA map, "
217 "error = %d\n", error); 217 "error = %d\n", error);
218 goto fail_4; 218 goto fail_4;
219 } 219 }
220 220
221 if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_nulldmamap, 221 if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_nulldmamap,
222 nullbuf, ETHER_MIN_TX, NULL, 0)) != 0) { 222 nullbuf, ETHER_MIN_TX, NULL, 0)) != 0) {
223 aprint_error_dev(&sc->sc_dev, 223 aprint_error_dev(&sc->sc_dev,
224 "unable to load padding DMA map, error = %d\n", 224 "unable to load padding DMA map, error = %d\n",
225 error); 225 error);
226 goto fail_5; 226 goto fail_5;
227 } 227 }
228 228
229 bus_dmamap_sync(sc->sc_dmatag, sc->sc_nulldmamap, 0, ETHER_MIN_TX, 229 bus_dmamap_sync(sc->sc_dmatag, sc->sc_nulldmamap, 0, ETHER_MIN_TX,
230 BUS_DMASYNC_PREWRITE); 230 BUS_DMASYNC_PREWRITE);
231 231
232 /* 232 /*
233 * Initialize the transmit job descriptors. 233 * Initialize the transmit job descriptors.
234 */ 234 */
235 SIMPLEQ_INIT(&sc->sc_txfreeq); 235 SIMPLEQ_INIT(&sc->sc_txfreeq);
236 SIMPLEQ_INIT(&sc->sc_txdirtyq); 236 SIMPLEQ_INIT(&sc->sc_txdirtyq);
237 237
238 /* 238 /*
239 * Create the transmit buffer DMA maps. 239 * Create the transmit buffer DMA maps.
240 */ 240 */
241 for (i = 0; i < GEM_TXQUEUELEN; i++) { 241 for (i = 0; i < GEM_TXQUEUELEN; i++) {
242 struct gem_txsoft *txs; 242 struct gem_txsoft *txs;
243 243
244 txs = &sc->sc_txsoft[i]; 244 txs = &sc->sc_txsoft[i];
245 txs->txs_mbuf = NULL; 245 txs->txs_mbuf = NULL;
246 if ((error = bus_dmamap_create(sc->sc_dmatag, 246 if ((error = bus_dmamap_create(sc->sc_dmatag,
247 ETHER_MAX_LEN_JUMBO, GEM_NTXSEGS, 247 ETHER_MAX_LEN_JUMBO, GEM_NTXSEGS,
248 ETHER_MAX_LEN_JUMBO, 0, 0, 248 ETHER_MAX_LEN_JUMBO, 0, 0,
249 &txs->txs_dmamap)) != 0) { 249 &txs->txs_dmamap)) != 0) {
250 aprint_error_dev(&sc->sc_dev, "unable to create tx DMA map %d, " 250 aprint_error_dev(&sc->sc_dev, "unable to create tx DMA map %d, "
251 "error = %d\n", i, error); 251 "error = %d\n", i, error);
252 goto fail_6; 252 goto fail_6;
253 } 253 }
254 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 254 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
255 } 255 }
256 256
257 /* 257 /*
258 * Create the receive buffer DMA maps. 258 * Create the receive buffer DMA maps.
259 */ 259 */
260 for (i = 0; i < GEM_NRXDESC; i++) { 260 for (i = 0; i < GEM_NRXDESC; i++) {
261 if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1, 261 if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1,
262 MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) { 262 MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
263 aprint_error_dev(&sc->sc_dev, "unable to create rx DMA map %d, " 263 aprint_error_dev(&sc->sc_dev, "unable to create rx DMA map %d, "
264 "error = %d\n", i, error); 264 "error = %d\n", i, error);
265 goto fail_7; 265 goto fail_7;
266 } 266 }
267 sc->sc_rxsoft[i].rxs_mbuf = NULL; 267 sc->sc_rxsoft[i].rxs_mbuf = NULL;
268 } 268 }
269 269
270 /* Initialize ifmedia structures and MII info */ 270 /* Initialize ifmedia structures and MII info */
271 mii->mii_ifp = ifp; 271 mii->mii_ifp = ifp;
272 mii->mii_readreg = gem_mii_readreg; 272 mii->mii_readreg = gem_mii_readreg;
273 mii->mii_writereg = gem_mii_writereg; 273 mii->mii_writereg = gem_mii_writereg;
274 mii->mii_statchg = gem_mii_statchg; 274 mii->mii_statchg = gem_mii_statchg;
275 275
276 sc->sc_ethercom.ec_mii = mii; 276 sc->sc_ethercom.ec_mii = mii;
277 277
278 /* 278 /*
279 * Initialization based on `GEM Gigabit Ethernet ASIC Specification' 279 * Initialization based on `GEM Gigabit Ethernet ASIC Specification'
280 * Section 3.2.1 `Initialization Sequence'. 280 * Section 3.2.1 `Initialization Sequence'.
281 * However, we can't assume SERDES or Serialink if neither 281 * However, we can't assume SERDES or Serialink if neither
282 * GEM_MIF_CONFIG_MDI0 nor GEM_MIF_CONFIG_MDI1 are set 282 * GEM_MIF_CONFIG_MDI0 nor GEM_MIF_CONFIG_MDI1 are set
283 * being set, as both are set on Sun X1141A (with SERDES). So, 283 * being set, as both are set on Sun X1141A (with SERDES). So,
284 * we rely on our bus attachment setting GEM_SERDES or GEM_SERIAL. 284 * we rely on our bus attachment setting GEM_SERDES or GEM_SERIAL.
285 * Also, for Apple variants with 2 PHY's, we prefer the external 285 * Also, for Apple variants with 2 PHY's, we prefer the external
286 * PHY over the internal PHY. 286 * PHY over the internal PHY.
287 */ 287 */
288 gem_mifinit(sc); 288 gem_mifinit(sc);
289 289
290 if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) == 0) { 290 if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) == 0) {
291 ifmedia_init(&mii->mii_media, IFM_IMASK, ether_mediachange, 291 ifmedia_init(&mii->mii_media, IFM_IMASK, ether_mediachange,
292 ether_mediastatus); 292 ether_mediastatus);
293 mii_attach(&sc->sc_dev, mii, 0xffffffff, 293 mii_attach(&sc->sc_dev, mii, 0xffffffff,
294 MII_PHY_ANY, MII_OFFSET_ANY, MIIF_FORCEANEG); 294 MII_PHY_ANY, MII_OFFSET_ANY, MIIF_FORCEANEG);
295 if (LIST_EMPTY(&mii->mii_phys)) { 295 if (LIST_EMPTY(&mii->mii_phys)) {
296 /* No PHY attached */ 296 /* No PHY attached */
297 aprint_error_dev(&sc->sc_dev, "PHY probe failed\n"); 297 aprint_error_dev(&sc->sc_dev, "PHY probe failed\n");
298 goto fail_7; 298 goto fail_7;
299 } else { 299 } else {
300 struct mii_softc *child; 300 struct mii_softc *child;
301 301
302 /* 302 /*
303 * Walk along the list of attached MII devices and 303 * Walk along the list of attached MII devices and
304 * establish an `MII instance' to `PHY number' 304 * establish an `MII instance' to `PHY number'
305 * mapping. 305 * mapping.
306 */ 306 */
307 LIST_FOREACH(child, &mii->mii_phys, mii_list) { 307 LIST_FOREACH(child, &mii->mii_phys, mii_list) {
308 /* 308 /*
309 * Note: we support just one PHY: the internal 309 * Note: we support just one PHY: the internal
310 * or external MII is already selected for us 310 * or external MII is already selected for us
311 * by the GEM_MIF_CONFIG register. 311 * by the GEM_MIF_CONFIG register.
312 */ 312 */
313 if (child->mii_phy > 1 || child->mii_inst > 0) { 313 if (child->mii_phy > 1 || child->mii_inst > 0) {
314 aprint_error_dev(&sc->sc_dev, 314 aprint_error_dev(&sc->sc_dev,
315 "cannot accommodate MII device" 315 "cannot accommodate MII device"
316 " %s at PHY %d, instance %d\n", 316 " %s at PHY %d, instance %d\n",
317 device_xname(child->mii_dev), 317 device_xname(child->mii_dev),
318 child->mii_phy, child->mii_inst); 318 child->mii_phy, child->mii_inst);
319 continue; 319 continue;
320 } 320 }
321 sc->sc_phys[child->mii_inst] = child->mii_phy; 321 sc->sc_phys[child->mii_inst] = child->mii_phy;
322 } 322 }
323 323
324 /* 324 /*
325 * Now select and activate the PHY we will use. 325 * Now select and activate the PHY we will use.
326 * 326 *
327 * The order of preference is External (MDI1), 327 * The order of preference is External (MDI1),
328 * then Internal (MDI0), 328 * then Internal (MDI0),
329 */ 329 */
330 if (sc->sc_phys[1]) { 330 if (sc->sc_phys[1]) {
331#ifdef GEM_DEBUG 331#ifdef GEM_DEBUG
332 aprint_debug_dev(&sc->sc_dev, "using external PHY\n"); 332 aprint_debug_dev(&sc->sc_dev, "using external PHY\n");
333#endif 333#endif
334 sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL; 334 sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL;
335 } else { 335 } else {
336#ifdef GEM_DEBUG 336#ifdef GEM_DEBUG
337 aprint_debug_dev(&sc->sc_dev, "using internal PHY\n"); 337 aprint_debug_dev(&sc->sc_dev, "using internal PHY\n");
338 sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL; 338 sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL;
339#endif 339#endif
340 } 340 }
341 bus_space_write_4(t, h, GEM_MIF_CONFIG, 341 bus_space_write_4(t, h, GEM_MIF_CONFIG,
342 sc->sc_mif_config); 342 sc->sc_mif_config);
343 if (sc->sc_variant != GEM_SUN_ERI) 343 if (sc->sc_variant != GEM_SUN_ERI)
344 bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE, 344 bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE,
345 GEM_MII_DATAPATH_MII); 345 GEM_MII_DATAPATH_MII);
346 346
347 /* 347 /*
348 * XXX - we can really do the following ONLY if the 348 * XXX - we can really do the following ONLY if the
349 * PHY indeed has the auto negotiation capability!! 349 * PHY indeed has the auto negotiation capability!!
350 */ 350 */
351 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 351 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
352 } 352 }
353 } else { 353 } else {
354 ifmedia_init(&mii->mii_media, IFM_IMASK, gem_ser_mediachange, 354 ifmedia_init(&mii->mii_media, IFM_IMASK, gem_ser_mediachange,
355 gem_ser_mediastatus); 355 gem_ser_mediastatus);
356 /* SERDES or Serialink */ 356 /* SERDES or Serialink */
357 if (sc->sc_flags & GEM_SERDES) { 357 if (sc->sc_flags & GEM_SERDES) {
358 bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE, 358 bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE,
359 GEM_MII_DATAPATH_SERDES); 359 GEM_MII_DATAPATH_SERDES);
360 } else { 360 } else {
361 sc->sc_flags |= GEM_SERIAL; 361 sc->sc_flags |= GEM_SERIAL;
362 bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE, 362 bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE,
363 GEM_MII_DATAPATH_SERIAL); 363 GEM_MII_DATAPATH_SERIAL);
364 } 364 }
365 365
366 aprint_normal_dev(&sc->sc_dev, "using external PCS %s: ", 366 aprint_normal_dev(&sc->sc_dev, "using external PCS %s: ",
367 sc->sc_flags & GEM_SERDES ? "SERDES" : "Serialink"); 367 sc->sc_flags & GEM_SERDES ? "SERDES" : "Serialink");
368 368
369 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO, 0, NULL); 369 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO, 0, NULL);
370 /* Check for FDX and HDX capabilities */ 370 /* Check for FDX and HDX capabilities */
371 sc->sc_mii_anar = bus_space_read_4(t, h, GEM_MII_ANAR); 371 sc->sc_mii_anar = bus_space_read_4(t, h, GEM_MII_ANAR);
372 if (sc->sc_mii_anar & GEM_MII_ANEG_FUL_DUPLX) { 372 if (sc->sc_mii_anar & GEM_MII_ANEG_FUL_DUPLX) {
373 ifmedia_add(&sc->sc_mii.mii_media, 373 ifmedia_add(&sc->sc_mii.mii_media,
374 IFM_ETHER|IFM_1000_SX|IFM_MANUAL|IFM_FDX, 0, NULL); 374 IFM_ETHER|IFM_1000_SX|IFM_MANUAL|IFM_FDX, 0, NULL);
375 aprint_normal("1000baseSX-FDX, "); 375 aprint_normal("1000baseSX-FDX, ");
376 } 376 }
377 if (sc->sc_mii_anar & GEM_MII_ANEG_HLF_DUPLX) { 377 if (sc->sc_mii_anar & GEM_MII_ANEG_HLF_DUPLX) {
378 ifmedia_add(&sc->sc_mii.mii_media, 378 ifmedia_add(&sc->sc_mii.mii_media,
379 IFM_ETHER|IFM_1000_SX|IFM_MANUAL|IFM_HDX, 0, NULL); 379 IFM_ETHER|IFM_1000_SX|IFM_MANUAL|IFM_HDX, 0, NULL);
380 aprint_normal("1000baseSX-HDX, "); 380 aprint_normal("1000baseSX-HDX, ");
381 } 381 }
382 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 382 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
383 sc->sc_mii_media = IFM_AUTO; 383 sc->sc_mii_media = IFM_AUTO;
384 aprint_normal("auto\n"); 384 aprint_normal("auto\n");
385 385
386 gem_pcs_stop(sc, 1); 386 gem_pcs_stop(sc, 1);
387 } 387 }
388 388
389 /* 389 /*
390 * From this point forward, the attachment cannot fail. A failure 390 * From this point forward, the attachment cannot fail. A failure
391 * before this point releases all resources that may have been 391 * before this point releases all resources that may have been
392 * allocated. 392 * allocated.
393 */ 393 */
394 394
395 /* Announce ourselves. */ 395 /* Announce ourselves. */
396 aprint_normal_dev(&sc->sc_dev, "Ethernet address %s", 396 aprint_normal_dev(&sc->sc_dev, "Ethernet address %s",
397 ether_sprintf(enaddr)); 397 ether_sprintf(enaddr));
398 398
399 /* Get RX FIFO size */ 399 /* Get RX FIFO size */
400 sc->sc_rxfifosize = 64 * 400 sc->sc_rxfifosize = 64 *
401 bus_space_read_4(t, h, GEM_RX_FIFO_SIZE); 401 bus_space_read_4(t, h, GEM_RX_FIFO_SIZE);
402 aprint_normal(", %uKB RX fifo", sc->sc_rxfifosize / 1024); 402 aprint_normal(", %uKB RX fifo", sc->sc_rxfifosize / 1024);
403 403
404 /* Get TX FIFO size */ 404 /* Get TX FIFO size */
405 v = bus_space_read_4(t, h, GEM_TX_FIFO_SIZE); 405 v = bus_space_read_4(t, h, GEM_TX_FIFO_SIZE);
406 aprint_normal(", %uKB TX fifo\n", v / 16); 406 aprint_normal(", %uKB TX fifo\n", v / 16);
407 407
408 /* Initialize ifnet structure. */ 408 /* Initialize ifnet structure. */
409 strlcpy(ifp->if_xname, device_xname(&sc->sc_dev), IFNAMSIZ); 409 strlcpy(ifp->if_xname, device_xname(&sc->sc_dev), IFNAMSIZ);
410 ifp->if_softc = sc; 410 ifp->if_softc = sc;
411 ifp->if_flags = 411 ifp->if_flags =
412 IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST; 412 IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
413 sc->sc_if_flags = ifp->if_flags; 413 sc->sc_if_flags = ifp->if_flags;
414 /* 414 /*
415 * The GEM hardware supports basic TCP checksum offloading only. 415 * The GEM hardware supports basic TCP checksum offloading only.
416 * Several (all?) revisions (Sun rev. 01 and Apple rev. 00 and 80) 416 * Several (all?) revisions (Sun rev. 01 and Apple rev. 00 and 80)
417 * have bugs in the receive checksum, so don't enable it for now. 417 * have bugs in the receive checksum, so don't enable it for now.
418 if ((GEM_IS_SUN(sc) && sc->sc_chiprev != 1) || 418 if ((GEM_IS_SUN(sc) && sc->sc_chiprev != 1) ||
419 (GEM_IS_APPLE(sc) && 419 (GEM_IS_APPLE(sc) &&
420 (sc->sc_chiprev != 0 && sc->sc_chiprev != 0x80))) 420 (sc->sc_chiprev != 0 && sc->sc_chiprev != 0x80)))
421 ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Rx; 421 ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Rx;
422 */ 422 */
423 ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Tx; 423 ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Tx;
424 ifp->if_start = gem_start; 424 ifp->if_start = gem_start;
425 ifp->if_ioctl = gem_ioctl; 425 ifp->if_ioctl = gem_ioctl;
426 ifp->if_watchdog = gem_watchdog; 426 ifp->if_watchdog = gem_watchdog;
427 ifp->if_stop = gem_stop; 427 ifp->if_stop = gem_stop;
428 ifp->if_init = gem_init; 428 ifp->if_init = gem_init;
429 IFQ_SET_READY(&ifp->if_snd); 429 IFQ_SET_READY(&ifp->if_snd);
430 430
431 /* 431 /*
432 * If we support GigE media, we support jumbo frames too. 432 * If we support GigE media, we support jumbo frames too.
433 * Unless we are Apple. 433 * Unless we are Apple.
434 */ 434 */
435 TAILQ_FOREACH(ifm, &sc->sc_mii.mii_media.ifm_list, ifm_list) { 435 TAILQ_FOREACH(ifm, &sc->sc_mii.mii_media.ifm_list, ifm_list) {
436 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_T || 436 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_T ||
437 IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_SX || 437 IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_SX ||
438 IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_LX || 438 IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_LX ||
439 IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_CX) { 439 IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_CX) {
440 if (!GEM_IS_APPLE(sc)) 440 if (!GEM_IS_APPLE(sc))
441 sc->sc_ethercom.ec_capabilities 441 sc->sc_ethercom.ec_capabilities
442 |= ETHERCAP_JUMBO_MTU; 442 |= ETHERCAP_JUMBO_MTU;
443 sc->sc_flags |= GEM_GIGABIT; 443 sc->sc_flags |= GEM_GIGABIT;
444 break; 444 break;
445 } 445 }
446 } 446 }
447 447
448 /* claim 802.1q capability */ 448 /* claim 802.1q capability */
449 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; 449 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
450 450
451 /* Attach the interface. */ 451 /* Attach the interface. */
452 if_attach(ifp); 452 if_attach(ifp);
453 ether_ifattach(ifp, enaddr); 453 ether_ifattach(ifp, enaddr);
454 ether_set_ifflags_cb(&sc->sc_ethercom, gem_ifflags_cb); 454 ether_set_ifflags_cb(&sc->sc_ethercom, gem_ifflags_cb);
455 455
456 sc->sc_sh = shutdownhook_establish(gem_shutdown, sc); 456 sc->sc_sh = shutdownhook_establish(gem_shutdown, sc);
457 if (sc->sc_sh == NULL) 457 if (sc->sc_sh == NULL)
458 panic("gem_config: can't establish shutdownhook"); 458 panic("gem_config: can't establish shutdownhook");
459 459
460#if NRND > 0 460#if NRND > 0
461 rnd_attach_source(&sc->rnd_source, device_xname(&sc->sc_dev), 461 rnd_attach_source(&sc->rnd_source, device_xname(&sc->sc_dev),
462 RND_TYPE_NET, 0); 462 RND_TYPE_NET, 0);
463#endif 463#endif
464 464
465 evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR, 465 evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR,
466 NULL, device_xname(&sc->sc_dev), "interrupts"); 466 NULL, device_xname(&sc->sc_dev), "interrupts");
467#ifdef GEM_COUNTERS 467#ifdef GEM_COUNTERS
468 evcnt_attach_dynamic(&sc->sc_ev_txint, EVCNT_TYPE_INTR, 468 evcnt_attach_dynamic(&sc->sc_ev_txint, EVCNT_TYPE_INTR,
469 &sc->sc_ev_intr, device_xname(&sc->sc_dev), "tx interrupts"); 469 &sc->sc_ev_intr, device_xname(&sc->sc_dev), "tx interrupts");
470 evcnt_attach_dynamic(&sc->sc_ev_rxint, EVCNT_TYPE_INTR, 470 evcnt_attach_dynamic(&sc->sc_ev_rxint, EVCNT_TYPE_INTR,
471 &sc->sc_ev_intr, device_xname(&sc->sc_dev), "rx interrupts"); 471 &sc->sc_ev_intr, device_xname(&sc->sc_dev), "rx interrupts");
472 evcnt_attach_dynamic(&sc->sc_ev_rxfull, EVCNT_TYPE_INTR, 472 evcnt_attach_dynamic(&sc->sc_ev_rxfull, EVCNT_TYPE_INTR,
473 &sc->sc_ev_rxint, device_xname(&sc->sc_dev), "rx ring full"); 473 &sc->sc_ev_rxint, device_xname(&sc->sc_dev), "rx ring full");
474 evcnt_attach_dynamic(&sc->sc_ev_rxnobuf, EVCNT_TYPE_INTR, 474 evcnt_attach_dynamic(&sc->sc_ev_rxnobuf, EVCNT_TYPE_INTR,
475 &sc->sc_ev_rxint, device_xname(&sc->sc_dev), "rx malloc failure"); 475 &sc->sc_ev_rxint, device_xname(&sc->sc_dev), "rx malloc failure");
476 evcnt_attach_dynamic(&sc->sc_ev_rxhist[0], EVCNT_TYPE_INTR, 476 evcnt_attach_dynamic(&sc->sc_ev_rxhist[0], EVCNT_TYPE_INTR,
477 &sc->sc_ev_rxint, device_xname(&sc->sc_dev), "rx 0desc"); 477 &sc->sc_ev_rxint, device_xname(&sc->sc_dev), "rx 0desc");
478 evcnt_attach_dynamic(&sc->sc_ev_rxhist[1], EVCNT_TYPE_INTR, 478 evcnt_attach_dynamic(&sc->sc_ev_rxhist[1], EVCNT_TYPE_INTR,
479 &sc->sc_ev_rxint, device_xname(&sc->sc_dev), "rx 1desc"); 479 &sc->sc_ev_rxint, device_xname(&sc->sc_dev), "rx 1desc");
480 evcnt_attach_dynamic(&sc->sc_ev_rxhist[2], EVCNT_TYPE_INTR, 480 evcnt_attach_dynamic(&sc->sc_ev_rxhist[2], EVCNT_TYPE_INTR,
481 &sc->sc_ev_rxint, device_xname(&sc->sc_dev), "rx 2desc"); 481 &sc->sc_ev_rxint, device_xname(&sc->sc_dev), "rx 2desc");
482 evcnt_attach_dynamic(&sc->sc_ev_rxhist[3], EVCNT_TYPE_INTR, 482 evcnt_attach_dynamic(&sc->sc_ev_rxhist[3], EVCNT_TYPE_INTR,
483 &sc->sc_ev_rxint, device_xname(&sc->sc_dev), "rx 3desc"); 483 &sc->sc_ev_rxint, device_xname(&sc->sc_dev), "rx 3desc");
484 evcnt_attach_dynamic(&sc->sc_ev_rxhist[4], EVCNT_TYPE_INTR, 484 evcnt_attach_dynamic(&sc->sc_ev_rxhist[4], EVCNT_TYPE_INTR,
485 &sc->sc_ev_rxint, device_xname(&sc->sc_dev), "rx >3desc"); 485 &sc->sc_ev_rxint, device_xname(&sc->sc_dev), "rx >3desc");
486 evcnt_attach_dynamic(&sc->sc_ev_rxhist[5], EVCNT_TYPE_INTR, 486 evcnt_attach_dynamic(&sc->sc_ev_rxhist[5], EVCNT_TYPE_INTR,
487 &sc->sc_ev_rxint, device_xname(&sc->sc_dev), "rx >7desc"); 487 &sc->sc_ev_rxint, device_xname(&sc->sc_dev), "rx >7desc");
488 evcnt_attach_dynamic(&sc->sc_ev_rxhist[6], EVCNT_TYPE_INTR, 488 evcnt_attach_dynamic(&sc->sc_ev_rxhist[6], EVCNT_TYPE_INTR,
489 &sc->sc_ev_rxint, device_xname(&sc->sc_dev), "rx >15desc"); 489 &sc->sc_ev_rxint, device_xname(&sc->sc_dev), "rx >15desc");
490 evcnt_attach_dynamic(&sc->sc_ev_rxhist[7], EVCNT_TYPE_INTR, 490 evcnt_attach_dynamic(&sc->sc_ev_rxhist[7], EVCNT_TYPE_INTR,
491 &sc->sc_ev_rxint, device_xname(&sc->sc_dev), "rx >31desc"); 491 &sc->sc_ev_rxint, device_xname(&sc->sc_dev), "rx >31desc");
492 evcnt_attach_dynamic(&sc->sc_ev_rxhist[8], EVCNT_TYPE_INTR, 492 evcnt_attach_dynamic(&sc->sc_ev_rxhist[8], EVCNT_TYPE_INTR,
493 &sc->sc_ev_rxint, device_xname(&sc->sc_dev), "rx >63desc"); 493 &sc->sc_ev_rxint, device_xname(&sc->sc_dev), "rx >63desc");
494#endif 494#endif
495 495
496#if notyet 496#if notyet
497 /* 497 /*
498 * Add a suspend hook to make sure we come back up after a 498 * Add a suspend hook to make sure we come back up after a
499 * resume. 499 * resume.
500 */ 500 */
501 sc->sc_powerhook = powerhook_establish(device_xname(&sc->sc_dev), 501 sc->sc_powerhook = powerhook_establish(device_xname(&sc->sc_dev),
502 gem_power, sc); 502 gem_power, sc);
503 if (sc->sc_powerhook == NULL) 503 if (sc->sc_powerhook == NULL)
504 aprint_error_dev(&sc->sc_dev, "WARNING: unable to establish power hook\n"); 504 aprint_error_dev(&sc->sc_dev, "WARNING: unable to establish power hook\n");
505#endif 505#endif
506 506
507 callout_init(&sc->sc_tick_ch, 0); 507 callout_init(&sc->sc_tick_ch, 0);
508 return; 508 return;
509 509
510 /* 510 /*
511 * Free any resources we've allocated during the failed attach 511 * Free any resources we've allocated during the failed attach
512 * attempt. Do this in reverse order and fall through. 512 * attempt. Do this in reverse order and fall through.
513 */ 513 */
514 fail_7: 514 fail_7:
515 for (i = 0; i < GEM_NRXDESC; i++) { 515 for (i = 0; i < GEM_NRXDESC; i++) {
516 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 516 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
517 bus_dmamap_destroy(sc->sc_dmatag, 517 bus_dmamap_destroy(sc->sc_dmatag,
518 sc->sc_rxsoft[i].rxs_dmamap); 518 sc->sc_rxsoft[i].rxs_dmamap);
519 } 519 }
520 fail_6: 520 fail_6:
521 for (i = 0; i < GEM_TXQUEUELEN; i++) { 521 for (i = 0; i < GEM_TXQUEUELEN; i++) {
522 if (sc->sc_txsoft[i].txs_dmamap != NULL) 522 if (sc->sc_txsoft[i].txs_dmamap != NULL)
523 bus_dmamap_destroy(sc->sc_dmatag, 523 bus_dmamap_destroy(sc->sc_dmatag,
524 sc->sc_txsoft[i].txs_dmamap); 524 sc->sc_txsoft[i].txs_dmamap);
525 } 525 }
526 bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap); 526 bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap);
527 fail_5: 527 fail_5:
528 bus_dmamap_destroy(sc->sc_dmatag, sc->sc_nulldmamap); 528 bus_dmamap_destroy(sc->sc_dmatag, sc->sc_nulldmamap);
529 fail_4: 529 fail_4:
530 bus_dmamem_unmap(sc->sc_dmatag, (void *)nullbuf, ETHER_MIN_TX); 530 bus_dmamem_unmap(sc->sc_dmatag, (void *)nullbuf, ETHER_MIN_TX);
531 fail_3: 531 fail_3:
532 bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap); 532 bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap);
533 fail_2: 533 fail_2:
534 bus_dmamem_unmap(sc->sc_dmatag, (void *)sc->sc_control_data, 534 bus_dmamem_unmap(sc->sc_dmatag, (void *)sc->sc_control_data,
535 sizeof(struct gem_control_data)); 535 sizeof(struct gem_control_data));
536 fail_1: 536 fail_1:
537 bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg); 537 bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg);
538 fail_0: 538 fail_0:
539 return; 539 return;
540} 540}
541 541
542 542
543void 543void
544gem_tick(void *arg) 544gem_tick(void *arg)
545{ 545{
546 struct gem_softc *sc = arg; 546 struct gem_softc *sc = arg;
547 int s; 547 int s;
548 548
549 if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) != 0) { 549 if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) != 0) {
550 /* 550 /*
551 * We have to reset everything if we failed to get a 551 * We have to reset everything if we failed to get a
552 * PCS interrupt. Restarting the callout is handled 552 * PCS interrupt. Restarting the callout is handled
553 * in gem_pcs_start(). 553 * in gem_pcs_start().
554 */ 554 */
555 gem_init(&sc->sc_ethercom.ec_if); 555 gem_init(&sc->sc_ethercom.ec_if);
556 } else { 556 } else {
557 s = splnet(); 557 s = splnet();
558 mii_tick(&sc->sc_mii); 558 mii_tick(&sc->sc_mii);
559 splx(s); 559 splx(s);
560 callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc); 560 callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
561 } 561 }
562} 562}
563 563
564static int 564static int
565gem_bitwait(struct gem_softc *sc, bus_space_handle_t h, int r, u_int32_t clr, u_int32_t set) 565gem_bitwait(struct gem_softc *sc, bus_space_handle_t h, int r, u_int32_t clr, u_int32_t set)
566{ 566{
567 int i; 567 int i;
568 u_int32_t reg; 568 u_int32_t reg;
569 569
570 for (i = TRIES; i--; DELAY(100)) { 570 for (i = TRIES; i--; DELAY(100)) {
571 reg = bus_space_read_4(sc->sc_bustag, h, r); 571 reg = bus_space_read_4(sc->sc_bustag, h, r);
572 if ((reg & clr) == 0 && (reg & set) == set) 572 if ((reg & clr) == 0 && (reg & set) == set)
573 return (1); 573 return (1);
574 } 574 }
575 return (0); 575 return (0);
576} 576}
577 577
578void 578void
579gem_reset(struct gem_softc *sc) 579gem_reset(struct gem_softc *sc)
580{ 580{
581 bus_space_tag_t t = sc->sc_bustag; 581 bus_space_tag_t t = sc->sc_bustag;
582 bus_space_handle_t h = sc->sc_h2; 582 bus_space_handle_t h = sc->sc_h2;
583 int s; 583 int s;
584 584
585 s = splnet(); 585 s = splnet();
586 DPRINTF(sc, ("%s: gem_reset\n", device_xname(&sc->sc_dev))); 586 DPRINTF(sc, ("%s: gem_reset\n", device_xname(&sc->sc_dev)));
587 gem_reset_rx(sc); 587 gem_reset_rx(sc);
588 gem_reset_tx(sc); 588 gem_reset_tx(sc);
589 589
590 /* Do a full reset */ 590 /* Do a full reset */
591 bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX|GEM_RESET_TX); 591 bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX|GEM_RESET_TX);
592 if (!gem_bitwait(sc, h, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0)) 592 if (!gem_bitwait(sc, h, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
593 aprint_error_dev(&sc->sc_dev, "cannot reset device\n"); 593 aprint_error_dev(&sc->sc_dev, "cannot reset device\n");
594 splx(s); 594 splx(s);
595} 595}
596 596
597 597
598/* 598/*
599 * gem_rxdrain: 599 * gem_rxdrain:
600 * 600 *
601 * Drain the receive queue. 601 * Drain the receive queue.
602 */ 602 */
603static void 603static void
604gem_rxdrain(struct gem_softc *sc) 604gem_rxdrain(struct gem_softc *sc)
605{ 605{
606 struct gem_rxsoft *rxs; 606 struct gem_rxsoft *rxs;
607 int i; 607 int i;
608 608
609 for (i = 0; i < GEM_NRXDESC; i++) { 609 for (i = 0; i < GEM_NRXDESC; i++) {
610 rxs = &sc->sc_rxsoft[i]; 610 rxs = &sc->sc_rxsoft[i];
611 if (rxs->rxs_mbuf != NULL) { 611 if (rxs->rxs_mbuf != NULL) {
612 bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0, 612 bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
613 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 613 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
614 bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap); 614 bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);
615 m_freem(rxs->rxs_mbuf); 615 m_freem(rxs->rxs_mbuf);
616 rxs->rxs_mbuf = NULL; 616 rxs->rxs_mbuf = NULL;
617 } 617 }
618 } 618 }
619} 619}
620 620
621/* 621/*
622 * Reset the whole thing. 622 * Reset the whole thing.
623 */ 623 */
624static void 624static void
625gem_stop(struct ifnet *ifp, int disable) 625gem_stop(struct ifnet *ifp, int disable)
626{ 626{
627 struct gem_softc *sc = (struct gem_softc *)ifp->if_softc; 627 struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
628 struct gem_txsoft *txs; 628 struct gem_txsoft *txs;
629 629
630 DPRINTF(sc, ("%s: gem_stop\n", device_xname(&sc->sc_dev))); 630 DPRINTF(sc, ("%s: gem_stop\n", device_xname(&sc->sc_dev)));
631 631
632 callout_stop(&sc->sc_tick_ch); 632 callout_stop(&sc->sc_tick_ch);
633 if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) != 0) 633 if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) != 0)
634 gem_pcs_stop(sc, disable); 634 gem_pcs_stop(sc, disable);
635 else 635 else
636 mii_down(&sc->sc_mii); 636 mii_down(&sc->sc_mii);
637 637
638 /* XXX - Should we reset these instead? */ 638 /* XXX - Should we reset these instead? */
639 gem_disable_tx(sc); 639 gem_disable_tx(sc);
640 gem_disable_rx(sc); 640 gem_disable_rx(sc);
641 641
642 /* 642 /*
643 * Release any queued transmit buffers. 643 * Release any queued transmit buffers.
644 */ 644 */
645 while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) { 645 while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
646 SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); 646 SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
647 if (txs->txs_mbuf != NULL) { 647 if (txs->txs_mbuf != NULL) {
648 bus_dmamap_sync(sc->sc_dmatag, txs->txs_dmamap, 0, 648 bus_dmamap_sync(sc->sc_dmatag, txs->txs_dmamap, 0,
649 txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 649 txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
650 bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap); 650 bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap);
651 m_freem(txs->txs_mbuf); 651 m_freem(txs->txs_mbuf);
652 txs->txs_mbuf = NULL; 652 txs->txs_mbuf = NULL;
653 } 653 }
654 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 654 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
655 } 655 }
656 656
657 /* 657 /*
658 * Mark the interface down and cancel the watchdog timer. 658 * Mark the interface down and cancel the watchdog timer.
659 */ 659 */
660 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 660 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
661 sc->sc_if_flags = ifp->if_flags; 661 sc->sc_if_flags = ifp->if_flags;
662 ifp->if_timer = 0; 662 ifp->if_timer = 0;
663 663
664 if (disable) 664 if (disable)
665 gem_rxdrain(sc); 665 gem_rxdrain(sc);
666} 666}
667 667
668 668
669/* 669/*
670 * Reset the receiver 670 * Reset the receiver
671 */ 671 */
672int 672int
673gem_reset_rx(struct gem_softc *sc) 673gem_reset_rx(struct gem_softc *sc)
674{ 674{
675 bus_space_tag_t t = sc->sc_bustag; 675 bus_space_tag_t t = sc->sc_bustag;
676 bus_space_handle_t h = sc->sc_h1, h2 = sc->sc_h2; 676 bus_space_handle_t h = sc->sc_h1, h2 = sc->sc_h2;
677 677
678 /* 678 /*
679 * Resetting while DMA is in progress can cause a bus hang, so we 679 * Resetting while DMA is in progress can cause a bus hang, so we
680 * disable DMA first. 680 * disable DMA first.
681 */ 681 */
682 gem_disable_rx(sc); 682 gem_disable_rx(sc);
683 bus_space_write_4(t, h, GEM_RX_CONFIG, 0); 683 bus_space_write_4(t, h, GEM_RX_CONFIG, 0);
684 bus_space_barrier(t, h, GEM_RX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE); 684 bus_space_barrier(t, h, GEM_RX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE);
685 /* Wait till it finishes */ 685 /* Wait till it finishes */
686 if (!gem_bitwait(sc, h, GEM_RX_CONFIG, 1, 0)) 686 if (!gem_bitwait(sc, h, GEM_RX_CONFIG, 1, 0))
687 aprint_error_dev(&sc->sc_dev, "cannot disable read dma\n"); 687 aprint_error_dev(&sc->sc_dev, "cannot disable read dma\n");
688 688
689 /* Finally, reset the ERX */ 689 /* Finally, reset the ERX */
690 bus_space_write_4(t, h2, GEM_RESET, GEM_RESET_RX); 690 bus_space_write_4(t, h2, GEM_RESET, GEM_RESET_RX);
691 bus_space_barrier(t, h, GEM_RESET, 4, BUS_SPACE_BARRIER_WRITE); 691 bus_space_barrier(t, h, GEM_RESET, 4, BUS_SPACE_BARRIER_WRITE);
692 /* Wait till it finishes */ 692 /* Wait till it finishes */
693 if (!gem_bitwait(sc, h2, GEM_RESET, GEM_RESET_RX, 0)) { 693 if (!gem_bitwait(sc, h2, GEM_RESET, GEM_RESET_RX, 0)) {
694 aprint_error_dev(&sc->sc_dev, "cannot reset receiver\n"); 694 aprint_error_dev(&sc->sc_dev, "cannot reset receiver\n");
695 return (1); 695 return (1);
696 } 696 }
697 return (0); 697 return (0);
698} 698}
699 699
700 700
701/* 701/*
702 * Reset the receiver DMA engine. 702 * Reset the receiver DMA engine.
703 * 703 *
704 * Intended to be used in case of GEM_INTR_RX_TAG_ERR, GEM_MAC_RX_OVERFLOW 704 * Intended to be used in case of GEM_INTR_RX_TAG_ERR, GEM_MAC_RX_OVERFLOW
705 * etc in order to reset the receiver DMA engine only and not do a full 705 * etc in order to reset the receiver DMA engine only and not do a full
706 * reset which amongst others also downs the link and clears the FIFOs. 706 * reset which amongst others also downs the link and clears the FIFOs.
707 */ 707 */
708static void 708static void
709gem_reset_rxdma(struct gem_softc *sc) 709gem_reset_rxdma(struct gem_softc *sc)
710{ 710{
711 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 711 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
712 bus_space_tag_t t = sc->sc_bustag; 712 bus_space_tag_t t = sc->sc_bustag;
713 bus_space_handle_t h = sc->sc_h1; 713 bus_space_handle_t h = sc->sc_h1;
714 int i; 714 int i;
715 715
716 if (gem_reset_rx(sc) != 0) { 716 if (gem_reset_rx(sc) != 0) {
717 gem_init(ifp); 717 gem_init(ifp);
718 return; 718 return;
719 } 719 }
720 for (i = 0; i < GEM_NRXDESC; i++) 720 for (i = 0; i < GEM_NRXDESC; i++)
721 if (sc->sc_rxsoft[i].rxs_mbuf != NULL) 721 if (sc->sc_rxsoft[i].rxs_mbuf != NULL)
722 GEM_UPDATE_RXDESC(sc, i); 722 GEM_UPDATE_RXDESC(sc, i);
723 sc->sc_rxptr = 0; 723 sc->sc_rxptr = 0;
724 GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE); 724 GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
725 GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD); 725 GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD);
726 726
727 /* Reprogram Descriptor Ring Base Addresses */ 727 /* Reprogram Descriptor Ring Base Addresses */
728 /* NOTE: we use only 32-bit DMA addresses here. */ 728 /* NOTE: we use only 32-bit DMA addresses here. */
729 bus_space_write_4(t, h, GEM_RX_RING_PTR_HI, 0); 729 bus_space_write_4(t, h, GEM_RX_RING_PTR_HI, 0);
730 bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0)); 730 bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
731 731
732 /* Redo ERX Configuration */ 732 /* Redo ERX Configuration */
733 gem_rx_common(sc); 733 gem_rx_common(sc);
734 734
735 /* Give the reciever a swift kick */ 735 /* Give the reciever a swift kick */
736 bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC - 4); 736 bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC - 4);
737} 737}
738 738
739/* 739/*
740 * Common RX configuration for gem_init() and gem_reset_rxdma(). 740 * Common RX configuration for gem_init() and gem_reset_rxdma().
741 */ 741 */
742static void 742static void
743gem_rx_common(struct gem_softc *sc) 743gem_rx_common(struct gem_softc *sc)
744{ 744{
745 bus_space_tag_t t = sc->sc_bustag; 745 bus_space_tag_t t = sc->sc_bustag;
746 bus_space_handle_t h = sc->sc_h1; 746 bus_space_handle_t h = sc->sc_h1;
747 u_int32_t v; 747 u_int32_t v;
748 748
749 /* Encode Receive Descriptor ring size: four possible values */ 749 /* Encode Receive Descriptor ring size: four possible values */
750 v = gem_ringsize(GEM_NRXDESC /*XXX*/); 750 v = gem_ringsize(GEM_NRXDESC /*XXX*/);
751 751
752 /* Set receive h/w checksum offset */ 752 /* Set receive h/w checksum offset */
753#ifdef INET 753#ifdef INET
754 v |= (ETHER_HDR_LEN + sizeof(struct ip) + 754 v |= (ETHER_HDR_LEN + sizeof(struct ip) +
755 ((sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) ? 755 ((sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) ?
756 ETHER_VLAN_ENCAP_LEN : 0)) << GEM_RX_CONFIG_CXM_START_SHFT; 756 ETHER_VLAN_ENCAP_LEN : 0)) << GEM_RX_CONFIG_CXM_START_SHFT;
757#endif 757#endif
758 758
759 /* Enable RX DMA */ 759 /* Enable RX DMA */
760 bus_space_write_4(t, h, GEM_RX_CONFIG, 760 bus_space_write_4(t, h, GEM_RX_CONFIG,
761 v | (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) | 761 v | (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) |
762 (2 << GEM_RX_CONFIG_FBOFF_SHFT) | GEM_RX_CONFIG_RXDMA_EN); 762 (2 << GEM_RX_CONFIG_FBOFF_SHFT) | GEM_RX_CONFIG_RXDMA_EN);
763 763
764 /* 764 /*
765 * The following value is for an OFF Threshold of about 3/4 full 765 * The following value is for an OFF Threshold of about 3/4 full
766 * and an ON Threshold of 1/4 full. 766 * and an ON Threshold of 1/4 full.
767 */ 767 */
768 bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH, 768 bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH,
769 (3 * sc->sc_rxfifosize / 256) | 769 (3 * sc->sc_rxfifosize / 256) |
770 ((sc->sc_rxfifosize / 256) << 12)); 770 ((sc->sc_rxfifosize / 256) << 12));
771 bus_space_write_4(t, h, GEM_RX_BLANKING, 771 bus_space_write_4(t, h, GEM_RX_BLANKING,
772 (6 << GEM_RX_BLANKING_TIME_SHIFT) | 6); 772 (6 << GEM_RX_BLANKING_TIME_SHIFT) | 6);
773} 773}
774 774
775/* 775/*
776 * Reset the transmitter 776 * Reset the transmitter
777 */ 777 */
778int 778int
779gem_reset_tx(struct gem_softc *sc) 779gem_reset_tx(struct gem_softc *sc)
780{ 780{
781 bus_space_tag_t t = sc->sc_bustag; 781 bus_space_tag_t t = sc->sc_bustag;
782 bus_space_handle_t h = sc->sc_h1, h2 = sc->sc_h2; 782 bus_space_handle_t h = sc->sc_h1, h2 = sc->sc_h2;
783 783
784 /* 784 /*
785 * Resetting while DMA is in progress can cause a bus hang, so we 785 * Resetting while DMA is in progress can cause a bus hang, so we
786 * disable DMA first. 786 * disable DMA first.
787 */ 787 */
788 gem_disable_tx(sc); 788 gem_disable_tx(sc);
789 bus_space_write_4(t, h, GEM_TX_CONFIG, 0); 789 bus_space_write_4(t, h, GEM_TX_CONFIG, 0);
790 bus_space_barrier(t, h, GEM_TX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE); 790 bus_space_barrier(t, h, GEM_TX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE);
791 /* Wait till it finishes */ 791 /* Wait till it finishes */
792 if (!gem_bitwait(sc, h, GEM_TX_CONFIG, 1, 0)) 792 if (!gem_bitwait(sc, h, GEM_TX_CONFIG, 1, 0))
793 aprint_error_dev(&sc->sc_dev, "cannot disable read dma\n"); 793 aprint_error_dev(&sc->sc_dev, "cannot disable read dma\n");
794 /* Wait 5ms extra. */ 794 /* Wait 5ms extra. */
795 delay(5000); 795 delay(5000);
796 796
797 /* Finally, reset the ETX */ 797 /* Finally, reset the ETX */
798 bus_space_write_4(t, h2, GEM_RESET, GEM_RESET_TX); 798 bus_space_write_4(t, h2, GEM_RESET, GEM_RESET_TX);
799 bus_space_barrier(t, h, GEM_RESET, 4, BUS_SPACE_BARRIER_WRITE); 799 bus_space_barrier(t, h, GEM_RESET, 4, BUS_SPACE_BARRIER_WRITE);
800 /* Wait till it finishes */ 800 /* Wait till it finishes */
801 if (!gem_bitwait(sc, h2, GEM_RESET, GEM_RESET_TX, 0)) { 801 if (!gem_bitwait(sc, h2, GEM_RESET, GEM_RESET_TX, 0)) {
802 aprint_error_dev(&sc->sc_dev, "cannot reset receiver\n"); 802 aprint_error_dev(&sc->sc_dev, "cannot reset receiver\n");
803 return (1); 803 return (1);
804 } 804 }
805 return (0); 805 return (0);
806} 806}
807 807
808/* 808/*
809 * disable receiver. 809 * disable receiver.
810 */ 810 */
811int 811int
812gem_disable_rx(struct gem_softc *sc) 812gem_disable_rx(struct gem_softc *sc)
813{ 813{
814 bus_space_tag_t t = sc->sc_bustag; 814 bus_space_tag_t t = sc->sc_bustag;
815 bus_space_handle_t h = sc->sc_h1; 815 bus_space_handle_t h = sc->sc_h1;
816 u_int32_t cfg; 816 u_int32_t cfg;
817 817
818 /* Flip the enable bit */ 818 /* Flip the enable bit */
819 cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG); 819 cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
820 cfg &= ~GEM_MAC_RX_ENABLE; 820 cfg &= ~GEM_MAC_RX_ENABLE;
821 bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg); 821 bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg);
822 bus_space_barrier(t, h, GEM_MAC_RX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE); 822 bus_space_barrier(t, h, GEM_MAC_RX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE);
823 /* Wait for it to finish */ 823 /* Wait for it to finish */
824 return (gem_bitwait(sc, h, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0)); 824 return (gem_bitwait(sc, h, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0));
825} 825}
826 826
827/* 827/*
828 * disable transmitter. 828 * disable transmitter.
829 */ 829 */
830int 830int
831gem_disable_tx(struct gem_softc *sc) 831gem_disable_tx(struct gem_softc *sc)
832{ 832{
833 bus_space_tag_t t = sc->sc_bustag; 833 bus_space_tag_t t = sc->sc_bustag;
834 bus_space_handle_t h = sc->sc_h1; 834 bus_space_handle_t h = sc->sc_h1;
835 u_int32_t cfg; 835 u_int32_t cfg;
836 836
837 /* Flip the enable bit */ 837 /* Flip the enable bit */
838 cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG); 838 cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG);
839 cfg &= ~GEM_MAC_TX_ENABLE; 839 cfg &= ~GEM_MAC_TX_ENABLE;
840 bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg); 840 bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg);
841 bus_space_barrier(t, h, GEM_MAC_TX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE); 841 bus_space_barrier(t, h, GEM_MAC_TX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE);
842 /* Wait for it to finish */ 842 /* Wait for it to finish */
843 return (gem_bitwait(sc, h, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0)); 843 return (gem_bitwait(sc, h, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0));
844} 844}
845 845
846/* 846/*
847 * Initialize interface. 847 * Initialize interface.
848 */ 848 */
849int 849int
850gem_meminit(struct gem_softc *sc) 850gem_meminit(struct gem_softc *sc)
851{ 851{
852 struct gem_rxsoft *rxs; 852 struct gem_rxsoft *rxs;
853 int i, error; 853 int i, error;
854 854
855 /* 855 /*
856 * Initialize the transmit descriptor ring. 856 * Initialize the transmit descriptor ring.
857 */ 857 */
858 memset((void *)sc->sc_txdescs, 0, sizeof(sc->sc_txdescs)); 858 memset((void *)sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
859 for (i = 0; i < GEM_NTXDESC; i++) { 859 for (i = 0; i < GEM_NTXDESC; i++) {
860 sc->sc_txdescs[i].gd_flags = 0; 860 sc->sc_txdescs[i].gd_flags = 0;
861 sc->sc_txdescs[i].gd_addr = 0; 861 sc->sc_txdescs[i].gd_addr = 0;
862 } 862 }
863 GEM_CDTXSYNC(sc, 0, GEM_NTXDESC, 863 GEM_CDTXSYNC(sc, 0, GEM_NTXDESC,
864 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 864 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
865 sc->sc_txfree = GEM_NTXDESC-1; 865 sc->sc_txfree = GEM_NTXDESC-1;
866 sc->sc_txnext = 0; 866 sc->sc_txnext = 0;
867 sc->sc_txwin = 0; 867 sc->sc_txwin = 0;
868 868
869 /* 869 /*
870 * Initialize the receive descriptor and receive job 870 * Initialize the receive descriptor and receive job
871 * descriptor rings. 871 * descriptor rings.
872 */ 872 */
873 for (i = 0; i < GEM_NRXDESC; i++) { 873 for (i = 0; i < GEM_NRXDESC; i++) {
874 rxs = &sc->sc_rxsoft[i]; 874 rxs = &sc->sc_rxsoft[i];
875 if (rxs->rxs_mbuf == NULL) { 875 if (rxs->rxs_mbuf == NULL) {
876 if ((error = gem_add_rxbuf(sc, i)) != 0) { 876 if ((error = gem_add_rxbuf(sc, i)) != 0) {
877 aprint_error_dev(&sc->sc_dev, "unable to allocate or map rx " 877 aprint_error_dev(&sc->sc_dev, "unable to allocate or map rx "
878 "buffer %d, error = %d\n", 878 "buffer %d, error = %d\n",
879 i, error); 879 i, error);
880 /* 880 /*
881 * XXX Should attempt to run with fewer receive 881 * XXX Should attempt to run with fewer receive
882 * XXX buffers instead of just failing. 882 * XXX buffers instead of just failing.
883 */ 883 */
884 gem_rxdrain(sc); 884 gem_rxdrain(sc);
885 return (1); 885 return (1);
886 } 886 }
887 } else 887 } else
888 GEM_INIT_RXDESC(sc, i); 888 GEM_INIT_RXDESC(sc, i);
889 } 889 }
890 sc->sc_rxptr = 0; 890 sc->sc_rxptr = 0;
891 sc->sc_meminited = 1; 891 sc->sc_meminited = 1;
892 GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE); 892 GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
893 GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD); 893 GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD);
894 894
895 return (0); 895 return (0);
896} 896}
897 897
898static int 898static int
899gem_ringsize(int sz) 899gem_ringsize(int sz)
900{ 900{
901 switch (sz) { 901 switch (sz) {
902 case 32: 902 case 32:
903 return GEM_RING_SZ_32; 903 return GEM_RING_SZ_32;
904 case 64: 904 case 64:
905 return GEM_RING_SZ_64; 905 return GEM_RING_SZ_64;
906 case 128: 906 case 128:
907 return GEM_RING_SZ_128; 907 return GEM_RING_SZ_128;
908 case 256: 908 case 256:
909 return GEM_RING_SZ_256; 909 return GEM_RING_SZ_256;
910 case 512: 910 case 512:
911 return GEM_RING_SZ_512; 911 return GEM_RING_SZ_512;
912 case 1024: 912 case 1024:
913 return GEM_RING_SZ_1024; 913 return GEM_RING_SZ_1024;
914 case 2048: 914 case 2048:
915 return GEM_RING_SZ_2048; 915 return GEM_RING_SZ_2048;
916 case 4096: 916 case 4096:
917 return GEM_RING_SZ_4096; 917 return GEM_RING_SZ_4096;
918 case 8192: 918 case 8192:
919 return GEM_RING_SZ_8192; 919 return GEM_RING_SZ_8192;
920 default: 920 default:
921 printf("gem: invalid Receive Descriptor ring size %d\n", sz); 921 printf("gem: invalid Receive Descriptor ring size %d\n", sz);
922 return GEM_RING_SZ_32; 922 return GEM_RING_SZ_32;
923 } 923 }
924} 924}
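
gem_ringsize() is a table lookup from a power-of-two ring size to the chip's encoding. Assuming the GEM_RING_SZ_* codes are the consecutive values 0..8 (an assumption; the real macros may shift the code into a register bit-field), the same mapping can be computed directly, as in this sketch:

    #include <stdio.h>

    /* Map 32 << n to n for n = 0..8, mirroring the switch above. */
    static int
    ringsize_encode(int sz)
    {
            int enc = 0;

            /* reject non-powers-of-two and out-of-range sizes */
            if (sz < 32 || sz > 8192 || (sz & (sz - 1)) != 0)
                    return 0;   /* fall back to smallest, as the driver does */
            while ((32 << enc) != sz)
                    enc++;
            return enc;
    }

    int
    main(void)
    {
            for (int sz = 32; sz <= 8192; sz <<= 1)
                    printf("%4d -> %d\n", sz, ringsize_encode(sz));
            return 0;
    }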
925 925
926 926
927/* 927/*
928 * Start PCS 928 * Start PCS
929 */ 929 */
930void 930void
931gem_pcs_start(struct gem_softc *sc) 931gem_pcs_start(struct gem_softc *sc)
932{ 932{
933 bus_space_tag_t t = sc->sc_bustag; 933 bus_space_tag_t t = sc->sc_bustag;
934 bus_space_handle_t h = sc->sc_h1; 934 bus_space_handle_t h = sc->sc_h1;
935 uint32_t v; 935 uint32_t v;
936 936
937#ifdef GEM_DEBUG 937#ifdef GEM_DEBUG
938 aprint_debug_dev(&sc->sc_dev, "gem_pcs_start()\n"); 938 aprint_debug_dev(&sc->sc_dev, "gem_pcs_start()\n");
939#endif 939#endif
940 940
941 /* 941 /*
942 * Set up. We must disable the MII before modifying the 942 * Set up. We must disable the MII before modifying the
943 * GEM_MII_ANAR register 943 * GEM_MII_ANAR register
944 */ 944 */
945 if (sc->sc_flags & GEM_SERDES) { 945 if (sc->sc_flags & GEM_SERDES) {
946 bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE, 946 bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE,
947 GEM_MII_DATAPATH_SERDES); 947 GEM_MII_DATAPATH_SERDES);
948 bus_space_write_4(t, h, GEM_MII_SLINK_CONTROL, 948 bus_space_write_4(t, h, GEM_MII_SLINK_CONTROL,
949 GEM_MII_SLINK_LOOPBACK); 949 GEM_MII_SLINK_LOOPBACK);
950 } else { 950 } else {
951 bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE, 951 bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE,
952 GEM_MII_DATAPATH_SERIAL); 952 GEM_MII_DATAPATH_SERIAL);
953 bus_space_write_4(t, h, GEM_MII_SLINK_CONTROL, 0); 953 bus_space_write_4(t, h, GEM_MII_SLINK_CONTROL, 0);
954 } 954 }
955 bus_space_write_4(t, h, GEM_MII_CONFIG, 0); 955 bus_space_write_4(t, h, GEM_MII_CONFIG, 0);
956 v = bus_space_read_4(t, h, GEM_MII_ANAR); 956 v = bus_space_read_4(t, h, GEM_MII_ANAR);
957 v |= (GEM_MII_ANEG_SYM_PAUSE | GEM_MII_ANEG_ASYM_PAUSE); 957 v |= (GEM_MII_ANEG_SYM_PAUSE | GEM_MII_ANEG_ASYM_PAUSE);
958 if (sc->sc_mii_media == IFM_AUTO) 958 if (sc->sc_mii_media == IFM_AUTO)
959 v |= (GEM_MII_ANEG_FUL_DUPLX | GEM_MII_ANEG_HLF_DUPLX); 959 v |= (GEM_MII_ANEG_FUL_DUPLX | GEM_MII_ANEG_HLF_DUPLX);
960 else if (sc->sc_mii_media == IFM_FDX) { 960 else if (sc->sc_mii_media == IFM_FDX) {
961 v |= GEM_MII_ANEG_FUL_DUPLX; 961 v |= GEM_MII_ANEG_FUL_DUPLX;
962 v &= ~GEM_MII_ANEG_HLF_DUPLX; 962 v &= ~GEM_MII_ANEG_HLF_DUPLX;
963 } else if (sc->sc_mii_media == IFM_HDX) { 963 } else if (sc->sc_mii_media == IFM_HDX) {
964 v &= ~GEM_MII_ANEG_FUL_DUPLX; 964 v &= ~GEM_MII_ANEG_FUL_DUPLX;
965 v |= GEM_MII_ANEG_HLF_DUPLX; 965 v |= GEM_MII_ANEG_HLF_DUPLX;
966 } 966 }
967 967
968 /* Configure link. */ 968 /* Configure link. */
969 bus_space_write_4(t, h, GEM_MII_ANAR, v); 969 bus_space_write_4(t, h, GEM_MII_ANAR, v);
970 bus_space_write_4(t, h, GEM_MII_CONTROL, 970 bus_space_write_4(t, h, GEM_MII_CONTROL,
971 GEM_MII_CONTROL_AUTONEG | GEM_MII_CONTROL_RAN); 971 GEM_MII_CONTROL_AUTONEG | GEM_MII_CONTROL_RAN);
972 bus_space_write_4(t, h, GEM_MII_CONFIG, GEM_MII_CONFIG_ENABLE); 972 bus_space_write_4(t, h, GEM_MII_CONFIG, GEM_MII_CONFIG_ENABLE);
973 gem_bitwait(sc, h, GEM_MII_STATUS, 0, GEM_MII_STATUS_ANEG_CPT); 973 gem_bitwait(sc, h, GEM_MII_STATUS, 0, GEM_MII_STATUS_ANEG_CPT);
974 974
975 /* Start the 10 second timer */ 975 /* Start the 10 second timer */
976 callout_reset(&sc->sc_tick_ch, hz * 10, gem_tick, sc); 976 callout_reset(&sc->sc_tick_ch, hz * 10, gem_tick, sc);
977} 977}
978 978
979/* 979/*
980 * Stop PCS 980 * Stop PCS
981 */ 981 */
982void 982void
983gem_pcs_stop(struct gem_softc *sc, int disable) 983gem_pcs_stop(struct gem_softc *sc, int disable)
984{ 984{
985 bus_space_tag_t t = sc->sc_bustag; 985 bus_space_tag_t t = sc->sc_bustag;
986 bus_space_handle_t h = sc->sc_h1; 986 bus_space_handle_t h = sc->sc_h1;
987 987
988#ifdef GEM_DEBUG 988#ifdef GEM_DEBUG
989 aprint_debug_dev(&sc->sc_dev, "gem_pcs_stop()\n"); 989 aprint_debug_dev(&sc->sc_dev, "gem_pcs_stop()\n");
990#endif 990#endif
991 991
992 /* Tell link partner that we're going away */ 992 /* Tell link partner that we're going away */
993 bus_space_write_4(t, h, GEM_MII_ANAR, GEM_MII_ANEG_RF); 993 bus_space_write_4(t, h, GEM_MII_ANAR, GEM_MII_ANEG_RF);
994 994
995 /* 995 /*
996 * Disable PCS MII. The documentation suggests that setting 996 * Disable PCS MII. The documentation suggests that setting
997 * GEM_MII_CONFIG_ENABLE to zero and then restarting auto- 997 * GEM_MII_CONFIG_ENABLE to zero and then restarting auto-
998 * negotiation will shut down the link. However, it appears 998 * negotiation will shut down the link. However, it appears
999 * that we also need to unset the datapath mode. 999 * that we also need to unset the datapath mode.
1000 */ 1000 */
1001 bus_space_write_4(t, h, GEM_MII_CONFIG, 0); 1001 bus_space_write_4(t, h, GEM_MII_CONFIG, 0);
1002 bus_space_write_4(t, h, GEM_MII_CONTROL, 1002 bus_space_write_4(t, h, GEM_MII_CONTROL,
1003 GEM_MII_CONTROL_AUTONEG | GEM_MII_CONTROL_RAN); 1003 GEM_MII_CONTROL_AUTONEG | GEM_MII_CONTROL_RAN);
1004 bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE, GEM_MII_DATAPATH_MII); 1004 bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE, GEM_MII_DATAPATH_MII);
1005 bus_space_write_4(t, h, GEM_MII_CONFIG, 0); 1005 bus_space_write_4(t, h, GEM_MII_CONFIG, 0);
1006 1006
1007 if (disable) { 1007 if (disable) {
1008 if (sc->sc_flags & GEM_SERDES) 1008 if (sc->sc_flags & GEM_SERDES)
1009 bus_space_write_4(t, h, GEM_MII_SLINK_CONTROL, 1009 bus_space_write_4(t, h, GEM_MII_SLINK_CONTROL,
1010 GEM_MII_SLINK_POWER_OFF); 1010 GEM_MII_SLINK_POWER_OFF);
1011 else 1011 else
1012 bus_space_write_4(t, h, GEM_MII_SLINK_CONTROL, 1012 bus_space_write_4(t, h, GEM_MII_SLINK_CONTROL,
1013 GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_POWER_OFF); 1013 GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_POWER_OFF);
1014 } 1014 }
1015 1015
1016 sc->sc_flags &= ~GEM_LINK; 1016 sc->sc_flags &= ~GEM_LINK;
1017 sc->sc_mii.mii_media_active = IFM_ETHER | IFM_NONE; 1017 sc->sc_mii.mii_media_active = IFM_ETHER | IFM_NONE;
1018 sc->sc_mii.mii_media_status = IFM_AVALID; 1018 sc->sc_mii.mii_media_status = IFM_AVALID;
1019} 1019}
1020 1020
1021 1021
1022/* 1022/*
1023 * Initialization of interface; set up initialization block 1023 * Initialization of interface; set up initialization block
1024 * and transmit/receive descriptor rings. 1024 * and transmit/receive descriptor rings.
1025 */ 1025 */
1026int 1026int
1027gem_init(struct ifnet *ifp) 1027gem_init(struct ifnet *ifp)
1028{ 1028{
1029 struct gem_softc *sc = (struct gem_softc *)ifp->if_softc; 1029 struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
1030 bus_space_tag_t t = sc->sc_bustag; 1030 bus_space_tag_t t = sc->sc_bustag;
1031 bus_space_handle_t h = sc->sc_h1; 1031 bus_space_handle_t h = sc->sc_h1;
1032 int rc = 0, s; 1032 int rc = 0, s;
1033 u_int max_frame_size; 1033 u_int max_frame_size;
1034 u_int32_t v; 1034 u_int32_t v;
1035 1035
1036 s = splnet(); 1036 s = splnet();
1037 1037
1038 DPRINTF(sc, ("%s: gem_init: calling stop\n", device_xname(&sc->sc_dev))); 1038 DPRINTF(sc, ("%s: gem_init: calling stop\n", device_xname(&sc->sc_dev)));
1039 /* 1039 /*
1040 * Initialization sequence. The numbered steps below correspond 1040 * Initialization sequence. The numbered steps below correspond
1041 * to the sequence outlined in section 6.3.5.1 in the Ethernet 1041 * to the sequence outlined in section 6.3.5.1 in the Ethernet
1042 * Channel Engine manual (part of the PCIO manual). 1042 * Channel Engine manual (part of the PCIO manual).
1043 * See also the STP2002-STQ document from Sun Microsystems. 1043 * See also the STP2002-STQ document from Sun Microsystems.
1044 */ 1044 */
1045 1045
1046 /* step 1 & 2. Reset the Ethernet Channel */ 1046 /* step 1 & 2. Reset the Ethernet Channel */
1047 gem_stop(ifp, 0); 1047 gem_stop(ifp, 0);
1048 gem_reset(sc); 1048 gem_reset(sc);
1049 DPRINTF(sc, ("%s: gem_init: restarting\n", device_xname(&sc->sc_dev))); 1049 DPRINTF(sc, ("%s: gem_init: restarting\n", device_xname(&sc->sc_dev)));
1050 1050
1051 /* Re-initialize the MIF */ 1051 /* Re-initialize the MIF */
1052 gem_mifinit(sc); 1052 gem_mifinit(sc);
1053 1053
1054 /* Set up correct datapath for non-SERDES/Serialink */ 1054 /* Set up correct datapath for non-SERDES/Serialink */
1055 if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) == 0 && 1055 if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) == 0 &&
1056 sc->sc_variant != GEM_SUN_ERI) 1056 sc->sc_variant != GEM_SUN_ERI)
1057 bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE, 1057 bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE,
1058 GEM_MII_DATAPATH_MII); 1058 GEM_MII_DATAPATH_MII);
1059 1059
1060 /* Call MI reset function if any */ 1060 /* Call MI reset function if any */
1061 if (sc->sc_hwreset) 1061 if (sc->sc_hwreset)
1062 (*sc->sc_hwreset)(sc); 1062 (*sc->sc_hwreset)(sc);
1063 1063
1064 /* step 3. Setup data structures in host memory */ 1064 /* step 3. Setup data structures in host memory */
1065 if (gem_meminit(sc) != 0) 1065 if (gem_meminit(sc) != 0)
1066 return 1; 1066 return 1;
1067 1067
1068 /* step 4. TX MAC registers & counters */ 1068 /* step 4. TX MAC registers & counters */
1069 gem_init_regs(sc); 1069 gem_init_regs(sc);
1070 max_frame_size = max(sc->sc_ethercom.ec_if.if_mtu, ETHERMTU); 1070 max_frame_size = max(sc->sc_ethercom.ec_if.if_mtu, ETHERMTU);
1071 max_frame_size += ETHER_HDR_LEN + ETHER_CRC_LEN; 1071 max_frame_size += ETHER_HDR_LEN + ETHER_CRC_LEN;
1072 if (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) 1072 if (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU)
1073 max_frame_size += ETHER_VLAN_ENCAP_LEN; 1073 max_frame_size += ETHER_VLAN_ENCAP_LEN;
1074 bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME, 1074 bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
1075 max_frame_size|/* burst size */(0x2000<<16)); 1075 max_frame_size|/* burst size */(0x2000<<16));
1076 1076
1077 /* step 5. RX MAC registers & counters */ 1077 /* step 5. RX MAC registers & counters */
1078 gem_setladrf(sc); 1078 gem_setladrf(sc);
1079 1079
1080 /* step 6 & 7. Program Descriptor Ring Base Addresses */ 1080 /* step 6 & 7. Program Descriptor Ring Base Addresses */
1081 /* NOTE: we use only 32-bit DMA addresses here. */ 1081 /* NOTE: we use only 32-bit DMA addresses here. */
1082 bus_space_write_4(t, h, GEM_TX_RING_PTR_HI, 0); 1082 bus_space_write_4(t, h, GEM_TX_RING_PTR_HI, 0);
1083 bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0)); 1083 bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));
1084 1084
1085 bus_space_write_4(t, h, GEM_RX_RING_PTR_HI, 0); 1085 bus_space_write_4(t, h, GEM_RX_RING_PTR_HI, 0);
1086 bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0)); 1086 bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
1087 1087
1088 /* step 8. Global Configuration & Interrupt Mask */ 1088 /* step 8. Global Configuration & Interrupt Mask */
1089 if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) != 0) 1089 if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) != 0)
1090 v = GEM_INTR_PCS; 1090 v = GEM_INTR_PCS;
1091 else 1091 else
1092 v = GEM_INTR_MIF; 1092 v = GEM_INTR_MIF;
1093 bus_space_write_4(t, h, GEM_INTMASK, 1093 bus_space_write_4(t, h, GEM_INTMASK,
1094 ~(GEM_INTR_TX_INTME | 1094 ~(GEM_INTR_TX_INTME |
1095 GEM_INTR_TX_EMPTY | 1095 GEM_INTR_TX_EMPTY |
1096 GEM_INTR_TX_MAC | 1096 GEM_INTR_TX_MAC |
1097 GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF| 1097 GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF|
1098 GEM_INTR_RX_TAG_ERR | GEM_INTR_MAC_CONTROL| 1098 GEM_INTR_RX_TAG_ERR | GEM_INTR_MAC_CONTROL|
1099 GEM_INTR_BERR | v)); 1099 GEM_INTR_BERR | v));
1100 bus_space_write_4(t, h, GEM_MAC_RX_MASK, 1100 bus_space_write_4(t, h, GEM_MAC_RX_MASK,
1101 GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT); 1101 GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT);
1102 bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff); /* XXX */ 1102 bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff); /* XXX */
1103 bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK, 1103 bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK,
1104 GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME); 1104 GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME);
1105 1105
1106 /* step 9. ETX Configuration: use mostly default values */ 1106 /* step 9. ETX Configuration: use mostly default values */
1107 1107
1108 /* Enable TX DMA */ 1108 /* Enable TX DMA */
1109 v = gem_ringsize(GEM_NTXDESC /*XXX*/); 1109 v = gem_ringsize(GEM_NTXDESC /*XXX*/);
1110 bus_space_write_4(t, h, GEM_TX_CONFIG, 1110 bus_space_write_4(t, h, GEM_TX_CONFIG,
1111 v|GEM_TX_CONFIG_TXDMA_EN| 1111 v|GEM_TX_CONFIG_TXDMA_EN|
1112 ((0x4FF<<10)&GEM_TX_CONFIG_TXFIFO_TH)); 1112 ((0x4FF<<10)&GEM_TX_CONFIG_TXFIFO_TH));
1113 bus_space_write_4(t, h, GEM_TX_KICK, sc->sc_txnext); 1113 bus_space_write_4(t, h, GEM_TX_KICK, sc->sc_txnext);
1114 1114
1115 /* step 10. ERX Configuration */ 1115 /* step 10. ERX Configuration */
1116 gem_rx_common(sc); 1116 gem_rx_common(sc);
1117 1117
1118 /* step 11. Configure Media */ 1118 /* step 11. Configure Media */
1119 if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) == 0 && 1119 if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) == 0 &&
1120 (rc = mii_ifmedia_change(&sc->sc_mii)) != 0) 1120 (rc = mii_ifmedia_change(&sc->sc_mii)) != 0)
1121 goto out; 1121 goto out;
1122 1122
1123 /* step 12. RX_MAC Configuration Register */ 1123 /* step 12. RX_MAC Configuration Register */
1124 v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG); 1124 v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
1125 v |= GEM_MAC_RX_ENABLE | GEM_MAC_RX_STRIP_CRC; 1125 v |= GEM_MAC_RX_ENABLE | GEM_MAC_RX_STRIP_CRC;
1126 bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v); 1126 bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);
1127 1127
1128 /* step 14. Issue Transmit Pending command */ 1128 /* step 14. Issue Transmit Pending command */
1129 1129
1130 /* Call MI initialization function if any */ 1130 /* Call MI initialization function if any */
1131 if (sc->sc_hwinit) 1131 if (sc->sc_hwinit)
1132 (*sc->sc_hwinit)(sc); 1132 (*sc->sc_hwinit)(sc);
1133 1133
1134 1134
1135 /* step 15. Give the receiver a swift kick */ 1135 /* step 15. Give the receiver a swift kick */
1136 bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC-4); 1136 bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC-4);
1137 1137
1138 if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) != 0) 1138 if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) != 0)
1139 /* Configure PCS */ 1139 /* Configure PCS */
1140 gem_pcs_start(sc); 1140 gem_pcs_start(sc);
1141 else 1141 else
1142 /* Start the one second timer. */ 1142 /* Start the one second timer. */
1143 callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc); 1143 callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
1144 1144
1145 sc->sc_flags &= ~GEM_LINK; 1145 sc->sc_flags &= ~GEM_LINK;
1146 ifp->if_flags |= IFF_RUNNING; 1146 ifp->if_flags |= IFF_RUNNING;
1147 ifp->if_flags &= ~IFF_OACTIVE; 1147 ifp->if_flags &= ~IFF_OACTIVE;
1148 ifp->if_timer = 0; 1148 ifp->if_timer = 0;
1149 sc->sc_if_flags = ifp->if_flags; 1149 sc->sc_if_flags = ifp->if_flags;
1150out: 1150out:
1151 splx(s); 1151 splx(s);
1152 1152
1153 return (0); 1153 return (0);
1154} 1154}
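
The max-frame computation in step 4 is worth making concrete: the MAC must accept the MTU (at least the standard Ethernet 1500), plus the 14-byte header and 4-byte CRC, plus 4 more bytes of 802.1Q tag when VLAN-sized frames are enabled. With the standard constants the arithmetic works out as:

    #include <stdio.h>

    #define ETHERMTU              1500
    #define ETHER_HDR_LEN           14
    #define ETHER_CRC_LEN            4
    #define ETHER_VLAN_ENCAP_LEN     4

    int
    main(void)
    {
            unsigned max_frame = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

            printf("untagged: %u\n", max_frame);                        /* 1518 */
            printf("tagged:   %u\n", max_frame + ETHER_VLAN_ENCAP_LEN); /* 1522 */
            return 0;
    }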
1155 1155
1156void 1156void
1157gem_init_regs(struct gem_softc *sc) 1157gem_init_regs(struct gem_softc *sc)
1158{ 1158{
1159 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1159 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1160 bus_space_tag_t t = sc->sc_bustag; 1160 bus_space_tag_t t = sc->sc_bustag;
1161 bus_space_handle_t h = sc->sc_h1; 1161 bus_space_handle_t h = sc->sc_h1;
1162 const u_char *laddr = CLLADDR(ifp->if_sadl); 1162 const u_char *laddr = CLLADDR(ifp->if_sadl);
1163 u_int32_t v; 1163 u_int32_t v;
1164 1164
1165 /* These regs are not cleared on reset */ 1165 /* These regs are not cleared on reset */
1166 if (!sc->sc_inited) { 1166 if (!sc->sc_inited) {
1167 1167
1168 /* Load recommended values */ 1168 /* Load recommended values */
1169 bus_space_write_4(t, h, GEM_MAC_IPG0, 0x00); 1169 bus_space_write_4(t, h, GEM_MAC_IPG0, 0x00);
1170 bus_space_write_4(t, h, GEM_MAC_IPG1, 0x08); 1170 bus_space_write_4(t, h, GEM_MAC_IPG1, 0x08);
1171 bus_space_write_4(t, h, GEM_MAC_IPG2, 0x04); 1171 bus_space_write_4(t, h, GEM_MAC_IPG2, 0x04);
1172 1172
1173 bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN); 1173 bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
1174 /* Max frame and max burst size */ 1174 /* Max frame and max burst size */
1175 bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME, 1175 bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
1176 ETHER_MAX_LEN | (0x2000<<16)); 1176 ETHER_MAX_LEN | (0x2000<<16));
1177 1177
1178 bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x07); 1178 bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x07);
1179 bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x04); 1179 bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x04);
1180 bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10); 1180 bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10);
1181 bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088); 1181 bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088);
1182 bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED, 1182 bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED,
1183 ((laddr[5]<<8)|laddr[4])&0x3ff); 1183 ((laddr[5]<<8)|laddr[4])&0x3ff);
1184 1184
1185 /* Secondary MAC addr set to 0:0:0:0:0:0 */ 1185 /* Secondary MAC addr set to 0:0:0:0:0:0 */
1186 bus_space_write_4(t, h, GEM_MAC_ADDR3, 0); 1186 bus_space_write_4(t, h, GEM_MAC_ADDR3, 0);
1187 bus_space_write_4(t, h, GEM_MAC_ADDR4, 0); 1187 bus_space_write_4(t, h, GEM_MAC_ADDR4, 0);
1188 bus_space_write_4(t, h, GEM_MAC_ADDR5, 0); 1188 bus_space_write_4(t, h, GEM_MAC_ADDR5, 0);
1189 1189
1190 /* MAC control addr set to 01:80:c2:00:00:01 */ 1190 /* MAC control addr set to 01:80:c2:00:00:01 */
1191 bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001); 1191 bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001);
1192 bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200); 1192 bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200);
1193 bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180); 1193 bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180);
1194 1194
1195 /* MAC filter addr set to 0:0:0:0:0:0 */ 1195 /* MAC filter addr set to 0:0:0:0:0:0 */
1196 bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0); 1196 bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0);
1197 bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0); 1197 bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0);
1198 bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0); 1198 bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0);
1199 1199
1200 bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0); 1200 bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0);
1201 bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0); 1201 bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0);
1202 1202
1203 sc->sc_inited = 1; 1203 sc->sc_inited = 1;
1204 } 1204 }
1205 1205
1206 /* Counters need to be zeroed */ 1206 /* Counters need to be zeroed */
1207 bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0); 1207 bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0);
1208 bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0); 1208 bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0);
1209 bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0); 1209 bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0);
1210 bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0); 1210 bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0);
1211 bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0); 1211 bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0);
1212 bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0); 1212 bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0);
1213 bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0); 1213 bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0);
1214 bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0); 1214 bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0);
1215 bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0); 1215 bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0);
1216 bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0); 1216 bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0);
1217 bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0); 1217 bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0);
1218 1218
1219 /* Set XOFF PAUSE time. */ 1219 /* Set XOFF PAUSE time. */
1220 bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0); 1220 bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);
1221 1221
1222 /* 1222 /*
1223 * Set the internal arbitration to "infinite" bursts of the 1223 * Set the internal arbitration to "infinite" bursts of the
1224 * maximum length of 31 * 64 bytes so DMA transfers aren't 1224 * maximum length of 31 * 64 bytes so DMA transfers aren't
1225 * split up into cache-line-sized chunks. This greatly improves 1225 * split up into cache-line-sized chunks. This greatly improves
1226 * performance, especially on RX. 1226 * performance, especially on RX.
1227 * Enable silicon bug workarounds for the Apple variants. 1227 * Enable silicon bug workarounds for the Apple variants.
1228 */ 1228 */
1229 bus_space_write_4(t, h, GEM_CONFIG, 1229 bus_space_write_4(t, h, GEM_CONFIG,
1230 GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT | 1230 GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT |
1231 ((sc->sc_flags & GEM_PCI) ? 1231 ((sc->sc_flags & GEM_PCI) ?
1232 GEM_CONFIG_BURST_INF : GEM_CONFIG_BURST_64) | (GEM_IS_APPLE(sc) ? 1232 GEM_CONFIG_BURST_INF : GEM_CONFIG_BURST_64) | (GEM_IS_APPLE(sc) ?
1233 GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX : 0)); 1233 GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX : 0));
1234 1234
1235 /* 1235 /*
1236 * Set the station address. 1236 * Set the station address.
1237 */ 1237 */
1238 bus_space_write_4(t, h, GEM_MAC_ADDR0, (laddr[4]<<8)|laddr[5]); 1238 bus_space_write_4(t, h, GEM_MAC_ADDR0, (laddr[4]<<8)|laddr[5]);
1239 bus_space_write_4(t, h, GEM_MAC_ADDR1, (laddr[2]<<8)|laddr[3]); 1239 bus_space_write_4(t, h, GEM_MAC_ADDR1, (laddr[2]<<8)|laddr[3]);
1240 bus_space_write_4(t, h, GEM_MAC_ADDR2, (laddr[0]<<8)|laddr[1]); 1240 bus_space_write_4(t, h, GEM_MAC_ADDR2, (laddr[0]<<8)|laddr[1]);
1241 1241
1242 /* 1242 /*
1243 * Enable MII outputs. Enable GMII if there is a gigabit PHY. 1243 * Enable MII outputs. Enable GMII if there is a gigabit PHY.
1244 */ 1244 */
1245 sc->sc_mif_config = bus_space_read_4(t, h, GEM_MIF_CONFIG); 1245 sc->sc_mif_config = bus_space_read_4(t, h, GEM_MIF_CONFIG);
1246 v = GEM_MAC_XIF_TX_MII_ENA; 1246 v = GEM_MAC_XIF_TX_MII_ENA;
1247 if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) == 0) { 1247 if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) == 0) {
1248 if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) { 1248 if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) {
1249 v |= GEM_MAC_XIF_FDPLX_LED; 1249 v |= GEM_MAC_XIF_FDPLX_LED;
1250 if (sc->sc_flags & GEM_GIGABIT) 1250 if (sc->sc_flags & GEM_GIGABIT)
1251 v |= GEM_MAC_XIF_GMII_MODE; 1251 v |= GEM_MAC_XIF_GMII_MODE;
1252 } 1252 }
1253 } else { 1253 } else {
1254 v |= GEM_MAC_XIF_GMII_MODE; 1254 v |= GEM_MAC_XIF_GMII_MODE;
1255 } 1255 }
1256 bus_space_write_4(t, h, GEM_MAC_XIF_CONFIG, v); 1256 bus_space_write_4(t, h, GEM_MAC_XIF_CONFIG, v);
1257} 1257}
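
gem_init_regs() writes the 6-byte station address as three 16-bit words, low-order bytes in GEM_MAC_ADDR0, and seeds the backoff RNG from 10 bits of the address. A worked example with the hypothetical address 00:11:22:33:44:55:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
            const uint8_t laddr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

            uint16_t addr0 = (laddr[4] << 8) | laddr[5];           /* 0x4455 */
            uint16_t addr1 = (laddr[2] << 8) | laddr[3];           /* 0x2233 */
            uint16_t addr2 = (laddr[0] << 8) | laddr[1];           /* 0x0011 */
            uint16_t seed  = ((laddr[5] << 8) | laddr[4]) & 0x3ff; /* 0x144  */

            printf("ADDR0=0x%04x ADDR1=0x%04x ADDR2=0x%04x seed=0x%03x\n",
                addr0, addr1, addr2, seed);
            return 0;
    }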
1258 1258
1259#ifdef GEM_DEBUG 1259#ifdef GEM_DEBUG
1260static void 1260static void
1261gem_txsoft_print(const struct gem_softc *sc, int firstdesc, int lastdesc) 1261gem_txsoft_print(const struct gem_softc *sc, int firstdesc, int lastdesc)
1262{ 1262{
1263 int i; 1263 int i;
1264 1264
1265 for (i = firstdesc;; i = GEM_NEXTTX(i)) { 1265 for (i = firstdesc;; i = GEM_NEXTTX(i)) {
1266 printf("descriptor %d:\t", i); 1266 printf("descriptor %d:\t", i);
1267 printf("gd_flags: 0x%016" PRIx64 "\t", 1267 printf("gd_flags: 0x%016" PRIx64 "\t",
1268 GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_flags)); 1268 GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_flags));
1269 printf("gd_addr: 0x%016" PRIx64 "\n", 1269 printf("gd_addr: 0x%016" PRIx64 "\n",
1270 GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_addr)); 1270 GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_addr));
1271 if (i == lastdesc) 1271 if (i == lastdesc)
1272 break; 1272 break;
1273 } 1273 }
1274} 1274}
1275#endif 1275#endif
1276 1276
1277static void 1277static void
1278gem_start(struct ifnet *ifp) 1278gem_start(struct ifnet *ifp)
1279{ 1279{
1280 struct gem_softc *sc = (struct gem_softc *)ifp->if_softc; 1280 struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
1281 struct mbuf *m0, *m; 1281 struct mbuf *m0, *m;
1282 struct gem_txsoft *txs; 1282 struct gem_txsoft *txs;
1283 bus_dmamap_t dmamap; 1283 bus_dmamap_t dmamap;
1284 int error, firsttx, nexttx = -1, lasttx = -1, ofree, seg; 1284 int error, firsttx, nexttx = -1, lasttx = -1, ofree, seg;
1285 uint64_t flags = 0; 1285 uint64_t flags = 0;
1286 1286
1287 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 1287 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1288 return; 1288 return;
1289 1289
1290 /* 1290 /*
1291 * Remember the previous number of free descriptors and 1291 * Remember the previous number of free descriptors and
1292 * the first descriptor we'll use. 1292 * the first descriptor we'll use.
1293 */ 1293 */
1294 ofree = sc->sc_txfree; 1294 ofree = sc->sc_txfree;
1295 firsttx = sc->sc_txnext; 1295 firsttx = sc->sc_txnext;
1296 1296
1297 DPRINTF(sc, ("%s: gem_start: txfree %d, txnext %d\n", 1297 DPRINTF(sc, ("%s: gem_start: txfree %d, txnext %d\n",
1298 device_xname(&sc->sc_dev), ofree, firsttx)); 1298 device_xname(&sc->sc_dev), ofree, firsttx));
1299 1299
1300 /* 1300 /*
1301 * Loop through the send queue, setting up transmit descriptors 1301 * Loop through the send queue, setting up transmit descriptors
1302 * until we drain the queue, or use up all available transmit 1302 * until we drain the queue, or use up all available transmit
1303 * descriptors. 1303 * descriptors.
1304 */ 1304 */
1305 while ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) != NULL && 1305 while ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) != NULL &&
1306 sc->sc_txfree != 0) { 1306 sc->sc_txfree != 0) {
1307 /* 1307 /*
1308 * Grab a packet off the queue. 1308 * Grab a packet off the queue.
1309 */ 1309 */
1310 IFQ_POLL(&ifp->if_snd, m0); 1310 IFQ_POLL(&ifp->if_snd, m0);
1311 if (m0 == NULL) 1311 if (m0 == NULL)
1312 break; 1312 break;
1313 m = NULL; 1313 m = NULL;
1314 1314
1315 dmamap = txs->txs_dmamap; 1315 dmamap = txs->txs_dmamap;
1316 1316
1317 /* 1317 /*
1318 * Load the DMA map. If this fails, the packet either 1318 * Load the DMA map. If this fails, the packet either
1319 * didn't fit in the allotted number of segments, or we were 1319 * didn't fit in the allotted number of segments, or we were
1320 * short on resources. In this case, we'll copy and try 1320 * short on resources. In this case, we'll copy and try
1321 * again. 1321 * again.
1322 */ 1322 */
1323 if (bus_dmamap_load_mbuf(sc->sc_dmatag, dmamap, m0, 1323 if (bus_dmamap_load_mbuf(sc->sc_dmatag, dmamap, m0,
1324 BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0 || 1324 BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0 ||
1325 (m0->m_pkthdr.len < ETHER_MIN_TX && 1325 (m0->m_pkthdr.len < ETHER_MIN_TX &&
1326 dmamap->dm_nsegs == GEM_NTXSEGS)) { 1326 dmamap->dm_nsegs == GEM_NTXSEGS)) {
1327 if (m0->m_pkthdr.len > MCLBYTES) { 1327 if (m0->m_pkthdr.len > MCLBYTES) {
1328 aprint_error_dev(&sc->sc_dev, "unable to allocate jumbo Tx " 1328 aprint_error_dev(&sc->sc_dev, "unable to allocate jumbo Tx "
1329 "cluster\n"); 1329 "cluster\n");
1330 IFQ_DEQUEUE(&ifp->if_snd, m0); 1330 IFQ_DEQUEUE(&ifp->if_snd, m0);
1331 m_freem(m0); 1331 m_freem(m0);
1332 continue; 1332 continue;
1333 } 1333 }
1334 MGETHDR(m, M_DONTWAIT, MT_DATA); 1334 MGETHDR(m, M_DONTWAIT, MT_DATA);
1335 if (m == NULL) { 1335 if (m == NULL) {
1336 aprint_error_dev(&sc->sc_dev, "unable to allocate Tx mbuf\n"); 1336 aprint_error_dev(&sc->sc_dev, "unable to allocate Tx mbuf\n");
1337 break; 1337 break;
1338 } 1338 }
1339 MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner); 1339 MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
1340 if (m0->m_pkthdr.len > MHLEN) { 1340 if (m0->m_pkthdr.len > MHLEN) {
1341 MCLGET(m, M_DONTWAIT); 1341 MCLGET(m, M_DONTWAIT);
1342 if ((m->m_flags & M_EXT) == 0) { 1342 if ((m->m_flags & M_EXT) == 0) {
1343 aprint_error_dev(&sc->sc_dev, "unable to allocate Tx " 1343 aprint_error_dev(&sc->sc_dev, "unable to allocate Tx "
1344 "cluster\n"); 1344 "cluster\n");
1345 m_freem(m); 1345 m_freem(m);
1346 break; 1346 break;
1347 } 1347 }
1348 } 1348 }
1349 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *)); 1349 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
1350 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len; 1350 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
1351 error = bus_dmamap_load_mbuf(sc->sc_dmatag, dmamap, 1351 error = bus_dmamap_load_mbuf(sc->sc_dmatag, dmamap,
1352 m, BUS_DMA_WRITE|BUS_DMA_NOWAIT); 1352 m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1353 if (error) { 1353 if (error) {
1354 aprint_error_dev(&sc->sc_dev, "unable to load Tx buffer, " 1354 aprint_error_dev(&sc->sc_dev, "unable to load Tx buffer, "
1355 "error = %d\n", error); 1355 "error = %d\n", error);
1356 break; 1356 break;
1357 } 1357 }
1358 } 1358 }
1359 1359
1360 /* 1360 /*
1361 * Ensure we have enough descriptors free to describe 1361 * Ensure we have enough descriptors free to describe
1362 * the packet. 1362 * the packet.
1363 */ 1363 */
1364 if (dmamap->dm_nsegs > ((m0->m_pkthdr.len < ETHER_MIN_TX) ? 1364 if (dmamap->dm_nsegs > ((m0->m_pkthdr.len < ETHER_MIN_TX) ?
1365 (sc->sc_txfree - 1) : sc->sc_txfree)) { 1365 (sc->sc_txfree - 1) : sc->sc_txfree)) {
1366 /* 1366 /*
1367 * Not enough free descriptors to transmit this 1367 * Not enough free descriptors to transmit this
1368 * packet. We haven't committed to anything yet, 1368 * packet. We haven't committed to anything yet,
1369 * so just unload the DMA map, put the packet 1369 * so just unload the DMA map, put the packet
1370 * back on the queue, and punt. Notify the upper 1370 * back on the queue, and punt. Notify the upper
1371 * layer that there are no more slots left. 1371 * layer that there are no more slots left.
1372 * 1372 *
1373 * XXX We could allocate an mbuf and copy, but 1373 * XXX We could allocate an mbuf and copy, but
1374 * XXX is it worth it? 1374 * XXX is it worth it?
1375 */ 1375 */
1376 ifp->if_flags |= IFF_OACTIVE; 1376 ifp->if_flags |= IFF_OACTIVE;
1377 sc->sc_if_flags = ifp->if_flags; 1377 sc->sc_if_flags = ifp->if_flags;
1378 bus_dmamap_unload(sc->sc_dmatag, dmamap); 1378 bus_dmamap_unload(sc->sc_dmatag, dmamap);
1379 if (m != NULL) 1379 if (m != NULL)
1380 m_freem(m); 1380 m_freem(m);
1381 break; 1381 break;
1382 } 1382 }
1383 1383
1384 IFQ_DEQUEUE(&ifp->if_snd, m0); 1384 IFQ_DEQUEUE(&ifp->if_snd, m0);
1385 if (m != NULL) { 1385 if (m != NULL) {
1386 m_freem(m0); 1386 m_freem(m0);
1387 m0 = m; 1387 m0 = m;
1388 } 1388 }
1389 1389
1390 /* 1390 /*
1391 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. 1391 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
1392 */ 1392 */
1393 1393
1394 /* Sync the DMA map. */ 1394 /* Sync the DMA map. */
1395 bus_dmamap_sync(sc->sc_dmatag, dmamap, 0, dmamap->dm_mapsize, 1395 bus_dmamap_sync(sc->sc_dmatag, dmamap, 0, dmamap->dm_mapsize,
1396 BUS_DMASYNC_PREWRITE); 1396 BUS_DMASYNC_PREWRITE);
1397 1397
1398 /* 1398 /*
1399 * Initialize the transmit descriptors. 1399 * Initialize the transmit descriptors.
1400 */ 1400 */
1401 for (nexttx = sc->sc_txnext, seg = 0; 1401 for (nexttx = sc->sc_txnext, seg = 0;
1402 seg < dmamap->dm_nsegs; 1402 seg < dmamap->dm_nsegs;
1403 seg++, nexttx = GEM_NEXTTX(nexttx)) { 1403 seg++, nexttx = GEM_NEXTTX(nexttx)) {
1404 1404
1405 /* 1405 /*
1406 * If this is the first descriptor we're 1406 * If this is the first descriptor we're
1407 * enqueueing, set the start of packet flag, 1407 * enqueueing, set the start of packet flag,
1408 * and the checksum stuff if we want the hardware 1408 * and the checksum stuff if we want the hardware
1409 * to do it. 1409 * to do it.
1410 */ 1410 */
1411 sc->sc_txdescs[nexttx].gd_addr = 1411 sc->sc_txdescs[nexttx].gd_addr =
1412 GEM_DMA_WRITE(sc, dmamap->dm_segs[seg].ds_addr); 1412 GEM_DMA_WRITE(sc, dmamap->dm_segs[seg].ds_addr);
1413 flags = dmamap->dm_segs[seg].ds_len & GEM_TD_BUFSIZE; 1413 flags = dmamap->dm_segs[seg].ds_len & GEM_TD_BUFSIZE;
1414 if (nexttx == firsttx) { 1414 if (nexttx == firsttx) {
1415 flags |= GEM_TD_START_OF_PACKET; 1415 flags |= GEM_TD_START_OF_PACKET;
1416 if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) { 1416 if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
1417 sc->sc_txwin = 0; 1417 sc->sc_txwin = 0;
1418 flags |= GEM_TD_INTERRUPT_ME; 1418 flags |= GEM_TD_INTERRUPT_ME;
1419 } 1419 }
1420 1420
1421#ifdef INET 1421#ifdef INET
1422 /* h/w checksum */ 1422 /* h/w checksum */
1423 if (ifp->if_csum_flags_tx & M_CSUM_TCPv4 && 1423 if (ifp->if_csum_flags_tx & M_CSUM_TCPv4 &&
1424 m0->m_pkthdr.csum_flags & M_CSUM_TCPv4) { 1424 m0->m_pkthdr.csum_flags & M_CSUM_TCPv4) {
1425 struct ether_header *eh; 1425 struct ether_header *eh;
1426 uint16_t offset, start; 1426 uint16_t offset, start;
1427 1427
1428 eh = mtod(m0, struct ether_header *); 1428 eh = mtod(m0, struct ether_header *);
1429 switch (ntohs(eh->ether_type)) { 1429 switch (ntohs(eh->ether_type)) {
1430 case ETHERTYPE_IP: 1430 case ETHERTYPE_IP:
1431 start = ETHER_HDR_LEN; 1431 start = ETHER_HDR_LEN;
1432 break; 1432 break;
1433 case ETHERTYPE_VLAN: 1433 case ETHERTYPE_VLAN:
1434 start = ETHER_HDR_LEN + 1434 start = ETHER_HDR_LEN +
1435 ETHER_VLAN_ENCAP_LEN; 1435 ETHER_VLAN_ENCAP_LEN;
1436 break; 1436 break;
1437 default: 1437 default:
1438 /* unsupported, drop it */ 1438 /* unsupported, drop it */
1439 m_free(m0); 1439 m_free(m0);
1440 continue; 1440 continue;
1441 } 1441 }
1442 start += M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data); 1442 start += M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
1443 offset = M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data) + start; 1443 offset = M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data) + start;
1444 flags |= (start << 1444 flags |= (start <<
1445 GEM_TD_CXSUM_STARTSHFT) | 1445 GEM_TD_CXSUM_STARTSHFT) |
1446 (offset << 1446 (offset <<
1447 GEM_TD_CXSUM_STUFFSHFT) | 1447 GEM_TD_CXSUM_STUFFSHFT) |
1448 GEM_TD_CXSUM_ENABLE; 1448 GEM_TD_CXSUM_ENABLE;
1449 } 1449 }
1450#endif 1450#endif
1451 } 1451 }
1452 if (seg == dmamap->dm_nsegs - 1) { 1452 if (seg == dmamap->dm_nsegs - 1) {
1453 flags |= GEM_TD_END_OF_PACKET; 1453 flags |= GEM_TD_END_OF_PACKET;
1454 } else { 1454 } else {
1455 /* last flag set outside of loop */ 1455 /* last flag set outside of loop */
1456 sc->sc_txdescs[nexttx].gd_flags = 1456 sc->sc_txdescs[nexttx].gd_flags =
1457 GEM_DMA_WRITE(sc, flags); 1457 GEM_DMA_WRITE(sc, flags);
1458 } 1458 }
1459 lasttx = nexttx; 1459 lasttx = nexttx;
1460 } 1460 }
1461 if (m0->m_pkthdr.len < ETHER_MIN_TX) { 1461 if (m0->m_pkthdr.len < ETHER_MIN_TX) {
1462 /* add padding buffer at end of chain */ 1462 /* add padding buffer at end of chain */
1463 flags &= ~GEM_TD_END_OF_PACKET; 1463 flags &= ~GEM_TD_END_OF_PACKET;
1464 sc->sc_txdescs[lasttx].gd_flags = 1464 sc->sc_txdescs[lasttx].gd_flags =
1465 GEM_DMA_WRITE(sc, flags); 1465 GEM_DMA_WRITE(sc, flags);
1466 1466
1467 sc->sc_txdescs[nexttx].gd_addr = 1467 sc->sc_txdescs[nexttx].gd_addr =
1468 GEM_DMA_WRITE(sc, 1468 GEM_DMA_WRITE(sc,
1469 sc->sc_nulldmamap->dm_segs[0].ds_addr); 1469 sc->sc_nulldmamap->dm_segs[0].ds_addr);
1470 flags = ((ETHER_MIN_TX - m0->m_pkthdr.len) & 1470 flags = ((ETHER_MIN_TX - m0->m_pkthdr.len) &
1471 GEM_TD_BUFSIZE) | GEM_TD_END_OF_PACKET; 1471 GEM_TD_BUFSIZE) | GEM_TD_END_OF_PACKET;
1472 lasttx = nexttx; 1472 lasttx = nexttx;
1473 nexttx = GEM_NEXTTX(nexttx); 1473 nexttx = GEM_NEXTTX(nexttx);
1474 seg++; 1474 seg++;
1475 } 1475 }
1476 sc->sc_txdescs[lasttx].gd_flags = GEM_DMA_WRITE(sc, flags); 1476 sc->sc_txdescs[lasttx].gd_flags = GEM_DMA_WRITE(sc, flags);
1477 1477
1478 KASSERT(lasttx != -1); 1478 KASSERT(lasttx != -1);
1479 1479
1480 /* 1480 /*
1481 * Store a pointer to the packet so we can free it later, 1481 * Store a pointer to the packet so we can free it later,
1482 * and remember what txdirty will be once the packet is 1482 * and remember what txdirty will be once the packet is
1483 * done. 1483 * done.
1484 */ 1484 */
1485 txs->txs_mbuf = m0; 1485 txs->txs_mbuf = m0;
1486 txs->txs_firstdesc = sc->sc_txnext; 1486 txs->txs_firstdesc = sc->sc_txnext;
1487 txs->txs_lastdesc = lasttx; 1487 txs->txs_lastdesc = lasttx;
1488 txs->txs_ndescs = seg; 1488 txs->txs_ndescs = seg;
1489 1489
1490#ifdef GEM_DEBUG 1490#ifdef GEM_DEBUG
1491 if (ifp->if_flags & IFF_DEBUG) { 1491 if (ifp->if_flags & IFF_DEBUG) {
1492 printf(" gem_start %p transmit chain:\n", txs); 1492 printf(" gem_start %p transmit chain:\n", txs);
1493 gem_txsoft_print(sc, txs->txs_firstdesc, 1493 gem_txsoft_print(sc, txs->txs_firstdesc,
1494 txs->txs_lastdesc); 1494 txs->txs_lastdesc);
1495 } 1495 }
1496#endif 1496#endif
1497 1497
1498 /* Sync the descriptors we're using. */ 1498 /* Sync the descriptors we're using. */
1499 GEM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndescs, 1499 GEM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndescs,
1500 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1500 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1501 1501
1502 /* Advance the tx pointer. */ 1502 /* Advance the tx pointer. */
1503 sc->sc_txfree -= txs->txs_ndescs; 1503 sc->sc_txfree -= txs->txs_ndescs;
1504 sc->sc_txnext = nexttx; 1504 sc->sc_txnext = nexttx;
1505 1505
1506 SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q); 1506 SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
1507 SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q); 1507 SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
1508 1508
1509#if NBPFILTER > 0 1509#if NBPFILTER > 0
1510 /* 1510 /*
1511 * Pass the packet to any BPF listeners. 1511 * Pass the packet to any BPF listeners.
1512 */ 1512 */
1513 if (ifp->if_bpf) 1513 if (ifp->if_bpf)
1514 bpf_mtap(ifp->if_bpf, m0); 1514 bpf_mtap(ifp->if_bpf, m0);
1515#endif /* NBPFILTER > 0 */ 1515#endif /* NBPFILTER > 0 */
1516 } 1516 }
1517 1517
1518 if (txs == NULL || sc->sc_txfree == 0) { 1518 if (txs == NULL || sc->sc_txfree == 0) {
1519 /* No more slots left; notify upper layer. */ 1519 /* No more slots left; notify upper layer. */
1520 ifp->if_flags |= IFF_OACTIVE; 1520 ifp->if_flags |= IFF_OACTIVE;
1521 sc->sc_if_flags = ifp->if_flags; 1521 sc->sc_if_flags = ifp->if_flags;
1522 } 1522 }
1523 1523
1524 if (sc->sc_txfree != ofree) { 1524 if (sc->sc_txfree != ofree) {
1525 DPRINTF(sc, ("%s: packets enqueued, IC on %d, OWN on %d\n", 1525 DPRINTF(sc, ("%s: packets enqueued, IC on %d, OWN on %d\n",
1526 device_xname(&sc->sc_dev), lasttx, firsttx)); 1526 device_xname(&sc->sc_dev), lasttx, firsttx));
1527 /* 1527 /*
1528 * The entire packet chain is set up. 1528 * The entire packet chain is set up.
1529 * Kick the transmitter. 1529 * Kick the transmitter.
1530 */ 1530 */
1531 DPRINTF(sc, ("%s: gem_start: kicking tx %d\n", 1531 DPRINTF(sc, ("%s: gem_start: kicking tx %d\n",
1532 device_xname(&sc->sc_dev), nexttx)); 1532 device_xname(&sc->sc_dev), nexttx));
1533 bus_space_write_4(sc->sc_bustag, sc->sc_h1, GEM_TX_KICK, 1533 bus_space_write_4(sc->sc_bustag, sc->sc_h1, GEM_TX_KICK,
1534 sc->sc_txnext); 1534 sc->sc_txnext);
1535 1535
1536 /* Set a watchdog timer in case the chip flakes out. */ 1536 /* Set a watchdog timer in case the chip flakes out. */
1537 ifp->if_timer = 5; 1537 ifp->if_timer = 5;
1538 DPRINTF(sc, ("%s: gem_start: watchdog %d\n", 1538 DPRINTF(sc, ("%s: gem_start: watchdog %d\n",
1539 device_xname(&sc->sc_dev), ifp->if_timer)); 1539 device_xname(&sc->sc_dev), ifp->if_timer));
1540 } 1540 }
1541} 1541}
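
The h/w-checksum branch in gem_start() tells the chip where to start summing and where to stuff the result, both as byte offsets from the start of the frame. For a plain TCP/IPv4 packet (no VLAN tag, 20-byte IP header, TCP checksum field 16 bytes into its header) the offsets work out as below; the two inputs stand in for what M_CSUM_DATA_IPv4_IPHL() and M_CSUM_DATA_IPv4_OFFSET() would supply:

    #include <stdio.h>

    #define ETHER_HDR_LEN 14

    int
    main(void)
    {
            unsigned iphl     = 20;   /* IP header length, no options    */
            unsigned csum_off = 16;   /* th_sum offset in the TCP header */

            unsigned start = ETHER_HDR_LEN + iphl;   /* sum from byte 34 */
            unsigned stuff = start + csum_off;       /* stuff at byte 50 */

            printf("start=%u stuff=%u\n", start, stuff);
            return 0;
    }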
1542 1542
1543/* 1543/*
1544 * Transmit interrupt. 1544 * Transmit interrupt.
1545 */ 1545 */
1546int 1546int
1547gem_tint(struct gem_softc *sc) 1547gem_tint(struct gem_softc *sc)
1548{ 1548{
1549 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1549 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1550 bus_space_tag_t t = sc->sc_bustag; 1550 bus_space_tag_t t = sc->sc_bustag;
1551 bus_space_handle_t mac = sc->sc_h1; 1551 bus_space_handle_t mac = sc->sc_h1;
1552 struct gem_txsoft *txs; 1552 struct gem_txsoft *txs;
1553 int txlast; 1553 int txlast;
1554 int progress = 0; 1554 int progress = 0;
1555 u_int32_t v; 1555 u_int32_t v;
1556 1556
1557 DPRINTF(sc, ("%s: gem_tint\n", device_xname(&sc->sc_dev))); 1557 DPRINTF(sc, ("%s: gem_tint\n", device_xname(&sc->sc_dev)));
1558 1558
1559 /* Unload collision counters ... */ 1559 /* Unload collision counters ... */
1560 v = bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) + 1560 v = bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) +
1561 bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT); 1561 bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT);
1562 ifp->if_collisions += v + 1562 ifp->if_collisions += v +
1563 bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) + 1563 bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) +
1564 bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT); 1564 bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT);
1565 ifp->if_oerrors += v; 1565 ifp->if_oerrors += v;
1566 1566
1567 /* ... then clear the hardware counters. */ 1567 /* ... then clear the hardware counters. */
1568 bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0); 1568 bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0);
1569 bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0); 1569 bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0);
1570 bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0); 1570 bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0);
1571 bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0); 1571 bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0);
1572 1572
1573 /* 1573 /*
1574 * Go through our Tx list and free mbufs for those 1574 * Go through our Tx list and free mbufs for those
1575 * frames that have been transmitted. 1575 * frames that have been transmitted.
1576 */ 1576 */
1577 while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) { 1577 while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
1578 /* 1578 /*
1579 * In theory, we could harvest some descriptors before 1579 * In theory, we could harvest some descriptors before
1580 * the ring is empty, but that's a bit complicated. 1580 * the ring is empty, but that's a bit complicated.
1581 * 1581 *
1582 * GEM_TX_COMPLETION points to the last descriptor 1582 * GEM_TX_COMPLETION points to the last descriptor
1583 * processed +1. 1583 * processed +1.
1584 * 1584 *
1585 * Let's assume that the NIC writes back to the Tx 1585 * Let's assume that the NIC writes back to the Tx
1586 * descriptors before it updates the completion 1586 * descriptors before it updates the completion
1587 * register. If the NIC has posted writes to the 1587 * register. If the NIC has posted writes to the
1588 * Tx descriptors, PCI ordering requires that the 1588 * Tx descriptors, PCI ordering requires that the
1589 * posted writes flush to RAM before the register-read 1589 * posted writes flush to RAM before the register-read
1590 * finishes. So let's read the completion register, 1590 * finishes. So let's read the completion register,
1591 * before syncing the descriptors, so that we 1591 * before syncing the descriptors, so that we
1592 * examine Tx descriptors that are at least as 1592 * examine Tx descriptors that are at least as
1593 * current as the completion register. 1593 * current as the completion register.
1594 */ 1594 */
1595 txlast = bus_space_read_4(t, mac, GEM_TX_COMPLETION); 1595 txlast = bus_space_read_4(t, mac, GEM_TX_COMPLETION);
1596 DPRINTF(sc, 1596 DPRINTF(sc,
1597 ("gem_tint: txs->txs_lastdesc = %d, txlast = %d\n", 1597 ("gem_tint: txs->txs_lastdesc = %d, txlast = %d\n",
1598 txs->txs_lastdesc, txlast)); 1598 txs->txs_lastdesc, txlast));
1599 if (txs->txs_firstdesc <= txs->txs_lastdesc) { 1599 if (txs->txs_firstdesc <= txs->txs_lastdesc) {
1600 if (txlast >= txs->txs_firstdesc && 1600 if (txlast >= txs->txs_firstdesc &&
1601 txlast <= txs->txs_lastdesc) 1601 txlast <= txs->txs_lastdesc)
1602 break; 1602 break;
1603 } else if (txlast >= txs->txs_firstdesc || 1603 } else if (txlast >= txs->txs_firstdesc ||
1604 txlast <= txs->txs_lastdesc) 1604 txlast <= txs->txs_lastdesc)
1605 break; 1605 break;
1606 1606
1607 GEM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndescs, 1607 GEM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndescs,
1608 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1608 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1609 1609
1610#ifdef GEM_DEBUG /* XXX DMA synchronization? */ 1610#ifdef GEM_DEBUG /* XXX DMA synchronization? */
1611 if (ifp->if_flags & IFF_DEBUG) { 1611 if (ifp->if_flags & IFF_DEBUG) {
1612 printf(" txsoft %p transmit chain:\n", txs); 1612 printf(" txsoft %p transmit chain:\n", txs);
1613 gem_txsoft_print(sc, txs->txs_firstdesc, 1613 gem_txsoft_print(sc, txs->txs_firstdesc,
1614 txs->txs_lastdesc); 1614 txs->txs_lastdesc);
1615 } 1615 }
1616#endif 1616#endif
1617 1617
1618 1618
1619 DPRINTF(sc, ("gem_tint: releasing a desc\n")); 1619 DPRINTF(sc, ("gem_tint: releasing a desc\n"));
1620 SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); 1620 SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
1621 1621
1622 sc->sc_txfree += txs->txs_ndescs; 1622 sc->sc_txfree += txs->txs_ndescs;
1623 1623
1624 bus_dmamap_sync(sc->sc_dmatag, txs->txs_dmamap, 1624 bus_dmamap_sync(sc->sc_dmatag, txs->txs_dmamap,
1625 0, txs->txs_dmamap->dm_mapsize, 1625 0, txs->txs_dmamap->dm_mapsize,
1626 BUS_DMASYNC_POSTWRITE); 1626 BUS_DMASYNC_POSTWRITE);
1627 bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap); 1627 bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap);
1628 if (txs->txs_mbuf != NULL) { 1628 if (txs->txs_mbuf != NULL) {
1629 m_freem(txs->txs_mbuf); 1629 m_freem(txs->txs_mbuf);
1630 txs->txs_mbuf = NULL; 1630 txs->txs_mbuf = NULL;
1631 } 1631 }
1632 1632
1633 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 1633 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
1634 1634
1635 ifp->if_opackets++; 1635 ifp->if_opackets++;
1636 progress = 1; 1636 progress = 1;
1637 } 1637 }
1638 1638
1639#if 0 1639#if 0
1640 DPRINTF(sc, ("gem_tint: GEM_TX_STATE_MACHINE %x " 1640 DPRINTF(sc, ("gem_tint: GEM_TX_STATE_MACHINE %x "
1641 "GEM_TX_DATA_PTR %" PRIx64 "GEM_TX_COMPLETION %" PRIx32 "\n", 1641 "GEM_TX_DATA_PTR %" PRIx64 "GEM_TX_COMPLETION %" PRIx32 "\n",
1642 bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_TX_STATE_MACHINE), 1642 bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_TX_STATE_MACHINE),
1643 ((uint64_t)bus_space_read_4(sc->sc_bustag, sc->sc_h1, 1643 ((uint64_t)bus_space_read_4(sc->sc_bustag, sc->sc_h1,
1644 GEM_TX_DATA_PTR_HI) << 32) | 1644 GEM_TX_DATA_PTR_HI) << 32) |
1645 bus_space_read_4(sc->sc_bustag, sc->sc_h1, 1645 bus_space_read_4(sc->sc_bustag, sc->sc_h1,
1646 GEM_TX_DATA_PTR_LO), 1646 GEM_TX_DATA_PTR_LO),
1647 bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_TX_COMPLETION))); 1647 bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_TX_COMPLETION)));
1648#endif 1648#endif
1649 1649
1650 if (progress) { 1650 if (progress) {
1651 if (sc->sc_txfree == GEM_NTXDESC - 1) 1651 if (sc->sc_txfree == GEM_NTXDESC - 1)
1652 sc->sc_txwin = 0; 1652 sc->sc_txwin = 0;
1653 1653
1654 /* Freed some descriptors, so reset IFF_OACTIVE and restart. */ 1654 /* Freed some descriptors, so reset IFF_OACTIVE and restart. */
1655 ifp->if_flags &= ~IFF_OACTIVE; 1655 ifp->if_flags &= ~IFF_OACTIVE;
1656 sc->sc_if_flags = ifp->if_flags; 1656 sc->sc_if_flags = ifp->if_flags;
1657 ifp->if_timer = SIMPLEQ_EMPTY(&sc->sc_txdirtyq) ? 0 : 5; 1657 ifp->if_timer = SIMPLEQ_EMPTY(&sc->sc_txdirtyq) ? 0 : 5;
1658 gem_start(ifp); 1658 gem_start(ifp);
1659 } 1659 }
1660 DPRINTF(sc, ("%s: gem_tint: watchdog %d\n", 1660 DPRINTF(sc, ("%s: gem_tint: watchdog %d\n",
1661 device_xname(&sc->sc_dev), ifp->if_timer)); 1661 device_xname(&sc->sc_dev), ifp->if_timer));
1662 1662
1663 return (1); 1663 return (1);
1664} 1664}
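
The completion test in gem_tint() has to treat [txs_firstdesc, txs_lastdesc] as an interval on a ring, so it uses a single range comparison when the interval is contiguous and the complementary pair of comparisons when it wraps past the ring's end. A runnable sketch of that wrap-aware containment test (the names here are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    /* True if ring index x lies within [first, last], where the
     * interval may wrap around the end of the ring. */
    static bool
    in_flight(int first, int last, int x)
    {
            if (first <= last)
                    return x >= first && x <= last;
            return x >= first || x <= last;   /* wrapped interval */
    }

    int
    main(void)
    {
            printf("%d\n", in_flight(3, 7, 5));     /* 1: inside           */
            printf("%d\n", in_flight(60, 2, 63));   /* 1: wrapped, inside  */
            printf("%d\n", in_flight(60, 2, 30));   /* 0: wrapped, outside */
            return 0;
    }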
1665 1665
1666/* 1666/*
1667 * Receive interrupt. 1667 * Receive interrupt.
1668 */ 1668 */
1669int 1669int
1670gem_rint(struct gem_softc *sc) 1670gem_rint(struct gem_softc *sc)
1671{ 1671{
1672 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1672 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1673 bus_space_tag_t t = sc->sc_bustag; 1673 bus_space_tag_t t = sc->sc_bustag;
1674 bus_space_handle_t h = sc->sc_h1; 1674 bus_space_handle_t h = sc->sc_h1;
1675 struct gem_rxsoft *rxs; 1675 struct gem_rxsoft *rxs;
1676 struct mbuf *m; 1676 struct mbuf *m;
1677 u_int64_t rxstat; 1677 u_int64_t rxstat;
1678 u_int32_t rxcomp; 1678 u_int32_t rxcomp;
1679 int i, len, progress = 0; 1679 int i, len, progress = 0;
1680 1680
1681 DPRINTF(sc, ("%s: gem_rint\n", device_xname(&sc->sc_dev))); 1681 DPRINTF(sc, ("%s: gem_rint\n", device_xname(&sc->sc_dev)));
1682 1682
1683 /* 1683 /*
1684 * Ignore a spurious interrupt that sometimes occurs before 1684 * Ignore a spurious interrupt that sometimes occurs before
1685 * we are set up, when we network boot. 1685 * we are set up, when we network boot.
1686 */ 1686 */
1687 if (!sc->sc_meminited) 1687 if (!sc->sc_meminited)
1688 return 1; 1688 return 1;
1689 1689
1690 /* 1690 /*
1691 * Read the completion register once. This limits 1691 * Read the completion register once. This limits
1692 * how long the following loop can execute. 1692 * how long the following loop can execute.
1693 */ 1693 */
1694 rxcomp = bus_space_read_4(t, h, GEM_RX_COMPLETION); 1694 rxcomp = bus_space_read_4(t, h, GEM_RX_COMPLETION);
1695 1695
1696 /* 1696 /*
1697 * XXX Read the lastrx only once at the top for speed. 1697 * XXX Read the lastrx only once at the top for speed.
1698 */ 1698 */
1699 DPRINTF(sc, ("gem_rint: sc->rxptr %d, complete %d\n", 1699 DPRINTF(sc, ("gem_rint: sc->rxptr %d, complete %d\n",
1700 sc->sc_rxptr, rxcomp)); 1700 sc->sc_rxptr, rxcomp));
1701 1701
1702 /* 1702 /*
1703 * Go into the loop at least once. 1703 * Go into the loop at least once.
1704 */ 1704 */
1705 for (i = sc->sc_rxptr; i == sc->sc_rxptr || i != rxcomp; 1705 for (i = sc->sc_rxptr; i == sc->sc_rxptr || i != rxcomp;
1706 i = GEM_NEXTRX(i)) { 1706 i = GEM_NEXTRX(i)) {
1707 rxs = &sc->sc_rxsoft[i]; 1707 rxs = &sc->sc_rxsoft[i];
1708 1708
1709 GEM_CDRXSYNC(sc, i, 1709 GEM_CDRXSYNC(sc, i,
1710 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1710 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1711 1711
1712 rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags); 1712 rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags);
1713 1713
1714 if (rxstat & GEM_RD_OWN) { 1714 if (rxstat & GEM_RD_OWN) {
1715 GEM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD); 1715 GEM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
1716 /* 1716 /*
1717 * We have processed all of the receive buffers. 1717 * We have processed all of the receive buffers.
1718 */ 1718 */
1719 break; 1719 break;
1720 } 1720 }
1721 1721
1722 progress++; 1722 progress++;
1723 ifp->if_ipackets++; 1723 ifp->if_ipackets++;
1724 1724
1725 if (rxstat & GEM_RD_BAD_CRC) { 1725 if (rxstat & GEM_RD_BAD_CRC) {
1726 ifp->if_ierrors++; 1726 ifp->if_ierrors++;
1727 aprint_error_dev(&sc->sc_dev, "receive error: CRC error\n"); 1727 aprint_error_dev(&sc->sc_dev, "receive error: CRC error\n");
1728 GEM_INIT_RXDESC(sc, i); 1728 GEM_INIT_RXDESC(sc, i);
1729 continue; 1729 continue;
1730 } 1730 }
1731 1731
1732 bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0, 1732 bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
1733 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 1733 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1734#ifdef GEM_DEBUG 1734#ifdef GEM_DEBUG
1735 if (ifp->if_flags & IFF_DEBUG) { 1735 if (ifp->if_flags & IFF_DEBUG) {
1736 printf(" rxsoft %p descriptor %d: ", rxs, i); 1736 printf(" rxsoft %p descriptor %d: ", rxs, i);
1737 printf("gd_flags: 0x%016llx\t", (long long) 1737 printf("gd_flags: 0x%016llx\t", (long long)
1738 GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags)); 1738 GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags));
1739 printf("gd_addr: 0x%016llx\n", (long long) 1739 printf("gd_addr: 0x%016llx\n", (long long)
1740 GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr)); 1740 GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr));
1741 } 1741 }
1742#endif 1742#endif
1743 1743
1744 /* No errors; receive the packet. */ 1744 /* No errors; receive the packet. */
1745 len = GEM_RD_BUFLEN(rxstat); 1745 len = GEM_RD_BUFLEN(rxstat);
1746 1746
1747 /* 1747 /*
1748 * Allocate a new mbuf cluster. If that fails, we are 1748 * Allocate a new mbuf cluster. If that fails, we are
1749 * out of memory, and must drop the packet and recycle 1749 * out of memory, and must drop the packet and recycle
1750 * the buffer that's already attached to this descriptor. 1750 * the buffer that's already attached to this descriptor.
1751 */ 1751 */
1752 m = rxs->rxs_mbuf; 1752 m = rxs->rxs_mbuf;
1753 if (gem_add_rxbuf(sc, i) != 0) { 1753 if (gem_add_rxbuf(sc, i) != 0) {
1754 GEM_COUNTER_INCR(sc, sc_ev_rxnobuf); 1754 GEM_COUNTER_INCR(sc, sc_ev_rxnobuf);
1755 ifp->if_ierrors++; 1755 ifp->if_ierrors++;
1756 GEM_INIT_RXDESC(sc, i); 1756 GEM_INIT_RXDESC(sc, i);
1757 bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0, 1757 bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
1758 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 1758 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1759 continue; 1759 continue;
1760 } 1760 }
1761 m->m_data += 2; /* We're already off by two */ 1761 m->m_data += 2; /* We're already off by two */
1762 1762
1763 m->m_pkthdr.rcvif = ifp; 1763 m->m_pkthdr.rcvif = ifp;
1764 m->m_pkthdr.len = m->m_len = len; 1764 m->m_pkthdr.len = m->m_len = len;
1765 1765
1766#if NBPFILTER > 0 1766#if NBPFILTER > 0
1767 /* 1767 /*
1768 * Pass this up to any BPF listeners, but only 1768 * Pass this up to any BPF listeners, but only
1769 * pass it up the stack if it's for us. 1769 * pass it up the stack if it's for us.
1770 */ 1770 */
1771 if (ifp->if_bpf) 1771 if (ifp->if_bpf)
1772 bpf_mtap(ifp->if_bpf, m); 1772 bpf_mtap(ifp->if_bpf, m);
1773#endif /* NBPFILTER > 0 */ 1773#endif /* NBPFILTER > 0 */
1774 1774
1775#ifdef INET 1775#ifdef INET
1776 /* hardware checksum */ 1776 /* hardware checksum */
1777 if (ifp->if_csum_flags_rx & M_CSUM_TCPv4) { 1777 if (ifp->if_csum_flags_rx & M_CSUM_TCPv4) {
1778 struct ether_header *eh; 1778 struct ether_header *eh;
1779 struct ip *ip; 1779 struct ip *ip;
1780 int32_t hlen, pktlen; 1780 int32_t hlen, pktlen;
1781 1781
1782 if (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) { 1782 if (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) {
1783 pktlen = m->m_pkthdr.len - ETHER_HDR_LEN - 1783 pktlen = m->m_pkthdr.len - ETHER_HDR_LEN -
1784 ETHER_VLAN_ENCAP_LEN; 1784 ETHER_VLAN_ENCAP_LEN;
1785 eh = (struct ether_header *) (mtod(m, char *) + 1785 eh = (struct ether_header *) (mtod(m, char *) +
1786 ETHER_VLAN_ENCAP_LEN); 1786 ETHER_VLAN_ENCAP_LEN);
1787 } else { 1787 } else {
1788 pktlen = m->m_pkthdr.len - ETHER_HDR_LEN; 1788 pktlen = m->m_pkthdr.len - ETHER_HDR_LEN;
1789 eh = mtod(m, struct ether_header *); 1789 eh = mtod(m, struct ether_header *);
1790 } 1790 }
1791 if (ntohs(eh->ether_type) != ETHERTYPE_IP) 1791 if (ntohs(eh->ether_type) != ETHERTYPE_IP)
1792 goto swcsum; 1792 goto swcsum;
1793 ip = (struct ip *) ((char *)eh + ETHER_HDR_LEN); 1793 ip = (struct ip *) ((char *)eh + ETHER_HDR_LEN);
1794 1794
1795 /* IPv4 only */ 1795 /* IPv4 only */
1796 if (ip->ip_v != IPVERSION) 1796 if (ip->ip_v != IPVERSION)
1797 goto swcsum; 1797 goto swcsum;
1798 1798
1799 hlen = ip->ip_hl << 2; 1799 hlen = ip->ip_hl << 2;
1800 if (hlen < sizeof(struct ip)) 1800 if (hlen < sizeof(struct ip))
1801 goto swcsum; 1801 goto swcsum;
1802 1802
1803 /* 1803 /*
1804 * bail if too short, has random trailing garbage, 1804 * bail if too short, has random trailing garbage,
1805 * truncated, fragment, or has ethernet pad. 1805 * truncated, fragment, or has ethernet pad.
1806 */ 1806 */
1807 if ((ntohs(ip->ip_len) < hlen) || 1807 if ((ntohs(ip->ip_len) < hlen) ||
1808 (ntohs(ip->ip_len) != pktlen) || 1808 (ntohs(ip->ip_len) != pktlen) ||
1809 (ntohs(ip->ip_off) & (IP_MF | IP_OFFMASK))) 1809 (ntohs(ip->ip_off) & (IP_MF | IP_OFFMASK)))
1810 goto swcsum; 1810 goto swcsum;
1811 1811
1812 switch (ip->ip_p) { 1812 switch (ip->ip_p) {
1813 case IPPROTO_TCP: 1813 case IPPROTO_TCP:
1814 if (! (ifp->if_csum_flags_rx & M_CSUM_TCPv4)) 1814 if (! (ifp->if_csum_flags_rx & M_CSUM_TCPv4))
1815 goto swcsum; 1815 goto swcsum;
1816 if (pktlen < (hlen + sizeof(struct tcphdr))) 1816 if (pktlen < (hlen + sizeof(struct tcphdr)))
1817 goto swcsum; 1817 goto swcsum;
1818 m->m_pkthdr.csum_flags = M_CSUM_TCPv4; 1818 m->m_pkthdr.csum_flags = M_CSUM_TCPv4;
1819 break; 1819 break;
1820 case IPPROTO_UDP: 1820 case IPPROTO_UDP:
1821 /* FALLTHROUGH */ 1821 /* FALLTHROUGH */
1822 default: 1822 default:
1823 goto swcsum; 1823 goto swcsum;
1824 } 1824 }
1825 1825
1826 /* the uncomplemented sum is expected */ 1826 /* the uncomplemented sum is expected */
1827 m->m_pkthdr.csum_data = (~rxstat) & GEM_RD_CHECKSUM; 1827 m->m_pkthdr.csum_data = (~rxstat) & GEM_RD_CHECKSUM;
1828 1828
1829 /* if the pkt had ip options, we have to deduct them */ 1829 /* if the pkt had ip options, we have to deduct them */
1830 if (hlen > sizeof(struct ip)) { 1830 if (hlen > sizeof(struct ip)) {
1831 uint16_t *opts; 1831 uint16_t *opts;
1832 uint32_t optsum, temp; 1832 uint32_t optsum, temp;
1833 1833
1834 optsum = 0; 1834 optsum = 0;
1835 temp = hlen - sizeof(struct ip); 1835 temp = hlen - sizeof(struct ip);
1836 opts = (uint16_t *) ((char *) ip + 1836 opts = (uint16_t *) ((char *) ip +
1837 sizeof(struct ip)); 1837 sizeof(struct ip));
1838 1838
1839 while (temp > 1) { 1839 while (temp > 1) {
1840 optsum += ntohs(*opts++); 1840 optsum += ntohs(*opts++);
1841 temp -= 2; 1841 temp -= 2;
1842 } 1842 }
1843 while (optsum >> 16) 1843 while (optsum >> 16)
1844 optsum = (optsum >> 16) + 1844 optsum = (optsum >> 16) +
1845 (optsum & 0xffff); 1845 (optsum & 0xffff);
1846 1846
1847 /* Deduct ip opts sum from hwsum (rfc 1624). */ 1847 /* Deduct ip opts sum from hwsum. */
1848 m->m_pkthdr.csum_data = 1848 m->m_pkthdr.csum_data += (uint16_t)~optsum;
1849 ~((~m->m_pkthdr.csum_data) - ~optsum); 
1850 1849
1851 while (m->m_pkthdr.csum_data >> 16) 1850 while (m->m_pkthdr.csum_data >> 16)
1852 m->m_pkthdr.csum_data = 1851 m->m_pkthdr.csum_data =
1853 (m->m_pkthdr.csum_data >> 16) + 1852 (m->m_pkthdr.csum_data >> 16) +
1854 (m->m_pkthdr.csum_data & 1853 (m->m_pkthdr.csum_data &
1855 0xffff); 1854 0xffff);
1856 } 1855 }
1857 1856
1858 m->m_pkthdr.csum_flags |= M_CSUM_DATA | 1857 m->m_pkthdr.csum_flags |= M_CSUM_DATA |
1859 M_CSUM_NO_PSEUDOHDR; 1858 M_CSUM_NO_PSEUDOHDR;
1860 } else 1859 } else
1861swcsum: 1860swcsum:
1862 m->m_pkthdr.csum_flags = 0; 1861 m->m_pkthdr.csum_flags = 0;
1863#endif 1862#endif
1864 /* Pass it on. */ 1863 /* Pass it on. */
1865 (*ifp->if_input)(ifp, m); 1864 (*ifp->if_input)(ifp, m);
1866 } 1865 }
1867 1866
1868 if (progress) { 1867 if (progress) {
1869 /* Update the receive pointer. */ 1868 /* Update the receive pointer. */
1870 if (i == sc->sc_rxptr) { 1869 if (i == sc->sc_rxptr) {
1871 GEM_COUNTER_INCR(sc, sc_ev_rxfull); 1870 GEM_COUNTER_INCR(sc, sc_ev_rxfull);
1872#ifdef GEM_DEBUG 1871#ifdef GEM_DEBUG
1873 if (ifp->if_flags & IFF_DEBUG) 1872 if (ifp->if_flags & IFF_DEBUG)
1874 printf("%s: rint: ring wrap\n", 1873 printf("%s: rint: ring wrap\n",
1875 device_xname(&sc->sc_dev)); 1874 device_xname(&sc->sc_dev));
1876#endif 1875#endif
1877 } 1876 }
1878 sc->sc_rxptr = i; 1877 sc->sc_rxptr = i;
1879 bus_space_write_4(t, h, GEM_RX_KICK, GEM_PREVRX(i)); 1878 bus_space_write_4(t, h, GEM_RX_KICK, GEM_PREVRX(i));
1880 } 1879 }
1881#ifdef GEM_COUNTERS 1880#ifdef GEM_COUNTERS
1882 if (progress <= 4) { 1881 if (progress <= 4) {
1883 GEM_COUNTER_INCR(sc, sc_ev_rxhist[progress]); 1882 GEM_COUNTER_INCR(sc, sc_ev_rxhist[progress]);
1884 } else if (progress < 32) { 1883 } else if (progress < 32) {
1885 if (progress < 16) 1884 if (progress < 16)
1886 GEM_COUNTER_INCR(sc, sc_ev_rxhist[5]); 1885 GEM_COUNTER_INCR(sc, sc_ev_rxhist[5]);
1887 else 1886 else
1888 GEM_COUNTER_INCR(sc, sc_ev_rxhist[6]); 1887 GEM_COUNTER_INCR(sc, sc_ev_rxhist[6]);
1889 1888
1890 } else { 1889 } else {
1891 if (progress < 64) 1890 if (progress < 64)
1892 GEM_COUNTER_INCR(sc, sc_ev_rxhist[7]); 1891 GEM_COUNTER_INCR(sc, sc_ev_rxhist[7]);
1893 else 1892 else
1894 GEM_COUNTER_INCR(sc, sc_ev_rxhist[8]); 1893 GEM_COUNTER_INCR(sc, sc_ev_rxhist[8]);
1895 } 1894 }
1896#endif 1895#endif
1897 1896
1898 DPRINTF(sc, ("gem_rint: done sc->rxptr %d, complete %d\n", 1897 DPRINTF(sc, ("gem_rint: done sc->rxptr %d, complete %d\n",
1899 sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION))); 1898 sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION)));
1900 1899
1901 /* Read error counters ... */ 1900 /* Read error counters ... */
1902 ifp->if_ierrors += 1901 ifp->if_ierrors +=
1903 bus_space_read_4(t, h, GEM_MAC_RX_LEN_ERR_CNT) + 1902 bus_space_read_4(t, h, GEM_MAC_RX_LEN_ERR_CNT) +
1904 bus_space_read_4(t, h, GEM_MAC_RX_ALIGN_ERR) + 1903 bus_space_read_4(t, h, GEM_MAC_RX_ALIGN_ERR) +
1905 bus_space_read_4(t, h, GEM_MAC_RX_CRC_ERR_CNT) + 1904 bus_space_read_4(t, h, GEM_MAC_RX_CRC_ERR_CNT) +
1906 bus_space_read_4(t, h, GEM_MAC_RX_CODE_VIOL); 1905 bus_space_read_4(t, h, GEM_MAC_RX_CODE_VIOL);
1907 1906
1908 /* ... then clear the hardware counters. */ 1907 /* ... then clear the hardware counters. */
1909 bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0); 1908 bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0);
1910 bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0); 1909 bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0);
1911 bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0); 1910 bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0);
1912 bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0); 1911 bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0);
1913 1912
1914 return (1); 1913 return (1);
1915} 1914}
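
The hunk above is the heart of this change: instead of complementing
32-bit intermediates and re-complementing the result, the IP option
words are now deducted by adding the 16-bit one's complement of
optsum and letting the existing loop fold the carries. A standalone
sketch of that arithmetic (an illustration only, not driver code;
the helper name is made up):

	#include <stdint.h>

	/*
	 * Remove a 16-bit word m from an uncomplemented one's
	 * complement sum: subtracting in one's complement is adding
	 * the complement. The uint16_t cast is the important part;
	 * plain ~m on a uint32_t would carry sixteen extra set bits
	 * into the sum.
	 */
	static uint16_t
	ocsum_deduct(uint32_t sum, uint32_t m)
	{
		sum += (uint16_t)~m;
		while (sum >> 16)	/* fold carries back in */
			sum = (sum >> 16) + (sum & 0xffff);
		return (uint16_t)sum;
	}
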
1916 1915
1917 1916
1918/* 1917/*
1919 * gem_add_rxbuf: 1918 * gem_add_rxbuf:
1920 * 1919 *
1921 * Add a receive buffer to the indicated descriptor. 1920 * Add a receive buffer to the indicated descriptor.
1922 */ 1921 */
1923int 1922int
1924gem_add_rxbuf(struct gem_softc *sc, int idx) 1923gem_add_rxbuf(struct gem_softc *sc, int idx)
1925{ 1924{
1926 struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx]; 1925 struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
1927 struct mbuf *m; 1926 struct mbuf *m;
1928 int error; 1927 int error;
1929 1928
1930 MGETHDR(m, M_DONTWAIT, MT_DATA); 1929 MGETHDR(m, M_DONTWAIT, MT_DATA);
1931 if (m == NULL) 1930 if (m == NULL)
1932 return (ENOBUFS); 1931 return (ENOBUFS);
1933 1932
1934 MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner); 1933 MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
1935 MCLGET(m, M_DONTWAIT); 1934 MCLGET(m, M_DONTWAIT);
1936 if ((m->m_flags & M_EXT) == 0) { 1935 if ((m->m_flags & M_EXT) == 0) {
1937 m_freem(m); 1936 m_freem(m);
1938 return (ENOBUFS); 1937 return (ENOBUFS);
1939 } 1938 }
1940 1939
1941#ifdef GEM_DEBUG 1940#ifdef GEM_DEBUG
1942/* bzero the packet to check DMA */ 1941/* bzero the packet to check DMA */
1943 memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size); 1942 memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
1944#endif 1943#endif
1945 1944
1946 if (rxs->rxs_mbuf != NULL) 1945 if (rxs->rxs_mbuf != NULL)
1947 bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap); 1946 bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);
1948 1947
1949 rxs->rxs_mbuf = m; 1948 rxs->rxs_mbuf = m;
1950 1949
1951 error = bus_dmamap_load(sc->sc_dmatag, rxs->rxs_dmamap, 1950 error = bus_dmamap_load(sc->sc_dmatag, rxs->rxs_dmamap,
1952 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, 1951 m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
1953 BUS_DMA_READ|BUS_DMA_NOWAIT); 1952 BUS_DMA_READ|BUS_DMA_NOWAIT);
1954 if (error) { 1953 if (error) {
1955 aprint_error_dev(&sc->sc_dev, "can't load rx DMA map %d, error = %d\n", 1954 aprint_error_dev(&sc->sc_dev, "can't load rx DMA map %d, error = %d\n",
1956 idx, error); 1955 idx, error);
1957 panic("gem_add_rxbuf"); /* XXX */ 1956 panic("gem_add_rxbuf"); /* XXX */
1958 } 1957 }
1959 1958
1960 bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0, 1959 bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
1961 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 1960 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1962 1961
1963 GEM_INIT_RXDESC(sc, idx); 1962 GEM_INIT_RXDESC(sc, idx);
1964 1963
1965 return (0); 1964 return (0);
1966} 1965}
1967 1966
1968 1967
1969int 1968int
1970gem_eint(struct gem_softc *sc, u_int status) 1969gem_eint(struct gem_softc *sc, u_int status)
1971{ 1970{
1972 char bits[128]; 1971 char bits[128];
1973 u_int32_t r, v; 1972 u_int32_t r, v;
1974 1973
1975 if ((status & GEM_INTR_MIF) != 0) { 1974 if ((status & GEM_INTR_MIF) != 0) {
1976 printf("%s: XXXlink status changed\n", device_xname(&sc->sc_dev)); 1975 printf("%s: XXXlink status changed\n", device_xname(&sc->sc_dev));
1977 return (1); 1976 return (1);
1978 } 1977 }
1979 1978
1980 if ((status & GEM_INTR_RX_TAG_ERR) != 0) { 1979 if ((status & GEM_INTR_RX_TAG_ERR) != 0) {
1981 gem_reset_rxdma(sc); 1980 gem_reset_rxdma(sc);
1982 return (1); 1981 return (1);
1983 } 1982 }
1984 1983
1985 if (status & GEM_INTR_BERR) { 1984 if (status & GEM_INTR_BERR) {
1986 if (sc->sc_flags & GEM_PCI) 1985 if (sc->sc_flags & GEM_PCI)
1987 r = GEM_ERROR_STATUS; 1986 r = GEM_ERROR_STATUS;
1988 else 1987 else
1989 r = GEM_SBUS_ERROR_STATUS; 1988 r = GEM_SBUS_ERROR_STATUS;
1990 bus_space_read_4(sc->sc_bustag, sc->sc_h2, r); 1989 bus_space_read_4(sc->sc_bustag, sc->sc_h2, r);
1991 v = bus_space_read_4(sc->sc_bustag, sc->sc_h2, r); 1990 v = bus_space_read_4(sc->sc_bustag, sc->sc_h2, r);
1992 aprint_error_dev(&sc->sc_dev, "bus error interrupt: 0x%02x\n", 1991 aprint_error_dev(&sc->sc_dev, "bus error interrupt: 0x%02x\n",
1993 v); 1992 v);
1994 return (1); 1993 return (1);
1995 } 1994 }
1996 snprintb(bits, sizeof(bits), GEM_INTR_BITS, status); 1995 snprintb(bits, sizeof(bits), GEM_INTR_BITS, status);
1997 printf("%s: status=%s\n", device_xname(&sc->sc_dev), bits); 1996 printf("%s: status=%s\n", device_xname(&sc->sc_dev), bits);
1998  1997
1999 return (1); 1998 return (1);
2000} 1999}
2001 2000
2002 2001
2003/* 2002/*
2004 * PCS interrupts. 2003 * PCS interrupts.
2005 * We should receive these when the link status changes, but sometimes 2004 * We should receive these when the link status changes, but sometimes
2006 * we don't receive them for link up. We compensate for this in the 2005 * we don't receive them for link up. We compensate for this in the
2007 * gem_tick() callout. 2006 * gem_tick() callout.
2008 */ 2007 */
2009int 2008int
2010gem_pint(struct gem_softc *sc) 2009gem_pint(struct gem_softc *sc)
2011{ 2010{
2012 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2011 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2013 bus_space_tag_t t = sc->sc_bustag; 2012 bus_space_tag_t t = sc->sc_bustag;
2014 bus_space_handle_t h = sc->sc_h1; 2013 bus_space_handle_t h = sc->sc_h1;
2015 u_int32_t v, v2; 2014 u_int32_t v, v2;
2016 2015
2017 /* 2016 /*
2018 * Clear the PCS interrupt from GEM_STATUS. The PCS register is 2017 * Clear the PCS interrupt from GEM_STATUS. The PCS register is
2019 * latched, so we have to read it twice. There is only one bit in 2018 * latched, so we have to read it twice. There is only one bit in
2020 * use, so the value is meaningless. 2019 * use, so the value is meaningless.
2021 */ 2020 */
2022 bus_space_read_4(t, h, GEM_MII_INTERRUP_STATUS); 2021 bus_space_read_4(t, h, GEM_MII_INTERRUP_STATUS);
2023 bus_space_read_4(t, h, GEM_MII_INTERRUP_STATUS); 2022 bus_space_read_4(t, h, GEM_MII_INTERRUP_STATUS);
2024 2023
2025 if ((ifp->if_flags & IFF_UP) == 0) 2024 if ((ifp->if_flags & IFF_UP) == 0)
2026 return 1; 2025 return 1;
2027 2026
2028 if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) == 0) 2027 if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) == 0)
2029 return 1; 2028 return 1;
2030 2029
2031 v = bus_space_read_4(t, h, GEM_MII_STATUS); 2030 v = bus_space_read_4(t, h, GEM_MII_STATUS);
2032 /* If we see remote fault, our link partner is probably going away */ 2031 /* If we see remote fault, our link partner is probably going away */
2033 if ((v & GEM_MII_STATUS_REM_FLT) != 0) { 2032 if ((v & GEM_MII_STATUS_REM_FLT) != 0) {
2034 gem_bitwait(sc, h, GEM_MII_STATUS, GEM_MII_STATUS_REM_FLT, 0); 2033 gem_bitwait(sc, h, GEM_MII_STATUS, GEM_MII_STATUS_REM_FLT, 0);
2035 v = bus_space_read_4(t, h, GEM_MII_STATUS); 2034 v = bus_space_read_4(t, h, GEM_MII_STATUS);
2036 /* Otherwise, we may need to wait after auto-negotiation completes */ 2035 /* Otherwise, we may need to wait after auto-negotiation completes */
2037 } else if ((v & (GEM_MII_STATUS_LINK_STS | GEM_MII_STATUS_ANEG_CPT)) == 2036 } else if ((v & (GEM_MII_STATUS_LINK_STS | GEM_MII_STATUS_ANEG_CPT)) ==
2038 GEM_MII_STATUS_ANEG_CPT) { 2037 GEM_MII_STATUS_ANEG_CPT) {
2039 gem_bitwait(sc, h, GEM_MII_STATUS, 0, GEM_MII_STATUS_LINK_STS); 2038 gem_bitwait(sc, h, GEM_MII_STATUS, 0, GEM_MII_STATUS_LINK_STS);
2040 v = bus_space_read_4(t, h, GEM_MII_STATUS); 2039 v = bus_space_read_4(t, h, GEM_MII_STATUS);
2041 } 2040 }
2042 if ((v & GEM_MII_STATUS_LINK_STS) != 0) { 2041 if ((v & GEM_MII_STATUS_LINK_STS) != 0) {
2043 if (sc->sc_flags & GEM_LINK) { 2042 if (sc->sc_flags & GEM_LINK) {
2044 return 1; 2043 return 1;
2045 } 2044 }
2046 callout_stop(&sc->sc_tick_ch); 2045 callout_stop(&sc->sc_tick_ch);
2047 v = bus_space_read_4(t, h, GEM_MII_ANAR); 2046 v = bus_space_read_4(t, h, GEM_MII_ANAR);
2048 v2 = bus_space_read_4(t, h, GEM_MII_ANLPAR); 2047 v2 = bus_space_read_4(t, h, GEM_MII_ANLPAR);
2049 sc->sc_mii.mii_media_active = IFM_ETHER | IFM_1000_SX; 2048 sc->sc_mii.mii_media_active = IFM_ETHER | IFM_1000_SX;
2050 sc->sc_mii.mii_media_status = IFM_AVALID | IFM_ACTIVE; 2049 sc->sc_mii.mii_media_status = IFM_AVALID | IFM_ACTIVE;
2051 v &= v2; 2050 v &= v2;
2052 if (v & GEM_MII_ANEG_FUL_DUPLX) { 2051 if (v & GEM_MII_ANEG_FUL_DUPLX) {
2053 sc->sc_mii.mii_media_active |= IFM_FDX; 2052 sc->sc_mii.mii_media_active |= IFM_FDX;
2054#ifdef GEM_DEBUG 2053#ifdef GEM_DEBUG
2055 aprint_debug_dev(&sc->sc_dev, "link up: full duplex\n"); 2054 aprint_debug_dev(&sc->sc_dev, "link up: full duplex\n");
2056#endif 2055#endif
2057 } else if (v & GEM_MII_ANEG_HLF_DUPLX) { 2056 } else if (v & GEM_MII_ANEG_HLF_DUPLX) {
2058 sc->sc_mii.mii_media_active |= IFM_HDX; 2057 sc->sc_mii.mii_media_active |= IFM_HDX;
2059#ifdef GEM_DEBUG 2058#ifdef GEM_DEBUG
2060 aprint_debug_dev(&sc->sc_dev, "link up: half duplex\n"); 2059 aprint_debug_dev(&sc->sc_dev, "link up: half duplex\n");
2061#endif 2060#endif
2062 } else { 2061 } else {
2063#ifdef GEM_DEBUG 2062#ifdef GEM_DEBUG
2064 aprint_debug_dev(&sc->sc_dev, "duplex mismatch\n"); 2063 aprint_debug_dev(&sc->sc_dev, "duplex mismatch\n");
2065#endif 2064#endif
2066 } 2065 }
2067 gem_statuschange(sc); 2066 gem_statuschange(sc);
2068 } else { 2067 } else {
2069 if ((sc->sc_flags & GEM_LINK) == 0) { 2068 if ((sc->sc_flags & GEM_LINK) == 0) {
2070 return 1; 2069 return 1;
2071 } 2070 }
2072 sc->sc_mii.mii_media_active = IFM_ETHER | IFM_NONE; 2071 sc->sc_mii.mii_media_active = IFM_ETHER | IFM_NONE;
2073 sc->sc_mii.mii_media_status = IFM_AVALID; 2072 sc->sc_mii.mii_media_status = IFM_AVALID;
2074#ifdef GEM_DEBUG 2073#ifdef GEM_DEBUG
2075 aprint_debug_dev(&sc->sc_dev, "link down\n"); 2074 aprint_debug_dev(&sc->sc_dev, "link down\n");
2076#endif 2075#endif
2077 gem_statuschange(sc); 2076 gem_statuschange(sc);
2078 2077
2079 /* Start the 10 second timer */ 2078 /* Start the 10 second timer */
2080 callout_reset(&sc->sc_tick_ch, hz * 10, gem_tick, sc); 2079 callout_reset(&sc->sc_tick_ch, hz * 10, gem_tick, sc);
2081 } 2080 }
2082 return 1; 2081 return 1;
2083} 2082}
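
A brief aside on the duplex selection in gem_pint() above (standard
autonegotiation behaviour, not something introduced by this diff):
ANDing the local advertisement with the link partner's (the v &= v2
of GEM_MII_ANAR and GEM_MII_ANLPAR) leaves exactly the abilities both
ends offered, so testing GEM_MII_ANEG_FUL_DUPLX before
GEM_MII_ANEG_HLF_DUPLX picks the best common mode, and a result with
neither bit set is reported as a duplex mismatch.
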
2084 2083
2085 2084
2086 2085
2087int 2086int
2088gem_intr(void *v) 2087gem_intr(void *v)
2089{ 2088{
2090 struct gem_softc *sc = (struct gem_softc *)v; 2089 struct gem_softc *sc = (struct gem_softc *)v;
2091 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2090 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2092 bus_space_tag_t t = sc->sc_bustag; 2091 bus_space_tag_t t = sc->sc_bustag;
2093 bus_space_handle_t h = sc->sc_h1; 2092 bus_space_handle_t h = sc->sc_h1;
2094 u_int32_t status; 2093 u_int32_t status;
2095 int r = 0; 2094 int r = 0;
2096#ifdef GEM_DEBUG 2095#ifdef GEM_DEBUG
2097 char bits[128]; 2096 char bits[128];
2098#endif 2097#endif
2099 2098
2100 /* XXX We should probably mask out interrupts until we're done */ 2099 /* XXX We should probably mask out interrupts until we're done */
2101 2100
2102 sc->sc_ev_intr.ev_count++; 2101 sc->sc_ev_intr.ev_count++;
2103 2102
2104 status = bus_space_read_4(t, h, GEM_STATUS); 2103 status = bus_space_read_4(t, h, GEM_STATUS);
2105#ifdef GEM_DEBUG 2104#ifdef GEM_DEBUG
2106 snprintb(bits, sizeof(bits), GEM_INTR_BITS, status); 2105 snprintb(bits, sizeof(bits), GEM_INTR_BITS, status);
2107#endif 2106#endif
2108 DPRINTF(sc, ("%s: gem_intr: cplt 0x%x status %s\n", 2107 DPRINTF(sc, ("%s: gem_intr: cplt 0x%x status %s\n",
2109 device_xname(&sc->sc_dev), (status >> 19), bits)); 2108 device_xname(&sc->sc_dev), (status >> 19), bits));
2110  2109
2111 2110
2112 if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0) 2111 if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0)
2113 r |= gem_eint(sc, status); 2112 r |= gem_eint(sc, status);
2114 2113
2115 /* We don't bother with GEM_INTR_TX_DONE */ 2114 /* We don't bother with GEM_INTR_TX_DONE */
2116 if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0) { 2115 if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0) {
2117 GEM_COUNTER_INCR(sc, sc_ev_txint); 2116 GEM_COUNTER_INCR(sc, sc_ev_txint);
2118 r |= gem_tint(sc); 2117 r |= gem_tint(sc);
2119 } 2118 }
2120 2119
2121 if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0) { 2120 if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0) {
2122 GEM_COUNTER_INCR(sc, sc_ev_rxint); 2121 GEM_COUNTER_INCR(sc, sc_ev_rxint);
2123 r |= gem_rint(sc); 2122 r |= gem_rint(sc);
2124 } 2123 }
2125 2124
2126 /* We should eventually do more than just print out error stats. */ 2125 /* We should eventually do more than just print out error stats. */
2127 if (status & GEM_INTR_TX_MAC) { 2126 if (status & GEM_INTR_TX_MAC) {
2128 int txstat = bus_space_read_4(t, h, GEM_MAC_TX_STATUS); 2127 int txstat = bus_space_read_4(t, h, GEM_MAC_TX_STATUS);
2129 if (txstat & ~GEM_MAC_TX_XMIT_DONE) 2128 if (txstat & ~GEM_MAC_TX_XMIT_DONE)
2130 printf("%s: MAC tx fault, status %x\n", 2129 printf("%s: MAC tx fault, status %x\n",
2131 device_xname(&sc->sc_dev), txstat); 2130 device_xname(&sc->sc_dev), txstat);
2132 if (txstat & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG)) 2131 if (txstat & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG))
2133 gem_init(ifp); 2132 gem_init(ifp);
2134 } 2133 }
2135 if (status & GEM_INTR_RX_MAC) { 2134 if (status & GEM_INTR_RX_MAC) {
2136 int rxstat = bus_space_read_4(t, h, GEM_MAC_RX_STATUS); 2135 int rxstat = bus_space_read_4(t, h, GEM_MAC_RX_STATUS);
2137 /* 2136 /*
2138 * At least with GEM_SUN_GEM and some GEM_SUN_ERI 2137 * At least with GEM_SUN_GEM and some GEM_SUN_ERI
2139 * revisions GEM_MAC_RX_OVERFLOW happen often due to a 2138 * revisions GEM_MAC_RX_OVERFLOW happen often due to a
2140 * silicon bug so handle them silently. Moreover, it's 2139 * silicon bug so handle them silently. Moreover, it's
2141 * likely that the receiver has hung so we reset it. 2140 * likely that the receiver has hung so we reset it.
2142 */ 2141 */
2143 if (rxstat & GEM_MAC_RX_OVERFLOW) { 2142 if (rxstat & GEM_MAC_RX_OVERFLOW) {
2144 ifp->if_ierrors++; 2143 ifp->if_ierrors++;
2145 gem_reset_rxdma(sc); 2144 gem_reset_rxdma(sc);
2146 } else if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT)) 2145 } else if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT))
2147 printf("%s: MAC rx fault, status 0x%02x\n", 2146 printf("%s: MAC rx fault, status 0x%02x\n",
2148 device_xname(&sc->sc_dev), rxstat); 2147 device_xname(&sc->sc_dev), rxstat);
2149 } 2148 }
2150 if (status & GEM_INTR_PCS) { 2149 if (status & GEM_INTR_PCS) {
2151 r |= gem_pint(sc); 2150 r |= gem_pint(sc);
2152 } 2151 }
2153 2152
2154/* Do we need to do anything with these? 2153/* Do we need to do anything with these?
2155 if ((status & GEM_MAC_CONTROL_STATUS) != 0) { 2154 if ((status & GEM_MAC_CONTROL_STATUS) != 0) {
2156 status2 = bus_read_4(sc->sc_res[0], GEM_MAC_CONTROL_STATUS); 2155 status2 = bus_read_4(sc->sc_res[0], GEM_MAC_CONTROL_STATUS);
2157 if ((status2 & GEM_MAC_PAUSED) != 0) 2156 if ((status2 & GEM_MAC_PAUSED) != 0)
 2158 aprint_debug_dev(&sc->sc_dev, "PAUSE received (%d slots)\n", 2157 aprint_debug_dev(&sc->sc_dev, "PAUSE received (%d slots)\n",
2159 GEM_MAC_PAUSE_TIME(status2)); 2158 GEM_MAC_PAUSE_TIME(status2));
2160 if ((status2 & GEM_MAC_PAUSE) != 0) 2159 if ((status2 & GEM_MAC_PAUSE) != 0)
 2161 aprint_debug_dev(&sc->sc_dev, "transited to PAUSE state\n"); 2160 aprint_debug_dev(&sc->sc_dev, "transited to PAUSE state\n");
2162 if ((status2 & GEM_MAC_RESUME) != 0) 2161 if ((status2 & GEM_MAC_RESUME) != 0)
 2163 aprint_debug_dev(&sc->sc_dev, "transited to non-PAUSE state\n"); 2162 aprint_debug_dev(&sc->sc_dev, "transited to non-PAUSE state\n");
2164 } 2163 }
2165 if ((status & GEM_INTR_MIF) != 0) 2164 if ((status & GEM_INTR_MIF) != 0)
 2166 aprint_debug_dev(&sc->sc_dev, "MIF interrupt\n"); 2165 aprint_debug_dev(&sc->sc_dev, "MIF interrupt\n");
2167*/ 2166*/
2168#if NRND > 0 2167#if NRND > 0
2169 rnd_add_uint32(&sc->rnd_source, status); 2168 rnd_add_uint32(&sc->rnd_source, status);
2170#endif 2169#endif
2171 return (r); 2170 return (r);
2172} 2171}
2173 2172
2174 2173
2175void 2174void
2176gem_watchdog(struct ifnet *ifp) 2175gem_watchdog(struct ifnet *ifp)
2177{ 2176{
2178 struct gem_softc *sc = ifp->if_softc; 2177 struct gem_softc *sc = ifp->if_softc;
2179 2178
2180 DPRINTF(sc, ("gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x " 2179 DPRINTF(sc, ("gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
2181 "GEM_MAC_RX_CONFIG %x\n", 2180 "GEM_MAC_RX_CONFIG %x\n",
2182 bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_RX_CONFIG), 2181 bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_RX_CONFIG),
2183 bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_MAC_RX_STATUS), 2182 bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_MAC_RX_STATUS),
2184 bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_MAC_RX_CONFIG))); 2183 bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_MAC_RX_CONFIG)));
2185 2184
2186 log(LOG_ERR, "%s: device timeout\n", device_xname(&sc->sc_dev)); 2185 log(LOG_ERR, "%s: device timeout\n", device_xname(&sc->sc_dev));
2187 ++ifp->if_oerrors; 2186 ++ifp->if_oerrors;
2188 2187
2189 /* Try to get more packets going. */ 2188 /* Try to get more packets going. */
2190 gem_start(ifp); 2189 gem_start(ifp);
2191} 2190}
2192 2191
2193/* 2192/*
2194 * Initialize the MII Management Interface 2193 * Initialize the MII Management Interface
2195 */ 2194 */
2196void 2195void
2197gem_mifinit(struct gem_softc *sc) 2196gem_mifinit(struct gem_softc *sc)
2198{ 2197{
2199 bus_space_tag_t t = sc->sc_bustag; 2198 bus_space_tag_t t = sc->sc_bustag;
2200 bus_space_handle_t mif = sc->sc_h1; 2199 bus_space_handle_t mif = sc->sc_h1;
2201 2200
2202 /* Configure the MIF in frame mode */ 2201 /* Configure the MIF in frame mode */
2203 sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG); 2202 sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
2204 sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA; 2203 sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA;
2205 bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config); 2204 bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config);
2206} 2205}
2207 2206
2208/* 2207/*
2209 * MII interface 2208 * MII interface
2210 * 2209 *
2211 * The GEM MII interface supports at least three different operating modes: 2210 * The GEM MII interface supports at least three different operating modes:
2212 * 2211 *
2213 * Bitbang mode is implemented using data, clock and output enable registers. 2212 * Bitbang mode is implemented using data, clock and output enable registers.
2214 * 2213 *
2215 * Frame mode is implemented by loading a complete frame into the frame 2214 * Frame mode is implemented by loading a complete frame into the frame
2216 * register and polling the valid bit for completion. 2215 * register and polling the valid bit for completion.
2217 * 2216 *
2218 * Polling mode uses the frame register but completion is indicated by 2217 * Polling mode uses the frame register but completion is indicated by
2219 * an interrupt. 2218 * an interrupt.
2220 * 2219 *
2221 */ 2220 */
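
As a rough companion to the frame-mode description above, the
management frame the MIF shifts out is the IEEE 802.3 clause 22
format; the shift values below are the conventional packing of that
frame into a 32-bit word, shown for illustration only (the driver's
actual encoding is whatever gemreg.h defines for the GEM_MIF_*
macros):

	/*
	 * Clause 22 read frame: ST=01, OP=10 (read), a 5-bit PHY
	 * address and a 5-bit register address, then turnaround and
	 * 16 data bits driven by the PHY.
	 */
	uint32_t cmd = (1u << 30)		/* ST */
	    | (2u << 28)			/* OP: read */
	    | ((phy & 0x1f) << 23)		/* PHYAD */
	    | ((reg & 0x1f) << 18);		/* REGAD */
	/* gem_mii_readreg() below then polls a turnaround bit
	 * (GEM_MIF_FRAME_TA0) to learn when DATA is valid. */
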
2222static int 2221static int
2223gem_mii_readreg(struct device *self, int phy, int reg) 2222gem_mii_readreg(struct device *self, int phy, int reg)
2224{ 2223{
2225 struct gem_softc *sc = (void *)self; 2224 struct gem_softc *sc = (void *)self;
2226 bus_space_tag_t t = sc->sc_bustag; 2225 bus_space_tag_t t = sc->sc_bustag;
2227 bus_space_handle_t mif = sc->sc_h1; 2226 bus_space_handle_t mif = sc->sc_h1;
2228 int n; 2227 int n;
2229 u_int32_t v; 2228 u_int32_t v;
2230 2229
2231#ifdef GEM_DEBUG1 2230#ifdef GEM_DEBUG1
2232 if (sc->sc_debug) 2231 if (sc->sc_debug)
2233 printf("gem_mii_readreg: PHY %d reg %d\n", phy, reg); 2232 printf("gem_mii_readreg: PHY %d reg %d\n", phy, reg);
2234#endif 2233#endif
2235 2234
2236 /* Construct the frame command */ 2235 /* Construct the frame command */
2237 v = (reg << GEM_MIF_REG_SHIFT) | (phy << GEM_MIF_PHY_SHIFT) | 2236 v = (reg << GEM_MIF_REG_SHIFT) | (phy << GEM_MIF_PHY_SHIFT) |
2238 GEM_MIF_FRAME_READ; 2237 GEM_MIF_FRAME_READ;
2239 2238
2240 bus_space_write_4(t, mif, GEM_MIF_FRAME, v); 2239 bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
2241 for (n = 0; n < 100; n++) { 2240 for (n = 0; n < 100; n++) {
2242 DELAY(1); 2241 DELAY(1);
2243 v = bus_space_read_4(t, mif, GEM_MIF_FRAME); 2242 v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
2244 if (v & GEM_MIF_FRAME_TA0) 2243 if (v & GEM_MIF_FRAME_TA0)
2245 return (v & GEM_MIF_FRAME_DATA); 2244 return (v & GEM_MIF_FRAME_DATA);
2246 } 2245 }
2247 2246
2248 printf("%s: mii_read timeout\n", device_xname(&sc->sc_dev)); 2247 printf("%s: mii_read timeout\n", device_xname(&sc->sc_dev));
2249 return (0); 2248 return (0);
2250} 2249}
2251 2250
2252static void 2251static void
2253gem_mii_writereg(struct device *self, int phy, int reg, int val) 2252gem_mii_writereg(struct device *self, int phy, int reg, int val)
2254{ 2253{
2255 struct gem_softc *sc = (void *)self; 2254 struct gem_softc *sc = (void *)self;
2256 bus_space_tag_t t = sc->sc_bustag; 2255 bus_space_tag_t t = sc->sc_bustag;
2257 bus_space_handle_t mif = sc->sc_h1; 2256 bus_space_handle_t mif = sc->sc_h1;
2258 int n; 2257 int n;
2259 u_int32_t v; 2258 u_int32_t v;
2260 2259
2261#ifdef GEM_DEBUG1 2260#ifdef GEM_DEBUG1
2262 if (sc->sc_debug) 2261 if (sc->sc_debug)
2263 printf("gem_mii_writereg: PHY %d reg %d val %x\n", 2262 printf("gem_mii_writereg: PHY %d reg %d val %x\n",
2264 phy, reg, val); 2263 phy, reg, val);
2265#endif 2264#endif
2266 2265
2267 /* Construct the frame command */ 2266 /* Construct the frame command */
2268 v = GEM_MIF_FRAME_WRITE | 2267 v = GEM_MIF_FRAME_WRITE |
2269 (phy << GEM_MIF_PHY_SHIFT) | 2268 (phy << GEM_MIF_PHY_SHIFT) |
2270 (reg << GEM_MIF_REG_SHIFT) | 2269 (reg << GEM_MIF_REG_SHIFT) |
2271 (val & GEM_MIF_FRAME_DATA); 2270 (val & GEM_MIF_FRAME_DATA);
2272 2271
2273 bus_space_write_4(t, mif, GEM_MIF_FRAME, v); 2272 bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
2274 for (n = 0; n < 100; n++) { 2273 for (n = 0; n < 100; n++) {
2275 DELAY(1); 2274 DELAY(1);
2276 v = bus_space_read_4(t, mif, GEM_MIF_FRAME); 2275 v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
2277 if (v & GEM_MIF_FRAME_TA0) 2276 if (v & GEM_MIF_FRAME_TA0)
2278 return; 2277 return;
2279 } 2278 }
2280 2279
2281 printf("%s: mii_write timeout\n", device_xname(&sc->sc_dev)); 2280 printf("%s: mii_write timeout\n", device_xname(&sc->sc_dev));
2282} 2281}
2283 2282
2284static void 2283static void
2285gem_mii_statchg(struct device *dev) 2284gem_mii_statchg(struct device *dev)
2286{ 2285{
2287 struct gem_softc *sc = (void *)dev; 2286 struct gem_softc *sc = (void *)dev;
2288#ifdef GEM_DEBUG 2287#ifdef GEM_DEBUG
2289 int instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media); 2288 int instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
2290#endif 2289#endif
2291 2290
2292#ifdef GEM_DEBUG 2291#ifdef GEM_DEBUG
2293 if (sc->sc_debug) 2292 if (sc->sc_debug)
2294 printf("gem_mii_statchg: status change: phy = %d\n", 2293 printf("gem_mii_statchg: status change: phy = %d\n",
2295 sc->sc_phys[instance]); 2294 sc->sc_phys[instance]);
2296#endif 2295#endif
2297 gem_statuschange(sc); 2296 gem_statuschange(sc);
2298} 2297}
2299 2298
2300/* 2299/*
2301 * Common status change for gem_mii_statchg() and gem_pint() 2300 * Common status change for gem_mii_statchg() and gem_pint()
2302 */ 2301 */
2303void 2302void
2304gem_statuschange(struct gem_softc* sc) 2303gem_statuschange(struct gem_softc* sc)
2305{ 2304{
2306 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2305 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2307 bus_space_tag_t t = sc->sc_bustag; 2306 bus_space_tag_t t = sc->sc_bustag;
2308 bus_space_handle_t mac = sc->sc_h1; 2307 bus_space_handle_t mac = sc->sc_h1;
2309 int gigabit; 2308 int gigabit;
2310 u_int32_t rxcfg, txcfg, v; 2309 u_int32_t rxcfg, txcfg, v;
2311 2310
2312 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0 && 2311 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0 &&
2313 IFM_SUBTYPE(sc->sc_mii.mii_media_active) != IFM_NONE) 2312 IFM_SUBTYPE(sc->sc_mii.mii_media_active) != IFM_NONE)
2314 sc->sc_flags |= GEM_LINK; 2313 sc->sc_flags |= GEM_LINK;
2315 else 2314 else
2316 sc->sc_flags &= ~GEM_LINK; 2315 sc->sc_flags &= ~GEM_LINK;
2317 2316
2318 if (sc->sc_ethercom.ec_if.if_baudrate == IF_Mbps(1000)) 2317 if (sc->sc_ethercom.ec_if.if_baudrate == IF_Mbps(1000))
2319 gigabit = 1; 2318 gigabit = 1;
2320 else 2319 else
2321 gigabit = 0; 2320 gigabit = 0;
2322 2321
2323 /* 2322 /*
2324 * The configuration done here corresponds to the steps F) and 2323 * The configuration done here corresponds to the steps F) and
2325 * G) and as far as enabling of RX and TX MAC goes also step H) 2324 * G) and as far as enabling of RX and TX MAC goes also step H)
2326 * of the initialization sequence outlined in section 3.2.1 of 2325 * of the initialization sequence outlined in section 3.2.1 of
2327 * the GEM Gigabit Ethernet ASIC Specification. 2326 * the GEM Gigabit Ethernet ASIC Specification.
2328 */ 2327 */
2329 2328
2330 rxcfg = bus_space_read_4(t, mac, GEM_MAC_RX_CONFIG); 2329 rxcfg = bus_space_read_4(t, mac, GEM_MAC_RX_CONFIG);
2331 rxcfg &= ~(GEM_MAC_RX_CARR_EXTEND | GEM_MAC_RX_ENABLE); 2330 rxcfg &= ~(GEM_MAC_RX_CARR_EXTEND | GEM_MAC_RX_ENABLE);
2332 txcfg = GEM_MAC_TX_ENA_IPG0 | GEM_MAC_TX_NGU | GEM_MAC_TX_NGU_LIMIT; 2331 txcfg = GEM_MAC_TX_ENA_IPG0 | GEM_MAC_TX_NGU | GEM_MAC_TX_NGU_LIMIT;
2333 if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) 2332 if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0)
2334 txcfg |= GEM_MAC_TX_IGN_CARRIER | GEM_MAC_TX_IGN_COLLIS; 2333 txcfg |= GEM_MAC_TX_IGN_CARRIER | GEM_MAC_TX_IGN_COLLIS;
2335 else if (gigabit) { 2334 else if (gigabit) {
2336 rxcfg |= GEM_MAC_RX_CARR_EXTEND; 2335 rxcfg |= GEM_MAC_RX_CARR_EXTEND;
 2337 txcfg |= GEM_MAC_TX_CARR_EXTEND; 2336 txcfg |= GEM_MAC_TX_CARR_EXTEND;
2338 } 2337 }
2339 bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0); 2338 bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0);
2340 bus_space_barrier(t, mac, GEM_MAC_TX_CONFIG, 4, 2339 bus_space_barrier(t, mac, GEM_MAC_TX_CONFIG, 4,
2341 BUS_SPACE_BARRIER_WRITE); 2340 BUS_SPACE_BARRIER_WRITE);
2342 if (!gem_bitwait(sc, mac, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0)) 2341 if (!gem_bitwait(sc, mac, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0))
2343 aprint_normal_dev(&sc->sc_dev, "cannot disable TX MAC\n"); 2342 aprint_normal_dev(&sc->sc_dev, "cannot disable TX MAC\n");
2344 bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, txcfg); 2343 bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, txcfg);
2345 bus_space_write_4(t, mac, GEM_MAC_RX_CONFIG, 0); 2344 bus_space_write_4(t, mac, GEM_MAC_RX_CONFIG, 0);
2346 bus_space_barrier(t, mac, GEM_MAC_RX_CONFIG, 4, 2345 bus_space_barrier(t, mac, GEM_MAC_RX_CONFIG, 4,
2347 BUS_SPACE_BARRIER_WRITE); 2346 BUS_SPACE_BARRIER_WRITE);
2348 if (!gem_bitwait(sc, mac, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0)) 2347 if (!gem_bitwait(sc, mac, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0))
2349 aprint_normal_dev(&sc->sc_dev, "cannot disable RX MAC\n"); 2348 aprint_normal_dev(&sc->sc_dev, "cannot disable RX MAC\n");
2350 bus_space_write_4(t, mac, GEM_MAC_RX_CONFIG, rxcfg); 2349 bus_space_write_4(t, mac, GEM_MAC_RX_CONFIG, rxcfg);
2351 2350
2352 v = bus_space_read_4(t, mac, GEM_MAC_CONTROL_CONFIG) & 2351 v = bus_space_read_4(t, mac, GEM_MAC_CONTROL_CONFIG) &
2353 ~(GEM_MAC_CC_RX_PAUSE | GEM_MAC_CC_TX_PAUSE); 2352 ~(GEM_MAC_CC_RX_PAUSE | GEM_MAC_CC_TX_PAUSE);
2354 bus_space_write_4(t, mac, GEM_MAC_CONTROL_CONFIG, v); 2353 bus_space_write_4(t, mac, GEM_MAC_CONTROL_CONFIG, v);
2355 2354
2356 if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) == 0 && 2355 if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) == 0 &&
2357 gigabit != 0) 2356 gigabit != 0)
2358 bus_space_write_4(t, mac, GEM_MAC_SLOT_TIME, 2357 bus_space_write_4(t, mac, GEM_MAC_SLOT_TIME,
2359 GEM_MAC_SLOT_TIME_CARR_EXTEND); 2358 GEM_MAC_SLOT_TIME_CARR_EXTEND);
2360 else 2359 else
2361 bus_space_write_4(t, mac, GEM_MAC_SLOT_TIME, 2360 bus_space_write_4(t, mac, GEM_MAC_SLOT_TIME,
2362 GEM_MAC_SLOT_TIME_NORMAL); 2361 GEM_MAC_SLOT_TIME_NORMAL);
2363 2362
2364 /* XIF Configuration */ 2363 /* XIF Configuration */
2365 if (sc->sc_flags & GEM_LINK) 2364 if (sc->sc_flags & GEM_LINK)
2366 v = GEM_MAC_XIF_LINK_LED; 2365 v = GEM_MAC_XIF_LINK_LED;
2367 else 2366 else
2368 v = 0; 2367 v = 0;
2369 v |= GEM_MAC_XIF_TX_MII_ENA; 2368 v |= GEM_MAC_XIF_TX_MII_ENA;
2370 2369
2371 /* If an external transceiver is connected, enable its MII drivers */ 2370 /* If an external transceiver is connected, enable its MII drivers */
2372 sc->sc_mif_config = bus_space_read_4(t, mac, GEM_MIF_CONFIG); 2371 sc->sc_mif_config = bus_space_read_4(t, mac, GEM_MIF_CONFIG);
2373 if ((sc->sc_flags &(GEM_SERDES | GEM_SERIAL)) == 0) { 2372 if ((sc->sc_flags &(GEM_SERDES | GEM_SERIAL)) == 0) {
2374 if ((sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) != 0) { 2373 if ((sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) != 0) {
2375 /* External MII needs echo disable if half duplex. */ 2374 /* External MII needs echo disable if half duplex. */
2376 if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & 2375 if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) &
2377 IFM_FDX) != 0) 2376 IFM_FDX) != 0)
2378 /* turn on full duplex LED */ 2377 /* turn on full duplex LED */
2379 v |= GEM_MAC_XIF_FDPLX_LED; 2378 v |= GEM_MAC_XIF_FDPLX_LED;
2380 else 2379 else
2381 /* half duplex -- disable echo */ 2380 /* half duplex -- disable echo */
2382 v |= GEM_MAC_XIF_ECHO_DISABL; 2381 v |= GEM_MAC_XIF_ECHO_DISABL;
2383 if (gigabit) 2382 if (gigabit)
2384 v |= GEM_MAC_XIF_GMII_MODE; 2383 v |= GEM_MAC_XIF_GMII_MODE;
2385 else 2384 else
2386 v &= ~GEM_MAC_XIF_GMII_MODE; 2385 v &= ~GEM_MAC_XIF_GMII_MODE;
2387 } else 2386 } else
2388 /* Internal MII needs buf enable */ 2387 /* Internal MII needs buf enable */
2389 v |= GEM_MAC_XIF_MII_BUF_ENA; 2388 v |= GEM_MAC_XIF_MII_BUF_ENA;
2390 } else { 2389 } else {
2391 if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) 2390 if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0)
2392 v |= GEM_MAC_XIF_FDPLX_LED; 2391 v |= GEM_MAC_XIF_FDPLX_LED;
2393 v |= GEM_MAC_XIF_GMII_MODE; 2392 v |= GEM_MAC_XIF_GMII_MODE;
2394 } 2393 }
2395 bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v); 2394 bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v);
2396 2395
2397 if ((ifp->if_flags & IFF_RUNNING) != 0 && 2396 if ((ifp->if_flags & IFF_RUNNING) != 0 &&
2398 (sc->sc_flags & GEM_LINK) != 0) { 2397 (sc->sc_flags & GEM_LINK) != 0) {
2399 bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 2398 bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG,
2400 txcfg | GEM_MAC_TX_ENABLE); 2399 txcfg | GEM_MAC_TX_ENABLE);
2401 bus_space_write_4(t, mac, GEM_MAC_RX_CONFIG, 2400 bus_space_write_4(t, mac, GEM_MAC_RX_CONFIG,
2402 rxcfg | GEM_MAC_RX_ENABLE); 2401 rxcfg | GEM_MAC_RX_ENABLE);
2403 } 2402 }
2404} 2403}
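
One piece of IEEE 802.3 background for the carrier-extension branches
above (not part of this diff): CSMA/CD stays workable at 1000 Mb/s
only because half duplex stretches the slot time from 512 bit times
(64 bytes) to 4096 bit times, i.e. 4096 / 8 = 512 bytes, padding
short frames with carrier extension to fill it. That is why the
half-duplex gigabit case programs GEM_MAC_SLOT_TIME_CARR_EXTEND and
the carrier-extend bits, while full duplex, having no collisions,
keeps GEM_MAC_SLOT_TIME_NORMAL.
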
2405 2404
2406int 2405int
2407gem_ser_mediachange(struct ifnet *ifp) 2406gem_ser_mediachange(struct ifnet *ifp)
2408{ 2407{
2409 struct gem_softc *sc = ifp->if_softc; 2408 struct gem_softc *sc = ifp->if_softc;
2410 u_int s, t; 2409 u_int s, t;
2411 2410
2412 if (IFM_TYPE(sc->sc_mii.mii_media.ifm_media) != IFM_ETHER) 2411 if (IFM_TYPE(sc->sc_mii.mii_media.ifm_media) != IFM_ETHER)
2413 return EINVAL; 2412 return EINVAL;
2414 2413
2415 s = IFM_SUBTYPE(sc->sc_mii.mii_media.ifm_media); 2414 s = IFM_SUBTYPE(sc->sc_mii.mii_media.ifm_media);
2416 if (s == IFM_AUTO) { 2415 if (s == IFM_AUTO) {
2417 if (sc->sc_mii_media != s) { 2416 if (sc->sc_mii_media != s) {
2418#ifdef GEM_DEBUG 2417#ifdef GEM_DEBUG
2419 aprint_debug_dev(&sc->sc_dev, "setting media to auto\n"); 2418 aprint_debug_dev(&sc->sc_dev, "setting media to auto\n");
2420#endif 2419#endif
2421 sc->sc_mii_media = s; 2420 sc->sc_mii_media = s;
2422 if (ifp->if_flags & IFF_UP) { 2421 if (ifp->if_flags & IFF_UP) {
2423 gem_pcs_stop(sc, 0); 2422 gem_pcs_stop(sc, 0);
2424 gem_pcs_start(sc); 2423 gem_pcs_start(sc);
2425 } 2424 }
2426 } 2425 }
2427 return 0; 2426 return 0;
2428 } 2427 }
2429 if (s == IFM_1000_SX) { 2428 if (s == IFM_1000_SX) {
2430 t = IFM_OPTIONS(sc->sc_mii.mii_media.ifm_media); 2429 t = IFM_OPTIONS(sc->sc_mii.mii_media.ifm_media);
2431 if (t == IFM_FDX || t == IFM_HDX) { 2430 if (t == IFM_FDX || t == IFM_HDX) {
2432 if (sc->sc_mii_media != t) { 2431 if (sc->sc_mii_media != t) {
2433 sc->sc_mii_media = t; 2432 sc->sc_mii_media = t;
2434#ifdef GEM_DEBUG 2433#ifdef GEM_DEBUG
2435 aprint_debug_dev(&sc->sc_dev, 2434 aprint_debug_dev(&sc->sc_dev,
2436 "setting media to 1000baseSX-%s\n", 2435 "setting media to 1000baseSX-%s\n",
2437 t == IFM_FDX ? "FDX" : "HDX"); 2436 t == IFM_FDX ? "FDX" : "HDX");
2438#endif 2437#endif
2439 if (ifp->if_flags & IFF_UP) { 2438 if (ifp->if_flags & IFF_UP) {
2440 gem_pcs_stop(sc, 0); 2439 gem_pcs_stop(sc, 0);
2441 gem_pcs_start(sc); 2440 gem_pcs_start(sc);
2442 } 2441 }
2443 } 2442 }
2444 return 0; 2443 return 0;
2445 } 2444 }
2446 } 2445 }
2447 return EINVAL; 2446 return EINVAL;
2448} 2447}
2449 2448
2450void 2449void
2451gem_ser_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 2450gem_ser_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2452{ 2451{
2453 struct gem_softc *sc = ifp->if_softc; 2452 struct gem_softc *sc = ifp->if_softc;
2454 2453
2455 if ((ifp->if_flags & IFF_UP) == 0) 2454 if ((ifp->if_flags & IFF_UP) == 0)
2456 return; 2455 return;
2457 ifmr->ifm_active = sc->sc_mii.mii_media_active; 2456 ifmr->ifm_active = sc->sc_mii.mii_media_active;
2458 ifmr->ifm_status = sc->sc_mii.mii_media_status; 2457 ifmr->ifm_status = sc->sc_mii.mii_media_status;
2459} 2458}
2460 2459
2461static int 2460static int
2462gem_ifflags_cb(struct ethercom *ec) 2461gem_ifflags_cb(struct ethercom *ec)
2463{ 2462{
2464 struct ifnet *ifp = &ec->ec_if; 2463 struct ifnet *ifp = &ec->ec_if;
2465 struct gem_softc *sc = ifp->if_softc; 2464 struct gem_softc *sc = ifp->if_softc;
2466 int change = ifp->if_flags ^ sc->sc_if_flags; 2465 int change = ifp->if_flags ^ sc->sc_if_flags;
2467 2466
2468 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) 2467 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
2469 return ENETRESET; 2468 return ENETRESET;
2470 else if ((change & IFF_PROMISC) != 0) 2469 else if ((change & IFF_PROMISC) != 0)
2471 gem_setladrf(sc); 2470 gem_setladrf(sc);
2472 return 0; 2471 return 0;
2473} 2472}
2474 2473
2475/* 2474/*
2476 * Process an ioctl request. 2475 * Process an ioctl request.
2477 */ 2476 */
2478int 2477int
2479gem_ioctl(struct ifnet *ifp, unsigned long cmd, void *data) 2478gem_ioctl(struct ifnet *ifp, unsigned long cmd, void *data)
2480{ 2479{
2481 struct gem_softc *sc = ifp->if_softc; 2480 struct gem_softc *sc = ifp->if_softc;
2482 int s, error = 0; 2481 int s, error = 0;
2483 2482
2484 s = splnet(); 2483 s = splnet();
2485 2484
2486 if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) { 2485 if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
2487 error = 0; 2486 error = 0;
2488 if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI) 2487 if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2489 ; 2488 ;
2490 else if (ifp->if_flags & IFF_RUNNING) { 2489 else if (ifp->if_flags & IFF_RUNNING) {
2491 /* 2490 /*
2492 * Multicast list has changed; set the hardware filter 2491 * Multicast list has changed; set the hardware filter
2493 * accordingly. 2492 * accordingly.
2494 */ 2493 */
2495 gem_setladrf(sc); 2494 gem_setladrf(sc);
2496 } 2495 }
2497 } 2496 }
2498 2497
2499 /* Try to get things going again */ 2498 /* Try to get things going again */
2500 if (ifp->if_flags & IFF_UP) 2499 if (ifp->if_flags & IFF_UP)
2501 gem_start(ifp); 2500 gem_start(ifp);
2502 splx(s); 2501 splx(s);
2503 return (error); 2502 return (error);
2504} 2503}
2505 2504
2506 2505
2507void 2506void
2508gem_shutdown(void *arg) 2507gem_shutdown(void *arg)
2509{ 2508{
2510 struct gem_softc *sc = (struct gem_softc *)arg; 2509 struct gem_softc *sc = (struct gem_softc *)arg;
2511 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2510 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2512 2511
2513 gem_stop(ifp, 1); 2512 gem_stop(ifp, 1);
2514} 2513}
2515 2514
2516/* 2515/*
2517 * Set up the logical address filter. 2516 * Set up the logical address filter.
2518 */ 2517 */
2519void 2518void
2520gem_setladrf(struct gem_softc *sc) 2519gem_setladrf(struct gem_softc *sc)
2521{ 2520{
2522 struct ethercom *ec = &sc->sc_ethercom; 2521 struct ethercom *ec = &sc->sc_ethercom;
2523 struct ifnet *ifp = &ec->ec_if; 2522 struct ifnet *ifp = &ec->ec_if;
2524 struct ether_multi *enm; 2523 struct ether_multi *enm;
2525 struct ether_multistep step; 2524 struct ether_multistep step;
2526 bus_space_tag_t t = sc->sc_bustag; 2525 bus_space_tag_t t = sc->sc_bustag;
2527 bus_space_handle_t h = sc->sc_h1; 2526 bus_space_handle_t h = sc->sc_h1;
2528 u_int32_t crc; 2527 u_int32_t crc;
2529 u_int32_t hash[16]; 2528 u_int32_t hash[16];
2530 u_int32_t v; 2529 u_int32_t v;
2531 int i; 2530 int i;
2532 2531
2533 /* Get current RX configuration */ 2532 /* Get current RX configuration */
2534 v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG); 2533 v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
2535 2534
2536 /* 2535 /*
2537 * Turn off promiscuous mode, promiscuous group mode (all multicast), 2536 * Turn off promiscuous mode, promiscuous group mode (all multicast),
2538 * and hash filter. Depending on the case, the right bit will be 2537 * and hash filter. Depending on the case, the right bit will be
2539 * enabled. 2538 * enabled.
2540 */ 2539 */
2541 v &= ~(GEM_MAC_RX_PROMISCUOUS|GEM_MAC_RX_HASH_FILTER| 2540 v &= ~(GEM_MAC_RX_PROMISCUOUS|GEM_MAC_RX_HASH_FILTER|
2542 GEM_MAC_RX_PROMISC_GRP); 2541 GEM_MAC_RX_PROMISC_GRP);
2543 2542
2544 if ((ifp->if_flags & IFF_PROMISC) != 0) { 2543 if ((ifp->if_flags & IFF_PROMISC) != 0) {
2545 /* Turn on promiscuous mode */ 2544 /* Turn on promiscuous mode */
2546 v |= GEM_MAC_RX_PROMISCUOUS; 2545 v |= GEM_MAC_RX_PROMISCUOUS;
2547 ifp->if_flags |= IFF_ALLMULTI; 2546 ifp->if_flags |= IFF_ALLMULTI;
2548 goto chipit; 2547 goto chipit;
2549 } 2548 }
2550 2549
2551 /* 2550 /*
2552 * Set up multicast address filter by passing all multicast addresses 2551 * Set up multicast address filter by passing all multicast addresses
2553 * through a crc generator, and then using the high order 8 bits as an 2552 * through a crc generator, and then using the high order 8 bits as an
2554 * index into the 256 bit logical address filter. The high order 4 2553 * index into the 256 bit logical address filter. The high order 4
 2555 * bits select the word, while the other 4 bits select the bit within 2554 * bits select the word, while the other 4 bits select the bit within
2556 * the word (where bit 0 is the MSB). 2555 * the word (where bit 0 is the MSB).
2557 */ 2556 */
2558 2557
2559 /* Clear hash table */ 2558 /* Clear hash table */
2560 memset(hash, 0, sizeof(hash)); 2559 memset(hash, 0, sizeof(hash));
2561 2560
2562 ETHER_FIRST_MULTI(step, ec, enm); 2561 ETHER_FIRST_MULTI(step, ec, enm);
2563 while (enm != NULL) { 2562 while (enm != NULL) {
2564 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 2563 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2565 /* 2564 /*
2566 * We must listen to a range of multicast addresses. 2565 * We must listen to a range of multicast addresses.
2567 * For now, just accept all multicasts, rather than 2566 * For now, just accept all multicasts, rather than
2568 * trying to set only those filter bits needed to match 2567 * trying to set only those filter bits needed to match
2569 * the range. (At this time, the only use of address 2568 * the range. (At this time, the only use of address
2570 * ranges is for IP multicast routing, for which the 2569 * ranges is for IP multicast routing, for which the
2571 * range is big enough to require all bits set.) 2570 * range is big enough to require all bits set.)
2572 * XXX should use the address filters for this 2571 * XXX should use the address filters for this
2573 */ 2572 */
2574 ifp->if_flags |= IFF_ALLMULTI; 2573 ifp->if_flags |= IFF_ALLMULTI;
2575 v |= GEM_MAC_RX_PROMISC_GRP; 2574 v |= GEM_MAC_RX_PROMISC_GRP;
2576 goto chipit; 2575 goto chipit;
2577 } 2576 }
2578 2577
2579 /* Get the LE CRC32 of the address */ 2578 /* Get the LE CRC32 of the address */
2580 crc = ether_crc32_le(enm->enm_addrlo, sizeof(enm->enm_addrlo)); 2579 crc = ether_crc32_le(enm->enm_addrlo, sizeof(enm->enm_addrlo));
2581 2580
2582 /* Just want the 8 most significant bits. */ 2581 /* Just want the 8 most significant bits. */
2583 crc >>= 24; 2582 crc >>= 24;
2584 2583
2585 /* Set the corresponding bit in the filter. */ 2584 /* Set the corresponding bit in the filter. */
2586 hash[crc >> 4] |= 1 << (15 - (crc & 15)); 2585 hash[crc >> 4] |= 1 << (15 - (crc & 15));
2587 2586
2588 ETHER_NEXT_MULTI(step, enm); 2587 ETHER_NEXT_MULTI(step, enm);
2589 } 2588 }
2590 2589
2591 v |= GEM_MAC_RX_HASH_FILTER; 2590 v |= GEM_MAC_RX_HASH_FILTER;
2592 ifp->if_flags &= ~IFF_ALLMULTI; 2591 ifp->if_flags &= ~IFF_ALLMULTI;
2593 2592
2594 /* Now load the hash table into the chip (if we are using it) */ 2593 /* Now load the hash table into the chip (if we are using it) */
2595 for (i = 0; i < 16; i++) { 2594 for (i = 0; i < 16; i++) {
2596 bus_space_write_4(t, h, 2595 bus_space_write_4(t, h,
2597 GEM_MAC_HASH0 + i * (GEM_MAC_HASH1-GEM_MAC_HASH0), 2596 GEM_MAC_HASH0 + i * (GEM_MAC_HASH1-GEM_MAC_HASH0),
2598 hash[i]); 2597 hash[i]);
2599 } 2598 }
2600 2599
2601chipit: 2600chipit:
2602 sc->sc_if_flags = ifp->if_flags; 2601 sc->sc_if_flags = ifp->if_flags;
2603 bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v); 2602 bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);
2604} 2603}
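
The word/bit arithmetic from gem_setladrf()'s comment reads more
easily in isolation; a minimal sketch (the helper is hypothetical,
and ether_crc32_le() is assumed to behave as NetBSD's
<net/if_ether.h> declares it):

	/*
	 * Map a multicast address to its slot in the 256-bit logical
	 * address filter: take the top 8 bits of the little-endian
	 * CRC32, let the high nibble pick one of 16 hash words and
	 * the low nibble pick the bit, counted from the MSB.
	 */
	static void
	ladrf_pos(const uint8_t *addr, int *word, int *bit)
	{
		uint32_t crc = ether_crc32_le(addr, ETHER_ADDR_LEN) >> 24;

		*word = crc >> 4;
		*bit = 15 - (crc & 15);
	}
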
2605 2604
2606#if notyet 2605#if notyet
2607 2606
2608/* 2607/*
2609 * gem_power: 2608 * gem_power:
2610 * 2609 *
2611 * Power management (suspend/resume) hook. 2610 * Power management (suspend/resume) hook.
2612 */ 2611 */
2613void 2612void
2614gem_power(int why, void *arg) 2613gem_power(int why, void *arg)
2615{ 2614{
2616 struct gem_softc *sc = arg; 2615 struct gem_softc *sc = arg;
2617 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2616 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2618 int s; 2617 int s;
2619 2618
2620 s = splnet(); 2619 s = splnet();
2621 switch (why) { 2620 switch (why) {
2622 case PWR_SUSPEND: 2621 case PWR_SUSPEND:
2623 case PWR_STANDBY: 2622 case PWR_STANDBY:
2624 gem_stop(ifp, 1); 2623 gem_stop(ifp, 1);
2625 if (sc->sc_power != NULL) 2624 if (sc->sc_power != NULL)
2626 (*sc->sc_power)(sc, why); 2625 (*sc->sc_power)(sc, why);
2627 break; 2626 break;
2628 case PWR_RESUME: 2627 case PWR_RESUME:
2629 if (ifp->if_flags & IFF_UP) { 2628 if (ifp->if_flags & IFF_UP) {
2630 if (sc->sc_power != NULL) 2629 if (sc->sc_power != NULL)
2631 (*sc->sc_power)(sc, why); 2630 (*sc->sc_power)(sc, why);
2632 gem_init(ifp); 2631 gem_init(ifp);
2633 } 2632 }
2634 break; 2633 break;
2635 case PWR_SOFTSUSPEND: 2634 case PWR_SOFTSUSPEND:
2636 case PWR_SOFTSTANDBY: 2635 case PWR_SOFTSTANDBY:
2637 case PWR_SOFTRESUME: 2636 case PWR_SOFTRESUME:
2638 break; 2637 break;
2639 } 2638 }
2640 splx(s); 2639 splx(s);
2641} 2640}
2642#endif 2641#endif

cvs diff -r1.72 -r1.73 src/sys/dev/ic/hme.c

--- src/sys/dev/ic/hme.c 2009/03/14 21:04:20 1.72
+++ src/sys/dev/ic/hme.c 2009/03/16 12:02:00 1.73
@@ -1,1605 +1,1604 @@ @@ -1,1605 +1,1604 @@
1/* $NetBSD: hme.c,v 1.72 2009/03/14 21:04:20 dsl Exp $ */ 1/* $NetBSD: hme.c,v 1.73 2009/03/16 12:02:00 tsutsui Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1999 The NetBSD Foundation, Inc. 4 * Copyright (c) 1999 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Paul Kranenburg. 8 * by Paul Kranenburg.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32/* 32/*
33 * HME Ethernet module driver. 33 * HME Ethernet module driver.
34 */ 34 */
35 35
36#include <sys/cdefs.h> 36#include <sys/cdefs.h>
37__KERNEL_RCSID(0, "$NetBSD: hme.c,v 1.72 2009/03/14 21:04:20 dsl Exp $"); 37__KERNEL_RCSID(0, "$NetBSD: hme.c,v 1.73 2009/03/16 12:02:00 tsutsui Exp $");
38 38
39/* #define HMEDEBUG */ 39/* #define HMEDEBUG */
40 40
41#include "opt_inet.h" 41#include "opt_inet.h"
42#include "bpfilter.h" 42#include "bpfilter.h"
43#include "rnd.h" 43#include "rnd.h"
44 44
45#include <sys/param.h> 45#include <sys/param.h>
46#include <sys/systm.h> 46#include <sys/systm.h>
47#include <sys/kernel.h> 47#include <sys/kernel.h>
48#include <sys/mbuf.h> 48#include <sys/mbuf.h>
49#include <sys/syslog.h> 49#include <sys/syslog.h>
50#include <sys/socket.h> 50#include <sys/socket.h>
51#include <sys/device.h> 51#include <sys/device.h>
52#include <sys/malloc.h> 52#include <sys/malloc.h>
53#include <sys/ioctl.h> 53#include <sys/ioctl.h>
54#include <sys/errno.h> 54#include <sys/errno.h>
55#if NRND > 0 55#if NRND > 0
56#include <sys/rnd.h> 56#include <sys/rnd.h>
57#endif 57#endif
58 58
59#include <net/if.h> 59#include <net/if.h>
60#include <net/if_dl.h> 60#include <net/if_dl.h>
61#include <net/if_ether.h> 61#include <net/if_ether.h>
62#include <net/if_media.h> 62#include <net/if_media.h>
63 63
64#ifdef INET 64#ifdef INET
65#include <netinet/in.h> 65#include <netinet/in.h>
66#include <netinet/if_inarp.h> 66#include <netinet/if_inarp.h>
67#include <netinet/in_systm.h> 67#include <netinet/in_systm.h>
68#include <netinet/in_var.h> 68#include <netinet/in_var.h>
69#include <netinet/ip.h> 69#include <netinet/ip.h>
70#include <netinet/tcp.h> 70#include <netinet/tcp.h>
71#include <netinet/udp.h> 71#include <netinet/udp.h>
72#endif 72#endif
73 73
74 74
75#if NBPFILTER > 0 75#if NBPFILTER > 0
76#include <net/bpf.h> 76#include <net/bpf.h>
77#include <net/bpfdesc.h> 77#include <net/bpfdesc.h>
78#endif 78#endif
79 79
80#include <dev/mii/mii.h> 80#include <dev/mii/mii.h>
81#include <dev/mii/miivar.h> 81#include <dev/mii/miivar.h>
82 82
83#include <sys/bus.h> 83#include <sys/bus.h>
84 84
85#include <dev/ic/hmereg.h> 85#include <dev/ic/hmereg.h>
86#include <dev/ic/hmevar.h> 86#include <dev/ic/hmevar.h>
87 87
88void hme_start(struct ifnet *); 88void hme_start(struct ifnet *);
89void hme_stop(struct hme_softc *,bool); 89void hme_stop(struct hme_softc *,bool);
90int hme_ioctl(struct ifnet *, u_long, void *); 90int hme_ioctl(struct ifnet *, u_long, void *);
91void hme_tick(void *); 91void hme_tick(void *);
92void hme_watchdog(struct ifnet *); 92void hme_watchdog(struct ifnet *);
93void hme_shutdown(void *); 93void hme_shutdown(void *);
94int hme_init(struct hme_softc *); 94int hme_init(struct hme_softc *);
95void hme_meminit(struct hme_softc *); 95void hme_meminit(struct hme_softc *);
96void hme_mifinit(struct hme_softc *); 96void hme_mifinit(struct hme_softc *);
97void hme_reset(struct hme_softc *); 97void hme_reset(struct hme_softc *);
98void hme_setladrf(struct hme_softc *); 98void hme_setladrf(struct hme_softc *);
99 99
100/* MII methods & callbacks */ 100/* MII methods & callbacks */
101static int hme_mii_readreg(struct device *, int, int); 101static int hme_mii_readreg(struct device *, int, int);
102static void hme_mii_writereg(struct device *, int, int, int); 102static void hme_mii_writereg(struct device *, int, int, int);
103static void hme_mii_statchg(struct device *); 103static void hme_mii_statchg(struct device *);
104 104
105int hme_mediachange(struct ifnet *); 105int hme_mediachange(struct ifnet *);
106 106
107struct mbuf *hme_get(struct hme_softc *, int, uint32_t); 107struct mbuf *hme_get(struct hme_softc *, int, uint32_t);
108int hme_put(struct hme_softc *, int, struct mbuf *); 108int hme_put(struct hme_softc *, int, struct mbuf *);
109void hme_read(struct hme_softc *, int, uint32_t); 109void hme_read(struct hme_softc *, int, uint32_t);
110int hme_eint(struct hme_softc *, u_int); 110int hme_eint(struct hme_softc *, u_int);
111int hme_rint(struct hme_softc *); 111int hme_rint(struct hme_softc *);
112int hme_tint(struct hme_softc *); 112int hme_tint(struct hme_softc *);
113 113
114/* Default buffer copy routines */ 114/* Default buffer copy routines */
115void hme_copytobuf_contig(struct hme_softc *, void *, int, int); 115void hme_copytobuf_contig(struct hme_softc *, void *, int, int);
116void hme_copyfrombuf_contig(struct hme_softc *, void *, int, int); 116void hme_copyfrombuf_contig(struct hme_softc *, void *, int, int);
117void hme_zerobuf_contig(struct hme_softc *, int, int); 117void hme_zerobuf_contig(struct hme_softc *, int, int);
118 118
119 119
120void 120void
121hme_config(struct hme_softc *sc) 121hme_config(struct hme_softc *sc)
122{ 122{
123 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 123 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
124 struct mii_data *mii = &sc->sc_mii; 124 struct mii_data *mii = &sc->sc_mii;
125 struct mii_softc *child; 125 struct mii_softc *child;
126 bus_dma_tag_t dmatag = sc->sc_dmatag; 126 bus_dma_tag_t dmatag = sc->sc_dmatag;
127 bus_dma_segment_t seg; 127 bus_dma_segment_t seg;
128 bus_size_t size; 128 bus_size_t size;
129 int rseg, error; 129 int rseg, error;
130 130
131 /* 131 /*
132 * HME common initialization. 132 * HME common initialization.
133 * 133 *
134 * hme_softc fields that must be initialized by the front-end: 134 * hme_softc fields that must be initialized by the front-end:
135 * 135 *
136 * the bus tag: 136 * the bus tag:
137 * sc_bustag 137 * sc_bustag
138 * 138 *
139 * the DMA bus tag: 139 * the DMA bus tag:
140 * sc_dmatag 140 * sc_dmatag
141 * 141 *
142 * the bus handles: 142 * the bus handles:
143 * sc_seb (Shared Ethernet Block registers) 143 * sc_seb (Shared Ethernet Block registers)
144 * sc_erx (Receiver Unit registers) 144 * sc_erx (Receiver Unit registers)
145 * sc_etx (Transmitter Unit registers) 145 * sc_etx (Transmitter Unit registers)
146 * sc_mac (MAC registers) 146 * sc_mac (MAC registers)
147 * sc_mif (Management Interface registers) 147 * sc_mif (Management Interface registers)
148 * 148 *
149 * the maximum bus burst size: 149 * the maximum bus burst size:
150 * sc_burst 150 * sc_burst
151 * 151 *
152 * (notyet:DMA capable memory for the ring descriptors & packet buffers: 152 * (notyet:DMA capable memory for the ring descriptors & packet buffers:
153 * rb_membase, rb_dmabase) 153 * rb_membase, rb_dmabase)
154 * 154 *
155 * the local Ethernet address: 155 * the local Ethernet address:
156 * sc_enaddr 156 * sc_enaddr
157 * 157 *
158 */ 158 */
159 159
160 /* Make sure the chip is stopped. */ 160 /* Make sure the chip is stopped. */
161 hme_stop(sc, true); 161 hme_stop(sc, true);
162 162
163 163
164 /* 164 /*
165 * Allocate descriptors and buffers 165 * Allocate descriptors and buffers
166 * XXX - do all this differently.. and more configurably, 166 * XXX - do all this differently.. and more configurably,
 167 * e.g. use things like `dma_load_mbuf()' on transmit, 167 * e.g. use things like `dma_load_mbuf()' on transmit,
168 * and a pool of `EXTMEM' mbufs (with buffers DMA-mapped 168 * and a pool of `EXTMEM' mbufs (with buffers DMA-mapped
169 * all the time) on the receiver side. 169 * all the time) on the receiver side.
170 * 170 *
171 * Note: receive buffers must be 64-byte aligned. 171 * Note: receive buffers must be 64-byte aligned.
172 * Also, apparently, the buffers must extend to a DMA burst 172 * Also, apparently, the buffers must extend to a DMA burst
173 * boundary beyond the maximum packet size. 173 * boundary beyond the maximum packet size.
174 */ 174 */
175#define _HME_NDESC 128 175#define _HME_NDESC 128
176#define _HME_BUFSZ 1600 176#define _HME_BUFSZ 1600
177 177
178 /* Note: the # of descriptors must be a multiple of 16 */ 178 /* Note: the # of descriptors must be a multiple of 16 */
179 sc->sc_rb.rb_ntbuf = _HME_NDESC; 179 sc->sc_rb.rb_ntbuf = _HME_NDESC;
180 sc->sc_rb.rb_nrbuf = _HME_NDESC; 180 sc->sc_rb.rb_nrbuf = _HME_NDESC;
181 181
182 /* 182 /*
183 * Allocate DMA capable memory 183 * Allocate DMA capable memory
184 * Buffer descriptors must be aligned on a 2048 byte boundary; 184 * Buffer descriptors must be aligned on a 2048 byte boundary;
185 * take this into account when calculating the size. Note that 185 * take this into account when calculating the size. Note that
186 * the maximum number of descriptors (256) occupies 2048 bytes, 186 * the maximum number of descriptors (256) occupies 2048 bytes,
187 * so we allocate that much regardless of _HME_NDESC. 187 * so we allocate that much regardless of _HME_NDESC.
188 */ 188 */
189 size = 2048 + /* TX descriptors */ 189 size = 2048 + /* TX descriptors */
190 2048 + /* RX descriptors */ 190 2048 + /* RX descriptors */
191 sc->sc_rb.rb_ntbuf * _HME_BUFSZ + /* TX buffers */ 191 sc->sc_rb.rb_ntbuf * _HME_BUFSZ + /* TX buffers */
192 sc->sc_rb.rb_nrbuf * _HME_BUFSZ; /* RX buffers */ 192 sc->sc_rb.rb_nrbuf * _HME_BUFSZ; /* RX buffers */
193 193
194 /* Allocate DMA buffer */ 194 /* Allocate DMA buffer */
195 if ((error = bus_dmamem_alloc(dmatag, size, 195 if ((error = bus_dmamem_alloc(dmatag, size,
196 2048, 0, 196 2048, 0,
197 &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) { 197 &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
198 aprint_error_dev(&sc->sc_dev, "DMA buffer alloc error %d\n", 198 aprint_error_dev(&sc->sc_dev, "DMA buffer alloc error %d\n",
199 error); 199 error);
200 return; 200 return;
201 } 201 }
202 202
203 /* Map DMA memory in CPU addressable space */ 203 /* Map DMA memory in CPU addressable space */
204 if ((error = bus_dmamem_map(dmatag, &seg, rseg, size, 204 if ((error = bus_dmamem_map(dmatag, &seg, rseg, size,
205 &sc->sc_rb.rb_membase, 205 &sc->sc_rb.rb_membase,
206 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) { 206 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
207 aprint_error_dev(&sc->sc_dev, "DMA buffer map error %d\n", 207 aprint_error_dev(&sc->sc_dev, "DMA buffer map error %d\n",
208 error); 208 error);
209 bus_dmamap_unload(dmatag, sc->sc_dmamap); 209 bus_dmamap_unload(dmatag, sc->sc_dmamap);
210 bus_dmamem_free(dmatag, &seg, rseg); 210 bus_dmamem_free(dmatag, &seg, rseg);
211 return; 211 return;
212 } 212 }
213 213
214 if ((error = bus_dmamap_create(dmatag, size, 1, size, 0, 214 if ((error = bus_dmamap_create(dmatag, size, 1, size, 0,
215 BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) { 215 BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
216 aprint_error_dev(&sc->sc_dev, "DMA map create error %d\n", 216 aprint_error_dev(&sc->sc_dev, "DMA map create error %d\n",
217 error); 217 error);
218 return; 218 return;
219 } 219 }
220 220
221 /* Load the buffer */ 221 /* Load the buffer */
222 if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap, 222 if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap,
223 sc->sc_rb.rb_membase, size, NULL, 223 sc->sc_rb.rb_membase, size, NULL,
224 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) { 224 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
225 aprint_error_dev(&sc->sc_dev, "DMA buffer map load error %d\n", 225 aprint_error_dev(&sc->sc_dev, "DMA buffer map load error %d\n",
226 error); 226 error);
227 bus_dmamem_free(dmatag, &seg, rseg); 227 bus_dmamem_free(dmatag, &seg, rseg);
228 return; 228 return;
229 } 229 }
230 sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr; 230 sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr;
231 231
232 printf("%s: Ethernet address %s\n", device_xname(&sc->sc_dev), 232 printf("%s: Ethernet address %s\n", device_xname(&sc->sc_dev),
233 ether_sprintf(sc->sc_enaddr)); 233 ether_sprintf(sc->sc_enaddr));
234 234
235 /* Initialize ifnet structure. */ 235 /* Initialize ifnet structure. */
236 strlcpy(ifp->if_xname, device_xname(&sc->sc_dev), IFNAMSIZ); 236 strlcpy(ifp->if_xname, device_xname(&sc->sc_dev), IFNAMSIZ);
237 ifp->if_softc = sc; 237 ifp->if_softc = sc;
238 ifp->if_start = hme_start; 238 ifp->if_start = hme_start;
239 ifp->if_ioctl = hme_ioctl; 239 ifp->if_ioctl = hme_ioctl;
240 ifp->if_watchdog = hme_watchdog; 240 ifp->if_watchdog = hme_watchdog;
241 ifp->if_flags = 241 ifp->if_flags =
242 IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST; 242 IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
243 sc->sc_if_flags = ifp->if_flags; 243 sc->sc_if_flags = ifp->if_flags;
244 ifp->if_capabilities |= 244 ifp->if_capabilities |=
245 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 245 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
246 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; 246 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
247 IFQ_SET_READY(&ifp->if_snd); 247 IFQ_SET_READY(&ifp->if_snd);
248 248
249 /* Initialize ifmedia structures and MII info */ 249 /* Initialize ifmedia structures and MII info */
250 mii->mii_ifp = ifp; 250 mii->mii_ifp = ifp;
251 mii->mii_readreg = hme_mii_readreg; 251 mii->mii_readreg = hme_mii_readreg;
252 mii->mii_writereg = hme_mii_writereg; 252 mii->mii_writereg = hme_mii_writereg;
253 mii->mii_statchg = hme_mii_statchg; 253 mii->mii_statchg = hme_mii_statchg;
254 254
255 sc->sc_ethercom.ec_mii = mii; 255 sc->sc_ethercom.ec_mii = mii;
256 ifmedia_init(&mii->mii_media, 0, hme_mediachange, ether_mediastatus); 256 ifmedia_init(&mii->mii_media, 0, hme_mediachange, ether_mediastatus);
257 257
258 hme_mifinit(sc); 258 hme_mifinit(sc);
259 259
260 mii_attach(&sc->sc_dev, mii, 0xffffffff, 260 mii_attach(&sc->sc_dev, mii, 0xffffffff,
261 MII_PHY_ANY, MII_OFFSET_ANY, MIIF_FORCEANEG); 261 MII_PHY_ANY, MII_OFFSET_ANY, MIIF_FORCEANEG);
262 262
263 child = LIST_FIRST(&mii->mii_phys); 263 child = LIST_FIRST(&mii->mii_phys);
264 if (child == NULL) { 264 if (child == NULL) {
265 /* No PHY attached */ 265 /* No PHY attached */
266 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL); 266 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
267 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL); 267 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
268 } else { 268 } else {
269 /* 269 /*
270 * Walk along the list of attached MII devices and 270 * Walk along the list of attached MII devices and
271 * establish an `MII instance' to `phy number' 271 * establish an `MII instance' to `phy number'
272 * mapping. We'll use this mapping in media change 272 * mapping. We'll use this mapping in media change
273 * requests to determine which phy to use to program 273 * requests to determine which phy to use to program
274 * the MIF configuration register. 274 * the MIF configuration register.
275 */ 275 */
276 for (; child != NULL; child = LIST_NEXT(child, mii_list)) { 276 for (; child != NULL; child = LIST_NEXT(child, mii_list)) {
277 /* 277 /*
278 * Note: we support just two PHYs: the built-in 278 * Note: we support just two PHYs: the built-in
279 * internal device and an external on the MII 279 * internal device and an external on the MII
280 * connector. 280 * connector.
281 */ 281 */
282 if (child->mii_phy > 1 || child->mii_inst > 1) { 282 if (child->mii_phy > 1 || child->mii_inst > 1) {
283 aprint_error_dev(&sc->sc_dev, "cannot accommodate MII device %s" 283 aprint_error_dev(&sc->sc_dev, "cannot accommodate MII device %s"
284 " at phy %d, instance %d\n", 284 " at phy %d, instance %d\n",
285 device_xname(child->mii_dev), 285 device_xname(child->mii_dev),
286 child->mii_phy, child->mii_inst); 286 child->mii_phy, child->mii_inst);
287 continue; 287 continue;
288 } 288 }
289 289
290 sc->sc_phys[child->mii_inst] = child->mii_phy; 290 sc->sc_phys[child->mii_inst] = child->mii_phy;
291 } 291 }
292 292
293 /* 293 /*
294 * XXX - we can really do the following ONLY if the 294 * XXX - we can really do the following ONLY if the
295 * phy indeed has the auto negotiation capability!! 295 * phy indeed has the auto negotiation capability!!
296 */ 296 */
297 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 297 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
298 } 298 }
299 299
300 /* claim 802.1q capability */ 300 /* claim 802.1q capability */
301 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; 301 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
302 302
303 /* Attach the interface. */ 303 /* Attach the interface. */
304 if_attach(ifp); 304 if_attach(ifp);
305 ether_ifattach(ifp, sc->sc_enaddr); 305 ether_ifattach(ifp, sc->sc_enaddr);
306 306
307 sc->sc_sh = shutdownhook_establish(hme_shutdown, sc); 307 sc->sc_sh = shutdownhook_establish(hme_shutdown, sc);
308 if (sc->sc_sh == NULL) 308 if (sc->sc_sh == NULL)
309 panic("hme_config: can't establish shutdownhook"); 309 panic("hme_config: can't establish shutdownhook");
310 310
311#if NRND > 0 311#if NRND > 0
312 rnd_attach_source(&sc->rnd_source, device_xname(&sc->sc_dev), 312 rnd_attach_source(&sc->rnd_source, device_xname(&sc->sc_dev),
313 RND_TYPE_NET, 0); 313 RND_TYPE_NET, 0);
314#endif 314#endif
315 315
316 callout_init(&sc->sc_tick_ch, 0); 316 callout_init(&sc->sc_tick_ch, 0);
317} 317}
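A note on the unwind order in hme_config() above: the bus_dmamem_map() failure path calls bus_dmamap_unload() on sc_dmamap, but bus_dmamap_create() has not yet run at that point, so the call operates on a map that was never created. The conventional bus_dma(9) sequence with a mirrored unwind looks like the following minimal sketch (kernel context assumed; the function and label names are illustrative, not the driver's):

#include <sys/param.h>
#include <sys/bus.h>

/*
 * Sketch: alloc -> map -> create -> load, unwinding in reverse on error.
 * Single-segment allocation assumed, as in hme_config().
 */
static int
dma_setup_sketch(bus_dma_tag_t t, bus_size_t size, bus_dma_segment_t *seg,
    void **kvap, bus_dmamap_t *mapp)
{
	int rseg, error;

	if ((error = bus_dmamem_alloc(t, size, 2048, 0, seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0)
		return error;
	if ((error = bus_dmamem_map(t, seg, rseg, size, kvap,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0)
		goto fail_free;
	if ((error = bus_dmamap_create(t, size, 1, size, 0,
	    BUS_DMA_NOWAIT, mapp)) != 0)
		goto fail_unmap;
	if ((error = bus_dmamap_load(t, *mapp, *kvap, size, NULL,
	    BUS_DMA_NOWAIT)) != 0)
		goto fail_destroy;
	return 0;

fail_destroy:
	bus_dmamap_destroy(t, *mapp);
fail_unmap:
	bus_dmamem_unmap(t, *kvap, size);
fail_free:
	bus_dmamem_free(t, seg, rseg);
	return error;
}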
318 318
319void 319void
320hme_tick(void *arg) 320hme_tick(void *arg)
321{ 321{
322 struct hme_softc *sc = arg; 322 struct hme_softc *sc = arg;
323 int s; 323 int s;
324 324
325 s = splnet(); 325 s = splnet();
326 mii_tick(&sc->sc_mii); 326 mii_tick(&sc->sc_mii);
327 splx(s); 327 splx(s);
328 328
329 callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc); 329 callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
330} 330}
331 331
332void 332void
333hme_reset(struct hme_softc *sc) 333hme_reset(struct hme_softc *sc)
334{ 334{
335 int s; 335 int s;
336 336
337 s = splnet(); 337 s = splnet();
338 (void)hme_init(sc); 338 (void)hme_init(sc);
339 splx(s); 339 splx(s);
340} 340}
341 341
342void 342void
343hme_stop(struct hme_softc *sc, bool chip_only) 343hme_stop(struct hme_softc *sc, bool chip_only)
344{ 344{
345 bus_space_tag_t t = sc->sc_bustag; 345 bus_space_tag_t t = sc->sc_bustag;
346 bus_space_handle_t seb = sc->sc_seb; 346 bus_space_handle_t seb = sc->sc_seb;
347 int n; 347 int n;
348 348
349 if (!chip_only) { 349 if (!chip_only) {
350 callout_stop(&sc->sc_tick_ch); 350 callout_stop(&sc->sc_tick_ch);
351 mii_down(&sc->sc_mii); 351 mii_down(&sc->sc_mii);
352 } 352 }
353 353
354 /* Mask all interrupts */ 354 /* Mask all interrupts */
355 bus_space_write_4(t, seb, HME_SEBI_IMASK, 0xffffffff); 355 bus_space_write_4(t, seb, HME_SEBI_IMASK, 0xffffffff);
356 356
357 /* Reset transmitter and receiver */ 357 /* Reset transmitter and receiver */
358 bus_space_write_4(t, seb, HME_SEBI_RESET, 358 bus_space_write_4(t, seb, HME_SEBI_RESET,
359 (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)); 359 (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX));
360 360
361 for (n = 0; n < 20; n++) { 361 for (n = 0; n < 20; n++) {
362 u_int32_t v = bus_space_read_4(t, seb, HME_SEBI_RESET); 362 u_int32_t v = bus_space_read_4(t, seb, HME_SEBI_RESET);
363 if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0) 363 if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
364 return; 364 return;
365 DELAY(20); 365 DELAY(20);
366 } 366 }
367 367
368 printf("%s: hme_stop: reset failed\n", device_xname(&sc->sc_dev)); 368 printf("%s: hme_stop: reset failed\n", device_xname(&sc->sc_dev));
369} 369}
370 370
371void 371void
372hme_meminit(struct hme_softc *sc) 372hme_meminit(struct hme_softc *sc)
373{ 373{
374 bus_addr_t txbufdma, rxbufdma; 374 bus_addr_t txbufdma, rxbufdma;
375 bus_addr_t dma; 375 bus_addr_t dma;
376 char *p; 376 char *p;
377 unsigned int ntbuf, nrbuf, i; 377 unsigned int ntbuf, nrbuf, i;
378 struct hme_ring *hr = &sc->sc_rb; 378 struct hme_ring *hr = &sc->sc_rb;
379 379
380 p = hr->rb_membase; 380 p = hr->rb_membase;
381 dma = hr->rb_dmabase; 381 dma = hr->rb_dmabase;
382 382
383 ntbuf = hr->rb_ntbuf; 383 ntbuf = hr->rb_ntbuf;
384 nrbuf = hr->rb_nrbuf; 384 nrbuf = hr->rb_nrbuf;
385 385
386 /* 386 /*
387 * Allocate transmit descriptors 387 * Allocate transmit descriptors
388 */ 388 */
389 hr->rb_txd = p; 389 hr->rb_txd = p;
390 hr->rb_txddma = dma; 390 hr->rb_txddma = dma;
391 p += ntbuf * HME_XD_SIZE; 391 p += ntbuf * HME_XD_SIZE;
392 dma += ntbuf * HME_XD_SIZE; 392 dma += ntbuf * HME_XD_SIZE;
 393 /* We have reserved descriptor space until the next 2048 byte boundary. */ 393 /* We have reserved descriptor space until the next 2048 byte boundary. */
394 dma = (bus_addr_t)roundup((u_long)dma, 2048); 394 dma = (bus_addr_t)roundup((u_long)dma, 2048);
395 p = (void *)roundup((u_long)p, 2048); 395 p = (void *)roundup((u_long)p, 2048);
396 396
397 /* 397 /*
398 * Allocate receive descriptors 398 * Allocate receive descriptors
399 */ 399 */
400 hr->rb_rxd = p; 400 hr->rb_rxd = p;
401 hr->rb_rxddma = dma; 401 hr->rb_rxddma = dma;
402 p += nrbuf * HME_XD_SIZE; 402 p += nrbuf * HME_XD_SIZE;
403 dma += nrbuf * HME_XD_SIZE; 403 dma += nrbuf * HME_XD_SIZE;
404 /* Again move forward to the next 2048 byte boundary.*/ 404 /* Again move forward to the next 2048 byte boundary.*/
405 dma = (bus_addr_t)roundup((u_long)dma, 2048); 405 dma = (bus_addr_t)roundup((u_long)dma, 2048);
406 p = (void *)roundup((u_long)p, 2048); 406 p = (void *)roundup((u_long)p, 2048);
407 407
408 408
409 /* 409 /*
410 * Allocate transmit buffers 410 * Allocate transmit buffers
411 */ 411 */
412 hr->rb_txbuf = p; 412 hr->rb_txbuf = p;
413 txbufdma = dma; 413 txbufdma = dma;
414 p += ntbuf * _HME_BUFSZ; 414 p += ntbuf * _HME_BUFSZ;
415 dma += ntbuf * _HME_BUFSZ; 415 dma += ntbuf * _HME_BUFSZ;
416 416
417 /* 417 /*
418 * Allocate receive buffers 418 * Allocate receive buffers
419 */ 419 */
420 hr->rb_rxbuf = p; 420 hr->rb_rxbuf = p;
421 rxbufdma = dma; 421 rxbufdma = dma;
422 p += nrbuf * _HME_BUFSZ; 422 p += nrbuf * _HME_BUFSZ;
423 dma += nrbuf * _HME_BUFSZ; 423 dma += nrbuf * _HME_BUFSZ;
424 424
425 /* 425 /*
426 * Initialize transmit buffer descriptors 426 * Initialize transmit buffer descriptors
427 */ 427 */
428 for (i = 0; i < ntbuf; i++) { 428 for (i = 0; i < ntbuf; i++) {
429 HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, txbufdma + i * _HME_BUFSZ); 429 HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, txbufdma + i * _HME_BUFSZ);
430 HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0); 430 HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0);
431 } 431 }
432 432
433 /* 433 /*
434 * Initialize receive buffer descriptors 434 * Initialize receive buffer descriptors
435 */ 435 */
436 for (i = 0; i < nrbuf; i++) { 436 for (i = 0; i < nrbuf; i++) {
437 HME_XD_SETADDR(sc->sc_pci, hr->rb_rxd, i, rxbufdma + i * _HME_BUFSZ); 437 HME_XD_SETADDR(sc->sc_pci, hr->rb_rxd, i, rxbufdma + i * _HME_BUFSZ);
438 HME_XD_SETFLAGS(sc->sc_pci, hr->rb_rxd, i, 438 HME_XD_SETFLAGS(sc->sc_pci, hr->rb_rxd, i,
439 HME_XD_OWN | HME_XD_ENCODE_RSIZE(_HME_BUFSZ)); 439 HME_XD_OWN | HME_XD_ENCODE_RSIZE(_HME_BUFSZ));
440 } 440 }
441 441
442 hr->rb_tdhead = hr->rb_tdtail = 0; 442 hr->rb_tdhead = hr->rb_tdtail = 0;
443 hr->rb_td_nbusy = 0; 443 hr->rb_td_nbusy = 0;
444 hr->rb_rdtail = 0; 444 hr->rb_rdtail = 0;
445} 445}
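hme_meminit() above advances the CPU pointer and the DMA address in lockstep, rounding both up to the chip's 2048-byte descriptor alignment after each ring. The address arithmetic in isolation, as a userland sketch (the 8-byte descriptor size is an assumption standing in for HME_XD_SIZE):

#include <stdio.h>

/* Round x up to the next multiple of a power-of-two alignment. */
#define ROUNDUP2(x, align) (((x) + (align) - 1) & ~((unsigned long)(align) - 1))

int
main(void)
{
	unsigned long dma = 0;			/* 2048-aligned base assumed */
	const unsigned long ndesc = 128;	/* _HME_NDESC */
	const unsigned long xdsize = 8;		/* HME_XD_SIZE, assumed */
	const unsigned long bufsz = 1600;	/* _HME_BUFSZ */

	unsigned long txd = dma;
	dma = ROUNDUP2(dma + ndesc * xdsize, 2048);	/* 1024 -> 2048 */
	unsigned long rxd = dma;
	dma = ROUNDUP2(dma + ndesc * xdsize, 2048);	/* 3072 -> 4096 */
	unsigned long txbuf = dma;
	dma += ndesc * bufsz;
	unsigned long rxbuf = dma;

	printf("txd %lu rxd %lu txbuf %lu rxbuf %lu\n", txd, rxd, txbuf, rxbuf);
	return 0;
}

With 128 descriptors of 8 bytes, each ring occupies only 1024 bytes, so the roundup pads each descriptor region out to the full 2048 bytes that the allocation in hme_config() reserves.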
446 446
447/* 447/*
448 * Initialization of interface; set up initialization block 448 * Initialization of interface; set up initialization block
449 * and transmit/receive descriptor rings. 449 * and transmit/receive descriptor rings.
450 */ 450 */
451int 451int
452hme_init(struct hme_softc *sc) 452hme_init(struct hme_softc *sc)
453{ 453{
454 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 454 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
455 bus_space_tag_t t = sc->sc_bustag; 455 bus_space_tag_t t = sc->sc_bustag;
456 bus_space_handle_t seb = sc->sc_seb; 456 bus_space_handle_t seb = sc->sc_seb;
457 bus_space_handle_t etx = sc->sc_etx; 457 bus_space_handle_t etx = sc->sc_etx;
458 bus_space_handle_t erx = sc->sc_erx; 458 bus_space_handle_t erx = sc->sc_erx;
459 bus_space_handle_t mac = sc->sc_mac; 459 bus_space_handle_t mac = sc->sc_mac;
460 u_int8_t *ea; 460 u_int8_t *ea;
461 u_int32_t v; 461 u_int32_t v;
462 int rc; 462 int rc;
463 463
464 /* 464 /*
465 * Initialization sequence. The numbered steps below correspond 465 * Initialization sequence. The numbered steps below correspond
466 * to the sequence outlined in section 6.3.5.1 in the Ethernet 466 * to the sequence outlined in section 6.3.5.1 in the Ethernet
467 * Channel Engine manual (part of the PCIO manual). 467 * Channel Engine manual (part of the PCIO manual).
468 * See also the STP2002-STQ document from Sun Microsystems. 468 * See also the STP2002-STQ document from Sun Microsystems.
469 */ 469 */
470 470
471 /* step 1 & 2. Reset the Ethernet Channel */ 471 /* step 1 & 2. Reset the Ethernet Channel */
472 hme_stop(sc, false); 472 hme_stop(sc, false);
473 473
474 /* Re-initialize the MIF */ 474 /* Re-initialize the MIF */
475 hme_mifinit(sc); 475 hme_mifinit(sc);
476 476
477 /* Call MI reset function if any */ 477 /* Call MI reset function if any */
478 if (sc->sc_hwreset) 478 if (sc->sc_hwreset)
479 (*sc->sc_hwreset)(sc); 479 (*sc->sc_hwreset)(sc);
480 480
481#if 0 481#if 0
482 /* Mask all MIF interrupts, just in case */ 482 /* Mask all MIF interrupts, just in case */
483 bus_space_write_4(t, mif, HME_MIFI_IMASK, 0xffff); 483 bus_space_write_4(t, mif, HME_MIFI_IMASK, 0xffff);
484#endif 484#endif
485 485
486 /* step 3. Setup data structures in host memory */ 486 /* step 3. Setup data structures in host memory */
487 hme_meminit(sc); 487 hme_meminit(sc);
488 488
489 /* step 4. TX MAC registers & counters */ 489 /* step 4. TX MAC registers & counters */
490 bus_space_write_4(t, mac, HME_MACI_NCCNT, 0); 490 bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
491 bus_space_write_4(t, mac, HME_MACI_FCCNT, 0); 491 bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
492 bus_space_write_4(t, mac, HME_MACI_EXCNT, 0); 492 bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
493 bus_space_write_4(t, mac, HME_MACI_LTCNT, 0); 493 bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);
494 bus_space_write_4(t, mac, HME_MACI_TXSIZE, 494 bus_space_write_4(t, mac, HME_MACI_TXSIZE,
495 (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) ? 495 (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) ?
496 ETHER_VLAN_ENCAP_LEN + ETHER_MAX_LEN : ETHER_MAX_LEN); 496 ETHER_VLAN_ENCAP_LEN + ETHER_MAX_LEN : ETHER_MAX_LEN);
497 sc->sc_ec_capenable = sc->sc_ethercom.ec_capenable; 497 sc->sc_ec_capenable = sc->sc_ethercom.ec_capenable;
498 498
499 /* Load station MAC address */ 499 /* Load station MAC address */
500 ea = sc->sc_enaddr; 500 ea = sc->sc_enaddr;
501 bus_space_write_4(t, mac, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]); 501 bus_space_write_4(t, mac, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
502 bus_space_write_4(t, mac, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]); 502 bus_space_write_4(t, mac, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
503 bus_space_write_4(t, mac, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]); 503 bus_space_write_4(t, mac, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);
504 504
505 /* 505 /*
506 * Init seed for backoff 506 * Init seed for backoff
507 * (source suggested by manual: low 10 bits of MAC address) 507 * (source suggested by manual: low 10 bits of MAC address)
508 */ 508 */
509 v = ((ea[4] << 8) | ea[5]) & 0x3fff; 509 v = ((ea[4] << 8) | ea[5]) & 0x3fff;
510 bus_space_write_4(t, mac, HME_MACI_RANDSEED, v); 510 bus_space_write_4(t, mac, HME_MACI_RANDSEED, v);
511 511
512 512
513 /* Note: Accepting power-on default for other MAC registers here.. */ 513 /* Note: Accepting power-on default for other MAC registers here.. */
514 514
515 515
516 /* step 5. RX MAC registers & counters */ 516 /* step 5. RX MAC registers & counters */
517 hme_setladrf(sc); 517 hme_setladrf(sc);
518 518
519 /* step 6 & 7. Program Descriptor Ring Base Addresses */ 519 /* step 6 & 7. Program Descriptor Ring Base Addresses */
520 bus_space_write_4(t, etx, HME_ETXI_RING, sc->sc_rb.rb_txddma); 520 bus_space_write_4(t, etx, HME_ETXI_RING, sc->sc_rb.rb_txddma);
521 bus_space_write_4(t, etx, HME_ETXI_RSIZE, sc->sc_rb.rb_ntbuf); 521 bus_space_write_4(t, etx, HME_ETXI_RSIZE, sc->sc_rb.rb_ntbuf);
522 522
523 bus_space_write_4(t, erx, HME_ERXI_RING, sc->sc_rb.rb_rxddma); 523 bus_space_write_4(t, erx, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
524 bus_space_write_4(t, mac, HME_MACI_RXSIZE, 524 bus_space_write_4(t, mac, HME_MACI_RXSIZE,
525 (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) ? 525 (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) ?
526 ETHER_VLAN_ENCAP_LEN + ETHER_MAX_LEN : ETHER_MAX_LEN); 526 ETHER_VLAN_ENCAP_LEN + ETHER_MAX_LEN : ETHER_MAX_LEN);
527 527
528 /* step 8. Global Configuration & Interrupt Mask */ 528 /* step 8. Global Configuration & Interrupt Mask */
529 bus_space_write_4(t, seb, HME_SEBI_IMASK, 529 bus_space_write_4(t, seb, HME_SEBI_IMASK,
530 ~( 530 ~(
531 /*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/ 531 /*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
532 HME_SEB_STAT_HOSTTOTX | 532 HME_SEB_STAT_HOSTTOTX |
533 HME_SEB_STAT_RXTOHOST | 533 HME_SEB_STAT_RXTOHOST |
534 HME_SEB_STAT_TXALL | 534 HME_SEB_STAT_TXALL |
535 HME_SEB_STAT_TXPERR | 535 HME_SEB_STAT_TXPERR |
536 HME_SEB_STAT_RCNTEXP | 536 HME_SEB_STAT_RCNTEXP |
537 /*HME_SEB_STAT_MIFIRQ |*/ 537 /*HME_SEB_STAT_MIFIRQ |*/
538 HME_SEB_STAT_ALL_ERRORS )); 538 HME_SEB_STAT_ALL_ERRORS ));
539 539
540 switch (sc->sc_burst) { 540 switch (sc->sc_burst) {
541 default: 541 default:
542 v = 0; 542 v = 0;
543 break; 543 break;
544 case 16: 544 case 16:
545 v = HME_SEB_CFG_BURST16; 545 v = HME_SEB_CFG_BURST16;
546 break; 546 break;
547 case 32: 547 case 32:
548 v = HME_SEB_CFG_BURST32; 548 v = HME_SEB_CFG_BURST32;
549 break; 549 break;
550 case 64: 550 case 64:
551 v = HME_SEB_CFG_BURST64; 551 v = HME_SEB_CFG_BURST64;
552 break; 552 break;
553 } 553 }
554 bus_space_write_4(t, seb, HME_SEBI_CFG, v); 554 bus_space_write_4(t, seb, HME_SEBI_CFG, v);
555 555
556 /* step 9. ETX Configuration: use mostly default values */ 556 /* step 9. ETX Configuration: use mostly default values */
557 557
558 /* Enable DMA */ 558 /* Enable DMA */
559 v = bus_space_read_4(t, etx, HME_ETXI_CFG); 559 v = bus_space_read_4(t, etx, HME_ETXI_CFG);
560 v |= HME_ETX_CFG_DMAENABLE; 560 v |= HME_ETX_CFG_DMAENABLE;
561 bus_space_write_4(t, etx, HME_ETXI_CFG, v); 561 bus_space_write_4(t, etx, HME_ETXI_CFG, v);
562 562
563 /* Transmit Descriptor ring size: in increments of 16 */ 563 /* Transmit Descriptor ring size: in increments of 16 */
564 bus_space_write_4(t, etx, HME_ETXI_RSIZE, _HME_NDESC / 16 - 1); 564 bus_space_write_4(t, etx, HME_ETXI_RSIZE, _HME_NDESC / 16 - 1);
565 565
566 566
567 /* step 10. ERX Configuration */ 567 /* step 10. ERX Configuration */
568 v = bus_space_read_4(t, erx, HME_ERXI_CFG); 568 v = bus_space_read_4(t, erx, HME_ERXI_CFG);
569 569
570 /* Encode Receive Descriptor ring size: four possible values */ 570 /* Encode Receive Descriptor ring size: four possible values */
571 switch (_HME_NDESC /*XXX*/) { 571 switch (_HME_NDESC /*XXX*/) {
572 case 32: 572 case 32:
573 v |= HME_ERX_CFG_RINGSIZE32; 573 v |= HME_ERX_CFG_RINGSIZE32;
574 break; 574 break;
575 case 64: 575 case 64:
576 v |= HME_ERX_CFG_RINGSIZE64; 576 v |= HME_ERX_CFG_RINGSIZE64;
577 break; 577 break;
578 case 128: 578 case 128:
579 v |= HME_ERX_CFG_RINGSIZE128; 579 v |= HME_ERX_CFG_RINGSIZE128;
580 break; 580 break;
581 case 256: 581 case 256:
582 v |= HME_ERX_CFG_RINGSIZE256; 582 v |= HME_ERX_CFG_RINGSIZE256;
583 break; 583 break;
584 default: 584 default:
585 printf("hme: invalid Receive Descriptor ring size\n"); 585 printf("hme: invalid Receive Descriptor ring size\n");
586 break; 586 break;
587 } 587 }
588 588
589 /* Enable DMA */ 589 /* Enable DMA */
590 v |= HME_ERX_CFG_DMAENABLE; 590 v |= HME_ERX_CFG_DMAENABLE;
591 591
592 /* set h/w rx checksum start offset (# of half-words) */ 592 /* set h/w rx checksum start offset (# of half-words) */
593#ifdef INET 593#ifdef INET
594 v |= (((ETHER_HDR_LEN + sizeof(struct ip) + 594 v |= (((ETHER_HDR_LEN + sizeof(struct ip) +
595 ((sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) ? 595 ((sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) ?
596 ETHER_VLAN_ENCAP_LEN : 0)) / 2) << HME_ERX_CFG_CSUMSHIFT) & 596 ETHER_VLAN_ENCAP_LEN : 0)) / 2) << HME_ERX_CFG_CSUMSHIFT) &
597 HME_ERX_CFG_CSUMSTART; 597 HME_ERX_CFG_CSUMSTART;
598#endif 598#endif
599 bus_space_write_4(t, erx, HME_ERXI_CFG, v); 599 bus_space_write_4(t, erx, HME_ERXI_CFG, v);
600 600
601 /* step 11. XIF Configuration */ 601 /* step 11. XIF Configuration */
602 v = bus_space_read_4(t, mac, HME_MACI_XIF); 602 v = bus_space_read_4(t, mac, HME_MACI_XIF);
603 v |= HME_MAC_XIF_OE; 603 v |= HME_MAC_XIF_OE;
604 bus_space_write_4(t, mac, HME_MACI_XIF, v); 604 bus_space_write_4(t, mac, HME_MACI_XIF, v);
605 605
606 /* step 12. RX_MAC Configuration Register */ 606 /* step 12. RX_MAC Configuration Register */
607 v = bus_space_read_4(t, mac, HME_MACI_RXCFG); 607 v = bus_space_read_4(t, mac, HME_MACI_RXCFG);
608 v |= HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_PSTRIP; 608 v |= HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_PSTRIP;
609 bus_space_write_4(t, mac, HME_MACI_RXCFG, v); 609 bus_space_write_4(t, mac, HME_MACI_RXCFG, v);
610 610
611 /* step 13. TX_MAC Configuration Register */ 611 /* step 13. TX_MAC Configuration Register */
612 v = bus_space_read_4(t, mac, HME_MACI_TXCFG); 612 v = bus_space_read_4(t, mac, HME_MACI_TXCFG);
613 v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP); 613 v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
614 bus_space_write_4(t, mac, HME_MACI_TXCFG, v); 614 bus_space_write_4(t, mac, HME_MACI_TXCFG, v);
615 615
616 /* step 14. Issue Transmit Pending command */ 616 /* step 14. Issue Transmit Pending command */
617 617
618 /* Call MI initialization function if any */ 618 /* Call MI initialization function if any */
619 if (sc->sc_hwinit) 619 if (sc->sc_hwinit)
620 (*sc->sc_hwinit)(sc); 620 (*sc->sc_hwinit)(sc);
621 621
622 /* Set the current media. */ 622 /* Set the current media. */
623 if ((rc = hme_mediachange(ifp)) != 0) 623 if ((rc = hme_mediachange(ifp)) != 0)
624 return rc; 624 return rc;
625 625
626 /* Start the one second timer. */ 626 /* Start the one second timer. */
627 callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc); 627 callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
628 628
629 ifp->if_flags |= IFF_RUNNING; 629 ifp->if_flags |= IFF_RUNNING;
630 ifp->if_flags &= ~IFF_OACTIVE; 630 ifp->if_flags &= ~IFF_OACTIVE;
631 sc->sc_if_flags = ifp->if_flags; 631 sc->sc_if_flags = ifp->if_flags;
632 ifp->if_timer = 0; 632 ifp->if_timer = 0;
633 hme_start(ifp); 633 hme_start(ifp);
634 return 0; 634 return 0;
635} 635}
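Step 10 above programs where the receiver starts checksumming as a count of 16-bit half-words from the beginning of the frame. For the plain (non-VLAN) case that is the offset of the transport header of an optionless IPv4 packet, worked out below (a sketch; the header sizes are the standard ones):

#include <stdio.h>

int
main(void)
{
	const int ether_hdr_len = 14;	/* ETHER_HDR_LEN */
	const int ip_hdr_len = 20;	/* sizeof(struct ip), no options */

	/* The chip takes the offset in 16-bit units: (14 + 20) / 2 = 17. */
	printf("csum start: %d half-words\n", (ether_hdr_len + ip_hdr_len) / 2);
	return 0;
}

Because the start is fixed at a minimal IP header, frames that carry IP options get those options included in the hardware sum; that is exactly the contribution hme_get() deducts later.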
636 636
637/* 637/*
638 * Routine to copy from mbuf chain to transmit buffer in 638 * Routine to copy from mbuf chain to transmit buffer in
639 * network buffer memory. 639 * network buffer memory.
640 * Returns the amount of data copied. 640 * Returns the amount of data copied.
641 */ 641 */
642int 642int
643hme_put(struct hme_softc *sc, int ri, struct mbuf *m) 643hme_put(struct hme_softc *sc, int ri, struct mbuf *m)
644 /* ri: Ring index */ 644 /* ri: Ring index */
645{ 645{
646 struct mbuf *n; 646 struct mbuf *n;
647 int len, tlen = 0; 647 int len, tlen = 0;
648 char *bp; 648 char *bp;
649 649
650 bp = (char *)sc->sc_rb.rb_txbuf + (ri % sc->sc_rb.rb_ntbuf) * _HME_BUFSZ; 650 bp = (char *)sc->sc_rb.rb_txbuf + (ri % sc->sc_rb.rb_ntbuf) * _HME_BUFSZ;
651 for (; m; m = n) { 651 for (; m; m = n) {
652 len = m->m_len; 652 len = m->m_len;
653 if (len == 0) { 653 if (len == 0) {
654 MFREE(m, n); 654 MFREE(m, n);
655 continue; 655 continue;
656 } 656 }
657 memcpy(bp, mtod(m, void *), len); 657 memcpy(bp, mtod(m, void *), len);
658 bp += len; 658 bp += len;
659 tlen += len; 659 tlen += len;
660 MFREE(m, n); 660 MFREE(m, n);
661 } 661 }
662 return (tlen); 662 return (tlen);
663} 663}
664 664
665/* 665/*
666 * Pull data off an interface. 666 * Pull data off an interface.
667 * Len is length of data, with local net header stripped. 667 * Len is length of data, with local net header stripped.
668 * We copy the data into mbufs. When full cluster sized units are present 668 * We copy the data into mbufs. When full cluster sized units are present
669 * we copy into clusters. 669 * we copy into clusters.
670 */ 670 */
671struct mbuf * 671struct mbuf *
672hme_get(struct hme_softc *sc, int ri, u_int32_t flags) 672hme_get(struct hme_softc *sc, int ri, u_int32_t flags)
673{ 673{
674 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 674 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
675 struct mbuf *m, *m0, *newm; 675 struct mbuf *m, *m0, *newm;
676 char *bp; 676 char *bp;
677 int len, totlen; 677 int len, totlen;
678 678
679 totlen = HME_XD_DECODE_RSIZE(flags); 679 totlen = HME_XD_DECODE_RSIZE(flags);
680 MGETHDR(m0, M_DONTWAIT, MT_DATA); 680 MGETHDR(m0, M_DONTWAIT, MT_DATA);
681 if (m0 == 0) 681 if (m0 == 0)
682 return (0); 682 return (0);
683 m0->m_pkthdr.rcvif = ifp; 683 m0->m_pkthdr.rcvif = ifp;
684 m0->m_pkthdr.len = totlen; 684 m0->m_pkthdr.len = totlen;
685 len = MHLEN; 685 len = MHLEN;
686 m = m0; 686 m = m0;
687 687
688 bp = (char *)sc->sc_rb.rb_rxbuf + (ri % sc->sc_rb.rb_nrbuf) * _HME_BUFSZ; 688 bp = (char *)sc->sc_rb.rb_rxbuf + (ri % sc->sc_rb.rb_nrbuf) * _HME_BUFSZ;
689 689
690 while (totlen > 0) { 690 while (totlen > 0) {
691 if (totlen >= MINCLSIZE) { 691 if (totlen >= MINCLSIZE) {
692 MCLGET(m, M_DONTWAIT); 692 MCLGET(m, M_DONTWAIT);
693 if ((m->m_flags & M_EXT) == 0) 693 if ((m->m_flags & M_EXT) == 0)
694 goto bad; 694 goto bad;
695 len = MCLBYTES; 695 len = MCLBYTES;
696 } 696 }
697 697
698 if (m == m0) { 698 if (m == m0) {
699 char *newdata = (char *) 699 char *newdata = (char *)
700 ALIGN(m->m_data + sizeof(struct ether_header)) - 700 ALIGN(m->m_data + sizeof(struct ether_header)) -
701 sizeof(struct ether_header); 701 sizeof(struct ether_header);
702 len -= newdata - m->m_data; 702 len -= newdata - m->m_data;
703 m->m_data = newdata; 703 m->m_data = newdata;
704 } 704 }
705 705
706 m->m_len = len = min(totlen, len); 706 m->m_len = len = min(totlen, len);
707 memcpy(mtod(m, void *), bp, len); 707 memcpy(mtod(m, void *), bp, len);
708 bp += len; 708 bp += len;
709 709
710 totlen -= len; 710 totlen -= len;
711 if (totlen > 0) { 711 if (totlen > 0) {
712 MGET(newm, M_DONTWAIT, MT_DATA); 712 MGET(newm, M_DONTWAIT, MT_DATA);
713 if (newm == 0) 713 if (newm == 0)
714 goto bad; 714 goto bad;
715 len = MLEN; 715 len = MLEN;
716 m = m->m_next = newm; 716 m = m->m_next = newm;
717 } 717 }
718 } 718 }
719 719
720#ifdef INET 720#ifdef INET
721 /* hardware checksum */ 721 /* hardware checksum */
722 if (ifp->if_csum_flags_rx & (M_CSUM_TCPv4 | M_CSUM_UDPv4)) { 722 if (ifp->if_csum_flags_rx & (M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
723 struct ether_header *eh; 723 struct ether_header *eh;
724 struct ip *ip; 724 struct ip *ip;
725 struct udphdr *uh; 725 struct udphdr *uh;
726 uint16_t *opts; 726 uint16_t *opts;
727 int32_t hlen, pktlen; 727 int32_t hlen, pktlen;
728 uint32_t temp; 728 uint32_t temp;
729 729
730 if (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) { 730 if (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) {
731 pktlen = m0->m_pkthdr.len - ETHER_HDR_LEN - 731 pktlen = m0->m_pkthdr.len - ETHER_HDR_LEN -
732 ETHER_VLAN_ENCAP_LEN; 732 ETHER_VLAN_ENCAP_LEN;
 733 eh = (struct ether_header *)(mtod(m0, char *) + 733 eh = (struct ether_header *)(mtod(m0, char *) +
 734 ETHER_VLAN_ENCAP_LEN); 734 ETHER_VLAN_ENCAP_LEN);
735 } else { 735 } else {
736 pktlen = m0->m_pkthdr.len - ETHER_HDR_LEN; 736 pktlen = m0->m_pkthdr.len - ETHER_HDR_LEN;
737 eh = mtod(m0, struct ether_header *); 737 eh = mtod(m0, struct ether_header *);
738 } 738 }
739 if (ntohs(eh->ether_type) != ETHERTYPE_IP) 739 if (ntohs(eh->ether_type) != ETHERTYPE_IP)
740 goto swcsum; 740 goto swcsum;
741 ip = (struct ip *) ((char *)eh + ETHER_HDR_LEN); 741 ip = (struct ip *) ((char *)eh + ETHER_HDR_LEN);
742 742
743 /* IPv4 only */ 743 /* IPv4 only */
744 if (ip->ip_v != IPVERSION) 744 if (ip->ip_v != IPVERSION)
745 goto swcsum; 745 goto swcsum;
746 746
747 hlen = ip->ip_hl << 2; 747 hlen = ip->ip_hl << 2;
748 if (hlen < sizeof(struct ip)) 748 if (hlen < sizeof(struct ip))
749 goto swcsum; 749 goto swcsum;
750 750
751 /* 751 /*
752 * bail if too short, has random trailing garbage, truncated, 752 * bail if too short, has random trailing garbage, truncated,
753 * fragment, or has ethernet pad. 753 * fragment, or has ethernet pad.
754 */ 754 */
755 if ((ntohs(ip->ip_len) < hlen) || (ntohs(ip->ip_len) != pktlen) 755 if ((ntohs(ip->ip_len) < hlen) || (ntohs(ip->ip_len) != pktlen)
756 || (ntohs(ip->ip_off) & (IP_MF | IP_OFFMASK))) 756 || (ntohs(ip->ip_off) & (IP_MF | IP_OFFMASK)))
757 goto swcsum; 757 goto swcsum;
758 758
759 switch (ip->ip_p) { 759 switch (ip->ip_p) {
760 case IPPROTO_TCP: 760 case IPPROTO_TCP:
761 if (! (ifp->if_csum_flags_rx & M_CSUM_TCPv4)) 761 if (! (ifp->if_csum_flags_rx & M_CSUM_TCPv4))
762 goto swcsum; 762 goto swcsum;
763 if (pktlen < (hlen + sizeof(struct tcphdr))) 763 if (pktlen < (hlen + sizeof(struct tcphdr)))
764 goto swcsum; 764 goto swcsum;
765 m0->m_pkthdr.csum_flags = M_CSUM_TCPv4; 765 m0->m_pkthdr.csum_flags = M_CSUM_TCPv4;
766 break; 766 break;
767 case IPPROTO_UDP: 767 case IPPROTO_UDP:
768 if (! (ifp->if_csum_flags_rx & M_CSUM_UDPv4)) 768 if (! (ifp->if_csum_flags_rx & M_CSUM_UDPv4))
769 goto swcsum; 769 goto swcsum;
770 if (pktlen < (hlen + sizeof(struct udphdr))) 770 if (pktlen < (hlen + sizeof(struct udphdr)))
771 goto swcsum; 771 goto swcsum;
772 uh = (struct udphdr *)((char *)ip + hlen); 772 uh = (struct udphdr *)((char *)ip + hlen);
773 /* no checksum */ 773 /* no checksum */
774 if (uh->uh_sum == 0) 774 if (uh->uh_sum == 0)
775 goto swcsum; 775 goto swcsum;
776 m0->m_pkthdr.csum_flags = M_CSUM_UDPv4; 776 m0->m_pkthdr.csum_flags = M_CSUM_UDPv4;
777 break; 777 break;
778 default: 778 default:
779 goto swcsum; 779 goto swcsum;
780 } 780 }
781 781
782 /* w/ M_CSUM_NO_PSEUDOHDR, the uncomplemented sum is expected */ 782 /* w/ M_CSUM_NO_PSEUDOHDR, the uncomplemented sum is expected */
783 m0->m_pkthdr.csum_data = (~flags) & HME_XD_RXCKSUM; 783 m0->m_pkthdr.csum_data = (~flags) & HME_XD_RXCKSUM;
784 784
785 /* if the pkt had ip options, we have to deduct them */ 785 /* if the pkt had ip options, we have to deduct them */
786 if (hlen > sizeof(struct ip)) { 786 if (hlen > sizeof(struct ip)) {
787 uint32_t optsum; 787 uint32_t optsum;
788 788
789 optsum = 0; 789 optsum = 0;
790 temp = hlen - sizeof(struct ip); 790 temp = hlen - sizeof(struct ip);
791 opts = (uint16_t *)((char *)ip + sizeof(struct ip)); 791 opts = (uint16_t *)((char *)ip + sizeof(struct ip));
792 792
793 while (temp > 1) { 793 while (temp > 1) {
794 optsum += ntohs(*opts++); 794 optsum += ntohs(*opts++);
795 temp -= 2; 795 temp -= 2;
796 } 796 }
797 while (optsum >> 16) 797 while (optsum >> 16)
798 optsum = (optsum >> 16) + (optsum & 0xffff); 798 optsum = (optsum >> 16) + (optsum & 0xffff);
799 799
800 /* Deduct the ip opts sum from the hwsum (rfc 1624). */ 800 /* Deduct the ip opts sum from the hwsum. */
801 m0->m_pkthdr.csum_data = ~((~m0->m_pkthdr.csum_data) - 801 m0->m_pkthdr.csum_data += (uint16_t)~optsum;
802 ~optsum); 
803 802
804 while (m0->m_pkthdr.csum_data >> 16) 803 while (m0->m_pkthdr.csum_data >> 16)
805 m0->m_pkthdr.csum_data = 804 m0->m_pkthdr.csum_data =
806 (m0->m_pkthdr.csum_data >> 16) + 805 (m0->m_pkthdr.csum_data >> 16) +
807 (m0->m_pkthdr.csum_data & 0xffff); 806 (m0->m_pkthdr.csum_data & 0xffff);
808 } 807 }
809 808
810 m0->m_pkthdr.csum_flags |= M_CSUM_DATA | M_CSUM_NO_PSEUDOHDR; 809 m0->m_pkthdr.csum_flags |= M_CSUM_DATA | M_CSUM_NO_PSEUDOHDR;
811 } else 810 } else
812swcsum: 811swcsum:
813 m0->m_pkthdr.csum_flags = 0; 812 m0->m_pkthdr.csum_flags = 0;
814#endif 813#endif
815 814
816 return (m0); 815 return (m0);
817 816
818bad: 817bad:
819 m_freem(m0); 818 m_freem(m0);
820 return (0); 819 return (0);
821} 820}
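The option deduction in hme_get() above is plain one's complement arithmetic: subtracting a 16-bit quantity from a one's complement sum is the same as adding its 16-bit complement and folding any carry back into the low word. The fold-and-deduct step in isolation (a sketch; the values and the helper name are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit accumulator down to a 16-bit one's complement sum. */
static uint16_t
csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum >> 16) + (sum & 0xffff);
	return (uint16_t)sum;
}

int
main(void)
{
	uint32_t hwsum = 0x1b2c3;	/* pretend hardware sum, incl. options */
	uint32_t optsum = 0x02c2;	/* pretend sum over the IP options */

	/* Deduct: add the 16-bit complement of the options sum, then fold. */
	uint16_t r = csum_fold(csum_fold(hwsum) + (uint16_t)~optsum);

	printf("sum without options: 0x%04x\n", r);	/* prints 0xb002 */
	return 0;
}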
822 821
823/* 822/*
824 * Pass a packet to the higher levels. 823 * Pass a packet to the higher levels.
825 */ 824 */
826void 825void
827hme_read(struct hme_softc *sc, int ix, u_int32_t flags) 826hme_read(struct hme_softc *sc, int ix, u_int32_t flags)
828{ 827{
829 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 828 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
830 struct mbuf *m; 829 struct mbuf *m;
831 int len; 830 int len;
832 831
833 len = HME_XD_DECODE_RSIZE(flags); 832 len = HME_XD_DECODE_RSIZE(flags);
834 if (len <= sizeof(struct ether_header) || 833 if (len <= sizeof(struct ether_header) ||
835 len > ((sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) ? 834 len > ((sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) ?
836 ETHER_VLAN_ENCAP_LEN + ETHERMTU + sizeof(struct ether_header) : 835 ETHER_VLAN_ENCAP_LEN + ETHERMTU + sizeof(struct ether_header) :
837 ETHERMTU + sizeof(struct ether_header))) { 836 ETHERMTU + sizeof(struct ether_header))) {
838#ifdef HMEDEBUG 837#ifdef HMEDEBUG
839 printf("%s: invalid packet size %d; dropping\n", 838 printf("%s: invalid packet size %d; dropping\n",
840 device_xname(&sc->sc_dev), len); 839 device_xname(&sc->sc_dev), len);
841#endif 840#endif
842 ifp->if_ierrors++; 841 ifp->if_ierrors++;
843 return; 842 return;
844 } 843 }
845 844
846 /* Pull packet off interface. */ 845 /* Pull packet off interface. */
847 m = hme_get(sc, ix, flags); 846 m = hme_get(sc, ix, flags);
848 if (m == 0) { 847 if (m == 0) {
849 ifp->if_ierrors++; 848 ifp->if_ierrors++;
850 return; 849 return;
851 } 850 }
852 851
853 ifp->if_ipackets++; 852 ifp->if_ipackets++;
854 853
855#if NBPFILTER > 0 854#if NBPFILTER > 0
856 /* 855 /*
857 * Check if there's a BPF listener on this interface. 856 * Check if there's a BPF listener on this interface.
858 * If so, hand off the raw packet to BPF. 857 * If so, hand off the raw packet to BPF.
859 */ 858 */
860 if (ifp->if_bpf) 859 if (ifp->if_bpf)
861 bpf_mtap(ifp->if_bpf, m); 860 bpf_mtap(ifp->if_bpf, m);
862#endif 861#endif
863 862
864 /* Pass the packet up. */ 863 /* Pass the packet up. */
865 (*ifp->if_input)(ifp, m); 864 (*ifp->if_input)(ifp, m);
866} 865}
867 866
868void 867void
869hme_start(struct ifnet *ifp) 868hme_start(struct ifnet *ifp)
870{ 869{
871 struct hme_softc *sc = (struct hme_softc *)ifp->if_softc; 870 struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
872 void *txd = sc->sc_rb.rb_txd; 871 void *txd = sc->sc_rb.rb_txd;
873 struct mbuf *m; 872 struct mbuf *m;
874 unsigned int txflags; 873 unsigned int txflags;
875 unsigned int ri, len; 874 unsigned int ri, len;
876 unsigned int ntbuf = sc->sc_rb.rb_ntbuf; 875 unsigned int ntbuf = sc->sc_rb.rb_ntbuf;
877 876
878 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 877 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
879 return; 878 return;
880 879
881 ri = sc->sc_rb.rb_tdhead; 880 ri = sc->sc_rb.rb_tdhead;
882 881
883 for (;;) { 882 for (;;) {
884 IFQ_DEQUEUE(&ifp->if_snd, m); 883 IFQ_DEQUEUE(&ifp->if_snd, m);
885 if (m == 0) 884 if (m == 0)
886 break; 885 break;
887 886
888#if NBPFILTER > 0 887#if NBPFILTER > 0
889 /* 888 /*
890 * If BPF is listening on this interface, let it see the 889 * If BPF is listening on this interface, let it see the
891 * packet before we commit it to the wire. 890 * packet before we commit it to the wire.
892 */ 891 */
893 if (ifp->if_bpf) 892 if (ifp->if_bpf)
894 bpf_mtap(ifp->if_bpf, m); 893 bpf_mtap(ifp->if_bpf, m);
895#endif 894#endif
896 895
897#ifdef INET 896#ifdef INET
898 /* collect bits for h/w csum, before hme_put frees the mbuf */ 897 /* collect bits for h/w csum, before hme_put frees the mbuf */
899 if (ifp->if_csum_flags_tx & (M_CSUM_TCPv4 | M_CSUM_UDPv4) && 898 if (ifp->if_csum_flags_tx & (M_CSUM_TCPv4 | M_CSUM_UDPv4) &&
900 m->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_UDPv4)) { 899 m->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
901 struct ether_header *eh; 900 struct ether_header *eh;
902 uint16_t offset, start; 901 uint16_t offset, start;
903 902
904 eh = mtod(m, struct ether_header *); 903 eh = mtod(m, struct ether_header *);
905 switch (ntohs(eh->ether_type)) { 904 switch (ntohs(eh->ether_type)) {
906 case ETHERTYPE_IP: 905 case ETHERTYPE_IP:
907 start = ETHER_HDR_LEN; 906 start = ETHER_HDR_LEN;
908 break; 907 break;
909 case ETHERTYPE_VLAN: 908 case ETHERTYPE_VLAN:
910 start = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 909 start = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
911 break; 910 break;
912 default: 911 default:
913 /* unsupported, drop it */ 912 /* unsupported, drop it */
914 m_free(m); 913 m_free(m);
915 continue; 914 continue;
916 } 915 }
917 start += M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data); 916 start += M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
918 offset = M_CSUM_DATA_IPv4_OFFSET(m->m_pkthdr.csum_data) 917 offset = M_CSUM_DATA_IPv4_OFFSET(m->m_pkthdr.csum_data)
919 + start; 918 + start;
920 txflags = HME_XD_TXCKSUM | 919 txflags = HME_XD_TXCKSUM |
921 (offset << HME_XD_TXCSSTUFFSHIFT) | 920 (offset << HME_XD_TXCSSTUFFSHIFT) |
922 (start << HME_XD_TXCSSTARTSHIFT); 921 (start << HME_XD_TXCSSTARTSHIFT);
923 } else 922 } else
924#endif 923#endif
925 txflags = 0; 924 txflags = 0;
926 925
927 /* 926 /*
928 * Copy the mbuf chain into the transmit buffer. 927 * Copy the mbuf chain into the transmit buffer.
929 */ 928 */
930 len = hme_put(sc, ri, m); 929 len = hme_put(sc, ri, m);
931 930
932 /* 931 /*
933 * Initialize transmit registers and start transmission 932 * Initialize transmit registers and start transmission
934 */ 933 */
935 HME_XD_SETFLAGS(sc->sc_pci, txd, ri, 934 HME_XD_SETFLAGS(sc->sc_pci, txd, ri,
936 HME_XD_OWN | HME_XD_SOP | HME_XD_EOP | 935 HME_XD_OWN | HME_XD_SOP | HME_XD_EOP |
937 HME_XD_ENCODE_TSIZE(len) | txflags); 936 HME_XD_ENCODE_TSIZE(len) | txflags);
938 937
939 /*if (sc->sc_rb.rb_td_nbusy <= 0)*/ 938 /*if (sc->sc_rb.rb_td_nbusy <= 0)*/
940 bus_space_write_4(sc->sc_bustag, sc->sc_etx, HME_ETXI_PENDING, 939 bus_space_write_4(sc->sc_bustag, sc->sc_etx, HME_ETXI_PENDING,
941 HME_ETX_TP_DMAWAKEUP); 940 HME_ETX_TP_DMAWAKEUP);
942 941
943 if (++ri == ntbuf) 942 if (++ri == ntbuf)
944 ri = 0; 943 ri = 0;
945 944
946 if (++sc->sc_rb.rb_td_nbusy == ntbuf) { 945 if (++sc->sc_rb.rb_td_nbusy == ntbuf) {
947 ifp->if_flags |= IFF_OACTIVE; 946 ifp->if_flags |= IFF_OACTIVE;
948 break; 947 break;
949 } 948 }
950 } 949 }
951 950
952 sc->sc_rb.rb_tdhead = ri; 951 sc->sc_rb.rb_tdhead = ri;
953} 952}
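For transmit checksums the descriptor carries just two byte offsets, computed in hme_start() above: where the sum starts and where the result is stuffed. Packing them into a flags word, separated from the ring logic (a sketch; the bit and shift values are stand-ins, the real ones come from hmereg.h):

#include <stdint.h>
#include <stdio.h>

#define XD_TXCKSUM		0x10000000u	/* stand-in for HME_XD_TXCKSUM */
#define XD_TXCSSTARTSHIFT	14		/* stand-in shift */
#define XD_TXCSSTUFFSHIFT	20		/* stand-in shift */

int
main(void)
{
	/*
	 * TCP over optionless IPv4: summing starts at the TCP header
	 * (14 + 20 = 34) and the result is stuffed at the TCP checksum
	 * field (34 + 16 = 50).
	 */
	uint16_t start = 34, stuff = 50;
	uint32_t txflags = XD_TXCKSUM |
	    ((uint32_t)stuff << XD_TXCSSTUFFSHIFT) |
	    ((uint32_t)start << XD_TXCSSTARTSHIFT);

	printf("txflags = 0x%08x\n", txflags);
	return 0;
}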
954 953
955/* 954/*
956 * Transmit interrupt. 955 * Transmit interrupt.
957 */ 956 */
958int 957int
959hme_tint(struct hme_softc *sc) 958hme_tint(struct hme_softc *sc)
960{ 959{
961 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 960 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
962 bus_space_tag_t t = sc->sc_bustag; 961 bus_space_tag_t t = sc->sc_bustag;
963 bus_space_handle_t mac = sc->sc_mac; 962 bus_space_handle_t mac = sc->sc_mac;
964 unsigned int ri, txflags; 963 unsigned int ri, txflags;
965 964
966 /* 965 /*
967 * Unload collision counters 966 * Unload collision counters
968 */ 967 */
969 ifp->if_collisions += 968 ifp->if_collisions +=
970 bus_space_read_4(t, mac, HME_MACI_NCCNT) + 969 bus_space_read_4(t, mac, HME_MACI_NCCNT) +
971 bus_space_read_4(t, mac, HME_MACI_FCCNT) + 970 bus_space_read_4(t, mac, HME_MACI_FCCNT) +
972 bus_space_read_4(t, mac, HME_MACI_EXCNT) + 971 bus_space_read_4(t, mac, HME_MACI_EXCNT) +
973 bus_space_read_4(t, mac, HME_MACI_LTCNT); 972 bus_space_read_4(t, mac, HME_MACI_LTCNT);
974 973
975 /* 974 /*
976 * then clear the hardware counters. 975 * then clear the hardware counters.
977 */ 976 */
978 bus_space_write_4(t, mac, HME_MACI_NCCNT, 0); 977 bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
979 bus_space_write_4(t, mac, HME_MACI_FCCNT, 0); 978 bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
980 bus_space_write_4(t, mac, HME_MACI_EXCNT, 0); 979 bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
981 bus_space_write_4(t, mac, HME_MACI_LTCNT, 0); 980 bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);
982 981
983 /* Fetch current position in the transmit ring */ 982 /* Fetch current position in the transmit ring */
984 ri = sc->sc_rb.rb_tdtail; 983 ri = sc->sc_rb.rb_tdtail;
985 984
986 for (;;) { 985 for (;;) {
987 if (sc->sc_rb.rb_td_nbusy <= 0) 986 if (sc->sc_rb.rb_td_nbusy <= 0)
988 break; 987 break;
989 988
990 txflags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri); 989 txflags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri);
991 990
992 if (txflags & HME_XD_OWN) 991 if (txflags & HME_XD_OWN)
993 break; 992 break;
994 993
995 ifp->if_flags &= ~IFF_OACTIVE; 994 ifp->if_flags &= ~IFF_OACTIVE;
996 ifp->if_opackets++; 995 ifp->if_opackets++;
997 996
998 if (++ri == sc->sc_rb.rb_ntbuf) 997 if (++ri == sc->sc_rb.rb_ntbuf)
999 ri = 0; 998 ri = 0;
1000 999
1001 --sc->sc_rb.rb_td_nbusy; 1000 --sc->sc_rb.rb_td_nbusy;
1002 } 1001 }
1003 1002
1004 /* Update ring */ 1003 /* Update ring */
1005 sc->sc_rb.rb_tdtail = ri; 1004 sc->sc_rb.rb_tdtail = ri;
1006 1005
1007 hme_start(ifp); 1006 hme_start(ifp);
1008 1007
1009 if (sc->sc_rb.rb_td_nbusy == 0) 1008 if (sc->sc_rb.rb_td_nbusy == 0)
1010 ifp->if_timer = 0; 1009 ifp->if_timer = 0;
1011 1010
1012 return (1); 1011 return (1);
1013} 1012}
1014 1013
1015/* 1014/*
1016 * Receive interrupt. 1015 * Receive interrupt.
1017 */ 1016 */
1018int 1017int
1019hme_rint(struct hme_softc *sc) 1018hme_rint(struct hme_softc *sc)
1020{ 1019{
1021 void *xdr = sc->sc_rb.rb_rxd; 1020 void *xdr = sc->sc_rb.rb_rxd;
1022 unsigned int nrbuf = sc->sc_rb.rb_nrbuf; 1021 unsigned int nrbuf = sc->sc_rb.rb_nrbuf;
1023 unsigned int ri; 1022 unsigned int ri;
1024 u_int32_t flags; 1023 u_int32_t flags;
1025 1024
1026 ri = sc->sc_rb.rb_rdtail; 1025 ri = sc->sc_rb.rb_rdtail;
1027 1026
1028 /* 1027 /*
1029 * Process all buffers with valid data. 1028 * Process all buffers with valid data.
1030 */ 1029 */
1031 for (;;) { 1030 for (;;) {
1032 flags = HME_XD_GETFLAGS(sc->sc_pci, xdr, ri); 1031 flags = HME_XD_GETFLAGS(sc->sc_pci, xdr, ri);
1033 if (flags & HME_XD_OWN) 1032 if (flags & HME_XD_OWN)
1034 break; 1033 break;
1035 1034
1036 if (flags & HME_XD_OFL) { 1035 if (flags & HME_XD_OFL) {
1037 printf("%s: buffer overflow, ri=%d; flags=0x%x\n", 1036 printf("%s: buffer overflow, ri=%d; flags=0x%x\n",
1038 device_xname(&sc->sc_dev), ri, flags); 1037 device_xname(&sc->sc_dev), ri, flags);
1039 } else 1038 } else
1040 hme_read(sc, ri, flags); 1039 hme_read(sc, ri, flags);
1041 1040
1042 /* This buffer can be used by the hardware again */ 1041 /* This buffer can be used by the hardware again */
1043 HME_XD_SETFLAGS(sc->sc_pci, xdr, ri, 1042 HME_XD_SETFLAGS(sc->sc_pci, xdr, ri,
1044 HME_XD_OWN | HME_XD_ENCODE_RSIZE(_HME_BUFSZ)); 1043 HME_XD_OWN | HME_XD_ENCODE_RSIZE(_HME_BUFSZ));
1045 1044
1046 if (++ri == nrbuf) 1045 if (++ri == nrbuf)
1047 ri = 0; 1046 ri = 0;
1048 } 1047 }
1049 1048
1050 sc->sc_rb.rb_rdtail = ri; 1049 sc->sc_rb.rb_rdtail = ri;
1051 1050
1052 return (1); 1051 return (1);
1053} 1052}
1054 1053
1055int 1054int
1056hme_eint(struct hme_softc *sc, u_int status) 1055hme_eint(struct hme_softc *sc, u_int status)
1057{ 1056{
1058 char bits[128]; 1057 char bits[128];
1059 1058
1060 if ((status & HME_SEB_STAT_MIFIRQ) != 0) { 1059 if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
1061 bus_space_tag_t t = sc->sc_bustag; 1060 bus_space_tag_t t = sc->sc_bustag;
1062 bus_space_handle_t mif = sc->sc_mif; 1061 bus_space_handle_t mif = sc->sc_mif;
1063 u_int32_t cf, st, sm; 1062 u_int32_t cf, st, sm;
1064 cf = bus_space_read_4(t, mif, HME_MIFI_CFG); 1063 cf = bus_space_read_4(t, mif, HME_MIFI_CFG);
1065 st = bus_space_read_4(t, mif, HME_MIFI_STAT); 1064 st = bus_space_read_4(t, mif, HME_MIFI_STAT);
1066 sm = bus_space_read_4(t, mif, HME_MIFI_SM); 1065 sm = bus_space_read_4(t, mif, HME_MIFI_SM);
1067 printf("%s: XXXlink status changed: cfg=%x, stat %x, sm %x\n", 1066 printf("%s: XXXlink status changed: cfg=%x, stat %x, sm %x\n",
1068 device_xname(&sc->sc_dev), cf, st, sm); 1067 device_xname(&sc->sc_dev), cf, st, sm);
1069 return (1); 1068 return (1);
1070 } 1069 }
1071 snprintb(bits, sizeof(bits), HME_SEB_STAT_BITS, status); 1070 snprintb(bits, sizeof(bits), HME_SEB_STAT_BITS, status);
1072 printf("%s: status=%s\n", device_xname(&sc->sc_dev), bits); 1071 printf("%s: status=%s\n", device_xname(&sc->sc_dev), bits);
1073  1072
1074 return (1); 1073 return (1);
1075} 1074}
1076 1075
1077int 1076int
1078hme_intr(void *v) 1077hme_intr(void *v)
1079{ 1078{
1080 struct hme_softc *sc = (struct hme_softc *)v; 1079 struct hme_softc *sc = (struct hme_softc *)v;
1081 bus_space_tag_t t = sc->sc_bustag; 1080 bus_space_tag_t t = sc->sc_bustag;
1082 bus_space_handle_t seb = sc->sc_seb; 1081 bus_space_handle_t seb = sc->sc_seb;
1083 u_int32_t status; 1082 u_int32_t status;
1084 int r = 0; 1083 int r = 0;
1085 1084
1086 status = bus_space_read_4(t, seb, HME_SEBI_STAT); 1085 status = bus_space_read_4(t, seb, HME_SEBI_STAT);
1087 1086
1088 if ((status & HME_SEB_STAT_ALL_ERRORS) != 0) 1087 if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
1089 r |= hme_eint(sc, status); 1088 r |= hme_eint(sc, status);
1090 1089
1091 if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0) 1090 if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
1092 r |= hme_tint(sc); 1091 r |= hme_tint(sc);
1093 1092
1094 if ((status & HME_SEB_STAT_RXTOHOST) != 0) 1093 if ((status & HME_SEB_STAT_RXTOHOST) != 0)
1095 r |= hme_rint(sc); 1094 r |= hme_rint(sc);
1096 1095
1097#if NRND > 0 1096#if NRND > 0
1098 rnd_add_uint32(&sc->rnd_source, status); 1097 rnd_add_uint32(&sc->rnd_source, status);
1099#endif 1098#endif
1100 1099
1101 return (r); 1100 return (r);
1102} 1101}
1103 1102
1104 1103
1105void 1104void
1106hme_watchdog(struct ifnet *ifp) 1105hme_watchdog(struct ifnet *ifp)
1107{ 1106{
1108 struct hme_softc *sc = ifp->if_softc; 1107 struct hme_softc *sc = ifp->if_softc;
1109 1108
1110 log(LOG_ERR, "%s: device timeout\n", device_xname(&sc->sc_dev)); 1109 log(LOG_ERR, "%s: device timeout\n", device_xname(&sc->sc_dev));
1111 ++ifp->if_oerrors; 1110 ++ifp->if_oerrors;
1112 1111
1113 hme_reset(sc); 1112 hme_reset(sc);
1114} 1113}
1115 1114
1116/* 1115/*
1117 * Initialize the MII Management Interface 1116 * Initialize the MII Management Interface
1118 */ 1117 */
1119void 1118void
1120hme_mifinit(struct hme_softc *sc) 1119hme_mifinit(struct hme_softc *sc)
1121{ 1120{
1122 bus_space_tag_t t = sc->sc_bustag; 1121 bus_space_tag_t t = sc->sc_bustag;
1123 bus_space_handle_t mif = sc->sc_mif; 1122 bus_space_handle_t mif = sc->sc_mif;
1124 bus_space_handle_t mac = sc->sc_mac; 1123 bus_space_handle_t mac = sc->sc_mac;
1125 int instance, phy; 1124 int instance, phy;
1126 u_int32_t v; 1125 u_int32_t v;
1127 1126
1128 if (sc->sc_mii.mii_media.ifm_cur != NULL) { 1127 if (sc->sc_mii.mii_media.ifm_cur != NULL) {
1129 instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media); 1128 instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
1130 phy = sc->sc_phys[instance]; 1129 phy = sc->sc_phys[instance];
1131 } else 1130 } else
1132 /* No media set yet, pick phy arbitrarily.. */ 1131 /* No media set yet, pick phy arbitrarily.. */
1133 phy = HME_PHYAD_EXTERNAL; 1132 phy = HME_PHYAD_EXTERNAL;
1134 1133
1135 /* Configure the MIF in frame mode, no poll, current phy select */ 1134 /* Configure the MIF in frame mode, no poll, current phy select */
1136 v = 0; 1135 v = 0;
1137 if (phy == HME_PHYAD_EXTERNAL) 1136 if (phy == HME_PHYAD_EXTERNAL)
1138 v |= HME_MIF_CFG_PHY; 1137 v |= HME_MIF_CFG_PHY;
1139 bus_space_write_4(t, mif, HME_MIFI_CFG, v); 1138 bus_space_write_4(t, mif, HME_MIFI_CFG, v);
1140 1139
1141 /* If an external transceiver is selected, enable its MII drivers */ 1140 /* If an external transceiver is selected, enable its MII drivers */
1142 v = bus_space_read_4(t, mac, HME_MACI_XIF); 1141 v = bus_space_read_4(t, mac, HME_MACI_XIF);
1143 v &= ~HME_MAC_XIF_MIIENABLE; 1142 v &= ~HME_MAC_XIF_MIIENABLE;
1144 if (phy == HME_PHYAD_EXTERNAL) 1143 if (phy == HME_PHYAD_EXTERNAL)
1145 v |= HME_MAC_XIF_MIIENABLE; 1144 v |= HME_MAC_XIF_MIIENABLE;
1146 bus_space_write_4(t, mac, HME_MACI_XIF, v); 1145 bus_space_write_4(t, mac, HME_MACI_XIF, v);
1147} 1146}
1148 1147
1149/* 1148/*
1150 * MII interface 1149 * MII interface
1151 */ 1150 */
1152static int 1151static int
1153hme_mii_readreg(struct device *self, int phy, int reg) 1152hme_mii_readreg(struct device *self, int phy, int reg)
1154{ 1153{
1155 struct hme_softc *sc = (void *)self; 1154 struct hme_softc *sc = (void *)self;
1156 bus_space_tag_t t = sc->sc_bustag; 1155 bus_space_tag_t t = sc->sc_bustag;
1157 bus_space_handle_t mif = sc->sc_mif; 1156 bus_space_handle_t mif = sc->sc_mif;
1158 bus_space_handle_t mac = sc->sc_mac; 1157 bus_space_handle_t mac = sc->sc_mac;
1159 u_int32_t v, xif_cfg, mifi_cfg; 1158 u_int32_t v, xif_cfg, mifi_cfg;
1160 int n; 1159 int n;
1161 1160
1162 /* We can at most have two PHYs */ 1161 /* We can at most have two PHYs */
1163 if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL) 1162 if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL)
1164 return (0); 1163 return (0);
1165 1164
1166 /* Select the desired PHY in the MIF configuration register */ 1165 /* Select the desired PHY in the MIF configuration register */
1167 v = mifi_cfg = bus_space_read_4(t, mif, HME_MIFI_CFG); 1166 v = mifi_cfg = bus_space_read_4(t, mif, HME_MIFI_CFG);
1168 v &= ~HME_MIF_CFG_PHY; 1167 v &= ~HME_MIF_CFG_PHY;
1169 if (phy == HME_PHYAD_EXTERNAL) 1168 if (phy == HME_PHYAD_EXTERNAL)
1170 v |= HME_MIF_CFG_PHY; 1169 v |= HME_MIF_CFG_PHY;
1171 bus_space_write_4(t, mif, HME_MIFI_CFG, v); 1170 bus_space_write_4(t, mif, HME_MIFI_CFG, v);
1172 1171
1173 /* Enable MII drivers on external transceiver */ 1172 /* Enable MII drivers on external transceiver */
1174 v = xif_cfg = bus_space_read_4(t, mac, HME_MACI_XIF); 1173 v = xif_cfg = bus_space_read_4(t, mac, HME_MACI_XIF);
1175 if (phy == HME_PHYAD_EXTERNAL) 1174 if (phy == HME_PHYAD_EXTERNAL)
1176 v |= HME_MAC_XIF_MIIENABLE; 1175 v |= HME_MAC_XIF_MIIENABLE;
1177 else 1176 else
1178 v &= ~HME_MAC_XIF_MIIENABLE; 1177 v &= ~HME_MAC_XIF_MIIENABLE;
1179 bus_space_write_4(t, mac, HME_MACI_XIF, v); 1178 bus_space_write_4(t, mac, HME_MACI_XIF, v);
1180 1179
1181#if 0 1180#if 0
1182/* This doesn't work reliably; the MDIO_1 bit is off most of the time */ 1181/* This doesn't work reliably; the MDIO_1 bit is off most of the time */
1183 /* 1182 /*
1184 * Check whether a transceiver is connected by testing 1183 * Check whether a transceiver is connected by testing
1185 * the MIF configuration register's MDI_X bits. Note that 1184 * the MIF configuration register's MDI_X bits. Note that
1186 * MDI_0 (int) == 0x100 and MDI_1 (ext) == 0x200; see hmereg.h 1185 * MDI_0 (int) == 0x100 and MDI_1 (ext) == 0x200; see hmereg.h
1187 */ 1186 */
1188 mif_mdi_bit = 1 << (8 + (1 - phy)); 1187 mif_mdi_bit = 1 << (8 + (1 - phy));
1189 delay(100); 1188 delay(100);
1190 v = bus_space_read_4(t, mif, HME_MIFI_CFG); 1189 v = bus_space_read_4(t, mif, HME_MIFI_CFG);
1191 if ((v & mif_mdi_bit) == 0) 1190 if ((v & mif_mdi_bit) == 0)
1192 return (0); 1191 return (0);
1193#endif 1192#endif
1194 1193
1195 /* Construct the frame command */ 1194 /* Construct the frame command */
1196 v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) | 1195 v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
1197 HME_MIF_FO_TAMSB | 1196 HME_MIF_FO_TAMSB |
1198 (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) | 1197 (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
1199 (phy << HME_MIF_FO_PHYAD_SHIFT) | 1198 (phy << HME_MIF_FO_PHYAD_SHIFT) |
1200 (reg << HME_MIF_FO_REGAD_SHIFT); 1199 (reg << HME_MIF_FO_REGAD_SHIFT);
1201 1200
1202 bus_space_write_4(t, mif, HME_MIFI_FO, v); 1201 bus_space_write_4(t, mif, HME_MIFI_FO, v);
1203 for (n = 0; n < 100; n++) { 1202 for (n = 0; n < 100; n++) {
1204 DELAY(1); 1203 DELAY(1);
1205 v = bus_space_read_4(t, mif, HME_MIFI_FO); 1204 v = bus_space_read_4(t, mif, HME_MIFI_FO);
1206 if (v & HME_MIF_FO_TALSB) { 1205 if (v & HME_MIF_FO_TALSB) {
1207 v &= HME_MIF_FO_DATA; 1206 v &= HME_MIF_FO_DATA;
1208 goto out; 1207 goto out;
1209 } 1208 }
1210 } 1209 }
1211 1210
1212 v = 0; 1211 v = 0;
1213 printf("%s: mii_read timeout\n", device_xname(&sc->sc_dev)); 1212 printf("%s: mii_read timeout\n", device_xname(&sc->sc_dev));
1214 1213
1215out: 1214out:
1216 /* Restore MIFI_CFG register */ 1215 /* Restore MIFI_CFG register */
1217 bus_space_write_4(t, mif, HME_MIFI_CFG, mifi_cfg); 1216 bus_space_write_4(t, mif, HME_MIFI_CFG, mifi_cfg);
1218 /* Restore XIF register */ 1217 /* Restore XIF register */
1219 bus_space_write_4(t, mac, HME_MACI_XIF, xif_cfg); 1218 bus_space_write_4(t, mac, HME_MACI_XIF, xif_cfg);
1220 return (v); 1219 return (v);
1221} 1220}
1222 1221
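For reference, the frame command built in hme_mii_readreg() follows the IEEE
802.3 clause 22 MII management frame layout: a 2-bit start pattern (01), a
2-bit opcode (10 for read, 01 for write), 5-bit PHY and register addresses, a
2-bit turnaround field, and 16 bits of data. A minimal sketch of the read-frame
encoding, using the standard clause 22 bit positions; whether the MIF frame
register uses exactly these positions is an assumption here, since the real
values come from the HME_MIF_FO_*_SHIFT definitions in hmereg.h:

/*
 * Sketch only: clause 22 MII read frame.  The shifts below are the
 * standard clause 22 field positions; the driver itself uses the
 * HME_MIF_FO_*_SHIFT definitions from hmereg.h.
 */
#include <stdint.h>

#define ST_SHIFT	30	/* 2-bit start of frame, 01 */
#define OP_SHIFT	28	/* 2-bit opcode: 10 = read, 01 = write */
#define PHYAD_SHIFT	23	/* 5-bit PHY address */
#define REGAD_SHIFT	18	/* 5-bit register address */
				/* bits 17:16 turnaround, 15:0 data */
static uint32_t
mii_read_frame(int phy, int reg)
{

	return (1U << ST_SHIFT) | (2U << OP_SHIFT) |
	    ((uint32_t)(phy & 0x1f) << PHYAD_SHIFT) |
	    ((uint32_t)(reg & 0x1f) << REGAD_SHIFT);
}

The polling loop above then waits up to roughly 100 microseconds (100
iterations of DELAY(1)) for the PHY to drive the turnaround LSB, which the
MIF reports as HME_MIF_FO_TALSB.
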
1223static void 1222static void
1224hme_mii_writereg(struct device *self, int phy, int reg, int val) 1223hme_mii_writereg(struct device *self, int phy, int reg, int val)
1225{ 1224{
1226 struct hme_softc *sc = (void *)self; 1225 struct hme_softc *sc = (void *)self;
1227 bus_space_tag_t t = sc->sc_bustag; 1226 bus_space_tag_t t = sc->sc_bustag;
1228 bus_space_handle_t mif = sc->sc_mif; 1227 bus_space_handle_t mif = sc->sc_mif;
1229 bus_space_handle_t mac = sc->sc_mac; 1228 bus_space_handle_t mac = sc->sc_mac;
1230 u_int32_t v, xif_cfg, mifi_cfg; 1229 u_int32_t v, xif_cfg, mifi_cfg;
1231 int n; 1230 int n;
1232 1231
1233	 /* We can have at most two PHYs */ 1232	 /* We can have at most two PHYs */
1234 if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL) 1233 if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL)
1235 return; 1234 return;
1236 1235
1237 /* Select the desired PHY in the MIF configuration register */ 1236 /* Select the desired PHY in the MIF configuration register */
1238 v = mifi_cfg = bus_space_read_4(t, mif, HME_MIFI_CFG); 1237 v = mifi_cfg = bus_space_read_4(t, mif, HME_MIFI_CFG);
1239 v &= ~HME_MIF_CFG_PHY; 1238 v &= ~HME_MIF_CFG_PHY;
1240 if (phy == HME_PHYAD_EXTERNAL) 1239 if (phy == HME_PHYAD_EXTERNAL)
1241 v |= HME_MIF_CFG_PHY; 1240 v |= HME_MIF_CFG_PHY;
1242 bus_space_write_4(t, mif, HME_MIFI_CFG, v); 1241 bus_space_write_4(t, mif, HME_MIFI_CFG, v);
1243 1242
1244 /* Enable MII drivers on external transceiver */ 1243 /* Enable MII drivers on external transceiver */
1245 v = xif_cfg = bus_space_read_4(t, mac, HME_MACI_XIF); 1244 v = xif_cfg = bus_space_read_4(t, mac, HME_MACI_XIF);
1246 if (phy == HME_PHYAD_EXTERNAL) 1245 if (phy == HME_PHYAD_EXTERNAL)
1247 v |= HME_MAC_XIF_MIIENABLE; 1246 v |= HME_MAC_XIF_MIIENABLE;
1248 else 1247 else
1249 v &= ~HME_MAC_XIF_MIIENABLE; 1248 v &= ~HME_MAC_XIF_MIIENABLE;
1250 bus_space_write_4(t, mac, HME_MACI_XIF, v); 1249 bus_space_write_4(t, mac, HME_MACI_XIF, v);
1251 1250
1252#if 0 1251#if 0
1253/* This doesn't work reliably; the MDIO_1 bit is off most of the time */ 1252/* This doesn't work reliably; the MDIO_1 bit is off most of the time */
1254 /* 1253 /*
1255 * Check whether a transceiver is connected by testing 1254 * Check whether a transceiver is connected by testing
1256 * the MIF configuration register's MDI_X bits. Note that 1255 * the MIF configuration register's MDI_X bits. Note that
1257 * MDI_0 (int) == 0x100 and MDI_1 (ext) == 0x200; see hmereg.h 1256 * MDI_0 (int) == 0x100 and MDI_1 (ext) == 0x200; see hmereg.h
1258 */ 1257 */
1259 mif_mdi_bit = 1 << (8 + (1 - phy)); 1258 mif_mdi_bit = 1 << (8 + (1 - phy));
1260 delay(100); 1259 delay(100);
1261 v = bus_space_read_4(t, mif, HME_MIFI_CFG); 1260 v = bus_space_read_4(t, mif, HME_MIFI_CFG);
1262 if ((v & mif_mdi_bit) == 0) 1261 if ((v & mif_mdi_bit) == 0)
1263 return; 1262 return;
1264#endif 1263#endif
1265 1264
1266 /* Construct the frame command */ 1265 /* Construct the frame command */
1267 v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) | 1266 v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
1268 HME_MIF_FO_TAMSB | 1267 HME_MIF_FO_TAMSB |
1269 (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) | 1268 (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) |
1270 (phy << HME_MIF_FO_PHYAD_SHIFT) | 1269 (phy << HME_MIF_FO_PHYAD_SHIFT) |
1271 (reg << HME_MIF_FO_REGAD_SHIFT) | 1270 (reg << HME_MIF_FO_REGAD_SHIFT) |
1272 (val & HME_MIF_FO_DATA); 1271 (val & HME_MIF_FO_DATA);
1273 1272
1274 bus_space_write_4(t, mif, HME_MIFI_FO, v); 1273 bus_space_write_4(t, mif, HME_MIFI_FO, v);
1275 for (n = 0; n < 100; n++) { 1274 for (n = 0; n < 100; n++) {
1276 DELAY(1); 1275 DELAY(1);
1277 v = bus_space_read_4(t, mif, HME_MIFI_FO); 1276 v = bus_space_read_4(t, mif, HME_MIFI_FO);
1278 if (v & HME_MIF_FO_TALSB) 1277 if (v & HME_MIF_FO_TALSB)
1279 goto out; 1278 goto out;
1280 } 1279 }
1281 1280
1282 printf("%s: mii_write timeout\n", device_xname(&sc->sc_dev)); 1281 printf("%s: mii_write timeout\n", device_xname(&sc->sc_dev));
1283out: 1282out:
1284 /* Restore MIFI_CFG register */ 1283 /* Restore MIFI_CFG register */
1285 bus_space_write_4(t, mif, HME_MIFI_CFG, mifi_cfg); 1284 bus_space_write_4(t, mif, HME_MIFI_CFG, mifi_cfg);
1286 /* Restore XIF register */ 1285 /* Restore XIF register */
1287 bus_space_write_4(t, mac, HME_MACI_XIF, xif_cfg); 1286 bus_space_write_4(t, mac, HME_MACI_XIF, xif_cfg);
1288} 1287}
1289 1288
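Note that hme_mii_writereg() returns void and its completion poll can time out
silently, so a caller that needs confirmation has to read the register back. A
hypothetical wrapper (not part of the driver) sketching that pattern with the
two routines above; only the low 16 bits are compared because the MIF data
field (HME_MIF_FO_DATA) is 16 bits wide:

/*
 * Hypothetical write-and-verify helper built on hme_mii_writereg()
 * and hme_mii_readreg() above.  Returns nonzero if the readback
 * matches the low 16 bits of the value written.
 */
static int
hme_mii_writereg_verify(struct device *self, int phy, int reg, int val)
{

	hme_mii_writereg(self, phy, reg, val);
	return (hme_mii_readreg(self, phy, reg) == (val & 0xffff));
}
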
1290static void 1289static void
1291hme_mii_statchg(struct device *dev) 1290hme_mii_statchg(struct device *dev)
1292{ 1291{
1293 struct hme_softc *sc = (void *)dev; 1292 struct hme_softc *sc = (void *)dev;
1294 bus_space_tag_t t = sc->sc_bustag; 1293 bus_space_tag_t t = sc->sc_bustag;
1295 bus_space_handle_t mac = sc->sc_mac; 1294 bus_space_handle_t mac = sc->sc_mac;
1296 u_int32_t v; 1295 u_int32_t v;
1297 1296
1298#ifdef HMEDEBUG 1297#ifdef HMEDEBUG
1299 if (sc->sc_debug) 1298 if (sc->sc_debug)
1300 printf("hme_mii_statchg: status change\n"); 1299 printf("hme_mii_statchg: status change\n");
1301#endif 1300#endif
1302 1301
1303 /* Set the MAC Full Duplex bit appropriately */ 1302 /* Set the MAC Full Duplex bit appropriately */
1304	 /* Apparently the hme chip is simplex (does not receive its own 1303	 /* Apparently the hme chip is simplex (does not receive its own
1305	    transmissions) when working in full duplex mode, but not otherwise. */ 1304	    transmissions) when working in full duplex mode, but not otherwise. */
1306 v = bus_space_read_4(t, mac, HME_MACI_TXCFG); 1305 v = bus_space_read_4(t, mac, HME_MACI_TXCFG);
1307 if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) { 1306 if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) {
1308 v |= HME_MAC_TXCFG_FULLDPLX; 1307 v |= HME_MAC_TXCFG_FULLDPLX;
1309 sc->sc_ethercom.ec_if.if_flags |= IFF_SIMPLEX; 1308 sc->sc_ethercom.ec_if.if_flags |= IFF_SIMPLEX;
1310 } else { 1309 } else {
1311 v &= ~HME_MAC_TXCFG_FULLDPLX; 1310 v &= ~HME_MAC_TXCFG_FULLDPLX;
1312 sc->sc_ethercom.ec_if.if_flags &= ~IFF_SIMPLEX; 1311 sc->sc_ethercom.ec_if.if_flags &= ~IFF_SIMPLEX;
1313 } 1312 }
1314 sc->sc_if_flags = sc->sc_ethercom.ec_if.if_flags; 1313 sc->sc_if_flags = sc->sc_ethercom.ec_if.if_flags;
1315 bus_space_write_4(t, mac, HME_MACI_TXCFG, v); 1314 bus_space_write_4(t, mac, HME_MACI_TXCFG, v);
1316} 1315}
1317 1316
1318int 1317int
1319hme_mediachange(struct ifnet *ifp) 1318hme_mediachange(struct ifnet *ifp)
1320{ 1319{
1321 struct hme_softc *sc = ifp->if_softc; 1320 struct hme_softc *sc = ifp->if_softc;
1322 bus_space_tag_t t = sc->sc_bustag; 1321 bus_space_tag_t t = sc->sc_bustag;
1323 bus_space_handle_t mif = sc->sc_mif; 1322 bus_space_handle_t mif = sc->sc_mif;
1324 bus_space_handle_t mac = sc->sc_mac; 1323 bus_space_handle_t mac = sc->sc_mac;
1325 int instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media); 1324 int instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
1326 int phy = sc->sc_phys[instance]; 1325 int phy = sc->sc_phys[instance];
1327 int rc; 1326 int rc;
1328 u_int32_t v; 1327 u_int32_t v;
1329 1328
1330#ifdef HMEDEBUG 1329#ifdef HMEDEBUG
1331 if (sc->sc_debug) 1330 if (sc->sc_debug)
1332 printf("hme_mediachange: phy = %d\n", phy); 1331 printf("hme_mediachange: phy = %d\n", phy);
1333#endif 1332#endif
1334 1333
1335 /* Select the current PHY in the MIF configuration register */ 1334 /* Select the current PHY in the MIF configuration register */
1336 v = bus_space_read_4(t, mif, HME_MIFI_CFG); 1335 v = bus_space_read_4(t, mif, HME_MIFI_CFG);
1337 v &= ~HME_MIF_CFG_PHY; 1336 v &= ~HME_MIF_CFG_PHY;
1338 if (phy == HME_PHYAD_EXTERNAL) 1337 if (phy == HME_PHYAD_EXTERNAL)
1339 v |= HME_MIF_CFG_PHY; 1338 v |= HME_MIF_CFG_PHY;
1340 bus_space_write_4(t, mif, HME_MIFI_CFG, v); 1339 bus_space_write_4(t, mif, HME_MIFI_CFG, v);
1341 1340
1342 /* If an external transceiver is selected, enable its MII drivers */ 1341 /* If an external transceiver is selected, enable its MII drivers */
1343 v = bus_space_read_4(t, mac, HME_MACI_XIF); 1342 v = bus_space_read_4(t, mac, HME_MACI_XIF);
1344 v &= ~HME_MAC_XIF_MIIENABLE; 1343 v &= ~HME_MAC_XIF_MIIENABLE;
1345 if (phy == HME_PHYAD_EXTERNAL) 1344 if (phy == HME_PHYAD_EXTERNAL)
1346 v |= HME_MAC_XIF_MIIENABLE; 1345 v |= HME_MAC_XIF_MIIENABLE;
1347 bus_space_write_4(t, mac, HME_MACI_XIF, v); 1346 bus_space_write_4(t, mac, HME_MACI_XIF, v);
1348 1347
1349 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO) 1348 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
1350 return 0; 1349 return 0;
1351 return rc; 1350 return rc;
1352} 1351}
1353 1352
1354/* 1353/*
1355 * Process an ioctl request. 1354 * Process an ioctl request.
1356 */ 1355 */
1357int 1356int
1358hme_ioctl(struct ifnet *ifp, unsigned long cmd, void *data) 1357hme_ioctl(struct ifnet *ifp, unsigned long cmd, void *data)
1359{ 1358{
1360 struct hme_softc *sc = ifp->if_softc; 1359 struct hme_softc *sc = ifp->if_softc;
1361 struct ifaddr *ifa = (struct ifaddr *)data; 1360 struct ifaddr *ifa = (struct ifaddr *)data;
1362 int s, error = 0; 1361 int s, error = 0;
1363 1362
1364 s = splnet(); 1363 s = splnet();
1365 1364
1366 switch (cmd) { 1365 switch (cmd) {
1367 1366
1368 case SIOCINITIFADDR: 1367 case SIOCINITIFADDR:
1369 switch (ifa->ifa_addr->sa_family) { 1368 switch (ifa->ifa_addr->sa_family) {
1370#ifdef INET 1369#ifdef INET
1371 case AF_INET: 1370 case AF_INET:
1372 if (ifp->if_flags & IFF_UP) 1371 if (ifp->if_flags & IFF_UP)
1373 hme_setladrf(sc); 1372 hme_setladrf(sc);
1374 else { 1373 else {
1375 ifp->if_flags |= IFF_UP; 1374 ifp->if_flags |= IFF_UP;
1376 error = hme_init(sc); 1375 error = hme_init(sc);
1377 } 1376 }
1378 arp_ifinit(ifp, ifa); 1377 arp_ifinit(ifp, ifa);
1379 break; 1378 break;
1380#endif 1379#endif
1381 default: 1380 default:
1382 ifp->if_flags |= IFF_UP; 1381 ifp->if_flags |= IFF_UP;
1383 error = hme_init(sc); 1382 error = hme_init(sc);
1384 break; 1383 break;
1385 } 1384 }
1386 break; 1385 break;
1387 1386
1388 case SIOCSIFFLAGS: 1387 case SIOCSIFFLAGS:
1389#ifdef HMEDEBUG 1388#ifdef HMEDEBUG
1390 { 1389 {
1391 struct ifreq *ifr = data; 1390 struct ifreq *ifr = data;
1392 sc->sc_debug = 1391 sc->sc_debug =
1393 (ifr->ifr_flags & IFF_DEBUG) != 0 ? 1 : 0; 1392 (ifr->ifr_flags & IFF_DEBUG) != 0 ? 1 : 0;
1394 } 1393 }
1395#endif 1394#endif
1396 if ((error = ifioctl_common(ifp, cmd, data)) != 0) 1395 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
1397 break; 1396 break;
1398 1397
1399 switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) { 1398 switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
1400 case IFF_RUNNING: 1399 case IFF_RUNNING:
1401 /* 1400 /*
1402 * If interface is marked down and it is running, then 1401 * If interface is marked down and it is running, then
1403 * stop it. 1402 * stop it.
1404 */ 1403 */
1405 hme_stop(sc, false); 1404 hme_stop(sc, false);
1406 ifp->if_flags &= ~IFF_RUNNING; 1405 ifp->if_flags &= ~IFF_RUNNING;
1407 break; 1406 break;
1408 case IFF_UP: 1407 case IFF_UP:
1409 /* 1408 /*
1410 * If interface is marked up and it is stopped, then 1409 * If interface is marked up and it is stopped, then
1411 * start it. 1410 * start it.
1412 */ 1411 */
1413 error = hme_init(sc); 1412 error = hme_init(sc);
1414 break; 1413 break;
1415 case IFF_UP|IFF_RUNNING: 1414 case IFF_UP|IFF_RUNNING:
1416 /* 1415 /*
1417 * If setting debug or promiscuous mode, do not reset 1416 * If setting debug or promiscuous mode, do not reset
1418 * the chip; for everything else, call hme_init() 1417 * the chip; for everything else, call hme_init()
1419 * which will trigger a reset. 1418 * which will trigger a reset.
1420 */ 1419 */
1421#define RESETIGN (IFF_CANTCHANGE | IFF_DEBUG) 1420#define RESETIGN (IFF_CANTCHANGE | IFF_DEBUG)
1422 if (ifp->if_flags != sc->sc_if_flags) { 1421 if (ifp->if_flags != sc->sc_if_flags) {
1423 if ((ifp->if_flags & (~RESETIGN)) 1422 if ((ifp->if_flags & (~RESETIGN))
1424 == (sc->sc_if_flags & (~RESETIGN))) 1423 == (sc->sc_if_flags & (~RESETIGN)))
1425 hme_setladrf(sc); 1424 hme_setladrf(sc);
1426 else 1425 else
1427 error = hme_init(sc); 1426 error = hme_init(sc);
1428 } 1427 }
1429#undef RESETIGN 1428#undef RESETIGN
1430 break; 1429 break;
1431 case 0: 1430 case 0:
1432 break; 1431 break;
1433 } 1432 }
1434 1433
1435 if (sc->sc_ec_capenable != sc->sc_ethercom.ec_capenable) 1434 if (sc->sc_ec_capenable != sc->sc_ethercom.ec_capenable)
1436 error = hme_init(sc); 1435 error = hme_init(sc);
1437 1436
1438 break; 1437 break;
1439 1438
1440 default: 1439 default:
1441 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET) 1440 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
1442 break; 1441 break;
1443 1442
1444 error = 0; 1443 error = 0;
1445 1444
1446 if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI) 1445 if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
1447 ; 1446 ;
1448 else if (ifp->if_flags & IFF_RUNNING) { 1447 else if (ifp->if_flags & IFF_RUNNING) {
1449 /* 1448 /*
1450 * Multicast list has changed; set the hardware filter 1449 * Multicast list has changed; set the hardware filter
1451 * accordingly. 1450 * accordingly.
1452 */ 1451 */
1453 hme_setladrf(sc); 1452 hme_setladrf(sc);
1454 } 1453 }
1455 break; 1454 break;
1456 } 1455 }
1457 1456
1458 sc->sc_if_flags = ifp->if_flags; 1457 sc->sc_if_flags = ifp->if_flags;
1459 splx(s); 1458 splx(s);
1460 return (error); 1459 return (error);
1461} 1460}
1462 1461
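The RESETIGN test in the SIOCSIFFLAGS case above compares the old and new
interface flags with IFF_CANTCHANGE and IFF_DEBUG masked out: a change
confined to those bits (e.g. IFF_PROMISC, which NetBSD includes in
IFF_CANTCHANGE, or IFF_DEBUG) only reloads the address filter, while any
other flag change forces a full hme_init(). A standalone sketch of the
predicate, assuming the flag definitions from <net/if.h>:

/*
 * Sketch of the flag comparison used above.  If the flags are equal
 * outside of RESETIGN, only hme_setladrf() needs to run; otherwise
 * the chip must be reinitialized.
 */
#define RESETIGN	(IFF_CANTCHANGE | IFF_DEBUG)

static int
hme_flags_need_reset(int oldflags, int newflags)
{

	return (oldflags & ~RESETIGN) != (newflags & ~RESETIGN);
}
#undef RESETIGN
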
1463void 1462void
1464hme_shutdown(void *arg) 1463hme_shutdown(void *arg)
1465{ 1464{
1466 1465
1467 hme_stop((struct hme_softc *)arg, false); 1466 hme_stop((struct hme_softc *)arg, false);
1468} 1467}
1469 1468
1470/* 1469/*
1471 * Set up the logical address filter. 1470 * Set up the logical address filter.
1472 */ 1471 */
1473void 1472void
1474hme_setladrf(struct hme_softc *sc) 1473hme_setladrf(struct hme_softc *sc)
1475{ 1474{
1476 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1475 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1477 struct ether_multi *enm; 1476 struct ether_multi *enm;
1478 struct ether_multistep step; 1477 struct ether_multistep step;
1479 struct ethercom *ec = &sc->sc_ethercom; 1478 struct ethercom *ec = &sc->sc_ethercom;
1480 bus_space_tag_t t = sc->sc_bustag; 1479 bus_space_tag_t t = sc->sc_bustag;
1481 bus_space_handle_t mac = sc->sc_mac; 1480 bus_space_handle_t mac = sc->sc_mac;
1482 u_char *cp; 1481 u_char *cp;
1483 u_int32_t crc; 1482 u_int32_t crc;
1484 u_int32_t hash[4]; 1483 u_int32_t hash[4];
1485 u_int32_t v; 1484 u_int32_t v;
1486 int len; 1485 int len;
1487 1486
1488 /* Clear hash table */ 1487 /* Clear hash table */
1489 hash[3] = hash[2] = hash[1] = hash[0] = 0; 1488 hash[3] = hash[2] = hash[1] = hash[0] = 0;
1490 1489
1491 /* Get current RX configuration */ 1490 /* Get current RX configuration */
1492 v = bus_space_read_4(t, mac, HME_MACI_RXCFG); 1491 v = bus_space_read_4(t, mac, HME_MACI_RXCFG);
1493 1492
1494 if ((ifp->if_flags & IFF_PROMISC) != 0) { 1493 if ((ifp->if_flags & IFF_PROMISC) != 0) {
1495 /* Turn on promiscuous mode; turn off the hash filter */ 1494 /* Turn on promiscuous mode; turn off the hash filter */
1496 v |= HME_MAC_RXCFG_PMISC; 1495 v |= HME_MAC_RXCFG_PMISC;
1497 v &= ~HME_MAC_RXCFG_HENABLE; 1496 v &= ~HME_MAC_RXCFG_HENABLE;
1498 ifp->if_flags |= IFF_ALLMULTI; 1497 ifp->if_flags |= IFF_ALLMULTI;
1499 goto chipit; 1498 goto chipit;
1500 } 1499 }
1501 1500
1502 /* Turn off promiscuous mode; turn on the hash filter */ 1501 /* Turn off promiscuous mode; turn on the hash filter */
1503 v &= ~HME_MAC_RXCFG_PMISC; 1502 v &= ~HME_MAC_RXCFG_PMISC;
1504 v |= HME_MAC_RXCFG_HENABLE; 1503 v |= HME_MAC_RXCFG_HENABLE;
1505 1504
1506 /* 1505 /*
1507 * Set up multicast address filter by passing all multicast addresses 1506 * Set up multicast address filter by passing all multicast addresses
1508 * through a crc generator, and then using the high order 6 bits as an 1507 * through a crc generator, and then using the high order 6 bits as an
1509	 * index into the 64 bit logical address filter. The two high order 1508	 * index into the 64 bit logical address filter. The two high order
1510	 * bits select the word, while the remaining four bits select the 1509	 * bits select the word, while the remaining four bits select the
1511	 * bit within the word. 1510	 * bit within the word.
1512 */ 1511 */
1513 1512
1514 ETHER_FIRST_MULTI(step, ec, enm); 1513 ETHER_FIRST_MULTI(step, ec, enm);
1515 while (enm != NULL) { 1514 while (enm != NULL) {
1516 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 1515 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1517 /* 1516 /*
1518 * We must listen to a range of multicast addresses. 1517 * We must listen to a range of multicast addresses.
1519 * For now, just accept all multicasts, rather than 1518 * For now, just accept all multicasts, rather than
1520 * trying to set only those filter bits needed to match 1519 * trying to set only those filter bits needed to match
1521 * the range. (At this time, the only use of address 1520 * the range. (At this time, the only use of address
1522 * ranges is for IP multicast routing, for which the 1521 * ranges is for IP multicast routing, for which the
1523 * range is big enough to require all bits set.) 1522 * range is big enough to require all bits set.)
1524 */ 1523 */
1525 hash[3] = hash[2] = hash[1] = hash[0] = 0xffff; 1524 hash[3] = hash[2] = hash[1] = hash[0] = 0xffff;
1526 ifp->if_flags |= IFF_ALLMULTI; 1525 ifp->if_flags |= IFF_ALLMULTI;
1527 goto chipit; 1526 goto chipit;
1528 } 1527 }
1529 1528
1530 cp = enm->enm_addrlo; 1529 cp = enm->enm_addrlo;
1531 crc = 0xffffffff; 1530 crc = 0xffffffff;
1532 for (len = sizeof(enm->enm_addrlo); --len >= 0;) { 1531 for (len = sizeof(enm->enm_addrlo); --len >= 0;) {
1533 int octet = *cp++; 1532 int octet = *cp++;
1534 int i; 1533 int i;
1535 1534
1536#define MC_POLY_LE 0xedb88320UL /* mcast crc, little endian */ 1535#define MC_POLY_LE 0xedb88320UL /* mcast crc, little endian */
1537 for (i = 0; i < 8; i++) { 1536 for (i = 0; i < 8; i++) {
1538 if ((crc & 1) ^ (octet & 1)) { 1537 if ((crc & 1) ^ (octet & 1)) {
1539 crc >>= 1; 1538 crc >>= 1;
1540 crc ^= MC_POLY_LE; 1539 crc ^= MC_POLY_LE;
1541 } else { 1540 } else {
1542 crc >>= 1; 1541 crc >>= 1;
1543 } 1542 }
1544 octet >>= 1; 1543 octet >>= 1;
1545 } 1544 }
1546 } 1545 }
1547 /* Just want the 6 most significant bits. */ 1546 /* Just want the 6 most significant bits. */
1548 crc >>= 26; 1547 crc >>= 26;
1549 1548
1550 /* Set the corresponding bit in the filter. */ 1549 /* Set the corresponding bit in the filter. */
1551 hash[crc >> 4] |= 1 << (crc & 0xf); 1550 hash[crc >> 4] |= 1 << (crc & 0xf);
1552 1551
1553 ETHER_NEXT_MULTI(step, enm); 1552 ETHER_NEXT_MULTI(step, enm);
1554 } 1553 }
1555 1554
1556 ifp->if_flags &= ~IFF_ALLMULTI; 1555 ifp->if_flags &= ~IFF_ALLMULTI;
1557 1556
1558chipit: 1557chipit:
1559 /* Now load the hash table into the chip */ 1558 /* Now load the hash table into the chip */
1560 bus_space_write_4(t, mac, HME_MACI_HASHTAB0, hash[0]); 1559 bus_space_write_4(t, mac, HME_MACI_HASHTAB0, hash[0]);
1561 bus_space_write_4(t, mac, HME_MACI_HASHTAB1, hash[1]); 1560 bus_space_write_4(t, mac, HME_MACI_HASHTAB1, hash[1]);
1562 bus_space_write_4(t, mac, HME_MACI_HASHTAB2, hash[2]); 1561 bus_space_write_4(t, mac, HME_MACI_HASHTAB2, hash[2]);
1563 bus_space_write_4(t, mac, HME_MACI_HASHTAB3, hash[3]); 1562 bus_space_write_4(t, mac, HME_MACI_HASHTAB3, hash[3]);
1564 bus_space_write_4(t, mac, HME_MACI_RXCFG, v); 1563 bus_space_write_4(t, mac, HME_MACI_RXCFG, v);
1565} 1564}
1566 1565
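The hash computation in hme_setladrf() is the standard little-endian
(bit-reversed) Ethernet CRC-32 with polynomial 0xedb88320 and initial value
0xffffffff, keeping only the 6 most significant bits; it is equivalent in
effect to ether_crc32_le() followed by a right shift of 26. A self-contained
sketch of the word/bit selection, with hme_hash_bit() as an illustrative name:

/*
 * Self-contained sketch of the filter-bit computation performed in
 * the loop above: CRC-32/LE over the 6-byte address, then the top
 * two of the remaining 6 bits pick one of the four 16-bit HASHTAB
 * words and the low four bits pick the bit within that word.
 */
#include <stdint.h>

static void
hme_hash_bit(const uint8_t addr[6], int *wordp, int *bitp)
{
	uint32_t crc = 0xffffffff;
	int i, j;

	for (i = 0; i < 6; i++) {
		uint32_t octet = addr[i];

		for (j = 0; j < 8; j++) {
			if ((crc ^ octet) & 1)
				crc = (crc >> 1) ^ 0xedb88320UL;
			else
				crc >>= 1;
			octet >>= 1;
		}
	}
	crc >>= 26;		/* keep the 6 most significant bits */
	*wordp = crc >> 4;	/* selects hash[0] .. hash[3] */
	*bitp = crc & 0xf;	/* selects a bit in a 16-bit word */
}

A caller would then do hash[word] |= 1 << bit, exactly as the driver does
with hash[crc >> 4] |= 1 << (crc & 0xf).
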
1567/* 1566/*
1568 * Routines for accessing the transmit and receive buffers. 1567 * Routines for accessing the transmit and receive buffers.
1569 * The various CPU and adapter configurations supported by this 1568 * The various CPU and adapter configurations supported by this
1570 * driver require three different access methods for buffers 1569 * driver require three different access methods for buffers
1571 * and descriptors: 1570 * and descriptors:
1572 * (1) contig (contiguous data; no padding), 1571 * (1) contig (contiguous data; no padding),
1573 * (2) gap2 (two bytes of data followed by two bytes of padding), 1572 * (2) gap2 (two bytes of data followed by two bytes of padding),
1574 * (3) gap16 (16 bytes of data followed by 16 bytes of padding). 1573 * (3) gap16 (16 bytes of data followed by 16 bytes of padding).
1575 */ 1574 */
1576 1575
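Of the three access methods, only the contig copy routines survive below, and
only under #if 0. To make the gap layouts concrete, here is a hypothetical
gap2 copy loop; no such routine exists in this file, and it is a sketch under
the assumption stated in the comment above that every two data bytes are
followed by two pad bytes:

/*
 * Hypothetical gap2 copy: 2 bytes of data, then 2 bytes of padding,
 * so the destination advances 4 bytes for every 2 source bytes
 * copied.  Illustration only, not driver code.
 */
#include <stdint.h>
#include <string.h>

static void
copytobuf_gap2(uint8_t *buf, const uint8_t *from, int len)
{

	while (len > 0) {
		int n = (len > 2) ? 2 : len;

		memcpy(buf, from, n);	/* 2 bytes of data... */
		buf += 4;		/* ...then skip 2 pad bytes */
		from += n;
		len -= n;
	}
}
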
1577#if 0 1576#if 0
1578/* 1577/*
1579 * contig: contiguous data with no padding. 1578 * contig: contiguous data with no padding.
1580 * 1579 *
1581 * Buffers may have any alignment. 1580 * Buffers may have any alignment.
1582 */ 1581 */
1583 1582
1584void 1583void
1585hme_copytobuf_contig(struct hme_softc *sc, void *from, int ri, int len) 1584hme_copytobuf_contig(struct hme_softc *sc, void *from, int ri, int len)
1586{ 1585{
1587 volatile void *buf = sc->sc_rb.rb_txbuf + (ri * _HME_BUFSZ); 1586 volatile void *buf = sc->sc_rb.rb_txbuf + (ri * _HME_BUFSZ);
1588 1587
1589 /* 1588 /*
1590 * Just call memcpy() to do the work. 1589 * Just call memcpy() to do the work.
1591 */ 1590 */
1592 memcpy(buf, from, len); 1591 memcpy(buf, from, len);
1593} 1592}
1594 1593
1595void 1594void
1596hme_copyfrombuf_contig(struct hme_softc *sc, void *to, int ri, int len) 1595hme_copyfrombuf_contig(struct hme_softc *sc, void *to, int ri, int len)
1597{ 1596{
1598 volatile void *buf = sc->sc_rb.rb_rxbuf + (ri * _HME_BUFSZ); 1597 volatile void *buf = sc->sc_rb.rb_rxbuf + (ri * _HME_BUFSZ);
1599 1598
1600 /* 1599 /*
1601 * Just call memcpy() to do the work. 1600 * Just call memcpy() to do the work.
1602 */ 1601 */
1603 memcpy(to, buf, len); 1602 memcpy(to, buf, len);
1604} 1603}
1605#endif 1604#endif