Sun May 3 17:56:06 2009 UTC ()
Pull up following revision(s) (requested by tls in ticket #627):
	sys/dev/pci/if_wm.c: revision 1.172
Missed a small but important change to enable hardware VLAN support.


(snj)
diff -r1.162.4.5 -r1.162.4.6 src/sys/dev/pci/if_wm.c

cvs diff -r1.162.4.5 -r1.162.4.6 src/sys/dev/pci/if_wm.c (switch to unified diff)

--- src/sys/dev/pci/if_wm.c 2009/05/03 17:54:07 1.162.4.5
+++ src/sys/dev/pci/if_wm.c 2009/05/03 17:56:05 1.162.4.6
@@ -1,2548 +1,2548 @@ @@ -1,2548 +1,2548 @@
1/* $NetBSD: if_wm.c,v 1.162.4.5 2009/05/03 17:54:07 snj Exp $ */ 1/* $NetBSD: if_wm.c,v 1.162.4.6 2009/05/03 17:56:05 snj Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc. 4 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc. 7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the 15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution. 16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software 17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement: 18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by 19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc. 20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior 22 * or promote products derived from this software without specific prior
23 * written permission. 23 * written permission.
24 * 24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE. 35 * POSSIBILITY OF SUCH DAMAGE.
36 */ 36 */
37 37
38/******************************************************************************* 38/*******************************************************************************
39 39
40 Copyright (c) 2001-2005, Intel Corporation  40 Copyright (c) 2001-2005, Intel Corporation
41 All rights reserved. 41 All rights reserved.
42  42
43 Redistribution and use in source and binary forms, with or without  43 Redistribution and use in source and binary forms, with or without
44 modification, are permitted provided that the following conditions are met: 44 modification, are permitted provided that the following conditions are met:
45  45
46 1. Redistributions of source code must retain the above copyright notice,  46 1. Redistributions of source code must retain the above copyright notice,
47 this list of conditions and the following disclaimer. 47 this list of conditions and the following disclaimer.
48  48
49 2. Redistributions in binary form must reproduce the above copyright  49 2. Redistributions in binary form must reproduce the above copyright
50 notice, this list of conditions and the following disclaimer in the  50 notice, this list of conditions and the following disclaimer in the
51 documentation and/or other materials provided with the distribution. 51 documentation and/or other materials provided with the distribution.
52  52
53 3. Neither the name of the Intel Corporation nor the names of its  53 3. Neither the name of the Intel Corporation nor the names of its
54 contributors may be used to endorse or promote products derived from  54 contributors may be used to endorse or promote products derived from
55 this software without specific prior written permission. 55 this software without specific prior written permission.
56  56
57 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 57 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE  58 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE  59 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE  60 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR  61 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF  62 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS  63 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN  64 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)  65 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 66 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67 POSSIBILITY OF SUCH DAMAGE. 67 POSSIBILITY OF SUCH DAMAGE.
68 68
69*******************************************************************************/ 69*******************************************************************************/
70/* 70/*
71 * Device driver for the Intel i8254x family of Gigabit Ethernet chips. 71 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72 * 72 *
73 * TODO (in order of importance): 73 * TODO (in order of importance):
74 * 74 *
75 * - Rework how parameters are loaded from the EEPROM. 75 * - Rework how parameters are loaded from the EEPROM.
76 * - Figure out what to do with the i82545GM and i82546GB 76 * - Figure out what to do with the i82545GM and i82546GB
77 * SERDES controllers. 77 * SERDES controllers.
78 * - Fix hw VLAN assist. 78 * - Fix hw VLAN assist.
79 */ 79 */
80 80
81#include <sys/cdefs.h> 81#include <sys/cdefs.h>
82__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.162.4.5 2009/05/03 17:54:07 snj Exp $"); 82__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.162.4.6 2009/05/03 17:56:05 snj Exp $");
83 83
84#include "bpfilter.h" 84#include "bpfilter.h"
85#include "rnd.h" 85#include "rnd.h"
86 86
87#include <sys/param.h> 87#include <sys/param.h>
88#include <sys/systm.h> 88#include <sys/systm.h>
89#include <sys/callout.h> 89#include <sys/callout.h>
90#include <sys/mbuf.h> 90#include <sys/mbuf.h>
91#include <sys/malloc.h> 91#include <sys/malloc.h>
92#include <sys/kernel.h> 92#include <sys/kernel.h>
93#include <sys/socket.h> 93#include <sys/socket.h>
94#include <sys/ioctl.h> 94#include <sys/ioctl.h>
95#include <sys/errno.h> 95#include <sys/errno.h>
96#include <sys/device.h> 96#include <sys/device.h>
97#include <sys/queue.h> 97#include <sys/queue.h>
98#include <sys/syslog.h> 98#include <sys/syslog.h>
99 99
100#include <uvm/uvm_extern.h> /* for PAGE_SIZE */ 100#include <uvm/uvm_extern.h> /* for PAGE_SIZE */
101 101
102#if NRND > 0 102#if NRND > 0
103#include <sys/rnd.h> 103#include <sys/rnd.h>
104#endif 104#endif
105 105
106#include <net/if.h> 106#include <net/if.h>
107#include <net/if_dl.h> 107#include <net/if_dl.h>
108#include <net/if_media.h> 108#include <net/if_media.h>
109#include <net/if_ether.h> 109#include <net/if_ether.h>
110 110
111#if NBPFILTER > 0 111#if NBPFILTER > 0
112#include <net/bpf.h> 112#include <net/bpf.h>
113#endif 113#endif
114 114
115#include <netinet/in.h> /* XXX for struct ip */ 115#include <netinet/in.h> /* XXX for struct ip */
116#include <netinet/in_systm.h> /* XXX for struct ip */ 116#include <netinet/in_systm.h> /* XXX for struct ip */
117#include <netinet/ip.h> /* XXX for struct ip */ 117#include <netinet/ip.h> /* XXX for struct ip */
118#include <netinet/ip6.h> /* XXX for struct ip6_hdr */ 118#include <netinet/ip6.h> /* XXX for struct ip6_hdr */
119#include <netinet/tcp.h> /* XXX for struct tcphdr */ 119#include <netinet/tcp.h> /* XXX for struct tcphdr */
120 120
121#include <sys/bus.h> 121#include <sys/bus.h>
122#include <sys/intr.h> 122#include <sys/intr.h>
123#include <machine/endian.h> 123#include <machine/endian.h>
124 124
125#include <dev/mii/mii.h> 125#include <dev/mii/mii.h>
126#include <dev/mii/miivar.h> 126#include <dev/mii/miivar.h>
127#include <dev/mii/mii_bitbang.h> 127#include <dev/mii/mii_bitbang.h>
128#include <dev/mii/ikphyreg.h> 128#include <dev/mii/ikphyreg.h>
129 129
130#include <dev/pci/pcireg.h> 130#include <dev/pci/pcireg.h>
131#include <dev/pci/pcivar.h> 131#include <dev/pci/pcivar.h>
132#include <dev/pci/pcidevs.h> 132#include <dev/pci/pcidevs.h>
133 133
134#include <dev/pci/if_wmreg.h> 134#include <dev/pci/if_wmreg.h>
135 135
136#ifdef WM_DEBUG 136#ifdef WM_DEBUG
137#define WM_DEBUG_LINK 0x01 137#define WM_DEBUG_LINK 0x01
138#define WM_DEBUG_TX 0x02 138#define WM_DEBUG_TX 0x02
139#define WM_DEBUG_RX 0x04 139#define WM_DEBUG_RX 0x04
140#define WM_DEBUG_GMII 0x08 140#define WM_DEBUG_GMII 0x08
141int wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK|WM_DEBUG_GMII; 141int wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK|WM_DEBUG_GMII;
142 142
143#define DPRINTF(x, y) if (wm_debug & (x)) printf y 143#define DPRINTF(x, y) if (wm_debug & (x)) printf y
144#else 144#else
145#define DPRINTF(x, y) /* nothing */ 145#define DPRINTF(x, y) /* nothing */
146#endif /* WM_DEBUG */ 146#endif /* WM_DEBUG */
147 147
148/* 148/*
149 * Transmit descriptor list size. Due to errata, we can only have 149 * Transmit descriptor list size. Due to errata, we can only have
150 * 256 hardware descriptors in the ring on < 82544, but we use 4096 150 * 256 hardware descriptors in the ring on < 82544, but we use 4096
151 * on >= 82544. We tell the upper layers that they can queue a lot 151 * on >= 82544. We tell the upper layers that they can queue a lot
152 * of packets, and we go ahead and manage up to 64 (16 for the i82547) 152 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
153 * of them at a time. 153 * of them at a time.
154 * 154 *
155 * We allow up to 256 (!) DMA segments per packet. Pathological packet 155 * We allow up to 256 (!) DMA segments per packet. Pathological packet
156 * chains containing many small mbufs have been observed in zero-copy 156 * chains containing many small mbufs have been observed in zero-copy
157 * situations with jumbo frames. 157 * situations with jumbo frames.
158 */ 158 */
159#define WM_NTXSEGS 256 159#define WM_NTXSEGS 256
160#define WM_IFQUEUELEN 256 160#define WM_IFQUEUELEN 256
161#define WM_TXQUEUELEN_MAX 64 161#define WM_TXQUEUELEN_MAX 64
162#define WM_TXQUEUELEN_MAX_82547 16 162#define WM_TXQUEUELEN_MAX_82547 16
163#define WM_TXQUEUELEN(sc) ((sc)->sc_txnum) 163#define WM_TXQUEUELEN(sc) ((sc)->sc_txnum)
164#define WM_TXQUEUELEN_MASK(sc) (WM_TXQUEUELEN(sc) - 1) 164#define WM_TXQUEUELEN_MASK(sc) (WM_TXQUEUELEN(sc) - 1)
165#define WM_TXQUEUE_GC(sc) (WM_TXQUEUELEN(sc) / 8) 165#define WM_TXQUEUE_GC(sc) (WM_TXQUEUELEN(sc) / 8)
166#define WM_NTXDESC_82542 256 166#define WM_NTXDESC_82542 256
167#define WM_NTXDESC_82544 4096 167#define WM_NTXDESC_82544 4096
168#define WM_NTXDESC(sc) ((sc)->sc_ntxdesc) 168#define WM_NTXDESC(sc) ((sc)->sc_ntxdesc)
169#define WM_NTXDESC_MASK(sc) (WM_NTXDESC(sc) - 1) 169#define WM_NTXDESC_MASK(sc) (WM_NTXDESC(sc) - 1)
170#define WM_TXDESCSIZE(sc) (WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t)) 170#define WM_TXDESCSIZE(sc) (WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
171#define WM_NEXTTX(sc, x) (((x) + 1) & WM_NTXDESC_MASK(sc)) 171#define WM_NEXTTX(sc, x) (((x) + 1) & WM_NTXDESC_MASK(sc))
172#define WM_NEXTTXS(sc, x) (((x) + 1) & WM_TXQUEUELEN_MASK(sc)) 172#define WM_NEXTTXS(sc, x) (((x) + 1) & WM_TXQUEUELEN_MASK(sc))
173 173
174#define WM_MAXTXDMA round_page(IP_MAXPACKET) /* for TSO */ 174#define WM_MAXTXDMA round_page(IP_MAXPACKET) /* for TSO */
175 175
176/* 176/*
177 * Receive descriptor list size. We have one Rx buffer for normal 177 * Receive descriptor list size. We have one Rx buffer for normal
178 * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized 178 * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized
179 * packet. We allocate 256 receive descriptors, each with a 2k 179 * packet. We allocate 256 receive descriptors, each with a 2k
180 * buffer (MCLBYTES), which gives us room for 50 jumbo packets. 180 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
181 */ 181 */
182#define WM_NRXDESC 256 182#define WM_NRXDESC 256
183#define WM_NRXDESC_MASK (WM_NRXDESC - 1) 183#define WM_NRXDESC_MASK (WM_NRXDESC - 1)
184#define WM_NEXTRX(x) (((x) + 1) & WM_NRXDESC_MASK) 184#define WM_NEXTRX(x) (((x) + 1) & WM_NRXDESC_MASK)
185#define WM_PREVRX(x) (((x) - 1) & WM_NRXDESC_MASK) 185#define WM_PREVRX(x) (((x) - 1) & WM_NRXDESC_MASK)
186 186
187/* 187/*
188 * Control structures are DMA'd to the i82542 chip. We allocate them in 188 * Control structures are DMA'd to the i82542 chip. We allocate them in
189 * a single clump that maps to a single DMA segment to make several things 189 * a single clump that maps to a single DMA segment to make several things
190 * easier. 190 * easier.
191 */ 191 */
192struct wm_control_data_82544 { 192struct wm_control_data_82544 {
193 /* 193 /*
194 * The receive descriptors. 194 * The receive descriptors.
195 */ 195 */
196 wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC]; 196 wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
197 197
198 /* 198 /*
199 * The transmit descriptors. Put these at the end, because 199 * The transmit descriptors. Put these at the end, because
200 * we might use a smaller number of them. 200 * we might use a smaller number of them.
201 */ 201 */
202 wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544]; 202 wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
203}; 203};
204 204
205struct wm_control_data_82542 { 205struct wm_control_data_82542 {
206 wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC]; 206 wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
207 wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542]; 207 wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
208}; 208};
209 209
210#define WM_CDOFF(x) offsetof(struct wm_control_data_82544, x) 210#define WM_CDOFF(x) offsetof(struct wm_control_data_82544, x)
211#define WM_CDTXOFF(x) WM_CDOFF(wcd_txdescs[(x)]) 211#define WM_CDTXOFF(x) WM_CDOFF(wcd_txdescs[(x)])
212#define WM_CDRXOFF(x) WM_CDOFF(wcd_rxdescs[(x)]) 212#define WM_CDRXOFF(x) WM_CDOFF(wcd_rxdescs[(x)])
213 213
214/* 214/*
215 * Software state for transmit jobs. 215 * Software state for transmit jobs.
216 */ 216 */
217struct wm_txsoft { 217struct wm_txsoft {
218 struct mbuf *txs_mbuf; /* head of our mbuf chain */ 218 struct mbuf *txs_mbuf; /* head of our mbuf chain */
219 bus_dmamap_t txs_dmamap; /* our DMA map */ 219 bus_dmamap_t txs_dmamap; /* our DMA map */
220 int txs_firstdesc; /* first descriptor in packet */ 220 int txs_firstdesc; /* first descriptor in packet */
221 int txs_lastdesc; /* last descriptor in packet */ 221 int txs_lastdesc; /* last descriptor in packet */
222 int txs_ndesc; /* # of descriptors used */ 222 int txs_ndesc; /* # of descriptors used */
223}; 223};
224 224
225/* 225/*
226 * Software state for receive buffers. Each descriptor gets a 226 * Software state for receive buffers. Each descriptor gets a
227 * 2k (MCLBYTES) buffer and a DMA map. For packets which fill 227 * 2k (MCLBYTES) buffer and a DMA map. For packets which fill
228 * more than one buffer, we chain them together. 228 * more than one buffer, we chain them together.
229 */ 229 */
230struct wm_rxsoft { 230struct wm_rxsoft {
231 struct mbuf *rxs_mbuf; /* head of our mbuf chain */ 231 struct mbuf *rxs_mbuf; /* head of our mbuf chain */
232 bus_dmamap_t rxs_dmamap; /* our DMA map */ 232 bus_dmamap_t rxs_dmamap; /* our DMA map */
233}; 233};
234 234
235typedef enum { 235typedef enum {
236 WM_T_unknown = 0, 236 WM_T_unknown = 0,
237 WM_T_82542_2_0, /* i82542 2.0 (really old) */ 237 WM_T_82542_2_0, /* i82542 2.0 (really old) */
238 WM_T_82542_2_1, /* i82542 2.1+ (old) */ 238 WM_T_82542_2_1, /* i82542 2.1+ (old) */
239 WM_T_82543, /* i82543 */ 239 WM_T_82543, /* i82543 */
240 WM_T_82544, /* i82544 */ 240 WM_T_82544, /* i82544 */
241 WM_T_82540, /* i82540 */ 241 WM_T_82540, /* i82540 */
242 WM_T_82545, /* i82545 */ 242 WM_T_82545, /* i82545 */
243 WM_T_82545_3, /* i82545 3.0+ */ 243 WM_T_82545_3, /* i82545 3.0+ */
244 WM_T_82546, /* i82546 */ 244 WM_T_82546, /* i82546 */
245 WM_T_82546_3, /* i82546 3.0+ */ 245 WM_T_82546_3, /* i82546 3.0+ */
246 WM_T_82541, /* i82541 */ 246 WM_T_82541, /* i82541 */
247 WM_T_82541_2, /* i82541 2.0+ */ 247 WM_T_82541_2, /* i82541 2.0+ */
248 WM_T_82547, /* i82547 */ 248 WM_T_82547, /* i82547 */
249 WM_T_82547_2, /* i82547 2.0+ */ 249 WM_T_82547_2, /* i82547 2.0+ */
250 WM_T_82571, /* i82571 */ 250 WM_T_82571, /* i82571 */
251 WM_T_82572, /* i82572 */ 251 WM_T_82572, /* i82572 */
252 WM_T_82573, /* i82573 */ 252 WM_T_82573, /* i82573 */
253 WM_T_82574, /* i82574 */ 253 WM_T_82574, /* i82574 */
254 WM_T_80003, /* i80003 */ 254 WM_T_80003, /* i80003 */
255 WM_T_ICH8, /* ICH8 LAN */ 255 WM_T_ICH8, /* ICH8 LAN */
256 WM_T_ICH9, /* ICH9 LAN */ 256 WM_T_ICH9, /* ICH9 LAN */
257} wm_chip_type; 257} wm_chip_type;
258 258
259/* 259/*
260 * Software state per device. 260 * Software state per device.
261 */ 261 */
262struct wm_softc { 262struct wm_softc {
263 device_t sc_dev; /* generic device information */ 263 device_t sc_dev; /* generic device information */
264 bus_space_tag_t sc_st; /* bus space tag */ 264 bus_space_tag_t sc_st; /* bus space tag */
265 bus_space_handle_t sc_sh; /* bus space handle */ 265 bus_space_handle_t sc_sh; /* bus space handle */
266 bus_space_tag_t sc_iot; /* I/O space tag */ 266 bus_space_tag_t sc_iot; /* I/O space tag */
267 bus_space_handle_t sc_ioh; /* I/O space handle */ 267 bus_space_handle_t sc_ioh; /* I/O space handle */
268 bus_space_tag_t sc_flasht; /* flash registers space tag */ 268 bus_space_tag_t sc_flasht; /* flash registers space tag */
269 bus_space_handle_t sc_flashh; /* flash registers space handle */ 269 bus_space_handle_t sc_flashh; /* flash registers space handle */
270 bus_dma_tag_t sc_dmat; /* bus DMA tag */ 270 bus_dma_tag_t sc_dmat; /* bus DMA tag */
271 struct ethercom sc_ethercom; /* ethernet common data */ 271 struct ethercom sc_ethercom; /* ethernet common data */
272 pci_chipset_tag_t sc_pc; 272 pci_chipset_tag_t sc_pc;
273 pcitag_t sc_pcitag; 273 pcitag_t sc_pcitag;
274 274
275 wm_chip_type sc_type; /* chip type */ 275 wm_chip_type sc_type; /* chip type */
276 int sc_flags; /* flags; see below */ 276 int sc_flags; /* flags; see below */
277 int sc_bus_speed; /* PCI/PCIX bus speed */ 277 int sc_bus_speed; /* PCI/PCIX bus speed */
278 int sc_pcix_offset; /* PCIX capability register offset */ 278 int sc_pcix_offset; /* PCIX capability register offset */
279 int sc_flowflags; /* 802.3x flow control flags */ 279 int sc_flowflags; /* 802.3x flow control flags */
280 280
281 void *sc_ih; /* interrupt cookie */ 281 void *sc_ih; /* interrupt cookie */
282 282
283 int sc_ee_addrbits; /* EEPROM address bits */ 283 int sc_ee_addrbits; /* EEPROM address bits */
284 284
285 struct mii_data sc_mii; /* MII/media information */ 285 struct mii_data sc_mii; /* MII/media information */
286 286
287 callout_t sc_tick_ch; /* tick callout */ 287 callout_t sc_tick_ch; /* tick callout */
288 288
289 bus_dmamap_t sc_cddmamap; /* control data DMA map */ 289 bus_dmamap_t sc_cddmamap; /* control data DMA map */
290#define sc_cddma sc_cddmamap->dm_segs[0].ds_addr 290#define sc_cddma sc_cddmamap->dm_segs[0].ds_addr
291 291
292 int sc_align_tweak; 292 int sc_align_tweak;
293 293
294 /* 294 /*
295 * Software state for the transmit and receive descriptors. 295 * Software state for the transmit and receive descriptors.
296 */ 296 */
297 int sc_txnum; /* must be a power of two */ 297 int sc_txnum; /* must be a power of two */
298 struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX]; 298 struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
299 struct wm_rxsoft sc_rxsoft[WM_NRXDESC]; 299 struct wm_rxsoft sc_rxsoft[WM_NRXDESC];
300 300
301 /* 301 /*
302 * Control data structures. 302 * Control data structures.
303 */ 303 */
304 int sc_ntxdesc; /* must be a power of two */ 304 int sc_ntxdesc; /* must be a power of two */
305 struct wm_control_data_82544 *sc_control_data; 305 struct wm_control_data_82544 *sc_control_data;
306#define sc_txdescs sc_control_data->wcd_txdescs 306#define sc_txdescs sc_control_data->wcd_txdescs
307#define sc_rxdescs sc_control_data->wcd_rxdescs 307#define sc_rxdescs sc_control_data->wcd_rxdescs
308 308
309#ifdef WM_EVENT_COUNTERS 309#ifdef WM_EVENT_COUNTERS
310 /* Event counters. */ 310 /* Event counters. */
311 struct evcnt sc_ev_txsstall; /* Tx stalled due to no txs */ 311 struct evcnt sc_ev_txsstall; /* Tx stalled due to no txs */
312 struct evcnt sc_ev_txdstall; /* Tx stalled due to no txd */ 312 struct evcnt sc_ev_txdstall; /* Tx stalled due to no txd */
313 struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */ 313 struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
314 struct evcnt sc_ev_txdw; /* Tx descriptor interrupts */ 314 struct evcnt sc_ev_txdw; /* Tx descriptor interrupts */
315 struct evcnt sc_ev_txqe; /* Tx queue empty interrupts */ 315 struct evcnt sc_ev_txqe; /* Tx queue empty interrupts */
316 struct evcnt sc_ev_rxintr; /* Rx interrupts */ 316 struct evcnt sc_ev_rxintr; /* Rx interrupts */
317 struct evcnt sc_ev_linkintr; /* Link interrupts */ 317 struct evcnt sc_ev_linkintr; /* Link interrupts */
318 318
319 struct evcnt sc_ev_rxipsum; /* IP checksums checked in-bound */ 319 struct evcnt sc_ev_rxipsum; /* IP checksums checked in-bound */
320 struct evcnt sc_ev_rxtusum; /* TCP/UDP cksums checked in-bound */ 320 struct evcnt sc_ev_rxtusum; /* TCP/UDP cksums checked in-bound */
321 struct evcnt sc_ev_txipsum; /* IP checksums comp. out-bound */ 321 struct evcnt sc_ev_txipsum; /* IP checksums comp. out-bound */
322 struct evcnt sc_ev_txtusum; /* TCP/UDP cksums comp. out-bound */ 322 struct evcnt sc_ev_txtusum; /* TCP/UDP cksums comp. out-bound */
323 struct evcnt sc_ev_txtusum6; /* TCP/UDP v6 cksums comp. out-bound */ 323 struct evcnt sc_ev_txtusum6; /* TCP/UDP v6 cksums comp. out-bound */
324 struct evcnt sc_ev_txtso; /* TCP seg offload out-bound (IPv4) */ 324 struct evcnt sc_ev_txtso; /* TCP seg offload out-bound (IPv4) */
325 struct evcnt sc_ev_txtso6; /* TCP seg offload out-bound (IPv6) */ 325 struct evcnt sc_ev_txtso6; /* TCP seg offload out-bound (IPv6) */
326 struct evcnt sc_ev_txtsopain; /* painful header manip. for TSO */ 326 struct evcnt sc_ev_txtsopain; /* painful header manip. for TSO */
327 327
328 struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */ 328 struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
329 struct evcnt sc_ev_txdrop; /* Tx packets dropped (too many segs) */ 329 struct evcnt sc_ev_txdrop; /* Tx packets dropped (too many segs) */
330 330
331 struct evcnt sc_ev_tu; /* Tx underrun */ 331 struct evcnt sc_ev_tu; /* Tx underrun */
332 332
333 struct evcnt sc_ev_tx_xoff; /* Tx PAUSE(!0) frames */ 333 struct evcnt sc_ev_tx_xoff; /* Tx PAUSE(!0) frames */
334 struct evcnt sc_ev_tx_xon; /* Tx PAUSE(0) frames */ 334 struct evcnt sc_ev_tx_xon; /* Tx PAUSE(0) frames */
335 struct evcnt sc_ev_rx_xoff; /* Rx PAUSE(!0) frames */ 335 struct evcnt sc_ev_rx_xoff; /* Rx PAUSE(!0) frames */
336 struct evcnt sc_ev_rx_xon; /* Rx PAUSE(0) frames */ 336 struct evcnt sc_ev_rx_xon; /* Rx PAUSE(0) frames */
337 struct evcnt sc_ev_rx_macctl; /* Rx Unsupported */ 337 struct evcnt sc_ev_rx_macctl; /* Rx Unsupported */
338#endif /* WM_EVENT_COUNTERS */ 338#endif /* WM_EVENT_COUNTERS */
339 339
340 bus_addr_t sc_tdt_reg; /* offset of TDT register */ 340 bus_addr_t sc_tdt_reg; /* offset of TDT register */
341 341
342 int sc_txfree; /* number of free Tx descriptors */ 342 int sc_txfree; /* number of free Tx descriptors */
343 int sc_txnext; /* next ready Tx descriptor */ 343 int sc_txnext; /* next ready Tx descriptor */
344 344
345 int sc_txsfree; /* number of free Tx jobs */ 345 int sc_txsfree; /* number of free Tx jobs */
346 int sc_txsnext; /* next free Tx job */ 346 int sc_txsnext; /* next free Tx job */
347 int sc_txsdirty; /* dirty Tx jobs */ 347 int sc_txsdirty; /* dirty Tx jobs */
348 348
349 /* These 5 variables are used only on the 82547. */ 349 /* These 5 variables are used only on the 82547. */
350 int sc_txfifo_size; /* Tx FIFO size */ 350 int sc_txfifo_size; /* Tx FIFO size */
351 int sc_txfifo_head; /* current head of FIFO */ 351 int sc_txfifo_head; /* current head of FIFO */
352 uint32_t sc_txfifo_addr; /* internal address of start of FIFO */ 352 uint32_t sc_txfifo_addr; /* internal address of start of FIFO */
353 int sc_txfifo_stall; /* Tx FIFO is stalled */ 353 int sc_txfifo_stall; /* Tx FIFO is stalled */
354 callout_t sc_txfifo_ch; /* Tx FIFO stall work-around timer */ 354 callout_t sc_txfifo_ch; /* Tx FIFO stall work-around timer */
355 355
356 bus_addr_t sc_rdt_reg; /* offset of RDT register */ 356 bus_addr_t sc_rdt_reg; /* offset of RDT register */
357 357
358 int sc_rxptr; /* next ready Rx descriptor/queue ent */ 358 int sc_rxptr; /* next ready Rx descriptor/queue ent */
359 int sc_rxdiscard; 359 int sc_rxdiscard;
360 int sc_rxlen; 360 int sc_rxlen;
361 struct mbuf *sc_rxhead; 361 struct mbuf *sc_rxhead;
362 struct mbuf *sc_rxtail; 362 struct mbuf *sc_rxtail;
363 struct mbuf **sc_rxtailp; 363 struct mbuf **sc_rxtailp;
364 364
365 uint32_t sc_ctrl; /* prototype CTRL register */ 365 uint32_t sc_ctrl; /* prototype CTRL register */
366#if 0 366#if 0
367 uint32_t sc_ctrl_ext; /* prototype CTRL_EXT register */ 367 uint32_t sc_ctrl_ext; /* prototype CTRL_EXT register */
368#endif 368#endif
369 uint32_t sc_icr; /* prototype interrupt bits */ 369 uint32_t sc_icr; /* prototype interrupt bits */
370 uint32_t sc_itr; /* prototype intr throttling reg */ 370 uint32_t sc_itr; /* prototype intr throttling reg */
371 uint32_t sc_tctl; /* prototype TCTL register */ 371 uint32_t sc_tctl; /* prototype TCTL register */
372 uint32_t sc_rctl; /* prototype RCTL register */ 372 uint32_t sc_rctl; /* prototype RCTL register */
373 uint32_t sc_txcw; /* prototype TXCW register */ 373 uint32_t sc_txcw; /* prototype TXCW register */
374 uint32_t sc_tipg; /* prototype TIPG register */ 374 uint32_t sc_tipg; /* prototype TIPG register */
375 uint32_t sc_fcrtl; /* prototype FCRTL register */ 375 uint32_t sc_fcrtl; /* prototype FCRTL register */
376 uint32_t sc_pba; /* prototype PBA register */ 376 uint32_t sc_pba; /* prototype PBA register */
377 377
378 int sc_tbi_linkup; /* TBI link status */ 378 int sc_tbi_linkup; /* TBI link status */
379 int sc_tbi_anstate; /* autonegotiation state */ 379 int sc_tbi_anstate; /* autonegotiation state */
380 380
381 int sc_mchash_type; /* multicast filter offset */ 381 int sc_mchash_type; /* multicast filter offset */
382 382
383#if NRND > 0 383#if NRND > 0
384 rndsource_element_t rnd_source; /* random source */ 384 rndsource_element_t rnd_source; /* random source */
385#endif 385#endif
386 int sc_ich8_flash_base; 386 int sc_ich8_flash_base;
387 int sc_ich8_flash_bank_size; 387 int sc_ich8_flash_bank_size;
388}; 388};
389 389
390#define WM_RXCHAIN_RESET(sc) \ 390#define WM_RXCHAIN_RESET(sc) \
391do { \ 391do { \
392 (sc)->sc_rxtailp = &(sc)->sc_rxhead; \ 392 (sc)->sc_rxtailp = &(sc)->sc_rxhead; \
393 *(sc)->sc_rxtailp = NULL; \ 393 *(sc)->sc_rxtailp = NULL; \
394 (sc)->sc_rxlen = 0; \ 394 (sc)->sc_rxlen = 0; \
395} while (/*CONSTCOND*/0) 395} while (/*CONSTCOND*/0)
396 396
397#define WM_RXCHAIN_LINK(sc, m) \ 397#define WM_RXCHAIN_LINK(sc, m) \
398do { \ 398do { \
399 *(sc)->sc_rxtailp = (sc)->sc_rxtail = (m); \ 399 *(sc)->sc_rxtailp = (sc)->sc_rxtail = (m); \
400 (sc)->sc_rxtailp = &(m)->m_next; \ 400 (sc)->sc_rxtailp = &(m)->m_next; \
401} while (/*CONSTCOND*/0) 401} while (/*CONSTCOND*/0)
402 402
403/* sc_flags */ 403/* sc_flags */
404#define WM_F_HAS_MII 0x0001 /* has MII */ 404#define WM_F_HAS_MII 0x0001 /* has MII */
405#define WM_F_EEPROM_HANDSHAKE 0x0002 /* requires EEPROM handshake */ 405#define WM_F_EEPROM_HANDSHAKE 0x0002 /* requires EEPROM handshake */
406#define WM_F_EEPROM_SEMAPHORE 0x0004 /* EEPROM with semaphore */ 406#define WM_F_EEPROM_SEMAPHORE 0x0004 /* EEPROM with semaphore */
407#define WM_F_EEPROM_EERDEEWR 0x0008 /* EEPROM access via EERD/EEWR */ 407#define WM_F_EEPROM_EERDEEWR 0x0008 /* EEPROM access via EERD/EEWR */
408#define WM_F_EEPROM_SPI 0x0010 /* EEPROM is SPI */ 408#define WM_F_EEPROM_SPI 0x0010 /* EEPROM is SPI */
409#define WM_F_EEPROM_FLASH 0x0020 /* EEPROM is FLASH */ 409#define WM_F_EEPROM_FLASH 0x0020 /* EEPROM is FLASH */
410#define WM_F_EEPROM_INVALID 0x0040 /* EEPROM not present (bad checksum) */ 410#define WM_F_EEPROM_INVALID 0x0040 /* EEPROM not present (bad checksum) */
411#define WM_F_IOH_VALID 0x0080 /* I/O handle is valid */ 411#define WM_F_IOH_VALID 0x0080 /* I/O handle is valid */
412#define WM_F_BUS64 0x0100 /* bus is 64-bit */ 412#define WM_F_BUS64 0x0100 /* bus is 64-bit */
413#define WM_F_PCIX 0x0200 /* bus is PCI-X */ 413#define WM_F_PCIX 0x0200 /* bus is PCI-X */
414#define WM_F_CSA 0x0400 /* bus is CSA */ 414#define WM_F_CSA 0x0400 /* bus is CSA */
415#define WM_F_PCIE 0x0800 /* bus is PCI-Express */ 415#define WM_F_PCIE 0x0800 /* bus is PCI-Express */
416#define WM_F_SWFW_SYNC 0x1000 /* Software-Firmware synchronisation */ 416#define WM_F_SWFW_SYNC 0x1000 /* Software-Firmware synchronisation */
417#define WM_F_SWFWHW_SYNC 0x2000 /* Software-Firmware synchronisation */ 417#define WM_F_SWFWHW_SYNC 0x2000 /* Software-Firmware synchronisation */
418 418
419#ifdef WM_EVENT_COUNTERS 419#ifdef WM_EVENT_COUNTERS
420#define WM_EVCNT_INCR(ev) (ev)->ev_count++ 420#define WM_EVCNT_INCR(ev) (ev)->ev_count++
421#define WM_EVCNT_ADD(ev, val) (ev)->ev_count += (val) 421#define WM_EVCNT_ADD(ev, val) (ev)->ev_count += (val)
422#else 422#else
423#define WM_EVCNT_INCR(ev) /* nothing */ 423#define WM_EVCNT_INCR(ev) /* nothing */
424#define WM_EVCNT_ADD(ev, val) /* nothing */ 424#define WM_EVCNT_ADD(ev, val) /* nothing */
425#endif 425#endif
426 426
427#define CSR_READ(sc, reg) \ 427#define CSR_READ(sc, reg) \
428 bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg)) 428 bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
429#define CSR_WRITE(sc, reg, val) \ 429#define CSR_WRITE(sc, reg, val) \
430 bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val)) 430 bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
431#define CSR_WRITE_FLUSH(sc) \ 431#define CSR_WRITE_FLUSH(sc) \
432 (void) CSR_READ((sc), WMREG_STATUS) 432 (void) CSR_READ((sc), WMREG_STATUS)
433 433
434#define ICH8_FLASH_READ32(sc, reg) \ 434#define ICH8_FLASH_READ32(sc, reg) \
435 bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg)) 435 bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
436#define ICH8_FLASH_WRITE32(sc, reg, data) \ 436#define ICH8_FLASH_WRITE32(sc, reg, data) \
437 bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data)) 437 bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
438 438
439#define ICH8_FLASH_READ16(sc, reg) \ 439#define ICH8_FLASH_READ16(sc, reg) \
440 bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg)) 440 bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
441#define ICH8_FLASH_WRITE16(sc, reg, data) \ 441#define ICH8_FLASH_WRITE16(sc, reg, data) \
442 bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data)) 442 bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
443 443
444#define WM_CDTXADDR(sc, x) ((sc)->sc_cddma + WM_CDTXOFF((x))) 444#define WM_CDTXADDR(sc, x) ((sc)->sc_cddma + WM_CDTXOFF((x)))
445#define WM_CDRXADDR(sc, x) ((sc)->sc_cddma + WM_CDRXOFF((x))) 445#define WM_CDRXADDR(sc, x) ((sc)->sc_cddma + WM_CDRXOFF((x)))
446 446
447#define WM_CDTXADDR_LO(sc, x) (WM_CDTXADDR((sc), (x)) & 0xffffffffU) 447#define WM_CDTXADDR_LO(sc, x) (WM_CDTXADDR((sc), (x)) & 0xffffffffU)
448#define WM_CDTXADDR_HI(sc, x) \ 448#define WM_CDTXADDR_HI(sc, x) \
449 (sizeof(bus_addr_t) == 8 ? \ 449 (sizeof(bus_addr_t) == 8 ? \
450 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0) 450 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)
451 451
452#define WM_CDRXADDR_LO(sc, x) (WM_CDRXADDR((sc), (x)) & 0xffffffffU) 452#define WM_CDRXADDR_LO(sc, x) (WM_CDRXADDR((sc), (x)) & 0xffffffffU)
453#define WM_CDRXADDR_HI(sc, x) \ 453#define WM_CDRXADDR_HI(sc, x) \
454 (sizeof(bus_addr_t) == 8 ? \ 454 (sizeof(bus_addr_t) == 8 ? \
455 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0) 455 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
456 456
457#define WM_CDTXSYNC(sc, x, n, ops) \ 457#define WM_CDTXSYNC(sc, x, n, ops) \
458do { \ 458do { \
459 int __x, __n; \ 459 int __x, __n; \
460 \ 460 \
461 __x = (x); \ 461 __x = (x); \
462 __n = (n); \ 462 __n = (n); \
463 \ 463 \
464 /* If it will wrap around, sync to the end of the ring. */ \ 464 /* If it will wrap around, sync to the end of the ring. */ \
465 if ((__x + __n) > WM_NTXDESC(sc)) { \ 465 if ((__x + __n) > WM_NTXDESC(sc)) { \
466 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 466 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
467 WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * \ 467 WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * \
468 (WM_NTXDESC(sc) - __x), (ops)); \ 468 (WM_NTXDESC(sc) - __x), (ops)); \
469 __n -= (WM_NTXDESC(sc) - __x); \ 469 __n -= (WM_NTXDESC(sc) - __x); \
470 __x = 0; \ 470 __x = 0; \
471 } \ 471 } \
472 \ 472 \
473 /* Now sync whatever is left. */ \ 473 /* Now sync whatever is left. */ \
474 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 474 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
475 WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops)); \ 475 WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops)); \
476} while (/*CONSTCOND*/0) 476} while (/*CONSTCOND*/0)
477 477
478#define WM_CDRXSYNC(sc, x, ops) \ 478#define WM_CDRXSYNC(sc, x, ops) \
479do { \ 479do { \
480 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 480 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
481 WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops)); \ 481 WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops)); \
482} while (/*CONSTCOND*/0) 482} while (/*CONSTCOND*/0)
483 483
484#define WM_INIT_RXDESC(sc, x) \ 484#define WM_INIT_RXDESC(sc, x) \
485do { \ 485do { \
486 struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \ 486 struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
487 wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)]; \ 487 wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)]; \
488 struct mbuf *__m = __rxs->rxs_mbuf; \ 488 struct mbuf *__m = __rxs->rxs_mbuf; \
489 \ 489 \
490 /* \ 490 /* \
491 * Note: We scoot the packet forward 2 bytes in the buffer \ 491 * Note: We scoot the packet forward 2 bytes in the buffer \
492 * so that the payload after the Ethernet header is aligned \ 492 * so that the payload after the Ethernet header is aligned \
493 * to a 4-byte boundary. \ 493 * to a 4-byte boundary. \
494 * \ 494 * \
495 * XXX BRAINDAMAGE ALERT! \ 495 * XXX BRAINDAMAGE ALERT! \
496 * The stupid chip uses the same size for every buffer, which \ 496 * The stupid chip uses the same size for every buffer, which \
497 * is set in the Receive Control register. We are using the 2K \ 497 * is set in the Receive Control register. We are using the 2K \
498 * size option, but what we REALLY want is (2K - 2)! For this \ 498 * size option, but what we REALLY want is (2K - 2)! For this \
499 * reason, we can't "scoot" packets longer than the standard \ 499 * reason, we can't "scoot" packets longer than the standard \
500 * Ethernet MTU. On strict-alignment platforms, if the total \ 500 * Ethernet MTU. On strict-alignment platforms, if the total \
501 * size exceeds (2K - 2) we set align_tweak to 0 and let \ 501 * size exceeds (2K - 2) we set align_tweak to 0 and let \
502 * the upper layer copy the headers. \ 502 * the upper layer copy the headers. \
503 */ \ 503 */ \
504 __m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak; \ 504 __m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak; \
505 \ 505 \
506 wm_set_dma_addr(&__rxd->wrx_addr, \ 506 wm_set_dma_addr(&__rxd->wrx_addr, \
507 __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \ 507 __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
508 __rxd->wrx_len = 0; \ 508 __rxd->wrx_len = 0; \
509 __rxd->wrx_cksum = 0; \ 509 __rxd->wrx_cksum = 0; \
510 __rxd->wrx_status = 0; \ 510 __rxd->wrx_status = 0; \
511 __rxd->wrx_errors = 0; \ 511 __rxd->wrx_errors = 0; \
512 __rxd->wrx_special = 0; \ 512 __rxd->wrx_special = 0; \
513 WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \ 513 WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
514 \ 514 \
515 CSR_WRITE((sc), (sc)->sc_rdt_reg, (x)); \ 515 CSR_WRITE((sc), (sc)->sc_rdt_reg, (x)); \
516} while (/*CONSTCOND*/0) 516} while (/*CONSTCOND*/0)
517 517
518static void wm_start(struct ifnet *); 518static void wm_start(struct ifnet *);
519static void wm_watchdog(struct ifnet *); 519static void wm_watchdog(struct ifnet *);
520static int wm_ioctl(struct ifnet *, u_long, void *); 520static int wm_ioctl(struct ifnet *, u_long, void *);
521static int wm_init(struct ifnet *); 521static int wm_init(struct ifnet *);
522static void wm_stop(struct ifnet *, int); 522static void wm_stop(struct ifnet *, int);
523 523
524static void wm_reset(struct wm_softc *); 524static void wm_reset(struct wm_softc *);
525static void wm_rxdrain(struct wm_softc *); 525static void wm_rxdrain(struct wm_softc *);
526static int wm_add_rxbuf(struct wm_softc *, int); 526static int wm_add_rxbuf(struct wm_softc *, int);
527static int wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *); 527static int wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
528static int wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *); 528static int wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
529static int wm_validate_eeprom_checksum(struct wm_softc *); 529static int wm_validate_eeprom_checksum(struct wm_softc *);
530static void wm_tick(void *); 530static void wm_tick(void *);
531 531
532static void wm_set_filter(struct wm_softc *); 532static void wm_set_filter(struct wm_softc *);
533 533
534static int wm_intr(void *); 534static int wm_intr(void *);
535static void wm_txintr(struct wm_softc *); 535static void wm_txintr(struct wm_softc *);
536static void wm_rxintr(struct wm_softc *); 536static void wm_rxintr(struct wm_softc *);
537static void wm_linkintr(struct wm_softc *, uint32_t); 537static void wm_linkintr(struct wm_softc *, uint32_t);
538 538
539static void wm_tbi_mediainit(struct wm_softc *); 539static void wm_tbi_mediainit(struct wm_softc *);
540static int wm_tbi_mediachange(struct ifnet *); 540static int wm_tbi_mediachange(struct ifnet *);
541static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *); 541static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
542 542
543static void wm_tbi_set_linkled(struct wm_softc *); 543static void wm_tbi_set_linkled(struct wm_softc *);
544static void wm_tbi_check_link(struct wm_softc *); 544static void wm_tbi_check_link(struct wm_softc *);
545 545
546static void wm_gmii_reset(struct wm_softc *); 546static void wm_gmii_reset(struct wm_softc *);
547 547
548static int wm_gmii_i82543_readreg(device_t, int, int); 548static int wm_gmii_i82543_readreg(device_t, int, int);
549static void wm_gmii_i82543_writereg(device_t, int, int, int); 549static void wm_gmii_i82543_writereg(device_t, int, int, int);
550 550
551static int wm_gmii_i82544_readreg(device_t, int, int); 551static int wm_gmii_i82544_readreg(device_t, int, int);
552static void wm_gmii_i82544_writereg(device_t, int, int, int); 552static void wm_gmii_i82544_writereg(device_t, int, int, int);
553 553
554static int wm_gmii_i80003_readreg(device_t, int, int); 554static int wm_gmii_i80003_readreg(device_t, int, int);
555static void wm_gmii_i80003_writereg(device_t, int, int, int); 555static void wm_gmii_i80003_writereg(device_t, int, int, int);
556 556
557static void wm_gmii_statchg(device_t); 557static void wm_gmii_statchg(device_t);
558 558
559static void wm_gmii_mediainit(struct wm_softc *); 559static void wm_gmii_mediainit(struct wm_softc *);
560static int wm_gmii_mediachange(struct ifnet *); 560static int wm_gmii_mediachange(struct ifnet *);
561static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *); 561static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
562 562
563static int wm_kmrn_i80003_readreg(struct wm_softc *, int); 563static int wm_kmrn_i80003_readreg(struct wm_softc *, int);
564static void wm_kmrn_i80003_writereg(struct wm_softc *, int, int); 564static void wm_kmrn_i80003_writereg(struct wm_softc *, int, int);
565 565
566static int wm_match(device_t, cfdata_t, void *); 566static int wm_match(device_t, cfdata_t, void *);
567static void wm_attach(device_t, device_t, void *); 567static void wm_attach(device_t, device_t, void *);
568static int wm_is_onboard_nvm_eeprom(struct wm_softc *); 568static int wm_is_onboard_nvm_eeprom(struct wm_softc *);
569static void wm_get_auto_rd_done(struct wm_softc *); 569static void wm_get_auto_rd_done(struct wm_softc *);
570static int wm_get_swsm_semaphore(struct wm_softc *); 570static int wm_get_swsm_semaphore(struct wm_softc *);
571static void wm_put_swsm_semaphore(struct wm_softc *); 571static void wm_put_swsm_semaphore(struct wm_softc *);
572static int wm_poll_eerd_eewr_done(struct wm_softc *, int); 572static int wm_poll_eerd_eewr_done(struct wm_softc *, int);
573static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t); 573static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
574static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t); 574static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
575static int wm_get_swfwhw_semaphore(struct wm_softc *); 575static int wm_get_swfwhw_semaphore(struct wm_softc *);
576static void wm_put_swfwhw_semaphore(struct wm_softc *); 576static void wm_put_swfwhw_semaphore(struct wm_softc *);
577 577
578static int wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *); 578static int wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
579static int32_t wm_ich8_cycle_init(struct wm_softc *); 579static int32_t wm_ich8_cycle_init(struct wm_softc *);
580static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t); 580static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
581static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t, 581static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t,
582 uint32_t, uint16_t *); 582 uint32_t, uint16_t *);
583static int32_t wm_read_ich8_word(struct wm_softc *sc, uint32_t, uint16_t *); 583static int32_t wm_read_ich8_word(struct wm_softc *sc, uint32_t, uint16_t *);
584static void wm_82547_txfifo_stall(void *); 584static void wm_82547_txfifo_stall(void *);
585static int wm_check_mng_mode(struct wm_softc *); 585static int wm_check_mng_mode(struct wm_softc *);
586static int wm_check_mng_mode_ich8lan(struct wm_softc *); 586static int wm_check_mng_mode_ich8lan(struct wm_softc *);
587static int wm_check_mng_mode_82574(struct wm_softc *); 587static int wm_check_mng_mode_82574(struct wm_softc *);
588static int wm_check_mng_mode_generic(struct wm_softc *); 588static int wm_check_mng_mode_generic(struct wm_softc *);
589static void wm_get_hw_control(struct wm_softc *); 589static void wm_get_hw_control(struct wm_softc *);
590 590
591CFATTACH_DECL_NEW(wm, sizeof(struct wm_softc), 591CFATTACH_DECL_NEW(wm, sizeof(struct wm_softc),
592 wm_match, wm_attach, NULL, NULL); 592 wm_match, wm_attach, NULL, NULL);
593 593
594 594
595/* 595/*
596 * Devices supported by this driver. 596 * Devices supported by this driver.
597 */ 597 */
598static const struct wm_product { 598static const struct wm_product {
599 pci_vendor_id_t wmp_vendor; 599 pci_vendor_id_t wmp_vendor;
600 pci_product_id_t wmp_product; 600 pci_product_id_t wmp_product;
601 const char *wmp_name; 601 const char *wmp_name;
602 wm_chip_type wmp_type; 602 wm_chip_type wmp_type;
603 int wmp_flags; 603 int wmp_flags;
604#define WMP_F_1000X 0x01 604#define WMP_F_1000X 0x01
605#define WMP_F_1000T 0x02 605#define WMP_F_1000T 0x02
606} wm_products[] = { 606} wm_products[] = {
607 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542, 607 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
608 "Intel i82542 1000BASE-X Ethernet", 608 "Intel i82542 1000BASE-X Ethernet",
609 WM_T_82542_2_1, WMP_F_1000X }, 609 WM_T_82542_2_1, WMP_F_1000X },
610 610
611 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER, 611 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER,
612 "Intel i82543GC 1000BASE-X Ethernet", 612 "Intel i82543GC 1000BASE-X Ethernet",
613 WM_T_82543, WMP_F_1000X }, 613 WM_T_82543, WMP_F_1000X },
614 614
615 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER, 615 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER,
616 "Intel i82543GC 1000BASE-T Ethernet", 616 "Intel i82543GC 1000BASE-T Ethernet",
617 WM_T_82543, WMP_F_1000T }, 617 WM_T_82543, WMP_F_1000T },
618 618
619 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER, 619 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER,
620 "Intel i82544EI 1000BASE-T Ethernet", 620 "Intel i82544EI 1000BASE-T Ethernet",
621 WM_T_82544, WMP_F_1000T }, 621 WM_T_82544, WMP_F_1000T },
622 622
623 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER, 623 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER,
624 "Intel i82544EI 1000BASE-X Ethernet", 624 "Intel i82544EI 1000BASE-X Ethernet",
625 WM_T_82544, WMP_F_1000X }, 625 WM_T_82544, WMP_F_1000X },
626 626
627 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER, 627 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER,
628 "Intel i82544GC 1000BASE-T Ethernet", 628 "Intel i82544GC 1000BASE-T Ethernet",
629 WM_T_82544, WMP_F_1000T }, 629 WM_T_82544, WMP_F_1000T },
630 630
631 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM, 631 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM,
632 "Intel i82544GC (LOM) 1000BASE-T Ethernet", 632 "Intel i82544GC (LOM) 1000BASE-T Ethernet",
633 WM_T_82544, WMP_F_1000T }, 633 WM_T_82544, WMP_F_1000T },
634 634
635 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM, 635 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM,
636 "Intel i82540EM 1000BASE-T Ethernet", 636 "Intel i82540EM 1000BASE-T Ethernet",
637 WM_T_82540, WMP_F_1000T }, 637 WM_T_82540, WMP_F_1000T },
638 638
639 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM, 639 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM,
640 "Intel i82540EM (LOM) 1000BASE-T Ethernet", 640 "Intel i82540EM (LOM) 1000BASE-T Ethernet",
641 WM_T_82540, WMP_F_1000T }, 641 WM_T_82540, WMP_F_1000T },
642 642
643 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM, 643 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM,
644 "Intel i82540EP 1000BASE-T Ethernet", 644 "Intel i82540EP 1000BASE-T Ethernet",
645 WM_T_82540, WMP_F_1000T }, 645 WM_T_82540, WMP_F_1000T },
646 646
647 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP, 647 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP,
648 "Intel i82540EP 1000BASE-T Ethernet", 648 "Intel i82540EP 1000BASE-T Ethernet",
649 WM_T_82540, WMP_F_1000T }, 649 WM_T_82540, WMP_F_1000T },
650 650
651 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP, 651 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP,
652 "Intel i82540EP 1000BASE-T Ethernet", 652 "Intel i82540EP 1000BASE-T Ethernet",
653 WM_T_82540, WMP_F_1000T }, 653 WM_T_82540, WMP_F_1000T },
654 654
655 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER, 655 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER,
656 "Intel i82545EM 1000BASE-T Ethernet", 656 "Intel i82545EM 1000BASE-T Ethernet",
657 WM_T_82545, WMP_F_1000T }, 657 WM_T_82545, WMP_F_1000T },
658 658
659 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER, 659 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER,
660 "Intel i82545GM 1000BASE-T Ethernet", 660 "Intel i82545GM 1000BASE-T Ethernet",
661 WM_T_82545_3, WMP_F_1000T }, 661 WM_T_82545_3, WMP_F_1000T },
662 662
663 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER, 663 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER,
664 "Intel i82545GM 1000BASE-X Ethernet", 664 "Intel i82545GM 1000BASE-X Ethernet",
665 WM_T_82545_3, WMP_F_1000X }, 665 WM_T_82545_3, WMP_F_1000X },
666#if 0 666#if 0
667 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES, 667 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES,
668 "Intel i82545GM Gigabit Ethernet (SERDES)", 668 "Intel i82545GM Gigabit Ethernet (SERDES)",
669 WM_T_82545_3, WMP_F_SERDES }, 669 WM_T_82545_3, WMP_F_SERDES },
670#endif 670#endif
671 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER, 671 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER,
672 "Intel i82546EB 1000BASE-T Ethernet", 672 "Intel i82546EB 1000BASE-T Ethernet",
673 WM_T_82546, WMP_F_1000T }, 673 WM_T_82546, WMP_F_1000T },
674 674
675 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD, 675 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD,
676 "Intel i82546EB 1000BASE-T Ethernet", 676 "Intel i82546EB 1000BASE-T Ethernet",
677 WM_T_82546, WMP_F_1000T }, 677 WM_T_82546, WMP_F_1000T },
678 678
679 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER, 679 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER,
680 "Intel i82545EM 1000BASE-X Ethernet", 680 "Intel i82545EM 1000BASE-X Ethernet",
681 WM_T_82545, WMP_F_1000X }, 681 WM_T_82545, WMP_F_1000X },
682 682
683 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER, 683 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER,
684 "Intel i82546EB 1000BASE-X Ethernet", 684 "Intel i82546EB 1000BASE-X Ethernet",
685 WM_T_82546, WMP_F_1000X }, 685 WM_T_82546, WMP_F_1000X },
686 686
687 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER, 687 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER,
688 "Intel i82546GB 1000BASE-T Ethernet", 688 "Intel i82546GB 1000BASE-T Ethernet",
689 WM_T_82546_3, WMP_F_1000T }, 689 WM_T_82546_3, WMP_F_1000T },
690 690
691 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER, 691 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER,
692 "Intel i82546GB 1000BASE-X Ethernet", 692 "Intel i82546GB 1000BASE-X Ethernet",
693 WM_T_82546_3, WMP_F_1000X }, 693 WM_T_82546_3, WMP_F_1000X },
694#if 0 694#if 0
695 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES, 695 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES,
696 "Intel i82546GB Gigabit Ethernet (SERDES)", 696 "Intel i82546GB Gigabit Ethernet (SERDES)",
697 WM_T_82546_3, WMP_F_SERDES }, 697 WM_T_82546_3, WMP_F_SERDES },
698#endif 698#endif
699 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER, 699 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
700 "i82546GB quad-port Gigabit Ethernet", 700 "i82546GB quad-port Gigabit Ethernet",
701 WM_T_82546_3, WMP_F_1000T }, 701 WM_T_82546_3, WMP_F_1000T },
702 702
703 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3, 703 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
704 "i82546GB quad-port Gigabit Ethernet (KSP3)", 704 "i82546GB quad-port Gigabit Ethernet (KSP3)",
705 WM_T_82546_3, WMP_F_1000T }, 705 WM_T_82546_3, WMP_F_1000T },
706 706
707 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE, 707 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE,
708 "Intel PRO/1000MT (82546GB)", 708 "Intel PRO/1000MT (82546GB)",
709 WM_T_82546_3, WMP_F_1000T }, 709 WM_T_82546_3, WMP_F_1000T },
710 710
711 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI, 711 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI,
712 "Intel i82541EI 1000BASE-T Ethernet", 712 "Intel i82541EI 1000BASE-T Ethernet",
713 WM_T_82541, WMP_F_1000T }, 713 WM_T_82541, WMP_F_1000T },
714 714
715 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM, 715 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM,
716 "Intel i82541ER (LOM) 1000BASE-T Ethernet", 716 "Intel i82541ER (LOM) 1000BASE-T Ethernet",
717 WM_T_82541, WMP_F_1000T }, 717 WM_T_82541, WMP_F_1000T },
718 718
719 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE, 719 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE,
720 "Intel i82541EI Mobile 1000BASE-T Ethernet", 720 "Intel i82541EI Mobile 1000BASE-T Ethernet",
721 WM_T_82541, WMP_F_1000T }, 721 WM_T_82541, WMP_F_1000T },
722 722
723 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER, 723 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER,
724 "Intel i82541ER 1000BASE-T Ethernet", 724 "Intel i82541ER 1000BASE-T Ethernet",
725 WM_T_82541_2, WMP_F_1000T }, 725 WM_T_82541_2, WMP_F_1000T },
726 726
727 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI, 727 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI,
728 "Intel i82541GI 1000BASE-T Ethernet", 728 "Intel i82541GI 1000BASE-T Ethernet",
729 WM_T_82541_2, WMP_F_1000T }, 729 WM_T_82541_2, WMP_F_1000T },
730 730
731 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE, 731 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE,
732 "Intel i82541GI Mobile 1000BASE-T Ethernet", 732 "Intel i82541GI Mobile 1000BASE-T Ethernet",
733 WM_T_82541_2, WMP_F_1000T }, 733 WM_T_82541_2, WMP_F_1000T },
734 734
735 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI, 735 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI,
736 "Intel i82541PI 1000BASE-T Ethernet", 736 "Intel i82541PI 1000BASE-T Ethernet",
737 WM_T_82541_2, WMP_F_1000T }, 737 WM_T_82541_2, WMP_F_1000T },
738 738
739 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI, 739 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI,
740 "Intel i82547EI 1000BASE-T Ethernet", 740 "Intel i82547EI 1000BASE-T Ethernet",
741 WM_T_82547, WMP_F_1000T }, 741 WM_T_82547, WMP_F_1000T },
742 742
743 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE, 743 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE,
744 "Intel i82547EI Mobile 1000BASE-T Ethernet", 744 "Intel i82547EI Mobile 1000BASE-T Ethernet",
745 WM_T_82547, WMP_F_1000T }, 745 WM_T_82547, WMP_F_1000T },
746 746
747 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI, 747 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI,
748 "Intel i82547GI 1000BASE-T Ethernet", 748 "Intel i82547GI 1000BASE-T Ethernet",
749 WM_T_82547_2, WMP_F_1000T }, 749 WM_T_82547_2, WMP_F_1000T },
750 750
751 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER, 751 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER,
752 "Intel PRO/1000 PT (82571EB)", 752 "Intel PRO/1000 PT (82571EB)",
753 WM_T_82571, WMP_F_1000T }, 753 WM_T_82571, WMP_F_1000T },
754 754
755 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER, 755 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER,
756 "Intel PRO/1000 PF (82571EB)", 756 "Intel PRO/1000 PF (82571EB)",
757 WM_T_82571, WMP_F_1000X }, 757 WM_T_82571, WMP_F_1000X },
758#if 0 758#if 0
759 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES, 759 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES,
760 "Intel PRO/1000 PB (82571EB)", 760 "Intel PRO/1000 PB (82571EB)",
761 WM_T_82571, WMP_F_SERDES }, 761 WM_T_82571, WMP_F_SERDES },
762#endif 762#endif
763 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER, 763 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
764 "Intel PRO/1000 QT (82571EB)", 764 "Intel PRO/1000 QT (82571EB)",
765 WM_T_82571, WMP_F_1000T }, 765 WM_T_82571, WMP_F_1000T },
766 766
767 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER, 767 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER,
768 "Intel i82572EI 1000baseT Ethernet", 768 "Intel i82572EI 1000baseT Ethernet",
769 WM_T_82572, WMP_F_1000T }, 769 WM_T_82572, WMP_F_1000T },
770 770
771 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER, 771 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
772 "Intel® PRO/1000 PT Quad Port Server Adapter", 772 "Intel® PRO/1000 PT Quad Port Server Adapter",
773 WM_T_82571, WMP_F_1000T, }, 773 WM_T_82571, WMP_F_1000T, },
774 774
775 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER, 775 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER,
776 "Intel i82572EI 1000baseX Ethernet", 776 "Intel i82572EI 1000baseX Ethernet",
777 WM_T_82572, WMP_F_1000X }, 777 WM_T_82572, WMP_F_1000X },
778#if 0 778#if 0
779 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES, 779 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES,
780 "Intel i82572EI Gigabit Ethernet (SERDES)", 780 "Intel i82572EI Gigabit Ethernet (SERDES)",
781 WM_T_82572, WMP_F_SERDES }, 781 WM_T_82572, WMP_F_SERDES },
782#endif 782#endif
783 783
784 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI, 784 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI,
785 "Intel i82572EI 1000baseT Ethernet", 785 "Intel i82572EI 1000baseT Ethernet",
786 WM_T_82572, WMP_F_1000T }, 786 WM_T_82572, WMP_F_1000T },
787 787
788 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E, 788 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E,
789 "Intel i82573E", 789 "Intel i82573E",
790 WM_T_82573, WMP_F_1000T }, 790 WM_T_82573, WMP_F_1000T },
791 791
792 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT, 792 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT,
793 "Intel i82573E IAMT", 793 "Intel i82573E IAMT",
794 WM_T_82573, WMP_F_1000T }, 794 WM_T_82573, WMP_F_1000T },
795 795
796 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L, 796 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L,
797 "Intel i82573L Gigabit Ethernet", 797 "Intel i82573L Gigabit Ethernet",
798 WM_T_82573, WMP_F_1000T }, 798 WM_T_82573, WMP_F_1000T },
799 799
800 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L, 800 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L,
801 "Intel i82574L", 801 "Intel i82574L",
802 WM_T_82574, WMP_F_1000T }, 802 WM_T_82574, WMP_F_1000T },
803 803
804 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT, 804 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
805 "i80003 dual 1000baseT Ethernet", 805 "i80003 dual 1000baseT Ethernet",
806 WM_T_80003, WMP_F_1000T }, 806 WM_T_80003, WMP_F_1000T },
807 807
808 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT, 808 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
809 "i80003 dual 1000baseX Ethernet", 809 "i80003 dual 1000baseX Ethernet",
810 WM_T_80003, WMP_F_1000T }, 810 WM_T_80003, WMP_F_1000T },
811#if 0 811#if 0
812 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT, 812 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
813 "Intel i80003ES2 dual Gigabit Ethernet (SERDES)", 813 "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
814 WM_T_80003, WMP_F_SERDES }, 814 WM_T_80003, WMP_F_SERDES },
815#endif 815#endif
816 816
817 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT, 817 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
818 "Intel i80003 1000baseT Ethernet", 818 "Intel i80003 1000baseT Ethernet",
819 WM_T_80003, WMP_F_1000T }, 819 WM_T_80003, WMP_F_1000T },
820#if 0 820#if 0
821 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT, 821 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
822 "Intel i80003 Gigabit Ethernet (SERDES)", 822 "Intel i80003 Gigabit Ethernet (SERDES)",
823 WM_T_80003, WMP_F_SERDES }, 823 WM_T_80003, WMP_F_SERDES },
824#endif 824#endif
825 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_AMT, 825 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_AMT,
826 "Intel i82801H (M_AMT) LAN Controller", 826 "Intel i82801H (M_AMT) LAN Controller",
827 WM_T_ICH8, WMP_F_1000T }, 827 WM_T_ICH8, WMP_F_1000T },
828 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_AMT, 828 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_AMT,
829 "Intel i82801H (AMT) LAN Controller", 829 "Intel i82801H (AMT) LAN Controller",
830 WM_T_ICH8, WMP_F_1000T }, 830 WM_T_ICH8, WMP_F_1000T },
831 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_LAN, 831 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_LAN,
832 "Intel i82801H LAN Controller", 832 "Intel i82801H LAN Controller",
833 WM_T_ICH8, WMP_F_1000T }, 833 WM_T_ICH8, WMP_F_1000T },
834 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_LAN, 834 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_LAN,
835 "Intel i82801H (IFE) LAN Controller", 835 "Intel i82801H (IFE) LAN Controller",
836 WM_T_ICH8, WMP_F_1000T }, 836 WM_T_ICH8, WMP_F_1000T },
837 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_LAN, 837 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_LAN,
838 "Intel i82801H (M) LAN Controller", 838 "Intel i82801H (M) LAN Controller",
839 WM_T_ICH8, WMP_F_1000T }, 839 WM_T_ICH8, WMP_F_1000T },
840 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_GT, 840 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_GT,
841 "Intel i82801H IFE (GT) LAN Controller", 841 "Intel i82801H IFE (GT) LAN Controller",
842 WM_T_ICH8, WMP_F_1000T }, 842 WM_T_ICH8, WMP_F_1000T },
843 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_G, 843 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_G,
844 "Intel i82801H IFE (G) LAN Controller", 844 "Intel i82801H IFE (G) LAN Controller",
845 WM_T_ICH8, WMP_F_1000T }, 845 WM_T_ICH8, WMP_F_1000T },
846 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_AMT, 846 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_AMT,
847 "82801I (AMT) LAN Controller", 847 "82801I (AMT) LAN Controller",
848 WM_T_ICH9, WMP_F_1000T }, 848 WM_T_ICH9, WMP_F_1000T },
849 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE, 849 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE,
850 "82801I LAN Controller", 850 "82801I LAN Controller",
851 WM_T_ICH9, WMP_F_1000T }, 851 WM_T_ICH9, WMP_F_1000T },
852 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_G, 852 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_G,
853 "82801I (G) LAN Controller", 853 "82801I (G) LAN Controller",
854 WM_T_ICH9, WMP_F_1000T }, 854 WM_T_ICH9, WMP_F_1000T },
855 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_GT, 855 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_GT,
856 "82801I (GT) LAN Controller", 856 "82801I (GT) LAN Controller",
857 WM_T_ICH9, WMP_F_1000T }, 857 WM_T_ICH9, WMP_F_1000T },
858 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_C, 858 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_C,
859 "82801I (C) LAN Controller", 859 "82801I (C) LAN Controller",
860 WM_T_ICH9, WMP_F_1000T }, 860 WM_T_ICH9, WMP_F_1000T },
861 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M, 861 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M,
862 "82801I mobile LAN Controller", 862 "82801I mobile LAN Controller",
863 WM_T_ICH9, WMP_F_1000T }, 863 WM_T_ICH9, WMP_F_1000T },
864 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IGP_M_V, 864 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IGP_M_V,
865 "82801I mobile (V) LAN Controller", 865 "82801I mobile (V) LAN Controller",
866 WM_T_ICH9, WMP_F_1000T }, 866 WM_T_ICH9, WMP_F_1000T },
867 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_AMT, 867 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
868 "82801I mobile (AMT) LAN Controller", 868 "82801I mobile (AMT) LAN Controller",
869 WM_T_ICH9, WMP_F_1000T }, 869 WM_T_ICH9, WMP_F_1000T },
870 { 0, 0, 870 { 0, 0,
871 NULL, 871 NULL,
872 0, 0 }, 872 0, 0 },
873}; 873};
874 874
875#ifdef WM_EVENT_COUNTERS 875#ifdef WM_EVENT_COUNTERS
876static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")]; 876static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
877#endif /* WM_EVENT_COUNTERS */ 877#endif /* WM_EVENT_COUNTERS */
878 878
#if 0 /* Not currently used */
/*
 * wm_io_read:
 *
 *	Read a chip register through the I/O-mapped indirect access
 *	window: the register offset is latched at I/O offset 0 and the
 *	data is then fetched from I/O offset 4.
 */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4);
}
#endif
888 888
889static inline void 889static inline void
890wm_io_write(struct wm_softc *sc, int reg, uint32_t val) 890wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
891{ 891{
892 892
893 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg); 893 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
894 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val); 894 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
895} 895}
896 896
897static inline void 897static inline void
898wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v) 898wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
899{ 899{
900 wa->wa_low = htole32(v & 0xffffffffU); 900 wa->wa_low = htole32(v & 0xffffffffU);
901 if (sizeof(bus_addr_t) == 8) 901 if (sizeof(bus_addr_t) == 8)
902 wa->wa_high = htole32((uint64_t) v >> 32); 902 wa->wa_high = htole32((uint64_t) v >> 32);
903 else 903 else
904 wa->wa_high = 0; 904 wa->wa_high = 0;
905} 905}
906 906
907static const struct wm_product * 907static const struct wm_product *
908wm_lookup(const struct pci_attach_args *pa) 908wm_lookup(const struct pci_attach_args *pa)
909{ 909{
910 const struct wm_product *wmp; 910 const struct wm_product *wmp;
911 911
912 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) { 912 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
913 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor && 913 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
914 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product) 914 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
915 return (wmp); 915 return (wmp);
916 } 916 }
917 return (NULL); 917 return (NULL);
918} 918}
919 919
920static int 920static int
921wm_match(device_t parent, cfdata_t cf, void *aux) 921wm_match(device_t parent, cfdata_t cf, void *aux)
922{ 922{
923 struct pci_attach_args *pa = aux; 923 struct pci_attach_args *pa = aux;
924 924
925 if (wm_lookup(pa) != NULL) 925 if (wm_lookup(pa) != NULL)
926 return (1); 926 return (1);
927 927
928 return (0); 928 return (0);
929} 929}
930 930
931static void 931static void
932wm_attach(device_t parent, device_t self, void *aux) 932wm_attach(device_t parent, device_t self, void *aux)
933{ 933{
934 struct wm_softc *sc = device_private(self); 934 struct wm_softc *sc = device_private(self);
935 struct pci_attach_args *pa = aux; 935 struct pci_attach_args *pa = aux;
936 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 936 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
937 pci_chipset_tag_t pc = pa->pa_pc; 937 pci_chipset_tag_t pc = pa->pa_pc;
938 pci_intr_handle_t ih; 938 pci_intr_handle_t ih;
939 size_t cdata_size; 939 size_t cdata_size;
940 const char *intrstr = NULL; 940 const char *intrstr = NULL;
941 const char *eetype, *xname; 941 const char *eetype, *xname;
942 bus_space_tag_t memt; 942 bus_space_tag_t memt;
943 bus_space_handle_t memh; 943 bus_space_handle_t memh;
944 bus_dma_segment_t seg; 944 bus_dma_segment_t seg;
945 int memh_valid; 945 int memh_valid;
946 int i, rseg, error; 946 int i, rseg, error;
947 const struct wm_product *wmp; 947 const struct wm_product *wmp;
948 prop_data_t ea; 948 prop_data_t ea;
949 prop_number_t pn; 949 prop_number_t pn;
950 uint8_t enaddr[ETHER_ADDR_LEN]; 950 uint8_t enaddr[ETHER_ADDR_LEN];
951 uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin; 951 uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
952 pcireg_t preg, memtype; 952 pcireg_t preg, memtype;
953 uint32_t reg; 953 uint32_t reg;
954 954
955 sc->sc_dev = self; 955 sc->sc_dev = self;
956 callout_init(&sc->sc_tick_ch, 0); 956 callout_init(&sc->sc_tick_ch, 0);
957 957
958 wmp = wm_lookup(pa); 958 wmp = wm_lookup(pa);
959 if (wmp == NULL) { 959 if (wmp == NULL) {
960 printf("\n"); 960 printf("\n");
961 panic("wm_attach: impossible"); 961 panic("wm_attach: impossible");
962 } 962 }
963 963
964 sc->sc_pc = pa->pa_pc; 964 sc->sc_pc = pa->pa_pc;
965 sc->sc_pcitag = pa->pa_tag; 965 sc->sc_pcitag = pa->pa_tag;
966 966
967 if (pci_dma64_available(pa)) 967 if (pci_dma64_available(pa))
968 sc->sc_dmat = pa->pa_dmat64; 968 sc->sc_dmat = pa->pa_dmat64;
969 else 969 else
970 sc->sc_dmat = pa->pa_dmat; 970 sc->sc_dmat = pa->pa_dmat;
971 971
972 preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG)); 972 preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
973 aprint_naive(": Ethernet controller\n"); 973 aprint_naive(": Ethernet controller\n");
974 aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg); 974 aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);
975 975
976 sc->sc_type = wmp->wmp_type; 976 sc->sc_type = wmp->wmp_type;
977 if (sc->sc_type < WM_T_82543) { 977 if (sc->sc_type < WM_T_82543) {
978 if (preg < 2) { 978 if (preg < 2) {
979 aprint_error_dev(sc->sc_dev, 979 aprint_error_dev(sc->sc_dev,
980 "i82542 must be at least rev. 2\n"); 980 "i82542 must be at least rev. 2\n");
981 return; 981 return;
982 } 982 }
983 if (preg < 3) 983 if (preg < 3)
984 sc->sc_type = WM_T_82542_2_0; 984 sc->sc_type = WM_T_82542_2_0;
985 } 985 }
986 986
987 /* 987 /*
988 * Map the device. All devices support memory-mapped acccess, 988 * Map the device. All devices support memory-mapped acccess,
989 * and it is really required for normal operation. 989 * and it is really required for normal operation.
990 */ 990 */
991 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA); 991 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
992 switch (memtype) { 992 switch (memtype) {
993 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 993 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
994 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 994 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
995 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA, 995 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
996 memtype, 0, &memt, &memh, NULL, NULL) == 0); 996 memtype, 0, &memt, &memh, NULL, NULL) == 0);
997 break; 997 break;
998 default: 998 default:
999 memh_valid = 0; 999 memh_valid = 0;
1000 } 1000 }
1001 1001
1002 if (memh_valid) { 1002 if (memh_valid) {
1003 sc->sc_st = memt; 1003 sc->sc_st = memt;
1004 sc->sc_sh = memh; 1004 sc->sc_sh = memh;
1005 } else { 1005 } else {
1006 aprint_error_dev(sc->sc_dev, 1006 aprint_error_dev(sc->sc_dev,
1007 "unable to map device registers\n"); 1007 "unable to map device registers\n");
1008 return; 1008 return;
1009 } 1009 }
1010 1010
1011 /* 1011 /*
1012 * In addition, i82544 and later support I/O mapped indirect 1012 * In addition, i82544 and later support I/O mapped indirect
1013 * register access. It is not desirable (nor supported in 1013 * register access. It is not desirable (nor supported in
1014 * this driver) to use it for normal operation, though it is 1014 * this driver) to use it for normal operation, though it is
1015 * required to work around bugs in some chip versions. 1015 * required to work around bugs in some chip versions.
1016 */ 1016 */
1017 if (sc->sc_type >= WM_T_82544) { 1017 if (sc->sc_type >= WM_T_82544) {
1018 /* First we have to find the I/O BAR. */ 1018 /* First we have to find the I/O BAR. */
1019 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) { 1019 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1020 if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) == 1020 if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
1021 PCI_MAPREG_TYPE_IO) 1021 PCI_MAPREG_TYPE_IO)
1022 break; 1022 break;
1023 } 1023 }
1024 if (i == PCI_MAPREG_END) 1024 if (i == PCI_MAPREG_END)
1025 aprint_error_dev(sc->sc_dev, 1025 aprint_error_dev(sc->sc_dev,
1026 "WARNING: unable to find I/O BAR\n"); 1026 "WARNING: unable to find I/O BAR\n");
1027 else { 1027 else {
1028 /* 1028 /*
1029 * The i8254x doesn't apparently respond when the 1029 * The i8254x doesn't apparently respond when the
1030 * I/O BAR is 0, which looks somewhat like it's not 1030 * I/O BAR is 0, which looks somewhat like it's not
1031 * been configured. 1031 * been configured.
1032 */ 1032 */
1033 preg = pci_conf_read(pc, pa->pa_tag, i); 1033 preg = pci_conf_read(pc, pa->pa_tag, i);
1034 if (PCI_MAPREG_MEM_ADDR(preg) == 0) { 1034 if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1035 aprint_error_dev(sc->sc_dev, 1035 aprint_error_dev(sc->sc_dev,
1036 "WARNING: I/O BAR at zero.\n"); 1036 "WARNING: I/O BAR at zero.\n");
1037 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO, 1037 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1038 0, &sc->sc_iot, &sc->sc_ioh, 1038 0, &sc->sc_iot, &sc->sc_ioh,
1039 NULL, NULL) == 0) { 1039 NULL, NULL) == 0) {
1040 sc->sc_flags |= WM_F_IOH_VALID; 1040 sc->sc_flags |= WM_F_IOH_VALID;
1041 } else { 1041 } else {
1042 aprint_error_dev(sc->sc_dev, 1042 aprint_error_dev(sc->sc_dev,
1043 "WARNING: unable to map I/O space\n"); 1043 "WARNING: unable to map I/O space\n");
1044 } 1044 }
1045 } 1045 }
1046 1046
1047 } 1047 }
1048 1048
1049 /* Enable bus mastering. Disable MWI on the i82542 2.0. */ 1049 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
1050 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 1050 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1051 preg |= PCI_COMMAND_MASTER_ENABLE; 1051 preg |= PCI_COMMAND_MASTER_ENABLE;
1052 if (sc->sc_type < WM_T_82542_2_1) 1052 if (sc->sc_type < WM_T_82542_2_1)
1053 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE; 1053 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1054 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg); 1054 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1055 1055
1056 /* power up chip */ 1056 /* power up chip */
1057 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, 1057 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1058 NULL)) && error != EOPNOTSUPP) { 1058 NULL)) && error != EOPNOTSUPP) {
1059 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error); 1059 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1060 return; 1060 return;
1061 } 1061 }
1062 1062
1063 /* 1063 /*
1064 * Map and establish our interrupt. 1064 * Map and establish our interrupt.
1065 */ 1065 */
1066 if (pci_intr_map(pa, &ih)) { 1066 if (pci_intr_map(pa, &ih)) {
1067 aprint_error_dev(sc->sc_dev, "unable to map interrupt\n"); 1067 aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
1068 return; 1068 return;
1069 } 1069 }
1070 intrstr = pci_intr_string(pc, ih); 1070 intrstr = pci_intr_string(pc, ih);
1071 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc); 1071 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
1072 if (sc->sc_ih == NULL) { 1072 if (sc->sc_ih == NULL) {
1073 aprint_error_dev(sc->sc_dev, "unable to establish interrupt"); 1073 aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
1074 if (intrstr != NULL) 1074 if (intrstr != NULL)
1075 aprint_normal(" at %s", intrstr); 1075 aprint_normal(" at %s", intrstr);
1076 aprint_normal("\n"); 1076 aprint_normal("\n");
1077 return; 1077 return;
1078 } 1078 }
1079 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr); 1079 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
1080 1080
1081 /* 1081 /*
1082 * Determine a few things about the bus we're connected to. 1082 * Determine a few things about the bus we're connected to.
1083 */ 1083 */
1084 if (sc->sc_type < WM_T_82543) { 1084 if (sc->sc_type < WM_T_82543) {
1085 /* We don't really know the bus characteristics here. */ 1085 /* We don't really know the bus characteristics here. */
1086 sc->sc_bus_speed = 33; 1086 sc->sc_bus_speed = 33;
1087 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) { 1087 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1088 /* 1088 /*
1089 * CSA (Communication Streaming Architecture) is about as fast 1089 * CSA (Communication Streaming Architecture) is about as fast
1090 * a 32-bit 66MHz PCI Bus. 1090 * a 32-bit 66MHz PCI Bus.
1091 */ 1091 */
1092 sc->sc_flags |= WM_F_CSA; 1092 sc->sc_flags |= WM_F_CSA;
1093 sc->sc_bus_speed = 66; 1093 sc->sc_bus_speed = 66;
1094 aprint_verbose_dev(sc->sc_dev, 1094 aprint_verbose_dev(sc->sc_dev,
1095 "Communication Streaming Architecture\n"); 1095 "Communication Streaming Architecture\n");
1096 if (sc->sc_type == WM_T_82547) { 1096 if (sc->sc_type == WM_T_82547) {
1097 callout_init(&sc->sc_txfifo_ch, 0); 1097 callout_init(&sc->sc_txfifo_ch, 0);
1098 callout_setfunc(&sc->sc_txfifo_ch, 1098 callout_setfunc(&sc->sc_txfifo_ch,
1099 wm_82547_txfifo_stall, sc); 1099 wm_82547_txfifo_stall, sc);
1100 aprint_verbose_dev(sc->sc_dev, 1100 aprint_verbose_dev(sc->sc_dev,
1101 "using 82547 Tx FIFO stall work-around\n"); 1101 "using 82547 Tx FIFO stall work-around\n");
1102 } 1102 }
1103 } else if (sc->sc_type >= WM_T_82571) { 1103 } else if (sc->sc_type >= WM_T_82571) {
1104 sc->sc_flags |= WM_F_PCIE; 1104 sc->sc_flags |= WM_F_PCIE;
1105 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)) 1105 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9))
1106 sc->sc_flags |= WM_F_EEPROM_SEMAPHORE; 1106 sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
1107 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n"); 1107 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1108 } else { 1108 } else {
1109 reg = CSR_READ(sc, WMREG_STATUS); 1109 reg = CSR_READ(sc, WMREG_STATUS);
1110 if (reg & STATUS_BUS64) 1110 if (reg & STATUS_BUS64)
1111 sc->sc_flags |= WM_F_BUS64; 1111 sc->sc_flags |= WM_F_BUS64;
1112 if (sc->sc_type >= WM_T_82544 && 1112 if (sc->sc_type >= WM_T_82544 &&
1113 (reg & STATUS_PCIX_MODE) != 0) { 1113 (reg & STATUS_PCIX_MODE) != 0) {
1114 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb; 1114 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1115 1115
1116 sc->sc_flags |= WM_F_PCIX; 1116 sc->sc_flags |= WM_F_PCIX;
1117 if (pci_get_capability(pa->pa_pc, pa->pa_tag, 1117 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1118 PCI_CAP_PCIX, 1118 PCI_CAP_PCIX,
1119 &sc->sc_pcix_offset, NULL) == 0) 1119 &sc->sc_pcix_offset, NULL) == 0)
1120 aprint_error_dev(sc->sc_dev, 1120 aprint_error_dev(sc->sc_dev,
1121 "unable to find PCIX capability\n"); 1121 "unable to find PCIX capability\n");
1122 else if (sc->sc_type != WM_T_82545_3 && 1122 else if (sc->sc_type != WM_T_82545_3 &&
1123 sc->sc_type != WM_T_82546_3) { 1123 sc->sc_type != WM_T_82546_3) {
1124 /* 1124 /*
1125 * Work around a problem caused by the BIOS 1125 * Work around a problem caused by the BIOS
1126 * setting the max memory read byte count 1126 * setting the max memory read byte count
1127 * incorrectly. 1127 * incorrectly.
1128 */ 1128 */
1129 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag, 1129 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1130 sc->sc_pcix_offset + PCI_PCIX_CMD); 1130 sc->sc_pcix_offset + PCI_PCIX_CMD);
1131 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag, 1131 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1132 sc->sc_pcix_offset + PCI_PCIX_STATUS); 1132 sc->sc_pcix_offset + PCI_PCIX_STATUS);
1133 1133
1134 bytecnt = 1134 bytecnt =
1135 (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >> 1135 (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
1136 PCI_PCIX_CMD_BYTECNT_SHIFT; 1136 PCI_PCIX_CMD_BYTECNT_SHIFT;
1137 maxb = 1137 maxb =
1138 (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >> 1138 (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
1139 PCI_PCIX_STATUS_MAXB_SHIFT; 1139 PCI_PCIX_STATUS_MAXB_SHIFT;
1140 if (bytecnt > maxb) { 1140 if (bytecnt > maxb) {
1141 aprint_verbose_dev(sc->sc_dev, 1141 aprint_verbose_dev(sc->sc_dev,
1142 "resetting PCI-X MMRBC: %d -> %d\n", 1142 "resetting PCI-X MMRBC: %d -> %d\n",
1143 512 << bytecnt, 512 << maxb); 1143 512 << bytecnt, 512 << maxb);
1144 pcix_cmd = (pcix_cmd & 1144 pcix_cmd = (pcix_cmd &
1145 ~PCI_PCIX_CMD_BYTECNT_MASK) | 1145 ~PCI_PCIX_CMD_BYTECNT_MASK) |
1146 (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT); 1146 (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
1147 pci_conf_write(pa->pa_pc, pa->pa_tag, 1147 pci_conf_write(pa->pa_pc, pa->pa_tag,
1148 sc->sc_pcix_offset + PCI_PCIX_CMD, 1148 sc->sc_pcix_offset + PCI_PCIX_CMD,
1149 pcix_cmd); 1149 pcix_cmd);
1150 } 1150 }
1151 } 1151 }
1152 } 1152 }
1153 /* 1153 /*
1154 * The quad port adapter is special; it has a PCIX-PCIX 1154 * The quad port adapter is special; it has a PCIX-PCIX
1155 * bridge on the board, and can run the secondary bus at 1155 * bridge on the board, and can run the secondary bus at
1156 * a higher speed. 1156 * a higher speed.
1157 */ 1157 */
1158 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) { 1158 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1159 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120 1159 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1160 : 66; 1160 : 66;
1161 } else if (sc->sc_flags & WM_F_PCIX) { 1161 } else if (sc->sc_flags & WM_F_PCIX) {
1162 switch (reg & STATUS_PCIXSPD_MASK) { 1162 switch (reg & STATUS_PCIXSPD_MASK) {
1163 case STATUS_PCIXSPD_50_66: 1163 case STATUS_PCIXSPD_50_66:
1164 sc->sc_bus_speed = 66; 1164 sc->sc_bus_speed = 66;
1165 break; 1165 break;
1166 case STATUS_PCIXSPD_66_100: 1166 case STATUS_PCIXSPD_66_100:
1167 sc->sc_bus_speed = 100; 1167 sc->sc_bus_speed = 100;
1168 break; 1168 break;
1169 case STATUS_PCIXSPD_100_133: 1169 case STATUS_PCIXSPD_100_133:
1170 sc->sc_bus_speed = 133; 1170 sc->sc_bus_speed = 133;
1171 break; 1171 break;
1172 default: 1172 default:
1173 aprint_error_dev(sc->sc_dev, 1173 aprint_error_dev(sc->sc_dev,
1174 "unknown PCIXSPD %d; assuming 66MHz\n", 1174 "unknown PCIXSPD %d; assuming 66MHz\n",
1175 reg & STATUS_PCIXSPD_MASK); 1175 reg & STATUS_PCIXSPD_MASK);
1176 sc->sc_bus_speed = 66; 1176 sc->sc_bus_speed = 66;
1177 } 1177 }
1178 } else 1178 } else
1179 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33; 1179 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1180 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n", 1180 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1181 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed, 1181 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1182 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI"); 1182 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1183 } 1183 }
1184 1184
1185 /* 1185 /*
1186 * Allocate the control data structures, and create and load the 1186 * Allocate the control data structures, and create and load the
1187 * DMA map for it. 1187 * DMA map for it.
1188 * 1188 *
1189 * NOTE: All Tx descriptors must be in the same 4G segment of 1189 * NOTE: All Tx descriptors must be in the same 4G segment of
1190 * memory. So must Rx descriptors. We simplify by allocating 1190 * memory. So must Rx descriptors. We simplify by allocating
1191 * both sets within the same 4G segment. 1191 * both sets within the same 4G segment.
1192 */ 1192 */
1193 WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ? 1193 WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1194 WM_NTXDESC_82542 : WM_NTXDESC_82544; 1194 WM_NTXDESC_82542 : WM_NTXDESC_82544;
1195 cdata_size = sc->sc_type < WM_T_82544 ? 1195 cdata_size = sc->sc_type < WM_T_82544 ?
1196 sizeof(struct wm_control_data_82542) : 1196 sizeof(struct wm_control_data_82542) :
1197 sizeof(struct wm_control_data_82544); 1197 sizeof(struct wm_control_data_82544);
1198 if ((error = bus_dmamem_alloc(sc->sc_dmat, cdata_size, PAGE_SIZE, 1198 if ((error = bus_dmamem_alloc(sc->sc_dmat, cdata_size, PAGE_SIZE,
1199 (bus_size_t) 0x100000000ULL, 1199 (bus_size_t) 0x100000000ULL,
1200 &seg, 1, &rseg, 0)) != 0) { 1200 &seg, 1, &rseg, 0)) != 0) {
1201 aprint_error_dev(sc->sc_dev, 1201 aprint_error_dev(sc->sc_dev,
1202 "unable to allocate control data, error = %d\n", 1202 "unable to allocate control data, error = %d\n",
1203 error); 1203 error);
1204 goto fail_0; 1204 goto fail_0;
1205 } 1205 }
1206 1206
1207 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, cdata_size, 1207 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, cdata_size,
1208 (void **)&sc->sc_control_data,  1208 (void **)&sc->sc_control_data,
1209 BUS_DMA_COHERENT)) != 0) { 1209 BUS_DMA_COHERENT)) != 0) {
1210 aprint_error_dev(sc->sc_dev, 1210 aprint_error_dev(sc->sc_dev,
1211 "unable to map control data, error = %d\n", error); 1211 "unable to map control data, error = %d\n", error);
1212 goto fail_1; 1212 goto fail_1;
1213 } 1213 }
1214 1214
1215 if ((error = bus_dmamap_create(sc->sc_dmat, cdata_size, 1, cdata_size, 1215 if ((error = bus_dmamap_create(sc->sc_dmat, cdata_size, 1, cdata_size,
1216 0, 0, &sc->sc_cddmamap)) != 0) { 1216 0, 0, &sc->sc_cddmamap)) != 0) {
1217 aprint_error_dev(sc->sc_dev, 1217 aprint_error_dev(sc->sc_dev,
1218 "unable to create control data DMA map, error = %d\n", 1218 "unable to create control data DMA map, error = %d\n",
1219 error); 1219 error);
1220 goto fail_2; 1220 goto fail_2;
1221 } 1221 }
1222 1222
1223 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap, 1223 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1224 sc->sc_control_data, cdata_size, NULL, 1224 sc->sc_control_data, cdata_size, NULL,
1225 0)) != 0) { 1225 0)) != 0) {
1226 aprint_error_dev(sc->sc_dev, 1226 aprint_error_dev(sc->sc_dev,
1227 "unable to load control data DMA map, error = %d\n", 1227 "unable to load control data DMA map, error = %d\n",
1228 error); 1228 error);
1229 goto fail_3; 1229 goto fail_3;
1230 } 1230 }
1231 1231
1232 1232
1233 /* 1233 /*
1234 * Create the transmit buffer DMA maps. 1234 * Create the transmit buffer DMA maps.
1235 */ 1235 */
1236 WM_TXQUEUELEN(sc) = 1236 WM_TXQUEUELEN(sc) =
1237 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ? 1237 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1238 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX; 1238 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1239 for (i = 0; i < WM_TXQUEUELEN(sc); i++) { 1239 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1240 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA, 1240 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1241 WM_NTXSEGS, WTX_MAX_LEN, 0, 0, 1241 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1242 &sc->sc_txsoft[i].txs_dmamap)) != 0) { 1242 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1243 aprint_error_dev(sc->sc_dev, 1243 aprint_error_dev(sc->sc_dev,
1244 "unable to create Tx DMA map %d, error = %d\n", 1244 "unable to create Tx DMA map %d, error = %d\n",
1245 i, error); 1245 i, error);
1246 goto fail_4; 1246 goto fail_4;
1247 } 1247 }
1248 } 1248 }
1249 1249
1250 /* 1250 /*
1251 * Create the receive buffer DMA maps. 1251 * Create the receive buffer DMA maps.
1252 */ 1252 */
1253 for (i = 0; i < WM_NRXDESC; i++) { 1253 for (i = 0; i < WM_NRXDESC; i++) {
1254 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 1254 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1255 MCLBYTES, 0, 0, 1255 MCLBYTES, 0, 0,
1256 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) { 1256 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1257 aprint_error_dev(sc->sc_dev, 1257 aprint_error_dev(sc->sc_dev,
1258 "unable to create Rx DMA map %d error = %d\n", 1258 "unable to create Rx DMA map %d error = %d\n",
1259 i, error); 1259 i, error);
1260 goto fail_5; 1260 goto fail_5;
1261 } 1261 }
1262 sc->sc_rxsoft[i].rxs_mbuf = NULL; 1262 sc->sc_rxsoft[i].rxs_mbuf = NULL;
1263 } 1263 }
1264 1264
1265 /* clear interesting stat counters */ 1265 /* clear interesting stat counters */
1266 CSR_READ(sc, WMREG_COLC); 1266 CSR_READ(sc, WMREG_COLC);
1267 CSR_READ(sc, WMREG_RXERRC); 1267 CSR_READ(sc, WMREG_RXERRC);
1268 1268
1269 /* 1269 /*
1270 * Reset the chip to a known state. 1270 * Reset the chip to a known state.
1271 */ 1271 */
1272 wm_reset(sc); 1272 wm_reset(sc);
1273 1273
1274 switch (sc->sc_type) { 1274 switch (sc->sc_type) {
1275 case WM_T_82573: 1275 case WM_T_82573:
1276 case WM_T_ICH8: 1276 case WM_T_ICH8:
1277 case WM_T_ICH9: 1277 case WM_T_ICH9:
1278 if (wm_check_mng_mode(sc) != 0) 1278 if (wm_check_mng_mode(sc) != 0)
1279 wm_get_hw_control(sc); 1279 wm_get_hw_control(sc);
1280 break; 1280 break;
1281 default: 1281 default:
1282 break; 1282 break;
1283 } 1283 }
1284 1284
1285 /* 1285 /*
1286 * Get some information about the EEPROM. 1286 * Get some information about the EEPROM.
1287 */ 1287 */
1288 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)) { 1288 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)) {
1289 uint32_t flash_size; 1289 uint32_t flash_size;
1290 sc->sc_flags |= WM_F_SWFWHW_SYNC | WM_F_EEPROM_FLASH; 1290 sc->sc_flags |= WM_F_SWFWHW_SYNC | WM_F_EEPROM_FLASH;
1291 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH); 1291 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1292 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0, 1292 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1293 &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) { 1293 &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1294 aprint_error_dev(sc->sc_dev, 1294 aprint_error_dev(sc->sc_dev,
1295 "can't map FLASH registers\n"); 1295 "can't map FLASH registers\n");
1296 return; 1296 return;
1297 } 1297 }
1298 flash_size = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG); 1298 flash_size = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1299 sc->sc_ich8_flash_base = (flash_size & ICH_GFPREG_BASE_MASK) * 1299 sc->sc_ich8_flash_base = (flash_size & ICH_GFPREG_BASE_MASK) *
1300 ICH_FLASH_SECTOR_SIZE; 1300 ICH_FLASH_SECTOR_SIZE;
1301 sc->sc_ich8_flash_bank_size =  1301 sc->sc_ich8_flash_bank_size =
1302 ((flash_size >> 16) & ICH_GFPREG_BASE_MASK) + 1; 1302 ((flash_size >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1303 sc->sc_ich8_flash_bank_size -= 1303 sc->sc_ich8_flash_bank_size -=
1304 (flash_size & ICH_GFPREG_BASE_MASK); 1304 (flash_size & ICH_GFPREG_BASE_MASK);
1305 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE; 1305 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1306 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t); 1306 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
1307 } else if (sc->sc_type == WM_T_80003) 1307 } else if (sc->sc_type == WM_T_80003)
1308 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC; 1308 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
1309 else if (sc->sc_type == WM_T_82573) 1309 else if (sc->sc_type == WM_T_82573)
1310 sc->sc_flags |= WM_F_EEPROM_EERDEEWR; 1310 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1311 else if (sc->sc_type == WM_T_82574) 1311 else if (sc->sc_type == WM_T_82574)
1312 sc->sc_flags |= WM_F_EEPROM_EERDEEWR; 1312 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1313 else if (sc->sc_type > WM_T_82544) 1313 else if (sc->sc_type > WM_T_82544)
1314 sc->sc_flags |= WM_F_EEPROM_HANDSHAKE; 1314 sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1315 1315
1316 if (sc->sc_type <= WM_T_82544) 1316 if (sc->sc_type <= WM_T_82544)
1317 sc->sc_ee_addrbits = 6; 1317 sc->sc_ee_addrbits = 6;
1318 else if (sc->sc_type <= WM_T_82546_3) { 1318 else if (sc->sc_type <= WM_T_82546_3) {
1319 reg = CSR_READ(sc, WMREG_EECD); 1319 reg = CSR_READ(sc, WMREG_EECD);
1320 if (reg & EECD_EE_SIZE) 1320 if (reg & EECD_EE_SIZE)
1321 sc->sc_ee_addrbits = 8; 1321 sc->sc_ee_addrbits = 8;
1322 else 1322 else
1323 sc->sc_ee_addrbits = 6; 1323 sc->sc_ee_addrbits = 6;
1324 } else if (sc->sc_type <= WM_T_82547_2) { 1324 } else if (sc->sc_type <= WM_T_82547_2) {
1325 reg = CSR_READ(sc, WMREG_EECD); 1325 reg = CSR_READ(sc, WMREG_EECD);
1326 if (reg & EECD_EE_TYPE) { 1326 if (reg & EECD_EE_TYPE) {
1327 sc->sc_flags |= WM_F_EEPROM_SPI; 1327 sc->sc_flags |= WM_F_EEPROM_SPI;
1328 sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8; 1328 sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
1329 } else 1329 } else
1330 sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6; 1330 sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
1331 } else if ((sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574) && 1331 } else if ((sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574) &&
1332 (wm_is_onboard_nvm_eeprom(sc) == 0)) { 1332 (wm_is_onboard_nvm_eeprom(sc) == 0)) {
1333 sc->sc_flags |= WM_F_EEPROM_FLASH; 1333 sc->sc_flags |= WM_F_EEPROM_FLASH;
1334 } else { 1334 } else {
1335 /* Assume everything else is SPI. */ 1335 /* Assume everything else is SPI. */
1336 reg = CSR_READ(sc, WMREG_EECD); 1336 reg = CSR_READ(sc, WMREG_EECD);
1337 sc->sc_flags |= WM_F_EEPROM_SPI; 1337 sc->sc_flags |= WM_F_EEPROM_SPI;
1338 sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8; 1338 sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
1339 } 1339 }
1340 1340
1341 /* 1341 /*
1342 * Defer printing the EEPROM type until after verifying the checksum 1342 * Defer printing the EEPROM type until after verifying the checksum
1343 * This allows the EEPROM type to be printed correctly in the case 1343 * This allows the EEPROM type to be printed correctly in the case
1344 * that no EEPROM is attached. 1344 * that no EEPROM is attached.
1345 */ 1345 */
1346 1346
1347 /* 1347 /*
1348 * Validate the EEPROM checksum. If the checksum fails, flag this for 1348 * Validate the EEPROM checksum. If the checksum fails, flag this for
1349 * later, so we can fail future reads from the EEPROM. 1349 * later, so we can fail future reads from the EEPROM.
1350 */ 1350 */
1351 if (wm_validate_eeprom_checksum(sc)) { 1351 if (wm_validate_eeprom_checksum(sc)) {
1352 /* 1352 /*
1353 * Read twice again because some PCI-e parts fail the first 1353 * Read twice again because some PCI-e parts fail the first
1354 * check due to the link being in sleep state. 1354 * check due to the link being in sleep state.
1355 */ 1355 */
1356 if (wm_validate_eeprom_checksum(sc)) 1356 if (wm_validate_eeprom_checksum(sc))
1357 sc->sc_flags |= WM_F_EEPROM_INVALID; 1357 sc->sc_flags |= WM_F_EEPROM_INVALID;
1358 } 1358 }
1359 1359
1360 if (sc->sc_flags & WM_F_EEPROM_INVALID) 1360 if (sc->sc_flags & WM_F_EEPROM_INVALID)
1361 aprint_verbose_dev(sc->sc_dev, "No EEPROM\n"); 1361 aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1362 else if (sc->sc_flags & WM_F_EEPROM_FLASH) { 1362 else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1363 aprint_verbose_dev(sc->sc_dev, "FLASH\n"); 1363 aprint_verbose_dev(sc->sc_dev, "FLASH\n");
1364 } else { 1364 } else {
1365 if (sc->sc_flags & WM_F_EEPROM_SPI) 1365 if (sc->sc_flags & WM_F_EEPROM_SPI)
1366 eetype = "SPI"; 1366 eetype = "SPI";
1367 else 1367 else
1368 eetype = "MicroWire"; 1368 eetype = "MicroWire";
1369 aprint_verbose_dev(sc->sc_dev, 1369 aprint_verbose_dev(sc->sc_dev,
1370 "%u word (%d address bits) %s EEPROM\n", 1370 "%u word (%d address bits) %s EEPROM\n",
1371 1U << sc->sc_ee_addrbits, 1371 1U << sc->sc_ee_addrbits,
1372 sc->sc_ee_addrbits, eetype); 1372 sc->sc_ee_addrbits, eetype);
1373 } 1373 }
1374 1374
1375 /* 1375 /*
1376 * Read the Ethernet address from the EEPROM, if not first found 1376 * Read the Ethernet address from the EEPROM, if not first found
1377 * in device properties. 1377 * in device properties.
1378 */ 1378 */
1379 ea = prop_dictionary_get(device_properties(sc->sc_dev), "mac-addr"); 1379 ea = prop_dictionary_get(device_properties(sc->sc_dev), "mac-addr");
1380 if (ea != NULL) { 1380 if (ea != NULL) {
1381 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA); 1381 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1382 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN); 1382 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1383 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN); 1383 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1384 } else { 1384 } else {
1385 if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR, 1385 if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
1386 sizeof(myea) / sizeof(myea[0]), myea)) { 1386 sizeof(myea) / sizeof(myea[0]), myea)) {
1387 aprint_error_dev(sc->sc_dev, 1387 aprint_error_dev(sc->sc_dev,
1388 "unable to read Ethernet address\n"); 1388 "unable to read Ethernet address\n");
1389 return; 1389 return;
1390 } 1390 }
1391 enaddr[0] = myea[0] & 0xff; 1391 enaddr[0] = myea[0] & 0xff;
1392 enaddr[1] = myea[0] >> 8; 1392 enaddr[1] = myea[0] >> 8;
1393 enaddr[2] = myea[1] & 0xff; 1393 enaddr[2] = myea[1] & 0xff;
1394 enaddr[3] = myea[1] >> 8; 1394 enaddr[3] = myea[1] >> 8;
1395 enaddr[4] = myea[2] & 0xff; 1395 enaddr[4] = myea[2] & 0xff;
1396 enaddr[5] = myea[2] >> 8; 1396 enaddr[5] = myea[2] >> 8;
1397 } 1397 }
1398 1398
1399 /* 1399 /*
1400 * Toggle the LSB of the MAC address on the second port 1400 * Toggle the LSB of the MAC address on the second port
1401 * of the dual port controller. 1401 * of the dual port controller.
1402 */ 1402 */
1403 if (sc->sc_type == WM_T_82546 || sc->sc_type == WM_T_82546_3 1403 if (sc->sc_type == WM_T_82546 || sc->sc_type == WM_T_82546_3
1404 || sc->sc_type == WM_T_82571 || sc->sc_type == WM_T_80003) { 1404 || sc->sc_type == WM_T_82571 || sc->sc_type == WM_T_80003) {
1405 if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1) 1405 if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
1406 enaddr[5] ^= 1; 1406 enaddr[5] ^= 1;
1407 } 1407 }
1408 1408
1409 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n", 1409 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1410 ether_sprintf(enaddr)); 1410 ether_sprintf(enaddr));
1411 1411
1412 /* 1412 /*
1413 * Read the config info from the EEPROM, and set up various 1413 * Read the config info from the EEPROM, and set up various
1414 * bits in the control registers based on their contents. 1414 * bits in the control registers based on their contents.
1415 */ 1415 */
1416 pn = prop_dictionary_get(device_properties(sc->sc_dev), 1416 pn = prop_dictionary_get(device_properties(sc->sc_dev),
1417 "i82543-cfg1"); 1417 "i82543-cfg1");
1418 if (pn != NULL) { 1418 if (pn != NULL) {
1419 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER); 1419 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1420 cfg1 = (uint16_t) prop_number_integer_value(pn); 1420 cfg1 = (uint16_t) prop_number_integer_value(pn);
1421 } else { 1421 } else {
1422 if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) { 1422 if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1423 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n"); 1423 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1424 return; 1424 return;
1425 } 1425 }
1426 } 1426 }
1427 1427
1428 pn = prop_dictionary_get(device_properties(sc->sc_dev), 1428 pn = prop_dictionary_get(device_properties(sc->sc_dev),
1429 "i82543-cfg2"); 1429 "i82543-cfg2");
1430 if (pn != NULL) { 1430 if (pn != NULL) {
1431 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER); 1431 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1432 cfg2 = (uint16_t) prop_number_integer_value(pn); 1432 cfg2 = (uint16_t) prop_number_integer_value(pn);
1433 } else { 1433 } else {
1434 if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) { 1434 if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1435 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n"); 1435 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1436 return; 1436 return;
1437 } 1437 }
1438 } 1438 }
1439 1439
1440 if (sc->sc_type >= WM_T_82544) { 1440 if (sc->sc_type >= WM_T_82544) {
1441 pn = prop_dictionary_get(device_properties(sc->sc_dev), 1441 pn = prop_dictionary_get(device_properties(sc->sc_dev),
1442 "i82543-swdpin"); 1442 "i82543-swdpin");
1443 if (pn != NULL) { 1443 if (pn != NULL) {
1444 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER); 1444 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1445 swdpin = (uint16_t) prop_number_integer_value(pn); 1445 swdpin = (uint16_t) prop_number_integer_value(pn);
1446 } else { 1446 } else {
1447 if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) { 1447 if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1448 aprint_error_dev(sc->sc_dev, 1448 aprint_error_dev(sc->sc_dev,
1449 "unable to read SWDPIN\n"); 1449 "unable to read SWDPIN\n");
1450 return; 1450 return;
1451 } 1451 }
1452 } 1452 }
1453 } 1453 }
1454 1454
1455 if (cfg1 & EEPROM_CFG1_ILOS) 1455 if (cfg1 & EEPROM_CFG1_ILOS)
1456 sc->sc_ctrl |= CTRL_ILOS; 1456 sc->sc_ctrl |= CTRL_ILOS;
1457 if (sc->sc_type >= WM_T_82544) { 1457 if (sc->sc_type >= WM_T_82544) {
1458 sc->sc_ctrl |= 1458 sc->sc_ctrl |=
1459 ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) << 1459 ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1460 CTRL_SWDPIO_SHIFT; 1460 CTRL_SWDPIO_SHIFT;
1461 sc->sc_ctrl |= 1461 sc->sc_ctrl |=
1462 ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) << 1462 ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1463 CTRL_SWDPINS_SHIFT; 1463 CTRL_SWDPINS_SHIFT;
1464 } else { 1464 } else {
1465 sc->sc_ctrl |= 1465 sc->sc_ctrl |=
1466 ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) << 1466 ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1467 CTRL_SWDPIO_SHIFT; 1467 CTRL_SWDPIO_SHIFT;
1468 } 1468 }
1469 1469
1470#if 0 1470#if 0
1471 if (sc->sc_type >= WM_T_82544) { 1471 if (sc->sc_type >= WM_T_82544) {
1472 if (cfg1 & EEPROM_CFG1_IPS0) 1472 if (cfg1 & EEPROM_CFG1_IPS0)
1473 sc->sc_ctrl_ext |= CTRL_EXT_IPS; 1473 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1474 if (cfg1 & EEPROM_CFG1_IPS1) 1474 if (cfg1 & EEPROM_CFG1_IPS1)
1475 sc->sc_ctrl_ext |= CTRL_EXT_IPS1; 1475 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1476 sc->sc_ctrl_ext |= 1476 sc->sc_ctrl_ext |=
1477 ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) << 1477 ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1478 CTRL_EXT_SWDPIO_SHIFT; 1478 CTRL_EXT_SWDPIO_SHIFT;
1479 sc->sc_ctrl_ext |= 1479 sc->sc_ctrl_ext |=
1480 ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) << 1480 ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1481 CTRL_EXT_SWDPINS_SHIFT; 1481 CTRL_EXT_SWDPINS_SHIFT;
1482 } else { 1482 } else {
1483 sc->sc_ctrl_ext |= 1483 sc->sc_ctrl_ext |=
1484 ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) << 1484 ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1485 CTRL_EXT_SWDPIO_SHIFT; 1485 CTRL_EXT_SWDPIO_SHIFT;
1486 } 1486 }
1487#endif 1487#endif
1488 1488
1489 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 1489 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1490#if 0 1490#if 0
1491 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext); 1491 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1492#endif 1492#endif
1493 1493
1494 /* 1494 /*
1495 * Set up some register offsets that are different between 1495 * Set up some register offsets that are different between
1496 * the i82542 and the i82543 and later chips. 1496 * the i82542 and the i82543 and later chips.
1497 */ 1497 */
1498 if (sc->sc_type < WM_T_82543) { 1498 if (sc->sc_type < WM_T_82543) {
1499 sc->sc_rdt_reg = WMREG_OLD_RDT0; 1499 sc->sc_rdt_reg = WMREG_OLD_RDT0;
1500 sc->sc_tdt_reg = WMREG_OLD_TDT; 1500 sc->sc_tdt_reg = WMREG_OLD_TDT;
1501 } else { 1501 } else {
1502 sc->sc_rdt_reg = WMREG_RDT; 1502 sc->sc_rdt_reg = WMREG_RDT;
1503 sc->sc_tdt_reg = WMREG_TDT; 1503 sc->sc_tdt_reg = WMREG_TDT;
1504 } 1504 }
1505 1505
1506 /* 1506 /*
1507 * Determine if we're TBI or GMII mode, and initialize the 1507 * Determine if we're TBI or GMII mode, and initialize the
1508 * media structures accordingly. 1508 * media structures accordingly.
1509 */ 1509 */
1510 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9 1510 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1511 || sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574) { 1511 || sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574) {
1512 /* STATUS_TBIMODE reserved/reused, can't rely on it */ 1512 /* STATUS_TBIMODE reserved/reused, can't rely on it */
1513 wm_gmii_mediainit(sc); 1513 wm_gmii_mediainit(sc);
1514 } else if (sc->sc_type < WM_T_82543 || 1514 } else if (sc->sc_type < WM_T_82543 ||
1515 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) { 1515 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1516 if (wmp->wmp_flags & WMP_F_1000T) 1516 if (wmp->wmp_flags & WMP_F_1000T)
1517 aprint_error_dev(sc->sc_dev, 1517 aprint_error_dev(sc->sc_dev,
1518 "WARNING: TBIMODE set on 1000BASE-T product!\n"); 1518 "WARNING: TBIMODE set on 1000BASE-T product!\n");
1519 wm_tbi_mediainit(sc); 1519 wm_tbi_mediainit(sc);
1520 } else { 1520 } else {
1521 if (wmp->wmp_flags & WMP_F_1000X) 1521 if (wmp->wmp_flags & WMP_F_1000X)
1522 aprint_error_dev(sc->sc_dev, 1522 aprint_error_dev(sc->sc_dev,
1523 "WARNING: TBIMODE clear on 1000BASE-X product!\n"); 1523 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
1524 wm_gmii_mediainit(sc); 1524 wm_gmii_mediainit(sc);
1525 } 1525 }
1526 1526
1527 ifp = &sc->sc_ethercom.ec_if; 1527 ifp = &sc->sc_ethercom.ec_if;
1528 xname = device_xname(sc->sc_dev); 1528 xname = device_xname(sc->sc_dev);
1529 strlcpy(ifp->if_xname, xname, IFNAMSIZ); 1529 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
1530 ifp->if_softc = sc; 1530 ifp->if_softc = sc;
1531 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1531 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1532 ifp->if_ioctl = wm_ioctl; 1532 ifp->if_ioctl = wm_ioctl;
1533 ifp->if_start = wm_start; 1533 ifp->if_start = wm_start;
1534 ifp->if_watchdog = wm_watchdog; 1534 ifp->if_watchdog = wm_watchdog;
1535 ifp->if_init = wm_init; 1535 ifp->if_init = wm_init;
1536 ifp->if_stop = wm_stop; 1536 ifp->if_stop = wm_stop;
1537 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN)); 1537 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
1538 IFQ_SET_READY(&ifp->if_snd); 1538 IFQ_SET_READY(&ifp->if_snd);
1539 1539
1540 if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_82574 && 1540 if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_82574 &&
1541 sc->sc_type != WM_T_ICH8) 1541 sc->sc_type != WM_T_ICH8)
1542 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 1542 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1543 1543
1544 /* 1544 /*
1545 * If we're a i82543 or greater, we can support VLANs. 1545 * If we're a i82543 or greater, we can support VLANs.
1546 */ 1546 */
1547 if (sc->sc_type >= WM_T_82543) 1547 if (sc->sc_type >= WM_T_82543)
1548 sc->sc_ethercom.ec_capabilities |= 1548 sc->sc_ethercom.ec_capabilities |=
1549 ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */; 1549 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
1550 1550
1551 /* 1551 /*
1552 * We can perform TCPv4 and UDPv4 checkums in-bound. Only 1552 * We can perform TCPv4 and UDPv4 checkums in-bound. Only
1553 * on i82543 and later. 1553 * on i82543 and later.
1554 */ 1554 */
1555 if (sc->sc_type >= WM_T_82543) { 1555 if (sc->sc_type >= WM_T_82543) {
1556 ifp->if_capabilities |= 1556 ifp->if_capabilities |=
1557 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 1557 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
1558 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 1558 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1559 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx | 1559 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
1560 IFCAP_CSUM_TCPv6_Tx | 1560 IFCAP_CSUM_TCPv6_Tx |
1561 IFCAP_CSUM_UDPv6_Tx; 1561 IFCAP_CSUM_UDPv6_Tx;
1562 } 1562 }
1563 1563
1564 /* 1564 /*
1565 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL. 1565 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
1566 * 1566 *
1567 * 82541GI (8086:1076) ... no 1567 * 82541GI (8086:1076) ... no
1568 * 82572EI (8086:10b9) ... yes 1568 * 82572EI (8086:10b9) ... yes
1569 */ 1569 */
1570 if (sc->sc_type >= WM_T_82571) { 1570 if (sc->sc_type >= WM_T_82571) {
1571 ifp->if_capabilities |= 1571 ifp->if_capabilities |=
1572 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx; 1572 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
1573 } 1573 }
1574 1574
1575 /*  1575 /*
1576 * If we're a i82544 or greater (except i82547), we can do 1576 * If we're a i82544 or greater (except i82547), we can do
1577 * TCP segmentation offload. 1577 * TCP segmentation offload.
1578 */ 1578 */
1579 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) { 1579 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
1580 ifp->if_capabilities |= IFCAP_TSOv4; 1580 ifp->if_capabilities |= IFCAP_TSOv4;
1581 } 1581 }
1582 1582
1583 if (sc->sc_type >= WM_T_82571) { 1583 if (sc->sc_type >= WM_T_82571) {
1584 ifp->if_capabilities |= IFCAP_TSOv6; 1584 ifp->if_capabilities |= IFCAP_TSOv6;
1585 } 1585 }
1586 1586
1587 /* 1587 /*
1588 * Attach the interface. 1588 * Attach the interface.
1589 */ 1589 */
1590 if_attach(ifp); 1590 if_attach(ifp);
1591 ether_ifattach(ifp, enaddr); 1591 ether_ifattach(ifp, enaddr);
1592#if NRND > 0 1592#if NRND > 0
1593 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0); 1593 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
1594#endif 1594#endif
1595 1595
1596#ifdef WM_EVENT_COUNTERS 1596#ifdef WM_EVENT_COUNTERS
1597 /* Attach event counters. */ 1597 /* Attach event counters. */
1598 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC, 1598 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1599 NULL, xname, "txsstall"); 1599 NULL, xname, "txsstall");
1600 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC, 1600 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1601 NULL, xname, "txdstall"); 1601 NULL, xname, "txdstall");
1602 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC, 1602 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
1603 NULL, xname, "txfifo_stall"); 1603 NULL, xname, "txfifo_stall");
1604 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR, 1604 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
1605 NULL, xname, "txdw"); 1605 NULL, xname, "txdw");
1606 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR, 1606 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
1607 NULL, xname, "txqe"); 1607 NULL, xname, "txqe");
1608 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR, 1608 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1609 NULL, xname, "rxintr"); 1609 NULL, xname, "rxintr");
1610 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR, 1610 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
1611 NULL, xname, "linkintr"); 1611 NULL, xname, "linkintr");
1612 1612
1613 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC, 1613 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
1614 NULL, xname, "rxipsum"); 1614 NULL, xname, "rxipsum");
1615 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC, 1615 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
1616 NULL, xname, "rxtusum"); 1616 NULL, xname, "rxtusum");
1617 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC, 1617 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
1618 NULL, xname, "txipsum"); 1618 NULL, xname, "txipsum");
1619 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC, 1619 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
1620 NULL, xname, "txtusum"); 1620 NULL, xname, "txtusum");
1621 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC, 1621 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
1622 NULL, xname, "txtusum6"); 1622 NULL, xname, "txtusum6");
1623 1623
1624 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC, 1624 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
1625 NULL, xname, "txtso"); 1625 NULL, xname, "txtso");
1626 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC, 1626 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
1627 NULL, xname, "txtso6"); 1627 NULL, xname, "txtso6");
1628 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC, 1628 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
1629 NULL, xname, "txtsopain"); 1629 NULL, xname, "txtsopain");
1630 1630
1631 for (i = 0; i < WM_NTXSEGS; i++) { 1631 for (i = 0; i < WM_NTXSEGS; i++) {
1632 sprintf(wm_txseg_evcnt_names[i], "txseg%d", i); 1632 sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
1633 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC, 1633 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
1634 NULL, xname, wm_txseg_evcnt_names[i]); 1634 NULL, xname, wm_txseg_evcnt_names[i]);
1635 } 1635 }
1636 1636
1637 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC, 1637 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
1638 NULL, xname, "txdrop"); 1638 NULL, xname, "txdrop");
1639 1639
1640 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC, 1640 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
1641 NULL, xname, "tu"); 1641 NULL, xname, "tu");
1642 1642
1643 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC, 1643 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
1644 NULL, xname, "tx_xoff"); 1644 NULL, xname, "tx_xoff");
1645 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC, 1645 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
1646 NULL, xname, "tx_xon"); 1646 NULL, xname, "tx_xon");
1647 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC, 1647 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
1648 NULL, xname, "rx_xoff"); 1648 NULL, xname, "rx_xoff");
1649 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC, 1649 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
1650 NULL, xname, "rx_xon"); 1650 NULL, xname, "rx_xon");
1651 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC, 1651 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
1652 NULL, xname, "rx_macctl"); 1652 NULL, xname, "rx_macctl");
1653#endif /* WM_EVENT_COUNTERS */ 1653#endif /* WM_EVENT_COUNTERS */
1654 1654
1655 if (!pmf_device_register(self, NULL, NULL)) 1655 if (!pmf_device_register(self, NULL, NULL))
1656 aprint_error_dev(self, "couldn't establish power handler\n"); 1656 aprint_error_dev(self, "couldn't establish power handler\n");
1657 else 1657 else
1658 pmf_class_network_register(self, ifp); 1658 pmf_class_network_register(self, ifp);
1659 1659
1660 return; 1660 return;
1661 1661
1662 /* 1662 /*
1663 * Free any resources we've allocated during the failed attach 1663 * Free any resources we've allocated during the failed attach
1664 * attempt. Do this in reverse order and fall through. 1664 * attempt. Do this in reverse order and fall through.
1665 */ 1665 */
1666 fail_5: 1666 fail_5:
1667 for (i = 0; i < WM_NRXDESC; i++) { 1667 for (i = 0; i < WM_NRXDESC; i++) {
1668 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 1668 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
1669 bus_dmamap_destroy(sc->sc_dmat, 1669 bus_dmamap_destroy(sc->sc_dmat,
1670 sc->sc_rxsoft[i].rxs_dmamap); 1670 sc->sc_rxsoft[i].rxs_dmamap);
1671 } 1671 }
1672 fail_4: 1672 fail_4:
1673 for (i = 0; i < WM_TXQUEUELEN(sc); i++) { 1673 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1674 if (sc->sc_txsoft[i].txs_dmamap != NULL) 1674 if (sc->sc_txsoft[i].txs_dmamap != NULL)
1675 bus_dmamap_destroy(sc->sc_dmat, 1675 bus_dmamap_destroy(sc->sc_dmat,
1676 sc->sc_txsoft[i].txs_dmamap); 1676 sc->sc_txsoft[i].txs_dmamap);
1677 } 1677 }
1678 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap); 1678 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
1679 fail_3: 1679 fail_3:
1680 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap); 1680 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
1681 fail_2: 1681 fail_2:
1682 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data, 1682 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
1683 cdata_size); 1683 cdata_size);
1684 fail_1: 1684 fail_1:
1685 bus_dmamem_free(sc->sc_dmat, &seg, rseg); 1685 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1686 fail_0: 1686 fail_0:
1687 return; 1687 return;
1688} 1688}
1689 1689
1690/* 1690/*
1691 * wm_tx_offload: 1691 * wm_tx_offload:
1692 * 1692 *
1693 * Set up TCP/IP checksumming parameters for the 1693 * Set up TCP/IP checksumming parameters for the
1694 * specified packet. 1694 * specified packet.
1695 */ 1695 */
1696static int 1696static int
1697wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp, 1697wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
1698 uint8_t *fieldsp) 1698 uint8_t *fieldsp)
1699{ 1699{
1700 struct mbuf *m0 = txs->txs_mbuf; 1700 struct mbuf *m0 = txs->txs_mbuf;
1701 struct livengood_tcpip_ctxdesc *t; 1701 struct livengood_tcpip_ctxdesc *t;
1702 uint32_t ipcs, tucs, cmd, cmdlen, seg; 1702 uint32_t ipcs, tucs, cmd, cmdlen, seg;
1703 uint32_t ipcse; 1703 uint32_t ipcse;
1704 struct ether_header *eh; 1704 struct ether_header *eh;
1705 int offset, iphl; 1705 int offset, iphl;
1706 uint8_t fields; 1706 uint8_t fields;
1707 1707
1708 /* 1708 /*
1709 * XXX It would be nice if the mbuf pkthdr had offset 1709 * XXX It would be nice if the mbuf pkthdr had offset
1710 * fields for the protocol headers. 1710 * fields for the protocol headers.
1711 */ 1711 */
1712 1712
1713 eh = mtod(m0, struct ether_header *); 1713 eh = mtod(m0, struct ether_header *);
1714 switch (htons(eh->ether_type)) { 1714 switch (htons(eh->ether_type)) {
1715 case ETHERTYPE_IP: 1715 case ETHERTYPE_IP:
1716 case ETHERTYPE_IPV6: 1716 case ETHERTYPE_IPV6:
1717 offset = ETHER_HDR_LEN; 1717 offset = ETHER_HDR_LEN;
1718 break; 1718 break;
1719 1719
1720 case ETHERTYPE_VLAN: 1720 case ETHERTYPE_VLAN:
1721 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 1721 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1722 break; 1722 break;
1723 1723
1724 default: 1724 default:
1725 /* 1725 /*
1726 * Don't support this protocol or encapsulation. 1726 * Don't support this protocol or encapsulation.
1727 */ 1727 */
1728 *fieldsp = 0; 1728 *fieldsp = 0;
1729 *cmdp = 0; 1729 *cmdp = 0;
1730 return (0); 1730 return (0);
1731 } 1731 }
1732 1732
1733 if ((m0->m_pkthdr.csum_flags & 1733 if ((m0->m_pkthdr.csum_flags &
1734 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) { 1734 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
1735 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data); 1735 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
1736 } else { 1736 } else {
1737 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data); 1737 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
1738 } 1738 }
1739 ipcse = offset + iphl - 1; 1739 ipcse = offset + iphl - 1;
1740 1740
1741 cmd = WTX_CMD_DEXT | WTX_DTYP_D; 1741 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
1742 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE; 1742 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
1743 seg = 0; 1743 seg = 0;
1744 fields = 0; 1744 fields = 0;
1745 1745
1746 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) { 1746 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
1747 int hlen = offset + iphl; 1747 int hlen = offset + iphl;
1748 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0; 1748 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
1749 1749
1750 if (__predict_false(m0->m_len < 1750 if (__predict_false(m0->m_len <
1751 (hlen + sizeof(struct tcphdr)))) { 1751 (hlen + sizeof(struct tcphdr)))) {
1752 /* 1752 /*
1753 * TCP/IP headers are not in the first mbuf; we need 1753 * TCP/IP headers are not in the first mbuf; we need
1754 * to do this the slow and painful way. Let's just 1754 * to do this the slow and painful way. Let's just
1755 * hope this doesn't happen very often. 1755 * hope this doesn't happen very often.
1756 */ 1756 */
1757 struct tcphdr th; 1757 struct tcphdr th;
1758 1758
1759 WM_EVCNT_INCR(&sc->sc_ev_txtsopain); 1759 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
1760 1760
1761 m_copydata(m0, hlen, sizeof(th), &th); 1761 m_copydata(m0, hlen, sizeof(th), &th);
1762 if (v4) { 1762 if (v4) {
1763 struct ip ip; 1763 struct ip ip;
1764 1764
1765 m_copydata(m0, offset, sizeof(ip), &ip); 1765 m_copydata(m0, offset, sizeof(ip), &ip);
1766 ip.ip_len = 0; 1766 ip.ip_len = 0;
1767 m_copyback(m0, 1767 m_copyback(m0,
1768 offset + offsetof(struct ip, ip_len), 1768 offset + offsetof(struct ip, ip_len),
1769 sizeof(ip.ip_len), &ip.ip_len); 1769 sizeof(ip.ip_len), &ip.ip_len);
1770 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr, 1770 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
1771 ip.ip_dst.s_addr, htons(IPPROTO_TCP)); 1771 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
1772 } else { 1772 } else {
1773 struct ip6_hdr ip6; 1773 struct ip6_hdr ip6;
1774 1774
1775 m_copydata(m0, offset, sizeof(ip6), &ip6); 1775 m_copydata(m0, offset, sizeof(ip6), &ip6);
1776 ip6.ip6_plen = 0; 1776 ip6.ip6_plen = 0;
1777 m_copyback(m0, 1777 m_copyback(m0,
1778 offset + offsetof(struct ip6_hdr, ip6_plen), 1778 offset + offsetof(struct ip6_hdr, ip6_plen),
1779 sizeof(ip6.ip6_plen), &ip6.ip6_plen); 1779 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
1780 th.th_sum = in6_cksum_phdr(&ip6.ip6_src, 1780 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
1781 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP)); 1781 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
1782 } 1782 }
1783 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum), 1783 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
1784 sizeof(th.th_sum), &th.th_sum); 1784 sizeof(th.th_sum), &th.th_sum);
1785 1785
1786 hlen += th.th_off << 2; 1786 hlen += th.th_off << 2;
1787 } else { 1787 } else {
1788 /* 1788 /*
1789 * TCP/IP headers are in the first mbuf; we can do 1789 * TCP/IP headers are in the first mbuf; we can do
1790 * this the easy way. 1790 * this the easy way.
1791 */ 1791 */
1792 struct tcphdr *th; 1792 struct tcphdr *th;
1793 1793
1794 if (v4) { 1794 if (v4) {
1795 struct ip *ip = 1795 struct ip *ip =
1796 (void *)(mtod(m0, char *) + offset); 1796 (void *)(mtod(m0, char *) + offset);
1797 th = (void *)(mtod(m0, char *) + hlen); 1797 th = (void *)(mtod(m0, char *) + hlen);
1798 1798
1799 ip->ip_len = 0; 1799 ip->ip_len = 0;
1800 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, 1800 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
1801 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 1801 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1802 } else { 1802 } else {
1803 struct ip6_hdr *ip6 = 1803 struct ip6_hdr *ip6 =
1804 (void *)(mtod(m0, char *) + offset); 1804 (void *)(mtod(m0, char *) + offset);
1805 th = (void *)(mtod(m0, char *) + hlen); 1805 th = (void *)(mtod(m0, char *) + hlen);
1806 1806
1807 ip6->ip6_plen = 0; 1807 ip6->ip6_plen = 0;
1808 th->th_sum = in6_cksum_phdr(&ip6->ip6_src, 1808 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
1809 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP)); 1809 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
1810 } 1810 }
1811 hlen += th->th_off << 2; 1811 hlen += th->th_off << 2;
1812 } 1812 }
1813 1813
1814 if (v4) { 1814 if (v4) {
1815 WM_EVCNT_INCR(&sc->sc_ev_txtso); 1815 WM_EVCNT_INCR(&sc->sc_ev_txtso);
1816 cmdlen |= WTX_TCPIP_CMD_IP; 1816 cmdlen |= WTX_TCPIP_CMD_IP;
1817 } else { 1817 } else {
1818 WM_EVCNT_INCR(&sc->sc_ev_txtso6); 1818 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
1819 ipcse = 0; 1819 ipcse = 0;
1820 } 1820 }
1821 cmd |= WTX_TCPIP_CMD_TSE; 1821 cmd |= WTX_TCPIP_CMD_TSE;
1822 cmdlen |= WTX_TCPIP_CMD_TSE | 1822 cmdlen |= WTX_TCPIP_CMD_TSE |
1823 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen); 1823 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
1824 seg = WTX_TCPIP_SEG_HDRLEN(hlen) | 1824 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
1825 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz); 1825 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
1826 } 1826 }
1827 1827
1828 /* 1828 /*
1829 * NOTE: Even if we're not using the IP or TCP/UDP checksum 1829 * NOTE: Even if we're not using the IP or TCP/UDP checksum
1830 * offload feature, if we load the context descriptor, we 1830 * offload feature, if we load the context descriptor, we
1831 * MUST provide valid values for IPCSS and TUCSS fields. 1831 * MUST provide valid values for IPCSS and TUCSS fields.
1832 */ 1832 */
1833 1833
1834 ipcs = WTX_TCPIP_IPCSS(offset) | 1834 ipcs = WTX_TCPIP_IPCSS(offset) |
1835 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) | 1835 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
1836 WTX_TCPIP_IPCSE(ipcse); 1836 WTX_TCPIP_IPCSE(ipcse);
1837 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) { 1837 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
1838 WM_EVCNT_INCR(&sc->sc_ev_txipsum); 1838 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
1839 fields |= WTX_IXSM; 1839 fields |= WTX_IXSM;
1840 } 1840 }
1841 1841
1842 offset += iphl; 1842 offset += iphl;
1843 1843
1844 if (m0->m_pkthdr.csum_flags & 1844 if (m0->m_pkthdr.csum_flags &
1845 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) { 1845 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
1846 WM_EVCNT_INCR(&sc->sc_ev_txtusum); 1846 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
1847 fields |= WTX_TXSM; 1847 fields |= WTX_TXSM;
1848 tucs = WTX_TCPIP_TUCSS(offset) | 1848 tucs = WTX_TCPIP_TUCSS(offset) |
1849 WTX_TCPIP_TUCSO(offset + 1849 WTX_TCPIP_TUCSO(offset +
1850 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) | 1850 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
1851 WTX_TCPIP_TUCSE(0) /* rest of packet */; 1851 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1852 } else if ((m0->m_pkthdr.csum_flags & 1852 } else if ((m0->m_pkthdr.csum_flags &
1853 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) { 1853 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
1854 WM_EVCNT_INCR(&sc->sc_ev_txtusum6); 1854 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
1855 fields |= WTX_TXSM; 1855 fields |= WTX_TXSM;
1856 tucs = WTX_TCPIP_TUCSS(offset) | 1856 tucs = WTX_TCPIP_TUCSS(offset) |
1857 WTX_TCPIP_TUCSO(offset + 1857 WTX_TCPIP_TUCSO(offset +
1858 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) | 1858 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
1859 WTX_TCPIP_TUCSE(0) /* rest of packet */; 1859 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1860 } else { 1860 } else {
1861 /* Just initialize it to a valid TCP context. */ 1861 /* Just initialize it to a valid TCP context. */
1862 tucs = WTX_TCPIP_TUCSS(offset) | 1862 tucs = WTX_TCPIP_TUCSS(offset) |
1863 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) | 1863 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
1864 WTX_TCPIP_TUCSE(0) /* rest of packet */; 1864 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1865 } 1865 }
1866 1866
1867 /* Fill in the context descriptor. */ 1867 /* Fill in the context descriptor. */
1868 t = (struct livengood_tcpip_ctxdesc *) 1868 t = (struct livengood_tcpip_ctxdesc *)
1869 &sc->sc_txdescs[sc->sc_txnext]; 1869 &sc->sc_txdescs[sc->sc_txnext];
1870 t->tcpip_ipcs = htole32(ipcs); 1870 t->tcpip_ipcs = htole32(ipcs);
1871 t->tcpip_tucs = htole32(tucs); 1871 t->tcpip_tucs = htole32(tucs);
1872 t->tcpip_cmdlen = htole32(cmdlen); 1872 t->tcpip_cmdlen = htole32(cmdlen);
1873 t->tcpip_seg = htole32(seg); 1873 t->tcpip_seg = htole32(seg);
1874 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE); 1874 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
1875 1875
1876 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext); 1876 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
1877 txs->txs_ndesc++; 1877 txs->txs_ndesc++;
1878 1878
1879 *cmdp = cmd; 1879 *cmdp = cmd;
1880 *fieldsp = fields; 1880 *fieldsp = fields;
1881 1881
1882 return (0); 1882 return (0);
1883} 1883}
1884 1884
1885static void 1885static void
1886wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0) 1886wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
1887{ 1887{
1888 struct mbuf *m; 1888 struct mbuf *m;
1889 int i; 1889 int i;
1890 1890
1891 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev)); 1891 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
1892 for (m = m0, i = 0; m != NULL; m = m->m_next, i++) 1892 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
1893 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, " 1893 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
1894 "m_flags = 0x%08x\n", device_xname(sc->sc_dev), 1894 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
1895 m->m_data, m->m_len, m->m_flags); 1895 m->m_data, m->m_len, m->m_flags);
1896 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev), 1896 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
1897 i, i == 1 ? "" : "s"); 1897 i, i == 1 ? "" : "s");
1898} 1898}
1899 1899
/*
 * wm_82547_txfifo_stall:
 *
 *	Callout used to wait for the 82547 Tx FIFO to drain,
 *	reset the FIFO pointers, and restart packet transmission.
 *
 *	Runs at splnet.  Only acts while sc_txfifo_stall is set (armed
 *	by wm_82547_txfifo_bugchk()); otherwise it is a no-op.
 */
static void
wm_82547_txfifo_stall(void *arg)
{
	struct wm_softc *sc = arg;
	int s;

	s = splnet();

	if (sc->sc_txfifo_stall) {
		/*
		 * The FIFO is considered drained only when the descriptor
		 * ring head/tail and both pairs of internal FIFO
		 * head/tail registers have all converged.
		 */
		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
			/*
			 * Packets have drained.  Stop transmitter, reset
			 * FIFO pointers, restart transmitter, and kick
			 * the packet queue.
			 */
			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TCTL, tctl);
			CSR_WRITE_FLUSH(sc);

			sc->sc_txfifo_head = 0;
			sc->sc_txfifo_stall = 0;
			wm_start(&sc->sc_ethercom.ec_if);
		} else {
			/*
			 * Still waiting for packets to drain; try again in
			 * another tick.
			 */
			callout_schedule(&sc->sc_txfifo_ch, 1);
		}
	}

	splx(s);
}
1946 1946
/*
 * wm_82547_txfifo_bugchk:
 *
 *	Check for bug condition in the 82547 Tx FIFO.  We need to
 *	prevent enqueueing a packet that would wrap around the end
 *	of the Tx FIFO ring buffer, otherwise the chip will croak.
 *
 *	We do this by checking the amount of space before the end
 *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
 *	the Tx FIFO, wait for all remaining packets to drain, reset
 *	the internal FIFO pointers to the beginning, and restart
 *	transmission on the interface.
 *
 *	Returns non-zero if the packet must be held back (FIFO is, or
 *	has just been, stalled); zero if it is safe to transmit now.
 */
#define	WM_FIFO_HDR		0x10
#define	WM_82547_PAD_LEN	0x3e0
static int
wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
{
	int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
	/* Packet occupancy: payload plus FIFO header, rounded up. */
	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);

	/* Just return if already stalled. */
	if (sc->sc_txfifo_stall)
		return (1);

	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		/* Stall only occurs in half-duplex mode. */
		goto send_packet;
	}

	if (len >= WM_82547_PAD_LEN + space) {
		/* Would wrap the FIFO; stall and wait for it to drain. */
		sc->sc_txfifo_stall = 1;
		callout_schedule(&sc->sc_txfifo_ch, 1);
		return (1);
	}

 send_packet:
	/* Account for this packet in our software FIFO-head shadow. */
	sc->sc_txfifo_head += len;
	if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
		sc->sc_txfifo_head -= sc->sc_txfifo_size;

	return (0);
}
1990 1990
/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 *
 *	Dequeues packets from if_snd, maps them for DMA, fills Tx
 *	descriptors (including checksum-offload context and VLAN
 *	tagging on the last descriptor), and advances the TDT register
 *	to hand them to the chip.  Stops early and sets IFF_OACTIVE
 *	when job slots or ring descriptors run out.
 */
static void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	struct m_tag *mtag;
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
	bus_addr_t curaddr;
	bus_size_t seglen, curlen;
	uint32_t cksumcmd;
	uint8_t cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    device_xname(sc->sc_dev), m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
			/* Running low on jobs; reap completed ones first. */
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
					device_xname(sc->sc_dev)));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		use_tso = (m0->m_pkthdr.csum_flags &
		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;

		/*
		 * So says the Linux driver:
		 * The controller does a simple calculation to make sure
		 * there is enough room in the FIFO before initiating the
		 * DMA for each buffer.  The calc is:
		 *	4 = ceil(buffer len / MSS)
		 * To make sure we don't overrun the FIFO, adjust the max
		 * buffer len if the MSS drops.
		 */
		dmamap->dm_maxsegsz =
		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
		    ? m0->m_pkthdr.segsz << 2
		    : WTX_MAX_LEN;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				log(LOG_ERR, "%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    device_xname(sc->sc_dev));
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				wm_dump_mbuf_chain(sc, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    device_xname(sc->sc_dev), error));
			break;
		}

		segs_needed = dmamap->dm_nsegs;
		if (use_tso) {
			/* For sentinel descriptor; see below. */
			segs_needed++;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to load offload context.
		 */
		if (segs_needed > sc->sc_txfree - 2) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * pack on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d (%d) descriptors, have %d\n",
			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
			    segs_needed, sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		/*
		 * Check for 82547 Tx FIFO bug.  We need to do this
		 * once we know we can transmit the packet, since we
		 * do some internal FIFO space accounting here.
		 */
		if (sc->sc_type == WM_T_82547 &&
		    wm_82547_txfifo_bugchk(sc, m0)) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: 82547 Tx FIFO bug detected\n",
			    device_xname(sc->sc_dev)));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d (%d) DMA segments\n",
		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses the number of DMA segments.  This may be
		 * incremented by 1 if we do checksum offload (a descriptor
		 * is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = segs_needed;

		/* Set up offload parameters for this packet. */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
			if (wm_tx_offload(sc, txs, &cksumcmd,
			    &cksumfields) != 0) {
				/* Error message already displayed. */
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		/* Interrupt delay + insert-FCS on every descriptor. */
		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs; seg++) {
			for (seglen = dmamap->dm_segs[seg].ds_len,
			     curaddr = dmamap->dm_segs[seg].ds_addr;
			     seglen != 0;
			     curaddr += curlen, seglen -= curlen,
			     nexttx = WM_NEXTTX(sc, nexttx)) {
				curlen = seglen;

				/*
				 * So says the Linux driver:
				 * Work around for premature descriptor
				 * write-backs in TSO mode.  Append a
				 * 4-byte sentinel descriptor.
				 */
				if (use_tso &&
				    seg == dmamap->dm_nsegs - 1 &&
				    curlen > 8)
					curlen -= 4;

				wm_set_dma_addr(
				    &sc->sc_txdescs[nexttx].wtx_addr,
				    curaddr);
				sc->sc_txdescs[nexttx].wtx_cmdlen =
				    htole32(cksumcmd | curlen);
				sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
				    0;
				sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
				    cksumfields;
				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
				lasttx = nexttx;

				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: desc %d: low 0x%08lx, "
				     "len 0x%04x\n",
				    device_xname(sc->sc_dev), nexttx,
				    curaddr & 0xffffffffUL, (unsigned)curlen));
			}
		}

		KASSERT(lasttx != -1);

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_RS);

		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
		}

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
		    device_xname(sc->sc_dev),
		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    device_xname(sc->sc_dev), sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
2300 2300
2301/* 2301/*
2302 * wm_watchdog: [ifnet interface function] 2302 * wm_watchdog: [ifnet interface function]
2303 * 2303 *
2304 * Watchdog timer handler. 2304 * Watchdog timer handler.
2305 */ 2305 */
2306static void 2306static void
2307wm_watchdog(struct ifnet *ifp) 2307wm_watchdog(struct ifnet *ifp)
2308{ 2308{
2309 struct wm_softc *sc = ifp->if_softc; 2309 struct wm_softc *sc = ifp->if_softc;
2310 2310
2311 /* 2311 /*
2312 * Since we're using delayed interrupts, sweep up 2312 * Since we're using delayed interrupts, sweep up
2313 * before we report an error. 2313 * before we report an error.
2314 */ 2314 */
2315 wm_txintr(sc); 2315 wm_txintr(sc);
2316 2316
2317 if (sc->sc_txfree != WM_NTXDESC(sc)) { 2317 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2318 log(LOG_ERR, 2318 log(LOG_ERR,
2319 "%s: device timeout (txfree %d txsfree %d txnext %d)\n", 2319 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2320 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree, 2320 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2321 sc->sc_txnext); 2321 sc->sc_txnext);
2322 ifp->if_oerrors++; 2322 ifp->if_oerrors++;
2323 2323
2324 /* Reset the interface. */ 2324 /* Reset the interface. */
2325 (void) wm_init(ifp); 2325 (void) wm_init(ifp);
2326 } 2326 }
2327 2327
2328 /* Try to get more packets going. */ 2328 /* Try to get more packets going. */
2329 wm_start(ifp); 2329 wm_start(ifp);
2330} 2330}
2331 2331
/*
 * wm_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 *
 *	Media ioctls get flow-control mask fixups before being passed
 *	to ifmedia; everything else goes through ether_ioctl(), with
 *	ENETRESET handled locally (reinit on capability change, filter
 *	reprogram on multicast change).  Runs at splnet.
 */
static int
wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0)
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		/* ENETRESET: something changed that we must act on. */
		error = 0;

		if (cmd == SIOCSIFCAP)
			error = (*ifp->if_init)(ifp);
		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			wm_set_filter(sc);
		}
		break;
	}

	/* Try to get more packets going. */
	wm_start(ifp);

	splx(s);
	return (error);
}
2389 2389
/*
 * wm_intr:
 *
 *	Interrupt service routine.
 *
 *	Loops reading ICR until no bits of interest (sc_icr) remain,
 *	dispatching Rx, Tx, link, and overrun handling on each pass.
 *	Returns non-zero if the interrupt was ours.
 */
static int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int handled = 0;

	while (1 /* CONSTCOND */) {
		/* Reading ICR also acknowledges the interrupt causes. */
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;
#if 0 /*NRND > 0*/
		if (RND_ENABLED(&sc->rnd_source))
			rnd_add_uint32(&sc->rnd_source, icr);
#endif

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    device_xname(sc->sc_dev),
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		/* Rx and Tx are always swept, regardless of cause bits. */
		wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TXDW interrupt\n",
			    device_xname(sc->sc_dev)));
			WM_EVCNT_INCR(&sc->sc_ev_txdw);
		}
#endif
		wm_txintr(sc);

		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
			/* Receiver overrun: count it as an input error. */
			ifp->if_ierrors++;
#if defined(WM_DEBUG)
			log(LOG_WARNING, "%s: Receive overrun\n",
			    device_xname(sc->sc_dev));
#endif /* defined(WM_DEBUG) */
		}
	}

	if (handled) {
		/* Try to get more packets going. */
		wm_start(ifp);
	}

	return (handled);
}
2456 2456
2457/* 2457/*
2458 * wm_txintr: 2458 * wm_txintr:
2459 * 2459 *
2460 * Helper; handle transmit interrupts. 2460 * Helper; handle transmit interrupts.
2461 */ 2461 */
2462static void 2462static void
2463wm_txintr(struct wm_softc *sc) 2463wm_txintr(struct wm_softc *sc)
2464{ 2464{
2465 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2465 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2466 struct wm_txsoft *txs; 2466 struct wm_txsoft *txs;
2467 uint8_t status; 2467 uint8_t status;
2468 int i; 2468 int i;
2469 2469
2470 ifp->if_flags &= ~IFF_OACTIVE; 2470 ifp->if_flags &= ~IFF_OACTIVE;
2471 2471
2472 /* 2472 /*
2473 * Go through the Tx list and free mbufs for those 2473 * Go through the Tx list and free mbufs for those
2474 * frames which have been transmitted. 2474 * frames which have been transmitted.
2475 */ 2475 */
2476 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc); 2476 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
2477 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) { 2477 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
2478 txs = &sc->sc_txsoft[i]; 2478 txs = &sc->sc_txsoft[i];
2479 2479
2480 DPRINTF(WM_DEBUG_TX, 2480 DPRINTF(WM_DEBUG_TX,
2481 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i)); 2481 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
2482 2482
2483 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc, 2483 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
2484 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2484 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2485 2485
2486 status = 2486 status =
2487 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status; 2487 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
2488 if ((status & WTX_ST_DD) == 0) { 2488 if ((status & WTX_ST_DD) == 0) {
2489 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1, 2489 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
2490 BUS_DMASYNC_PREREAD); 2490 BUS_DMASYNC_PREREAD);
2491 break; 2491 break;
2492 } 2492 }
2493 2493
2494 DPRINTF(WM_DEBUG_TX, 2494 DPRINTF(WM_DEBUG_TX,
2495 ("%s: TX: job %d done: descs %d..%d\n", 2495 ("%s: TX: job %d done: descs %d..%d\n",
2496 device_xname(sc->sc_dev), i, txs->txs_firstdesc, 2496 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
2497 txs->txs_lastdesc)); 2497 txs->txs_lastdesc));
2498 2498
2499 /* 2499 /*
2500 * XXX We should probably be using the statistics 2500 * XXX We should probably be using the statistics
2501 * XXX registers, but I don't know if they exist 2501 * XXX registers, but I don't know if they exist
2502 * XXX on chips before the i82544. 2502 * XXX on chips before the i82544.
2503 */ 2503 */
2504 2504
2505#ifdef WM_EVENT_COUNTERS 2505#ifdef WM_EVENT_COUNTERS
2506 if (status & WTX_ST_TU) 2506 if (status & WTX_ST_TU)
2507 WM_EVCNT_INCR(&sc->sc_ev_tu); 2507 WM_EVCNT_INCR(&sc->sc_ev_tu);
2508#endif /* WM_EVENT_COUNTERS */ 2508#endif /* WM_EVENT_COUNTERS */
2509 2509
2510 if (status & (WTX_ST_EC|WTX_ST_LC)) { 2510 if (status & (WTX_ST_EC|WTX_ST_LC)) {
2511 ifp->if_oerrors++; 2511 ifp->if_oerrors++;
2512 if (status & WTX_ST_LC) 2512 if (status & WTX_ST_LC)
2513 log(LOG_WARNING, "%s: late collision\n", 2513 log(LOG_WARNING, "%s: late collision\n",
2514 device_xname(sc->sc_dev)); 2514 device_xname(sc->sc_dev));
2515 else if (status & WTX_ST_EC) { 2515 else if (status & WTX_ST_EC) {
2516 ifp->if_collisions += 16; 2516 ifp->if_collisions += 16;
2517 log(LOG_WARNING, "%s: excessive collisions\n", 2517 log(LOG_WARNING, "%s: excessive collisions\n",
2518 device_xname(sc->sc_dev)); 2518 device_xname(sc->sc_dev));
2519 } 2519 }
2520 } else 2520 } else
2521 ifp->if_opackets++; 2521 ifp->if_opackets++;
2522 2522
2523 sc->sc_txfree += txs->txs_ndesc; 2523 sc->sc_txfree += txs->txs_ndesc;
2524 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap, 2524 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
2525 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 2525 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2526 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 2526 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2527 m_freem(txs->txs_mbuf); 2527 m_freem(txs->txs_mbuf);
2528 txs->txs_mbuf = NULL; 2528 txs->txs_mbuf = NULL;
2529 } 2529 }
2530 2530
2531 /* Update the dirty transmit buffer pointer. */ 2531 /* Update the dirty transmit buffer pointer. */
2532 sc->sc_txsdirty = i; 2532 sc->sc_txsdirty = i;
2533 DPRINTF(WM_DEBUG_TX, 2533 DPRINTF(WM_DEBUG_TX,
2534 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i)); 2534 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
2535 2535
2536 /* 2536 /*
2537 * If there are no more pending transmissions, cancel the watchdog 2537 * If there are no more pending transmissions, cancel the watchdog
2538 * timer. 2538 * timer.
2539 */ 2539 */
2540 if (sc->sc_txsfree == WM_TXQUEUELEN(sc)) 2540 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
2541 ifp->if_timer = 0; 2541 ifp->if_timer = 0;
2542} 2542}
2543 2543
2544/* 2544/*
2545 * wm_rxintr: 2545 * wm_rxintr:
2546 * 2546 *
2547 * Helper; handle receive interrupts. 2547 * Helper; handle receive interrupts.
2548 */ 2548 */