Tue Jan 7 13:14:39 2014 UTC
 Insert a completion barrier between register writes and the following delay().
The one exception is chip reset (to avoid a hangup).


(msaitoh)
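
For context, this is roughly the pattern the change applies throughout the
driver (a minimal sketch built on the driver's existing CSR_WRITE and
CSR_WRITE_FLUSH macros; the specific register and bit shown are only
illustrative):

	/*
	 * A posted register write can still be buffered in a host bridge
	 * when delay() starts counting, so the timed wait may effectively
	 * begin before the device has seen the write.  CSR_WRITE_FLUSH()
	 * reads WMREG_STATUS, forcing the write out to the chip first.
	 */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
	CSR_WRITE_FLUSH(sc);	/* completion barrier before the timed wait */
	delay(20000);

The chip-reset path is the one place deliberately left without the flush,
since (per the commit message) reading a register immediately after a global
reset risks a hangup.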
diff -r1.265 -r1.266 src/sys/dev/pci/if_wm.c


--- src/sys/dev/pci/if_wm.c 2013/12/29 21:28:41 1.265
+++ src/sys/dev/pci/if_wm.c 2014/01/07 13:14:39 1.266
@@ -1,1078 +1,1078 @@ @@ -1,1078 +1,1078 @@
1/* $NetBSD: if_wm.c,v 1.265 2013/12/29 21:28:41 msaitoh Exp $ */ 1/* $NetBSD: if_wm.c,v 1.266 2014/01/07 13:14:39 msaitoh Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc. 4 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc. 7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the 15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution. 16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software 17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement: 18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by 19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc. 20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior 22 * or promote products derived from this software without specific prior
23 * written permission. 23 * written permission.
24 * 24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE. 35 * POSSIBILITY OF SUCH DAMAGE.
36 */ 36 */
37 37
38/******************************************************************************* 38/*******************************************************************************
39 39
40 Copyright (c) 2001-2005, Intel Corporation 40 Copyright (c) 2001-2005, Intel Corporation
41 All rights reserved. 41 All rights reserved.
42  42
43 Redistribution and use in source and binary forms, with or without 43 Redistribution and use in source and binary forms, with or without
44 modification, are permitted provided that the following conditions are met: 44 modification, are permitted provided that the following conditions are met:
45  45
46 1. Redistributions of source code must retain the above copyright notice, 46 1. Redistributions of source code must retain the above copyright notice,
47 this list of conditions and the following disclaimer. 47 this list of conditions and the following disclaimer.
48  48
49 2. Redistributions in binary form must reproduce the above copyright 49 2. Redistributions in binary form must reproduce the above copyright
50 notice, this list of conditions and the following disclaimer in the 50 notice, this list of conditions and the following disclaimer in the
51 documentation and/or other materials provided with the distribution. 51 documentation and/or other materials provided with the distribution.
52  52
53 3. Neither the name of the Intel Corporation nor the names of its 53 3. Neither the name of the Intel Corporation nor the names of its
54 contributors may be used to endorse or promote products derived from 54 contributors may be used to endorse or promote products derived from
55 this software without specific prior written permission. 55 this software without specific prior written permission.
56  56
57 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 57 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 58 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 59 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 60 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 61 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 62 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 63 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 64 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 65 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 66 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67 POSSIBILITY OF SUCH DAMAGE. 67 POSSIBILITY OF SUCH DAMAGE.
68 68
69*******************************************************************************/ 69*******************************************************************************/
70/* 70/*
71 * Device driver for the Intel i8254x family of Gigabit Ethernet chips. 71 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72 * 72 *
73 * TODO (in order of importance): 73 * TODO (in order of importance):
74 * 74 *
75 * - Rework how parameters are loaded from the EEPROM. 75 * - Rework how parameters are loaded from the EEPROM.
76 */ 76 */
77 77
78#include <sys/cdefs.h> 78#include <sys/cdefs.h>
79__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.265 2013/12/29 21:28:41 msaitoh Exp $"); 79__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.266 2014/01/07 13:14:39 msaitoh Exp $");
80 80
81#include <sys/param.h> 81#include <sys/param.h>
82#include <sys/systm.h> 82#include <sys/systm.h>
83#include <sys/callout.h> 83#include <sys/callout.h>
84#include <sys/mbuf.h> 84#include <sys/mbuf.h>
85#include <sys/malloc.h> 85#include <sys/malloc.h>
86#include <sys/kernel.h> 86#include <sys/kernel.h>
87#include <sys/socket.h> 87#include <sys/socket.h>
88#include <sys/ioctl.h> 88#include <sys/ioctl.h>
89#include <sys/errno.h> 89#include <sys/errno.h>
90#include <sys/device.h> 90#include <sys/device.h>
91#include <sys/queue.h> 91#include <sys/queue.h>
92#include <sys/syslog.h> 92#include <sys/syslog.h>
93 93
94#include <sys/rnd.h> 94#include <sys/rnd.h>
95 95
96#include <net/if.h> 96#include <net/if.h>
97#include <net/if_dl.h> 97#include <net/if_dl.h>
98#include <net/if_media.h> 98#include <net/if_media.h>
99#include <net/if_ether.h> 99#include <net/if_ether.h>
100 100
101#include <net/bpf.h> 101#include <net/bpf.h>
102 102
103#include <netinet/in.h> /* XXX for struct ip */ 103#include <netinet/in.h> /* XXX for struct ip */
104#include <netinet/in_systm.h> /* XXX for struct ip */ 104#include <netinet/in_systm.h> /* XXX for struct ip */
105#include <netinet/ip.h> /* XXX for struct ip */ 105#include <netinet/ip.h> /* XXX for struct ip */
106#include <netinet/ip6.h> /* XXX for struct ip6_hdr */ 106#include <netinet/ip6.h> /* XXX for struct ip6_hdr */
107#include <netinet/tcp.h> /* XXX for struct tcphdr */ 107#include <netinet/tcp.h> /* XXX for struct tcphdr */
108 108
109#include <sys/bus.h> 109#include <sys/bus.h>
110#include <sys/intr.h> 110#include <sys/intr.h>
111#include <machine/endian.h> 111#include <machine/endian.h>
112 112
113#include <dev/mii/mii.h> 113#include <dev/mii/mii.h>
114#include <dev/mii/miivar.h> 114#include <dev/mii/miivar.h>
115#include <dev/mii/miidevs.h> 115#include <dev/mii/miidevs.h>
116#include <dev/mii/mii_bitbang.h> 116#include <dev/mii/mii_bitbang.h>
117#include <dev/mii/ikphyreg.h> 117#include <dev/mii/ikphyreg.h>
118#include <dev/mii/igphyreg.h> 118#include <dev/mii/igphyreg.h>
119#include <dev/mii/igphyvar.h> 119#include <dev/mii/igphyvar.h>
120#include <dev/mii/inbmphyreg.h> 120#include <dev/mii/inbmphyreg.h>
121 121
122#include <dev/pci/pcireg.h> 122#include <dev/pci/pcireg.h>
123#include <dev/pci/pcivar.h> 123#include <dev/pci/pcivar.h>
124#include <dev/pci/pcidevs.h> 124#include <dev/pci/pcidevs.h>
125 125
126#include <dev/pci/if_wmreg.h> 126#include <dev/pci/if_wmreg.h>
127#include <dev/pci/if_wmvar.h> 127#include <dev/pci/if_wmvar.h>
128 128
129#ifdef WM_DEBUG 129#ifdef WM_DEBUG
130#define WM_DEBUG_LINK 0x01 130#define WM_DEBUG_LINK 0x01
131#define WM_DEBUG_TX 0x02 131#define WM_DEBUG_TX 0x02
132#define WM_DEBUG_RX 0x04 132#define WM_DEBUG_RX 0x04
133#define WM_DEBUG_GMII 0x08 133#define WM_DEBUG_GMII 0x08
134#define WM_DEBUG_MANAGE 0x10 134#define WM_DEBUG_MANAGE 0x10
135#define WM_DEBUG_NVM 0x20 135#define WM_DEBUG_NVM 0x20
136int wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII 136int wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
137 | WM_DEBUG_MANAGE | WM_DEBUG_NVM; 137 | WM_DEBUG_MANAGE | WM_DEBUG_NVM;
138 138
139#define DPRINTF(x, y) if (wm_debug & (x)) printf y 139#define DPRINTF(x, y) if (wm_debug & (x)) printf y
140#else 140#else
141#define DPRINTF(x, y) /* nothing */ 141#define DPRINTF(x, y) /* nothing */
142#endif /* WM_DEBUG */ 142#endif /* WM_DEBUG */
143 143
144/* 144/*
145 * Transmit descriptor list size. Due to errata, we can only have 145 * Transmit descriptor list size. Due to errata, we can only have
146 * 256 hardware descriptors in the ring on < 82544, but we use 4096 146 * 256 hardware descriptors in the ring on < 82544, but we use 4096
147 * on >= 82544. We tell the upper layers that they can queue a lot 147 * on >= 82544. We tell the upper layers that they can queue a lot
148 * of packets, and we go ahead and manage up to 64 (16 for the i82547) 148 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
149 * of them at a time. 149 * of them at a time.
150 * 150 *
151 * We allow up to 256 (!) DMA segments per packet. Pathological packet 151 * We allow up to 256 (!) DMA segments per packet. Pathological packet
152 * chains containing many small mbufs have been observed in zero-copy 152 * chains containing many small mbufs have been observed in zero-copy
153 * situations with jumbo frames. 153 * situations with jumbo frames.
154 */ 154 */
155#define WM_NTXSEGS 256 155#define WM_NTXSEGS 256
156#define WM_IFQUEUELEN 256 156#define WM_IFQUEUELEN 256
157#define WM_TXQUEUELEN_MAX 64 157#define WM_TXQUEUELEN_MAX 64
158#define WM_TXQUEUELEN_MAX_82547 16 158#define WM_TXQUEUELEN_MAX_82547 16
159#define WM_TXQUEUELEN(sc) ((sc)->sc_txnum) 159#define WM_TXQUEUELEN(sc) ((sc)->sc_txnum)
160#define WM_TXQUEUELEN_MASK(sc) (WM_TXQUEUELEN(sc) - 1) 160#define WM_TXQUEUELEN_MASK(sc) (WM_TXQUEUELEN(sc) - 1)
161#define WM_TXQUEUE_GC(sc) (WM_TXQUEUELEN(sc) / 8) 161#define WM_TXQUEUE_GC(sc) (WM_TXQUEUELEN(sc) / 8)
162#define WM_NTXDESC_82542 256 162#define WM_NTXDESC_82542 256
163#define WM_NTXDESC_82544 4096 163#define WM_NTXDESC_82544 4096
164#define WM_NTXDESC(sc) ((sc)->sc_ntxdesc) 164#define WM_NTXDESC(sc) ((sc)->sc_ntxdesc)
165#define WM_NTXDESC_MASK(sc) (WM_NTXDESC(sc) - 1) 165#define WM_NTXDESC_MASK(sc) (WM_NTXDESC(sc) - 1)
166#define WM_TXDESCSIZE(sc) (WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t)) 166#define WM_TXDESCSIZE(sc) (WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
167#define WM_NEXTTX(sc, x) (((x) + 1) & WM_NTXDESC_MASK(sc)) 167#define WM_NEXTTX(sc, x) (((x) + 1) & WM_NTXDESC_MASK(sc))
168#define WM_NEXTTXS(sc, x) (((x) + 1) & WM_TXQUEUELEN_MASK(sc)) 168#define WM_NEXTTXS(sc, x) (((x) + 1) & WM_TXQUEUELEN_MASK(sc))
169 169
170#define WM_MAXTXDMA round_page(IP_MAXPACKET) /* for TSO */ 170#define WM_MAXTXDMA round_page(IP_MAXPACKET) /* for TSO */
171 171
172/* 172/*
173 * Receive descriptor list size. We have one Rx buffer for normal 173 * Receive descriptor list size. We have one Rx buffer for normal
174 * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized 174 * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized
175 * packet. We allocate 256 receive descriptors, each with a 2k 175 * packet. We allocate 256 receive descriptors, each with a 2k
176 * buffer (MCLBYTES), which gives us room for 50 jumbo packets. 176 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
177 */ 177 */
178#define WM_NRXDESC 256 178#define WM_NRXDESC 256
179#define WM_NRXDESC_MASK (WM_NRXDESC - 1) 179#define WM_NRXDESC_MASK (WM_NRXDESC - 1)
180#define WM_NEXTRX(x) (((x) + 1) & WM_NRXDESC_MASK) 180#define WM_NEXTRX(x) (((x) + 1) & WM_NRXDESC_MASK)
181#define WM_PREVRX(x) (((x) - 1) & WM_NRXDESC_MASK) 181#define WM_PREVRX(x) (((x) - 1) & WM_NRXDESC_MASK)
182 182
183/* 183/*
184 * Control structures are DMA'd to the i82542 chip. We allocate them in 184 * Control structures are DMA'd to the i82542 chip. We allocate them in
185 * a single clump that maps to a single DMA segment to make several things 185 * a single clump that maps to a single DMA segment to make several things
186 * easier. 186 * easier.
187 */ 187 */
188struct wm_control_data_82544 { 188struct wm_control_data_82544 {
189 /* 189 /*
190 * The receive descriptors. 190 * The receive descriptors.
191 */ 191 */
192 wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC]; 192 wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
193 193
194 /* 194 /*
195 * The transmit descriptors. Put these at the end, because 195 * The transmit descriptors. Put these at the end, because
196 * we might use a smaller number of them. 196 * we might use a smaller number of them.
197 */ 197 */
198 union { 198 union {
199 wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544]; 199 wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
200 nq_txdesc_t wcdu_nq_txdescs[WM_NTXDESC_82544]; 200 nq_txdesc_t wcdu_nq_txdescs[WM_NTXDESC_82544];
201 } wdc_u; 201 } wdc_u;
202}; 202};
203 203
204struct wm_control_data_82542 { 204struct wm_control_data_82542 {
205 wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC]; 205 wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
206 wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542]; 206 wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
207}; 207};
208 208
209#define WM_CDOFF(x) offsetof(struct wm_control_data_82544, x) 209#define WM_CDOFF(x) offsetof(struct wm_control_data_82544, x)
210#define WM_CDTXOFF(x) WM_CDOFF(wdc_u.wcdu_txdescs[(x)]) 210#define WM_CDTXOFF(x) WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
211#define WM_CDRXOFF(x) WM_CDOFF(wcd_rxdescs[(x)]) 211#define WM_CDRXOFF(x) WM_CDOFF(wcd_rxdescs[(x)])
212 212
213/* 213/*
214 * Software state for transmit jobs. 214 * Software state for transmit jobs.
215 */ 215 */
216struct wm_txsoft { 216struct wm_txsoft {
217 struct mbuf *txs_mbuf; /* head of our mbuf chain */ 217 struct mbuf *txs_mbuf; /* head of our mbuf chain */
218 bus_dmamap_t txs_dmamap; /* our DMA map */ 218 bus_dmamap_t txs_dmamap; /* our DMA map */
219 int txs_firstdesc; /* first descriptor in packet */ 219 int txs_firstdesc; /* first descriptor in packet */
220 int txs_lastdesc; /* last descriptor in packet */ 220 int txs_lastdesc; /* last descriptor in packet */
221 int txs_ndesc; /* # of descriptors used */ 221 int txs_ndesc; /* # of descriptors used */
222}; 222};
223 223
224/* 224/*
225 * Software state for receive buffers. Each descriptor gets a 225 * Software state for receive buffers. Each descriptor gets a
226 * 2k (MCLBYTES) buffer and a DMA map. For packets which fill 226 * 2k (MCLBYTES) buffer and a DMA map. For packets which fill
227 * more than one buffer, we chain them together. 227 * more than one buffer, we chain them together.
228 */ 228 */
229struct wm_rxsoft { 229struct wm_rxsoft {
230 struct mbuf *rxs_mbuf; /* head of our mbuf chain */ 230 struct mbuf *rxs_mbuf; /* head of our mbuf chain */
231 bus_dmamap_t rxs_dmamap; /* our DMA map */ 231 bus_dmamap_t rxs_dmamap; /* our DMA map */
232}; 232};
233 233
234#define WM_LINKUP_TIMEOUT 50 234#define WM_LINKUP_TIMEOUT 50
235 235
236static uint16_t swfwphysem[] = { 236static uint16_t swfwphysem[] = {
237 SWFW_PHY0_SM, 237 SWFW_PHY0_SM,
238 SWFW_PHY1_SM, 238 SWFW_PHY1_SM,
239 SWFW_PHY2_SM, 239 SWFW_PHY2_SM,
240 SWFW_PHY3_SM 240 SWFW_PHY3_SM
241}; 241};
242 242
243/* 243/*
244 * Software state per device. 244 * Software state per device.
245 */ 245 */
246struct wm_softc { 246struct wm_softc {
247 device_t sc_dev; /* generic device information */ 247 device_t sc_dev; /* generic device information */
248 bus_space_tag_t sc_st; /* bus space tag */ 248 bus_space_tag_t sc_st; /* bus space tag */
249 bus_space_handle_t sc_sh; /* bus space handle */ 249 bus_space_handle_t sc_sh; /* bus space handle */
250 bus_size_t sc_ss; /* bus space size */ 250 bus_size_t sc_ss; /* bus space size */
251 bus_space_tag_t sc_iot; /* I/O space tag */ 251 bus_space_tag_t sc_iot; /* I/O space tag */
252 bus_space_handle_t sc_ioh; /* I/O space handle */ 252 bus_space_handle_t sc_ioh; /* I/O space handle */
253 bus_size_t sc_ios; /* I/O space size */ 253 bus_size_t sc_ios; /* I/O space size */
254 bus_space_tag_t sc_flasht; /* flash registers space tag */ 254 bus_space_tag_t sc_flasht; /* flash registers space tag */
255 bus_space_handle_t sc_flashh; /* flash registers space handle */ 255 bus_space_handle_t sc_flashh; /* flash registers space handle */
256 bus_dma_tag_t sc_dmat; /* bus DMA tag */ 256 bus_dma_tag_t sc_dmat; /* bus DMA tag */
257 257
258 struct ethercom sc_ethercom; /* ethernet common data */ 258 struct ethercom sc_ethercom; /* ethernet common data */
259 struct mii_data sc_mii; /* MII/media information */ 259 struct mii_data sc_mii; /* MII/media information */
260 260
261 pci_chipset_tag_t sc_pc; 261 pci_chipset_tag_t sc_pc;
262 pcitag_t sc_pcitag; 262 pcitag_t sc_pcitag;
263 int sc_bus_speed; /* PCI/PCIX bus speed */ 263 int sc_bus_speed; /* PCI/PCIX bus speed */
264 int sc_pcixe_capoff; /* PCI[Xe] capability register offset */ 264 int sc_pcixe_capoff; /* PCI[Xe] capability register offset */
265 265
266 const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */ 266 const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
267 wm_chip_type sc_type; /* MAC type */ 267 wm_chip_type sc_type; /* MAC type */
268 int sc_rev; /* MAC revision */ 268 int sc_rev; /* MAC revision */
269 wm_phy_type sc_phytype; /* PHY type */ 269 wm_phy_type sc_phytype; /* PHY type */
270 int sc_funcid; /* unit number of the chip (0 to 3) */ 270 int sc_funcid; /* unit number of the chip (0 to 3) */
271 int sc_flags; /* flags; see below */ 271 int sc_flags; /* flags; see below */
272 int sc_if_flags; /* last if_flags */ 272 int sc_if_flags; /* last if_flags */
273 int sc_flowflags; /* 802.3x flow control flags */ 273 int sc_flowflags; /* 802.3x flow control flags */
274 int sc_align_tweak; 274 int sc_align_tweak;
275 275
276 void *sc_ih; /* interrupt cookie */ 276 void *sc_ih; /* interrupt cookie */
277 callout_t sc_tick_ch; /* tick callout */ 277 callout_t sc_tick_ch; /* tick callout */
278 278
279 int sc_ee_addrbits; /* EEPROM address bits */ 279 int sc_ee_addrbits; /* EEPROM address bits */
280 int sc_ich8_flash_base; 280 int sc_ich8_flash_base;
281 int sc_ich8_flash_bank_size; 281 int sc_ich8_flash_bank_size;
282 int sc_nvm_k1_enabled; 282 int sc_nvm_k1_enabled;
283 283
284 /* 284 /*
285 * Software state for the transmit and receive descriptors. 285 * Software state for the transmit and receive descriptors.
286 */ 286 */
287 int sc_txnum; /* must be a power of two */ 287 int sc_txnum; /* must be a power of two */
288 struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX]; 288 struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
289 struct wm_rxsoft sc_rxsoft[WM_NRXDESC]; 289 struct wm_rxsoft sc_rxsoft[WM_NRXDESC];
290 290
291 /* 291 /*
292 * Control data structures. 292 * Control data structures.
293 */ 293 */
294 int sc_ntxdesc; /* must be a power of two */ 294 int sc_ntxdesc; /* must be a power of two */
295 struct wm_control_data_82544 *sc_control_data; 295 struct wm_control_data_82544 *sc_control_data;
296 bus_dmamap_t sc_cddmamap; /* control data DMA map */ 296 bus_dmamap_t sc_cddmamap; /* control data DMA map */
297 bus_dma_segment_t sc_cd_seg; /* control data segment */ 297 bus_dma_segment_t sc_cd_seg; /* control data segment */
298 int sc_cd_rseg; /* real number of control segment */ 298 int sc_cd_rseg; /* real number of control segment */
299 size_t sc_cd_size; /* control data size */ 299 size_t sc_cd_size; /* control data size */
300#define sc_cddma sc_cddmamap->dm_segs[0].ds_addr 300#define sc_cddma sc_cddmamap->dm_segs[0].ds_addr
301#define sc_txdescs sc_control_data->wdc_u.wcdu_txdescs 301#define sc_txdescs sc_control_data->wdc_u.wcdu_txdescs
302#define sc_nq_txdescs sc_control_data->wdc_u.wcdu_nq_txdescs 302#define sc_nq_txdescs sc_control_data->wdc_u.wcdu_nq_txdescs
303#define sc_rxdescs sc_control_data->wcd_rxdescs 303#define sc_rxdescs sc_control_data->wcd_rxdescs
304 304
305#ifdef WM_EVENT_COUNTERS 305#ifdef WM_EVENT_COUNTERS
306 /* Event counters. */ 306 /* Event counters. */
307 struct evcnt sc_ev_txsstall; /* Tx stalled due to no txs */ 307 struct evcnt sc_ev_txsstall; /* Tx stalled due to no txs */
308 struct evcnt sc_ev_txdstall; /* Tx stalled due to no txd */ 308 struct evcnt sc_ev_txdstall; /* Tx stalled due to no txd */
309 struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */ 309 struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
310 struct evcnt sc_ev_txdw; /* Tx descriptor interrupts */ 310 struct evcnt sc_ev_txdw; /* Tx descriptor interrupts */
311 struct evcnt sc_ev_txqe; /* Tx queue empty interrupts */ 311 struct evcnt sc_ev_txqe; /* Tx queue empty interrupts */
312 struct evcnt sc_ev_rxintr; /* Rx interrupts */ 312 struct evcnt sc_ev_rxintr; /* Rx interrupts */
313 struct evcnt sc_ev_linkintr; /* Link interrupts */ 313 struct evcnt sc_ev_linkintr; /* Link interrupts */
314 314
315 struct evcnt sc_ev_rxipsum; /* IP checksums checked in-bound */ 315 struct evcnt sc_ev_rxipsum; /* IP checksums checked in-bound */
316 struct evcnt sc_ev_rxtusum; /* TCP/UDP cksums checked in-bound */ 316 struct evcnt sc_ev_rxtusum; /* TCP/UDP cksums checked in-bound */
317 struct evcnt sc_ev_txipsum; /* IP checksums comp. out-bound */ 317 struct evcnt sc_ev_txipsum; /* IP checksums comp. out-bound */
318 struct evcnt sc_ev_txtusum; /* TCP/UDP cksums comp. out-bound */ 318 struct evcnt sc_ev_txtusum; /* TCP/UDP cksums comp. out-bound */
319 struct evcnt sc_ev_txtusum6; /* TCP/UDP v6 cksums comp. out-bound */ 319 struct evcnt sc_ev_txtusum6; /* TCP/UDP v6 cksums comp. out-bound */
320 struct evcnt sc_ev_txtso; /* TCP seg offload out-bound (IPv4) */ 320 struct evcnt sc_ev_txtso; /* TCP seg offload out-bound (IPv4) */
321 struct evcnt sc_ev_txtso6; /* TCP seg offload out-bound (IPv6) */ 321 struct evcnt sc_ev_txtso6; /* TCP seg offload out-bound (IPv6) */
322 struct evcnt sc_ev_txtsopain; /* painful header manip. for TSO */ 322 struct evcnt sc_ev_txtsopain; /* painful header manip. for TSO */
323 323
324 struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */ 324 struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
325 struct evcnt sc_ev_txdrop; /* Tx packets dropped (too many segs) */ 325 struct evcnt sc_ev_txdrop; /* Tx packets dropped (too many segs) */
326 326
327 struct evcnt sc_ev_tu; /* Tx underrun */ 327 struct evcnt sc_ev_tu; /* Tx underrun */
328 328
329 struct evcnt sc_ev_tx_xoff; /* Tx PAUSE(!0) frames */ 329 struct evcnt sc_ev_tx_xoff; /* Tx PAUSE(!0) frames */
330 struct evcnt sc_ev_tx_xon; /* Tx PAUSE(0) frames */ 330 struct evcnt sc_ev_tx_xon; /* Tx PAUSE(0) frames */
331 struct evcnt sc_ev_rx_xoff; /* Rx PAUSE(!0) frames */ 331 struct evcnt sc_ev_rx_xoff; /* Rx PAUSE(!0) frames */
332 struct evcnt sc_ev_rx_xon; /* Rx PAUSE(0) frames */ 332 struct evcnt sc_ev_rx_xon; /* Rx PAUSE(0) frames */
333 struct evcnt sc_ev_rx_macctl; /* Rx Unsupported */ 333 struct evcnt sc_ev_rx_macctl; /* Rx Unsupported */
334#endif /* WM_EVENT_COUNTERS */ 334#endif /* WM_EVENT_COUNTERS */
335 335
336 bus_addr_t sc_tdt_reg; /* offset of TDT register */ 336 bus_addr_t sc_tdt_reg; /* offset of TDT register */
337 337
338 int sc_txfree; /* number of free Tx descriptors */ 338 int sc_txfree; /* number of free Tx descriptors */
339 int sc_txnext; /* next ready Tx descriptor */ 339 int sc_txnext; /* next ready Tx descriptor */
340 340
341 int sc_txsfree; /* number of free Tx jobs */ 341 int sc_txsfree; /* number of free Tx jobs */
342 int sc_txsnext; /* next free Tx job */ 342 int sc_txsnext; /* next free Tx job */
343 int sc_txsdirty; /* dirty Tx jobs */ 343 int sc_txsdirty; /* dirty Tx jobs */
344 344
345 /* These 5 variables are used only on the 82547. */ 345 /* These 5 variables are used only on the 82547. */
346 int sc_txfifo_size; /* Tx FIFO size */ 346 int sc_txfifo_size; /* Tx FIFO size */
347 int sc_txfifo_head; /* current head of FIFO */ 347 int sc_txfifo_head; /* current head of FIFO */
348 uint32_t sc_txfifo_addr; /* internal address of start of FIFO */ 348 uint32_t sc_txfifo_addr; /* internal address of start of FIFO */
349 int sc_txfifo_stall; /* Tx FIFO is stalled */ 349 int sc_txfifo_stall; /* Tx FIFO is stalled */
350 callout_t sc_txfifo_ch; /* Tx FIFO stall work-around timer */ 350 callout_t sc_txfifo_ch; /* Tx FIFO stall work-around timer */
351 351
352 bus_addr_t sc_rdt_reg; /* offset of RDT register */ 352 bus_addr_t sc_rdt_reg; /* offset of RDT register */
353 353
354 int sc_rxptr; /* next ready Rx descriptor/queue ent */ 354 int sc_rxptr; /* next ready Rx descriptor/queue ent */
355 int sc_rxdiscard; 355 int sc_rxdiscard;
356 int sc_rxlen; 356 int sc_rxlen;
357 struct mbuf *sc_rxhead; 357 struct mbuf *sc_rxhead;
358 struct mbuf *sc_rxtail; 358 struct mbuf *sc_rxtail;
359 struct mbuf **sc_rxtailp; 359 struct mbuf **sc_rxtailp;
360 360
361 uint32_t sc_ctrl; /* prototype CTRL register */ 361 uint32_t sc_ctrl; /* prototype CTRL register */
362#if 0 362#if 0
363 uint32_t sc_ctrl_ext; /* prototype CTRL_EXT register */ 363 uint32_t sc_ctrl_ext; /* prototype CTRL_EXT register */
364#endif 364#endif
365 uint32_t sc_icr; /* prototype interrupt bits */ 365 uint32_t sc_icr; /* prototype interrupt bits */
366 uint32_t sc_itr; /* prototype intr throttling reg */ 366 uint32_t sc_itr; /* prototype intr throttling reg */
367 uint32_t sc_tctl; /* prototype TCTL register */ 367 uint32_t sc_tctl; /* prototype TCTL register */
368 uint32_t sc_rctl; /* prototype RCTL register */ 368 uint32_t sc_rctl; /* prototype RCTL register */
369 uint32_t sc_txcw; /* prototype TXCW register */ 369 uint32_t sc_txcw; /* prototype TXCW register */
370 uint32_t sc_tipg; /* prototype TIPG register */ 370 uint32_t sc_tipg; /* prototype TIPG register */
371 uint32_t sc_fcrtl; /* prototype FCRTL register */ 371 uint32_t sc_fcrtl; /* prototype FCRTL register */
372 uint32_t sc_pba; /* prototype PBA register */ 372 uint32_t sc_pba; /* prototype PBA register */
373 373
374 int sc_tbi_linkup; /* TBI link status */ 374 int sc_tbi_linkup; /* TBI link status */
375 int sc_tbi_anegticks; /* autonegotiation ticks */ 375 int sc_tbi_anegticks; /* autonegotiation ticks */
376 int sc_tbi_ticks; /* tbi ticks */ 376 int sc_tbi_ticks; /* tbi ticks */
377 int sc_tbi_nrxcfg; /* count of ICR_RXCFG */ 377 int sc_tbi_nrxcfg; /* count of ICR_RXCFG */
378 int sc_tbi_lastnrxcfg; /* count of ICR_RXCFG (on last tick) */ 378 int sc_tbi_lastnrxcfg; /* count of ICR_RXCFG (on last tick) */
379 379
380 int sc_mchash_type; /* multicast filter offset */ 380 int sc_mchash_type; /* multicast filter offset */
381 381
382 krndsource_t rnd_source; /* random source */ 382 krndsource_t rnd_source; /* random source */
383}; 383};
384 384
385#define WM_RXCHAIN_RESET(sc) \ 385#define WM_RXCHAIN_RESET(sc) \
386do { \ 386do { \
387 (sc)->sc_rxtailp = &(sc)->sc_rxhead; \ 387 (sc)->sc_rxtailp = &(sc)->sc_rxhead; \
388 *(sc)->sc_rxtailp = NULL; \ 388 *(sc)->sc_rxtailp = NULL; \
389 (sc)->sc_rxlen = 0; \ 389 (sc)->sc_rxlen = 0; \
390} while (/*CONSTCOND*/0) 390} while (/*CONSTCOND*/0)
391 391
392#define WM_RXCHAIN_LINK(sc, m) \ 392#define WM_RXCHAIN_LINK(sc, m) \
393do { \ 393do { \
394 *(sc)->sc_rxtailp = (sc)->sc_rxtail = (m); \ 394 *(sc)->sc_rxtailp = (sc)->sc_rxtail = (m); \
395 (sc)->sc_rxtailp = &(m)->m_next; \ 395 (sc)->sc_rxtailp = &(m)->m_next; \
396} while (/*CONSTCOND*/0) 396} while (/*CONSTCOND*/0)
397 397
398#ifdef WM_EVENT_COUNTERS 398#ifdef WM_EVENT_COUNTERS
399#define WM_EVCNT_INCR(ev) (ev)->ev_count++ 399#define WM_EVCNT_INCR(ev) (ev)->ev_count++
400#define WM_EVCNT_ADD(ev, val) (ev)->ev_count += (val) 400#define WM_EVCNT_ADD(ev, val) (ev)->ev_count += (val)
401#else 401#else
402#define WM_EVCNT_INCR(ev) /* nothing */ 402#define WM_EVCNT_INCR(ev) /* nothing */
403#define WM_EVCNT_ADD(ev, val) /* nothing */ 403#define WM_EVCNT_ADD(ev, val) /* nothing */
404#endif 404#endif
405 405
406#define CSR_READ(sc, reg) \ 406#define CSR_READ(sc, reg) \
407 bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg)) 407 bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
408#define CSR_WRITE(sc, reg, val) \ 408#define CSR_WRITE(sc, reg, val) \
409 bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val)) 409 bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
410#define CSR_WRITE_FLUSH(sc) \ 410#define CSR_WRITE_FLUSH(sc) \
411 (void) CSR_READ((sc), WMREG_STATUS) 411 (void) CSR_READ((sc), WMREG_STATUS)
412 412
413#define ICH8_FLASH_READ32(sc, reg) \ 413#define ICH8_FLASH_READ32(sc, reg) \
414 bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg)) 414 bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
415#define ICH8_FLASH_WRITE32(sc, reg, data) \ 415#define ICH8_FLASH_WRITE32(sc, reg, data) \
416 bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data)) 416 bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
417 417
418#define ICH8_FLASH_READ16(sc, reg) \ 418#define ICH8_FLASH_READ16(sc, reg) \
419 bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg)) 419 bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
420#define ICH8_FLASH_WRITE16(sc, reg, data) \ 420#define ICH8_FLASH_WRITE16(sc, reg, data) \
421 bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data)) 421 bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
422 422
423#define WM_CDTXADDR(sc, x) ((sc)->sc_cddma + WM_CDTXOFF((x))) 423#define WM_CDTXADDR(sc, x) ((sc)->sc_cddma + WM_CDTXOFF((x)))
424#define WM_CDRXADDR(sc, x) ((sc)->sc_cddma + WM_CDRXOFF((x))) 424#define WM_CDRXADDR(sc, x) ((sc)->sc_cddma + WM_CDRXOFF((x)))
425 425
426#define WM_CDTXADDR_LO(sc, x) (WM_CDTXADDR((sc), (x)) & 0xffffffffU) 426#define WM_CDTXADDR_LO(sc, x) (WM_CDTXADDR((sc), (x)) & 0xffffffffU)
427#define WM_CDTXADDR_HI(sc, x) \ 427#define WM_CDTXADDR_HI(sc, x) \
428 (sizeof(bus_addr_t) == 8 ? \ 428 (sizeof(bus_addr_t) == 8 ? \
429 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0) 429 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)
430 430
431#define WM_CDRXADDR_LO(sc, x) (WM_CDRXADDR((sc), (x)) & 0xffffffffU) 431#define WM_CDRXADDR_LO(sc, x) (WM_CDRXADDR((sc), (x)) & 0xffffffffU)
432#define WM_CDRXADDR_HI(sc, x) \ 432#define WM_CDRXADDR_HI(sc, x) \
433 (sizeof(bus_addr_t) == 8 ? \ 433 (sizeof(bus_addr_t) == 8 ? \
434 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0) 434 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
435 435
436#define WM_CDTXSYNC(sc, x, n, ops) \ 436#define WM_CDTXSYNC(sc, x, n, ops) \
437do { \ 437do { \
438 int __x, __n; \ 438 int __x, __n; \
439 \ 439 \
440 __x = (x); \ 440 __x = (x); \
441 __n = (n); \ 441 __n = (n); \
442 \ 442 \
443 /* If it will wrap around, sync to the end of the ring. */ \ 443 /* If it will wrap around, sync to the end of the ring. */ \
444 if ((__x + __n) > WM_NTXDESC(sc)) { \ 444 if ((__x + __n) > WM_NTXDESC(sc)) { \
445 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 445 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
446 WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * \ 446 WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * \
447 (WM_NTXDESC(sc) - __x), (ops)); \ 447 (WM_NTXDESC(sc) - __x), (ops)); \
448 __n -= (WM_NTXDESC(sc) - __x); \ 448 __n -= (WM_NTXDESC(sc) - __x); \
449 __x = 0; \ 449 __x = 0; \
450 } \ 450 } \
451 \ 451 \
452 /* Now sync whatever is left. */ \ 452 /* Now sync whatever is left. */ \
453 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 453 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
454 WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops)); \ 454 WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops)); \
455} while (/*CONSTCOND*/0) 455} while (/*CONSTCOND*/0)
456 456
457#define WM_CDRXSYNC(sc, x, ops) \ 457#define WM_CDRXSYNC(sc, x, ops) \
458do { \ 458do { \
459 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 459 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
460 WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops)); \ 460 WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops)); \
461} while (/*CONSTCOND*/0) 461} while (/*CONSTCOND*/0)
462 462
463#define WM_INIT_RXDESC(sc, x) \ 463#define WM_INIT_RXDESC(sc, x) \
464do { \ 464do { \
465 struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \ 465 struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
466 wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)]; \ 466 wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)]; \
467 struct mbuf *__m = __rxs->rxs_mbuf; \ 467 struct mbuf *__m = __rxs->rxs_mbuf; \
468 \ 468 \
469 /* \ 469 /* \
470 * Note: We scoot the packet forward 2 bytes in the buffer \ 470 * Note: We scoot the packet forward 2 bytes in the buffer \
471 * so that the payload after the Ethernet header is aligned \ 471 * so that the payload after the Ethernet header is aligned \
472 * to a 4-byte boundary. \ 472 * to a 4-byte boundary. \
473 * \ 473 * \
474 * XXX BRAINDAMAGE ALERT! \ 474 * XXX BRAINDAMAGE ALERT! \
475 * The stupid chip uses the same size for every buffer, which \ 475 * The stupid chip uses the same size for every buffer, which \
476 * is set in the Receive Control register. We are using the 2K \ 476 * is set in the Receive Control register. We are using the 2K \
477 * size option, but what we REALLY want is (2K - 2)! For this \ 477 * size option, but what we REALLY want is (2K - 2)! For this \
478 * reason, we can't "scoot" packets longer than the standard \ 478 * reason, we can't "scoot" packets longer than the standard \
479 * Ethernet MTU. On strict-alignment platforms, if the total \ 479 * Ethernet MTU. On strict-alignment platforms, if the total \
480 * size exceeds (2K - 2) we set align_tweak to 0 and let \ 480 * size exceeds (2K - 2) we set align_tweak to 0 and let \
481 * the upper layer copy the headers. \ 481 * the upper layer copy the headers. \
482 */ \ 482 */ \
483 __m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak; \ 483 __m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak; \
484 \ 484 \
485 wm_set_dma_addr(&__rxd->wrx_addr, \ 485 wm_set_dma_addr(&__rxd->wrx_addr, \
486 __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \ 486 __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
487 __rxd->wrx_len = 0; \ 487 __rxd->wrx_len = 0; \
488 __rxd->wrx_cksum = 0; \ 488 __rxd->wrx_cksum = 0; \
489 __rxd->wrx_status = 0; \ 489 __rxd->wrx_status = 0; \
490 __rxd->wrx_errors = 0; \ 490 __rxd->wrx_errors = 0; \
491 __rxd->wrx_special = 0; \ 491 __rxd->wrx_special = 0; \
492 WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \ 492 WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
493 \ 493 \
494 CSR_WRITE((sc), (sc)->sc_rdt_reg, (x)); \ 494 CSR_WRITE((sc), (sc)->sc_rdt_reg, (x)); \
495} while (/*CONSTCOND*/0) 495} while (/*CONSTCOND*/0)
496 496
497static void wm_start(struct ifnet *); 497static void wm_start(struct ifnet *);
498static void wm_nq_start(struct ifnet *); 498static void wm_nq_start(struct ifnet *);
499static void wm_watchdog(struct ifnet *); 499static void wm_watchdog(struct ifnet *);
500static int wm_ifflags_cb(struct ethercom *); 500static int wm_ifflags_cb(struct ethercom *);
501static int wm_ioctl(struct ifnet *, u_long, void *); 501static int wm_ioctl(struct ifnet *, u_long, void *);
502static int wm_init(struct ifnet *); 502static int wm_init(struct ifnet *);
503static void wm_stop(struct ifnet *, int); 503static void wm_stop(struct ifnet *, int);
504static bool wm_suspend(device_t, const pmf_qual_t *); 504static bool wm_suspend(device_t, const pmf_qual_t *);
505static bool wm_resume(device_t, const pmf_qual_t *); 505static bool wm_resume(device_t, const pmf_qual_t *);
506 506
507static void wm_reset(struct wm_softc *); 507static void wm_reset(struct wm_softc *);
508static void wm_rxdrain(struct wm_softc *); 508static void wm_rxdrain(struct wm_softc *);
509static int wm_add_rxbuf(struct wm_softc *, int); 509static int wm_add_rxbuf(struct wm_softc *, int);
510static int wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *); 510static int wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
511static int wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *); 511static int wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
512static int wm_validate_eeprom_checksum(struct wm_softc *); 512static int wm_validate_eeprom_checksum(struct wm_softc *);
513static int wm_check_alt_mac_addr(struct wm_softc *); 513static int wm_check_alt_mac_addr(struct wm_softc *);
514static int wm_read_mac_addr(struct wm_softc *, uint8_t *); 514static int wm_read_mac_addr(struct wm_softc *, uint8_t *);
515static void wm_tick(void *); 515static void wm_tick(void *);
516 516
517static void wm_set_filter(struct wm_softc *); 517static void wm_set_filter(struct wm_softc *);
518static void wm_set_vlan(struct wm_softc *); 518static void wm_set_vlan(struct wm_softc *);
519 519
520static int wm_intr(void *); 520static int wm_intr(void *);
521static void wm_txintr(struct wm_softc *); 521static void wm_txintr(struct wm_softc *);
522static void wm_rxintr(struct wm_softc *); 522static void wm_rxintr(struct wm_softc *);
523static void wm_linkintr(struct wm_softc *, uint32_t); 523static void wm_linkintr(struct wm_softc *, uint32_t);
524 524
525static void wm_tbi_mediainit(struct wm_softc *); 525static void wm_tbi_mediainit(struct wm_softc *);
526static int wm_tbi_mediachange(struct ifnet *); 526static int wm_tbi_mediachange(struct ifnet *);
527static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *); 527static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
528 528
529static void wm_tbi_set_linkled(struct wm_softc *); 529static void wm_tbi_set_linkled(struct wm_softc *);
530static void wm_tbi_check_link(struct wm_softc *); 530static void wm_tbi_check_link(struct wm_softc *);
531 531
532static void wm_gmii_reset(struct wm_softc *); 532static void wm_gmii_reset(struct wm_softc *);
533 533
534static int wm_gmii_i82543_readreg(device_t, int, int); 534static int wm_gmii_i82543_readreg(device_t, int, int);
535static void wm_gmii_i82543_writereg(device_t, int, int, int); 535static void wm_gmii_i82543_writereg(device_t, int, int, int);
536static int wm_gmii_i82544_readreg(device_t, int, int); 536static int wm_gmii_i82544_readreg(device_t, int, int);
537static void wm_gmii_i82544_writereg(device_t, int, int, int); 537static void wm_gmii_i82544_writereg(device_t, int, int, int);
538static int wm_gmii_i80003_readreg(device_t, int, int); 538static int wm_gmii_i80003_readreg(device_t, int, int);
539static void wm_gmii_i80003_writereg(device_t, int, int, int); 539static void wm_gmii_i80003_writereg(device_t, int, int, int);
540static int wm_gmii_bm_readreg(device_t, int, int); 540static int wm_gmii_bm_readreg(device_t, int, int);
541static void wm_gmii_bm_writereg(device_t, int, int, int); 541static void wm_gmii_bm_writereg(device_t, int, int, int);
542static int wm_gmii_hv_readreg(device_t, int, int); 542static int wm_gmii_hv_readreg(device_t, int, int);
543static void wm_gmii_hv_writereg(device_t, int, int, int); 543static void wm_gmii_hv_writereg(device_t, int, int, int);
544static int wm_gmii_82580_readreg(device_t, int, int); 544static int wm_gmii_82580_readreg(device_t, int, int);
545static void wm_gmii_82580_writereg(device_t, int, int, int); 545static void wm_gmii_82580_writereg(device_t, int, int, int);
546static bool wm_sgmii_uses_mdio(struct wm_softc *); 546static bool wm_sgmii_uses_mdio(struct wm_softc *);
547static int wm_sgmii_readreg(device_t, int, int); 547static int wm_sgmii_readreg(device_t, int, int);
548static void wm_sgmii_writereg(device_t, int, int, int); 548static void wm_sgmii_writereg(device_t, int, int, int);
549 549
550static void wm_gmii_statchg(struct ifnet *); 550static void wm_gmii_statchg(struct ifnet *);
551 551
552static int wm_get_phy_id_82575(struct wm_softc *); 552static int wm_get_phy_id_82575(struct wm_softc *);
553static void wm_gmii_mediainit(struct wm_softc *, pci_product_id_t); 553static void wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
554static int wm_gmii_mediachange(struct ifnet *); 554static int wm_gmii_mediachange(struct ifnet *);
555static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *); 555static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
556 556
557static int wm_kmrn_readreg(struct wm_softc *, int); 557static int wm_kmrn_readreg(struct wm_softc *, int);
558static void wm_kmrn_writereg(struct wm_softc *, int, int); 558static void wm_kmrn_writereg(struct wm_softc *, int, int);
559 559
560static void wm_set_spiaddrbits(struct wm_softc *); 560static void wm_set_spiaddrbits(struct wm_softc *);
561static int wm_match(device_t, cfdata_t, void *); 561static int wm_match(device_t, cfdata_t, void *);
562static void wm_attach(device_t, device_t, void *); 562static void wm_attach(device_t, device_t, void *);
563static int wm_detach(device_t, int); 563static int wm_detach(device_t, int);
564static int wm_is_onboard_nvm_eeprom(struct wm_softc *); 564static int wm_is_onboard_nvm_eeprom(struct wm_softc *);
565static void wm_get_auto_rd_done(struct wm_softc *); 565static void wm_get_auto_rd_done(struct wm_softc *);
566static void wm_lan_init_done(struct wm_softc *); 566static void wm_lan_init_done(struct wm_softc *);
567static void wm_get_cfg_done(struct wm_softc *); 567static void wm_get_cfg_done(struct wm_softc *);
568static int wm_get_swsm_semaphore(struct wm_softc *); 568static int wm_get_swsm_semaphore(struct wm_softc *);
569static void wm_put_swsm_semaphore(struct wm_softc *); 569static void wm_put_swsm_semaphore(struct wm_softc *);
570static int wm_poll_eerd_eewr_done(struct wm_softc *, int); 570static int wm_poll_eerd_eewr_done(struct wm_softc *, int);
571static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t); 571static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
572static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t); 572static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
573static int wm_get_swfwhw_semaphore(struct wm_softc *); 573static int wm_get_swfwhw_semaphore(struct wm_softc *);
574static void wm_put_swfwhw_semaphore(struct wm_softc *); 574static void wm_put_swfwhw_semaphore(struct wm_softc *);
575static int wm_get_hw_semaphore_82573(struct wm_softc *); 575static int wm_get_hw_semaphore_82573(struct wm_softc *);
576static void wm_put_hw_semaphore_82573(struct wm_softc *); 576static void wm_put_hw_semaphore_82573(struct wm_softc *);
577 577
578static int wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *); 578static int wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
579static int32_t wm_ich8_cycle_init(struct wm_softc *); 579static int32_t wm_ich8_cycle_init(struct wm_softc *);
580static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t); 580static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
581static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t, 581static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t,
582 uint32_t, uint16_t *); 582 uint32_t, uint16_t *);
583static int32_t wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *); 583static int32_t wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
584static int32_t wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *); 584static int32_t wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
585static void wm_82547_txfifo_stall(void *); 585static void wm_82547_txfifo_stall(void *);
586static void wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int); 586static void wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
587static int wm_check_mng_mode(struct wm_softc *); 587static int wm_check_mng_mode(struct wm_softc *);
588static int wm_check_mng_mode_ich8lan(struct wm_softc *); 588static int wm_check_mng_mode_ich8lan(struct wm_softc *);
589static int wm_check_mng_mode_82574(struct wm_softc *); 589static int wm_check_mng_mode_82574(struct wm_softc *);
590static int wm_check_mng_mode_generic(struct wm_softc *); 590static int wm_check_mng_mode_generic(struct wm_softc *);
591static int wm_enable_mng_pass_thru(struct wm_softc *); 591static int wm_enable_mng_pass_thru(struct wm_softc *);
592static int wm_check_reset_block(struct wm_softc *); 592static int wm_check_reset_block(struct wm_softc *);
593static void wm_get_hw_control(struct wm_softc *); 593static void wm_get_hw_control(struct wm_softc *);
594static int wm_check_for_link(struct wm_softc *); 594static int wm_check_for_link(struct wm_softc *);
595static void wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *); 595static void wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
596static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *); 596static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
597#ifdef WM_WOL 597#ifdef WM_WOL
598static void wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *); 598static void wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
599#endif 599#endif
600static void wm_hv_phy_workaround_ich8lan(struct wm_softc *); 600static void wm_hv_phy_workaround_ich8lan(struct wm_softc *);
601static void wm_lv_phy_workaround_ich8lan(struct wm_softc *); 601static void wm_lv_phy_workaround_ich8lan(struct wm_softc *);
602static void wm_k1_gig_workaround_hv(struct wm_softc *, int); 602static void wm_k1_gig_workaround_hv(struct wm_softc *, int);
603static void wm_set_mdio_slow_mode_hv(struct wm_softc *); 603static void wm_set_mdio_slow_mode_hv(struct wm_softc *);
604static void wm_configure_k1_ich8lan(struct wm_softc *, int); 604static void wm_configure_k1_ich8lan(struct wm_softc *, int);
605static void wm_smbustopci(struct wm_softc *); 605static void wm_smbustopci(struct wm_softc *);
606static void wm_set_pcie_completion_timeout(struct wm_softc *); 606static void wm_set_pcie_completion_timeout(struct wm_softc *);
607static void wm_reset_init_script_82575(struct wm_softc *); 607static void wm_reset_init_script_82575(struct wm_softc *);
608static void wm_release_manageability(struct wm_softc *); 608static void wm_release_manageability(struct wm_softc *);
609static void wm_release_hw_control(struct wm_softc *); 609static void wm_release_hw_control(struct wm_softc *);
610static void wm_get_wakeup(struct wm_softc *); 610static void wm_get_wakeup(struct wm_softc *);
611#ifdef WM_WOL 611#ifdef WM_WOL
612static void wm_enable_phy_wakeup(struct wm_softc *); 612static void wm_enable_phy_wakeup(struct wm_softc *);
613static void wm_enable_wakeup(struct wm_softc *); 613static void wm_enable_wakeup(struct wm_softc *);
614#endif 614#endif
615static void wm_init_manageability(struct wm_softc *); 615static void wm_init_manageability(struct wm_softc *);
616static void wm_set_eee_i350(struct wm_softc *); 616static void wm_set_eee_i350(struct wm_softc *);
617 617
618CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc), 618CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
619 wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN); 619 wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
620 620
621/* 621/*
622 * Devices supported by this driver. 622 * Devices supported by this driver.
623 */ 623 */
624static const struct wm_product { 624static const struct wm_product {
625 pci_vendor_id_t wmp_vendor; 625 pci_vendor_id_t wmp_vendor;
626 pci_product_id_t wmp_product; 626 pci_product_id_t wmp_product;
627 const char *wmp_name; 627 const char *wmp_name;
628 wm_chip_type wmp_type; 628 wm_chip_type wmp_type;
629 int wmp_flags; 629 int wmp_flags;
630#define WMP_F_1000X 0x01 630#define WMP_F_1000X 0x01
631#define WMP_F_1000T 0x02 631#define WMP_F_1000T 0x02
632#define WMP_F_SERDES 0x04 632#define WMP_F_SERDES 0x04
633} wm_products[] = { 633} wm_products[] = {
634 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542, 634 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
635 "Intel i82542 1000BASE-X Ethernet", 635 "Intel i82542 1000BASE-X Ethernet",
636 WM_T_82542_2_1, WMP_F_1000X }, 636 WM_T_82542_2_1, WMP_F_1000X },
637 637
638 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER, 638 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER,
639 "Intel i82543GC 1000BASE-X Ethernet", 639 "Intel i82543GC 1000BASE-X Ethernet",
640 WM_T_82543, WMP_F_1000X }, 640 WM_T_82543, WMP_F_1000X },
641 641
642 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER, 642 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER,
643 "Intel i82543GC 1000BASE-T Ethernet", 643 "Intel i82543GC 1000BASE-T Ethernet",
644 WM_T_82543, WMP_F_1000T }, 644 WM_T_82543, WMP_F_1000T },
645 645
646 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER, 646 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER,
647 "Intel i82544EI 1000BASE-T Ethernet", 647 "Intel i82544EI 1000BASE-T Ethernet",
648 WM_T_82544, WMP_F_1000T }, 648 WM_T_82544, WMP_F_1000T },
649 649
650 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER, 650 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER,
651 "Intel i82544EI 1000BASE-X Ethernet", 651 "Intel i82544EI 1000BASE-X Ethernet",
652 WM_T_82544, WMP_F_1000X }, 652 WM_T_82544, WMP_F_1000X },
653 653
654 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER, 654 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER,
655 "Intel i82544GC 1000BASE-T Ethernet", 655 "Intel i82544GC 1000BASE-T Ethernet",
656 WM_T_82544, WMP_F_1000T }, 656 WM_T_82544, WMP_F_1000T },
657 657
658 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM, 658 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM,
659 "Intel i82544GC (LOM) 1000BASE-T Ethernet", 659 "Intel i82544GC (LOM) 1000BASE-T Ethernet",
660 WM_T_82544, WMP_F_1000T }, 660 WM_T_82544, WMP_F_1000T },
661 661
662 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM, 662 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM,
663 "Intel i82540EM 1000BASE-T Ethernet", 663 "Intel i82540EM 1000BASE-T Ethernet",
664 WM_T_82540, WMP_F_1000T }, 664 WM_T_82540, WMP_F_1000T },
665 665
666 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM, 666 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM,
667 "Intel i82540EM (LOM) 1000BASE-T Ethernet", 667 "Intel i82540EM (LOM) 1000BASE-T Ethernet",
668 WM_T_82540, WMP_F_1000T }, 668 WM_T_82540, WMP_F_1000T },
669 669
670 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM, 670 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM,
671 "Intel i82540EP 1000BASE-T Ethernet", 671 "Intel i82540EP 1000BASE-T Ethernet",
672 WM_T_82540, WMP_F_1000T }, 672 WM_T_82540, WMP_F_1000T },
673 673
674 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP, 674 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP,
675 "Intel i82540EP 1000BASE-T Ethernet", 675 "Intel i82540EP 1000BASE-T Ethernet",
676 WM_T_82540, WMP_F_1000T }, 676 WM_T_82540, WMP_F_1000T },
677 677
678 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP, 678 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP,
679 "Intel i82540EP 1000BASE-T Ethernet", 679 "Intel i82540EP 1000BASE-T Ethernet",
680 WM_T_82540, WMP_F_1000T }, 680 WM_T_82540, WMP_F_1000T },
681 681
682 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER, 682 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER,
683 "Intel i82545EM 1000BASE-T Ethernet", 683 "Intel i82545EM 1000BASE-T Ethernet",
684 WM_T_82545, WMP_F_1000T }, 684 WM_T_82545, WMP_F_1000T },
685 685
686 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER, 686 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER,
687 "Intel i82545GM 1000BASE-T Ethernet", 687 "Intel i82545GM 1000BASE-T Ethernet",
688 WM_T_82545_3, WMP_F_1000T }, 688 WM_T_82545_3, WMP_F_1000T },
689 689
690 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER, 690 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER,
691 "Intel i82545GM 1000BASE-X Ethernet", 691 "Intel i82545GM 1000BASE-X Ethernet",
692 WM_T_82545_3, WMP_F_1000X }, 692 WM_T_82545_3, WMP_F_1000X },
693#if 0 693#if 0
694 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES, 694 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES,
695 "Intel i82545GM Gigabit Ethernet (SERDES)", 695 "Intel i82545GM Gigabit Ethernet (SERDES)",
696 WM_T_82545_3, WMP_F_SERDES }, 696 WM_T_82545_3, WMP_F_SERDES },
697#endif 697#endif
698 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER, 698 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER,
699 "Intel i82546EB 1000BASE-T Ethernet", 699 "Intel i82546EB 1000BASE-T Ethernet",
700 WM_T_82546, WMP_F_1000T }, 700 WM_T_82546, WMP_F_1000T },
701 701
702 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD, 702 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD,
703 "Intel i82546EB 1000BASE-T Ethernet", 703 "Intel i82546EB 1000BASE-T Ethernet",
704 WM_T_82546, WMP_F_1000T }, 704 WM_T_82546, WMP_F_1000T },
705 705
706 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER, 706 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER,
707 "Intel i82545EM 1000BASE-X Ethernet", 707 "Intel i82545EM 1000BASE-X Ethernet",
708 WM_T_82545, WMP_F_1000X }, 708 WM_T_82545, WMP_F_1000X },
709 709
710 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER, 710 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER,
711 "Intel i82546EB 1000BASE-X Ethernet", 711 "Intel i82546EB 1000BASE-X Ethernet",
712 WM_T_82546, WMP_F_1000X }, 712 WM_T_82546, WMP_F_1000X },
713 713
714 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER, 714 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER,
715 "Intel i82546GB 1000BASE-T Ethernet", 715 "Intel i82546GB 1000BASE-T Ethernet",
716 WM_T_82546_3, WMP_F_1000T }, 716 WM_T_82546_3, WMP_F_1000T },
717 717
718 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER, 718 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER,
719 "Intel i82546GB 1000BASE-X Ethernet", 719 "Intel i82546GB 1000BASE-X Ethernet",
720 WM_T_82546_3, WMP_F_1000X }, 720 WM_T_82546_3, WMP_F_1000X },
721#if 0 721#if 0
722 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES, 722 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES,
723 "Intel i82546GB Gigabit Ethernet (SERDES)", 723 "Intel i82546GB Gigabit Ethernet (SERDES)",
724 WM_T_82546_3, WMP_F_SERDES }, 724 WM_T_82546_3, WMP_F_SERDES },
725#endif 725#endif
726 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER, 726 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
727 "i82546GB quad-port Gigabit Ethernet", 727 "i82546GB quad-port Gigabit Ethernet",
728 WM_T_82546_3, WMP_F_1000T }, 728 WM_T_82546_3, WMP_F_1000T },
729 729
730 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3, 730 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
731 "i82546GB quad-port Gigabit Ethernet (KSP3)", 731 "i82546GB quad-port Gigabit Ethernet (KSP3)",
732 WM_T_82546_3, WMP_F_1000T }, 732 WM_T_82546_3, WMP_F_1000T },
733 733
734 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE, 734 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE,
735 "Intel PRO/1000MT (82546GB)", 735 "Intel PRO/1000MT (82546GB)",
736 WM_T_82546_3, WMP_F_1000T }, 736 WM_T_82546_3, WMP_F_1000T },
737 737
738 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI, 738 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI,
739 "Intel i82541EI 1000BASE-T Ethernet", 739 "Intel i82541EI 1000BASE-T Ethernet",
740 WM_T_82541, WMP_F_1000T }, 740 WM_T_82541, WMP_F_1000T },
741 741
742 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM, 742 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM,
743 "Intel i82541ER (LOM) 1000BASE-T Ethernet", 743 "Intel i82541ER (LOM) 1000BASE-T Ethernet",
744 WM_T_82541, WMP_F_1000T }, 744 WM_T_82541, WMP_F_1000T },
745 745
746 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE, 746 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE,
747 "Intel i82541EI Mobile 1000BASE-T Ethernet", 747 "Intel i82541EI Mobile 1000BASE-T Ethernet",
748 WM_T_82541, WMP_F_1000T }, 748 WM_T_82541, WMP_F_1000T },
749 749
750 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER, 750 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER,
751 "Intel i82541ER 1000BASE-T Ethernet", 751 "Intel i82541ER 1000BASE-T Ethernet",
752 WM_T_82541_2, WMP_F_1000T }, 752 WM_T_82541_2, WMP_F_1000T },
753 753
754 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI, 754 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI,
755 "Intel i82541GI 1000BASE-T Ethernet", 755 "Intel i82541GI 1000BASE-T Ethernet",
756 WM_T_82541_2, WMP_F_1000T }, 756 WM_T_82541_2, WMP_F_1000T },
757 757
758 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE, 758 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE,
759 "Intel i82541GI Mobile 1000BASE-T Ethernet", 759 "Intel i82541GI Mobile 1000BASE-T Ethernet",
760 WM_T_82541_2, WMP_F_1000T }, 760 WM_T_82541_2, WMP_F_1000T },
761 761
762 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI, 762 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI,
763 "Intel i82541PI 1000BASE-T Ethernet", 763 "Intel i82541PI 1000BASE-T Ethernet",
764 WM_T_82541_2, WMP_F_1000T }, 764 WM_T_82541_2, WMP_F_1000T },
765 765
766 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI, 766 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI,
767 "Intel i82547EI 1000BASE-T Ethernet", 767 "Intel i82547EI 1000BASE-T Ethernet",
768 WM_T_82547, WMP_F_1000T }, 768 WM_T_82547, WMP_F_1000T },
769 769
770 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE, 770 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE,
771 "Intel i82547EI Mobile 1000BASE-T Ethernet", 771 "Intel i82547EI Mobile 1000BASE-T Ethernet",
772 WM_T_82547, WMP_F_1000T }, 772 WM_T_82547, WMP_F_1000T },
773 773
774 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI, 774 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI,
775 "Intel i82547GI 1000BASE-T Ethernet", 775 "Intel i82547GI 1000BASE-T Ethernet",
776 WM_T_82547_2, WMP_F_1000T }, 776 WM_T_82547_2, WMP_F_1000T },
777 777
778 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER, 778 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER,
779 "Intel PRO/1000 PT (82571EB)", 779 "Intel PRO/1000 PT (82571EB)",
780 WM_T_82571, WMP_F_1000T }, 780 WM_T_82571, WMP_F_1000T },
781 781
782 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER, 782 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER,
783 "Intel PRO/1000 PF (82571EB)", 783 "Intel PRO/1000 PF (82571EB)",
784 WM_T_82571, WMP_F_1000X }, 784 WM_T_82571, WMP_F_1000X },
785#if 0 785#if 0
786 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES, 786 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES,
787 "Intel PRO/1000 PB (82571EB)", 787 "Intel PRO/1000 PB (82571EB)",
788 WM_T_82571, WMP_F_SERDES }, 788 WM_T_82571, WMP_F_SERDES },
789#endif 789#endif
790 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER, 790 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
791 "Intel PRO/1000 QT (82571EB)", 791 "Intel PRO/1000 QT (82571EB)",
792 WM_T_82571, WMP_F_1000T }, 792 WM_T_82571, WMP_F_1000T },
793 793
794 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER, 794 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER,
795 "Intel i82572EI 1000baseT Ethernet", 795 "Intel i82572EI 1000baseT Ethernet",
796 WM_T_82572, WMP_F_1000T }, 796 WM_T_82572, WMP_F_1000T },
797 797
798 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER, 798 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
799 "Intel PRO/1000 PT Quad Port Server Adapter", 799 "Intel PRO/1000 PT Quad Port Server Adapter",
800 WM_T_82571, WMP_F_1000T, }, 800 WM_T_82571, WMP_F_1000T, },
801 801
802 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER, 802 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER,
803 "Intel i82572EI 1000baseX Ethernet", 803 "Intel i82572EI 1000baseX Ethernet",
804 WM_T_82572, WMP_F_1000X }, 804 WM_T_82572, WMP_F_1000X },
805#if 0 805#if 0
806 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES, 806 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES,
807 "Intel i82572EI Gigabit Ethernet (SERDES)", 807 "Intel i82572EI Gigabit Ethernet (SERDES)",
808 WM_T_82572, WMP_F_SERDES }, 808 WM_T_82572, WMP_F_SERDES },
809#endif 809#endif
810 810
811 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI, 811 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI,
812 "Intel i82572EI 1000baseT Ethernet", 812 "Intel i82572EI 1000baseT Ethernet",
813 WM_T_82572, WMP_F_1000T }, 813 WM_T_82572, WMP_F_1000T },
814 814
815 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E, 815 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E,
816 "Intel i82573E", 816 "Intel i82573E",
817 WM_T_82573, WMP_F_1000T }, 817 WM_T_82573, WMP_F_1000T },
818 818
819 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT, 819 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT,
820 "Intel i82573E IAMT", 820 "Intel i82573E IAMT",
821 WM_T_82573, WMP_F_1000T }, 821 WM_T_82573, WMP_F_1000T },
822 822
823 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L, 823 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L,
824 "Intel i82573L Gigabit Ethernet", 824 "Intel i82573L Gigabit Ethernet",
825 WM_T_82573, WMP_F_1000T }, 825 WM_T_82573, WMP_F_1000T },
826 826
827 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L, 827 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L,
828 "Intel i82574L", 828 "Intel i82574L",
829 WM_T_82574, WMP_F_1000T }, 829 WM_T_82574, WMP_F_1000T },
830 830
831 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V, 831 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V,
832 "Intel i82583V", 832 "Intel i82583V",
833 WM_T_82583, WMP_F_1000T }, 833 WM_T_82583, WMP_F_1000T },
834 834
835 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT, 835 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
836 "i80003 dual 1000baseT Ethernet", 836 "i80003 dual 1000baseT Ethernet",
837 WM_T_80003, WMP_F_1000T }, 837 WM_T_80003, WMP_F_1000T },
838 838
839 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT, 839 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
840 "i80003 dual 1000baseX Ethernet", 840 "i80003 dual 1000baseX Ethernet",
841 WM_T_80003, WMP_F_1000T }, 841 WM_T_80003, WMP_F_1000T },
842#if 0 842#if 0
843 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT, 843 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
844 "Intel i80003ES2 dual Gigabit Ethernet (SERDES)", 844 "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
845 WM_T_80003, WMP_F_SERDES }, 845 WM_T_80003, WMP_F_SERDES },
846#endif 846#endif
847 847
848 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT, 848 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
849 "Intel i80003 1000baseT Ethernet", 849 "Intel i80003 1000baseT Ethernet",
850 WM_T_80003, WMP_F_1000T }, 850 WM_T_80003, WMP_F_1000T },
851#if 0 851#if 0
852 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT, 852 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
853 "Intel i80003 Gigabit Ethernet (SERDES)", 853 "Intel i80003 Gigabit Ethernet (SERDES)",
854 WM_T_80003, WMP_F_SERDES }, 854 WM_T_80003, WMP_F_SERDES },
855#endif 855#endif
856 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_AMT, 856 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_AMT,
857 "Intel i82801H (M_AMT) LAN Controller", 857 "Intel i82801H (M_AMT) LAN Controller",
858 WM_T_ICH8, WMP_F_1000T }, 858 WM_T_ICH8, WMP_F_1000T },
859 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_AMT, 859 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_AMT,
860 "Intel i82801H (AMT) LAN Controller", 860 "Intel i82801H (AMT) LAN Controller",
861 WM_T_ICH8, WMP_F_1000T }, 861 WM_T_ICH8, WMP_F_1000T },
862 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_LAN, 862 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_LAN,
863 "Intel i82801H LAN Controller", 863 "Intel i82801H LAN Controller",
864 WM_T_ICH8, WMP_F_1000T }, 864 WM_T_ICH8, WMP_F_1000T },
865 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_LAN, 865 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_LAN,
866 "Intel i82801H (IFE) LAN Controller", 866 "Intel i82801H (IFE) LAN Controller",
867 WM_T_ICH8, WMP_F_1000T }, 867 WM_T_ICH8, WMP_F_1000T },
868 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_LAN, 868 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_LAN,
869 "Intel i82801H (M) LAN Controller", 869 "Intel i82801H (M) LAN Controller",
870 WM_T_ICH8, WMP_F_1000T }, 870 WM_T_ICH8, WMP_F_1000T },
871 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_GT, 871 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_GT,
872 "Intel i82801H IFE (GT) LAN Controller", 872 "Intel i82801H IFE (GT) LAN Controller",
873 WM_T_ICH8, WMP_F_1000T }, 873 WM_T_ICH8, WMP_F_1000T },
874 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_G, 874 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_G,
875 "Intel i82801H IFE (G) LAN Controller", 875 "Intel i82801H IFE (G) LAN Controller",
876 WM_T_ICH8, WMP_F_1000T }, 876 WM_T_ICH8, WMP_F_1000T },
877 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_AMT, 877 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_AMT,
878 "82801I (AMT) LAN Controller", 878 "82801I (AMT) LAN Controller",
879 WM_T_ICH9, WMP_F_1000T }, 879 WM_T_ICH9, WMP_F_1000T },
880 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE, 880 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE,
881 "82801I LAN Controller", 881 "82801I LAN Controller",
882 WM_T_ICH9, WMP_F_1000T }, 882 WM_T_ICH9, WMP_F_1000T },
883 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_G, 883 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_G,
884 "82801I (G) LAN Controller", 884 "82801I (G) LAN Controller",
885 WM_T_ICH9, WMP_F_1000T }, 885 WM_T_ICH9, WMP_F_1000T },
886 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_GT, 886 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_GT,
887 "82801I (GT) LAN Controller", 887 "82801I (GT) LAN Controller",
888 WM_T_ICH9, WMP_F_1000T }, 888 WM_T_ICH9, WMP_F_1000T },
889 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_C, 889 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_C,
890 "82801I (C) LAN Controller", 890 "82801I (C) LAN Controller",
891 WM_T_ICH9, WMP_F_1000T }, 891 WM_T_ICH9, WMP_F_1000T },
892 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M, 892 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M,
893 "82801I mobile LAN Controller", 893 "82801I mobile LAN Controller",
894 WM_T_ICH9, WMP_F_1000T }, 894 WM_T_ICH9, WMP_F_1000T },
895 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IGP_M_V, 895 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IGP_M_V,
896 "82801I mobile (V) LAN Controller", 896 "82801I mobile (V) LAN Controller",
897 WM_T_ICH9, WMP_F_1000T }, 897 WM_T_ICH9, WMP_F_1000T },
898 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_AMT, 898 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
899 "82801I mobile (AMT) LAN Controller", 899 "82801I mobile (AMT) LAN Controller",
900 WM_T_ICH9, WMP_F_1000T }, 900 WM_T_ICH9, WMP_F_1000T },
901 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_BM, 901 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_BM,
902 "82567LM-4 LAN Controller", 902 "82567LM-4 LAN Controller",
903 WM_T_ICH9, WMP_F_1000T }, 903 WM_T_ICH9, WMP_F_1000T },
904 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_82567V_3, 904 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_82567V_3,
905 "82567V-3 LAN Controller", 905 "82567V-3 LAN Controller",
906 WM_T_ICH9, WMP_F_1000T }, 906 WM_T_ICH9, WMP_F_1000T },
907 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LM, 907 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LM,
908 "82567LM-2 LAN Controller", 908 "82567LM-2 LAN Controller",
909 WM_T_ICH10, WMP_F_1000T }, 909 WM_T_ICH10, WMP_F_1000T },
910 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LF, 910 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LF,
911 "82567LF-2 LAN Controller", 911 "82567LF-2 LAN Controller",
912 WM_T_ICH10, WMP_F_1000T }, 912 WM_T_ICH10, WMP_F_1000T },
913 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LM, 913 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LM,
914 "82567LM-3 LAN Controller", 914 "82567LM-3 LAN Controller",
915 WM_T_ICH10, WMP_F_1000T }, 915 WM_T_ICH10, WMP_F_1000T },
916 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LF, 916 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LF,
917 "82567LF-3 LAN Controller", 917 "82567LF-3 LAN Controller",
918 WM_T_ICH10, WMP_F_1000T }, 918 WM_T_ICH10, WMP_F_1000T },
919 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_V, 919 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_V,
920 "82567V-2 LAN Controller", 920 "82567V-2 LAN Controller",
921 WM_T_ICH10, WMP_F_1000T }, 921 WM_T_ICH10, WMP_F_1000T },
922 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_V, 922 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_V,
923 "82567V-3? LAN Controller", 923 "82567V-3? LAN Controller",
924 WM_T_ICH10, WMP_F_1000T }, 924 WM_T_ICH10, WMP_F_1000T },
925 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_HANKSVILLE, 925 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_HANKSVILLE,
926 "HANKSVILLE LAN Controller", 926 "HANKSVILLE LAN Controller",
927 WM_T_ICH10, WMP_F_1000T }, 927 WM_T_ICH10, WMP_F_1000T },
928 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LM, 928 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LM,
929 "PCH LAN (82577LM) Controller", 929 "PCH LAN (82577LM) Controller",
930 WM_T_PCH, WMP_F_1000T }, 930 WM_T_PCH, WMP_F_1000T },
931 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LC, 931 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LC,
932 "PCH LAN (82577LC) Controller", 932 "PCH LAN (82577LC) Controller",
933 WM_T_PCH, WMP_F_1000T }, 933 WM_T_PCH, WMP_F_1000T },
934 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DM, 934 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DM,
935 "PCH LAN (82578DM) Controller", 935 "PCH LAN (82578DM) Controller",
936 WM_T_PCH, WMP_F_1000T }, 936 WM_T_PCH, WMP_F_1000T },
937 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DC, 937 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DC,
938 "PCH LAN (82578DC) Controller", 938 "PCH LAN (82578DC) Controller",
939 WM_T_PCH, WMP_F_1000T }, 939 WM_T_PCH, WMP_F_1000T },
940 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_LM, 940 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_LM,
941 "PCH2 LAN (82579LM) Controller", 941 "PCH2 LAN (82579LM) Controller",
942 WM_T_PCH2, WMP_F_1000T }, 942 WM_T_PCH2, WMP_F_1000T },
943 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_V, 943 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_V,
944 "PCH2 LAN (82579V) Controller", 944 "PCH2 LAN (82579V) Controller",
945 WM_T_PCH2, WMP_F_1000T }, 945 WM_T_PCH2, WMP_F_1000T },
946 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER, 946 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER,
947 "82575EB dual-1000baseT Ethernet", 947 "82575EB dual-1000baseT Ethernet",
948 WM_T_82575, WMP_F_1000T }, 948 WM_T_82575, WMP_F_1000T },
949#if 0 949#if 0
950 /* 950 /*
951 * not sure if WMP_F_1000X or WMP_F_SERDES - we do not have it - so 951 * not sure if WMP_F_1000X or WMP_F_SERDES - we do not have it - so
952 * disabled for now ... 952 * disabled for now ...
953 */ 953 */
954 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES, 954 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
955 "82575EB dual-1000baseX Ethernet (SERDES)", 955 "82575EB dual-1000baseX Ethernet (SERDES)",
956 WM_T_82575, WMP_F_SERDES }, 956 WM_T_82575, WMP_F_SERDES },
957#endif 957#endif
958 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER, 958 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
959 "82575GB quad-1000baseT Ethernet", 959 "82575GB quad-1000baseT Ethernet",
960 WM_T_82575, WMP_F_1000T }, 960 WM_T_82575, WMP_F_1000T },
961 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM, 961 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
962 "82575GB quad-1000baseT Ethernet (PM)", 962 "82575GB quad-1000baseT Ethernet (PM)",
963 WM_T_82575, WMP_F_1000T }, 963 WM_T_82575, WMP_F_1000T },
964 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_COPPER, 964 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_COPPER,
965 "82576 1000BaseT Ethernet", 965 "82576 1000BaseT Ethernet",
966 WM_T_82576, WMP_F_1000T }, 966 WM_T_82576, WMP_F_1000T },
967 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER, 967 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER,
968 "82576 1000BaseX Ethernet", 968 "82576 1000BaseX Ethernet",
969 WM_T_82576, WMP_F_1000X }, 969 WM_T_82576, WMP_F_1000X },
970#if 0 970#if 0
971 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES, 971 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES,
972 "82576 gigabit Ethernet (SERDES)", 972 "82576 gigabit Ethernet (SERDES)",
973 WM_T_82576, WMP_F_SERDES }, 973 WM_T_82576, WMP_F_SERDES },
974#endif 974#endif
975 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER, 975 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
976 "82576 quad-1000BaseT Ethernet", 976 "82576 quad-1000BaseT Ethernet",
977 WM_T_82576, WMP_F_1000T }, 977 WM_T_82576, WMP_F_1000T },
978 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS, 978 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS,
979 "82576 gigabit Ethernet", 979 "82576 gigabit Ethernet",
980 WM_T_82576, WMP_F_1000T }, 980 WM_T_82576, WMP_F_1000T },
981#if 0 981#if 0
982 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES, 982 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES,
983 "82576 gigabit Ethernet (SERDES)", 983 "82576 gigabit Ethernet (SERDES)",
984 WM_T_82576, WMP_F_SERDES }, 984 WM_T_82576, WMP_F_SERDES },
985 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD, 985 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
986 "82576 quad-gigabit Ethernet (SERDES)", 986 "82576 quad-gigabit Ethernet (SERDES)",
987 WM_T_82576, WMP_F_SERDES }, 987 WM_T_82576, WMP_F_SERDES },
988#endif 988#endif
989 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER, 989 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER,
990 "82580 1000BaseT Ethernet", 990 "82580 1000BaseT Ethernet",
991 WM_T_82580, WMP_F_1000T }, 991 WM_T_82580, WMP_F_1000T },
992 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER, 992 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER,
993 "82580 1000BaseX Ethernet", 993 "82580 1000BaseX Ethernet",
994 WM_T_82580, WMP_F_1000X }, 994 WM_T_82580, WMP_F_1000X },
995#if 0 995#if 0
996 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES, 996 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES,
997 "82580 1000BaseT Ethernet (SERDES)", 997 "82580 1000BaseT Ethernet (SERDES)",
998 WM_T_82580, WMP_F_SERDES }, 998 WM_T_82580, WMP_F_SERDES },
999#endif 999#endif
1000 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII, 1000 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII,
1001 "82580 gigabit Ethernet (SGMII)", 1001 "82580 gigabit Ethernet (SGMII)",
1002 WM_T_82580, WMP_F_1000T }, 1002 WM_T_82580, WMP_F_1000T },
1003 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL, 1003 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
1004 "82580 dual-1000BaseT Ethernet", 1004 "82580 dual-1000BaseT Ethernet",
1005 WM_T_82580, WMP_F_1000T }, 1005 WM_T_82580, WMP_F_1000T },
1006 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_ER, 1006 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_ER,
1007 "82580 1000BaseT Ethernet", 1007 "82580 1000BaseT Ethernet",
1008 WM_T_82580ER, WMP_F_1000T }, 1008 WM_T_82580ER, WMP_F_1000T },
1009 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_ER_DUAL, 1009 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_ER_DUAL,
1010 "82580 dual-1000BaseT Ethernet", 1010 "82580 dual-1000BaseT Ethernet",
1011 WM_T_82580ER, WMP_F_1000T }, 1011 WM_T_82580ER, WMP_F_1000T },
1012 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_QUAD_FIBER, 1012 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
1013 "82580 quad-1000BaseX Ethernet", 1013 "82580 quad-1000BaseX Ethernet",
1014 WM_T_82580, WMP_F_1000X }, 1014 WM_T_82580, WMP_F_1000X },
1015 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_COPPER, 1015 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_COPPER,
1016 "I350 Gigabit Network Connection", 1016 "I350 Gigabit Network Connection",
1017 WM_T_I350, WMP_F_1000T }, 1017 WM_T_I350, WMP_F_1000T },
1018 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER, 1018 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER,
1019 "I350 Gigabit Fiber Network Connection", 1019 "I350 Gigabit Fiber Network Connection",
1020 WM_T_I350, WMP_F_1000X }, 1020 WM_T_I350, WMP_F_1000X },
1021#if 0 1021#if 0
1022 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES, 1022 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES,
1023 "I350 Gigabit Backplane Connection", 1023 "I350 Gigabit Backplane Connection",
1024 WM_T_I350, WMP_F_SERDES }, 1024 WM_T_I350, WMP_F_SERDES },
1025 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII, 1025 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII,
1026 "I350 Gigabit Connection", 1026 "I350 Gigabit Connection",
1027 WM_T_I350, WMP_F_1000T }, 1027 WM_T_I350, WMP_F_1000T },
1028#endif 1028#endif
1029 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_SGMII, 1029 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_SGMII,
1030 "I354 Gigabit Connection", 1030 "I354 Gigabit Connection",
1031 WM_T_I354, WMP_F_1000T }, 1031 WM_T_I354, WMP_F_1000T },
1032 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_T1, 1032 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_T1,
1033 "I210-T1 Ethernet Server Adapter", 1033 "I210-T1 Ethernet Server Adapter",
1034 WM_T_I210, WMP_F_1000T }, 1034 WM_T_I210, WMP_F_1000T },
1035 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_OEM1, 1035 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
1036 "I210 Ethernet (Copper OEM)", 1036 "I210 Ethernet (Copper OEM)",
1037 WM_T_I210, WMP_F_1000T }, 1037 WM_T_I210, WMP_F_1000T },
1038 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_IT, 1038 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_IT,
1039 "I210 Ethernet (Copper IT)", 1039 "I210 Ethernet (Copper IT)",
1040 WM_T_I210, WMP_F_1000T }, 1040 WM_T_I210, WMP_F_1000T },
1041 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_FIBER, 1041 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_FIBER,
1042 "I210 Gigabit Ethernet (Fiber)", 1042 "I210 Gigabit Ethernet (Fiber)",
1043 WM_T_I210, WMP_F_1000X }, 1043 WM_T_I210, WMP_F_1000X },
1044#if 0 1044#if 0
1045 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES, 1045 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES,
1046 "I210 Gigabit Ethernet (SERDES)", 1046 "I210 Gigabit Ethernet (SERDES)",
1047 WM_T_I210, WMP_F_SERDES }, 1047 WM_T_I210, WMP_F_SERDES },
1048 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII, 1048 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII,
1049 "I210 Gigabit Ethernet (SGMII)", 1049 "I210 Gigabit Ethernet (SGMII)",
1050 WM_T_I210, WMP_F_SERDES }, 1050 WM_T_I210, WMP_F_SERDES },
1051#endif 1051#endif
1052 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I211_COPPER, 1052 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I211_COPPER,
1053 "I211 Ethernet (COPPER)", 1053 "I211 Ethernet (COPPER)",
1054 WM_T_I211, WMP_F_1000T }, 1054 WM_T_I211, WMP_F_1000T },
1055 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_V, 1055 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_V,
1056 "I217 V Ethernet Connection", 1056 "I217 V Ethernet Connection",
1057 WM_T_PCH_LPT, WMP_F_1000T }, 1057 WM_T_PCH_LPT, WMP_F_1000T },
1058 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_LM, 1058 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_LM,
1059 "I217 LM Ethernet Connection", 1059 "I217 LM Ethernet Connection",
1060 WM_T_PCH_LPT, WMP_F_1000T }, 1060 WM_T_PCH_LPT, WMP_F_1000T },
1061 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V, 1061 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V,
1062 "I218 V Ethernet Connection", 1062 "I218 V Ethernet Connection",
1063 WM_T_PCH_LPT, WMP_F_1000T }, 1063 WM_T_PCH_LPT, WMP_F_1000T },
1064 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM, 1064 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM,
1065 "I218 LM Ethernet Connection", 1065 "I218 LM Ethernet Connection",
1066 WM_T_PCH_LPT, WMP_F_1000T }, 1066 WM_T_PCH_LPT, WMP_F_1000T },
1067 { 0, 0, 1067 { 0, 0,
1068 NULL, 1068 NULL,
1069 0, 0 }, 1069 0, 0 },
1070}; 1070};
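/*
 * Editor's note (illustrative sketch, not part of this revision): a
 * table such as the one above is normally scanned at PCI match/attach
 * time by comparing vendor and product IDs.  Assuming the table is
 * named wm_products and each entry carries wmp_vendor, wmp_product
 * and wmp_name members with a NULL-name terminator (assumptions taken
 * from the entry layout shown, not confirmed here), a minimal lookup
 * could look like:
 *
 *	static const struct wm_product *
 *	wm_lookup_sketch(pcireg_t vendor, pcireg_t product)
 *	{
 *		const struct wm_product *wmp;
 *
 *		for (wmp = wm_products; wmp->wmp_name != NULL; wmp++)
 *			if (wmp->wmp_vendor == vendor &&
 *			    wmp->wmp_product == product)
 *				return wmp;
 *		return NULL;	(no matching entry: device unsupported)
 *	}
 */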
1071 1071
1072#ifdef WM_EVENT_COUNTERS 1072#ifdef WM_EVENT_COUNTERS
1073static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")]; 1073static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
1074#endif /* WM_EVENT_COUNTERS */ 1074#endif /* WM_EVENT_COUNTERS */
1075 1075
1076#if 0 /* Not currently used */ 1076#if 0 /* Not currently used */
1077static inline uint32_t 1077static inline uint32_t
1078wm_io_read(struct wm_softc *sc, int reg) 1078wm_io_read(struct wm_softc *sc, int reg)
@@ -3130,5697 +3130,5741 @@ wm_nq_start(struct ifnet *ifp) @@ -3130,5697 +3130,5741 @@ wm_nq_start(struct ifnet *ifp)
3130 * the packet. Note, we always reserve one descriptor 3130 * the packet. Note, we always reserve one descriptor
3131 * at the end of the ring due to the semantics of the 3131 * at the end of the ring due to the semantics of the
3132 * TDT register, plus one more in the event we need 3132 * TDT register, plus one more in the event we need
3133 * to load offload context. 3133 * to load offload context.
3134 */ 3134 */
3135 if (segs_needed > sc->sc_txfree - 2) { 3135 if (segs_needed > sc->sc_txfree - 2) {
3136 /* 3136 /*
3137 * Not enough free descriptors to transmit this 3137 * Not enough free descriptors to transmit this
3138 * packet. We haven't committed anything yet, 3138 * packet. We haven't committed anything yet,
3139 * so just unload the DMA map, put the packet 3139 * so just unload the DMA map, put the packet
3140 	 * back on the queue, and punt. Notify the upper 3140 	 * back on the queue, and punt. Notify the upper
3141 * layer that there are no more slots left. 3141 * layer that there are no more slots left.
3142 */ 3142 */
3143 DPRINTF(WM_DEBUG_TX, 3143 DPRINTF(WM_DEBUG_TX,
3144 ("%s: TX: need %d (%d) descriptors, have %d\n", 3144 ("%s: TX: need %d (%d) descriptors, have %d\n",
3145 device_xname(sc->sc_dev), dmamap->dm_nsegs, 3145 device_xname(sc->sc_dev), dmamap->dm_nsegs,
3146 segs_needed, sc->sc_txfree - 1)); 3146 segs_needed, sc->sc_txfree - 1));
3147 ifp->if_flags |= IFF_OACTIVE; 3147 ifp->if_flags |= IFF_OACTIVE;
3148 bus_dmamap_unload(sc->sc_dmat, dmamap); 3148 bus_dmamap_unload(sc->sc_dmat, dmamap);
3149 WM_EVCNT_INCR(&sc->sc_ev_txdstall); 3149 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
3150 break; 3150 break;
3151 } 3151 }
3152 3152
3153 IFQ_DEQUEUE(&ifp->if_snd, m0); 3153 IFQ_DEQUEUE(&ifp->if_snd, m0);
3154 3154
3155 /* 3155 /*
3156 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. 3156 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
3157 */ 3157 */
3158 3158
3159 DPRINTF(WM_DEBUG_TX, 3159 DPRINTF(WM_DEBUG_TX,
3160 ("%s: TX: packet has %d (%d) DMA segments\n", 3160 ("%s: TX: packet has %d (%d) DMA segments\n",
3161 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed)); 3161 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
3162 3162
3163 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]); 3163 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
3164 3164
3165 /* 3165 /*
3166 * Store a pointer to the packet so that we can free it 3166 * Store a pointer to the packet so that we can free it
3167 * later. 3167 * later.
3168 * 3168 *
3169 * Initially, we consider the number of descriptors the 3169 * Initially, we consider the number of descriptors the
3170 	 * packet uses to be the number of DMA segments. This may be 3170 	 * packet uses to be the number of DMA segments. This may be
3171 * incremented by 1 if we do checksum offload (a descriptor 3171 * incremented by 1 if we do checksum offload (a descriptor
3172 * is used to set the checksum context). 3172 * is used to set the checksum context).
3173 */ 3173 */
3174 txs->txs_mbuf = m0; 3174 txs->txs_mbuf = m0;
3175 txs->txs_firstdesc = sc->sc_txnext; 3175 txs->txs_firstdesc = sc->sc_txnext;
3176 txs->txs_ndesc = segs_needed; 3176 txs->txs_ndesc = segs_needed;
3177 3177
3178 /* Set up offload parameters for this packet. */ 3178 /* Set up offload parameters for this packet. */
3179 uint32_t cmdlen, fields, dcmdlen; 3179 uint32_t cmdlen, fields, dcmdlen;
3180 if (m0->m_pkthdr.csum_flags & 3180 if (m0->m_pkthdr.csum_flags &
3181 (M_CSUM_TSOv4|M_CSUM_TSOv6| 3181 (M_CSUM_TSOv4|M_CSUM_TSOv6|
3182 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4| 3182 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
3183 M_CSUM_TCPv6|M_CSUM_UDPv6)) { 3183 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
3184 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields, 3184 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
3185 &do_csum) != 0) { 3185 &do_csum) != 0) {
3186 /* Error message already displayed. */ 3186 /* Error message already displayed. */
3187 bus_dmamap_unload(sc->sc_dmat, dmamap); 3187 bus_dmamap_unload(sc->sc_dmat, dmamap);
3188 continue; 3188 continue;
3189 } 3189 }
3190 } else { 3190 } else {
3191 do_csum = false; 3191 do_csum = false;
3192 cmdlen = 0; 3192 cmdlen = 0;
3193 fields = 0; 3193 fields = 0;
3194 } 3194 }
3195 3195
3196 /* Sync the DMA map. */ 3196 /* Sync the DMA map. */
3197 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, 3197 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
3198 BUS_DMASYNC_PREWRITE); 3198 BUS_DMASYNC_PREWRITE);
3199 3199
3200 /* 3200 /*
3201 * Initialize the first transmit descriptor. 3201 * Initialize the first transmit descriptor.
3202 */ 3202 */
3203 nexttx = sc->sc_txnext; 3203 nexttx = sc->sc_txnext;
3204 if (!do_csum) { 3204 if (!do_csum) {
3205 /* setup a legacy descriptor */ 3205 /* setup a legacy descriptor */
3206 wm_set_dma_addr( 3206 wm_set_dma_addr(
3207 &sc->sc_txdescs[nexttx].wtx_addr, 3207 &sc->sc_txdescs[nexttx].wtx_addr,
3208 dmamap->dm_segs[0].ds_addr); 3208 dmamap->dm_segs[0].ds_addr);
3209 sc->sc_txdescs[nexttx].wtx_cmdlen = 3209 sc->sc_txdescs[nexttx].wtx_cmdlen =
3210 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len); 3210 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
3211 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0; 3211 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
3212 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0; 3212 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
3213 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != 3213 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
3214 NULL) { 3214 NULL) {
3215 sc->sc_txdescs[nexttx].wtx_cmdlen |= 3215 sc->sc_txdescs[nexttx].wtx_cmdlen |=
3216 htole32(WTX_CMD_VLE); 3216 htole32(WTX_CMD_VLE);
3217 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 3217 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
3218 htole16(VLAN_TAG_VALUE(mtag) & 0xffff); 3218 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
3219 } else { 3219 } else {
3220 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0; 3220 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
3221 } 3221 }
3222 dcmdlen = 0; 3222 dcmdlen = 0;
3223 } else { 3223 } else {
3224 /* setup an advanced data descriptor */ 3224 /* setup an advanced data descriptor */
3225 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr = 3225 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
3226 htole64(dmamap->dm_segs[0].ds_addr); 3226 htole64(dmamap->dm_segs[0].ds_addr);
3227 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0); 3227 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
3228 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen = 3228 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
3229 htole32(dmamap->dm_segs[0].ds_len | cmdlen ); 3229 htole32(dmamap->dm_segs[0].ds_len | cmdlen );
3230 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 3230 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
3231 htole32(fields); 3231 htole32(fields);
3232 DPRINTF(WM_DEBUG_TX, 3232 DPRINTF(WM_DEBUG_TX,
3233 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n", 3233 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
3234 device_xname(sc->sc_dev), nexttx, 3234 device_xname(sc->sc_dev), nexttx,
3235 (uint64_t)dmamap->dm_segs[0].ds_addr)); 3235 (uint64_t)dmamap->dm_segs[0].ds_addr));
3236 DPRINTF(WM_DEBUG_TX, 3236 DPRINTF(WM_DEBUG_TX,
3237 ("\t 0x%08x%08x\n", fields, 3237 ("\t 0x%08x%08x\n", fields,
3238 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen)); 3238 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
3239 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT; 3239 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
3240 } 3240 }
3241 3241
3242 lasttx = nexttx; 3242 lasttx = nexttx;
3243 nexttx = WM_NEXTTX(sc, nexttx); 3243 nexttx = WM_NEXTTX(sc, nexttx);
3244 /* 3244 /*
3245 		 * fill in the next descriptors. Legacy or advanced format 3245 		 * fill in the next descriptors. Legacy or advanced format
3246 * is the same here 3246 * is the same here
3247 */ 3247 */
3248 for (seg = 1; seg < dmamap->dm_nsegs; 3248 for (seg = 1; seg < dmamap->dm_nsegs;
3249 seg++, nexttx = WM_NEXTTX(sc, nexttx)) { 3249 seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
3250 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr = 3250 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
3251 htole64(dmamap->dm_segs[seg].ds_addr); 3251 htole64(dmamap->dm_segs[seg].ds_addr);
3252 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen = 3252 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
3253 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len); 3253 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
3254 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0); 3254 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
3255 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0; 3255 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
3256 lasttx = nexttx; 3256 lasttx = nexttx;
3257 3257
3258 DPRINTF(WM_DEBUG_TX, 3258 DPRINTF(WM_DEBUG_TX,
3259 ("%s: TX: desc %d: %#" PRIx64 ", " 3259 ("%s: TX: desc %d: %#" PRIx64 ", "
3260 "len %#04zx\n", 3260 "len %#04zx\n",
3261 device_xname(sc->sc_dev), nexttx, 3261 device_xname(sc->sc_dev), nexttx,
3262 (uint64_t)dmamap->dm_segs[seg].ds_addr, 3262 (uint64_t)dmamap->dm_segs[seg].ds_addr,
3263 dmamap->dm_segs[seg].ds_len)); 3263 dmamap->dm_segs[seg].ds_len));
3264 } 3264 }
3265 3265
3266 KASSERT(lasttx != -1); 3266 KASSERT(lasttx != -1);
3267 3267
3268 /* 3268 /*
3269 * Set up the command byte on the last descriptor of 3269 * Set up the command byte on the last descriptor of
3270 * the packet. If we're in the interrupt delay window, 3270 * the packet. If we're in the interrupt delay window,
3271 * delay the interrupt. 3271 * delay the interrupt.
3272 */ 3272 */
3273 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) == 3273 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
3274 (NQTX_CMD_EOP | NQTX_CMD_RS)); 3274 (NQTX_CMD_EOP | NQTX_CMD_RS));
3275 sc->sc_txdescs[lasttx].wtx_cmdlen |= 3275 sc->sc_txdescs[lasttx].wtx_cmdlen |=
3276 htole32(WTX_CMD_EOP | WTX_CMD_RS); 3276 htole32(WTX_CMD_EOP | WTX_CMD_RS);
3277 3277
3278 txs->txs_lastdesc = lasttx; 3278 txs->txs_lastdesc = lasttx;
3279 3279
3280 DPRINTF(WM_DEBUG_TX, 3280 DPRINTF(WM_DEBUG_TX,
3281 ("%s: TX: desc %d: cmdlen 0x%08x\n", 3281 ("%s: TX: desc %d: cmdlen 0x%08x\n",
3282 device_xname(sc->sc_dev), 3282 device_xname(sc->sc_dev),
3283 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen))); 3283 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
3284 3284
3285 /* Sync the descriptors we're using. */ 3285 /* Sync the descriptors we're using. */
3286 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc, 3286 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
3287 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 3287 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3288 3288
3289 /* Give the packet to the chip. */ 3289 /* Give the packet to the chip. */
3290 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx); 3290 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
3291 sent = true; 3291 sent = true;
3292 3292
3293 DPRINTF(WM_DEBUG_TX, 3293 DPRINTF(WM_DEBUG_TX,
3294 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx)); 3294 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
3295 3295
3296 DPRINTF(WM_DEBUG_TX, 3296 DPRINTF(WM_DEBUG_TX,
3297 ("%s: TX: finished transmitting packet, job %d\n", 3297 ("%s: TX: finished transmitting packet, job %d\n",
3298 device_xname(sc->sc_dev), sc->sc_txsnext)); 3298 device_xname(sc->sc_dev), sc->sc_txsnext));
3299 3299
3300 /* Advance the tx pointer. */ 3300 /* Advance the tx pointer. */
3301 sc->sc_txfree -= txs->txs_ndesc; 3301 sc->sc_txfree -= txs->txs_ndesc;
3302 sc->sc_txnext = nexttx; 3302 sc->sc_txnext = nexttx;
3303 3303
3304 sc->sc_txsfree--; 3304 sc->sc_txsfree--;
3305 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext); 3305 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
3306 3306
3307 /* Pass the packet to any BPF listeners. */ 3307 /* Pass the packet to any BPF listeners. */
3308 bpf_mtap(ifp, m0); 3308 bpf_mtap(ifp, m0);
3309 } 3309 }
3310 3310
3311 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) { 3311 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
3312 /* No more slots; notify upper layer. */ 3312 /* No more slots; notify upper layer. */
3313 ifp->if_flags |= IFF_OACTIVE; 3313 ifp->if_flags |= IFF_OACTIVE;
3314 } 3314 }
3315 3315
3316 if (sent) { 3316 if (sent) {
3317 /* Set a watchdog timer in case the chip flakes out. */ 3317 /* Set a watchdog timer in case the chip flakes out. */
3318 ifp->if_timer = 5; 3318 ifp->if_timer = 5;
3319 } 3319 }
3320} 3320}
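/*
 * Editor's sketch (worked example, not part of this change): the
 * "segs_needed > sc->sc_txfree - 2" test above reserves two ring
 * slots, matching the comment earlier in the loop: one because of the
 * TDT register semantics and one for a possible offload context
 * descriptor.  A packet needing N data descriptors is therefore
 * accepted only while
 *
 *	N <= sc->sc_txfree - 2
 *
 * e.g. with sc_txfree == 5 at most 3 data descriptors may be queued,
 * and with sc_txfree <= 2 nothing is queued at all.
 */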
3321 3321
3322/* 3322/*
3323 * wm_watchdog: [ifnet interface function] 3323 * wm_watchdog: [ifnet interface function]
3324 * 3324 *
3325 * Watchdog timer handler. 3325 * Watchdog timer handler.
3326 */ 3326 */
3327static void 3327static void
3328wm_watchdog(struct ifnet *ifp) 3328wm_watchdog(struct ifnet *ifp)
3329{ 3329{
3330 struct wm_softc *sc = ifp->if_softc; 3330 struct wm_softc *sc = ifp->if_softc;
3331 3331
3332 /* 3332 /*
3333 * Since we're using delayed interrupts, sweep up 3333 * Since we're using delayed interrupts, sweep up
3334 * before we report an error. 3334 * before we report an error.
3335 */ 3335 */
3336 wm_txintr(sc); 3336 wm_txintr(sc);
3337 3337
3338 if (sc->sc_txfree != WM_NTXDESC(sc)) { 3338 if (sc->sc_txfree != WM_NTXDESC(sc)) {
3339#ifdef WM_DEBUG 3339#ifdef WM_DEBUG
3340 int i, j; 3340 int i, j;
3341 struct wm_txsoft *txs; 3341 struct wm_txsoft *txs;
3342#endif 3342#endif
3343 log(LOG_ERR, 3343 log(LOG_ERR,
3344 "%s: device timeout (txfree %d txsfree %d txnext %d)\n", 3344 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
3345 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree, 3345 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
3346 sc->sc_txnext); 3346 sc->sc_txnext);
3347 ifp->if_oerrors++; 3347 ifp->if_oerrors++;
3348#ifdef WM_DEBUG 3348#ifdef WM_DEBUG
3349 for (i = sc->sc_txsdirty; i != sc->sc_txsnext ; 3349 for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
3350 i = WM_NEXTTXS(sc, i)) { 3350 i = WM_NEXTTXS(sc, i)) {
3351 txs = &sc->sc_txsoft[i]; 3351 txs = &sc->sc_txsoft[i];
3352 printf("txs %d tx %d -> %d\n", 3352 printf("txs %d tx %d -> %d\n",
3353 i, txs->txs_firstdesc, txs->txs_lastdesc); 3353 i, txs->txs_firstdesc, txs->txs_lastdesc);
3354 for (j = txs->txs_firstdesc; ; 3354 for (j = txs->txs_firstdesc; ;
3355 j = WM_NEXTTX(sc, j)) { 3355 j = WM_NEXTTX(sc, j)) {
3356 printf("\tdesc %d: 0x%" PRIx64 "\n", j, 3356 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3357 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr); 3357 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
3358 printf("\t %#08x%08x\n", 3358 printf("\t %#08x%08x\n",
3359 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields, 3359 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
3360 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen); 3360 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
3361 if (j == txs->txs_lastdesc) 3361 if (j == txs->txs_lastdesc)
3362 break; 3362 break;
3363 } 3363 }
3364 } 3364 }
3365#endif 3365#endif
3366 /* Reset the interface. */ 3366 /* Reset the interface. */
3367 (void) wm_init(ifp); 3367 (void) wm_init(ifp);
3368 } 3368 }
3369 3369
3370 /* Try to get more packets going. */ 3370 /* Try to get more packets going. */
3371 ifp->if_start(ifp); 3371 ifp->if_start(ifp);
3372} 3372}
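/*
 * Editor's sketch (assumption): the WM_NEXTTX()/WM_NEXTTXS() helpers
 * used by the debug loop above are assumed to be simple wrapping
 * increments over the descriptor and job rings, roughly
 *
 *	#define WM_NEXTTX_SKETCH(sc, x)	(((x) + 1) % WM_NTXDESC(sc))
 *
 * (hypothetical name and definition), so walking from txs_firstdesc
 * to txs_lastdesc wraps cleanly around the end of the ring.
 */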
3373 3373
3374static int 3374static int
3375wm_ifflags_cb(struct ethercom *ec) 3375wm_ifflags_cb(struct ethercom *ec)
3376{ 3376{
3377 struct ifnet *ifp = &ec->ec_if; 3377 struct ifnet *ifp = &ec->ec_if;
3378 struct wm_softc *sc = ifp->if_softc; 3378 struct wm_softc *sc = ifp->if_softc;
3379 int change = ifp->if_flags ^ sc->sc_if_flags; 3379 int change = ifp->if_flags ^ sc->sc_if_flags;
3380 3380
3381 if (change != 0) 3381 if (change != 0)
3382 sc->sc_if_flags = ifp->if_flags; 3382 sc->sc_if_flags = ifp->if_flags;
3383 3383
3384 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) 3384 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
3385 return ENETRESET; 3385 return ENETRESET;
3386 3386
3387 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0) 3387 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
3388 wm_set_filter(sc); 3388 wm_set_filter(sc);
3389 3389
3390 wm_set_vlan(sc); 3390 wm_set_vlan(sc);
3391 3391
3392 return 0; 3392 return 0;
3393} 3393}
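/*
 * Editor's note (illustrative): the XOR in wm_ifflags_cb() isolates
 * exactly the interface-flag bits that changed since the last call,
 * for example
 *
 *	old    = IFF_UP | IFF_RUNNING
 *	new    = IFF_UP | IFF_RUNNING | IFF_PROMISC
 *	change = old ^ new = IFF_PROMISC
 *
 * Any changed bit outside IFF_CANTCHANGE|IFF_DEBUG makes the callback
 * request a full reinitialization via ENETRESET; otherwise a changed
 * IFF_PROMISC or IFF_ALLMULTI bit just reprograms the receive filter.
 */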
3394 3394
3395/* 3395/*
3396 * wm_ioctl: [ifnet interface function] 3396 * wm_ioctl: [ifnet interface function]
3397 * 3397 *
3398 * Handle control requests from the operator. 3398 * Handle control requests from the operator.
3399 */ 3399 */
3400static int 3400static int
3401wm_ioctl(struct ifnet *ifp, u_long cmd, void *data) 3401wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3402{ 3402{
3403 struct wm_softc *sc = ifp->if_softc; 3403 struct wm_softc *sc = ifp->if_softc;
3404 struct ifreq *ifr = (struct ifreq *) data; 3404 struct ifreq *ifr = (struct ifreq *) data;
3405 struct ifaddr *ifa = (struct ifaddr *)data; 3405 struct ifaddr *ifa = (struct ifaddr *)data;
3406 struct sockaddr_dl *sdl; 3406 struct sockaddr_dl *sdl;
3407 int s, error; 3407 int s, error;
3408 3408
3409 s = splnet(); 3409 s = splnet();
3410 3410
3411 switch (cmd) { 3411 switch (cmd) {
3412 case SIOCSIFMEDIA: 3412 case SIOCSIFMEDIA:
3413 case SIOCGIFMEDIA: 3413 case SIOCGIFMEDIA:
3414 /* Flow control requires full-duplex mode. */ 3414 /* Flow control requires full-duplex mode. */
3415 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 3415 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3416 (ifr->ifr_media & IFM_FDX) == 0) 3416 (ifr->ifr_media & IFM_FDX) == 0)
3417 ifr->ifr_media &= ~IFM_ETH_FMASK; 3417 ifr->ifr_media &= ~IFM_ETH_FMASK;
3418 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 3418 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3419 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 3419 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3420 /* We can do both TXPAUSE and RXPAUSE. */ 3420 /* We can do both TXPAUSE and RXPAUSE. */
3421 ifr->ifr_media |= 3421 ifr->ifr_media |=
3422 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 3422 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3423 } 3423 }
3424 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK; 3424 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3425 } 3425 }
3426 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd); 3426 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
3427 break; 3427 break;
3428 case SIOCINITIFADDR: 3428 case SIOCINITIFADDR:
3429 if (ifa->ifa_addr->sa_family == AF_LINK) { 3429 if (ifa->ifa_addr->sa_family == AF_LINK) {
3430 sdl = satosdl(ifp->if_dl->ifa_addr); 3430 sdl = satosdl(ifp->if_dl->ifa_addr);
3431 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len, 3431 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
3432 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen); 3432 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
3433 /* unicast address is first multicast entry */ 3433 /* unicast address is first multicast entry */
3434 wm_set_filter(sc); 3434 wm_set_filter(sc);
3435 error = 0; 3435 error = 0;
3436 break; 3436 break;
3437 } 3437 }
3438 /*FALLTHROUGH*/ 3438 /*FALLTHROUGH*/
3439 default: 3439 default:
3440 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET) 3440 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
3441 break; 3441 break;
3442 3442
3443 error = 0; 3443 error = 0;
3444 3444
3445 if (cmd == SIOCSIFCAP) 3445 if (cmd == SIOCSIFCAP)
3446 error = (*ifp->if_init)(ifp); 3446 error = (*ifp->if_init)(ifp);
3447 else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI) 3447 else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3448 ; 3448 ;
3449 else if (ifp->if_flags & IFF_RUNNING) { 3449 else if (ifp->if_flags & IFF_RUNNING) {
3450 /* 3450 /*
3451 * Multicast list has changed; set the hardware filter 3451 * Multicast list has changed; set the hardware filter
3452 * accordingly. 3452 * accordingly.
3453 */ 3453 */
3454 wm_set_filter(sc); 3454 wm_set_filter(sc);
3455 } 3455 }
3456 break; 3456 break;
3457 } 3457 }
3458 3458
3459 /* Try to get more packets going. */ 3459 /* Try to get more packets going. */
3460 ifp->if_start(ifp); 3460 ifp->if_start(ifp);
3461 3461
3462 splx(s); 3462 splx(s);
3463 return error; 3463 return error;
3464} 3464}
3465 3465
3466/* 3466/*
3467 * wm_intr: 3467 * wm_intr:
3468 * 3468 *
3469 * Interrupt service routine. 3469 * Interrupt service routine.
3470 */ 3470 */
3471static int 3471static int
3472wm_intr(void *arg) 3472wm_intr(void *arg)
3473{ 3473{
3474 struct wm_softc *sc = arg; 3474 struct wm_softc *sc = arg;
3475 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 3475 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3476 uint32_t icr; 3476 uint32_t icr;
3477 int handled = 0; 3477 int handled = 0;
3478 3478
3479 while (1 /* CONSTCOND */) { 3479 while (1 /* CONSTCOND */) {
3480 icr = CSR_READ(sc, WMREG_ICR); 3480 icr = CSR_READ(sc, WMREG_ICR);
3481 if ((icr & sc->sc_icr) == 0) 3481 if ((icr & sc->sc_icr) == 0)
3482 break; 3482 break;
3483 rnd_add_uint32(&sc->rnd_source, icr); 3483 rnd_add_uint32(&sc->rnd_source, icr);
3484 3484
3485 handled = 1; 3485 handled = 1;
3486 3486
3487#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS) 3487#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
3488 if (icr & (ICR_RXDMT0|ICR_RXT0)) { 3488 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
3489 DPRINTF(WM_DEBUG_RX, 3489 DPRINTF(WM_DEBUG_RX,
3490 ("%s: RX: got Rx intr 0x%08x\n", 3490 ("%s: RX: got Rx intr 0x%08x\n",
3491 device_xname(sc->sc_dev), 3491 device_xname(sc->sc_dev),
3492 icr & (ICR_RXDMT0|ICR_RXT0))); 3492 icr & (ICR_RXDMT0|ICR_RXT0)));
3493 WM_EVCNT_INCR(&sc->sc_ev_rxintr); 3493 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
3494 } 3494 }
3495#endif 3495#endif
3496 wm_rxintr(sc); 3496 wm_rxintr(sc);
3497 3497
3498#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS) 3498#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
3499 if (icr & ICR_TXDW) { 3499 if (icr & ICR_TXDW) {
3500 DPRINTF(WM_DEBUG_TX, 3500 DPRINTF(WM_DEBUG_TX,
3501 ("%s: TX: got TXDW interrupt\n", 3501 ("%s: TX: got TXDW interrupt\n",
3502 device_xname(sc->sc_dev))); 3502 device_xname(sc->sc_dev)));
3503 WM_EVCNT_INCR(&sc->sc_ev_txdw); 3503 WM_EVCNT_INCR(&sc->sc_ev_txdw);
3504 } 3504 }
3505#endif 3505#endif
3506 wm_txintr(sc); 3506 wm_txintr(sc);
3507 3507
3508 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) { 3508 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
3509 WM_EVCNT_INCR(&sc->sc_ev_linkintr); 3509 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
3510 wm_linkintr(sc, icr); 3510 wm_linkintr(sc, icr);
3511 } 3511 }
3512 3512
3513 if (icr & ICR_RXO) { 3513 if (icr & ICR_RXO) {
3514#if defined(WM_DEBUG) 3514#if defined(WM_DEBUG)
3515 log(LOG_WARNING, "%s: Receive overrun\n", 3515 log(LOG_WARNING, "%s: Receive overrun\n",
3516 device_xname(sc->sc_dev)); 3516 device_xname(sc->sc_dev));
3517#endif /* defined(WM_DEBUG) */ 3517#endif /* defined(WM_DEBUG) */
3518 } 3518 }
3519 } 3519 }
3520 3520
3521 if (handled) { 3521 if (handled) {
3522 /* Try to get more packets going. */ 3522 /* Try to get more packets going. */
3523 ifp->if_start(ifp); 3523 ifp->if_start(ifp);
3524 } 3524 }
3525 3525
3526 return handled; 3526 return handled;
3527} 3527}
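/*
 * Editor's sketch (assumption about hardware semantics): the handler
 * above relies on WMREG_ICR being read-to-clear, so it keeps reading
 * the cause register until no enabled cause remains:
 *
 *	for (;;) {
 *		icr = CSR_READ(sc, WMREG_ICR);	// read also clears bits
 *		if ((icr & sc->sc_icr) == 0)
 *			break;			// nothing we care about
 *		// dispatch rx/tx/link work for the causes in icr
 *	}
 *
 * This way a cause that fires while earlier work is being handled is
 * still picked up before the routine returns.
 */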
3528 3528
3529/* 3529/*
3530 * wm_txintr: 3530 * wm_txintr:
3531 * 3531 *
3532 * Helper; handle transmit interrupts. 3532 * Helper; handle transmit interrupts.
3533 */ 3533 */
3534static void 3534static void
3535wm_txintr(struct wm_softc *sc) 3535wm_txintr(struct wm_softc *sc)
3536{ 3536{
3537 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 3537 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3538 struct wm_txsoft *txs; 3538 struct wm_txsoft *txs;
3539 uint8_t status; 3539 uint8_t status;
3540 int i; 3540 int i;
3541 3541
3542 ifp->if_flags &= ~IFF_OACTIVE; 3542 ifp->if_flags &= ~IFF_OACTIVE;
3543 3543
3544 /* 3544 /*
3545 * Go through the Tx list and free mbufs for those 3545 * Go through the Tx list and free mbufs for those
3546 * frames which have been transmitted. 3546 * frames which have been transmitted.
3547 */ 3547 */
3548 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc); 3548 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
3549 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) { 3549 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
3550 txs = &sc->sc_txsoft[i]; 3550 txs = &sc->sc_txsoft[i];
3551 3551
3552 DPRINTF(WM_DEBUG_TX, 3552 DPRINTF(WM_DEBUG_TX,
3553 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i)); 3553 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
3554 3554
3555 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc, 3555 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
3556 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3556 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3557 3557
3558 status = 3558 status =
3559 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status; 3559 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
3560 if ((status & WTX_ST_DD) == 0) { 3560 if ((status & WTX_ST_DD) == 0) {
3561 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1, 3561 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
3562 BUS_DMASYNC_PREREAD); 3562 BUS_DMASYNC_PREREAD);
3563 break; 3563 break;
3564 } 3564 }
3565 3565
3566 DPRINTF(WM_DEBUG_TX, 3566 DPRINTF(WM_DEBUG_TX,
3567 ("%s: TX: job %d done: descs %d..%d\n", 3567 ("%s: TX: job %d done: descs %d..%d\n",
3568 device_xname(sc->sc_dev), i, txs->txs_firstdesc, 3568 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
3569 txs->txs_lastdesc)); 3569 txs->txs_lastdesc));
3570 3570
3571 /* 3571 /*
3572 * XXX We should probably be using the statistics 3572 * XXX We should probably be using the statistics
3573 * XXX registers, but I don't know if they exist 3573 * XXX registers, but I don't know if they exist
3574 * XXX on chips before the i82544. 3574 * XXX on chips before the i82544.
3575 */ 3575 */
3576 3576
3577#ifdef WM_EVENT_COUNTERS 3577#ifdef WM_EVENT_COUNTERS
3578 if (status & WTX_ST_TU) 3578 if (status & WTX_ST_TU)
3579 WM_EVCNT_INCR(&sc->sc_ev_tu); 3579 WM_EVCNT_INCR(&sc->sc_ev_tu);
3580#endif /* WM_EVENT_COUNTERS */ 3580#endif /* WM_EVENT_COUNTERS */
3581 3581
3582 if (status & (WTX_ST_EC|WTX_ST_LC)) { 3582 if (status & (WTX_ST_EC|WTX_ST_LC)) {
3583 ifp->if_oerrors++; 3583 ifp->if_oerrors++;
3584 if (status & WTX_ST_LC) 3584 if (status & WTX_ST_LC)
3585 log(LOG_WARNING, "%s: late collision\n", 3585 log(LOG_WARNING, "%s: late collision\n",
3586 device_xname(sc->sc_dev)); 3586 device_xname(sc->sc_dev));
3587 else if (status & WTX_ST_EC) { 3587 else if (status & WTX_ST_EC) {
3588 ifp->if_collisions += 16; 3588 ifp->if_collisions += 16;
3589 log(LOG_WARNING, "%s: excessive collisions\n", 3589 log(LOG_WARNING, "%s: excessive collisions\n",
3590 device_xname(sc->sc_dev)); 3590 device_xname(sc->sc_dev));
3591 } 3591 }
3592 } else 3592 } else
3593 ifp->if_opackets++; 3593 ifp->if_opackets++;
3594 3594
3595 sc->sc_txfree += txs->txs_ndesc; 3595 sc->sc_txfree += txs->txs_ndesc;
3596 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap, 3596 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
3597 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 3597 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3598 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 3598 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3599 m_freem(txs->txs_mbuf); 3599 m_freem(txs->txs_mbuf);
3600 txs->txs_mbuf = NULL; 3600 txs->txs_mbuf = NULL;
3601 } 3601 }
3602 3602
3603 /* Update the dirty transmit buffer pointer. */ 3603 /* Update the dirty transmit buffer pointer. */
3604 sc->sc_txsdirty = i; 3604 sc->sc_txsdirty = i;
3605 DPRINTF(WM_DEBUG_TX, 3605 DPRINTF(WM_DEBUG_TX,
3606 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i)); 3606 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
3607 3607
3608 /* 3608 /*
3609 * If there are no more pending transmissions, cancel the watchdog 3609 * If there are no more pending transmissions, cancel the watchdog
3610 * timer. 3610 * timer.
3611 */ 3611 */
3612 if (sc->sc_txsfree == WM_TXQUEUELEN(sc)) 3612 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
3613 ifp->if_timer = 0; 3613 ifp->if_timer = 0;
3614} 3614}
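/*
 * Editor's note (illustrative): reclaim in wm_txintr() stops at the
 * first job whose last descriptor has not yet been written back with
 * the descriptor-done bit, i.e. roughly
 *
 *	if ((status & WTX_ST_DD) == 0)
 *		break;	// hardware still owns this and later jobs
 *
 * so completed jobs are always freed in submission order and
 * sc_txsdirty simply chases sc_txsnext around the ring.
 */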
3615 3615
3616/* 3616/*
3617 * wm_rxintr: 3617 * wm_rxintr:
3618 * 3618 *
3619 * Helper; handle receive interrupts. 3619 * Helper; handle receive interrupts.
3620 */ 3620 */
3621static void 3621static void
3622wm_rxintr(struct wm_softc *sc) 3622wm_rxintr(struct wm_softc *sc)
3623{ 3623{
3624 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 3624 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3625 struct wm_rxsoft *rxs; 3625 struct wm_rxsoft *rxs;
3626 struct mbuf *m; 3626 struct mbuf *m;
3627 int i, len; 3627 int i, len;
3628 uint8_t status, errors; 3628 uint8_t status, errors;
3629 uint16_t vlantag; 3629 uint16_t vlantag;
3630 3630
3631 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) { 3631 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
3632 rxs = &sc->sc_rxsoft[i]; 3632 rxs = &sc->sc_rxsoft[i];
3633 3633
3634 DPRINTF(WM_DEBUG_RX, 3634 DPRINTF(WM_DEBUG_RX,
3635 ("%s: RX: checking descriptor %d\n", 3635 ("%s: RX: checking descriptor %d\n",
3636 device_xname(sc->sc_dev), i)); 3636 device_xname(sc->sc_dev), i));
3637 3637
3638 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3638 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3639 3639
3640 status = sc->sc_rxdescs[i].wrx_status; 3640 status = sc->sc_rxdescs[i].wrx_status;
3641 errors = sc->sc_rxdescs[i].wrx_errors; 3641 errors = sc->sc_rxdescs[i].wrx_errors;
3642 len = le16toh(sc->sc_rxdescs[i].wrx_len); 3642 len = le16toh(sc->sc_rxdescs[i].wrx_len);
3643 vlantag = sc->sc_rxdescs[i].wrx_special; 3643 vlantag = sc->sc_rxdescs[i].wrx_special;
3644 3644
3645 if ((status & WRX_ST_DD) == 0) { 3645 if ((status & WRX_ST_DD) == 0) {
3646 /* 3646 /*
3647 * We have processed all of the receive descriptors. 3647 * We have processed all of the receive descriptors.
3648 */ 3648 */
3649 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD); 3649 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
3650 break; 3650 break;
3651 } 3651 }
3652 3652
3653 if (__predict_false(sc->sc_rxdiscard)) { 3653 if (__predict_false(sc->sc_rxdiscard)) {
3654 DPRINTF(WM_DEBUG_RX, 3654 DPRINTF(WM_DEBUG_RX,
3655 ("%s: RX: discarding contents of descriptor %d\n", 3655 ("%s: RX: discarding contents of descriptor %d\n",
3656 device_xname(sc->sc_dev), i)); 3656 device_xname(sc->sc_dev), i));
3657 WM_INIT_RXDESC(sc, i); 3657 WM_INIT_RXDESC(sc, i);
3658 if (status & WRX_ST_EOP) { 3658 if (status & WRX_ST_EOP) {
3659 /* Reset our state. */ 3659 /* Reset our state. */
3660 DPRINTF(WM_DEBUG_RX, 3660 DPRINTF(WM_DEBUG_RX,
3661 ("%s: RX: resetting rxdiscard -> 0\n", 3661 ("%s: RX: resetting rxdiscard -> 0\n",
3662 device_xname(sc->sc_dev))); 3662 device_xname(sc->sc_dev)));
3663 sc->sc_rxdiscard = 0; 3663 sc->sc_rxdiscard = 0;
3664 } 3664 }
3665 continue; 3665 continue;
3666 } 3666 }
3667 3667
3668 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 3668 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3669 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 3669 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3670 3670
3671 m = rxs->rxs_mbuf; 3671 m = rxs->rxs_mbuf;
3672 3672
3673 /* 3673 /*
3674 * Add a new receive buffer to the ring, unless of 3674 * Add a new receive buffer to the ring, unless of
3675 * course the length is zero. Treat the latter as a 3675 * course the length is zero. Treat the latter as a
3676 * failed mapping. 3676 * failed mapping.
3677 */ 3677 */
3678 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) { 3678 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
3679 /* 3679 /*
3680 * Failed, throw away what we've done so 3680 * Failed, throw away what we've done so
3681 * far, and discard the rest of the packet. 3681 * far, and discard the rest of the packet.
3682 */ 3682 */
3683 ifp->if_ierrors++; 3683 ifp->if_ierrors++;
3684 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 3684 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3685 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 3685 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3686 WM_INIT_RXDESC(sc, i); 3686 WM_INIT_RXDESC(sc, i);
3687 if ((status & WRX_ST_EOP) == 0) 3687 if ((status & WRX_ST_EOP) == 0)
3688 sc->sc_rxdiscard = 1; 3688 sc->sc_rxdiscard = 1;
3689 if (sc->sc_rxhead != NULL) 3689 if (sc->sc_rxhead != NULL)
3690 m_freem(sc->sc_rxhead); 3690 m_freem(sc->sc_rxhead);
3691 WM_RXCHAIN_RESET(sc); 3691 WM_RXCHAIN_RESET(sc);
3692 DPRINTF(WM_DEBUG_RX, 3692 DPRINTF(WM_DEBUG_RX,
3693 ("%s: RX: Rx buffer allocation failed, " 3693 ("%s: RX: Rx buffer allocation failed, "
3694 "dropping packet%s\n", device_xname(sc->sc_dev), 3694 "dropping packet%s\n", device_xname(sc->sc_dev),
3695 sc->sc_rxdiscard ? " (discard)" : "")); 3695 sc->sc_rxdiscard ? " (discard)" : ""));
3696 continue; 3696 continue;
3697 } 3697 }
3698 3698
3699 m->m_len = len; 3699 m->m_len = len;
3700 sc->sc_rxlen += len; 3700 sc->sc_rxlen += len;
3701 DPRINTF(WM_DEBUG_RX, 3701 DPRINTF(WM_DEBUG_RX,
3702 ("%s: RX: buffer at %p len %d\n", 3702 ("%s: RX: buffer at %p len %d\n",
3703 device_xname(sc->sc_dev), m->m_data, len)); 3703 device_xname(sc->sc_dev), m->m_data, len));
3704 3704
3705 /* 3705 /*
3706 * If this is not the end of the packet, keep 3706 * If this is not the end of the packet, keep
3707 * looking. 3707 * looking.
3708 */ 3708 */
3709 if ((status & WRX_ST_EOP) == 0) { 3709 if ((status & WRX_ST_EOP) == 0) {
3710 WM_RXCHAIN_LINK(sc, m); 3710 WM_RXCHAIN_LINK(sc, m);
3711 DPRINTF(WM_DEBUG_RX, 3711 DPRINTF(WM_DEBUG_RX,
3712 ("%s: RX: not yet EOP, rxlen -> %d\n", 3712 ("%s: RX: not yet EOP, rxlen -> %d\n",
3713 device_xname(sc->sc_dev), sc->sc_rxlen)); 3713 device_xname(sc->sc_dev), sc->sc_rxlen));
3714 continue; 3714 continue;
3715 } 3715 }
3716 3716
3717 /* 3717 /*
3718 * Okay, we have the entire packet now. The chip is 3718 * Okay, we have the entire packet now. The chip is
3719 * configured to include the FCS except on I350 and I21[01] 3719 * configured to include the FCS except on I350 and I21[01]
3720 * (not all chips can be configured to strip it), 3720 * (not all chips can be configured to strip it),
3721 * so we need to trim it. 3721 * so we need to trim it.
3722 * May need to adjust length of previous mbuf in the 3722 * May need to adjust length of previous mbuf in the
3723 * chain if the current mbuf is too short. 3723 * chain if the current mbuf is too short.
3724 * Due to an erratum, the RCTL_SECRC bit in the RCTL register 3724 * Due to an erratum, the RCTL_SECRC bit in the RCTL register
3725 * is always set in I350, so we don't trim it. 3725 * is always set in I350, so we don't trim it.
3726 */ 3726 */
3727 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354) 3727 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
3728 && (sc->sc_type != WM_T_I210) 3728 && (sc->sc_type != WM_T_I210)
3729 && (sc->sc_type != WM_T_I211)) { 3729 && (sc->sc_type != WM_T_I211)) {
3730 if (m->m_len < ETHER_CRC_LEN) { 3730 if (m->m_len < ETHER_CRC_LEN) {
3731 sc->sc_rxtail->m_len 3731 sc->sc_rxtail->m_len
3732 -= (ETHER_CRC_LEN - m->m_len); 3732 -= (ETHER_CRC_LEN - m->m_len);
3733 m->m_len = 0; 3733 m->m_len = 0;
3734 } else 3734 } else
3735 m->m_len -= ETHER_CRC_LEN; 3735 m->m_len -= ETHER_CRC_LEN;
3736 len = sc->sc_rxlen - ETHER_CRC_LEN; 3736 len = sc->sc_rxlen - ETHER_CRC_LEN;
3737 } else 3737 } else
3738 len = sc->sc_rxlen; 3738 len = sc->sc_rxlen;
3739 3739
3740 WM_RXCHAIN_LINK(sc, m); 3740 WM_RXCHAIN_LINK(sc, m);
3741 3741
3742 *sc->sc_rxtailp = NULL; 3742 *sc->sc_rxtailp = NULL;
3743 m = sc->sc_rxhead; 3743 m = sc->sc_rxhead;
3744 3744
3745 WM_RXCHAIN_RESET(sc); 3745 WM_RXCHAIN_RESET(sc);
3746 3746
3747 DPRINTF(WM_DEBUG_RX, 3747 DPRINTF(WM_DEBUG_RX,
3748 ("%s: RX: have entire packet, len -> %d\n", 3748 ("%s: RX: have entire packet, len -> %d\n",
3749 device_xname(sc->sc_dev), len)); 3749 device_xname(sc->sc_dev), len));
3750 3750
3751 /* 3751 /*
3752 * If an error occurred, update stats and drop the packet. 3752 * If an error occurred, update stats and drop the packet.
3753 */ 3753 */
3754 if (errors & 3754 if (errors &
3755 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) { 3755 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
3756 if (errors & WRX_ER_SE) 3756 if (errors & WRX_ER_SE)
3757 log(LOG_WARNING, "%s: symbol error\n", 3757 log(LOG_WARNING, "%s: symbol error\n",
3758 device_xname(sc->sc_dev)); 3758 device_xname(sc->sc_dev));
3759 else if (errors & WRX_ER_SEQ) 3759 else if (errors & WRX_ER_SEQ)
3760 log(LOG_WARNING, "%s: receive sequence error\n", 3760 log(LOG_WARNING, "%s: receive sequence error\n",
3761 device_xname(sc->sc_dev)); 3761 device_xname(sc->sc_dev));
3762 else if (errors & WRX_ER_CE) 3762 else if (errors & WRX_ER_CE)
3763 log(LOG_WARNING, "%s: CRC error\n", 3763 log(LOG_WARNING, "%s: CRC error\n",
3764 device_xname(sc->sc_dev)); 3764 device_xname(sc->sc_dev));
3765 m_freem(m); 3765 m_freem(m);
3766 continue; 3766 continue;
3767 } 3767 }
3768 3768
3769 /* 3769 /*
3770 * No errors. Receive the packet. 3770 * No errors. Receive the packet.
3771 */ 3771 */
3772 m->m_pkthdr.rcvif = ifp; 3772 m->m_pkthdr.rcvif = ifp;
3773 m->m_pkthdr.len = len; 3773 m->m_pkthdr.len = len;
3774 3774
3775 /* 3775 /*
3776 * If VLANs are enabled, VLAN packets have been unwrapped 3776 * If VLANs are enabled, VLAN packets have been unwrapped
3777 * for us. Associate the tag with the packet. 3777 * for us. Associate the tag with the packet.
3778 */ 3778 */
3779 /* XXXX should check for i350 and i354 */ 3779 /* XXXX should check for i350 and i354 */
3780 if ((status & WRX_ST_VP) != 0) { 3780 if ((status & WRX_ST_VP) != 0) {
3781 VLAN_INPUT_TAG(ifp, m, 3781 VLAN_INPUT_TAG(ifp, m,
3782 le16toh(vlantag), 3782 le16toh(vlantag),
3783 continue); 3783 continue);
3784 } 3784 }
3785 3785
3786 /* 3786 /*
3787 * Set up checksum info for this packet. 3787 * Set up checksum info for this packet.
3788 */ 3788 */
3789 if ((status & WRX_ST_IXSM) == 0) { 3789 if ((status & WRX_ST_IXSM) == 0) {
3790 if (status & WRX_ST_IPCS) { 3790 if (status & WRX_ST_IPCS) {
3791 WM_EVCNT_INCR(&sc->sc_ev_rxipsum); 3791 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
3792 m->m_pkthdr.csum_flags |= M_CSUM_IPv4; 3792 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
3793 if (errors & WRX_ER_IPE) 3793 if (errors & WRX_ER_IPE)
3794 m->m_pkthdr.csum_flags |= 3794 m->m_pkthdr.csum_flags |=
3795 M_CSUM_IPv4_BAD; 3795 M_CSUM_IPv4_BAD;
3796 } 3796 }
3797 if (status & WRX_ST_TCPCS) { 3797 if (status & WRX_ST_TCPCS) {
3798 /* 3798 /*
3799 * Note: we don't know if this was TCP or UDP, 3799 * Note: we don't know if this was TCP or UDP,
3800 * so we just set both bits, and expect the 3800 * so we just set both bits, and expect the
3801 * upper layers to deal. 3801 * upper layers to deal.
3802 */ 3802 */
3803 WM_EVCNT_INCR(&sc->sc_ev_rxtusum); 3803 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
3804 m->m_pkthdr.csum_flags |= 3804 m->m_pkthdr.csum_flags |=
3805 M_CSUM_TCPv4 | M_CSUM_UDPv4 | 3805 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
3806 M_CSUM_TCPv6 | M_CSUM_UDPv6; 3806 M_CSUM_TCPv6 | M_CSUM_UDPv6;
3807 if (errors & WRX_ER_TCPE) 3807 if (errors & WRX_ER_TCPE)
3808 m->m_pkthdr.csum_flags |= 3808 m->m_pkthdr.csum_flags |=
3809 M_CSUM_TCP_UDP_BAD; 3809 M_CSUM_TCP_UDP_BAD;
3810 } 3810 }
3811 } 3811 }
3812 3812
3813 ifp->if_ipackets++; 3813 ifp->if_ipackets++;
3814 3814
3815 /* Pass this up to any BPF listeners. */ 3815 /* Pass this up to any BPF listeners. */
3816 bpf_mtap(ifp, m); 3816 bpf_mtap(ifp, m);
3817 3817
3818 /* Pass it on. */ 3818 /* Pass it on. */
3819 (*ifp->if_input)(ifp, m); 3819 (*ifp->if_input)(ifp, m);
3820 } 3820 }
3821 3821
3822 /* Update the receive pointer. */ 3822 /* Update the receive pointer. */
3823 sc->sc_rxptr = i; 3823 sc->sc_rxptr = i;
3824 3824
3825 DPRINTF(WM_DEBUG_RX, 3825 DPRINTF(WM_DEBUG_RX,
3826 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i)); 3826 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
3827} 3827}
3828 3828
3829/* 3829/*
3830 * wm_linkintr_gmii: 3830 * wm_linkintr_gmii:
3831 * 3831 *
3832 * Helper; handle link interrupts for GMII. 3832 * Helper; handle link interrupts for GMII.
3833 */ 3833 */
3834static void 3834static void
3835wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr) 3835wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
3836{ 3836{
3837 3837
3838 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev), 3838 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3839 __func__)); 3839 __func__));
3840 3840
3841 if (icr & ICR_LSC) { 3841 if (icr & ICR_LSC) {
3842 DPRINTF(WM_DEBUG_LINK, 3842 DPRINTF(WM_DEBUG_LINK,
3843 ("%s: LINK: LSC -> mii_pollstat\n", 3843 ("%s: LINK: LSC -> mii_pollstat\n",
3844 device_xname(sc->sc_dev))); 3844 device_xname(sc->sc_dev)));
3845 mii_pollstat(&sc->sc_mii); 3845 mii_pollstat(&sc->sc_mii);
3846 if (sc->sc_type == WM_T_82543) { 3846 if (sc->sc_type == WM_T_82543) {
3847 int miistatus, active; 3847 int miistatus, active;
3848 3848
3849 /* 3849 /*
3850 * With 82543, we need to force speed and 3850 * With 82543, we need to force speed and
3851 * duplex on the MAC equal to what the PHY 3851 * duplex on the MAC equal to what the PHY
3852 * speed and duplex configuration is. 3852 * speed and duplex configuration is.
3853 */ 3853 */
3854 miistatus = sc->sc_mii.mii_media_status; 3854 miistatus = sc->sc_mii.mii_media_status;
3855 3855
3856 if (miistatus & IFM_ACTIVE) { 3856 if (miistatus & IFM_ACTIVE) {
3857 active = sc->sc_mii.mii_media_active; 3857 active = sc->sc_mii.mii_media_active;
3858 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD); 3858 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
3859 switch (IFM_SUBTYPE(active)) { 3859 switch (IFM_SUBTYPE(active)) {
3860 case IFM_10_T: 3860 case IFM_10_T:
3861 sc->sc_ctrl |= CTRL_SPEED_10; 3861 sc->sc_ctrl |= CTRL_SPEED_10;
3862 break; 3862 break;
3863 case IFM_100_TX: 3863 case IFM_100_TX:
3864 sc->sc_ctrl |= CTRL_SPEED_100; 3864 sc->sc_ctrl |= CTRL_SPEED_100;
3865 break; 3865 break;
3866 case IFM_1000_T: 3866 case IFM_1000_T:
3867 sc->sc_ctrl |= CTRL_SPEED_1000; 3867 sc->sc_ctrl |= CTRL_SPEED_1000;
3868 break; 3868 break;
3869 default: 3869 default:
3870 /* 3870 /*
3871 * fiber? 3871 * fiber?
3872 * Should not enter here. 3872 * Should not enter here.
3873 */ 3873 */
3874 printf("unknown media (%x)\n", 3874 printf("unknown media (%x)\n",
3875 active); 3875 active);
3876 break; 3876 break;
3877 } 3877 }
3878 if (active & IFM_FDX) 3878 if (active & IFM_FDX)
3879 sc->sc_ctrl |= CTRL_FD; 3879 sc->sc_ctrl |= CTRL_FD;
3880 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 3880 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3881 } 3881 }
3882 } else if ((sc->sc_type == WM_T_ICH8) 3882 } else if ((sc->sc_type == WM_T_ICH8)
3883 && (sc->sc_phytype == WMPHY_IGP_3)) { 3883 && (sc->sc_phytype == WMPHY_IGP_3)) {
3884 wm_kmrn_lock_loss_workaround_ich8lan(sc); 3884 wm_kmrn_lock_loss_workaround_ich8lan(sc);
3885 } else if (sc->sc_type == WM_T_PCH) { 3885 } else if (sc->sc_type == WM_T_PCH) {
3886 wm_k1_gig_workaround_hv(sc, 3886 wm_k1_gig_workaround_hv(sc,
3887 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0)); 3887 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
3888 } 3888 }
3889 3889
3890 if ((sc->sc_phytype == WMPHY_82578) 3890 if ((sc->sc_phytype == WMPHY_82578)
3891 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active) 3891 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
3892 == IFM_1000_T)) { 3892 == IFM_1000_T)) {
3893 3893
3894 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) { 3894 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
3895 delay(200*1000); /* XXX too big */ 3895 delay(200*1000); /* XXX too big */
3896 3896
3897 /* Link stall fix for link up */ 3897 /* Link stall fix for link up */
3898 wm_gmii_hv_writereg(sc->sc_dev, 1, 3898 wm_gmii_hv_writereg(sc->sc_dev, 1,
3899 HV_MUX_DATA_CTRL, 3899 HV_MUX_DATA_CTRL,
3900 HV_MUX_DATA_CTRL_GEN_TO_MAC 3900 HV_MUX_DATA_CTRL_GEN_TO_MAC
3901 | HV_MUX_DATA_CTRL_FORCE_SPEED); 3901 | HV_MUX_DATA_CTRL_FORCE_SPEED);
3902 wm_gmii_hv_writereg(sc->sc_dev, 1, 3902 wm_gmii_hv_writereg(sc->sc_dev, 1,
3903 HV_MUX_DATA_CTRL, 3903 HV_MUX_DATA_CTRL,
3904 HV_MUX_DATA_CTRL_GEN_TO_MAC); 3904 HV_MUX_DATA_CTRL_GEN_TO_MAC);
3905 } 3905 }
3906 } 3906 }
3907 } else if (icr & ICR_RXSEQ) { 3907 } else if (icr & ICR_RXSEQ) {
3908 DPRINTF(WM_DEBUG_LINK, 3908 DPRINTF(WM_DEBUG_LINK,
3909 ("%s: LINK Receive sequence error\n", 3909 ("%s: LINK Receive sequence error\n",
3910 device_xname(sc->sc_dev))); 3910 device_xname(sc->sc_dev)));
3911 } 3911 }
3912} 3912}
3913 3913
3914/* 3914/*
3915 * wm_linkintr_tbi: 3915 * wm_linkintr_tbi:
3916 * 3916 *
3917 * Helper; handle link interrupts for TBI mode. 3917 * Helper; handle link interrupts for TBI mode.
3918 */ 3918 */
3919static void 3919static void
3920wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr) 3920wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
3921{ 3921{
3922 uint32_t status; 3922 uint32_t status;
3923 3923
3924 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev), 3924 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3925 __func__)); 3925 __func__));
3926 3926
3927 status = CSR_READ(sc, WMREG_STATUS); 3927 status = CSR_READ(sc, WMREG_STATUS);
3928 if (icr & ICR_LSC) { 3928 if (icr & ICR_LSC) {
3929 if (status & STATUS_LU) { 3929 if (status & STATUS_LU) {
3930 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n", 3930 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
3931 device_xname(sc->sc_dev), 3931 device_xname(sc->sc_dev),
3932 (status & STATUS_FD) ? "FDX" : "HDX")); 3932 (status & STATUS_FD) ? "FDX" : "HDX"));
3933 /* 3933 /*
3934 * NOTE: CTRL will update TFCE and RFCE automatically, 3934 * NOTE: CTRL will update TFCE and RFCE automatically,
3935 * so we should update sc->sc_ctrl 3935 * so we should update sc->sc_ctrl
3936 */ 3936 */
3937 3937
3938 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL); 3938 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3939 sc->sc_tctl &= ~TCTL_COLD(0x3ff); 3939 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3940 sc->sc_fcrtl &= ~FCRTL_XONE; 3940 sc->sc_fcrtl &= ~FCRTL_XONE;
3941 if (status & STATUS_FD) 3941 if (status & STATUS_FD)
3942 sc->sc_tctl |= 3942 sc->sc_tctl |=
3943 TCTL_COLD(TX_COLLISION_DISTANCE_FDX); 3943 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3944 else 3944 else
3945 sc->sc_tctl |= 3945 sc->sc_tctl |=
3946 TCTL_COLD(TX_COLLISION_DISTANCE_HDX); 3946 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3947 if (sc->sc_ctrl & CTRL_TFCE) 3947 if (sc->sc_ctrl & CTRL_TFCE)
3948 sc->sc_fcrtl |= FCRTL_XONE; 3948 sc->sc_fcrtl |= FCRTL_XONE;
3949 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); 3949 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3950 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? 3950 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3951 WMREG_OLD_FCRTL : WMREG_FCRTL, 3951 WMREG_OLD_FCRTL : WMREG_FCRTL,
3952 sc->sc_fcrtl); 3952 sc->sc_fcrtl);
3953 sc->sc_tbi_linkup = 1; 3953 sc->sc_tbi_linkup = 1;
3954 } else { 3954 } else {
3955 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n", 3955 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
3956 device_xname(sc->sc_dev))); 3956 device_xname(sc->sc_dev)));
3957 sc->sc_tbi_linkup = 0; 3957 sc->sc_tbi_linkup = 0;
3958 } 3958 }
3959 wm_tbi_set_linkled(sc); 3959 wm_tbi_set_linkled(sc);
3960 } else if (icr & ICR_RXCFG) { 3960 } else if (icr & ICR_RXCFG) {
3961 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n", 3961 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
3962 device_xname(sc->sc_dev))); 3962 device_xname(sc->sc_dev)));
3963 sc->sc_tbi_nrxcfg++; 3963 sc->sc_tbi_nrxcfg++;
3964 wm_check_for_link(sc); 3964 wm_check_for_link(sc);
3965 } else if (icr & ICR_RXSEQ) { 3965 } else if (icr & ICR_RXSEQ) {
3966 DPRINTF(WM_DEBUG_LINK, 3966 DPRINTF(WM_DEBUG_LINK,
3967 ("%s: LINK: Receive sequence error\n", 3967 ("%s: LINK: Receive sequence error\n",
3968 device_xname(sc->sc_dev))); 3968 device_xname(sc->sc_dev)));
3969 } 3969 }
3970} 3970}
3971 3971
3972/* 3972/*
3973 * wm_linkintr: 3973 * wm_linkintr:
3974 * 3974 *
3975 * Helper; handle link interrupts. 3975 * Helper; handle link interrupts.
3976 */ 3976 */
3977static void 3977static void
3978wm_linkintr(struct wm_softc *sc, uint32_t icr) 3978wm_linkintr(struct wm_softc *sc, uint32_t icr)
3979{ 3979{
3980 3980
3981 if (sc->sc_flags & WM_F_HAS_MII) 3981 if (sc->sc_flags & WM_F_HAS_MII)
3982 wm_linkintr_gmii(sc, icr); 3982 wm_linkintr_gmii(sc, icr);
3983 else 3983 else
3984 wm_linkintr_tbi(sc, icr); 3984 wm_linkintr_tbi(sc, icr);
3985} 3985}
3986 3986
3987/* 3987/*
3988 * wm_tick: 3988 * wm_tick:
3989 * 3989 *
3990 * One second timer, used to check link status, sweep up 3990 * One second timer, used to check link status, sweep up
3991 * completed transmit jobs, etc. 3991 * completed transmit jobs, etc.
3992 */ 3992 */
3993static void 3993static void
3994wm_tick(void *arg) 3994wm_tick(void *arg)
3995{ 3995{
3996 struct wm_softc *sc = arg; 3996 struct wm_softc *sc = arg;
3997 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 3997 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3998 int s; 3998 int s;
3999 3999
4000 s = splnet(); 4000 s = splnet();
4001 4001
4002 if (sc->sc_type >= WM_T_82542_2_1) { 4002 if (sc->sc_type >= WM_T_82542_2_1) {
4003 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC)); 4003 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
4004 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC)); 4004 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
4005 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC)); 4005 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
4006 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC)); 4006 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
4007 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC)); 4007 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
4008 } 4008 }
4009 4009
4010 ifp->if_collisions += CSR_READ(sc, WMREG_COLC); 4010 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4011 ifp->if_ierrors += 0ULL + /* ensure quad_t */ 4011 ifp->if_ierrors += 0ULL + /* ensure quad_t */
4012 + CSR_READ(sc, WMREG_CRCERRS) 4012 + CSR_READ(sc, WMREG_CRCERRS)
4013 + CSR_READ(sc, WMREG_ALGNERRC) 4013 + CSR_READ(sc, WMREG_ALGNERRC)
4014 + CSR_READ(sc, WMREG_SYMERRC) 4014 + CSR_READ(sc, WMREG_SYMERRC)
4015 + CSR_READ(sc, WMREG_RXERRC) 4015 + CSR_READ(sc, WMREG_RXERRC)
4016 + CSR_READ(sc, WMREG_SEC) 4016 + CSR_READ(sc, WMREG_SEC)
4017 + CSR_READ(sc, WMREG_CEXTERR) 4017 + CSR_READ(sc, WMREG_CEXTERR)
4018 + CSR_READ(sc, WMREG_RLEC); 4018 + CSR_READ(sc, WMREG_RLEC);
4019 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC); 4019 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
4020 4020
4021 if (sc->sc_flags & WM_F_HAS_MII) 4021 if (sc->sc_flags & WM_F_HAS_MII)
4022 mii_tick(&sc->sc_mii); 4022 mii_tick(&sc->sc_mii);
4023 else 4023 else
4024 wm_tbi_check_link(sc); 4024 wm_tbi_check_link(sc);
4025 4025
4026 splx(s); 4026 splx(s);
4027 4027
4028 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc); 4028 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4029} 4029}
4030 4030
4031/* 4031/*
4032 * wm_reset: 4032 * wm_reset:
4033 * 4033 *
4034 * Reset the i82542 chip. 4034 * Reset the i82542 chip.
4035 */ 4035 */
4036static void 4036static void
4037wm_reset(struct wm_softc *sc) 4037wm_reset(struct wm_softc *sc)
4038{ 4038{
4039 int phy_reset = 0; 4039 int phy_reset = 0;
4040 uint32_t reg, mask; 4040 uint32_t reg, mask;
4041 4041
4042 /* 4042 /*
4043 * Allocate on-chip memory according to the MTU size. 4043 * Allocate on-chip memory according to the MTU size.
4044 * The Packet Buffer Allocation register must be written 4044 * The Packet Buffer Allocation register must be written
4045 * before the chip is reset. 4045 * before the chip is reset.
4046 */ 4046 */
4047 switch (sc->sc_type) { 4047 switch (sc->sc_type) {
4048 case WM_T_82547: 4048 case WM_T_82547:
4049 case WM_T_82547_2: 4049 case WM_T_82547_2:
4050 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ? 4050 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4051 PBA_22K : PBA_30K; 4051 PBA_22K : PBA_30K;
4052 sc->sc_txfifo_head = 0; 4052 sc->sc_txfifo_head = 0;
4053 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT; 4053 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
4054 sc->sc_txfifo_size = 4054 sc->sc_txfifo_size =
4055 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT; 4055 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
4056 sc->sc_txfifo_stall = 0; 4056 sc->sc_txfifo_stall = 0;
4057 break; 4057 break;
4058 case WM_T_82571: 4058 case WM_T_82571:
4059 case WM_T_82572: 4059 case WM_T_82572:
4060 case WM_T_82575: /* XXX need special handling for jumbo frames */ 4060 case WM_T_82575: /* XXX need special handling for jumbo frames */
4061 case WM_T_I350: 4061 case WM_T_I350:
4062 case WM_T_I354: 4062 case WM_T_I354:
4063 case WM_T_80003: 4063 case WM_T_80003:
4064 sc->sc_pba = PBA_32K; 4064 sc->sc_pba = PBA_32K;
4065 break; 4065 break;
4066 case WM_T_82580: 4066 case WM_T_82580:
4067 case WM_T_82580ER: 4067 case WM_T_82580ER:
4068 sc->sc_pba = PBA_35K; 4068 sc->sc_pba = PBA_35K;
4069 break; 4069 break;
4070 case WM_T_I210: 4070 case WM_T_I210:
4071 case WM_T_I211: 4071 case WM_T_I211:
4072 sc->sc_pba = PBA_34K; 4072 sc->sc_pba = PBA_34K;
4073 break; 4073 break;
4074 case WM_T_82576: 4074 case WM_T_82576:
4075 sc->sc_pba = PBA_64K; 4075 sc->sc_pba = PBA_64K;
4076 break; 4076 break;
4077 case WM_T_82573: 4077 case WM_T_82573:
4078 sc->sc_pba = PBA_12K; 4078 sc->sc_pba = PBA_12K;
4079 break; 4079 break;
4080 case WM_T_82574: 4080 case WM_T_82574:
4081 case WM_T_82583: 4081 case WM_T_82583:
4082 sc->sc_pba = PBA_20K; 4082 sc->sc_pba = PBA_20K;
4083 break; 4083 break;
4084 case WM_T_ICH8: 4084 case WM_T_ICH8:
4085 sc->sc_pba = PBA_8K; 4085 sc->sc_pba = PBA_8K;
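 /* ICH8: the total packet buffer size (PBS) register is programmed as well */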
4086 CSR_WRITE(sc, WMREG_PBS, PBA_16K); 4086 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
4087 break; 4087 break;
4088 case WM_T_ICH9: 4088 case WM_T_ICH9:
4089 case WM_T_ICH10: 4089 case WM_T_ICH10:
4090 sc->sc_pba = PBA_10K; 4090 sc->sc_pba = PBA_10K;
4091 break; 4091 break;
4092 case WM_T_PCH: 4092 case WM_T_PCH:
4093 case WM_T_PCH2: 4093 case WM_T_PCH2:
4094 case WM_T_PCH_LPT: 4094 case WM_T_PCH_LPT:
4095 sc->sc_pba = PBA_26K; 4095 sc->sc_pba = PBA_26K;
4096 break; 4096 break;
4097 default: 4097 default:
4098 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ? 4098 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4099 PBA_40K : PBA_48K; 4099 PBA_40K : PBA_48K;
4100 break; 4100 break;
4101 } 4101 }
4102 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba); 4102 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
4103 4103
4104 /* Prevent the PCI-E bus from sticking */ 4104 /* Prevent the PCI-E bus from sticking */
4105 if (sc->sc_flags & WM_F_PCIE) { 4105 if (sc->sc_flags & WM_F_PCIE) {
4106 int timeout = 800; 4106 int timeout = 800;
4107 4107
4108 sc->sc_ctrl |= CTRL_GIO_M_DIS; 4108 sc->sc_ctrl |= CTRL_GIO_M_DIS;
4109 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 4109 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4110 4110
4111 while (timeout--) { 4111 while (timeout--) {
4112 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) 4112 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
4113 == 0) 4113 == 0)
4114 break; 4114 break;
4115 delay(100); 4115 delay(100);
4116 } 4116 }
4117 } 4117 }
4118 4118
4119 /* Set the completion timeout for interface */ 4119 /* Set the completion timeout for interface */
4120 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576) 4120 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
4121 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) 4121 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4122 wm_set_pcie_completion_timeout(sc); 4122 wm_set_pcie_completion_timeout(sc);
4123 4123
4124 /* Clear interrupt */ 4124 /* Clear interrupt */
4125 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); 4125 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4126 4126
4127 /* Stop the transmit and receive processes. */ 4127 /* Stop the transmit and receive processes. */
4128 CSR_WRITE(sc, WMREG_RCTL, 0); 4128 CSR_WRITE(sc, WMREG_RCTL, 0);
4129 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP); 
4130 sc->sc_rctl &= ~RCTL_EN; 4129 sc->sc_rctl &= ~RCTL_EN;
 4130 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
 4131 CSR_WRITE_FLUSH(sc);
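 /* the flush (presumably a dummy STATUS read) pushes the posted writes out before the delay below */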
4131 4132
4132 /* XXX set_tbi_sbp_82543() */ 4133 /* XXX set_tbi_sbp_82543() */
4133 4134
4134 delay(10*1000); 4135 delay(10*1000);
4135 4136
4136 /* Must acquire the MDIO ownership before MAC reset */ 4137 /* Must acquire the MDIO ownership before MAC reset */
4137 switch (sc->sc_type) { 4138 switch (sc->sc_type) {
4138 case WM_T_82573: 4139 case WM_T_82573:
4139 case WM_T_82574: 4140 case WM_T_82574:
4140 case WM_T_82583: 4141 case WM_T_82583:
4141 wm_get_hw_semaphore_82573(sc); 4142 wm_get_hw_semaphore_82573(sc);
4142 break; 4143 break;
4143 default: 4144 default:
4144 break; 4145 break;
4145 } 4146 }
4146 4147
4147 /* 4148 /*
4148 * 82541 Errata 29? & 82547 Errata 28? 4149 * 82541 Errata 29? & 82547 Errata 28?
4149 * See also the description about PHY_RST bit in CTRL register 4150 * See also the description about PHY_RST bit in CTRL register
4150 * in 8254x_GBe_SDM.pdf. 4151 * in 8254x_GBe_SDM.pdf.
4151 */ 4152 */
4152 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) { 4153 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
4153 CSR_WRITE(sc, WMREG_CTRL, 4154 CSR_WRITE(sc, WMREG_CTRL,
4154 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET); 4155 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
 4156 CSR_WRITE_FLUSH(sc);
4155 delay(5000); 4157 delay(5000);
4156 } 4158 }
4157 4159
4158 switch (sc->sc_type) { 4160 switch (sc->sc_type) {
4159 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */ 4161 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
4160 case WM_T_82541: 4162 case WM_T_82541:
4161 case WM_T_82541_2: 4163 case WM_T_82541_2:
4162 case WM_T_82547: 4164 case WM_T_82547:
4163 case WM_T_82547_2: 4165 case WM_T_82547_2:
4164 /* 4166 /*
4165 * On some chipsets, a reset through a memory-mapped write 4167 * On some chipsets, a reset through a memory-mapped write
4166 * cycle can cause the chip to reset before completing the 4168 * cycle can cause the chip to reset before completing the
4167 * write cycle. This causes a major headache that can be 4169 * write cycle. This causes a major headache that can be
4168 * avoided by issuing the reset via indirect register writes 4170 * avoided by issuing the reset via indirect register writes
4169 * through I/O space. 4171 * through I/O space.
4170 * 4172 *
4171 * So, if we successfully mapped the I/O BAR at attach time, 4173 * So, if we successfully mapped the I/O BAR at attach time,
4172 * use that. Otherwise, try our luck with a memory-mapped 4174 * use that. Otherwise, try our luck with a memory-mapped
4173 * reset. 4175 * reset.
4174 */ 4176 */
4175 if (sc->sc_flags & WM_F_IOH_VALID) 4177 if (sc->sc_flags & WM_F_IOH_VALID)
4176 wm_io_write(sc, WMREG_CTRL, CTRL_RST); 4178 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
4177 else 4179 else
4178 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST); 4180 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
4179 break; 4181 break;
4180 case WM_T_82545_3: 4182 case WM_T_82545_3:
4181 case WM_T_82546_3: 4183 case WM_T_82546_3:
4182 /* Use the shadow control register on these chips. */ 4184 /* Use the shadow control register on these chips. */
4183 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST); 4185 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
4184 break; 4186 break;
4185 case WM_T_80003: 4187 case WM_T_80003:
4186 mask = swfwphysem[sc->sc_funcid]; 4188 mask = swfwphysem[sc->sc_funcid];
4187 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST; 4189 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4188 wm_get_swfw_semaphore(sc, mask); 4190 wm_get_swfw_semaphore(sc, mask);
4189 CSR_WRITE(sc, WMREG_CTRL, reg); 4191 CSR_WRITE(sc, WMREG_CTRL, reg);
4190 wm_put_swfw_semaphore(sc, mask); 4192 wm_put_swfw_semaphore(sc, mask);
4191 break; 4193 break;
4192 case WM_T_ICH8: 4194 case WM_T_ICH8:
4193 case WM_T_ICH9: 4195 case WM_T_ICH9:
4194 case WM_T_ICH10: 4196 case WM_T_ICH10:
4195 case WM_T_PCH: 4197 case WM_T_PCH:
4196 case WM_T_PCH2: 4198 case WM_T_PCH2:
4197 case WM_T_PCH_LPT: 4199 case WM_T_PCH_LPT:
4198 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST; 4200 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4199 if (wm_check_reset_block(sc) == 0) { 4201 if (wm_check_reset_block(sc) == 0) {
4200 /* 4202 /*
4201 * Gate automatic PHY configuration by hardware on 4203 * Gate automatic PHY configuration by hardware on
4202 * non-managed 82579 4204 * non-managed 82579
4203 */ 4205 */
4204 if ((sc->sc_type == WM_T_PCH2) 4206 if ((sc->sc_type == WM_T_PCH2)
4205 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) 4207 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
4206 != 0)) 4208 != 0))
4207 wm_gate_hw_phy_config_ich8lan(sc, 1); 4209 wm_gate_hw_phy_config_ich8lan(sc, 1);
4208 4210
4209 4211
4210 reg |= CTRL_PHY_RESET; 4212 reg |= CTRL_PHY_RESET;
4211 phy_reset = 1; 4213 phy_reset = 1;
4212 } 4214 }
4213 wm_get_swfwhw_semaphore(sc); 4215 wm_get_swfwhw_semaphore(sc);
4214 CSR_WRITE(sc, WMREG_CTRL, reg); 4216 CSR_WRITE(sc, WMREG_CTRL, reg);
 4217 /* Don't insert a completion barrier during reset */
4215 delay(20*1000); 4218 delay(20*1000);
4216 wm_put_swfwhw_semaphore(sc); 4219 wm_put_swfwhw_semaphore(sc);
4217 break; 4220 break;
4218 case WM_T_82542_2_0: 4221 case WM_T_82542_2_0:
4219 case WM_T_82542_2_1: 4222 case WM_T_82542_2_1:
4220 case WM_T_82543: 4223 case WM_T_82543:
4221 case WM_T_82540: 4224 case WM_T_82540:
4222 case WM_T_82545: 4225 case WM_T_82545:
4223 case WM_T_82546: 4226 case WM_T_82546:
4224 case WM_T_82571: 4227 case WM_T_82571:
4225 case WM_T_82572: 4228 case WM_T_82572:
4226 case WM_T_82573: 4229 case WM_T_82573:
4227 case WM_T_82574: 4230 case WM_T_82574:
4228 case WM_T_82575: 4231 case WM_T_82575:
4229 case WM_T_82576: 4232 case WM_T_82576:
4230 case WM_T_82580: 4233 case WM_T_82580:
4231 case WM_T_82580ER: 4234 case WM_T_82580ER:
4232 case WM_T_82583: 4235 case WM_T_82583:
4233 case WM_T_I350: 4236 case WM_T_I350:
4234 case WM_T_I354: 4237 case WM_T_I354:
4235 case WM_T_I210: 4238 case WM_T_I210:
4236 case WM_T_I211: 4239 case WM_T_I211:
4237 default: 4240 default:
4238 /* Everything else can safely use the documented method. */ 4241 /* Everything else can safely use the documented method. */
4239 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST); 4242 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
4240 break; 4243 break;
4241 } 4244 }
4242 4245
4243 /* Must release the MDIO ownership after MAC reset */ 4246 /* Must release the MDIO ownership after MAC reset */
4244 switch (sc->sc_type) { 4247 switch (sc->sc_type) {
4245 case WM_T_82574: 4248 case WM_T_82574:
4246 case WM_T_82583: 4249 case WM_T_82583:
4247 wm_put_hw_semaphore_82573(sc); 4250 wm_put_hw_semaphore_82573(sc);
4248 break; 4251 break;
4249 default: 4252 default:
4250 break; 4253 break;
4251 } 4254 }
4252 4255
4253 if (phy_reset != 0) 4256 if (phy_reset != 0)
4254 wm_get_cfg_done(sc); 4257 wm_get_cfg_done(sc);
4255 4258
4256 /* reload EEPROM */ 4259 /* reload EEPROM */
4257 switch (sc->sc_type) { 4260 switch (sc->sc_type) {
4258 case WM_T_82542_2_0: 4261 case WM_T_82542_2_0:
4259 case WM_T_82542_2_1: 4262 case WM_T_82542_2_1:
4260 case WM_T_82543: 4263 case WM_T_82543:
4261 case WM_T_82544: 4264 case WM_T_82544:
4262 delay(10); 4265 delay(10);
4263 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST; 4266 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4264 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 4267 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
 4268 CSR_WRITE_FLUSH(sc);
4265 delay(2000); 4269 delay(2000);
4266 break; 4270 break;
4267 case WM_T_82540: 4271 case WM_T_82540:
4268 case WM_T_82545: 4272 case WM_T_82545:
4269 case WM_T_82545_3: 4273 case WM_T_82545_3:
4270 case WM_T_82546: 4274 case WM_T_82546:
4271 case WM_T_82546_3: 4275 case WM_T_82546_3:
4272 delay(5*1000); 4276 delay(5*1000);
4273 /* XXX Disable HW ARPs on ASF enabled adapters */ 4277 /* XXX Disable HW ARPs on ASF enabled adapters */
4274 break; 4278 break;
4275 case WM_T_82541: 4279 case WM_T_82541:
4276 case WM_T_82541_2: 4280 case WM_T_82541_2:
4277 case WM_T_82547: 4281 case WM_T_82547:
4278 case WM_T_82547_2: 4282 case WM_T_82547_2:
4279 delay(20000); 4283 delay(20000);
4280 /* XXX Disable HW ARPs on ASF enabled adapters */ 4284 /* XXX Disable HW ARPs on ASF enabled adapters */
4281 break; 4285 break;
4282 case WM_T_82571: 4286 case WM_T_82571:
4283 case WM_T_82572: 4287 case WM_T_82572:
4284 case WM_T_82573: 4288 case WM_T_82573:
4285 case WM_T_82574: 4289 case WM_T_82574:
4286 case WM_T_82583: 4290 case WM_T_82583:
4287 if (sc->sc_flags & WM_F_EEPROM_FLASH) { 4291 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
4288 delay(10); 4292 delay(10);
4289 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST; 4293 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4290 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 4294 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
 4295 CSR_WRITE_FLUSH(sc);
4291 } 4296 }
4292 /* check EECD_EE_AUTORD */ 4297 /* check EECD_EE_AUTORD */
4293 wm_get_auto_rd_done(sc); 4298 wm_get_auto_rd_done(sc);
4294 /* 4299 /*
4295 * Phy configuration from NVM just starts after EECD_AUTO_RD 4300 * Phy configuration from NVM just starts after EECD_AUTO_RD
4296 * is set. 4301 * is set.
4297 */ 4302 */
4298 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574) 4303 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
4299 || (sc->sc_type == WM_T_82583)) 4304 || (sc->sc_type == WM_T_82583))
4300 delay(25*1000); 4305 delay(25*1000);
4301 break; 4306 break;
4302 case WM_T_82575: 4307 case WM_T_82575:
4303 case WM_T_82576: 4308 case WM_T_82576:
4304 case WM_T_82580: 4309 case WM_T_82580:
4305 case WM_T_82580ER: 4310 case WM_T_82580ER:
4306 case WM_T_I350: 4311 case WM_T_I350:
4307 case WM_T_I354: 4312 case WM_T_I354:
4308 case WM_T_I210: 4313 case WM_T_I210:
4309 case WM_T_I211: 4314 case WM_T_I211:
4310 case WM_T_80003: 4315 case WM_T_80003:
4311 /* check EECD_EE_AUTORD */ 4316 /* check EECD_EE_AUTORD */
4312 wm_get_auto_rd_done(sc); 4317 wm_get_auto_rd_done(sc);
4313 break; 4318 break;
4314 case WM_T_ICH8: 4319 case WM_T_ICH8:
4315 case WM_T_ICH9: 4320 case WM_T_ICH9:
4316 case WM_T_ICH10: 4321 case WM_T_ICH10:
4317 case WM_T_PCH: 4322 case WM_T_PCH:
4318 case WM_T_PCH2: 4323 case WM_T_PCH2:
4319 case WM_T_PCH_LPT: 4324 case WM_T_PCH_LPT:
4320 break; 4325 break;
4321 default: 4326 default:
4322 panic("%s: unknown type\n", __func__); 4327 panic("%s: unknown type\n", __func__);
4323 } 4328 }
4324 4329
4325 /* Check whether EEPROM is present or not */ 4330 /* Check whether EEPROM is present or not */
4326 switch (sc->sc_type) { 4331 switch (sc->sc_type) {
4327 case WM_T_82575: 4332 case WM_T_82575:
4328 case WM_T_82576: 4333 case WM_T_82576:
4329#if 0 /* XXX */ 4334#if 0 /* XXX */
4330 case WM_T_82580: 4335 case WM_T_82580:
4331 case WM_T_82580ER: 4336 case WM_T_82580ER:
4332#endif 4337#endif
4333 case WM_T_I350: 4338 case WM_T_I350:
4334 case WM_T_I354: 4339 case WM_T_I354:
4335 case WM_T_ICH8: 4340 case WM_T_ICH8:
4336 case WM_T_ICH9: 4341 case WM_T_ICH9:
4337 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) { 4342 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4338 /* Not found */ 4343 /* Not found */
4339 sc->sc_flags |= WM_F_EEPROM_INVALID; 4344 sc->sc_flags |= WM_F_EEPROM_INVALID;
4340 if ((sc->sc_type == WM_T_82575) 4345 if ((sc->sc_type == WM_T_82575)
4341 || (sc->sc_type == WM_T_82576) 4346 || (sc->sc_type == WM_T_82576)
4342 || (sc->sc_type == WM_T_82580) 4347 || (sc->sc_type == WM_T_82580)
4343 || (sc->sc_type == WM_T_82580ER) 4348 || (sc->sc_type == WM_T_82580ER)
4344 || (sc->sc_type == WM_T_I350) 4349 || (sc->sc_type == WM_T_I350)
4345 || (sc->sc_type == WM_T_I354)) 4350 || (sc->sc_type == WM_T_I354))
4346 wm_reset_init_script_82575(sc); 4351 wm_reset_init_script_82575(sc);
4347 } 4352 }
4348 break; 4353 break;
4349 default: 4354 default:
4350 break; 4355 break;
4351 } 4356 }
4352 4357
4353 if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER) 4358 if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
4354 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) { 4359 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
4355 /* clear global device reset status bit */ 4360 /* clear global device reset status bit */
4356 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET); 4361 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4357 } 4362 }
4358 4363
4359 /* Clear any pending interrupt events. */ 4364 /* Clear any pending interrupt events. */
4360 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); 4365 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4361 reg = CSR_READ(sc, WMREG_ICR); 4366 reg = CSR_READ(sc, WMREG_ICR);
4362 4367
4363 /* reload sc_ctrl */ 4368 /* reload sc_ctrl */
4364 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL); 4369 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4365 4370
4366 if (sc->sc_type == WM_T_I350) 4371 if (sc->sc_type == WM_T_I350)
4367 wm_set_eee_i350(sc); 4372 wm_set_eee_i350(sc);
4368 4373
4369 /* dummy read from WUC */ 4374 /* dummy read from WUC */
4370 if (sc->sc_type == WM_T_PCH) 4375 if (sc->sc_type == WM_T_PCH)
4371 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC); 4376 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
4372 /* 4377 /*
4373 * For PCH, this write will make sure that any noise will be detected 4378 * For PCH, this write will make sure that any noise will be detected
4374 * as a CRC error and be dropped rather than show up as a bad packet 4379 * as a CRC error and be dropped rather than show up as a bad packet
4375 * to the DMA engine 4380 * to the DMA engine
4376 */ 4381 */
4377 if (sc->sc_type == WM_T_PCH) 4382 if (sc->sc_type == WM_T_PCH)
4378 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565); 4383 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4379 4384
4380 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 4385 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4381 CSR_WRITE(sc, WMREG_WUC, 0); 4386 CSR_WRITE(sc, WMREG_WUC, 0);
4382 4387
4383 /* XXX need special handling for 82580 */ 4388 /* XXX need special handling for 82580 */
4384} 4389}
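
A minimal sketch of the write/flush/delay idiom used in wm_reset() above, assuming CSR_WRITE_FLUSH is the driver's completion barrier (a dummy register read that forces the posted write out to the device); the helper name wm_write_and_settle is hypothetical and not part of the driver:

static inline void
wm_write_and_settle(struct wm_softc *sc, int reg, uint32_t val, int usec)
{

	/* Post the register write. */
	CSR_WRITE(sc, reg, val);
	/* Completion barrier: make sure the write has reached the chip. */
	CSR_WRITE_FLUSH(sc);
	/* Only then wait for the hardware to settle. */
	delay(usec);
}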
4385 4390
4386static void 4391static void
4387wm_set_vlan(struct wm_softc *sc) 4392wm_set_vlan(struct wm_softc *sc)
4388{ 4393{
4389 /* Deal with VLAN enables. */ 4394 /* Deal with VLAN enables. */
4390 if (VLAN_ATTACHED(&sc->sc_ethercom)) 4395 if (VLAN_ATTACHED(&sc->sc_ethercom))
4391 sc->sc_ctrl |= CTRL_VME; 4396 sc->sc_ctrl |= CTRL_VME;
4392 else 4397 else
4393 sc->sc_ctrl &= ~CTRL_VME; 4398 sc->sc_ctrl &= ~CTRL_VME;
4394 4399
4395 /* Write the control registers. */ 4400 /* Write the control registers. */
4396 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 4401 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4397} 4402}
4398 4403
4399/* 4404/*
4400 * wm_init: [ifnet interface function] 4405 * wm_init: [ifnet interface function]
4401 * 4406 *
4402 * Initialize the interface. Must be called at splnet(). 4407 * Initialize the interface. Must be called at splnet().
4403 */ 4408 */
4404static int 4409static int
4405wm_init(struct ifnet *ifp) 4410wm_init(struct ifnet *ifp)
4406{ 4411{
4407 struct wm_softc *sc = ifp->if_softc; 4412 struct wm_softc *sc = ifp->if_softc;
4408 struct wm_rxsoft *rxs; 4413 struct wm_rxsoft *rxs;
4409 int i, j, trynum, error = 0; 4414 int i, j, trynum, error = 0;
4410 uint32_t reg; 4415 uint32_t reg;
4411 4416
4412 /* 4417 /*
4413 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set. 4418 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
4414 * There is a small but measurable benefit to avoiding the adjustment 4419 * There is a small but measurable benefit to avoiding the adjustment
4415 * of the descriptor so that the headers are aligned, for normal mtu, 4420 * of the descriptor so that the headers are aligned, for normal mtu,
4416 * on such platforms. One possibility is that the DMA itself is 4421 * on such platforms. One possibility is that the DMA itself is
4417 * slightly more efficient if the front of the entire packet (instead 4422 * slightly more efficient if the front of the entire packet (instead
4418 * of the front of the headers) is aligned. 4423 * of the front of the headers) is aligned.
4419 * 4424 *
4420 * Note we must always set align_tweak to 0 if we are using 4425 * Note we must always set align_tweak to 0 if we are using
4421 * jumbo frames. 4426 * jumbo frames.
4422 */ 4427 */
4423#ifdef __NO_STRICT_ALIGNMENT 4428#ifdef __NO_STRICT_ALIGNMENT
4424 sc->sc_align_tweak = 0; 4429 sc->sc_align_tweak = 0;
4425#else 4430#else
4426 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2)) 4431 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4427 sc->sc_align_tweak = 0; 4432 sc->sc_align_tweak = 0;
4428 else 4433 else
4429 sc->sc_align_tweak = 2; 4434 sc->sc_align_tweak = 2;
4430#endif /* __NO_STRICT_ALIGNMENT */ 4435#endif /* __NO_STRICT_ALIGNMENT */
4431 4436
4432 /* Cancel any pending I/O. */ 4437 /* Cancel any pending I/O. */
4433 wm_stop(ifp, 0); 4438 wm_stop(ifp, 0);
4434 4439
4435 /* update statistics before reset */ 4440 /* update statistics before reset */
4436 ifp->if_collisions += CSR_READ(sc, WMREG_COLC); 4441 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4437 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC); 4442 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4438 4443
4439 /* Reset the chip to a known state. */ 4444 /* Reset the chip to a known state. */
4440 wm_reset(sc); 4445 wm_reset(sc);
4441 4446
4442 switch (sc->sc_type) { 4447 switch (sc->sc_type) {
4443 case WM_T_82571: 4448 case WM_T_82571:
4444 case WM_T_82572: 4449 case WM_T_82572:
4445 case WM_T_82573: 4450 case WM_T_82573:
4446 case WM_T_82574: 4451 case WM_T_82574:
4447 case WM_T_82583: 4452 case WM_T_82583:
4448 case WM_T_80003: 4453 case WM_T_80003:
4449 case WM_T_ICH8: 4454 case WM_T_ICH8:
4450 case WM_T_ICH9: 4455 case WM_T_ICH9:
4451 case WM_T_ICH10: 4456 case WM_T_ICH10:
4452 case WM_T_PCH: 4457 case WM_T_PCH:
4453 case WM_T_PCH2: 4458 case WM_T_PCH2:
4454 case WM_T_PCH_LPT: 4459 case WM_T_PCH_LPT:
4455 if (wm_check_mng_mode(sc) != 0) 4460 if (wm_check_mng_mode(sc) != 0)
4456 wm_get_hw_control(sc); 4461 wm_get_hw_control(sc);
4457 break; 4462 break;
4458 default: 4463 default:
4459 break; 4464 break;
4460 } 4465 }
4461 4466
4462 /* Reset the PHY. */ 4467 /* Reset the PHY. */
4463 if (sc->sc_flags & WM_F_HAS_MII) 4468 if (sc->sc_flags & WM_F_HAS_MII)
4464 wm_gmii_reset(sc); 4469 wm_gmii_reset(sc);
4465 4470
4466 reg = CSR_READ(sc, WMREG_CTRL_EXT); 4471 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4467 /* Enable PHY low-power state when MAC is at D3 w/o WoL */ 4472 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
4468 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2) 4473 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
4469 || (sc->sc_type == WM_T_PCH_LPT)) 4474 || (sc->sc_type == WM_T_PCH_LPT))
4470 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN); 4475 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
4471 4476
4472 /* Initialize the transmit descriptor ring. */ 4477 /* Initialize the transmit descriptor ring. */
4473 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc)); 4478 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
4474 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc), 4479 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
4475 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 4480 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4476 sc->sc_txfree = WM_NTXDESC(sc); 4481 sc->sc_txfree = WM_NTXDESC(sc);
4477 sc->sc_txnext = 0; 4482 sc->sc_txnext = 0;
4478 4483
4479 if (sc->sc_type < WM_T_82543) { 4484 if (sc->sc_type < WM_T_82543) {
4480 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0)); 4485 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
4481 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0)); 4486 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
4482 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc)); 4487 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
4483 CSR_WRITE(sc, WMREG_OLD_TDH, 0); 4488 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
4484 CSR_WRITE(sc, WMREG_OLD_TDT, 0); 4489 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
4485 CSR_WRITE(sc, WMREG_OLD_TIDV, 128); 4490 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
4486 } else { 4491 } else {
4487 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0)); 4492 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
4488 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0)); 4493 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
4489 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc)); 4494 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
4490 CSR_WRITE(sc, WMREG_TDH, 0); 4495 CSR_WRITE(sc, WMREG_TDH, 0);
4491 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */ 4496 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
4492 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */ 4497 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
4493 4498
4494 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 4499 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4495 /* 4500 /*
4496 * Don't write TDT before TCTL.EN is set. 4501 * Don't write TDT before TCTL.EN is set.
4497 * See the document. 4502 * See the document.
4498 */ 4503 */
4499 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE 4504 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
4500 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0) 4505 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
4501 | TXDCTL_WTHRESH(0)); 4506 | TXDCTL_WTHRESH(0));
4502 else { 4507 else {
4503 CSR_WRITE(sc, WMREG_TDT, 0); 4508 CSR_WRITE(sc, WMREG_TDT, 0);
4504 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) | 4509 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
4505 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0)); 4510 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
4506 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) | 4511 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
4507 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1)); 4512 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
4508 } 4513 }
4509 } 4514 }
4510 CSR_WRITE(sc, WMREG_TQSA_LO, 0); 4515 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
4511 CSR_WRITE(sc, WMREG_TQSA_HI, 0); 4516 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
4512 4517
4513 /* Initialize the transmit job descriptors. */ 4518 /* Initialize the transmit job descriptors. */
4514 for (i = 0; i < WM_TXQUEUELEN(sc); i++) 4519 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
4515 sc->sc_txsoft[i].txs_mbuf = NULL; 4520 sc->sc_txsoft[i].txs_mbuf = NULL;
4516 sc->sc_txsfree = WM_TXQUEUELEN(sc); 4521 sc->sc_txsfree = WM_TXQUEUELEN(sc);
4517 sc->sc_txsnext = 0; 4522 sc->sc_txsnext = 0;
4518 sc->sc_txsdirty = 0; 4523 sc->sc_txsdirty = 0;
4519 4524
4520 /* 4525 /*
4521 * Initialize the receive descriptor and receive job 4526 * Initialize the receive descriptor and receive job
4522 * descriptor rings. 4527 * descriptor rings.
4523 */ 4528 */
4524 if (sc->sc_type < WM_T_82543) { 4529 if (sc->sc_type < WM_T_82543) {
4525 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0)); 4530 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
4526 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0)); 4531 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
4527 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs)); 4532 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
4528 CSR_WRITE(sc, WMREG_OLD_RDH0, 0); 4533 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
4529 CSR_WRITE(sc, WMREG_OLD_RDT0, 0); 4534 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
4530 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD); 4535 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
4531 4536
4532 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0); 4537 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
4533 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0); 4538 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
4534 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0); 4539 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
4535 CSR_WRITE(sc, WMREG_OLD_RDH1, 0); 4540 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
4536 CSR_WRITE(sc, WMREG_OLD_RDT1, 0); 4541 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
4537 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0); 4542 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
4538 } else { 4543 } else {
4539 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0)); 4544 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
4540 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0)); 4545 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
4541 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs)); 4546 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
4542 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { 4547 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4543 CSR_WRITE(sc, WMREG_EITR(0), 450); 4548 CSR_WRITE(sc, WMREG_EITR(0), 450);
4544 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1)) 4549 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
4545 panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES); 4550 panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
4546 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY 4551 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
4547 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT)); 4552 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
4548 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE 4553 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
4549 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8) 4554 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
4550 | RXDCTL_WTHRESH(1)); 4555 | RXDCTL_WTHRESH(1));
4551 } else { 4556 } else {
4552 CSR_WRITE(sc, WMREG_RDH, 0); 4557 CSR_WRITE(sc, WMREG_RDH, 0);
4553 CSR_WRITE(sc, WMREG_RDT, 0); 4558 CSR_WRITE(sc, WMREG_RDT, 0);
4554 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */ 4559 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
4555 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */ 4560 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
4556 } 4561 }
4557 } 4562 }
4558 for (i = 0; i < WM_NRXDESC; i++) { 4563 for (i = 0; i < WM_NRXDESC; i++) {
4559 rxs = &sc->sc_rxsoft[i]; 4564 rxs = &sc->sc_rxsoft[i];
4560 if (rxs->rxs_mbuf == NULL) { 4565 if (rxs->rxs_mbuf == NULL) {
4561 if ((error = wm_add_rxbuf(sc, i)) != 0) { 4566 if ((error = wm_add_rxbuf(sc, i)) != 0) {
4562 log(LOG_ERR, "%s: unable to allocate or map " 4567 log(LOG_ERR, "%s: unable to allocate or map "
4563 "rx buffer %d, error = %d\n", 4568 "rx buffer %d, error = %d\n",
4564 device_xname(sc->sc_dev), i, error); 4569 device_xname(sc->sc_dev), i, error);
4565 /* 4570 /*
4566 * XXX Should attempt to run with fewer receive 4571 * XXX Should attempt to run with fewer receive
4567 * XXX buffers instead of just failing. 4572 * XXX buffers instead of just failing.
4568 */ 4573 */
4569 wm_rxdrain(sc); 4574 wm_rxdrain(sc);
4570 goto out; 4575 goto out;
4571 } 4576 }
4572 } else { 4577 } else {
4573 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0) 4578 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4574 WM_INIT_RXDESC(sc, i); 4579 WM_INIT_RXDESC(sc, i);
4575 /* 4580 /*
4576 * For 82575 and newer devices, the RX descriptors 4581 * For 82575 and newer devices, the RX descriptors
4577 * must be initialized after the setting of RCTL.EN in 4582 * must be initialized after the setting of RCTL.EN in
4578 * wm_set_filter() 4583 * wm_set_filter()
4579 */ 4584 */
4580 } 4585 }
4581 } 4586 }
4582 sc->sc_rxptr = 0; 4587 sc->sc_rxptr = 0;
4583 sc->sc_rxdiscard = 0; 4588 sc->sc_rxdiscard = 0;
4584 WM_RXCHAIN_RESET(sc); 4589 WM_RXCHAIN_RESET(sc);
4585 4590
4586 /* 4591 /*
4587 * Clear out the VLAN table -- we don't use it (yet). 4592 * Clear out the VLAN table -- we don't use it (yet).
4588 */ 4593 */
4589 CSR_WRITE(sc, WMREG_VET, 0); 4594 CSR_WRITE(sc, WMREG_VET, 0);
4590 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) 4595 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4591 trynum = 10; /* Due to hw errata */ 4596 trynum = 10; /* Due to hw errata */
4592 else 4597 else
4593 trynum = 1; 4598 trynum = 1;
4594 for (i = 0; i < WM_VLAN_TABSIZE; i++) 4599 for (i = 0; i < WM_VLAN_TABSIZE; i++)
4595 for (j = 0; j < trynum; j++) 4600 for (j = 0; j < trynum; j++)
4596 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0); 4601 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4597 4602
4598 /* 4603 /*
4599 * Set up flow-control parameters. 4604 * Set up flow-control parameters.
4600 * 4605 *
4601 * XXX Values could probably stand some tuning. 4606 * XXX Values could probably stand some tuning.
4602 */ 4607 */
4603 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9) 4608 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4604 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH) 4609 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4605 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) { 4610 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
4606 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST); 4611 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4607 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST); 4612 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4608 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL); 4613 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4609 } 4614 }
4610 4615
4611 sc->sc_fcrtl = FCRTL_DFLT; 4616 sc->sc_fcrtl = FCRTL_DFLT;
4612 if (sc->sc_type < WM_T_82543) { 4617 if (sc->sc_type < WM_T_82543) {
4613 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT); 4618 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4614 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl); 4619 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4615 } else { 4620 } else {
4616 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT); 4621 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4617 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl); 4622 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4618 } 4623 }
4619 4624
4620 if (sc->sc_type == WM_T_80003) 4625 if (sc->sc_type == WM_T_80003)
4621 CSR_WRITE(sc, WMREG_FCTTV, 0xffff); 4626 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4622 else 4627 else
4623 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT); 4628 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4624 4629
4625 /* Writes the control register. */ 4630 /* Writes the control register. */
4626 wm_set_vlan(sc); 4631 wm_set_vlan(sc);
4627 4632
4628 if (sc->sc_flags & WM_F_HAS_MII) { 4633 if (sc->sc_flags & WM_F_HAS_MII) {
4629 int val; 4634 int val;
4630 4635
4631 switch (sc->sc_type) { 4636 switch (sc->sc_type) {
4632 case WM_T_80003: 4637 case WM_T_80003:
4633 case WM_T_ICH8: 4638 case WM_T_ICH8:
4634 case WM_T_ICH9: 4639 case WM_T_ICH9:
4635 case WM_T_ICH10: 4640 case WM_T_ICH10:
4636 case WM_T_PCH: 4641 case WM_T_PCH:
4637 case WM_T_PCH2: 4642 case WM_T_PCH2:
4638 case WM_T_PCH_LPT: 4643 case WM_T_PCH_LPT:
4639 /* 4644 /*
4640 * Set the mac to wait the maximum time between each 4645 * Set the mac to wait the maximum time between each
4641 * iteration and increase the max iterations when 4646 * iteration and increase the max iterations when
4642 * polling the phy; this fixes erroneous timeouts at 4647 * polling the phy; this fixes erroneous timeouts at
4643 * 10Mbps. 4648 * 10Mbps.
4644 */ 4649 */
4645 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS, 4650 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4646 0xFFFF); 4651 0xFFFF);
4647 val = wm_kmrn_readreg(sc, 4652 val = wm_kmrn_readreg(sc,
4648 KUMCTRLSTA_OFFSET_INB_PARAM); 4653 KUMCTRLSTA_OFFSET_INB_PARAM);
4649 val |= 0x3F; 4654 val |= 0x3F;
4650 wm_kmrn_writereg(sc, 4655 wm_kmrn_writereg(sc,
4651 KUMCTRLSTA_OFFSET_INB_PARAM, val); 4656 KUMCTRLSTA_OFFSET_INB_PARAM, val);
4652 break; 4657 break;
4653 default: 4658 default:
4654 break; 4659 break;
4655 } 4660 }
4656 4661
4657 if (sc->sc_type == WM_T_80003) { 4662 if (sc->sc_type == WM_T_80003) {
4658 val = CSR_READ(sc, WMREG_CTRL_EXT); 4663 val = CSR_READ(sc, WMREG_CTRL_EXT);
4659 val &= ~CTRL_EXT_LINK_MODE_MASK; 4664 val &= ~CTRL_EXT_LINK_MODE_MASK;
4660 CSR_WRITE(sc, WMREG_CTRL_EXT, val); 4665 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4661 4666
4662 /* Bypass RX and TX FIFOs */ 4667 /* Bypass RX and TX FIFOs */
4663 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL, 4668 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4664 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS 4669 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4665 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS); 4670 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4666 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL, 4671 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4667 KUMCTRLSTA_INB_CTRL_DIS_PADDING | 4672 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4668 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT); 4673 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4669 } 4674 }
4670 } 4675 }
4671#if 0 4676#if 0
4672 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext); 4677 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4673#endif 4678#endif
4674 4679
4675 /* 4680 /*
4676 * Set up checksum offload parameters. 4681 * Set up checksum offload parameters.
4677 */ 4682 */
4678 reg = CSR_READ(sc, WMREG_RXCSUM); 4683 reg = CSR_READ(sc, WMREG_RXCSUM);
4679 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL); 4684 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4680 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) 4685 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4681 reg |= RXCSUM_IPOFL; 4686 reg |= RXCSUM_IPOFL;
4682 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx)) 4687 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4683 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL; 4688 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4684 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx)) 4689 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4685 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL; 4690 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4686 CSR_WRITE(sc, WMREG_RXCSUM, reg); 4691 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4687 4692
4688 /* Reset TBI's RXCFG count */ 4693 /* Reset TBI's RXCFG count */
4689 sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0; 4694 sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
4690 4695
4691 /* 4696 /*
4692 * Set up the interrupt registers. 4697 * Set up the interrupt registers.
4693 */ 4698 */
4694 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); 4699 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4695 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 | 4700 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4696 ICR_RXO | ICR_RXT0; 4701 ICR_RXO | ICR_RXT0;
4697 if ((sc->sc_flags & WM_F_HAS_MII) == 0) 4702 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
4698 sc->sc_icr |= ICR_RXCFG; 4703 sc->sc_icr |= ICR_RXCFG;
4699 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr); 4704 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4700 4705
4701 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) 4706 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4702 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) 4707 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4703 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) { 4708 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
4704 reg = CSR_READ(sc, WMREG_KABGTXD); 4709 reg = CSR_READ(sc, WMREG_KABGTXD);
4705 reg |= KABGTXD_BGSQLBIAS; 4710 reg |= KABGTXD_BGSQLBIAS;
4706 CSR_WRITE(sc, WMREG_KABGTXD, reg); 4711 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4707 } 4712 }
4708 4713
4709 /* Set up the inter-packet gap. */ 4714 /* Set up the inter-packet gap. */
4710 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg); 4715 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4711 4716
4712 if (sc->sc_type >= WM_T_82543) { 4717 if (sc->sc_type >= WM_T_82543) {
4713 /* 4718 /*
4714 * Set up the interrupt throttling register (units of 256ns) 4719 * Set up the interrupt throttling register (units of 256ns)
4715 * Note that a footnote in Intel's documentation says this 4720 * Note that a footnote in Intel's documentation says this
4716 * ticker runs at 1/4 the rate when the chip is in 100Mbit 4721 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4717 * or 10Mbit mode. Empirically, it appears to be the case 4722 * or 10Mbit mode. Empirically, it appears to be the case
4718 * that that is also true for the 1024ns units of the other 4723 * that that is also true for the 1024ns units of the other
4719 * interrupt-related timer registers -- so, really, we ought 4724 * interrupt-related timer registers -- so, really, we ought
4720 * to divide this value by 4 when the link speed is low. 4725 * to divide this value by 4 when the link speed is low.
4721 * 4726 *
4722 * XXX implement this division at link speed change! 4727 * XXX implement this division at link speed change!
4723 */ 4728 */
4724 4729
4725 /* 4730 /*
4726 * For N interrupts/sec, set this value to: 4731 * For N interrupts/sec, set this value to:
4727 * 1000000000 / (N * 256). Note that we set the 4732 * 1000000000 / (N * 256). Note that we set the
4728 * absolute and packet timer values to this value 4733 * absolute and packet timer values to this value
4729 * divided by 4 to get "simple timer" behavior. 4734 * divided by 4 to get "simple timer" behavior.
4730 */ 4735 */
4731 4736
4732 sc->sc_itr = 1500; /* 2604 ints/sec */ 4737 sc->sc_itr = 1500; /* 2604 ints/sec */
4733 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr); 4738 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4734 } 4739 }
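	/*
	 * Editor's note, not part of the diff: as a quick check of the
	 * formula above, the value 1500 corresponds to roughly
	 * 1000000000 / (1500 * 256) ~= 2604 interrupts/sec, which matches
	 * the "2604 ints/sec" annotation on sc_itr.
	 */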
4735 4740
4736 /* Set the VLAN ethernetype. */ 4741 /* Set the VLAN ethernetype. */
4737 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN); 4742 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4738 4743
4739 /* 4744 /*
4740 * Set up the transmit control register; we start out with 4745 * Set up the transmit control register; we start out with
 4741 * a collision distance suitable for FDX, but update it when 4746 * a collision distance suitable for FDX, but update it when
4742 * we resolve the media type. 4747 * we resolve the media type.
4743 */ 4748 */
4744 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC 4749 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4745 | TCTL_CT(TX_COLLISION_THRESHOLD) 4750 | TCTL_CT(TX_COLLISION_THRESHOLD)
4746 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX); 4751 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4747 if (sc->sc_type >= WM_T_82571) 4752 if (sc->sc_type >= WM_T_82571)
4748 sc->sc_tctl |= TCTL_MULR; 4753 sc->sc_tctl |= TCTL_MULR;
4749 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); 4754 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4750 4755
4751 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { 4756 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4752 /* 4757 /*
4753 * Write TDT after TCTL.EN is set. 4758 * Write TDT after TCTL.EN is set.
4754 * See the document. 4759 * See the document.
4755 */ 4760 */
4756 CSR_WRITE(sc, WMREG_TDT, 0); 4761 CSR_WRITE(sc, WMREG_TDT, 0);
4757 } 4762 }
4758 4763
4759 if (sc->sc_type == WM_T_80003) { 4764 if (sc->sc_type == WM_T_80003) {
4760 reg = CSR_READ(sc, WMREG_TCTL_EXT); 4765 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4761 reg &= ~TCTL_EXT_GCEX_MASK; 4766 reg &= ~TCTL_EXT_GCEX_MASK;
4762 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX; 4767 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4763 CSR_WRITE(sc, WMREG_TCTL_EXT, reg); 4768 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4764 } 4769 }
4765 4770
4766 /* Set the media. */ 4771 /* Set the media. */
4767 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0) 4772 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4768 goto out; 4773 goto out;
4769 4774
4770 /* Configure for OS presence */ 4775 /* Configure for OS presence */
4771 wm_init_manageability(sc); 4776 wm_init_manageability(sc);
4772 4777
4773 /* 4778 /*
4774 * Set up the receive control register; we actually program 4779 * Set up the receive control register; we actually program
4775 * the register when we set the receive filter. Use multicast 4780 * the register when we set the receive filter. Use multicast
4776 * address offset type 0. 4781 * address offset type 0.
4777 * 4782 *
4778 * Only the i82544 has the ability to strip the incoming 4783 * Only the i82544 has the ability to strip the incoming
4779 * CRC, so we don't enable that feature. 4784 * CRC, so we don't enable that feature.
4780 */ 4785 */
4781 sc->sc_mchash_type = 0; 4786 sc->sc_mchash_type = 0;
4782 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF 4787 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4783 | RCTL_MO(sc->sc_mchash_type); 4788 | RCTL_MO(sc->sc_mchash_type);
4784 4789
4785 /* 4790 /*
4786 * The I350 has a bug where it always strips the CRC whether 4791 * The I350 has a bug where it always strips the CRC whether
4787 * asked to or not. So ask for stripped CRC here and cope in rxeof 4792 * asked to or not. So ask for stripped CRC here and cope in rxeof
4788 */ 4793 */
4789 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354) 4794 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4790 || (sc->sc_type == WM_T_I210)) 4795 || (sc->sc_type == WM_T_I210))
4791 sc->sc_rctl |= RCTL_SECRC; 4796 sc->sc_rctl |= RCTL_SECRC;
4792 4797
4793 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0) 4798 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4794 && (ifp->if_mtu > ETHERMTU)) { 4799 && (ifp->if_mtu > ETHERMTU)) {
4795 sc->sc_rctl |= RCTL_LPE; 4800 sc->sc_rctl |= RCTL_LPE;
4796 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 4801 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4797 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO); 4802 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4798 } 4803 }
4799 4804
4800 if (MCLBYTES == 2048) { 4805 if (MCLBYTES == 2048) {
4801 sc->sc_rctl |= RCTL_2k; 4806 sc->sc_rctl |= RCTL_2k;
4802 } else { 4807 } else {
4803 if (sc->sc_type >= WM_T_82543) { 4808 if (sc->sc_type >= WM_T_82543) {
4804 switch (MCLBYTES) { 4809 switch (MCLBYTES) {
4805 case 4096: 4810 case 4096:
4806 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k; 4811 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4807 break; 4812 break;
4808 case 8192: 4813 case 8192:
4809 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k; 4814 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4810 break; 4815 break;
4811 case 16384: 4816 case 16384:
4812 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k; 4817 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4813 break; 4818 break;
4814 default: 4819 default:
4815 panic("wm_init: MCLBYTES %d unsupported", 4820 panic("wm_init: MCLBYTES %d unsupported",
4816 MCLBYTES); 4821 MCLBYTES);
4817 break; 4822 break;
4818 } 4823 }
4819 } else panic("wm_init: i82542 requires MCLBYTES = 2048"); 4824 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4820 } 4825 }
4821 4826
4822 /* Set the receive filter. */ 4827 /* Set the receive filter. */
4823 wm_set_filter(sc); 4828 wm_set_filter(sc);
4824 4829
4825 /* Enable ECC */ 4830 /* Enable ECC */
4826 switch (sc->sc_type) { 4831 switch (sc->sc_type) {
4827 case WM_T_82571: 4832 case WM_T_82571:
4828 reg = CSR_READ(sc, WMREG_PBA_ECC); 4833 reg = CSR_READ(sc, WMREG_PBA_ECC);
4829 reg |= PBA_ECC_CORR_EN; 4834 reg |= PBA_ECC_CORR_EN;
4830 CSR_WRITE(sc, WMREG_PBA_ECC, reg); 4835 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4831 break; 4836 break;
4832 case WM_T_PCH_LPT: 4837 case WM_T_PCH_LPT:
4833 reg = CSR_READ(sc, WMREG_PBECCSTS); 4838 reg = CSR_READ(sc, WMREG_PBECCSTS);
4834 reg |= PBECCSTS_UNCORR_ECC_ENABLE; 4839 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4835 CSR_WRITE(sc, WMREG_PBECCSTS, reg); 4840 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4836 4841
4837 reg = CSR_READ(sc, WMREG_CTRL); 4842 reg = CSR_READ(sc, WMREG_CTRL);
4838 reg |= CTRL_MEHE; 4843 reg |= CTRL_MEHE;
4839 CSR_WRITE(sc, WMREG_CTRL, reg); 4844 CSR_WRITE(sc, WMREG_CTRL, reg);
4840 break; 4845 break;
4841 default: 4846 default:
4842 break; 4847 break;
4843 } 4848 }
4844 4849
4845 /* On 575 and later set RDT only if RX enabled */ 4850 /* On 575 and later set RDT only if RX enabled */
4846 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 4851 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4847 for (i = 0; i < WM_NRXDESC; i++) 4852 for (i = 0; i < WM_NRXDESC; i++)
4848 WM_INIT_RXDESC(sc, i); 4853 WM_INIT_RXDESC(sc, i);
4849 4854
4850 /* Start the one second link check clock. */ 4855 /* Start the one second link check clock. */
4851 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc); 4856 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4852 4857
4853 /* ...all done! */ 4858 /* ...all done! */
4854 ifp->if_flags |= IFF_RUNNING; 4859 ifp->if_flags |= IFF_RUNNING;
4855 ifp->if_flags &= ~IFF_OACTIVE; 4860 ifp->if_flags &= ~IFF_OACTIVE;
4856 4861
4857 out: 4862 out:
4858 sc->sc_if_flags = ifp->if_flags; 4863 sc->sc_if_flags = ifp->if_flags;
4859 if (error) 4864 if (error)
4860 log(LOG_ERR, "%s: interface not running\n", 4865 log(LOG_ERR, "%s: interface not running\n",
4861 device_xname(sc->sc_dev)); 4866 device_xname(sc->sc_dev));
4862 return error; 4867 return error;
4863} 4868}
4864 4869
4865/* 4870/*
4866 * wm_rxdrain: 4871 * wm_rxdrain:
4867 * 4872 *
4868 * Drain the receive queue. 4873 * Drain the receive queue.
4869 */ 4874 */
4870static void 4875static void
4871wm_rxdrain(struct wm_softc *sc) 4876wm_rxdrain(struct wm_softc *sc)
4872{ 4877{
4873 struct wm_rxsoft *rxs; 4878 struct wm_rxsoft *rxs;
4874 int i; 4879 int i;
4875 4880
4876 for (i = 0; i < WM_NRXDESC; i++) { 4881 for (i = 0; i < WM_NRXDESC; i++) {
4877 rxs = &sc->sc_rxsoft[i]; 4882 rxs = &sc->sc_rxsoft[i];
4878 if (rxs->rxs_mbuf != NULL) { 4883 if (rxs->rxs_mbuf != NULL) {
4879 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 4884 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4880 m_freem(rxs->rxs_mbuf); 4885 m_freem(rxs->rxs_mbuf);
4881 rxs->rxs_mbuf = NULL; 4886 rxs->rxs_mbuf = NULL;
4882 } 4887 }
4883 } 4888 }
4884} 4889}
4885 4890
4886/* 4891/*
4887 * wm_stop: [ifnet interface function] 4892 * wm_stop: [ifnet interface function]
4888 * 4893 *
4889 * Stop transmission on the interface. 4894 * Stop transmission on the interface.
4890 */ 4895 */
4891static void 4896static void
4892wm_stop(struct ifnet *ifp, int disable) 4897wm_stop(struct ifnet *ifp, int disable)
4893{ 4898{
4894 struct wm_softc *sc = ifp->if_softc; 4899 struct wm_softc *sc = ifp->if_softc;
4895 struct wm_txsoft *txs; 4900 struct wm_txsoft *txs;
4896 int i; 4901 int i;
4897 4902
4898 /* Stop the one second clock. */ 4903 /* Stop the one second clock. */
4899 callout_stop(&sc->sc_tick_ch); 4904 callout_stop(&sc->sc_tick_ch);
4900 4905
4901 /* Stop the 82547 Tx FIFO stall check timer. */ 4906 /* Stop the 82547 Tx FIFO stall check timer. */
4902 if (sc->sc_type == WM_T_82547) 4907 if (sc->sc_type == WM_T_82547)
4903 callout_stop(&sc->sc_txfifo_ch); 4908 callout_stop(&sc->sc_txfifo_ch);
4904 4909
4905 if (sc->sc_flags & WM_F_HAS_MII) { 4910 if (sc->sc_flags & WM_F_HAS_MII) {
4906 /* Down the MII. */ 4911 /* Down the MII. */
4907 mii_down(&sc->sc_mii); 4912 mii_down(&sc->sc_mii);
4908 } else { 4913 } else {
4909#if 0 4914#if 0
4910 /* Should we clear PHY's status properly? */ 4915 /* Should we clear PHY's status properly? */
4911 wm_reset(sc); 4916 wm_reset(sc);
4912#endif 4917#endif
4913 } 4918 }
4914 4919
4915 /* Stop the transmit and receive processes. */ 4920 /* Stop the transmit and receive processes. */
4916 CSR_WRITE(sc, WMREG_TCTL, 0); 4921 CSR_WRITE(sc, WMREG_TCTL, 0);
4917 CSR_WRITE(sc, WMREG_RCTL, 0); 4922 CSR_WRITE(sc, WMREG_RCTL, 0);
4918 sc->sc_rctl &= ~RCTL_EN; 4923 sc->sc_rctl &= ~RCTL_EN;
4919 4924
4920 /* 4925 /*
4921 * Clear the interrupt mask to ensure the device cannot assert its 4926 * Clear the interrupt mask to ensure the device cannot assert its
4922 * interrupt line. 4927 * interrupt line.
4923 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service 4928 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4924 * any currently pending or shared interrupt. 4929 * any currently pending or shared interrupt.
4925 */ 4930 */
4926 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); 4931 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4927 sc->sc_icr = 0; 4932 sc->sc_icr = 0;
4928 4933
4929 /* Release any queued transmit buffers. */ 4934 /* Release any queued transmit buffers. */
4930 for (i = 0; i < WM_TXQUEUELEN(sc); i++) { 4935 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4931 txs = &sc->sc_txsoft[i]; 4936 txs = &sc->sc_txsoft[i];
4932 if (txs->txs_mbuf != NULL) { 4937 if (txs->txs_mbuf != NULL) {
4933 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 4938 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4934 m_freem(txs->txs_mbuf); 4939 m_freem(txs->txs_mbuf);
4935 txs->txs_mbuf = NULL; 4940 txs->txs_mbuf = NULL;
4936 } 4941 }
4937 } 4942 }
4938 4943
4939 /* Mark the interface as down and cancel the watchdog timer. */ 4944 /* Mark the interface as down and cancel the watchdog timer. */
4940 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 4945 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4941 ifp->if_timer = 0; 4946 ifp->if_timer = 0;
4942 4947
4943 if (disable) 4948 if (disable)
4944 wm_rxdrain(sc); 4949 wm_rxdrain(sc);
4945 4950
4946#if 0 /* notyet */ 4951#if 0 /* notyet */
4947 if (sc->sc_type >= WM_T_82544) 4952 if (sc->sc_type >= WM_T_82544)
4948 CSR_WRITE(sc, WMREG_WUC, 0); 4953 CSR_WRITE(sc, WMREG_WUC, 0);
4949#endif 4954#endif
4950} 4955}
4951 4956
4952void 4957void
4953wm_get_auto_rd_done(struct wm_softc *sc) 4958wm_get_auto_rd_done(struct wm_softc *sc)
4954{ 4959{
4955 int i; 4960 int i;
4956 4961
4957 /* wait for eeprom to reload */ 4962 /* wait for eeprom to reload */
4958 switch (sc->sc_type) { 4963 switch (sc->sc_type) {
4959 case WM_T_82571: 4964 case WM_T_82571:
4960 case WM_T_82572: 4965 case WM_T_82572:
4961 case WM_T_82573: 4966 case WM_T_82573:
4962 case WM_T_82574: 4967 case WM_T_82574:
4963 case WM_T_82583: 4968 case WM_T_82583:
4964 case WM_T_82575: 4969 case WM_T_82575:
4965 case WM_T_82576: 4970 case WM_T_82576:
4966 case WM_T_82580: 4971 case WM_T_82580:
4967 case WM_T_82580ER: 4972 case WM_T_82580ER:
4968 case WM_T_I350: 4973 case WM_T_I350:
4969 case WM_T_I354: 4974 case WM_T_I354:
4970 case WM_T_I210: 4975 case WM_T_I210:
4971 case WM_T_I211: 4976 case WM_T_I211:
4972 case WM_T_80003: 4977 case WM_T_80003:
4973 case WM_T_ICH8: 4978 case WM_T_ICH8:
4974 case WM_T_ICH9: 4979 case WM_T_ICH9:
4975 for (i = 0; i < 10; i++) { 4980 for (i = 0; i < 10; i++) {
4976 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD) 4981 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4977 break; 4982 break;
4978 delay(1000); 4983 delay(1000);
4979 } 4984 }
4980 if (i == 10) { 4985 if (i == 10) {
4981 log(LOG_ERR, "%s: auto read from eeprom failed to " 4986 log(LOG_ERR, "%s: auto read from eeprom failed to "
4982 "complete\n", device_xname(sc->sc_dev)); 4987 "complete\n", device_xname(sc->sc_dev));
4983 } 4988 }
4984 break; 4989 break;
4985 default: 4990 default:
4986 break; 4991 break;
4987 } 4992 }
4988} 4993}
4989 4994
4990void 4995void
4991wm_lan_init_done(struct wm_softc *sc) 4996wm_lan_init_done(struct wm_softc *sc)
4992{ 4997{
4993 uint32_t reg = 0; 4998 uint32_t reg = 0;
4994 int i; 4999 int i;
4995 5000
4996 /* wait for eeprom to reload */ 5001 /* wait for eeprom to reload */
4997 switch (sc->sc_type) { 5002 switch (sc->sc_type) {
4998 case WM_T_ICH10: 5003 case WM_T_ICH10:
4999 case WM_T_PCH: 5004 case WM_T_PCH:
5000 case WM_T_PCH2: 5005 case WM_T_PCH2:
5001 case WM_T_PCH_LPT: 5006 case WM_T_PCH_LPT:
5002 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) { 5007 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
5003 reg = CSR_READ(sc, WMREG_STATUS); 5008 reg = CSR_READ(sc, WMREG_STATUS);
5004 if ((reg & STATUS_LAN_INIT_DONE) != 0) 5009 if ((reg & STATUS_LAN_INIT_DONE) != 0)
5005 break; 5010 break;
5006 delay(100); 5011 delay(100);
5007 } 5012 }
5008 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) { 5013 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
5009 log(LOG_ERR, "%s: %s: lan_init_done failed to " 5014 log(LOG_ERR, "%s: %s: lan_init_done failed to "
5010 "complete\n", device_xname(sc->sc_dev), __func__); 5015 "complete\n", device_xname(sc->sc_dev), __func__);
5011 } 5016 }
5012 break; 5017 break;
5013 default: 5018 default:
5014 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev), 5019 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
5015 __func__); 5020 __func__);
5016 break; 5021 break;
5017 } 5022 }
5018 5023
5019 reg &= ~STATUS_LAN_INIT_DONE; 5024 reg &= ~STATUS_LAN_INIT_DONE;
5020 CSR_WRITE(sc, WMREG_STATUS, reg); 5025 CSR_WRITE(sc, WMREG_STATUS, reg);
5021} 5026}
5022 5027
5023void 5028void
5024wm_get_cfg_done(struct wm_softc *sc) 5029wm_get_cfg_done(struct wm_softc *sc)
5025{ 5030{
5026 int mask; 5031 int mask;
5027 uint32_t reg; 5032 uint32_t reg;
5028 int i; 5033 int i;
5029 5034
5030 /* wait for eeprom to reload */ 5035 /* wait for eeprom to reload */
5031 switch (sc->sc_type) { 5036 switch (sc->sc_type) {
5032 case WM_T_82542_2_0: 5037 case WM_T_82542_2_0:
5033 case WM_T_82542_2_1: 5038 case WM_T_82542_2_1:
5034 /* null */ 5039 /* null */
5035 break; 5040 break;
5036 case WM_T_82543: 5041 case WM_T_82543:
5037 case WM_T_82544: 5042 case WM_T_82544:
5038 case WM_T_82540: 5043 case WM_T_82540:
5039 case WM_T_82545: 5044 case WM_T_82545:
5040 case WM_T_82545_3: 5045 case WM_T_82545_3:
5041 case WM_T_82546: 5046 case WM_T_82546:
5042 case WM_T_82546_3: 5047 case WM_T_82546_3:
5043 case WM_T_82541: 5048 case WM_T_82541:
5044 case WM_T_82541_2: 5049 case WM_T_82541_2:
5045 case WM_T_82547: 5050 case WM_T_82547:
5046 case WM_T_82547_2: 5051 case WM_T_82547_2:
5047 case WM_T_82573: 5052 case WM_T_82573:
5048 case WM_T_82574: 5053 case WM_T_82574:
5049 case WM_T_82583: 5054 case WM_T_82583:
5050 /* generic */ 5055 /* generic */
5051 delay(10*1000); 5056 delay(10*1000);
5052 break; 5057 break;
5053 case WM_T_80003: 5058 case WM_T_80003:
5054 case WM_T_82571: 5059 case WM_T_82571:
5055 case WM_T_82572: 5060 case WM_T_82572:
5056 case WM_T_82575: 5061 case WM_T_82575:
5057 case WM_T_82576: 5062 case WM_T_82576:
5058 case WM_T_82580: 5063 case WM_T_82580:
5059 case WM_T_82580ER: 5064 case WM_T_82580ER:
5060 case WM_T_I350: 5065 case WM_T_I350:
5061 case WM_T_I354: 5066 case WM_T_I354:
5062 case WM_T_I210: 5067 case WM_T_I210:
5063 case WM_T_I211: 5068 case WM_T_I211:
5064 if (sc->sc_type == WM_T_82571) { 5069 if (sc->sc_type == WM_T_82571) {
5065 /* Only 82571 shares port 0 */ 5070 /* Only 82571 shares port 0 */
5066 mask = EEMNGCTL_CFGDONE_0; 5071 mask = EEMNGCTL_CFGDONE_0;
5067 } else 5072 } else
5068 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid; 5073 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
5069 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) { 5074 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
5070 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask) 5075 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
5071 break; 5076 break;
5072 delay(1000); 5077 delay(1000);
5073 } 5078 }
5074 if (i >= WM_PHY_CFG_TIMEOUT) { 5079 if (i >= WM_PHY_CFG_TIMEOUT) {
5075 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n", 5080 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
5076 device_xname(sc->sc_dev), __func__)); 5081 device_xname(sc->sc_dev), __func__));
5077 } 5082 }
5078 break; 5083 break;
5079 case WM_T_ICH8: 5084 case WM_T_ICH8:
5080 case WM_T_ICH9: 5085 case WM_T_ICH9:
5081 case WM_T_ICH10: 5086 case WM_T_ICH10:
5082 case WM_T_PCH: 5087 case WM_T_PCH:
5083 case WM_T_PCH2: 5088 case WM_T_PCH2:
5084 case WM_T_PCH_LPT: 5089 case WM_T_PCH_LPT:
5085 delay(10*1000); 5090 delay(10*1000);
5086 if (sc->sc_type >= WM_T_ICH10) 5091 if (sc->sc_type >= WM_T_ICH10)
5087 wm_lan_init_done(sc); 5092 wm_lan_init_done(sc);
5088 else 5093 else
5089 wm_get_auto_rd_done(sc); 5094 wm_get_auto_rd_done(sc);
5090 5095
5091 reg = CSR_READ(sc, WMREG_STATUS); 5096 reg = CSR_READ(sc, WMREG_STATUS);
5092 if ((reg & STATUS_PHYRA) != 0) 5097 if ((reg & STATUS_PHYRA) != 0)
5093 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA); 5098 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
5094 break; 5099 break;
5095 default: 5100 default:
5096 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev), 5101 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
5097 __func__); 5102 __func__);
5098 break; 5103 break;
5099 } 5104 }
5100} 5105}
5101 5106
5102/* 5107/*
5103 * wm_acquire_eeprom: 5108 * wm_acquire_eeprom:
5104 * 5109 *
5105 * Perform the EEPROM handshake required on some chips. 5110 * Perform the EEPROM handshake required on some chips.
5106 */ 5111 */
5107static int 5112static int
5108wm_acquire_eeprom(struct wm_softc *sc) 5113wm_acquire_eeprom(struct wm_softc *sc)
5109{ 5114{
5110 uint32_t reg; 5115 uint32_t reg;
5111 int x; 5116 int x;
5112 int ret = 0; 5117 int ret = 0;
5113 5118
 5114 /* always succeeds */ 5119 /* always succeeds */
5115 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0) 5120 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
5116 return 0; 5121 return 0;
5117 5122
5118 if (sc->sc_flags & WM_F_SWFWHW_SYNC) { 5123 if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
5119 ret = wm_get_swfwhw_semaphore(sc); 5124 ret = wm_get_swfwhw_semaphore(sc);
5120 } else if (sc->sc_flags & WM_F_SWFW_SYNC) { 5125 } else if (sc->sc_flags & WM_F_SWFW_SYNC) {
5121 /* this will also do wm_get_swsm_semaphore() if needed */ 5126 /* this will also do wm_get_swsm_semaphore() if needed */
5122 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM); 5127 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
5123 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) { 5128 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5124 ret = wm_get_swsm_semaphore(sc); 5129 ret = wm_get_swsm_semaphore(sc);
5125 } 5130 }
5126 5131
5127 if (ret) { 5132 if (ret) {
5128 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 5133 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5129 __func__); 5134 __func__);
5130 return 1; 5135 return 1;
5131 } 5136 }
5132 5137
5133 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) { 5138 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
5134 reg = CSR_READ(sc, WMREG_EECD); 5139 reg = CSR_READ(sc, WMREG_EECD);
5135 5140
5136 /* Request EEPROM access. */ 5141 /* Request EEPROM access. */
5137 reg |= EECD_EE_REQ; 5142 reg |= EECD_EE_REQ;
5138 CSR_WRITE(sc, WMREG_EECD, reg); 5143 CSR_WRITE(sc, WMREG_EECD, reg);
5139 5144
5140 /* ..and wait for it to be granted. */ 5145 /* ..and wait for it to be granted. */
5141 for (x = 0; x < 1000; x++) { 5146 for (x = 0; x < 1000; x++) {
5142 reg = CSR_READ(sc, WMREG_EECD); 5147 reg = CSR_READ(sc, WMREG_EECD);
5143 if (reg & EECD_EE_GNT) 5148 if (reg & EECD_EE_GNT)
5144 break; 5149 break;
5145 delay(5); 5150 delay(5);
5146 } 5151 }
5147 if ((reg & EECD_EE_GNT) == 0) { 5152 if ((reg & EECD_EE_GNT) == 0) {
5148 aprint_error_dev(sc->sc_dev, 5153 aprint_error_dev(sc->sc_dev,
5149 "could not acquire EEPROM GNT\n"); 5154 "could not acquire EEPROM GNT\n");
5150 reg &= ~EECD_EE_REQ; 5155 reg &= ~EECD_EE_REQ;
5151 CSR_WRITE(sc, WMREG_EECD, reg); 5156 CSR_WRITE(sc, WMREG_EECD, reg);
5152 if (sc->sc_flags & WM_F_SWFWHW_SYNC) 5157 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5153 wm_put_swfwhw_semaphore(sc); 5158 wm_put_swfwhw_semaphore(sc);
5154 if (sc->sc_flags & WM_F_SWFW_SYNC) 5159 if (sc->sc_flags & WM_F_SWFW_SYNC)
5155 wm_put_swfw_semaphore(sc, SWFW_EEP_SM); 5160 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
5156 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) 5161 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5157 wm_put_swsm_semaphore(sc); 5162 wm_put_swsm_semaphore(sc);
5158 return 1; 5163 return 1;
5159 } 5164 }
5160 } 5165 }
5161 5166
5162 return 0; 5167 return 0;
5163} 5168}
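
Not part of the diff: callers are expected to bracket every NVM access with this acquire/release pair, exactly as wm_read_eeprom() further below does. A minimal sketch of that calling convention (the helper name here is hypothetical; the real entry point is wm_read_eeprom()):

	static int
	example_read_words(struct wm_softc *sc, int word, int cnt, uint16_t *data)
	{
		int rv;

		if (wm_acquire_eeprom(sc))	/* handshake/semaphore not granted */
			return 1;
		rv = wm_read_eeprom_uwire(sc, word, cnt, data);
		wm_release_eeprom(sc);
		return rv;
	}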
5164 5169
5165/* 5170/*
5166 * wm_release_eeprom: 5171 * wm_release_eeprom:
5167 * 5172 *
5168 * Release the EEPROM mutex. 5173 * Release the EEPROM mutex.
5169 */ 5174 */
5170static void 5175static void
5171wm_release_eeprom(struct wm_softc *sc) 5176wm_release_eeprom(struct wm_softc *sc)
5172{ 5177{
5173 uint32_t reg; 5178 uint32_t reg;
5174 5179
 5175 /* always succeeds */ 5180 /* always succeeds */
5176 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0) 5181 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
5177 return; 5182 return;
5178 5183
5179 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) { 5184 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
5180 reg = CSR_READ(sc, WMREG_EECD); 5185 reg = CSR_READ(sc, WMREG_EECD);
5181 reg &= ~EECD_EE_REQ; 5186 reg &= ~EECD_EE_REQ;
5182 CSR_WRITE(sc, WMREG_EECD, reg); 5187 CSR_WRITE(sc, WMREG_EECD, reg);
5183 } 5188 }
5184 5189
5185 if (sc->sc_flags & WM_F_SWFWHW_SYNC) 5190 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5186 wm_put_swfwhw_semaphore(sc); 5191 wm_put_swfwhw_semaphore(sc);
5187 if (sc->sc_flags & WM_F_SWFW_SYNC) 5192 if (sc->sc_flags & WM_F_SWFW_SYNC)
5188 wm_put_swfw_semaphore(sc, SWFW_EEP_SM); 5193 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
5189 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) 5194 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5190 wm_put_swsm_semaphore(sc); 5195 wm_put_swsm_semaphore(sc);
5191} 5196}
5192 5197
5193/* 5198/*
5194 * wm_eeprom_sendbits: 5199 * wm_eeprom_sendbits:
5195 * 5200 *
5196 * Send a series of bits to the EEPROM. 5201 * Send a series of bits to the EEPROM.
5197 */ 5202 */
5198static void 5203static void
5199wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits) 5204wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
5200{ 5205{
5201 uint32_t reg; 5206 uint32_t reg;
5202 int x; 5207 int x;
5203 5208
5204 reg = CSR_READ(sc, WMREG_EECD); 5209 reg = CSR_READ(sc, WMREG_EECD);
5205 5210
5206 for (x = nbits; x > 0; x--) { 5211 for (x = nbits; x > 0; x--) {
5207 if (bits & (1U << (x - 1))) 5212 if (bits & (1U << (x - 1)))
5208 reg |= EECD_DI; 5213 reg |= EECD_DI;
5209 else 5214 else
5210 reg &= ~EECD_DI; 5215 reg &= ~EECD_DI;
5211 CSR_WRITE(sc, WMREG_EECD, reg); 5216 CSR_WRITE(sc, WMREG_EECD, reg);
 5217 CSR_WRITE_FLUSH(sc);
5212 delay(2); 5218 delay(2);
5213 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK); 5219 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
 5220 CSR_WRITE_FLUSH(sc);
5214 delay(2); 5221 delay(2);
5215 CSR_WRITE(sc, WMREG_EECD, reg); 5222 CSR_WRITE(sc, WMREG_EECD, reg);
 5223 CSR_WRITE_FLUSH(sc);
5216 delay(2); 5224 delay(2);
5217 } 5225 }
5218} 5226}
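
Not part of the diff: the CSR_WRITE_FLUSH() calls added above are the completion barrier named in the commit message; they force the posted register write out to the chip before delay() starts timing the bit-bang clock. A minimal sketch of how such a flush is commonly implemented, assuming a dummy read of the STATUS register (the driver's real macro may differ):

	/*
	 * Hypothetical stand-in for CSR_WRITE_FLUSH(): reading any device
	 * register forces earlier posted PCI writes to complete first.
	 */
	#define EXAMPLE_WRITE_FLUSH(sc)	((void)CSR_READ((sc), WMREG_STATUS))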
5219 5227
5220/* 5228/*
5221 * wm_eeprom_recvbits: 5229 * wm_eeprom_recvbits:
5222 * 5230 *
5223 * Receive a series of bits from the EEPROM. 5231 * Receive a series of bits from the EEPROM.
5224 */ 5232 */
5225static void 5233static void
5226wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits) 5234wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
5227{ 5235{
5228 uint32_t reg, val; 5236 uint32_t reg, val;
5229 int x; 5237 int x;
5230 5238
5231 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI; 5239 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
5232 5240
5233 val = 0; 5241 val = 0;
5234 for (x = nbits; x > 0; x--) { 5242 for (x = nbits; x > 0; x--) {
5235 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK); 5243 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
 5244 CSR_WRITE_FLUSH(sc);
5236 delay(2); 5245 delay(2);
5237 if (CSR_READ(sc, WMREG_EECD) & EECD_DO) 5246 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
5238 val |= (1U << (x - 1)); 5247 val |= (1U << (x - 1));
5239 CSR_WRITE(sc, WMREG_EECD, reg); 5248 CSR_WRITE(sc, WMREG_EECD, reg);
 5249 CSR_WRITE_FLUSH(sc);
5240 delay(2); 5250 delay(2);
5241 } 5251 }
5242 *valp = val; 5252 *valp = val;
5243} 5253}
5244 5254
5245/* 5255/*
5246 * wm_read_eeprom_uwire: 5256 * wm_read_eeprom_uwire:
5247 * 5257 *
5248 * Read a word from the EEPROM using the MicroWire protocol. 5258 * Read a word from the EEPROM using the MicroWire protocol.
5249 */ 5259 */
5250static int 5260static int
5251wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data) 5261wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5252{ 5262{
5253 uint32_t reg, val; 5263 uint32_t reg, val;
5254 int i; 5264 int i;
5255 5265
5256 for (i = 0; i < wordcnt; i++) { 5266 for (i = 0; i < wordcnt; i++) {
5257 /* Clear SK and DI. */ 5267 /* Clear SK and DI. */
5258 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI); 5268 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
5259 CSR_WRITE(sc, WMREG_EECD, reg); 5269 CSR_WRITE(sc, WMREG_EECD, reg);
5260 5270
5261 /* 5271 /*
5262 * XXX: workaround for a bug in qemu-0.12.x and prior 5272 * XXX: workaround for a bug in qemu-0.12.x and prior
5263 * and Xen. 5273 * and Xen.
5264 * 5274 *
5265 * We use this workaround only for 82540 because qemu's 5275 * We use this workaround only for 82540 because qemu's
 5266 * e1000 acts as an 82540. 5276 * e1000 acts as an 82540.
5267 */ 5277 */
5268 if (sc->sc_type == WM_T_82540) { 5278 if (sc->sc_type == WM_T_82540) {
5269 reg |= EECD_SK; 5279 reg |= EECD_SK;
5270 CSR_WRITE(sc, WMREG_EECD, reg); 5280 CSR_WRITE(sc, WMREG_EECD, reg);
5271 reg &= ~EECD_SK; 5281 reg &= ~EECD_SK;
5272 CSR_WRITE(sc, WMREG_EECD, reg); 5282 CSR_WRITE(sc, WMREG_EECD, reg);
 5283 CSR_WRITE_FLUSH(sc);
5273 delay(2); 5284 delay(2);
5274 } 5285 }
5275 /* XXX: end of workaround */ 5286 /* XXX: end of workaround */
5276  5287
5277 /* Set CHIP SELECT. */ 5288 /* Set CHIP SELECT. */
5278 reg |= EECD_CS; 5289 reg |= EECD_CS;
5279 CSR_WRITE(sc, WMREG_EECD, reg); 5290 CSR_WRITE(sc, WMREG_EECD, reg);
 5291 CSR_WRITE_FLUSH(sc);
5280 delay(2); 5292 delay(2);
5281 5293
5282 /* Shift in the READ command. */ 5294 /* Shift in the READ command. */
5283 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3); 5295 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
5284 5296
5285 /* Shift in address. */ 5297 /* Shift in address. */
5286 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits); 5298 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
5287 5299
5288 /* Shift out the data. */ 5300 /* Shift out the data. */
5289 wm_eeprom_recvbits(sc, &val, 16); 5301 wm_eeprom_recvbits(sc, &val, 16);
5290 data[i] = val & 0xffff; 5302 data[i] = val & 0xffff;
5291 5303
5292 /* Clear CHIP SELECT. */ 5304 /* Clear CHIP SELECT. */
5293 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS; 5305 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
5294 CSR_WRITE(sc, WMREG_EECD, reg); 5306 CSR_WRITE(sc, WMREG_EECD, reg);
 5307 CSR_WRITE_FLUSH(sc);
5295 delay(2); 5308 delay(2);
5296 } 5309 }
5297 5310
5298 return 0; 5311 return 0;
5299} 5312}
5300 5313
5301/* 5314/*
5302 * wm_spi_eeprom_ready: 5315 * wm_spi_eeprom_ready:
5303 * 5316 *
5304 * Wait for a SPI EEPROM to be ready for commands. 5317 * Wait for a SPI EEPROM to be ready for commands.
5305 */ 5318 */
5306static int 5319static int
5307wm_spi_eeprom_ready(struct wm_softc *sc) 5320wm_spi_eeprom_ready(struct wm_softc *sc)
5308{ 5321{
5309 uint32_t val; 5322 uint32_t val;
5310 int usec; 5323 int usec;
5311 5324
5312 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) { 5325 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
5313 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8); 5326 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
5314 wm_eeprom_recvbits(sc, &val, 8); 5327 wm_eeprom_recvbits(sc, &val, 8);
5315 if ((val & SPI_SR_RDY) == 0) 5328 if ((val & SPI_SR_RDY) == 0)
5316 break; 5329 break;
5317 } 5330 }
5318 if (usec >= SPI_MAX_RETRIES) { 5331 if (usec >= SPI_MAX_RETRIES) {
5319 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n"); 5332 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
5320 return 1; 5333 return 1;
5321 } 5334 }
5322 return 0; 5335 return 0;
5323} 5336}
5324 5337
5325/* 5338/*
5326 * wm_read_eeprom_spi: 5339 * wm_read_eeprom_spi:
5327 * 5340 *
 5328 * Read a word from the EEPROM using the SPI protocol. 5341 * Read a word from the EEPROM using the SPI protocol.
5329 */ 5342 */
5330static int 5343static int
5331wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data) 5344wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5332{ 5345{
5333 uint32_t reg, val; 5346 uint32_t reg, val;
5334 int i; 5347 int i;
5335 uint8_t opc; 5348 uint8_t opc;
5336 5349
5337 /* Clear SK and CS. */ 5350 /* Clear SK and CS. */
5338 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS); 5351 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
5339 CSR_WRITE(sc, WMREG_EECD, reg); 5352 CSR_WRITE(sc, WMREG_EECD, reg);
 5353 CSR_WRITE_FLUSH(sc);
5340 delay(2); 5354 delay(2);
5341 5355
5342 if (wm_spi_eeprom_ready(sc)) 5356 if (wm_spi_eeprom_ready(sc))
5343 return 1; 5357 return 1;
5344 5358
5345 /* Toggle CS to flush commands. */ 5359 /* Toggle CS to flush commands. */
5346 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS); 5360 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
 5361 CSR_WRITE_FLUSH(sc);
5347 delay(2); 5362 delay(2);
5348 CSR_WRITE(sc, WMREG_EECD, reg); 5363 CSR_WRITE(sc, WMREG_EECD, reg);
 5364 CSR_WRITE_FLUSH(sc);
5349 delay(2); 5365 delay(2);
5350 5366
5351 opc = SPI_OPC_READ; 5367 opc = SPI_OPC_READ;
5352 if (sc->sc_ee_addrbits == 8 && word >= 128) 5368 if (sc->sc_ee_addrbits == 8 && word >= 128)
5353 opc |= SPI_OPC_A8; 5369 opc |= SPI_OPC_A8;
5354 5370
5355 wm_eeprom_sendbits(sc, opc, 8); 5371 wm_eeprom_sendbits(sc, opc, 8);
5356 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits); 5372 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
5357 5373
5358 for (i = 0; i < wordcnt; i++) { 5374 for (i = 0; i < wordcnt; i++) {
5359 wm_eeprom_recvbits(sc, &val, 16); 5375 wm_eeprom_recvbits(sc, &val, 16);
5360 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8); 5376 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
5361 } 5377 }
5362 5378
5363 /* Raise CS and clear SK. */ 5379 /* Raise CS and clear SK. */
5364 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS; 5380 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
5365 CSR_WRITE(sc, WMREG_EECD, reg); 5381 CSR_WRITE(sc, WMREG_EECD, reg);
 5382 CSR_WRITE_FLUSH(sc);
5366 delay(2); 5383 delay(2);
5367 5384
5368 return 0; 5385 return 0;
5369} 5386}
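	/*
	 * Worked example of the byte swap in the loop above (editor's
	 * illustration, not part of the diff):
	 *   val     == 0x3412  after the 16-bit MSB-first shift-in
	 *   data[i] == ((0x3412 >> 8) & 0xff) | ((0x3412 & 0xff) << 8)
	 *           == 0x0034 | 0x1200 == 0x1234
	 */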
5370 5387
5371#define NVM_CHECKSUM 0xBABA 5388#define NVM_CHECKSUM 0xBABA
5372#define EEPROM_SIZE 0x0040 5389#define EEPROM_SIZE 0x0040
5373#define NVM_COMPAT 0x0003 5390#define NVM_COMPAT 0x0003
5374#define NVM_COMPAT_VALID_CHECKSUM 0x0001 5391#define NVM_COMPAT_VALID_CHECKSUM 0x0001
5375#define NVM_FUTURE_INIT_WORD1 0x0019 5392#define NVM_FUTURE_INIT_WORD1 0x0019
5376#define NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM 0x0040 5393#define NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM 0x0040
5377 5394
5378/* 5395/*
5379 * wm_validate_eeprom_checksum 5396 * wm_validate_eeprom_checksum
5380 * 5397 *
5381 * The checksum is defined as the sum of the first 64 (16 bit) words. 5398 * The checksum is defined as the sum of the first 64 (16 bit) words.
5382 */ 5399 */
5383static int 5400static int
5384wm_validate_eeprom_checksum(struct wm_softc *sc) 5401wm_validate_eeprom_checksum(struct wm_softc *sc)
5385{ 5402{
5386 uint16_t checksum; 5403 uint16_t checksum;
5387 uint16_t eeprom_data; 5404 uint16_t eeprom_data;
5388#ifdef WM_DEBUG 5405#ifdef WM_DEBUG
5389 uint16_t csum_wordaddr, valid_checksum; 5406 uint16_t csum_wordaddr, valid_checksum;
5390#endif 5407#endif
5391 int i; 5408 int i;
5392 5409
5393 checksum = 0; 5410 checksum = 0;
5394 5411
5395 /* Don't check for I211 */ 5412 /* Don't check for I211 */
5396 if (sc->sc_type == WM_T_I211) 5413 if (sc->sc_type == WM_T_I211)
5397 return 0; 5414 return 0;
5398 5415
5399#ifdef WM_DEBUG 5416#ifdef WM_DEBUG
5400 if (sc->sc_type == WM_T_PCH_LPT) { 5417 if (sc->sc_type == WM_T_PCH_LPT) {
5401 csum_wordaddr = NVM_COMPAT; 5418 csum_wordaddr = NVM_COMPAT;
5402 valid_checksum = NVM_COMPAT_VALID_CHECKSUM; 5419 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
5403 } else { 5420 } else {
5404 csum_wordaddr = NVM_FUTURE_INIT_WORD1; 5421 csum_wordaddr = NVM_FUTURE_INIT_WORD1;
5405 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM; 5422 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
5406 } 5423 }
5407 5424
5408 /* Dump EEPROM image for debug */ 5425 /* Dump EEPROM image for debug */
5409 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) 5426 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5410 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) 5427 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5411 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) { 5428 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
5412 wm_read_eeprom(sc, csum_wordaddr, 1, &eeprom_data); 5429 wm_read_eeprom(sc, csum_wordaddr, 1, &eeprom_data);
5413 if ((eeprom_data & valid_checksum) == 0) { 5430 if ((eeprom_data & valid_checksum) == 0) {
5414 DPRINTF(WM_DEBUG_NVM, 5431 DPRINTF(WM_DEBUG_NVM,
5415 ("%s: NVM need to be updated (%04x != %04x)\n", 5432 ("%s: NVM need to be updated (%04x != %04x)\n",
5416 device_xname(sc->sc_dev), eeprom_data, 5433 device_xname(sc->sc_dev), eeprom_data,
5417 valid_checksum)); 5434 valid_checksum));
5418 } 5435 }
5419 } 5436 }
5420 5437
5421 if ((wm_debug & WM_DEBUG_NVM) != 0) { 5438 if ((wm_debug & WM_DEBUG_NVM) != 0) {
5422 printf("%s: NVM dump:\n", device_xname(sc->sc_dev)); 5439 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
5423 for (i = 0; i < EEPROM_SIZE; i++) { 5440 for (i = 0; i < EEPROM_SIZE; i++) {
5424 if (wm_read_eeprom(sc, i, 1, &eeprom_data)) 5441 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
5425 printf("XX "); 5442 printf("XX ");
5426 else 5443 else
5427 printf("%04x ", eeprom_data); 5444 printf("%04x ", eeprom_data);
5428 if (i % 8 == 7) 5445 if (i % 8 == 7)
5429 printf("\n"); 5446 printf("\n");
5430 } 5447 }
5431 } 5448 }
5432 5449
5433#endif /* WM_DEBUG */ 5450#endif /* WM_DEBUG */
5434 5451
5435 for (i = 0; i < EEPROM_SIZE; i++) { 5452 for (i = 0; i < EEPROM_SIZE; i++) {
5436 if (wm_read_eeprom(sc, i, 1, &eeprom_data)) 5453 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
5437 return 1; 5454 return 1;
5438 checksum += eeprom_data; 5455 checksum += eeprom_data;
5439 } 5456 }
5440 5457
5441 if (checksum != (uint16_t) NVM_CHECKSUM) { 5458 if (checksum != (uint16_t) NVM_CHECKSUM) {
5442#ifdef WM_DEBUG 5459#ifdef WM_DEBUG
5443 printf("%s: NVM checksum mismatch (%04x != %04x)\n", 5460 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
5444 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM); 5461 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
5445#endif 5462#endif
5446 } 5463 }
5447 5464
5448 return 0; 5465 return 0;
5449} 5466}
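
Not part of the diff: the rule in the comment above ("sum of the first 64 16-bit words") reduces to a 16-bit wrap-around sum that must equal NVM_CHECKSUM (0xBABA). A standalone sketch of that check, with a hypothetical helper name (the driver's real check is the read loop above):

	static int
	example_nvm_checksum_ok(const uint16_t *words)
	{
		uint16_t sum = 0;
		int i;

		for (i = 0; i < 0x40; i++)	/* EEPROM_SIZE words */
			sum += words[i];	/* wraps naturally at 16 bits */
		return sum == 0xBABA;		/* NVM_CHECKSUM */
	}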
5450 5467
5451/* 5468/*
5452 * wm_read_eeprom: 5469 * wm_read_eeprom:
5453 * 5470 *
5454 * Read data from the serial EEPROM. 5471 * Read data from the serial EEPROM.
5455 */ 5472 */
5456static int 5473static int
5457wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data) 5474wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5458{ 5475{
5459 int rv; 5476 int rv;
5460 5477
5461 if (sc->sc_flags & WM_F_EEPROM_INVALID) 5478 if (sc->sc_flags & WM_F_EEPROM_INVALID)
5462 return 1; 5479 return 1;
5463 5480
5464 if (wm_acquire_eeprom(sc)) 5481 if (wm_acquire_eeprom(sc))
5465 return 1; 5482 return 1;
5466 5483
5467 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) 5484 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5468 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) 5485 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5469 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) 5486 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
5470 rv = wm_read_eeprom_ich8(sc, word, wordcnt, data); 5487 rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
5471 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR) 5488 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
5472 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data); 5489 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
5473 else if (sc->sc_flags & WM_F_EEPROM_SPI) 5490 else if (sc->sc_flags & WM_F_EEPROM_SPI)
5474 rv = wm_read_eeprom_spi(sc, word, wordcnt, data); 5491 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
5475 else 5492 else
5476 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data); 5493 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
5477 5494
5478 wm_release_eeprom(sc); 5495 wm_release_eeprom(sc);
5479 return rv; 5496 return rv;
5480} 5497}
5481 5498
5482static int 5499static int
5483wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt, 5500wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
5484 uint16_t *data) 5501 uint16_t *data)
5485{ 5502{
5486 int i, eerd = 0; 5503 int i, eerd = 0;
5487 int error = 0; 5504 int error = 0;
5488 5505
5489 for (i = 0; i < wordcnt; i++) { 5506 for (i = 0; i < wordcnt; i++) {
5490 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START; 5507 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
5491 5508
5492 CSR_WRITE(sc, WMREG_EERD, eerd); 5509 CSR_WRITE(sc, WMREG_EERD, eerd);
5493 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD); 5510 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
5494 if (error != 0) 5511 if (error != 0)
5495 break; 5512 break;
5496 5513
5497 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT); 5514 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
5498 } 5515 }
5499 5516
5500 return error; 5517 return error;
5501} 5518}
5502 5519
5503static int 5520static int
5504wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw) 5521wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
5505{ 5522{
5506 uint32_t attempts = 100000; 5523 uint32_t attempts = 100000;
5507 uint32_t i, reg = 0; 5524 uint32_t i, reg = 0;
5508 int32_t done = -1; 5525 int32_t done = -1;
5509 5526
5510 for (i = 0; i < attempts; i++) { 5527 for (i = 0; i < attempts; i++) {
5511 reg = CSR_READ(sc, rw); 5528 reg = CSR_READ(sc, rw);
5512 5529
5513 if (reg & EERD_DONE) { 5530 if (reg & EERD_DONE) {
5514 done = 0; 5531 done = 0;
5515 break; 5532 break;
5516 } 5533 }
5517 delay(5); 5534 delay(5);
5518 } 5535 }
5519 5536
5520 return done; 5537 return done;
5521} 5538}
5522 5539
5523static int 5540static int
5524wm_check_alt_mac_addr(struct wm_softc *sc) 5541wm_check_alt_mac_addr(struct wm_softc *sc)
5525{ 5542{
5526 uint16_t myea[ETHER_ADDR_LEN / 2]; 5543 uint16_t myea[ETHER_ADDR_LEN / 2];
5527 uint16_t offset = EEPROM_OFF_MACADDR; 5544 uint16_t offset = EEPROM_OFF_MACADDR;
5528 5545
5529 /* Try to read alternative MAC address pointer */ 5546 /* Try to read alternative MAC address pointer */
5530 if (wm_read_eeprom(sc, EEPROM_ALT_MAC_ADDR_PTR, 1, &offset) != 0) 5547 if (wm_read_eeprom(sc, EEPROM_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
5531 return -1; 5548 return -1;
5532 5549
5533 /* Check pointer */ 5550 /* Check pointer */
5534 if (offset == 0xffff) 5551 if (offset == 0xffff)
5535 return -1; 5552 return -1;
5536 5553
5537 /* 5554 /*
5538 * Check whether alternative MAC address is valid or not. 5555 * Check whether alternative MAC address is valid or not.
 5539 * Some cards have a non-0xffff pointer but don't actually 5556 * Some cards have a non-0xffff pointer but don't actually
 5540 * use an alternative MAC address. 5557 * use an alternative MAC address.
5541 * 5558 *
5542 * Check whether the broadcast bit is set or not. 5559 * Check whether the broadcast bit is set or not.
5543 */ 5560 */
5544 if (wm_read_eeprom(sc, offset, 1, myea) == 0) 5561 if (wm_read_eeprom(sc, offset, 1, myea) == 0)
5545 if (((myea[0] & 0xff) & 0x01) == 0) 5562 if (((myea[0] & 0xff) & 0x01) == 0)
5546 return 0; /* found! */ 5563 return 0; /* found! */
5547 5564
5548 /* not found */ 5565 /* not found */
5549 return -1; 5566 return -1;
5550} 5567}
5551 5568
5552static int 5569static int
5553wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr) 5570wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
5554{ 5571{
5555 uint16_t myea[ETHER_ADDR_LEN / 2]; 5572 uint16_t myea[ETHER_ADDR_LEN / 2];
5556 uint16_t offset = EEPROM_OFF_MACADDR; 5573 uint16_t offset = EEPROM_OFF_MACADDR;
5557 int do_invert = 0; 5574 int do_invert = 0;
5558 5575
5559 switch (sc->sc_type) { 5576 switch (sc->sc_type) {
5560 case WM_T_82580: 5577 case WM_T_82580:
5561 case WM_T_82580ER: 5578 case WM_T_82580ER:
5562 case WM_T_I350: 5579 case WM_T_I350:
5563 case WM_T_I354: 5580 case WM_T_I354:
5564 switch (sc->sc_funcid) { 5581 switch (sc->sc_funcid) {
5565 case 0: 5582 case 0:
5566 /* default value (== EEPROM_OFF_MACADDR) */ 5583 /* default value (== EEPROM_OFF_MACADDR) */
5567 break; 5584 break;
5568 case 1: 5585 case 1:
5569 offset = EEPROM_OFF_LAN1; 5586 offset = EEPROM_OFF_LAN1;
5570 break; 5587 break;
5571 case 2: 5588 case 2:
5572 offset = EEPROM_OFF_LAN2; 5589 offset = EEPROM_OFF_LAN2;
5573 break; 5590 break;
5574 case 3: 5591 case 3:
5575 offset = EEPROM_OFF_LAN3; 5592 offset = EEPROM_OFF_LAN3;
5576 break; 5593 break;
5577 default: 5594 default:
5578 goto bad; 5595 goto bad;
5579 /* NOTREACHED */ 5596 /* NOTREACHED */
5580 break; 5597 break;
5581 } 5598 }
5582 break; 5599 break;
5583 case WM_T_82571: 5600 case WM_T_82571:
5584 case WM_T_82575: 5601 case WM_T_82575:
5585 case WM_T_82576: 5602 case WM_T_82576:
5586 case WM_T_80003: 5603 case WM_T_80003:
5587 case WM_T_I210: 5604 case WM_T_I210:
5588 case WM_T_I211: 5605 case WM_T_I211:
5589 if (wm_check_alt_mac_addr(sc) != 0) { 5606 if (wm_check_alt_mac_addr(sc) != 0) {
5590 /* reset the offset to LAN0 */ 5607 /* reset the offset to LAN0 */
5591 offset = EEPROM_OFF_MACADDR; 5608 offset = EEPROM_OFF_MACADDR;
5592 if ((sc->sc_funcid & 0x01) == 1) 5609 if ((sc->sc_funcid & 0x01) == 1)
5593 do_invert = 1; 5610 do_invert = 1;
5594 goto do_read; 5611 goto do_read;
5595 } 5612 }
5596 switch (sc->sc_funcid) { 5613 switch (sc->sc_funcid) {
5597 case 0: 5614 case 0:
5598 /* 5615 /*
5599 * The offset is the value in EEPROM_ALT_MAC_ADDR_PTR 5616 * The offset is the value in EEPROM_ALT_MAC_ADDR_PTR
5600 * itself. 5617 * itself.
5601 */ 5618 */
5602 break; 5619 break;
5603 case 1: 5620 case 1:
5604 offset += EEPROM_OFF_MACADDR_LAN1; 5621 offset += EEPROM_OFF_MACADDR_LAN1;
5605 break; 5622 break;
5606 case 2: 5623 case 2:
5607 offset += EEPROM_OFF_MACADDR_LAN2; 5624 offset += EEPROM_OFF_MACADDR_LAN2;
5608 break; 5625 break;
5609 case 3: 5626 case 3:
5610 offset += EEPROM_OFF_MACADDR_LAN3; 5627 offset += EEPROM_OFF_MACADDR_LAN3;
5611 break; 5628 break;
5612 default: 5629 default:
5613 goto bad; 5630 goto bad;
5614 /* NOTREACHED */ 5631 /* NOTREACHED */
5615 break; 5632 break;
5616 } 5633 }
5617 break; 5634 break;
5618 default: 5635 default:
5619 if ((sc->sc_funcid & 0x01) == 1) 5636 if ((sc->sc_funcid & 0x01) == 1)
5620 do_invert = 1; 5637 do_invert = 1;
5621 break; 5638 break;
5622 } 5639 }
5623 5640
5624 do_read: 5641 do_read:
5625 if (wm_read_eeprom(sc, offset, sizeof(myea) / sizeof(myea[0]), 5642 if (wm_read_eeprom(sc, offset, sizeof(myea) / sizeof(myea[0]),
5626 myea) != 0) { 5643 myea) != 0) {
5627 goto bad; 5644 goto bad;
5628 } 5645 }
5629 5646
5630 enaddr[0] = myea[0] & 0xff; 5647 enaddr[0] = myea[0] & 0xff;
5631 enaddr[1] = myea[0] >> 8; 5648 enaddr[1] = myea[0] >> 8;
5632 enaddr[2] = myea[1] & 0xff; 5649 enaddr[2] = myea[1] & 0xff;
5633 enaddr[3] = myea[1] >> 8; 5650 enaddr[3] = myea[1] >> 8;
5634 enaddr[4] = myea[2] & 0xff; 5651 enaddr[4] = myea[2] & 0xff;
5635 enaddr[5] = myea[2] >> 8; 5652 enaddr[5] = myea[2] >> 8;
5636 5653
5637 /* 5654 /*
5638 * Toggle the LSB of the MAC address on the second port 5655 * Toggle the LSB of the MAC address on the second port
5639 * of some dual port cards. 5656 * of some dual port cards.
5640 */ 5657 */
5641 if (do_invert != 0) 5658 if (do_invert != 0)
5642 enaddr[5] ^= 1; 5659 enaddr[5] ^= 1;
5643 5660
5644 return 0; 5661 return 0;
5645 5662
5646 bad: 5663 bad:
5647 aprint_error_dev(sc->sc_dev, "unable to read Ethernet address\n"); 5664 aprint_error_dev(sc->sc_dev, "unable to read Ethernet address\n");
5648 5665
5649 return -1; 5666 return -1;
5650} 5667}
5651 5668
5652/* 5669/*
5653 * wm_add_rxbuf: 5670 * wm_add_rxbuf:
5654 * 5671 *
 5655 * Add a receive buffer to the indicated descriptor. 5672 * Add a receive buffer to the indicated descriptor.
5656 */ 5673 */
5657static int 5674static int
5658wm_add_rxbuf(struct wm_softc *sc, int idx) 5675wm_add_rxbuf(struct wm_softc *sc, int idx)
5659{ 5676{
5660 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx]; 5677 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
5661 struct mbuf *m; 5678 struct mbuf *m;
5662 int error; 5679 int error;
5663 5680
5664 MGETHDR(m, M_DONTWAIT, MT_DATA); 5681 MGETHDR(m, M_DONTWAIT, MT_DATA);
5665 if (m == NULL) 5682 if (m == NULL)
5666 return ENOBUFS; 5683 return ENOBUFS;
5667 5684
5668 MCLGET(m, M_DONTWAIT); 5685 MCLGET(m, M_DONTWAIT);
5669 if ((m->m_flags & M_EXT) == 0) { 5686 if ((m->m_flags & M_EXT) == 0) {
5670 m_freem(m); 5687 m_freem(m);
5671 return ENOBUFS; 5688 return ENOBUFS;
5672 } 5689 }
5673 5690
5674 if (rxs->rxs_mbuf != NULL) 5691 if (rxs->rxs_mbuf != NULL)
5675 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 5692 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5676 5693
5677 rxs->rxs_mbuf = m; 5694 rxs->rxs_mbuf = m;
5678 5695
5679 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 5696 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5680 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m, 5697 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
5681 BUS_DMA_READ|BUS_DMA_NOWAIT); 5698 BUS_DMA_READ|BUS_DMA_NOWAIT);
5682 if (error) { 5699 if (error) {
5683 /* XXX XXX XXX */ 5700 /* XXX XXX XXX */
5684 aprint_error_dev(sc->sc_dev, 5701 aprint_error_dev(sc->sc_dev,
5685 "unable to load rx DMA map %d, error = %d\n", 5702 "unable to load rx DMA map %d, error = %d\n",
5686 idx, error); 5703 idx, error);
5687 panic("wm_add_rxbuf"); 5704 panic("wm_add_rxbuf");
5688 } 5705 }
5689 5706
5690 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 5707 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5691 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 5708 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5692 5709
5693 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { 5710 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5694 if ((sc->sc_rctl & RCTL_EN) != 0) 5711 if ((sc->sc_rctl & RCTL_EN) != 0)
5695 WM_INIT_RXDESC(sc, idx); 5712 WM_INIT_RXDESC(sc, idx);
5696 } else 5713 } else
5697 WM_INIT_RXDESC(sc, idx); 5714 WM_INIT_RXDESC(sc, idx);
5698 5715
5699 return 0; 5716 return 0;
5700} 5717}
5701 5718
5702/* 5719/*
5703 * wm_set_ral: 5720 * wm_set_ral:
5704 * 5721 *
 5705 * Set an entry in the receive address list. 5722 * Set an entry in the receive address list.
5706 */ 5723 */
5707static void 5724static void
5708wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx) 5725wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
5709{ 5726{
5710 uint32_t ral_lo, ral_hi; 5727 uint32_t ral_lo, ral_hi;
5711 5728
5712 if (enaddr != NULL) { 5729 if (enaddr != NULL) {
5713 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) | 5730 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
5714 (enaddr[3] << 24); 5731 (enaddr[3] << 24);
5715 ral_hi = enaddr[4] | (enaddr[5] << 8); 5732 ral_hi = enaddr[4] | (enaddr[5] << 8);
5716 ral_hi |= RAL_AV; 5733 ral_hi |= RAL_AV;
5717 } else { 5734 } else {
5718 ral_lo = 0; 5735 ral_lo = 0;
5719 ral_hi = 0; 5736 ral_hi = 0;
5720 } 5737 }
5721 5738
5722 if (sc->sc_type >= WM_T_82544) { 5739 if (sc->sc_type >= WM_T_82544) {
5723 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx), 5740 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
5724 ral_lo); 5741 ral_lo);
5725 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx), 5742 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
5726 ral_hi); 5743 ral_hi);
5727 } else { 5744 } else {
5728 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo); 5745 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
5729 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi); 5746 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
5730 } 5747 }
5731} 5748}
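	/*
	 * Worked example of the RAL packing above (editor's illustration,
	 * not part of the diff): for the address 00:11:22:33:44:55,
	 *   ral_lo == 0x33221100
	 *   ral_hi == 0x5544 | RAL_AV	(RAL_AV marks the slot valid)
	 */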
5732 5749
5733/* 5750/*
5734 * wm_mchash: 5751 * wm_mchash:
5735 * 5752 *
5736 * Compute the hash of the multicast address for the 4096-bit 5753 * Compute the hash of the multicast address for the 4096-bit
5737 * multicast filter. 5754 * multicast filter.
5738 */ 5755 */
5739static uint32_t 5756static uint32_t
5740wm_mchash(struct wm_softc *sc, const uint8_t *enaddr) 5757wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
5741{ 5758{
5742 static const int lo_shift[4] = { 4, 3, 2, 0 }; 5759 static const int lo_shift[4] = { 4, 3, 2, 0 };
5743 static const int hi_shift[4] = { 4, 5, 6, 8 }; 5760 static const int hi_shift[4] = { 4, 5, 6, 8 };
5744 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 }; 5761 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
5745 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 }; 5762 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
5746 uint32_t hash; 5763 uint32_t hash;
5747 5764
5748 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) 5765 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5749 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) 5766 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5750 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) { 5767 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
5751 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) | 5768 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
5752 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]); 5769 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
5753 return (hash & 0x3ff); 5770 return (hash & 0x3ff);
5754 } 5771 }
5755 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) | 5772 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
5756 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]); 5773 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
5757 5774
5758 return (hash & 0xfff); 5775 return (hash & 0xfff);
5759} 5776}
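	/*
	 * Worked example (editor's illustration, not part of the diff):
	 * with sc_mchash_type == 0 on a non-ICH chip (lo_shift 4, hi_shift 4),
	 * the address 01:00:5e:00:00:fb has enaddr[4] == 0x00, enaddr[5] == 0xfb:
	 *   hash = (0x00 >> 4) | (0xfb << 4) = 0xfb0	(masked to 12 bits)
	 */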
5760 5777
5761/* 5778/*
5762 * wm_set_filter: 5779 * wm_set_filter:
5763 * 5780 *
5764 * Set up the receive filter. 5781 * Set up the receive filter.
5765 */ 5782 */
5766static void 5783static void
5767wm_set_filter(struct wm_softc *sc) 5784wm_set_filter(struct wm_softc *sc)
5768{ 5785{
5769 struct ethercom *ec = &sc->sc_ethercom; 5786 struct ethercom *ec = &sc->sc_ethercom;
5770 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 5787 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5771 struct ether_multi *enm; 5788 struct ether_multi *enm;
5772 struct ether_multistep step; 5789 struct ether_multistep step;
5773 bus_addr_t mta_reg; 5790 bus_addr_t mta_reg;
5774 uint32_t hash, reg, bit; 5791 uint32_t hash, reg, bit;
5775 int i, size; 5792 int i, size;
5776 5793
5777 if (sc->sc_type >= WM_T_82544) 5794 if (sc->sc_type >= WM_T_82544)
5778 mta_reg = WMREG_CORDOVA_MTA; 5795 mta_reg = WMREG_CORDOVA_MTA;
5779 else 5796 else
5780 mta_reg = WMREG_MTA; 5797 mta_reg = WMREG_MTA;
5781 5798
5782 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE); 5799 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
5783 5800
5784 if (ifp->if_flags & IFF_BROADCAST) 5801 if (ifp->if_flags & IFF_BROADCAST)
5785 sc->sc_rctl |= RCTL_BAM; 5802 sc->sc_rctl |= RCTL_BAM;
5786 if (ifp->if_flags & IFF_PROMISC) { 5803 if (ifp->if_flags & IFF_PROMISC) {
5787 sc->sc_rctl |= RCTL_UPE; 5804 sc->sc_rctl |= RCTL_UPE;
5788 goto allmulti; 5805 goto allmulti;
5789 } 5806 }
5790 5807
5791 /* 5808 /*
5792 * Set the station address in the first RAL slot, and 5809 * Set the station address in the first RAL slot, and
5793 * clear the remaining slots. 5810 * clear the remaining slots.
5794 */ 5811 */
5795 if (sc->sc_type == WM_T_ICH8) 5812 if (sc->sc_type == WM_T_ICH8)
5796 size = WM_RAL_TABSIZE_ICH8 -1; 5813 size = WM_RAL_TABSIZE_ICH8 -1;
5797 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10) 5814 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
5798 || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2) 5815 || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
5799 || (sc->sc_type == WM_T_PCH_LPT)) 5816 || (sc->sc_type == WM_T_PCH_LPT))
5800 size = WM_RAL_TABSIZE_ICH8; 5817 size = WM_RAL_TABSIZE_ICH8;
5801 else if (sc->sc_type == WM_T_82575) 5818 else if (sc->sc_type == WM_T_82575)
5802 size = WM_RAL_TABSIZE_82575; 5819 size = WM_RAL_TABSIZE_82575;
5803 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580)) 5820 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
5804 size = WM_RAL_TABSIZE_82576; 5821 size = WM_RAL_TABSIZE_82576;
5805 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) 5822 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
5806 size = WM_RAL_TABSIZE_I350; 5823 size = WM_RAL_TABSIZE_I350;
5807 else 5824 else
5808 size = WM_RAL_TABSIZE; 5825 size = WM_RAL_TABSIZE;
5809 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0); 5826 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
5810 for (i = 1; i < size; i++) 5827 for (i = 1; i < size; i++)
5811 wm_set_ral(sc, NULL, i); 5828 wm_set_ral(sc, NULL, i);
5812 5829
5813 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) 5830 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5814 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) 5831 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5815 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) 5832 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
5816 size = WM_ICH8_MC_TABSIZE; 5833 size = WM_ICH8_MC_TABSIZE;
5817 else 5834 else
5818 size = WM_MC_TABSIZE; 5835 size = WM_MC_TABSIZE;
5819 /* Clear out the multicast table. */ 5836 /* Clear out the multicast table. */
5820 for (i = 0; i < size; i++) 5837 for (i = 0; i < size; i++)
5821 CSR_WRITE(sc, mta_reg + (i << 2), 0); 5838 CSR_WRITE(sc, mta_reg + (i << 2), 0);
5822 5839
5823 ETHER_FIRST_MULTI(step, ec, enm); 5840 ETHER_FIRST_MULTI(step, ec, enm);
5824 while (enm != NULL) { 5841 while (enm != NULL) {
5825 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 5842 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
5826 /* 5843 /*
5827 * We must listen to a range of multicast addresses. 5844 * We must listen to a range of multicast addresses.
5828 * For now, just accept all multicasts, rather than 5845 * For now, just accept all multicasts, rather than
5829 * trying to set only those filter bits needed to match 5846 * trying to set only those filter bits needed to match
5830 * the range. (At this time, the only use of address 5847 * the range. (At this time, the only use of address
5831 * ranges is for IP multicast routing, for which the 5848 * ranges is for IP multicast routing, for which the
5832 * range is big enough to require all bits set.) 5849 * range is big enough to require all bits set.)
5833 */ 5850 */
5834 goto allmulti; 5851 goto allmulti;
5835 } 5852 }
5836 5853
5837 hash = wm_mchash(sc, enm->enm_addrlo); 5854 hash = wm_mchash(sc, enm->enm_addrlo);
5838 5855
5839 reg = (hash >> 5); 5856 reg = (hash >> 5);
5840 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) 5857 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5841 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) 5858 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5842 || (sc->sc_type == WM_T_PCH2) 5859 || (sc->sc_type == WM_T_PCH2)
5843 || (sc->sc_type == WM_T_PCH_LPT)) 5860 || (sc->sc_type == WM_T_PCH_LPT))
5844 reg &= 0x1f; 5861 reg &= 0x1f;
5845 else 5862 else
5846 reg &= 0x7f; 5863 reg &= 0x7f;
5847 bit = hash & 0x1f; 5864 bit = hash & 0x1f;
5848 5865
5849 hash = CSR_READ(sc, mta_reg + (reg << 2)); 5866 hash = CSR_READ(sc, mta_reg + (reg << 2));
5850 hash |= 1U << bit; 5867 hash |= 1U << bit;
5851 5868
5852 /* XXX Hardware bug?? */ 5869 /* XXX Hardware bug?? */
5853 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) { 5870 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
5854 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2)); 5871 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
5855 CSR_WRITE(sc, mta_reg + (reg << 2), hash); 5872 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5856 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit); 5873 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
5857 } else 5874 } else
5858 CSR_WRITE(sc, mta_reg + (reg << 2), hash); 5875 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5859 5876
5860 ETHER_NEXT_MULTI(step, enm); 5877 ETHER_NEXT_MULTI(step, enm);
5861 } 5878 }
5862 5879
5863 ifp->if_flags &= ~IFF_ALLMULTI; 5880 ifp->if_flags &= ~IFF_ALLMULTI;
5864 goto setit; 5881 goto setit;
5865 5882
5866 allmulti: 5883 allmulti:
5867 ifp->if_flags |= IFF_ALLMULTI; 5884 ifp->if_flags |= IFF_ALLMULTI;
5868 sc->sc_rctl |= RCTL_MPE; 5885 sc->sc_rctl |= RCTL_MPE;
5869 5886
5870 setit: 5887 setit:
5871 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl); 5888 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
5872} 5889}
5873 5890
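wm_set_filter() above maps each hash onto the multicast table array: the upper bits select one of the 32-bit MTA registers (128 of them, or 32 on ICH/PCH parts) and the low five bits select the bit within it; on 82544 the adjacent register is rewritten as well to dodge an apparent hardware bug. A minimal sketch of the indexing, with hypothetical names (the real table lives in device registers, not memory):

#include <stdint.h>

/* mta[] stands in for the 32-bit multicast table registers; nregs must be
 * a power of two (128, or 32 on the ICH/PCH variants). */
static void
mta_set_bit(uint32_t *mta, uint32_t nregs, uint32_t hash)
{
	uint32_t reg = (hash >> 5) & (nregs - 1);	/* which MTA register */
	uint32_t bit = hash & 0x1f;			/* which bit within it */

	mta[reg] |= 1U << bit;
}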
5874/* 5891/*
5875 * wm_tbi_mediainit: 5892 * wm_tbi_mediainit:
5876 * 5893 *
5877 * Initialize media for use on 1000BASE-X devices. 5894 * Initialize media for use on 1000BASE-X devices.
5878 */ 5895 */
5879static void 5896static void
5880wm_tbi_mediainit(struct wm_softc *sc) 5897wm_tbi_mediainit(struct wm_softc *sc)
5881{ 5898{
5882 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 5899 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5883 const char *sep = ""; 5900 const char *sep = "";
5884 5901
5885 if (sc->sc_type < WM_T_82543) 5902 if (sc->sc_type < WM_T_82543)
5886 sc->sc_tipg = TIPG_WM_DFLT; 5903 sc->sc_tipg = TIPG_WM_DFLT;
5887 else 5904 else
5888 sc->sc_tipg = TIPG_LG_DFLT; 5905 sc->sc_tipg = TIPG_LG_DFLT;
5889 5906
5890 sc->sc_tbi_anegticks = 5; 5907 sc->sc_tbi_anegticks = 5;
5891 5908
5892 /* Initialize our media structures */ 5909 /* Initialize our media structures */
5893 sc->sc_mii.mii_ifp = ifp; 5910 sc->sc_mii.mii_ifp = ifp;
5894 5911
5895 sc->sc_ethercom.ec_mii = &sc->sc_mii; 5912 sc->sc_ethercom.ec_mii = &sc->sc_mii;
5896 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange, 5913 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
5897 wm_tbi_mediastatus); 5914 wm_tbi_mediastatus);
5898 5915
5899 /* 5916 /*
5900 * SWD Pins: 5917 * SWD Pins:
5901 * 5918 *
5902 * 0 = Link LED (output) 5919 * 0 = Link LED (output)
5903 * 1 = Loss Of Signal (input) 5920 * 1 = Loss Of Signal (input)
5904 */ 5921 */
5905 sc->sc_ctrl |= CTRL_SWDPIO(0); 5922 sc->sc_ctrl |= CTRL_SWDPIO(0);
5906 sc->sc_ctrl &= ~CTRL_SWDPIO(1); 5923 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
5907 5924
5908 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 5925 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5909 5926
5910#define ADD(ss, mm, dd) \ 5927#define ADD(ss, mm, dd) \
5911do { \ 5928do { \
5912 aprint_normal("%s%s", sep, ss); \ 5929 aprint_normal("%s%s", sep, ss); \
5913 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \ 5930 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
5914 sep = ", "; \ 5931 sep = ", "; \
5915} while (/*CONSTCOND*/0) 5932} while (/*CONSTCOND*/0)
5916 5933
5917 aprint_normal_dev(sc->sc_dev, ""); 5934 aprint_normal_dev(sc->sc_dev, "");
5918 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD); 5935 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
5919 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD); 5936 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
5920 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD); 5937 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
5921 aprint_normal("\n"); 5938 aprint_normal("\n");
5922 5939
5923#undef ADD 5940#undef ADD
5924 5941
5925 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO); 5942 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
5926} 5943}
5927 5944
5928/* 5945/*
5929 * wm_tbi_mediastatus: [ifmedia interface function] 5946 * wm_tbi_mediastatus: [ifmedia interface function]
5930 * 5947 *
5931 * Get the current interface media status on a 1000BASE-X device. 5948 * Get the current interface media status on a 1000BASE-X device.
5932 */ 5949 */
5933static void 5950static void
5934wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 5951wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5935{ 5952{
5936 struct wm_softc *sc = ifp->if_softc; 5953 struct wm_softc *sc = ifp->if_softc;
5937 uint32_t ctrl, status; 5954 uint32_t ctrl, status;
5938 5955
5939 ifmr->ifm_status = IFM_AVALID; 5956 ifmr->ifm_status = IFM_AVALID;
5940 ifmr->ifm_active = IFM_ETHER; 5957 ifmr->ifm_active = IFM_ETHER;
5941 5958
5942 status = CSR_READ(sc, WMREG_STATUS); 5959 status = CSR_READ(sc, WMREG_STATUS);
5943 if ((status & STATUS_LU) == 0) { 5960 if ((status & STATUS_LU) == 0) {
5944 ifmr->ifm_active |= IFM_NONE; 5961 ifmr->ifm_active |= IFM_NONE;
5945 return; 5962 return;
5946 } 5963 }
5947 5964
5948 ifmr->ifm_status |= IFM_ACTIVE; 5965 ifmr->ifm_status |= IFM_ACTIVE;
5949 ifmr->ifm_active |= IFM_1000_SX; 5966 ifmr->ifm_active |= IFM_1000_SX;
5950 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD) 5967 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
5951 ifmr->ifm_active |= IFM_FDX; 5968 ifmr->ifm_active |= IFM_FDX;
5952 ctrl = CSR_READ(sc, WMREG_CTRL); 5969 ctrl = CSR_READ(sc, WMREG_CTRL);
5953 if (ctrl & CTRL_RFCE) 5970 if (ctrl & CTRL_RFCE)
5954 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE; 5971 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
5955 if (ctrl & CTRL_TFCE) 5972 if (ctrl & CTRL_TFCE)
5956 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE; 5973 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
5957} 5974}
5958 5975
5959/* 5976/*
5960 * wm_tbi_mediachange: [ifmedia interface function] 5977 * wm_tbi_mediachange: [ifmedia interface function]
5961 * 5978 *
5962 * Set hardware to newly-selected media on a 1000BASE-X device. 5979 * Set hardware to newly-selected media on a 1000BASE-X device.
5963 */ 5980 */
5964static int 5981static int
5965wm_tbi_mediachange(struct ifnet *ifp) 5982wm_tbi_mediachange(struct ifnet *ifp)
5966{ 5983{
5967 struct wm_softc *sc = ifp->if_softc; 5984 struct wm_softc *sc = ifp->if_softc;
5968 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur; 5985 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5969 uint32_t status; 5986 uint32_t status;
5970 int i; 5987 int i;
5971 5988
5972 sc->sc_txcw = 0; 5989 sc->sc_txcw = 0;
5973 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO || 5990 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
5974 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0) 5991 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
5975 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE; 5992 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
5976 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) { 5993 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5977 sc->sc_txcw |= TXCW_ANE; 5994 sc->sc_txcw |= TXCW_ANE;
5978 } else { 5995 } else {
5979 /* 5996 /*
5980 * If autonegotiation is turned off, force link up and turn on 5997 * If autonegotiation is turned off, force link up and turn on
5981 * full duplex 5998 * full duplex
5982 */ 5999 */
5983 sc->sc_txcw &= ~TXCW_ANE; 6000 sc->sc_txcw &= ~TXCW_ANE;
5984 sc->sc_ctrl |= CTRL_SLU | CTRL_FD; 6001 sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
5985 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE); 6002 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
5986 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 6003 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
 6004 CSR_WRITE_FLUSH(sc);
5987 delay(1000); 6005 delay(1000);
5988 } 6006 }
5989 6007
5990 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n", 6008 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
5991 device_xname(sc->sc_dev),sc->sc_txcw)); 6009 device_xname(sc->sc_dev),sc->sc_txcw));
5992 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw); 6010 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
 6011 CSR_WRITE_FLUSH(sc);
5993 delay(10000); 6012 delay(10000);
5994 6013
5995 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1); 6014 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
5996 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i)); 6015 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
5997 6016
5998 /* 6017 /*
5999 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the 6018 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
6000 * optics detect a signal, 0 if they don't. 6019 * optics detect a signal, 0 if they don't.
6001 */ 6020 */
6002 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) { 6021 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
6003 /* Have signal; wait for the link to come up. */ 6022 /* Have signal; wait for the link to come up. */
6004 6023
6005 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) { 6024 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
6006 /* 6025 /*
6007 * Reset the link, and let autonegotiation do its thing 6026 * Reset the link, and let autonegotiation do its thing
6008 */ 6027 */
6009 sc->sc_ctrl |= CTRL_LRST; 6028 sc->sc_ctrl |= CTRL_LRST;
6010 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 6029 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
 6030 CSR_WRITE_FLUSH(sc);
6011 delay(1000); 6031 delay(1000);
6012 sc->sc_ctrl &= ~CTRL_LRST; 6032 sc->sc_ctrl &= ~CTRL_LRST;
6013 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 6033 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
 6034 CSR_WRITE_FLUSH(sc);
6014 delay(1000); 6035 delay(1000);
6015 } 6036 }
6016 6037
6017 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) { 6038 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
6018 delay(10000); 6039 delay(10000);
6019 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU) 6040 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
6020 break; 6041 break;
6021 } 6042 }
6022 6043
6023 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n", 6044 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
6024 device_xname(sc->sc_dev),i)); 6045 device_xname(sc->sc_dev),i));
6025 6046
6026 status = CSR_READ(sc, WMREG_STATUS); 6047 status = CSR_READ(sc, WMREG_STATUS);
6027 DPRINTF(WM_DEBUG_LINK, 6048 DPRINTF(WM_DEBUG_LINK,
6028 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n", 6049 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
6029 device_xname(sc->sc_dev),status, STATUS_LU)); 6050 device_xname(sc->sc_dev),status, STATUS_LU));
6030 if (status & STATUS_LU) { 6051 if (status & STATUS_LU) {
6031 /* Link is up. */ 6052 /* Link is up. */
6032 DPRINTF(WM_DEBUG_LINK, 6053 DPRINTF(WM_DEBUG_LINK,
6033 ("%s: LINK: set media -> link up %s\n", 6054 ("%s: LINK: set media -> link up %s\n",
6034 device_xname(sc->sc_dev), 6055 device_xname(sc->sc_dev),
6035 (status & STATUS_FD) ? "FDX" : "HDX")); 6056 (status & STATUS_FD) ? "FDX" : "HDX"));
6036 6057
6037 /* 6058 /*
6038 * NOTE: CTRL will update TFCE and RFCE automatically, 6059 * NOTE: CTRL will update TFCE and RFCE automatically,
6039 * so we should update sc->sc_ctrl 6060 * so we should update sc->sc_ctrl
6040 */ 6061 */
6041 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL); 6062 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
6042 sc->sc_tctl &= ~TCTL_COLD(0x3ff); 6063 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6043 sc->sc_fcrtl &= ~FCRTL_XONE; 6064 sc->sc_fcrtl &= ~FCRTL_XONE;
6044 if (status & STATUS_FD) 6065 if (status & STATUS_FD)
6045 sc->sc_tctl |= 6066 sc->sc_tctl |=
6046 TCTL_COLD(TX_COLLISION_DISTANCE_FDX); 6067 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6047 else 6068 else
6048 sc->sc_tctl |= 6069 sc->sc_tctl |=
6049 TCTL_COLD(TX_COLLISION_DISTANCE_HDX); 6070 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
6050 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE) 6071 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
6051 sc->sc_fcrtl |= FCRTL_XONE; 6072 sc->sc_fcrtl |= FCRTL_XONE;
6052 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); 6073 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6053 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? 6074 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
6054 WMREG_OLD_FCRTL : WMREG_FCRTL, 6075 WMREG_OLD_FCRTL : WMREG_FCRTL,
6055 sc->sc_fcrtl); 6076 sc->sc_fcrtl);
6056 sc->sc_tbi_linkup = 1; 6077 sc->sc_tbi_linkup = 1;
6057 } else { 6078 } else {
6058 if (i == WM_LINKUP_TIMEOUT) 6079 if (i == WM_LINKUP_TIMEOUT)
6059 wm_check_for_link(sc); 6080 wm_check_for_link(sc);
6060 /* Link is down. */ 6081 /* Link is down. */
6061 DPRINTF(WM_DEBUG_LINK, 6082 DPRINTF(WM_DEBUG_LINK,
6062 ("%s: LINK: set media -> link down\n", 6083 ("%s: LINK: set media -> link down\n",
6063 device_xname(sc->sc_dev))); 6084 device_xname(sc->sc_dev)));
6064 sc->sc_tbi_linkup = 0; 6085 sc->sc_tbi_linkup = 0;
6065 } 6086 }
6066 } else { 6087 } else {
6067 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n", 6088 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
6068 device_xname(sc->sc_dev))); 6089 device_xname(sc->sc_dev)));
6069 sc->sc_tbi_linkup = 0; 6090 sc->sc_tbi_linkup = 0;
6070 } 6091 }
6071 6092
6072 wm_tbi_set_linkled(sc); 6093 wm_tbi_set_linkled(sc);
6073 6094
6074 return 0; 6095 return 0;
6075} 6096}
6076 6097
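The CSR_WRITE_FLUSH() calls added above (and throughout this change) are the completion barrier named in the commit message: PCI register writes are posted, so without a read-back the write can still be sitting in a bridge when delay() starts counting, and the intended settle time is silently shortened. Reading a register back forces the posted write to complete before the timed wait begins. The enforced pattern, as it now appears in the link-reset path above:

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_LRST);
	CSR_WRITE_FLUSH(sc);	/* read-back drains the posted write */
	delay(1000);		/* the 1 ms now starts after the chip saw LRST */
	sc->sc_ctrl &= ~CTRL_LRST;
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	CSR_WRITE_FLUSH(sc);
	delay(1000);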
6077/* 6098/*
6078 * wm_tbi_set_linkled: 6099 * wm_tbi_set_linkled:
6079 * 6100 *
6080 * Update the link LED on 1000BASE-X devices. 6101 * Update the link LED on 1000BASE-X devices.
6081 */ 6102 */
6082static void 6103static void
6083wm_tbi_set_linkled(struct wm_softc *sc) 6104wm_tbi_set_linkled(struct wm_softc *sc)
6084{ 6105{
6085 6106
6086 if (sc->sc_tbi_linkup) 6107 if (sc->sc_tbi_linkup)
6087 sc->sc_ctrl |= CTRL_SWDPIN(0); 6108 sc->sc_ctrl |= CTRL_SWDPIN(0);
6088 else 6109 else
6089 sc->sc_ctrl &= ~CTRL_SWDPIN(0); 6110 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
6090 6111
6091 /* 82540 or newer devices are active low */ 6112 /* 82540 or newer devices are active low */
6092 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0; 6113 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
6093 6114
6094 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 6115 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6095} 6116}
6096 6117
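A note on the XOR above: on 82540 and newer the link LED on SWDPIN(0) is active low, so the pin must be driven low when the link is up; the XOR simply inverts the already-computed bit on those chips. An equivalent, more explicit sketch (the local variable is illustrative):

	int led_pin_high = sc->sc_tbi_linkup;
	if (sc->sc_type >= WM_T_82540)
		led_pin_high = !led_pin_high;	/* active-low LED */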
6097/* 6118/*
6098 * wm_tbi_check_link: 6119 * wm_tbi_check_link:
6099 * 6120 *
6100 * Check the link on 1000BASE-X devices. 6121 * Check the link on 1000BASE-X devices.
6101 */ 6122 */
6102static void 6123static void
6103wm_tbi_check_link(struct wm_softc *sc) 6124wm_tbi_check_link(struct wm_softc *sc)
6104{ 6125{
6105 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 6126 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6106 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur; 6127 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6107 uint32_t status; 6128 uint32_t status;
6108 6129
6109 status = CSR_READ(sc, WMREG_STATUS); 6130 status = CSR_READ(sc, WMREG_STATUS);
6110 6131
6111 /* XXX is this needed? */ 6132 /* XXX is this needed? */
6112 (void)CSR_READ(sc, WMREG_RXCW); 6133 (void)CSR_READ(sc, WMREG_RXCW);
6113 (void)CSR_READ(sc, WMREG_CTRL); 6134 (void)CSR_READ(sc, WMREG_CTRL);
6114 6135
6115 /* set link status */ 6136 /* set link status */
6116 if ((status & STATUS_LU) == 0) { 6137 if ((status & STATUS_LU) == 0) {
6117 DPRINTF(WM_DEBUG_LINK, 6138 DPRINTF(WM_DEBUG_LINK,
6118 ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev))); 6139 ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
6119 sc->sc_tbi_linkup = 0; 6140 sc->sc_tbi_linkup = 0;
6120 } else if (sc->sc_tbi_linkup == 0) { 6141 } else if (sc->sc_tbi_linkup == 0) {
6121 DPRINTF(WM_DEBUG_LINK, 6142 DPRINTF(WM_DEBUG_LINK,
6122 ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev), 6143 ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
6123 (status & STATUS_FD) ? "FDX" : "HDX")); 6144 (status & STATUS_FD) ? "FDX" : "HDX"));
6124 sc->sc_tbi_linkup = 1; 6145 sc->sc_tbi_linkup = 1;
6125 } 6146 }
6126 6147
6127 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) 6148 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
6128 && ((status & STATUS_LU) == 0)) { 6149 && ((status & STATUS_LU) == 0)) {
6129 sc->sc_tbi_linkup = 0; 6150 sc->sc_tbi_linkup = 0;
6130 if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) { 6151 if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
6131 /* RXCFG storm! */ 6152 /* RXCFG storm! */
6132 DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n", 6153 DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
6133 sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg)); 6154 sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
6134 wm_init(ifp); 6155 wm_init(ifp);
6135 ifp->if_start(ifp); 6156 ifp->if_start(ifp);
6136 } else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) { 6157 } else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
6137 /* If the timer expired, retry autonegotiation */ 6158 /* If the timer expired, retry autonegotiation */
6138 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) { 6159 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
6139 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n")); 6160 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
6140 sc->sc_tbi_ticks = 0; 6161 sc->sc_tbi_ticks = 0;
6141 /* 6162 /*
6142 * Reset the link, and let autonegotiation do 6163 * Reset the link, and let autonegotiation do
6143 * its thing 6164 * its thing
6144 */ 6165 */
6145 sc->sc_ctrl |= CTRL_LRST; 6166 sc->sc_ctrl |= CTRL_LRST;
6146 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 6167 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
 6168 CSR_WRITE_FLUSH(sc);
6147 delay(1000); 6169 delay(1000);
6148 sc->sc_ctrl &= ~CTRL_LRST; 6170 sc->sc_ctrl &= ~CTRL_LRST;
6149 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 6171 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
 6172 CSR_WRITE_FLUSH(sc);
6150 delay(1000); 6173 delay(1000);
6151 CSR_WRITE(sc, WMREG_TXCW, 6174 CSR_WRITE(sc, WMREG_TXCW,
6152 sc->sc_txcw & ~TXCW_ANE); 6175 sc->sc_txcw & ~TXCW_ANE);
6153 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw); 6176 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
6154 } 6177 }
6155 } 6178 }
6156 } 6179 }
6157 6180
6158 wm_tbi_set_linkled(sc); 6181 wm_tbi_set_linkled(sc);
6159} 6182}
6160 6183
6161/* 6184/*
6162 * wm_gmii_reset: 6185 * wm_gmii_reset:
6163 * 6186 *
6164 * Reset the PHY. 6187 * Reset the PHY.
6165 */ 6188 */
6166static void 6189static void
6167wm_gmii_reset(struct wm_softc *sc) 6190wm_gmii_reset(struct wm_softc *sc)
6168{ 6191{
6169 uint32_t reg; 6192 uint32_t reg;
6170 int rv; 6193 int rv;
6171 6194
6172 /* get phy semaphore */ 6195 /* get phy semaphore */
6173 switch (sc->sc_type) { 6196 switch (sc->sc_type) {
6174 case WM_T_82571: 6197 case WM_T_82571:
6175 case WM_T_82572: 6198 case WM_T_82572:
6176 case WM_T_82573: 6199 case WM_T_82573:
6177 case WM_T_82574: 6200 case WM_T_82574:
6178 case WM_T_82583: 6201 case WM_T_82583:
6179 /* XXX should get sw semaphore, too */ 6202 /* XXX should get sw semaphore, too */
6180 rv = wm_get_swsm_semaphore(sc); 6203 rv = wm_get_swsm_semaphore(sc);
6181 break; 6204 break;
6182 case WM_T_82575: 6205 case WM_T_82575:
6183 case WM_T_82576: 6206 case WM_T_82576:
6184 case WM_T_82580: 6207 case WM_T_82580:
6185 case WM_T_82580ER: 6208 case WM_T_82580ER:
6186 case WM_T_I350: 6209 case WM_T_I350:
6187 case WM_T_I354: 6210 case WM_T_I354:
6188 case WM_T_I210: 6211 case WM_T_I210:
6189 case WM_T_I211: 6212 case WM_T_I211:
6190 case WM_T_80003: 6213 case WM_T_80003:
6191 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]); 6214 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6192 break; 6215 break;
6193 case WM_T_ICH8: 6216 case WM_T_ICH8:
6194 case WM_T_ICH9: 6217 case WM_T_ICH9:
6195 case WM_T_ICH10: 6218 case WM_T_ICH10:
6196 case WM_T_PCH: 6219 case WM_T_PCH:
6197 case WM_T_PCH2: 6220 case WM_T_PCH2:
6198 case WM_T_PCH_LPT: 6221 case WM_T_PCH_LPT:
6199 rv = wm_get_swfwhw_semaphore(sc); 6222 rv = wm_get_swfwhw_semaphore(sc);
6200 break; 6223 break;
6201 default: 6224 default:
6202 /* nothing to do*/ 6225 /* nothing to do*/
6203 rv = 0; 6226 rv = 0;
6204 break; 6227 break;
6205 } 6228 }
6206 if (rv != 0) { 6229 if (rv != 0) {
6207 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 6230 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6208 __func__); 6231 __func__);
6209 return; 6232 return;
6210 } 6233 }
6211 6234
6212 switch (sc->sc_type) { 6235 switch (sc->sc_type) {
6213 case WM_T_82542_2_0: 6236 case WM_T_82542_2_0:
6214 case WM_T_82542_2_1: 6237 case WM_T_82542_2_1:
6215 /* null */ 6238 /* null */
6216 break; 6239 break;
6217 case WM_T_82543: 6240 case WM_T_82543:
6218 /* 6241 /*
6219 * With 82543, we need to force speed and duplex on the MAC 6242 * With 82543, we need to force speed and duplex on the MAC
6220 * equal to what the PHY speed and duplex configuration is. 6243 * equal to what the PHY speed and duplex configuration is.
6221 * In addition, we need to perform a hardware reset on the PHY 6244 * In addition, we need to perform a hardware reset on the PHY
6222 * to take it out of reset. 6245 * to take it out of reset.
6223 */ 6246 */
6224 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX; 6247 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6225 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 6248 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6226 6249
6227 /* The PHY reset pin is active-low. */ 6250 /* The PHY reset pin is active-low. */
6228 reg = CSR_READ(sc, WMREG_CTRL_EXT); 6251 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6229 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) | 6252 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
6230 CTRL_EXT_SWDPIN(4)); 6253 CTRL_EXT_SWDPIN(4));
6231 reg |= CTRL_EXT_SWDPIO(4); 6254 reg |= CTRL_EXT_SWDPIO(4);
6232 6255
6233 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 6256 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
 6257 CSR_WRITE_FLUSH(sc);
6234 delay(10*1000); 6258 delay(10*1000);
6235 6259
6236 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4)); 6260 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
 6261 CSR_WRITE_FLUSH(sc);
6237 delay(150); 6262 delay(150);
6238#if 0 6263#if 0
6239 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4); 6264 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
6240#endif 6265#endif
6241 delay(20*1000); /* XXX extra delay to get PHY ID? */ 6266 delay(20*1000); /* XXX extra delay to get PHY ID? */
6242 break; 6267 break;
6243 case WM_T_82544: /* reset 10000us */ 6268 case WM_T_82544: /* reset 10000us */
6244 case WM_T_82540: 6269 case WM_T_82540:
6245 case WM_T_82545: 6270 case WM_T_82545:
6246 case WM_T_82545_3: 6271 case WM_T_82545_3:
6247 case WM_T_82546: 6272 case WM_T_82546:
6248 case WM_T_82546_3: 6273 case WM_T_82546_3:
6249 case WM_T_82541: 6274 case WM_T_82541:
6250 case WM_T_82541_2: 6275 case WM_T_82541_2:
6251 case WM_T_82547: 6276 case WM_T_82547:
6252 case WM_T_82547_2: 6277 case WM_T_82547_2:
6253 case WM_T_82571: /* reset 100us */ 6278 case WM_T_82571: /* reset 100us */
6254 case WM_T_82572: 6279 case WM_T_82572:
6255 case WM_T_82573: 6280 case WM_T_82573:
6256 case WM_T_82574: 6281 case WM_T_82574:
6257 case WM_T_82575: 6282 case WM_T_82575:
6258 case WM_T_82576: 6283 case WM_T_82576:
6259 case WM_T_82580: 6284 case WM_T_82580:
6260 case WM_T_82580ER: 6285 case WM_T_82580ER:
6261 case WM_T_I350: 6286 case WM_T_I350:
6262 case WM_T_I354: 6287 case WM_T_I354:
6263 case WM_T_I210: 6288 case WM_T_I210:
6264 case WM_T_I211: 6289 case WM_T_I211:
6265 case WM_T_82583: 6290 case WM_T_82583:
6266 case WM_T_80003: 6291 case WM_T_80003:
6267 /* generic reset */ 6292 /* generic reset */
6268 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET); 6293 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
 6294 CSR_WRITE_FLUSH(sc);
6269 delay(20000); 6295 delay(20000);
6270 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 6296 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
 6297 CSR_WRITE_FLUSH(sc);
6271 delay(20000); 6298 delay(20000);
6272 6299
6273 if ((sc->sc_type == WM_T_82541) 6300 if ((sc->sc_type == WM_T_82541)
6274 || (sc->sc_type == WM_T_82541_2) 6301 || (sc->sc_type == WM_T_82541_2)
6275 || (sc->sc_type == WM_T_82547) 6302 || (sc->sc_type == WM_T_82547)
6276 || (sc->sc_type == WM_T_82547_2)) { 6303 || (sc->sc_type == WM_T_82547_2)) {
6277 /* workaround for igp are done in igp_reset() */ 6304 /* workaround for igp are done in igp_reset() */
6278 /* XXX add code to set LED after phy reset */ 6305 /* XXX add code to set LED after phy reset */
6279 } 6306 }
6280 break; 6307 break;
6281 case WM_T_ICH8: 6308 case WM_T_ICH8:
6282 case WM_T_ICH9: 6309 case WM_T_ICH9:
6283 case WM_T_ICH10: 6310 case WM_T_ICH10:
6284 case WM_T_PCH: 6311 case WM_T_PCH:
6285 case WM_T_PCH2: 6312 case WM_T_PCH2:
6286 case WM_T_PCH_LPT: 6313 case WM_T_PCH_LPT:
6287 /* generic reset */ 6314 /* generic reset */
6288 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET); 6315 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
 6316 CSR_WRITE_FLUSH(sc);
6289 delay(100); 6317 delay(100);
6290 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 6318 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
 6319 CSR_WRITE_FLUSH(sc);
6291 delay(150); 6320 delay(150);
6292 break; 6321 break;
6293 default: 6322 default:
6294 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev), 6323 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6295 __func__); 6324 __func__);
6296 break; 6325 break;
6297 } 6326 }
6298 6327
6299 /* release PHY semaphore */ 6328 /* release PHY semaphore */
6300 switch (sc->sc_type) { 6329 switch (sc->sc_type) {
6301 case WM_T_82571: 6330 case WM_T_82571:
6302 case WM_T_82572: 6331 case WM_T_82572:
6303 case WM_T_82573: 6332 case WM_T_82573:
6304 case WM_T_82574: 6333 case WM_T_82574:
6305 case WM_T_82583: 6334 case WM_T_82583:
6306 /* XXX should put sw semaphore, too */ 6335 /* XXX should put sw semaphore, too */
6307 wm_put_swsm_semaphore(sc); 6336 wm_put_swsm_semaphore(sc);
6308 break; 6337 break;
6309 case WM_T_82575: 6338 case WM_T_82575:
6310 case WM_T_82576: 6339 case WM_T_82576:
6311 case WM_T_82580: 6340 case WM_T_82580:
6312 case WM_T_82580ER: 6341 case WM_T_82580ER:
6313 case WM_T_I350: 6342 case WM_T_I350:
6314 case WM_T_I354: 6343 case WM_T_I354:
6315 case WM_T_I210: 6344 case WM_T_I210:
6316 case WM_T_I211: 6345 case WM_T_I211:
6317 case WM_T_80003: 6346 case WM_T_80003:
6318 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]); 6347 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6319 break; 6348 break;
6320 case WM_T_ICH8: 6349 case WM_T_ICH8:
6321 case WM_T_ICH9: 6350 case WM_T_ICH9:
6322 case WM_T_ICH10: 6351 case WM_T_ICH10:
6323 case WM_T_PCH: 6352 case WM_T_PCH:
6324 case WM_T_PCH2: 6353 case WM_T_PCH2:
6325 case WM_T_PCH_LPT: 6354 case WM_T_PCH_LPT:
6326 wm_put_swfwhw_semaphore(sc); 6355 wm_put_swfwhw_semaphore(sc);
6327 break; 6356 break;
6328 default: 6357 default:
6329 /* nothing to do*/ 6358 /* nothing to do*/
6330 rv = 0; 6359 rv = 0;
6331 break; 6360 break;
6332 } 6361 }
6333 6362
6334 /* get_cfg_done */ 6363 /* get_cfg_done */
6335 wm_get_cfg_done(sc); 6364 wm_get_cfg_done(sc);
6336 6365
6337 /* extra setup */ 6366 /* extra setup */
6338 switch (sc->sc_type) { 6367 switch (sc->sc_type) {
6339 case WM_T_82542_2_0: 6368 case WM_T_82542_2_0:
6340 case WM_T_82542_2_1: 6369 case WM_T_82542_2_1:
6341 case WM_T_82543: 6370 case WM_T_82543:
6342 case WM_T_82544: 6371 case WM_T_82544:
6343 case WM_T_82540: 6372 case WM_T_82540:
6344 case WM_T_82545: 6373 case WM_T_82545:
6345 case WM_T_82545_3: 6374 case WM_T_82545_3:
6346 case WM_T_82546: 6375 case WM_T_82546:
6347 case WM_T_82546_3: 6376 case WM_T_82546_3:
6348 case WM_T_82541_2: 6377 case WM_T_82541_2:
6349 case WM_T_82547_2: 6378 case WM_T_82547_2:
6350 case WM_T_82571: 6379 case WM_T_82571:
6351 case WM_T_82572: 6380 case WM_T_82572:
6352 case WM_T_82573: 6381 case WM_T_82573:
6353 case WM_T_82574: 6382 case WM_T_82574:
6354 case WM_T_82575: 6383 case WM_T_82575:
6355 case WM_T_82576: 6384 case WM_T_82576:
6356 case WM_T_82580: 6385 case WM_T_82580:
6357 case WM_T_82580ER: 6386 case WM_T_82580ER:
6358 case WM_T_I350: 6387 case WM_T_I350:
6359 case WM_T_I354: 6388 case WM_T_I354:
6360 case WM_T_I210: 6389 case WM_T_I210:
6361 case WM_T_I211: 6390 case WM_T_I211:
6362 case WM_T_82583: 6391 case WM_T_82583:
6363 case WM_T_80003: 6392 case WM_T_80003:
6364 /* null */ 6393 /* null */
6365 break; 6394 break;
6366 case WM_T_82541: 6395 case WM_T_82541:
6367 case WM_T_82547: 6396 case WM_T_82547:
6368 /* XXX Configure actively LED after PHY reset */ 6397 /* XXX Configure actively LED after PHY reset */
6369 break; 6398 break;
6370 case WM_T_ICH8: 6399 case WM_T_ICH8:
6371 case WM_T_ICH9: 6400 case WM_T_ICH9:
6372 case WM_T_ICH10: 6401 case WM_T_ICH10:
6373 case WM_T_PCH: 6402 case WM_T_PCH:
6374 case WM_T_PCH2: 6403 case WM_T_PCH2:
6375 case WM_T_PCH_LPT: 6404 case WM_T_PCH_LPT:
6376 /* Allow time for h/w to get to a quiescent state after reset */ 6405 /* Allow time for h/w to get to a quiescent state after reset */
6377 delay(10*1000); 6406 delay(10*1000);
6378 6407
6379 if (sc->sc_type == WM_T_PCH) 6408 if (sc->sc_type == WM_T_PCH)
6380 wm_hv_phy_workaround_ich8lan(sc); 6409 wm_hv_phy_workaround_ich8lan(sc);
6381 6410
6382 if (sc->sc_type == WM_T_PCH2) 6411 if (sc->sc_type == WM_T_PCH2)
6383 wm_lv_phy_workaround_ich8lan(sc); 6412 wm_lv_phy_workaround_ich8lan(sc);
6384 6413
6385 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) { 6414 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6386 /* 6415 /*
6387 * dummy read to clear the phy wakeup bit after lcd 6416 * dummy read to clear the phy wakeup bit after lcd
6388 * reset 6417 * reset
6389 */ 6418 */
6390 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC); 6419 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6391 } 6420 }
6392 6421
6393 /* 6422 /*
6394 * XXX Configure the LCD with the extended configuration region 6423 * XXX Configure the LCD with the extended configuration region
6395 * in NVM 6424 * in NVM
6396 */ 6425 */
6397 6426
6398 /* Configure the LCD with the OEM bits in NVM */ 6427 /* Configure the LCD with the OEM bits in NVM */
6399 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2) 6428 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
6400 || (sc->sc_type == WM_T_PCH_LPT)) { 6429 || (sc->sc_type == WM_T_PCH_LPT)) {
6401 /* 6430 /*
6402 * Disable LPLU. 6431 * Disable LPLU.
6403 * XXX It seems that 82567 has LPLU, too. 6432 * XXX It seems that 82567 has LPLU, too.
6404 */ 6433 */
6405 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS); 6434 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
6406 reg &= ~(HV_OEM_BITS_A1KDIS| HV_OEM_BITS_LPLU); 6435 reg &= ~(HV_OEM_BITS_A1KDIS| HV_OEM_BITS_LPLU);
6407 reg |= HV_OEM_BITS_ANEGNOW; 6436 reg |= HV_OEM_BITS_ANEGNOW;
6408 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg); 6437 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6409 } 6438 }
6410 break; 6439 break;
6411 default: 6440 default:
6412 panic("%s: unknown type\n", __func__); 6441 panic("%s: unknown type\n", __func__);
6413 break; 6442 break;
6414 } 6443 }
6415} 6444}
6416 6445
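wm_gmii_reset() above follows a fixed shape: take the semaphore that arbitrates PHY access with firmware for this MAC generation (SWSM on 8257[1-4]/82583, SW/FW sync on 82575 and later, SW/FW/HW on ICH/PCH), toggle CTRL_PHY_RESET with a flush and delay on either side, release the semaphore, then wait for configuration to complete and apply per-family workarounds. A compressed sketch of that bracket, using hypothetical helper names rather than the driver's API:

struct wm_softc;
static int example_get_phy_semaphore(struct wm_softc *);
static void example_assert_phy_reset(struct wm_softc *);
static void example_put_phy_semaphore(struct wm_softc *);
static void example_wait_cfg_done(struct wm_softc *);

/* Names prefixed example_ are illustrative, not if_wm.c API. */
static int
example_phy_reset(struct wm_softc *sc)
{
	int rv;

	if ((rv = example_get_phy_semaphore(sc)) != 0)
		return rv;			/* never touch the PHY unlocked */
	example_assert_phy_reset(sc);		/* write, flush, delay, deassert */
	example_put_phy_semaphore(sc);
	example_wait_cfg_done(sc);		/* then per-family PHY workarounds */
	return 0;
}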
6417/* 6446/*
6418 * wm_get_phy_id_82575: 6447 * wm_get_phy_id_82575:
6419 * 6448 *
6420 * Return PHY ID. Return -1 if it failed. 6449 * Return PHY ID. Return -1 if it failed.
6421 */ 6450 */
6422static int 6451static int
6423wm_get_phy_id_82575(struct wm_softc *sc) 6452wm_get_phy_id_82575(struct wm_softc *sc)
6424{ 6453{
6425 uint32_t reg; 6454 uint32_t reg;
6426 int phyid = -1; 6455 int phyid = -1;
6427 6456
6428 /* XXX */ 6457 /* XXX */
6429 if ((sc->sc_flags & WM_F_SGMII) == 0) 6458 if ((sc->sc_flags & WM_F_SGMII) == 0)
6430 return -1; 6459 return -1;
6431 6460
6432 if (wm_sgmii_uses_mdio(sc)) { 6461 if (wm_sgmii_uses_mdio(sc)) {
6433 switch (sc->sc_type) { 6462 switch (sc->sc_type) {
6434 case WM_T_82575: 6463 case WM_T_82575:
6435 case WM_T_82576: 6464 case WM_T_82576:
6436 reg = CSR_READ(sc, WMREG_MDIC); 6465 reg = CSR_READ(sc, WMREG_MDIC);
6437 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT; 6466 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
6438 break; 6467 break;
6439 case WM_T_82580: 6468 case WM_T_82580:
6440 case WM_T_I350: 6469 case WM_T_I350:
6441 case WM_T_I354: 6470 case WM_T_I354:
6442 case WM_T_I210: 6471 case WM_T_I210:
6443 case WM_T_I211: 6472 case WM_T_I211:
6444 reg = CSR_READ(sc, WMREG_MDICNFG); 6473 reg = CSR_READ(sc, WMREG_MDICNFG);
6445 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT; 6474 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
6446 break; 6475 break;
6447 default: 6476 default:
6448 return -1; 6477 return -1;
6449 } 6478 }
6450 } 6479 }
6451 6480
6452 return phyid; 6481 return phyid;
6453} 6482}
6454 6483
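wm_get_phy_id_82575() above only applies when the port is SGMII and the PHY is reached over MDIO; the PHY address is then a bitfield of MDIC (82575/82576) or MDICNFG (82580 and later). The extraction is a plain mask-and-shift; a trivial sketch:

#include <stdint.h>

/* Generic bitfield extraction as used above; mask and shift come from the
 * register layout (MDIC_PHY_* or MDICNFG_PHY_* in the real driver). */
static inline int
extract_field(uint32_t reg, uint32_t mask, unsigned int shift)
{
	return (int)((reg & mask) >> shift);
}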
6455 6484
6456/* 6485/*
6457 * wm_gmii_mediainit: 6486 * wm_gmii_mediainit:
6458 * 6487 *
6459 * Initialize media for use on 1000BASE-T devices. 6488 * Initialize media for use on 1000BASE-T devices.
6460 */ 6489 */
6461static void 6490static void
6462wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid) 6491wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6463{ 6492{
6464 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 6493 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6465 struct mii_data *mii = &sc->sc_mii; 6494 struct mii_data *mii = &sc->sc_mii;
6466 6495
6467 /* We have MII. */ 6496 /* We have MII. */
6468 sc->sc_flags |= WM_F_HAS_MII; 6497 sc->sc_flags |= WM_F_HAS_MII;
6469 6498
6470 if (sc->sc_type == WM_T_80003) 6499 if (sc->sc_type == WM_T_80003)
6471 sc->sc_tipg = TIPG_1000T_80003_DFLT; 6500 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6472 else 6501 else
6473 sc->sc_tipg = TIPG_1000T_DFLT; 6502 sc->sc_tipg = TIPG_1000T_DFLT;
6474 6503
6475 /* 6504 /*
6476 * Let the chip set speed/duplex on its own based on 6505 * Let the chip set speed/duplex on its own based on
6477 * signals from the PHY. 6506 * signals from the PHY.
6478 * XXXbouyer - I'm not sure this is right for the 80003, 6507 * XXXbouyer - I'm not sure this is right for the 80003,
6479 * the em driver only sets CTRL_SLU here - but it seems to work. 6508 * the em driver only sets CTRL_SLU here - but it seems to work.
6480 */ 6509 */
6481 sc->sc_ctrl |= CTRL_SLU; 6510 sc->sc_ctrl |= CTRL_SLU;
6482 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 6511 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6483 6512
6484 /* Initialize our media structures and probe the GMII. */ 6513 /* Initialize our media structures and probe the GMII. */
6485 mii->mii_ifp = ifp; 6514 mii->mii_ifp = ifp;
6486 6515
6487 /* 6516 /*
6488 * Determine the PHY access method. 6517 * Determine the PHY access method.
6489 * 6518 *
6490 * For SGMII, use SGMII specific method. 6519 * For SGMII, use SGMII specific method.
6491 * 6520 *
6492 * For some devices, we can determine the PHY access method 6521 * For some devices, we can determine the PHY access method
6493 * from sc_type. 6522 * from sc_type.
6494 * 6523 *
6495 * For ICH8 variants, it's difficult to determine the PHY access 6524 * For ICH8 variants, it's difficult to determine the PHY access
6496 * method by sc_type, so use the PCI product ID for some devices. 6525 * method by sc_type, so use the PCI product ID for some devices.
6497 * For other ICH8 variants, try to use igp's method. If the PHY 6526 * For other ICH8 variants, try to use igp's method. If the PHY
6498 * can't detect, then use bm's method. 6527 * can't detect, then use bm's method.
6499 */ 6528 */
6500 switch (prodid) { 6529 switch (prodid) {
6501 case PCI_PRODUCT_INTEL_PCH_M_LM: 6530 case PCI_PRODUCT_INTEL_PCH_M_LM:
6502 case PCI_PRODUCT_INTEL_PCH_M_LC: 6531 case PCI_PRODUCT_INTEL_PCH_M_LC:
6503 /* 82577 */ 6532 /* 82577 */
6504 sc->sc_phytype = WMPHY_82577; 6533 sc->sc_phytype = WMPHY_82577;
6505 mii->mii_readreg = wm_gmii_hv_readreg; 6534 mii->mii_readreg = wm_gmii_hv_readreg;
6506 mii->mii_writereg = wm_gmii_hv_writereg; 6535 mii->mii_writereg = wm_gmii_hv_writereg;
6507 break; 6536 break;
6508 case PCI_PRODUCT_INTEL_PCH_D_DM: 6537 case PCI_PRODUCT_INTEL_PCH_D_DM:
6509 case PCI_PRODUCT_INTEL_PCH_D_DC: 6538 case PCI_PRODUCT_INTEL_PCH_D_DC:
6510 /* 82578 */ 6539 /* 82578 */
6511 sc->sc_phytype = WMPHY_82578; 6540 sc->sc_phytype = WMPHY_82578;
6512 mii->mii_readreg = wm_gmii_hv_readreg; 6541 mii->mii_readreg = wm_gmii_hv_readreg;
6513 mii->mii_writereg = wm_gmii_hv_writereg; 6542 mii->mii_writereg = wm_gmii_hv_writereg;
6514 break; 6543 break;
6515 case PCI_PRODUCT_INTEL_PCH2_LV_LM: 6544 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
6516 case PCI_PRODUCT_INTEL_PCH2_LV_V: 6545 case PCI_PRODUCT_INTEL_PCH2_LV_V:
6517 /* 82579 */ 6546 /* 82579 */
6518 sc->sc_phytype = WMPHY_82579; 6547 sc->sc_phytype = WMPHY_82579;
6519 mii->mii_readreg = wm_gmii_hv_readreg; 6548 mii->mii_readreg = wm_gmii_hv_readreg;
6520 mii->mii_writereg = wm_gmii_hv_writereg; 6549 mii->mii_writereg = wm_gmii_hv_writereg;
6521 break; 6550 break;
6522 case PCI_PRODUCT_INTEL_I217_LM: 6551 case PCI_PRODUCT_INTEL_I217_LM:
6523 case PCI_PRODUCT_INTEL_I217_V: 6552 case PCI_PRODUCT_INTEL_I217_V:
6524 case PCI_PRODUCT_INTEL_I218_LM: 6553 case PCI_PRODUCT_INTEL_I218_LM:
6525 case PCI_PRODUCT_INTEL_I218_V: 6554 case PCI_PRODUCT_INTEL_I218_V:
6526 /* I21[78] */ 6555 /* I21[78] */
6527 mii->mii_readreg = wm_gmii_hv_readreg; 6556 mii->mii_readreg = wm_gmii_hv_readreg;
6528 mii->mii_writereg = wm_gmii_hv_writereg; 6557 mii->mii_writereg = wm_gmii_hv_writereg;
6529 break; 6558 break;
6530 case PCI_PRODUCT_INTEL_82801I_BM: 6559 case PCI_PRODUCT_INTEL_82801I_BM:
6531 case PCI_PRODUCT_INTEL_82801J_R_BM_LM: 6560 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
6532 case PCI_PRODUCT_INTEL_82801J_R_BM_LF: 6561 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
6533 case PCI_PRODUCT_INTEL_82801J_D_BM_LM: 6562 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
6534 case PCI_PRODUCT_INTEL_82801J_D_BM_LF: 6563 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
6535 case PCI_PRODUCT_INTEL_82801J_R_BM_V: 6564 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
6536 /* 82567 */ 6565 /* 82567 */
6537 sc->sc_phytype = WMPHY_BM; 6566 sc->sc_phytype = WMPHY_BM;
6538 mii->mii_readreg = wm_gmii_bm_readreg; 6567 mii->mii_readreg = wm_gmii_bm_readreg;
6539 mii->mii_writereg = wm_gmii_bm_writereg; 6568 mii->mii_writereg = wm_gmii_bm_writereg;
6540 break; 6569 break;
6541 default: 6570 default:
6542 if (((sc->sc_flags & WM_F_SGMII) != 0) 6571 if (((sc->sc_flags & WM_F_SGMII) != 0)
6543 && !wm_sgmii_uses_mdio(sc)){ 6572 && !wm_sgmii_uses_mdio(sc)){
6544 mii->mii_readreg = wm_sgmii_readreg; 6573 mii->mii_readreg = wm_sgmii_readreg;
6545 mii->mii_writereg = wm_sgmii_writereg; 6574 mii->mii_writereg = wm_sgmii_writereg;
6546 } else if (sc->sc_type >= WM_T_80003) { 6575 } else if (sc->sc_type >= WM_T_80003) {
6547 mii->mii_readreg = wm_gmii_i80003_readreg; 6576 mii->mii_readreg = wm_gmii_i80003_readreg;
6548 mii->mii_writereg = wm_gmii_i80003_writereg; 6577 mii->mii_writereg = wm_gmii_i80003_writereg;
6549 } else if (sc->sc_type >= WM_T_I210) { 6578 } else if (sc->sc_type >= WM_T_I210) {
6550 mii->mii_readreg = wm_gmii_i82544_readreg; 6579 mii->mii_readreg = wm_gmii_i82544_readreg;
6551 mii->mii_writereg = wm_gmii_i82544_writereg; 6580 mii->mii_writereg = wm_gmii_i82544_writereg;
6552 } else if (sc->sc_type >= WM_T_82580) { 6581 } else if (sc->sc_type >= WM_T_82580) {
6553 sc->sc_phytype = WMPHY_82580; 6582 sc->sc_phytype = WMPHY_82580;
6554 mii->mii_readreg = wm_gmii_82580_readreg; 6583 mii->mii_readreg = wm_gmii_82580_readreg;
6555 mii->mii_writereg = wm_gmii_82580_writereg; 6584 mii->mii_writereg = wm_gmii_82580_writereg;
6556 } else if (sc->sc_type >= WM_T_82544) { 6585 } else if (sc->sc_type >= WM_T_82544) {
6557 mii->mii_readreg = wm_gmii_i82544_readreg; 6586 mii->mii_readreg = wm_gmii_i82544_readreg;
6558 mii->mii_writereg = wm_gmii_i82544_writereg; 6587 mii->mii_writereg = wm_gmii_i82544_writereg;
6559 } else { 6588 } else {
6560 mii->mii_readreg = wm_gmii_i82543_readreg; 6589 mii->mii_readreg = wm_gmii_i82543_readreg;
6561 mii->mii_writereg = wm_gmii_i82543_writereg; 6590 mii->mii_writereg = wm_gmii_i82543_writereg;
6562 } 6591 }
6563 break; 6592 break;
6564 } 6593 }
6565 mii->mii_statchg = wm_gmii_statchg; 6594 mii->mii_statchg = wm_gmii_statchg;
6566 6595
6567 wm_gmii_reset(sc); 6596 wm_gmii_reset(sc);
6568 6597
6569 sc->sc_ethercom.ec_mii = &sc->sc_mii; 6598 sc->sc_ethercom.ec_mii = &sc->sc_mii;
6570 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange, 6599 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
6571 wm_gmii_mediastatus); 6600 wm_gmii_mediastatus);
6572 6601
6573 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576) 6602 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
6574 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER) 6603 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
6575 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354) 6604 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
6576 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) { 6605 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
6577 if ((sc->sc_flags & WM_F_SGMII) == 0) { 6606 if ((sc->sc_flags & WM_F_SGMII) == 0) {
6578 /* Attach only one port */ 6607 /* Attach only one port */
6579 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1, 6608 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
6580 MII_OFFSET_ANY, MIIF_DOPAUSE); 6609 MII_OFFSET_ANY, MIIF_DOPAUSE);
6581 } else { 6610 } else {
6582 int i, id; 6611 int i, id;
6583 uint32_t ctrl_ext; 6612 uint32_t ctrl_ext;
6584 6613
6585 id = wm_get_phy_id_82575(sc); 6614 id = wm_get_phy_id_82575(sc);
6586 if (id != -1) { 6615 if (id != -1) {
6587 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 6616 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
6588 id, MII_OFFSET_ANY, MIIF_DOPAUSE); 6617 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
6589 } 6618 }
6590 if ((id == -1) 6619 if ((id == -1)
6591 || (LIST_FIRST(&mii->mii_phys) == NULL)) { 6620 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
6592 /* Power on sgmii phy if it is disabled */ 6621 /* Power on sgmii phy if it is disabled */
6593 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT); 6622 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6594 CSR_WRITE(sc, WMREG_CTRL_EXT, 6623 CSR_WRITE(sc, WMREG_CTRL_EXT,
6595 ctrl_ext &~ CTRL_EXT_SWDPIN(3)); 6624 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
6596 CSR_WRITE_FLUSH(sc); 6625 CSR_WRITE_FLUSH(sc);
6597 delay(300*1000); /* XXX too long */ 6626 delay(300*1000); /* XXX too long */
6598 6627
6599 /* from 1 to 8 */ 6628 /* from 1 to 8 */
6600 for (i = 1; i < 8; i++) 6629 for (i = 1; i < 8; i++)
6601 mii_attach(sc->sc_dev, &sc->sc_mii, 6630 mii_attach(sc->sc_dev, &sc->sc_mii,
6602 0xffffffff, i, MII_OFFSET_ANY, 6631 0xffffffff, i, MII_OFFSET_ANY,
6603 MIIF_DOPAUSE); 6632 MIIF_DOPAUSE);
6604 6633
6605 /* restore previous sfp cage power state */ 6634 /* restore previous sfp cage power state */
6606 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext); 6635 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6607 } 6636 }
6608 } 6637 }
6609 } else { 6638 } else {
6610 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 6639 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6611 MII_OFFSET_ANY, MIIF_DOPAUSE); 6640 MII_OFFSET_ANY, MIIF_DOPAUSE);
6612 } 6641 }
6613 6642
6614 /* 6643 /*
6615 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call 6644 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
6616 * wm_set_mdio_slow_mode_hv() for a workaround and retry. 6645 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
6617 */ 6646 */
6618 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) && 6647 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
6619 (LIST_FIRST(&mii->mii_phys) == NULL)) { 6648 (LIST_FIRST(&mii->mii_phys) == NULL)) {
6620 wm_set_mdio_slow_mode_hv(sc); 6649 wm_set_mdio_slow_mode_hv(sc);
6621 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 6650 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6622 MII_OFFSET_ANY, MIIF_DOPAUSE); 6651 MII_OFFSET_ANY, MIIF_DOPAUSE);
6623 } 6652 }
6624 6653
6625 /* 6654 /*
6626 * (For ICH8 variants) 6655 * (For ICH8 variants)
6627 * If PHY detection failed, use BM's r/w function and retry. 6656 * If PHY detection failed, use BM's r/w function and retry.
6628 */ 6657 */
6629 if (LIST_FIRST(&mii->mii_phys) == NULL) { 6658 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6630 /* if failed, retry with *_bm_* */ 6659 /* if failed, retry with *_bm_* */
6631 mii->mii_readreg = wm_gmii_bm_readreg; 6660 mii->mii_readreg = wm_gmii_bm_readreg;
6632 mii->mii_writereg = wm_gmii_bm_writereg; 6661 mii->mii_writereg = wm_gmii_bm_writereg;
6633 6662
6634 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 6663 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6635 MII_OFFSET_ANY, MIIF_DOPAUSE); 6664 MII_OFFSET_ANY, MIIF_DOPAUSE);
6636 } 6665 }
6637 6666
6638 if (LIST_FIRST(&mii->mii_phys) == NULL) { 6667 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6639 /* No PHY was found */ 6668 /* No PHY was found */
6640 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL); 6669 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6641 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE); 6670 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
6642 sc->sc_phytype = WMPHY_NONE; 6671 sc->sc_phytype = WMPHY_NONE;
6643 } else { 6672 } else {
6644 /* 6673 /*
6645 * PHY Found! 6674 * PHY Found!
6646 * Check PHY type. 6675 * Check PHY type.
6647 */ 6676 */
6648 uint32_t model; 6677 uint32_t model;
6649 struct mii_softc *child; 6678 struct mii_softc *child;
6650 6679
6651 child = LIST_FIRST(&mii->mii_phys); 6680 child = LIST_FIRST(&mii->mii_phys);
6652 if (device_is_a(child->mii_dev, "igphy")) { 6681 if (device_is_a(child->mii_dev, "igphy")) {
6653 struct igphy_softc *isc = (struct igphy_softc *)child; 6682 struct igphy_softc *isc = (struct igphy_softc *)child;
6654 6683
6655 model = isc->sc_mii.mii_mpd_model; 6684 model = isc->sc_mii.mii_mpd_model;
6656 if (model == MII_MODEL_yyINTEL_I82566) 6685 if (model == MII_MODEL_yyINTEL_I82566)
6657 sc->sc_phytype = WMPHY_IGP_3; 6686 sc->sc_phytype = WMPHY_IGP_3;
6658 } 6687 }
6659 6688
6660 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO); 6689 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
6661 } 6690 }
6662} 6691}
6663 6692
6664/* 6693/*
6665 * wm_gmii_mediastatus: [ifmedia interface function] 6694 * wm_gmii_mediastatus: [ifmedia interface function]
6666 * 6695 *
6667 * Get the current interface media status on a 1000BASE-T device. 6696 * Get the current interface media status on a 1000BASE-T device.
6668 */ 6697 */
6669static void 6698static void
6670wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 6699wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6671{ 6700{
6672 struct wm_softc *sc = ifp->if_softc; 6701 struct wm_softc *sc = ifp->if_softc;
6673 6702
6674 ether_mediastatus(ifp, ifmr); 6703 ether_mediastatus(ifp, ifmr);
6675 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) 6704 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6676 | sc->sc_flowflags; 6705 | sc->sc_flowflags;
6677} 6706}
6678 6707
6679/* 6708/*
6680 * wm_gmii_mediachange: [ifmedia interface function] 6709 * wm_gmii_mediachange: [ifmedia interface function]
6681 * 6710 *
6682 * Set hardware to newly-selected media on a 1000BASE-T device. 6711 * Set hardware to newly-selected media on a 1000BASE-T device.
6683 */ 6712 */
6684static int 6713static int
6685wm_gmii_mediachange(struct ifnet *ifp) 6714wm_gmii_mediachange(struct ifnet *ifp)
6686{ 6715{
6687 struct wm_softc *sc = ifp->if_softc; 6716 struct wm_softc *sc = ifp->if_softc;
6688 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur; 6717 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6689 int rc; 6718 int rc;
6690 6719
6691 if ((ifp->if_flags & IFF_UP) == 0) 6720 if ((ifp->if_flags & IFF_UP) == 0)
6692 return 0; 6721 return 0;
6693 6722
6694 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD); 6723 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6695 sc->sc_ctrl |= CTRL_SLU; 6724 sc->sc_ctrl |= CTRL_SLU;
6696 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) 6725 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
6697 || (sc->sc_type > WM_T_82543)) { 6726 || (sc->sc_type > WM_T_82543)) {
6698 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX); 6727 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
6699 } else { 6728 } else {
6700 sc->sc_ctrl &= ~CTRL_ASDE; 6729 sc->sc_ctrl &= ~CTRL_ASDE;
6701 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX; 6730 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6702 if (ife->ifm_media & IFM_FDX) 6731 if (ife->ifm_media & IFM_FDX)
6703 sc->sc_ctrl |= CTRL_FD; 6732 sc->sc_ctrl |= CTRL_FD;
6704 switch (IFM_SUBTYPE(ife->ifm_media)) { 6733 switch (IFM_SUBTYPE(ife->ifm_media)) {
6705 case IFM_10_T: 6734 case IFM_10_T:
6706 sc->sc_ctrl |= CTRL_SPEED_10; 6735 sc->sc_ctrl |= CTRL_SPEED_10;
6707 break; 6736 break;
6708 case IFM_100_TX: 6737 case IFM_100_TX:
6709 sc->sc_ctrl |= CTRL_SPEED_100; 6738 sc->sc_ctrl |= CTRL_SPEED_100;
6710 break; 6739 break;
6711 case IFM_1000_T: 6740 case IFM_1000_T:
6712 sc->sc_ctrl |= CTRL_SPEED_1000; 6741 sc->sc_ctrl |= CTRL_SPEED_1000;
6713 break; 6742 break;
6714 default: 6743 default:
6715 panic("wm_gmii_mediachange: bad media 0x%x", 6744 panic("wm_gmii_mediachange: bad media 0x%x",
6716 ife->ifm_media); 6745 ife->ifm_media);
6717 } 6746 }
6718 } 6747 }
6719 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 6748 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6720 if (sc->sc_type <= WM_T_82543) 6749 if (sc->sc_type <= WM_T_82543)
6721 wm_gmii_reset(sc); 6750 wm_gmii_reset(sc);
6722 6751
6723 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO) 6752 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
6724 return 0; 6753 return 0;
6725 return rc; 6754 return rc;
6726} 6755}
6727 6756
6728#define MDI_IO CTRL_SWDPIN(2) 6757#define MDI_IO CTRL_SWDPIN(2)
6729#define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */ 6758#define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
6730#define MDI_CLK CTRL_SWDPIN(3) 6759#define MDI_CLK CTRL_SWDPIN(3)
6731 6760
6732static void 6761static void
6733i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits) 6762i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
6734{ 6763{
6735 uint32_t i, v; 6764 uint32_t i, v;
6736 6765
6737 v = CSR_READ(sc, WMREG_CTRL); 6766 v = CSR_READ(sc, WMREG_CTRL);
6738 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT)); 6767 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6739 v |= MDI_DIR | CTRL_SWDPIO(3); 6768 v |= MDI_DIR | CTRL_SWDPIO(3);
6740 6769
6741 for (i = 1 << (nbits - 1); i != 0; i >>= 1) { 6770 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
6742 if (data & i) 6771 if (data & i)
6743 v |= MDI_IO; 6772 v |= MDI_IO;
6744 else 6773 else
6745 v &= ~MDI_IO; 6774 v &= ~MDI_IO;
6746 CSR_WRITE(sc, WMREG_CTRL, v); 6775 CSR_WRITE(sc, WMREG_CTRL, v);
 6776 CSR_WRITE_FLUSH(sc);
6747 delay(10); 6777 delay(10);
6748 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); 6778 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
 6779 CSR_WRITE_FLUSH(sc);
6749 delay(10); 6780 delay(10);
6750 CSR_WRITE(sc, WMREG_CTRL, v); 6781 CSR_WRITE(sc, WMREG_CTRL, v);
 6782 CSR_WRITE_FLUSH(sc);
6751 delay(10); 6783 delay(10);
6752 } 6784 }
6753} 6785}
6754 6786
6755static uint32_t 6787static uint32_t
6756i82543_mii_recvbits(struct wm_softc *sc) 6788i82543_mii_recvbits(struct wm_softc *sc)
6757{ 6789{
6758 uint32_t v, i, data = 0; 6790 uint32_t v, i, data = 0;
6759 6791
6760 v = CSR_READ(sc, WMREG_CTRL); 6792 v = CSR_READ(sc, WMREG_CTRL);
6761 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT)); 6793 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6762 v |= CTRL_SWDPIO(3); 6794 v |= CTRL_SWDPIO(3);
6763 6795
6764 CSR_WRITE(sc, WMREG_CTRL, v); 6796 CSR_WRITE(sc, WMREG_CTRL, v);
 6797 CSR_WRITE_FLUSH(sc);
6765 delay(10); 6798 delay(10);
6766 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); 6799 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
 6800 CSR_WRITE_FLUSH(sc);
6767 delay(10); 6801 delay(10);
6768 CSR_WRITE(sc, WMREG_CTRL, v); 6802 CSR_WRITE(sc, WMREG_CTRL, v);
 6803 CSR_WRITE_FLUSH(sc);
6769 delay(10); 6804 delay(10);
6770 6805
6771 for (i = 0; i < 16; i++) { 6806 for (i = 0; i < 16; i++) {
6772 data <<= 1; 6807 data <<= 1;
6773 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); 6808 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
 6809 CSR_WRITE_FLUSH(sc);
6774 delay(10); 6810 delay(10);
6775 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO) 6811 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
6776 data |= 1; 6812 data |= 1;
6777 CSR_WRITE(sc, WMREG_CTRL, v); 6813 CSR_WRITE(sc, WMREG_CTRL, v);
 6814 CSR_WRITE_FLUSH(sc);
6778 delay(10); 6815 delay(10);
6779 } 6816 }
6780 6817
6781 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); 6818 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
 6819 CSR_WRITE_FLUSH(sc);
6782 delay(10); 6820 delay(10);
6783 CSR_WRITE(sc, WMREG_CTRL, v); 6821 CSR_WRITE(sc, WMREG_CTRL, v);
 6822 CSR_WRITE_FLUSH(sc);
6784 delay(10); 6823 delay(10);
6785 6824
6786 return data; 6825 return data;
6787} 6826}
6788 6827
6789#undef MDI_IO 6828#undef MDI_IO
6790#undef MDI_DIR 6829#undef MDI_DIR
6791#undef MDI_CLK 6830#undef MDI_CLK
6792 6831
6793/* 6832/*
6794 * wm_gmii_i82543_readreg: [mii interface function] 6833 * wm_gmii_i82543_readreg: [mii interface function]
6795 * 6834 *
6796 * Read a PHY register on the GMII (i82543 version). 6835 * Read a PHY register on the GMII (i82543 version).
6797 */ 6836 */
6798static int 6837static int
6799wm_gmii_i82543_readreg(device_t self, int phy, int reg) 6838wm_gmii_i82543_readreg(device_t self, int phy, int reg)
6800{ 6839{
6801 struct wm_softc *sc = device_private(self); 6840 struct wm_softc *sc = device_private(self);
6802 int rv; 6841 int rv;
6803 6842
6804 i82543_mii_sendbits(sc, 0xffffffffU, 32); 6843 i82543_mii_sendbits(sc, 0xffffffffU, 32);
6805 i82543_mii_sendbits(sc, reg | (phy << 5) | 6844 i82543_mii_sendbits(sc, reg | (phy << 5) |
6806 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14); 6845 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
6807 rv = i82543_mii_recvbits(sc) & 0xffff; 6846 rv = i82543_mii_recvbits(sc) & 0xffff;
6808 6847
6809 DPRINTF(WM_DEBUG_GMII, 6848 DPRINTF(WM_DEBUG_GMII,
6810 ("%s: GMII: read phy %d reg %d -> 0x%04x\n", 6849 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
6811 device_xname(sc->sc_dev), phy, reg, rv)); 6850 device_xname(sc->sc_dev), phy, reg, rv));
6812 6851
6813 return rv; 6852 return rv;
6814} 6853}
6815 6854
6816/* 6855/*
6817 * wm_gmii_i82543_writereg: [mii interface function] 6856 * wm_gmii_i82543_writereg: [mii interface function]
6818 * 6857 *
6819 * Write a PHY register on the GMII (i82543 version). 6858 * Write a PHY register on the GMII (i82543 version).
6820 */ 6859 */
6821static void 6860static void
6822wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val) 6861wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
6823{ 6862{
6824 struct wm_softc *sc = device_private(self); 6863 struct wm_softc *sc = device_private(self);
6825 6864
6826 i82543_mii_sendbits(sc, 0xffffffffU, 32); 6865 i82543_mii_sendbits(sc, 0xffffffffU, 32);
6827 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) | 6866 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
6828 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) | 6867 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
6829 (MII_COMMAND_START << 30), 32); 6868 (MII_COMMAND_START << 30), 32);
6830} 6869}
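/*
 * For reference, the 32-bit write frame assembled above is the standard MDIO
 * frame, shifted out MSB first by i82543_mii_sendbits():
 * bits 31-30 start, 29-28 opcode (write), 27-23 phy address, 22-18 register,
 * 17-16 turnaround (ACK), 15-0 data.
 */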
6831 6870
6832/* 6871/*
6833 * wm_gmii_i82544_readreg: [mii interface function] 6872 * wm_gmii_i82544_readreg: [mii interface function]
6834 * 6873 *
6835 * Read a PHY register on the GMII. 6874 * Read a PHY register on the GMII.
6836 */ 6875 */
6837static int 6876static int
6838wm_gmii_i82544_readreg(device_t self, int phy, int reg) 6877wm_gmii_i82544_readreg(device_t self, int phy, int reg)
6839{ 6878{
6840 struct wm_softc *sc = device_private(self); 6879 struct wm_softc *sc = device_private(self);
6841 uint32_t mdic = 0; 6880 uint32_t mdic = 0;
6842 int i, rv; 6881 int i, rv;
6843 6882
6844 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) | 6883 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
6845 MDIC_REGADD(reg)); 6884 MDIC_REGADD(reg));
6846 6885
6847 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) { 6886 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6848 mdic = CSR_READ(sc, WMREG_MDIC); 6887 mdic = CSR_READ(sc, WMREG_MDIC);
6849 if (mdic & MDIC_READY) 6888 if (mdic & MDIC_READY)
6850 break; 6889 break;
6851 delay(50); 6890 delay(50);
6852 } 6891 }
6853 6892
6854 if ((mdic & MDIC_READY) == 0) { 6893 if ((mdic & MDIC_READY) == 0) {
6855 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n", 6894 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
6856 device_xname(sc->sc_dev), phy, reg); 6895 device_xname(sc->sc_dev), phy, reg);
6857 rv = 0; 6896 rv = 0;
6858 } else if (mdic & MDIC_E) { 6897 } else if (mdic & MDIC_E) {
6859#if 0 /* This is normal if no PHY is present. */ 6898#if 0 /* This is normal if no PHY is present. */
6860 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n", 6899 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
6861 device_xname(sc->sc_dev), phy, reg); 6900 device_xname(sc->sc_dev), phy, reg);
6862#endif 6901#endif
6863 rv = 0; 6902 rv = 0;
6864 } else { 6903 } else {
6865 rv = MDIC_DATA(mdic); 6904 rv = MDIC_DATA(mdic);
6866 if (rv == 0xffff) 6905 if (rv == 0xffff)
6867 rv = 0; 6906 rv = 0;
6868 } 6907 }
6869 6908
6870 return rv; 6909 return rv;
6871} 6910}
6872 6911
6873/* 6912/*
6874 * wm_gmii_i82544_writereg: [mii interface function] 6913 * wm_gmii_i82544_writereg: [mii interface function]
6875 * 6914 *
6876 * Write a PHY register on the GMII. 6915 * Write a PHY register on the GMII.
6877 */ 6916 */
6878static void 6917static void
6879wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val) 6918wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6880{ 6919{
6881 struct wm_softc *sc = device_private(self); 6920 struct wm_softc *sc = device_private(self);
6882 uint32_t mdic = 0; 6921 uint32_t mdic = 0;
6883 int i; 6922 int i;
6884 6923
6885 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) | 6924 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6886 MDIC_REGADD(reg) | MDIC_DATA(val)); 6925 MDIC_REGADD(reg) | MDIC_DATA(val));
6887 6926
6888 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) { 6927 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6889 mdic = CSR_READ(sc, WMREG_MDIC); 6928 mdic = CSR_READ(sc, WMREG_MDIC);
6890 if (mdic & MDIC_READY) 6929 if (mdic & MDIC_READY)
6891 break; 6930 break;
6892 delay(50); 6931 delay(50);
6893 } 6932 }
6894 6933
6895 if ((mdic & MDIC_READY) == 0) 6934 if ((mdic & MDIC_READY) == 0)
6896 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n", 6935 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6897 device_xname(sc->sc_dev), phy, reg); 6936 device_xname(sc->sc_dev), phy, reg);
6898 else if (mdic & MDIC_E) 6937 else if (mdic & MDIC_E)
6899 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n", 6938 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6900 device_xname(sc->sc_dev), phy, reg); 6939 device_xname(sc->sc_dev), phy, reg);
6901} 6940}
6902 6941
6903/* 6942/*
6904 * wm_gmii_i80003_readreg: [mii interface function] 6943 * wm_gmii_i80003_readreg: [mii interface function]
6905 * 6944 *
6906 * Read a PHY register on the kumeran 6945 * Read a PHY register on the kumeran
6907 * This could be handled by the PHY layer if we didn't have to lock the 6946 * This could be handled by the PHY layer if we didn't have to lock the
6908 * resource ... 6947 * resource ...
6909 */ 6948 */
6910static int 6949static int
6911wm_gmii_i80003_readreg(device_t self, int phy, int reg) 6950wm_gmii_i80003_readreg(device_t self, int phy, int reg)
6912{ 6951{
6913 struct wm_softc *sc = device_private(self); 6952 struct wm_softc *sc = device_private(self);
6914 int sem; 6953 int sem;
6915 int rv; 6954 int rv;
6916 6955
6917 if (phy != 1) /* only one PHY on kumeran bus */ 6956 if (phy != 1) /* only one PHY on kumeran bus */
6918 return 0; 6957 return 0;
6919 6958
6920 sem = swfwphysem[sc->sc_funcid]; 6959 sem = swfwphysem[sc->sc_funcid];
6921 if (wm_get_swfw_semaphore(sc, sem)) { 6960 if (wm_get_swfw_semaphore(sc, sem)) {
6922 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 6961 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6923 __func__); 6962 __func__);
6924 return 0; 6963 return 0;
6925 } 6964 }
6926 6965
6927 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) { 6966 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6928 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT, 6967 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6929 reg >> GG82563_PAGE_SHIFT); 6968 reg >> GG82563_PAGE_SHIFT);
6930 } else { 6969 } else {
6931 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT, 6970 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6932 reg >> GG82563_PAGE_SHIFT); 6971 reg >> GG82563_PAGE_SHIFT);
6933 } 6972 }
6934 /* Wait another 200us to work around a bug in the MDIC ready bit */ 6973 /* Wait another 200us to work around a bug in the MDIC ready bit */
6935 delay(200); 6974 delay(200);
6936 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS); 6975 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6937 delay(200); 6976 delay(200);
6938 6977
6939 wm_put_swfw_semaphore(sc, sem); 6978 wm_put_swfw_semaphore(sc, sem);
6940 return rv; 6979 return rv;
6941} 6980}
6942 6981
6943/* 6982/*
6944 * wm_gmii_i80003_writereg: [mii interface function] 6983 * wm_gmii_i80003_writereg: [mii interface function]
6945 * 6984 *
6946 * Write a PHY register on the kumeran. 6985 * Write a PHY register on the kumeran.
6947 * This could be handled by the PHY layer if we didn't have to lock the 6986 * This could be handled by the PHY layer if we didn't have to lock the
6948 * resource ... 6987 * resource ...
6949 */ 6988 */
6950static void 6989static void
6951wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val) 6990wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
6952{ 6991{
6953 struct wm_softc *sc = device_private(self); 6992 struct wm_softc *sc = device_private(self);
6954 int sem; 6993 int sem;
6955 6994
6956 if (phy != 1) /* only one PHY on kumeran bus */ 6995 if (phy != 1) /* only one PHY on kumeran bus */
6957 return; 6996 return;
6958 6997
6959 sem = swfwphysem[sc->sc_funcid]; 6998 sem = swfwphysem[sc->sc_funcid];
6960 if (wm_get_swfw_semaphore(sc, sem)) { 6999 if (wm_get_swfw_semaphore(sc, sem)) {
6961 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 7000 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6962 __func__); 7001 __func__);
6963 return; 7002 return;
6964 } 7003 }
6965 7004
6966 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) { 7005 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6967 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT, 7006 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6968 reg >> GG82563_PAGE_SHIFT); 7007 reg >> GG82563_PAGE_SHIFT);
6969 } else { 7008 } else {
6970 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT, 7009 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6971 reg >> GG82563_PAGE_SHIFT); 7010 reg >> GG82563_PAGE_SHIFT);
6972 } 7011 }
6973 /* Wait another 200us to work around a bug in the MDIC ready bit */ 7012 /* Wait another 200us to work around a bug in the MDIC ready bit */
6974 delay(200); 7013 delay(200);
6975 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val); 7014 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6976 delay(200); 7015 delay(200);
6977 7016
6978 wm_put_swfw_semaphore(sc, sem); 7017 wm_put_swfw_semaphore(sc, sem);
6979} 7018}
6980 7019
6981/* 7020/*
6982 * wm_gmii_bm_readreg: [mii interface function] 7021 * wm_gmii_bm_readreg: [mii interface function]
6983 * 7022 *
6984 * Read a PHY register on the BM PHY 7023 * Read a PHY register on the BM PHY
6985 * This could be handled by the PHY layer if we didn't have to lock the 7024 * This could be handled by the PHY layer if we didn't have to lock the
6986 * resource ... 7025 * resource ...
6987 */ 7026 */
6988static int 7027static int
6989wm_gmii_bm_readreg(device_t self, int phy, int reg) 7028wm_gmii_bm_readreg(device_t self, int phy, int reg)
6990{ 7029{
6991 struct wm_softc *sc = device_private(self); 7030 struct wm_softc *sc = device_private(self);
6992 int sem; 7031 int sem;
6993 int rv; 7032 int rv;
6994 7033
6995 sem = swfwphysem[sc->sc_funcid]; 7034 sem = swfwphysem[sc->sc_funcid];
6996 if (wm_get_swfw_semaphore(sc, sem)) { 7035 if (wm_get_swfw_semaphore(sc, sem)) {
6997 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 7036 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6998 __func__); 7037 __func__);
6999 return 0; 7038 return 0;
7000 } 7039 }
7001 7040
7002 if (reg > BME1000_MAX_MULTI_PAGE_REG) { 7041 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
7003 if (phy == 1) 7042 if (phy == 1)
7004 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT, 7043 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
7005 reg); 7044 reg);
7006 else 7045 else
7007 wm_gmii_i82544_writereg(self, phy, 7046 wm_gmii_i82544_writereg(self, phy,
7008 GG82563_PHY_PAGE_SELECT, 7047 GG82563_PHY_PAGE_SELECT,
7009 reg >> GG82563_PAGE_SHIFT); 7048 reg >> GG82563_PAGE_SHIFT);
7010 } 7049 }
7011 7050
7012 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS); 7051 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
7013 wm_put_swfw_semaphore(sc, sem); 7052 wm_put_swfw_semaphore(sc, sem);
7014 return rv; 7053 return rv;
7015} 7054}
7016 7055
7017/* 7056/*
7018 * wm_gmii_bm_writereg: [mii interface function] 7057 * wm_gmii_bm_writereg: [mii interface function]
7019 * 7058 *
7020 * Write a PHY register on the BM PHY. 7059 * Write a PHY register on the BM PHY.
7021 * This could be handled by the PHY layer if we didn't have to lock the 7060 * This could be handled by the PHY layer if we didn't have to lock the
7022 * resource ... 7061 * resource ...
7023 */ 7062 */
7024static void 7063static void
7025wm_gmii_bm_writereg(device_t self, int phy, int reg, int val) 7064wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
7026{ 7065{
7027 struct wm_softc *sc = device_private(self); 7066 struct wm_softc *sc = device_private(self);
7028 int sem; 7067 int sem;
7029 7068
7030 sem = swfwphysem[sc->sc_funcid]; 7069 sem = swfwphysem[sc->sc_funcid];
7031 if (wm_get_swfw_semaphore(sc, sem)) { 7070 if (wm_get_swfw_semaphore(sc, sem)) {
7032 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 7071 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7033 __func__); 7072 __func__);
7034 return; 7073 return;
7035 } 7074 }
7036 7075
7037 if (reg > BME1000_MAX_MULTI_PAGE_REG) { 7076 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
7038 if (phy == 1) 7077 if (phy == 1)
7039 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT, 7078 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
7040 reg); 7079 reg);
7041 else 7080 else
7042 wm_gmii_i82544_writereg(self, phy, 7081 wm_gmii_i82544_writereg(self, phy,
7043 GG82563_PHY_PAGE_SELECT, 7082 GG82563_PHY_PAGE_SELECT,
7044 reg >> GG82563_PAGE_SHIFT); 7083 reg >> GG82563_PAGE_SHIFT);
7045 } 7084 }
7046 7085
7047 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val); 7086 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
7048 wm_put_swfw_semaphore(sc, sem); 7087 wm_put_swfw_semaphore(sc, sem);
7049} 7088}
7050 7089
7051static void 7090static void
7052wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd) 7091wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
7053{ 7092{
7054 struct wm_softc *sc = device_private(self); 7093 struct wm_softc *sc = device_private(self);
7055 uint16_t regnum = BM_PHY_REG_NUM(offset); 7094 uint16_t regnum = BM_PHY_REG_NUM(offset);
7056 uint16_t wuce; 7095 uint16_t wuce;
7057 7096
7058 /* XXX Gig must be disabled for MDIO accesses to page 800 */ 7097 /* XXX Gig must be disabled for MDIO accesses to page 800 */
7059 if (sc->sc_type == WM_T_PCH) { 7098 if (sc->sc_type == WM_T_PCH) {
7060 /* XXX e1000 driver does nothing... why? */ 7099 /* XXX e1000 driver does nothing... why? */
7061 } 7100 }
7062 7101
7063 /* Set page 769 */ 7102 /* Set page 769 */
7064 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT, 7103 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7065 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT); 7104 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7066 7105
7067 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG); 7106 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
7068 7107
7069 wuce &= ~BM_WUC_HOST_WU_BIT; 7108 wuce &= ~BM_WUC_HOST_WU_BIT;
7070 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, 7109 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
7071 wuce | BM_WUC_ENABLE_BIT); 7110 wuce | BM_WUC_ENABLE_BIT);
7072 7111
7073 /* Select page 800 */ 7112 /* Select page 800 */
7074 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT, 7113 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7075 BM_WUC_PAGE << BME1000_PAGE_SHIFT); 7114 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
7076 7115
7077 /* Write page 800 */ 7116 /* Write page 800 */
7078 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum); 7117 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
7079 7118
7080 if (rd) 7119 if (rd)
7081 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE); 7120 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
7082 else 7121 else
7083 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val); 7122 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
7084 7123
7085 /* Set page 769 */ 7124 /* Set page 769 */
7086 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT, 7125 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7087 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT); 7126 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7088 7127
7089 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce); 7128 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
7090} 7129}
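/*
 * Usage sketch for the page-800 (wakeup) accessor above: one function serves
 * both directions, selected by the rd flag. The function name and register
 * argument below are hypothetical and only illustrate the calling convention.
 */
static void
wm_wuc_reg_example(device_t self, int wuc_reg)
{
	int16_t val;

	/* rd = 1: fill val from the wakeup-page register. */
	wm_access_phy_wakeup_reg_bm(self, wuc_reg, &val, 1);

	/* rd = 0: write the (possibly modified) val back. */
	val |= 0x0001;
	wm_access_phy_wakeup_reg_bm(self, wuc_reg, &val, 0);
}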
7091 7130
7092/* 7131/*
7093 * wm_gmii_hv_readreg: [mii interface function] 7132 * wm_gmii_hv_readreg: [mii interface function]
7094 * 7133 *
7095 * Read a PHY register on the HV (PCH) PHY 7134 * Read a PHY register on the HV (PCH) PHY
7096 * This could be handled by the PHY layer if we didn't have to lock the 7135 * This could be handled by the PHY layer if we didn't have to lock the
7097 * resource ... 7136 * resource ...
7098 */ 7137 */
7099static int 7138static int
7100wm_gmii_hv_readreg(device_t self, int phy, int reg) 7139wm_gmii_hv_readreg(device_t self, int phy, int reg)
7101{ 7140{
7102 struct wm_softc *sc = device_private(self); 7141 struct wm_softc *sc = device_private(self);
7103 uint16_t page = BM_PHY_REG_PAGE(reg); 7142 uint16_t page = BM_PHY_REG_PAGE(reg);
7104 uint16_t regnum = BM_PHY_REG_NUM(reg); 7143 uint16_t regnum = BM_PHY_REG_NUM(reg);
7105 uint16_t val; 7144 uint16_t val;
7106 int rv; 7145 int rv;
7107 7146
7108 if (wm_get_swfwhw_semaphore(sc)) { 7147 if (wm_get_swfwhw_semaphore(sc)) {
7109 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 7148 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7110 __func__); 7149 __func__);
7111 return 0; 7150 return 0;
7112 } 7151 }
7113 7152
7114 /* XXX Workaround failure in MDIO access while cable is disconnected */ 7153 /* XXX Workaround failure in MDIO access while cable is disconnected */
7115 if (sc->sc_phytype == WMPHY_82577) { 7154 if (sc->sc_phytype == WMPHY_82577) {
7116 /* XXX must write */ 7155 /* XXX must write */
7117 } 7156 }
7118 7157
7119 /* Page 800 works differently than the rest so it has its own func */ 7158 /* Page 800 works differently than the rest so it has its own func */
7120 if (page == BM_WUC_PAGE) { 7159 if (page == BM_WUC_PAGE) {
7121 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1); 7160 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
7122 return val; 7161 return val;
7123 } 7162 }
7124 7163
7125 /* 7164 /*
7126 * Pages lower than 768 work differently from the rest, so they have 7165 * Pages lower than 768 work differently from the rest, so they have
7127 * their own function 7166 * their own function
7128 */ 7167 */
7129 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) { 7168 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7130 printf("gmii_hv_readreg!!!\n"); 7169 printf("gmii_hv_readreg!!!\n");
7131 return 0; 7170 return 0;
7132 } 7171 }
7133 7172
7134 if (regnum > BME1000_MAX_MULTI_PAGE_REG) { 7173 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7135 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT, 7174 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7136 page << BME1000_PAGE_SHIFT); 7175 page << BME1000_PAGE_SHIFT);
7137 } 7176 }
7138 7177
7139 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR); 7178 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
7140 wm_put_swfwhw_semaphore(sc); 7179 wm_put_swfwhw_semaphore(sc);
7141 return rv; 7180 return rv;
7142} 7181}
7143 7182
7144/* 7183/*
7145 * wm_gmii_hv_writereg: [mii interface function] 7184 * wm_gmii_hv_writereg: [mii interface function]
7146 * 7185 *
7147 * Write a PHY register on the HV (PCH) PHY. 7186 * Write a PHY register on the HV (PCH) PHY.
7148 * This could be handled by the PHY layer if we didn't have to lock the 7187 * This could be handled by the PHY layer if we didn't have to lock the
7149 * resource ... 7188 * resource ...
7150 */ 7189 */
7151static void 7190static void
7152wm_gmii_hv_writereg(device_t self, int phy, int reg, int val) 7191wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
7153{ 7192{
7154 struct wm_softc *sc = device_private(self); 7193 struct wm_softc *sc = device_private(self);
7155 uint16_t page = BM_PHY_REG_PAGE(reg); 7194 uint16_t page = BM_PHY_REG_PAGE(reg);
7156 uint16_t regnum = BM_PHY_REG_NUM(reg); 7195 uint16_t regnum = BM_PHY_REG_NUM(reg);
7157 7196
7158 if (wm_get_swfwhw_semaphore(sc)) { 7197 if (wm_get_swfwhw_semaphore(sc)) {
7159 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 7198 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7160 __func__); 7199 __func__);
7161 return; 7200 return;
7162 } 7201 }
7163 7202
7164 /* XXX Workaround failure in MDIO access while cable is disconnected */ 7203 /* XXX Workaround failure in MDIO access while cable is disconnected */
7165 7204
7166 /* Page 800 works differently than the rest so it has its own func */ 7205 /* Page 800 works differently than the rest so it has its own func */
7167 if (page == BM_WUC_PAGE) { 7206 if (page == BM_WUC_PAGE) {
7168 uint16_t tmp; 7207 uint16_t tmp;
7169 7208
7170 tmp = val; 7209 tmp = val;
7171 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0); 7210 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
7172 return; 7211 return;
7173 } 7212 }
7174 7213
7175 /* 7214 /*
7176 * Pages lower than 768 work differently from the rest, so they have 7215 * Pages lower than 768 work differently from the rest, so they have
7177 * their own function 7216 * their own function
7178 */ 7217 */
7179 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) { 7218 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7180 printf("gmii_hv_writereg!!!\n"); 7219 printf("gmii_hv_writereg!!!\n");
7181 return; 7220 return;
7182 } 7221 }
7183 7222
7184 /* 7223 /*
7185 * XXX Workaround MDIO accesses being disabled after entering IEEE 7224 * XXX Workaround MDIO accesses being disabled after entering IEEE
7186 * Power Down (whenever bit 11 of the PHY control register is set) 7225 * Power Down (whenever bit 11 of the PHY control register is set)
7187 */ 7226 */
7188 7227
7189 if (regnum > BME1000_MAX_MULTI_PAGE_REG) { 7228 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7190 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT, 7229 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7191 page << BME1000_PAGE_SHIFT); 7230 page << BME1000_PAGE_SHIFT);
7192 } 7231 }
7193 7232
7194 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val); 7233 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
7195 wm_put_swfwhw_semaphore(sc); 7234 wm_put_swfwhw_semaphore(sc);
7196} 7235}
7197 7236
7198/* 7237/*
7199 * wm_sgmii_uses_mdio 7238 * wm_sgmii_uses_mdio
7200 * 7239 *
7201 * Check whether the transaction is to the internal PHY or the external 7240 * Check whether the transaction is to the internal PHY or the external
7202 * MDIO interface. Return true if it's MDIO. 7241 * MDIO interface. Return true if it's MDIO.
7203 */ 7242 */
7204static bool 7243static bool
7205wm_sgmii_uses_mdio(struct wm_softc *sc) 7244wm_sgmii_uses_mdio(struct wm_softc *sc)
7206{ 7245{
7207 uint32_t reg; 7246 uint32_t reg;
7208 bool ismdio = false; 7247 bool ismdio = false;
7209 7248
7210 switch (sc->sc_type) { 7249 switch (sc->sc_type) {
7211 case WM_T_82575: 7250 case WM_T_82575:
7212 case WM_T_82576: 7251 case WM_T_82576:
7213 reg = CSR_READ(sc, WMREG_MDIC); 7252 reg = CSR_READ(sc, WMREG_MDIC);
7214 ismdio = ((reg & MDIC_DEST) != 0); 7253 ismdio = ((reg & MDIC_DEST) != 0);
7215 break; 7254 break;
7216 case WM_T_82580: 7255 case WM_T_82580:
7217 case WM_T_82580ER: 7256 case WM_T_82580ER:
7218 case WM_T_I350: 7257 case WM_T_I350:
7219 case WM_T_I354: 7258 case WM_T_I354:
7220 case WM_T_I210: 7259 case WM_T_I210:
7221 case WM_T_I211: 7260 case WM_T_I211:
7222 reg = CSR_READ(sc, WMREG_MDICNFG); 7261 reg = CSR_READ(sc, WMREG_MDICNFG);
7223 ismdio = ((reg & MDICNFG_DEST) != 0); 7262 ismdio = ((reg & MDICNFG_DEST) != 0);
7224 break; 7263 break;
7225 default: 7264 default:
7226 break; 7265 break;
7227 } 7266 }
7228 7267
7229 return ismdio; 7268 return ismdio;
7230} 7269}
7231 7270
7232/* 7271/*
7233 * wm_sgmii_readreg: [mii interface function] 7272 * wm_sgmii_readreg: [mii interface function]
7234 * 7273 *
7235 * Read a PHY register on the SGMII 7274 * Read a PHY register on the SGMII
7236 * This could be handled by the PHY layer if we didn't have to lock the 7275 * This could be handled by the PHY layer if we didn't have to lock the
7237 * resource ... 7276 * resource ...
7238 */ 7277 */
7239static int 7278static int
7240wm_sgmii_readreg(device_t self, int phy, int reg) 7279wm_sgmii_readreg(device_t self, int phy, int reg)
7241{ 7280{
7242 struct wm_softc *sc = device_private(self); 7281 struct wm_softc *sc = device_private(self);
7243 uint32_t i2ccmd; 7282 uint32_t i2ccmd;
7244 int i, rv; 7283 int i, rv;
7245 7284
7246 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) { 7285 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7247 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 7286 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7248 __func__); 7287 __func__);
7249 return 0; 7288 return 0;
7250 } 7289 }
7251 7290
7252 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT) 7291 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7253 | (phy << I2CCMD_PHY_ADDR_SHIFT) 7292 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7254 | I2CCMD_OPCODE_READ; 7293 | I2CCMD_OPCODE_READ;
7255 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd); 7294 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7256 7295
7257 /* Poll the ready bit */ 7296 /* Poll the ready bit */
7258 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) { 7297 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7259 delay(50); 7298 delay(50);
7260 i2ccmd = CSR_READ(sc, WMREG_I2CCMD); 7299 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7261 if (i2ccmd & I2CCMD_READY) 7300 if (i2ccmd & I2CCMD_READY)
7262 break; 7301 break;
7263 } 7302 }
7264 if ((i2ccmd & I2CCMD_READY) == 0) 7303 if ((i2ccmd & I2CCMD_READY) == 0)
7265 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n"); 7304 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
7266 if ((i2ccmd & I2CCMD_ERROR) != 0) 7305 if ((i2ccmd & I2CCMD_ERROR) != 0)
7267 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n"); 7306 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7268 7307
7269 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00); 7308 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
7270 7309
7271 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]); 7310 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7272 return rv; 7311 return rv;
7273} 7312}
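/*
 * The expression above undoes the byte swap in the I2CCMD data field.
 * Worked example (hypothetical value): a field reading of 0x3c00 yields
 * ((0x3c00 >> 8) & 0x00ff) | ((0x3c00 << 8) & 0xff00) = 0x003c.
 */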
7274 7313
7275/* 7314/*
7276 * wm_sgmii_writereg: [mii interface function] 7315 * wm_sgmii_writereg: [mii interface function]
7277 * 7316 *
7278 * Write a PHY register on the SGMII. 7317 * Write a PHY register on the SGMII.
7279 * This could be handled by the PHY layer if we didn't have to lock the 7318 * This could be handled by the PHY layer if we didn't have to lock the
7280 * resource ... 7319 * resource ...
7281 */ 7320 */
7282static void 7321static void
7283wm_sgmii_writereg(device_t self, int phy, int reg, int val) 7322wm_sgmii_writereg(device_t self, int phy, int reg, int val)
7284{ 7323{
7285 struct wm_softc *sc = device_private(self); 7324 struct wm_softc *sc = device_private(self);
7286 uint32_t i2ccmd; 7325 uint32_t i2ccmd;
7287 int i; 7326 int i;
7288 7327
7289 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) { 7328 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7290 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 7329 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7291 __func__); 7330 __func__);
7292 return; 7331 return;
7293 } 7332 }
7294 7333
7295 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT) 7334 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7296 | (phy << I2CCMD_PHY_ADDR_SHIFT) 7335 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7297 | I2CCMD_OPCODE_WRITE; 7336 | I2CCMD_OPCODE_WRITE;
7298 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd); 7337 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7299 7338
7300 /* Poll the ready bit */ 7339 /* Poll the ready bit */
7301 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) { 7340 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7302 delay(50); 7341 delay(50);
7303 i2ccmd = CSR_READ(sc, WMREG_I2CCMD); 7342 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7304 if (i2ccmd & I2CCMD_READY) 7343 if (i2ccmd & I2CCMD_READY)
7305 break; 7344 break;
7306 } 7345 }
7307 if ((i2ccmd & I2CCMD_READY) == 0) 7346 if ((i2ccmd & I2CCMD_READY) == 0)
7308 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n"); 7347 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
7309 if ((i2ccmd & I2CCMD_ERROR) != 0) 7348 if ((i2ccmd & I2CCMD_ERROR) != 0)
7310 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n"); 7349 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7311 7350
7312 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM); 7351 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
7313} 7352}
7314 7353
7315/* 7354/*
7316 * wm_gmii_82580_readreg: [mii interface function] 7355 * wm_gmii_82580_readreg: [mii interface function]
7317 * 7356 *
7318 * Read a PHY register on the 82580 and I350. 7357 * Read a PHY register on the 82580 and I350.
7319 * This could be handled by the PHY layer if we didn't have to lock the 7358 * This could be handled by the PHY layer if we didn't have to lock the
7320 * resource ... 7359 * resource ...
7321 */ 7360 */
7322static int 7361static int
7323wm_gmii_82580_readreg(device_t self, int phy, int reg) 7362wm_gmii_82580_readreg(device_t self, int phy, int reg)
7324{ 7363{
7325 struct wm_softc *sc = device_private(self); 7364 struct wm_softc *sc = device_private(self);
7326 int sem; 7365 int sem;
7327 int rv; 7366 int rv;
7328 7367
7329 sem = swfwphysem[sc->sc_funcid]; 7368 sem = swfwphysem[sc->sc_funcid];
7330 if (wm_get_swfw_semaphore(sc, sem)) { 7369 if (wm_get_swfw_semaphore(sc, sem)) {
7331 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 7370 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7332 __func__); 7371 __func__);
7333 return 0; 7372 return 0;
7334 } 7373 }
7335 7374
7336 rv = wm_gmii_i82544_readreg(self, phy, reg); 7375 rv = wm_gmii_i82544_readreg(self, phy, reg);
7337 7376
7338 wm_put_swfw_semaphore(sc, sem); 7377 wm_put_swfw_semaphore(sc, sem);
7339 return rv; 7378 return rv;
7340} 7379}
7341 7380
7342/* 7381/*
7343 * wm_gmii_82580_writereg: [mii interface function] 7382 * wm_gmii_82580_writereg: [mii interface function]
7344 * 7383 *
7345 * Write a PHY register on the 82580 and I350. 7384 * Write a PHY register on the 82580 and I350.
7346 * This could be handled by the PHY layer if we didn't have to lock the 7385 * This could be handled by the PHY layer if we didn't have to lock the
7347 * resource ... 7386 * resource ...
7348 */ 7387 */
7349static void 7388static void
7350wm_gmii_82580_writereg(device_t self, int phy, int reg, int val) 7389wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
7351{ 7390{
7352 struct wm_softc *sc = device_private(self); 7391 struct wm_softc *sc = device_private(self);
7353 int sem; 7392 int sem;
7354 7393
7355 sem = swfwphysem[sc->sc_funcid]; 7394 sem = swfwphysem[sc->sc_funcid];
7356 if (wm_get_swfw_semaphore(sc, sem)) { 7395 if (wm_get_swfw_semaphore(sc, sem)) {
7357 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 7396 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7358 __func__); 7397 __func__);
7359 return; 7398 return;
7360 } 7399 }
7361 7400
7362 wm_gmii_i82544_writereg(self, phy, reg, val); 7401 wm_gmii_i82544_writereg(self, phy, reg, val);
7363 7402
7364 wm_put_swfw_semaphore(sc, sem); 7403 wm_put_swfw_semaphore(sc, sem);
7365} 7404}
7366 7405
7367/* 7406/*
7368 * wm_gmii_statchg: [mii interface function] 7407 * wm_gmii_statchg: [mii interface function]
7369 * 7408 *
7370 * Callback from MII layer when media changes. 7409 * Callback from MII layer when media changes.
7371 */ 7410 */
7372static void 7411static void
7373wm_gmii_statchg(struct ifnet *ifp) 7412wm_gmii_statchg(struct ifnet *ifp)
7374{ 7413{
7375 struct wm_softc *sc = ifp->if_softc; 7414 struct wm_softc *sc = ifp->if_softc;
7376 struct mii_data *mii = &sc->sc_mii; 7415 struct mii_data *mii = &sc->sc_mii;
7377 7416
7378 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE); 7417 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
7379 sc->sc_tctl &= ~TCTL_COLD(0x3ff); 7418 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7380 sc->sc_fcrtl &= ~FCRTL_XONE; 7419 sc->sc_fcrtl &= ~FCRTL_XONE;
7381 7420
7382 /* 7421 /*
7383 * Get flow control negotiation result. 7422 * Get flow control negotiation result.
7384 */ 7423 */
7385 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO && 7424 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
7386 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) { 7425 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
7387 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK; 7426 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
7388 mii->mii_media_active &= ~IFM_ETH_FMASK; 7427 mii->mii_media_active &= ~IFM_ETH_FMASK;
7389 } 7428 }
7390 7429
7391 if (sc->sc_flowflags & IFM_FLOW) { 7430 if (sc->sc_flowflags & IFM_FLOW) {
7392 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) { 7431 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
7393 sc->sc_ctrl |= CTRL_TFCE; 7432 sc->sc_ctrl |= CTRL_TFCE;
7394 sc->sc_fcrtl |= FCRTL_XONE; 7433 sc->sc_fcrtl |= FCRTL_XONE;
7395 } 7434 }
7396 if (sc->sc_flowflags & IFM_ETH_RXPAUSE) 7435 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
7397 sc->sc_ctrl |= CTRL_RFCE; 7436 sc->sc_ctrl |= CTRL_RFCE;
7398 } 7437 }
7399 7438
7400 if (sc->sc_mii.mii_media_active & IFM_FDX) { 7439 if (sc->sc_mii.mii_media_active & IFM_FDX) {
7401 DPRINTF(WM_DEBUG_LINK, 7440 DPRINTF(WM_DEBUG_LINK,
7402 ("%s: LINK: statchg: FDX\n", ifp->if_xname)); 7441 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
7403 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX); 7442 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7404 } else { 7443 } else {
7405 DPRINTF(WM_DEBUG_LINK, 7444 DPRINTF(WM_DEBUG_LINK,
7406 ("%s: LINK: statchg: HDX\n", ifp->if_xname)); 7445 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
7407 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX); 7446 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7408 } 7447 }
7409 7448
7410 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 7449 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7411 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); 7450 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7412 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL 7451 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
7413 : WMREG_FCRTL, sc->sc_fcrtl); 7452 : WMREG_FCRTL, sc->sc_fcrtl);
7414 if (sc->sc_type == WM_T_80003) { 7453 if (sc->sc_type == WM_T_80003) {
7415 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) { 7454 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7416 case IFM_1000_T: 7455 case IFM_1000_T:
7417 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, 7456 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7418 KUMCTRLSTA_HD_CTRL_1000_DEFAULT); 7457 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
7419 sc->sc_tipg = TIPG_1000T_80003_DFLT; 7458 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7420 break; 7459 break;
7421 default: 7460 default:
7422 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, 7461 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7423 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT); 7462 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
7424 sc->sc_tipg = TIPG_10_100_80003_DFLT; 7463 sc->sc_tipg = TIPG_10_100_80003_DFLT;
7425 break; 7464 break;
7426 } 7465 }
7427 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg); 7466 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7428 } 7467 }
7429} 7468}
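/*
 * Example of the flow-control mapping above: a negotiated result of
 * IFM_FLOW | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE sets CTRL_TFCE, CTRL_RFCE and
 * FCRTL_XONE; a TX-pause-only result sets just CTRL_TFCE and FCRTL_XONE.
 */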
7430 7469
7431/* 7470/*
7432 * wm_kmrn_readreg: 7471 * wm_kmrn_readreg:
7433 * 7472 *
7434 * Read a kumeran register 7473 * Read a kumeran register
7435 */ 7474 */
7436static int 7475static int
7437wm_kmrn_readreg(struct wm_softc *sc, int reg) 7476wm_kmrn_readreg(struct wm_softc *sc, int reg)
7438{ 7477{
7439 int rv; 7478 int rv;
7440 7479
7441 if (sc->sc_flags == WM_F_SWFW_SYNC) { 7480 if (sc->sc_flags == WM_F_SWFW_SYNC) {
7442 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) { 7481 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7443 aprint_error_dev(sc->sc_dev, 7482 aprint_error_dev(sc->sc_dev,
7444 "%s: failed to get semaphore\n", __func__); 7483 "%s: failed to get semaphore\n", __func__);
7445 return 0; 7484 return 0;
7446 } 7485 }
7447 } else if (sc->sc_flags == WM_F_SWFWHW_SYNC) { 7486 } else if (sc->sc_flags == WM_F_SWFWHW_SYNC) {
7448 if (wm_get_swfwhw_semaphore(sc)) { 7487 if (wm_get_swfwhw_semaphore(sc)) {
7449 aprint_error_dev(sc->sc_dev, 7488 aprint_error_dev(sc->sc_dev,
7450 "%s: failed to get semaphore\n", __func__); 7489 "%s: failed to get semaphore\n", __func__);
7451 return 0; 7490 return 0;
7452 } 7491 }
7453 } 7492 }
7454 7493
7455 CSR_WRITE(sc, WMREG_KUMCTRLSTA, 7494 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7456 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | 7495 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7457 KUMCTRLSTA_REN); 7496 KUMCTRLSTA_REN);
 7497 CSR_WRITE_FLUSH(sc);
7458 delay(2); 7498 delay(2);
7459 7499
7460 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK; 7500 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
7461 7501
7462 if (sc->sc_flags == WM_F_SWFW_SYNC) 7502 if (sc->sc_flags == WM_F_SWFW_SYNC)
7463 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM); 7503 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7464 else if (sc->sc_flags == WM_F_SWFWHW_SYNC) 7504 else if (sc->sc_flags == WM_F_SWFWHW_SYNC)
7465 wm_put_swfwhw_semaphore(sc); 7505 wm_put_swfwhw_semaphore(sc);
7466 7506
7467 return rv; 7507 return rv;
7468} 7508}
7469 7509
7470/* 7510/*
7471 * wm_kmrn_writereg: 7511 * wm_kmrn_writereg:
7472 * 7512 *
7473 * Write a kumeran register 7513 * Write a kumeran register
7474 */ 7514 */
7475static void 7515static void
7476wm_kmrn_writereg(struct wm_softc *sc, int reg, int val) 7516wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
7477{ 7517{
7478 7518
7479 if (sc->sc_flags == WM_F_SWFW_SYNC) { 7519 if (sc->sc_flags == WM_F_SWFW_SYNC) {
7480 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) { 7520 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7481 aprint_error_dev(sc->sc_dev, 7521 aprint_error_dev(sc->sc_dev,
7482 "%s: failed to get semaphore\n", __func__); 7522 "%s: failed to get semaphore\n", __func__);
7483 return; 7523 return;
7484 } 7524 }
7485 } else if (sc->sc_flags == WM_F_SWFWHW_SYNC) { 7525 } else if (sc->sc_flags == WM_F_SWFWHW_SYNC) {
7486 if (wm_get_swfwhw_semaphore(sc)) { 7526 if (wm_get_swfwhw_semaphore(sc)) {
7487 aprint_error_dev(sc->sc_dev, 7527 aprint_error_dev(sc->sc_dev,
7488 "%s: failed to get semaphore\n", __func__); 7528 "%s: failed to get semaphore\n", __func__);
7489 return; 7529 return;
7490 } 7530 }
7491 } 7531 }
7492 7532
7493 CSR_WRITE(sc, WMREG_KUMCTRLSTA, 7533 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7494 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | 7534 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7495 (val & KUMCTRLSTA_MASK)); 7535 (val & KUMCTRLSTA_MASK));
7496 7536
7497 if (sc->sc_flags == WM_F_SWFW_SYNC) 7537 if (sc->sc_flags == WM_F_SWFW_SYNC)
7498 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM); 7538 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7499 else if (sc->sc_flags == WM_F_SWFWHW_SYNC) 7539 else if (sc->sc_flags == WM_F_SWFWHW_SYNC)
7500 wm_put_swfwhw_semaphore(sc); 7540 wm_put_swfwhw_semaphore(sc);
7501} 7541}
7502 7542
7503static int 7543static int
7504wm_is_onboard_nvm_eeprom(struct wm_softc *sc) 7544wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
7505{ 7545{
7506 uint32_t eecd = 0; 7546 uint32_t eecd = 0;
7507 7547
7508 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574 7548 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
7509 || sc->sc_type == WM_T_82583) { 7549 || sc->sc_type == WM_T_82583) {
7510 eecd = CSR_READ(sc, WMREG_EECD); 7550 eecd = CSR_READ(sc, WMREG_EECD);
7511 7551
7512 /* Isolate bits 15 & 16 */ 7552 /* Isolate bits 15 & 16 */
7513 eecd = ((eecd >> 15) & 0x03); 7553 eecd = ((eecd >> 15) & 0x03);
7514 7554
7515 /* If both bits are set, device is Flash type */ 7555 /* If both bits are set, device is Flash type */
7516 if (eecd == 0x03) 7556 if (eecd == 0x03)
7517 return 0; 7557 return 0;
7518 } 7558 }
7519 return 1; 7559 return 1;
7520} 7560}
7521 7561
7522static int 7562static int
7523wm_get_swsm_semaphore(struct wm_softc *sc) 7563wm_get_swsm_semaphore(struct wm_softc *sc)
7524{ 7564{
7525 int32_t timeout; 7565 int32_t timeout;
7526 uint32_t swsm; 7566 uint32_t swsm;
7527 7567
7528 /* Get the FW semaphore. */ 7568 /* Get the FW semaphore. */
7529 timeout = 1000 + 1; /* XXX */ 7569 timeout = 1000 + 1; /* XXX */
7530 while (timeout) { 7570 while (timeout) {
7531 swsm = CSR_READ(sc, WMREG_SWSM); 7571 swsm = CSR_READ(sc, WMREG_SWSM);
7532 swsm |= SWSM_SWESMBI; 7572 swsm |= SWSM_SWESMBI;
7533 CSR_WRITE(sc, WMREG_SWSM, swsm); 7573 CSR_WRITE(sc, WMREG_SWSM, swsm);
7534 /* if we managed to set the bit we got the semaphore. */ 7574 /* if we managed to set the bit we got the semaphore. */
7535 swsm = CSR_READ(sc, WMREG_SWSM); 7575 swsm = CSR_READ(sc, WMREG_SWSM);
7536 if (swsm & SWSM_SWESMBI) 7576 if (swsm & SWSM_SWESMBI)
7537 break; 7577 break;
7538 7578
7539 delay(50); 7579 delay(50);
7540 timeout--; 7580 timeout--;
7541 } 7581 }
7542 7582
7543 if (timeout == 0) { 7583 if (timeout == 0) {
7544 aprint_error_dev(sc->sc_dev, "could not acquire EEPROM GNT\n"); 7584 aprint_error_dev(sc->sc_dev, "could not acquire EEPROM GNT\n");
7545 /* Release semaphores */ 7585 /* Release semaphores */
7546 wm_put_swsm_semaphore(sc); 7586 wm_put_swsm_semaphore(sc);
7547 return 1; 7587 return 1;
7548 } 7588 }
7549 return 0; 7589 return 0;
7550} 7590}
7551 7591
7552static void 7592static void
7553wm_put_swsm_semaphore(struct wm_softc *sc) 7593wm_put_swsm_semaphore(struct wm_softc *sc)
7554{ 7594{
7555 uint32_t swsm; 7595 uint32_t swsm;
7556 7596
7557 swsm = CSR_READ(sc, WMREG_SWSM); 7597 swsm = CSR_READ(sc, WMREG_SWSM);
7558 swsm &= ~(SWSM_SWESMBI); 7598 swsm &= ~(SWSM_SWESMBI);
7559 CSR_WRITE(sc, WMREG_SWSM, swsm); 7599 CSR_WRITE(sc, WMREG_SWSM, swsm);
7560} 7600}
7561 7601
7562static int 7602static int
7563wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask) 7603wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
7564{ 7604{
7565 uint32_t swfw_sync; 7605 uint32_t swfw_sync;
7566 uint32_t swmask = mask << SWFW_SOFT_SHIFT; 7606 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
7567 uint32_t fwmask = mask << SWFW_FIRM_SHIFT; 7607 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
7568 int timeout = 200; 7608 int timeout = 200;
7569 7609
7570 for (timeout = 0; timeout < 200; timeout++) { 7610 for (timeout = 0; timeout < 200; timeout++) {
7571 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) { 7611 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
7572 if (wm_get_swsm_semaphore(sc)) { 7612 if (wm_get_swsm_semaphore(sc)) {
7573 aprint_error_dev(sc->sc_dev, 7613 aprint_error_dev(sc->sc_dev,
7574 "%s: failed to get semaphore\n", 7614 "%s: failed to get semaphore\n",
7575 __func__); 7615 __func__);
7576 return 1; 7616 return 1;
7577 } 7617 }
7578 } 7618 }
7579 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC); 7619 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
7580 if ((swfw_sync & (swmask | fwmask)) == 0) { 7620 if ((swfw_sync & (swmask | fwmask)) == 0) {
7581 swfw_sync |= swmask; 7621 swfw_sync |= swmask;
7582 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync); 7622 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
7583 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) 7623 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7584 wm_put_swsm_semaphore(sc); 7624 wm_put_swsm_semaphore(sc);
7585 return 0; 7625 return 0;
7586 } 7626 }
7587 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) 7627 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7588 wm_put_swsm_semaphore(sc); 7628 wm_put_swsm_semaphore(sc);
7589 delay(5000); 7629 delay(5000);
7590 } 7630 }
7591 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n", 7631 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
7592 device_xname(sc->sc_dev), mask, swfw_sync); 7632 device_xname(sc->sc_dev), mask, swfw_sync);
7593 return 1; 7633 return 1;
7594} 7634}
7595 7635
7596static void 7636static void
7597wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask) 7637wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
7598{ 7638{
7599 uint32_t swfw_sync; 7639 uint32_t swfw_sync;
7600 7640
7601 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) { 7641 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
7602 while (wm_get_swsm_semaphore(sc) != 0) 7642 while (wm_get_swsm_semaphore(sc) != 0)
7603 continue; 7643 continue;
7604 } 7644 }
7605 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC); 7645 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
7606 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT); 7646 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
7607 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync); 7647 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
7608 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) 7648 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7609 wm_put_swsm_semaphore(sc); 7649 wm_put_swsm_semaphore(sc);
7610} 7650}
7611 7651
7612static int 7652static int
7613wm_get_swfwhw_semaphore(struct wm_softc *sc) 7653wm_get_swfwhw_semaphore(struct wm_softc *sc)
7614{ 7654{
7615 uint32_t ext_ctrl; 7655 uint32_t ext_ctrl;
7616 int timeout = 200; 7656 int timeout = 200;
7617 7657
7618 for (timeout = 0; timeout < 200; timeout++) { 7658 for (timeout = 0; timeout < 200; timeout++) {
7619 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR); 7659 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7620 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; 7660 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
7621 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl); 7661 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
7622 7662
7623 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR); 7663 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7624 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG) 7664 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
7625 return 0; 7665 return 0;
7626 delay(5000); 7666 delay(5000);
7627 } 7667 }
7628 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n", 7668 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
7629 device_xname(sc->sc_dev), ext_ctrl); 7669 device_xname(sc->sc_dev), ext_ctrl);
7630 return 1; 7670 return 1;
7631} 7671}
7632 7672
7633static void 7673static void
7634wm_put_swfwhw_semaphore(struct wm_softc *sc) 7674wm_put_swfwhw_semaphore(struct wm_softc *sc)
7635{ 7675{
7636 uint32_t ext_ctrl; 7676 uint32_t ext_ctrl;
7637 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR); 7677 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7638 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; 7678 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
7639 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl); 7679 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
7640} 7680}
7641 7681
7642static int 7682static int
7643wm_get_hw_semaphore_82573(struct wm_softc *sc) 7683wm_get_hw_semaphore_82573(struct wm_softc *sc)
7644{ 7684{
7645 int i = 0; 7685 int i = 0;
7646 uint32_t reg; 7686 uint32_t reg;
7647 7687
7648 reg = CSR_READ(sc, WMREG_EXTCNFCTR); 7688 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
7649 do { 7689 do {
7650 CSR_WRITE(sc, WMREG_EXTCNFCTR, 7690 CSR_WRITE(sc, WMREG_EXTCNFCTR,
7651 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP); 7691 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
7652 reg = CSR_READ(sc, WMREG_EXTCNFCTR); 7692 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
7653 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0) 7693 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
7654 break; 7694 break;
7655 delay(2*1000); 7695 delay(2*1000);
7656 i++; 7696 i++;
7657 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT); 7697 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
7658 7698
7659 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) { 7699 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
7660 wm_put_hw_semaphore_82573(sc); 7700 wm_put_hw_semaphore_82573(sc);
7661 log(LOG_ERR, "%s: Driver can't access the PHY\n", 7701 log(LOG_ERR, "%s: Driver can't access the PHY\n",
7662 device_xname(sc->sc_dev)); 7702 device_xname(sc->sc_dev));
7663 return -1; 7703 return -1;
7664 } 7704 }
7665 7705
7666 return 0; 7706 return 0;
7667} 7707}
7668 7708
7669static void 7709static void
7670wm_put_hw_semaphore_82573(struct wm_softc *sc) 7710wm_put_hw_semaphore_82573(struct wm_softc *sc)
7671{ 7711{
7672 uint32_t reg; 7712 uint32_t reg;
7673 7713
7674 reg = CSR_READ(sc, WMREG_EXTCNFCTR); 7714 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
7675 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP; 7715 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
7676 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg); 7716 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
7677} 7717}
7678 7718
7679static int 7719static int
7680wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank) 7720wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
7681{ 7721{
7682 uint32_t eecd; 7722 uint32_t eecd;
7683 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1; 7723 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
7684 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t); 7724 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
7685 uint8_t sig_byte = 0; 7725 uint8_t sig_byte = 0;
7686 7726
7687 switch (sc->sc_type) { 7727 switch (sc->sc_type) {
7688 case WM_T_ICH8: 7728 case WM_T_ICH8:
7689 case WM_T_ICH9: 7729 case WM_T_ICH9:
7690 eecd = CSR_READ(sc, WMREG_EECD); 7730 eecd = CSR_READ(sc, WMREG_EECD);
7691 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) { 7731 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
7692 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0; 7732 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
7693 return 0; 7733 return 0;
7694 } 7734 }
7695 /* FALLTHROUGH */ 7735 /* FALLTHROUGH */
7696 default: 7736 default:
7697 /* Default to 0 */ 7737 /* Default to 0 */
7698 *bank = 0; 7738 *bank = 0;
7699 7739
7700 /* Check bank 0 */ 7740 /* Check bank 0 */
7701 wm_read_ich8_byte(sc, act_offset, &sig_byte); 7741 wm_read_ich8_byte(sc, act_offset, &sig_byte);
7702 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) { 7742 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
7703 *bank = 0; 7743 *bank = 0;
7704 return 0; 7744 return 0;
7705 } 7745 }
7706 7746
7707 /* Check bank 1 */ 7747 /* Check bank 1 */
7708 wm_read_ich8_byte(sc, act_offset + bank1_offset, 7748 wm_read_ich8_byte(sc, act_offset + bank1_offset,
7709 &sig_byte); 7749 &sig_byte);
7710 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) { 7750 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
7711 *bank = 1; 7751 *bank = 1;
7712 return 0; 7752 return 0;
7713 } 7753 }
7714 } 7754 }
7715 7755
7716 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n", 7756 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
7717 device_xname(sc->sc_dev))); 7757 device_xname(sc->sc_dev)));
7718 return -1; 7758 return -1;
7719} 7759}
7720 7760
7721/****************************************************************************** 7761/******************************************************************************
7722 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access 7762 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
7723 * register. 7763 * register.
7724 * 7764 *
7725 * sc - Struct containing variables accessed by shared code 7765 * sc - Struct containing variables accessed by shared code
7726 * offset - offset of word in the EEPROM to read 7766 * offset - offset of word in the EEPROM to read
7727 * data - word read from the EEPROM 7767 * data - word read from the EEPROM
7728 * words - number of words to read 7768 * words - number of words to read
7729 *****************************************************************************/ 7769 *****************************************************************************/
7730static int 7770static int
7731wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data) 7771wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
7732{ 7772{
7733 int32_t error = 0; 7773 int32_t error = 0;
7734 uint32_t flash_bank = 0; 7774 uint32_t flash_bank = 0;
7735 uint32_t act_offset = 0; 7775 uint32_t act_offset = 0;
7736 uint32_t bank_offset = 0; 7776 uint32_t bank_offset = 0;
7737 uint16_t word = 0; 7777 uint16_t word = 0;
7738 uint16_t i = 0; 7778 uint16_t i = 0;
7739 7779
7740 /* We need to know which is the valid flash bank. In the event 7780 /* We need to know which is the valid flash bank. In the event
7741 * that we didn't allocate eeprom_shadow_ram, we may not be 7781 * that we didn't allocate eeprom_shadow_ram, we may not be
7742 * managing flash_bank. So it cannot be trusted and needs 7782 * managing flash_bank. So it cannot be trusted and needs
7743 * to be updated with each read. 7783 * to be updated with each read.
7744 */ 7784 */
7745 error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank); 7785 error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
7746 if (error) { 7786 if (error) {
7747 aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n", 7787 aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
7748 __func__); 7788 __func__);
7749 flash_bank = 0; 7789 flash_bank = 0;
7750 } 7790 }
7751 7791
7752 /* 7792 /*
7753 * Adjust offset appropriately if we're on bank 1 - adjust for word 7793 * Adjust offset appropriately if we're on bank 1 - adjust for word
7754 * size 7794 * size
7755 */ 7795 */
7756 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2); 7796 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
7757 7797
7758 error = wm_get_swfwhw_semaphore(sc); 7798 error = wm_get_swfwhw_semaphore(sc);
7759 if (error) { 7799 if (error) {
7760 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 7800 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7761 __func__); 7801 __func__);
7762 return error; 7802 return error;
7763 } 7803 }
7764 7804
7765 for (i = 0; i < words; i++) { 7805 for (i = 0; i < words; i++) {
7766 /* The NVM part needs a byte offset, hence * 2 */ 7806 /* The NVM part needs a byte offset, hence * 2 */
7767 act_offset = bank_offset + ((offset + i) * 2); 7807 act_offset = bank_offset + ((offset + i) * 2);
7768 error = wm_read_ich8_word(sc, act_offset, &word); 7808 error = wm_read_ich8_word(sc, act_offset, &word);
7769 if (error) { 7809 if (error) {
7770 aprint_error_dev(sc->sc_dev, 7810 aprint_error_dev(sc->sc_dev,
7771 "%s: failed to read NVM\n", __func__); 7811 "%s: failed to read NVM\n", __func__);
7772 break; 7812 break;
7773 } 7813 }
7774 data[i] = word; 7814 data[i] = word;
7775 } 7815 }
7776 7816
7777 wm_put_swfwhw_semaphore(sc); 7817 wm_put_swfwhw_semaphore(sc);
7778 return error; 7818 return error;
7779} 7819}
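/*
 * Worked example of the offset arithmetic above (hypothetical sizes): with
 * sc_ich8_flash_bank_size = 0x1000 words and flash_bank = 1,
 * bank_offset = 1 * (0x1000 * 2) = 0x2000 bytes, so word offset 0x10 is read
 * from flash byte offset act_offset = 0x2000 + 0x10 * 2 = 0x2020.
 */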
7780 7820
7781/****************************************************************************** 7821/******************************************************************************
7782 * This function does initial flash setup so that a new read/write/erase cycle 7822 * This function does initial flash setup so that a new read/write/erase cycle
7783 * can be started. 7823 * can be started.
7784 * 7824 *
7785 * sc - The pointer to the hw structure 7825 * sc - The pointer to the hw structure
7786 ****************************************************************************/ 7826 ****************************************************************************/
7787static int32_t 7827static int32_t
7788wm_ich8_cycle_init(struct wm_softc *sc) 7828wm_ich8_cycle_init(struct wm_softc *sc)
7789{ 7829{
7790 uint16_t hsfsts; 7830 uint16_t hsfsts;
7791 int32_t error = 1; 7831 int32_t error = 1;
7792 int32_t i = 0; 7832 int32_t i = 0;
7793 7833
7794 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS); 7834 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7795 7835
7796 /* Maybe check the Flash Descriptor Valid bit in Hw status */ 7836 /* Maybe check the Flash Descriptor Valid bit in Hw status */
7797 if ((hsfsts & HSFSTS_FLDVAL) == 0) { 7837 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
7798 return error; 7838 return error;
7799 } 7839 }
7800 7840
7801 /* Clear FCERR in Hw status by writing 1 */ 7841 /* Clear FCERR in Hw status by writing 1 */
7802 /* Clear DAEL in Hw status by writing a 1 */ 7842 /* Clear DAEL in Hw status by writing a 1 */
7803 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL; 7843 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
7804 7844
7805 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts); 7845 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7806 7846
7807 /* 7847 /*
7808 * Either we should have a hardware SPI cycle-in-progress bit to check 7848 * Either we should have a hardware SPI cycle-in-progress bit to check
7809 * against in order to start a new cycle, or the FDONE bit should be 7849 * against in order to start a new cycle, or the FDONE bit should be
7810 * changed in the hardware so that it is 1 after hardware reset, which 7850 * changed in the hardware so that it is 1 after hardware reset, which
7811 * can then be used as an indication whether a cycle is in progress or 7851 * can then be used as an indication whether a cycle is in progress or
7812 * has been completed ... we should also have some software semaphore 7852 * has been completed ... we should also have some software semaphore
7813 * mechanism to guard FDONE or the cycle-in-progress bit so that 7853 * mechanism to guard FDONE or the cycle-in-progress bit so that
7814 * access to those bits by two threads can be serialized, or some way 7854 * access to those bits by two threads can be serialized, or some way
7815 * so that two threads don't start a cycle at the same time. 7855 * so that two threads don't start a cycle at the same time.
7816 */ 7856 */
7817 7857
7818 if ((hsfsts & HSFSTS_FLINPRO) == 0) { 7858 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7819 /* 7859 /*
7820 * There is no cycle running at present, so we can start a 7860 * There is no cycle running at present, so we can start a
7821 * cycle 7861 * cycle
7822 */ 7862 */
7823 7863
7824 /* Begin by setting Flash Cycle Done. */ 7864 /* Begin by setting Flash Cycle Done. */
7825 hsfsts |= HSFSTS_DONE; 7865 hsfsts |= HSFSTS_DONE;
7826 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts); 7866 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7827 error = 0; 7867 error = 0;
7828 } else { 7868 } else {
7829 /* 7869 /*
7830 * otherwise poll for some time so the current cycle has a 7870 * otherwise poll for some time so the current cycle has a
7831 * chance to end before giving up. 7871 * chance to end before giving up.
7832 */ 7872 */
7833 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) { 7873 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
7834 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS); 7874 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7835 if ((hsfsts & HSFSTS_FLINPRO) == 0) { 7875 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7836 error = 0; 7876 error = 0;
7837 break; 7877 break;
7838 } 7878 }
7839 delay(1); 7879 delay(1);
7840 } 7880 }
7841 if (error == 0) { 7881 if (error == 0) {
7842 /* 7882 /*
7843 * Successfully waited for the previous cycle to finish; 7883 * Successfully waited for the previous cycle to finish;
7844 * now set the Flash Cycle Done. 7884 * now set the Flash Cycle Done.
7845 */ 7885 */
7846 hsfsts |= HSFSTS_DONE; 7886 hsfsts |= HSFSTS_DONE;
7847 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts); 7887 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7848 } 7888 }
7849 } 7889 }
7850 return error; 7890 return error;
7851} 7891}
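The else-branch above is the usual bounded-polling idiom: re-read a status register, test the busy bit, and give up after a fixed number of one-microsecond delays. A minimal stand-alone sketch of that idiom follows; the register accessor, busy bit and timeout are stand-ins for illustration, not the driver's own definitions.

#include <stdint.h>
#include <stdio.h>

#define FLASH_BUSY	0x0020	/* stand-in for HSFSTS_FLINPRO */
#define POLL_LIMIT	5000	/* stand-in for ICH_FLASH_COMMAND_TIMEOUT */

/* Stand-in for ICH8_FLASH_READ16(); pretends the busy bit clears after 3 polls. */
static uint16_t
read_flash_status(void)
{
	static int polls;

	return (polls++ < 3) ? FLASH_BUSY : 0;
}

static void
delay_us(unsigned int us)
{
	(void)us;	/* a real driver would spin or sleep here */
}

/* Return 0 once the busy bit clears, 1 if it stays set for the whole timeout. */
static int
wait_flash_idle(void)
{
	int i;

	for (i = 0; i < POLL_LIMIT; i++) {
		if ((read_flash_status() & FLASH_BUSY) == 0)
			return 0;
		delay_us(1);
	}
	return 1;
}

int
main(void)
{
	printf("flash idle wait: %s\n", wait_flash_idle() ? "timed out" : "ok");
	return 0;
}

The same shape appears again below when waiting for FDONE after a cycle has been kicked off.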
7852 7892
7853/****************************************************************************** 7893/******************************************************************************
7854 * This function starts a flash cycle and waits for its completion 7894 * This function starts a flash cycle and waits for its completion
7855 * 7895 *
7856 * sc - The pointer to the hw structure 7896 * sc - The pointer to the hw structure
7857 ****************************************************************************/ 7897 ****************************************************************************/
7858static int32_t 7898static int32_t
7859wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout) 7899wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
7860{ 7900{
7861 uint16_t hsflctl; 7901 uint16_t hsflctl;
7862 uint16_t hsfsts; 7902 uint16_t hsfsts;
7863 int32_t error = 1; 7903 int32_t error = 1;
7864 uint32_t i = 0; 7904 uint32_t i = 0;
7865 7905
7866 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ 7906 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
7867 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL); 7907 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
7868 hsflctl |= HSFCTL_GO; 7908 hsflctl |= HSFCTL_GO;
7869 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl); 7909 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7870 7910
7871 /* wait till FDONE bit is set to 1 */ 7911 /* wait till FDONE bit is set to 1 */
7872 do { 7912 do {
7873 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS); 7913 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7874 if (hsfsts & HSFSTS_DONE) 7914 if (hsfsts & HSFSTS_DONE)
7875 break; 7915 break;
7876 delay(1); 7916 delay(1);
7877 i++; 7917 i++;
7878 } while (i < timeout); 7918 } while (i < timeout);
7879 if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0) 7919 if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
7880 error = 0; 7920 error = 0;
7881 7921
7882 return error; 7922 return error;
7883} 7923}
7884 7924
7885/****************************************************************************** 7925/******************************************************************************
7886 * Reads a byte or word from the NVM using the ICH8 flash access registers. 7926 * Reads a byte or word from the NVM using the ICH8 flash access registers.
7887 * 7927 *
7888 * sc - The pointer to the hw structure 7928 * sc - The pointer to the hw structure
7889 * index - The index of the byte or word to read. 7929 * index - The index of the byte or word to read.
7890 * size - Size of data to read, 1=byte 2=word 7930 * size - Size of data to read, 1=byte 2=word
7891 * data - Pointer to the word to store the value read. 7931 * data - Pointer to the word to store the value read.
7892 *****************************************************************************/ 7932 *****************************************************************************/
7893static int32_t 7933static int32_t
7894wm_read_ich8_data(struct wm_softc *sc, uint32_t index, 7934wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
7895 uint32_t size, uint16_t* data) 7935 uint32_t size, uint16_t* data)
7896{ 7936{
7897 uint16_t hsfsts; 7937 uint16_t hsfsts;
7898 uint16_t hsflctl; 7938 uint16_t hsflctl;
7899 uint32_t flash_linear_address; 7939 uint32_t flash_linear_address;
7900 uint32_t flash_data = 0; 7940 uint32_t flash_data = 0;
7901 int32_t error = 1; 7941 int32_t error = 1;
7902 int32_t count = 0; 7942 int32_t count = 0;
7903 7943
7904 if (size < 1 || size > 2 || data == 0x0 || 7944 if (size < 1 || size > 2 || data == 0x0 ||
7905 index > ICH_FLASH_LINEAR_ADDR_MASK) 7945 index > ICH_FLASH_LINEAR_ADDR_MASK)
7906 return error; 7946 return error;
7907 7947
7908 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) + 7948 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
7909 sc->sc_ich8_flash_base; 7949 sc->sc_ich8_flash_base;
7910 7950
7911 do { 7951 do {
7912 delay(1); 7952 delay(1);
7913 /* Steps */ 7953 /* Steps */
7914 error = wm_ich8_cycle_init(sc); 7954 error = wm_ich8_cycle_init(sc);
7915 if (error) 7955 if (error)
7916 break; 7956 break;
7917 7957
7918 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL); 7958 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
7919 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ 7959 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
7920 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT) 7960 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
7921 & HSFCTL_BCOUNT_MASK; 7961 & HSFCTL_BCOUNT_MASK;
7922 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT; 7962 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
7923 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl); 7963 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7924 7964
7925 /* 7965 /*
7926 * Write the last 24 bits of index into Flash Linear address 7966 * Write the last 24 bits of index into Flash Linear address
7927 * field in Flash Address 7967 * field in Flash Address
7928 */ 7968 */
7929 /* TODO: maybe check the index against the size of the flash */ 7969 /* TODO: maybe check the index against the size of the flash */
7930 7970
7931 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address); 7971 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
7932 7972
7933 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT); 7973 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
7934 7974
7935 /* 7975 /*
7936 * Check if FCERR is set to 1; if so, clear it and retry the 7976 * Check if FCERR is set to 1; if so, clear it and retry the
7937 * whole sequence a few more times. Otherwise read (shift in) 7977 * whole sequence a few more times. Otherwise read (shift in)
7938 * the data from Flash Data0; the byte order is least 7978 * the data from Flash Data0; the byte order is least
7939 * significant byte first. 7979 * significant byte first.
7940 */ 7980 */
7941 if (error == 0) { 7981 if (error == 0) {
7942 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0); 7982 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
7943 if (size == 1) 7983 if (size == 1)
7944 *data = (uint8_t)(flash_data & 0x000000FF); 7984 *data = (uint8_t)(flash_data & 0x000000FF);
7945 else if (size == 2) 7985 else if (size == 2)
7946 *data = (uint16_t)(flash_data & 0x0000FFFF); 7986 *data = (uint16_t)(flash_data & 0x0000FFFF);
7947 break; 7987 break;
7948 } else { 7988 } else {
7949 /* 7989 /*
7950 * If we've gotten here, then things are probably 7990 * If we've gotten here, then things are probably
7951 * completely hosed, but if the error condition is 7991 * completely hosed, but if the error condition is
7952 * detected, it won't hurt to give it another try... 7992 * detected, it won't hurt to give it another try...
7953 * ICH_FLASH_CYCLE_REPEAT_COUNT times. 7993 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
7954 */ 7994 */
7955 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS); 7995 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7956 if (hsfsts & HSFSTS_ERR) { 7996 if (hsfsts & HSFSTS_ERR) {
7957 /* Repeat for some time before giving up. */ 7997 /* Repeat for some time before giving up. */
7958 continue; 7998 continue;
7959 } else if ((hsfsts & HSFSTS_DONE) == 0) 7999 } else if ((hsfsts & HSFSTS_DONE) == 0)
7960 break; 8000 break;
7961 } 8001 }
7962 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); 8002 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
7963 8003
7964 return error; 8004 return error;
7965} 8005}
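The do/while above wraps one flash cycle in an outer retry loop: success and unrecoverable states break out, while a flash-cycle error (FCERR) falls through to continue and re-runs the whole sequence up to ICH_FLASH_CYCLE_REPEAT_COUNT times. A small stand-alone sketch of that retry shape, with made-up names standing in for the real cycle and error bits:

#include <stdbool.h>
#include <stdio.h>

#define REPEAT_LIMIT	10	/* stand-in for ICH_FLASH_CYCLE_REPEAT_COUNT */

/* Outcome of one attempt; mirrors the success / retryable / fatal split above. */
enum attempt { ATTEMPT_OK, ATTEMPT_RETRY, ATTEMPT_FATAL };

/* Stand-in for one flash cycle: fail twice with a retryable error, then succeed. */
static enum attempt
try_once(void)
{
	static int calls;

	return (calls++ < 2) ? ATTEMPT_RETRY : ATTEMPT_OK;
}

static bool
read_with_retry(void)
{
	int count = 0;

	do {
		switch (try_once()) {
		case ATTEMPT_OK:
			return true;	/* got the data */
		case ATTEMPT_FATAL:
			return false;	/* no point in retrying */
		case ATTEMPT_RETRY:
			continue;	/* e.g. an error bit was set; clear and retry */
		}
	} while (count++ < REPEAT_LIMIT);

	return false;
}

int
main(void)
{
	printf("read %s\n", read_with_retry() ? "succeeded" : "failed");
	return 0;
}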
7966 8006
7967/****************************************************************************** 8007/******************************************************************************
7968 * Reads a single byte from the NVM using the ICH8 flash access registers. 8008 * Reads a single byte from the NVM using the ICH8 flash access registers.
7969 * 8009 *
7970 * sc - pointer to wm_hw structure 8010 * sc - pointer to wm_hw structure
7971 * index - The index of the byte to read. 8011 * index - The index of the byte to read.
7972 * data - Pointer to a byte to store the value read. 8012 * data - Pointer to a byte to store the value read.
7973 *****************************************************************************/ 8013 *****************************************************************************/
7974static int32_t 8014static int32_t
7975wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data) 8015wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
7976{ 8016{
7977 int32_t status; 8017 int32_t status;
7978 uint16_t word = 0; 8018 uint16_t word = 0;
7979 8019
7980 status = wm_read_ich8_data(sc, index, 1, &word); 8020 status = wm_read_ich8_data(sc, index, 1, &word);
7981 if (status == 0) 8021 if (status == 0)
7982 *data = (uint8_t)word; 8022 *data = (uint8_t)word;
7983 else 8023 else
7984 *data = 0; 8024 *data = 0;
7985 8025
7986 return status; 8026 return status;
7987} 8027}
7988 8028
7989/****************************************************************************** 8029/******************************************************************************
7990 * Reads a word from the NVM using the ICH8 flash access registers. 8030 * Reads a word from the NVM using the ICH8 flash access registers.
7991 * 8031 *
7992 * sc - pointer to wm_hw structure 8032 * sc - pointer to wm_hw structure
7993 * index - The starting byte index of the word to read. 8033 * index - The starting byte index of the word to read.
7994 * data - Pointer to a word to store the value read. 8034 * data - Pointer to a word to store the value read.
7995 *****************************************************************************/ 8035 *****************************************************************************/
7996static int32_t 8036static int32_t
7997wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data) 8037wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
7998{ 8038{
7999 int32_t status; 8039 int32_t status;
8000 8040
8001 status = wm_read_ich8_data(sc, index, 2, data); 8041 status = wm_read_ich8_data(sc, index, 2, data);
8002 return status; 8042 return status;
8003} 8043}
8004 8044
8005static int 8045static int
8006wm_check_mng_mode(struct wm_softc *sc) 8046wm_check_mng_mode(struct wm_softc *sc)
8007{ 8047{
8008 int rv; 8048 int rv;
8009 8049
8010 switch (sc->sc_type) { 8050 switch (sc->sc_type) {
8011 case WM_T_ICH8: 8051 case WM_T_ICH8:
8012 case WM_T_ICH9: 8052 case WM_T_ICH9:
8013 case WM_T_ICH10: 8053 case WM_T_ICH10:
8014 case WM_T_PCH: 8054 case WM_T_PCH:
8015 case WM_T_PCH2: 8055 case WM_T_PCH2:
8016 case WM_T_PCH_LPT: 8056 case WM_T_PCH_LPT:
8017 rv = wm_check_mng_mode_ich8lan(sc); 8057 rv = wm_check_mng_mode_ich8lan(sc);
8018 break; 8058 break;
8019 case WM_T_82574: 8059 case WM_T_82574:
8020 case WM_T_82583: 8060 case WM_T_82583:
8021 rv = wm_check_mng_mode_82574(sc); 8061 rv = wm_check_mng_mode_82574(sc);
8022 break; 8062 break;
8023 case WM_T_82571: 8063 case WM_T_82571:
8024 case WM_T_82572: 8064 case WM_T_82572:
8025 case WM_T_82573: 8065 case WM_T_82573:
8026 case WM_T_80003: 8066 case WM_T_80003:
8027 rv = wm_check_mng_mode_generic(sc); 8067 rv = wm_check_mng_mode_generic(sc);
8028 break; 8068 break;
8029 default: 8069 default:
8030 /* nothing to do */ 8070 /* nothing to do */
8031 rv = 0; 8071 rv = 0;
8032 break; 8072 break;
8033 } 8073 }
8034 8074
8035 return rv; 8075 return rv;
8036} 8076}
8037 8077
8038static int 8078static int
8039wm_check_mng_mode_ich8lan(struct wm_softc *sc) 8079wm_check_mng_mode_ich8lan(struct wm_softc *sc)
8040{ 8080{
8041 uint32_t fwsm; 8081 uint32_t fwsm;
8042 8082
8043 fwsm = CSR_READ(sc, WMREG_FWSM); 8083 fwsm = CSR_READ(sc, WMREG_FWSM);
8044 8084
8045 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)) 8085 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
8046 return 1; 8086 return 1;
8047 8087
8048 return 0; 8088 return 0;
8049} 8089}
8050 8090
8051static int 8091static int
8052wm_check_mng_mode_82574(struct wm_softc *sc) 8092wm_check_mng_mode_82574(struct wm_softc *sc)
8053{ 8093{
8054 uint16_t data; 8094 uint16_t data;
8055 8095
8056 wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data); 8096 wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
8057 8097
8058 if ((data & EEPROM_CFG2_MNGM_MASK) != 0) 8098 if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
8059 return 1; 8099 return 1;
8060 8100
8061 return 0; 8101 return 0;
8062} 8102}
8063 8103
8064static int 8104static int
8065wm_check_mng_mode_generic(struct wm_softc *sc) 8105wm_check_mng_mode_generic(struct wm_softc *sc)
8066{ 8106{
8067 uint32_t fwsm; 8107 uint32_t fwsm;
8068 8108
8069 fwsm = CSR_READ(sc, WMREG_FWSM); 8109 fwsm = CSR_READ(sc, WMREG_FWSM);
8070 8110
8071 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT)) 8111 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
8072 return 1; 8112 return 1;
8073 8113
8074 return 0; 8114 return 0;
8075} 8115}
8076 8116
8077static int 8117static int
8078wm_enable_mng_pass_thru(struct wm_softc *sc) 8118wm_enable_mng_pass_thru(struct wm_softc *sc)
8079{ 8119{
8080 uint32_t manc, fwsm, factps; 8120 uint32_t manc, fwsm, factps;
8081 8121
8082 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0) 8122 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
8083 return 0; 8123 return 0;
8084 8124
8085 manc = CSR_READ(sc, WMREG_MANC); 8125 manc = CSR_READ(sc, WMREG_MANC);
8086 8126
8087 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n", 8127 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
8088 device_xname(sc->sc_dev), manc)); 8128 device_xname(sc->sc_dev), manc));
8089 if ((manc & MANC_RECV_TCO_EN) == 0) 8129 if ((manc & MANC_RECV_TCO_EN) == 0)
8090 return 0; 8130 return 0;
8091 8131
8092 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) { 8132 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
8093 fwsm = CSR_READ(sc, WMREG_FWSM); 8133 fwsm = CSR_READ(sc, WMREG_FWSM);
8094 factps = CSR_READ(sc, WMREG_FACTPS); 8134 factps = CSR_READ(sc, WMREG_FACTPS);
8095 if (((factps & FACTPS_MNGCG) == 0) 8135 if (((factps & FACTPS_MNGCG) == 0)
8096 && ((fwsm & FWSM_MODE_MASK) 8136 && ((fwsm & FWSM_MODE_MASK)
8097 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))) 8137 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
8098 return 1; 8138 return 1;
8099 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){ 8139 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
8100 uint16_t data; 8140 uint16_t data;
8101 8141
8102 factps = CSR_READ(sc, WMREG_FACTPS); 8142 factps = CSR_READ(sc, WMREG_FACTPS);
8103 wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data); 8143 wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
8104 DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n", 8144 DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
8105 device_xname(sc->sc_dev), factps, data)); 8145 device_xname(sc->sc_dev), factps, data));
8106 if (((factps & FACTPS_MNGCG) == 0) 8146 if (((factps & FACTPS_MNGCG) == 0)
8107 && ((data & EEPROM_CFG2_MNGM_MASK) 8147 && ((data & EEPROM_CFG2_MNGM_MASK)
8108 == (EEPROM_CFG2_MNGM_PT << EEPROM_CFG2_MNGM_SHIFT))) 8148 == (EEPROM_CFG2_MNGM_PT << EEPROM_CFG2_MNGM_SHIFT)))
8109 return 1; 8149 return 1;
8110 } else if (((manc & MANC_SMBUS_EN) != 0) 8150 } else if (((manc & MANC_SMBUS_EN) != 0)
8111 && ((manc & MANC_ASF_EN) == 0)) 8151 && ((manc & MANC_ASF_EN) == 0))
8112 return 1; 8152 return 1;
8113 8153
8114 return 0; 8154 return 0;
8115} 8155}
8116 8156
8117static int 8157static int
8118wm_check_reset_block(struct wm_softc *sc) 8158wm_check_reset_block(struct wm_softc *sc)
8119{ 8159{
8120 uint32_t reg; 8160 uint32_t reg;
8121 8161
8122 switch (sc->sc_type) { 8162 switch (sc->sc_type) {
8123 case WM_T_ICH8: 8163 case WM_T_ICH8:
8124 case WM_T_ICH9: 8164 case WM_T_ICH9:
8125 case WM_T_ICH10: 8165 case WM_T_ICH10:
8126 case WM_T_PCH: 8166 case WM_T_PCH:
8127 case WM_T_PCH2: 8167 case WM_T_PCH2:
8128 case WM_T_PCH_LPT: 8168 case WM_T_PCH_LPT:
8129 reg = CSR_READ(sc, WMREG_FWSM); 8169 reg = CSR_READ(sc, WMREG_FWSM);
8130 if ((reg & FWSM_RSPCIPHY) != 0) 8170 if ((reg & FWSM_RSPCIPHY) != 0)
8131 return 0; 8171 return 0;
8132 else 8172 else
8133 return -1; 8173 return -1;
8134 break; 8174 break;
8135 case WM_T_82571: 8175 case WM_T_82571:
8136 case WM_T_82572: 8176 case WM_T_82572:
8137 case WM_T_82573: 8177 case WM_T_82573:
8138 case WM_T_82574: 8178 case WM_T_82574:
8139 case WM_T_82583: 8179 case WM_T_82583:
8140 case WM_T_80003: 8180 case WM_T_80003:
8141 reg = CSR_READ(sc, WMREG_MANC); 8181 reg = CSR_READ(sc, WMREG_MANC);
8142 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0) 8182 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
8143 return -1; 8183 return -1;
8144 else 8184 else
8145 return 0; 8185 return 0;
8146 break; 8186 break;
8147 default: 8187 default:
8148 /* no problem */ 8188 /* no problem */
8149 break; 8189 break;
8150 } 8190 }
8151 8191
8152 return 0; 8192 return 0;
8153} 8193}
8154 8194
8155static void 8195static void
8156wm_get_hw_control(struct wm_softc *sc) 8196wm_get_hw_control(struct wm_softc *sc)
8157{ 8197{
8158 uint32_t reg; 8198 uint32_t reg;
8159 8199
8160 switch (sc->sc_type) { 8200 switch (sc->sc_type) {
8161 case WM_T_82573: 8201 case WM_T_82573:
8162 reg = CSR_READ(sc, WMREG_SWSM); 8202 reg = CSR_READ(sc, WMREG_SWSM);
8163 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD); 8203 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
8164 break; 8204 break;
8165 case WM_T_82571: 8205 case WM_T_82571:
8166 case WM_T_82572: 8206 case WM_T_82572:
8167 case WM_T_82574: 8207 case WM_T_82574:
8168 case WM_T_82583: 8208 case WM_T_82583:
8169 case WM_T_80003: 8209 case WM_T_80003:
8170 case WM_T_ICH8: 8210 case WM_T_ICH8:
8171 case WM_T_ICH9: 8211 case WM_T_ICH9:
8172 case WM_T_ICH10: 8212 case WM_T_ICH10:
8173 case WM_T_PCH: 8213 case WM_T_PCH:
8174 case WM_T_PCH2: 8214 case WM_T_PCH2:
8175 case WM_T_PCH_LPT: 8215 case WM_T_PCH_LPT:
8176 reg = CSR_READ(sc, WMREG_CTRL_EXT); 8216 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8177 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD); 8217 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
8178 break; 8218 break;
8179 default: 8219 default:
8180 break; 8220 break;
8181 } 8221 }
8182} 8222}
8183 8223
8184static void 8224static void
8185wm_release_hw_control(struct wm_softc *sc) 8225wm_release_hw_control(struct wm_softc *sc)
8186{ 8226{
8187 uint32_t reg; 8227 uint32_t reg;
8188 8228
8189 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0) 8229 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
8190 return; 8230 return;
8191 8231
8192 if (sc->sc_type == WM_T_82573) { 8232 if (sc->sc_type == WM_T_82573) {
8193 reg = CSR_READ(sc, WMREG_SWSM); 8233 reg = CSR_READ(sc, WMREG_SWSM);
8194 reg &= ~SWSM_DRV_LOAD; 8234 reg &= ~SWSM_DRV_LOAD;
8195 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD); 8235 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
8196 } else { 8236 } else {
8197 reg = CSR_READ(sc, WMREG_CTRL_EXT); 8237 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8198 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD); 8238 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
8199 } 8239 }
8200} 8240}
8201 8241
8202/* XXX Currently TBI only */ 8242/* XXX Currently TBI only */
8203static int 8243static int
8204wm_check_for_link(struct wm_softc *sc) 8244wm_check_for_link(struct wm_softc *sc)
8205{ 8245{
8206 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur; 8246 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8207 uint32_t rxcw; 8247 uint32_t rxcw;
8208 uint32_t ctrl; 8248 uint32_t ctrl;
8209 uint32_t status; 8249 uint32_t status;
8210 uint32_t sig; 8250 uint32_t sig;
8211 8251
8212 rxcw = CSR_READ(sc, WMREG_RXCW); 8252 rxcw = CSR_READ(sc, WMREG_RXCW);
8213 ctrl = CSR_READ(sc, WMREG_CTRL); 8253 ctrl = CSR_READ(sc, WMREG_CTRL);
8214 status = CSR_READ(sc, WMREG_STATUS); 8254 status = CSR_READ(sc, WMREG_STATUS);
8215 8255
8216 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0; 8256 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
8217 8257
8218 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n", 8258 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
8219 device_xname(sc->sc_dev), __func__, 8259 device_xname(sc->sc_dev), __func__,
8220 ((ctrl & CTRL_SWDPIN(1)) == sig), 8260 ((ctrl & CTRL_SWDPIN(1)) == sig),
8221 ((status & STATUS_LU) != 0), 8261 ((status & STATUS_LU) != 0),
8222 ((rxcw & RXCW_C) != 0) 8262 ((rxcw & RXCW_C) != 0)
8223 )); 8263 ));
8224 8264
8225 /* 8265 /*
8226 * SWDPIN LU RXCW 8266 * SWDPIN LU RXCW
8227 * 0 0 0 8267 * 0 0 0
8228 * 0 0 1 (should not happen) 8268 * 0 0 1 (should not happen)
8229 * 0 1 0 (should not happen) 8269 * 0 1 0 (should not happen)
8230 * 0 1 1 (should not happen) 8270 * 0 1 1 (should not happen)
8231 * 1 0 0 Disable autonego and force linkup 8271 * 1 0 0 Disable autonego and force linkup
8232 * 1 0 1 got /C/ but not linkup yet 8272 * 1 0 1 got /C/ but not linkup yet
8233 * 1 1 0 (linkup) 8273 * 1 1 0 (linkup)
8234 * 1 1 1 If IFM_AUTO, back to autonego 8274 * 1 1 1 If IFM_AUTO, back to autonego
8235 * 8275 *
8236 */ 8276 */
8237 if (((ctrl & CTRL_SWDPIN(1)) == sig) 8277 if (((ctrl & CTRL_SWDPIN(1)) == sig)
8238 && ((status & STATUS_LU) == 0) 8278 && ((status & STATUS_LU) == 0)
8239 && ((rxcw & RXCW_C) == 0)) { 8279 && ((rxcw & RXCW_C) == 0)) {
8240 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n", 8280 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
8241 __func__)); 8281 __func__));
8242 sc->sc_tbi_linkup = 0; 8282 sc->sc_tbi_linkup = 0;
8243 /* Disable auto-negotiation in the TXCW register */ 8283 /* Disable auto-negotiation in the TXCW register */
8244 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE)); 8284 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
8245 8285
8246 /* 8286 /*
8247 * Force link-up and also force full-duplex. 8287 * Force link-up and also force full-duplex.
8248 * 8288 *
8249 * NOTE: TFCE and RFCE in CTRL were updated automatically, 8289 * NOTE: TFCE and RFCE in CTRL were updated automatically,
8250 * so we should update sc->sc_ctrl as well. 8290 * so we should update sc->sc_ctrl as well.
8251 */ 8291 */
8252 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD; 8292 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
8253 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 8293 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8254 } else if (((status & STATUS_LU) != 0) 8294 } else if (((status & STATUS_LU) != 0)
8255 && ((rxcw & RXCW_C) != 0) 8295 && ((rxcw & RXCW_C) != 0)
8256 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) { 8296 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
8257 sc->sc_tbi_linkup = 1; 8297 sc->sc_tbi_linkup = 1;
8258 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n", 8298 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
8259 __func__)); 8299 __func__));
8260 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw); 8300 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
8261 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU)); 8301 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
8262 } else if (((ctrl & CTRL_SWDPIN(1)) == sig) 8302 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
8263 && ((rxcw & RXCW_C) != 0)) { 8303 && ((rxcw & RXCW_C) != 0)) {
8264 DPRINTF(WM_DEBUG_LINK, ("/C/")); 8304 DPRINTF(WM_DEBUG_LINK, ("/C/"));
8265 } else { 8305 } else {
8266 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl, 8306 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
8267 status)); 8307 status));
8268 } 8308 }
8269 8309
8270 return 0; 8310 return 0;
8271} 8311}
8272 8312
8273/* Work-around for 82566 Kumeran PCS lock loss */ 8313/* Work-around for 82566 Kumeran PCS lock loss */
8274static void 8314static void
8275wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc) 8315wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
8276{ 8316{
8277 int miistatus, active, i; 8317 int miistatus, active, i;
8278 int reg; 8318 int reg;
8279 8319
8280 miistatus = sc->sc_mii.mii_media_status; 8320 miistatus = sc->sc_mii.mii_media_status;
8281 8321
8282 /* If the link is not up, do nothing */ 8322 /* If the link is not up, do nothing */
8283 if ((miistatus & IFM_ACTIVE) == 0) 8323 if ((miistatus & IFM_ACTIVE) == 0)
8284 return; 8324 return;
8285 8325
8286 active = sc->sc_mii.mii_media_active; 8326 active = sc->sc_mii.mii_media_active;
8287 8327
8288 /* Nothing to do if the link is other than 1Gbps */ 8328 /* Nothing to do if the link is other than 1Gbps */
8289 if (IFM_SUBTYPE(active) != IFM_1000_T) 8329 if (IFM_SUBTYPE(active) != IFM_1000_T)
8290 return; 8330 return;
8291 8331
8292 for (i = 0; i < 10; i++) { 8332 for (i = 0; i < 10; i++) {
8293 /* read twice */ 8333 /* read twice */
8294 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG); 8334 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
8295 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG); 8335 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
8296 if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) != 0) 8336 if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) != 0)
8297 goto out; /* GOOD! */ 8337 goto out; /* GOOD! */
8298 8338
8299 /* Reset the PHY */ 8339 /* Reset the PHY */
8300 wm_gmii_reset(sc); 8340 wm_gmii_reset(sc);
8301 delay(5*1000); 8341 delay(5*1000);
8302 } 8342 }
8303 8343
8304 /* Disable GigE link negotiation */ 8344 /* Disable GigE link negotiation */
8305 reg = CSR_READ(sc, WMREG_PHY_CTRL); 8345 reg = CSR_READ(sc, WMREG_PHY_CTRL);
8306 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS; 8346 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
8307 CSR_WRITE(sc, WMREG_PHY_CTRL, reg); 8347 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8308 8348
8309 /* 8349 /*
8310 * Call gig speed drop workaround on Gig disable before accessing 8350 * Call gig speed drop workaround on Gig disable before accessing
8311 * any PHY registers. 8351 * any PHY registers.
8312 */ 8352 */
8313 wm_gig_downshift_workaround_ich8lan(sc); 8353 wm_gig_downshift_workaround_ich8lan(sc);
8314 8354
8315out: 8355out:
8316 return; 8356 return;
8317} 8357}
8318 8358
8319/* WOL from S5 stops working */ 8359/* WOL from S5 stops working */
8320static void 8360static void
8321wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc) 8361wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
8322{ 8362{
8323 uint16_t kmrn_reg; 8363 uint16_t kmrn_reg;
8324 8364
8325 /* Only for igp3 */ 8365 /* Only for igp3 */
8326 if (sc->sc_phytype == WMPHY_IGP_3) { 8366 if (sc->sc_phytype == WMPHY_IGP_3) {
8327 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG); 8367 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
8328 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK; 8368 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
8329 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg); 8369 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
8330 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK; 8370 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
8331 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg); 8371 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
8332 } 8372 }
8333} 8373}
8334 8374
8335#ifdef WM_WOL 8375#ifdef WM_WOL
8336/* Power down workaround on D3 */ 8376/* Power down workaround on D3 */
8337static void 8377static void
8338wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc) 8378wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
8339{ 8379{
8340 uint32_t reg; 8380 uint32_t reg;
8341 int i; 8381 int i;
8342 8382
8343 for (i = 0; i < 2; i++) { 8383 for (i = 0; i < 2; i++) {
8344 /* Disable link */ 8384 /* Disable link */
8345 reg = CSR_READ(sc, WMREG_PHY_CTRL); 8385 reg = CSR_READ(sc, WMREG_PHY_CTRL);
8346 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS; 8386 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
8347 CSR_WRITE(sc, WMREG_PHY_CTRL, reg); 8387 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8348 8388
8349 /* 8389 /*
8350 * Call gig speed drop workaround on Gig disable before 8390 * Call gig speed drop workaround on Gig disable before
8351 * accessing any PHY registers 8391 * accessing any PHY registers
8352 */ 8392 */
8353 if (sc->sc_type == WM_T_ICH8) 8393 if (sc->sc_type == WM_T_ICH8)
8354 wm_gig_downshift_workaround_ich8lan(sc); 8394 wm_gig_downshift_workaround_ich8lan(sc);
8355 8395
8356 /* Write VR power-down enable */ 8396 /* Write VR power-down enable */
8357 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL); 8397 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
8358 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK; 8398 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
8359 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN; 8399 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
8360 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg); 8400 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
8361 8401
8362 /* Read it back and test */ 8402 /* Read it back and test */
8363 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL); 8403 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
8364 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK; 8404 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
8365 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0)) 8405 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
8366 break; 8406 break;
8367 8407
8368 /* Issue PHY reset and repeat at most one more time */ 8408 /* Issue PHY reset and repeat at most one more time */
8369 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET); 8409 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
8370 } 8410 }
8371} 8411}
8372#endif /* WM_WOL */ 8412#endif /* WM_WOL */
8373 8413
8374/* 8414/*
8375 * Workaround for pch's PHYs 8415 * Workaround for pch's PHYs
8376 * XXX should be moved to new PHY driver? 8416 * XXX should be moved to new PHY driver?
8377 */ 8417 */
8378static void 8418static void
8379wm_hv_phy_workaround_ich8lan(struct wm_softc *sc) 8419wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
8380{ 8420{
8381 if (sc->sc_phytype == WMPHY_82577) 8421 if (sc->sc_phytype == WMPHY_82577)
8382 wm_set_mdio_slow_mode_hv(sc); 8422 wm_set_mdio_slow_mode_hv(sc);
8383 8423
8384 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */ 8424 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
8385 8425
8386 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/ 8426 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
8387 8427
8388 /* 82578 */ 8428 /* 82578 */
8389 if (sc->sc_phytype == WMPHY_82578) { 8429 if (sc->sc_phytype == WMPHY_82578) {
8390 /* PCH rev. < 3 */ 8430 /* PCH rev. < 3 */
8391 if (sc->sc_rev < 3) { 8431 if (sc->sc_rev < 3) {
8392 /* XXX 6 bit shift? Why? Is it page2? */ 8432 /* XXX 6 bit shift? Why? Is it page2? */
8393 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29), 8433 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
8394 0x66c0); 8434 0x66c0);
8395 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e), 8435 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
8396 0xffff); 8436 0xffff);
8397 } 8437 }
8398 8438
8399 /* XXX phy rev. < 2 */ 8439 /* XXX phy rev. < 2 */
8400 } 8440 }
8401 8441
8402 /* Select page 0 */ 8442 /* Select page 0 */
8403 8443
8404 /* XXX acquire semaphore */ 8444 /* XXX acquire semaphore */
8405 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0); 8445 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
8406 /* XXX release semaphore */ 8446 /* XXX release semaphore */
8407 8447
8408 /* 8448 /*
8409 * Configure the K1 Si workaround during phy reset assuming there is 8449 * Configure the K1 Si workaround during phy reset assuming there is
8410 * link so that it disables K1 if link is in 1Gbps. 8450 * link so that it disables K1 if link is in 1Gbps.
8411 */ 8451 */
8412 wm_k1_gig_workaround_hv(sc, 1); 8452 wm_k1_gig_workaround_hv(sc, 1);
8413} 8453}
8414 8454
8415static void 8455static void
8416wm_lv_phy_workaround_ich8lan(struct wm_softc *sc) 8456wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
8417{ 8457{
8418 8458
8419 wm_set_mdio_slow_mode_hv(sc); 8459 wm_set_mdio_slow_mode_hv(sc);
8420} 8460}
8421 8461
8422static void 8462static void
8423wm_k1_gig_workaround_hv(struct wm_softc *sc, int link) 8463wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
8424{ 8464{
8425 int k1_enable = sc->sc_nvm_k1_enabled; 8465 int k1_enable = sc->sc_nvm_k1_enabled;
8426 8466
8427 /* XXX acquire semaphore */ 8467 /* XXX acquire semaphore */
8428 8468
8429 if (link) { 8469 if (link) {
8430 k1_enable = 0; 8470 k1_enable = 0;
8431 8471
8432 /* Link stall fix for link up */ 8472 /* Link stall fix for link up */
8433 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100); 8473 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
8434 } else { 8474 } else {
8435 /* Link stall fix for link down */ 8475 /* Link stall fix for link down */
8436 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100); 8476 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
8437 } 8477 }
8438 8478
8439 wm_configure_k1_ich8lan(sc, k1_enable); 8479 wm_configure_k1_ich8lan(sc, k1_enable);
8440 8480
8441 /* XXX release semaphore */ 8481 /* XXX release semaphore */
8442} 8482}
8443 8483
8444static void 8484static void
8445wm_set_mdio_slow_mode_hv(struct wm_softc *sc) 8485wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
8446{ 8486{
8447 uint32_t reg; 8487 uint32_t reg;
8448 8488
8449 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL); 8489 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
8450 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, 8490 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
8451 reg | HV_KMRN_MDIO_SLOW); 8491 reg | HV_KMRN_MDIO_SLOW);
8452} 8492}
8453 8493
8454static void 8494static void
8455wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable) 8495wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
8456{ 8496{
8457 uint32_t ctrl, ctrl_ext, tmp; 8497 uint32_t ctrl, ctrl_ext, tmp;
8458 uint16_t kmrn_reg; 8498 uint16_t kmrn_reg;
8459 8499
8460 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG); 8500 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
8461 8501
8462 if (k1_enable) 8502 if (k1_enable)
8463 kmrn_reg |= KUMCTRLSTA_K1_ENABLE; 8503 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
8464 else 8504 else
8465 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE; 8505 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
8466 8506
8467 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg); 8507 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
8468 8508
8469 delay(20); 8509 delay(20);
8470 8510
8471 ctrl = CSR_READ(sc, WMREG_CTRL); 8511 ctrl = CSR_READ(sc, WMREG_CTRL);
8472 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT); 8512 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8473 8513
8474 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100); 8514 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
8475 tmp |= CTRL_FRCSPD; 8515 tmp |= CTRL_FRCSPD;
8476 8516
8477 CSR_WRITE(sc, WMREG_CTRL, tmp); 8517 CSR_WRITE(sc, WMREG_CTRL, tmp);
8478 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS); 8518 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
 8519 CSR_WRITE_FLUSH(sc);
8479 delay(20); 8520 delay(20);
8480 8521
8481 CSR_WRITE(sc, WMREG_CTRL, ctrl); 8522 CSR_WRITE(sc, WMREG_CTRL, ctrl);
8482 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext); 8523 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
 8524 CSR_WRITE_FLUSH(sc);
8483 delay(20); 8525 delay(20);
8484} 8526}
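The CSR_WRITE_FLUSH() calls added in this function sit between a register write and the following delay(20). Register writes over PCI/PCIe may be posted, so without a read-back the delay can start before the write has actually reached the chip; reading any device register forces posted writes to complete first (a dummy read of STATUS is the conventional choice in the e1000 family, and that is what CSR_WRITE_FLUSH is assumed to expand to here). The stand-alone sketch below only models that ordering; the helpers and offsets are illustrative, not the driver's bus_space accessors:

#include <stdint.h>
#include <stdio.h>

#define REG_CTRL	0x0000
#define REG_STATUS	0x0008

static uint32_t fake_regs[16];	/* stand-in for the device's register file */

static void
write_reg(unsigned int off, uint32_t val)
{
	fake_regs[off / 4] = val;	/* a real write may be posted by the bus */
}

static uint32_t
read_reg(unsigned int off)
{
	return fake_regs[off / 4];
}

/*
 * Models the read-back barrier: on real hardware, reading any device
 * register (STATUS is the usual choice) makes the bridge flush posted
 * writes before the read completes, so a following delay really starts
 * after the write has hit the device.
 */
static void
write_flush(void)
{
	(void)read_reg(REG_STATUS);
}

static void
delay_us(unsigned int us)
{
	(void)us;
}

int
main(void)
{
	write_reg(REG_CTRL, 0x1);
	write_flush();		/* read-back between the write and the delay */
	delay_us(20);
	printf("CTRL now %#x\n", read_reg(REG_CTRL));
	return 0;
}

The same read-back is applied below in wm_smbustopci() before its delay(10) and delay(50*1000) waits.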
8485 8527
8486static void 8528static void
8487wm_smbustopci(struct wm_softc *sc) 8529wm_smbustopci(struct wm_softc *sc)
8488{ 8530{
8489 uint32_t fwsm; 8531 uint32_t fwsm;
8490 8532
8491 fwsm = CSR_READ(sc, WMREG_FWSM); 8533 fwsm = CSR_READ(sc, WMREG_FWSM);
8492 if (((fwsm & FWSM_FW_VALID) == 0) 8534 if (((fwsm & FWSM_FW_VALID) == 0)
8493 && ((wm_check_reset_block(sc) == 0))) { 8535 && ((wm_check_reset_block(sc) == 0))) {
8494 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE; 8536 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
8495 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE; 8537 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
8496 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 8538 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
 8539 CSR_WRITE_FLUSH(sc);
8497 delay(10); 8540 delay(10);
8498 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE; 8541 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
8499 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 8542 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
 8543 CSR_WRITE_FLUSH(sc);
8500 delay(50*1000); 8544 delay(50*1000);
8501 8545
8502 /* 8546 /*
8503 * Gate automatic PHY configuration by hardware on non-managed 8547 * Gate automatic PHY configuration by hardware on non-managed
8504 * 82579 8548 * 82579
8505 */ 8549 */
8506 if (sc->sc_type == WM_T_PCH2) 8550 if (sc->sc_type == WM_T_PCH2)
8507 wm_gate_hw_phy_config_ich8lan(sc, 1); 8551 wm_gate_hw_phy_config_ich8lan(sc, 1);
8508 } 8552 }
8509} 8553}
8510 8554
8511static void 8555static void
8512wm_set_pcie_completion_timeout(struct wm_softc *sc) 8556wm_set_pcie_completion_timeout(struct wm_softc *sc)
8513{ 8557{
8514 uint32_t gcr; 8558 uint32_t gcr;
8515 pcireg_t ctrl2; 8559 pcireg_t ctrl2;
8516 8560
8517 gcr = CSR_READ(sc, WMREG_GCR); 8561 gcr = CSR_READ(sc, WMREG_GCR);
8518 8562
8519 /* Only take action if timeout value is defaulted to 0 */ 8563 /* Only take action if timeout value is defaulted to 0 */
8520 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0) 8564 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
8521 goto out; 8565 goto out;
8522 8566
8523 if ((gcr & GCR_CAP_VER2) == 0) { 8567 if ((gcr & GCR_CAP_VER2) == 0) {
8524 gcr |= GCR_CMPL_TMOUT_10MS; 8568 gcr |= GCR_CMPL_TMOUT_10MS;
8525 goto out; 8569 goto out;
8526 } 8570 }
8527 8571
8528 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag, 8572 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
8529 sc->sc_pcixe_capoff + PCIE_DCSR2); 8573 sc->sc_pcixe_capoff + PCIE_DCSR2);
8530 ctrl2 |= WM_PCIE_DCSR2_16MS; 8574 ctrl2 |= WM_PCIE_DCSR2_16MS;
8531 pci_conf_write(sc->sc_pc, sc->sc_pcitag, 8575 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
8532 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2); 8576 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
8533 8577
8534out: 8578out:
8535 /* Disable completion timeout resend */ 8579 /* Disable completion timeout resend */
8536 gcr &= ~GCR_CMPL_TMOUT_RESEND; 8580 gcr &= ~GCR_CMPL_TMOUT_RESEND;
8537 8581
8538 CSR_WRITE(sc, WMREG_GCR, gcr); 8582 CSR_WRITE(sc, WMREG_GCR, gcr);
8539} 8583}
8540 8584
8541/* special case - for 82575 - need to do manual init ... */ 8585/* special case - for 82575 - need to do manual init ... */
8542static void 8586static void
8543wm_reset_init_script_82575(struct wm_softc *sc) 8587wm_reset_init_script_82575(struct wm_softc *sc)
8544{ 8588{
8545 /* 8589 /*
8546 * Remark: this is untested code - we have no board without EEPROM; 8590 * Remark: this is untested code - we have no board without EEPROM;
8547 * same setup as mentioned in the FreeBSD driver for the i82575. 8591 * same setup as mentioned in the FreeBSD driver for the i82575.
8548 */ 8592 */
8549 8593
8550 /* SerDes configuration via SERDESCTRL */ 8594 /* SerDes configuration via SERDESCTRL */
8551 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c); 8595 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
8552 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78); 8596 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
8553 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23); 8597 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
8554 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15); 8598 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
8555 8599
8556 /* CCM configuration via CCMCTL register */ 8600 /* CCM configuration via CCMCTL register */
8557 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00); 8601 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
8558 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00); 8602 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
8559 8603
8560 /* PCIe lanes configuration */ 8604 /* PCIe lanes configuration */
8561 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec); 8605 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
8562 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf); 8606 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
8563 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05); 8607 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
8564 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81); 8608 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
8565 8609
8566 /* PCIe PLL Configuration */ 8610 /* PCIe PLL Configuration */
8567 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47); 8611 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
8568 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00); 8612 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
8569 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00); 8613 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
8570} 8614}
8571 8615
8572static void 8616static void
8573wm_init_manageability(struct wm_softc *sc) 8617wm_init_manageability(struct wm_softc *sc)
8574{ 8618{
8575 8619
8576 if (sc->sc_flags & WM_F_HAS_MANAGE) { 8620 if (sc->sc_flags & WM_F_HAS_MANAGE) {
8577 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H); 8621 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
8578 uint32_t manc = CSR_READ(sc, WMREG_MANC); 8622 uint32_t manc = CSR_READ(sc, WMREG_MANC);
8579 8623
8580 /* Disable hardware interception of ARP */ 8624 /* Disable hardware interception of ARP */
8581 manc &= ~MANC_ARP_EN; 8625 manc &= ~MANC_ARP_EN;
8582 8626
8583 /* enable receiving management packets to the host */ 8627 /* enable receiving management packets to the host */
8584 if (sc->sc_type >= WM_T_82571) { 8628 if (sc->sc_type >= WM_T_82571) {
8585 manc |= MANC_EN_MNG2HOST; 8629 manc |= MANC_EN_MNG2HOST;
8586 manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624; 8630 manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
8587 CSR_WRITE(sc, WMREG_MANC2H, manc2h); 8631 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
8588  8632
8589 } 8633 }
8590 8634
8591 CSR_WRITE(sc, WMREG_MANC, manc); 8635 CSR_WRITE(sc, WMREG_MANC, manc);
8592 } 8636 }
8593} 8637}
8594 8638
8595static void 8639static void
8596wm_release_manageability(struct wm_softc *sc) 8640wm_release_manageability(struct wm_softc *sc)
8597{ 8641{
8598 8642
8599 if (sc->sc_flags & WM_F_HAS_MANAGE) { 8643 if (sc->sc_flags & WM_F_HAS_MANAGE) {
8600 uint32_t manc = CSR_READ(sc, WMREG_MANC); 8644 uint32_t manc = CSR_READ(sc, WMREG_MANC);
8601 8645
8602 manc |= MANC_ARP_EN; 8646 manc |= MANC_ARP_EN;
8603 if (sc->sc_type >= WM_T_82571) 8647 if (sc->sc_type >= WM_T_82571)
8604 manc &= ~MANC_EN_MNG2HOST; 8648 manc &= ~MANC_EN_MNG2HOST;
8605 8649
8606 CSR_WRITE(sc, WMREG_MANC, manc); 8650 CSR_WRITE(sc, WMREG_MANC, manc);
8607 } 8651 }
8608} 8652}
8609 8653
8610static void 8654static void
8611wm_get_wakeup(struct wm_softc *sc) 8655wm_get_wakeup(struct wm_softc *sc)
8612{ 8656{
8613 8657
8614 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */ 8658 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
8615 switch (sc->sc_type) { 8659 switch (sc->sc_type) {
8616 case WM_T_82573: 8660 case WM_T_82573:
8617 case WM_T_82583: 8661 case WM_T_82583:
8618 sc->sc_flags |= WM_F_HAS_AMT; 8662 sc->sc_flags |= WM_F_HAS_AMT;
8619 /* FALLTHROUGH */ 8663 /* FALLTHROUGH */
8620 case WM_T_80003: 8664 case WM_T_80003:
8621 case WM_T_82541: 8665 case WM_T_82541:
8622 case WM_T_82547: 8666 case WM_T_82547:
8623 case WM_T_82571: 8667 case WM_T_82571:
8624 case WM_T_82572: 8668 case WM_T_82572:
8625 case WM_T_82574: 8669 case WM_T_82574:
8626 case WM_T_82575: 8670 case WM_T_82575:
8627 case WM_T_82576: 8671 case WM_T_82576:
8628 case WM_T_82580: 8672 case WM_T_82580:
8629 case WM_T_82580ER: 8673 case WM_T_82580ER:
8630 case WM_T_I350: 8674 case WM_T_I350:
8631 case WM_T_I354: 8675 case WM_T_I354:
8632 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0) 8676 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
8633 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID; 8677 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
8634 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES; 8678 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
8635 break; 8679 break;
8636 case WM_T_ICH8: 8680 case WM_T_ICH8:
8637 case WM_T_ICH9: 8681 case WM_T_ICH9:
8638 case WM_T_ICH10: 8682 case WM_T_ICH10:
8639 case WM_T_PCH: 8683 case WM_T_PCH:
8640 case WM_T_PCH2: 8684 case WM_T_PCH2:
8641 case WM_T_PCH_LPT: 8685 case WM_T_PCH_LPT:
8642 sc->sc_flags |= WM_F_HAS_AMT; 8686 sc->sc_flags |= WM_F_HAS_AMT;
8643 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES; 8687 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
8644 break; 8688 break;
8645 default: 8689 default:
8646 break; 8690 break;
8647 } 8691 }
8648 8692
8649 /* 1: HAS_MANAGE */ 8693 /* 1: HAS_MANAGE */
8650 if (wm_enable_mng_pass_thru(sc) != 0) 8694 if (wm_enable_mng_pass_thru(sc) != 0)
8651 sc->sc_flags |= WM_F_HAS_MANAGE; 8695 sc->sc_flags |= WM_F_HAS_MANAGE;
8652 8696
8653#ifdef WM_DEBUG 8697#ifdef WM_DEBUG
8654 printf("\n"); 8698 printf("\n");
8655 if ((sc->sc_flags & WM_F_HAS_AMT) != 0) 8699 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
8656 printf("HAS_AMT,"); 8700 printf("HAS_AMT,");
8657 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) 8701 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
8658 printf("ARC_SUBSYS_VALID,"); 8702 printf("ARC_SUBSYS_VALID,");
8659 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0) 8703 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
8660 printf("ASF_FIRMWARE_PRES,"); 8704 printf("ASF_FIRMWARE_PRES,");
8661 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0) 8705 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
8662 printf("HAS_MANAGE,"); 8706 printf("HAS_MANAGE,");
8663 printf("\n"); 8707 printf("\n");
8664#endif 8708#endif
8665 /* 8709 /*
8666 * Note that the WOL flags are set after the resetting of the EEPROM 8710 * Note that the WOL flags are set after the resetting of the EEPROM
8667 * stuff. 8711 * stuff.
8668 */ 8712 */
8669} 8713}
8670 8714
8671#ifdef WM_WOL 8715#ifdef WM_WOL
8672/* WOL in the newer chipset interfaces (pchlan) */ 8716/* WOL in the newer chipset interfaces (pchlan) */
8673static void 8717static void
8674wm_enable_phy_wakeup(struct wm_softc *sc) 8718wm_enable_phy_wakeup(struct wm_softc *sc)
8675{ 8719{
8676#if 0 8720#if 0
8677 uint16_t preg; 8721 uint16_t preg;
8678 8722
8679 /* Copy MAC RARs to PHY RARs */ 8723 /* Copy MAC RARs to PHY RARs */
8680 8724
8681 /* Copy MAC MTA to PHY MTA */ 8725 /* Copy MAC MTA to PHY MTA */
8682 8726
8683 /* Configure PHY Rx Control register */ 8727 /* Configure PHY Rx Control register */
8684 8728
8685 /* Enable PHY wakeup in MAC register */ 8729 /* Enable PHY wakeup in MAC register */
8686 8730
8687 /* Configure and enable PHY wakeup in PHY registers */ 8731 /* Configure and enable PHY wakeup in PHY registers */
8688 8732
8689 /* Activate PHY wakeup */ 8733 /* Activate PHY wakeup */
8690 8734
8691 /* XXX */ 8735 /* XXX */
8692#endif 8736#endif
8693} 8737}
8694 8738
8695static void 8739static void
8696wm_enable_wakeup(struct wm_softc *sc) 8740wm_enable_wakeup(struct wm_softc *sc)
8697{ 8741{
8698 uint32_t reg, pmreg; 8742 uint32_t reg, pmreg;
8699 pcireg_t pmode; 8743 pcireg_t pmode;
8700 8744
8701 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT, 8745 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
8702 &pmreg, NULL) == 0) 8746 &pmreg, NULL) == 0)
8703 return; 8747 return;
8704 8748
8705 /* Advertise the wakeup capability */ 8749 /* Advertise the wakeup capability */
8706 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2) 8750 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
8707 | CTRL_SWDPIN(3)); 8751 | CTRL_SWDPIN(3));
8708 CSR_WRITE(sc, WMREG_WUC, WUC_APME); 8752 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
8709 8753
8710 /* ICH workaround */ 8754 /* ICH workaround */
8711 switch (sc->sc_type) { 8755 switch (sc->sc_type) {
8712 case WM_T_ICH8: 8756 case WM_T_ICH8:
8713 case WM_T_ICH9: 8757 case WM_T_ICH9:
8714 case WM_T_ICH10: 8758 case WM_T_ICH10:
8715 case WM_T_PCH: 8759 case WM_T_PCH:
8716 case WM_T_PCH2: 8760 case WM_T_PCH2:
8717 case WM_T_PCH_LPT: 8761 case WM_T_PCH_LPT:
8718 /* Disable gig during WOL */ 8762 /* Disable gig during WOL */
8719 reg = CSR_READ(sc, WMREG_PHY_CTRL); 8763 reg = CSR_READ(sc, WMREG_PHY_CTRL);
8720 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS; 8764 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
8721 CSR_WRITE(sc, WMREG_PHY_CTRL, reg); 8765 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8722 if (sc->sc_type == WM_T_PCH) 8766 if (sc->sc_type == WM_T_PCH)
8723 wm_gmii_reset(sc); 8767 wm_gmii_reset(sc);
8724 8768
8725 /* Power down workaround */ 8769 /* Power down workaround */
8726 if (sc->sc_phytype == WMPHY_82577) { 8770 if (sc->sc_phytype == WMPHY_82577) {
8727 struct mii_softc *child; 8771 struct mii_softc *child;
8728 8772
8729 /* Assume that the PHY is copper */ 8773 /* Assume that the PHY is copper */
8730 child = LIST_FIRST(&sc->sc_mii.mii_phys); 8774 child = LIST_FIRST(&sc->sc_mii.mii_phys);
8731 if (child->mii_mpd_rev <= 2) 8775 if (child->mii_mpd_rev <= 2)
8732 sc->sc_mii.mii_writereg(sc->sc_dev, 1, 8776 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
8733 (768 << 5) | 25, 0x0444); /* magic num */ 8777 (768 << 5) | 25, 0x0444); /* magic num */
8734 } 8778 }
8735 break; 8779 break;
8736 default: 8780 default:
8737 break; 8781 break;
8738 } 8782 }
8739 8783
8740 /* Keep the laser running on fiber adapters */ 8784 /* Keep the laser running on fiber adapters */
8741 if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0) 8785 if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
8742 || (sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0) { 8786 || (sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0) {
8743 reg = CSR_READ(sc, WMREG_CTRL_EXT); 8787 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8744 reg |= CTRL_EXT_SWDPIN(3); 8788 reg |= CTRL_EXT_SWDPIN(3);
8745 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 8789 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
8746 } 8790 }
8747 8791
8748 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG; 8792 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
8749#if 0 /* for the multicast packet */ 8793#if 0 /* for the multicast packet */
8750 reg |= WUFC_MC; 8794 reg |= WUFC_MC;
8751 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE); 8795 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
8752#endif 8796#endif
8753 8797
8754 if (sc->sc_type == WM_T_PCH) { 8798 if (sc->sc_type == WM_T_PCH) {
8755 wm_enable_phy_wakeup(sc); 8799 wm_enable_phy_wakeup(sc);
8756 } else { 8800 } else {
8757 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN); 8801 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
8758 CSR_WRITE(sc, WMREG_WUFC, reg); 8802 CSR_WRITE(sc, WMREG_WUFC, reg);
8759 } 8803 }
8760 8804
8761 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) 8805 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8762 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) 8806 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8763 || (sc->sc_type == WM_T_PCH2)) 8807 || (sc->sc_type == WM_T_PCH2))
8764 && (sc->sc_phytype == WMPHY_IGP_3)) 8808 && (sc->sc_phytype == WMPHY_IGP_3))
8765 wm_igp3_phy_powerdown_workaround_ich8lan(sc); 8809 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
8766 8810
8767 /* Request PME */ 8811 /* Request PME */
8768 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR); 8812 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
8769#if 0 8813#if 0
8770 /* Disable WOL */ 8814 /* Disable WOL */
8771 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN); 8815 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
8772#else 8816#else
8773 /* For WOL */ 8817 /* For WOL */
8774 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN; 8818 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
8775#endif 8819#endif
8776 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode); 8820 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
8777} 8821}
8778#endif /* WM_WOL */ 8822#endif /* WM_WOL */
8779 8823
8780static bool 8824static bool
8781wm_suspend(device_t self, const pmf_qual_t *qual) 8825wm_suspend(device_t self, const pmf_qual_t *qual)
8782{ 8826{
8783 struct wm_softc *sc = device_private(self); 8827 struct wm_softc *sc = device_private(self);
8784 8828
8785 wm_release_manageability(sc); 8829 wm_release_manageability(sc);
8786 wm_release_hw_control(sc); 8830 wm_release_hw_control(sc);
8787#ifdef WM_WOL 8831#ifdef WM_WOL
8788 wm_enable_wakeup(sc); 8832 wm_enable_wakeup(sc);
8789#endif 8833#endif
8790 8834
8791 return true; 8835 return true;
8792} 8836}
8793 8837
8794static bool 8838static bool
8795wm_resume(device_t self, const pmf_qual_t *qual) 8839wm_resume(device_t self, const pmf_qual_t *qual)
8796{ 8840{
8797 struct wm_softc *sc = device_private(self); 8841 struct wm_softc *sc = device_private(self);
8798 8842
8799 wm_init_manageability(sc); 8843 wm_init_manageability(sc);
8800 8844
8801 return true; 8845 return true;
8802} 8846}
8803 8847
8804static void 8848static void
8805wm_set_eee_i350(struct wm_softc * sc) 8849wm_set_eee_i350(struct wm_softc * sc)
8806{ 8850{
8807 uint32_t ipcnfg, eeer; 8851 uint32_t ipcnfg, eeer;
8808 8852
8809 ipcnfg = CSR_READ(sc, WMREG_IPCNFG); 8853 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
8810 eeer = CSR_READ(sc, WMREG_EEER); 8854 eeer = CSR_READ(sc, WMREG_EEER);
8811 8855
8812 if ((sc->sc_flags & WM_F_EEE) != 0) { 8856 if ((sc->sc_flags & WM_F_EEE) != 0) {
8813 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN); 8857 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
8814 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN 8858 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
8815 | EEER_LPI_FC); 8859 | EEER_LPI_FC);
8816 } else { 8860 } else {
8817 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN); 8861 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
8818 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN 8862 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
8819 | EEER_LPI_FC); 8863 | EEER_LPI_FC);
8820 } 8864 }
8821 8865
8822 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg); 8866 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
8823 CSR_WRITE(sc, WMREG_EEER, eeer); 8867 CSR_WRITE(sc, WMREG_EEER, eeer);
8824 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */ 8868 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
8825 CSR_READ(sc, WMREG_EEER); /* XXX flush? */ 8869 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
8826} 8870}