Fri Dec 21 08:29:22 2018 UTC ()
 Fix WoL detection once again.


(msaitoh)
diff -r1.610 -r1.611 src/sys/dev/pci/if_wm.c

cvs diff -r1.610 -r1.611 src/sys/dev/pci/if_wm.c (switch to unified diff)

--- src/sys/dev/pci/if_wm.c 2018/12/20 09:32:13 1.610
+++ src/sys/dev/pci/if_wm.c 2018/12/21 08:29:22 1.611
@@ -1,1085 +1,1085 @@ @@ -1,1085 +1,1085 @@
1/* $NetBSD: if_wm.c,v 1.610 2018/12/20 09:32:13 msaitoh Exp $ */ 1/* $NetBSD: if_wm.c,v 1.611 2018/12/21 08:29:22 msaitoh Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc. 4 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc. 7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the 15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution. 16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software 17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement: 18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by 19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc. 20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior 22 * or promote products derived from this software without specific prior
23 * written permission. 23 * written permission.
24 * 24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE. 35 * POSSIBILITY OF SUCH DAMAGE.
36 */ 36 */
37 37
38/******************************************************************************* 38/*******************************************************************************
39 39
40 Copyright (c) 2001-2005, Intel Corporation 40 Copyright (c) 2001-2005, Intel Corporation
41 All rights reserved. 41 All rights reserved.
42  42
43 Redistribution and use in source and binary forms, with or without 43 Redistribution and use in source and binary forms, with or without
44 modification, are permitted provided that the following conditions are met: 44 modification, are permitted provided that the following conditions are met:
45  45
46 1. Redistributions of source code must retain the above copyright notice, 46 1. Redistributions of source code must retain the above copyright notice,
47 this list of conditions and the following disclaimer. 47 this list of conditions and the following disclaimer.
48  48
49 2. Redistributions in binary form must reproduce the above copyright 49 2. Redistributions in binary form must reproduce the above copyright
50 notice, this list of conditions and the following disclaimer in the 50 notice, this list of conditions and the following disclaimer in the
51 documentation and/or other materials provided with the distribution. 51 documentation and/or other materials provided with the distribution.
52  52
53 3. Neither the name of the Intel Corporation nor the names of its 53 3. Neither the name of the Intel Corporation nor the names of its
54 contributors may be used to endorse or promote products derived from 54 contributors may be used to endorse or promote products derived from
55 this software without specific prior written permission. 55 this software without specific prior written permission.
56  56
57 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 57 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 58 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 59 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 60 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 61 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 62 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 63 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 64 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 65 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 66 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67 POSSIBILITY OF SUCH DAMAGE. 67 POSSIBILITY OF SUCH DAMAGE.
68 68
69*******************************************************************************/ 69*******************************************************************************/
70/* 70/*
71 * Device driver for the Intel i8254x family of Gigabit Ethernet chips. 71 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72 * 72 *
73 * TODO (in order of importance): 73 * TODO (in order of importance):
74 * 74 *
75 * - Check XXX'ed comments 75 * - Check XXX'ed comments
76 * - TX Multi queue improvement (refine queue selection logic) 76 * - TX Multi queue improvement (refine queue selection logic)
77 * - Split header buffer for newer descriptors 77 * - Split header buffer for newer descriptors
78 * - EEE (Energy Efficiency Ethernet) 78 * - EEE (Energy Efficiency Ethernet)
79 * - Virtual Function 79 * - Virtual Function
80 * - Set LED correctly (based on contents in EEPROM) 80 * - Set LED correctly (based on contents in EEPROM)
81 * - Rework how parameters are loaded from the EEPROM. 81 * - Rework how parameters are loaded from the EEPROM.
82 * - Image Unique ID 82 * - Image Unique ID
83 */ 83 */
84 84
85#include <sys/cdefs.h> 85#include <sys/cdefs.h>
86__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.610 2018/12/20 09:32:13 msaitoh Exp $"); 86__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.611 2018/12/21 08:29:22 msaitoh Exp $");
87 87
88#ifdef _KERNEL_OPT 88#ifdef _KERNEL_OPT
89#include "opt_net_mpsafe.h" 89#include "opt_net_mpsafe.h"
90#include "opt_if_wm.h" 90#include "opt_if_wm.h"
91#endif 91#endif
92 92
93#include <sys/param.h> 93#include <sys/param.h>
94#include <sys/systm.h> 94#include <sys/systm.h>
95#include <sys/callout.h> 95#include <sys/callout.h>
96#include <sys/mbuf.h> 96#include <sys/mbuf.h>
97#include <sys/malloc.h> 97#include <sys/malloc.h>
98#include <sys/kmem.h> 98#include <sys/kmem.h>
99#include <sys/kernel.h> 99#include <sys/kernel.h>
100#include <sys/socket.h> 100#include <sys/socket.h>
101#include <sys/ioctl.h> 101#include <sys/ioctl.h>
102#include <sys/errno.h> 102#include <sys/errno.h>
103#include <sys/device.h> 103#include <sys/device.h>
104#include <sys/queue.h> 104#include <sys/queue.h>
105#include <sys/syslog.h> 105#include <sys/syslog.h>
106#include <sys/interrupt.h> 106#include <sys/interrupt.h>
107#include <sys/cpu.h> 107#include <sys/cpu.h>
108#include <sys/pcq.h> 108#include <sys/pcq.h>
109 109
110#include <sys/rndsource.h> 110#include <sys/rndsource.h>
111 111
112#include <net/if.h> 112#include <net/if.h>
113#include <net/if_dl.h> 113#include <net/if_dl.h>
114#include <net/if_media.h> 114#include <net/if_media.h>
115#include <net/if_ether.h> 115#include <net/if_ether.h>
116 116
117#include <net/bpf.h> 117#include <net/bpf.h>
118 118
119#include <net/rss_config.h> 119#include <net/rss_config.h>
120 120
121#include <netinet/in.h> /* XXX for struct ip */ 121#include <netinet/in.h> /* XXX for struct ip */
122#include <netinet/in_systm.h> /* XXX for struct ip */ 122#include <netinet/in_systm.h> /* XXX for struct ip */
123#include <netinet/ip.h> /* XXX for struct ip */ 123#include <netinet/ip.h> /* XXX for struct ip */
124#include <netinet/ip6.h> /* XXX for struct ip6_hdr */ 124#include <netinet/ip6.h> /* XXX for struct ip6_hdr */
125#include <netinet/tcp.h> /* XXX for struct tcphdr */ 125#include <netinet/tcp.h> /* XXX for struct tcphdr */
126 126
127#include <sys/bus.h> 127#include <sys/bus.h>
128#include <sys/intr.h> 128#include <sys/intr.h>
129#include <machine/endian.h> 129#include <machine/endian.h>
130 130
131#include <dev/mii/mii.h> 131#include <dev/mii/mii.h>
132#include <dev/mii/miivar.h> 132#include <dev/mii/miivar.h>
133#include <dev/mii/miidevs.h> 133#include <dev/mii/miidevs.h>
134#include <dev/mii/mii_bitbang.h> 134#include <dev/mii/mii_bitbang.h>
135#include <dev/mii/ikphyreg.h> 135#include <dev/mii/ikphyreg.h>
136#include <dev/mii/igphyreg.h> 136#include <dev/mii/igphyreg.h>
137#include <dev/mii/igphyvar.h> 137#include <dev/mii/igphyvar.h>
138#include <dev/mii/inbmphyreg.h> 138#include <dev/mii/inbmphyreg.h>
139#include <dev/mii/ihphyreg.h> 139#include <dev/mii/ihphyreg.h>
140 140
141#include <dev/pci/pcireg.h> 141#include <dev/pci/pcireg.h>
142#include <dev/pci/pcivar.h> 142#include <dev/pci/pcivar.h>
143#include <dev/pci/pcidevs.h> 143#include <dev/pci/pcidevs.h>
144 144
145#include <dev/pci/if_wmreg.h> 145#include <dev/pci/if_wmreg.h>
146#include <dev/pci/if_wmvar.h> 146#include <dev/pci/if_wmvar.h>
147 147
148#ifdef WM_DEBUG 148#ifdef WM_DEBUG
149#define WM_DEBUG_LINK __BIT(0) 149#define WM_DEBUG_LINK __BIT(0)
150#define WM_DEBUG_TX __BIT(1) 150#define WM_DEBUG_TX __BIT(1)
151#define WM_DEBUG_RX __BIT(2) 151#define WM_DEBUG_RX __BIT(2)
152#define WM_DEBUG_GMII __BIT(3) 152#define WM_DEBUG_GMII __BIT(3)
153#define WM_DEBUG_MANAGE __BIT(4) 153#define WM_DEBUG_MANAGE __BIT(4)
154#define WM_DEBUG_NVM __BIT(5) 154#define WM_DEBUG_NVM __BIT(5)
155#define WM_DEBUG_INIT __BIT(6) 155#define WM_DEBUG_INIT __BIT(6)
156#define WM_DEBUG_LOCK __BIT(7) 156#define WM_DEBUG_LOCK __BIT(7)
157int wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII 157int wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
158 | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK; 158 | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
159 159
160#define DPRINTF(x, y) if (wm_debug & (x)) printf y 160#define DPRINTF(x, y) if (wm_debug & (x)) printf y
161#else 161#else
162#define DPRINTF(x, y) /* nothing */ 162#define DPRINTF(x, y) /* nothing */
163#endif /* WM_DEBUG */ 163#endif /* WM_DEBUG */
164 164
165#ifdef NET_MPSAFE 165#ifdef NET_MPSAFE
166#define WM_MPSAFE 1 166#define WM_MPSAFE 1
167#define CALLOUT_FLAGS CALLOUT_MPSAFE 167#define CALLOUT_FLAGS CALLOUT_MPSAFE
168#else 168#else
169#define CALLOUT_FLAGS 0 169#define CALLOUT_FLAGS 0
170#endif 170#endif
171 171
172/* 172/*
173 * This device driver's max interrupt numbers. 173 * This device driver's max interrupt numbers.
174 */ 174 */
175#define WM_MAX_NQUEUEINTR 16 175#define WM_MAX_NQUEUEINTR 16
176#define WM_MAX_NINTR (WM_MAX_NQUEUEINTR + 1) 176#define WM_MAX_NINTR (WM_MAX_NQUEUEINTR + 1)
177 177
178#ifndef WM_DISABLE_MSI 178#ifndef WM_DISABLE_MSI
179#define WM_DISABLE_MSI 0 179#define WM_DISABLE_MSI 0
180#endif 180#endif
181#ifndef WM_DISABLE_MSIX 181#ifndef WM_DISABLE_MSIX
182#define WM_DISABLE_MSIX 0 182#define WM_DISABLE_MSIX 0
183#endif 183#endif
184 184
185int wm_disable_msi = WM_DISABLE_MSI; 185int wm_disable_msi = WM_DISABLE_MSI;
186int wm_disable_msix = WM_DISABLE_MSIX; 186int wm_disable_msix = WM_DISABLE_MSIX;
187 187
188#ifndef WM_WATCHDOG_TIMEOUT 188#ifndef WM_WATCHDOG_TIMEOUT
189#define WM_WATCHDOG_TIMEOUT 5 189#define WM_WATCHDOG_TIMEOUT 5
190#endif 190#endif
191static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT; 191static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;
192 192
193/* 193/*
194 * Transmit descriptor list size. Due to errata, we can only have 194 * Transmit descriptor list size. Due to errata, we can only have
195 * 256 hardware descriptors in the ring on < 82544, but we use 4096 195 * 256 hardware descriptors in the ring on < 82544, but we use 4096
196 * on >= 82544. We tell the upper layers that they can queue a lot 196 * on >= 82544. We tell the upper layers that they can queue a lot
197 * of packets, and we go ahead and manage up to 64 (16 for the i82547) 197 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
198 * of them at a time. 198 * of them at a time.
199 * 199 *
200 * We allow up to 64 DMA segments per packet. Pathological packet 200 * We allow up to 64 DMA segments per packet. Pathological packet
201 * chains containing many small mbufs have been observed in zero-copy 201 * chains containing many small mbufs have been observed in zero-copy
202 * situations with jumbo frames. If a mbuf chain has more than 64 DMA segments, 202 * situations with jumbo frames. If a mbuf chain has more than 64 DMA segments,
203 * m_defrag() is called to reduce it. 203 * m_defrag() is called to reduce it.
204 */ 204 */
205#define WM_NTXSEGS 64 205#define WM_NTXSEGS 64
206#define WM_IFQUEUELEN 256 206#define WM_IFQUEUELEN 256
207#define WM_TXQUEUELEN_MAX 64 207#define WM_TXQUEUELEN_MAX 64
208#define WM_TXQUEUELEN_MAX_82547 16 208#define WM_TXQUEUELEN_MAX_82547 16
209#define WM_TXQUEUELEN(txq) ((txq)->txq_num) 209#define WM_TXQUEUELEN(txq) ((txq)->txq_num)
210#define WM_TXQUEUELEN_MASK(txq) (WM_TXQUEUELEN(txq) - 1) 210#define WM_TXQUEUELEN_MASK(txq) (WM_TXQUEUELEN(txq) - 1)
211#define WM_TXQUEUE_GC(txq) (WM_TXQUEUELEN(txq) / 8) 211#define WM_TXQUEUE_GC(txq) (WM_TXQUEUELEN(txq) / 8)
212#define WM_NTXDESC_82542 256 212#define WM_NTXDESC_82542 256
213#define WM_NTXDESC_82544 4096 213#define WM_NTXDESC_82544 4096
214#define WM_NTXDESC(txq) ((txq)->txq_ndesc) 214#define WM_NTXDESC(txq) ((txq)->txq_ndesc)
215#define WM_NTXDESC_MASK(txq) (WM_NTXDESC(txq) - 1) 215#define WM_NTXDESC_MASK(txq) (WM_NTXDESC(txq) - 1)
216#define WM_TXDESCS_SIZE(txq) (WM_NTXDESC(txq) * (txq)->txq_descsize) 216#define WM_TXDESCS_SIZE(txq) (WM_NTXDESC(txq) * (txq)->txq_descsize)
217#define WM_NEXTTX(txq, x) (((x) + 1) & WM_NTXDESC_MASK(txq)) 217#define WM_NEXTTX(txq, x) (((x) + 1) & WM_NTXDESC_MASK(txq))
218#define WM_NEXTTXS(txq, x) (((x) + 1) & WM_TXQUEUELEN_MASK(txq)) 218#define WM_NEXTTXS(txq, x) (((x) + 1) & WM_TXQUEUELEN_MASK(txq))
219 219
220#define WM_MAXTXDMA (2 * round_page(IP_MAXPACKET)) /* for TSO */ 220#define WM_MAXTXDMA (2 * round_page(IP_MAXPACKET)) /* for TSO */
221 221
222#define WM_TXINTERQSIZE 256 222#define WM_TXINTERQSIZE 256
223 223
224#ifndef WM_TX_PROCESS_LIMIT_DEFAULT 224#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
225#define WM_TX_PROCESS_LIMIT_DEFAULT 100U 225#define WM_TX_PROCESS_LIMIT_DEFAULT 100U
226#endif 226#endif
227#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT 227#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
228#define WM_TX_INTR_PROCESS_LIMIT_DEFAULT 0U 228#define WM_TX_INTR_PROCESS_LIMIT_DEFAULT 0U
229#endif 229#endif
230 230
231/* 231/*
232 * Receive descriptor list size. We have one Rx buffer for normal 232 * Receive descriptor list size. We have one Rx buffer for normal
233 * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized 233 * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized
234 * packet. We allocate 256 receive descriptors, each with a 2k 234 * packet. We allocate 256 receive descriptors, each with a 2k
235 * buffer (MCLBYTES), which gives us room for 50 jumbo packets. 235 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
236 */ 236 */
237#define WM_NRXDESC 256 237#define WM_NRXDESC 256
238#define WM_NRXDESC_MASK (WM_NRXDESC - 1) 238#define WM_NRXDESC_MASK (WM_NRXDESC - 1)
239#define WM_NEXTRX(x) (((x) + 1) & WM_NRXDESC_MASK) 239#define WM_NEXTRX(x) (((x) + 1) & WM_NRXDESC_MASK)
240#define WM_PREVRX(x) (((x) - 1) & WM_NRXDESC_MASK) 240#define WM_PREVRX(x) (((x) - 1) & WM_NRXDESC_MASK)
241 241
242#ifndef WM_RX_PROCESS_LIMIT_DEFAULT 242#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
243#define WM_RX_PROCESS_LIMIT_DEFAULT 100U 243#define WM_RX_PROCESS_LIMIT_DEFAULT 100U
244#endif 244#endif
245#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT 245#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
246#define WM_RX_INTR_PROCESS_LIMIT_DEFAULT 0U 246#define WM_RX_INTR_PROCESS_LIMIT_DEFAULT 0U
247#endif 247#endif
248 248
249typedef union txdescs { 249typedef union txdescs {
250 wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544]; 250 wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
251 nq_txdesc_t sctxu_nq_txdescs[WM_NTXDESC_82544]; 251 nq_txdesc_t sctxu_nq_txdescs[WM_NTXDESC_82544];
252} txdescs_t; 252} txdescs_t;
253 253
254typedef union rxdescs { 254typedef union rxdescs {
255 wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC]; 255 wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
256 ext_rxdesc_t sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */ 256 ext_rxdesc_t sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
257 nq_rxdesc_t sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */ 257 nq_rxdesc_t sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
258} rxdescs_t; 258} rxdescs_t;
259 259
260#define WM_CDTXOFF(txq, x) ((txq)->txq_descsize * (x)) 260#define WM_CDTXOFF(txq, x) ((txq)->txq_descsize * (x))
261#define WM_CDRXOFF(rxq, x) ((rxq)->rxq_descsize * (x)) 261#define WM_CDRXOFF(rxq, x) ((rxq)->rxq_descsize * (x))
262 262
263/* 263/*
264 * Software state for transmit jobs. 264 * Software state for transmit jobs.
265 */ 265 */
266struct wm_txsoft { 266struct wm_txsoft {
267 struct mbuf *txs_mbuf; /* head of our mbuf chain */ 267 struct mbuf *txs_mbuf; /* head of our mbuf chain */
268 bus_dmamap_t txs_dmamap; /* our DMA map */ 268 bus_dmamap_t txs_dmamap; /* our DMA map */
269 int txs_firstdesc; /* first descriptor in packet */ 269 int txs_firstdesc; /* first descriptor in packet */
270 int txs_lastdesc; /* last descriptor in packet */ 270 int txs_lastdesc; /* last descriptor in packet */
271 int txs_ndesc; /* # of descriptors used */ 271 int txs_ndesc; /* # of descriptors used */
272}; 272};
273 273
274/* 274/*
275 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES) 275 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
276 * buffer and a DMA map. For packets which fill more than one buffer, we chain 276 * buffer and a DMA map. For packets which fill more than one buffer, we chain
277 * them together. 277 * them together.
278 */ 278 */
279struct wm_rxsoft { 279struct wm_rxsoft {
280 struct mbuf *rxs_mbuf; /* head of our mbuf chain */ 280 struct mbuf *rxs_mbuf; /* head of our mbuf chain */
281 bus_dmamap_t rxs_dmamap; /* our DMA map */ 281 bus_dmamap_t rxs_dmamap; /* our DMA map */
282}; 282};
283 283
284#define WM_LINKUP_TIMEOUT 50 284#define WM_LINKUP_TIMEOUT 50
285 285
286static uint16_t swfwphysem[] = { 286static uint16_t swfwphysem[] = {
287 SWFW_PHY0_SM, 287 SWFW_PHY0_SM,
288 SWFW_PHY1_SM, 288 SWFW_PHY1_SM,
289 SWFW_PHY2_SM, 289 SWFW_PHY2_SM,
290 SWFW_PHY3_SM 290 SWFW_PHY3_SM
291}; 291};
292 292
293static const uint32_t wm_82580_rxpbs_table[] = { 293static const uint32_t wm_82580_rxpbs_table[] = {
294 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 294 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
295}; 295};
296 296
297struct wm_softc; 297struct wm_softc;
298 298
299#ifdef WM_EVENT_COUNTERS 299#ifdef WM_EVENT_COUNTERS
300#define WM_Q_EVCNT_DEFINE(qname, evname) \ 300#define WM_Q_EVCNT_DEFINE(qname, evname) \
301 char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \ 301 char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
302 struct evcnt qname##_ev_##evname; 302 struct evcnt qname##_ev_##evname;
303 303
304#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype) \ 304#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype) \
305 do { \ 305 do { \
306 snprintf((q)->qname##_##evname##_evcnt_name, \ 306 snprintf((q)->qname##_##evname##_evcnt_name, \
307 sizeof((q)->qname##_##evname##_evcnt_name), \ 307 sizeof((q)->qname##_##evname##_evcnt_name), \
308 "%s%02d%s", #qname, (qnum), #evname); \ 308 "%s%02d%s", #qname, (qnum), #evname); \
309 evcnt_attach_dynamic(&(q)->qname##_ev_##evname, \ 309 evcnt_attach_dynamic(&(q)->qname##_ev_##evname, \
310 (evtype), NULL, (xname), \ 310 (evtype), NULL, (xname), \
311 (q)->qname##_##evname##_evcnt_name); \ 311 (q)->qname##_##evname##_evcnt_name); \
312 } while (0) 312 } while (0)
313 313
314#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname) \ 314#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname) \
315 WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC) 315 WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
316 316
317#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname) \ 317#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname) \
318 WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR) 318 WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
319 319
320#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum) \ 320#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum) \
321 evcnt_detach(&(q)->qname##_ev_##evname); 321 evcnt_detach(&(q)->qname##_ev_##evname);
322#endif /* WM_EVENT_COUNTERS */ 322#endif /* WM_EVENT_COUNTERS */
323 323
324struct wm_txqueue { 324struct wm_txqueue {
325 kmutex_t *txq_lock; /* lock for tx operations */ 325 kmutex_t *txq_lock; /* lock for tx operations */
326 326
327 struct wm_softc *txq_sc; /* shortcut (skip struct wm_queue) */ 327 struct wm_softc *txq_sc; /* shortcut (skip struct wm_queue) */
328 328
329 /* Software state for the transmit descriptors. */ 329 /* Software state for the transmit descriptors. */
330 int txq_num; /* must be a power of two */ 330 int txq_num; /* must be a power of two */
331 struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX]; 331 struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
332 332
333 /* TX control data structures. */ 333 /* TX control data structures. */
334 int txq_ndesc; /* must be a power of two */ 334 int txq_ndesc; /* must be a power of two */
335 size_t txq_descsize; /* a tx descriptor size */ 335 size_t txq_descsize; /* a tx descriptor size */
336 txdescs_t *txq_descs_u; 336 txdescs_t *txq_descs_u;
337 bus_dmamap_t txq_desc_dmamap; /* control data DMA map */ 337 bus_dmamap_t txq_desc_dmamap; /* control data DMA map */
338 bus_dma_segment_t txq_desc_seg; /* control data segment */ 338 bus_dma_segment_t txq_desc_seg; /* control data segment */
339 int txq_desc_rseg; /* real number of control segment */ 339 int txq_desc_rseg; /* real number of control segment */
340#define txq_desc_dma txq_desc_dmamap->dm_segs[0].ds_addr 340#define txq_desc_dma txq_desc_dmamap->dm_segs[0].ds_addr
341#define txq_descs txq_descs_u->sctxu_txdescs 341#define txq_descs txq_descs_u->sctxu_txdescs
342#define txq_nq_descs txq_descs_u->sctxu_nq_txdescs 342#define txq_nq_descs txq_descs_u->sctxu_nq_txdescs
343 343
344 bus_addr_t txq_tdt_reg; /* offset of TDT register */ 344 bus_addr_t txq_tdt_reg; /* offset of TDT register */
345 345
346 int txq_free; /* number of free Tx descriptors */ 346 int txq_free; /* number of free Tx descriptors */
347 int txq_next; /* next ready Tx descriptor */ 347 int txq_next; /* next ready Tx descriptor */
348 348
349 int txq_sfree; /* number of free Tx jobs */ 349 int txq_sfree; /* number of free Tx jobs */
350 int txq_snext; /* next free Tx job */ 350 int txq_snext; /* next free Tx job */
351 int txq_sdirty; /* dirty Tx jobs */ 351 int txq_sdirty; /* dirty Tx jobs */
352 352
353 /* These 4 variables are used only on the 82547. */ 353 /* These 4 variables are used only on the 82547. */
354 int txq_fifo_size; /* Tx FIFO size */ 354 int txq_fifo_size; /* Tx FIFO size */
355 int txq_fifo_head; /* current head of FIFO */ 355 int txq_fifo_head; /* current head of FIFO */
356 uint32_t txq_fifo_addr; /* internal address of start of FIFO */ 356 uint32_t txq_fifo_addr; /* internal address of start of FIFO */
357 int txq_fifo_stall; /* Tx FIFO is stalled */ 357 int txq_fifo_stall; /* Tx FIFO is stalled */
358 358
359 /* 359 /*
360 * When ncpu > number of Tx queues, a Tx queue is shared by multiple 360 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
361 * CPUs. This queue intermediate them without block. 361 * CPUs. This queue intermediate them without block.
362 */ 362 */
363 pcq_t *txq_interq; 363 pcq_t *txq_interq;
364 364
365 /* 365 /*
366 * NEWQUEUE devices must use not ifp->if_flags but txq->txq_flags 366 * NEWQUEUE devices must use not ifp->if_flags but txq->txq_flags
367 * to manage Tx H/W queue's busy flag. 367 * to manage Tx H/W queue's busy flag.
368 */ 368 */
369 int txq_flags; /* flags for H/W queue, see below */ 369 int txq_flags; /* flags for H/W queue, see below */
370#define WM_TXQ_NO_SPACE 0x1 370#define WM_TXQ_NO_SPACE 0x1
371 371
372 bool txq_stopping; 372 bool txq_stopping;
373 373
374 bool txq_sending; 374 bool txq_sending;
375 time_t txq_lastsent; 375 time_t txq_lastsent;
376 376
377 uint32_t txq_packets; /* for AIM */ 377 uint32_t txq_packets; /* for AIM */
378 uint32_t txq_bytes; /* for AIM */ 378 uint32_t txq_bytes; /* for AIM */
379#ifdef WM_EVENT_COUNTERS 379#ifdef WM_EVENT_COUNTERS
380 /* TX event counters */ 380 /* TX event counters */
381 WM_Q_EVCNT_DEFINE(txq, txsstall) /* Stalled due to no txs */ 381 WM_Q_EVCNT_DEFINE(txq, txsstall) /* Stalled due to no txs */
382 WM_Q_EVCNT_DEFINE(txq, txdstall) /* Stalled due to no txd */ 382 WM_Q_EVCNT_DEFINE(txq, txdstall) /* Stalled due to no txd */
383 WM_Q_EVCNT_DEFINE(txq, fifo_stall) /* FIFO stalls (82547) */ 383 WM_Q_EVCNT_DEFINE(txq, fifo_stall) /* FIFO stalls (82547) */
384 WM_Q_EVCNT_DEFINE(txq, txdw) /* Tx descriptor interrupts */ 384 WM_Q_EVCNT_DEFINE(txq, txdw) /* Tx descriptor interrupts */
385 WM_Q_EVCNT_DEFINE(txq, txqe) /* Tx queue empty interrupts */ 385 WM_Q_EVCNT_DEFINE(txq, txqe) /* Tx queue empty interrupts */
386 /* XXX not used? */ 386 /* XXX not used? */
387 387
388 WM_Q_EVCNT_DEFINE(txq, ipsum) /* IP checksums comp. */ 388 WM_Q_EVCNT_DEFINE(txq, ipsum) /* IP checksums comp. */
389 WM_Q_EVCNT_DEFINE(txq, tusum) /* TCP/UDP cksums comp. */ 389 WM_Q_EVCNT_DEFINE(txq, tusum) /* TCP/UDP cksums comp. */
390 WM_Q_EVCNT_DEFINE(txq, tusum6) /* TCP/UDP v6 cksums comp. */ 390 WM_Q_EVCNT_DEFINE(txq, tusum6) /* TCP/UDP v6 cksums comp. */
391 WM_Q_EVCNT_DEFINE(txq, tso) /* TCP seg offload (IPv4) */ 391 WM_Q_EVCNT_DEFINE(txq, tso) /* TCP seg offload (IPv4) */
392 WM_Q_EVCNT_DEFINE(txq, tso6) /* TCP seg offload (IPv6) */ 392 WM_Q_EVCNT_DEFINE(txq, tso6) /* TCP seg offload (IPv6) */
393 WM_Q_EVCNT_DEFINE(txq, tsopain) /* Painful header manip. for TSO */ 393 WM_Q_EVCNT_DEFINE(txq, tsopain) /* Painful header manip. for TSO */
394 WM_Q_EVCNT_DEFINE(txq, pcqdrop) /* Pkt dropped in pcq */ 394 WM_Q_EVCNT_DEFINE(txq, pcqdrop) /* Pkt dropped in pcq */
395 WM_Q_EVCNT_DEFINE(txq, descdrop) /* Pkt dropped in MAC desc ring */ 395 WM_Q_EVCNT_DEFINE(txq, descdrop) /* Pkt dropped in MAC desc ring */
396 /* other than toomanyseg */ 396 /* other than toomanyseg */
397 397
398 WM_Q_EVCNT_DEFINE(txq, toomanyseg) /* Pkt dropped(toomany DMA segs) */ 398 WM_Q_EVCNT_DEFINE(txq, toomanyseg) /* Pkt dropped(toomany DMA segs) */
399 WM_Q_EVCNT_DEFINE(txq, defrag) /* m_defrag() */ 399 WM_Q_EVCNT_DEFINE(txq, defrag) /* m_defrag() */
400 WM_Q_EVCNT_DEFINE(txq, underrun) /* Tx underrun */ 400 WM_Q_EVCNT_DEFINE(txq, underrun) /* Tx underrun */
401 401
402 char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")]; 402 char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
403 struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */ 403 struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
404#endif /* WM_EVENT_COUNTERS */ 404#endif /* WM_EVENT_COUNTERS */
405}; 405};
406 406
/*
 * Software state for one receive queue.  One instance is embedded in
 * each struct wm_queue; rxq_lock serializes all Rx-side access.
 */
struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* a rx descriptor size */
	rxdescs_t *rxq_descs_u;		/* descriptor ring (union of formats) */
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segment */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;		/* nonzero while dropping a frame? NOTE(review): confirm */
	int rxq_len;			/* length of the chain below; see WM_RXCHAIN_*() */
	struct mbuf *rxq_head;		/* mbuf chain being reassembled */
	struct mbuf *rxq_tail;		/* last mbuf of the chain */
	struct mbuf **rxq_tailp;	/* where the next mbuf gets linked */

	bool rxq_stopping;		/* queue is being torn down */

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};
449 449
/*
 * One Tx/Rx queue pair.  Each pair shares an interrupt (MSI-X table
 * index wmq_intr_idx) and a per-queue interrupt throttling value.
 */
struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;		/* wmq_itr needs writing to HW? NOTE(review): confirm */

	struct wm_txqueue wmq_txq;	/* transmit side of the pair */
	struct wm_rxqueue wmq_rxq;	/* receive side of the pair */

	void *wmq_si;			/* softint cookie (deferred Tx/Rx processing) */
};
462 462
/*
 * PHY access method table.  Chip-specific acquire/release bracket the
 * locked register accessors; all return 0 on success.
 */
struct wm_phyop {
	int (*acquire)(struct wm_softc *);		/* take PHY ownership */
	void (*release)(struct wm_softc *);		/* drop PHY ownership */
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;				/* settle time after PHY reset */
};
470 470
/*
 * NVM (EEPROM/flash) access method table; acquire/release bracket
 * the read accessor.
 */
struct wm_nvmop {
	int (*acquire)(struct wm_softc *);		/* take NVM ownership */
	void (*release)(struct wm_softc *);		/* drop NVM ownership */
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};
476 476
477/* 477/*
478 * Software state per device. 478 * Software state per device.
479 */ 479 */
/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;	/* PCI chipset tag */
	pcitag_t sc_pcitag;		/* our PCI tag */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;		/* Rx buffer alignment adjustment */

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_ihs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;		/* core is being stopped */

	int sc_nvm_ver_major;		/* NVM image version (major) */
	int sc_nvm_ver_minor;		/* NVM image version (minor) */
	int sc_nvm_ver_build;		/* NVM image version (build) */
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;		/* ICH8 flash: base of NVM area */
	int sc_ich8_flash_bank_size;	/* ICH8 flash: bank size */
	int sc_nvm_k1_enabled;		/* K1 (power saving) state from NVM */
	int sc_nqueues;			/* number of Tx/Rx queue pairs */
	struct wm_queue *sc_queue;	/* array of sc_nqueues queue pairs */
	u_int sc_tx_process_limit;	/* Tx processing repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx processing repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */

	int sc_affinity_offset;		/* CPU offset for interrupt affinity */

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;		/* PHY access method table */
	struct wm_nvmop nvm;		/* NVM access method table */
};
603 603
/*
 * Core lock helpers.  sc_core_lock may be NULL (single-threaded attach
 * path), so each helper checks before touching the mutex.  The statement
 * macros are wrapped in do { } while (0) so their internal "if" cannot
 * capture an "else" at the call site (CERT PRE10-C); WM_CORE_LOCKED is
 * an expression and needs no wrapper.
 */
#define	WM_CORE_LOCK(_sc)						\
	do {								\
		if ((_sc)->sc_core_lock)				\
			mutex_enter((_sc)->sc_core_lock);		\
	} while (/*CONSTCOND*/0)
#define	WM_CORE_UNLOCK(_sc)						\
	do {								\
		if ((_sc)->sc_core_lock)				\
			mutex_exit((_sc)->sc_core_lock);		\
	} while (/*CONSTCOND*/0)
#define	WM_CORE_LOCKED(_sc)						\
	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
607 607
/*
 * Reset the rx queue's partial mbuf chain to empty:
 * head = NULL, tail pointer rewound to head, accumulated length 0.
 */
#define WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

/*
 * Append mbuf (m) at the tail of the rx queue's partial chain and
 * advance the tail pointer to (m)->m_next.
 */
#define WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
620 620
/*
 * Event counter helpers.  The WM_Q_EVCNT_* variants paste the queue
 * prefix and event name into the member declared by WM_Q_EVCNT_DEFINE().
 * All compile to nothing when WM_EVENT_COUNTERS is not defined.
 */
#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */
636 636
/*
 * Register accessors.  CSR_* touch the memory-mapped device registers;
 * CSR_WRITE_FLUSH posts pending writes by reading STATUS.  The
 * ICH8_FLASH_* accessors address the separate flash BAR, offset by
 * sc_flashreg_offset.
 */
#define CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))
657 657
/*
 * DMA address of Tx/Rx descriptor x, plus the low/high 32-bit halves
 * for programming the 64-bit descriptor-base registers.  The _HI forms
 * are 0 when bus_addr_t is 32-bit (the shift is guarded by the
 * sizeof check, so it is never an over-width shift).
 */
#define WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
670 670
671/* 671/*
672 * Register read/write functions. 672 * Register read/write functions.
673 * Other than CSR_{READ|WRITE}(). 673 * Other than CSR_{READ|WRITE}().
674 */ 674 */
675#if 0 675#if 0
676static inline uint32_t wm_io_read(struct wm_softc *, int); 676static inline uint32_t wm_io_read(struct wm_softc *, int);
677#endif 677#endif
678static inline void wm_io_write(struct wm_softc *, int, uint32_t); 678static inline void wm_io_write(struct wm_softc *, int, uint32_t);
679static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t, 679static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
680 uint32_t, uint32_t); 680 uint32_t, uint32_t);
681static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t); 681static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
682 682
683/* 683/*
684 * Descriptor sync/init functions. 684 * Descriptor sync/init functions.
685 */ 685 */
686static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int); 686static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
687static inline void wm_cdrxsync(struct wm_rxqueue *, int, int); 687static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
688static inline void wm_init_rxdesc(struct wm_rxqueue *, int); 688static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
689 689
690/* 690/*
691 * Device driver interface functions and commonly used functions. 691 * Device driver interface functions and commonly used functions.
692 * match, attach, detach, init, start, stop, ioctl, watchdog and so on. 692 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
693 */ 693 */
694static const struct wm_product *wm_lookup(const struct pci_attach_args *); 694static const struct wm_product *wm_lookup(const struct pci_attach_args *);
695static int wm_match(device_t, cfdata_t, void *); 695static int wm_match(device_t, cfdata_t, void *);
696static void wm_attach(device_t, device_t, void *); 696static void wm_attach(device_t, device_t, void *);
697static int wm_detach(device_t, int); 697static int wm_detach(device_t, int);
698static bool wm_suspend(device_t, const pmf_qual_t *); 698static bool wm_suspend(device_t, const pmf_qual_t *);
699static bool wm_resume(device_t, const pmf_qual_t *); 699static bool wm_resume(device_t, const pmf_qual_t *);
700static void wm_watchdog(struct ifnet *); 700static void wm_watchdog(struct ifnet *);
701static void wm_watchdog_txq(struct ifnet *, struct wm_txqueue *, 701static void wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
702 uint16_t *); 702 uint16_t *);
703static void wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *, 703static void wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
704 uint16_t *); 704 uint16_t *);
705static void wm_tick(void *); 705static void wm_tick(void *);
706static int wm_ifflags_cb(struct ethercom *); 706static int wm_ifflags_cb(struct ethercom *);
707static int wm_ioctl(struct ifnet *, u_long, void *); 707static int wm_ioctl(struct ifnet *, u_long, void *);
708/* MAC address related */ 708/* MAC address related */
709static uint16_t wm_check_alt_mac_addr(struct wm_softc *); 709static uint16_t wm_check_alt_mac_addr(struct wm_softc *);
710static int wm_read_mac_addr(struct wm_softc *, uint8_t *); 710static int wm_read_mac_addr(struct wm_softc *, uint8_t *);
711static void wm_set_ral(struct wm_softc *, const uint8_t *, int); 711static void wm_set_ral(struct wm_softc *, const uint8_t *, int);
712static uint32_t wm_mchash(struct wm_softc *, const uint8_t *); 712static uint32_t wm_mchash(struct wm_softc *, const uint8_t *);
713static int wm_rar_count(struct wm_softc *); 713static int wm_rar_count(struct wm_softc *);
714static void wm_set_filter(struct wm_softc *); 714static void wm_set_filter(struct wm_softc *);
715/* Reset and init related */ 715/* Reset and init related */
716static void wm_set_vlan(struct wm_softc *); 716static void wm_set_vlan(struct wm_softc *);
717static void wm_set_pcie_completion_timeout(struct wm_softc *); 717static void wm_set_pcie_completion_timeout(struct wm_softc *);
718static void wm_get_auto_rd_done(struct wm_softc *); 718static void wm_get_auto_rd_done(struct wm_softc *);
719static void wm_lan_init_done(struct wm_softc *); 719static void wm_lan_init_done(struct wm_softc *);
720static void wm_get_cfg_done(struct wm_softc *); 720static void wm_get_cfg_done(struct wm_softc *);
721static void wm_phy_post_reset(struct wm_softc *); 721static void wm_phy_post_reset(struct wm_softc *);
722static int wm_write_smbus_addr(struct wm_softc *); 722static int wm_write_smbus_addr(struct wm_softc *);
723static void wm_init_lcd_from_nvm(struct wm_softc *); 723static void wm_init_lcd_from_nvm(struct wm_softc *);
724static int wm_oem_bits_config_ich8lan(struct wm_softc *, bool); 724static int wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
725static void wm_initialize_hardware_bits(struct wm_softc *); 725static void wm_initialize_hardware_bits(struct wm_softc *);
726static uint32_t wm_rxpbs_adjust_82580(uint32_t); 726static uint32_t wm_rxpbs_adjust_82580(uint32_t);
727static int wm_reset_phy(struct wm_softc *); 727static int wm_reset_phy(struct wm_softc *);
728static void wm_flush_desc_rings(struct wm_softc *); 728static void wm_flush_desc_rings(struct wm_softc *);
729static void wm_reset(struct wm_softc *); 729static void wm_reset(struct wm_softc *);
730static int wm_add_rxbuf(struct wm_rxqueue *, int); 730static int wm_add_rxbuf(struct wm_rxqueue *, int);
731static void wm_rxdrain(struct wm_rxqueue *); 731static void wm_rxdrain(struct wm_rxqueue *);
732static void wm_init_rss(struct wm_softc *); 732static void wm_init_rss(struct wm_softc *);
733static void wm_adjust_qnum(struct wm_softc *, int); 733static void wm_adjust_qnum(struct wm_softc *, int);
734static inline bool wm_is_using_msix(struct wm_softc *); 734static inline bool wm_is_using_msix(struct wm_softc *);
735static inline bool wm_is_using_multiqueue(struct wm_softc *); 735static inline bool wm_is_using_multiqueue(struct wm_softc *);
736static int wm_softint_establish(struct wm_softc *, int, int); 736static int wm_softint_establish(struct wm_softc *, int, int);
737static int wm_setup_legacy(struct wm_softc *); 737static int wm_setup_legacy(struct wm_softc *);
738static int wm_setup_msix(struct wm_softc *); 738static int wm_setup_msix(struct wm_softc *);
739static int wm_init(struct ifnet *); 739static int wm_init(struct ifnet *);
740static int wm_init_locked(struct ifnet *); 740static int wm_init_locked(struct ifnet *);
741static void wm_unset_stopping_flags(struct wm_softc *); 741static void wm_unset_stopping_flags(struct wm_softc *);
742static void wm_set_stopping_flags(struct wm_softc *); 742static void wm_set_stopping_flags(struct wm_softc *);
743static void wm_stop(struct ifnet *, int); 743static void wm_stop(struct ifnet *, int);
744static void wm_stop_locked(struct ifnet *, int); 744static void wm_stop_locked(struct ifnet *, int);
745static void wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *); 745static void wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
746static void wm_82547_txfifo_stall(void *); 746static void wm_82547_txfifo_stall(void *);
747static int wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *); 747static int wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
748static void wm_itrs_writereg(struct wm_softc *, struct wm_queue *); 748static void wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
749/* DMA related */ 749/* DMA related */
750static int wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *); 750static int wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
751static void wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *); 751static void wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
752static void wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *); 752static void wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
753static void wm_init_tx_regs(struct wm_softc *, struct wm_queue *, 753static void wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
754 struct wm_txqueue *); 754 struct wm_txqueue *);
755static int wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *); 755static int wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
756static void wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *); 756static void wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
757static void wm_init_rx_regs(struct wm_softc *, struct wm_queue *, 757static void wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
758 struct wm_rxqueue *); 758 struct wm_rxqueue *);
759static int wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *); 759static int wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
760static void wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *); 760static void wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
761static void wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *); 761static void wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
762static int wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *); 762static int wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
763static void wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *); 763static void wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
764static int wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *); 764static int wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
765static void wm_init_tx_queue(struct wm_softc *, struct wm_queue *, 765static void wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
766 struct wm_txqueue *); 766 struct wm_txqueue *);
767static int wm_init_rx_queue(struct wm_softc *, struct wm_queue *, 767static int wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
768 struct wm_rxqueue *); 768 struct wm_rxqueue *);
769static int wm_alloc_txrx_queues(struct wm_softc *); 769static int wm_alloc_txrx_queues(struct wm_softc *);
770static void wm_free_txrx_queues(struct wm_softc *); 770static void wm_free_txrx_queues(struct wm_softc *);
771static int wm_init_txrx_queues(struct wm_softc *); 771static int wm_init_txrx_queues(struct wm_softc *);
772/* Start */ 772/* Start */
773static int wm_tx_offload(struct wm_softc *, struct wm_txqueue *, 773static int wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
774 struct wm_txsoft *, uint32_t *, uint8_t *); 774 struct wm_txsoft *, uint32_t *, uint8_t *);
775static inline int wm_select_txqueue(struct ifnet *, struct mbuf *); 775static inline int wm_select_txqueue(struct ifnet *, struct mbuf *);
776static void wm_start(struct ifnet *); 776static void wm_start(struct ifnet *);
777static void wm_start_locked(struct ifnet *); 777static void wm_start_locked(struct ifnet *);
778static int wm_transmit(struct ifnet *, struct mbuf *); 778static int wm_transmit(struct ifnet *, struct mbuf *);
779static void wm_transmit_locked(struct ifnet *, struct wm_txqueue *); 779static void wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
780static void wm_send_common_locked(struct ifnet *, struct wm_txqueue *, 780static void wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
781 bool); 781 bool);
782static int wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *, 782static int wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
783 struct wm_txsoft *, uint32_t *, uint32_t *, bool *); 783 struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
784static void wm_nq_start(struct ifnet *); 784static void wm_nq_start(struct ifnet *);
785static void wm_nq_start_locked(struct ifnet *); 785static void wm_nq_start_locked(struct ifnet *);
786static int wm_nq_transmit(struct ifnet *, struct mbuf *); 786static int wm_nq_transmit(struct ifnet *, struct mbuf *);
787static void wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *); 787static void wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
788static void wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, 788static void wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
789 bool); 789 bool);
790static void wm_deferred_start_locked(struct wm_txqueue *); 790static void wm_deferred_start_locked(struct wm_txqueue *);
791static void wm_handle_queue(void *); 791static void wm_handle_queue(void *);
792/* Interrupt */ 792/* Interrupt */
793static bool wm_txeof(struct wm_txqueue *, u_int); 793static bool wm_txeof(struct wm_txqueue *, u_int);
794static bool wm_rxeof(struct wm_rxqueue *, u_int); 794static bool wm_rxeof(struct wm_rxqueue *, u_int);
795static void wm_linkintr_gmii(struct wm_softc *, uint32_t); 795static void wm_linkintr_gmii(struct wm_softc *, uint32_t);
796static void wm_linkintr_tbi(struct wm_softc *, uint32_t); 796static void wm_linkintr_tbi(struct wm_softc *, uint32_t);
797static void wm_linkintr_serdes(struct wm_softc *, uint32_t); 797static void wm_linkintr_serdes(struct wm_softc *, uint32_t);
798static void wm_linkintr(struct wm_softc *, uint32_t); 798static void wm_linkintr(struct wm_softc *, uint32_t);
799static int wm_intr_legacy(void *); 799static int wm_intr_legacy(void *);
800static inline void wm_txrxintr_disable(struct wm_queue *); 800static inline void wm_txrxintr_disable(struct wm_queue *);
801static inline void wm_txrxintr_enable(struct wm_queue *); 801static inline void wm_txrxintr_enable(struct wm_queue *);
802static void wm_itrs_calculate(struct wm_softc *, struct wm_queue *); 802static void wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
803static int wm_txrxintr_msix(void *); 803static int wm_txrxintr_msix(void *);
804static int wm_linkintr_msix(void *); 804static int wm_linkintr_msix(void *);
805 805
806/* 806/*
807 * Media related. 807 * Media related.
808 * GMII, SGMII, TBI, SERDES and SFP. 808 * GMII, SGMII, TBI, SERDES and SFP.
809 */ 809 */
810/* Common */ 810/* Common */
811static void wm_tbi_serdes_set_linkled(struct wm_softc *); 811static void wm_tbi_serdes_set_linkled(struct wm_softc *);
812/* GMII related */ 812/* GMII related */
813static void wm_gmii_reset(struct wm_softc *); 813static void wm_gmii_reset(struct wm_softc *);
814static void wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t); 814static void wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
815static int wm_get_phy_id_82575(struct wm_softc *); 815static int wm_get_phy_id_82575(struct wm_softc *);
816static void wm_gmii_mediainit(struct wm_softc *, pci_product_id_t); 816static void wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
817static int wm_gmii_mediachange(struct ifnet *); 817static int wm_gmii_mediachange(struct ifnet *);
818static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *); 818static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
819static void wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int); 819static void wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
820static uint32_t wm_i82543_mii_recvbits(struct wm_softc *); 820static uint32_t wm_i82543_mii_recvbits(struct wm_softc *);
821static int wm_gmii_i82543_readreg(device_t, int, int); 821static int wm_gmii_i82543_readreg(device_t, int, int);
822static void wm_gmii_i82543_writereg(device_t, int, int, int); 822static void wm_gmii_i82543_writereg(device_t, int, int, int);
823static int wm_gmii_mdic_readreg(device_t, int, int); 823static int wm_gmii_mdic_readreg(device_t, int, int);
824static void wm_gmii_mdic_writereg(device_t, int, int, int); 824static void wm_gmii_mdic_writereg(device_t, int, int, int);
825static int wm_gmii_i82544_readreg(device_t, int, int); 825static int wm_gmii_i82544_readreg(device_t, int, int);
826static int wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *); 826static int wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
827static void wm_gmii_i82544_writereg(device_t, int, int, int); 827static void wm_gmii_i82544_writereg(device_t, int, int, int);
828static int wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t); 828static int wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
829static int wm_gmii_i80003_readreg(device_t, int, int); 829static int wm_gmii_i80003_readreg(device_t, int, int);
830static void wm_gmii_i80003_writereg(device_t, int, int, int); 830static void wm_gmii_i80003_writereg(device_t, int, int, int);
831static int wm_gmii_bm_readreg(device_t, int, int); 831static int wm_gmii_bm_readreg(device_t, int, int);
832static void wm_gmii_bm_writereg(device_t, int, int, int); 832static void wm_gmii_bm_writereg(device_t, int, int, int);
833static int wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *); 833static int wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
834static int wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *); 834static int wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
835static int wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int, 835static int wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
836 bool); 836 bool);
837static int wm_gmii_hv_readreg(device_t, int, int); 837static int wm_gmii_hv_readreg(device_t, int, int);
838static int wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *); 838static int wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
839static void wm_gmii_hv_writereg(device_t, int, int, int); 839static void wm_gmii_hv_writereg(device_t, int, int, int);
840static int wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t); 840static int wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
841static int wm_gmii_82580_readreg(device_t, int, int); 841static int wm_gmii_82580_readreg(device_t, int, int);
842static void wm_gmii_82580_writereg(device_t, int, int, int); 842static void wm_gmii_82580_writereg(device_t, int, int, int);
843static int wm_gmii_gs40g_readreg(device_t, int, int); 843static int wm_gmii_gs40g_readreg(device_t, int, int);
844static void wm_gmii_gs40g_writereg(device_t, int, int, int); 844static void wm_gmii_gs40g_writereg(device_t, int, int, int);
845static void wm_gmii_statchg(struct ifnet *); 845static void wm_gmii_statchg(struct ifnet *);
846/* 846/*
847 * kumeran related (80003, ICH* and PCH*). 847 * kumeran related (80003, ICH* and PCH*).
848 * These functions are not for accessing MII registers but for accessing 848 * These functions are not for accessing MII registers but for accessing
849 * kumeran specific registers. 849 * kumeran specific registers.
850 */ 850 */
851static int wm_kmrn_readreg(struct wm_softc *, int, uint16_t *); 851static int wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
852static int wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *); 852static int wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
853static int wm_kmrn_writereg(struct wm_softc *, int, uint16_t); 853static int wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
854static int wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t); 854static int wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
855/* SGMII */ 855/* SGMII */
856static bool wm_sgmii_uses_mdio(struct wm_softc *); 856static bool wm_sgmii_uses_mdio(struct wm_softc *);
857static int wm_sgmii_readreg(device_t, int, int); 857static int wm_sgmii_readreg(device_t, int, int);
858static void wm_sgmii_writereg(device_t, int, int, int); 858static void wm_sgmii_writereg(device_t, int, int, int);
859/* TBI related */ 859/* TBI related */
860static bool wm_tbi_havesignal(struct wm_softc *, uint32_t); 860static bool wm_tbi_havesignal(struct wm_softc *, uint32_t);
861static void wm_tbi_mediainit(struct wm_softc *); 861static void wm_tbi_mediainit(struct wm_softc *);
862static int wm_tbi_mediachange(struct ifnet *); 862static int wm_tbi_mediachange(struct ifnet *);
863static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *); 863static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
864static int wm_check_for_link(struct wm_softc *); 864static int wm_check_for_link(struct wm_softc *);
865static void wm_tbi_tick(struct wm_softc *); 865static void wm_tbi_tick(struct wm_softc *);
866/* SERDES related */ 866/* SERDES related */
867static void wm_serdes_power_up_link_82575(struct wm_softc *); 867static void wm_serdes_power_up_link_82575(struct wm_softc *);
868static int wm_serdes_mediachange(struct ifnet *); 868static int wm_serdes_mediachange(struct ifnet *);
869static void wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *); 869static void wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
870static void wm_serdes_tick(struct wm_softc *); 870static void wm_serdes_tick(struct wm_softc *);
871/* SFP related */ 871/* SFP related */
872static int wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *); 872static int wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
873static uint32_t wm_sfp_get_media_type(struct wm_softc *); 873static uint32_t wm_sfp_get_media_type(struct wm_softc *);
874 874
875/* 875/*
876 * NVM related. 876 * NVM related.
877 * Microwire, SPI (w/wo EERD) and Flash. 877 * Microwire, SPI (w/wo EERD) and Flash.
878 */ 878 */
879/* Misc functions */ 879/* Misc functions */
880static void wm_eeprom_sendbits(struct wm_softc *, uint32_t, int); 880static void wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
881static void wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int); 881static void wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
882static int wm_nvm_set_addrbits_size_eecd(struct wm_softc *); 882static int wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
883/* Microwire */ 883/* Microwire */
884static int wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *); 884static int wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
885/* SPI */ 885/* SPI */
886static int wm_nvm_ready_spi(struct wm_softc *); 886static int wm_nvm_ready_spi(struct wm_softc *);
887static int wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *); 887static int wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
888/* Using with EERD */ 888/* Using with EERD */
889static int wm_poll_eerd_eewr_done(struct wm_softc *, int); 889static int wm_poll_eerd_eewr_done(struct wm_softc *, int);
890static int wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *); 890static int wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
891/* Flash */ 891/* Flash */
892static int wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *, 892static int wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
893 unsigned int *); 893 unsigned int *);
894static int32_t wm_ich8_cycle_init(struct wm_softc *); 894static int32_t wm_ich8_cycle_init(struct wm_softc *);
895static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t); 895static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
896static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t, 896static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
897 uint32_t *); 897 uint32_t *);
898static int32_t wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *); 898static int32_t wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
899static int32_t wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *); 899static int32_t wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
900static int32_t wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *); 900static int32_t wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
901static int wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *); 901static int wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
902static int wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *); 902static int wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
903/* iNVM */ 903/* iNVM */
904static int wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *); 904static int wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
905static int wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *); 905static int wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
906/* Lock, detecting NVM type, validate checksum and read */ 906/* Lock, detecting NVM type, validate checksum and read */
907static int wm_nvm_is_onboard_eeprom(struct wm_softc *); 907static int wm_nvm_is_onboard_eeprom(struct wm_softc *);
908static int wm_nvm_flash_presence_i210(struct wm_softc *); 908static int wm_nvm_flash_presence_i210(struct wm_softc *);
909static int wm_nvm_validate_checksum(struct wm_softc *); 909static int wm_nvm_validate_checksum(struct wm_softc *);
910static void wm_nvm_version_invm(struct wm_softc *); 910static void wm_nvm_version_invm(struct wm_softc *);
911static void wm_nvm_version(struct wm_softc *); 911static void wm_nvm_version(struct wm_softc *);
912static int wm_nvm_read(struct wm_softc *, int, int, uint16_t *); 912static int wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
913 913
914/* 914/*
915 * Hardware semaphores. 915 * Hardware semaphores.
916 * Very complexed... 916 * Very complexed...
917 */ 917 */
918static int wm_get_null(struct wm_softc *); 918static int wm_get_null(struct wm_softc *);
919static void wm_put_null(struct wm_softc *); 919static void wm_put_null(struct wm_softc *);
920static int wm_get_eecd(struct wm_softc *); 920static int wm_get_eecd(struct wm_softc *);
921static void wm_put_eecd(struct wm_softc *); 921static void wm_put_eecd(struct wm_softc *);
922static int wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */ 922static int wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
923static void wm_put_swsm_semaphore(struct wm_softc *); 923static void wm_put_swsm_semaphore(struct wm_softc *);
924static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t); 924static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
925static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t); 925static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
926static int wm_get_nvm_80003(struct wm_softc *); 926static int wm_get_nvm_80003(struct wm_softc *);
927static void wm_put_nvm_80003(struct wm_softc *); 927static void wm_put_nvm_80003(struct wm_softc *);
928static int wm_get_nvm_82571(struct wm_softc *); 928static int wm_get_nvm_82571(struct wm_softc *);
929static void wm_put_nvm_82571(struct wm_softc *); 929static void wm_put_nvm_82571(struct wm_softc *);
930static int wm_get_phy_82575(struct wm_softc *); 930static int wm_get_phy_82575(struct wm_softc *);
931static void wm_put_phy_82575(struct wm_softc *); 931static void wm_put_phy_82575(struct wm_softc *);
932static int wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */ 932static int wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
933static void wm_put_swfwhw_semaphore(struct wm_softc *); 933static void wm_put_swfwhw_semaphore(struct wm_softc *);
934static int wm_get_swflag_ich8lan(struct wm_softc *); /* For PHY */ 934static int wm_get_swflag_ich8lan(struct wm_softc *); /* For PHY */
935static void wm_put_swflag_ich8lan(struct wm_softc *); 935static void wm_put_swflag_ich8lan(struct wm_softc *);
936static int wm_get_nvm_ich8lan(struct wm_softc *); 936static int wm_get_nvm_ich8lan(struct wm_softc *);
937static void wm_put_nvm_ich8lan(struct wm_softc *); 937static void wm_put_nvm_ich8lan(struct wm_softc *);
938static int wm_get_hw_semaphore_82573(struct wm_softc *); 938static int wm_get_hw_semaphore_82573(struct wm_softc *);
939static void wm_put_hw_semaphore_82573(struct wm_softc *); 939static void wm_put_hw_semaphore_82573(struct wm_softc *);
940 940
941/* 941/*
942 * Management mode and power management related subroutines. 942 * Management mode and power management related subroutines.
943 * BMC, AMT, suspend/resume and EEE. 943 * BMC, AMT, suspend/resume and EEE.
944 */ 944 */
945#if 0 945#if 0
946static int wm_check_mng_mode(struct wm_softc *); 946static int wm_check_mng_mode(struct wm_softc *);
947static int wm_check_mng_mode_ich8lan(struct wm_softc *); 947static int wm_check_mng_mode_ich8lan(struct wm_softc *);
948static int wm_check_mng_mode_82574(struct wm_softc *); 948static int wm_check_mng_mode_82574(struct wm_softc *);
949static int wm_check_mng_mode_generic(struct wm_softc *); 949static int wm_check_mng_mode_generic(struct wm_softc *);
950#endif 950#endif
951static int wm_enable_mng_pass_thru(struct wm_softc *); 951static int wm_enable_mng_pass_thru(struct wm_softc *);
952static bool wm_phy_resetisblocked(struct wm_softc *); 952static bool wm_phy_resetisblocked(struct wm_softc *);
953static void wm_get_hw_control(struct wm_softc *); 953static void wm_get_hw_control(struct wm_softc *);
954static void wm_release_hw_control(struct wm_softc *); 954static void wm_release_hw_control(struct wm_softc *);
955static void wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool); 955static void wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
956static int wm_init_phy_workarounds_pchlan(struct wm_softc *); 956static int wm_init_phy_workarounds_pchlan(struct wm_softc *);
957static void wm_init_manageability(struct wm_softc *); 957static void wm_init_manageability(struct wm_softc *);
958static void wm_release_manageability(struct wm_softc *); 958static void wm_release_manageability(struct wm_softc *);
959static void wm_get_wakeup(struct wm_softc *); 959static void wm_get_wakeup(struct wm_softc *);
960static int wm_ulp_disable(struct wm_softc *); 960static int wm_ulp_disable(struct wm_softc *);
961static int wm_enable_phy_wakeup(struct wm_softc *); 961static int wm_enable_phy_wakeup(struct wm_softc *);
962static void wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *); 962static void wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
963static void wm_suspend_workarounds_ich8lan(struct wm_softc *); 963static void wm_suspend_workarounds_ich8lan(struct wm_softc *);
964static int wm_resume_workarounds_pchlan(struct wm_softc *); 964static int wm_resume_workarounds_pchlan(struct wm_softc *);
965static void wm_enable_wakeup(struct wm_softc *); 965static void wm_enable_wakeup(struct wm_softc *);
966static void wm_disable_aspm(struct wm_softc *); 966static void wm_disable_aspm(struct wm_softc *);
967/* LPLU (Low Power Link Up) */ 967/* LPLU (Low Power Link Up) */
968static void wm_lplu_d0_disable(struct wm_softc *); 968static void wm_lplu_d0_disable(struct wm_softc *);
969/* EEE */ 969/* EEE */
970static void wm_set_eee_i350(struct wm_softc *); 970static void wm_set_eee_i350(struct wm_softc *);
971 971
972/* 972/*
973 * Workarounds (mainly PHY related). 973 * Workarounds (mainly PHY related).
974 * Basically, PHY's workarounds are in the PHY drivers. 974 * Basically, PHY's workarounds are in the PHY drivers.
975 */ 975 */
976static void wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *); 976static void wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
977static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *); 977static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
978static void wm_hv_phy_workarounds_ich8lan(struct wm_softc *); 978static void wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
979static void wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *); 979static void wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
980static void wm_lv_phy_workarounds_ich8lan(struct wm_softc *); 980static void wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
981static int wm_k1_workaround_lpt_lp(struct wm_softc *, bool); 981static int wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
982static int wm_k1_gig_workaround_hv(struct wm_softc *, int); 982static int wm_k1_gig_workaround_hv(struct wm_softc *, int);
983static int wm_k1_workaround_lv(struct wm_softc *); 983static int wm_k1_workaround_lv(struct wm_softc *);
984static int wm_link_stall_workaround_hv(struct wm_softc *); 984static int wm_link_stall_workaround_hv(struct wm_softc *);
985static void wm_set_mdio_slow_mode_hv(struct wm_softc *); 985static void wm_set_mdio_slow_mode_hv(struct wm_softc *);
986static void wm_configure_k1_ich8lan(struct wm_softc *, int); 986static void wm_configure_k1_ich8lan(struct wm_softc *, int);
987static void wm_reset_init_script_82575(struct wm_softc *); 987static void wm_reset_init_script_82575(struct wm_softc *);
988static void wm_reset_mdicnfg_82580(struct wm_softc *); 988static void wm_reset_mdicnfg_82580(struct wm_softc *);
989static bool wm_phy_is_accessible_pchlan(struct wm_softc *); 989static bool wm_phy_is_accessible_pchlan(struct wm_softc *);
990static void wm_toggle_lanphypc_pch_lpt(struct wm_softc *); 990static void wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
991static int wm_platform_pm_pch_lpt(struct wm_softc *, bool); 991static int wm_platform_pm_pch_lpt(struct wm_softc *, bool);
992static void wm_pll_workaround_i210(struct wm_softc *); 992static void wm_pll_workaround_i210(struct wm_softc *);
993static void wm_legacy_irq_quirk_spt(struct wm_softc *); 993static void wm_legacy_irq_quirk_spt(struct wm_softc *);
994 994
995CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc), 995CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
996 wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN); 996 wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
997 997
998/* 998/*
999 * Devices supported by this driver. 999 * Devices supported by this driver.
1000 */ 1000 */
1001static const struct wm_product { 1001static const struct wm_product {
1002 pci_vendor_id_t wmp_vendor; 1002 pci_vendor_id_t wmp_vendor;
1003 pci_product_id_t wmp_product; 1003 pci_product_id_t wmp_product;
1004 const char *wmp_name; 1004 const char *wmp_name;
1005 wm_chip_type wmp_type; 1005 wm_chip_type wmp_type;
1006 uint32_t wmp_flags; 1006 uint32_t wmp_flags;
1007#define WMP_F_UNKNOWN WM_MEDIATYPE_UNKNOWN 1007#define WMP_F_UNKNOWN WM_MEDIATYPE_UNKNOWN
1008#define WMP_F_FIBER WM_MEDIATYPE_FIBER 1008#define WMP_F_FIBER WM_MEDIATYPE_FIBER
1009#define WMP_F_COPPER WM_MEDIATYPE_COPPER 1009#define WMP_F_COPPER WM_MEDIATYPE_COPPER
1010#define WMP_F_SERDES WM_MEDIATYPE_SERDES 1010#define WMP_F_SERDES WM_MEDIATYPE_SERDES
1011#define WMP_MEDIATYPE(x) ((x) & 0x03) 1011#define WMP_MEDIATYPE(x) ((x) & 0x03)
1012} wm_products[] = { 1012} wm_products[] = {
1013 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542, 1013 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
1014 "Intel i82542 1000BASE-X Ethernet", 1014 "Intel i82542 1000BASE-X Ethernet",
1015 WM_T_82542_2_1, WMP_F_FIBER }, 1015 WM_T_82542_2_1, WMP_F_FIBER },
1016 1016
1017 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER, 1017 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER,
1018 "Intel i82543GC 1000BASE-X Ethernet", 1018 "Intel i82543GC 1000BASE-X Ethernet",
1019 WM_T_82543, WMP_F_FIBER }, 1019 WM_T_82543, WMP_F_FIBER },
1020 1020
1021 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER, 1021 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER,
1022 "Intel i82543GC 1000BASE-T Ethernet", 1022 "Intel i82543GC 1000BASE-T Ethernet",
1023 WM_T_82543, WMP_F_COPPER }, 1023 WM_T_82543, WMP_F_COPPER },
1024 1024
1025 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER, 1025 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER,
1026 "Intel i82544EI 1000BASE-T Ethernet", 1026 "Intel i82544EI 1000BASE-T Ethernet",
1027 WM_T_82544, WMP_F_COPPER }, 1027 WM_T_82544, WMP_F_COPPER },
1028 1028
1029 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER, 1029 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER,
1030 "Intel i82544EI 1000BASE-X Ethernet", 1030 "Intel i82544EI 1000BASE-X Ethernet",
1031 WM_T_82544, WMP_F_FIBER }, 1031 WM_T_82544, WMP_F_FIBER },
1032 1032
1033 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER, 1033 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER,
1034 "Intel i82544GC 1000BASE-T Ethernet", 1034 "Intel i82544GC 1000BASE-T Ethernet",
1035 WM_T_82544, WMP_F_COPPER }, 1035 WM_T_82544, WMP_F_COPPER },
1036 1036
1037 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM, 1037 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM,
1038 "Intel i82544GC (LOM) 1000BASE-T Ethernet", 1038 "Intel i82544GC (LOM) 1000BASE-T Ethernet",
1039 WM_T_82544, WMP_F_COPPER }, 1039 WM_T_82544, WMP_F_COPPER },
1040 1040
1041 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM, 1041 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM,
1042 "Intel i82540EM 1000BASE-T Ethernet", 1042 "Intel i82540EM 1000BASE-T Ethernet",
1043 WM_T_82540, WMP_F_COPPER }, 1043 WM_T_82540, WMP_F_COPPER },
1044 1044
1045 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM, 1045 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM,
1046 "Intel i82540EM (LOM) 1000BASE-T Ethernet", 1046 "Intel i82540EM (LOM) 1000BASE-T Ethernet",
1047 WM_T_82540, WMP_F_COPPER }, 1047 WM_T_82540, WMP_F_COPPER },
1048 1048
1049 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM, 1049 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM,
1050 "Intel i82540EP 1000BASE-T Ethernet", 1050 "Intel i82540EP 1000BASE-T Ethernet",
1051 WM_T_82540, WMP_F_COPPER }, 1051 WM_T_82540, WMP_F_COPPER },
1052 1052
1053 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP, 1053 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP,
1054 "Intel i82540EP 1000BASE-T Ethernet", 1054 "Intel i82540EP 1000BASE-T Ethernet",
1055 WM_T_82540, WMP_F_COPPER }, 1055 WM_T_82540, WMP_F_COPPER },
1056 1056
1057 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP, 1057 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP,
1058 "Intel i82540EP 1000BASE-T Ethernet", 1058 "Intel i82540EP 1000BASE-T Ethernet",
1059 WM_T_82540, WMP_F_COPPER }, 1059 WM_T_82540, WMP_F_COPPER },
1060 1060
1061 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER, 1061 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER,
1062 "Intel i82545EM 1000BASE-T Ethernet", 1062 "Intel i82545EM 1000BASE-T Ethernet",
1063 WM_T_82545, WMP_F_COPPER }, 1063 WM_T_82545, WMP_F_COPPER },
1064 1064
1065 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER, 1065 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER,
1066 "Intel i82545GM 1000BASE-T Ethernet", 1066 "Intel i82545GM 1000BASE-T Ethernet",
1067 WM_T_82545_3, WMP_F_COPPER }, 1067 WM_T_82545_3, WMP_F_COPPER },
1068 1068
1069 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER, 1069 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER,
1070 "Intel i82545GM 1000BASE-X Ethernet", 1070 "Intel i82545GM 1000BASE-X Ethernet",
1071 WM_T_82545_3, WMP_F_FIBER }, 1071 WM_T_82545_3, WMP_F_FIBER },
1072 1072
1073 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES, 1073 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES,
1074 "Intel i82545GM Gigabit Ethernet (SERDES)", 1074 "Intel i82545GM Gigabit Ethernet (SERDES)",
1075 WM_T_82545_3, WMP_F_SERDES }, 1075 WM_T_82545_3, WMP_F_SERDES },
1076 1076
1077 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER, 1077 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER,
1078 "Intel i82546EB 1000BASE-T Ethernet", 1078 "Intel i82546EB 1000BASE-T Ethernet",
1079 WM_T_82546, WMP_F_COPPER }, 1079 WM_T_82546, WMP_F_COPPER },
1080 1080
1081 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD, 1081 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD,
1082 "Intel i82546EB 1000BASE-T Ethernet", 1082 "Intel i82546EB 1000BASE-T Ethernet",
1083 WM_T_82546, WMP_F_COPPER }, 1083 WM_T_82546, WMP_F_COPPER },
1084 1084
1085 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER, 1085 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER,
@@ -1400,2004 +1400,2004 @@ static const struct wm_product { @@ -1400,2004 +1400,2004 @@ static const struct wm_product {
1400 "82580 dual-1000BaseT Ethernet", 1400 "82580 dual-1000BaseT Ethernet",
1401 WM_T_82580, WMP_F_COPPER }, 1401 WM_T_82580, WMP_F_COPPER },
1402 1402
1403 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_QUAD_FIBER, 1403 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
1404 "82580 quad-1000BaseX Ethernet", 1404 "82580 quad-1000BaseX Ethernet",
1405 WM_T_82580, WMP_F_FIBER }, 1405 WM_T_82580, WMP_F_FIBER },
1406 1406
1407 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SGMII, 1407 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
1408 "DH89XXCC Gigabit Ethernet (SGMII)", 1408 "DH89XXCC Gigabit Ethernet (SGMII)",
1409 WM_T_82580, WMP_F_COPPER }, 1409 WM_T_82580, WMP_F_COPPER },
1410 1410
1411 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SERDES, 1411 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
1412 "DH89XXCC Gigabit Ethernet (SERDES)", 1412 "DH89XXCC Gigabit Ethernet (SERDES)",
1413 WM_T_82580, WMP_F_SERDES }, 1413 WM_T_82580, WMP_F_SERDES },
1414 1414
1415 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_BPLANE, 1415 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
1416 "DH89XXCC 1000BASE-KX Ethernet", 1416 "DH89XXCC 1000BASE-KX Ethernet",
1417 WM_T_82580, WMP_F_SERDES }, 1417 WM_T_82580, WMP_F_SERDES },
1418 1418
1419 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SFP, 1419 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SFP,
1420 "DH89XXCC Gigabit Ethernet (SFP)", 1420 "DH89XXCC Gigabit Ethernet (SFP)",
1421 WM_T_82580, WMP_F_SERDES }, 1421 WM_T_82580, WMP_F_SERDES },
1422 1422
1423 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_COPPER, 1423 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_COPPER,
1424 "I350 Gigabit Network Connection", 1424 "I350 Gigabit Network Connection",
1425 WM_T_I350, WMP_F_COPPER }, 1425 WM_T_I350, WMP_F_COPPER },
1426 1426
1427 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER, 1427 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER,
1428 "I350 Gigabit Fiber Network Connection", 1428 "I350 Gigabit Fiber Network Connection",
1429 WM_T_I350, WMP_F_FIBER }, 1429 WM_T_I350, WMP_F_FIBER },
1430 1430
1431 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES, 1431 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES,
1432 "I350 Gigabit Backplane Connection", 1432 "I350 Gigabit Backplane Connection",
1433 WM_T_I350, WMP_F_SERDES }, 1433 WM_T_I350, WMP_F_SERDES },
1434 1434
1435 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_DA4, 1435 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_DA4,
1436 "I350 Quad Port Gigabit Ethernet", 1436 "I350 Quad Port Gigabit Ethernet",
1437 WM_T_I350, WMP_F_SERDES }, 1437 WM_T_I350, WMP_F_SERDES },
1438 1438
1439 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII, 1439 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII,
1440 "I350 Gigabit Connection", 1440 "I350 Gigabit Connection",
1441 WM_T_I350, WMP_F_COPPER }, 1441 WM_T_I350, WMP_F_COPPER },
1442 1442
1443 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_1000KX, 1443 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_1000KX,
1444 "I354 Gigabit Ethernet (KX)", 1444 "I354 Gigabit Ethernet (KX)",
1445 WM_T_I354, WMP_F_SERDES }, 1445 WM_T_I354, WMP_F_SERDES },
1446 1446
1447 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_SGMII, 1447 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_SGMII,
1448 "I354 Gigabit Ethernet (SGMII)", 1448 "I354 Gigabit Ethernet (SGMII)",
1449 WM_T_I354, WMP_F_COPPER }, 1449 WM_T_I354, WMP_F_COPPER },
1450 1450
1451 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_25GBE, 1451 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_25GBE,
1452 "I354 Gigabit Ethernet (2.5G)", 1452 "I354 Gigabit Ethernet (2.5G)",
1453 WM_T_I354, WMP_F_COPPER }, 1453 WM_T_I354, WMP_F_COPPER },
1454 1454
1455 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_T1, 1455 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_T1,
1456 "I210-T1 Ethernet Server Adapter", 1456 "I210-T1 Ethernet Server Adapter",
1457 WM_T_I210, WMP_F_COPPER }, 1457 WM_T_I210, WMP_F_COPPER },
1458 1458
1459 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_OEM1, 1459 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
1460 "I210 Ethernet (Copper OEM)", 1460 "I210 Ethernet (Copper OEM)",
1461 WM_T_I210, WMP_F_COPPER }, 1461 WM_T_I210, WMP_F_COPPER },
1462 1462
1463 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_IT, 1463 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_IT,
1464 "I210 Ethernet (Copper IT)", 1464 "I210 Ethernet (Copper IT)",
1465 WM_T_I210, WMP_F_COPPER }, 1465 WM_T_I210, WMP_F_COPPER },
1466 1466
1467 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_WOF, 1467 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_WOF,
1468 "I210 Ethernet (FLASH less)", 1468 "I210 Ethernet (FLASH less)",
1469 WM_T_I210, WMP_F_COPPER }, 1469 WM_T_I210, WMP_F_COPPER },
1470 1470
1471 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_FIBER, 1471 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_FIBER,
1472 "I210 Gigabit Ethernet (Fiber)", 1472 "I210 Gigabit Ethernet (Fiber)",
1473 WM_T_I210, WMP_F_FIBER }, 1473 WM_T_I210, WMP_F_FIBER },
1474 1474
1475 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES, 1475 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES,
1476 "I210 Gigabit Ethernet (SERDES)", 1476 "I210 Gigabit Ethernet (SERDES)",
1477 WM_T_I210, WMP_F_SERDES }, 1477 WM_T_I210, WMP_F_SERDES },
1478 1478
1479 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES_WOF, 1479 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES_WOF,
1480 "I210 Gigabit Ethernet (FLASH less)", 1480 "I210 Gigabit Ethernet (FLASH less)",
1481 WM_T_I210, WMP_F_SERDES }, 1481 WM_T_I210, WMP_F_SERDES },
1482 1482
1483 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII, 1483 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII,
1484 "I210 Gigabit Ethernet (SGMII)", 1484 "I210 Gigabit Ethernet (SGMII)",
1485 WM_T_I210, WMP_F_COPPER }, 1485 WM_T_I210, WMP_F_COPPER },
1486 1486
1487 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I211_COPPER, 1487 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I211_COPPER,
1488 "I211 Ethernet (COPPER)", 1488 "I211 Ethernet (COPPER)",
1489 WM_T_I211, WMP_F_COPPER }, 1489 WM_T_I211, WMP_F_COPPER },
1490 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_V, 1490 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_V,
1491 "I217 V Ethernet Connection", 1491 "I217 V Ethernet Connection",
1492 WM_T_PCH_LPT, WMP_F_COPPER }, 1492 WM_T_PCH_LPT, WMP_F_COPPER },
1493 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_LM, 1493 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_LM,
1494 "I217 LM Ethernet Connection", 1494 "I217 LM Ethernet Connection",
1495 WM_T_PCH_LPT, WMP_F_COPPER }, 1495 WM_T_PCH_LPT, WMP_F_COPPER },
1496 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V, 1496 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V,
1497 "I218 V Ethernet Connection", 1497 "I218 V Ethernet Connection",
1498 WM_T_PCH_LPT, WMP_F_COPPER }, 1498 WM_T_PCH_LPT, WMP_F_COPPER },
1499 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V2, 1499 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V2,
1500 "I218 V Ethernet Connection", 1500 "I218 V Ethernet Connection",
1501 WM_T_PCH_LPT, WMP_F_COPPER }, 1501 WM_T_PCH_LPT, WMP_F_COPPER },
1502 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V3, 1502 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V3,
1503 "I218 V Ethernet Connection", 1503 "I218 V Ethernet Connection",
1504 WM_T_PCH_LPT, WMP_F_COPPER }, 1504 WM_T_PCH_LPT, WMP_F_COPPER },
1505 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM, 1505 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM,
1506 "I218 LM Ethernet Connection", 1506 "I218 LM Ethernet Connection",
1507 WM_T_PCH_LPT, WMP_F_COPPER }, 1507 WM_T_PCH_LPT, WMP_F_COPPER },
1508 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM2, 1508 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM2,
1509 "I218 LM Ethernet Connection", 1509 "I218 LM Ethernet Connection",
1510 WM_T_PCH_LPT, WMP_F_COPPER }, 1510 WM_T_PCH_LPT, WMP_F_COPPER },
1511 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM3, 1511 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM3,
1512 "I218 LM Ethernet Connection", 1512 "I218 LM Ethernet Connection",
1513 WM_T_PCH_LPT, WMP_F_COPPER }, 1513 WM_T_PCH_LPT, WMP_F_COPPER },
1514 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V, 1514 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V,
1515 "I219 V Ethernet Connection", 1515 "I219 V Ethernet Connection",
1516 WM_T_PCH_SPT, WMP_F_COPPER }, 1516 WM_T_PCH_SPT, WMP_F_COPPER },
1517 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V2, 1517 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V2,
1518 "I219 V Ethernet Connection", 1518 "I219 V Ethernet Connection",
1519 WM_T_PCH_SPT, WMP_F_COPPER }, 1519 WM_T_PCH_SPT, WMP_F_COPPER },
1520 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V4, 1520 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V4,
1521 "I219 V Ethernet Connection", 1521 "I219 V Ethernet Connection",
1522 WM_T_PCH_SPT, WMP_F_COPPER }, 1522 WM_T_PCH_SPT, WMP_F_COPPER },
1523 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V5, 1523 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V5,
1524 "I219 V Ethernet Connection", 1524 "I219 V Ethernet Connection",
1525 WM_T_PCH_SPT, WMP_F_COPPER }, 1525 WM_T_PCH_SPT, WMP_F_COPPER },
1526 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM, 1526 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM,
1527 "I219 LM Ethernet Connection", 1527 "I219 LM Ethernet Connection",
1528 WM_T_PCH_SPT, WMP_F_COPPER }, 1528 WM_T_PCH_SPT, WMP_F_COPPER },
1529 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM2, 1529 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM2,
1530 "I219 LM Ethernet Connection", 1530 "I219 LM Ethernet Connection",
1531 WM_T_PCH_SPT, WMP_F_COPPER }, 1531 WM_T_PCH_SPT, WMP_F_COPPER },
1532 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM3, 1532 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM3,
1533 "I219 LM Ethernet Connection", 1533 "I219 LM Ethernet Connection",
1534 WM_T_PCH_SPT, WMP_F_COPPER }, 1534 WM_T_PCH_SPT, WMP_F_COPPER },
1535 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM4, 1535 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM4,
1536 "I219 LM Ethernet Connection", 1536 "I219 LM Ethernet Connection",
1537 WM_T_PCH_SPT, WMP_F_COPPER }, 1537 WM_T_PCH_SPT, WMP_F_COPPER },
1538 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM5, 1538 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM5,
1539 "I219 LM Ethernet Connection", 1539 "I219 LM Ethernet Connection",
1540 WM_T_PCH_SPT, WMP_F_COPPER }, 1540 WM_T_PCH_SPT, WMP_F_COPPER },
1541 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V6, 1541 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V6,
1542 "I219 V Ethernet Connection", 1542 "I219 V Ethernet Connection",
1543 WM_T_PCH_CNP, WMP_F_COPPER }, 1543 WM_T_PCH_CNP, WMP_F_COPPER },
1544 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V7, 1544 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V7,
1545 "I219 V Ethernet Connection", 1545 "I219 V Ethernet Connection",
1546 WM_T_PCH_CNP, WMP_F_COPPER }, 1546 WM_T_PCH_CNP, WMP_F_COPPER },
1547 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM6, 1547 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM6,
1548 "I219 LM Ethernet Connection", 1548 "I219 LM Ethernet Connection",
1549 WM_T_PCH_CNP, WMP_F_COPPER }, 1549 WM_T_PCH_CNP, WMP_F_COPPER },
1550 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM7, 1550 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM7,
1551 "I219 LM Ethernet Connection", 1551 "I219 LM Ethernet Connection",
1552 WM_T_PCH_CNP, WMP_F_COPPER }, 1552 WM_T_PCH_CNP, WMP_F_COPPER },
1553 { 0, 0, 1553 { 0, 0,
1554 NULL, 1554 NULL,
1555 0, 0 }, 1555 0, 0 },
1556}; 1556};
1557 1557
/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
1562 1562
1563#if 0 /* Not currently used */ 1563#if 0 /* Not currently used */
1564static inline uint32_t 1564static inline uint32_t
1565wm_io_read(struct wm_softc *sc, int reg) 1565wm_io_read(struct wm_softc *sc, int reg)
1566{ 1566{
1567 1567
1568 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg); 1568 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1569 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4)); 1569 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1570} 1570}
1571#endif 1571#endif
1572 1572
1573static inline void 1573static inline void
1574wm_io_write(struct wm_softc *sc, int reg, uint32_t val) 1574wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1575{ 1575{
1576 1576
1577 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg); 1577 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1578 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val); 1578 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1579} 1579}
1580 1580
1581static inline void 1581static inline void
1582wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off, 1582wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1583 uint32_t data) 1583 uint32_t data)
1584{ 1584{
1585 uint32_t regval; 1585 uint32_t regval;
1586 int i; 1586 int i;
1587 1587
1588 regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT); 1588 regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1589 1589
1590 CSR_WRITE(sc, reg, regval); 1590 CSR_WRITE(sc, reg, regval);
1591 1591
1592 for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) { 1592 for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1593 delay(5); 1593 delay(5);
1594 if (CSR_READ(sc, reg) & SCTL_CTL_READY) 1594 if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1595 break; 1595 break;
1596 } 1596 }
1597 if (i == SCTL_CTL_POLL_TIMEOUT) { 1597 if (i == SCTL_CTL_POLL_TIMEOUT) {
1598 aprint_error("%s: WARNING:" 1598 aprint_error("%s: WARNING:"
1599 " i82575 reg 0x%08x setup did not indicate ready\n", 1599 " i82575 reg 0x%08x setup did not indicate ready\n",
1600 device_xname(sc->sc_dev), reg); 1600 device_xname(sc->sc_dev), reg);
1601 } 1601 }
1602} 1602}
1603 1603
1604static inline void 1604static inline void
1605wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v) 1605wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1606{ 1606{
1607 wa->wa_low = htole32(v & 0xffffffffU); 1607 wa->wa_low = htole32(v & 0xffffffffU);
1608 if (sizeof(bus_addr_t) == 8) 1608 if (sizeof(bus_addr_t) == 8)
1609 wa->wa_high = htole32((uint64_t) v >> 32); 1609 wa->wa_high = htole32((uint64_t) v >> 32);
1610 else 1610 else
1611 wa->wa_high = 0; 1611 wa->wa_high = 0;
1612} 1612}
1613 1613
1614/* 1614/*
1615 * Descriptor sync/init functions. 1615 * Descriptor sync/init functions.
1616 */ 1616 */
1617static inline void 1617static inline void
1618wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops) 1618wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
1619{ 1619{
1620 struct wm_softc *sc = txq->txq_sc; 1620 struct wm_softc *sc = txq->txq_sc;
1621 1621
1622 /* If it will wrap around, sync to the end of the ring. */ 1622 /* If it will wrap around, sync to the end of the ring. */
1623 if ((start + num) > WM_NTXDESC(txq)) { 1623 if ((start + num) > WM_NTXDESC(txq)) {
1624 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap, 1624 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1625 WM_CDTXOFF(txq, start), txq->txq_descsize * 1625 WM_CDTXOFF(txq, start), txq->txq_descsize *
1626 (WM_NTXDESC(txq) - start), ops); 1626 (WM_NTXDESC(txq) - start), ops);
1627 num -= (WM_NTXDESC(txq) - start); 1627 num -= (WM_NTXDESC(txq) - start);
1628 start = 0; 1628 start = 0;
1629 } 1629 }
1630 1630
1631 /* Now sync whatever is left. */ 1631 /* Now sync whatever is left. */
1632 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap, 1632 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1633 WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops); 1633 WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
1634} 1634}
1635 1635
1636static inline void 1636static inline void
1637wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops) 1637wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
1638{ 1638{
1639 struct wm_softc *sc = rxq->rxq_sc; 1639 struct wm_softc *sc = rxq->rxq_sc;
1640 1640
1641 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap, 1641 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
1642 WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops); 1642 WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
1643} 1643}
1644 1644
1645static inline void 1645static inline void
1646wm_init_rxdesc(struct wm_rxqueue *rxq, int start) 1646wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
1647{ 1647{
1648 struct wm_softc *sc = rxq->rxq_sc; 1648 struct wm_softc *sc = rxq->rxq_sc;
1649 struct wm_rxsoft *rxs = &rxq->rxq_soft[start]; 1649 struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
1650 struct mbuf *m = rxs->rxs_mbuf; 1650 struct mbuf *m = rxs->rxs_mbuf;
1651 1651
1652 /* 1652 /*
1653 * Note: We scoot the packet forward 2 bytes in the buffer 1653 * Note: We scoot the packet forward 2 bytes in the buffer
1654 * so that the payload after the Ethernet header is aligned 1654 * so that the payload after the Ethernet header is aligned
1655 * to a 4-byte boundary. 1655 * to a 4-byte boundary.
1656 1656
1657 * XXX BRAINDAMAGE ALERT! 1657 * XXX BRAINDAMAGE ALERT!
1658 * The stupid chip uses the same size for every buffer, which 1658 * The stupid chip uses the same size for every buffer, which
1659 * is set in the Receive Control register. We are using the 2K 1659 * is set in the Receive Control register. We are using the 2K
1660 * size option, but what we REALLY want is (2K - 2)! For this 1660 * size option, but what we REALLY want is (2K - 2)! For this
1661 * reason, we can't "scoot" packets longer than the standard 1661 * reason, we can't "scoot" packets longer than the standard
1662 * Ethernet MTU. On strict-alignment platforms, if the total 1662 * Ethernet MTU. On strict-alignment platforms, if the total
1663 * size exceeds (2K - 2) we set align_tweak to 0 and let 1663 * size exceeds (2K - 2) we set align_tweak to 0 and let
1664 * the upper layer copy the headers. 1664 * the upper layer copy the headers.
1665 */ 1665 */
1666 m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak; 1666 m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
1667 1667
1668 if (sc->sc_type == WM_T_82574) { 1668 if (sc->sc_type == WM_T_82574) {
1669 ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start]; 1669 ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
1670 rxd->erx_data.erxd_addr = 1670 rxd->erx_data.erxd_addr =
1671 htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak); 1671 htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1672 rxd->erx_data.erxd_dd = 0; 1672 rxd->erx_data.erxd_dd = 0;
1673 } else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { 1673 } else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
1674 nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start]; 1674 nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
1675 1675
1676 rxd->nqrx_data.nrxd_paddr = 1676 rxd->nqrx_data.nrxd_paddr =
1677 htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak); 1677 htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1678 /* Currently, split header is not supported. */ 1678 /* Currently, split header is not supported. */
1679 rxd->nqrx_data.nrxd_haddr = 0; 1679 rxd->nqrx_data.nrxd_haddr = 0;
1680 } else { 1680 } else {
1681 wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start]; 1681 wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
1682 1682
1683 wm_set_dma_addr(&rxd->wrx_addr, 1683 wm_set_dma_addr(&rxd->wrx_addr,
1684 rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak); 1684 rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1685 rxd->wrx_len = 0; 1685 rxd->wrx_len = 0;
1686 rxd->wrx_cksum = 0; 1686 rxd->wrx_cksum = 0;
1687 rxd->wrx_status = 0; 1687 rxd->wrx_status = 0;
1688 rxd->wrx_errors = 0; 1688 rxd->wrx_errors = 0;
1689 rxd->wrx_special = 0; 1689 rxd->wrx_special = 0;
1690 } 1690 }
1691 wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1691 wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1692 1692
1693 CSR_WRITE(sc, rxq->rxq_rdt_reg, start); 1693 CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
1694} 1694}
1695 1695
/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
1700 1700
1701/* Lookup supported device table */ 1701/* Lookup supported device table */
1702static const struct wm_product * 1702static const struct wm_product *
1703wm_lookup(const struct pci_attach_args *pa) 1703wm_lookup(const struct pci_attach_args *pa)
1704{ 1704{
1705 const struct wm_product *wmp; 1705 const struct wm_product *wmp;
1706 1706
1707 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) { 1707 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1708 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor && 1708 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1709 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product) 1709 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1710 return wmp; 1710 return wmp;
1711 } 1711 }
1712 return NULL; 1712 return NULL;
1713} 1713}
1714 1714
1715/* The match function (ca_match) */ 1715/* The match function (ca_match) */
1716static int 1716static int
1717wm_match(device_t parent, cfdata_t cf, void *aux) 1717wm_match(device_t parent, cfdata_t cf, void *aux)
1718{ 1718{
1719 struct pci_attach_args *pa = aux; 1719 struct pci_attach_args *pa = aux;
1720 1720
1721 if (wm_lookup(pa) != NULL) 1721 if (wm_lookup(pa) != NULL)
1722 return 1; 1722 return 1;
1723 1723
1724 return 0; 1724 return 0;
1725} 1725}
1726 1726
1727/* The attach function (ca_attach) */ 1727/* The attach function (ca_attach) */
1728static void 1728static void
1729wm_attach(device_t parent, device_t self, void *aux) 1729wm_attach(device_t parent, device_t self, void *aux)
1730{ 1730{
1731 struct wm_softc *sc = device_private(self); 1731 struct wm_softc *sc = device_private(self);
1732 struct pci_attach_args *pa = aux; 1732 struct pci_attach_args *pa = aux;
1733 prop_dictionary_t dict; 1733 prop_dictionary_t dict;
1734 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1734 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1735 pci_chipset_tag_t pc = pa->pa_pc; 1735 pci_chipset_tag_t pc = pa->pa_pc;
1736 int counts[PCI_INTR_TYPE_SIZE]; 1736 int counts[PCI_INTR_TYPE_SIZE];
1737 pci_intr_type_t max_type; 1737 pci_intr_type_t max_type;
1738 const char *eetype, *xname; 1738 const char *eetype, *xname;
1739 bus_space_tag_t memt; 1739 bus_space_tag_t memt;
1740 bus_space_handle_t memh; 1740 bus_space_handle_t memh;
1741 bus_size_t memsize; 1741 bus_size_t memsize;
1742 int memh_valid; 1742 int memh_valid;
1743 int i, error; 1743 int i, error;
1744 const struct wm_product *wmp; 1744 const struct wm_product *wmp;
1745 prop_data_t ea; 1745 prop_data_t ea;
1746 prop_number_t pn; 1746 prop_number_t pn;
1747 uint8_t enaddr[ETHER_ADDR_LEN]; 1747 uint8_t enaddr[ETHER_ADDR_LEN];
1748 char buf[256]; 1748 char buf[256];
1749 uint16_t cfg1, cfg2, swdpin, nvmword; 1749 uint16_t cfg1, cfg2, swdpin, nvmword;
1750 pcireg_t preg, memtype; 1750 pcireg_t preg, memtype;
1751 uint16_t eeprom_data, apme_mask; 1751 uint16_t eeprom_data, apme_mask;
1752 bool force_clear_smbi; 1752 bool force_clear_smbi;
1753 uint32_t link_mode; 1753 uint32_t link_mode;
1754 uint32_t reg; 1754 uint32_t reg;
1755 1755
1756 sc->sc_dev = self; 1756 sc->sc_dev = self;
1757 callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS); 1757 callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
1758 sc->sc_core_stopping = false; 1758 sc->sc_core_stopping = false;
1759 1759
1760 wmp = wm_lookup(pa); 1760 wmp = wm_lookup(pa);
1761#ifdef DIAGNOSTIC 1761#ifdef DIAGNOSTIC
1762 if (wmp == NULL) { 1762 if (wmp == NULL) {
1763 printf("\n"); 1763 printf("\n");
1764 panic("wm_attach: impossible"); 1764 panic("wm_attach: impossible");
1765 } 1765 }
1766#endif 1766#endif
1767 sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags); 1767 sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
1768 1768
1769 sc->sc_pc = pa->pa_pc; 1769 sc->sc_pc = pa->pa_pc;
1770 sc->sc_pcitag = pa->pa_tag; 1770 sc->sc_pcitag = pa->pa_tag;
1771 1771
1772 if (pci_dma64_available(pa)) 1772 if (pci_dma64_available(pa))
1773 sc->sc_dmat = pa->pa_dmat64; 1773 sc->sc_dmat = pa->pa_dmat64;
1774 else 1774 else
1775 sc->sc_dmat = pa->pa_dmat; 1775 sc->sc_dmat = pa->pa_dmat;
1776 1776
1777 sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id); 1777 sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
1778 sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG)); 1778 sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
1779 pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1); 1779 pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
1780 1780
1781 sc->sc_type = wmp->wmp_type; 1781 sc->sc_type = wmp->wmp_type;
1782 1782
1783 /* Set default function pointers */ 1783 /* Set default function pointers */
1784 sc->phy.acquire = sc->nvm.acquire = wm_get_null; 1784 sc->phy.acquire = sc->nvm.acquire = wm_get_null;
1785 sc->phy.release = sc->nvm.release = wm_put_null; 1785 sc->phy.release = sc->nvm.release = wm_put_null;
1786 sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000; 1786 sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
1787 1787
1788 if (sc->sc_type < WM_T_82543) { 1788 if (sc->sc_type < WM_T_82543) {
1789 if (sc->sc_rev < 2) { 1789 if (sc->sc_rev < 2) {
1790 aprint_error_dev(sc->sc_dev, 1790 aprint_error_dev(sc->sc_dev,
1791 "i82542 must be at least rev. 2\n"); 1791 "i82542 must be at least rev. 2\n");
1792 return; 1792 return;
1793 } 1793 }
1794 if (sc->sc_rev < 3) 1794 if (sc->sc_rev < 3)
1795 sc->sc_type = WM_T_82542_2_0; 1795 sc->sc_type = WM_T_82542_2_0;
1796 } 1796 }
1797 1797
1798 /* 1798 /*
1799 * Disable MSI for Errata: 1799 * Disable MSI for Errata:
1800 * "Message Signaled Interrupt Feature May Corrupt Write Transactions" 1800 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
1801 *  1801 *
1802 * 82544: Errata 25 1802 * 82544: Errata 25
1803 * 82540: Errata 6 (easy to reproduce device timeout) 1803 * 82540: Errata 6 (easy to reproduce device timeout)
1804 * 82545: Errata 4 (easy to reproduce device timeout) 1804 * 82545: Errata 4 (easy to reproduce device timeout)
1805 * 82546: Errata 26 (easy to reproduce device timeout) 1805 * 82546: Errata 26 (easy to reproduce device timeout)
1806 * 82541: Errata 7 (easy to reproduce device timeout) 1806 * 82541: Errata 7 (easy to reproduce device timeout)
1807 * 1807 *
1808 * "Byte Enables 2 and 3 are not set on MSI writes" 1808 * "Byte Enables 2 and 3 are not set on MSI writes"
1809 * 1809 *
1810 * 82571 & 82572: Errata 63 1810 * 82571 & 82572: Errata 63
1811 */ 1811 */
1812 if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571) 1812 if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
1813 || (sc->sc_type == WM_T_82572)) 1813 || (sc->sc_type == WM_T_82572))
1814 pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY; 1814 pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
1815 1815
1816 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576) 1816 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1817 || (sc->sc_type == WM_T_82580) 1817 || (sc->sc_type == WM_T_82580)
1818 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354) 1818 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
1819 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) 1819 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
1820 sc->sc_flags |= WM_F_NEWQUEUE; 1820 sc->sc_flags |= WM_F_NEWQUEUE;
1821 1821
1822 /* Set device properties (mactype) */ 1822 /* Set device properties (mactype) */
1823 dict = device_properties(sc->sc_dev); 1823 dict = device_properties(sc->sc_dev);
1824 prop_dictionary_set_uint32(dict, "mactype", sc->sc_type); 1824 prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1825 1825
1826 /* 1826 /*
1827 * Map the device. All devices support memory-mapped acccess, 1827 * Map the device. All devices support memory-mapped acccess,
1828 * and it is really required for normal operation. 1828 * and it is really required for normal operation.
1829 */ 1829 */
1830 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA); 1830 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1831 switch (memtype) { 1831 switch (memtype) {
1832 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 1832 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1833 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 1833 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1834 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA, 1834 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1835 memtype, 0, &memt, &memh, NULL, &memsize) == 0); 1835 memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1836 break; 1836 break;
1837 default: 1837 default:
1838 memh_valid = 0; 1838 memh_valid = 0;
1839 break; 1839 break;
1840 } 1840 }
1841 1841
1842 if (memh_valid) { 1842 if (memh_valid) {
1843 sc->sc_st = memt; 1843 sc->sc_st = memt;
1844 sc->sc_sh = memh; 1844 sc->sc_sh = memh;
1845 sc->sc_ss = memsize; 1845 sc->sc_ss = memsize;
1846 } else { 1846 } else {
1847 aprint_error_dev(sc->sc_dev, 1847 aprint_error_dev(sc->sc_dev,
1848 "unable to map device registers\n"); 1848 "unable to map device registers\n");
1849 return; 1849 return;
1850 } 1850 }
1851 1851
1852 /* 1852 /*
1853 * In addition, i82544 and later support I/O mapped indirect 1853 * In addition, i82544 and later support I/O mapped indirect
1854 * register access. It is not desirable (nor supported in 1854 * register access. It is not desirable (nor supported in
1855 * this driver) to use it for normal operation, though it is 1855 * this driver) to use it for normal operation, though it is
1856 * required to work around bugs in some chip versions. 1856 * required to work around bugs in some chip versions.
1857 */ 1857 */
1858 if (sc->sc_type >= WM_T_82544) { 1858 if (sc->sc_type >= WM_T_82544) {
1859 /* First we have to find the I/O BAR. */ 1859 /* First we have to find the I/O BAR. */
1860 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) { 1860 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1861 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i); 1861 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1862 if (memtype == PCI_MAPREG_TYPE_IO) 1862 if (memtype == PCI_MAPREG_TYPE_IO)
1863 break; 1863 break;
1864 if (PCI_MAPREG_MEM_TYPE(memtype) == 1864 if (PCI_MAPREG_MEM_TYPE(memtype) ==
1865 PCI_MAPREG_MEM_TYPE_64BIT) 1865 PCI_MAPREG_MEM_TYPE_64BIT)
1866 i += 4; /* skip high bits, too */ 1866 i += 4; /* skip high bits, too */
1867 } 1867 }
1868 if (i < PCI_MAPREG_END) { 1868 if (i < PCI_MAPREG_END) {
1869 /* 1869 /*
1870 * We found PCI_MAPREG_TYPE_IO. Note that 82580 1870 * We found PCI_MAPREG_TYPE_IO. Note that 82580
1871 * (and newer?) chip has no PCI_MAPREG_TYPE_IO. 1871 * (and newer?) chip has no PCI_MAPREG_TYPE_IO.
1872 * It's no problem because newer chips has no this 1872 * It's no problem because newer chips has no this
1873 * bug. 1873 * bug.
1874 * 1874 *
1875 * The i8254x doesn't apparently respond when the 1875 * The i8254x doesn't apparently respond when the
1876 * I/O BAR is 0, which looks somewhat like it's not 1876 * I/O BAR is 0, which looks somewhat like it's not
1877 * been configured. 1877 * been configured.
1878 */ 1878 */
1879 preg = pci_conf_read(pc, pa->pa_tag, i); 1879 preg = pci_conf_read(pc, pa->pa_tag, i);
1880 if (PCI_MAPREG_MEM_ADDR(preg) == 0) { 1880 if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1881 aprint_error_dev(sc->sc_dev, 1881 aprint_error_dev(sc->sc_dev,
1882 "WARNING: I/O BAR at zero.\n"); 1882 "WARNING: I/O BAR at zero.\n");
1883 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO, 1883 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1884 0, &sc->sc_iot, &sc->sc_ioh, 1884 0, &sc->sc_iot, &sc->sc_ioh,
1885 NULL, &sc->sc_ios) == 0) { 1885 NULL, &sc->sc_ios) == 0) {
1886 sc->sc_flags |= WM_F_IOH_VALID; 1886 sc->sc_flags |= WM_F_IOH_VALID;
1887 } else 1887 } else
1888 aprint_error_dev(sc->sc_dev, 1888 aprint_error_dev(sc->sc_dev,
1889 "WARNING: unable to map I/O space\n"); 1889 "WARNING: unable to map I/O space\n");
1890 } 1890 }
1891 1891
1892 } 1892 }
1893 1893
1894 /* Enable bus mastering. Disable MWI on the i82542 2.0. */ 1894 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
1895 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 1895 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1896 preg |= PCI_COMMAND_MASTER_ENABLE; 1896 preg |= PCI_COMMAND_MASTER_ENABLE;
1897 if (sc->sc_type < WM_T_82542_2_1) 1897 if (sc->sc_type < WM_T_82542_2_1)
1898 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE; 1898 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1899 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg); 1899 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1900 1900
1901 /* power up chip */ 1901 /* power up chip */
1902 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL)) 1902 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
1903 && error != EOPNOTSUPP) { 1903 && error != EOPNOTSUPP) {
1904 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error); 1904 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1905 return; 1905 return;
1906 } 1906 }
1907 1907
1908 wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag)); 1908 wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
1909 /* 1909 /*
1910 * Don't use MSI-X if we can use only one queue to save interrupt 1910 * Don't use MSI-X if we can use only one queue to save interrupt
1911 * resource. 1911 * resource.
1912 */ 1912 */
1913 if (sc->sc_nqueues > 1) { 1913 if (sc->sc_nqueues > 1) {
1914 max_type = PCI_INTR_TYPE_MSIX; 1914 max_type = PCI_INTR_TYPE_MSIX;
1915 /* 1915 /*
1916 * 82583 has a MSI-X capability in the PCI configuration space 1916 * 82583 has a MSI-X capability in the PCI configuration space
1917 * but it doesn't support it. At least the document doesn't 1917 * but it doesn't support it. At least the document doesn't
1918 * say anything about MSI-X. 1918 * say anything about MSI-X.
1919 */ 1919 */
1920 counts[PCI_INTR_TYPE_MSIX] 1920 counts[PCI_INTR_TYPE_MSIX]
1921 = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1; 1921 = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
1922 } else { 1922 } else {
1923 max_type = PCI_INTR_TYPE_MSI; 1923 max_type = PCI_INTR_TYPE_MSI;
1924 counts[PCI_INTR_TYPE_MSIX] = 0; 1924 counts[PCI_INTR_TYPE_MSIX] = 0;
1925 } 1925 }
1926 1926
1927 /* Allocation settings */ 1927 /* Allocation settings */
1928 counts[PCI_INTR_TYPE_MSI] = 1; 1928 counts[PCI_INTR_TYPE_MSI] = 1;
1929 counts[PCI_INTR_TYPE_INTX] = 1; 1929 counts[PCI_INTR_TYPE_INTX] = 1;
1930 /* overridden by disable flags */ 1930 /* overridden by disable flags */
1931 if (wm_disable_msi != 0) { 1931 if (wm_disable_msi != 0) {
1932 counts[PCI_INTR_TYPE_MSI] = 0; 1932 counts[PCI_INTR_TYPE_MSI] = 0;
1933 if (wm_disable_msix != 0) { 1933 if (wm_disable_msix != 0) {
1934 max_type = PCI_INTR_TYPE_INTX; 1934 max_type = PCI_INTR_TYPE_INTX;
1935 counts[PCI_INTR_TYPE_MSIX] = 0; 1935 counts[PCI_INTR_TYPE_MSIX] = 0;
1936 } 1936 }
1937 } else if (wm_disable_msix != 0) { 1937 } else if (wm_disable_msix != 0) {
1938 max_type = PCI_INTR_TYPE_MSI; 1938 max_type = PCI_INTR_TYPE_MSI;
1939 counts[PCI_INTR_TYPE_MSIX] = 0; 1939 counts[PCI_INTR_TYPE_MSIX] = 0;
1940 } 1940 }
1941 1941
1942alloc_retry: 1942alloc_retry:
1943 if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) { 1943 if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
1944 aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n"); 1944 aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
1945 return; 1945 return;
1946 } 1946 }
1947 1947
1948 if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) { 1948 if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
1949 error = wm_setup_msix(sc); 1949 error = wm_setup_msix(sc);
1950 if (error) { 1950 if (error) {
1951 pci_intr_release(pc, sc->sc_intrs, 1951 pci_intr_release(pc, sc->sc_intrs,
1952 counts[PCI_INTR_TYPE_MSIX]); 1952 counts[PCI_INTR_TYPE_MSIX]);
1953 1953
1954 /* Setup for MSI: Disable MSI-X */ 1954 /* Setup for MSI: Disable MSI-X */
1955 max_type = PCI_INTR_TYPE_MSI; 1955 max_type = PCI_INTR_TYPE_MSI;
1956 counts[PCI_INTR_TYPE_MSI] = 1; 1956 counts[PCI_INTR_TYPE_MSI] = 1;
1957 counts[PCI_INTR_TYPE_INTX] = 1; 1957 counts[PCI_INTR_TYPE_INTX] = 1;
1958 goto alloc_retry; 1958 goto alloc_retry;
1959 } 1959 }
1960 } else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) { 1960 } else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
1961 wm_adjust_qnum(sc, 0); /* must not use multiqueue */ 1961 wm_adjust_qnum(sc, 0); /* must not use multiqueue */
1962 error = wm_setup_legacy(sc); 1962 error = wm_setup_legacy(sc);
1963 if (error) { 1963 if (error) {
1964 pci_intr_release(sc->sc_pc, sc->sc_intrs, 1964 pci_intr_release(sc->sc_pc, sc->sc_intrs,
1965 counts[PCI_INTR_TYPE_MSI]); 1965 counts[PCI_INTR_TYPE_MSI]);
1966 1966
1967 /* The next try is for INTx: Disable MSI */ 1967 /* The next try is for INTx: Disable MSI */
1968 max_type = PCI_INTR_TYPE_INTX; 1968 max_type = PCI_INTR_TYPE_INTX;
1969 counts[PCI_INTR_TYPE_INTX] = 1; 1969 counts[PCI_INTR_TYPE_INTX] = 1;
1970 goto alloc_retry; 1970 goto alloc_retry;
1971 } 1971 }
1972 } else { 1972 } else {
1973 wm_adjust_qnum(sc, 0); /* must not use multiqueue */ 1973 wm_adjust_qnum(sc, 0); /* must not use multiqueue */
1974 error = wm_setup_legacy(sc); 1974 error = wm_setup_legacy(sc);
1975 if (error) { 1975 if (error) {
1976 pci_intr_release(sc->sc_pc, sc->sc_intrs, 1976 pci_intr_release(sc->sc_pc, sc->sc_intrs,
1977 counts[PCI_INTR_TYPE_INTX]); 1977 counts[PCI_INTR_TYPE_INTX]);
1978 return; 1978 return;
1979 } 1979 }
1980 } 1980 }
1981 1981
1982 /* 1982 /*
1983 * Check the function ID (unit number of the chip). 1983 * Check the function ID (unit number of the chip).
1984 */ 1984 */
1985 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3) 1985 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1986 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003) 1986 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
1987 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576) 1987 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1988 || (sc->sc_type == WM_T_82580) 1988 || (sc->sc_type == WM_T_82580)
1989 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) 1989 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
1990 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS) 1990 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1991 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK; 1991 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1992 else 1992 else
1993 sc->sc_funcid = 0; 1993 sc->sc_funcid = 0;
1994 1994
1995 /* 1995 /*
1996 * Determine a few things about the bus we're connected to. 1996 * Determine a few things about the bus we're connected to.
1997 */ 1997 */
1998 if (sc->sc_type < WM_T_82543) { 1998 if (sc->sc_type < WM_T_82543) {
1999 /* We don't really know the bus characteristics here. */ 1999 /* We don't really know the bus characteristics here. */
2000 sc->sc_bus_speed = 33; 2000 sc->sc_bus_speed = 33;
2001 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) { 2001 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
2002 /* 2002 /*
2003 * CSA (Communication Streaming Architecture) is about as fast 2003 * CSA (Communication Streaming Architecture) is about as fast
2004 * a 32-bit 66MHz PCI Bus. 2004 * a 32-bit 66MHz PCI Bus.
2005 */ 2005 */
2006 sc->sc_flags |= WM_F_CSA; 2006 sc->sc_flags |= WM_F_CSA;
2007 sc->sc_bus_speed = 66; 2007 sc->sc_bus_speed = 66;
2008 aprint_verbose_dev(sc->sc_dev, 2008 aprint_verbose_dev(sc->sc_dev,
2009 "Communication Streaming Architecture\n"); 2009 "Communication Streaming Architecture\n");
2010 if (sc->sc_type == WM_T_82547) { 2010 if (sc->sc_type == WM_T_82547) {
2011 callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS); 2011 callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
2012 callout_setfunc(&sc->sc_txfifo_ch, 2012 callout_setfunc(&sc->sc_txfifo_ch,
2013 wm_82547_txfifo_stall, sc); 2013 wm_82547_txfifo_stall, sc);
2014 aprint_verbose_dev(sc->sc_dev, 2014 aprint_verbose_dev(sc->sc_dev,
2015 "using 82547 Tx FIFO stall work-around\n"); 2015 "using 82547 Tx FIFO stall work-around\n");
2016 } 2016 }
2017 } else if (sc->sc_type >= WM_T_82571) { 2017 } else if (sc->sc_type >= WM_T_82571) {
2018 sc->sc_flags |= WM_F_PCIE; 2018 sc->sc_flags |= WM_F_PCIE;
2019 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9) 2019 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
2020 && (sc->sc_type != WM_T_ICH10) 2020 && (sc->sc_type != WM_T_ICH10)
2021 && (sc->sc_type != WM_T_PCH) 2021 && (sc->sc_type != WM_T_PCH)
2022 && (sc->sc_type != WM_T_PCH2) 2022 && (sc->sc_type != WM_T_PCH2)
2023 && (sc->sc_type != WM_T_PCH_LPT) 2023 && (sc->sc_type != WM_T_PCH_LPT)
2024 && (sc->sc_type != WM_T_PCH_SPT) 2024 && (sc->sc_type != WM_T_PCH_SPT)
2025 && (sc->sc_type != WM_T_PCH_CNP)) { 2025 && (sc->sc_type != WM_T_PCH_CNP)) {
2026 /* ICH* and PCH* have no PCIe capability registers */ 2026 /* ICH* and PCH* have no PCIe capability registers */
2027 if (pci_get_capability(pa->pa_pc, pa->pa_tag, 2027 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2028 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff, 2028 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
2029 NULL) == 0) 2029 NULL) == 0)
2030 aprint_error_dev(sc->sc_dev, 2030 aprint_error_dev(sc->sc_dev,
2031 "unable to find PCIe capability\n"); 2031 "unable to find PCIe capability\n");
2032 } 2032 }
2033 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n"); 2033 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
2034 } else { 2034 } else {
2035 reg = CSR_READ(sc, WMREG_STATUS); 2035 reg = CSR_READ(sc, WMREG_STATUS);
2036 if (reg & STATUS_BUS64) 2036 if (reg & STATUS_BUS64)
2037 sc->sc_flags |= WM_F_BUS64; 2037 sc->sc_flags |= WM_F_BUS64;
2038 if ((reg & STATUS_PCIX_MODE) != 0) { 2038 if ((reg & STATUS_PCIX_MODE) != 0) {
2039 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb; 2039 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
2040 2040
2041 sc->sc_flags |= WM_F_PCIX; 2041 sc->sc_flags |= WM_F_PCIX;
2042 if (pci_get_capability(pa->pa_pc, pa->pa_tag, 2042 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2043 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0) 2043 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
2044 aprint_error_dev(sc->sc_dev, 2044 aprint_error_dev(sc->sc_dev,
2045 "unable to find PCIX capability\n"); 2045 "unable to find PCIX capability\n");
2046 else if (sc->sc_type != WM_T_82545_3 && 2046 else if (sc->sc_type != WM_T_82545_3 &&
2047 sc->sc_type != WM_T_82546_3) { 2047 sc->sc_type != WM_T_82546_3) {
2048 /* 2048 /*
2049 * Work around a problem caused by the BIOS 2049 * Work around a problem caused by the BIOS
2050 * setting the max memory read byte count 2050 * setting the max memory read byte count
2051 * incorrectly. 2051 * incorrectly.
2052 */ 2052 */
2053 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag, 2053 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
2054 sc->sc_pcixe_capoff + PCIX_CMD); 2054 sc->sc_pcixe_capoff + PCIX_CMD);
2055 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag, 2055 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
2056 sc->sc_pcixe_capoff + PCIX_STATUS); 2056 sc->sc_pcixe_capoff + PCIX_STATUS);
2057 2057
2058 bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >> 2058 bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
2059 PCIX_CMD_BYTECNT_SHIFT; 2059 PCIX_CMD_BYTECNT_SHIFT;
2060 maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >> 2060 maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
2061 PCIX_STATUS_MAXB_SHIFT; 2061 PCIX_STATUS_MAXB_SHIFT;
2062 if (bytecnt > maxb) { 2062 if (bytecnt > maxb) {
2063 aprint_verbose_dev(sc->sc_dev, 2063 aprint_verbose_dev(sc->sc_dev,
2064 "resetting PCI-X MMRBC: %d -> %d\n", 2064 "resetting PCI-X MMRBC: %d -> %d\n",
2065 512 << bytecnt, 512 << maxb); 2065 512 << bytecnt, 512 << maxb);
2066 pcix_cmd = (pcix_cmd & 2066 pcix_cmd = (pcix_cmd &
2067 ~PCIX_CMD_BYTECNT_MASK) | 2067 ~PCIX_CMD_BYTECNT_MASK) |
2068 (maxb << PCIX_CMD_BYTECNT_SHIFT); 2068 (maxb << PCIX_CMD_BYTECNT_SHIFT);
2069 pci_conf_write(pa->pa_pc, pa->pa_tag, 2069 pci_conf_write(pa->pa_pc, pa->pa_tag,
2070 sc->sc_pcixe_capoff + PCIX_CMD, 2070 sc->sc_pcixe_capoff + PCIX_CMD,
2071 pcix_cmd); 2071 pcix_cmd);
2072 } 2072 }
2073 } 2073 }
2074 } 2074 }
2075 /* 2075 /*
2076 * The quad port adapter is special; it has a PCIX-PCIX 2076 * The quad port adapter is special; it has a PCIX-PCIX
2077 * bridge on the board, and can run the secondary bus at 2077 * bridge on the board, and can run the secondary bus at
2078 * a higher speed. 2078 * a higher speed.
2079 */ 2079 */
2080 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) { 2080 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
2081 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120 2081 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
2082 : 66; 2082 : 66;
2083 } else if (sc->sc_flags & WM_F_PCIX) { 2083 } else if (sc->sc_flags & WM_F_PCIX) {
2084 switch (reg & STATUS_PCIXSPD_MASK) { 2084 switch (reg & STATUS_PCIXSPD_MASK) {
2085 case STATUS_PCIXSPD_50_66: 2085 case STATUS_PCIXSPD_50_66:
2086 sc->sc_bus_speed = 66; 2086 sc->sc_bus_speed = 66;
2087 break; 2087 break;
2088 case STATUS_PCIXSPD_66_100: 2088 case STATUS_PCIXSPD_66_100:
2089 sc->sc_bus_speed = 100; 2089 sc->sc_bus_speed = 100;
2090 break; 2090 break;
2091 case STATUS_PCIXSPD_100_133: 2091 case STATUS_PCIXSPD_100_133:
2092 sc->sc_bus_speed = 133; 2092 sc->sc_bus_speed = 133;
2093 break; 2093 break;
2094 default: 2094 default:
2095 aprint_error_dev(sc->sc_dev, 2095 aprint_error_dev(sc->sc_dev,
2096 "unknown PCIXSPD %d; assuming 66MHz\n", 2096 "unknown PCIXSPD %d; assuming 66MHz\n",
2097 reg & STATUS_PCIXSPD_MASK); 2097 reg & STATUS_PCIXSPD_MASK);
2098 sc->sc_bus_speed = 66; 2098 sc->sc_bus_speed = 66;
2099 break; 2099 break;
2100 } 2100 }
2101 } else 2101 } else
2102 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33; 2102 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
2103 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n", 2103 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
2104 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed, 2104 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
2105 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI"); 2105 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
2106 } 2106 }
2107 2107
2108 /* clear interesting stat counters */ 2108 /* clear interesting stat counters */
2109 CSR_READ(sc, WMREG_COLC); 2109 CSR_READ(sc, WMREG_COLC);
2110 CSR_READ(sc, WMREG_RXERRC); 2110 CSR_READ(sc, WMREG_RXERRC);
2111 2111
2112 if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583) 2112 if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
2113 || (sc->sc_type >= WM_T_ICH8)) 2113 || (sc->sc_type >= WM_T_ICH8))
2114 sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); 2114 sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2115 if (sc->sc_type >= WM_T_ICH8) 2115 if (sc->sc_type >= WM_T_ICH8)
2116 sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); 2116 sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2117 2117
2118 /* Set PHY, NVM mutex related stuff */ 2118 /* Set PHY, NVM mutex related stuff */
2119 switch (sc->sc_type) { 2119 switch (sc->sc_type) {
2120 case WM_T_82542_2_0: 2120 case WM_T_82542_2_0:
2121 case WM_T_82542_2_1: 2121 case WM_T_82542_2_1:
2122 case WM_T_82543: 2122 case WM_T_82543:
2123 case WM_T_82544: 2123 case WM_T_82544:
2124 /* Microwire */ 2124 /* Microwire */
2125 sc->nvm.read = wm_nvm_read_uwire; 2125 sc->nvm.read = wm_nvm_read_uwire;
2126 sc->sc_nvm_wordsize = 64; 2126 sc->sc_nvm_wordsize = 64;
2127 sc->sc_nvm_addrbits = 6; 2127 sc->sc_nvm_addrbits = 6;
2128 break; 2128 break;
2129 case WM_T_82540: 2129 case WM_T_82540:
2130 case WM_T_82545: 2130 case WM_T_82545:
2131 case WM_T_82545_3: 2131 case WM_T_82545_3:
2132 case WM_T_82546: 2132 case WM_T_82546:
2133 case WM_T_82546_3: 2133 case WM_T_82546_3:
2134 /* Microwire */ 2134 /* Microwire */
2135 sc->nvm.read = wm_nvm_read_uwire; 2135 sc->nvm.read = wm_nvm_read_uwire;
2136 reg = CSR_READ(sc, WMREG_EECD); 2136 reg = CSR_READ(sc, WMREG_EECD);
2137 if (reg & EECD_EE_SIZE) { 2137 if (reg & EECD_EE_SIZE) {
2138 sc->sc_nvm_wordsize = 256; 2138 sc->sc_nvm_wordsize = 256;
2139 sc->sc_nvm_addrbits = 8; 2139 sc->sc_nvm_addrbits = 8;
2140 } else { 2140 } else {
2141 sc->sc_nvm_wordsize = 64; 2141 sc->sc_nvm_wordsize = 64;
2142 sc->sc_nvm_addrbits = 6; 2142 sc->sc_nvm_addrbits = 6;
2143 } 2143 }
2144 sc->sc_flags |= WM_F_LOCK_EECD; 2144 sc->sc_flags |= WM_F_LOCK_EECD;
2145 sc->nvm.acquire = wm_get_eecd; 2145 sc->nvm.acquire = wm_get_eecd;
2146 sc->nvm.release = wm_put_eecd; 2146 sc->nvm.release = wm_put_eecd;
2147 break; 2147 break;
2148 case WM_T_82541: 2148 case WM_T_82541:
2149 case WM_T_82541_2: 2149 case WM_T_82541_2:
2150 case WM_T_82547: 2150 case WM_T_82547:
2151 case WM_T_82547_2: 2151 case WM_T_82547_2:
2152 reg = CSR_READ(sc, WMREG_EECD); 2152 reg = CSR_READ(sc, WMREG_EECD);
2153 /* 2153 /*
2154 * wm_nvm_set_addrbits_size_eecd() accesses SPI in it only 2154 * wm_nvm_set_addrbits_size_eecd() accesses SPI in it only
2155 * on 8254[17], so set flags and functios before calling it. 2155 * on 8254[17], so set flags and functios before calling it.
2156 */ 2156 */
2157 sc->sc_flags |= WM_F_LOCK_EECD; 2157 sc->sc_flags |= WM_F_LOCK_EECD;
2158 sc->nvm.acquire = wm_get_eecd; 2158 sc->nvm.acquire = wm_get_eecd;
2159 sc->nvm.release = wm_put_eecd; 2159 sc->nvm.release = wm_put_eecd;
2160 if (reg & EECD_EE_TYPE) { 2160 if (reg & EECD_EE_TYPE) {
2161 /* SPI */ 2161 /* SPI */
2162 sc->nvm.read = wm_nvm_read_spi; 2162 sc->nvm.read = wm_nvm_read_spi;
2163 sc->sc_flags |= WM_F_EEPROM_SPI; 2163 sc->sc_flags |= WM_F_EEPROM_SPI;
2164 wm_nvm_set_addrbits_size_eecd(sc); 2164 wm_nvm_set_addrbits_size_eecd(sc);
2165 } else { 2165 } else {
2166 /* Microwire */ 2166 /* Microwire */
2167 sc->nvm.read = wm_nvm_read_uwire; 2167 sc->nvm.read = wm_nvm_read_uwire;
2168 if ((reg & EECD_EE_ABITS) != 0) { 2168 if ((reg & EECD_EE_ABITS) != 0) {
2169 sc->sc_nvm_wordsize = 256; 2169 sc->sc_nvm_wordsize = 256;
2170 sc->sc_nvm_addrbits = 8; 2170 sc->sc_nvm_addrbits = 8;
2171 } else { 2171 } else {
2172 sc->sc_nvm_wordsize = 64; 2172 sc->sc_nvm_wordsize = 64;
2173 sc->sc_nvm_addrbits = 6; 2173 sc->sc_nvm_addrbits = 6;
2174 } 2174 }
2175 } 2175 }
2176 break; 2176 break;
2177 case WM_T_82571: 2177 case WM_T_82571:
2178 case WM_T_82572: 2178 case WM_T_82572:
2179 /* SPI */ 2179 /* SPI */
2180 sc->nvm.read = wm_nvm_read_eerd; 2180 sc->nvm.read = wm_nvm_read_eerd;
2181 /* Not use WM_F_LOCK_EECD because we use EERD */ 2181 /* Not use WM_F_LOCK_EECD because we use EERD */
2182 sc->sc_flags |= WM_F_EEPROM_SPI; 2182 sc->sc_flags |= WM_F_EEPROM_SPI;
2183 wm_nvm_set_addrbits_size_eecd(sc); 2183 wm_nvm_set_addrbits_size_eecd(sc);
2184 sc->phy.acquire = wm_get_swsm_semaphore; 2184 sc->phy.acquire = wm_get_swsm_semaphore;
2185 sc->phy.release = wm_put_swsm_semaphore; 2185 sc->phy.release = wm_put_swsm_semaphore;
2186 sc->nvm.acquire = wm_get_nvm_82571; 2186 sc->nvm.acquire = wm_get_nvm_82571;
2187 sc->nvm.release = wm_put_nvm_82571; 2187 sc->nvm.release = wm_put_nvm_82571;
2188 break; 2188 break;
2189 case WM_T_82573: 2189 case WM_T_82573:
2190 case WM_T_82574: 2190 case WM_T_82574:
2191 case WM_T_82583: 2191 case WM_T_82583:
2192 sc->nvm.read = wm_nvm_read_eerd; 2192 sc->nvm.read = wm_nvm_read_eerd;
2193 /* Not use WM_F_LOCK_EECD because we use EERD */ 2193 /* Not use WM_F_LOCK_EECD because we use EERD */
2194 if (sc->sc_type == WM_T_82573) { 2194 if (sc->sc_type == WM_T_82573) {
2195 sc->phy.acquire = wm_get_swsm_semaphore; 2195 sc->phy.acquire = wm_get_swsm_semaphore;
2196 sc->phy.release = wm_put_swsm_semaphore; 2196 sc->phy.release = wm_put_swsm_semaphore;
2197 sc->nvm.acquire = wm_get_nvm_82571; 2197 sc->nvm.acquire = wm_get_nvm_82571;
2198 sc->nvm.release = wm_put_nvm_82571; 2198 sc->nvm.release = wm_put_nvm_82571;
2199 } else { 2199 } else {
2200 /* Both PHY and NVM use the same semaphore. */ 2200 /* Both PHY and NVM use the same semaphore. */
2201 sc->phy.acquire = sc->nvm.acquire 2201 sc->phy.acquire = sc->nvm.acquire
2202 = wm_get_swfwhw_semaphore; 2202 = wm_get_swfwhw_semaphore;
2203 sc->phy.release = sc->nvm.release 2203 sc->phy.release = sc->nvm.release
2204 = wm_put_swfwhw_semaphore; 2204 = wm_put_swfwhw_semaphore;
2205 } 2205 }
2206 if (wm_nvm_is_onboard_eeprom(sc) == 0) { 2206 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
2207 sc->sc_flags |= WM_F_EEPROM_FLASH; 2207 sc->sc_flags |= WM_F_EEPROM_FLASH;
2208 sc->sc_nvm_wordsize = 2048; 2208 sc->sc_nvm_wordsize = 2048;
2209 } else { 2209 } else {
2210 /* SPI */ 2210 /* SPI */
2211 sc->sc_flags |= WM_F_EEPROM_SPI; 2211 sc->sc_flags |= WM_F_EEPROM_SPI;
2212 wm_nvm_set_addrbits_size_eecd(sc); 2212 wm_nvm_set_addrbits_size_eecd(sc);
2213 } 2213 }
2214 break; 2214 break;
2215 case WM_T_82575: 2215 case WM_T_82575:
2216 case WM_T_82576: 2216 case WM_T_82576:
2217 case WM_T_82580: 2217 case WM_T_82580:
2218 case WM_T_I350: 2218 case WM_T_I350:
2219 case WM_T_I354: 2219 case WM_T_I354:
2220 case WM_T_80003: 2220 case WM_T_80003:
2221 /* SPI */ 2221 /* SPI */
2222 sc->sc_flags |= WM_F_EEPROM_SPI; 2222 sc->sc_flags |= WM_F_EEPROM_SPI;
2223 wm_nvm_set_addrbits_size_eecd(sc); 2223 wm_nvm_set_addrbits_size_eecd(sc);
2224 if ((sc->sc_type == WM_T_80003) 2224 if ((sc->sc_type == WM_T_80003)
2225 || (sc->sc_nvm_wordsize < (1 << 15))) { 2225 || (sc->sc_nvm_wordsize < (1 << 15))) {
2226 sc->nvm.read = wm_nvm_read_eerd; 2226 sc->nvm.read = wm_nvm_read_eerd;
2227 /* Don't use WM_F_LOCK_EECD because we use EERD */ 2227 /* Don't use WM_F_LOCK_EECD because we use EERD */
2228 } else { 2228 } else {
2229 sc->nvm.read = wm_nvm_read_spi; 2229 sc->nvm.read = wm_nvm_read_spi;
2230 sc->sc_flags |= WM_F_LOCK_EECD; 2230 sc->sc_flags |= WM_F_LOCK_EECD;
2231 } 2231 }
2232 sc->phy.acquire = wm_get_phy_82575; 2232 sc->phy.acquire = wm_get_phy_82575;
2233 sc->phy.release = wm_put_phy_82575; 2233 sc->phy.release = wm_put_phy_82575;
2234 sc->nvm.acquire = wm_get_nvm_80003;  2234 sc->nvm.acquire = wm_get_nvm_80003;
2235 sc->nvm.release = wm_put_nvm_80003;  2235 sc->nvm.release = wm_put_nvm_80003;
2236 break; 2236 break;
2237 case WM_T_ICH8: 2237 case WM_T_ICH8:
2238 case WM_T_ICH9: 2238 case WM_T_ICH9:
2239 case WM_T_ICH10: 2239 case WM_T_ICH10:
2240 case WM_T_PCH: 2240 case WM_T_PCH:
2241 case WM_T_PCH2: 2241 case WM_T_PCH2:
2242 case WM_T_PCH_LPT: 2242 case WM_T_PCH_LPT:
2243 sc->nvm.read = wm_nvm_read_ich8; 2243 sc->nvm.read = wm_nvm_read_ich8;
2244 /* FLASH */ 2244 /* FLASH */
2245 sc->sc_flags |= WM_F_EEPROM_FLASH; 2245 sc->sc_flags |= WM_F_EEPROM_FLASH;
2246 sc->sc_nvm_wordsize = 2048; 2246 sc->sc_nvm_wordsize = 2048;
2247 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH); 2247 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
2248 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0, 2248 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
2249 &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) { 2249 &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
2250 aprint_error_dev(sc->sc_dev, 2250 aprint_error_dev(sc->sc_dev,
2251 "can't map FLASH registers\n"); 2251 "can't map FLASH registers\n");
2252 goto out; 2252 goto out;
2253 } 2253 }
2254 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG); 2254 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
2255 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) * 2255 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
2256 ICH_FLASH_SECTOR_SIZE; 2256 ICH_FLASH_SECTOR_SIZE;
2257 sc->sc_ich8_flash_bank_size = 2257 sc->sc_ich8_flash_bank_size =
2258 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1; 2258 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
2259 sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK); 2259 sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
2260 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE; 2260 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
2261 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t); 2261 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
2262 sc->sc_flashreg_offset = 0; 2262 sc->sc_flashreg_offset = 0;
2263 sc->phy.acquire = wm_get_swflag_ich8lan; 2263 sc->phy.acquire = wm_get_swflag_ich8lan;
2264 sc->phy.release = wm_put_swflag_ich8lan; 2264 sc->phy.release = wm_put_swflag_ich8lan;
2265 sc->nvm.acquire = wm_get_nvm_ich8lan; 2265 sc->nvm.acquire = wm_get_nvm_ich8lan;
2266 sc->nvm.release = wm_put_nvm_ich8lan; 2266 sc->nvm.release = wm_put_nvm_ich8lan;
2267 break; 2267 break;
2268 case WM_T_PCH_SPT: 2268 case WM_T_PCH_SPT:
2269 case WM_T_PCH_CNP: 2269 case WM_T_PCH_CNP:
2270 sc->nvm.read = wm_nvm_read_spt; 2270 sc->nvm.read = wm_nvm_read_spt;
2271 /* SPT has no GFPREG; flash registers mapped through BAR0 */ 2271 /* SPT has no GFPREG; flash registers mapped through BAR0 */
2272 sc->sc_flags |= WM_F_EEPROM_FLASH; 2272 sc->sc_flags |= WM_F_EEPROM_FLASH;
2273 sc->sc_flasht = sc->sc_st; 2273 sc->sc_flasht = sc->sc_st;
2274 sc->sc_flashh = sc->sc_sh; 2274 sc->sc_flashh = sc->sc_sh;
2275 sc->sc_ich8_flash_base = 0; 2275 sc->sc_ich8_flash_base = 0;
2276 sc->sc_nvm_wordsize = 2276 sc->sc_nvm_wordsize =
2277 (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1) 2277 (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
2278 * NVM_SIZE_MULTIPLIER; 2278 * NVM_SIZE_MULTIPLIER;
2279 /* It is size in bytes, we want words */ 2279 /* It is size in bytes, we want words */
2280 sc->sc_nvm_wordsize /= 2; 2280 sc->sc_nvm_wordsize /= 2;
2281 /* assume 2 banks */ 2281 /* assume 2 banks */
2282 sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2; 2282 sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
2283 sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET; 2283 sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
2284 sc->phy.acquire = wm_get_swflag_ich8lan; 2284 sc->phy.acquire = wm_get_swflag_ich8lan;
2285 sc->phy.release = wm_put_swflag_ich8lan; 2285 sc->phy.release = wm_put_swflag_ich8lan;
2286 sc->nvm.acquire = wm_get_nvm_ich8lan; 2286 sc->nvm.acquire = wm_get_nvm_ich8lan;
2287 sc->nvm.release = wm_put_nvm_ich8lan; 2287 sc->nvm.release = wm_put_nvm_ich8lan;
2288 break; 2288 break;
2289 case WM_T_I210: 2289 case WM_T_I210:
2290 case WM_T_I211: 2290 case WM_T_I211:
2291 /* Allow a single clear of the SW semaphore on I210 and newer*/ 2291 /* Allow a single clear of the SW semaphore on I210 and newer*/
2292 sc->sc_flags |= WM_F_WA_I210_CLSEM; 2292 sc->sc_flags |= WM_F_WA_I210_CLSEM;
2293 if (wm_nvm_flash_presence_i210(sc)) { 2293 if (wm_nvm_flash_presence_i210(sc)) {
2294 sc->nvm.read = wm_nvm_read_eerd; 2294 sc->nvm.read = wm_nvm_read_eerd;
2295 /* Don't use WM_F_LOCK_EECD because we use EERD */ 2295 /* Don't use WM_F_LOCK_EECD because we use EERD */
2296 sc->sc_flags |= WM_F_EEPROM_FLASH_HW; 2296 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2297 wm_nvm_set_addrbits_size_eecd(sc); 2297 wm_nvm_set_addrbits_size_eecd(sc);
2298 } else { 2298 } else {
2299 sc->nvm.read = wm_nvm_read_invm; 2299 sc->nvm.read = wm_nvm_read_invm;
2300 sc->sc_flags |= WM_F_EEPROM_INVM; 2300 sc->sc_flags |= WM_F_EEPROM_INVM;
2301 sc->sc_nvm_wordsize = INVM_SIZE; 2301 sc->sc_nvm_wordsize = INVM_SIZE;
2302 } 2302 }
2303 sc->phy.acquire = wm_get_phy_82575; 2303 sc->phy.acquire = wm_get_phy_82575;
2304 sc->phy.release = wm_put_phy_82575; 2304 sc->phy.release = wm_put_phy_82575;
2305 sc->nvm.acquire = wm_get_nvm_80003; 2305 sc->nvm.acquire = wm_get_nvm_80003;
2306 sc->nvm.release = wm_put_nvm_80003; 2306 sc->nvm.release = wm_put_nvm_80003;
2307 break; 2307 break;
2308 default: 2308 default:
2309 break; 2309 break;
2310 } 2310 }
2311 2311
2312 /* Ensure the SMBI bit is clear before first NVM or PHY access */ 2312 /* Ensure the SMBI bit is clear before first NVM or PHY access */
2313 switch (sc->sc_type) { 2313 switch (sc->sc_type) {
2314 case WM_T_82571: 2314 case WM_T_82571:
2315 case WM_T_82572: 2315 case WM_T_82572:
2316 reg = CSR_READ(sc, WMREG_SWSM2); 2316 reg = CSR_READ(sc, WMREG_SWSM2);
2317 if ((reg & SWSM2_LOCK) == 0) { 2317 if ((reg & SWSM2_LOCK) == 0) {
2318 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK); 2318 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2319 force_clear_smbi = true; 2319 force_clear_smbi = true;
2320 } else 2320 } else
2321 force_clear_smbi = false; 2321 force_clear_smbi = false;
2322 break; 2322 break;
2323 case WM_T_82573: 2323 case WM_T_82573:
2324 case WM_T_82574: 2324 case WM_T_82574:
2325 case WM_T_82583: 2325 case WM_T_82583:
2326 force_clear_smbi = true; 2326 force_clear_smbi = true;
2327 break; 2327 break;
2328 default: 2328 default:
2329 force_clear_smbi = false; 2329 force_clear_smbi = false;
2330 break; 2330 break;
2331 } 2331 }
2332 if (force_clear_smbi) { 2332 if (force_clear_smbi) {
2333 reg = CSR_READ(sc, WMREG_SWSM); 2333 reg = CSR_READ(sc, WMREG_SWSM);
2334 if ((reg & SWSM_SMBI) != 0) 2334 if ((reg & SWSM_SMBI) != 0)
2335 aprint_error_dev(sc->sc_dev, 2335 aprint_error_dev(sc->sc_dev,
2336 "Please update the Bootagent\n"); 2336 "Please update the Bootagent\n");
2337 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI); 2337 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2338 } 2338 }
2339 2339
2340 /* 2340 /*
2341 * Defer printing the EEPROM type until after verifying the checksum 2341 * Defer printing the EEPROM type until after verifying the checksum
2342 * This allows the EEPROM type to be printed correctly in the case 2342 * This allows the EEPROM type to be printed correctly in the case
2343 * that no EEPROM is attached. 2343 * that no EEPROM is attached.
2344 */ 2344 */
2345 /* 2345 /*
2346 * Validate the EEPROM checksum. If the checksum fails, flag 2346 * Validate the EEPROM checksum. If the checksum fails, flag
2347 * this for later, so we can fail future reads from the EEPROM. 2347 * this for later, so we can fail future reads from the EEPROM.
2348 */ 2348 */
2349 if (wm_nvm_validate_checksum(sc)) { 2349 if (wm_nvm_validate_checksum(sc)) {
2350 /* 2350 /*
2351 * Read twice again because some PCI-e parts fail the 2351 * Read twice again because some PCI-e parts fail the
2352 * first check due to the link being in sleep state. 2352 * first check due to the link being in sleep state.
2353 */ 2353 */
2354 if (wm_nvm_validate_checksum(sc)) 2354 if (wm_nvm_validate_checksum(sc))
2355 sc->sc_flags |= WM_F_EEPROM_INVALID; 2355 sc->sc_flags |= WM_F_EEPROM_INVALID;
2356 } 2356 }
2357 2357
2358 if (sc->sc_flags & WM_F_EEPROM_INVALID) 2358 if (sc->sc_flags & WM_F_EEPROM_INVALID)
2359 aprint_verbose_dev(sc->sc_dev, "No EEPROM"); 2359 aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2360 else { 2360 else {
2361 aprint_verbose_dev(sc->sc_dev, "%u words ", 2361 aprint_verbose_dev(sc->sc_dev, "%u words ",
2362 sc->sc_nvm_wordsize); 2362 sc->sc_nvm_wordsize);
2363 if (sc->sc_flags & WM_F_EEPROM_INVM) 2363 if (sc->sc_flags & WM_F_EEPROM_INVM)
2364 aprint_verbose("iNVM"); 2364 aprint_verbose("iNVM");
2365 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) 2365 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2366 aprint_verbose("FLASH(HW)"); 2366 aprint_verbose("FLASH(HW)");
2367 else if (sc->sc_flags & WM_F_EEPROM_FLASH) 2367 else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2368 aprint_verbose("FLASH"); 2368 aprint_verbose("FLASH");
2369 else { 2369 else {
2370 if (sc->sc_flags & WM_F_EEPROM_SPI) 2370 if (sc->sc_flags & WM_F_EEPROM_SPI)
2371 eetype = "SPI"; 2371 eetype = "SPI";
2372 else 2372 else
2373 eetype = "MicroWire"; 2373 eetype = "MicroWire";
2374 aprint_verbose("(%d address bits) %s EEPROM", 2374 aprint_verbose("(%d address bits) %s EEPROM",
2375 sc->sc_nvm_addrbits, eetype); 2375 sc->sc_nvm_addrbits, eetype);
2376 } 2376 }
2377 } 2377 }
2378 wm_nvm_version(sc); 2378 wm_nvm_version(sc);
2379 aprint_verbose("\n"); 2379 aprint_verbose("\n");
2380 2380
2381 /* 2381 /*
2382 * XXX The first call of wm_gmii_setup_phytype. The result might be 2382 * XXX The first call of wm_gmii_setup_phytype. The result might be
2383 * incorrect. 2383 * incorrect.
2384 */ 2384 */
2385 wm_gmii_setup_phytype(sc, 0, 0); 2385 wm_gmii_setup_phytype(sc, 0, 0);
2386 2386
2387 /* Check for WM_F_WOL on some chips before wm_reset() */ 2387 /* Check for WM_F_WOL on some chips before wm_reset() */
2388 switch (sc->sc_type) { 2388 switch (sc->sc_type) {
2389 case WM_T_ICH8: 2389 case WM_T_ICH8:
2390 case WM_T_ICH9: 2390 case WM_T_ICH9:
2391 case WM_T_ICH10: 2391 case WM_T_ICH10:
2392 case WM_T_PCH: 2392 case WM_T_PCH:
2393 case WM_T_PCH2: 2393 case WM_T_PCH2:
2394 case WM_T_PCH_LPT: 2394 case WM_T_PCH_LPT:
2395 case WM_T_PCH_SPT: 2395 case WM_T_PCH_SPT:
2396 case WM_T_PCH_CNP: 2396 case WM_T_PCH_CNP:
2397 apme_mask = WUC_APME; 2397 apme_mask = WUC_APME;
2398 eeprom_data = CSR_READ(sc, WMREG_WUC); 2398 eeprom_data = CSR_READ(sc, WMREG_WUC);
 2399 if ((eeprom_data & apme_mask) != 0)
 2400 sc->sc_flags |= WM_F_WOL;
2399 break; 2401 break;
2400 default: 2402 default:
2401 break; 2403 break;
2402 } 2404 }
2403 if ((eeprom_data & apme_mask) != 0) 
2404 sc->sc_flags |= WM_F_WOL; 
2405 2405
2406 /* Reset the chip to a known state. */ 2406 /* Reset the chip to a known state. */
2407 wm_reset(sc); 2407 wm_reset(sc);
2408 2408
2409 /* 2409 /*
2410 * Check for I21[01] PLL workaround. 2410 * Check for I21[01] PLL workaround.
2411 * 2411 *
2412 * Three cases: 2412 * Three cases:
2413 * a) Chip is I211. 2413 * a) Chip is I211.
2414 * b) Chip is I210 and it uses INVM (not FLASH). 2414 * b) Chip is I210 and it uses INVM (not FLASH).
2415 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25 2415 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
2416 */ 2416 */
2417 if (sc->sc_type == WM_T_I211) 2417 if (sc->sc_type == WM_T_I211)
2418 sc->sc_flags |= WM_F_PLL_WA_I210; 2418 sc->sc_flags |= WM_F_PLL_WA_I210;
2419 if (sc->sc_type == WM_T_I210) { 2419 if (sc->sc_type == WM_T_I210) {
2420 if (!wm_nvm_flash_presence_i210(sc)) 2420 if (!wm_nvm_flash_presence_i210(sc))
2421 sc->sc_flags |= WM_F_PLL_WA_I210; 2421 sc->sc_flags |= WM_F_PLL_WA_I210;
2422 else if ((sc->sc_nvm_ver_major < 3) 2422 else if ((sc->sc_nvm_ver_major < 3)
2423 || ((sc->sc_nvm_ver_major == 3) 2423 || ((sc->sc_nvm_ver_major == 3)
2424 && (sc->sc_nvm_ver_minor < 25))) { 2424 && (sc->sc_nvm_ver_minor < 25))) {
2425 aprint_verbose_dev(sc->sc_dev, 2425 aprint_verbose_dev(sc->sc_dev,
2426 "ROM image version %d.%d is older than 3.25\n", 2426 "ROM image version %d.%d is older than 3.25\n",
2427 sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor); 2427 sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2428 sc->sc_flags |= WM_F_PLL_WA_I210; 2428 sc->sc_flags |= WM_F_PLL_WA_I210;
2429 } 2429 }
2430 } 2430 }
2431 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0) 2431 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2432 wm_pll_workaround_i210(sc); 2432 wm_pll_workaround_i210(sc);
2433 2433
2434 wm_get_wakeup(sc); 2434 wm_get_wakeup(sc);
2435 2435
2436 /* Non-AMT based hardware can now take control from firmware */ 2436 /* Non-AMT based hardware can now take control from firmware */
2437 if ((sc->sc_flags & WM_F_HAS_AMT) == 0) 2437 if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2438 wm_get_hw_control(sc); 2438 wm_get_hw_control(sc);
2439 2439
2440 /* 2440 /*
2441 * Read the Ethernet address from the EEPROM, if not first found 2441 * Read the Ethernet address from the EEPROM, if not first found
2442 * in device properties. 2442 * in device properties.
2443 */ 2443 */
2444 ea = prop_dictionary_get(dict, "mac-address"); 2444 ea = prop_dictionary_get(dict, "mac-address");
2445 if (ea != NULL) { 2445 if (ea != NULL) {
2446 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA); 2446 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2447 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN); 2447 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2448 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN); 2448 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
2449 } else { 2449 } else {
2450 if (wm_read_mac_addr(sc, enaddr) != 0) { 2450 if (wm_read_mac_addr(sc, enaddr) != 0) {
2451 aprint_error_dev(sc->sc_dev, 2451 aprint_error_dev(sc->sc_dev,
2452 "unable to read Ethernet address\n"); 2452 "unable to read Ethernet address\n");
2453 goto out; 2453 goto out;
2454 } 2454 }
2455 } 2455 }
2456 2456
2457 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n", 2457 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2458 ether_sprintf(enaddr)); 2458 ether_sprintf(enaddr));
2459 2459
2460 /* 2460 /*
2461 * Read the config info from the EEPROM, and set up various 2461 * Read the config info from the EEPROM, and set up various
2462 * bits in the control registers based on their contents. 2462 * bits in the control registers based on their contents.
2463 */ 2463 */
2464 pn = prop_dictionary_get(dict, "i82543-cfg1"); 2464 pn = prop_dictionary_get(dict, "i82543-cfg1");
2465 if (pn != NULL) { 2465 if (pn != NULL) {
2466 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER); 2466 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2467 cfg1 = (uint16_t) prop_number_integer_value(pn); 2467 cfg1 = (uint16_t) prop_number_integer_value(pn);
2468 } else { 2468 } else {
2469 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) { 2469 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2470 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n"); 2470 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2471 goto out; 2471 goto out;
2472 } 2472 }
2473 } 2473 }
2474 2474
2475 pn = prop_dictionary_get(dict, "i82543-cfg2"); 2475 pn = prop_dictionary_get(dict, "i82543-cfg2");
2476 if (pn != NULL) { 2476 if (pn != NULL) {
2477 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER); 2477 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2478 cfg2 = (uint16_t) prop_number_integer_value(pn); 2478 cfg2 = (uint16_t) prop_number_integer_value(pn);
2479 } else { 2479 } else {
2480 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) { 2480 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2481 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n"); 2481 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2482 goto out; 2482 goto out;
2483 } 2483 }
2484 } 2484 }
2485 2485
2486 /* check for WM_F_WOL */ 2486 /* check for WM_F_WOL */
2487 switch (sc->sc_type) { 2487 switch (sc->sc_type) {
2488 case WM_T_82542_2_0: 2488 case WM_T_82542_2_0:
2489 case WM_T_82542_2_1: 2489 case WM_T_82542_2_1:
2490 case WM_T_82543: 2490 case WM_T_82543:
2491 /* dummy? */ 2491 /* dummy? */
2492 eeprom_data = 0; 2492 eeprom_data = 0;
2493 apme_mask = NVM_CFG3_APME; 2493 apme_mask = NVM_CFG3_APME;
2494 break; 2494 break;
2495 case WM_T_82544: 2495 case WM_T_82544:
2496 apme_mask = NVM_CFG2_82544_APM_EN; 2496 apme_mask = NVM_CFG2_82544_APM_EN;
2497 eeprom_data = cfg2; 2497 eeprom_data = cfg2;
2498 break; 2498 break;
2499 case WM_T_82546: 2499 case WM_T_82546:
2500 case WM_T_82546_3: 2500 case WM_T_82546_3:
2501 case WM_T_82571: 2501 case WM_T_82571:
2502 case WM_T_82572: 2502 case WM_T_82572:
2503 case WM_T_82573: 2503 case WM_T_82573:
2504 case WM_T_82574: 2504 case WM_T_82574:
2505 case WM_T_82583: 2505 case WM_T_82583:
2506 case WM_T_80003: 2506 case WM_T_80003:
2507 case WM_T_82575: 2507 case WM_T_82575:
2508 case WM_T_82576: 2508 case WM_T_82576:
2509 apme_mask = NVM_CFG3_APME; 2509 apme_mask = NVM_CFG3_APME;
2510 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB 2510 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2511 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data); 2511 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2512 break; 2512 break;
2513 case WM_T_82580: 2513 case WM_T_82580:
2514 case WM_T_I350: 2514 case WM_T_I350:
2515 case WM_T_I354: 2515 case WM_T_I354:
2516 case WM_T_I210: 2516 case WM_T_I210:
2517 case WM_T_I211: 2517 case WM_T_I211:
2518 apme_mask = NVM_CFG3_APME; 2518 apme_mask = NVM_CFG3_APME;
2519 wm_nvm_read(sc, 2519 wm_nvm_read(sc,
2520 NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA, 2520 NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
2521 1, &eeprom_data); 2521 1, &eeprom_data);
2522 break; 2522 break;
2523 case WM_T_ICH8: 2523 case WM_T_ICH8:
2524 case WM_T_ICH9: 2524 case WM_T_ICH9:
2525 case WM_T_ICH10: 2525 case WM_T_ICH10:
2526 case WM_T_PCH: 2526 case WM_T_PCH:
2527 case WM_T_PCH2: 2527 case WM_T_PCH2:
2528 case WM_T_PCH_LPT: 2528 case WM_T_PCH_LPT:
2529 case WM_T_PCH_SPT: 2529 case WM_T_PCH_SPT:
2530 case WM_T_PCH_CNP: 2530 case WM_T_PCH_CNP:
2531 /* Already checked before wm_reset () */ 2531 /* Already checked before wm_reset () */
2532 apme_mask = eeprom_data = 0; 2532 apme_mask = eeprom_data = 0;
2533 break; 2533 break;
2534 default: /* XXX 82540 */ 2534 default: /* XXX 82540 */
2535 apme_mask = NVM_CFG3_APME; 2535 apme_mask = NVM_CFG3_APME;
2536 wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data); 2536 wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2537 break; 2537 break;
2538 } 2538 }
2539 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */ 2539 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2540 if ((eeprom_data & apme_mask) != 0) 2540 if ((eeprom_data & apme_mask) != 0)
2541 sc->sc_flags |= WM_F_WOL; 2541 sc->sc_flags |= WM_F_WOL;
2542 2542
2543 /* 2543 /*
2544 * We have the eeprom settings, now apply the special cases 2544 * We have the eeprom settings, now apply the special cases
2545 * where the eeprom may be wrong or the board won't support 2545 * where the eeprom may be wrong or the board won't support
2546 * wake on lan on a particular port 2546 * wake on lan on a particular port
2547 */ 2547 */
2548 switch (sc->sc_pcidevid) { 2548 switch (sc->sc_pcidevid) {
2549 case PCI_PRODUCT_INTEL_82546GB_PCIE: 2549 case PCI_PRODUCT_INTEL_82546GB_PCIE:
2550 sc->sc_flags &= ~WM_F_WOL; 2550 sc->sc_flags &= ~WM_F_WOL;
2551 break; 2551 break;
2552 case PCI_PRODUCT_INTEL_82546EB_FIBER: 2552 case PCI_PRODUCT_INTEL_82546EB_FIBER:
2553 case PCI_PRODUCT_INTEL_82546GB_FIBER: 2553 case PCI_PRODUCT_INTEL_82546GB_FIBER:
2554 /* Wake events only supported on port A for dual fiber 2554 /* Wake events only supported on port A for dual fiber
2555 * regardless of eeprom setting */ 2555 * regardless of eeprom setting */
2556 if (sc->sc_funcid == 1) 2556 if (sc->sc_funcid == 1)
2557 sc->sc_flags &= ~WM_F_WOL; 2557 sc->sc_flags &= ~WM_F_WOL;
2558 break; 2558 break;
2559 case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3: 2559 case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
2560 /* if quad port adapter, disable WoL on all but port A */ 2560 /* if quad port adapter, disable WoL on all but port A */
2561 if (sc->sc_funcid != 0) 2561 if (sc->sc_funcid != 0)
2562 sc->sc_flags &= ~WM_F_WOL; 2562 sc->sc_flags &= ~WM_F_WOL;
2563 break; 2563 break;
2564 case PCI_PRODUCT_INTEL_82571EB_FIBER: 2564 case PCI_PRODUCT_INTEL_82571EB_FIBER:
2565 /* Wake events only supported on port A for dual fiber 2565 /* Wake events only supported on port A for dual fiber
2566 * regardless of eeprom setting */ 2566 * regardless of eeprom setting */
2567 if (sc->sc_funcid == 1) 2567 if (sc->sc_funcid == 1)
2568 sc->sc_flags &= ~WM_F_WOL; 2568 sc->sc_flags &= ~WM_F_WOL;
2569 break; 2569 break;
2570 case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER: 2570 case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
2571 case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER: 2571 case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
2572 case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER: 2572 case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
2573 /* if quad port adapter, disable WoL on all but port A */ 2573 /* if quad port adapter, disable WoL on all but port A */
2574 if (sc->sc_funcid != 0) 2574 if (sc->sc_funcid != 0)
2575 sc->sc_flags &= ~WM_F_WOL; 2575 sc->sc_flags &= ~WM_F_WOL;
2576 break; 2576 break;
2577 } 2577 }
2578 2578
2579 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) { 2579 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
2580 /* Check NVM for autonegotiation */ 2580 /* Check NVM for autonegotiation */
2581 if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) { 2581 if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2582 if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0) 2582 if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
2583 sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO; 2583 sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2584 } 2584 }
2585 } 2585 }
2586 2586
2587 /* 2587 /*
2588 * XXX need special handling for some multiple port cards 2588 * XXX need special handling for some multiple port cards
2589 * to disable a paticular port. 2589 * to disable a paticular port.
2590 */ 2590 */
2591 2591
2592 if (sc->sc_type >= WM_T_82544) { 2592 if (sc->sc_type >= WM_T_82544) {
2593 pn = prop_dictionary_get(dict, "i82543-swdpin"); 2593 pn = prop_dictionary_get(dict, "i82543-swdpin");
2594 if (pn != NULL) { 2594 if (pn != NULL) {
2595 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER); 2595 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2596 swdpin = (uint16_t) prop_number_integer_value(pn); 2596 swdpin = (uint16_t) prop_number_integer_value(pn);
2597 } else { 2597 } else {
2598 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) { 2598 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2599 aprint_error_dev(sc->sc_dev, 2599 aprint_error_dev(sc->sc_dev,
2600 "unable to read SWDPIN\n"); 2600 "unable to read SWDPIN\n");
2601 goto out; 2601 goto out;
2602 } 2602 }
2603 } 2603 }
2604 } 2604 }
2605 2605
2606 if (cfg1 & NVM_CFG1_ILOS) 2606 if (cfg1 & NVM_CFG1_ILOS)
2607 sc->sc_ctrl |= CTRL_ILOS; 2607 sc->sc_ctrl |= CTRL_ILOS;
2608 2608
2609 /* 2609 /*
2610 * XXX 2610 * XXX
2611 * This code isn't correct because pin 2 and 3 are located 2611 * This code isn't correct because pin 2 and 3 are located
2612 * in different position on newer chips. Check all datasheet. 2612 * in different position on newer chips. Check all datasheet.
2613 * 2613 *
2614 * Until resolve this problem, check if a chip < 82580 2614 * Until resolve this problem, check if a chip < 82580
2615 */ 2615 */
2616 if (sc->sc_type <= WM_T_82580) { 2616 if (sc->sc_type <= WM_T_82580) {
2617 if (sc->sc_type >= WM_T_82544) { 2617 if (sc->sc_type >= WM_T_82544) {
2618 sc->sc_ctrl |= 2618 sc->sc_ctrl |=
2619 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) << 2619 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2620 CTRL_SWDPIO_SHIFT; 2620 CTRL_SWDPIO_SHIFT;
2621 sc->sc_ctrl |= 2621 sc->sc_ctrl |=
2622 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) << 2622 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2623 CTRL_SWDPINS_SHIFT; 2623 CTRL_SWDPINS_SHIFT;
2624 } else { 2624 } else {
2625 sc->sc_ctrl |= 2625 sc->sc_ctrl |=
2626 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) << 2626 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2627 CTRL_SWDPIO_SHIFT; 2627 CTRL_SWDPIO_SHIFT;
2628 } 2628 }
2629 } 2629 }
2630 2630
2631 /* XXX For other than 82580? */ 2631 /* XXX For other than 82580? */
2632 if (sc->sc_type == WM_T_82580) { 2632 if (sc->sc_type == WM_T_82580) {
2633 wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword); 2633 wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
2634 if (nvmword & __BIT(13)) 2634 if (nvmword & __BIT(13))
2635 sc->sc_ctrl |= CTRL_ILOS; 2635 sc->sc_ctrl |= CTRL_ILOS;
2636 } 2636 }
2637 2637
2638#if 0 2638#if 0
2639 if (sc->sc_type >= WM_T_82544) { 2639 if (sc->sc_type >= WM_T_82544) {
2640 if (cfg1 & NVM_CFG1_IPS0) 2640 if (cfg1 & NVM_CFG1_IPS0)
2641 sc->sc_ctrl_ext |= CTRL_EXT_IPS; 2641 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2642 if (cfg1 & NVM_CFG1_IPS1) 2642 if (cfg1 & NVM_CFG1_IPS1)
2643 sc->sc_ctrl_ext |= CTRL_EXT_IPS1; 2643 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2644 sc->sc_ctrl_ext |= 2644 sc->sc_ctrl_ext |=
2645 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) << 2645 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2646 CTRL_EXT_SWDPIO_SHIFT; 2646 CTRL_EXT_SWDPIO_SHIFT;
2647 sc->sc_ctrl_ext |= 2647 sc->sc_ctrl_ext |=
2648 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) << 2648 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2649 CTRL_EXT_SWDPINS_SHIFT; 2649 CTRL_EXT_SWDPINS_SHIFT;
2650 } else { 2650 } else {
2651 sc->sc_ctrl_ext |= 2651 sc->sc_ctrl_ext |=
2652 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) << 2652 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2653 CTRL_EXT_SWDPIO_SHIFT; 2653 CTRL_EXT_SWDPIO_SHIFT;
2654 } 2654 }
2655#endif 2655#endif
2656 2656
2657 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 2657 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2658#if 0 2658#if 0
2659 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext); 2659 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2660#endif 2660#endif
2661 2661
2662 if (sc->sc_type == WM_T_PCH) { 2662 if (sc->sc_type == WM_T_PCH) {
2663 uint16_t val; 2663 uint16_t val;
2664 2664
2665 /* Save the NVM K1 bit setting */ 2665 /* Save the NVM K1 bit setting */
2666 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val); 2666 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2667 2667
2668 if ((val & NVM_K1_CONFIG_ENABLE) != 0) 2668 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2669 sc->sc_nvm_k1_enabled = 1; 2669 sc->sc_nvm_k1_enabled = 1;
2670 else 2670 else
2671 sc->sc_nvm_k1_enabled = 0; 2671 sc->sc_nvm_k1_enabled = 0;
2672 } 2672 }
2673 2673
2674 /* Determine if we're GMII, TBI, SERDES or SGMII mode */ 2674 /* Determine if we're GMII, TBI, SERDES or SGMII mode */
2675 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9 2675 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2676 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH 2676 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2677 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT 2677 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2678 || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP 2678 || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
2679 || sc->sc_type == WM_T_82573 2679 || sc->sc_type == WM_T_82573
2680 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) { 2680 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2681 /* Copper only */ 2681 /* Copper only */
2682 } else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576) 2682 } else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2683 || (sc->sc_type ==WM_T_82580) || (sc->sc_type ==WM_T_I350) 2683 || (sc->sc_type ==WM_T_82580) || (sc->sc_type ==WM_T_I350)
2684 || (sc->sc_type ==WM_T_I354) || (sc->sc_type ==WM_T_I210) 2684 || (sc->sc_type ==WM_T_I354) || (sc->sc_type ==WM_T_I210)
2685 || (sc->sc_type ==WM_T_I211)) { 2685 || (sc->sc_type ==WM_T_I211)) {
2686 reg = CSR_READ(sc, WMREG_CTRL_EXT); 2686 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2687 link_mode = reg & CTRL_EXT_LINK_MODE_MASK; 2687 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2688 switch (link_mode) { 2688 switch (link_mode) {
2689 case CTRL_EXT_LINK_MODE_1000KX: 2689 case CTRL_EXT_LINK_MODE_1000KX:
2690 aprint_verbose_dev(sc->sc_dev, "1000KX\n"); 2690 aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2691 sc->sc_mediatype = WM_MEDIATYPE_SERDES; 2691 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2692 break; 2692 break;
2693 case CTRL_EXT_LINK_MODE_SGMII: 2693 case CTRL_EXT_LINK_MODE_SGMII:
2694 if (wm_sgmii_uses_mdio(sc)) { 2694 if (wm_sgmii_uses_mdio(sc)) {
2695 aprint_verbose_dev(sc->sc_dev, 2695 aprint_verbose_dev(sc->sc_dev,
2696 "SGMII(MDIO)\n"); 2696 "SGMII(MDIO)\n");
2697 sc->sc_flags |= WM_F_SGMII; 2697 sc->sc_flags |= WM_F_SGMII;
2698 sc->sc_mediatype = WM_MEDIATYPE_COPPER; 2698 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2699 break; 2699 break;
2700 } 2700 }
2701 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n"); 2701 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2702 /*FALLTHROUGH*/ 2702 /*FALLTHROUGH*/
2703 case CTRL_EXT_LINK_MODE_PCIE_SERDES: 2703 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2704 sc->sc_mediatype = wm_sfp_get_media_type(sc); 2704 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2705 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) { 2705 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2706 if (link_mode 2706 if (link_mode
2707 == CTRL_EXT_LINK_MODE_SGMII) { 2707 == CTRL_EXT_LINK_MODE_SGMII) {
2708 sc->sc_mediatype = WM_MEDIATYPE_COPPER; 2708 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2709 sc->sc_flags |= WM_F_SGMII; 2709 sc->sc_flags |= WM_F_SGMII;
2710 } else { 2710 } else {
2711 sc->sc_mediatype = WM_MEDIATYPE_SERDES; 2711 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2712 aprint_verbose_dev(sc->sc_dev, 2712 aprint_verbose_dev(sc->sc_dev,
2713 "SERDES\n"); 2713 "SERDES\n");
2714 } 2714 }
2715 break; 2715 break;
2716 } 2716 }
2717 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) 2717 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2718 aprint_verbose_dev(sc->sc_dev, "SERDES\n"); 2718 aprint_verbose_dev(sc->sc_dev, "SERDES\n");
2719 2719
2720 /* Change current link mode setting */ 2720 /* Change current link mode setting */
2721 reg &= ~CTRL_EXT_LINK_MODE_MASK; 2721 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2722 switch (sc->sc_mediatype) { 2722 switch (sc->sc_mediatype) {
2723 case WM_MEDIATYPE_COPPER: 2723 case WM_MEDIATYPE_COPPER:
2724 reg |= CTRL_EXT_LINK_MODE_SGMII; 2724 reg |= CTRL_EXT_LINK_MODE_SGMII;
2725 break; 2725 break;
2726 case WM_MEDIATYPE_SERDES: 2726 case WM_MEDIATYPE_SERDES:
2727 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES; 2727 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2728 break; 2728 break;
2729 default: 2729 default:
2730 break; 2730 break;
2731 } 2731 }
2732 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 2732 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2733 break; 2733 break;
2734 case CTRL_EXT_LINK_MODE_GMII: 2734 case CTRL_EXT_LINK_MODE_GMII:
2735 default: 2735 default:
2736 aprint_verbose_dev(sc->sc_dev, "Copper\n"); 2736 aprint_verbose_dev(sc->sc_dev, "Copper\n");
2737 sc->sc_mediatype = WM_MEDIATYPE_COPPER; 2737 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2738 break; 2738 break;
2739 } 2739 }
2740 2740
2741 reg &= ~CTRL_EXT_I2C_ENA; 2741 reg &= ~CTRL_EXT_I2C_ENA;
2742 if ((sc->sc_flags & WM_F_SGMII) != 0) 2742 if ((sc->sc_flags & WM_F_SGMII) != 0)
2743 reg |= CTRL_EXT_I2C_ENA; 2743 reg |= CTRL_EXT_I2C_ENA;
2744 else 2744 else
2745 reg &= ~CTRL_EXT_I2C_ENA; 2745 reg &= ~CTRL_EXT_I2C_ENA;
2746 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 2746 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2747 } else if (sc->sc_type < WM_T_82543 || 2747 } else if (sc->sc_type < WM_T_82543 ||
2748 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) { 2748 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2749 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) { 2749 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2750 aprint_error_dev(sc->sc_dev, 2750 aprint_error_dev(sc->sc_dev,
2751 "WARNING: TBIMODE set on 1000BASE-T product!\n"); 2751 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2752 sc->sc_mediatype = WM_MEDIATYPE_FIBER; 2752 sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2753 } 2753 }
2754 } else { 2754 } else {
2755 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) { 2755 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
2756 aprint_error_dev(sc->sc_dev, 2756 aprint_error_dev(sc->sc_dev,
2757 "WARNING: TBIMODE clear on 1000BASE-X product!\n"); 2757 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2758 sc->sc_mediatype = WM_MEDIATYPE_COPPER; 2758 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2759 } 2759 }
2760 } 2760 }
2761 snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags); 2761 snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
2762 aprint_verbose_dev(sc->sc_dev, "%s\n", buf); 2762 aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
2763 2763
2764 /* Set device properties (macflags) */ 2764 /* Set device properties (macflags) */
2765 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags); 2765 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
2766 2766
2767 /* Initialize the media structures accordingly. */ 2767 /* Initialize the media structures accordingly. */
2768 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) 2768 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2769 wm_gmii_mediainit(sc, wmp->wmp_product); 2769 wm_gmii_mediainit(sc, wmp->wmp_product);
2770 else 2770 else
2771 wm_tbi_mediainit(sc); /* All others */ 2771 wm_tbi_mediainit(sc); /* All others */
2772 2772
2773 ifp = &sc->sc_ethercom.ec_if; 2773 ifp = &sc->sc_ethercom.ec_if;
2774 xname = device_xname(sc->sc_dev); 2774 xname = device_xname(sc->sc_dev);
2775 strlcpy(ifp->if_xname, xname, IFNAMSIZ); 2775 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2776 ifp->if_softc = sc; 2776 ifp->if_softc = sc;
2777 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2777 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2778#ifdef WM_MPSAFE 2778#ifdef WM_MPSAFE
2779 ifp->if_extflags = IFEF_MPSAFE; 2779 ifp->if_extflags = IFEF_MPSAFE;
2780#endif 2780#endif
2781 ifp->if_ioctl = wm_ioctl; 2781 ifp->if_ioctl = wm_ioctl;
2782 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { 2782 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
2783 ifp->if_start = wm_nq_start; 2783 ifp->if_start = wm_nq_start;
2784 /* 2784 /*
2785 * When the number of CPUs is one and the controller can use 2785 * When the number of CPUs is one and the controller can use
2786 * MSI-X, wm(4) use MSI-X but *does not* use multiqueue. 2786 * MSI-X, wm(4) use MSI-X but *does not* use multiqueue.
2787 * That is, wm(4) use two interrupts, one is used for Tx/Rx 2787 * That is, wm(4) use two interrupts, one is used for Tx/Rx
2788 * and the other is used for link status changing. 2788 * and the other is used for link status changing.
2789 * In this situation, wm_nq_transmit() is disadvantageous 2789 * In this situation, wm_nq_transmit() is disadvantageous
2790 * because of wm_select_txqueue() and pcq(9) overhead. 2790 * because of wm_select_txqueue() and pcq(9) overhead.
2791 */ 2791 */
2792 if (wm_is_using_multiqueue(sc)) 2792 if (wm_is_using_multiqueue(sc))
2793 ifp->if_transmit = wm_nq_transmit; 2793 ifp->if_transmit = wm_nq_transmit;
2794 } else { 2794 } else {
2795 ifp->if_start = wm_start; 2795 ifp->if_start = wm_start;
2796 /* 2796 /*
2797 * wm_transmit() has the same disadvantage as wm_transmit(). 2797 * wm_transmit() has the same disadvantage as wm_transmit().
2798 */ 2798 */
2799 if (wm_is_using_multiqueue(sc)) 2799 if (wm_is_using_multiqueue(sc))
2800 ifp->if_transmit = wm_transmit; 2800 ifp->if_transmit = wm_transmit;
2801 } 2801 }
2802 /* wm(4) doest not use ifp->if_watchdog, use wm_tick as watchdog. */ 2802 /* wm(4) doest not use ifp->if_watchdog, use wm_tick as watchdog. */
2803 ifp->if_init = wm_init; 2803 ifp->if_init = wm_init;
2804 ifp->if_stop = wm_stop; 2804 ifp->if_stop = wm_stop;
2805 IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN)); 2805 IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
2806 IFQ_SET_READY(&ifp->if_snd); 2806 IFQ_SET_READY(&ifp->if_snd);
2807 2807
2808 /* Check for jumbo frame */ 2808 /* Check for jumbo frame */
2809 switch (sc->sc_type) { 2809 switch (sc->sc_type) {
2810 case WM_T_82573: 2810 case WM_T_82573:
2811 /* XXX limited to 9234 if ASPM is disabled */ 2811 /* XXX limited to 9234 if ASPM is disabled */
2812 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword); 2812 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2813 if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0) 2813 if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2814 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 2814 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2815 break; 2815 break;
2816 case WM_T_82571: 2816 case WM_T_82571:
2817 case WM_T_82572: 2817 case WM_T_82572:
2818 case WM_T_82574: 2818 case WM_T_82574:
2819 case WM_T_82583: 2819 case WM_T_82583:
2820 case WM_T_82575: 2820 case WM_T_82575:
2821 case WM_T_82576: 2821 case WM_T_82576:
2822 case WM_T_82580: 2822 case WM_T_82580:
2823 case WM_T_I350: 2823 case WM_T_I350:
2824 case WM_T_I354: 2824 case WM_T_I354:
2825 case WM_T_I210: 2825 case WM_T_I210:
2826 case WM_T_I211: 2826 case WM_T_I211:
2827 case WM_T_80003: 2827 case WM_T_80003:
2828 case WM_T_ICH9: 2828 case WM_T_ICH9:
2829 case WM_T_ICH10: 2829 case WM_T_ICH10:
2830 case WM_T_PCH2: /* PCH2 supports 9K frame size */ 2830 case WM_T_PCH2: /* PCH2 supports 9K frame size */
2831 case WM_T_PCH_LPT: 2831 case WM_T_PCH_LPT:
2832 case WM_T_PCH_SPT: 2832 case WM_T_PCH_SPT:
2833 case WM_T_PCH_CNP: 2833 case WM_T_PCH_CNP:
2834 /* XXX limited to 9234 */ 2834 /* XXX limited to 9234 */
2835 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 2835 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2836 break; 2836 break;
2837 case WM_T_PCH: 2837 case WM_T_PCH:
2838 /* XXX limited to 4096 */ 2838 /* XXX limited to 4096 */
2839 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 2839 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2840 break; 2840 break;
2841 case WM_T_82542_2_0: 2841 case WM_T_82542_2_0:
2842 case WM_T_82542_2_1: 2842 case WM_T_82542_2_1:
2843 case WM_T_ICH8: 2843 case WM_T_ICH8:
2844 /* No support for jumbo frame */ 2844 /* No support for jumbo frame */
2845 break; 2845 break;
2846 default: 2846 default:
2847 /* ETHER_MAX_LEN_JUMBO */ 2847 /* ETHER_MAX_LEN_JUMBO */
2848 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 2848 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2849 break; 2849 break;
2850 } 2850 }
2851 2851
2852 /* If we're a i82543 or greater, we can support VLANs. */ 2852 /* If we're a i82543 or greater, we can support VLANs. */
2853 if (sc->sc_type >= WM_T_82543) 2853 if (sc->sc_type >= WM_T_82543)
2854 sc->sc_ethercom.ec_capabilities |= 2854 sc->sc_ethercom.ec_capabilities |=
2855 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING; 2855 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2856 2856
2857 /* 2857 /*
2858 * We can perform TCPv4 and UDPv4 checkums in-bound. Only 2858 * We can perform TCPv4 and UDPv4 checkums in-bound. Only
2859 * on i82543 and later. 2859 * on i82543 and later.
2860 */ 2860 */
2861 if (sc->sc_type >= WM_T_82543) { 2861 if (sc->sc_type >= WM_T_82543) {
2862 ifp->if_capabilities |= 2862 ifp->if_capabilities |=
2863 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 2863 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2864 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 2864 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2865 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx | 2865 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2866 IFCAP_CSUM_TCPv6_Tx | 2866 IFCAP_CSUM_TCPv6_Tx |
2867 IFCAP_CSUM_UDPv6_Tx; 2867 IFCAP_CSUM_UDPv6_Tx;
2868 } 2868 }
2869 2869
2870 /* 2870 /*
2871 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL. 2871 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2872 * 2872 *
2873 * 82541GI (8086:1076) ... no 2873 * 82541GI (8086:1076) ... no
2874 * 82572EI (8086:10b9) ... yes 2874 * 82572EI (8086:10b9) ... yes
2875 */ 2875 */
2876 if (sc->sc_type >= WM_T_82571) { 2876 if (sc->sc_type >= WM_T_82571) {
2877 ifp->if_capabilities |= 2877 ifp->if_capabilities |=
2878 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx; 2878 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2879 } 2879 }
2880 2880
2881 /* 2881 /*
2882 * If we're a i82544 or greater (except i82547), we can do 2882 * If we're a i82544 or greater (except i82547), we can do
2883 * TCP segmentation offload. 2883 * TCP segmentation offload.
2884 */ 2884 */
2885 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) { 2885 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2886 ifp->if_capabilities |= IFCAP_TSOv4; 2886 ifp->if_capabilities |= IFCAP_TSOv4;
2887 } 2887 }
2888 2888
2889 if (sc->sc_type >= WM_T_82571) { 2889 if (sc->sc_type >= WM_T_82571) {
2890 ifp->if_capabilities |= IFCAP_TSOv6; 2890 ifp->if_capabilities |= IFCAP_TSOv6;
2891 } 2891 }
2892 2892
2893 sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT; 2893 sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
2894 sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT; 2894 sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
2895 sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT; 2895 sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
2896 sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT; 2896 sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
2897 2897
2898#ifdef WM_MPSAFE 2898#ifdef WM_MPSAFE
2899 sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); 2899 sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2900#else 2900#else
2901 sc->sc_core_lock = NULL; 2901 sc->sc_core_lock = NULL;
2902#endif 2902#endif
2903 2903
2904 /* Attach the interface. */ 2904 /* Attach the interface. */
2905 error = if_initialize(ifp); 2905 error = if_initialize(ifp);
2906 if (error != 0) { 2906 if (error != 0) {
2907 aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n", 2907 aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
2908 error); 2908 error);
2909 return; /* Error */ 2909 return; /* Error */
2910 } 2910 }
2911 sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if); 2911 sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
2912 ether_ifattach(ifp, enaddr); 2912 ether_ifattach(ifp, enaddr);
2913 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb); 2913 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2914 if_register(ifp); 2914 if_register(ifp);
2915 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 2915 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2916 RND_FLAG_DEFAULT); 2916 RND_FLAG_DEFAULT);
2917 2917
2918#ifdef WM_EVENT_COUNTERS 2918#ifdef WM_EVENT_COUNTERS
2919 /* Attach event counters. */ 2919 /* Attach event counters. */
2920 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR, 2920 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2921 NULL, xname, "linkintr"); 2921 NULL, xname, "linkintr");
2922 2922
2923 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC, 2923 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2924 NULL, xname, "tx_xoff"); 2924 NULL, xname, "tx_xoff");
2925 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC, 2925 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2926 NULL, xname, "tx_xon"); 2926 NULL, xname, "tx_xon");
2927 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC, 2927 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2928 NULL, xname, "rx_xoff"); 2928 NULL, xname, "rx_xoff");
2929 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC, 2929 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2930 NULL, xname, "rx_xon"); 2930 NULL, xname, "rx_xon");
2931 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC, 2931 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2932 NULL, xname, "rx_macctl"); 2932 NULL, xname, "rx_macctl");
2933#endif /* WM_EVENT_COUNTERS */ 2933#endif /* WM_EVENT_COUNTERS */
2934 2934
2935 if (pmf_device_register(self, wm_suspend, wm_resume)) 2935 if (pmf_device_register(self, wm_suspend, wm_resume))
2936 pmf_class_network_register(self, ifp); 2936 pmf_class_network_register(self, ifp);
2937 else 2937 else
2938 aprint_error_dev(self, "couldn't establish power handler\n"); 2938 aprint_error_dev(self, "couldn't establish power handler\n");
2939 2939
2940 sc->sc_flags |= WM_F_ATTACHED; 2940 sc->sc_flags |= WM_F_ATTACHED;
2941out: 2941out:
2942 return; 2942 return;
2943} 2943}
2944 2944
2945/* The detach function (ca_detach) */ 2945/* The detach function (ca_detach) */
2946static int 2946static int
2947wm_detach(device_t self, int flags __unused) 2947wm_detach(device_t self, int flags __unused)
2948{ 2948{
2949 struct wm_softc *sc = device_private(self); 2949 struct wm_softc *sc = device_private(self);
2950 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2950 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2951 int i; 2951 int i;
2952 2952
2953 if ((sc->sc_flags & WM_F_ATTACHED) == 0) 2953 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2954 return 0; 2954 return 0;
2955 2955
2956 /* Stop the interface. Callouts are stopped in it. */ 2956 /* Stop the interface. Callouts are stopped in it. */
2957 wm_stop(ifp, 1); 2957 wm_stop(ifp, 1);
2958 2958
2959 pmf_device_deregister(self); 2959 pmf_device_deregister(self);
2960 2960
2961#ifdef WM_EVENT_COUNTERS 2961#ifdef WM_EVENT_COUNTERS
2962 evcnt_detach(&sc->sc_ev_linkintr); 2962 evcnt_detach(&sc->sc_ev_linkintr);
2963 2963
2964 evcnt_detach(&sc->sc_ev_tx_xoff); 2964 evcnt_detach(&sc->sc_ev_tx_xoff);
2965 evcnt_detach(&sc->sc_ev_tx_xon); 2965 evcnt_detach(&sc->sc_ev_tx_xon);
2966 evcnt_detach(&sc->sc_ev_rx_xoff); 2966 evcnt_detach(&sc->sc_ev_rx_xoff);
2967 evcnt_detach(&sc->sc_ev_rx_xon); 2967 evcnt_detach(&sc->sc_ev_rx_xon);
2968 evcnt_detach(&sc->sc_ev_rx_macctl); 2968 evcnt_detach(&sc->sc_ev_rx_macctl);
2969#endif /* WM_EVENT_COUNTERS */ 2969#endif /* WM_EVENT_COUNTERS */
2970 2970
2971 /* Tell the firmware about the release */ 2971 /* Tell the firmware about the release */
2972 WM_CORE_LOCK(sc); 2972 WM_CORE_LOCK(sc);
2973 wm_release_manageability(sc); 2973 wm_release_manageability(sc);
2974 wm_release_hw_control(sc); 2974 wm_release_hw_control(sc);
2975 wm_enable_wakeup(sc); 2975 wm_enable_wakeup(sc);
2976 WM_CORE_UNLOCK(sc); 2976 WM_CORE_UNLOCK(sc);
2977 2977
2978 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY); 2978 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2979 2979
2980 /* Delete all remaining media. */ 2980 /* Delete all remaining media. */
2981 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY); 2981 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2982 2982
2983 ether_ifdetach(ifp); 2983 ether_ifdetach(ifp);
2984 if_detach(ifp); 2984 if_detach(ifp);
2985 if_percpuq_destroy(sc->sc_ipq); 2985 if_percpuq_destroy(sc->sc_ipq);
2986 2986
2987 /* Unload RX dmamaps and free mbufs */ 2987 /* Unload RX dmamaps and free mbufs */
2988 for (i = 0; i < sc->sc_nqueues; i++) { 2988 for (i = 0; i < sc->sc_nqueues; i++) {
2989 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq; 2989 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
2990 mutex_enter(rxq->rxq_lock); 2990 mutex_enter(rxq->rxq_lock);
2991 wm_rxdrain(rxq); 2991 wm_rxdrain(rxq);
2992 mutex_exit(rxq->rxq_lock); 2992 mutex_exit(rxq->rxq_lock);
2993 } 2993 }
2994 /* Must unlock here */ 2994 /* Must unlock here */
2995 2995
2996 /* Disestablish the interrupt handler */ 2996 /* Disestablish the interrupt handler */
2997 for (i = 0; i < sc->sc_nintrs; i++) { 2997 for (i = 0; i < sc->sc_nintrs; i++) {
2998 if (sc->sc_ihs[i] != NULL) { 2998 if (sc->sc_ihs[i] != NULL) {
2999 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]); 2999 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
3000 sc->sc_ihs[i] = NULL; 3000 sc->sc_ihs[i] = NULL;
3001 } 3001 }
3002 } 3002 }
3003 pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs); 3003 pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
3004 3004
3005 wm_free_txrx_queues(sc); 3005 wm_free_txrx_queues(sc);
3006 3006
3007 /* Unmap the registers */ 3007 /* Unmap the registers */
3008 if (sc->sc_ss) { 3008 if (sc->sc_ss) {
3009 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss); 3009 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
3010 sc->sc_ss = 0; 3010 sc->sc_ss = 0;
3011 } 3011 }
3012 if (sc->sc_ios) { 3012 if (sc->sc_ios) {
3013 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios); 3013 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
3014 sc->sc_ios = 0; 3014 sc->sc_ios = 0;
3015 } 3015 }
3016 if (sc->sc_flashs) { 3016 if (sc->sc_flashs) {
3017 bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs); 3017 bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
3018 sc->sc_flashs = 0; 3018 sc->sc_flashs = 0;
3019 } 3019 }
3020 3020
3021 if (sc->sc_core_lock) 3021 if (sc->sc_core_lock)
3022 mutex_obj_free(sc->sc_core_lock); 3022 mutex_obj_free(sc->sc_core_lock);
3023 if (sc->sc_ich_phymtx) 3023 if (sc->sc_ich_phymtx)
3024 mutex_obj_free(sc->sc_ich_phymtx); 3024 mutex_obj_free(sc->sc_ich_phymtx);
3025 if (sc->sc_ich_nvmmtx) 3025 if (sc->sc_ich_nvmmtx)
3026 mutex_obj_free(sc->sc_ich_nvmmtx); 3026 mutex_obj_free(sc->sc_ich_nvmmtx);
3027 3027
3028 return 0; 3028 return 0;
3029} 3029}
3030 3030
3031static bool 3031static bool
3032wm_suspend(device_t self, const pmf_qual_t *qual) 3032wm_suspend(device_t self, const pmf_qual_t *qual)
3033{ 3033{
3034 struct wm_softc *sc = device_private(self); 3034 struct wm_softc *sc = device_private(self);
3035 3035
3036 wm_release_manageability(sc); 3036 wm_release_manageability(sc);
3037 wm_release_hw_control(sc); 3037 wm_release_hw_control(sc);
3038 wm_enable_wakeup(sc); 3038 wm_enable_wakeup(sc);
3039 3039
3040 return true; 3040 return true;
3041} 3041}
3042 3042
/*
 * wm_resume:
 *
 *	pmf(9) resume handler.  Report and clear any pending wakeup
 *	status, apply PCH-family resume workarounds, and bring the
 *	hardware back under driver control.
 */
static bool
wm_resume(device_t self, const pmf_qual_t *qual)
{
	struct wm_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pcireg_t reg;
	char buf[256];

	/* If the chip recorded a wakeup cause, print it and clear it. */
	reg = CSR_READ(sc, WMREG_WUS);
	if (reg != 0) {
		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
	}

	if (sc->sc_type >= WM_T_PCH2)
		wm_resume_workarounds_pchlan(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		/* Interface is down: re-take the hardware ourselves. */
		wm_reset(sc);
		/* Non-AMT based hardware can now take control from firmware */
		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
			wm_get_hw_control(sc);
		wm_init_manageability(sc);
	} else {
		/*
		 * We called pmf_class_network_register(), so if_init() is
		 * automatically called when IFF_UP. wm_reset(),
		 * wm_get_hw_control() and wm_init_manageability() are called
		 * via wm_init().
		 */
	}

	return true;
}
3077 3077
3078/* 3078/*
3079 * wm_watchdog: [ifnet interface function] 3079 * wm_watchdog: [ifnet interface function]
3080 * 3080 *
3081 * Watchdog timer handler. 3081 * Watchdog timer handler.
3082 */ 3082 */
3083static void 3083static void
3084wm_watchdog(struct ifnet *ifp) 3084wm_watchdog(struct ifnet *ifp)
3085{ 3085{
3086 int qid; 3086 int qid;
3087 struct wm_softc *sc = ifp->if_softc; 3087 struct wm_softc *sc = ifp->if_softc;
3088 uint16_t hang_queue = 0; /* Max queue number of wm(4) is 82576's 16. */ 3088 uint16_t hang_queue = 0; /* Max queue number of wm(4) is 82576's 16. */
3089 3089
3090 for (qid = 0; qid < sc->sc_nqueues; qid++) { 3090 for (qid = 0; qid < sc->sc_nqueues; qid++) {
3091 struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq; 3091 struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
3092 3092
3093 wm_watchdog_txq(ifp, txq, &hang_queue); 3093 wm_watchdog_txq(ifp, txq, &hang_queue);
3094 } 3094 }
3095 3095
3096 /* 3096 /*
3097 * IF any of queues hanged up, reset the interface. 3097 * IF any of queues hanged up, reset the interface.
3098 */ 3098 */
3099 if (hang_queue != 0) { 3099 if (hang_queue != 0) {
3100 (void) wm_init(ifp); 3100 (void) wm_init(ifp);
3101 3101
3102 /* 3102 /*
3103 * There are still some upper layer processing which call 3103 * There are still some upper layer processing which call
3104 * ifp->if_start(). e.g. ALTQ or one CPU system 3104 * ifp->if_start(). e.g. ALTQ or one CPU system
3105 */ 3105 */
3106 /* Try to get more packets going. */ 3106 /* Try to get more packets going. */
3107 ifp->if_start(ifp); 3107 ifp->if_start(ifp);
3108 } 3108 }
3109} 3109}
3110 3110
3111 3111
3112static void 3112static void
3113wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang) 3113wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
3114{ 3114{
3115 3115
3116 mutex_enter(txq->txq_lock); 3116 mutex_enter(txq->txq_lock);
3117 if (txq->txq_sending && 3117 if (txq->txq_sending &&
3118 time_uptime - txq->txq_lastsent > wm_watchdog_timeout) { 3118 time_uptime - txq->txq_lastsent > wm_watchdog_timeout) {
3119 wm_watchdog_txq_locked(ifp, txq, hang); 3119 wm_watchdog_txq_locked(ifp, txq, hang);
3120 } 3120 }
3121 mutex_exit(txq->txq_lock); 3121 mutex_exit(txq->txq_lock);
3122} 3122}
3123 3123
/*
 * wm_watchdog_txq_locked:
 *
 *	Watchdog processing for a single TX queue; txq_lock must be
 *	held.  Sweeps completed descriptors first, then flags the
 *	queue as hung in *hang (one bit per queue id) if it is still
 *	blocked, and logs diagnostic state.
 */
static void
wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
    uint16_t *hang)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);

	KASSERT(mutex_owned(txq->txq_lock));

	/*
	 * Since we're using delayed interrupts, sweep up
	 * before we report an error.
	 */
	wm_txeof(txq, UINT_MAX);

	/* Still sending after the sweep: mark this queue as hung. */
	if (txq->txq_sending)
		*hang |= __BIT(wmq->wmq_id);

	if (txq->txq_free == WM_NTXDESC(txq)) {
		/* All descriptors free, yet we timed out: missed interrupt. */
		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
		    device_xname(sc->sc_dev));
	} else {
#ifdef WM_DEBUG
		int i, j;
		struct wm_txsoft *txs;
#endif
		log(LOG_ERR,
		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
		    txq->txq_next);
		ifp->if_oerrors++;
#ifdef WM_DEBUG
		/* Dump every in-flight job and its hardware descriptors. */
		for (i = txq->txq_sdirty; i != txq->txq_snext;
		    i = WM_NEXTTXS(txq, i)) {
			txs = &txq->txq_soft[i];
			printf("txs %d tx %d -> %d\n",
			    i, txs->txs_firstdesc, txs->txs_lastdesc);
			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
					/* New-queue (advanced) descriptor layout. */
					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
					printf("\t %#08x%08x\n",
					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
				} else {
					/* Legacy descriptor layout. */
					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
					    txq->txq_descs[j].wtx_addr.wa_low);
					printf("\t %#04x%02x%02x%08x\n",
					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
					    txq->txq_descs[j].wtx_fields.wtxu_options,
					    txq->txq_descs[j].wtx_fields.wtxu_status,
					    txq->txq_descs[j].wtx_cmdlen);
				}
				if (j == txs->txs_lastdesc)
					break;
			}
		}
#endif
	}
}
3185 3185
/*
 * wm_tick:
 *
 *	One second timer, used to check link status, sweep up
 *	completed transmit jobs, etc.
 */
static void
wm_tick(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
#ifndef WM_MPSAFE
	int s = splnet();
#endif

	WM_CORE_LOCK(sc);

	/* Bail out if the interface is being torn down. */
	if (sc->sc_core_stopping) {
		WM_CORE_UNLOCK(sc);
#ifndef WM_MPSAFE
		splx(s);
#endif
		return;
	}

	/* Harvest flow-control statistics (registers are clear-on-read). */
	if (sc->sc_type >= WM_T_82542_2_1) {
		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
	}

	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
	ifp->if_ierrors += 0ULL /* ensure quad_t */
	    + CSR_READ(sc, WMREG_CRCERRS)
	    + CSR_READ(sc, WMREG_ALGNERRC)
	    + CSR_READ(sc, WMREG_SYMERRC)
	    + CSR_READ(sc, WMREG_RXERRC)
	    + CSR_READ(sc, WMREG_SEC)
	    + CSR_READ(sc, WMREG_CEXTERR)
	    + CSR_READ(sc, WMREG_RLEC);
	/*
	 * WMREG_RNBC is incremented when there is no available buffers in host
	 * memory. It does not mean the number of dropped packets. Because
	 * ethernet controller can receive packets in such case if there is
	 * space in phy's FIFO.
	 *
	 * If you want to know the number of WMREG_RNBC, you should use such as
	 * own EVCNT instead of if_iqdrops.
	 */
	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);

	/* Poll the link via the appropriate mechanism for the media type. */
	if (sc->sc_flags & WM_F_HAS_MII)
		mii_tick(&sc->sc_mii);
	else if ((sc->sc_type >= WM_T_82575)
	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
		wm_serdes_tick(sc);
	else
		wm_tbi_tick(sc);

	WM_CORE_UNLOCK(sc);

	/* Run the TX watchdog without holding the core lock. */
	wm_watchdog(ifp);

	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
}
3253 3253
3254static int 3254static int
3255wm_ifflags_cb(struct ethercom *ec) 3255wm_ifflags_cb(struct ethercom *ec)
3256{ 3256{
3257 struct ifnet *ifp = &ec->ec_if; 3257 struct ifnet *ifp = &ec->ec_if;
3258 struct wm_softc *sc = ifp->if_softc; 3258 struct wm_softc *sc = ifp->if_softc;
3259 int rc = 0; 3259 int rc = 0;
3260 3260
3261 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", 3261 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3262 device_xname(sc->sc_dev), __func__)); 3262 device_xname(sc->sc_dev), __func__));
3263 3263
3264 WM_CORE_LOCK(sc); 3264 WM_CORE_LOCK(sc);
3265 3265
3266 int change = ifp->if_flags ^ sc->sc_if_flags; 3266 int change = ifp->if_flags ^ sc->sc_if_flags;
3267 sc->sc_if_flags = ifp->if_flags; 3267 sc->sc_if_flags = ifp->if_flags;
3268 3268
3269 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) { 3269 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
3270 rc = ENETRESET; 3270 rc = ENETRESET;
3271 goto out; 3271 goto out;
3272 } 3272 }
3273 3273
3274 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0) 3274 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
3275 wm_set_filter(sc); 3275 wm_set_filter(sc);
3276 3276
3277 wm_set_vlan(sc); 3277 wm_set_vlan(sc);
3278 3278
3279out: 3279out:
3280 WM_CORE_UNLOCK(sc); 3280 WM_CORE_UNLOCK(sc);
3281 3281
3282 return rc; 3282 return rc;
3283} 3283}
3284 3284
/*
 * wm_ioctl: [ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
static int
wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct sockaddr_dl *sdl;
	int s, error;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

#ifndef WM_MPSAFE
	s = splnet();
#endif
	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		WM_CORE_LOCK(sc);
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0)
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
		WM_CORE_UNLOCK(sc);
#ifdef WM_MPSAFE
		s = splnet();
#endif
		/* ifmedia_ioctl() is called without holding the core lock. */
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
#ifdef WM_MPSAFE
		splx(s);
#endif
		break;
	case SIOCINITIFADDR:
		WM_CORE_LOCK(sc);
		if (ifa->ifa_addr->sa_family == AF_LINK) {
			/* Copy the new link-layer address into if_dl. */
			sdl = satosdl(ifp->if_dl->ifa_addr);
			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
			/* unicast address is first multicast entry */
			wm_set_filter(sc);
			error = 0;
			WM_CORE_UNLOCK(sc);
			break;
		}
		WM_CORE_UNLOCK(sc);
		/*FALLTHROUGH*/
	default:
#ifdef WM_MPSAFE
		s = splnet();
#endif
		/* It may call wm_start, so unlock here */
		error = ether_ioctl(ifp, cmd, data);
#ifdef WM_MPSAFE
		splx(s);
#endif
		if (error != ENETRESET)
			break;

		/* ENETRESET: the request needs hardware state refreshed. */
		error = 0;

		if (cmd == SIOCSIFCAP)
			error = (*ifp->if_init)(ifp);
		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			WM_CORE_LOCK(sc);
			wm_set_filter(sc);
			WM_CORE_UNLOCK(sc);
		}
		break;
	}

#ifndef WM_MPSAFE
	splx(s);
#endif
	return error;
}
3379 3379
3380/* MAC address related */ 3380/* MAC address related */
3381 3381
3382/* 3382/*
3383 * Get the offset of MAC address and return it. 3383 * Get the offset of MAC address and return it.
3384 * If error occured, use offset 0. 3384 * If error occured, use offset 0.
3385 */ 3385 */
3386static uint16_t 3386static uint16_t
3387wm_check_alt_mac_addr(struct wm_softc *sc) 3387wm_check_alt_mac_addr(struct wm_softc *sc)
3388{ 3388{
3389 uint16_t myea[ETHER_ADDR_LEN / 2]; 3389 uint16_t myea[ETHER_ADDR_LEN / 2];
3390 uint16_t offset = NVM_OFF_MACADDR; 3390 uint16_t offset = NVM_OFF_MACADDR;
3391 3391
3392 /* Try to read alternative MAC address pointer */ 3392 /* Try to read alternative MAC address pointer */
3393 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0) 3393 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
3394 return 0; 3394 return 0;
3395 3395
3396 /* Check pointer if it's valid or not. */ 3396 /* Check pointer if it's valid or not. */
3397 if ((offset == 0x0000) || (offset == 0xffff)) 3397 if ((offset == 0x0000) || (offset == 0xffff))
3398 return 0; 3398 return 0;
3399 3399
3400 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid); 3400 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
3401 /* 3401 /*
3402 * Check whether alternative MAC address is valid or not. 3402 * Check whether alternative MAC address is valid or not.
3403 * Some cards have non 0xffff pointer but those don't use 3403 * Some cards have non 0xffff pointer but those don't use