Thu Nov 30 09:24:18 2017 UTC
- 82583 supports jumbo frames. Fixes PR#52773 reported by Shinichi Doyashiki.
- Cleanup comment.


(msaitoh)
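
For reference, a NetBSD Ethernet driver normally advertises jumbo-frame capability
by setting ETHERCAP_JUMBO_MTU in its ethercom capability mask at attach time. A
minimal sketch, assuming the usual wm(4)-style gate on the MAC type (the exact
condition and placement used by this commit may differ):

	/* Sketch only: advertise jumbo-frame capability for MACs that support it. */
	if (sc->sc_type == WM_T_82583)	/* assumed gate; illustrative only */
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
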
diff -r1.545 -r1.546 src/sys/dev/pci/if_wm.c

cvs diff -r1.545 -r1.546 src/sys/dev/pci/if_wm.c

--- src/sys/dev/pci/if_wm.c 2017/11/30 03:53:24 1.545
+++ src/sys/dev/pci/if_wm.c 2017/11/30 09:24:18 1.546
@@ -1,1085 +1,1085 @@
1/* $NetBSD: if_wm.c,v 1.545 2017/11/30 03:53:24 msaitoh Exp $ */ 1/* $NetBSD: if_wm.c,v 1.546 2017/11/30 09:24:18 msaitoh Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc. 4 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc. 7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the 15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution. 16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software 17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement: 18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by 19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc. 20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior 22 * or promote products derived from this software without specific prior
23 * written permission. 23 * written permission.
24 * 24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE. 35 * POSSIBILITY OF SUCH DAMAGE.
36 */ 36 */
37 37
38/******************************************************************************* 38/*******************************************************************************
39 39
40 Copyright (c) 2001-2005, Intel Corporation 40 Copyright (c) 2001-2005, Intel Corporation
41 All rights reserved. 41 All rights reserved.
42  42
43 Redistribution and use in source and binary forms, with or without 43 Redistribution and use in source and binary forms, with or without
44 modification, are permitted provided that the following conditions are met: 44 modification, are permitted provided that the following conditions are met:
45  45
46 1. Redistributions of source code must retain the above copyright notice, 46 1. Redistributions of source code must retain the above copyright notice,
47 this list of conditions and the following disclaimer. 47 this list of conditions and the following disclaimer.
48  48
49 2. Redistributions in binary form must reproduce the above copyright 49 2. Redistributions in binary form must reproduce the above copyright
50 notice, this list of conditions and the following disclaimer in the 50 notice, this list of conditions and the following disclaimer in the
51 documentation and/or other materials provided with the distribution. 51 documentation and/or other materials provided with the distribution.
52  52
53 3. Neither the name of the Intel Corporation nor the names of its 53 3. Neither the name of the Intel Corporation nor the names of its
54 contributors may be used to endorse or promote products derived from 54 contributors may be used to endorse or promote products derived from
55 this software without specific prior written permission. 55 this software without specific prior written permission.
56  56
57 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 57 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 58 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 59 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 60 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 61 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 62 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 63 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 64 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 65 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 66 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67 POSSIBILITY OF SUCH DAMAGE. 67 POSSIBILITY OF SUCH DAMAGE.
68 68
69*******************************************************************************/ 69*******************************************************************************/
70/* 70/*
71 * Device driver for the Intel i8254x family of Gigabit Ethernet chips. 71 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72 * 72 *
73 * TODO (in order of importance): 73 * TODO (in order of importance):
74 * 74 *
75 * - Check XXX'ed comments 75 * - Check XXX'ed comments
76 * - TX Multi queue improvement (refine queue selection logic) 76 * - TX Multi queue improvement (refine queue selection logic)
77 * - Split header buffer for newer descriptors 77 * - Split header buffer for newer descriptors
78 * - EEE (Energy Efficiency Ethernet) 78 * - EEE (Energy Efficiency Ethernet)
79 * - Virtual Function 79 * - Virtual Function
80 * - Set LED correctly (based on contents in EEPROM) 80 * - Set LED correctly (based on contents in EEPROM)
81 * - Rework how parameters are loaded from the EEPROM. 81 * - Rework how parameters are loaded from the EEPROM.
82 * - Image Unique ID 82 * - Image Unique ID
83 */ 83 */
84 84
85#include <sys/cdefs.h> 85#include <sys/cdefs.h>
86__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.545 2017/11/30 03:53:24 msaitoh Exp $"); 86__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.546 2017/11/30 09:24:18 msaitoh Exp $");
87 87
88#ifdef _KERNEL_OPT 88#ifdef _KERNEL_OPT
89#include "opt_net_mpsafe.h" 89#include "opt_net_mpsafe.h"
90#include "opt_if_wm.h" 90#include "opt_if_wm.h"
91#endif 91#endif
92 92
93#include <sys/param.h> 93#include <sys/param.h>
94#include <sys/systm.h> 94#include <sys/systm.h>
95#include <sys/callout.h> 95#include <sys/callout.h>
96#include <sys/mbuf.h> 96#include <sys/mbuf.h>
97#include <sys/malloc.h> 97#include <sys/malloc.h>
98#include <sys/kmem.h> 98#include <sys/kmem.h>
99#include <sys/kernel.h> 99#include <sys/kernel.h>
100#include <sys/socket.h> 100#include <sys/socket.h>
101#include <sys/ioctl.h> 101#include <sys/ioctl.h>
102#include <sys/errno.h> 102#include <sys/errno.h>
103#include <sys/device.h> 103#include <sys/device.h>
104#include <sys/queue.h> 104#include <sys/queue.h>
105#include <sys/syslog.h> 105#include <sys/syslog.h>
106#include <sys/interrupt.h> 106#include <sys/interrupt.h>
107#include <sys/cpu.h> 107#include <sys/cpu.h>
108#include <sys/pcq.h> 108#include <sys/pcq.h>
109 109
110#include <sys/rndsource.h> 110#include <sys/rndsource.h>
111 111
112#include <net/if.h> 112#include <net/if.h>
113#include <net/if_dl.h> 113#include <net/if_dl.h>
114#include <net/if_media.h> 114#include <net/if_media.h>
115#include <net/if_ether.h> 115#include <net/if_ether.h>
116 116
117#include <net/bpf.h> 117#include <net/bpf.h>
118 118
119#include <netinet/in.h> /* XXX for struct ip */ 119#include <netinet/in.h> /* XXX for struct ip */
120#include <netinet/in_systm.h> /* XXX for struct ip */ 120#include <netinet/in_systm.h> /* XXX for struct ip */
121#include <netinet/ip.h> /* XXX for struct ip */ 121#include <netinet/ip.h> /* XXX for struct ip */
122#include <netinet/ip6.h> /* XXX for struct ip6_hdr */ 122#include <netinet/ip6.h> /* XXX for struct ip6_hdr */
123#include <netinet/tcp.h> /* XXX for struct tcphdr */ 123#include <netinet/tcp.h> /* XXX for struct tcphdr */
124 124
125#include <sys/bus.h> 125#include <sys/bus.h>
126#include <sys/intr.h> 126#include <sys/intr.h>
127#include <machine/endian.h> 127#include <machine/endian.h>
128 128
129#include <dev/mii/mii.h> 129#include <dev/mii/mii.h>
130#include <dev/mii/miivar.h> 130#include <dev/mii/miivar.h>
131#include <dev/mii/miidevs.h> 131#include <dev/mii/miidevs.h>
132#include <dev/mii/mii_bitbang.h> 132#include <dev/mii/mii_bitbang.h>
133#include <dev/mii/ikphyreg.h> 133#include <dev/mii/ikphyreg.h>
134#include <dev/mii/igphyreg.h> 134#include <dev/mii/igphyreg.h>
135#include <dev/mii/igphyvar.h> 135#include <dev/mii/igphyvar.h>
136#include <dev/mii/inbmphyreg.h> 136#include <dev/mii/inbmphyreg.h>
137#include <dev/mii/ihphyreg.h> 137#include <dev/mii/ihphyreg.h>
138 138
139#include <dev/pci/pcireg.h> 139#include <dev/pci/pcireg.h>
140#include <dev/pci/pcivar.h> 140#include <dev/pci/pcivar.h>
141#include <dev/pci/pcidevs.h> 141#include <dev/pci/pcidevs.h>
142 142
143#include <dev/pci/if_wmreg.h> 143#include <dev/pci/if_wmreg.h>
144#include <dev/pci/if_wmvar.h> 144#include <dev/pci/if_wmvar.h>
145 145
146#ifdef WM_DEBUG 146#ifdef WM_DEBUG
147#define WM_DEBUG_LINK __BIT(0) 147#define WM_DEBUG_LINK __BIT(0)
148#define WM_DEBUG_TX __BIT(1) 148#define WM_DEBUG_TX __BIT(1)
149#define WM_DEBUG_RX __BIT(2) 149#define WM_DEBUG_RX __BIT(2)
150#define WM_DEBUG_GMII __BIT(3) 150#define WM_DEBUG_GMII __BIT(3)
151#define WM_DEBUG_MANAGE __BIT(4) 151#define WM_DEBUG_MANAGE __BIT(4)
152#define WM_DEBUG_NVM __BIT(5) 152#define WM_DEBUG_NVM __BIT(5)
153#define WM_DEBUG_INIT __BIT(6) 153#define WM_DEBUG_INIT __BIT(6)
154#define WM_DEBUG_LOCK __BIT(7) 154#define WM_DEBUG_LOCK __BIT(7)
155int wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII 155int wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
156 | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK; 156 | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
157 157
158#define DPRINTF(x, y) if (wm_debug & (x)) printf y 158#define DPRINTF(x, y) if (wm_debug & (x)) printf y
159#else 159#else
160#define DPRINTF(x, y) /* nothing */ 160#define DPRINTF(x, y) /* nothing */
161#endif /* WM_DEBUG */ 161#endif /* WM_DEBUG */
162 162
163#ifdef NET_MPSAFE 163#ifdef NET_MPSAFE
164#define WM_MPSAFE 1 164#define WM_MPSAFE 1
165#define CALLOUT_FLAGS CALLOUT_MPSAFE 165#define CALLOUT_FLAGS CALLOUT_MPSAFE
166#else 166#else
167#define CALLOUT_FLAGS 0 167#define CALLOUT_FLAGS 0
168#endif 168#endif
169 169
170/* 170/*
 171 * The maximum number of interrupts this device driver supports. 171 * The maximum number of interrupts this device driver supports.
172 */ 172 */
173#define WM_MAX_NQUEUEINTR 16 173#define WM_MAX_NQUEUEINTR 16
174#define WM_MAX_NINTR (WM_MAX_NQUEUEINTR + 1) 174#define WM_MAX_NINTR (WM_MAX_NQUEUEINTR + 1)
175 175
176#ifndef WM_DISABLE_MSI 176#ifndef WM_DISABLE_MSI
177#define WM_DISABLE_MSI 0 177#define WM_DISABLE_MSI 0
178#endif 178#endif
179#ifndef WM_DISABLE_MSIX 179#ifndef WM_DISABLE_MSIX
180#define WM_DISABLE_MSIX 0 180#define WM_DISABLE_MSIX 0
181#endif 181#endif
182 182
183int wm_disable_msi = WM_DISABLE_MSI; 183int wm_disable_msi = WM_DISABLE_MSI;
184int wm_disable_msix = WM_DISABLE_MSIX; 184int wm_disable_msix = WM_DISABLE_MSIX;
185 185
186/* 186/*
187 * Transmit descriptor list size. Due to errata, we can only have 187 * Transmit descriptor list size. Due to errata, we can only have
188 * 256 hardware descriptors in the ring on < 82544, but we use 4096 188 * 256 hardware descriptors in the ring on < 82544, but we use 4096
189 * on >= 82544. We tell the upper layers that they can queue a lot 189 * on >= 82544. We tell the upper layers that they can queue a lot
190 * of packets, and we go ahead and manage up to 64 (16 for the i82547) 190 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
191 * of them at a time. 191 * of them at a time.
192 * 192 *
193 * We allow up to 256 (!) DMA segments per packet. Pathological packet 193 * We allow up to 256 (!) DMA segments per packet. Pathological packet
194 * chains containing many small mbufs have been observed in zero-copy 194 * chains containing many small mbufs have been observed in zero-copy
195 * situations with jumbo frames. 195 * situations with jumbo frames.
196 */ 196 */
197#define WM_NTXSEGS 256 197#define WM_NTXSEGS 256
198#define WM_IFQUEUELEN 256 198#define WM_IFQUEUELEN 256
199#define WM_TXQUEUELEN_MAX 64 199#define WM_TXQUEUELEN_MAX 64
200#define WM_TXQUEUELEN_MAX_82547 16 200#define WM_TXQUEUELEN_MAX_82547 16
201#define WM_TXQUEUELEN(txq) ((txq)->txq_num) 201#define WM_TXQUEUELEN(txq) ((txq)->txq_num)
202#define WM_TXQUEUELEN_MASK(txq) (WM_TXQUEUELEN(txq) - 1) 202#define WM_TXQUEUELEN_MASK(txq) (WM_TXQUEUELEN(txq) - 1)
203#define WM_TXQUEUE_GC(txq) (WM_TXQUEUELEN(txq) / 8) 203#define WM_TXQUEUE_GC(txq) (WM_TXQUEUELEN(txq) / 8)
204#define WM_NTXDESC_82542 256 204#define WM_NTXDESC_82542 256
205#define WM_NTXDESC_82544 4096 205#define WM_NTXDESC_82544 4096
206#define WM_NTXDESC(txq) ((txq)->txq_ndesc) 206#define WM_NTXDESC(txq) ((txq)->txq_ndesc)
207#define WM_NTXDESC_MASK(txq) (WM_NTXDESC(txq) - 1) 207#define WM_NTXDESC_MASK(txq) (WM_NTXDESC(txq) - 1)
208#define WM_TXDESCS_SIZE(txq) (WM_NTXDESC(txq) * (txq)->txq_descsize) 208#define WM_TXDESCS_SIZE(txq) (WM_NTXDESC(txq) * (txq)->txq_descsize)
209#define WM_NEXTTX(txq, x) (((x) + 1) & WM_NTXDESC_MASK(txq)) 209#define WM_NEXTTX(txq, x) (((x) + 1) & WM_NTXDESC_MASK(txq))
210#define WM_NEXTTXS(txq, x) (((x) + 1) & WM_TXQUEUELEN_MASK(txq)) 210#define WM_NEXTTXS(txq, x) (((x) + 1) & WM_TXQUEUELEN_MASK(txq))
211 211
212#define WM_MAXTXDMA (2 * round_page(IP_MAXPACKET)) /* for TSO */ 212#define WM_MAXTXDMA (2 * round_page(IP_MAXPACKET)) /* for TSO */
213 213
214#define WM_TXINTERQSIZE 256 214#define WM_TXINTERQSIZE 256
215 215
216/* 216/*
217 * Receive descriptor list size. We have one Rx buffer for normal 217 * Receive descriptor list size. We have one Rx buffer for normal
218 * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized 218 * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized
219 * packet. We allocate 256 receive descriptors, each with a 2k 219 * packet. We allocate 256 receive descriptors, each with a 2k
220 * buffer (MCLBYTES), which gives us room for 50 jumbo packets. 220 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
221 */ 221 */
222#define WM_NRXDESC 256 222#define WM_NRXDESC 256
223#define WM_NRXDESC_MASK (WM_NRXDESC - 1) 223#define WM_NRXDESC_MASK (WM_NRXDESC - 1)
224#define WM_NEXTRX(x) (((x) + 1) & WM_NRXDESC_MASK) 224#define WM_NEXTRX(x) (((x) + 1) & WM_NRXDESC_MASK)
225#define WM_PREVRX(x) (((x) - 1) & WM_NRXDESC_MASK) 225#define WM_PREVRX(x) (((x) - 1) & WM_NRXDESC_MASK)
226 226
227#ifndef WM_RX_PROCESS_LIMIT_DEFAULT 227#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
228#define WM_RX_PROCESS_LIMIT_DEFAULT 100U 228#define WM_RX_PROCESS_LIMIT_DEFAULT 100U
229#endif 229#endif
230#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT 230#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
231#define WM_RX_INTR_PROCESS_LIMIT_DEFAULT 0U 231#define WM_RX_INTR_PROCESS_LIMIT_DEFAULT 0U
232#endif 232#endif
233 233
234typedef union txdescs { 234typedef union txdescs {
235 wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544]; 235 wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
236 nq_txdesc_t sctxu_nq_txdescs[WM_NTXDESC_82544]; 236 nq_txdesc_t sctxu_nq_txdescs[WM_NTXDESC_82544];
237} txdescs_t; 237} txdescs_t;
238 238
239typedef union rxdescs { 239typedef union rxdescs {
240 wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC]; 240 wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
241 ext_rxdesc_t sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */ 241 ext_rxdesc_t sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
242 nq_rxdesc_t sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */ 242 nq_rxdesc_t sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
243} rxdescs_t; 243} rxdescs_t;
244 244
245#define WM_CDTXOFF(txq, x) ((txq)->txq_descsize * (x)) 245#define WM_CDTXOFF(txq, x) ((txq)->txq_descsize * (x))
246#define WM_CDRXOFF(rxq, x) ((rxq)->rxq_descsize * (x)) 246#define WM_CDRXOFF(rxq, x) ((rxq)->rxq_descsize * (x))
247 247
248/* 248/*
249 * Software state for transmit jobs. 249 * Software state for transmit jobs.
250 */ 250 */
251struct wm_txsoft { 251struct wm_txsoft {
252 struct mbuf *txs_mbuf; /* head of our mbuf chain */ 252 struct mbuf *txs_mbuf; /* head of our mbuf chain */
253 bus_dmamap_t txs_dmamap; /* our DMA map */ 253 bus_dmamap_t txs_dmamap; /* our DMA map */
254 int txs_firstdesc; /* first descriptor in packet */ 254 int txs_firstdesc; /* first descriptor in packet */
255 int txs_lastdesc; /* last descriptor in packet */ 255 int txs_lastdesc; /* last descriptor in packet */
256 int txs_ndesc; /* # of descriptors used */ 256 int txs_ndesc; /* # of descriptors used */
257}; 257};
258 258
259/* 259/*
260 * Software state for receive buffers. Each descriptor gets a 260 * Software state for receive buffers. Each descriptor gets a
261 * 2k (MCLBYTES) buffer and a DMA map. For packets which fill 261 * 2k (MCLBYTES) buffer and a DMA map. For packets which fill
262 * more than one buffer, we chain them together. 262 * more than one buffer, we chain them together.
263 */ 263 */
264struct wm_rxsoft { 264struct wm_rxsoft {
265 struct mbuf *rxs_mbuf; /* head of our mbuf chain */ 265 struct mbuf *rxs_mbuf; /* head of our mbuf chain */
266 bus_dmamap_t rxs_dmamap; /* our DMA map */ 266 bus_dmamap_t rxs_dmamap; /* our DMA map */
267}; 267};
268 268
269#define WM_LINKUP_TIMEOUT 50 269#define WM_LINKUP_TIMEOUT 50
270 270
271static uint16_t swfwphysem[] = { 271static uint16_t swfwphysem[] = {
272 SWFW_PHY0_SM, 272 SWFW_PHY0_SM,
273 SWFW_PHY1_SM, 273 SWFW_PHY1_SM,
274 SWFW_PHY2_SM, 274 SWFW_PHY2_SM,
275 SWFW_PHY3_SM 275 SWFW_PHY3_SM
276}; 276};
277 277
278static const uint32_t wm_82580_rxpbs_table[] = { 278static const uint32_t wm_82580_rxpbs_table[] = {
279 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 279 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
280}; 280};
281 281
282struct wm_softc; 282struct wm_softc;
283 283
284#ifdef WM_EVENT_COUNTERS 284#ifdef WM_EVENT_COUNTERS
285#define WM_Q_EVCNT_DEFINE(qname, evname) \ 285#define WM_Q_EVCNT_DEFINE(qname, evname) \
286 char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \ 286 char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
287 struct evcnt qname##_ev_##evname; 287 struct evcnt qname##_ev_##evname;
288 288
289#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype) \ 289#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype) \
290 do{ \ 290 do{ \
291 snprintf((q)->qname##_##evname##_evcnt_name, \ 291 snprintf((q)->qname##_##evname##_evcnt_name, \
292 sizeof((q)->qname##_##evname##_evcnt_name), \ 292 sizeof((q)->qname##_##evname##_evcnt_name), \
293 "%s%02d%s", #qname, (qnum), #evname); \ 293 "%s%02d%s", #qname, (qnum), #evname); \
294 evcnt_attach_dynamic(&(q)->qname##_ev_##evname, \ 294 evcnt_attach_dynamic(&(q)->qname##_ev_##evname, \
295 (evtype), NULL, (xname), \ 295 (evtype), NULL, (xname), \
296 (q)->qname##_##evname##_evcnt_name); \ 296 (q)->qname##_##evname##_evcnt_name); \
297 }while(0) 297 }while(0)
298 298
299#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname) \ 299#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname) \
300 WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC) 300 WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
301 301
302#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname) \ 302#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname) \
303 WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR) 303 WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
304 304
305#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum) \ 305#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum) \
306 evcnt_detach(&(q)->qname##_ev_##evname); 306 evcnt_detach(&(q)->qname##_ev_##evname);
307#endif /* WM_EVENT_COUNTERS */ 307#endif /* WM_EVENT_COUNTERS */
308 308
309struct wm_txqueue { 309struct wm_txqueue {
310 kmutex_t *txq_lock; /* lock for tx operations */ 310 kmutex_t *txq_lock; /* lock for tx operations */
311 311
312 struct wm_softc *txq_sc; /* shortcut (skip struct wm_queue) */ 312 struct wm_softc *txq_sc; /* shortcut (skip struct wm_queue) */
313 313
314 /* Software state for the transmit descriptors. */ 314 /* Software state for the transmit descriptors. */
315 int txq_num; /* must be a power of two */ 315 int txq_num; /* must be a power of two */
316 struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX]; 316 struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
317 317
318 /* TX control data structures. */ 318 /* TX control data structures. */
319 int txq_ndesc; /* must be a power of two */ 319 int txq_ndesc; /* must be a power of two */
320 size_t txq_descsize; /* a tx descriptor size */ 320 size_t txq_descsize; /* a tx descriptor size */
321 txdescs_t *txq_descs_u; 321 txdescs_t *txq_descs_u;
322 bus_dmamap_t txq_desc_dmamap; /* control data DMA map */ 322 bus_dmamap_t txq_desc_dmamap; /* control data DMA map */
323 bus_dma_segment_t txq_desc_seg; /* control data segment */ 323 bus_dma_segment_t txq_desc_seg; /* control data segment */
 324 int txq_desc_rseg; /* real number of control segments */ 324 int txq_desc_rseg; /* real number of control segments */
325#define txq_desc_dma txq_desc_dmamap->dm_segs[0].ds_addr 325#define txq_desc_dma txq_desc_dmamap->dm_segs[0].ds_addr
326#define txq_descs txq_descs_u->sctxu_txdescs 326#define txq_descs txq_descs_u->sctxu_txdescs
327#define txq_nq_descs txq_descs_u->sctxu_nq_txdescs 327#define txq_nq_descs txq_descs_u->sctxu_nq_txdescs
328 328
329 bus_addr_t txq_tdt_reg; /* offset of TDT register */ 329 bus_addr_t txq_tdt_reg; /* offset of TDT register */
330 330
331 int txq_free; /* number of free Tx descriptors */ 331 int txq_free; /* number of free Tx descriptors */
332 int txq_next; /* next ready Tx descriptor */ 332 int txq_next; /* next ready Tx descriptor */
333 333
334 int txq_sfree; /* number of free Tx jobs */ 334 int txq_sfree; /* number of free Tx jobs */
335 int txq_snext; /* next free Tx job */ 335 int txq_snext; /* next free Tx job */
336 int txq_sdirty; /* dirty Tx jobs */ 336 int txq_sdirty; /* dirty Tx jobs */
337 337
338 /* These 4 variables are used only on the 82547. */ 338 /* These 4 variables are used only on the 82547. */
339 int txq_fifo_size; /* Tx FIFO size */ 339 int txq_fifo_size; /* Tx FIFO size */
340 int txq_fifo_head; /* current head of FIFO */ 340 int txq_fifo_head; /* current head of FIFO */
341 uint32_t txq_fifo_addr; /* internal address of start of FIFO */ 341 uint32_t txq_fifo_addr; /* internal address of start of FIFO */
342 int txq_fifo_stall; /* Tx FIFO is stalled */ 342 int txq_fifo_stall; /* Tx FIFO is stalled */
343 343
344 /* 344 /*
345 * When ncpu > number of Tx queues, a Tx queue is shared by multiple 345 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
 346 * CPUs. This queue mediates between them without blocking. 346 * CPUs. This queue mediates between them without blocking.
347 */ 347 */
348 pcq_t *txq_interq; 348 pcq_t *txq_interq;
349 349
350 /* 350 /*
 351 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags, 351 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
 352 * to manage the Tx H/W queue's busy flag. 352 * to manage the Tx H/W queue's busy flag.
353 */ 353 */
354 int txq_flags; /* flags for H/W queue, see below */ 354 int txq_flags; /* flags for H/W queue, see below */
355#define WM_TXQ_NO_SPACE 0x1 355#define WM_TXQ_NO_SPACE 0x1
356 356
357 bool txq_stopping; 357 bool txq_stopping;
358 358
359 uint32_t txq_packets; /* for AIM */ 359 uint32_t txq_packets; /* for AIM */
360 uint32_t txq_bytes; /* for AIM */ 360 uint32_t txq_bytes; /* for AIM */
361#ifdef WM_EVENT_COUNTERS 361#ifdef WM_EVENT_COUNTERS
362 WM_Q_EVCNT_DEFINE(txq, txsstall) /* Tx stalled due to no txs */ 362 WM_Q_EVCNT_DEFINE(txq, txsstall) /* Tx stalled due to no txs */
363 WM_Q_EVCNT_DEFINE(txq, txdstall) /* Tx stalled due to no txd */ 363 WM_Q_EVCNT_DEFINE(txq, txdstall) /* Tx stalled due to no txd */
364 WM_Q_EVCNT_DEFINE(txq, txfifo_stall) /* Tx FIFO stalls (82547) */ 364 WM_Q_EVCNT_DEFINE(txq, txfifo_stall) /* Tx FIFO stalls (82547) */
365 WM_Q_EVCNT_DEFINE(txq, txdw) /* Tx descriptor interrupts */ 365 WM_Q_EVCNT_DEFINE(txq, txdw) /* Tx descriptor interrupts */
366 WM_Q_EVCNT_DEFINE(txq, txqe) /* Tx queue empty interrupts */ 366 WM_Q_EVCNT_DEFINE(txq, txqe) /* Tx queue empty interrupts */
367 /* XXX not used? */ 367 /* XXX not used? */
368 368
369 WM_Q_EVCNT_DEFINE(txq, txipsum) /* IP checksums comp. out-bound */ 369 WM_Q_EVCNT_DEFINE(txq, txipsum) /* IP checksums comp. out-bound */
 370 WM_Q_EVCNT_DEFINE(txq, txtusum) /* TCP/UDP cksums comp. out-bound */ 370 WM_Q_EVCNT_DEFINE(txq, txtusum) /* TCP/UDP cksums comp. out-bound */
371 WM_Q_EVCNT_DEFINE(txq, txtusum6) /* TCP/UDP v6 cksums comp. out-bound */ 371 WM_Q_EVCNT_DEFINE(txq, txtusum6) /* TCP/UDP v6 cksums comp. out-bound */
372 WM_Q_EVCNT_DEFINE(txq, txtso) /* TCP seg offload out-bound (IPv4) */ 372 WM_Q_EVCNT_DEFINE(txq, txtso) /* TCP seg offload out-bound (IPv4) */
373 WM_Q_EVCNT_DEFINE(txq, txtso6) /* TCP seg offload out-bound (IPv6) */ 373 WM_Q_EVCNT_DEFINE(txq, txtso6) /* TCP seg offload out-bound (IPv6) */
374 WM_Q_EVCNT_DEFINE(txq, txtsopain) /* painful header manip. for TSO */ 374 WM_Q_EVCNT_DEFINE(txq, txtsopain) /* painful header manip. for TSO */
375 375
376 WM_Q_EVCNT_DEFINE(txq, txdrop) /* Tx packets dropped(too many segs) */ 376 WM_Q_EVCNT_DEFINE(txq, txdrop) /* Tx packets dropped(too many segs) */
377 377
378 WM_Q_EVCNT_DEFINE(txq, tu) /* Tx underrun */ 378 WM_Q_EVCNT_DEFINE(txq, tu) /* Tx underrun */
379 379
380 char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")]; 380 char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
381 struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */ 381 struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
382#endif /* WM_EVENT_COUNTERS */ 382#endif /* WM_EVENT_COUNTERS */
383}; 383};
384 384
385struct wm_rxqueue { 385struct wm_rxqueue {
386 kmutex_t *rxq_lock; /* lock for rx operations */ 386 kmutex_t *rxq_lock; /* lock for rx operations */
387 387
388 struct wm_softc *rxq_sc; /* shortcut (skip struct wm_queue) */ 388 struct wm_softc *rxq_sc; /* shortcut (skip struct wm_queue) */
389 389
390 /* Software state for the receive descriptors. */ 390 /* Software state for the receive descriptors. */
391 struct wm_rxsoft rxq_soft[WM_NRXDESC]; 391 struct wm_rxsoft rxq_soft[WM_NRXDESC];
392 392
393 /* RX control data structures. */ 393 /* RX control data structures. */
394 int rxq_ndesc; /* must be a power of two */ 394 int rxq_ndesc; /* must be a power of two */
395 size_t rxq_descsize; /* a rx descriptor size */ 395 size_t rxq_descsize; /* a rx descriptor size */
396 rxdescs_t *rxq_descs_u; 396 rxdescs_t *rxq_descs_u;
397 bus_dmamap_t rxq_desc_dmamap; /* control data DMA map */ 397 bus_dmamap_t rxq_desc_dmamap; /* control data DMA map */
398 bus_dma_segment_t rxq_desc_seg; /* control data segment */ 398 bus_dma_segment_t rxq_desc_seg; /* control data segment */
 399 int rxq_desc_rseg; /* real number of control segments */ 399 int rxq_desc_rseg; /* real number of control segments */
400#define rxq_desc_dma rxq_desc_dmamap->dm_segs[0].ds_addr 400#define rxq_desc_dma rxq_desc_dmamap->dm_segs[0].ds_addr
401#define rxq_descs rxq_descs_u->sctxu_rxdescs 401#define rxq_descs rxq_descs_u->sctxu_rxdescs
402#define rxq_ext_descs rxq_descs_u->sctxu_ext_rxdescs 402#define rxq_ext_descs rxq_descs_u->sctxu_ext_rxdescs
403#define rxq_nq_descs rxq_descs_u->sctxu_nq_rxdescs 403#define rxq_nq_descs rxq_descs_u->sctxu_nq_rxdescs
404 404
405 bus_addr_t rxq_rdt_reg; /* offset of RDT register */ 405 bus_addr_t rxq_rdt_reg; /* offset of RDT register */
406 406
407 int rxq_ptr; /* next ready Rx desc/queue ent */ 407 int rxq_ptr; /* next ready Rx desc/queue ent */
408 int rxq_discard; 408 int rxq_discard;
409 int rxq_len; 409 int rxq_len;
410 struct mbuf *rxq_head; 410 struct mbuf *rxq_head;
411 struct mbuf *rxq_tail; 411 struct mbuf *rxq_tail;
412 struct mbuf **rxq_tailp; 412 struct mbuf **rxq_tailp;
413 413
414 bool rxq_stopping; 414 bool rxq_stopping;
415 415
416 uint32_t rxq_packets; /* for AIM */ 416 uint32_t rxq_packets; /* for AIM */
417 uint32_t rxq_bytes; /* for AIM */ 417 uint32_t rxq_bytes; /* for AIM */
418#ifdef WM_EVENT_COUNTERS 418#ifdef WM_EVENT_COUNTERS
419 WM_Q_EVCNT_DEFINE(rxq, rxintr); /* Rx interrupts */ 419 WM_Q_EVCNT_DEFINE(rxq, rxintr); /* Rx interrupts */
420 420
421 WM_Q_EVCNT_DEFINE(rxq, rxipsum); /* IP checksums checked in-bound */ 421 WM_Q_EVCNT_DEFINE(rxq, rxipsum); /* IP checksums checked in-bound */
422 WM_Q_EVCNT_DEFINE(rxq, rxtusum); /* TCP/UDP cksums checked in-bound */ 422 WM_Q_EVCNT_DEFINE(rxq, rxtusum); /* TCP/UDP cksums checked in-bound */
423#endif 423#endif
424}; 424};
425 425
426struct wm_queue { 426struct wm_queue {
427 int wmq_id; /* index of transmit and receive queues */ 427 int wmq_id; /* index of transmit and receive queues */
428 int wmq_intr_idx; /* index of MSI-X tables */ 428 int wmq_intr_idx; /* index of MSI-X tables */
429 429
430 uint32_t wmq_itr; /* interrupt interval per queue. */ 430 uint32_t wmq_itr; /* interrupt interval per queue. */
431 bool wmq_set_itr; 431 bool wmq_set_itr;
432 432
433 struct wm_txqueue wmq_txq; 433 struct wm_txqueue wmq_txq;
434 struct wm_rxqueue wmq_rxq; 434 struct wm_rxqueue wmq_rxq;
435 435
436 void *wmq_si; 436 void *wmq_si;
437}; 437};
438 438
439struct wm_phyop { 439struct wm_phyop {
440 int (*acquire)(struct wm_softc *); 440 int (*acquire)(struct wm_softc *);
441 void (*release)(struct wm_softc *); 441 void (*release)(struct wm_softc *);
442 int reset_delay_us; 442 int reset_delay_us;
443}; 443};
444 444
445struct wm_nvmop { 445struct wm_nvmop {
446 int (*acquire)(struct wm_softc *); 446 int (*acquire)(struct wm_softc *);
447 void (*release)(struct wm_softc *); 447 void (*release)(struct wm_softc *);
448 int (*read)(struct wm_softc *, int, int, uint16_t *); 448 int (*read)(struct wm_softc *, int, int, uint16_t *);
449}; 449};
450 450
451/* 451/*
452 * Software state per device. 452 * Software state per device.
453 */ 453 */
454struct wm_softc { 454struct wm_softc {
455 device_t sc_dev; /* generic device information */ 455 device_t sc_dev; /* generic device information */
456 bus_space_tag_t sc_st; /* bus space tag */ 456 bus_space_tag_t sc_st; /* bus space tag */
457 bus_space_handle_t sc_sh; /* bus space handle */ 457 bus_space_handle_t sc_sh; /* bus space handle */
458 bus_size_t sc_ss; /* bus space size */ 458 bus_size_t sc_ss; /* bus space size */
459 bus_space_tag_t sc_iot; /* I/O space tag */ 459 bus_space_tag_t sc_iot; /* I/O space tag */
460 bus_space_handle_t sc_ioh; /* I/O space handle */ 460 bus_space_handle_t sc_ioh; /* I/O space handle */
461 bus_size_t sc_ios; /* I/O space size */ 461 bus_size_t sc_ios; /* I/O space size */
462 bus_space_tag_t sc_flasht; /* flash registers space tag */ 462 bus_space_tag_t sc_flasht; /* flash registers space tag */
463 bus_space_handle_t sc_flashh; /* flash registers space handle */ 463 bus_space_handle_t sc_flashh; /* flash registers space handle */
464 bus_size_t sc_flashs; /* flash registers space size */ 464 bus_size_t sc_flashs; /* flash registers space size */
465 off_t sc_flashreg_offset; /* 465 off_t sc_flashreg_offset; /*
466 * offset to flash registers from 466 * offset to flash registers from
467 * start of BAR 467 * start of BAR
468 */ 468 */
469 bus_dma_tag_t sc_dmat; /* bus DMA tag */ 469 bus_dma_tag_t sc_dmat; /* bus DMA tag */
470 470
471 struct ethercom sc_ethercom; /* ethernet common data */ 471 struct ethercom sc_ethercom; /* ethernet common data */
472 struct mii_data sc_mii; /* MII/media information */ 472 struct mii_data sc_mii; /* MII/media information */
473 473
474 pci_chipset_tag_t sc_pc; 474 pci_chipset_tag_t sc_pc;
475 pcitag_t sc_pcitag; 475 pcitag_t sc_pcitag;
476 int sc_bus_speed; /* PCI/PCIX bus speed */ 476 int sc_bus_speed; /* PCI/PCIX bus speed */
477 int sc_pcixe_capoff; /* PCI[Xe] capability reg offset */ 477 int sc_pcixe_capoff; /* PCI[Xe] capability reg offset */
478 478
479 uint16_t sc_pcidevid; /* PCI device ID */ 479 uint16_t sc_pcidevid; /* PCI device ID */
480 wm_chip_type sc_type; /* MAC type */ 480 wm_chip_type sc_type; /* MAC type */
481 int sc_rev; /* MAC revision */ 481 int sc_rev; /* MAC revision */
482 wm_phy_type sc_phytype; /* PHY type */ 482 wm_phy_type sc_phytype; /* PHY type */
483 uint32_t sc_mediatype; /* Media type (Copper, Fiber, SERDES)*/ 483 uint32_t sc_mediatype; /* Media type (Copper, Fiber, SERDES)*/
484#define WM_MEDIATYPE_UNKNOWN 0x00 484#define WM_MEDIATYPE_UNKNOWN 0x00
485#define WM_MEDIATYPE_FIBER 0x01 485#define WM_MEDIATYPE_FIBER 0x01
486#define WM_MEDIATYPE_COPPER 0x02 486#define WM_MEDIATYPE_COPPER 0x02
487#define WM_MEDIATYPE_SERDES 0x03 /* Internal SERDES */ 487#define WM_MEDIATYPE_SERDES 0x03 /* Internal SERDES */
488 int sc_funcid; /* unit number of the chip (0 to 3) */ 488 int sc_funcid; /* unit number of the chip (0 to 3) */
489 int sc_flags; /* flags; see below */ 489 int sc_flags; /* flags; see below */
490 int sc_if_flags; /* last if_flags */ 490 int sc_if_flags; /* last if_flags */
491 int sc_flowflags; /* 802.3x flow control flags */ 491 int sc_flowflags; /* 802.3x flow control flags */
492 int sc_align_tweak; 492 int sc_align_tweak;
493 493
494 void *sc_ihs[WM_MAX_NINTR]; /* 494 void *sc_ihs[WM_MAX_NINTR]; /*
495 * interrupt cookie. 495 * interrupt cookie.
496 * - legacy and msi use sc_ihs[0] only 496 * - legacy and msi use sc_ihs[0] only
497 * - msix use sc_ihs[0] to sc_ihs[nintrs-1] 497 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
498 */ 498 */
499 pci_intr_handle_t *sc_intrs; /* 499 pci_intr_handle_t *sc_intrs; /*
500 * legacy and msi use sc_intrs[0] only 500 * legacy and msi use sc_intrs[0] only
501 * msix use sc_intrs[0] to sc_ihs[nintrs-1] 501 * msix use sc_intrs[0] to sc_ihs[nintrs-1]
502 */ 502 */
503 int sc_nintrs; /* number of interrupts */ 503 int sc_nintrs; /* number of interrupts */
504 504
505 int sc_link_intr_idx; /* index of MSI-X tables */ 505 int sc_link_intr_idx; /* index of MSI-X tables */
506 506
507 callout_t sc_tick_ch; /* tick callout */ 507 callout_t sc_tick_ch; /* tick callout */
508 bool sc_core_stopping; 508 bool sc_core_stopping;
509 509
510 int sc_nvm_ver_major; 510 int sc_nvm_ver_major;
511 int sc_nvm_ver_minor; 511 int sc_nvm_ver_minor;
512 int sc_nvm_ver_build; 512 int sc_nvm_ver_build;
513 int sc_nvm_addrbits; /* NVM address bits */ 513 int sc_nvm_addrbits; /* NVM address bits */
514 unsigned int sc_nvm_wordsize; /* NVM word size */ 514 unsigned int sc_nvm_wordsize; /* NVM word size */
515 int sc_ich8_flash_base; 515 int sc_ich8_flash_base;
516 int sc_ich8_flash_bank_size; 516 int sc_ich8_flash_bank_size;
517 int sc_nvm_k1_enabled; 517 int sc_nvm_k1_enabled;
518 518
519 int sc_nqueues; 519 int sc_nqueues;
520 struct wm_queue *sc_queue; 520 struct wm_queue *sc_queue;
521 u_int sc_rx_process_limit; /* Rx processing repeat limit in softint */ 521 u_int sc_rx_process_limit; /* Rx processing repeat limit in softint */
522 u_int sc_rx_intr_process_limit; /* Rx processing repeat limit in H/W intr */ 522 u_int sc_rx_intr_process_limit; /* Rx processing repeat limit in H/W intr */
523 523
524 int sc_affinity_offset; 524 int sc_affinity_offset;
525 525
526#ifdef WM_EVENT_COUNTERS 526#ifdef WM_EVENT_COUNTERS
527 /* Event counters. */ 527 /* Event counters. */
528 struct evcnt sc_ev_linkintr; /* Link interrupts */ 528 struct evcnt sc_ev_linkintr; /* Link interrupts */
529 529
530 /* WM_T_82542_2_1 only */ 530 /* WM_T_82542_2_1 only */
531 struct evcnt sc_ev_tx_xoff; /* Tx PAUSE(!0) frames */ 531 struct evcnt sc_ev_tx_xoff; /* Tx PAUSE(!0) frames */
532 struct evcnt sc_ev_tx_xon; /* Tx PAUSE(0) frames */ 532 struct evcnt sc_ev_tx_xon; /* Tx PAUSE(0) frames */
533 struct evcnt sc_ev_rx_xoff; /* Rx PAUSE(!0) frames */ 533 struct evcnt sc_ev_rx_xoff; /* Rx PAUSE(!0) frames */
534 struct evcnt sc_ev_rx_xon; /* Rx PAUSE(0) frames */ 534 struct evcnt sc_ev_rx_xon; /* Rx PAUSE(0) frames */
535 struct evcnt sc_ev_rx_macctl; /* Rx Unsupported */ 535 struct evcnt sc_ev_rx_macctl; /* Rx Unsupported */
536#endif /* WM_EVENT_COUNTERS */ 536#endif /* WM_EVENT_COUNTERS */
537 537
 538 /* This variable is used only on the 82547. */ 538 /* This variable is used only on the 82547. */
539 callout_t sc_txfifo_ch; /* Tx FIFO stall work-around timer */ 539 callout_t sc_txfifo_ch; /* Tx FIFO stall work-around timer */
540 540
541 uint32_t sc_ctrl; /* prototype CTRL register */ 541 uint32_t sc_ctrl; /* prototype CTRL register */
542#if 0 542#if 0
543 uint32_t sc_ctrl_ext; /* prototype CTRL_EXT register */ 543 uint32_t sc_ctrl_ext; /* prototype CTRL_EXT register */
544#endif 544#endif
545 uint32_t sc_icr; /* prototype interrupt bits */ 545 uint32_t sc_icr; /* prototype interrupt bits */
546 uint32_t sc_itr_init; /* prototype intr throttling reg */ 546 uint32_t sc_itr_init; /* prototype intr throttling reg */
547 uint32_t sc_tctl; /* prototype TCTL register */ 547 uint32_t sc_tctl; /* prototype TCTL register */
548 uint32_t sc_rctl; /* prototype RCTL register */ 548 uint32_t sc_rctl; /* prototype RCTL register */
549 uint32_t sc_txcw; /* prototype TXCW register */ 549 uint32_t sc_txcw; /* prototype TXCW register */
550 uint32_t sc_tipg; /* prototype TIPG register */ 550 uint32_t sc_tipg; /* prototype TIPG register */
551 uint32_t sc_fcrtl; /* prototype FCRTL register */ 551 uint32_t sc_fcrtl; /* prototype FCRTL register */
552 uint32_t sc_pba; /* prototype PBA register */ 552 uint32_t sc_pba; /* prototype PBA register */
553 553
554 int sc_tbi_linkup; /* TBI link status */ 554 int sc_tbi_linkup; /* TBI link status */
555 int sc_tbi_serdes_anegticks; /* autonegotiation ticks */ 555 int sc_tbi_serdes_anegticks; /* autonegotiation ticks */
556 int sc_tbi_serdes_ticks; /* tbi ticks */ 556 int sc_tbi_serdes_ticks; /* tbi ticks */
557 557
558 int sc_mchash_type; /* multicast filter offset */ 558 int sc_mchash_type; /* multicast filter offset */
559 559
560 krndsource_t rnd_source; /* random source */ 560 krndsource_t rnd_source; /* random source */
561 561
562 struct if_percpuq *sc_ipq; /* softint-based input queues */ 562 struct if_percpuq *sc_ipq; /* softint-based input queues */
563 563
564 kmutex_t *sc_core_lock; /* lock for softc operations */ 564 kmutex_t *sc_core_lock; /* lock for softc operations */
565 kmutex_t *sc_ich_phymtx; /* 565 kmutex_t *sc_ich_phymtx; /*
566 * 82574/82583/ICH/PCH specific PHY 566 * 82574/82583/ICH/PCH specific PHY
567 * mutex. For 82574/82583, the mutex 567 * mutex. For 82574/82583, the mutex
568 * is used for both PHY and NVM. 568 * is used for both PHY and NVM.
569 */ 569 */
570 kmutex_t *sc_ich_nvmmtx; /* ICH/PCH specific NVM mutex */ 570 kmutex_t *sc_ich_nvmmtx; /* ICH/PCH specific NVM mutex */
571 571
572 struct wm_phyop phy; 572 struct wm_phyop phy;
573 struct wm_nvmop nvm; 573 struct wm_nvmop nvm;
574}; 574};
575 575
576#define WM_CORE_LOCK(_sc) if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock) 576#define WM_CORE_LOCK(_sc) if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
577#define WM_CORE_UNLOCK(_sc) if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock) 577#define WM_CORE_UNLOCK(_sc) if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
578#define WM_CORE_LOCKED(_sc) (!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock)) 578#define WM_CORE_LOCKED(_sc) (!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
579 579
580#define WM_RXCHAIN_RESET(rxq) \ 580#define WM_RXCHAIN_RESET(rxq) \
581do { \ 581do { \
582 (rxq)->rxq_tailp = &(rxq)->rxq_head; \ 582 (rxq)->rxq_tailp = &(rxq)->rxq_head; \
583 *(rxq)->rxq_tailp = NULL; \ 583 *(rxq)->rxq_tailp = NULL; \
584 (rxq)->rxq_len = 0; \ 584 (rxq)->rxq_len = 0; \
585} while (/*CONSTCOND*/0) 585} while (/*CONSTCOND*/0)
586 586
587#define WM_RXCHAIN_LINK(rxq, m) \ 587#define WM_RXCHAIN_LINK(rxq, m) \
588do { \ 588do { \
589 *(rxq)->rxq_tailp = (rxq)->rxq_tail = (m); \ 589 *(rxq)->rxq_tailp = (rxq)->rxq_tail = (m); \
590 (rxq)->rxq_tailp = &(m)->m_next; \ 590 (rxq)->rxq_tailp = &(m)->m_next; \
591} while (/*CONSTCOND*/0) 591} while (/*CONSTCOND*/0)
592 592
593#ifdef WM_EVENT_COUNTERS 593#ifdef WM_EVENT_COUNTERS
594#define WM_EVCNT_INCR(ev) (ev)->ev_count++ 594#define WM_EVCNT_INCR(ev) (ev)->ev_count++
595#define WM_EVCNT_ADD(ev, val) (ev)->ev_count += (val) 595#define WM_EVCNT_ADD(ev, val) (ev)->ev_count += (val)
596 596
597#define WM_Q_EVCNT_INCR(qname, evname) \ 597#define WM_Q_EVCNT_INCR(qname, evname) \
598 WM_EVCNT_INCR(&(qname)->qname##_ev_##evname) 598 WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
599#define WM_Q_EVCNT_ADD(qname, evname, val) \ 599#define WM_Q_EVCNT_ADD(qname, evname, val) \
600 WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val)) 600 WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
601#else /* !WM_EVENT_COUNTERS */ 601#else /* !WM_EVENT_COUNTERS */
602#define WM_EVCNT_INCR(ev) /* nothing */ 602#define WM_EVCNT_INCR(ev) /* nothing */
603#define WM_EVCNT_ADD(ev, val) /* nothing */ 603#define WM_EVCNT_ADD(ev, val) /* nothing */
604 604
605#define WM_Q_EVCNT_INCR(qname, evname) /* nothing */ 605#define WM_Q_EVCNT_INCR(qname, evname) /* nothing */
606#define WM_Q_EVCNT_ADD(qname, evname, val) /* nothing */ 606#define WM_Q_EVCNT_ADD(qname, evname, val) /* nothing */
607#endif /* !WM_EVENT_COUNTERS */ 607#endif /* !WM_EVENT_COUNTERS */
608 608
609#define CSR_READ(sc, reg) \ 609#define CSR_READ(sc, reg) \
610 bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg)) 610 bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
611#define CSR_WRITE(sc, reg, val) \ 611#define CSR_WRITE(sc, reg, val) \
612 bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val)) 612 bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
613#define CSR_WRITE_FLUSH(sc) \ 613#define CSR_WRITE_FLUSH(sc) \
614 (void) CSR_READ((sc), WMREG_STATUS) 614 (void) CSR_READ((sc), WMREG_STATUS)
615 615
616#define ICH8_FLASH_READ32(sc, reg) \ 616#define ICH8_FLASH_READ32(sc, reg) \
617 bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, \ 617 bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, \
618 (reg) + sc->sc_flashreg_offset) 618 (reg) + sc->sc_flashreg_offset)
619#define ICH8_FLASH_WRITE32(sc, reg, data) \ 619#define ICH8_FLASH_WRITE32(sc, reg, data) \
620 bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, \ 620 bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, \
621 (reg) + sc->sc_flashreg_offset, (data)) 621 (reg) + sc->sc_flashreg_offset, (data))
622 622
623#define ICH8_FLASH_READ16(sc, reg) \ 623#define ICH8_FLASH_READ16(sc, reg) \
624 bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, \ 624 bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, \
625 (reg) + sc->sc_flashreg_offset) 625 (reg) + sc->sc_flashreg_offset)
626#define ICH8_FLASH_WRITE16(sc, reg, data) \ 626#define ICH8_FLASH_WRITE16(sc, reg, data) \
627 bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, \ 627 bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, \
628 (reg) + sc->sc_flashreg_offset, (data)) 628 (reg) + sc->sc_flashreg_offset, (data))
629 629
630#define WM_CDTXADDR(txq, x) ((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x))) 630#define WM_CDTXADDR(txq, x) ((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
631#define WM_CDRXADDR(rxq, x) ((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x))) 631#define WM_CDRXADDR(rxq, x) ((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
632 632
633#define WM_CDTXADDR_LO(txq, x) (WM_CDTXADDR((txq), (x)) & 0xffffffffU) 633#define WM_CDTXADDR_LO(txq, x) (WM_CDTXADDR((txq), (x)) & 0xffffffffU)
634#define WM_CDTXADDR_HI(txq, x) \ 634#define WM_CDTXADDR_HI(txq, x) \
635 (sizeof(bus_addr_t) == 8 ? \ 635 (sizeof(bus_addr_t) == 8 ? \
636 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0) 636 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
637 637
638#define WM_CDRXADDR_LO(rxq, x) (WM_CDRXADDR((rxq), (x)) & 0xffffffffU) 638#define WM_CDRXADDR_LO(rxq, x) (WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
639#define WM_CDRXADDR_HI(rxq, x) \ 639#define WM_CDRXADDR_HI(rxq, x) \
640 (sizeof(bus_addr_t) == 8 ? \ 640 (sizeof(bus_addr_t) == 8 ? \
641 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0) 641 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
642 642
643/* 643/*
644 * Register read/write functions. 644 * Register read/write functions.
645 * Other than CSR_{READ|WRITE}(). 645 * Other than CSR_{READ|WRITE}().
646 */ 646 */
647#if 0 647#if 0
648static inline uint32_t wm_io_read(struct wm_softc *, int); 648static inline uint32_t wm_io_read(struct wm_softc *, int);
649#endif 649#endif
650static inline void wm_io_write(struct wm_softc *, int, uint32_t); 650static inline void wm_io_write(struct wm_softc *, int, uint32_t);
651static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t, 651static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
652 uint32_t, uint32_t); 652 uint32_t, uint32_t);
653static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t); 653static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
654 654
655/* 655/*
656 * Descriptor sync/init functions. 656 * Descriptor sync/init functions.
657 */ 657 */
658static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int); 658static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
659static inline void wm_cdrxsync(struct wm_rxqueue *, int, int); 659static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
660static inline void wm_init_rxdesc(struct wm_rxqueue *, int); 660static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
661 661
662/* 662/*
663 * Device driver interface functions and commonly used functions. 663 * Device driver interface functions and commonly used functions.
664 * match, attach, detach, init, start, stop, ioctl, watchdog and so on. 664 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
665 */ 665 */
666static const struct wm_product *wm_lookup(const struct pci_attach_args *); 666static const struct wm_product *wm_lookup(const struct pci_attach_args *);
667static int wm_match(device_t, cfdata_t, void *); 667static int wm_match(device_t, cfdata_t, void *);
668static void wm_attach(device_t, device_t, void *); 668static void wm_attach(device_t, device_t, void *);
669static int wm_detach(device_t, int); 669static int wm_detach(device_t, int);
670static bool wm_suspend(device_t, const pmf_qual_t *); 670static bool wm_suspend(device_t, const pmf_qual_t *);
671static bool wm_resume(device_t, const pmf_qual_t *); 671static bool wm_resume(device_t, const pmf_qual_t *);
672static void wm_watchdog(struct ifnet *); 672static void wm_watchdog(struct ifnet *);
673static void wm_watchdog_txq(struct ifnet *, struct wm_txqueue *); 673static void wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
674static void wm_tick(void *); 674static void wm_tick(void *);
675static int wm_ifflags_cb(struct ethercom *); 675static int wm_ifflags_cb(struct ethercom *);
676static int wm_ioctl(struct ifnet *, u_long, void *); 676static int wm_ioctl(struct ifnet *, u_long, void *);
677/* MAC address related */ 677/* MAC address related */
678static uint16_t wm_check_alt_mac_addr(struct wm_softc *); 678static uint16_t wm_check_alt_mac_addr(struct wm_softc *);
679static int wm_read_mac_addr(struct wm_softc *, uint8_t *); 679static int wm_read_mac_addr(struct wm_softc *, uint8_t *);
680static void wm_set_ral(struct wm_softc *, const uint8_t *, int); 680static void wm_set_ral(struct wm_softc *, const uint8_t *, int);
681static uint32_t wm_mchash(struct wm_softc *, const uint8_t *); 681static uint32_t wm_mchash(struct wm_softc *, const uint8_t *);
682static void wm_set_filter(struct wm_softc *); 682static void wm_set_filter(struct wm_softc *);
683/* Reset and init related */ 683/* Reset and init related */
684static void wm_set_vlan(struct wm_softc *); 684static void wm_set_vlan(struct wm_softc *);
685static void wm_set_pcie_completion_timeout(struct wm_softc *); 685static void wm_set_pcie_completion_timeout(struct wm_softc *);
686static void wm_get_auto_rd_done(struct wm_softc *); 686static void wm_get_auto_rd_done(struct wm_softc *);
687static void wm_lan_init_done(struct wm_softc *); 687static void wm_lan_init_done(struct wm_softc *);
688static void wm_get_cfg_done(struct wm_softc *); 688static void wm_get_cfg_done(struct wm_softc *);
689static void wm_phy_post_reset(struct wm_softc *); 689static void wm_phy_post_reset(struct wm_softc *);
690static void wm_write_smbus_addr(struct wm_softc *); 690static void wm_write_smbus_addr(struct wm_softc *);
691static void wm_init_lcd_from_nvm(struct wm_softc *); 691static void wm_init_lcd_from_nvm(struct wm_softc *);
692static void wm_initialize_hardware_bits(struct wm_softc *); 692static void wm_initialize_hardware_bits(struct wm_softc *);
693static uint32_t wm_rxpbs_adjust_82580(uint32_t); 693static uint32_t wm_rxpbs_adjust_82580(uint32_t);
694static void wm_reset_phy(struct wm_softc *); 694static void wm_reset_phy(struct wm_softc *);
695static void wm_flush_desc_rings(struct wm_softc *); 695static void wm_flush_desc_rings(struct wm_softc *);
696static void wm_reset(struct wm_softc *); 696static void wm_reset(struct wm_softc *);
697static int wm_add_rxbuf(struct wm_rxqueue *, int); 697static int wm_add_rxbuf(struct wm_rxqueue *, int);
698static void wm_rxdrain(struct wm_rxqueue *); 698static void wm_rxdrain(struct wm_rxqueue *);
699static void wm_rss_getkey(uint8_t *); 699static void wm_rss_getkey(uint8_t *);
700static void wm_init_rss(struct wm_softc *); 700static void wm_init_rss(struct wm_softc *);
701static void wm_adjust_qnum(struct wm_softc *, int); 701static void wm_adjust_qnum(struct wm_softc *, int);
702static inline bool wm_is_using_msix(struct wm_softc *); 702static inline bool wm_is_using_msix(struct wm_softc *);
703static inline bool wm_is_using_multiqueue(struct wm_softc *); 703static inline bool wm_is_using_multiqueue(struct wm_softc *);
704static int wm_softint_establish(struct wm_softc *, int, int); 704static int wm_softint_establish(struct wm_softc *, int, int);
705static int wm_setup_legacy(struct wm_softc *); 705static int wm_setup_legacy(struct wm_softc *);
706static int wm_setup_msix(struct wm_softc *); 706static int wm_setup_msix(struct wm_softc *);
707static int wm_init(struct ifnet *); 707static int wm_init(struct ifnet *);
708static int wm_init_locked(struct ifnet *); 708static int wm_init_locked(struct ifnet *);
709static void wm_unset_stopping_flags(struct wm_softc *); 709static void wm_unset_stopping_flags(struct wm_softc *);
710static void wm_set_stopping_flags(struct wm_softc *); 710static void wm_set_stopping_flags(struct wm_softc *);
711static void wm_stop(struct ifnet *, int); 711static void wm_stop(struct ifnet *, int);
712static void wm_stop_locked(struct ifnet *, int); 712static void wm_stop_locked(struct ifnet *, int);
713static void wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *); 713static void wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
714static void wm_82547_txfifo_stall(void *); 714static void wm_82547_txfifo_stall(void *);
715static int wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *); 715static int wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
716static void wm_itrs_writereg(struct wm_softc *, struct wm_queue *); 716static void wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
717/* DMA related */ 717/* DMA related */
718static int wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *); 718static int wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
719static void wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *); 719static void wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
720static void wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *); 720static void wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
721static void wm_init_tx_regs(struct wm_softc *, struct wm_queue *, 721static void wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
722 struct wm_txqueue *); 722 struct wm_txqueue *);
723static int wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *); 723static int wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
724static void wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *); 724static void wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
725static void wm_init_rx_regs(struct wm_softc *, struct wm_queue *, 725static void wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
726 struct wm_rxqueue *); 726 struct wm_rxqueue *);
727static int wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *); 727static int wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
728static void wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *); 728static void wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
729static void wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *); 729static void wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
730static int wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *); 730static int wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
731static void wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *); 731static void wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
732static int wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *); 732static int wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
733static void wm_init_tx_queue(struct wm_softc *, struct wm_queue *, 733static void wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
734 struct wm_txqueue *); 734 struct wm_txqueue *);
735static int wm_init_rx_queue(struct wm_softc *, struct wm_queue *, 735static int wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
736 struct wm_rxqueue *); 736 struct wm_rxqueue *);
737static int wm_alloc_txrx_queues(struct wm_softc *); 737static int wm_alloc_txrx_queues(struct wm_softc *);
738static void wm_free_txrx_queues(struct wm_softc *); 738static void wm_free_txrx_queues(struct wm_softc *);
739static int wm_init_txrx_queues(struct wm_softc *); 739static int wm_init_txrx_queues(struct wm_softc *);
740/* Start */ 740/* Start */
741static int wm_tx_offload(struct wm_softc *, struct wm_txqueue *, 741static int wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
742 struct wm_txsoft *, uint32_t *, uint8_t *); 742 struct wm_txsoft *, uint32_t *, uint8_t *);
743static inline int wm_select_txqueue(struct ifnet *, struct mbuf *); 743static inline int wm_select_txqueue(struct ifnet *, struct mbuf *);
744static void wm_start(struct ifnet *); 744static void wm_start(struct ifnet *);
745static void wm_start_locked(struct ifnet *); 745static void wm_start_locked(struct ifnet *);
746static int wm_transmit(struct ifnet *, struct mbuf *); 746static int wm_transmit(struct ifnet *, struct mbuf *);
747static void wm_transmit_locked(struct ifnet *, struct wm_txqueue *); 747static void wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
748static void wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool); 748static void wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
749static int wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *, 749static int wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
750 struct wm_txsoft *, uint32_t *, uint32_t *, bool *); 750 struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
751static void wm_nq_start(struct ifnet *); 751static void wm_nq_start(struct ifnet *);
752static void wm_nq_start_locked(struct ifnet *); 752static void wm_nq_start_locked(struct ifnet *);
753static int wm_nq_transmit(struct ifnet *, struct mbuf *); 753static int wm_nq_transmit(struct ifnet *, struct mbuf *);
754static void wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *); 754static void wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
755static void wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool); 755static void wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
756static void wm_deferred_start_locked(struct wm_txqueue *); 756static void wm_deferred_start_locked(struct wm_txqueue *);
757static void wm_handle_queue(void *); 757static void wm_handle_queue(void *);
758/* Interrupt */ 758/* Interrupt */
759static int wm_txeof(struct wm_softc *, struct wm_txqueue *); 759static int wm_txeof(struct wm_softc *, struct wm_txqueue *);
760static void wm_rxeof(struct wm_rxqueue *, u_int); 760static void wm_rxeof(struct wm_rxqueue *, u_int);
761static void wm_linkintr_gmii(struct wm_softc *, uint32_t); 761static void wm_linkintr_gmii(struct wm_softc *, uint32_t);
762static void wm_linkintr_tbi(struct wm_softc *, uint32_t); 762static void wm_linkintr_tbi(struct wm_softc *, uint32_t);
763static void wm_linkintr_serdes(struct wm_softc *, uint32_t); 763static void wm_linkintr_serdes(struct wm_softc *, uint32_t);
764static void wm_linkintr(struct wm_softc *, uint32_t); 764static void wm_linkintr(struct wm_softc *, uint32_t);
765static int wm_intr_legacy(void *); 765static int wm_intr_legacy(void *);
766static inline void wm_txrxintr_disable(struct wm_queue *); 766static inline void wm_txrxintr_disable(struct wm_queue *);
767static inline void wm_txrxintr_enable(struct wm_queue *); 767static inline void wm_txrxintr_enable(struct wm_queue *);
768static void wm_itrs_calculate(struct wm_softc *, struct wm_queue *); 768static void wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
769static int wm_txrxintr_msix(void *); 769static int wm_txrxintr_msix(void *);
770static int wm_linkintr_msix(void *); 770static int wm_linkintr_msix(void *);
771 771
772/* 772/*
773 * Media related. 773 * Media related.
774 * GMII, SGMII, TBI, SERDES and SFP. 774 * GMII, SGMII, TBI, SERDES and SFP.
775 */ 775 */
776/* Common */ 776/* Common */
777static void wm_tbi_serdes_set_linkled(struct wm_softc *); 777static void wm_tbi_serdes_set_linkled(struct wm_softc *);
778/* GMII related */ 778/* GMII related */
779static void wm_gmii_reset(struct wm_softc *); 779static void wm_gmii_reset(struct wm_softc *);
780static void wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t); 780static void wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
781static int wm_get_phy_id_82575(struct wm_softc *); 781static int wm_get_phy_id_82575(struct wm_softc *);
782static void wm_gmii_mediainit(struct wm_softc *, pci_product_id_t); 782static void wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
783static int wm_gmii_mediachange(struct ifnet *); 783static int wm_gmii_mediachange(struct ifnet *);
784static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *); 784static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
785static void wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int); 785static void wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
786static uint32_t wm_i82543_mii_recvbits(struct wm_softc *); 786static uint32_t wm_i82543_mii_recvbits(struct wm_softc *);
787static int wm_gmii_i82543_readreg(device_t, int, int); 787static int wm_gmii_i82543_readreg(device_t, int, int);
788static void wm_gmii_i82543_writereg(device_t, int, int, int); 788static void wm_gmii_i82543_writereg(device_t, int, int, int);
789static int wm_gmii_mdic_readreg(device_t, int, int); 789static int wm_gmii_mdic_readreg(device_t, int, int);
790static void wm_gmii_mdic_writereg(device_t, int, int, int); 790static void wm_gmii_mdic_writereg(device_t, int, int, int);
791static int wm_gmii_i82544_readreg(device_t, int, int); 791static int wm_gmii_i82544_readreg(device_t, int, int);
792static void wm_gmii_i82544_writereg(device_t, int, int, int); 792static void wm_gmii_i82544_writereg(device_t, int, int, int);
793static int wm_gmii_i80003_readreg(device_t, int, int); 793static int wm_gmii_i80003_readreg(device_t, int, int);
794static void wm_gmii_i80003_writereg(device_t, int, int, int); 794static void wm_gmii_i80003_writereg(device_t, int, int, int);
795static int wm_gmii_bm_readreg(device_t, int, int); 795static int wm_gmii_bm_readreg(device_t, int, int);
796static void wm_gmii_bm_writereg(device_t, int, int, int); 796static void wm_gmii_bm_writereg(device_t, int, int, int);
797static void wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int); 797static void wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
798static int wm_gmii_hv_readreg(device_t, int, int); 798static int wm_gmii_hv_readreg(device_t, int, int);
799static int wm_gmii_hv_readreg_locked(device_t, int, int); 799static int wm_gmii_hv_readreg_locked(device_t, int, int);
800static void wm_gmii_hv_writereg(device_t, int, int, int); 800static void wm_gmii_hv_writereg(device_t, int, int, int);
801static void wm_gmii_hv_writereg_locked(device_t, int, int, int); 801static void wm_gmii_hv_writereg_locked(device_t, int, int, int);
802static int wm_gmii_82580_readreg(device_t, int, int); 802static int wm_gmii_82580_readreg(device_t, int, int);
803static void wm_gmii_82580_writereg(device_t, int, int, int); 803static void wm_gmii_82580_writereg(device_t, int, int, int);
804static int wm_gmii_gs40g_readreg(device_t, int, int); 804static int wm_gmii_gs40g_readreg(device_t, int, int);
805static void wm_gmii_gs40g_writereg(device_t, int, int, int); 805static void wm_gmii_gs40g_writereg(device_t, int, int, int);
806static void wm_gmii_statchg(struct ifnet *); 806static void wm_gmii_statchg(struct ifnet *);
807/* 807/*
808 * Kumeran related (80003, ICH* and PCH*). 808 * Kumeran related (80003, ICH* and PCH*).
809 * These functions are not for accessing MII registers but for accessing 809 * These functions are not for accessing MII registers but for accessing
810 * Kumeran-specific registers. 810 * Kumeran-specific registers.
811 */ 811 */
812static int wm_kmrn_readreg(struct wm_softc *, int, uint16_t *); 812static int wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
813static int wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *); 813static int wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
814static int wm_kmrn_writereg(struct wm_softc *, int, uint16_t); 814static int wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
815static int wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t); 815static int wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
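/*
 * Illustrative sketch, not part of if_wm.c: the calling convention of the
 * Kumeran accessors above.  The register offset and bit mask below are
 * hypothetical placeholders; only the prototypes come from the driver.
 */
#if 0	/* illustration only */
static int
wm_kmrn_rmw_sketch(struct wm_softc *sc, int reg, uint16_t bits)
{
	uint16_t val;
	int rv;

	/* Read, set the requested bits, write back. */
	if ((rv = wm_kmrn_readreg(sc, reg, &val)) != 0)
		return rv;
	return wm_kmrn_writereg(sc, reg, val | bits);
}
#endif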
816/* SGMII */ 816/* SGMII */
817static bool wm_sgmii_uses_mdio(struct wm_softc *); 817static bool wm_sgmii_uses_mdio(struct wm_softc *);
818static int wm_sgmii_readreg(device_t, int, int); 818static int wm_sgmii_readreg(device_t, int, int);
819static void wm_sgmii_writereg(device_t, int, int, int); 819static void wm_sgmii_writereg(device_t, int, int, int);
820/* TBI related */ 820/* TBI related */
821static void wm_tbi_mediainit(struct wm_softc *); 821static void wm_tbi_mediainit(struct wm_softc *);
822static int wm_tbi_mediachange(struct ifnet *); 822static int wm_tbi_mediachange(struct ifnet *);
823static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *); 823static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
824static int wm_check_for_link(struct wm_softc *); 824static int wm_check_for_link(struct wm_softc *);
825static void wm_tbi_tick(struct wm_softc *); 825static void wm_tbi_tick(struct wm_softc *);
826/* SERDES related */ 826/* SERDES related */
827static void wm_serdes_power_up_link_82575(struct wm_softc *); 827static void wm_serdes_power_up_link_82575(struct wm_softc *);
828static int wm_serdes_mediachange(struct ifnet *); 828static int wm_serdes_mediachange(struct ifnet *);
829static void wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *); 829static void wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
830static void wm_serdes_tick(struct wm_softc *); 830static void wm_serdes_tick(struct wm_softc *);
831/* SFP related */ 831/* SFP related */
832static int wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *); 832static int wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
833static uint32_t wm_sfp_get_media_type(struct wm_softc *); 833static uint32_t wm_sfp_get_media_type(struct wm_softc *);
834 834
835/* 835/*
836 * NVM related. 836 * NVM related.
837 * Microwire, SPI (w/wo EERD) and Flash. 837 * Microwire, SPI (w/wo EERD) and Flash.
838 */ 838 */
839/* Misc functions */ 839/* Misc functions */
840static void wm_eeprom_sendbits(struct wm_softc *, uint32_t, int); 840static void wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
841static void wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int); 841static void wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
842static int wm_nvm_set_addrbits_size_eecd(struct wm_softc *); 842static int wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
843/* Microwire */ 843/* Microwire */
844static int wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *); 844static int wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
845/* SPI */ 845/* SPI */
846static int wm_nvm_ready_spi(struct wm_softc *); 846static int wm_nvm_ready_spi(struct wm_softc *);
847static int wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *); 847static int wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
848/* Used with EERD */ 848/* Used with EERD */
849static int wm_poll_eerd_eewr_done(struct wm_softc *, int); 849static int wm_poll_eerd_eewr_done(struct wm_softc *, int);
850static int wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *); 850static int wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
851/* Flash */ 851/* Flash */
852static int wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *, 852static int wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
853 unsigned int *); 853 unsigned int *);
854static int32_t wm_ich8_cycle_init(struct wm_softc *); 854static int32_t wm_ich8_cycle_init(struct wm_softc *);
855static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t); 855static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
856static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t, 856static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
857 uint32_t *); 857 uint32_t *);
858static int32_t wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *); 858static int32_t wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
859static int32_t wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *); 859static int32_t wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
860static int32_t wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *); 860static int32_t wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
861static int wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *); 861static int wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
862static int wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *); 862static int wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
863/* iNVM */ 863/* iNVM */
864static int wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *); 864static int wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
865static int wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *); 865static int wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
866/* Locking, NVM type detection, checksum validation and read */ 866/* Locking, NVM type detection, checksum validation and read */
867static int wm_nvm_is_onboard_eeprom(struct wm_softc *); 867static int wm_nvm_is_onboard_eeprom(struct wm_softc *);
868static int wm_nvm_get_flash_presence_i210(struct wm_softc *); 868static int wm_nvm_get_flash_presence_i210(struct wm_softc *);
869static int wm_nvm_validate_checksum(struct wm_softc *); 869static int wm_nvm_validate_checksum(struct wm_softc *);
870static void wm_nvm_version_invm(struct wm_softc *); 870static void wm_nvm_version_invm(struct wm_softc *);
871static void wm_nvm_version(struct wm_softc *); 871static void wm_nvm_version(struct wm_softc *);
872static int wm_nvm_read(struct wm_softc *, int, int, uint16_t *); 872static int wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
873 873
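/*
 * Added note, not in if_wm.c: wm_attach() below picks one of the readers
 * above per NVM type and stores it in sc->nvm.read, for example
 *
 *	sc->nvm.read = wm_nvm_read_uwire;	Microwire parts
 *	sc->nvm.read = wm_nvm_read_spi;		SPI parts
 *	sc->nvm.read = wm_nvm_read_eerd;	SPI parts read via EERD
 *
 * so the rest of the driver can call wm_nvm_read() without knowing which
 * NVM technology is present.
 */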
874/* 874/*
875 * Hardware semaphores. 875 * Hardware semaphores.
876 * Very complex... 876 * Very complex...
877 */ 877 */
878static int wm_get_null(struct wm_softc *); 878static int wm_get_null(struct wm_softc *);
879static void wm_put_null(struct wm_softc *); 879static void wm_put_null(struct wm_softc *);
880static int wm_get_eecd(struct wm_softc *); 880static int wm_get_eecd(struct wm_softc *);
881static void wm_put_eecd(struct wm_softc *); 881static void wm_put_eecd(struct wm_softc *);
882static int wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */ 882static int wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
883static void wm_put_swsm_semaphore(struct wm_softc *); 883static void wm_put_swsm_semaphore(struct wm_softc *);
884static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t); 884static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
885static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t); 885static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
886static int wm_get_nvm_80003(struct wm_softc *); 886static int wm_get_nvm_80003(struct wm_softc *);
887static void wm_put_nvm_80003(struct wm_softc *); 887static void wm_put_nvm_80003(struct wm_softc *);
888static int wm_get_nvm_82571(struct wm_softc *); 888static int wm_get_nvm_82571(struct wm_softc *);
889static void wm_put_nvm_82571(struct wm_softc *); 889static void wm_put_nvm_82571(struct wm_softc *);
890static int wm_get_phy_82575(struct wm_softc *); 890static int wm_get_phy_82575(struct wm_softc *);
891static void wm_put_phy_82575(struct wm_softc *); 891static void wm_put_phy_82575(struct wm_softc *);
892static int wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */ 892static int wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
893static void wm_put_swfwhw_semaphore(struct wm_softc *); 893static void wm_put_swfwhw_semaphore(struct wm_softc *);
894static int wm_get_swflag_ich8lan(struct wm_softc *); /* For PHY */ 894static int wm_get_swflag_ich8lan(struct wm_softc *); /* For PHY */
895static void wm_put_swflag_ich8lan(struct wm_softc *); 895static void wm_put_swflag_ich8lan(struct wm_softc *);
896static int wm_get_nvm_ich8lan(struct wm_softc *); 896static int wm_get_nvm_ich8lan(struct wm_softc *);
897static void wm_put_nvm_ich8lan(struct wm_softc *); 897static void wm_put_nvm_ich8lan(struct wm_softc *);
898static int wm_get_hw_semaphore_82573(struct wm_softc *); 898static int wm_get_hw_semaphore_82573(struct wm_softc *);
899static void wm_put_hw_semaphore_82573(struct wm_softc *); 899static void wm_put_hw_semaphore_82573(struct wm_softc *);
900 900
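/*
 * Illustrative sketch, not part of if_wm.c: the acquire/release pairs above
 * are installed as function pointers in wm_attach() (see the
 * sc->phy.acquire/release and sc->nvm.acquire/release assignments below),
 * so callers bracket PHY or NVM access without knowing which semaphore the
 * chip needs.  The function below shows only a hypothetical caller shape.
 */
#if 0	/* illustration only */
static void
wm_phy_access_sketch(struct wm_softc *sc)
{

	if (sc->phy.acquire(sc) != 0)	/* e.g. wm_get_swsm_semaphore() */
		return;
	/* ... access PHY registers while the semaphore is held ... */
	sc->phy.release(sc);		/* e.g. wm_put_swsm_semaphore() */
}
#endif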
901/* 901/*
902 * Management mode and power management related subroutines. 902 * Management mode and power management related subroutines.
903 * BMC, AMT, suspend/resume and EEE. 903 * BMC, AMT, suspend/resume and EEE.
904 */ 904 */
905#if 0 905#if 0
906static int wm_check_mng_mode(struct wm_softc *); 906static int wm_check_mng_mode(struct wm_softc *);
907static int wm_check_mng_mode_ich8lan(struct wm_softc *); 907static int wm_check_mng_mode_ich8lan(struct wm_softc *);
908static int wm_check_mng_mode_82574(struct wm_softc *); 908static int wm_check_mng_mode_82574(struct wm_softc *);
909static int wm_check_mng_mode_generic(struct wm_softc *); 909static int wm_check_mng_mode_generic(struct wm_softc *);
910#endif 910#endif
911static int wm_enable_mng_pass_thru(struct wm_softc *); 911static int wm_enable_mng_pass_thru(struct wm_softc *);
912static bool wm_phy_resetisblocked(struct wm_softc *); 912static bool wm_phy_resetisblocked(struct wm_softc *);
913static void wm_get_hw_control(struct wm_softc *); 913static void wm_get_hw_control(struct wm_softc *);
914static void wm_release_hw_control(struct wm_softc *); 914static void wm_release_hw_control(struct wm_softc *);
915static void wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool); 915static void wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
916static void wm_smbustopci(struct wm_softc *); 916static void wm_smbustopci(struct wm_softc *);
917static void wm_init_manageability(struct wm_softc *); 917static void wm_init_manageability(struct wm_softc *);
918static void wm_release_manageability(struct wm_softc *); 918static void wm_release_manageability(struct wm_softc *);
919static void wm_get_wakeup(struct wm_softc *); 919static void wm_get_wakeup(struct wm_softc *);
920static void wm_ulp_disable(struct wm_softc *); 920static void wm_ulp_disable(struct wm_softc *);
921static void wm_enable_phy_wakeup(struct wm_softc *); 921static void wm_enable_phy_wakeup(struct wm_softc *);
922static void wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *); 922static void wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
923static void wm_enable_wakeup(struct wm_softc *); 923static void wm_enable_wakeup(struct wm_softc *);
924/* LPLU (Low Power Link Up) */ 924/* LPLU (Low Power Link Up) */
925static void wm_lplu_d0_disable(struct wm_softc *); 925static void wm_lplu_d0_disable(struct wm_softc *);
926/* EEE */ 926/* EEE */
927static void wm_set_eee_i350(struct wm_softc *); 927static void wm_set_eee_i350(struct wm_softc *);
928 928
929/* 929/*
930 * Workarounds (mainly PHY related). 930 * Workarounds (mainly PHY related).
931 * Basically, PHY workarounds are in the PHY drivers. 931 * Basically, PHY workarounds are in the PHY drivers.
932 */ 932 */
933static void wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *); 933static void wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
934static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *); 934static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
935static void wm_hv_phy_workaround_ich8lan(struct wm_softc *); 935static void wm_hv_phy_workaround_ich8lan(struct wm_softc *);
936static void wm_lv_phy_workaround_ich8lan(struct wm_softc *); 936static void wm_lv_phy_workaround_ich8lan(struct wm_softc *);
937static int wm_k1_gig_workaround_hv(struct wm_softc *, int); 937static int wm_k1_gig_workaround_hv(struct wm_softc *, int);
938static void wm_set_mdio_slow_mode_hv(struct wm_softc *); 938static void wm_set_mdio_slow_mode_hv(struct wm_softc *);
939static void wm_configure_k1_ich8lan(struct wm_softc *, int); 939static void wm_configure_k1_ich8lan(struct wm_softc *, int);
940static void wm_reset_init_script_82575(struct wm_softc *); 940static void wm_reset_init_script_82575(struct wm_softc *);
941static void wm_reset_mdicnfg_82580(struct wm_softc *); 941static void wm_reset_mdicnfg_82580(struct wm_softc *);
942static bool wm_phy_is_accessible_pchlan(struct wm_softc *); 942static bool wm_phy_is_accessible_pchlan(struct wm_softc *);
943static void wm_toggle_lanphypc_pch_lpt(struct wm_softc *); 943static void wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
944static int wm_platform_pm_pch_lpt(struct wm_softc *, bool); 944static int wm_platform_pm_pch_lpt(struct wm_softc *, bool);
945static void wm_pll_workaround_i210(struct wm_softc *); 945static void wm_pll_workaround_i210(struct wm_softc *);
946static void wm_legacy_irq_quirk_spt(struct wm_softc *); 946static void wm_legacy_irq_quirk_spt(struct wm_softc *);
947 947
948CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc), 948CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
949 wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN); 949 wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
950 950
951/* 951/*
952 * Devices supported by this driver. 952 * Devices supported by this driver.
953 */ 953 */
954static const struct wm_product { 954static const struct wm_product {
955 pci_vendor_id_t wmp_vendor; 955 pci_vendor_id_t wmp_vendor;
956 pci_product_id_t wmp_product; 956 pci_product_id_t wmp_product;
957 const char *wmp_name; 957 const char *wmp_name;
958 wm_chip_type wmp_type; 958 wm_chip_type wmp_type;
959 uint32_t wmp_flags; 959 uint32_t wmp_flags;
960#define WMP_F_UNKNOWN WM_MEDIATYPE_UNKNOWN 960#define WMP_F_UNKNOWN WM_MEDIATYPE_UNKNOWN
961#define WMP_F_FIBER WM_MEDIATYPE_FIBER 961#define WMP_F_FIBER WM_MEDIATYPE_FIBER
962#define WMP_F_COPPER WM_MEDIATYPE_COPPER 962#define WMP_F_COPPER WM_MEDIATYPE_COPPER
963#define WMP_F_SERDES WM_MEDIATYPE_SERDES 963#define WMP_F_SERDES WM_MEDIATYPE_SERDES
964#define WMP_MEDIATYPE(x) ((x) & 0x03) 964#define WMP_MEDIATYPE(x) ((x) & 0x03)
965} wm_products[] = { 965} wm_products[] = {
966 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542, 966 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
967 "Intel i82542 1000BASE-X Ethernet", 967 "Intel i82542 1000BASE-X Ethernet",
968 WM_T_82542_2_1, WMP_F_FIBER }, 968 WM_T_82542_2_1, WMP_F_FIBER },
969 969
970 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER, 970 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER,
971 "Intel i82543GC 1000BASE-X Ethernet", 971 "Intel i82543GC 1000BASE-X Ethernet",
972 WM_T_82543, WMP_F_FIBER }, 972 WM_T_82543, WMP_F_FIBER },
973 973
974 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER, 974 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER,
975 "Intel i82543GC 1000BASE-T Ethernet", 975 "Intel i82543GC 1000BASE-T Ethernet",
976 WM_T_82543, WMP_F_COPPER }, 976 WM_T_82543, WMP_F_COPPER },
977 977
978 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER, 978 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER,
979 "Intel i82544EI 1000BASE-T Ethernet", 979 "Intel i82544EI 1000BASE-T Ethernet",
980 WM_T_82544, WMP_F_COPPER }, 980 WM_T_82544, WMP_F_COPPER },
981 981
982 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER, 982 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER,
983 "Intel i82544EI 1000BASE-X Ethernet", 983 "Intel i82544EI 1000BASE-X Ethernet",
984 WM_T_82544, WMP_F_FIBER }, 984 WM_T_82544, WMP_F_FIBER },
985 985
986 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER, 986 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER,
987 "Intel i82544GC 1000BASE-T Ethernet", 987 "Intel i82544GC 1000BASE-T Ethernet",
988 WM_T_82544, WMP_F_COPPER }, 988 WM_T_82544, WMP_F_COPPER },
989 989
990 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM, 990 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM,
991 "Intel i82544GC (LOM) 1000BASE-T Ethernet", 991 "Intel i82544GC (LOM) 1000BASE-T Ethernet",
992 WM_T_82544, WMP_F_COPPER }, 992 WM_T_82544, WMP_F_COPPER },
993 993
994 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM, 994 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM,
995 "Intel i82540EM 1000BASE-T Ethernet", 995 "Intel i82540EM 1000BASE-T Ethernet",
996 WM_T_82540, WMP_F_COPPER }, 996 WM_T_82540, WMP_F_COPPER },
997 997
998 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM, 998 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM,
999 "Intel i82540EM (LOM) 1000BASE-T Ethernet", 999 "Intel i82540EM (LOM) 1000BASE-T Ethernet",
1000 WM_T_82540, WMP_F_COPPER }, 1000 WM_T_82540, WMP_F_COPPER },
1001 1001
1002 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM, 1002 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM,
1003 "Intel i82540EP 1000BASE-T Ethernet", 1003 "Intel i82540EP 1000BASE-T Ethernet",
1004 WM_T_82540, WMP_F_COPPER }, 1004 WM_T_82540, WMP_F_COPPER },
1005 1005
1006 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP, 1006 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP,
1007 "Intel i82540EP 1000BASE-T Ethernet", 1007 "Intel i82540EP 1000BASE-T Ethernet",
1008 WM_T_82540, WMP_F_COPPER }, 1008 WM_T_82540, WMP_F_COPPER },
1009 1009
1010 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP, 1010 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP,
1011 "Intel i82540EP 1000BASE-T Ethernet", 1011 "Intel i82540EP 1000BASE-T Ethernet",
1012 WM_T_82540, WMP_F_COPPER }, 1012 WM_T_82540, WMP_F_COPPER },
1013 1013
1014 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER, 1014 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER,
1015 "Intel i82545EM 1000BASE-T Ethernet", 1015 "Intel i82545EM 1000BASE-T Ethernet",
1016 WM_T_82545, WMP_F_COPPER }, 1016 WM_T_82545, WMP_F_COPPER },
1017 1017
1018 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER, 1018 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER,
1019 "Intel i82545GM 1000BASE-T Ethernet", 1019 "Intel i82545GM 1000BASE-T Ethernet",
1020 WM_T_82545_3, WMP_F_COPPER }, 1020 WM_T_82545_3, WMP_F_COPPER },
1021 1021
1022 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER, 1022 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER,
1023 "Intel i82545GM 1000BASE-X Ethernet", 1023 "Intel i82545GM 1000BASE-X Ethernet",
1024 WM_T_82545_3, WMP_F_FIBER }, 1024 WM_T_82545_3, WMP_F_FIBER },
1025 1025
1026 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES, 1026 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES,
1027 "Intel i82545GM Gigabit Ethernet (SERDES)", 1027 "Intel i82545GM Gigabit Ethernet (SERDES)",
1028 WM_T_82545_3, WMP_F_SERDES }, 1028 WM_T_82545_3, WMP_F_SERDES },
1029 1029
1030 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER, 1030 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER,
1031 "Intel i82546EB 1000BASE-T Ethernet", 1031 "Intel i82546EB 1000BASE-T Ethernet",
1032 WM_T_82546, WMP_F_COPPER }, 1032 WM_T_82546, WMP_F_COPPER },
1033 1033
1034 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD, 1034 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD,
1035 "Intel i82546EB 1000BASE-T Ethernet", 1035 "Intel i82546EB 1000BASE-T Ethernet",
1036 WM_T_82546, WMP_F_COPPER }, 1036 WM_T_82546, WMP_F_COPPER },
1037 1037
1038 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER, 1038 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER,
1039 "Intel i82545EM 1000BASE-X Ethernet", 1039 "Intel i82545EM 1000BASE-X Ethernet",
1040 WM_T_82545, WMP_F_FIBER }, 1040 WM_T_82545, WMP_F_FIBER },
1041 1041
1042 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER, 1042 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER,
1043 "Intel i82546EB 1000BASE-X Ethernet", 1043 "Intel i82546EB 1000BASE-X Ethernet",
1044 WM_T_82546, WMP_F_FIBER }, 1044 WM_T_82546, WMP_F_FIBER },
1045 1045
1046 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER, 1046 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER,
1047 "Intel i82546GB 1000BASE-T Ethernet", 1047 "Intel i82546GB 1000BASE-T Ethernet",
1048 WM_T_82546_3, WMP_F_COPPER }, 1048 WM_T_82546_3, WMP_F_COPPER },
1049 1049
1050 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER, 1050 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER,
1051 "Intel i82546GB 1000BASE-X Ethernet", 1051 "Intel i82546GB 1000BASE-X Ethernet",
1052 WM_T_82546_3, WMP_F_FIBER }, 1052 WM_T_82546_3, WMP_F_FIBER },
1053 1053
1054 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES, 1054 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES,
1055 "Intel i82546GB Gigabit Ethernet (SERDES)", 1055 "Intel i82546GB Gigabit Ethernet (SERDES)",
1056 WM_T_82546_3, WMP_F_SERDES }, 1056 WM_T_82546_3, WMP_F_SERDES },
1057 1057
1058 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER, 1058 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
1059 "i82546GB quad-port Gigabit Ethernet", 1059 "i82546GB quad-port Gigabit Ethernet",
1060 WM_T_82546_3, WMP_F_COPPER }, 1060 WM_T_82546_3, WMP_F_COPPER },
1061 1061
1062 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3, 1062 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
1063 "i82546GB quad-port Gigabit Ethernet (KSP3)", 1063 "i82546GB quad-port Gigabit Ethernet (KSP3)",
1064 WM_T_82546_3, WMP_F_COPPER }, 1064 WM_T_82546_3, WMP_F_COPPER },
1065 1065
1066 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE, 1066 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE,
1067 "Intel PRO/1000MT (82546GB)", 1067 "Intel PRO/1000MT (82546GB)",
1068 WM_T_82546_3, WMP_F_COPPER }, 1068 WM_T_82546_3, WMP_F_COPPER },
1069 1069
1070 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI, 1070 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI,
1071 "Intel i82541EI 1000BASE-T Ethernet", 1071 "Intel i82541EI 1000BASE-T Ethernet",
1072 WM_T_82541, WMP_F_COPPER }, 1072 WM_T_82541, WMP_F_COPPER },
1073 1073
1074 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM, 1074 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM,
1075 "Intel i82541ER (LOM) 1000BASE-T Ethernet", 1075 "Intel i82541ER (LOM) 1000BASE-T Ethernet",
1076 WM_T_82541, WMP_F_COPPER }, 1076 WM_T_82541, WMP_F_COPPER },
1077 1077
1078 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE, 1078 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE,
1079 "Intel i82541EI Mobile 1000BASE-T Ethernet", 1079 "Intel i82541EI Mobile 1000BASE-T Ethernet",
1080 WM_T_82541, WMP_F_COPPER }, 1080 WM_T_82541, WMP_F_COPPER },
1081 1081
1082 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER, 1082 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER,
1083 "Intel i82541ER 1000BASE-T Ethernet", 1083 "Intel i82541ER 1000BASE-T Ethernet",
1084 WM_T_82541_2, WMP_F_COPPER }, 1084 WM_T_82541_2, WMP_F_COPPER },
1085 1085
@@ -1680,2021 +1680,2021 @@ wm_attach(device_t parent, device_t self @@ -1680,2021 +1680,2021 @@ wm_attach(device_t parent, device_t self
1680 pci_intr_type_t max_type; 1680 pci_intr_type_t max_type;
1681 const char *eetype, *xname; 1681 const char *eetype, *xname;
1682 bus_space_tag_t memt; 1682 bus_space_tag_t memt;
1683 bus_space_handle_t memh; 1683 bus_space_handle_t memh;
1684 bus_size_t memsize; 1684 bus_size_t memsize;
1685 int memh_valid; 1685 int memh_valid;
1686 int i, error; 1686 int i, error;
1687 const struct wm_product *wmp; 1687 const struct wm_product *wmp;
1688 prop_data_t ea; 1688 prop_data_t ea;
1689 prop_number_t pn; 1689 prop_number_t pn;
1690 uint8_t enaddr[ETHER_ADDR_LEN]; 1690 uint8_t enaddr[ETHER_ADDR_LEN];
1691 char buf[256]; 1691 char buf[256];
1692 uint16_t cfg1, cfg2, swdpin, nvmword; 1692 uint16_t cfg1, cfg2, swdpin, nvmword;
1693 pcireg_t preg, memtype; 1693 pcireg_t preg, memtype;
1694 uint16_t eeprom_data, apme_mask; 1694 uint16_t eeprom_data, apme_mask;
1695 bool force_clear_smbi; 1695 bool force_clear_smbi;
1696 uint32_t link_mode; 1696 uint32_t link_mode;
1697 uint32_t reg; 1697 uint32_t reg;
1698 1698
1699 sc->sc_dev = self; 1699 sc->sc_dev = self;
1700 callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS); 1700 callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
1701 sc->sc_core_stopping = false; 1701 sc->sc_core_stopping = false;
1702 1702
1703 wmp = wm_lookup(pa); 1703 wmp = wm_lookup(pa);
1704#ifdef DIAGNOSTIC 1704#ifdef DIAGNOSTIC
1705 if (wmp == NULL) { 1705 if (wmp == NULL) {
1706 printf("\n"); 1706 printf("\n");
1707 panic("wm_attach: impossible"); 1707 panic("wm_attach: impossible");
1708 } 1708 }
1709#endif 1709#endif
1710 sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags); 1710 sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
1711 1711
1712 sc->sc_pc = pa->pa_pc; 1712 sc->sc_pc = pa->pa_pc;
1713 sc->sc_pcitag = pa->pa_tag; 1713 sc->sc_pcitag = pa->pa_tag;
1714 1714
1715 if (pci_dma64_available(pa)) 1715 if (pci_dma64_available(pa))
1716 sc->sc_dmat = pa->pa_dmat64; 1716 sc->sc_dmat = pa->pa_dmat64;
1717 else 1717 else
1718 sc->sc_dmat = pa->pa_dmat; 1718 sc->sc_dmat = pa->pa_dmat;
1719 1719
1720 sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id); 1720 sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
1721 sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG)); 1721 sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
1722 pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1); 1722 pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
1723 1723
1724 sc->sc_type = wmp->wmp_type; 1724 sc->sc_type = wmp->wmp_type;
1725 1725
1726 /* Set default function pointers */ 1726 /* Set default function pointers */
1727 sc->phy.acquire = sc->nvm.acquire = wm_get_null; 1727 sc->phy.acquire = sc->nvm.acquire = wm_get_null;
1728 sc->phy.release = sc->nvm.release = wm_put_null; 1728 sc->phy.release = sc->nvm.release = wm_put_null;
1729 sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000; 1729 sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
1730 1730
1731 if (sc->sc_type < WM_T_82543) { 1731 if (sc->sc_type < WM_T_82543) {
1732 if (sc->sc_rev < 2) { 1732 if (sc->sc_rev < 2) {
1733 aprint_error_dev(sc->sc_dev, 1733 aprint_error_dev(sc->sc_dev,
1734 "i82542 must be at least rev. 2\n"); 1734 "i82542 must be at least rev. 2\n");
1735 return; 1735 return;
1736 } 1736 }
1737 if (sc->sc_rev < 3) 1737 if (sc->sc_rev < 3)
1738 sc->sc_type = WM_T_82542_2_0; 1738 sc->sc_type = WM_T_82542_2_0;
1739 } 1739 }
1740 1740
1741 /* 1741 /*
1742 * Disable MSI for Errata: 1742 * Disable MSI for Errata:
1743 * "Message Signaled Interrupt Feature May Corrupt Write Transactions" 1743 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
1744 * 1744 *
1745 * 82544: Errata 25 1745 * 82544: Errata 25
1746 * 82540: Errata 6 (easy to reproduce device timeout) 1746 * 82540: Errata 6 (easy to reproduce device timeout)
1747 * 82545: Errata 4 (easy to reproduce device timeout) 1747 * 82545: Errata 4 (easy to reproduce device timeout)
1748 * 82546: Errata 26 (easy to reproduce device timeout) 1748 * 82546: Errata 26 (easy to reproduce device timeout)
1749 * 82541: Errata 7 (easy to reproduce device timeout) 1749 * 82541: Errata 7 (easy to reproduce device timeout)
1750 * 1750 *
1751 * "Byte Enables 2 and 3 are not set on MSI writes" 1751 * "Byte Enables 2 and 3 are not set on MSI writes"
1752 * 1752 *
1753 * 82571 & 82572: Errata 63 1753 * 82571 & 82572: Errata 63
1754 */ 1754 */
1755 if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571) 1755 if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
1756 || (sc->sc_type == WM_T_82572)) 1756 || (sc->sc_type == WM_T_82572))
1757 pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY; 1757 pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
1758 1758
1759 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576) 1759 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1760 || (sc->sc_type == WM_T_82580) 1760 || (sc->sc_type == WM_T_82580)
1761 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354) 1761 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
1762 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) 1762 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
1763 sc->sc_flags |= WM_F_NEWQUEUE; 1763 sc->sc_flags |= WM_F_NEWQUEUE;
1764 1764
1765 /* Set device properties (mactype) */ 1765 /* Set device properties (mactype) */
1766 dict = device_properties(sc->sc_dev); 1766 dict = device_properties(sc->sc_dev);
1767 prop_dictionary_set_uint32(dict, "mactype", sc->sc_type); 1767 prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1768 1768
1769 /* 1769 /*
1770 * Map the device. All devices support memory-mapped access, 1770 * Map the device. All devices support memory-mapped access,
1771 * and it is really required for normal operation. 1771 * and it is really required for normal operation.
1772 */ 1772 */
1773 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA); 1773 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1774 switch (memtype) { 1774 switch (memtype) {
1775 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 1775 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1776 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 1776 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1777 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA, 1777 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1778 memtype, 0, &memt, &memh, NULL, &memsize) == 0); 1778 memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1779 break; 1779 break;
1780 default: 1780 default:
1781 memh_valid = 0; 1781 memh_valid = 0;
1782 break; 1782 break;
1783 } 1783 }
1784 1784
1785 if (memh_valid) { 1785 if (memh_valid) {
1786 sc->sc_st = memt; 1786 sc->sc_st = memt;
1787 sc->sc_sh = memh; 1787 sc->sc_sh = memh;
1788 sc->sc_ss = memsize; 1788 sc->sc_ss = memsize;
1789 } else { 1789 } else {
1790 aprint_error_dev(sc->sc_dev, 1790 aprint_error_dev(sc->sc_dev,
1791 "unable to map device registers\n"); 1791 "unable to map device registers\n");
1792 return; 1792 return;
1793 } 1793 }
1794 1794
1795 /* 1795 /*
1796 * In addition, i82544 and later support I/O mapped indirect 1796 * In addition, i82544 and later support I/O mapped indirect
1797 * register access. It is not desirable (nor supported in 1797 * register access. It is not desirable (nor supported in
1798 * this driver) to use it for normal operation, though it is 1798 * this driver) to use it for normal operation, though it is
1799 * required to work around bugs in some chip versions. 1799 * required to work around bugs in some chip versions.
1800 */ 1800 */
1801 if (sc->sc_type >= WM_T_82544) { 1801 if (sc->sc_type >= WM_T_82544) {
1802 /* First we have to find the I/O BAR. */ 1802 /* First we have to find the I/O BAR. */
1803 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) { 1803 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1804 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i); 1804 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1805 if (memtype == PCI_MAPREG_TYPE_IO) 1805 if (memtype == PCI_MAPREG_TYPE_IO)
1806 break; 1806 break;
1807 if (PCI_MAPREG_MEM_TYPE(memtype) == 1807 if (PCI_MAPREG_MEM_TYPE(memtype) ==
1808 PCI_MAPREG_MEM_TYPE_64BIT) 1808 PCI_MAPREG_MEM_TYPE_64BIT)
1809 i += 4; /* skip high bits, too */ 1809 i += 4; /* skip high bits, too */
1810 } 1810 }
1811 if (i < PCI_MAPREG_END) { 1811 if (i < PCI_MAPREG_END) {
1812 /* 1812 /*
1813 * We found PCI_MAPREG_TYPE_IO. Note that 82580 1813 * We found PCI_MAPREG_TYPE_IO. Note that 82580
1814 * (and newer?) chips have no PCI_MAPREG_TYPE_IO. 1814 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
1815 * That's not a problem because newer chips don't 1815 * That's not a problem because newer chips don't
1816 * have this bug. 1816 * have this bug.
1817 * 1817 *
1818 * The i8254x apparently doesn't respond when the 1818 * The i8254x apparently doesn't respond when the
1819 * I/O BAR is 0, which looks as if it hasn't 1819 * I/O BAR is 0, which looks as if it hasn't
1820 * been configured. 1820 * been configured.
1821 */ 1821 */
1822 preg = pci_conf_read(pc, pa->pa_tag, i); 1822 preg = pci_conf_read(pc, pa->pa_tag, i);
1823 if (PCI_MAPREG_MEM_ADDR(preg) == 0) { 1823 if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1824 aprint_error_dev(sc->sc_dev, 1824 aprint_error_dev(sc->sc_dev,
1825 "WARNING: I/O BAR at zero.\n"); 1825 "WARNING: I/O BAR at zero.\n");
1826 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO, 1826 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1827 0, &sc->sc_iot, &sc->sc_ioh, 1827 0, &sc->sc_iot, &sc->sc_ioh,
1828 NULL, &sc->sc_ios) == 0) { 1828 NULL, &sc->sc_ios) == 0) {
1829 sc->sc_flags |= WM_F_IOH_VALID; 1829 sc->sc_flags |= WM_F_IOH_VALID;
1830 } else { 1830 } else {
1831 aprint_error_dev(sc->sc_dev, 1831 aprint_error_dev(sc->sc_dev,
1832 "WARNING: unable to map I/O space\n"); 1832 "WARNING: unable to map I/O space\n");
1833 } 1833 }
1834 } 1834 }
1835 1835
1836 } 1836 }
1837 1837
1838 /* Enable bus mastering. Disable MWI on the i82542 2.0. */ 1838 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
1839 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 1839 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1840 preg |= PCI_COMMAND_MASTER_ENABLE; 1840 preg |= PCI_COMMAND_MASTER_ENABLE;
1841 if (sc->sc_type < WM_T_82542_2_1) 1841 if (sc->sc_type < WM_T_82542_2_1)
1842 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE; 1842 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1843 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg); 1843 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1844 1844
1845 /* power up chip */ 1845 /* power up chip */
1846 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, 1846 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1847 NULL)) && error != EOPNOTSUPP) { 1847 NULL)) && error != EOPNOTSUPP) {
1848 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error); 1848 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1849 return; 1849 return;
1850 } 1850 }
1851 1851
1852 wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag)); 1852 wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
1853 1853
1854 /* Allocation settings */ 1854 /* Allocation settings */
1855 max_type = PCI_INTR_TYPE_MSIX; 1855 max_type = PCI_INTR_TYPE_MSIX;
1856 /* 1856 /*
1857 * The 82583 has an MSI-X capability in the PCI configuration space but 1857 * The 82583 has an MSI-X capability in the PCI configuration space but
1858 * doesn't support it. At least the documentation doesn't say anything 1858 * doesn't support it. At least the documentation doesn't say anything
1859 * about MSI-X. 1859 * about MSI-X.
1860 */ 1860 */
1861 counts[PCI_INTR_TYPE_MSIX] 1861 counts[PCI_INTR_TYPE_MSIX]
1862 = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1; 1862 = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
1863 counts[PCI_INTR_TYPE_MSI] = 1; 1863 counts[PCI_INTR_TYPE_MSI] = 1;
1864 counts[PCI_INTR_TYPE_INTX] = 1; 1864 counts[PCI_INTR_TYPE_INTX] = 1;
1865 /* overridden by disable flags */ 1865 /* overridden by disable flags */
1866 if (wm_disable_msi != 0) { 1866 if (wm_disable_msi != 0) {
1867 counts[PCI_INTR_TYPE_MSI] = 0; 1867 counts[PCI_INTR_TYPE_MSI] = 0;
1868 if (wm_disable_msix != 0) { 1868 if (wm_disable_msix != 0) {
1869 max_type = PCI_INTR_TYPE_INTX; 1869 max_type = PCI_INTR_TYPE_INTX;
1870 counts[PCI_INTR_TYPE_MSIX] = 0; 1870 counts[PCI_INTR_TYPE_MSIX] = 0;
1871 } 1871 }
1872 } else if (wm_disable_msix != 0) { 1872 } else if (wm_disable_msix != 0) {
1873 max_type = PCI_INTR_TYPE_MSI; 1873 max_type = PCI_INTR_TYPE_MSI;
1874 counts[PCI_INTR_TYPE_MSIX] = 0; 1874 counts[PCI_INTR_TYPE_MSIX] = 0;
1875 } 1875 }
1876 1876
1877alloc_retry: 1877alloc_retry:
1878 if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) { 1878 if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
1879 aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n"); 1879 aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
1880 return; 1880 return;
1881 } 1881 }
1882 1882
1883 if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) { 1883 if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
1884 error = wm_setup_msix(sc); 1884 error = wm_setup_msix(sc);
1885 if (error) { 1885 if (error) {
1886 pci_intr_release(pc, sc->sc_intrs, 1886 pci_intr_release(pc, sc->sc_intrs,
1887 counts[PCI_INTR_TYPE_MSIX]); 1887 counts[PCI_INTR_TYPE_MSIX]);
1888 1888
1889 /* Setup for MSI: Disable MSI-X */ 1889 /* Setup for MSI: Disable MSI-X */
1890 max_type = PCI_INTR_TYPE_MSI; 1890 max_type = PCI_INTR_TYPE_MSI;
1891 counts[PCI_INTR_TYPE_MSI] = 1; 1891 counts[PCI_INTR_TYPE_MSI] = 1;
1892 counts[PCI_INTR_TYPE_INTX] = 1; 1892 counts[PCI_INTR_TYPE_INTX] = 1;
1893 goto alloc_retry; 1893 goto alloc_retry;
1894 } 1894 }
1895 } else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) { 1895 } else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
1896 wm_adjust_qnum(sc, 0); /* must not use multiqueue */ 1896 wm_adjust_qnum(sc, 0); /* must not use multiqueue */
1897 error = wm_setup_legacy(sc); 1897 error = wm_setup_legacy(sc);
1898 if (error) { 1898 if (error) {
1899 pci_intr_release(sc->sc_pc, sc->sc_intrs, 1899 pci_intr_release(sc->sc_pc, sc->sc_intrs,
1900 counts[PCI_INTR_TYPE_MSI]); 1900 counts[PCI_INTR_TYPE_MSI]);
1901 1901
1902 /* The next try is for INTx: Disable MSI */ 1902 /* The next try is for INTx: Disable MSI */
1903 max_type = PCI_INTR_TYPE_INTX; 1903 max_type = PCI_INTR_TYPE_INTX;
1904 counts[PCI_INTR_TYPE_INTX] = 1; 1904 counts[PCI_INTR_TYPE_INTX] = 1;
1905 goto alloc_retry; 1905 goto alloc_retry;
1906 } 1906 }
1907 } else { 1907 } else {
1908 wm_adjust_qnum(sc, 0); /* must not use multiqueue */ 1908 wm_adjust_qnum(sc, 0); /* must not use multiqueue */
1909 error = wm_setup_legacy(sc); 1909 error = wm_setup_legacy(sc);
1910 if (error) { 1910 if (error) {
1911 pci_intr_release(sc->sc_pc, sc->sc_intrs, 1911 pci_intr_release(sc->sc_pc, sc->sc_intrs,
1912 counts[PCI_INTR_TYPE_INTX]); 1912 counts[PCI_INTR_TYPE_INTX]);
1913 return; 1913 return;
1914 } 1914 }
1915 } 1915 }
1916 1916
1917 /* 1917 /*
1918 * Check the function ID (unit number of the chip). 1918 * Check the function ID (unit number of the chip).
1919 */ 1919 */
1920 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3) 1920 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1921 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003) 1921 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
1922 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576) 1922 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1923 || (sc->sc_type == WM_T_82580) 1923 || (sc->sc_type == WM_T_82580)
1924 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) 1924 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
1925 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS) 1925 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1926 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK; 1926 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1927 else 1927 else
1928 sc->sc_funcid = 0; 1928 sc->sc_funcid = 0;
1929 1929
1930 /* 1930 /*
1931 * Determine a few things about the bus we're connected to. 1931 * Determine a few things about the bus we're connected to.
1932 */ 1932 */
1933 if (sc->sc_type < WM_T_82543) { 1933 if (sc->sc_type < WM_T_82543) {
1934 /* We don't really know the bus characteristics here. */ 1934 /* We don't really know the bus characteristics here. */
1935 sc->sc_bus_speed = 33; 1935 sc->sc_bus_speed = 33;
1936 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) { 1936 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1937 /* 1937 /*
1938 * CSA (Communication Streaming Architecture) is about as fast 1938 * CSA (Communication Streaming Architecture) is about as fast
1939 * as a 32-bit 66MHz PCI bus. 1939 * as a 32-bit 66MHz PCI bus.
1940 */ 1940 */
1941 sc->sc_flags |= WM_F_CSA; 1941 sc->sc_flags |= WM_F_CSA;
1942 sc->sc_bus_speed = 66; 1942 sc->sc_bus_speed = 66;
1943 aprint_verbose_dev(sc->sc_dev, 1943 aprint_verbose_dev(sc->sc_dev,
1944 "Communication Streaming Architecture\n"); 1944 "Communication Streaming Architecture\n");
1945 if (sc->sc_type == WM_T_82547) { 1945 if (sc->sc_type == WM_T_82547) {
1946 callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS); 1946 callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1947 callout_setfunc(&sc->sc_txfifo_ch, 1947 callout_setfunc(&sc->sc_txfifo_ch,
1948 wm_82547_txfifo_stall, sc); 1948 wm_82547_txfifo_stall, sc);
1949 aprint_verbose_dev(sc->sc_dev, 1949 aprint_verbose_dev(sc->sc_dev,
1950 "using 82547 Tx FIFO stall work-around\n"); 1950 "using 82547 Tx FIFO stall work-around\n");
1951 } 1951 }
1952 } else if (sc->sc_type >= WM_T_82571) { 1952 } else if (sc->sc_type >= WM_T_82571) {
1953 sc->sc_flags |= WM_F_PCIE; 1953 sc->sc_flags |= WM_F_PCIE;
1954 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9) 1954 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1955 && (sc->sc_type != WM_T_ICH10) 1955 && (sc->sc_type != WM_T_ICH10)
1956 && (sc->sc_type != WM_T_PCH) 1956 && (sc->sc_type != WM_T_PCH)
1957 && (sc->sc_type != WM_T_PCH2) 1957 && (sc->sc_type != WM_T_PCH2)
1958 && (sc->sc_type != WM_T_PCH_LPT) 1958 && (sc->sc_type != WM_T_PCH_LPT)
1959 && (sc->sc_type != WM_T_PCH_SPT)) { 1959 && (sc->sc_type != WM_T_PCH_SPT)) {
1960 /* ICH* and PCH* have no PCIe capability registers */ 1960 /* ICH* and PCH* have no PCIe capability registers */
1961 if (pci_get_capability(pa->pa_pc, pa->pa_tag, 1961 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1962 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff, 1962 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1963 NULL) == 0) 1963 NULL) == 0)
1964 aprint_error_dev(sc->sc_dev, 1964 aprint_error_dev(sc->sc_dev,
1965 "unable to find PCIe capability\n"); 1965 "unable to find PCIe capability\n");
1966 } 1966 }
1967 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n"); 1967 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1968 } else { 1968 } else {
1969 reg = CSR_READ(sc, WMREG_STATUS); 1969 reg = CSR_READ(sc, WMREG_STATUS);
1970 if (reg & STATUS_BUS64) 1970 if (reg & STATUS_BUS64)
1971 sc->sc_flags |= WM_F_BUS64; 1971 sc->sc_flags |= WM_F_BUS64;
1972 if ((reg & STATUS_PCIX_MODE) != 0) { 1972 if ((reg & STATUS_PCIX_MODE) != 0) {
1973 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb; 1973 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1974 1974
1975 sc->sc_flags |= WM_F_PCIX; 1975 sc->sc_flags |= WM_F_PCIX;
1976 if (pci_get_capability(pa->pa_pc, pa->pa_tag, 1976 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1977 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0) 1977 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1978 aprint_error_dev(sc->sc_dev, 1978 aprint_error_dev(sc->sc_dev,
1979 "unable to find PCIX capability\n"); 1979 "unable to find PCIX capability\n");
1980 else if (sc->sc_type != WM_T_82545_3 && 1980 else if (sc->sc_type != WM_T_82545_3 &&
1981 sc->sc_type != WM_T_82546_3) { 1981 sc->sc_type != WM_T_82546_3) {
1982 /* 1982 /*
1983 * Work around a problem caused by the BIOS 1983 * Work around a problem caused by the BIOS
1984 * setting the max memory read byte count 1984 * setting the max memory read byte count
1985 * incorrectly. 1985 * incorrectly.
1986 */ 1986 */
1987 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag, 1987 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1988 sc->sc_pcixe_capoff + PCIX_CMD); 1988 sc->sc_pcixe_capoff + PCIX_CMD);
1989 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag, 1989 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1990 sc->sc_pcixe_capoff + PCIX_STATUS); 1990 sc->sc_pcixe_capoff + PCIX_STATUS);
1991 1991
1992 bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >> 1992 bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1993 PCIX_CMD_BYTECNT_SHIFT; 1993 PCIX_CMD_BYTECNT_SHIFT;
1994 maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >> 1994 maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1995 PCIX_STATUS_MAXB_SHIFT; 1995 PCIX_STATUS_MAXB_SHIFT;
1996 if (bytecnt > maxb) { 1996 if (bytecnt > maxb) {
1997 aprint_verbose_dev(sc->sc_dev, 1997 aprint_verbose_dev(sc->sc_dev,
1998 "resetting PCI-X MMRBC: %d -> %d\n", 1998 "resetting PCI-X MMRBC: %d -> %d\n",
1999 512 << bytecnt, 512 << maxb); 1999 512 << bytecnt, 512 << maxb);
2000 pcix_cmd = (pcix_cmd & 2000 pcix_cmd = (pcix_cmd &
2001 ~PCIX_CMD_BYTECNT_MASK) | 2001 ~PCIX_CMD_BYTECNT_MASK) |
2002 (maxb << PCIX_CMD_BYTECNT_SHIFT); 2002 (maxb << PCIX_CMD_BYTECNT_SHIFT);
2003 pci_conf_write(pa->pa_pc, pa->pa_tag, 2003 pci_conf_write(pa->pa_pc, pa->pa_tag,
2004 sc->sc_pcixe_capoff + PCIX_CMD, 2004 sc->sc_pcixe_capoff + PCIX_CMD,
2005 pcix_cmd); 2005 pcix_cmd);
2006 } 2006 }
2007 } 2007 }
2008 } 2008 }
2009 /* 2009 /*
2010 * The quad port adapter is special; it has a PCIX-PCIX 2010 * The quad port adapter is special; it has a PCIX-PCIX
2011 * bridge on the board, and can run the secondary bus at 2011 * bridge on the board, and can run the secondary bus at
2012 * a higher speed. 2012 * a higher speed.
2013 */ 2013 */
2014 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) { 2014 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
2015 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120 2015 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
2016 : 66; 2016 : 66;
2017 } else if (sc->sc_flags & WM_F_PCIX) { 2017 } else if (sc->sc_flags & WM_F_PCIX) {
2018 switch (reg & STATUS_PCIXSPD_MASK) { 2018 switch (reg & STATUS_PCIXSPD_MASK) {
2019 case STATUS_PCIXSPD_50_66: 2019 case STATUS_PCIXSPD_50_66:
2020 sc->sc_bus_speed = 66; 2020 sc->sc_bus_speed = 66;
2021 break; 2021 break;
2022 case STATUS_PCIXSPD_66_100: 2022 case STATUS_PCIXSPD_66_100:
2023 sc->sc_bus_speed = 100; 2023 sc->sc_bus_speed = 100;
2024 break; 2024 break;
2025 case STATUS_PCIXSPD_100_133: 2025 case STATUS_PCIXSPD_100_133:
2026 sc->sc_bus_speed = 133; 2026 sc->sc_bus_speed = 133;
2027 break; 2027 break;
2028 default: 2028 default:
2029 aprint_error_dev(sc->sc_dev, 2029 aprint_error_dev(sc->sc_dev,
2030 "unknown PCIXSPD %d; assuming 66MHz\n", 2030 "unknown PCIXSPD %d; assuming 66MHz\n",
2031 reg & STATUS_PCIXSPD_MASK); 2031 reg & STATUS_PCIXSPD_MASK);
2032 sc->sc_bus_speed = 66; 2032 sc->sc_bus_speed = 66;
2033 break; 2033 break;
2034 } 2034 }
2035 } else 2035 } else
2036 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33; 2036 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
2037 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n", 2037 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
2038 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed, 2038 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
2039 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI"); 2039 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
2040 } 2040 }
2041 2041
2042 /* clear interesting stat counters */ 2042 /* clear interesting stat counters */
2043 CSR_READ(sc, WMREG_COLC); 2043 CSR_READ(sc, WMREG_COLC);
2044 CSR_READ(sc, WMREG_RXERRC); 2044 CSR_READ(sc, WMREG_RXERRC);
2045 2045
2046 if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583) 2046 if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
2047 || (sc->sc_type >= WM_T_ICH8)) 2047 || (sc->sc_type >= WM_T_ICH8))
2048 sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); 2048 sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2049 if (sc->sc_type >= WM_T_ICH8) 2049 if (sc->sc_type >= WM_T_ICH8)
2050 sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); 2050 sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2051 2051
2052 /* Set PHY, NVM mutex related stuff */ 2052 /* Set PHY, NVM mutex related stuff */
2053 switch (sc->sc_type) { 2053 switch (sc->sc_type) {
2054 case WM_T_82542_2_0: 2054 case WM_T_82542_2_0:
2055 case WM_T_82542_2_1: 2055 case WM_T_82542_2_1:
2056 case WM_T_82543: 2056 case WM_T_82543:
2057 case WM_T_82544: 2057 case WM_T_82544:
2058 /* Microwire */ 2058 /* Microwire */
2059 sc->nvm.read = wm_nvm_read_uwire; 2059 sc->nvm.read = wm_nvm_read_uwire;
2060 sc->sc_nvm_wordsize = 64; 2060 sc->sc_nvm_wordsize = 64;
2061 sc->sc_nvm_addrbits = 6; 2061 sc->sc_nvm_addrbits = 6;
2062 break; 2062 break;
2063 case WM_T_82540: 2063 case WM_T_82540:
2064 case WM_T_82545: 2064 case WM_T_82545:
2065 case WM_T_82545_3: 2065 case WM_T_82545_3:
2066 case WM_T_82546: 2066 case WM_T_82546:
2067 case WM_T_82546_3: 2067 case WM_T_82546_3:
2068 /* Microwire */ 2068 /* Microwire */
2069 sc->nvm.read = wm_nvm_read_uwire; 2069 sc->nvm.read = wm_nvm_read_uwire;
2070 reg = CSR_READ(sc, WMREG_EECD); 2070 reg = CSR_READ(sc, WMREG_EECD);
2071 if (reg & EECD_EE_SIZE) { 2071 if (reg & EECD_EE_SIZE) {
2072 sc->sc_nvm_wordsize = 256; 2072 sc->sc_nvm_wordsize = 256;
2073 sc->sc_nvm_addrbits = 8; 2073 sc->sc_nvm_addrbits = 8;
2074 } else { 2074 } else {
2075 sc->sc_nvm_wordsize = 64; 2075 sc->sc_nvm_wordsize = 64;
2076 sc->sc_nvm_addrbits = 6; 2076 sc->sc_nvm_addrbits = 6;
2077 } 2077 }
2078 sc->sc_flags |= WM_F_LOCK_EECD; 2078 sc->sc_flags |= WM_F_LOCK_EECD;
2079 sc->nvm.acquire = wm_get_eecd; 2079 sc->nvm.acquire = wm_get_eecd;
2080 sc->nvm.release = wm_put_eecd; 2080 sc->nvm.release = wm_put_eecd;
2081 break; 2081 break;
2082 case WM_T_82541: 2082 case WM_T_82541:
2083 case WM_T_82541_2: 2083 case WM_T_82541_2:
2084 case WM_T_82547: 2084 case WM_T_82547:
2085 case WM_T_82547_2: 2085 case WM_T_82547_2:
2086 reg = CSR_READ(sc, WMREG_EECD); 2086 reg = CSR_READ(sc, WMREG_EECD);
2087 /* 2087 /*
 2088 * wm_nvm_set_addrbits_size_eecd() accesses the SPI only 2088 * wm_nvm_set_addrbits_size_eecd() accesses the SPI only
 2089 * on 8254[17], so set the flags and functions before calling it. 2089 * on 8254[17], so set the flags and functions before calling it.
2090 */ 2090 */
2091 sc->sc_flags |= WM_F_LOCK_EECD; 2091 sc->sc_flags |= WM_F_LOCK_EECD;
2092 sc->nvm.acquire = wm_get_eecd; 2092 sc->nvm.acquire = wm_get_eecd;
2093 sc->nvm.release = wm_put_eecd; 2093 sc->nvm.release = wm_put_eecd;
2094 if (reg & EECD_EE_TYPE) { 2094 if (reg & EECD_EE_TYPE) {
2095 /* SPI */ 2095 /* SPI */
2096 sc->nvm.read = wm_nvm_read_spi; 2096 sc->nvm.read = wm_nvm_read_spi;
2097 sc->sc_flags |= WM_F_EEPROM_SPI; 2097 sc->sc_flags |= WM_F_EEPROM_SPI;
2098 wm_nvm_set_addrbits_size_eecd(sc); 2098 wm_nvm_set_addrbits_size_eecd(sc);
2099 } else { 2099 } else {
2100 /* Microwire */ 2100 /* Microwire */
2101 sc->nvm.read = wm_nvm_read_uwire; 2101 sc->nvm.read = wm_nvm_read_uwire;
2102 if ((reg & EECD_EE_ABITS) != 0) { 2102 if ((reg & EECD_EE_ABITS) != 0) {
2103 sc->sc_nvm_wordsize = 256; 2103 sc->sc_nvm_wordsize = 256;
2104 sc->sc_nvm_addrbits = 8; 2104 sc->sc_nvm_addrbits = 8;
2105 } else { 2105 } else {
2106 sc->sc_nvm_wordsize = 64; 2106 sc->sc_nvm_wordsize = 64;
2107 sc->sc_nvm_addrbits = 6; 2107 sc->sc_nvm_addrbits = 6;
2108 } 2108 }
2109 } 2109 }
2110 break; 2110 break;
2111 case WM_T_82571: 2111 case WM_T_82571:
2112 case WM_T_82572: 2112 case WM_T_82572:
2113 /* SPI */ 2113 /* SPI */
2114 sc->nvm.read = wm_nvm_read_eerd; 2114 sc->nvm.read = wm_nvm_read_eerd;
 2115 /* Don't use WM_F_LOCK_EECD because we use EERD */ 2115 /* Don't use WM_F_LOCK_EECD because we use EERD */
2116 sc->sc_flags |= WM_F_EEPROM_SPI; 2116 sc->sc_flags |= WM_F_EEPROM_SPI;
2117 wm_nvm_set_addrbits_size_eecd(sc); 2117 wm_nvm_set_addrbits_size_eecd(sc);
2118 sc->phy.acquire = wm_get_swsm_semaphore; 2118 sc->phy.acquire = wm_get_swsm_semaphore;
2119 sc->phy.release = wm_put_swsm_semaphore; 2119 sc->phy.release = wm_put_swsm_semaphore;
2120 sc->nvm.acquire = wm_get_nvm_82571; 2120 sc->nvm.acquire = wm_get_nvm_82571;
2121 sc->nvm.release = wm_put_nvm_82571; 2121 sc->nvm.release = wm_put_nvm_82571;
2122 break; 2122 break;
2123 case WM_T_82573: 2123 case WM_T_82573:
2124 case WM_T_82574: 2124 case WM_T_82574:
2125 case WM_T_82583: 2125 case WM_T_82583:
2126 sc->nvm.read = wm_nvm_read_eerd; 2126 sc->nvm.read = wm_nvm_read_eerd;
 2127 /* Don't use WM_F_LOCK_EECD because we use EERD */ 2127 /* Don't use WM_F_LOCK_EECD because we use EERD */
2128 if (sc->sc_type == WM_T_82573) { 2128 if (sc->sc_type == WM_T_82573) {
2129 sc->phy.acquire = wm_get_swsm_semaphore; 2129 sc->phy.acquire = wm_get_swsm_semaphore;
2130 sc->phy.release = wm_put_swsm_semaphore; 2130 sc->phy.release = wm_put_swsm_semaphore;
2131 sc->nvm.acquire = wm_get_nvm_82571; 2131 sc->nvm.acquire = wm_get_nvm_82571;
2132 sc->nvm.release = wm_put_nvm_82571; 2132 sc->nvm.release = wm_put_nvm_82571;
2133 } else { 2133 } else {
2134 /* Both PHY and NVM use the same semaphore. */ 2134 /* Both PHY and NVM use the same semaphore. */
2135 sc->phy.acquire = sc->nvm.acquire 2135 sc->phy.acquire = sc->nvm.acquire
2136 = wm_get_swfwhw_semaphore; 2136 = wm_get_swfwhw_semaphore;
2137 sc->phy.release = sc->nvm.release 2137 sc->phy.release = sc->nvm.release
2138 = wm_put_swfwhw_semaphore; 2138 = wm_put_swfwhw_semaphore;
2139 } 2139 }
2140 if (wm_nvm_is_onboard_eeprom(sc) == 0) { 2140 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
2141 sc->sc_flags |= WM_F_EEPROM_FLASH; 2141 sc->sc_flags |= WM_F_EEPROM_FLASH;
2142 sc->sc_nvm_wordsize = 2048; 2142 sc->sc_nvm_wordsize = 2048;
2143 } else { 2143 } else {
2144 /* SPI */ 2144 /* SPI */
2145 sc->sc_flags |= WM_F_EEPROM_SPI; 2145 sc->sc_flags |= WM_F_EEPROM_SPI;
2146 wm_nvm_set_addrbits_size_eecd(sc); 2146 wm_nvm_set_addrbits_size_eecd(sc);
2147 } 2147 }
2148 break; 2148 break;
2149 case WM_T_82575: 2149 case WM_T_82575:
2150 case WM_T_82576: 2150 case WM_T_82576:
2151 case WM_T_82580: 2151 case WM_T_82580:
2152 case WM_T_I350: 2152 case WM_T_I350:
2153 case WM_T_I354: 2153 case WM_T_I354:
2154 case WM_T_80003: 2154 case WM_T_80003:
2155 /* SPI */ 2155 /* SPI */
2156 sc->sc_flags |= WM_F_EEPROM_SPI; 2156 sc->sc_flags |= WM_F_EEPROM_SPI;
2157 wm_nvm_set_addrbits_size_eecd(sc); 2157 wm_nvm_set_addrbits_size_eecd(sc);
 2158 if ((sc->sc_type == WM_T_80003) 2158 if ((sc->sc_type == WM_T_80003)
2159 || (sc->sc_nvm_wordsize < (1 << 15))) { 2159 || (sc->sc_nvm_wordsize < (1 << 15))) {
2160 sc->nvm.read = wm_nvm_read_eerd; 2160 sc->nvm.read = wm_nvm_read_eerd;
2161 /* Don't use WM_F_LOCK_EECD because we use EERD */ 2161 /* Don't use WM_F_LOCK_EECD because we use EERD */
2162 } else { 2162 } else {
2163 sc->nvm.read = wm_nvm_read_spi; 2163 sc->nvm.read = wm_nvm_read_spi;
2164 sc->sc_flags |= WM_F_LOCK_EECD; 2164 sc->sc_flags |= WM_F_LOCK_EECD;
2165 } 2165 }
2166 sc->phy.acquire = wm_get_phy_82575; 2166 sc->phy.acquire = wm_get_phy_82575;
2167 sc->phy.release = wm_put_phy_82575; 2167 sc->phy.release = wm_put_phy_82575;
2168 sc->nvm.acquire = wm_get_nvm_80003;  2168 sc->nvm.acquire = wm_get_nvm_80003;
2169 sc->nvm.release = wm_put_nvm_80003;  2169 sc->nvm.release = wm_put_nvm_80003;
2170 break; 2170 break;
2171 case WM_T_ICH8: 2171 case WM_T_ICH8:
2172 case WM_T_ICH9: 2172 case WM_T_ICH9:
2173 case WM_T_ICH10: 2173 case WM_T_ICH10:
2174 case WM_T_PCH: 2174 case WM_T_PCH:
2175 case WM_T_PCH2: 2175 case WM_T_PCH2:
2176 case WM_T_PCH_LPT: 2176 case WM_T_PCH_LPT:
2177 sc->nvm.read = wm_nvm_read_ich8; 2177 sc->nvm.read = wm_nvm_read_ich8;
2178 /* FLASH */ 2178 /* FLASH */
2179 sc->sc_flags |= WM_F_EEPROM_FLASH; 2179 sc->sc_flags |= WM_F_EEPROM_FLASH;
2180 sc->sc_nvm_wordsize = 2048; 2180 sc->sc_nvm_wordsize = 2048;
 2181 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH); 2181 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
2182 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0, 2182 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
2183 &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) { 2183 &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
2184 aprint_error_dev(sc->sc_dev, 2184 aprint_error_dev(sc->sc_dev,
2185 "can't map FLASH registers\n"); 2185 "can't map FLASH registers\n");
2186 goto out; 2186 goto out;
2187 } 2187 }
2188 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG); 2188 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
2189 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) * 2189 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
2190 ICH_FLASH_SECTOR_SIZE; 2190 ICH_FLASH_SECTOR_SIZE;
2191 sc->sc_ich8_flash_bank_size = 2191 sc->sc_ich8_flash_bank_size =
2192 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1; 2192 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
2193 sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK); 2193 sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
2194 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE; 2194 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
2195 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t); 2195 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
2196 sc->sc_flashreg_offset = 0; 2196 sc->sc_flashreg_offset = 0;
2197 sc->phy.acquire = wm_get_swflag_ich8lan; 2197 sc->phy.acquire = wm_get_swflag_ich8lan;
2198 sc->phy.release = wm_put_swflag_ich8lan; 2198 sc->phy.release = wm_put_swflag_ich8lan;
2199 sc->nvm.acquire = wm_get_nvm_ich8lan; 2199 sc->nvm.acquire = wm_get_nvm_ich8lan;
2200 sc->nvm.release = wm_put_nvm_ich8lan; 2200 sc->nvm.release = wm_put_nvm_ich8lan;
2201 break; 2201 break;
2202 case WM_T_PCH_SPT: 2202 case WM_T_PCH_SPT:
2203 sc->nvm.read = wm_nvm_read_spt; 2203 sc->nvm.read = wm_nvm_read_spt;
2204 /* SPT has no GFPREG; flash registers mapped through BAR0 */ 2204 /* SPT has no GFPREG; flash registers mapped through BAR0 */
2205 sc->sc_flags |= WM_F_EEPROM_FLASH; 2205 sc->sc_flags |= WM_F_EEPROM_FLASH;
2206 sc->sc_flasht = sc->sc_st; 2206 sc->sc_flasht = sc->sc_st;
2207 sc->sc_flashh = sc->sc_sh; 2207 sc->sc_flashh = sc->sc_sh;
2208 sc->sc_ich8_flash_base = 0; 2208 sc->sc_ich8_flash_base = 0;
2209 sc->sc_nvm_wordsize = 2209 sc->sc_nvm_wordsize =
2210 (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1) 2210 (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
2211 * NVM_SIZE_MULTIPLIER; 2211 * NVM_SIZE_MULTIPLIER;
 2212 /* It is the size in bytes; we want words */ 2212 /* It is the size in bytes; we want words */
2213 sc->sc_nvm_wordsize /= 2; 2213 sc->sc_nvm_wordsize /= 2;
2214 /* assume 2 banks */ 2214 /* assume 2 banks */
2215 sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2; 2215 sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
2216 sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET; 2216 sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
2217 sc->phy.acquire = wm_get_swflag_ich8lan; 2217 sc->phy.acquire = wm_get_swflag_ich8lan;
2218 sc->phy.release = wm_put_swflag_ich8lan; 2218 sc->phy.release = wm_put_swflag_ich8lan;
2219 sc->nvm.acquire = wm_get_nvm_ich8lan; 2219 sc->nvm.acquire = wm_get_nvm_ich8lan;
2220 sc->nvm.release = wm_put_nvm_ich8lan; 2220 sc->nvm.release = wm_put_nvm_ich8lan;
2221 break; 2221 break;
2222 case WM_T_I210: 2222 case WM_T_I210:
2223 case WM_T_I211: 2223 case WM_T_I211:
2224 /* Allow a single clear of the SW semaphore on I210 and newer*/ 2224 /* Allow a single clear of the SW semaphore on I210 and newer*/
2225 sc->sc_flags |= WM_F_WA_I210_CLSEM; 2225 sc->sc_flags |= WM_F_WA_I210_CLSEM;
2226 if (wm_nvm_get_flash_presence_i210(sc)) { 2226 if (wm_nvm_get_flash_presence_i210(sc)) {
2227 sc->nvm.read = wm_nvm_read_eerd; 2227 sc->nvm.read = wm_nvm_read_eerd;
2228 /* Don't use WM_F_LOCK_EECD because we use EERD */ 2228 /* Don't use WM_F_LOCK_EECD because we use EERD */
2229 sc->sc_flags |= WM_F_EEPROM_FLASH_HW; 2229 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2230 wm_nvm_set_addrbits_size_eecd(sc); 2230 wm_nvm_set_addrbits_size_eecd(sc);
2231 } else { 2231 } else {
2232 sc->nvm.read = wm_nvm_read_invm; 2232 sc->nvm.read = wm_nvm_read_invm;
2233 sc->sc_flags |= WM_F_EEPROM_INVM; 2233 sc->sc_flags |= WM_F_EEPROM_INVM;
2234 sc->sc_nvm_wordsize = INVM_SIZE; 2234 sc->sc_nvm_wordsize = INVM_SIZE;
2235 } 2235 }
2236 sc->phy.acquire = wm_get_phy_82575; 2236 sc->phy.acquire = wm_get_phy_82575;
2237 sc->phy.release = wm_put_phy_82575; 2237 sc->phy.release = wm_put_phy_82575;
2238 sc->nvm.acquire = wm_get_nvm_80003; 2238 sc->nvm.acquire = wm_get_nvm_80003;
2239 sc->nvm.release = wm_put_nvm_80003; 2239 sc->nvm.release = wm_put_nvm_80003;
2240 break; 2240 break;
2241 default: 2241 default:
2242 break; 2242 break;
2243 } 2243 }
2244 2244
2245 /* Ensure the SMBI bit is clear before first NVM or PHY access */ 2245 /* Ensure the SMBI bit is clear before first NVM or PHY access */
2246 switch (sc->sc_type) { 2246 switch (sc->sc_type) {
2247 case WM_T_82571: 2247 case WM_T_82571:
2248 case WM_T_82572: 2248 case WM_T_82572:
2249 reg = CSR_READ(sc, WMREG_SWSM2); 2249 reg = CSR_READ(sc, WMREG_SWSM2);
2250 if ((reg & SWSM2_LOCK) == 0) { 2250 if ((reg & SWSM2_LOCK) == 0) {
2251 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK); 2251 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2252 force_clear_smbi = true; 2252 force_clear_smbi = true;
2253 } else 2253 } else
2254 force_clear_smbi = false; 2254 force_clear_smbi = false;
2255 break; 2255 break;
2256 case WM_T_82573: 2256 case WM_T_82573:
2257 case WM_T_82574: 2257 case WM_T_82574:
2258 case WM_T_82583: 2258 case WM_T_82583:
2259 force_clear_smbi = true; 2259 force_clear_smbi = true;
2260 break; 2260 break;
2261 default: 2261 default:
2262 force_clear_smbi = false; 2262 force_clear_smbi = false;
2263 break; 2263 break;
2264 } 2264 }
2265 if (force_clear_smbi) { 2265 if (force_clear_smbi) {
2266 reg = CSR_READ(sc, WMREG_SWSM); 2266 reg = CSR_READ(sc, WMREG_SWSM);
2267 if ((reg & SWSM_SMBI) != 0) 2267 if ((reg & SWSM_SMBI) != 0)
2268 aprint_error_dev(sc->sc_dev, 2268 aprint_error_dev(sc->sc_dev,
2269 "Please update the Bootagent\n"); 2269 "Please update the Bootagent\n");
2270 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI); 2270 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2271 } 2271 }
2272 2272
2273 /* 2273 /*
 2274 * Defer printing the EEPROM type until after verifying the checksum. 2274 * Defer printing the EEPROM type until after verifying the checksum.
2275 * This allows the EEPROM type to be printed correctly in the case 2275 * This allows the EEPROM type to be printed correctly in the case
2276 * that no EEPROM is attached. 2276 * that no EEPROM is attached.
2277 */ 2277 */
2278 /* 2278 /*
2279 * Validate the EEPROM checksum. If the checksum fails, flag 2279 * Validate the EEPROM checksum. If the checksum fails, flag
2280 * this for later, so we can fail future reads from the EEPROM. 2280 * this for later, so we can fail future reads from the EEPROM.
2281 */ 2281 */
2282 if (wm_nvm_validate_checksum(sc)) { 2282 if (wm_nvm_validate_checksum(sc)) {
2283 /* 2283 /*
2284 * Read twice again because some PCI-e parts fail the 2284 * Read twice again because some PCI-e parts fail the
2285 * first check due to the link being in sleep state. 2285 * first check due to the link being in sleep state.
2286 */ 2286 */
2287 if (wm_nvm_validate_checksum(sc)) 2287 if (wm_nvm_validate_checksum(sc))
2288 sc->sc_flags |= WM_F_EEPROM_INVALID; 2288 sc->sc_flags |= WM_F_EEPROM_INVALID;
2289 } 2289 }
2290 2290
2291 if (sc->sc_flags & WM_F_EEPROM_INVALID) 2291 if (sc->sc_flags & WM_F_EEPROM_INVALID)
2292 aprint_verbose_dev(sc->sc_dev, "No EEPROM"); 2292 aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2293 else { 2293 else {
2294 aprint_verbose_dev(sc->sc_dev, "%u words ", 2294 aprint_verbose_dev(sc->sc_dev, "%u words ",
2295 sc->sc_nvm_wordsize); 2295 sc->sc_nvm_wordsize);
2296 if (sc->sc_flags & WM_F_EEPROM_INVM) 2296 if (sc->sc_flags & WM_F_EEPROM_INVM)
2297 aprint_verbose("iNVM"); 2297 aprint_verbose("iNVM");
2298 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) 2298 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2299 aprint_verbose("FLASH(HW)"); 2299 aprint_verbose("FLASH(HW)");
2300 else if (sc->sc_flags & WM_F_EEPROM_FLASH) 2300 else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2301 aprint_verbose("FLASH"); 2301 aprint_verbose("FLASH");
2302 else { 2302 else {
2303 if (sc->sc_flags & WM_F_EEPROM_SPI) 2303 if (sc->sc_flags & WM_F_EEPROM_SPI)
2304 eetype = "SPI"; 2304 eetype = "SPI";
2305 else 2305 else
2306 eetype = "MicroWire"; 2306 eetype = "MicroWire";
2307 aprint_verbose("(%d address bits) %s EEPROM", 2307 aprint_verbose("(%d address bits) %s EEPROM",
2308 sc->sc_nvm_addrbits, eetype); 2308 sc->sc_nvm_addrbits, eetype);
2309 } 2309 }
2310 } 2310 }
2311 wm_nvm_version(sc); 2311 wm_nvm_version(sc);
2312 aprint_verbose("\n"); 2312 aprint_verbose("\n");
2313 2313
2314 /* 2314 /*
 2315 * XXX This is the first call to wm_gmii_setup_phytype. The result 2315 * XXX This is the first call to wm_gmii_setup_phytype. The result
 2316 * might be incorrect. 2316 * might be incorrect.
2317 */ 2317 */
2318 wm_gmii_setup_phytype(sc, 0, 0); 2318 wm_gmii_setup_phytype(sc, 0, 0);
2319 2319
2320 /* Reset the chip to a known state. */ 2320 /* Reset the chip to a known state. */
2321 wm_reset(sc); 2321 wm_reset(sc);
2322 2322
2323 /* Check for I21[01] PLL workaround */ 2323 /* Check for I21[01] PLL workaround */
2324 if (sc->sc_type == WM_T_I210) 2324 if (sc->sc_type == WM_T_I210)
2325 sc->sc_flags |= WM_F_PLL_WA_I210; 2325 sc->sc_flags |= WM_F_PLL_WA_I210;
2326 if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) { 2326 if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
2327 /* NVM image release 3.25 has a workaround */ 2327 /* NVM image release 3.25 has a workaround */
2328 if ((sc->sc_nvm_ver_major < 3) 2328 if ((sc->sc_nvm_ver_major < 3)
2329 || ((sc->sc_nvm_ver_major == 3) 2329 || ((sc->sc_nvm_ver_major == 3)
2330 && (sc->sc_nvm_ver_minor < 25))) { 2330 && (sc->sc_nvm_ver_minor < 25))) {
2331 aprint_verbose_dev(sc->sc_dev, 2331 aprint_verbose_dev(sc->sc_dev,
2332 "ROM image version %d.%d is older than 3.25\n", 2332 "ROM image version %d.%d is older than 3.25\n",
2333 sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor); 2333 sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2334 sc->sc_flags |= WM_F_PLL_WA_I210; 2334 sc->sc_flags |= WM_F_PLL_WA_I210;
2335 } 2335 }
2336 } 2336 }
2337 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0) 2337 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2338 wm_pll_workaround_i210(sc); 2338 wm_pll_workaround_i210(sc);
2339 2339
2340 wm_get_wakeup(sc); 2340 wm_get_wakeup(sc);
2341 2341
2342 /* Non-AMT based hardware can now take control from firmware */ 2342 /* Non-AMT based hardware can now take control from firmware */
2343 if ((sc->sc_flags & WM_F_HAS_AMT) == 0) 2343 if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2344 wm_get_hw_control(sc); 2344 wm_get_hw_control(sc);
2345 2345
2346 /* 2346 /*
2347 * Read the Ethernet address from the EEPROM, if not first found 2347 * Read the Ethernet address from the EEPROM, if not first found
2348 * in device properties. 2348 * in device properties.
2349 */ 2349 */
2350 ea = prop_dictionary_get(dict, "mac-address"); 2350 ea = prop_dictionary_get(dict, "mac-address");
2351 if (ea != NULL) { 2351 if (ea != NULL) {
2352 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA); 2352 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2353 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN); 2353 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2354 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN); 2354 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
2355 } else { 2355 } else {
2356 if (wm_read_mac_addr(sc, enaddr) != 0) { 2356 if (wm_read_mac_addr(sc, enaddr) != 0) {
2357 aprint_error_dev(sc->sc_dev, 2357 aprint_error_dev(sc->sc_dev,
2358 "unable to read Ethernet address\n"); 2358 "unable to read Ethernet address\n");
2359 goto out; 2359 goto out;
2360 } 2360 }
2361 } 2361 }
2362 2362
2363 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n", 2363 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2364 ether_sprintf(enaddr)); 2364 ether_sprintf(enaddr));
2365 2365
2366 /* 2366 /*
2367 * Read the config info from the EEPROM, and set up various 2367 * Read the config info from the EEPROM, and set up various
2368 * bits in the control registers based on their contents. 2368 * bits in the control registers based on their contents.
2369 */ 2369 */
2370 pn = prop_dictionary_get(dict, "i82543-cfg1"); 2370 pn = prop_dictionary_get(dict, "i82543-cfg1");
2371 if (pn != NULL) { 2371 if (pn != NULL) {
2372 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER); 2372 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2373 cfg1 = (uint16_t) prop_number_integer_value(pn); 2373 cfg1 = (uint16_t) prop_number_integer_value(pn);
2374 } else { 2374 } else {
2375 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) { 2375 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2376 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n"); 2376 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2377 goto out; 2377 goto out;
2378 } 2378 }
2379 } 2379 }
2380 2380
2381 pn = prop_dictionary_get(dict, "i82543-cfg2"); 2381 pn = prop_dictionary_get(dict, "i82543-cfg2");
2382 if (pn != NULL) { 2382 if (pn != NULL) {
2383 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER); 2383 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2384 cfg2 = (uint16_t) prop_number_integer_value(pn); 2384 cfg2 = (uint16_t) prop_number_integer_value(pn);
2385 } else { 2385 } else {
2386 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) { 2386 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2387 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n"); 2387 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2388 goto out; 2388 goto out;
2389 } 2389 }
2390 } 2390 }
2391 2391
2392 /* check for WM_F_WOL */ 2392 /* check for WM_F_WOL */
2393 switch (sc->sc_type) { 2393 switch (sc->sc_type) {
2394 case WM_T_82542_2_0: 2394 case WM_T_82542_2_0:
2395 case WM_T_82542_2_1: 2395 case WM_T_82542_2_1:
2396 case WM_T_82543: 2396 case WM_T_82543:
2397 /* dummy? */ 2397 /* dummy? */
2398 eeprom_data = 0; 2398 eeprom_data = 0;
2399 apme_mask = NVM_CFG3_APME; 2399 apme_mask = NVM_CFG3_APME;
2400 break; 2400 break;
2401 case WM_T_82544: 2401 case WM_T_82544:
2402 apme_mask = NVM_CFG2_82544_APM_EN; 2402 apme_mask = NVM_CFG2_82544_APM_EN;
2403 eeprom_data = cfg2; 2403 eeprom_data = cfg2;
2404 break; 2404 break;
2405 case WM_T_82546: 2405 case WM_T_82546:
2406 case WM_T_82546_3: 2406 case WM_T_82546_3:
2407 case WM_T_82571: 2407 case WM_T_82571:
2408 case WM_T_82572: 2408 case WM_T_82572:
2409 case WM_T_82573: 2409 case WM_T_82573:
2410 case WM_T_82574: 2410 case WM_T_82574:
2411 case WM_T_82583: 2411 case WM_T_82583:
2412 case WM_T_80003: 2412 case WM_T_80003:
2413 default: 2413 default:
2414 apme_mask = NVM_CFG3_APME; 2414 apme_mask = NVM_CFG3_APME;
2415 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB 2415 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2416 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data); 2416 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2417 break; 2417 break;
2418 case WM_T_82575: 2418 case WM_T_82575:
2419 case WM_T_82576: 2419 case WM_T_82576:
2420 case WM_T_82580: 2420 case WM_T_82580:
2421 case WM_T_I350: 2421 case WM_T_I350:
2422 case WM_T_I354: /* XXX ok? */ 2422 case WM_T_I354: /* XXX ok? */
2423 case WM_T_ICH8: 2423 case WM_T_ICH8:
2424 case WM_T_ICH9: 2424 case WM_T_ICH9:
2425 case WM_T_ICH10: 2425 case WM_T_ICH10:
2426 case WM_T_PCH: 2426 case WM_T_PCH:
2427 case WM_T_PCH2: 2427 case WM_T_PCH2:
2428 case WM_T_PCH_LPT: 2428 case WM_T_PCH_LPT:
2429 case WM_T_PCH_SPT: 2429 case WM_T_PCH_SPT:
2430 /* XXX The funcid should be checked on some devices */ 2430 /* XXX The funcid should be checked on some devices */
2431 apme_mask = WUC_APME; 2431 apme_mask = WUC_APME;
2432 eeprom_data = CSR_READ(sc, WMREG_WUC); 2432 eeprom_data = CSR_READ(sc, WMREG_WUC);
2433 break; 2433 break;
2434 } 2434 }
2435 2435
 2436 /* Check for the WM_F_WOL flag after reading the EEPROM settings */ 2436 /* Check for the WM_F_WOL flag after reading the EEPROM settings */
2437 if ((eeprom_data & apme_mask) != 0) 2437 if ((eeprom_data & apme_mask) != 0)
2438 sc->sc_flags |= WM_F_WOL; 2438 sc->sc_flags |= WM_F_WOL;
2439 2439
2440 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) { 2440 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
2441 /* Check NVM for autonegotiation */ 2441 /* Check NVM for autonegotiation */
2442 if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) { 2442 if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2443 if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0) 2443 if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
2444 sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO; 2444 sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2445 } 2445 }
2446 } 2446 }
2447 2447
2448 /* 2448 /*
 2449 * XXX need special handling for some multiple-port cards 2449 * XXX need special handling for some multiple-port cards
 2450 * to disable a particular port. 2450 * to disable a particular port.
2451 */ 2451 */
2452 2452
2453 if (sc->sc_type >= WM_T_82544) { 2453 if (sc->sc_type >= WM_T_82544) {
2454 pn = prop_dictionary_get(dict, "i82543-swdpin"); 2454 pn = prop_dictionary_get(dict, "i82543-swdpin");
2455 if (pn != NULL) { 2455 if (pn != NULL) {
2456 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER); 2456 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2457 swdpin = (uint16_t) prop_number_integer_value(pn); 2457 swdpin = (uint16_t) prop_number_integer_value(pn);
2458 } else { 2458 } else {
2459 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) { 2459 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2460 aprint_error_dev(sc->sc_dev, 2460 aprint_error_dev(sc->sc_dev,
2461 "unable to read SWDPIN\n"); 2461 "unable to read SWDPIN\n");
2462 goto out; 2462 goto out;
2463 } 2463 }
2464 } 2464 }
2465 } 2465 }
2466 2466
2467 if (cfg1 & NVM_CFG1_ILOS) 2467 if (cfg1 & NVM_CFG1_ILOS)
2468 sc->sc_ctrl |= CTRL_ILOS; 2468 sc->sc_ctrl |= CTRL_ILOS;
2469 2469
2470 /* 2470 /*
2471 * XXX 2471 * XXX
 2472 * This code isn't correct because pins 2 and 3 are located 2472 * This code isn't correct because pins 2 and 3 are located
 2473 * in different positions on newer chips. Check all datasheets. 2473 * in different positions on newer chips. Check all datasheets.
 2474 * 2474 *
 2475 * Until this problem is resolved, check if the chip is < 82580. 2475 * Until this problem is resolved, check if the chip is < 82580.
2476 */ 2476 */
2477 if (sc->sc_type <= WM_T_82580) { 2477 if (sc->sc_type <= WM_T_82580) {
2478 if (sc->sc_type >= WM_T_82544) { 2478 if (sc->sc_type >= WM_T_82544) {
2479 sc->sc_ctrl |= 2479 sc->sc_ctrl |=
2480 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) << 2480 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2481 CTRL_SWDPIO_SHIFT; 2481 CTRL_SWDPIO_SHIFT;
2482 sc->sc_ctrl |= 2482 sc->sc_ctrl |=
2483 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) << 2483 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2484 CTRL_SWDPINS_SHIFT; 2484 CTRL_SWDPINS_SHIFT;
2485 } else { 2485 } else {
2486 sc->sc_ctrl |= 2486 sc->sc_ctrl |=
2487 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) << 2487 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2488 CTRL_SWDPIO_SHIFT; 2488 CTRL_SWDPIO_SHIFT;
2489 } 2489 }
2490 } 2490 }
2491 2491
2492 /* XXX For other than 82580? */ 2492 /* XXX For other than 82580? */
2493 if (sc->sc_type == WM_T_82580) { 2493 if (sc->sc_type == WM_T_82580) {
2494 wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword); 2494 wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
2495 if (nvmword & __BIT(13)) 2495 if (nvmword & __BIT(13))
2496 sc->sc_ctrl |= CTRL_ILOS; 2496 sc->sc_ctrl |= CTRL_ILOS;
2497 } 2497 }
2498 2498
2499#if 0 2499#if 0
2500 if (sc->sc_type >= WM_T_82544) { 2500 if (sc->sc_type >= WM_T_82544) {
2501 if (cfg1 & NVM_CFG1_IPS0) 2501 if (cfg1 & NVM_CFG1_IPS0)
2502 sc->sc_ctrl_ext |= CTRL_EXT_IPS; 2502 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2503 if (cfg1 & NVM_CFG1_IPS1) 2503 if (cfg1 & NVM_CFG1_IPS1)
2504 sc->sc_ctrl_ext |= CTRL_EXT_IPS1; 2504 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2505 sc->sc_ctrl_ext |= 2505 sc->sc_ctrl_ext |=
2506 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) << 2506 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2507 CTRL_EXT_SWDPIO_SHIFT; 2507 CTRL_EXT_SWDPIO_SHIFT;
2508 sc->sc_ctrl_ext |= 2508 sc->sc_ctrl_ext |=
2509 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) << 2509 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2510 CTRL_EXT_SWDPINS_SHIFT; 2510 CTRL_EXT_SWDPINS_SHIFT;
2511 } else { 2511 } else {
2512 sc->sc_ctrl_ext |= 2512 sc->sc_ctrl_ext |=
2513 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) << 2513 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2514 CTRL_EXT_SWDPIO_SHIFT; 2514 CTRL_EXT_SWDPIO_SHIFT;
2515 } 2515 }
2516#endif 2516#endif
2517 2517
2518 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 2518 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2519#if 0 2519#if 0
2520 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext); 2520 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2521#endif 2521#endif
2522 2522
2523 if (sc->sc_type == WM_T_PCH) { 2523 if (sc->sc_type == WM_T_PCH) {
2524 uint16_t val; 2524 uint16_t val;
2525 2525
2526 /* Save the NVM K1 bit setting */ 2526 /* Save the NVM K1 bit setting */
2527 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val); 2527 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2528 2528
2529 if ((val & NVM_K1_CONFIG_ENABLE) != 0) 2529 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2530 sc->sc_nvm_k1_enabled = 1; 2530 sc->sc_nvm_k1_enabled = 1;
2531 else 2531 else
2532 sc->sc_nvm_k1_enabled = 0; 2532 sc->sc_nvm_k1_enabled = 0;
2533 } 2533 }
2534 2534
2535 /* Determine if we're GMII, TBI, SERDES or SGMII mode */ 2535 /* Determine if we're GMII, TBI, SERDES or SGMII mode */
2536 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9 2536 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2537 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH 2537 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2538 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT 2538 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2539 || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573 2539 || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
2540 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) { 2540 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2541 /* Copper only */ 2541 /* Copper only */
2542 } else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576) 2542 } else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
 2543 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350) 2543 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
 2544 || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210) 2544 || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
 2545 || (sc->sc_type == WM_T_I211)) { 2545 || (sc->sc_type == WM_T_I211)) {
2546 reg = CSR_READ(sc, WMREG_CTRL_EXT); 2546 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2547 link_mode = reg & CTRL_EXT_LINK_MODE_MASK; 2547 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2548 switch (link_mode) { 2548 switch (link_mode) {
2549 case CTRL_EXT_LINK_MODE_1000KX: 2549 case CTRL_EXT_LINK_MODE_1000KX:
2550 aprint_verbose_dev(sc->sc_dev, "1000KX\n"); 2550 aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2551 sc->sc_mediatype = WM_MEDIATYPE_SERDES; 2551 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2552 break; 2552 break;
2553 case CTRL_EXT_LINK_MODE_SGMII: 2553 case CTRL_EXT_LINK_MODE_SGMII:
2554 if (wm_sgmii_uses_mdio(sc)) { 2554 if (wm_sgmii_uses_mdio(sc)) {
2555 aprint_verbose_dev(sc->sc_dev, 2555 aprint_verbose_dev(sc->sc_dev,
2556 "SGMII(MDIO)\n"); 2556 "SGMII(MDIO)\n");
2557 sc->sc_flags |= WM_F_SGMII; 2557 sc->sc_flags |= WM_F_SGMII;
2558 sc->sc_mediatype = WM_MEDIATYPE_COPPER; 2558 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2559 break; 2559 break;
2560 } 2560 }
2561 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n"); 2561 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2562 /*FALLTHROUGH*/ 2562 /*FALLTHROUGH*/
2563 case CTRL_EXT_LINK_MODE_PCIE_SERDES: 2563 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2564 sc->sc_mediatype = wm_sfp_get_media_type(sc); 2564 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2565 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) { 2565 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2566 if (link_mode 2566 if (link_mode
2567 == CTRL_EXT_LINK_MODE_SGMII) { 2567 == CTRL_EXT_LINK_MODE_SGMII) {
2568 sc->sc_mediatype = WM_MEDIATYPE_COPPER; 2568 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2569 sc->sc_flags |= WM_F_SGMII; 2569 sc->sc_flags |= WM_F_SGMII;
2570 } else { 2570 } else {
2571 sc->sc_mediatype = WM_MEDIATYPE_SERDES; 2571 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2572 aprint_verbose_dev(sc->sc_dev, 2572 aprint_verbose_dev(sc->sc_dev,
2573 "SERDES\n"); 2573 "SERDES\n");
2574 } 2574 }
2575 break; 2575 break;
2576 } 2576 }
2577 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) 2577 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2578 aprint_verbose_dev(sc->sc_dev, "SERDES\n"); 2578 aprint_verbose_dev(sc->sc_dev, "SERDES\n");
2579 2579
2580 /* Change current link mode setting */ 2580 /* Change current link mode setting */
2581 reg &= ~CTRL_EXT_LINK_MODE_MASK; 2581 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2582 switch (sc->sc_mediatype) { 2582 switch (sc->sc_mediatype) {
2583 case WM_MEDIATYPE_COPPER: 2583 case WM_MEDIATYPE_COPPER:
2584 reg |= CTRL_EXT_LINK_MODE_SGMII; 2584 reg |= CTRL_EXT_LINK_MODE_SGMII;
2585 break; 2585 break;
2586 case WM_MEDIATYPE_SERDES: 2586 case WM_MEDIATYPE_SERDES:
2587 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES; 2587 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2588 break; 2588 break;
2589 default: 2589 default:
2590 break; 2590 break;
2591 } 2591 }
2592 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 2592 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2593 break; 2593 break;
2594 case CTRL_EXT_LINK_MODE_GMII: 2594 case CTRL_EXT_LINK_MODE_GMII:
2595 default: 2595 default:
2596 aprint_verbose_dev(sc->sc_dev, "Copper\n"); 2596 aprint_verbose_dev(sc->sc_dev, "Copper\n");
2597 sc->sc_mediatype = WM_MEDIATYPE_COPPER; 2597 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2598 break; 2598 break;
2599 } 2599 }
2600 2600
2601 reg &= ~CTRL_EXT_I2C_ENA; 2601 reg &= ~CTRL_EXT_I2C_ENA;
2602 if ((sc->sc_flags & WM_F_SGMII) != 0) 2602 if ((sc->sc_flags & WM_F_SGMII) != 0)
2603 reg |= CTRL_EXT_I2C_ENA; 2603 reg |= CTRL_EXT_I2C_ENA;
2604 else 2604 else
2605 reg &= ~CTRL_EXT_I2C_ENA; 2605 reg &= ~CTRL_EXT_I2C_ENA;
2606 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 2606 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2607 } else if (sc->sc_type < WM_T_82543 || 2607 } else if (sc->sc_type < WM_T_82543 ||
2608 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) { 2608 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2609 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) { 2609 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2610 aprint_error_dev(sc->sc_dev, 2610 aprint_error_dev(sc->sc_dev,
2611 "WARNING: TBIMODE set on 1000BASE-T product!\n"); 2611 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2612 sc->sc_mediatype = WM_MEDIATYPE_FIBER; 2612 sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2613 } 2613 }
2614 } else { 2614 } else {
2615 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) { 2615 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
2616 aprint_error_dev(sc->sc_dev, 2616 aprint_error_dev(sc->sc_dev,
2617 "WARNING: TBIMODE clear on 1000BASE-X product!\n"); 2617 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2618 sc->sc_mediatype = WM_MEDIATYPE_COPPER; 2618 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2619 } 2619 }
2620 } 2620 }
2621 snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags); 2621 snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
2622 aprint_verbose_dev(sc->sc_dev, "%s\n", buf); 2622 aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
2623 2623
2624 /* Set device properties (macflags) */ 2624 /* Set device properties (macflags) */
2625 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags); 2625 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
2626 2626
2627 /* Initialize the media structures accordingly. */ 2627 /* Initialize the media structures accordingly. */
2628 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) 2628 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2629 wm_gmii_mediainit(sc, wmp->wmp_product); 2629 wm_gmii_mediainit(sc, wmp->wmp_product);
2630 else 2630 else
2631 wm_tbi_mediainit(sc); /* All others */ 2631 wm_tbi_mediainit(sc); /* All others */
2632 2632
2633 ifp = &sc->sc_ethercom.ec_if; 2633 ifp = &sc->sc_ethercom.ec_if;
2634 xname = device_xname(sc->sc_dev); 2634 xname = device_xname(sc->sc_dev);
2635 strlcpy(ifp->if_xname, xname, IFNAMSIZ); 2635 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2636 ifp->if_softc = sc; 2636 ifp->if_softc = sc;
2637 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2637 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2638#ifdef WM_MPSAFE 2638#ifdef WM_MPSAFE
2639 ifp->if_extflags = IFEF_MPSAFE; 2639 ifp->if_extflags = IFEF_MPSAFE;
2640#endif 2640#endif
2641 ifp->if_ioctl = wm_ioctl; 2641 ifp->if_ioctl = wm_ioctl;
2642 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { 2642 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
2643 ifp->if_start = wm_nq_start; 2643 ifp->if_start = wm_nq_start;
2644 /* 2644 /*
2645 * When the number of CPUs is one and the controller can use 2645 * When the number of CPUs is one and the controller can use
 2646 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue. 2646 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
 2647 * That is, wm(4) uses two interrupts: one is used for Tx/Rx 2647 * That is, wm(4) uses two interrupts: one is used for Tx/Rx
 2648 * and the other is used for link status changes. 2648 * and the other is used for link status changes.
2649 * In this situation, wm_nq_transmit() is disadvantageous 2649 * In this situation, wm_nq_transmit() is disadvantageous
2650 * because of wm_select_txqueue() and pcq(9) overhead. 2650 * because of wm_select_txqueue() and pcq(9) overhead.
2651 */ 2651 */
2652 if (wm_is_using_multiqueue(sc)) 2652 if (wm_is_using_multiqueue(sc))
2653 ifp->if_transmit = wm_nq_transmit; 2653 ifp->if_transmit = wm_nq_transmit;
2654 } else { 2654 } else {
2655 ifp->if_start = wm_start; 2655 ifp->if_start = wm_start;
2656 /* 2656 /*
 2657 * wm_transmit() has the same disadvantage as wm_nq_transmit(). 2657 * wm_transmit() has the same disadvantage as wm_nq_transmit().
2658 */ 2658 */
2659 if (wm_is_using_multiqueue(sc)) 2659 if (wm_is_using_multiqueue(sc))
2660 ifp->if_transmit = wm_transmit; 2660 ifp->if_transmit = wm_transmit;
2661 } 2661 }
2662 ifp->if_watchdog = wm_watchdog; 2662 ifp->if_watchdog = wm_watchdog;
2663 ifp->if_init = wm_init; 2663 ifp->if_init = wm_init;
2664 ifp->if_stop = wm_stop; 2664 ifp->if_stop = wm_stop;
2665 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN)); 2665 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2666 IFQ_SET_READY(&ifp->if_snd); 2666 IFQ_SET_READY(&ifp->if_snd);
2667 2667
2668 /* Check for jumbo frame */ 2668 /* Check for jumbo frame */
2669 switch (sc->sc_type) { 2669 switch (sc->sc_type) {
2670 case WM_T_82573: 2670 case WM_T_82573:
2671 /* XXX limited to 9234 if ASPM is disabled */ 2671 /* XXX limited to 9234 if ASPM is disabled */
2672 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword); 2672 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2673 if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0) 2673 if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2674 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 2674 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2675 break; 2675 break;
2676 case WM_T_82571: 2676 case WM_T_82571:
2677 case WM_T_82572: 2677 case WM_T_82572:
2678 case WM_T_82574: 2678 case WM_T_82574:
 2679 case WM_T_82583:
2679 case WM_T_82575: 2680 case WM_T_82575:
2680 case WM_T_82576: 2681 case WM_T_82576:
2681 case WM_T_82580: 2682 case WM_T_82580:
2682 case WM_T_I350: 2683 case WM_T_I350:
2683 case WM_T_I354: /* XXXX ok? */ 2684 case WM_T_I354:
2684 case WM_T_I210: 2685 case WM_T_I210:
2685 case WM_T_I211: 2686 case WM_T_I211:
2686 case WM_T_80003: 2687 case WM_T_80003:
2687 case WM_T_ICH9: 2688 case WM_T_ICH9:
2688 case WM_T_ICH10: 2689 case WM_T_ICH10:
2689 case WM_T_PCH2: /* PCH2 supports 9K frame size */ 2690 case WM_T_PCH2: /* PCH2 supports 9K frame size */
2690 case WM_T_PCH_LPT: 2691 case WM_T_PCH_LPT:
2691 case WM_T_PCH_SPT: 2692 case WM_T_PCH_SPT:
2692 /* XXX limited to 9234 */ 2693 /* XXX limited to 9234 */
2693 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 2694 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2694 break; 2695 break;
2695 case WM_T_PCH: 2696 case WM_T_PCH:
2696 /* XXX limited to 4096 */ 2697 /* XXX limited to 4096 */
2697 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 2698 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2698 break; 2699 break;
2699 case WM_T_82542_2_0: 2700 case WM_T_82542_2_0:
2700 case WM_T_82542_2_1: 2701 case WM_T_82542_2_1:
2701 case WM_T_82583: 
2702 case WM_T_ICH8: 2702 case WM_T_ICH8:
2703 /* No support for jumbo frame */ 2703 /* No support for jumbo frame */
2704 break; 2704 break;
2705 default: 2705 default:
2706 /* ETHER_MAX_LEN_JUMBO */ 2706 /* ETHER_MAX_LEN_JUMBO */
2707 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 2707 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2708 break; 2708 break;
2709 } 2709 }
2710 2710
 2711 /* If we're an i82543 or greater, we can support VLANs. */ 2711 /* If we're an i82543 or greater, we can support VLANs. */
2712 if (sc->sc_type >= WM_T_82543) 2712 if (sc->sc_type >= WM_T_82543)
2713 sc->sc_ethercom.ec_capabilities |= 2713 sc->sc_ethercom.ec_capabilities |=
2714 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING; 2714 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2715 2715
2716 /* 2716 /*
 2717 * We can perform TCPv4 and UDPv4 checksums in-bound, but only 2717 * We can perform TCPv4 and UDPv4 checksums in-bound, but only
 2718 * on i82543 and later. 2718 * on i82543 and later.
2719 */ 2719 */
2720 if (sc->sc_type >= WM_T_82543) { 2720 if (sc->sc_type >= WM_T_82543) {
2721 ifp->if_capabilities |= 2721 ifp->if_capabilities |=
2722 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 2722 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2723 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 2723 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2724 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx | 2724 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2725 IFCAP_CSUM_TCPv6_Tx | 2725 IFCAP_CSUM_TCPv6_Tx |
2726 IFCAP_CSUM_UDPv6_Tx; 2726 IFCAP_CSUM_UDPv6_Tx;
2727 } 2727 }
2728 2728
2729 /* 2729 /*
 2730 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL. 2730 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
2731 * 2731 *
2732 * 82541GI (8086:1076) ... no 2732 * 82541GI (8086:1076) ... no
2733 * 82572EI (8086:10b9) ... yes 2733 * 82572EI (8086:10b9) ... yes
2734 */ 2734 */
2735 if (sc->sc_type >= WM_T_82571) { 2735 if (sc->sc_type >= WM_T_82571) {
2736 ifp->if_capabilities |= 2736 ifp->if_capabilities |=
2737 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx; 2737 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2738 } 2738 }
2739 2739
2740 /* 2740 /*
 2741 * If we're an i82544 or greater (except i82547), we can do 2741 * If we're an i82544 or greater (except i82547), we can do
2742 * TCP segmentation offload. 2742 * TCP segmentation offload.
2743 */ 2743 */
2744 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) { 2744 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2745 ifp->if_capabilities |= IFCAP_TSOv4; 2745 ifp->if_capabilities |= IFCAP_TSOv4;
2746 } 2746 }
2747 2747
2748 if (sc->sc_type >= WM_T_82571) { 2748 if (sc->sc_type >= WM_T_82571) {
2749 ifp->if_capabilities |= IFCAP_TSOv6; 2749 ifp->if_capabilities |= IFCAP_TSOv6;
2750 } 2750 }
2751 2751
2752 sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT; 2752 sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
2753 sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT; 2753 sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
2754 2754
2755#ifdef WM_MPSAFE 2755#ifdef WM_MPSAFE
2756 sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); 2756 sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2757#else 2757#else
2758 sc->sc_core_lock = NULL; 2758 sc->sc_core_lock = NULL;
2759#endif 2759#endif
2760 2760
2761 /* Attach the interface. */ 2761 /* Attach the interface. */
2762 error = if_initialize(ifp); 2762 error = if_initialize(ifp);
2763 if (error != 0) { 2763 if (error != 0) {
2764 aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n", 2764 aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
2765 error); 2765 error);
2766 return; /* Error */ 2766 return; /* Error */
2767 } 2767 }
2768 sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if); 2768 sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
2769 ether_ifattach(ifp, enaddr); 2769 ether_ifattach(ifp, enaddr);
2770 if_register(ifp); 2770 if_register(ifp);
2771 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb); 2771 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2772 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 2772 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2773 RND_FLAG_DEFAULT); 2773 RND_FLAG_DEFAULT);
2774 2774
2775#ifdef WM_EVENT_COUNTERS 2775#ifdef WM_EVENT_COUNTERS
2776 /* Attach event counters. */ 2776 /* Attach event counters. */
2777 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR, 2777 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2778 NULL, xname, "linkintr"); 2778 NULL, xname, "linkintr");
2779 2779
2780 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC, 2780 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2781 NULL, xname, "tx_xoff"); 2781 NULL, xname, "tx_xoff");
2782 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC, 2782 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2783 NULL, xname, "tx_xon"); 2783 NULL, xname, "tx_xon");
2784 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC, 2784 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2785 NULL, xname, "rx_xoff"); 2785 NULL, xname, "rx_xoff");
2786 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC, 2786 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2787 NULL, xname, "rx_xon"); 2787 NULL, xname, "rx_xon");
2788 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC, 2788 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2789 NULL, xname, "rx_macctl"); 2789 NULL, xname, "rx_macctl");
2790#endif /* WM_EVENT_COUNTERS */ 2790#endif /* WM_EVENT_COUNTERS */
2791 2791
2792 if (pmf_device_register(self, wm_suspend, wm_resume)) 2792 if (pmf_device_register(self, wm_suspend, wm_resume))
2793 pmf_class_network_register(self, ifp); 2793 pmf_class_network_register(self, ifp);
2794 else 2794 else
2795 aprint_error_dev(self, "couldn't establish power handler\n"); 2795 aprint_error_dev(self, "couldn't establish power handler\n");
2796 2796
2797 sc->sc_flags |= WM_F_ATTACHED; 2797 sc->sc_flags |= WM_F_ATTACHED;
2798 out: 2798 out:
2799 return; 2799 return;
2800} 2800}
2801 2801
2802/* The detach function (ca_detach) */ 2802/* The detach function (ca_detach) */
2803static int 2803static int
2804wm_detach(device_t self, int flags __unused) 2804wm_detach(device_t self, int flags __unused)
2805{ 2805{
2806 struct wm_softc *sc = device_private(self); 2806 struct wm_softc *sc = device_private(self);
2807 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2807 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2808 int i; 2808 int i;
2809 2809
2810 if ((sc->sc_flags & WM_F_ATTACHED) == 0) 2810 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2811 return 0; 2811 return 0;
2812 2812
2813 /* Stop the interface. Callouts are stopped in it. */ 2813 /* Stop the interface. Callouts are stopped in it. */
2814 wm_stop(ifp, 1); 2814 wm_stop(ifp, 1);
2815 2815
2816 pmf_device_deregister(self); 2816 pmf_device_deregister(self);
2817 2817
2818#ifdef WM_EVENT_COUNTERS 2818#ifdef WM_EVENT_COUNTERS
2819 evcnt_detach(&sc->sc_ev_linkintr); 2819 evcnt_detach(&sc->sc_ev_linkintr);
2820 2820
2821 evcnt_detach(&sc->sc_ev_tx_xoff); 2821 evcnt_detach(&sc->sc_ev_tx_xoff);
2822 evcnt_detach(&sc->sc_ev_tx_xon); 2822 evcnt_detach(&sc->sc_ev_tx_xon);
2823 evcnt_detach(&sc->sc_ev_rx_xoff); 2823 evcnt_detach(&sc->sc_ev_rx_xoff);
2824 evcnt_detach(&sc->sc_ev_rx_xon); 2824 evcnt_detach(&sc->sc_ev_rx_xon);
2825 evcnt_detach(&sc->sc_ev_rx_macctl); 2825 evcnt_detach(&sc->sc_ev_rx_macctl);
2826#endif /* WM_EVENT_COUNTERS */ 2826#endif /* WM_EVENT_COUNTERS */
2827 2827
2828 /* Tell the firmware about the release */ 2828 /* Tell the firmware about the release */
2829 WM_CORE_LOCK(sc); 2829 WM_CORE_LOCK(sc);
2830 wm_release_manageability(sc); 2830 wm_release_manageability(sc);
2831 wm_release_hw_control(sc); 2831 wm_release_hw_control(sc);
2832 wm_enable_wakeup(sc); 2832 wm_enable_wakeup(sc);
2833 WM_CORE_UNLOCK(sc); 2833 WM_CORE_UNLOCK(sc);
2834 2834
2835 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY); 2835 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2836 2836
2837 /* Delete all remaining media. */ 2837 /* Delete all remaining media. */
2838 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY); 2838 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2839 2839
2840 ether_ifdetach(ifp); 2840 ether_ifdetach(ifp);
2841 if_detach(ifp); 2841 if_detach(ifp);
2842 if_percpuq_destroy(sc->sc_ipq); 2842 if_percpuq_destroy(sc->sc_ipq);
2843 2843
2844 /* Unload RX dmamaps and free mbufs */ 2844 /* Unload RX dmamaps and free mbufs */
2845 for (i = 0; i < sc->sc_nqueues; i++) { 2845 for (i = 0; i < sc->sc_nqueues; i++) {
2846 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq; 2846 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
2847 mutex_enter(rxq->rxq_lock); 2847 mutex_enter(rxq->rxq_lock);
2848 wm_rxdrain(rxq); 2848 wm_rxdrain(rxq);
2849 mutex_exit(rxq->rxq_lock); 2849 mutex_exit(rxq->rxq_lock);
2850 } 2850 }
2851 /* Must unlock here */ 2851 /* Must unlock here */
2852 2852
2853 /* Disestablish the interrupt handler */ 2853 /* Disestablish the interrupt handler */
2854 for (i = 0; i < sc->sc_nintrs; i++) { 2854 for (i = 0; i < sc->sc_nintrs; i++) {
2855 if (sc->sc_ihs[i] != NULL) { 2855 if (sc->sc_ihs[i] != NULL) {
2856 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]); 2856 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
2857 sc->sc_ihs[i] = NULL; 2857 sc->sc_ihs[i] = NULL;
2858 } 2858 }
2859 } 2859 }
2860 pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs); 2860 pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
2861 2861
2862 wm_free_txrx_queues(sc); 2862 wm_free_txrx_queues(sc);
2863 2863
2864 /* Unmap the registers */ 2864 /* Unmap the registers */
2865 if (sc->sc_ss) { 2865 if (sc->sc_ss) {
2866 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss); 2866 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2867 sc->sc_ss = 0; 2867 sc->sc_ss = 0;
2868 } 2868 }
2869 if (sc->sc_ios) { 2869 if (sc->sc_ios) {
2870 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios); 2870 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2871 sc->sc_ios = 0; 2871 sc->sc_ios = 0;
2872 } 2872 }
2873 if (sc->sc_flashs) { 2873 if (sc->sc_flashs) {
2874 bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs); 2874 bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
2875 sc->sc_flashs = 0; 2875 sc->sc_flashs = 0;
2876 } 2876 }
2877 2877
2878 if (sc->sc_core_lock) 2878 if (sc->sc_core_lock)
2879 mutex_obj_free(sc->sc_core_lock); 2879 mutex_obj_free(sc->sc_core_lock);
2880 if (sc->sc_ich_phymtx) 2880 if (sc->sc_ich_phymtx)
2881 mutex_obj_free(sc->sc_ich_phymtx); 2881 mutex_obj_free(sc->sc_ich_phymtx);
2882 if (sc->sc_ich_nvmmtx) 2882 if (sc->sc_ich_nvmmtx)
2883 mutex_obj_free(sc->sc_ich_nvmmtx); 2883 mutex_obj_free(sc->sc_ich_nvmmtx);
2884 2884
2885 return 0; 2885 return 0;
2886} 2886}
2887 2887
2888static bool 2888static bool
2889wm_suspend(device_t self, const pmf_qual_t *qual) 2889wm_suspend(device_t self, const pmf_qual_t *qual)
2890{ 2890{
2891 struct wm_softc *sc = device_private(self); 2891 struct wm_softc *sc = device_private(self);
2892 2892
2893 wm_release_manageability(sc); 2893 wm_release_manageability(sc);
2894 wm_release_hw_control(sc); 2894 wm_release_hw_control(sc);
2895 wm_enable_wakeup(sc); 2895 wm_enable_wakeup(sc);
2896 2896
2897 return true; 2897 return true;
2898} 2898}
2899 2899
2900static bool 2900static bool
2901wm_resume(device_t self, const pmf_qual_t *qual) 2901wm_resume(device_t self, const pmf_qual_t *qual)
2902{ 2902{
2903 struct wm_softc *sc = device_private(self); 2903 struct wm_softc *sc = device_private(self);
2904 2904
2905 wm_init_manageability(sc); 2905 wm_init_manageability(sc);
2906 2906
2907 return true; 2907 return true;
2908} 2908}
2909 2909
2910/* 2910/*
2911 * wm_watchdog: [ifnet interface function] 2911 * wm_watchdog: [ifnet interface function]
2912 * 2912 *
2913 * Watchdog timer handler. 2913 * Watchdog timer handler.
2914 */ 2914 */
2915static void 2915static void
2916wm_watchdog(struct ifnet *ifp) 2916wm_watchdog(struct ifnet *ifp)
2917{ 2917{
2918 int qid; 2918 int qid;
2919 struct wm_softc *sc = ifp->if_softc; 2919 struct wm_softc *sc = ifp->if_softc;
2920 2920
2921 for (qid = 0; qid < sc->sc_nqueues; qid++) { 2921 for (qid = 0; qid < sc->sc_nqueues; qid++) {
2922 struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq; 2922 struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
2923 2923
2924 wm_watchdog_txq(ifp, txq); 2924 wm_watchdog_txq(ifp, txq);
2925 } 2925 }
2926 2926
2927 /* Reset the interface. */ 2927 /* Reset the interface. */
2928 (void) wm_init(ifp); 2928 (void) wm_init(ifp);
2929 2929
2930 /* 2930 /*
 2931 * There is still some upper layer processing which calls 2931 * There is still some upper layer processing which calls
 2932 * ifp->if_start(), e.g. ALTQ or a one-CPU system. 2932 * ifp->if_start(), e.g. ALTQ or a one-CPU system.
2933 */ 2933 */
2934 /* Try to get more packets going. */ 2934 /* Try to get more packets going. */
2935 ifp->if_start(ifp); 2935 ifp->if_start(ifp);
2936} 2936}
2937 2937
2938static void 2938static void
2939wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq) 2939wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
2940{ 2940{
2941 struct wm_softc *sc = ifp->if_softc; 2941 struct wm_softc *sc = ifp->if_softc;
2942 2942
2943 /* 2943 /*
2944 * Since we're using delayed interrupts, sweep up 2944 * Since we're using delayed interrupts, sweep up
2945 * before we report an error. 2945 * before we report an error.
2946 */ 2946 */
2947 mutex_enter(txq->txq_lock); 2947 mutex_enter(txq->txq_lock);
2948 wm_txeof(sc, txq); 2948 wm_txeof(sc, txq);
2949 mutex_exit(txq->txq_lock); 2949 mutex_exit(txq->txq_lock);
2950 2950
2951 if (txq->txq_free != WM_NTXDESC(txq)) { 2951 if (txq->txq_free != WM_NTXDESC(txq)) {
2952#ifdef WM_DEBUG 2952#ifdef WM_DEBUG
2953 int i, j; 2953 int i, j;
2954 struct wm_txsoft *txs; 2954 struct wm_txsoft *txs;
2955#endif 2955#endif
2956 log(LOG_ERR, 2956 log(LOG_ERR,
2957 "%s: device timeout (txfree %d txsfree %d txnext %d)\n", 2957 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2958 device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree, 2958 device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
2959 txq->txq_next); 2959 txq->txq_next);
2960 ifp->if_oerrors++; 2960 ifp->if_oerrors++;
2961#ifdef WM_DEBUG 2961#ifdef WM_DEBUG
2962 for (i = txq->txq_sdirty; i != txq->txq_snext ; 2962 for (i = txq->txq_sdirty; i != txq->txq_snext ;
2963 i = WM_NEXTTXS(txq, i)) { 2963 i = WM_NEXTTXS(txq, i)) {
2964 txs = &txq->txq_soft[i]; 2964 txs = &txq->txq_soft[i];
2965 printf("txs %d tx %d -> %d\n", 2965 printf("txs %d tx %d -> %d\n",
2966 i, txs->txs_firstdesc, txs->txs_lastdesc); 2966 i, txs->txs_firstdesc, txs->txs_lastdesc);
2967 for (j = txs->txs_firstdesc; ; 2967 for (j = txs->txs_firstdesc; ;
2968 j = WM_NEXTTX(txq, j)) { 2968 j = WM_NEXTTX(txq, j)) {
2969 printf("\tdesc %d: 0x%" PRIx64 "\n", j, 2969 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2970 txq->txq_nq_descs[j].nqtx_data.nqtxd_addr); 2970 txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
2971 printf("\t %#08x%08x\n", 2971 printf("\t %#08x%08x\n",
2972 txq->txq_nq_descs[j].nqtx_data.nqtxd_fields, 2972 txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
2973 txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen); 2973 txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
2974 if (j == txs->txs_lastdesc) 2974 if (j == txs->txs_lastdesc)
2975 break; 2975 break;
2976 } 2976 }
2977 } 2977 }
2978#endif 2978#endif
2979 } 2979 }
2980} 2980}
2981 2981
2982/* 2982/*
2983 * wm_tick: 2983 * wm_tick:
2984 * 2984 *
2985 * One second timer, used to check link status, sweep up 2985 * One second timer, used to check link status, sweep up
2986 * completed transmit jobs, etc. 2986 * completed transmit jobs, etc.
2987 */ 2987 */
2988static void 2988static void
2989wm_tick(void *arg) 2989wm_tick(void *arg)
2990{ 2990{
2991 struct wm_softc *sc = arg; 2991 struct wm_softc *sc = arg;
2992 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2992 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2993#ifndef WM_MPSAFE 2993#ifndef WM_MPSAFE
2994 int s = splnet(); 2994 int s = splnet();
2995#endif 2995#endif
2996 2996
2997 WM_CORE_LOCK(sc); 2997 WM_CORE_LOCK(sc);
2998 2998
2999 if (sc->sc_core_stopping) 2999 if (sc->sc_core_stopping)
3000 goto out; 3000 goto out;
3001 3001
3002 if (sc->sc_type >= WM_T_82542_2_1) { 3002 if (sc->sc_type >= WM_T_82542_2_1) {
3003 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC)); 3003 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3004 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC)); 3004 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3005 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC)); 3005 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3006 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC)); 3006 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3007 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC)); 3007 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3008 } 3008 }
3009 3009
3010 ifp->if_collisions += CSR_READ(sc, WMREG_COLC); 3010 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3011 ifp->if_ierrors += 0ULL /* ensure quad_t */ 3011 ifp->if_ierrors += 0ULL /* ensure quad_t */
3012 + CSR_READ(sc, WMREG_CRCERRS) 3012 + CSR_READ(sc, WMREG_CRCERRS)
3013 + CSR_READ(sc, WMREG_ALGNERRC) 3013 + CSR_READ(sc, WMREG_ALGNERRC)
3014 + CSR_READ(sc, WMREG_SYMERRC) 3014 + CSR_READ(sc, WMREG_SYMERRC)
3015 + CSR_READ(sc, WMREG_RXERRC) 3015 + CSR_READ(sc, WMREG_RXERRC)
3016 + CSR_READ(sc, WMREG_SEC) 3016 + CSR_READ(sc, WMREG_SEC)
3017 + CSR_READ(sc, WMREG_CEXTERR) 3017 + CSR_READ(sc, WMREG_CEXTERR)
3018 + CSR_READ(sc, WMREG_RLEC); 3018 + CSR_READ(sc, WMREG_RLEC);
3019 /* 3019 /*
3020 * WMREG_RNBC is incremented when there are no available buffers in host 3020 * WMREG_RNBC is incremented when there are no available buffers in host
3021 * memory. It does not count dropped packets, because the ethernet 3021 * memory. It does not count dropped packets, because the ethernet
3022 * controller can still receive packets in that case as long as there 3022 * controller can still receive packets in that case as long as there
3023 * is space in the PHY's FIFO. 3023 * is space in the PHY's FIFO.
3024 * 3024 *
3025 * If you want to track WMREG_RNBC, use a dedicated EVCNT of your 3025 * If you want to track WMREG_RNBC, use a dedicated EVCNT of your
3026 * own instead of if_iqdrops. 3026 * own instead of if_iqdrops.
3027 */ 3027 */
3028 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC); 3028 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
3029 3029
3030 if (sc->sc_flags & WM_F_HAS_MII) 3030 if (sc->sc_flags & WM_F_HAS_MII)
3031 mii_tick(&sc->sc_mii); 3031 mii_tick(&sc->sc_mii);
3032 else if ((sc->sc_type >= WM_T_82575) 3032 else if ((sc->sc_type >= WM_T_82575)
3033 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) 3033 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3034 wm_serdes_tick(sc); 3034 wm_serdes_tick(sc);
3035 else 3035 else
3036 wm_tbi_tick(sc); 3036 wm_tbi_tick(sc);
3037 3037
3038 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc); 3038 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3039out: 3039out:
3040 WM_CORE_UNLOCK(sc); 3040 WM_CORE_UNLOCK(sc);
3041#ifndef WM_MPSAFE 3041#ifndef WM_MPSAFE
3042 splx(s); 3042 splx(s);
3043#endif 3043#endif
3044} 3044}
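The comment in wm_tick() above recommends a private event counter for WMREG_RNBC rather than counting it in if_iqdrops. A minimal sketch of that idea, assuming a hypothetical sc_ev_rnbc member in struct wm_softc and following the evcnt(9)/WM_EVCNT_ADD pattern the driver already uses for its other statistics:

	/* At attach time: register a private counter (sc_ev_rnbc is a
	 * hypothetical field, not part of the current softc). */
	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC, NULL,
	    device_xname(sc->sc_dev), "Rx no buffers");

	/* In wm_tick(): accumulate the hardware register once a second,
	 * alongside the other counter reads above. */
	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));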
3045 3045
3046static int 3046static int
3047wm_ifflags_cb(struct ethercom *ec) 3047wm_ifflags_cb(struct ethercom *ec)
3048{ 3048{
3049 struct ifnet *ifp = &ec->ec_if; 3049 struct ifnet *ifp = &ec->ec_if;
3050 struct wm_softc *sc = ifp->if_softc; 3050 struct wm_softc *sc = ifp->if_softc;
3051 int rc = 0; 3051 int rc = 0;
3052 3052
3053 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", 3053 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3054 device_xname(sc->sc_dev), __func__)); 3054 device_xname(sc->sc_dev), __func__));
3055 3055
3056 WM_CORE_LOCK(sc); 3056 WM_CORE_LOCK(sc);
3057 3057
3058 int change = ifp->if_flags ^ sc->sc_if_flags; 3058 int change = ifp->if_flags ^ sc->sc_if_flags;
3059 sc->sc_if_flags = ifp->if_flags; 3059 sc->sc_if_flags = ifp->if_flags;
3060 3060
3061 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) { 3061 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
3062 rc = ENETRESET; 3062 rc = ENETRESET;
3063 goto out; 3063 goto out;
3064 } 3064 }
3065 3065
3066 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0) 3066 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
3067 wm_set_filter(sc); 3067 wm_set_filter(sc);
3068 3068
3069 wm_set_vlan(sc); 3069 wm_set_vlan(sc);
3070 3070
3071out: 3071out:
3072 WM_CORE_UNLOCK(sc); 3072 WM_CORE_UNLOCK(sc);
3073 3073
3074 return rc; 3074 return rc;
3075} 3075}
3076 3076
3077/* 3077/*
3078 * wm_ioctl: [ifnet interface function] 3078 * wm_ioctl: [ifnet interface function]
3079 * 3079 *
3080 * Handle control requests from the operator. 3080 * Handle control requests from the operator.
3081 */ 3081 */
3082static int 3082static int
3083wm_ioctl(struct ifnet *ifp, u_long cmd, void *data) 3083wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3084{ 3084{
3085 struct wm_softc *sc = ifp->if_softc; 3085 struct wm_softc *sc = ifp->if_softc;
3086 struct ifreq *ifr = (struct ifreq *) data; 3086 struct ifreq *ifr = (struct ifreq *) data;
3087 struct ifaddr *ifa = (struct ifaddr *)data; 3087 struct ifaddr *ifa = (struct ifaddr *)data;
3088 struct sockaddr_dl *sdl; 3088 struct sockaddr_dl *sdl;
3089 int s, error; 3089 int s, error;
3090 3090
3091 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", 3091 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3092 device_xname(sc->sc_dev), __func__)); 3092 device_xname(sc->sc_dev), __func__));
3093 3093
3094#ifndef WM_MPSAFE 3094#ifndef WM_MPSAFE
3095 s = splnet(); 3095 s = splnet();
3096#endif 3096#endif
3097 switch (cmd) { 3097 switch (cmd) {
3098 case SIOCSIFMEDIA: 3098 case SIOCSIFMEDIA:
3099 case SIOCGIFMEDIA: 3099 case SIOCGIFMEDIA:
3100 WM_CORE_LOCK(sc); 3100 WM_CORE_LOCK(sc);
3101 /* Flow control requires full-duplex mode. */ 3101 /* Flow control requires full-duplex mode. */
3102 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 3102 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3103 (ifr->ifr_media & IFM_FDX) == 0) 3103 (ifr->ifr_media & IFM_FDX) == 0)
3104 ifr->ifr_media &= ~IFM_ETH_FMASK; 3104 ifr->ifr_media &= ~IFM_ETH_FMASK;
3105 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 3105 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3106 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 3106 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3107 /* We can do both TXPAUSE and RXPAUSE. */ 3107 /* We can do both TXPAUSE and RXPAUSE. */
3108 ifr->ifr_media |= 3108 ifr->ifr_media |=
3109 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 3109 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3110 } 3110 }
3111 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK; 3111 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3112 } 3112 }
3113 WM_CORE_UNLOCK(sc); 3113 WM_CORE_UNLOCK(sc);
3114#ifdef WM_MPSAFE 3114#ifdef WM_MPSAFE
3115 s = splnet(); 3115 s = splnet();
3116#endif 3116#endif
3117 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd); 3117 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
3118#ifdef WM_MPSAFE 3118#ifdef WM_MPSAFE
3119 splx(s); 3119 splx(s);
3120#endif 3120#endif
3121 break; 3121 break;
3122 case SIOCINITIFADDR: 3122 case SIOCINITIFADDR:
3123 WM_CORE_LOCK(sc); 3123 WM_CORE_LOCK(sc);
3124 if (ifa->ifa_addr->sa_family == AF_LINK) { 3124 if (ifa->ifa_addr->sa_family == AF_LINK) {
3125 sdl = satosdl(ifp->if_dl->ifa_addr); 3125 sdl = satosdl(ifp->if_dl->ifa_addr);
3126 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len, 3126 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
3127 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen); 3127 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
3128 /* unicast address is first multicast entry */ 3128 /* unicast address is first multicast entry */
3129 wm_set_filter(sc); 3129 wm_set_filter(sc);
3130 error = 0; 3130 error = 0;
3131 WM_CORE_UNLOCK(sc); 3131 WM_CORE_UNLOCK(sc);
3132 break; 3132 break;
3133 } 3133 }
3134 WM_CORE_UNLOCK(sc); 3134 WM_CORE_UNLOCK(sc);
3135 /*FALLTHROUGH*/ 3135 /*FALLTHROUGH*/
3136 default: 3136 default:
3137#ifdef WM_MPSAFE 3137#ifdef WM_MPSAFE
3138 s = splnet(); 3138 s = splnet();
3139#endif 3139#endif
3140 /* It may call wm_start, so unlock here */ 3140 /* It may call wm_start, so unlock here */
3141 error = ether_ioctl(ifp, cmd, data); 3141 error = ether_ioctl(ifp, cmd, data);
3142#ifdef WM_MPSAFE 3142#ifdef WM_MPSAFE
3143 splx(s); 3143 splx(s);
3144#endif 3144#endif
3145 if (error != ENETRESET) 3145 if (error != ENETRESET)
3146 break; 3146 break;
3147 3147
3148 error = 0; 3148 error = 0;
3149 3149
3150 if (cmd == SIOCSIFCAP) { 3150 if (cmd == SIOCSIFCAP) {
3151 error = (*ifp->if_init)(ifp); 3151 error = (*ifp->if_init)(ifp);
3152 } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI) 3152 } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3153 ; 3153 ;
3154 else if (ifp->if_flags & IFF_RUNNING) { 3154 else if (ifp->if_flags & IFF_RUNNING) {
3155 /* 3155 /*
3156 * Multicast list has changed; set the hardware filter 3156 * Multicast list has changed; set the hardware filter
3157 * accordingly. 3157 * accordingly.
3158 */ 3158 */
3159 WM_CORE_LOCK(sc); 3159 WM_CORE_LOCK(sc);
3160 wm_set_filter(sc); 3160 wm_set_filter(sc);
3161 WM_CORE_UNLOCK(sc); 3161 WM_CORE_UNLOCK(sc);
3162 } 3162 }
3163 break; 3163 break;
3164 } 3164 }
3165 3165
3166#ifndef WM_MPSAFE 3166#ifndef WM_MPSAFE
3167 splx(s); 3167 splx(s);
3168#endif 3168#endif
3169 return error; 3169 return error;
3170} 3170}
3171 3171
3172/* MAC address related */ 3172/* MAC address related */
3173 3173
3174/* 3174/*
3175 * Get the offset of the MAC address and return it. 3175 * Get the offset of the MAC address and return it.
3176 * If an error occurred, use offset 0. 3176 * If an error occurred, use offset 0.
3177 */ 3177 */
3178static uint16_t 3178static uint16_t
3179wm_check_alt_mac_addr(struct wm_softc *sc) 3179wm_check_alt_mac_addr(struct wm_softc *sc)
3180{ 3180{
3181 uint16_t myea[ETHER_ADDR_LEN / 2]; 3181 uint16_t myea[ETHER_ADDR_LEN / 2];
3182 uint16_t offset = NVM_OFF_MACADDR; 3182 uint16_t offset = NVM_OFF_MACADDR;
3183 3183
3184 /* Try to read alternative MAC address pointer */ 3184 /* Try to read alternative MAC address pointer */
3185 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0) 3185 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
3186 return 0; 3186 return 0;
3187 3187
3188 /* Check whether the pointer is valid or not. */ 3188 /* Check whether the pointer is valid or not. */
3189 if ((offset == 0x0000) || (offset == 0xffff)) 3189 if ((offset == 0x0000) || (offset == 0xffff))
3190 return 0; 3190 return 0;
3191 3191
3192 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid); 3192 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
3193 /* 3193 /*
3194 * Check whether alternative MAC address is valid or not. 3194 * Check whether alternative MAC address is valid or not.
3195 * Some cards have a non-0xffff pointer but don't actually use 3195 * Some cards have a non-0xffff pointer but don't actually use
3196 * an alternative MAC address. 3196 * an alternative MAC address.
3197 * 3197 *
3198 * Check whether the broadcast bit is set or not. 3198 * Check whether the broadcast bit is set or not.
3199 */ 3199 */
3200 if (wm_nvm_read(sc, offset, 1, myea) == 0) 3200 if (wm_nvm_read(sc, offset, 1, myea) == 0)
3201 if (((myea[0] & 0xff) & 0x01) == 0) 3201 if (((myea[0] & 0xff) & 0x01) == 0)
3202 return offset; /* Found */ 3202 return offset; /* Found */
3203 3203
3204 /* Not found */ 3204 /* Not found */
3205 return 0; 3205 return 0;
3206} 3206}
3207 3207
3208static int 3208static int
3209wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr) 3209wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
3210{ 3210{
3211 uint16_t myea[ETHER_ADDR_LEN / 2]; 3211 uint16_t myea[ETHER_ADDR_LEN / 2];
3212 uint16_t offset = NVM_OFF_MACADDR; 3212 uint16_t offset = NVM_OFF_MACADDR;
3213 int do_invert = 0; 3213 int do_invert = 0;
3214 3214
3215 switch (sc->sc_type) { 3215 switch (sc->sc_type) {
3216 case WM_T_82580: 3216 case WM_T_82580:
3217 case WM_T_I350: 3217 case WM_T_I350:
3218 case WM_T_I354: 3218 case WM_T_I354:
3219 /* EEPROM Top Level Partitioning */ 3219 /* EEPROM Top Level Partitioning */
3220 offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0; 3220 offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
3221 break; 3221 break;
3222 case WM_T_82571: 3222 case WM_T_82571:
3223 case WM_T_82575: 3223 case WM_T_82575:
3224 case WM_T_82576: 3224 case WM_T_82576:
3225 case WM_T_80003: 3225 case WM_T_80003:
3226 case WM_T_I210: 3226 case WM_T_I210:
3227 case WM_T_I211: 3227 case WM_T_I211:
3228 offset = wm_check_alt_mac_addr(sc); 3228 offset = wm_check_alt_mac_addr(sc);
3229 if (offset == 0) 3229 if (offset == 0)
3230 if ((sc->sc_funcid & 0x01) == 1) 3230 if ((sc->sc_funcid & 0x01) == 1)
3231 do_invert = 1; 3231 do_invert = 1;
3232 break; 3232 break;
3233 default: 3233 default:
3234 if ((sc->sc_funcid & 0x01) == 1) 3234 if ((sc->sc_funcid & 0x01) == 1)
3235 do_invert = 1; 3235 do_invert = 1;
3236 break; 3236 break;
3237 } 3237 }
3238 3238
3239 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0) 3239 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
3240 goto bad; 3240 goto bad;
3241 3241
3242 enaddr[0] = myea[0] & 0xff; 3242 enaddr[0] = myea[0] & 0xff;
3243 enaddr[1] = myea[0] >> 8; 3243 enaddr[1] = myea[0] >> 8;
3244 enaddr[2] = myea[1] & 0xff; 3244 enaddr[2] = myea[1] & 0xff;
3245 enaddr[3] = myea[1] >> 8; 3245 enaddr[3] = myea[1] >> 8;
3246 enaddr[4] = myea[2] & 0xff; 3246 enaddr[4] = myea[2] & 0xff;
3247 enaddr[5] = myea[2] >> 8; 3247 enaddr[5] = myea[2] >> 8;
3248 3248
3249 /* 3249 /*
3250 * Toggle the LSB of the MAC address on the second port 3250 * Toggle the LSB of the MAC address on the second port
3251 * of some dual port cards. 3251 * of some dual port cards.
3252 */ 3252 */
3253 if (do_invert != 0) 3253 if (do_invert != 0)
3254 enaddr[5] ^= 1; 3254 enaddr[5] ^= 1;
3255 3255
3256 return 0; 3256 return 0;
3257 3257
3258 bad: 3258 bad:
3259 return -1; 3259 return -1;
3260} 3260}
3261 3261
3262/* 3262/*
3263 * wm_set_ral: 3263 * wm_set_ral:
3264 * 3264 *
3265 * Set an entry in the receive address list. 3265 * Set an entry in the receive address list.
3266 */ 3266 */
3267static void 3267static void
3268wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx) 3268wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3269{ 3269{
3270 uint32_t ral_lo, ral_hi, addrl, addrh; 3270 uint32_t ral_lo, ral_hi, addrl, addrh;
3271 uint32_t wlock_mac; 3271 uint32_t wlock_mac;
3272 int rv; 3272 int rv;
3273 3273
3274 if (enaddr != NULL) { 3274 if (enaddr != NULL) {
3275 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) | 3275 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
3276 (enaddr[3] << 24); 3276 (enaddr[3] << 24);
3277 ral_hi = enaddr[4] | (enaddr[5] << 8); 3277 ral_hi = enaddr[4] | (enaddr[5] << 8);
3278 ral_hi |= RAL_AV; 3278 ral_hi |= RAL_AV;
3279 } else { 3279 } else {
3280 ral_lo = 0; 3280 ral_lo = 0;
3281 ral_hi = 0; 3281 ral_hi = 0;
3282 } 3282 }
3283 3283
3284 switch (sc->sc_type) { 3284 switch (sc->sc_type) {
3285 case WM_T_82542_2_0: 3285 case WM_T_82542_2_0:
3286 case WM_T_82542_2_1: 3286 case WM_T_82542_2_1:
3287 case WM_T_82543: 3287 case WM_T_82543:
3288 CSR_WRITE(sc, WMREG_RAL(idx), ral_lo); 3288 CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
3289 CSR_WRITE_FLUSH(sc); 3289 CSR_WRITE_FLUSH(sc);
3290 CSR_WRITE(sc, WMREG_RAH(idx), ral_hi); 3290 CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
3291 CSR_WRITE_FLUSH(sc); 3291 CSR_WRITE_FLUSH(sc);
3292 break; 3292 break;
3293 case WM_T_PCH2: 3293 case WM_T_PCH2:
3294 case WM_T_PCH_LPT: 3294 case WM_T_PCH_LPT:
3295 case WM_T_PCH_SPT: 3295 case WM_T_PCH_SPT:
3296 if (idx == 0) { 3296 if (idx == 0) {
3297 CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo); 3297 CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
3298 CSR_WRITE_FLUSH(sc); 3298 CSR_WRITE_FLUSH(sc);
3299 CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi); 3299 CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
3300 CSR_WRITE_FLUSH(sc); 3300 CSR_WRITE_FLUSH(sc);
3301 return; 3301 return;
3302 } 3302 }
3303 if (sc->sc_type != WM_T_PCH2) { 3303 if (sc->sc_type != WM_T_PCH2) {
3304 wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), 3304 wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
3305 FWSM_WLOCK_MAC); 3305 FWSM_WLOCK_MAC);
3306 addrl = WMREG_SHRAL(idx - 1); 3306 addrl = WMREG_SHRAL(idx - 1);
3307 addrh = WMREG_SHRAH(idx - 1); 3307 addrh = WMREG_SHRAH(idx - 1);
3308 } else { 3308 } else {
3309 wlock_mac = 0; 3309 wlock_mac = 0;
3310 addrl = WMREG_PCH_LPT_SHRAL(idx - 1); 3310 addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
3311 addrh = WMREG_PCH_LPT_SHRAH(idx - 1); 3311 addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
3312 } 3312 }
3313  3313
3314 if ((wlock_mac == 0) || (idx <= wlock_mac)) { 3314 if ((wlock_mac == 0) || (idx <= wlock_mac)) {
3315 rv = wm_get_swflag_ich8lan(sc); 3315 rv = wm_get_swflag_ich8lan(sc);
3316 if (rv != 0) 3316 if (rv != 0)
3317 return; 3317 return;
3318 CSR_WRITE(sc, addrl, ral_lo); 3318 CSR_WRITE(sc, addrl, ral_lo);
3319 CSR_WRITE_FLUSH(sc); 3319 CSR_WRITE_FLUSH(sc);
3320 CSR_WRITE(sc, addrh, ral_hi); 3320 CSR_WRITE(sc, addrh, ral_hi);
3321 CSR_WRITE_FLUSH(sc); 3321 CSR_WRITE_FLUSH(sc);
3322 wm_put_swflag_ich8lan(sc); 3322 wm_put_swflag_ich8lan(sc);
3323 } 3323 }
3324 3324
3325 break; 3325 break;
3326 default: 3326 default:
3327 CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo); 3327 CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
3328 CSR_WRITE_FLUSH(sc); 3328 CSR_WRITE_FLUSH(sc);
3329 CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi); 3329 CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
3330 CSR_WRITE_FLUSH(sc); 3330 CSR_WRITE_FLUSH(sc);
3331 break; 3331 break;
3332 } 3332 }
3333} 3333}
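As an illustrative worked example of the packing at the top of wm_set_ral(): for the address 00:11:22:33:44:55 the two register values become

	ral_lo = 0x33221100;			/* bytes 0-3, byte 0 in the LSB */
	ral_hi = 0x00005544 | RAL_AV;		/* bytes 4-5 plus the "address valid" bit */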
3334 3334
3335/* 3335/*
3336 * wm_mchash: 3336 * wm_mchash:
3337 * 3337 *
3338 * Compute the hash of the multicast address for the 4096-bit 3338 * Compute the hash of the multicast address for the 4096-bit
3339 * multicast filter. 3339 * multicast filter.
3340 */ 3340 */
3341static uint32_t 3341static uint32_t
3342wm_mchash(struct wm_softc *sc, const uint8_t *enaddr) 3342wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3343{ 3343{
3344 static const int lo_shift[4] = { 4, 3, 2, 0 }; 3344 static const int lo_shift[4] = { 4, 3, 2, 0 };
3345 static const int hi_shift[4] = { 4, 5, 6, 8 }; 3345 static const int hi_shift[4] = { 4, 5, 6, 8 };
3346 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 }; 3346 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3347 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 }; 3347 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3348 uint32_t hash; 3348 uint32_t hash;
3349 3349
3350 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) 3350 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3351 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) 3351 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3352 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) 3352 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3353 || (sc->sc_type == WM_T_PCH_SPT)) { 3353 || (sc->sc_type == WM_T_PCH_SPT)) {
3354 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) | 3354 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3355 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]); 3355 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3356 return (hash & 0x3ff); 3356 return (hash & 0x3ff);
3357 } 3357 }
3358 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) | 3358 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3359 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]); 3359 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3360 3360
3361 return (hash & 0xfff); 3361 return (hash & 0xfff);
3362} 3362}
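The hash returned by wm_mchash() indexes a bit in the multicast table array (MTA): the upper bits select one of the 32-bit MTA registers and the low five bits select a bit within it (ICH/PCH parts have a 1024-bit table, hence the 0x3ff mask; the others have 4096 bits, i.e. 128 registers). A condensed sketch of how wm_set_filter() below consumes the hash, omitting the extra register masking, write flushes and the 82544 PCI-X errata workaround:

	hash = wm_mchash(sc, enm->enm_addrlo);
	reg  = hash >> 5;			/* which 32-bit MTA register */
	bit  = hash & 0x1f;			/* which bit within that register */
	hash = CSR_READ(sc, mta_reg + (reg << 2));
	CSR_WRITE(sc, mta_reg + (reg << 2), hash | (1U << bit));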
3363 3363
3364/* 3364/*
3365 * wm_set_filter: 3365 * wm_set_filter:
3366 * 3366 *
3367 * Set up the receive filter. 3367 * Set up the receive filter.
3368 */ 3368 */
3369static void 3369static void
3370wm_set_filter(struct wm_softc *sc) 3370wm_set_filter(struct wm_softc *sc)
3371{ 3371{
3372 struct ethercom *ec = &sc->sc_ethercom; 3372 struct ethercom *ec = &sc->sc_ethercom;
3373 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 3373 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3374 struct ether_multi *enm; 3374 struct ether_multi *enm;
3375 struct ether_multistep step; 3375 struct ether_multistep step;
3376 bus_addr_t mta_reg; 3376 bus_addr_t mta_reg;
3377 uint32_t hash, reg, bit; 3377 uint32_t hash, reg, bit;
3378 int i, size, ralmax; 3378 int i, size, ralmax;
3379 3379
3380 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", 3380 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3381 device_xname(sc->sc_dev), __func__)); 3381 device_xname(sc->sc_dev), __func__));
3382 3382
3383 if (sc->sc_type >= WM_T_82544) 3383 if (sc->sc_type >= WM_T_82544)
3384 mta_reg = WMREG_CORDOVA_MTA; 3384 mta_reg = WMREG_CORDOVA_MTA;
3385 else 3385 else
3386 mta_reg = WMREG_MTA; 3386 mta_reg = WMREG_MTA;
3387 3387
3388 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE); 3388 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3389 3389
3390 if (ifp->if_flags & IFF_BROADCAST) 3390 if (ifp->if_flags & IFF_BROADCAST)
3391 sc->sc_rctl |= RCTL_BAM; 3391 sc->sc_rctl |= RCTL_BAM;
3392 if (ifp->if_flags & IFF_PROMISC) { 3392 if (ifp->if_flags & IFF_PROMISC) {
3393 sc->sc_rctl |= RCTL_UPE; 3393 sc->sc_rctl |= RCTL_UPE;
3394 goto allmulti; 3394 goto allmulti;
3395 } 3395 }
3396 3396
3397 /* 3397 /*
3398 * Set the station address in the first RAL slot, and 3398 * Set the station address in the first RAL slot, and
3399 * clear the remaining slots. 3399 * clear the remaining slots.
3400 */ 3400 */
3401 if (sc->sc_type == WM_T_ICH8) 3401 if (sc->sc_type == WM_T_ICH8)
3402 size = WM_RAL_TABSIZE_ICH8 -1; 3402 size = WM_RAL_TABSIZE_ICH8 -1;
3403 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10) 3403 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
3404 || (sc->sc_type == WM_T_PCH)) 3404 || (sc->sc_type == WM_T_PCH))
3405 size = WM_RAL_TABSIZE_ICH8; 3405 size = WM_RAL_TABSIZE_ICH8;
3406 else if (sc->sc_type == WM_T_PCH2) 3406 else if (sc->sc_type == WM_T_PCH2)
3407 size = WM_RAL_TABSIZE_PCH2; 3407 size = WM_RAL_TABSIZE_PCH2;
3408 else if ((sc->sc_type == WM_T_PCH_LPT) ||(sc->sc_type == WM_T_PCH_SPT)) 3408 else if ((sc->sc_type == WM_T_PCH_LPT) ||(sc->sc_type == WM_T_PCH_SPT))
3409 size = WM_RAL_TABSIZE_PCH_LPT; 3409 size = WM_RAL_TABSIZE_PCH_LPT;
3410 else if (sc->sc_type == WM_T_82575) 3410 else if (sc->sc_type == WM_T_82575)
3411 size = WM_RAL_TABSIZE_82575; 3411 size = WM_RAL_TABSIZE_82575;
3412 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580)) 3412 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
3413 size = WM_RAL_TABSIZE_82576; 3413 size = WM_RAL_TABSIZE_82576;
3414 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) 3414 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3415 size = WM_RAL_TABSIZE_I350; 3415 size = WM_RAL_TABSIZE_I350;
3416 else 3416 else
3417 size = WM_RAL_TABSIZE; 3417 size = WM_RAL_TABSIZE;
3418 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0); 3418 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3419 3419
3420 if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) { 3420 if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
3421 i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC); 3421 i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
3422 switch (i) { 3422 switch (i) {
3423 case 0: 3423 case 0:
3424 /* We can use all entries */ 3424 /* We can use all entries */
3425 ralmax = size; 3425 ralmax = size;
3426 break; 3426 break;
3427 case 1: 3427 case 1:
3428 /* Only RAR[0] */ 3428 /* Only RAR[0] */
3429 ralmax = 1; 3429 ralmax = 1;
3430 break; 3430 break;
3431 default: 3431 default:
3432 /* available SHRA + RAR[0] */ 3432 /* available SHRA + RAR[0] */
3433 ralmax = i + 1; 3433 ralmax = i + 1;
3434 } 3434 }
3435 } else 3435 } else
3436 ralmax = size; 3436 ralmax = size;
3437 for (i = 1; i < size; i++) { 3437 for (i = 1; i < size; i++) {
3438 if (i < ralmax) 3438 if (i < ralmax)
3439 wm_set_ral(sc, NULL, i); 3439 wm_set_ral(sc, NULL, i);
3440 } 3440 }
3441 3441
3442 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) 3442 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3443 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) 3443 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3444 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) 3444 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3445 || (sc->sc_type == WM_T_PCH_SPT)) 3445 || (sc->sc_type == WM_T_PCH_SPT))
3446 size = WM_ICH8_MC_TABSIZE; 3446 size = WM_ICH8_MC_TABSIZE;
3447 else 3447 else
3448 size = WM_MC_TABSIZE; 3448 size = WM_MC_TABSIZE;
3449 /* Clear out the multicast table. */ 3449 /* Clear out the multicast table. */
3450 for (i = 0; i < size; i++) { 3450 for (i = 0; i < size; i++) {
3451 CSR_WRITE(sc, mta_reg + (i << 2), 0); 3451 CSR_WRITE(sc, mta_reg + (i << 2), 0);
3452 CSR_WRITE_FLUSH(sc); 3452 CSR_WRITE_FLUSH(sc);
3453 } 3453 }
3454 3454
3455 ETHER_LOCK(ec); 3455 ETHER_LOCK(ec);
3456 ETHER_FIRST_MULTI(step, ec, enm); 3456 ETHER_FIRST_MULTI(step, ec, enm);
3457 while (enm != NULL) { 3457 while (enm != NULL) {
3458 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 3458 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3459 ETHER_UNLOCK(ec); 3459 ETHER_UNLOCK(ec);
3460 /* 3460 /*
3461 * We must listen to a range of multicast addresses. 3461 * We must listen to a range of multicast addresses.
3462 * For now, just accept all multicasts, rather than 3462 * For now, just accept all multicasts, rather than
3463 * trying to set only those filter bits needed to match 3463 * trying to set only those filter bits needed to match
3464 * the range. (At this time, the only use of address 3464 * the range. (At this time, the only use of address
3465 * ranges is for IP multicast routing, for which the 3465 * ranges is for IP multicast routing, for which the
3466 * range is big enough to require all bits set.) 3466 * range is big enough to require all bits set.)
3467 */ 3467 */
3468 goto allmulti; 3468 goto allmulti;
3469 } 3469 }
3470 3470
3471 hash = wm_mchash(sc, enm->enm_addrlo); 3471 hash = wm_mchash(sc, enm->enm_addrlo);
3472 3472
3473 reg = (hash >> 5); 3473 reg = (hash >> 5);
3474 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) 3474 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3475 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) 3475 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3476 || (sc->sc_type == WM_T_PCH2) 3476 || (sc->sc_type == WM_T_PCH2)
3477 || (sc->sc_type == WM_T_PCH_LPT) 3477 || (sc->sc_type == WM_T_PCH_LPT)
3478 || (sc->sc_type == WM_T_PCH_SPT)) 3478 || (sc->sc_type == WM_T_PCH_SPT))
3479 reg &= 0x1f; 3479 reg &= 0x1f;
3480 else 3480 else
3481 reg &= 0x7f; 3481 reg &= 0x7f;
3482 bit = hash & 0x1f; 3482 bit = hash & 0x1f;
3483 3483
3484 hash = CSR_READ(sc, mta_reg + (reg << 2)); 3484 hash = CSR_READ(sc, mta_reg + (reg << 2));
3485 hash |= 1U << bit; 3485 hash |= 1U << bit;
3486 3486
3487 if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) { 3487 if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
3488 /* 3488 /*
3489 * 82544 Errata 9: Certain registers cannot be written 3489 * 82544 Errata 9: Certain registers cannot be written
3490 * with particular alignments in PCI-X bus operation 3490 * with particular alignments in PCI-X bus operation
3491 * (FCAH, MTA and VFTA). 3491 * (FCAH, MTA and VFTA).
3492 */ 3492 */
3493 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2)); 3493 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3494 CSR_WRITE(sc, mta_reg + (reg << 2), hash); 3494 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3495 CSR_WRITE_FLUSH(sc); 3495 CSR_WRITE_FLUSH(sc);
3496 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit); 3496 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3497 CSR_WRITE_FLUSH(sc); 3497 CSR_WRITE_FLUSH(sc);
3498 } else { 3498 } else {
3499 CSR_WRITE(sc, mta_reg + (reg << 2), hash); 3499 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3500 CSR_WRITE_FLUSH(sc); 3500 CSR_WRITE_FLUSH(sc);
3501 } 3501 }
3502 3502
3503 ETHER_NEXT_MULTI(step, enm); 3503 ETHER_NEXT_MULTI(step, enm);
3504 } 3504 }
3505 ETHER_UNLOCK(ec); 3505 ETHER_UNLOCK(ec);
3506 3506
3507 ifp->if_flags &= ~IFF_ALLMULTI; 3507 ifp->if_flags &= ~IFF_ALLMULTI;
3508 goto setit; 3508 goto setit;
3509 3509
3510 allmulti: 3510 allmulti:
3511 ifp->if_flags |= IFF_ALLMULTI; 3511 ifp->if_flags |= IFF_ALLMULTI;
3512 sc->sc_rctl |= RCTL_MPE; 3512 sc->sc_rctl |= RCTL_MPE;
3513 3513
3514 setit: 3514 setit:
3515 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl); 3515 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3516} 3516}
3517 3517
3518/* Reset and init related */ 3518/* Reset and init related */
3519 3519
3520static void 3520static void
3521wm_set_vlan(struct wm_softc *sc) 3521wm_set_vlan(struct wm_softc *sc)
3522{ 3522{
3523 3523
3524 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", 3524 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3525 device_xname(sc->sc_dev), __func__)); 3525 device_xname(sc->sc_dev), __func__));
3526 3526
3527 /* Deal with VLAN enables. */ 3527 /* Deal with VLAN enables. */
3528 if (VLAN_ATTACHED(&sc->sc_ethercom)) 3528 if (VLAN_ATTACHED(&sc->sc_ethercom))
3529 sc->sc_ctrl |= CTRL_VME; 3529 sc->sc_ctrl |= CTRL_VME;
3530 else 3530 else
3531 sc->sc_ctrl &= ~CTRL_VME; 3531 sc->sc_ctrl &= ~CTRL_VME;
3532 3532
3533 /* Write the control registers. */ 3533 /* Write the control registers. */
3534 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 3534 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3535} 3535}
3536 3536
3537static void 3537static void
3538wm_set_pcie_completion_timeout(struct wm_softc *sc) 3538wm_set_pcie_completion_timeout(struct wm_softc *sc)
3539{ 3539{
3540 uint32_t gcr; 3540 uint32_t gcr;
3541 pcireg_t ctrl2; 3541 pcireg_t ctrl2;
3542 3542
3543 gcr = CSR_READ(sc, WMREG_GCR); 3543 gcr = CSR_READ(sc, WMREG_GCR);
3544 3544
3545 /* Only take action if timeout value is defaulted to 0 */ 3545 /* Only take action if timeout value is defaulted to 0 */
3546 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0) 3546 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3547 goto out; 3547 goto out;
3548 3548
3549 if ((gcr & GCR_CAP_VER2) == 0) { 3549 if ((gcr & GCR_CAP_VER2) == 0) {
3550 gcr |= GCR_CMPL_TMOUT_10MS; 3550 gcr |= GCR_CMPL_TMOUT_10MS;
3551 goto out; 3551 goto out;
3552 } 3552 }
3553 3553
3554 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag, 3554 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3555 sc->sc_pcixe_capoff + PCIE_DCSR2); 3555 sc->sc_pcixe_capoff + PCIE_DCSR2);
3556 ctrl2 |= WM_PCIE_DCSR2_16MS; 3556 ctrl2 |= WM_PCIE_DCSR2_16MS;
3557 pci_conf_write(sc->sc_pc, sc->sc_pcitag, 3557 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3558 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2); 3558 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3559 3559
3560out: 3560out:
3561 /* Disable completion timeout resend */ 3561 /* Disable completion timeout resend */
3562 gcr &= ~GCR_CMPL_TMOUT_RESEND; 3562 gcr &= ~GCR_CMPL_TMOUT_RESEND;
3563 3563
3564 CSR_WRITE(sc, WMREG_GCR, gcr); 3564 CSR_WRITE(sc, WMREG_GCR, gcr);
3565} 3565}
3566 3566
3567void 3567void
3568wm_get_auto_rd_done(struct wm_softc *sc) 3568wm_get_auto_rd_done(struct wm_softc *sc)
3569{ 3569{
3570 int i; 3570 int i;
3571 3571
3572 /* Wait for eeprom to reload */ 3572 /* Wait for eeprom to reload */
3573 switch (sc->sc_type) { 3573 switch (sc->sc_type) {
3574 case WM_T_82571: 3574 case WM_T_82571:
3575 case WM_T_82572: 3575 case WM_T_82572:
3576 case WM_T_82573: 3576 case WM_T_82573:
3577 case WM_T_82574: 3577 case WM_T_82574:
3578 case WM_T_82583: 3578 case WM_T_82583:
3579 case WM_T_82575: 3579 case WM_T_82575:
3580 case WM_T_82576: 3580 case WM_T_82576:
3581 case WM_T_82580: 3581 case WM_T_82580:
3582 case WM_T_I350: 3582 case WM_T_I350:
3583 case WM_T_I354: 3583 case WM_T_I354:
3584 case WM_T_I210: 3584 case WM_T_I210:
3585 case WM_T_I211: 3585 case WM_T_I211:
3586 case WM_T_80003: 3586 case WM_T_80003:
3587 case WM_T_ICH8: 3587 case WM_T_ICH8:
3588 case WM_T_ICH9: 3588 case WM_T_ICH9:
3589 for (i = 0; i < 10; i++) { 3589 for (i = 0; i < 10; i++) {
3590 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD) 3590 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3591 break; 3591 break;
3592 delay(1000); 3592 delay(1000);
3593 } 3593 }
3594 if (i == 10) { 3594 if (i == 10) {
3595 log(LOG_ERR, "%s: auto read from eeprom failed to " 3595 log(LOG_ERR, "%s: auto read from eeprom failed to "
3596 "complete\n", device_xname(sc->sc_dev)); 3596 "complete\n", device_xname(sc->sc_dev));
3597 } 3597 }
3598 break; 3598 break;
3599 default: 3599 default:
3600 break; 3600 break;
3601 } 3601 }
3602} 3602}
3603 3603
3604void 3604void
3605wm_lan_init_done(struct wm_softc *sc) 3605wm_lan_init_done(struct wm_softc *sc)
3606{ 3606{
3607 uint32_t reg = 0; 3607 uint32_t reg = 0;
3608 int i; 3608 int i;
3609 3609
3610 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", 3610 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3611 device_xname(sc->sc_dev), __func__)); 3611 device_xname(sc->sc_dev), __func__));
3612 3612
3613 /* Wait for eeprom to reload */ 3613 /* Wait for eeprom to reload */
3614 switch (sc->sc_type) { 3614 switch (sc->sc_type) {
3615 case WM_T_ICH10: 3615 case WM_T_ICH10:
3616 case WM_T_PCH: 3616 case WM_T_PCH:
3617 case WM_T_PCH2: 3617 case WM_T_PCH2:
3618 case WM_T_PCH_LPT: 3618 case WM_T_PCH_LPT:
3619 case WM_T_PCH_SPT: 3619 case WM_T_PCH_SPT:
3620 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) { 3620 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3621 reg = CSR_READ(sc, WMREG_STATUS); 3621 reg = CSR_READ(sc, WMREG_STATUS);
3622 if ((reg & STATUS_LAN_INIT_DONE) != 0) 3622 if ((reg & STATUS_LAN_INIT_DONE) != 0)
3623 break; 3623 break;
3624 delay(100); 3624 delay(100);
3625 } 3625 }
3626 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) { 3626 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3627 log(LOG_ERR, "%s: %s: lan_init_done failed to " 3627 log(LOG_ERR, "%s: %s: lan_init_done failed to "
3628 "complete\n", device_xname(sc->sc_dev), __func__); 3628 "complete\n", device_xname(sc->sc_dev), __func__);
3629 } 3629 }
3630 break; 3630 break;
3631 default: 3631 default:
3632 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev), 3632 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3633 __func__); 3633 __func__);
3634 break; 3634 break;
3635 } 3635 }
3636 3636
3637 reg &= ~STATUS_LAN_INIT_DONE; 3637 reg &= ~STATUS_LAN_INIT_DONE;
3638 CSR_WRITE(sc, WMREG_STATUS, reg); 3638 CSR_WRITE(sc, WMREG_STATUS, reg);
3639} 3639}
3640 3640
3641void 3641void
3642wm_get_cfg_done(struct wm_softc *sc) 3642wm_get_cfg_done(struct wm_softc *sc)
3643{ 3643{
3644 int mask; 3644 int mask;
3645 uint32_t reg; 3645 uint32_t reg;
3646 int i; 3646 int i;
3647 3647
3648 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", 3648 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3649 device_xname(sc->sc_dev), __func__)); 3649 device_xname(sc->sc_dev), __func__));
3650 3650
3651 /* Wait for eeprom to reload */ 3651 /* Wait for eeprom to reload */
3652 switch (sc->sc_type) { 3652 switch (sc->sc_type) {
3653 case WM_T_82542_2_0: 3653 case WM_T_82542_2_0:
3654 case WM_T_82542_2_1: 3654 case WM_T_82542_2_1:
3655 /* null */ 3655 /* null */
3656 break; 3656 break;
3657 case WM_T_82543: 3657 case WM_T_82543:
3658 case WM_T_82544: 3658 case WM_T_82544:
3659 case WM_T_82540: 3659 case WM_T_82540:
3660 case WM_T_82545: 3660 case WM_T_82545:
3661 case WM_T_82545_3: 3661 case WM_T_82545_3:
3662 case WM_T_82546: 3662 case WM_T_82546:
3663 case WM_T_82546_3: 3663 case WM_T_82546_3:
3664 case WM_T_82541: 3664 case WM_T_82541:
3665 case WM_T_82541_2: 3665 case WM_T_82541_2:
3666 case WM_T_82547: 3666 case WM_T_82547:
3667 case WM_T_82547_2: 3667 case WM_T_82547_2:
3668 case WM_T_82573: 3668 case WM_T_82573:
3669 case WM_T_82574: 3669 case WM_T_82574:
3670 case WM_T_82583: 3670 case WM_T_82583:
3671 /* generic */ 3671 /* generic */
3672 delay(10*1000); 3672 delay(10*1000);
3673 break; 3673 break;
3674 case WM_T_80003: 3674 case WM_T_80003:
3675 case WM_T_82571: 3675 case WM_T_82571:
3676 case WM_T_82572: 3676 case WM_T_82572:
3677 case WM_T_82575: 3677 case WM_T_82575:
3678 case WM_T_82576: 3678 case WM_T_82576:
3679 case WM_T_82580: 3679 case WM_T_82580:
3680 case WM_T_I350: 3680 case WM_T_I350:
3681 case WM_T_I354: 3681 case WM_T_I354:
3682 case WM_T_I210: 3682 case WM_T_I210:
3683 case WM_T_I211: 3683 case WM_T_I211:
3684 if (sc->sc_type == WM_T_82571) { 3684 if (sc->sc_type == WM_T_82571) {
3685 /* Only 82571 shares port 0 */ 3685 /* Only 82571 shares port 0 */
3686 mask = EEMNGCTL_CFGDONE_0; 3686 mask = EEMNGCTL_CFGDONE_0;
3687 } else 3687 } else
3688 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid; 3688 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3689 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) { 3689 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3690 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask) 3690 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3691 break; 3691 break;
3692 delay(1000); 3692 delay(1000);
3693 } 3693 }
3694 if (i >= WM_PHY_CFG_TIMEOUT) { 3694 if (i >= WM_PHY_CFG_TIMEOUT) {
3695 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n", 3695 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3696 device_xname(sc->sc_dev), __func__)); 3696 device_xname(sc->sc_dev), __func__));
3697 } 3697 }
3698 break; 3698 break;
3699 case WM_T_ICH8: 3699 case WM_T_ICH8:
3700 case WM_T_ICH9: 3700 case WM_T_ICH9: