Thu Feb 29 10:46:28 2024 UTC (107d)
Pull up the following, requested by msaitoh in ticket #1938:

	sys/dev/pci/if_wm.c			1.792,1.794-1.798 via patch
	sys/dev/pci/if_wmreg.h			1.131

- Add RQDPC (Receive Queue Drop Packet Count) to iqdrops.
- Drop frames if the RX descriptor ring has no room on multiqueue systems.
- Improve dmesg output.
  - Print RX packet buffer size.
  - Fix the upper 16 bits of Image Unique ID (EtrackID).
- Fix comment.


(martin)
diff -r1.508.4.54 -r1.508.4.55 src/sys/dev/pci/if_wm.c
diff -r1.98.6.18 -r1.98.6.19 src/sys/dev/pci/if_wmreg.h

cvs diff -r1.508.4.54 -r1.508.4.55 src/sys/dev/pci/if_wm.c (switch to unified diff)

--- src/sys/dev/pci/if_wm.c 2024/02/03 12:04:06 1.508.4.54
+++ src/sys/dev/pci/if_wm.c 2024/02/29 10:46:27 1.508.4.55
@@ -1,1473 +1,1473 @@ @@ -1,1473 +1,1473 @@
1/* $NetBSD: if_wm.c,v 1.508.4.54 2024/02/03 12:04:06 martin Exp $ */ 1/* $NetBSD: if_wm.c,v 1.508.4.55 2024/02/29 10:46:27 martin Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc. 4 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc. 7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the 15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution. 16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software 17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement: 18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by 19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc. 20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior 22 * or promote products derived from this software without specific prior
23 * written permission. 23 * written permission.
24 * 24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE. 35 * POSSIBILITY OF SUCH DAMAGE.
36 */ 36 */
37 37
38/******************************************************************************* 38/*******************************************************************************
39 39
40 Copyright (c) 2001-2005, Intel Corporation 40 Copyright (c) 2001-2005, Intel Corporation
41 All rights reserved. 41 All rights reserved.
42 42
43 Redistribution and use in source and binary forms, with or without 43 Redistribution and use in source and binary forms, with or without
44 modification, are permitted provided that the following conditions are met: 44 modification, are permitted provided that the following conditions are met:
45 45
46 1. Redistributions of source code must retain the above copyright notice, 46 1. Redistributions of source code must retain the above copyright notice,
47 this list of conditions and the following disclaimer. 47 this list of conditions and the following disclaimer.
48 48
49 2. Redistributions in binary form must reproduce the above copyright 49 2. Redistributions in binary form must reproduce the above copyright
50 notice, this list of conditions and the following disclaimer in the 50 notice, this list of conditions and the following disclaimer in the
51 documentation and/or other materials provided with the distribution. 51 documentation and/or other materials provided with the distribution.
52 52
53 3. Neither the name of the Intel Corporation nor the names of its 53 3. Neither the name of the Intel Corporation nor the names of its
54 contributors may be used to endorse or promote products derived from 54 contributors may be used to endorse or promote products derived from
55 this software without specific prior written permission. 55 this software without specific prior written permission.
56 56
57 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 57 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 58 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 59 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 60 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 61 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 62 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 63 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 64 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 65 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 66 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67 POSSIBILITY OF SUCH DAMAGE. 67 POSSIBILITY OF SUCH DAMAGE.
68 68
69*******************************************************************************/ 69*******************************************************************************/
70/* 70/*
71 * Device driver for the Intel i8254x family of Gigabit Ethernet chips. 71 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72 * 72 *
73 * TODO (in order of importance): 73 * TODO (in order of importance):
74 * 74 *
75 * - Check XXX'ed comments 75 * - Check XXX'ed comments
76 * - TX Multi queue improvement (refine queue selection logic) 76 * - TX Multi queue improvement (refine queue selection logic)
77 * - Split header buffer for newer descriptors 77 * - Split header buffer for newer descriptors
78 * - EEE (Energy Efficiency Ethernet) 78 * - EEE (Energy Efficiency Ethernet)
79 * - Virtual Function 79 * - Virtual Function
80 * - Set LED correctly (based on contents in EEPROM) 80 * - Set LED correctly (based on contents in EEPROM)
81 * - Rework how parameters are loaded from the EEPROM. 81 * - Rework how parameters are loaded from the EEPROM.
82 */ 82 */
83 83
84#include <sys/cdefs.h> 84#include <sys/cdefs.h>
85__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.508.4.54 2024/02/03 12:04:06 martin Exp $"); 85__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.508.4.55 2024/02/29 10:46:27 martin Exp $");
86 86
87#ifdef _KERNEL_OPT 87#ifdef _KERNEL_OPT
88#include "opt_net_mpsafe.h" 88#include "opt_net_mpsafe.h"
89#include "opt_if_wm.h" 89#include "opt_if_wm.h"
90#endif 90#endif
91 91
92#include <sys/param.h> 92#include <sys/param.h>
93#include <sys/callout.h> 93#include <sys/callout.h>
94#include <sys/cpu.h> 94#include <sys/cpu.h>
95#include <sys/device.h> 95#include <sys/device.h>
96#include <sys/errno.h> 96#include <sys/errno.h>
97#include <sys/interrupt.h> 97#include <sys/interrupt.h>
98#include <sys/ioctl.h> 98#include <sys/ioctl.h>
99#include <sys/kernel.h> 99#include <sys/kernel.h>
100#include <sys/kmem.h> 100#include <sys/kmem.h>
101#include <sys/mbuf.h> 101#include <sys/mbuf.h>
102#include <sys/pcq.h> 102#include <sys/pcq.h>
103#include <sys/queue.h> 103#include <sys/queue.h>
104#include <sys/rndsource.h> 104#include <sys/rndsource.h>
105#include <sys/socket.h> 105#include <sys/socket.h>
106#include <sys/sysctl.h> 106#include <sys/sysctl.h>
107#include <sys/syslog.h> 107#include <sys/syslog.h>
108#include <sys/systm.h> 108#include <sys/systm.h>
109#include <sys/workqueue.h> 109#include <sys/workqueue.h>
110 110
111#include <net/if.h> 111#include <net/if.h>
112#include <net/if_dl.h> 112#include <net/if_dl.h>
113#include <net/if_media.h> 113#include <net/if_media.h>
114#include <net/if_ether.h> 114#include <net/if_ether.h>
115 115
116#include <net/bpf.h> 116#include <net/bpf.h>
117 117
118#include <net/rss_config.h> 118#include <net/rss_config.h>
119 119
120#include <netinet/in.h> /* XXX for struct ip */ 120#include <netinet/in.h> /* XXX for struct ip */
121#include <netinet/in_systm.h> /* XXX for struct ip */ 121#include <netinet/in_systm.h> /* XXX for struct ip */
122#include <netinet/ip.h> /* XXX for struct ip */ 122#include <netinet/ip.h> /* XXX for struct ip */
123#include <netinet/ip6.h> /* XXX for struct ip6_hdr */ 123#include <netinet/ip6.h> /* XXX for struct ip6_hdr */
124#include <netinet/tcp.h> /* XXX for struct tcphdr */ 124#include <netinet/tcp.h> /* XXX for struct tcphdr */
125 125
126#include <sys/bus.h> 126#include <sys/bus.h>
127#include <sys/intr.h> 127#include <sys/intr.h>
128#include <machine/endian.h> 128#include <machine/endian.h>
129 129
130#include <dev/mii/mii.h> 130#include <dev/mii/mii.h>
131#include <dev/mii/miivar.h> 131#include <dev/mii/miivar.h>
132#include <dev/mii/miidevs.h> 132#include <dev/mii/miidevs.h>
133#include <dev/mii/mii_bitbang.h> 133#include <dev/mii/mii_bitbang.h>
134#include <dev/mii/ikphyreg.h> 134#include <dev/mii/ikphyreg.h>
135#include <dev/mii/igphyreg.h> 135#include <dev/mii/igphyreg.h>
136#include <dev/mii/igphyvar.h> 136#include <dev/mii/igphyvar.h>
137#include <dev/mii/inbmphyreg.h> 137#include <dev/mii/inbmphyreg.h>
138#include <dev/mii/ihphyreg.h> 138#include <dev/mii/ihphyreg.h>
139#include <dev/mii/makphyreg.h> 139#include <dev/mii/makphyreg.h>
140 140
141#include <dev/pci/pcireg.h> 141#include <dev/pci/pcireg.h>
142#include <dev/pci/pcivar.h> 142#include <dev/pci/pcivar.h>
143#include <dev/pci/pcidevs.h> 143#include <dev/pci/pcidevs.h>
144 144
145#include <dev/pci/if_wmreg.h> 145#include <dev/pci/if_wmreg.h>
146#include <dev/pci/if_wmvar.h> 146#include <dev/pci/if_wmvar.h>
147 147
148#ifdef WM_DEBUG 148#ifdef WM_DEBUG
149#define WM_DEBUG_LINK __BIT(0) 149#define WM_DEBUG_LINK __BIT(0)
150#define WM_DEBUG_TX __BIT(1) 150#define WM_DEBUG_TX __BIT(1)
151#define WM_DEBUG_RX __BIT(2) 151#define WM_DEBUG_RX __BIT(2)
152#define WM_DEBUG_GMII __BIT(3) 152#define WM_DEBUG_GMII __BIT(3)
153#define WM_DEBUG_MANAGE __BIT(4) 153#define WM_DEBUG_MANAGE __BIT(4)
154#define WM_DEBUG_NVM __BIT(5) 154#define WM_DEBUG_NVM __BIT(5)
155#define WM_DEBUG_INIT __BIT(6) 155#define WM_DEBUG_INIT __BIT(6)
156#define WM_DEBUG_LOCK __BIT(7) 156#define WM_DEBUG_LOCK __BIT(7)
157 157
158#if 0 158#if 0
159#define WM_DEBUG_DEFAULT WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \ 159#define WM_DEBUG_DEFAULT WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
160 WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | \ 160 WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | \
161 WM_DEBUG_LOCK 161 WM_DEBUG_LOCK
162#endif 162#endif
163 163
164#define DPRINTF(sc, x, y) \ 164#define DPRINTF(sc, x, y) \
165 do { \ 165 do { \
166 if ((sc)->sc_debug & (x)) \ 166 if ((sc)->sc_debug & (x)) \
167 printf y; \ 167 printf y; \
168 } while (0) 168 } while (0)
169#else 169#else
170#define DPRINTF(sc, x, y) __nothing 170#define DPRINTF(sc, x, y) __nothing
171#endif /* WM_DEBUG */ 171#endif /* WM_DEBUG */
172 172
173#ifdef NET_MPSAFE 173#ifdef NET_MPSAFE
174#define WM_MPSAFE 1 174#define WM_MPSAFE 1
175#define WM_CALLOUT_FLAGS CALLOUT_MPSAFE 175#define WM_CALLOUT_FLAGS CALLOUT_MPSAFE
176#define WM_SOFTINT_FLAGS SOFTINT_MPSAFE 176#define WM_SOFTINT_FLAGS SOFTINT_MPSAFE
177#define WM_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE 177#define WM_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE
178#else 178#else
179#define WM_CALLOUT_FLAGS 0 179#define WM_CALLOUT_FLAGS 0
180#define WM_SOFTINT_FLAGS 0 180#define WM_SOFTINT_FLAGS 0
181#define WM_WORKQUEUE_FLAGS WQ_PERCPU 181#define WM_WORKQUEUE_FLAGS WQ_PERCPU
182#endif 182#endif
183 183
184#define WM_WORKQUEUE_PRI PRI_SOFTNET 184#define WM_WORKQUEUE_PRI PRI_SOFTNET
185 185
186/* 186/*
187 * This device driver's max interrupt numbers. 187 * This device driver's max interrupt numbers.
188 */ 188 */
189#define WM_MAX_NQUEUEINTR 16 189#define WM_MAX_NQUEUEINTR 16
190#define WM_MAX_NINTR (WM_MAX_NQUEUEINTR + 1) 190#define WM_MAX_NINTR (WM_MAX_NQUEUEINTR + 1)
191 191
192#ifndef WM_DISABLE_MSI 192#ifndef WM_DISABLE_MSI
193#define WM_DISABLE_MSI 0 193#define WM_DISABLE_MSI 0
194#endif 194#endif
195#ifndef WM_DISABLE_MSIX 195#ifndef WM_DISABLE_MSIX
196#define WM_DISABLE_MSIX 0 196#define WM_DISABLE_MSIX 0
197#endif 197#endif
198 198
199int wm_disable_msi = WM_DISABLE_MSI; 199int wm_disable_msi = WM_DISABLE_MSI;
200int wm_disable_msix = WM_DISABLE_MSIX; 200int wm_disable_msix = WM_DISABLE_MSIX;
201 201
202#ifndef WM_WATCHDOG_TIMEOUT 202#ifndef WM_WATCHDOG_TIMEOUT
203#define WM_WATCHDOG_TIMEOUT 5 203#define WM_WATCHDOG_TIMEOUT 5
204#endif 204#endif
205static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT; 205static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;
206 206
207/* 207/*
208 * Transmit descriptor list size. Due to errata, we can only have 208 * Transmit descriptor list size. Due to errata, we can only have
209 * 256 hardware descriptors in the ring on < 82544, but we use 4096 209 * 256 hardware descriptors in the ring on < 82544, but we use 4096
210 * on >= 82544. We tell the upper layers that they can queue a lot 210 * on >= 82544. We tell the upper layers that they can queue a lot
211 * of packets, and we go ahead and manage up to 64 (16 for the i82547) 211 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
212 * of them at a time. 212 * of them at a time.
213 * 213 *
214 * We allow up to 64 DMA segments per packet. Pathological packet 214 * We allow up to 64 DMA segments per packet. Pathological packet
215 * chains containing many small mbufs have been observed in zero-copy 215 * chains containing many small mbufs have been observed in zero-copy
216 * situations with jumbo frames. If a mbuf chain has more than 64 DMA segments, 216 * situations with jumbo frames. If a mbuf chain has more than 64 DMA segments,
217 * m_defrag() is called to reduce it. 217 * m_defrag() is called to reduce it.
218 */ 218 */
219#define WM_NTXSEGS 64 219#define WM_NTXSEGS 64
220#define WM_IFQUEUELEN 256 220#define WM_IFQUEUELEN 256
221#define WM_TXQUEUELEN_MAX 64 221#define WM_TXQUEUELEN_MAX 64
222#define WM_TXQUEUELEN_MAX_82547 16 222#define WM_TXQUEUELEN_MAX_82547 16
223#define WM_TXQUEUELEN(txq) ((txq)->txq_num) 223#define WM_TXQUEUELEN(txq) ((txq)->txq_num)
224#define WM_TXQUEUELEN_MASK(txq) (WM_TXQUEUELEN(txq) - 1) 224#define WM_TXQUEUELEN_MASK(txq) (WM_TXQUEUELEN(txq) - 1)
225#define WM_TXQUEUE_GC(txq) (WM_TXQUEUELEN(txq) / 8) 225#define WM_TXQUEUE_GC(txq) (WM_TXQUEUELEN(txq) / 8)
226#define WM_NTXDESC_82542 256 226#define WM_NTXDESC_82542 256
227#define WM_NTXDESC_82544 4096 227#define WM_NTXDESC_82544 4096
228#define WM_NTXDESC(txq) ((txq)->txq_ndesc) 228#define WM_NTXDESC(txq) ((txq)->txq_ndesc)
229#define WM_NTXDESC_MASK(txq) (WM_NTXDESC(txq) - 1) 229#define WM_NTXDESC_MASK(txq) (WM_NTXDESC(txq) - 1)
230#define WM_TXDESCS_SIZE(txq) (WM_NTXDESC(txq) * (txq)->txq_descsize) 230#define WM_TXDESCS_SIZE(txq) (WM_NTXDESC(txq) * (txq)->txq_descsize)
231#define WM_NEXTTX(txq, x) (((x) + 1) & WM_NTXDESC_MASK(txq)) 231#define WM_NEXTTX(txq, x) (((x) + 1) & WM_NTXDESC_MASK(txq))
232#define WM_NEXTTXS(txq, x) (((x) + 1) & WM_TXQUEUELEN_MASK(txq)) 232#define WM_NEXTTXS(txq, x) (((x) + 1) & WM_TXQUEUELEN_MASK(txq))
233 233
234#define WM_MAXTXDMA (2 * round_page(IP_MAXPACKET)) /* for TSO */ 234#define WM_MAXTXDMA (2 * round_page(IP_MAXPACKET)) /* for TSO */
235 235
236#define WM_TXINTERQSIZE 256 236#define WM_TXINTERQSIZE 256
237 237
238#ifndef WM_TX_PROCESS_LIMIT_DEFAULT 238#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
239#define WM_TX_PROCESS_LIMIT_DEFAULT 100U 239#define WM_TX_PROCESS_LIMIT_DEFAULT 100U
240#endif 240#endif
241#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT 241#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
242#define WM_TX_INTR_PROCESS_LIMIT_DEFAULT 0U 242#define WM_TX_INTR_PROCESS_LIMIT_DEFAULT 0U
243#endif 243#endif
244 244
245/* 245/*
246 * Receive descriptor list size. We have one Rx buffer for normal 246 * Receive descriptor list size. We have one Rx buffer for normal
247 * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized 247 * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized
248 * packet. We allocate 256 receive descriptors, each with a 2k 248 * packet. We allocate 256 receive descriptors, each with a 2k
249 * buffer (MCLBYTES), which gives us room for 50 jumbo packets. 249 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
250 */ 250 */
251#define WM_NRXDESC 256U 251#define WM_NRXDESC 256U
252#define WM_NRXDESC_MASK (WM_NRXDESC - 1) 252#define WM_NRXDESC_MASK (WM_NRXDESC - 1)
253#define WM_NEXTRX(x) (((x) + 1) & WM_NRXDESC_MASK) 253#define WM_NEXTRX(x) (((x) + 1) & WM_NRXDESC_MASK)
254#define WM_PREVRX(x) (((x) - 1) & WM_NRXDESC_MASK) 254#define WM_PREVRX(x) (((x) - 1) & WM_NRXDESC_MASK)
255 255
256#ifndef WM_RX_PROCESS_LIMIT_DEFAULT 256#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
257#define WM_RX_PROCESS_LIMIT_DEFAULT 100U 257#define WM_RX_PROCESS_LIMIT_DEFAULT 100U
258#endif 258#endif
259#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT 259#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
260#define WM_RX_INTR_PROCESS_LIMIT_DEFAULT 0U 260#define WM_RX_INTR_PROCESS_LIMIT_DEFAULT 0U
261#endif 261#endif
262 262
263typedef union txdescs { 263typedef union txdescs {
264 wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544]; 264 wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
265 nq_txdesc_t sctxu_nq_txdescs[WM_NTXDESC_82544]; 265 nq_txdesc_t sctxu_nq_txdescs[WM_NTXDESC_82544];
266} txdescs_t; 266} txdescs_t;
267 267
268typedef union rxdescs { 268typedef union rxdescs {
269 wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC]; 269 wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
270 ext_rxdesc_t sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */ 270 ext_rxdesc_t sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
271 nq_rxdesc_t sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */ 271 nq_rxdesc_t sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
272} rxdescs_t; 272} rxdescs_t;
273 273
274#define WM_CDTXOFF(txq, x) ((txq)->txq_descsize * (x)) 274#define WM_CDTXOFF(txq, x) ((txq)->txq_descsize * (x))
275#define WM_CDRXOFF(rxq, x) ((rxq)->rxq_descsize * (x)) 275#define WM_CDRXOFF(rxq, x) ((rxq)->rxq_descsize * (x))
276 276
277/* 277/*
278 * Software state for transmit jobs. 278 * Software state for transmit jobs.
279 */ 279 */
280struct wm_txsoft { 280struct wm_txsoft {
281 struct mbuf *txs_mbuf; /* head of our mbuf chain */ 281 struct mbuf *txs_mbuf; /* head of our mbuf chain */
282 bus_dmamap_t txs_dmamap; /* our DMA map */ 282 bus_dmamap_t txs_dmamap; /* our DMA map */
283 int txs_firstdesc; /* first descriptor in packet */ 283 int txs_firstdesc; /* first descriptor in packet */
284 int txs_lastdesc; /* last descriptor in packet */ 284 int txs_lastdesc; /* last descriptor in packet */
285 int txs_ndesc; /* # of descriptors used */ 285 int txs_ndesc; /* # of descriptors used */
286}; 286};
287 287
288/* 288/*
289 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES) 289 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
290 * buffer and a DMA map. For packets which fill more than one buffer, we chain 290 * buffer and a DMA map. For packets which fill more than one buffer, we chain
291 * them together. 291 * them together.
292 */ 292 */
293struct wm_rxsoft { 293struct wm_rxsoft {
294 struct mbuf *rxs_mbuf; /* head of our mbuf chain */ 294 struct mbuf *rxs_mbuf; /* head of our mbuf chain */
295 bus_dmamap_t rxs_dmamap; /* our DMA map */ 295 bus_dmamap_t rxs_dmamap; /* our DMA map */
296}; 296};
297 297
298#define WM_LINKUP_TIMEOUT 50 298#define WM_LINKUP_TIMEOUT 50
299 299
300static uint16_t swfwphysem[] = { 300static uint16_t swfwphysem[] = {
301 SWFW_PHY0_SM, 301 SWFW_PHY0_SM,
302 SWFW_PHY1_SM, 302 SWFW_PHY1_SM,
303 SWFW_PHY2_SM, 303 SWFW_PHY2_SM,
304 SWFW_PHY3_SM 304 SWFW_PHY3_SM
305}; 305};
306 306
307static const uint32_t wm_82580_rxpbs_table[] = { 307static const uint32_t wm_82580_rxpbs_table[] = {
308 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 308 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
309}; 309};
310 310
311struct wm_softc; 311struct wm_softc;
312 312
313#if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS) 313#if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
314#if !defined(WM_EVENT_COUNTERS) 314#if !defined(WM_EVENT_COUNTERS)
315#define WM_EVENT_COUNTERS 1 315#define WM_EVENT_COUNTERS 1
316#endif 316#endif
317#endif 317#endif
318 318
319#ifdef WM_EVENT_COUNTERS 319#ifdef WM_EVENT_COUNTERS
320#define WM_Q_EVCNT_DEFINE(qname, evname) \ 320#define WM_Q_EVCNT_DEFINE(qname, evname) \
321 char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \ 321 char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
322 struct evcnt qname##_ev_##evname 322 struct evcnt qname##_ev_##evname
323 323
324#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype) \ 324#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype) \
325 do { \ 325 do { \
326 snprintf((q)->qname##_##evname##_evcnt_name, \ 326 snprintf((q)->qname##_##evname##_evcnt_name, \
327 sizeof((q)->qname##_##evname##_evcnt_name), \ 327 sizeof((q)->qname##_##evname##_evcnt_name), \
328 "%s%02d%s", #qname, (qnum), #evname); \ 328 "%s%02d%s", #qname, (qnum), #evname); \
329 evcnt_attach_dynamic(&(q)->qname##_ev_##evname, \ 329 evcnt_attach_dynamic(&(q)->qname##_ev_##evname, \
330 (evtype), NULL, (xname), \ 330 (evtype), NULL, (xname), \
331 (q)->qname##_##evname##_evcnt_name); \ 331 (q)->qname##_##evname##_evcnt_name); \
332 } while (0) 332 } while (0)
333 333
334#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname) \ 334#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname) \
335 WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC) 335 WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
336 336
337#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname) \ 337#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname) \
338 WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR) 338 WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
339 339
340#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum) \ 340#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum) \
341 evcnt_detach(&(q)->qname##_ev_##evname) 341 evcnt_detach(&(q)->qname##_ev_##evname)
342#endif /* WM_EVENT_COUNTERS */ 342#endif /* WM_EVENT_COUNTERS */
343 343
344struct wm_txqueue { 344struct wm_txqueue {
345 kmutex_t *txq_lock; /* lock for tx operations */ 345 kmutex_t *txq_lock; /* lock for tx operations */
346 346
347 struct wm_softc *txq_sc; /* shortcut (skip struct wm_queue) */ 347 struct wm_softc *txq_sc; /* shortcut (skip struct wm_queue) */
348 348
349 /* Software state for the transmit descriptors. */ 349 /* Software state for the transmit descriptors. */
350 int txq_num; /* must be a power of two */ 350 int txq_num; /* must be a power of two */
351 struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX]; 351 struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
352 352
353 /* TX control data structures. */ 353 /* TX control data structures. */
354 int txq_ndesc; /* must be a power of two */ 354 int txq_ndesc; /* must be a power of two */
355 size_t txq_descsize; /* a tx descriptor size */ 355 size_t txq_descsize; /* a tx descriptor size */
356 txdescs_t *txq_descs_u; 356 txdescs_t *txq_descs_u;
357 bus_dmamap_t txq_desc_dmamap; /* control data DMA map */ 357 bus_dmamap_t txq_desc_dmamap; /* control data DMA map */
358 bus_dma_segment_t txq_desc_seg; /* control data segment */ 358 bus_dma_segment_t txq_desc_seg; /* control data segment */
359 int txq_desc_rseg; /* real number of control segment */ 359 int txq_desc_rseg; /* real number of control segment */
360#define txq_desc_dma txq_desc_dmamap->dm_segs[0].ds_addr 360#define txq_desc_dma txq_desc_dmamap->dm_segs[0].ds_addr
361#define txq_descs txq_descs_u->sctxu_txdescs 361#define txq_descs txq_descs_u->sctxu_txdescs
362#define txq_nq_descs txq_descs_u->sctxu_nq_txdescs 362#define txq_nq_descs txq_descs_u->sctxu_nq_txdescs
363 363
364 bus_addr_t txq_tdt_reg; /* offset of TDT register */ 364 bus_addr_t txq_tdt_reg; /* offset of TDT register */
365 365
366 int txq_free; /* number of free Tx descriptors */ 366 int txq_free; /* number of free Tx descriptors */
367 int txq_next; /* next ready Tx descriptor */ 367 int txq_next; /* next ready Tx descriptor */
368 368
369 int txq_sfree; /* number of free Tx jobs */ 369 int txq_sfree; /* number of free Tx jobs */
370 int txq_snext; /* next free Tx job */ 370 int txq_snext; /* next free Tx job */
371 int txq_sdirty; /* dirty Tx jobs */ 371 int txq_sdirty; /* dirty Tx jobs */
372 372
373 /* These 4 variables are used only on the 82547. */ 373 /* These 4 variables are used only on the 82547. */
374 int txq_fifo_size; /* Tx FIFO size */ 374 int txq_fifo_size; /* Tx FIFO size */
375 int txq_fifo_head; /* current head of FIFO */ 375 int txq_fifo_head; /* current head of FIFO */
376 uint32_t txq_fifo_addr; /* internal address of start of FIFO */ 376 uint32_t txq_fifo_addr; /* internal address of start of FIFO */
377 int txq_fifo_stall; /* Tx FIFO is stalled */ 377 int txq_fifo_stall; /* Tx FIFO is stalled */
378 378
379 /* 379 /*
380 * When ncpu > number of Tx queues, a Tx queue is shared by multiple 380 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
381 * CPUs. This queue intermediate them without block. 381 * CPUs. This queue intermediate them without block.
382 */ 382 */
383 pcq_t *txq_interq; 383 pcq_t *txq_interq;
384 384
385 /* 385 /*
386 * NEWQUEUE devices must use not ifp->if_flags but txq->txq_flags 386 * NEWQUEUE devices must use not ifp->if_flags but txq->txq_flags
387 * to manage Tx H/W queue's busy flag. 387 * to manage Tx H/W queue's busy flag.
388 */ 388 */
389 int txq_flags; /* flags for H/W queue, see below */ 389 int txq_flags; /* flags for H/W queue, see below */
390#define WM_TXQ_NO_SPACE 0x1 390#define WM_TXQ_NO_SPACE 0x1
391#define WM_TXQ_LINKDOWN_DISCARD 0x2 391#define WM_TXQ_LINKDOWN_DISCARD 0x2
392 392
393 bool txq_stopping; 393 bool txq_stopping;
394 394
395 bool txq_sending; 395 bool txq_sending;
396 time_t txq_lastsent; 396 time_t txq_lastsent;
397 397
398 /* Checksum flags used for previous packet */ 398 /* Checksum flags used for previous packet */
399 uint32_t txq_last_hw_cmd; 399 uint32_t txq_last_hw_cmd;
400 uint8_t txq_last_hw_fields; 400 uint8_t txq_last_hw_fields;
401 uint16_t txq_last_hw_ipcs; 401 uint16_t txq_last_hw_ipcs;
402 uint16_t txq_last_hw_tucs; 402 uint16_t txq_last_hw_tucs;
403 403
404 uint32_t txq_packets; /* for AIM */ 404 uint32_t txq_packets; /* for AIM */
405 uint32_t txq_bytes; /* for AIM */ 405 uint32_t txq_bytes; /* for AIM */
406#ifdef WM_EVENT_COUNTERS 406#ifdef WM_EVENT_COUNTERS
407 /* TX event counters */ 407 /* TX event counters */
408 WM_Q_EVCNT_DEFINE(txq, txsstall); /* Stalled due to no txs */ 408 WM_Q_EVCNT_DEFINE(txq, txsstall); /* Stalled due to no txs */
409 WM_Q_EVCNT_DEFINE(txq, txdstall); /* Stalled due to no txd */ 409 WM_Q_EVCNT_DEFINE(txq, txdstall); /* Stalled due to no txd */
410 WM_Q_EVCNT_DEFINE(txq, fifo_stall); /* FIFO stalls (82547) */ 410 WM_Q_EVCNT_DEFINE(txq, fifo_stall); /* FIFO stalls (82547) */
411 WM_Q_EVCNT_DEFINE(txq, txdw); /* Tx descriptor interrupts */ 411 WM_Q_EVCNT_DEFINE(txq, txdw); /* Tx descriptor interrupts */
412 WM_Q_EVCNT_DEFINE(txq, txqe); /* Tx queue empty interrupts */ 412 WM_Q_EVCNT_DEFINE(txq, txqe); /* Tx queue empty interrupts */
413 /* XXX not used? */ 413 /* XXX not used? */
414 414
415 WM_Q_EVCNT_DEFINE(txq, ipsum); /* IP checksums comp. */ 415 WM_Q_EVCNT_DEFINE(txq, ipsum); /* IP checksums comp. */
416 WM_Q_EVCNT_DEFINE(txq, tusum); /* TCP/UDP cksums comp. */ 416 WM_Q_EVCNT_DEFINE(txq, tusum); /* TCP/UDP cksums comp. */
417 WM_Q_EVCNT_DEFINE(txq, tusum6); /* TCP/UDP v6 cksums comp. */ 417 WM_Q_EVCNT_DEFINE(txq, tusum6); /* TCP/UDP v6 cksums comp. */
418 WM_Q_EVCNT_DEFINE(txq, tso); /* TCP seg offload (IPv4) */ 418 WM_Q_EVCNT_DEFINE(txq, tso); /* TCP seg offload (IPv4) */
419 WM_Q_EVCNT_DEFINE(txq, tso6); /* TCP seg offload (IPv6) */ 419 WM_Q_EVCNT_DEFINE(txq, tso6); /* TCP seg offload (IPv6) */
420 WM_Q_EVCNT_DEFINE(txq, tsopain); /* Painful header manip. for TSO */ 420 WM_Q_EVCNT_DEFINE(txq, tsopain); /* Painful header manip. for TSO */
421 WM_Q_EVCNT_DEFINE(txq, pcqdrop); /* Pkt dropped in pcq */ 421 WM_Q_EVCNT_DEFINE(txq, pcqdrop); /* Pkt dropped in pcq */
422 WM_Q_EVCNT_DEFINE(txq, descdrop); /* Pkt dropped in MAC desc ring */ 422 WM_Q_EVCNT_DEFINE(txq, descdrop); /* Pkt dropped in MAC desc ring */
423 /* other than toomanyseg */ 423 /* other than toomanyseg */
424 424
425 WM_Q_EVCNT_DEFINE(txq, toomanyseg); /* Pkt dropped(toomany DMA segs) */ 425 WM_Q_EVCNT_DEFINE(txq, toomanyseg); /* Pkt dropped(toomany DMA segs) */
426 WM_Q_EVCNT_DEFINE(txq, defrag); /* m_defrag() */ 426 WM_Q_EVCNT_DEFINE(txq, defrag); /* m_defrag() */
427 WM_Q_EVCNT_DEFINE(txq, underrun); /* Tx underrun */ 427 WM_Q_EVCNT_DEFINE(txq, underrun); /* Tx underrun */
428 WM_Q_EVCNT_DEFINE(txq, skipcontext); /* Tx skip wrong cksum context */ 428 WM_Q_EVCNT_DEFINE(txq, skipcontext); /* Tx skip wrong cksum context */
429 429
430 char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")]; 430 char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
431 struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */ 431 struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
432#endif /* WM_EVENT_COUNTERS */ 432#endif /* WM_EVENT_COUNTERS */
433}; 433};
434 434
435struct wm_rxqueue { 435struct wm_rxqueue {
436 kmutex_t *rxq_lock; /* lock for rx operations */ 436 kmutex_t *rxq_lock; /* lock for rx operations */
437 437
438 struct wm_softc *rxq_sc; /* shortcut (skip struct wm_queue) */ 438 struct wm_softc *rxq_sc; /* shortcut (skip struct wm_queue) */
439 439
440 /* Software state for the receive descriptors. */ 440 /* Software state for the receive descriptors. */
441 struct wm_rxsoft rxq_soft[WM_NRXDESC]; 441 struct wm_rxsoft rxq_soft[WM_NRXDESC];
442 442
443 /* RX control data structures. */ 443 /* RX control data structures. */
444 int rxq_ndesc; /* must be a power of two */ 444 int rxq_ndesc; /* must be a power of two */
445 size_t rxq_descsize; /* a rx descriptor size */ 445 size_t rxq_descsize; /* a rx descriptor size */
446 rxdescs_t *rxq_descs_u; 446 rxdescs_t *rxq_descs_u;
447 bus_dmamap_t rxq_desc_dmamap; /* control data DMA map */ 447 bus_dmamap_t rxq_desc_dmamap; /* control data DMA map */
448 bus_dma_segment_t rxq_desc_seg; /* control data segment */ 448 bus_dma_segment_t rxq_desc_seg; /* control data segment */
449 int rxq_desc_rseg; /* real number of control segment */ 449 int rxq_desc_rseg; /* real number of control segment */
450#define rxq_desc_dma rxq_desc_dmamap->dm_segs[0].ds_addr 450#define rxq_desc_dma rxq_desc_dmamap->dm_segs[0].ds_addr
451#define rxq_descs rxq_descs_u->sctxu_rxdescs 451#define rxq_descs rxq_descs_u->sctxu_rxdescs
452#define rxq_ext_descs rxq_descs_u->sctxu_ext_rxdescs 452#define rxq_ext_descs rxq_descs_u->sctxu_ext_rxdescs
453#define rxq_nq_descs rxq_descs_u->sctxu_nq_rxdescs 453#define rxq_nq_descs rxq_descs_u->sctxu_nq_rxdescs
454 454
455 bus_addr_t rxq_rdt_reg; /* offset of RDT register */ 455 bus_addr_t rxq_rdt_reg; /* offset of RDT register */
456 456
457 int rxq_ptr; /* next ready Rx desc/queue ent */ 457 int rxq_ptr; /* next ready Rx desc/queue ent */
458 int rxq_discard; 458 int rxq_discard;
459 int rxq_len; 459 int rxq_len;
460 struct mbuf *rxq_head; 460 struct mbuf *rxq_head;
461 struct mbuf *rxq_tail; 461 struct mbuf *rxq_tail;
462 struct mbuf **rxq_tailp; 462 struct mbuf **rxq_tailp;
463 463
464 bool rxq_stopping; 464 bool rxq_stopping;
465 465
466 uint32_t rxq_packets; /* for AIM */ 466 uint32_t rxq_packets; /* for AIM */
467 uint32_t rxq_bytes; /* for AIM */ 467 uint32_t rxq_bytes; /* for AIM */
468#ifdef WM_EVENT_COUNTERS 468#ifdef WM_EVENT_COUNTERS
469 /* RX event counters */ 469 /* RX event counters */
470 WM_Q_EVCNT_DEFINE(rxq, intr); /* Interrupts */ 470 WM_Q_EVCNT_DEFINE(rxq, intr); /* Interrupts */
471 WM_Q_EVCNT_DEFINE(rxq, defer); /* Rx deferred processing */ 471 WM_Q_EVCNT_DEFINE(rxq, defer); /* Rx deferred processing */
472 
473 WM_Q_EVCNT_DEFINE(rxq, ipsum); /* IP checksums checked */ 472 WM_Q_EVCNT_DEFINE(rxq, ipsum); /* IP checksums checked */
474 WM_Q_EVCNT_DEFINE(rxq, tusum); /* TCP/UDP cksums checked */ 473 WM_Q_EVCNT_DEFINE(rxq, tusum); /* TCP/UDP cksums checked */
 474 WM_Q_EVCNT_DEFINE(rxq, qdrop); /* Rx queue drop packet */
475#endif 475#endif
476}; 476};
477 477
478struct wm_queue { 478struct wm_queue {
479 int wmq_id; /* index of TX/RX queues */ 479 int wmq_id; /* index of TX/RX queues */
480 int wmq_intr_idx; /* index of MSI-X tables */ 480 int wmq_intr_idx; /* index of MSI-X tables */
481 481
482 uint32_t wmq_itr; /* interrupt interval per queue. */ 482 uint32_t wmq_itr; /* interrupt interval per queue. */
483 bool wmq_set_itr; 483 bool wmq_set_itr;
484 484
485 struct wm_txqueue wmq_txq; 485 struct wm_txqueue wmq_txq;
486 struct wm_rxqueue wmq_rxq; 486 struct wm_rxqueue wmq_rxq;
487 char sysctlname[32]; /* Name for sysctl */ 487 char sysctlname[32]; /* Name for sysctl */
488 488
489 bool wmq_txrx_use_workqueue; 489 bool wmq_txrx_use_workqueue;
490 bool wmq_wq_enqueued; 490 bool wmq_wq_enqueued;
491 struct work wmq_cookie; 491 struct work wmq_cookie;
492 void *wmq_si; 492 void *wmq_si;
493 krndsource_t rnd_source; /* random source */ 493 krndsource_t rnd_source; /* random source */
494}; 494};
495 495
496struct wm_phyop { 496struct wm_phyop {
497 int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result)); 497 int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
498 void (*release)(struct wm_softc *); 498 void (*release)(struct wm_softc *);
499 int (*readreg_locked)(device_t, int, int, uint16_t *); 499 int (*readreg_locked)(device_t, int, int, uint16_t *);
500 int (*writereg_locked)(device_t, int, int, uint16_t); 500 int (*writereg_locked)(device_t, int, int, uint16_t);
501 int reset_delay_us; 501 int reset_delay_us;
502 bool no_errprint; 502 bool no_errprint;
503}; 503};
504 504
505struct wm_nvmop { 505struct wm_nvmop {
506 int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result)); 506 int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
507 void (*release)(struct wm_softc *); 507 void (*release)(struct wm_softc *);
508 int (*read)(struct wm_softc *, int, int, uint16_t *); 508 int (*read)(struct wm_softc *, int, int, uint16_t *);
509}; 509};
510 510
511/* 511/*
512 * Software state per device. 512 * Software state per device.
513 */ 513 */
514struct wm_softc { 514struct wm_softc {
515 device_t sc_dev; /* generic device information */ 515 device_t sc_dev; /* generic device information */
516 bus_space_tag_t sc_st; /* bus space tag */ 516 bus_space_tag_t sc_st; /* bus space tag */
517 bus_space_handle_t sc_sh; /* bus space handle */ 517 bus_space_handle_t sc_sh; /* bus space handle */
518 bus_size_t sc_ss; /* bus space size */ 518 bus_size_t sc_ss; /* bus space size */
519 bus_space_tag_t sc_iot; /* I/O space tag */ 519 bus_space_tag_t sc_iot; /* I/O space tag */
520 bus_space_handle_t sc_ioh; /* I/O space handle */ 520 bus_space_handle_t sc_ioh; /* I/O space handle */
521 bus_size_t sc_ios; /* I/O space size */ 521 bus_size_t sc_ios; /* I/O space size */
522 bus_space_tag_t sc_flasht; /* flash registers space tag */ 522 bus_space_tag_t sc_flasht; /* flash registers space tag */
523 bus_space_handle_t sc_flashh; /* flash registers space handle */ 523 bus_space_handle_t sc_flashh; /* flash registers space handle */
524 bus_size_t sc_flashs; /* flash registers space size */ 524 bus_size_t sc_flashs; /* flash registers space size */
525 off_t sc_flashreg_offset; /* 525 off_t sc_flashreg_offset; /*
526 * offset to flash registers from 526 * offset to flash registers from
527 * start of BAR 527 * start of BAR
528 */ 528 */
529 bus_dma_tag_t sc_dmat; /* bus DMA tag */ 529 bus_dma_tag_t sc_dmat; /* bus DMA tag */
530 530
531 struct ethercom sc_ethercom; /* Ethernet common data */ 531 struct ethercom sc_ethercom; /* Ethernet common data */
532 struct mii_data sc_mii; /* MII/media information */ 532 struct mii_data sc_mii; /* MII/media information */
533 533
534 pci_chipset_tag_t sc_pc; 534 pci_chipset_tag_t sc_pc;
535 pcitag_t sc_pcitag; 535 pcitag_t sc_pcitag;
536 int sc_bus_speed; /* PCI/PCIX bus speed */ 536 int sc_bus_speed; /* PCI/PCIX bus speed */
537 int sc_pcixe_capoff; /* PCI[Xe] capability reg offset */ 537 int sc_pcixe_capoff; /* PCI[Xe] capability reg offset */
538 538
539 uint16_t sc_pcidevid; /* PCI device ID */ 539 uint16_t sc_pcidevid; /* PCI device ID */
540 wm_chip_type sc_type; /* MAC type */ 540 wm_chip_type sc_type; /* MAC type */
541 int sc_rev; /* MAC revision */ 541 int sc_rev; /* MAC revision */
542 wm_phy_type sc_phytype; /* PHY type */ 542 wm_phy_type sc_phytype; /* PHY type */
543 uint8_t sc_sfptype; /* SFP type */ 543 uint8_t sc_sfptype; /* SFP type */
544 uint32_t sc_mediatype; /* Media type (Copper, Fiber, SERDES)*/ 544 uint32_t sc_mediatype; /* Media type (Copper, Fiber, SERDES)*/
545#define WM_MEDIATYPE_UNKNOWN 0x00 545#define WM_MEDIATYPE_UNKNOWN 0x00
546#define WM_MEDIATYPE_FIBER 0x01 546#define WM_MEDIATYPE_FIBER 0x01
547#define WM_MEDIATYPE_COPPER 0x02 547#define WM_MEDIATYPE_COPPER 0x02
548#define WM_MEDIATYPE_SERDES 0x03 /* Internal SERDES */ 548#define WM_MEDIATYPE_SERDES 0x03 /* Internal SERDES */
549 int sc_funcid; /* unit number of the chip (0 to 3) */ 549 int sc_funcid; /* unit number of the chip (0 to 3) */
550 u_int sc_flags; /* flags; see below */ 550 u_int sc_flags; /* flags; see below */
551 int sc_if_flags; /* last if_flags */ 551 int sc_if_flags; /* last if_flags */
552 int sc_flowflags; /* 802.3x flow control flags */ 552 int sc_flowflags; /* 802.3x flow control flags */
553 int sc_align_tweak; 553 int sc_align_tweak;
554 554
555 void *sc_ihs[WM_MAX_NINTR]; /* 555 void *sc_ihs[WM_MAX_NINTR]; /*
556 * interrupt cookie. 556 * interrupt cookie.
557 * - legacy and msi use sc_ihs[0] only 557 * - legacy and msi use sc_ihs[0] only
558 * - msix use sc_ihs[0] to sc_ihs[nintrs-1] 558 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
559 */ 559 */
560 pci_intr_handle_t *sc_intrs; /* 560 pci_intr_handle_t *sc_intrs; /*
561 * legacy and msi use sc_intrs[0] only 561 * legacy and msi use sc_intrs[0] only
562 * msix use sc_intrs[0] to sc_ihs[nintrs-1] 562 * msix use sc_intrs[0] to sc_ihs[nintrs-1]
563 */ 563 */
564 int sc_nintrs; /* number of interrupts */ 564 int sc_nintrs; /* number of interrupts */
565 565
566 int sc_link_intr_idx; /* index of MSI-X tables */ 566 int sc_link_intr_idx; /* index of MSI-X tables */
567 567
568 callout_t sc_tick_ch; /* tick callout */ 568 callout_t sc_tick_ch; /* tick callout */
569 bool sc_core_stopping; 569 bool sc_core_stopping;
570 570
571 int sc_nvm_ver_major; 571 int sc_nvm_ver_major;
572 int sc_nvm_ver_minor; 572 int sc_nvm_ver_minor;
573 int sc_nvm_ver_build; 573 int sc_nvm_ver_build;
574 int sc_nvm_addrbits; /* NVM address bits */ 574 int sc_nvm_addrbits; /* NVM address bits */
575 unsigned int sc_nvm_wordsize; /* NVM word size */ 575 unsigned int sc_nvm_wordsize; /* NVM word size */
576 int sc_ich8_flash_base; 576 int sc_ich8_flash_base;
577 int sc_ich8_flash_bank_size; 577 int sc_ich8_flash_bank_size;
578 int sc_nvm_k1_enabled; 578 int sc_nvm_k1_enabled;
579 579
580 int sc_nqueues; 580 int sc_nqueues;
581 struct wm_queue *sc_queue; 581 struct wm_queue *sc_queue;
582 u_int sc_tx_process_limit; /* Tx proc. repeat limit in softint */ 582 u_int sc_tx_process_limit; /* Tx proc. repeat limit in softint */
583 u_int sc_tx_intr_process_limit; /* Tx proc. repeat limit in H/W intr */ 583 u_int sc_tx_intr_process_limit; /* Tx proc. repeat limit in H/W intr */
584 u_int sc_rx_process_limit; /* Rx proc. repeat limit in softint */ 584 u_int sc_rx_process_limit; /* Rx proc. repeat limit in softint */
585 u_int sc_rx_intr_process_limit; /* Rx proc. repeat limit in H/W intr */ 585 u_int sc_rx_intr_process_limit; /* Rx proc. repeat limit in H/W intr */
586 struct workqueue *sc_queue_wq; 586 struct workqueue *sc_queue_wq;
587 bool sc_txrx_use_workqueue; 587 bool sc_txrx_use_workqueue;
588 588
589 int sc_affinity_offset; 589 int sc_affinity_offset;
590 590
591#ifdef WM_EVENT_COUNTERS 591#ifdef WM_EVENT_COUNTERS
592 /* Event counters. */ 592 /* Event counters. */
593 struct evcnt sc_ev_linkintr; /* Link interrupts */ 593 struct evcnt sc_ev_linkintr; /* Link interrupts */
594 594
595 /* >= WM_T_82542_2_1 */ 595 /* >= WM_T_82542_2_1 */
596 struct evcnt sc_ev_rx_xon; /* Rx PAUSE(0) frames */ 596 struct evcnt sc_ev_rx_xon; /* Rx PAUSE(0) frames */
597 struct evcnt sc_ev_tx_xon; /* Tx PAUSE(0) frames */ 597 struct evcnt sc_ev_tx_xon; /* Tx PAUSE(0) frames */
598 struct evcnt sc_ev_rx_xoff; /* Rx PAUSE(!0) frames */ 598 struct evcnt sc_ev_rx_xoff; /* Rx PAUSE(!0) frames */
599 struct evcnt sc_ev_tx_xoff; /* Tx PAUSE(!0) frames */ 599 struct evcnt sc_ev_tx_xoff; /* Tx PAUSE(!0) frames */
600 struct evcnt sc_ev_rx_macctl; /* Rx Unsupported */ 600 struct evcnt sc_ev_rx_macctl; /* Rx Unsupported */
601 601
602 struct evcnt sc_ev_crcerrs; /* CRC Error */ 602 struct evcnt sc_ev_crcerrs; /* CRC Error */
603 struct evcnt sc_ev_algnerrc; /* Alignment Error */ 603 struct evcnt sc_ev_algnerrc; /* Alignment Error */
604 struct evcnt sc_ev_symerrc; /* Symbol Error */ 604 struct evcnt sc_ev_symerrc; /* Symbol Error */
605 struct evcnt sc_ev_rxerrc; /* Receive Error */ 605 struct evcnt sc_ev_rxerrc; /* Receive Error */
606 struct evcnt sc_ev_mpc; /* Missed Packets */ 606 struct evcnt sc_ev_mpc; /* Missed Packets */
607 struct evcnt sc_ev_scc; /* Single Collision */ 607 struct evcnt sc_ev_scc; /* Single Collision */
608 struct evcnt sc_ev_ecol; /* Excessive Collision */ 608 struct evcnt sc_ev_ecol; /* Excessive Collision */
609 struct evcnt sc_ev_mcc; /* Multiple Collision */ 609 struct evcnt sc_ev_mcc; /* Multiple Collision */
610 struct evcnt sc_ev_latecol; /* Late Collision */ 610 struct evcnt sc_ev_latecol; /* Late Collision */
611 struct evcnt sc_ev_colc; /* Collision */ 611 struct evcnt sc_ev_colc; /* Collision */
612 struct evcnt sc_ev_cbtmpc; /* Circuit Breaker Tx Mng. Packet */ 612 struct evcnt sc_ev_cbtmpc; /* Circuit Breaker Tx Mng. Packet */
613 struct evcnt sc_ev_dc; /* Defer */ 613 struct evcnt sc_ev_dc; /* Defer */
614 struct evcnt sc_ev_tncrs; /* Tx-No CRS */ 614 struct evcnt sc_ev_tncrs; /* Tx-No CRS */
615 struct evcnt sc_ev_sec; /* Sequence Error */ 615 struct evcnt sc_ev_sec; /* Sequence Error */
616 616
617 /* Old */ 617 /* Old */
618 struct evcnt sc_ev_cexterr; /* Carrier Extension Error */ 618 struct evcnt sc_ev_cexterr; /* Carrier Extension Error */
619 /* New */ 619 /* New */
620 struct evcnt sc_ev_htdpmc; /* Host Tx Discarded Pkts by MAC */ 620 struct evcnt sc_ev_htdpmc; /* Host Tx Discarded Pkts by MAC */
621 621
622 struct evcnt sc_ev_rlec; /* Receive Length Error */ 622 struct evcnt sc_ev_rlec; /* Receive Length Error */
623 struct evcnt sc_ev_cbrdpc; /* Circuit Breaker Rx Dropped Packet */ 623 struct evcnt sc_ev_cbrdpc; /* Circuit Breaker Rx Dropped Packet */
624 struct evcnt sc_ev_prc64; /* Packets Rx (64 bytes) */ 624 struct evcnt sc_ev_prc64; /* Packets Rx (64 bytes) */
625 struct evcnt sc_ev_prc127; /* Packets Rx (65-127 bytes) */ 625 struct evcnt sc_ev_prc127; /* Packets Rx (65-127 bytes) */
626 struct evcnt sc_ev_prc255; /* Packets Rx (128-255 bytes) */ 626 struct evcnt sc_ev_prc255; /* Packets Rx (128-255 bytes) */
627 struct evcnt sc_ev_prc511; /* Packets Rx (256-511 bytes) */ 627 struct evcnt sc_ev_prc511; /* Packets Rx (256-511 bytes) */
628 struct evcnt sc_ev_prc1023; /* Packets Rx (512-1023 bytes) */ 628 struct evcnt sc_ev_prc1023; /* Packets Rx (512-1023 bytes) */
629 struct evcnt sc_ev_prc1522; /* Packets Rx (1024-1522 bytes) */ 629 struct evcnt sc_ev_prc1522; /* Packets Rx (1024-1522 bytes) */
630 struct evcnt sc_ev_gprc; /* Good Packets Rx */ 630 struct evcnt sc_ev_gprc; /* Good Packets Rx */
631 struct evcnt sc_ev_bprc; /* Broadcast Packets Rx */ 631 struct evcnt sc_ev_bprc; /* Broadcast Packets Rx */
632 struct evcnt sc_ev_mprc; /* Multicast Packets Rx */ 632 struct evcnt sc_ev_mprc; /* Multicast Packets Rx */
633 struct evcnt sc_ev_gptc; /* Good Packets Tx */ 633 struct evcnt sc_ev_gptc; /* Good Packets Tx */
634 struct evcnt sc_ev_gorc; /* Good Octets Rx */ 634 struct evcnt sc_ev_gorc; /* Good Octets Rx */
635 struct evcnt sc_ev_gotc; /* Good Octets Tx */ 635 struct evcnt sc_ev_gotc; /* Good Octets Tx */
636 struct evcnt sc_ev_rnbc; /* Rx No Buffers */ 636 struct evcnt sc_ev_rnbc; /* Rx No Buffers */
637 struct evcnt sc_ev_ruc; /* Rx Undersize */ 637 struct evcnt sc_ev_ruc; /* Rx Undersize */
638 struct evcnt sc_ev_rfc; /* Rx Fragment */ 638 struct evcnt sc_ev_rfc; /* Rx Fragment */
639 struct evcnt sc_ev_roc; /* Rx Oversize */ 639 struct evcnt sc_ev_roc; /* Rx Oversize */
640 struct evcnt sc_ev_rjc; /* Rx Jabber */ 640 struct evcnt sc_ev_rjc; /* Rx Jabber */
641 struct evcnt sc_ev_mgtprc; /* Management Packets RX */ 641 struct evcnt sc_ev_mgtprc; /* Management Packets RX */
642 struct evcnt sc_ev_mgtpdc; /* Management Packets Dropped */ 642 struct evcnt sc_ev_mgtpdc; /* Management Packets Dropped */
643 struct evcnt sc_ev_mgtptc; /* Management Packets TX */ 643 struct evcnt sc_ev_mgtptc; /* Management Packets TX */
644 struct evcnt sc_ev_tor; /* Total Octets Rx */ 644 struct evcnt sc_ev_tor; /* Total Octets Rx */
645 struct evcnt sc_ev_tot; /* Total Octets Tx */ 645 struct evcnt sc_ev_tot; /* Total Octets Tx */
646 struct evcnt sc_ev_tpr; /* Total Packets Rx */ 646 struct evcnt sc_ev_tpr; /* Total Packets Rx */
647 struct evcnt sc_ev_tpt; /* Total Packets Tx */ 647 struct evcnt sc_ev_tpt; /* Total Packets Tx */
648 struct evcnt sc_ev_ptc64; /* Packets Tx (64 bytes) */ 648 struct evcnt sc_ev_ptc64; /* Packets Tx (64 bytes) */
649 struct evcnt sc_ev_ptc127; /* Packets Tx (65-127 bytes) */ 649 struct evcnt sc_ev_ptc127; /* Packets Tx (65-127 bytes) */
650 struct evcnt sc_ev_ptc255; /* Packets Tx (128-255 bytes) */ 650 struct evcnt sc_ev_ptc255; /* Packets Tx (128-255 bytes) */
651 struct evcnt sc_ev_ptc511; /* Packets Tx (256-511 bytes) */ 651 struct evcnt sc_ev_ptc511; /* Packets Tx (256-511 bytes) */
652 struct evcnt sc_ev_ptc1023; /* Packets Tx (512-1023 bytes) */ 652 struct evcnt sc_ev_ptc1023; /* Packets Tx (512-1023 bytes) */
653 struct evcnt sc_ev_ptc1522; /* Packets Tx (1024-1522 Bytes) */ 653 struct evcnt sc_ev_ptc1522; /* Packets Tx (1024-1522 Bytes) */
654 struct evcnt sc_ev_mptc; /* Multicast Packets Tx */ 654 struct evcnt sc_ev_mptc; /* Multicast Packets Tx */
655 struct evcnt sc_ev_bptc; /* Broadcast Packets Tx */ 655 struct evcnt sc_ev_bptc; /* Broadcast Packets Tx */
656 struct evcnt sc_ev_tsctc; /* TCP Segmentation Context Tx */ 656 struct evcnt sc_ev_tsctc; /* TCP Segmentation Context Tx */
657 657
658 /* Old */ 658 /* Old */
659 struct evcnt sc_ev_tsctfc; /* TCP Segmentation Context Tx Fail */ 659 struct evcnt sc_ev_tsctfc; /* TCP Segmentation Context Tx Fail */
660 /* New */ 660 /* New */
661 struct evcnt sc_ev_cbrmpc; /* Circuit Breaker Rx Mng. Packet */ 661 struct evcnt sc_ev_cbrmpc; /* Circuit Breaker Rx Mng. Packet */
662 662
663 struct evcnt sc_ev_iac; /* Interrupt Assertion */ 663 struct evcnt sc_ev_iac; /* Interrupt Assertion */
664 664
665 /* Old */ 665 /* Old */
666 struct evcnt sc_ev_icrxptc; /* Intr. Cause Rx Pkt Timer Expire */ 666 struct evcnt sc_ev_icrxptc; /* Intr. Cause Rx Pkt Timer Expire */
667 struct evcnt sc_ev_icrxatc; /* Intr. Cause Rx Abs Timer Expire */ 667 struct evcnt sc_ev_icrxatc; /* Intr. Cause Rx Abs Timer Expire */
668 struct evcnt sc_ev_ictxptc; /* Intr. Cause Tx Pkt Timer Expire */ 668 struct evcnt sc_ev_ictxptc; /* Intr. Cause Tx Pkt Timer Expire */
669 struct evcnt sc_ev_ictxatc; /* Intr. Cause Tx Abs Timer Expire */ 669 struct evcnt sc_ev_ictxatc; /* Intr. Cause Tx Abs Timer Expire */
670 struct evcnt sc_ev_ictxqec; /* Intr. Cause Tx Queue Empty */ 670 struct evcnt sc_ev_ictxqec; /* Intr. Cause Tx Queue Empty */
671 struct evcnt sc_ev_ictxqmtc; /* Intr. Cause Tx Queue Min Thresh */ 671 struct evcnt sc_ev_ictxqmtc; /* Intr. Cause Tx Queue Min Thresh */
672 /* 672 /*
673 * sc_ev_rxdmtc is shared with both "Intr. cause" and 673 * sc_ev_rxdmtc is shared with both "Intr. cause" and
674 * non "Intr. cause" register. 674 * non "Intr. cause" register.
675 */ 675 */
676 struct evcnt sc_ev_rxdmtc; /* (Intr. Cause) Rx Desc Min Thresh */ 676 struct evcnt sc_ev_rxdmtc; /* (Intr. Cause) Rx Desc Min Thresh */
677 struct evcnt sc_ev_icrxoc; /* Intr. Cause Receiver Overrun */ 677 struct evcnt sc_ev_icrxoc; /* Intr. Cause Receiver Overrun */
678 /* New */ 678 /* New */
679 struct evcnt sc_ev_rpthc; /* Rx Packets To Host */ 679 struct evcnt sc_ev_rpthc; /* Rx Packets To Host */
680 struct evcnt sc_ev_debug1; /* Debug Counter 1 */ 680 struct evcnt sc_ev_debug1; /* Debug Counter 1 */
681 struct evcnt sc_ev_debug2; /* Debug Counter 2 */ 681 struct evcnt sc_ev_debug2; /* Debug Counter 2 */
682 struct evcnt sc_ev_debug3; /* Debug Counter 3 */ 682 struct evcnt sc_ev_debug3; /* Debug Counter 3 */
683 struct evcnt sc_ev_hgptc; /* Host Good Packets TX */ 683 struct evcnt sc_ev_hgptc; /* Host Good Packets TX */
684 struct evcnt sc_ev_debug4; /* Debug Counter 4 */ 684 struct evcnt sc_ev_debug4; /* Debug Counter 4 */
685 struct evcnt sc_ev_htcbdpc; /* Host Tx Circuit Breaker Drp. Pkts */ 685 struct evcnt sc_ev_htcbdpc; /* Host Tx Circuit Breaker Drp. Pkts */
686 struct evcnt sc_ev_hgorc; /* Host Good Octets Rx */ 686 struct evcnt sc_ev_hgorc; /* Host Good Octets Rx */
687 struct evcnt sc_ev_hgotc; /* Host Good Octets Tx */ 687 struct evcnt sc_ev_hgotc; /* Host Good Octets Tx */
688 struct evcnt sc_ev_lenerrs; /* Length Error */ 688 struct evcnt sc_ev_lenerrs; /* Length Error */
689 struct evcnt sc_ev_tlpic; /* EEE Tx LPI */ 689 struct evcnt sc_ev_tlpic; /* EEE Tx LPI */
690 struct evcnt sc_ev_rlpic; /* EEE Rx LPI */ 690 struct evcnt sc_ev_rlpic; /* EEE Rx LPI */
691 struct evcnt sc_ev_b2ogprc; /* BMC2OS pkts received by host */ 691 struct evcnt sc_ev_b2ogprc; /* BMC2OS pkts received by host */
692 struct evcnt sc_ev_o2bspc; /* OS2BMC pkts transmitted by host */ 692 struct evcnt sc_ev_o2bspc; /* OS2BMC pkts transmitted by host */
693 struct evcnt sc_ev_b2ospc; /* BMC2OS pkts sent by BMC */ 693 struct evcnt sc_ev_b2ospc; /* BMC2OS pkts sent by BMC */
694 struct evcnt sc_ev_o2bgptc; /* OS2BMC pkts received by BMC */ 694 struct evcnt sc_ev_o2bgptc; /* OS2BMC pkts received by BMC */
695 struct evcnt sc_ev_scvpc; /* SerDes/SGMII Code Violation Pkt. */ 695 struct evcnt sc_ev_scvpc; /* SerDes/SGMII Code Violation Pkt. */
696 struct evcnt sc_ev_hrmpc; /* Header Redirection Missed Packet */ 696 struct evcnt sc_ev_hrmpc; /* Header Redirection Missed Packet */
697#endif /* WM_EVENT_COUNTERS */ 697#endif /* WM_EVENT_COUNTERS */
698 698
699 struct sysctllog *sc_sysctllog; 699 struct sysctllog *sc_sysctllog;
700 700
701 /* This variable are used only on the 82547. */ 701 /* This variable are used only on the 82547. */
702 callout_t sc_txfifo_ch; /* Tx FIFO stall work-around timer */ 702 callout_t sc_txfifo_ch; /* Tx FIFO stall work-around timer */
703 703
704 uint32_t sc_ctrl; /* prototype CTRL register */ 704 uint32_t sc_ctrl; /* prototype CTRL register */
705#if 0 705#if 0
706 uint32_t sc_ctrl_ext; /* prototype CTRL_EXT register */ 706 uint32_t sc_ctrl_ext; /* prototype CTRL_EXT register */
707#endif 707#endif
708 uint32_t sc_icr; /* prototype interrupt bits */ 708 uint32_t sc_icr; /* prototype interrupt bits */
709 uint32_t sc_itr_init; /* prototype intr throttling reg */ 709 uint32_t sc_itr_init; /* prototype intr throttling reg */
710 uint32_t sc_tctl; /* prototype TCTL register */ 710 uint32_t sc_tctl; /* prototype TCTL register */
711 uint32_t sc_rctl; /* prototype RCTL register */ 711 uint32_t sc_rctl; /* prototype RCTL register */
712 uint32_t sc_txcw; /* prototype TXCW register */ 712 uint32_t sc_txcw; /* prototype TXCW register */
713 uint32_t sc_tipg; /* prototype TIPG register */ 713 uint32_t sc_tipg; /* prototype TIPG register */
714 uint32_t sc_fcrtl; /* prototype FCRTL register */ 714 uint32_t sc_fcrtl; /* prototype FCRTL register */
715 uint32_t sc_pba; /* prototype PBA register */ 715 uint32_t sc_pba; /* prototype PBA register */
716 716
717 int sc_tbi_linkup; /* TBI link status */ 717 int sc_tbi_linkup; /* TBI link status */
718 int sc_tbi_serdes_anegticks; /* autonegotiation ticks */ 718 int sc_tbi_serdes_anegticks; /* autonegotiation ticks */
719 int sc_tbi_serdes_ticks; /* tbi ticks */ 719 int sc_tbi_serdes_ticks; /* tbi ticks */
720 struct timeval sc_linkup_delay_time; /* delay LINK_STATE_UP */ 720 struct timeval sc_linkup_delay_time; /* delay LINK_STATE_UP */
721 721
722 int sc_mchash_type; /* multicast filter offset */ 722 int sc_mchash_type; /* multicast filter offset */
723 723
724 struct if_percpuq *sc_ipq; /* softint-based input queues */ 724 struct if_percpuq *sc_ipq; /* softint-based input queues */
725 725
726 kmutex_t *sc_core_lock; /* lock for softc operations */ 726 kmutex_t *sc_core_lock; /* lock for softc operations */
727 kmutex_t *sc_ich_phymtx; /* 727 kmutex_t *sc_ich_phymtx; /*
728 * 82574/82583/ICH/PCH specific PHY 728 * 82574/82583/ICH/PCH specific PHY
729 * mutex. For 82574/82583, the mutex 729 * mutex. For 82574/82583, the mutex
730 * is used for both PHY and NVM. 730 * is used for both PHY and NVM.
731 */ 731 */
732 kmutex_t *sc_ich_nvmmtx; /* ICH/PCH specific NVM mutex */ 732 kmutex_t *sc_ich_nvmmtx; /* ICH/PCH specific NVM mutex */
733 733
734 struct wm_phyop phy; 734 struct wm_phyop phy;
735 struct wm_nvmop nvm; 735 struct wm_nvmop nvm;
736#ifdef WM_DEBUG 736#ifdef WM_DEBUG
737 uint32_t sc_debug; 737 uint32_t sc_debug;
738#endif 738#endif
739}; 739};
740 740
/*
 * Core-lock helpers: the lock pointer may be NULL before attach completes,
 * so every macro guards against that case.
 */
#define	WM_CORE_LOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define	WM_CORE_UNLOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define	WM_CORE_LOCKED(_sc)						\
	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

/* Reset the per-queue mbuf reassembly chain (head/tailp/len). */
#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

/* Append mbuf (m) to the per-queue reassembly chain. */
#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_STORE(ev, val)	(ev)->ev_count = (val)
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

/* Per-queue variants; token-paste the queue prefix onto the counter name. */
#define	WM_Q_EVCNT_INCR(qname, evname)					\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define	WM_Q_EVCNT_STORE(qname, evname, val)				\
	WM_EVCNT_STORE(&(qname)->qname##_ev_##evname, (val))
#define	WM_Q_EVCNT_ADD(qname, evname, val)				\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	__nothing
#define	WM_EVCNT_STORE(ev, val)	__nothing
#define	WM_EVCNT_ADD(ev, val)	__nothing

#define	WM_Q_EVCNT_INCR(qname, evname)		__nothing
#define	WM_Q_EVCNT_STORE(qname, evname, val)	__nothing
#define	WM_Q_EVCNT_ADD(qname, evname, val)	__nothing
#endif /* !WM_EVENT_COUNTERS */

/* CSR access; CSR_WRITE_FLUSH forces posted writes out by reading STATUS. */
#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)

/* ICH8 flash-register access, relative to sc_flashreg_offset in the BAR. */
#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

/* Bus address of descriptor x, and its 32-bit low/high halves. */
#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
815 815
816/* 816/*
817 * Register read/write functions. 817 * Register read/write functions.
818 * Other than CSR_{READ|WRITE}(). 818 * Other than CSR_{READ|WRITE}().
819 */ 819 */
820#if 0 820#if 0
821static inline uint32_t wm_io_read(struct wm_softc *, int); 821static inline uint32_t wm_io_read(struct wm_softc *, int);
822#endif 822#endif
823static inline void wm_io_write(struct wm_softc *, int, uint32_t); 823static inline void wm_io_write(struct wm_softc *, int, uint32_t);
824static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t, 824static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
825 uint32_t, uint32_t); 825 uint32_t, uint32_t);
826static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t); 826static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
827 827
828/* 828/*
829 * Descriptor sync/init functions. 829 * Descriptor sync/init functions.
830 */ 830 */
831static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int); 831static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
832static inline void wm_cdrxsync(struct wm_rxqueue *, int, int); 832static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
833static inline void wm_init_rxdesc(struct wm_rxqueue *, int); 833static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
834 834
835/* 835/*
836 * Device driver interface functions and commonly used functions. 836 * Device driver interface functions and commonly used functions.
837 * match, attach, detach, init, start, stop, ioctl, watchdog and so on. 837 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
838 */ 838 */
839static const struct wm_product *wm_lookup(const struct pci_attach_args *); 839static const struct wm_product *wm_lookup(const struct pci_attach_args *);
840static int wm_match(device_t, cfdata_t, void *); 840static int wm_match(device_t, cfdata_t, void *);
841static void wm_attach(device_t, device_t, void *); 841static void wm_attach(device_t, device_t, void *);
842static int wm_detach(device_t, int); 842static int wm_detach(device_t, int);
843static bool wm_suspend(device_t, const pmf_qual_t *); 843static bool wm_suspend(device_t, const pmf_qual_t *);
844static bool wm_resume(device_t, const pmf_qual_t *); 844static bool wm_resume(device_t, const pmf_qual_t *);
845static void wm_watchdog(struct ifnet *); 845static void wm_watchdog(struct ifnet *);
846static void wm_watchdog_txq(struct ifnet *, struct wm_txqueue *, 846static void wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
847 uint16_t *); 847 uint16_t *);
848static void wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *, 848static void wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
849 uint16_t *); 849 uint16_t *);
850static void wm_tick(void *); 850static void wm_tick(void *);
851static int wm_ifflags_cb(struct ethercom *); 851static int wm_ifflags_cb(struct ethercom *);
852static int wm_ioctl(struct ifnet *, u_long, void *); 852static int wm_ioctl(struct ifnet *, u_long, void *);
853/* MAC address related */ 853/* MAC address related */
854static uint16_t wm_check_alt_mac_addr(struct wm_softc *); 854static uint16_t wm_check_alt_mac_addr(struct wm_softc *);
855static int wm_read_mac_addr(struct wm_softc *, uint8_t *); 855static int wm_read_mac_addr(struct wm_softc *, uint8_t *);
856static void wm_set_ral(struct wm_softc *, const uint8_t *, int); 856static void wm_set_ral(struct wm_softc *, const uint8_t *, int);
857static uint32_t wm_mchash(struct wm_softc *, const uint8_t *); 857static uint32_t wm_mchash(struct wm_softc *, const uint8_t *);
858static int wm_rar_count(struct wm_softc *); 858static int wm_rar_count(struct wm_softc *);
859static void wm_set_filter(struct wm_softc *); 859static void wm_set_filter(struct wm_softc *);
860/* Reset and init related */ 860/* Reset and init related */
861static void wm_set_vlan(struct wm_softc *); 861static void wm_set_vlan(struct wm_softc *);
862static void wm_set_pcie_completion_timeout(struct wm_softc *); 862static void wm_set_pcie_completion_timeout(struct wm_softc *);
863static void wm_get_auto_rd_done(struct wm_softc *); 863static void wm_get_auto_rd_done(struct wm_softc *);
864static void wm_lan_init_done(struct wm_softc *); 864static void wm_lan_init_done(struct wm_softc *);
865static void wm_get_cfg_done(struct wm_softc *); 865static void wm_get_cfg_done(struct wm_softc *);
866static void wm_phy_post_reset(struct wm_softc *); 866static void wm_phy_post_reset(struct wm_softc *);
867static int wm_write_smbus_addr(struct wm_softc *); 867static int wm_write_smbus_addr(struct wm_softc *);
868static void wm_init_lcd_from_nvm(struct wm_softc *); 868static void wm_init_lcd_from_nvm(struct wm_softc *);
869static int wm_oem_bits_config_ich8lan(struct wm_softc *, bool); 869static int wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
870static void wm_initialize_hardware_bits(struct wm_softc *); 870static void wm_initialize_hardware_bits(struct wm_softc *);
871static uint32_t wm_rxpbs_adjust_82580(uint32_t); 871static uint32_t wm_rxpbs_adjust_82580(uint32_t);
872static int wm_reset_phy(struct wm_softc *); 872static int wm_reset_phy(struct wm_softc *);
873static void wm_flush_desc_rings(struct wm_softc *); 873static void wm_flush_desc_rings(struct wm_softc *);
874static void wm_reset(struct wm_softc *); 874static void wm_reset(struct wm_softc *);
875static int wm_add_rxbuf(struct wm_rxqueue *, int); 875static int wm_add_rxbuf(struct wm_rxqueue *, int);
876static void wm_rxdrain(struct wm_rxqueue *); 876static void wm_rxdrain(struct wm_rxqueue *);
877static void wm_init_rss(struct wm_softc *); 877static void wm_init_rss(struct wm_softc *);
878static void wm_adjust_qnum(struct wm_softc *, int); 878static void wm_adjust_qnum(struct wm_softc *, int);
879static inline bool wm_is_using_msix(struct wm_softc *); 879static inline bool wm_is_using_msix(struct wm_softc *);
880static inline bool wm_is_using_multiqueue(struct wm_softc *); 880static inline bool wm_is_using_multiqueue(struct wm_softc *);
881static int wm_softint_establish_queue(struct wm_softc *, int, int); 881static int wm_softint_establish_queue(struct wm_softc *, int, int);
882static int wm_setup_legacy(struct wm_softc *); 882static int wm_setup_legacy(struct wm_softc *);
883static int wm_setup_msix(struct wm_softc *); 883static int wm_setup_msix(struct wm_softc *);
884static int wm_init(struct ifnet *); 884static int wm_init(struct ifnet *);
885static int wm_init_locked(struct ifnet *); 885static int wm_init_locked(struct ifnet *);
886static void wm_init_sysctls(struct wm_softc *); 886static void wm_init_sysctls(struct wm_softc *);
887static void wm_update_stats(struct wm_softc *); 887static void wm_update_stats(struct wm_softc *);
888static void wm_clear_evcnt(struct wm_softc *); 888static void wm_clear_evcnt(struct wm_softc *);
889static void wm_unset_stopping_flags(struct wm_softc *); 889static void wm_unset_stopping_flags(struct wm_softc *);
890static void wm_set_stopping_flags(struct wm_softc *); 890static void wm_set_stopping_flags(struct wm_softc *);
891static void wm_stop(struct ifnet *, int); 891static void wm_stop(struct ifnet *, int);
892static void wm_stop_locked(struct ifnet *, int); 892static void wm_stop_locked(struct ifnet *, int);
893static void wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *); 893static void wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
894static void wm_82547_txfifo_stall(void *); 894static void wm_82547_txfifo_stall(void *);
895static int wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *); 895static int wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
896static void wm_itrs_writereg(struct wm_softc *, struct wm_queue *); 896static void wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
897/* DMA related */ 897/* DMA related */
898static int wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *); 898static int wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
899static void wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *); 899static void wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
900static void wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *); 900static void wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
901static void wm_init_tx_regs(struct wm_softc *, struct wm_queue *, 901static void wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
902 struct wm_txqueue *); 902 struct wm_txqueue *);
903static int wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *); 903static int wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
904static void wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *); 904static void wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
905static void wm_init_rx_regs(struct wm_softc *, struct wm_queue *, 905static void wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
906 struct wm_rxqueue *); 906 struct wm_rxqueue *);
907static int wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *); 907static int wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
908static void wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *); 908static void wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
909static void wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *); 909static void wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
910static int wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *); 910static int wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
911static void wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *); 911static void wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
912static int wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *); 912static int wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
913static void wm_init_tx_queue(struct wm_softc *, struct wm_queue *, 913static void wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
914 struct wm_txqueue *); 914 struct wm_txqueue *);
915static int wm_init_rx_queue(struct wm_softc *, struct wm_queue *, 915static int wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
916 struct wm_rxqueue *); 916 struct wm_rxqueue *);
917static int wm_alloc_txrx_queues(struct wm_softc *); 917static int wm_alloc_txrx_queues(struct wm_softc *);
918static void wm_free_txrx_queues(struct wm_softc *); 918static void wm_free_txrx_queues(struct wm_softc *);
919static int wm_init_txrx_queues(struct wm_softc *); 919static int wm_init_txrx_queues(struct wm_softc *);
920/* Start */ 920/* Start */
921static void wm_tx_offload(struct wm_softc *, struct wm_txqueue *, 921static void wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
922 struct wm_txsoft *, uint32_t *, uint8_t *); 922 struct wm_txsoft *, uint32_t *, uint8_t *);
923static inline int wm_select_txqueue(struct ifnet *, struct mbuf *); 923static inline int wm_select_txqueue(struct ifnet *, struct mbuf *);
924static void wm_start(struct ifnet *); 924static void wm_start(struct ifnet *);
925static void wm_start_locked(struct ifnet *); 925static void wm_start_locked(struct ifnet *);
926static int wm_transmit(struct ifnet *, struct mbuf *); 926static int wm_transmit(struct ifnet *, struct mbuf *);
927static void wm_transmit_locked(struct ifnet *, struct wm_txqueue *); 927static void wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
928static void wm_send_common_locked(struct ifnet *, struct wm_txqueue *, 928static void wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
929 bool); 929 bool);
930static void wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *, 930static void wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
931 struct wm_txsoft *, uint32_t *, uint32_t *, bool *); 931 struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
932static void wm_nq_start(struct ifnet *); 932static void wm_nq_start(struct ifnet *);
933static void wm_nq_start_locked(struct ifnet *); 933static void wm_nq_start_locked(struct ifnet *);
934static int wm_nq_transmit(struct ifnet *, struct mbuf *); 934static int wm_nq_transmit(struct ifnet *, struct mbuf *);
935static void wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *); 935static void wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
936static void wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, 936static void wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
937 bool); 937 bool);
938static void wm_deferred_start_locked(struct wm_txqueue *); 938static void wm_deferred_start_locked(struct wm_txqueue *);
939static void wm_handle_queue(void *); 939static void wm_handle_queue(void *);
940static void wm_handle_queue_work(struct work *, void *); 940static void wm_handle_queue_work(struct work *, void *);
941/* Interrupt */ 941/* Interrupt */
942static bool wm_txeof(struct wm_txqueue *, u_int); 942static bool wm_txeof(struct wm_txqueue *, u_int);
943static bool wm_rxeof(struct wm_rxqueue *, u_int); 943static bool wm_rxeof(struct wm_rxqueue *, u_int);
944static void wm_linkintr_gmii(struct wm_softc *, uint32_t); 944static void wm_linkintr_gmii(struct wm_softc *, uint32_t);
945static void wm_linkintr_tbi(struct wm_softc *, uint32_t); 945static void wm_linkintr_tbi(struct wm_softc *, uint32_t);
946static void wm_linkintr_serdes(struct wm_softc *, uint32_t); 946static void wm_linkintr_serdes(struct wm_softc *, uint32_t);
947static void wm_linkintr(struct wm_softc *, uint32_t); 947static void wm_linkintr(struct wm_softc *, uint32_t);
948static int wm_intr_legacy(void *); 948static int wm_intr_legacy(void *);
949static inline void wm_txrxintr_disable(struct wm_queue *); 949static inline void wm_txrxintr_disable(struct wm_queue *);
950static inline void wm_txrxintr_enable(struct wm_queue *); 950static inline void wm_txrxintr_enable(struct wm_queue *);
951static void wm_itrs_calculate(struct wm_softc *, struct wm_queue *); 951static void wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
952static int wm_txrxintr_msix(void *); 952static int wm_txrxintr_msix(void *);
953static int wm_linkintr_msix(void *); 953static int wm_linkintr_msix(void *);
954 954
955/* 955/*
956 * Media related. 956 * Media related.
957 * GMII, SGMII, TBI, SERDES and SFP. 957 * GMII, SGMII, TBI, SERDES and SFP.
958 */ 958 */
959/* Common */ 959/* Common */
960static void wm_tbi_serdes_set_linkled(struct wm_softc *); 960static void wm_tbi_serdes_set_linkled(struct wm_softc *);
961/* GMII related */ 961/* GMII related */
962static void wm_gmii_reset(struct wm_softc *); 962static void wm_gmii_reset(struct wm_softc *);
963static void wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t); 963static void wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
964static int wm_get_phy_id_82575(struct wm_softc *); 964static int wm_get_phy_id_82575(struct wm_softc *);
965static void wm_gmii_mediainit(struct wm_softc *, pci_product_id_t); 965static void wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
966static int wm_gmii_mediachange(struct ifnet *); 966static int wm_gmii_mediachange(struct ifnet *);
967static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *); 967static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
968static void wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int); 968static void wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
969static uint32_t wm_i82543_mii_recvbits(struct wm_softc *); 969static uint32_t wm_i82543_mii_recvbits(struct wm_softc *);
970static int wm_gmii_i82543_readreg(device_t, int, int); 970static int wm_gmii_i82543_readreg(device_t, int, int);
971static void wm_gmii_i82543_writereg(device_t, int, int, int); 971static void wm_gmii_i82543_writereg(device_t, int, int, int);
972static int wm_gmii_mdic_readreg(device_t, int, int); 972static int wm_gmii_mdic_readreg(device_t, int, int);
973static void wm_gmii_mdic_writereg(device_t, int, int, int); 973static void wm_gmii_mdic_writereg(device_t, int, int, int);
974static int wm_gmii_i82544_readreg(device_t, int, int); 974static int wm_gmii_i82544_readreg(device_t, int, int);
975static int wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *); 975static int wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
976static void wm_gmii_i82544_writereg(device_t, int, int, int); 976static void wm_gmii_i82544_writereg(device_t, int, int, int);
977static int wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t); 977static int wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
978static int wm_gmii_i80003_readreg(device_t, int, int); 978static int wm_gmii_i80003_readreg(device_t, int, int);
979static void wm_gmii_i80003_writereg(device_t, int, int, int); 979static void wm_gmii_i80003_writereg(device_t, int, int, int);
980static int wm_gmii_bm_readreg(device_t, int, int); 980static int wm_gmii_bm_readreg(device_t, int, int);
981static void wm_gmii_bm_writereg(device_t, int, int, int); 981static void wm_gmii_bm_writereg(device_t, int, int, int);
982static int wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *); 982static int wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
983static int wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *); 983static int wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
984static int wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int, 984static int wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
985 bool); 985 bool);
986static int wm_gmii_hv_readreg(device_t, int, int); 986static int wm_gmii_hv_readreg(device_t, int, int);
987static int wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *); 987static int wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
988static void wm_gmii_hv_writereg(device_t, int, int, int); 988static void wm_gmii_hv_writereg(device_t, int, int, int);
989static int wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t); 989static int wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
990static int wm_gmii_82580_readreg(device_t, int, int); 990static int wm_gmii_82580_readreg(device_t, int, int);
991static void wm_gmii_82580_writereg(device_t, int, int, int); 991static void wm_gmii_82580_writereg(device_t, int, int, int);
992static int wm_gmii_gs40g_readreg(device_t, int, int); 992static int wm_gmii_gs40g_readreg(device_t, int, int);
993static void wm_gmii_gs40g_writereg(device_t, int, int, int); 993static void wm_gmii_gs40g_writereg(device_t, int, int, int);
994static void wm_gmii_statchg(struct ifnet *); 994static void wm_gmii_statchg(struct ifnet *);
995/* 995/*
996 * kumeran related (80003, ICH* and PCH*). 996 * kumeran related (80003, ICH* and PCH*).
997 * These functions are not for accessing MII registers but for accessing 997 * These functions are not for accessing MII registers but for accessing
998 * kumeran specific registers. 998 * kumeran specific registers.
999 */ 999 */
1000static int wm_kmrn_readreg(struct wm_softc *, int, uint16_t *); 1000static int wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
1001static int wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *); 1001static int wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
1002static int wm_kmrn_writereg(struct wm_softc *, int, uint16_t); 1002static int wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
1003static int wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t); 1003static int wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
1004/* SGMII */ 1004/* SGMII */
1005static bool wm_sgmii_uses_mdio(struct wm_softc *); 1005static bool wm_sgmii_uses_mdio(struct wm_softc *);
1006static void wm_sgmii_sfp_preconfig(struct wm_softc *); 1006static void wm_sgmii_sfp_preconfig(struct wm_softc *);
1007static int wm_sgmii_readreg(device_t, int, int); 1007static int wm_sgmii_readreg(device_t, int, int);
1008static void wm_sgmii_writereg(device_t, int, int, int); 1008static void wm_sgmii_writereg(device_t, int, int, int);
1009/* TBI related */ 1009/* TBI related */
1010static bool wm_tbi_havesignal(struct wm_softc *, uint32_t); 1010static bool wm_tbi_havesignal(struct wm_softc *, uint32_t);
1011static void wm_tbi_mediainit(struct wm_softc *); 1011static void wm_tbi_mediainit(struct wm_softc *);
1012static int wm_tbi_mediachange(struct ifnet *); 1012static int wm_tbi_mediachange(struct ifnet *);
1013static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *); 1013static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
1014static int wm_check_for_link(struct wm_softc *); 1014static int wm_check_for_link(struct wm_softc *);
1015static void wm_tbi_tick(struct wm_softc *); 1015static void wm_tbi_tick(struct wm_softc *);
1016/* SERDES related */ 1016/* SERDES related */
1017static void wm_serdes_power_up_link_82575(struct wm_softc *); 1017static void wm_serdes_power_up_link_82575(struct wm_softc *);
1018static int wm_serdes_mediachange(struct ifnet *); 1018static int wm_serdes_mediachange(struct ifnet *);
1019static void wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *); 1019static void wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
1020static void wm_serdes_tick(struct wm_softc *); 1020static void wm_serdes_tick(struct wm_softc *);
1021/* SFP related */ 1021/* SFP related */
1022static int wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *); 1022static int wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
1023static uint32_t wm_sfp_get_media_type(struct wm_softc *); 1023static uint32_t wm_sfp_get_media_type(struct wm_softc *);
1024 1024
1025/* 1025/*
1026 * NVM related. 1026 * NVM related.
1027 * Microwire, SPI (w/wo EERD) and Flash. 1027 * Microwire, SPI (w/wo EERD) and Flash.
1028 */ 1028 */
1029/* Misc functions */ 1029/* Misc functions */
1030static void wm_eeprom_sendbits(struct wm_softc *, uint32_t, int); 1030static void wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
1031static void wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int); 1031static void wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
1032static int wm_nvm_set_addrbits_size_eecd(struct wm_softc *); 1032static int wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
1033/* Microwire */ 1033/* Microwire */
1034static int wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *); 1034static int wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
1035/* SPI */ 1035/* SPI */
1036static int wm_nvm_ready_spi(struct wm_softc *); 1036static int wm_nvm_ready_spi(struct wm_softc *);
1037static int wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *); 1037static int wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
1038/* Using with EERD */ 1038/* Using with EERD */
1039static int wm_poll_eerd_eewr_done(struct wm_softc *, int); 1039static int wm_poll_eerd_eewr_done(struct wm_softc *, int);
1040static int wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *); 1040static int wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
1041/* Flash */ 1041/* Flash */
1042static int wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *, 1042static int wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
1043 unsigned int *); 1043 unsigned int *);
1044static int32_t wm_ich8_cycle_init(struct wm_softc *); 1044static int32_t wm_ich8_cycle_init(struct wm_softc *);
1045static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t); 1045static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
1046static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t, 1046static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
1047 uint32_t *); 1047 uint32_t *);
1048static int32_t wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *); 1048static int32_t wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
1049static int32_t wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *); 1049static int32_t wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
1050static int32_t wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *); 1050static int32_t wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
1051static int wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *); 1051static int wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
1052static int wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *); 1052static int wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
1053/* iNVM */ 1053/* iNVM */
1054static int wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *); 1054static int wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
1055static int wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *); 1055static int wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
1056/* Lock, detecting NVM type, validate checksum and read */ 1056/* Lock, detecting NVM type, validate checksum and read */
1057static int wm_nvm_is_onboard_eeprom(struct wm_softc *); 1057static int wm_nvm_is_onboard_eeprom(struct wm_softc *);
1058static int wm_nvm_flash_presence_i210(struct wm_softc *); 1058static int wm_nvm_flash_presence_i210(struct wm_softc *);
1059static int wm_nvm_validate_checksum(struct wm_softc *); 1059static int wm_nvm_validate_checksum(struct wm_softc *);
1060static void wm_nvm_version_invm(struct wm_softc *); 1060static void wm_nvm_version_invm(struct wm_softc *);
1061static void wm_nvm_version(struct wm_softc *); 1061static void wm_nvm_version(struct wm_softc *);
1062static int wm_nvm_read(struct wm_softc *, int, int, uint16_t *); 1062static int wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
1063 1063
1064/* 1064/*
1065 * Hardware semaphores. 1065 * Hardware semaphores.
1066 * Very complexed... 1066 * Very complexed...
1067 */ 1067 */
1068static int wm_get_null(struct wm_softc *); 1068static int wm_get_null(struct wm_softc *);
1069static void wm_put_null(struct wm_softc *); 1069static void wm_put_null(struct wm_softc *);
1070static int wm_get_eecd(struct wm_softc *); 1070static int wm_get_eecd(struct wm_softc *);
1071static void wm_put_eecd(struct wm_softc *); 1071static void wm_put_eecd(struct wm_softc *);
1072static int wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */ 1072static int wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
1073static void wm_put_swsm_semaphore(struct wm_softc *); 1073static void wm_put_swsm_semaphore(struct wm_softc *);
1074static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t); 1074static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
1075static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t); 1075static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
1076static int wm_get_nvm_80003(struct wm_softc *); 1076static int wm_get_nvm_80003(struct wm_softc *);
1077static void wm_put_nvm_80003(struct wm_softc *); 1077static void wm_put_nvm_80003(struct wm_softc *);
1078static int wm_get_nvm_82571(struct wm_softc *); 1078static int wm_get_nvm_82571(struct wm_softc *);
1079static void wm_put_nvm_82571(struct wm_softc *); 1079static void wm_put_nvm_82571(struct wm_softc *);
1080static int wm_get_phy_82575(struct wm_softc *); 1080static int wm_get_phy_82575(struct wm_softc *);
1081static void wm_put_phy_82575(struct wm_softc *); 1081static void wm_put_phy_82575(struct wm_softc *);
1082static int wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */ 1082static int wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
1083static void wm_put_swfwhw_semaphore(struct wm_softc *); 1083static void wm_put_swfwhw_semaphore(struct wm_softc *);
1084static int wm_get_swflag_ich8lan(struct wm_softc *); /* For PHY */ 1084static int wm_get_swflag_ich8lan(struct wm_softc *); /* For PHY */
1085static void wm_put_swflag_ich8lan(struct wm_softc *); 1085static void wm_put_swflag_ich8lan(struct wm_softc *);
1086static int wm_get_nvm_ich8lan(struct wm_softc *); 1086static int wm_get_nvm_ich8lan(struct wm_softc *);
1087static void wm_put_nvm_ich8lan(struct wm_softc *); 1087static void wm_put_nvm_ich8lan(struct wm_softc *);
1088static int wm_get_hw_semaphore_82573(struct wm_softc *); 1088static int wm_get_hw_semaphore_82573(struct wm_softc *);
1089static void wm_put_hw_semaphore_82573(struct wm_softc *); 1089static void wm_put_hw_semaphore_82573(struct wm_softc *);
1090 1090
1091/* 1091/*
1092 * Management mode and power management related subroutines. 1092 * Management mode and power management related subroutines.
1093 * BMC, AMT, suspend/resume and EEE. 1093 * BMC, AMT, suspend/resume and EEE.
1094 */ 1094 */
1095#if 0 1095#if 0
1096static int wm_check_mng_mode(struct wm_softc *); 1096static int wm_check_mng_mode(struct wm_softc *);
1097static int wm_check_mng_mode_ich8lan(struct wm_softc *); 1097static int wm_check_mng_mode_ich8lan(struct wm_softc *);
1098static int wm_check_mng_mode_82574(struct wm_softc *); 1098static int wm_check_mng_mode_82574(struct wm_softc *);
1099static int wm_check_mng_mode_generic(struct wm_softc *); 1099static int wm_check_mng_mode_generic(struct wm_softc *);
1100#endif 1100#endif
1101static int wm_enable_mng_pass_thru(struct wm_softc *); 1101static int wm_enable_mng_pass_thru(struct wm_softc *);
1102static bool wm_phy_resetisblocked(struct wm_softc *); 1102static bool wm_phy_resetisblocked(struct wm_softc *);
1103static void wm_get_hw_control(struct wm_softc *); 1103static void wm_get_hw_control(struct wm_softc *);
1104static void wm_release_hw_control(struct wm_softc *); 1104static void wm_release_hw_control(struct wm_softc *);
1105static void wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool); 1105static void wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
1106static int wm_init_phy_workarounds_pchlan(struct wm_softc *); 1106static int wm_init_phy_workarounds_pchlan(struct wm_softc *);
1107static void wm_init_manageability(struct wm_softc *); 1107static void wm_init_manageability(struct wm_softc *);
1108static void wm_release_manageability(struct wm_softc *); 1108static void wm_release_manageability(struct wm_softc *);
1109static void wm_get_wakeup(struct wm_softc *); 1109static void wm_get_wakeup(struct wm_softc *);
1110static int wm_ulp_disable(struct wm_softc *); 1110static int wm_ulp_disable(struct wm_softc *);
1111static int wm_enable_phy_wakeup(struct wm_softc *); 1111static int wm_enable_phy_wakeup(struct wm_softc *);
1112static void wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *); 1112static void wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
1113static void wm_suspend_workarounds_ich8lan(struct wm_softc *); 1113static void wm_suspend_workarounds_ich8lan(struct wm_softc *);
1114static int wm_resume_workarounds_pchlan(struct wm_softc *); 1114static int wm_resume_workarounds_pchlan(struct wm_softc *);
1115static void wm_enable_wakeup(struct wm_softc *); 1115static void wm_enable_wakeup(struct wm_softc *);
1116static void wm_disable_aspm(struct wm_softc *); 1116static void wm_disable_aspm(struct wm_softc *);
1117/* LPLU (Low Power Link Up) */ 1117/* LPLU (Low Power Link Up) */
1118static void wm_lplu_d0_disable(struct wm_softc *); 1118static void wm_lplu_d0_disable(struct wm_softc *);
1119/* EEE */ 1119/* EEE */
1120static void wm_set_eee_i350(struct wm_softc *); 1120static void wm_set_eee_i350(struct wm_softc *);
1121 1121
1122/* 1122/*
1123 * Workarounds (mainly PHY related). 1123 * Workarounds (mainly PHY related).
1124 * Basically, PHY's workarounds are in the PHY drivers. 1124 * Basically, PHY's workarounds are in the PHY drivers.
1125 */ 1125 */
1126static void wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *); 1126static void wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
1127static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *); 1127static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
1128static void wm_hv_phy_workarounds_ich8lan(struct wm_softc *); 1128static void wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
1129static void wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *); 1129static void wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
1130static void wm_lv_phy_workarounds_ich8lan(struct wm_softc *); 1130static void wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
1131static int wm_k1_workaround_lpt_lp(struct wm_softc *, bool); 1131static int wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
1132static int wm_k1_gig_workaround_hv(struct wm_softc *, int); 1132static int wm_k1_gig_workaround_hv(struct wm_softc *, int);
1133static int wm_k1_workaround_lv(struct wm_softc *); 1133static int wm_k1_workaround_lv(struct wm_softc *);
1134static int wm_link_stall_workaround_hv(struct wm_softc *); 1134static int wm_link_stall_workaround_hv(struct wm_softc *);
1135static void wm_set_mdio_slow_mode_hv(struct wm_softc *); 1135static void wm_set_mdio_slow_mode_hv(struct wm_softc *);
1136static void wm_set_mdio_slow_mode_hv_locked(struct wm_softc *); 1136static void wm_set_mdio_slow_mode_hv_locked(struct wm_softc *);
1137static void wm_configure_k1_ich8lan(struct wm_softc *, int); 1137static void wm_configure_k1_ich8lan(struct wm_softc *, int);
1138static void wm_reset_init_script_82575(struct wm_softc *); 1138static void wm_reset_init_script_82575(struct wm_softc *);
1139static void wm_reset_mdicnfg_82580(struct wm_softc *); 1139static void wm_reset_mdicnfg_82580(struct wm_softc *);
1140static bool wm_phy_is_accessible_pchlan(struct wm_softc *); 1140static bool wm_phy_is_accessible_pchlan(struct wm_softc *);
1141static void wm_toggle_lanphypc_pch_lpt(struct wm_softc *); 1141static void wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
1142static int wm_platform_pm_pch_lpt(struct wm_softc *, bool); 1142static int wm_platform_pm_pch_lpt(struct wm_softc *, bool);
1143static void wm_pll_workaround_i210(struct wm_softc *); 1143static void wm_pll_workaround_i210(struct wm_softc *);
1144static void wm_legacy_irq_quirk_spt(struct wm_softc *); 1144static void wm_legacy_irq_quirk_spt(struct wm_softc *);
1145static bool wm_phy_need_linkdown_discard(struct wm_softc *); 1145static bool wm_phy_need_linkdown_discard(struct wm_softc *);
1146static void wm_set_linkdown_discard(struct wm_softc *); 1146static void wm_set_linkdown_discard(struct wm_softc *);
1147static void wm_clear_linkdown_discard(struct wm_softc *); 1147static void wm_clear_linkdown_discard(struct wm_softc *);
1148 1148
1149static int wm_sysctl_tdh_handler(SYSCTLFN_PROTO); 1149static int wm_sysctl_tdh_handler(SYSCTLFN_PROTO);
1150static int wm_sysctl_tdt_handler(SYSCTLFN_PROTO); 1150static int wm_sysctl_tdt_handler(SYSCTLFN_PROTO);
1151#ifdef WM_DEBUG 1151#ifdef WM_DEBUG
1152static int wm_sysctl_debug(SYSCTLFN_PROTO); 1152static int wm_sysctl_debug(SYSCTLFN_PROTO);
1153#endif 1153#endif
1154 1154
1155CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc), 1155CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
1156 wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN); 1156 wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
1157 1157
/*
 * Devices supported by this driver.
 */
1161static const struct wm_product { 1161static const struct wm_product {
1162 pci_vendor_id_t wmp_vendor; 1162 pci_vendor_id_t wmp_vendor;
1163 pci_product_id_t wmp_product; 1163 pci_product_id_t wmp_product;
1164 const char *wmp_name; 1164 const char *wmp_name;
1165 wm_chip_type wmp_type; 1165 wm_chip_type wmp_type;
1166 uint32_t wmp_flags; 1166 uint32_t wmp_flags;
1167#define WMP_F_UNKNOWN WM_MEDIATYPE_UNKNOWN 1167#define WMP_F_UNKNOWN WM_MEDIATYPE_UNKNOWN
1168#define WMP_F_FIBER WM_MEDIATYPE_FIBER 1168#define WMP_F_FIBER WM_MEDIATYPE_FIBER
1169#define WMP_F_COPPER WM_MEDIATYPE_COPPER 1169#define WMP_F_COPPER WM_MEDIATYPE_COPPER
1170#define WMP_F_SERDES WM_MEDIATYPE_SERDES 1170#define WMP_F_SERDES WM_MEDIATYPE_SERDES
1171#define WMP_MEDIATYPE(x) ((x) & 0x03) 1171#define WMP_MEDIATYPE(x) ((x) & 0x03)
1172} wm_products[] = { 1172} wm_products[] = {
1173 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542, 1173 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
1174 "Intel i82542 1000BASE-X Ethernet", 1174 "Intel i82542 1000BASE-X Ethernet",
1175 WM_T_82542_2_1, WMP_F_FIBER }, 1175 WM_T_82542_2_1, WMP_F_FIBER },
1176 1176
1177 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER, 1177 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER,
1178 "Intel i82543GC 1000BASE-X Ethernet", 1178 "Intel i82543GC 1000BASE-X Ethernet",
1179 WM_T_82543, WMP_F_FIBER }, 1179 WM_T_82543, WMP_F_FIBER },
1180 1180
1181 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER, 1181 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER,
1182 "Intel i82543GC 1000BASE-T Ethernet", 1182 "Intel i82543GC 1000BASE-T Ethernet",
1183 WM_T_82543, WMP_F_COPPER }, 1183 WM_T_82543, WMP_F_COPPER },
1184 1184
1185 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER, 1185 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER,
1186 "Intel i82544EI 1000BASE-T Ethernet", 1186 "Intel i82544EI 1000BASE-T Ethernet",
1187 WM_T_82544, WMP_F_COPPER }, 1187 WM_T_82544, WMP_F_COPPER },
1188 1188
1189 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER, 1189 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER,
1190 "Intel i82544EI 1000BASE-X Ethernet", 1190 "Intel i82544EI 1000BASE-X Ethernet",
1191 WM_T_82544, WMP_F_FIBER }, 1191 WM_T_82544, WMP_F_FIBER },
1192 1192
1193 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER, 1193 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER,
1194 "Intel i82544GC 1000BASE-T Ethernet", 1194 "Intel i82544GC 1000BASE-T Ethernet",
1195 WM_T_82544, WMP_F_COPPER }, 1195 WM_T_82544, WMP_F_COPPER },
1196 1196
1197 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM, 1197 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM,
1198 "Intel i82544GC (LOM) 1000BASE-T Ethernet", 1198 "Intel i82544GC (LOM) 1000BASE-T Ethernet",
1199 WM_T_82544, WMP_F_COPPER }, 1199 WM_T_82544, WMP_F_COPPER },
1200 1200
1201 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM, 1201 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM,
1202 "Intel i82540EM 1000BASE-T Ethernet", 1202 "Intel i82540EM 1000BASE-T Ethernet",
1203 WM_T_82540, WMP_F_COPPER }, 1203 WM_T_82540, WMP_F_COPPER },
1204 1204
1205 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM, 1205 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM,
1206 "Intel i82540EM (LOM) 1000BASE-T Ethernet", 1206 "Intel i82540EM (LOM) 1000BASE-T Ethernet",
1207 WM_T_82540, WMP_F_COPPER }, 1207 WM_T_82540, WMP_F_COPPER },
1208 1208
1209 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM, 1209 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM,
1210 "Intel i82540EP 1000BASE-T Ethernet", 1210 "Intel i82540EP 1000BASE-T Ethernet",
1211 WM_T_82540, WMP_F_COPPER }, 1211 WM_T_82540, WMP_F_COPPER },
1212 1212
1213 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP, 1213 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP,
1214 "Intel i82540EP 1000BASE-T Ethernet", 1214 "Intel i82540EP 1000BASE-T Ethernet",
1215 WM_T_82540, WMP_F_COPPER }, 1215 WM_T_82540, WMP_F_COPPER },
1216 1216
1217 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP, 1217 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP,
1218 "Intel i82540EP 1000BASE-T Ethernet", 1218 "Intel i82540EP 1000BASE-T Ethernet",
1219 WM_T_82540, WMP_F_COPPER }, 1219 WM_T_82540, WMP_F_COPPER },
1220 1220
1221 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER, 1221 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER,
1222 "Intel i82545EM 1000BASE-T Ethernet", 1222 "Intel i82545EM 1000BASE-T Ethernet",
1223 WM_T_82545, WMP_F_COPPER }, 1223 WM_T_82545, WMP_F_COPPER },
1224 1224
1225 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER, 1225 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER,
1226 "Intel i82545GM 1000BASE-T Ethernet", 1226 "Intel i82545GM 1000BASE-T Ethernet",
1227 WM_T_82545_3, WMP_F_COPPER }, 1227 WM_T_82545_3, WMP_F_COPPER },
1228 1228
1229 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER, 1229 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER,
1230 "Intel i82545GM 1000BASE-X Ethernet", 1230 "Intel i82545GM 1000BASE-X Ethernet",
1231 WM_T_82545_3, WMP_F_FIBER }, 1231 WM_T_82545_3, WMP_F_FIBER },
1232 1232
1233 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES, 1233 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES,
1234 "Intel i82545GM Gigabit Ethernet (SERDES)", 1234 "Intel i82545GM Gigabit Ethernet (SERDES)",
1235 WM_T_82545_3, WMP_F_SERDES }, 1235 WM_T_82545_3, WMP_F_SERDES },
1236 1236
1237 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER, 1237 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER,
1238 "Intel i82546EB 1000BASE-T Ethernet", 1238 "Intel i82546EB 1000BASE-T Ethernet",
1239 WM_T_82546, WMP_F_COPPER }, 1239 WM_T_82546, WMP_F_COPPER },
1240 1240
1241 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD, 1241 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD,
1242 "Intel i82546EB 1000BASE-T Ethernet", 1242 "Intel i82546EB 1000BASE-T Ethernet",
1243 WM_T_82546, WMP_F_COPPER }, 1243 WM_T_82546, WMP_F_COPPER },
1244 1244
1245 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER, 1245 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER,
1246 "Intel i82545EM 1000BASE-X Ethernet", 1246 "Intel i82545EM 1000BASE-X Ethernet",
1247 WM_T_82545, WMP_F_FIBER }, 1247 WM_T_82545, WMP_F_FIBER },
1248 1248
1249 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER, 1249 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER,
1250 "Intel i82546EB 1000BASE-X Ethernet", 1250 "Intel i82546EB 1000BASE-X Ethernet",
1251 WM_T_82546, WMP_F_FIBER }, 1251 WM_T_82546, WMP_F_FIBER },
1252 1252
1253 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER, 1253 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER,
1254 "Intel i82546GB 1000BASE-T Ethernet", 1254 "Intel i82546GB 1000BASE-T Ethernet",
1255 WM_T_82546_3, WMP_F_COPPER }, 1255 WM_T_82546_3, WMP_F_COPPER },
1256 1256
1257 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER, 1257 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER,
1258 "Intel i82546GB 1000BASE-X Ethernet", 1258 "Intel i82546GB 1000BASE-X Ethernet",
1259 WM_T_82546_3, WMP_F_FIBER }, 1259 WM_T_82546_3, WMP_F_FIBER },
1260 1260
1261 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES, 1261 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES,
1262 "Intel i82546GB Gigabit Ethernet (SERDES)", 1262 "Intel i82546GB Gigabit Ethernet (SERDES)",
1263 WM_T_82546_3, WMP_F_SERDES }, 1263 WM_T_82546_3, WMP_F_SERDES },
1264 1264
1265 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER, 1265 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
1266 "i82546GB quad-port Gigabit Ethernet", 1266 "i82546GB quad-port Gigabit Ethernet",
1267 WM_T_82546_3, WMP_F_COPPER }, 1267 WM_T_82546_3, WMP_F_COPPER },
1268 1268
1269 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3, 1269 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
1270 "i82546GB quad-port Gigabit Ethernet (KSP3)", 1270 "i82546GB quad-port Gigabit Ethernet (KSP3)",
1271 WM_T_82546_3, WMP_F_COPPER }, 1271 WM_T_82546_3, WMP_F_COPPER },
1272 1272
1273 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE, 1273 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE,
1274 "Intel PRO/1000MT (82546GB)", 1274 "Intel PRO/1000MT (82546GB)",
1275 WM_T_82546_3, WMP_F_COPPER }, 1275 WM_T_82546_3, WMP_F_COPPER },
1276 1276
1277 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI, 1277 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI,
1278 "Intel i82541EI 1000BASE-T Ethernet", 1278 "Intel i82541EI 1000BASE-T Ethernet",
1279 WM_T_82541, WMP_F_COPPER }, 1279 WM_T_82541, WMP_F_COPPER },
1280 1280
1281 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM, 1281 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM,
1282 "Intel i82541ER (LOM) 1000BASE-T Ethernet", 1282 "Intel i82541ER (LOM) 1000BASE-T Ethernet",
1283 WM_T_82541, WMP_F_COPPER }, 1283 WM_T_82541, WMP_F_COPPER },
1284 1284
1285 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE, 1285 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE,
1286 "Intel i82541EI Mobile 1000BASE-T Ethernet", 1286 "Intel i82541EI Mobile 1000BASE-T Ethernet",
1287 WM_T_82541, WMP_F_COPPER }, 1287 WM_T_82541, WMP_F_COPPER },
1288 1288
1289 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER, 1289 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER,
1290 "Intel i82541ER 1000BASE-T Ethernet", 1290 "Intel i82541ER 1000BASE-T Ethernet",
1291 WM_T_82541_2, WMP_F_COPPER }, 1291 WM_T_82541_2, WMP_F_COPPER },
1292 1292
1293 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI, 1293 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI,
1294 "Intel i82541GI 1000BASE-T Ethernet", 1294 "Intel i82541GI 1000BASE-T Ethernet",
1295 WM_T_82541_2, WMP_F_COPPER }, 1295 WM_T_82541_2, WMP_F_COPPER },
1296 1296
1297 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE, 1297 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE,
1298 "Intel i82541GI Mobile 1000BASE-T Ethernet", 1298 "Intel i82541GI Mobile 1000BASE-T Ethernet",
1299 WM_T_82541_2, WMP_F_COPPER }, 1299 WM_T_82541_2, WMP_F_COPPER },
1300 1300
1301 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI, 1301 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI,
1302 "Intel i82541PI 1000BASE-T Ethernet", 1302 "Intel i82541PI 1000BASE-T Ethernet",
1303 WM_T_82541_2, WMP_F_COPPER }, 1303 WM_T_82541_2, WMP_F_COPPER },
1304 1304
1305 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI, 1305 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI,
1306 "Intel i82547EI 1000BASE-T Ethernet", 1306 "Intel i82547EI 1000BASE-T Ethernet",
1307 WM_T_82547, WMP_F_COPPER }, 1307 WM_T_82547, WMP_F_COPPER },
1308 1308
1309 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE, 1309 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE,
1310 "Intel i82547EI Mobile 1000BASE-T Ethernet", 1310 "Intel i82547EI Mobile 1000BASE-T Ethernet",
1311 WM_T_82547, WMP_F_COPPER }, 1311 WM_T_82547, WMP_F_COPPER },
1312 1312
1313 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI, 1313 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI,
1314 "Intel i82547GI 1000BASE-T Ethernet", 1314 "Intel i82547GI 1000BASE-T Ethernet",
1315 WM_T_82547_2, WMP_F_COPPER }, 1315 WM_T_82547_2, WMP_F_COPPER },
1316 1316
1317 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER, 1317 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER,
1318 "Intel PRO/1000 PT (82571EB)", 1318 "Intel PRO/1000 PT (82571EB)",
1319 WM_T_82571, WMP_F_COPPER }, 1319 WM_T_82571, WMP_F_COPPER },
1320 1320
1321 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER, 1321 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER,
1322 "Intel PRO/1000 PF (82571EB)", 1322 "Intel PRO/1000 PF (82571EB)",
1323 WM_T_82571, WMP_F_FIBER }, 1323 WM_T_82571, WMP_F_FIBER },
1324 1324
1325 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES, 1325 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES,
1326 "Intel PRO/1000 PB (82571EB)", 1326 "Intel PRO/1000 PB (82571EB)",
1327 WM_T_82571, WMP_F_SERDES }, 1327 WM_T_82571, WMP_F_SERDES },
1328 1328
1329 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER, 1329 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
1330 "Intel PRO/1000 QT (82571EB)", 1330 "Intel PRO/1000 QT (82571EB)",
1331 WM_T_82571, WMP_F_COPPER }, 1331 WM_T_82571, WMP_F_COPPER },
1332 1332
1333 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER, 1333 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
1334 "Intel PRO/1000 PT Quad Port Server Adapter", 1334 "Intel PRO/1000 PT Quad Port Server Adapter",
1335 WM_T_82571, WMP_F_COPPER }, 1335 WM_T_82571, WMP_F_COPPER },
1336 1336
1337 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER, 1337 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
1338 "Intel Gigabit PT Quad Port Server ExpressModule", 1338 "Intel Gigabit PT Quad Port Server ExpressModule",
1339 WM_T_82571, WMP_F_COPPER }, 1339 WM_T_82571, WMP_F_COPPER },
1340 1340
1341 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES, 1341 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
1342 "Intel 82571EB Dual Gigabit Ethernet (SERDES)", 1342 "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
1343 WM_T_82571, WMP_F_SERDES }, 1343 WM_T_82571, WMP_F_SERDES },
1344 1344
1345 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES, 1345 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
1346 "Intel 82571EB Quad Gigabit Ethernet (SERDES)", 1346 "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
1347 WM_T_82571, WMP_F_SERDES }, 1347 WM_T_82571, WMP_F_SERDES },
1348 1348
1349 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER, 1349 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
1350 "Intel 82571EB Quad 1000baseX Ethernet", 1350 "Intel 82571EB Quad 1000baseX Ethernet",
1351 WM_T_82571, WMP_F_FIBER }, 1351 WM_T_82571, WMP_F_FIBER },
1352 1352
1353 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER, 1353 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER,
1354 "Intel i82572EI 1000baseT Ethernet", 1354 "Intel i82572EI 1000baseT Ethernet",
1355 WM_T_82572, WMP_F_COPPER }, 1355 WM_T_82572, WMP_F_COPPER },
1356 1356
1357 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER, 1357 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER,
1358 "Intel i82572EI 1000baseX Ethernet", 1358 "Intel i82572EI 1000baseX Ethernet",
1359 WM_T_82572, WMP_F_FIBER }, 1359 WM_T_82572, WMP_F_FIBER },
1360 1360
1361 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES, 1361 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES,
1362 "Intel i82572EI Gigabit Ethernet (SERDES)", 1362 "Intel i82572EI Gigabit Ethernet (SERDES)",
1363 WM_T_82572, WMP_F_SERDES }, 1363 WM_T_82572, WMP_F_SERDES },
1364 1364
1365 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI, 1365 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI,
1366 "Intel i82572EI 1000baseT Ethernet", 1366 "Intel i82572EI 1000baseT Ethernet",
1367 WM_T_82572, WMP_F_COPPER }, 1367 WM_T_82572, WMP_F_COPPER },
1368 1368
1369 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E, 1369 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E,
1370 "Intel i82573E", 1370 "Intel i82573E",
1371 WM_T_82573, WMP_F_COPPER }, 1371 WM_T_82573, WMP_F_COPPER },
1372 1372
1373 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT, 1373 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT,
1374 "Intel i82573E IAMT", 1374 "Intel i82573E IAMT",
1375 WM_T_82573, WMP_F_COPPER }, 1375 WM_T_82573, WMP_F_COPPER },
1376 1376
1377 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L, 1377 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L,
1378 "Intel i82573L Gigabit Ethernet", 1378 "Intel i82573L Gigabit Ethernet",
1379 WM_T_82573, WMP_F_COPPER }, 1379 WM_T_82573, WMP_F_COPPER },
1380 1380
1381 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L, 1381 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L,
1382 "Intel i82574L", 1382 "Intel i82574L",
1383 WM_T_82574, WMP_F_COPPER }, 1383 WM_T_82574, WMP_F_COPPER },
1384 1384
1385 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574LA, 1385 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574LA,
1386 "Intel i82574L", 1386 "Intel i82574L",
1387 WM_T_82574, WMP_F_COPPER }, 1387 WM_T_82574, WMP_F_COPPER },
1388 1388
1389 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V, 1389 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V,
1390 "Intel i82583V", 1390 "Intel i82583V",
1391 WM_T_82583, WMP_F_COPPER }, 1391 WM_T_82583, WMP_F_COPPER },
1392 1392
1393 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT, 1393 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
1394 "i80003 dual 1000baseT Ethernet", 1394 "i80003 dual 1000baseT Ethernet",
1395 WM_T_80003, WMP_F_COPPER }, 1395 WM_T_80003, WMP_F_COPPER },
1396 1396
1397 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT, 1397 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
1398 "i80003 dual 1000baseX Ethernet", 1398 "i80003 dual 1000baseX Ethernet",
1399 WM_T_80003, WMP_F_COPPER }, 1399 WM_T_80003, WMP_F_COPPER },
1400 1400
1401 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT, 1401 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
1402 "Intel i80003ES2 dual Gigabit Ethernet (SERDES)", 1402 "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
1403 WM_T_80003, WMP_F_SERDES }, 1403 WM_T_80003, WMP_F_SERDES },
1404 1404
1405 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT, 1405 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
1406 "Intel i80003 1000baseT Ethernet", 1406 "Intel i80003 1000baseT Ethernet",
1407 WM_T_80003, WMP_F_COPPER }, 1407 WM_T_80003, WMP_F_COPPER },
1408 1408
1409 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT, 1409 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
1410 "Intel i80003 Gigabit Ethernet (SERDES)", 1410 "Intel i80003 Gigabit Ethernet (SERDES)",
1411 WM_T_80003, WMP_F_SERDES }, 1411 WM_T_80003, WMP_F_SERDES },
1412 1412
1413 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_AMT, 1413 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_AMT,
1414 "Intel i82801H (M_AMT) LAN Controller", 1414 "Intel i82801H (M_AMT) LAN Controller",
1415 WM_T_ICH8, WMP_F_COPPER }, 1415 WM_T_ICH8, WMP_F_COPPER },
1416 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_AMT, 1416 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_AMT,
1417 "Intel i82801H (AMT) LAN Controller", 1417 "Intel i82801H (AMT) LAN Controller",
1418 WM_T_ICH8, WMP_F_COPPER }, 1418 WM_T_ICH8, WMP_F_COPPER },
1419 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_LAN, 1419 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_LAN,
1420 "Intel i82801H LAN Controller", 1420 "Intel i82801H LAN Controller",
1421 WM_T_ICH8, WMP_F_COPPER }, 1421 WM_T_ICH8, WMP_F_COPPER },
1422 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_LAN, 1422 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_LAN,
1423 "Intel i82801H (IFE) 10/100 LAN Controller", 1423 "Intel i82801H (IFE) 10/100 LAN Controller",
1424 WM_T_ICH8, WMP_F_COPPER }, 1424 WM_T_ICH8, WMP_F_COPPER },
1425 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_LAN, 1425 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_LAN,
1426 "Intel i82801H (M) LAN Controller", 1426 "Intel i82801H (M) LAN Controller",
1427 WM_T_ICH8, WMP_F_COPPER }, 1427 WM_T_ICH8, WMP_F_COPPER },
1428 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_GT, 1428 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_GT,
1429 "Intel i82801H IFE (GT) 10/100 LAN Controller", 1429 "Intel i82801H IFE (GT) 10/100 LAN Controller",
1430 WM_T_ICH8, WMP_F_COPPER }, 1430 WM_T_ICH8, WMP_F_COPPER },
1431 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_G, 1431 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_G,
1432 "Intel i82801H IFE (G) 10/100 LAN Controller", 1432 "Intel i82801H IFE (G) 10/100 LAN Controller",
1433 WM_T_ICH8, WMP_F_COPPER }, 1433 WM_T_ICH8, WMP_F_COPPER },
1434 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_82567V_3, 1434 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_82567V_3,
1435 "82567V-3 LAN Controller", 1435 "82567V-3 LAN Controller",
1436 WM_T_ICH8, WMP_F_COPPER }, 1436 WM_T_ICH8, WMP_F_COPPER },
1437 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_AMT, 1437 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_AMT,
1438 "82801I (AMT) LAN Controller", 1438 "82801I (AMT) LAN Controller",
1439 WM_T_ICH9, WMP_F_COPPER }, 1439 WM_T_ICH9, WMP_F_COPPER },
1440 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE, 1440 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE,
1441 "82801I 10/100 LAN Controller", 1441 "82801I 10/100 LAN Controller",
1442 WM_T_ICH9, WMP_F_COPPER }, 1442 WM_T_ICH9, WMP_F_COPPER },
1443 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_G, 1443 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_G,
1444 "82801I (G) 10/100 LAN Controller", 1444 "82801I (G) 10/100 LAN Controller",
1445 WM_T_ICH9, WMP_F_COPPER }, 1445 WM_T_ICH9, WMP_F_COPPER },
1446 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_GT, 1446 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_GT,
1447 "82801I (GT) 10/100 LAN Controller", 1447 "82801I (GT) 10/100 LAN Controller",
1448 WM_T_ICH9, WMP_F_COPPER }, 1448 WM_T_ICH9, WMP_F_COPPER },
1449 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_C, 1449 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_C,
1450 "82801I (C) LAN Controller", 1450 "82801I (C) LAN Controller",
1451 WM_T_ICH9, WMP_F_COPPER }, 1451 WM_T_ICH9, WMP_F_COPPER },
1452 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M, 1452 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M,
1453 "82801I mobile LAN Controller", 1453 "82801I mobile LAN Controller",
1454 WM_T_ICH9, WMP_F_COPPER }, 1454 WM_T_ICH9, WMP_F_COPPER },
1455 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_V, 1455 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_V,
1456 "82801I mobile (V) LAN Controller", 1456 "82801I mobile (V) LAN Controller",
1457 WM_T_ICH9, WMP_F_COPPER }, 1457 WM_T_ICH9, WMP_F_COPPER },
1458 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_AMT, 1458 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
1459 "82801I mobile (AMT) LAN Controller", 1459 "82801I mobile (AMT) LAN Controller",
1460 WM_T_ICH9, WMP_F_COPPER }, 1460 WM_T_ICH9, WMP_F_COPPER },
1461 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_BM, 1461 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_BM,
1462 "82567LM-4 LAN Controller", 1462 "82567LM-4 LAN Controller",
1463 WM_T_ICH9, WMP_F_COPPER }, 1463 WM_T_ICH9, WMP_F_COPPER },
1464 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LM, 1464 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LM,
1465 "82567LM-2 LAN Controller", 1465 "82567LM-2 LAN Controller",
1466 WM_T_ICH10, WMP_F_COPPER }, 1466 WM_T_ICH10, WMP_F_COPPER },
1467 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LF, 1467 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LF,
1468 "82567LF-2 LAN Controller", 1468 "82567LF-2 LAN Controller",
1469 WM_T_ICH10, WMP_F_COPPER }, 1469 WM_T_ICH10, WMP_F_COPPER },
1470 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LM, 1470 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LM,
1471 "82567LM-3 LAN Controller", 1471 "82567LM-3 LAN Controller",
1472 WM_T_ICH10, WMP_F_COPPER }, 1472 WM_T_ICH10, WMP_F_COPPER },
1473 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LF, 1473 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LF,
@@ -1697,1998 +1697,2002 @@ static const struct wm_product { @@ -1697,1998 +1697,2002 @@ static const struct wm_product {
1697 "I219 LM (7) Ethernet Connection", 1697 "I219 LM (7) Ethernet Connection",
1698 WM_T_PCH_CNP, WMP_F_COPPER }, 1698 WM_T_PCH_CNP, WMP_F_COPPER },
1699 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM8, 1699 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM8,
1700 "I219 LM (8) Ethernet Connection", 1700 "I219 LM (8) Ethernet Connection",
1701 WM_T_PCH_CNP, WMP_F_COPPER }, 1701 WM_T_PCH_CNP, WMP_F_COPPER },
1702 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM9, 1702 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM9,
1703 "I219 LM (9) Ethernet Connection", 1703 "I219 LM (9) Ethernet Connection",
1704 WM_T_PCH_CNP, WMP_F_COPPER }, 1704 WM_T_PCH_CNP, WMP_F_COPPER },
1705 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM10, 1705 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM10,
1706 "I219 LM (10) Ethernet Connection", 1706 "I219 LM (10) Ethernet Connection",
1707 WM_T_PCH_CNP, WMP_F_COPPER }, 1707 WM_T_PCH_CNP, WMP_F_COPPER },
1708 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM11, 1708 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM11,
1709 "I219 LM (11) Ethernet Connection", 1709 "I219 LM (11) Ethernet Connection",
1710 WM_T_PCH_CNP, WMP_F_COPPER }, 1710 WM_T_PCH_CNP, WMP_F_COPPER },
1711 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM12, 1711 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM12,
1712 "I219 LM (12) Ethernet Connection", 1712 "I219 LM (12) Ethernet Connection",
1713 WM_T_PCH_SPT, WMP_F_COPPER }, 1713 WM_T_PCH_SPT, WMP_F_COPPER },
1714 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM13, 1714 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM13,
1715 "I219 LM (13) Ethernet Connection", 1715 "I219 LM (13) Ethernet Connection",
1716 WM_T_PCH_TGP, WMP_F_COPPER }, 1716 WM_T_PCH_TGP, WMP_F_COPPER },
1717 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM14, 1717 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM14,
1718 "I219 LM (14) Ethernet Connection", 1718 "I219 LM (14) Ethernet Connection",
1719 WM_T_PCH_TGP, WMP_F_COPPER }, 1719 WM_T_PCH_TGP, WMP_F_COPPER },
1720 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM15, 1720 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM15,
1721 "I219 LM (15) Ethernet Connection", 1721 "I219 LM (15) Ethernet Connection",
1722 WM_T_PCH_TGP, WMP_F_COPPER }, 1722 WM_T_PCH_TGP, WMP_F_COPPER },
1723 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM16, 1723 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM16,
1724 "I219 LM (16) Ethernet Connection", 1724 "I219 LM (16) Ethernet Connection",
1725 WM_T_PCH_TGP, WMP_F_COPPER }, /* ADP */ 1725 WM_T_PCH_TGP, WMP_F_COPPER }, /* ADP */
1726 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM17, 1726 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM17,
1727 "I219 LM (17) Ethernet Connection", 1727 "I219 LM (17) Ethernet Connection",
1728 WM_T_PCH_TGP, WMP_F_COPPER }, /* ADP */ 1728 WM_T_PCH_TGP, WMP_F_COPPER }, /* ADP */
1729 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM18, 1729 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM18,
1730 "I219 LM (18) Ethernet Connection", 1730 "I219 LM (18) Ethernet Connection",
1731 WM_T_PCH_TGP, WMP_F_COPPER }, /* MTP */ 1731 WM_T_PCH_TGP, WMP_F_COPPER }, /* MTP */
1732 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM19, 1732 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM19,
1733 "I219 LM (19) Ethernet Connection", 1733 "I219 LM (19) Ethernet Connection",
1734 WM_T_PCH_TGP, WMP_F_COPPER }, /* MTP */ 1734 WM_T_PCH_TGP, WMP_F_COPPER }, /* MTP */
1735 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM20, 1735 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM20,
1736 "I219 LM (20) Ethernet Connection", 1736 "I219 LM (20) Ethernet Connection",
1737 WM_T_PCH_TGP, WMP_F_COPPER }, /* MTP */ 1737 WM_T_PCH_TGP, WMP_F_COPPER }, /* MTP */
1738 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM21, 1738 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM21,
1739 "I219 LM (21) Ethernet Connection", 1739 "I219 LM (21) Ethernet Connection",
1740 WM_T_PCH_TGP, WMP_F_COPPER }, /* MTP */ 1740 WM_T_PCH_TGP, WMP_F_COPPER }, /* MTP */
1741 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM22, 1741 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM22,
1742 "I219 LM (22) Ethernet Connection", 1742 "I219 LM (22) Ethernet Connection",
1743 WM_T_PCH_TGP, WMP_F_COPPER }, /* ADP(RPL) */ 1743 WM_T_PCH_TGP, WMP_F_COPPER }, /* ADP(RPL) */
1744 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM23, 1744 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM23,
1745 "I219 LM (23) Ethernet Connection", 1745 "I219 LM (23) Ethernet Connection",
1746 WM_T_PCH_TGP, WMP_F_COPPER }, /* ADP(RPL) */ 1746 WM_T_PCH_TGP, WMP_F_COPPER }, /* ADP(RPL) */
1747 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V, 1747 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V,
1748 "I219 V Ethernet Connection", 1748 "I219 V Ethernet Connection",
1749 WM_T_PCH_SPT, WMP_F_COPPER }, 1749 WM_T_PCH_SPT, WMP_F_COPPER },
1750 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V2, 1750 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V2,
1751 "I219 V (2) Ethernet Connection", 1751 "I219 V (2) Ethernet Connection",
1752 WM_T_PCH_SPT, WMP_F_COPPER }, 1752 WM_T_PCH_SPT, WMP_F_COPPER },
1753 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V4, 1753 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V4,
1754 "I219 V (4) Ethernet Connection", 1754 "I219 V (4) Ethernet Connection",
1755 WM_T_PCH_SPT, WMP_F_COPPER }, 1755 WM_T_PCH_SPT, WMP_F_COPPER },
1756 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V5, 1756 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V5,
1757 "I219 V (5) Ethernet Connection", 1757 "I219 V (5) Ethernet Connection",
1758 WM_T_PCH_SPT, WMP_F_COPPER }, 1758 WM_T_PCH_SPT, WMP_F_COPPER },
1759 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V6, 1759 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V6,
1760 "I219 V (6) Ethernet Connection", 1760 "I219 V (6) Ethernet Connection",
1761 WM_T_PCH_CNP, WMP_F_COPPER }, 1761 WM_T_PCH_CNP, WMP_F_COPPER },
1762 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V7, 1762 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V7,
1763 "I219 V (7) Ethernet Connection", 1763 "I219 V (7) Ethernet Connection",
1764 WM_T_PCH_CNP, WMP_F_COPPER }, 1764 WM_T_PCH_CNP, WMP_F_COPPER },
1765 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V8, 1765 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V8,
1766 "I219 V (8) Ethernet Connection", 1766 "I219 V (8) Ethernet Connection",
1767 WM_T_PCH_CNP, WMP_F_COPPER }, 1767 WM_T_PCH_CNP, WMP_F_COPPER },
1768 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V9, 1768 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V9,
1769 "I219 V (9) Ethernet Connection", 1769 "I219 V (9) Ethernet Connection",
1770 WM_T_PCH_CNP, WMP_F_COPPER }, 1770 WM_T_PCH_CNP, WMP_F_COPPER },
1771 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V10, 1771 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V10,
1772 "I219 V (10) Ethernet Connection", 1772 "I219 V (10) Ethernet Connection",
1773 WM_T_PCH_CNP, WMP_F_COPPER }, 1773 WM_T_PCH_CNP, WMP_F_COPPER },
1774 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V11, 1774 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V11,
1775 "I219 V (11) Ethernet Connection", 1775 "I219 V (11) Ethernet Connection",
1776 WM_T_PCH_CNP, WMP_F_COPPER }, 1776 WM_T_PCH_CNP, WMP_F_COPPER },
1777 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V12, 1777 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V12,
1778 "I219 V (12) Ethernet Connection", 1778 "I219 V (12) Ethernet Connection",
1779 WM_T_PCH_SPT, WMP_F_COPPER }, 1779 WM_T_PCH_SPT, WMP_F_COPPER },
1780 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V13, 1780 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V13,
1781 "I219 V (13) Ethernet Connection", 1781 "I219 V (13) Ethernet Connection",
1782 WM_T_PCH_TGP, WMP_F_COPPER }, 1782 WM_T_PCH_TGP, WMP_F_COPPER },
1783 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V14, 1783 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V14,
1784 "I219 V (14) Ethernet Connection", 1784 "I219 V (14) Ethernet Connection",
1785 WM_T_PCH_TGP, WMP_F_COPPER }, 1785 WM_T_PCH_TGP, WMP_F_COPPER },
1786 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V15, 1786 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V15,
1787 "I219 V (15) Ethernet Connection", 1787 "I219 V (15) Ethernet Connection",
1788 WM_T_PCH_TGP, WMP_F_COPPER }, 1788 WM_T_PCH_TGP, WMP_F_COPPER },
1789 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V16, 1789 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V16,
1790 "I219 V (16) Ethernet Connection", 1790 "I219 V (16) Ethernet Connection",
1791 WM_T_PCH_TGP, WMP_F_COPPER }, /* ADP */ 1791 WM_T_PCH_TGP, WMP_F_COPPER }, /* ADP */
1792 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V17, 1792 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V17,
1793 "I219 V (17) Ethernet Connection", 1793 "I219 V (17) Ethernet Connection",
1794 WM_T_PCH_TGP, WMP_F_COPPER }, /* ADP */ 1794 WM_T_PCH_TGP, WMP_F_COPPER }, /* ADP */
1795 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V18, 1795 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V18,
1796 "I219 V (18) Ethernet Connection", 1796 "I219 V (18) Ethernet Connection",
1797 WM_T_PCH_TGP, WMP_F_COPPER }, /* MTP */ 1797 WM_T_PCH_TGP, WMP_F_COPPER }, /* MTP */
1798 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V19, 1798 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V19,
1799 "I219 V (19) Ethernet Connection", 1799 "I219 V (19) Ethernet Connection",
1800 WM_T_PCH_TGP, WMP_F_COPPER }, /* MTP */ 1800 WM_T_PCH_TGP, WMP_F_COPPER }, /* MTP */
1801 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V20, 1801 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V20,
1802 "I219 V (20) Ethernet Connection", 1802 "I219 V (20) Ethernet Connection",
1803 WM_T_PCH_TGP, WMP_F_COPPER }, /* MTP */ 1803 WM_T_PCH_TGP, WMP_F_COPPER }, /* MTP */
1804 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V21, 1804 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V21,
1805 "I219 V (21) Ethernet Connection", 1805 "I219 V (21) Ethernet Connection",
1806 WM_T_PCH_TGP, WMP_F_COPPER }, /* MTP */ 1806 WM_T_PCH_TGP, WMP_F_COPPER }, /* MTP */
1807 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V22, 1807 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V22,
1808 "I219 V (22) Ethernet Connection", 1808 "I219 V (22) Ethernet Connection",
1809 WM_T_PCH_TGP, WMP_F_COPPER }, /* ADP(RPL) */ 1809 WM_T_PCH_TGP, WMP_F_COPPER }, /* ADP(RPL) */
1810 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V23, 1810 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V23,
1811 "I219 V (23) Ethernet Connection", 1811 "I219 V (23) Ethernet Connection",
1812 WM_T_PCH_TGP, WMP_F_COPPER }, /* ADP(RPL) */ 1812 WM_T_PCH_TGP, WMP_F_COPPER }, /* ADP(RPL) */
1813 { 0, 0, 1813 { 0, 0,
1814 NULL, 1814 NULL,
1815 0, 0 }, 1815 0, 0 },
1816}; 1816};
1817 1817
1818/* 1818/*
1819 * Register read/write functions. 1819 * Register read/write functions.
1820 * Other than CSR_{READ|WRITE}(). 1820 * Other than CSR_{READ|WRITE}().
1821 */ 1821 */
1822 1822
#if 0 /* Not currently used */
/*
 * Read a device register through the I/O-mapped indirect access
 * window: the register offset is written at I/O BAR offset 0, then
 * the data word is read back from offset 4.
 */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;

	/* Select the target register, then fetch its contents. */
	bus_space_write_4(iot, ioh, 0, reg);
	return bus_space_read_4(iot, ioh, 4);
}
#endif
1832 1832
1833static inline void 1833static inline void
1834wm_io_write(struct wm_softc *sc, int reg, uint32_t val) 1834wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1835{ 1835{
1836 1836
1837 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg); 1837 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1838 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val); 1838 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1839} 1839}
1840 1840
1841static inline void 1841static inline void
1842wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off, 1842wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1843 uint32_t data) 1843 uint32_t data)
1844{ 1844{
1845 uint32_t regval; 1845 uint32_t regval;
1846 int i; 1846 int i;
1847 1847
1848 regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT); 1848 regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1849 1849
1850 CSR_WRITE(sc, reg, regval); 1850 CSR_WRITE(sc, reg, regval);
1851 1851
1852 for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) { 1852 for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1853 delay(5); 1853 delay(5);
1854 if (CSR_READ(sc, reg) & SCTL_CTL_READY) 1854 if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1855 break; 1855 break;
1856 } 1856 }
1857 if (i == SCTL_CTL_POLL_TIMEOUT) { 1857 if (i == SCTL_CTL_POLL_TIMEOUT) {
1858 aprint_error("%s: WARNING:" 1858 aprint_error("%s: WARNING:"
1859 " i82575 reg 0x%08x setup did not indicate ready\n", 1859 " i82575 reg 0x%08x setup did not indicate ready\n",
1860 device_xname(sc->sc_dev), reg); 1860 device_xname(sc->sc_dev), reg);
1861 } 1861 }
1862} 1862}
1863 1863
1864static inline void 1864static inline void
1865wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v) 1865wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1866{ 1866{
1867 wa->wa_low = htole32(v & 0xffffffffU); 1867 wa->wa_low = htole32(v & 0xffffffffU);
1868 if (sizeof(bus_addr_t) == 8) 1868 if (sizeof(bus_addr_t) == 8)
1869 wa->wa_high = htole32((uint64_t) v >> 32); 1869 wa->wa_high = htole32((uint64_t) v >> 32);
1870 else 1870 else
1871 wa->wa_high = 0; 1871 wa->wa_high = 0;
1872} 1872}
1873 1873
1874/* 1874/*
1875 * Descriptor sync/init functions. 1875 * Descriptor sync/init functions.
1876 */ 1876 */
1877static inline void 1877static inline void
1878wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops) 1878wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
1879{ 1879{
1880 struct wm_softc *sc = txq->txq_sc; 1880 struct wm_softc *sc = txq->txq_sc;
1881 1881
1882 /* If it will wrap around, sync to the end of the ring. */ 1882 /* If it will wrap around, sync to the end of the ring. */
1883 if ((start + num) > WM_NTXDESC(txq)) { 1883 if ((start + num) > WM_NTXDESC(txq)) {
1884 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap, 1884 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1885 WM_CDTXOFF(txq, start), txq->txq_descsize * 1885 WM_CDTXOFF(txq, start), txq->txq_descsize *
1886 (WM_NTXDESC(txq) - start), ops); 1886 (WM_NTXDESC(txq) - start), ops);
1887 num -= (WM_NTXDESC(txq) - start); 1887 num -= (WM_NTXDESC(txq) - start);
1888 start = 0; 1888 start = 0;
1889 } 1889 }
1890 1890
1891 /* Now sync whatever is left. */ 1891 /* Now sync whatever is left. */
1892 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap, 1892 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1893 WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops); 1893 WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
1894} 1894}
1895 1895
1896static inline void 1896static inline void
1897wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops) 1897wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
1898{ 1898{
1899 struct wm_softc *sc = rxq->rxq_sc; 1899 struct wm_softc *sc = rxq->rxq_sc;
1900 1900
1901 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap, 1901 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
1902 WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops); 1902 WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
1903} 1903}
1904 1904
/*
 * (Re)initialize the RX descriptor at ring index 'start' to point at
 * its mbuf's data buffer, in whichever of the three descriptor
 * formats this chip uses, then sync the descriptor and advance the
 * hardware RX tail register (RDT) to hand it back to the chip.
 */
static inline void
wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
{
	struct wm_softc *sc = rxq->rxq_sc;
	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
	struct mbuf *m = rxs->rxs_mbuf;

	/*
	 * Note: We scoot the packet forward 2 bytes in the buffer
	 * so that the payload after the Ethernet header is aligned
	 * to a 4-byte boundary.
	 *
	 * XXX BRAINDAMAGE ALERT!
	 * The stupid chip uses the same size for every buffer, which
	 * is set in the Receive Control register.  We are using the 2K
	 * size option, but what we REALLY want is (2K - 2)!  For this
	 * reason, we can't "scoot" packets longer than the standard
	 * Ethernet MTU.  On strict-alignment platforms, if the total
	 * size exceeds (2K - 2) we set align_tweak to 0 and let
	 * the upper layer copy the headers.
	 */
	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;

	if (sc->sc_type == WM_T_82574) {
		/* 82574 uses the extended descriptor format. */
		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
		rxd->erx_data.erxd_addr =
		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
		/* Clear DD so the chip, not us, owns this descriptor. */
		rxd->erx_data.erxd_dd = 0;
	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		/* 82575 and newer use the "advanced" (nq) format. */
		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];

		rxd->nqrx_data.nrxd_paddr =
		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
		/* Currently, split header is not supported. */
		rxd->nqrx_data.nrxd_haddr = 0;
	} else {
		/* Legacy (wiseman) descriptor format. */
		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];

		wm_set_dma_addr(&rxd->wrx_addr,
		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
		rxd->wrx_len = 0;
		rxd->wrx_cksum = 0;
		rxd->wrx_status = 0;
		rxd->wrx_errors = 0;
		rxd->wrx_special = 0;
	}
	/* Sync before the hardware is told about the descriptor. */
	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Advance the tail pointer: gives the descriptor to the chip. */
	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
}
1955 1955
1956/* 1956/*
1957 * Device driver interface functions and commonly used functions. 1957 * Device driver interface functions and commonly used functions.
1958 * match, attach, detach, init, start, stop, ioctl, watchdog and so on. 1958 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1959 */ 1959 */
1960 1960
1961/* Lookup supported device table */ 1961/* Lookup supported device table */
1962static const struct wm_product * 1962static const struct wm_product *
1963wm_lookup(const struct pci_attach_args *pa) 1963wm_lookup(const struct pci_attach_args *pa)
1964{ 1964{
1965 const struct wm_product *wmp; 1965 const struct wm_product *wmp;
1966 1966
1967 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) { 1967 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1968 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor && 1968 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1969 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product) 1969 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1970 return wmp; 1970 return wmp;
1971 } 1971 }
1972 return NULL; 1972 return NULL;
1973} 1973}
1974 1974
1975/* The match function (ca_match) */ 1975/* The match function (ca_match) */
1976static int 1976static int
1977wm_match(device_t parent, cfdata_t cf, void *aux) 1977wm_match(device_t parent, cfdata_t cf, void *aux)
1978{ 1978{
1979 struct pci_attach_args *pa = aux; 1979 struct pci_attach_args *pa = aux;
1980 1980
1981 if (wm_lookup(pa) != NULL) 1981 if (wm_lookup(pa) != NULL)
1982 return 1; 1982 return 1;
1983 1983
1984 return 0; 1984 return 0;
1985} 1985}
1986 1986
1987/* The attach function (ca_attach) */ 1987/* The attach function (ca_attach) */
1988static void 1988static void
1989wm_attach(device_t parent, device_t self, void *aux) 1989wm_attach(device_t parent, device_t self, void *aux)
1990{ 1990{
1991 struct wm_softc *sc = device_private(self); 1991 struct wm_softc *sc = device_private(self);
1992 struct pci_attach_args *pa = aux; 1992 struct pci_attach_args *pa = aux;
1993 prop_dictionary_t dict; 1993 prop_dictionary_t dict;
1994 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1994 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1995 pci_chipset_tag_t pc = pa->pa_pc; 1995 pci_chipset_tag_t pc = pa->pa_pc;
1996 int counts[PCI_INTR_TYPE_SIZE]; 1996 int counts[PCI_INTR_TYPE_SIZE];
1997 pci_intr_type_t max_type; 1997 pci_intr_type_t max_type;
1998 const char *eetype, *xname; 1998 const char *eetype, *xname;
1999 bus_space_tag_t memt; 1999 bus_space_tag_t memt;
2000 bus_space_handle_t memh; 2000 bus_space_handle_t memh;
2001 bus_size_t memsize; 2001 bus_size_t memsize;
2002 int memh_valid; 2002 int memh_valid;
2003 int i, error; 2003 int i, error;
2004 const struct wm_product *wmp; 2004 const struct wm_product *wmp;
2005 prop_data_t ea; 2005 prop_data_t ea;
2006 prop_number_t pn; 2006 prop_number_t pn;
2007 uint8_t enaddr[ETHER_ADDR_LEN]; 2007 uint8_t enaddr[ETHER_ADDR_LEN];
2008 char buf[256]; 2008 char buf[256];
2009 char wqname[MAXCOMLEN]; 2009 char wqname[MAXCOMLEN];
2010 uint16_t cfg1, cfg2, swdpin, nvmword; 2010 uint16_t cfg1, cfg2, swdpin, nvmword;
2011 pcireg_t preg, memtype; 2011 pcireg_t preg, memtype;
2012 uint16_t eeprom_data, apme_mask; 2012 uint16_t eeprom_data, apme_mask;
2013 bool force_clear_smbi; 2013 bool force_clear_smbi;
2014 uint32_t link_mode; 2014 uint32_t link_mode;
2015 uint32_t reg; 2015 uint32_t reg;
2016 2016
2017#if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT) 2017#if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
2018 sc->sc_debug = WM_DEBUG_DEFAULT; 2018 sc->sc_debug = WM_DEBUG_DEFAULT;
2019#endif 2019#endif
2020 sc->sc_dev = self; 2020 sc->sc_dev = self;
2021 callout_init(&sc->sc_tick_ch, WM_CALLOUT_FLAGS); 2021 callout_init(&sc->sc_tick_ch, WM_CALLOUT_FLAGS);
2022 sc->sc_core_stopping = false; 2022 sc->sc_core_stopping = false;
2023 2023
2024 wmp = wm_lookup(pa); 2024 wmp = wm_lookup(pa);
2025#ifdef DIAGNOSTIC 2025#ifdef DIAGNOSTIC
2026 if (wmp == NULL) { 2026 if (wmp == NULL) {
2027 printf("\n"); 2027 printf("\n");
2028 panic("wm_attach: impossible"); 2028 panic("wm_attach: impossible");
2029 } 2029 }
2030#endif 2030#endif
2031 sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags); 2031 sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
2032 2032
2033 sc->sc_pc = pa->pa_pc; 2033 sc->sc_pc = pa->pa_pc;
2034 sc->sc_pcitag = pa->pa_tag; 2034 sc->sc_pcitag = pa->pa_tag;
2035 2035
2036 if (pci_dma64_available(pa)) { 2036 if (pci_dma64_available(pa)) {
2037 aprint_verbose(", 64-bit DMA"); 2037 aprint_verbose(", 64-bit DMA");
2038 sc->sc_dmat = pa->pa_dmat64; 2038 sc->sc_dmat = pa->pa_dmat64;
2039 } else { 2039 } else {
2040 aprint_verbose(", 32-bit DMA"); 2040 aprint_verbose(", 32-bit DMA");
2041 sc->sc_dmat = pa->pa_dmat; 2041 sc->sc_dmat = pa->pa_dmat;
2042 } 2042 }
2043 2043
2044 sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id); 2044 sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
2045 sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG)); 2045 sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
2046 pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1); 2046 pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
2047 2047
2048 sc->sc_type = wmp->wmp_type; 2048 sc->sc_type = wmp->wmp_type;
2049 2049
2050 /* Set default function pointers */ 2050 /* Set default function pointers */
2051 sc->phy.acquire = sc->nvm.acquire = wm_get_null; 2051 sc->phy.acquire = sc->nvm.acquire = wm_get_null;
2052 sc->phy.release = sc->nvm.release = wm_put_null; 2052 sc->phy.release = sc->nvm.release = wm_put_null;
2053 sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000; 2053 sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
2054 2054
2055 if (sc->sc_type < WM_T_82543) { 2055 if (sc->sc_type < WM_T_82543) {
2056 if (sc->sc_rev < 2) { 2056 if (sc->sc_rev < 2) {
2057 aprint_error_dev(sc->sc_dev, 2057 aprint_error_dev(sc->sc_dev,
2058 "i82542 must be at least rev. 2\n"); 2058 "i82542 must be at least rev. 2\n");
2059 return; 2059 return;
2060 } 2060 }
2061 if (sc->sc_rev < 3) 2061 if (sc->sc_rev < 3)
2062 sc->sc_type = WM_T_82542_2_0; 2062 sc->sc_type = WM_T_82542_2_0;
2063 } 2063 }
2064 2064
2065 /* 2065 /*
2066 * Disable MSI for Errata: 2066 * Disable MSI for Errata:
2067 * "Message Signaled Interrupt Feature May Corrupt Write Transactions" 2067 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
2068 * 2068 *
2069 * 82544: Errata 25 2069 * 82544: Errata 25
2070 * 82540: Errata 6 (easy to reproduce device timeout) 2070 * 82540: Errata 6 (easy to reproduce device timeout)
2071 * 82545: Errata 4 (easy to reproduce device timeout) 2071 * 82545: Errata 4 (easy to reproduce device timeout)
2072 * 82546: Errata 26 (easy to reproduce device timeout) 2072 * 82546: Errata 26 (easy to reproduce device timeout)
2073 * 82541: Errata 7 (easy to reproduce device timeout) 2073 * 82541: Errata 7 (easy to reproduce device timeout)
2074 * 2074 *
2075 * "Byte Enables 2 and 3 are not set on MSI writes" 2075 * "Byte Enables 2 and 3 are not set on MSI writes"
2076 * 2076 *
2077 * 82571 & 82572: Errata 63 2077 * 82571 & 82572: Errata 63
2078 */ 2078 */
2079 if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571) 2079 if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
2080 || (sc->sc_type == WM_T_82572)) 2080 || (sc->sc_type == WM_T_82572))
2081 pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY; 2081 pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
2082 2082
2083 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576) 2083 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2084 || (sc->sc_type == WM_T_82580) 2084 || (sc->sc_type == WM_T_82580)
2085 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354) 2085 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
2086 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) 2086 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
2087 sc->sc_flags |= WM_F_NEWQUEUE; 2087 sc->sc_flags |= WM_F_NEWQUEUE;
2088 2088
2089 /* Set device properties (mactype) */ 2089 /* Set device properties (mactype) */
2090 dict = device_properties(sc->sc_dev); 2090 dict = device_properties(sc->sc_dev);
2091 prop_dictionary_set_uint32(dict, "mactype", sc->sc_type); 2091 prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
2092 2092
2093 /* 2093 /*
2094 * Map the device. All devices support memory-mapped acccess, 2094 * Map the device. All devices support memory-mapped acccess,
2095 * and it is really required for normal operation. 2095 * and it is really required for normal operation.
2096 */ 2096 */
2097 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA); 2097 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
2098 switch (memtype) { 2098 switch (memtype) {
2099 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 2099 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
2100 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 2100 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
2101 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA, 2101 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
2102 memtype, 0, &memt, &memh, NULL, &memsize) == 0); 2102 memtype, 0, &memt, &memh, NULL, &memsize) == 0);
2103 break; 2103 break;
2104 default: 2104 default:
2105 memh_valid = 0; 2105 memh_valid = 0;
2106 break; 2106 break;
2107 } 2107 }
2108 2108
2109 if (memh_valid) { 2109 if (memh_valid) {
2110 sc->sc_st = memt; 2110 sc->sc_st = memt;
2111 sc->sc_sh = memh; 2111 sc->sc_sh = memh;
2112 sc->sc_ss = memsize; 2112 sc->sc_ss = memsize;
2113 } else { 2113 } else {
2114 aprint_error_dev(sc->sc_dev, 2114 aprint_error_dev(sc->sc_dev,
2115 "unable to map device registers\n"); 2115 "unable to map device registers\n");
2116 return; 2116 return;
2117 } 2117 }
2118 2118
2119 /* 2119 /*
2120 * In addition, i82544 and later support I/O mapped indirect 2120 * In addition, i82544 and later support I/O mapped indirect
2121 * register access. It is not desirable (nor supported in 2121 * register access. It is not desirable (nor supported in
2122 * this driver) to use it for normal operation, though it is 2122 * this driver) to use it for normal operation, though it is
2123 * required to work around bugs in some chip versions. 2123 * required to work around bugs in some chip versions.
2124 */ 2124 */
2125 switch (sc->sc_type) { 2125 switch (sc->sc_type) {
2126 case WM_T_82544: 2126 case WM_T_82544:
2127 case WM_T_82541: 2127 case WM_T_82541:
2128 case WM_T_82541_2: 2128 case WM_T_82541_2:
2129 case WM_T_82547: 2129 case WM_T_82547:
2130 case WM_T_82547_2: 2130 case WM_T_82547_2:
2131 /* First we have to find the I/O BAR. */ 2131 /* First we have to find the I/O BAR. */
2132 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) { 2132 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
2133 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i); 2133 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
2134 if (memtype == PCI_MAPREG_TYPE_IO) 2134 if (memtype == PCI_MAPREG_TYPE_IO)
2135 break; 2135 break;
2136 if (PCI_MAPREG_MEM_TYPE(memtype) == 2136 if (PCI_MAPREG_MEM_TYPE(memtype) ==
2137 PCI_MAPREG_MEM_TYPE_64BIT) 2137 PCI_MAPREG_MEM_TYPE_64BIT)
2138 i += 4; /* skip high bits, too */ 2138 i += 4; /* skip high bits, too */
2139 } 2139 }
2140 if (i < PCI_MAPREG_END) { 2140 if (i < PCI_MAPREG_END) {
2141 /* 2141 /*
2142 * We found PCI_MAPREG_TYPE_IO. Note that 82580 2142 * We found PCI_MAPREG_TYPE_IO. Note that 82580
2143 * (and newer?) chip has no PCI_MAPREG_TYPE_IO. 2143 * (and newer?) chip has no PCI_MAPREG_TYPE_IO.
2144 * It's no problem because newer chips has no this 2144 * It's no problem because newer chips has no this
2145 * bug. 2145 * bug.
2146 * 2146 *
2147 * The i8254x doesn't apparently respond when the 2147 * The i8254x doesn't apparently respond when the
2148 * I/O BAR is 0, which looks somewhat like it's not 2148 * I/O BAR is 0, which looks somewhat like it's not
2149 * been configured. 2149 * been configured.
2150 */ 2150 */
2151 preg = pci_conf_read(pc, pa->pa_tag, i); 2151 preg = pci_conf_read(pc, pa->pa_tag, i);
2152 if (PCI_MAPREG_MEM_ADDR(preg) == 0) { 2152 if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
2153 aprint_error_dev(sc->sc_dev, 2153 aprint_error_dev(sc->sc_dev,
2154 "WARNING: I/O BAR at zero.\n"); 2154 "WARNING: I/O BAR at zero.\n");
2155 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO, 2155 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
2156 0, &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios) 2156 0, &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)
2157 == 0) { 2157 == 0) {
2158 sc->sc_flags |= WM_F_IOH_VALID; 2158 sc->sc_flags |= WM_F_IOH_VALID;
2159 } else 2159 } else
2160 aprint_error_dev(sc->sc_dev, 2160 aprint_error_dev(sc->sc_dev,
2161 "WARNING: unable to map I/O space\n"); 2161 "WARNING: unable to map I/O space\n");
2162 } 2162 }
2163 break; 2163 break;
2164 default: 2164 default:
2165 break; 2165 break;
2166 } 2166 }
2167 2167
2168 /* Enable bus mastering. Disable MWI on the i82542 2.0. */ 2168 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
2169 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 2169 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
2170 preg |= PCI_COMMAND_MASTER_ENABLE; 2170 preg |= PCI_COMMAND_MASTER_ENABLE;
2171 if (sc->sc_type < WM_T_82542_2_1) 2171 if (sc->sc_type < WM_T_82542_2_1)
2172 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE; 2172 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
2173 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg); 2173 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
2174 2174
2175 /* Power up chip */ 2175 /* Power up chip */
2176 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL)) 2176 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
2177 && error != EOPNOTSUPP) { 2177 && error != EOPNOTSUPP) {
2178 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error); 2178 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
2179 return; 2179 return;
2180 } 2180 }
2181 2181
2182 wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag)); 2182 wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
2183 /* 2183 /*
2184 * Don't use MSI-X if we can use only one queue to save interrupt 2184 * Don't use MSI-X if we can use only one queue to save interrupt
2185 * resource. 2185 * resource.
2186 */ 2186 */
2187 if (sc->sc_nqueues > 1) { 2187 if (sc->sc_nqueues > 1) {
2188 max_type = PCI_INTR_TYPE_MSIX; 2188 max_type = PCI_INTR_TYPE_MSIX;
2189 /* 2189 /*
2190 * 82583 has a MSI-X capability in the PCI configuration space 2190 * 82583 has a MSI-X capability in the PCI configuration space
2191 * but it doesn't support it. At least the document doesn't 2191 * but it doesn't support it. At least the document doesn't
2192 * say anything about MSI-X. 2192 * say anything about MSI-X.
2193 */ 2193 */
2194 counts[PCI_INTR_TYPE_MSIX] 2194 counts[PCI_INTR_TYPE_MSIX]
2195 = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1; 2195 = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
2196 } else { 2196 } else {
2197 max_type = PCI_INTR_TYPE_MSI; 2197 max_type = PCI_INTR_TYPE_MSI;
2198 counts[PCI_INTR_TYPE_MSIX] = 0; 2198 counts[PCI_INTR_TYPE_MSIX] = 0;
2199 } 2199 }
2200 2200
2201 /* Allocation settings */ 2201 /* Allocation settings */
2202 counts[PCI_INTR_TYPE_MSI] = 1; 2202 counts[PCI_INTR_TYPE_MSI] = 1;
2203 counts[PCI_INTR_TYPE_INTX] = 1; 2203 counts[PCI_INTR_TYPE_INTX] = 1;
2204 /* overridden by disable flags */ 2204 /* overridden by disable flags */
2205 if (wm_disable_msi != 0) { 2205 if (wm_disable_msi != 0) {
2206 counts[PCI_INTR_TYPE_MSI] = 0; 2206 counts[PCI_INTR_TYPE_MSI] = 0;
2207 if (wm_disable_msix != 0) { 2207 if (wm_disable_msix != 0) {
2208 max_type = PCI_INTR_TYPE_INTX; 2208 max_type = PCI_INTR_TYPE_INTX;
2209 counts[PCI_INTR_TYPE_MSIX] = 0; 2209 counts[PCI_INTR_TYPE_MSIX] = 0;
2210 } 2210 }
2211 } else if (wm_disable_msix != 0) { 2211 } else if (wm_disable_msix != 0) {
2212 max_type = PCI_INTR_TYPE_MSI; 2212 max_type = PCI_INTR_TYPE_MSI;
2213 counts[PCI_INTR_TYPE_MSIX] = 0; 2213 counts[PCI_INTR_TYPE_MSIX] = 0;
2214 } 2214 }
2215 2215
2216alloc_retry: 2216alloc_retry:
2217 if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) { 2217 if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
2218 aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n"); 2218 aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
2219 return; 2219 return;
2220 } 2220 }
2221 2221
2222 if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) { 2222 if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
2223 error = wm_setup_msix(sc); 2223 error = wm_setup_msix(sc);
2224 if (error) { 2224 if (error) {
2225 pci_intr_release(pc, sc->sc_intrs, 2225 pci_intr_release(pc, sc->sc_intrs,
2226 counts[PCI_INTR_TYPE_MSIX]); 2226 counts[PCI_INTR_TYPE_MSIX]);
2227 2227
2228 /* Setup for MSI: Disable MSI-X */ 2228 /* Setup for MSI: Disable MSI-X */
2229 max_type = PCI_INTR_TYPE_MSI; 2229 max_type = PCI_INTR_TYPE_MSI;
2230 counts[PCI_INTR_TYPE_MSI] = 1; 2230 counts[PCI_INTR_TYPE_MSI] = 1;
2231 counts[PCI_INTR_TYPE_INTX] = 1; 2231 counts[PCI_INTR_TYPE_INTX] = 1;
2232 goto alloc_retry; 2232 goto alloc_retry;
2233 } 2233 }
2234 } else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) { 2234 } else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
2235 wm_adjust_qnum(sc, 0); /* Must not use multiqueue */ 2235 wm_adjust_qnum(sc, 0); /* Must not use multiqueue */
2236 error = wm_setup_legacy(sc); 2236 error = wm_setup_legacy(sc);
2237 if (error) { 2237 if (error) {
2238 pci_intr_release(sc->sc_pc, sc->sc_intrs, 2238 pci_intr_release(sc->sc_pc, sc->sc_intrs,
2239 counts[PCI_INTR_TYPE_MSI]); 2239 counts[PCI_INTR_TYPE_MSI]);
2240 2240
2241 /* The next try is for INTx: Disable MSI */ 2241 /* The next try is for INTx: Disable MSI */
2242 max_type = PCI_INTR_TYPE_INTX; 2242 max_type = PCI_INTR_TYPE_INTX;
2243 counts[PCI_INTR_TYPE_INTX] = 1; 2243 counts[PCI_INTR_TYPE_INTX] = 1;
2244 goto alloc_retry; 2244 goto alloc_retry;
2245 } 2245 }
2246 } else { 2246 } else {
2247 wm_adjust_qnum(sc, 0); /* Must not use multiqueue */ 2247 wm_adjust_qnum(sc, 0); /* Must not use multiqueue */
2248 error = wm_setup_legacy(sc); 2248 error = wm_setup_legacy(sc);
2249 if (error) { 2249 if (error) {
2250 pci_intr_release(sc->sc_pc, sc->sc_intrs, 2250 pci_intr_release(sc->sc_pc, sc->sc_intrs,
2251 counts[PCI_INTR_TYPE_INTX]); 2251 counts[PCI_INTR_TYPE_INTX]);
2252 return; 2252 return;
2253 } 2253 }
2254 } 2254 }
2255 2255
2256 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev)); 2256 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
2257 error = workqueue_create(&sc->sc_queue_wq, wqname, 2257 error = workqueue_create(&sc->sc_queue_wq, wqname,
2258 wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET, 2258 wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
2259 WM_WORKQUEUE_FLAGS); 2259 WM_WORKQUEUE_FLAGS);
2260 if (error) { 2260 if (error) {
2261 aprint_error_dev(sc->sc_dev, 2261 aprint_error_dev(sc->sc_dev,
2262 "unable to create workqueue\n"); 2262 "unable to create workqueue\n");
2263 goto out; 2263 goto out;
2264 } 2264 }
2265 2265
2266 /* 2266 /*
2267 * Check the function ID (unit number of the chip). 2267 * Check the function ID (unit number of the chip).
2268 */ 2268 */
2269 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3) 2269 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
2270 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003) 2270 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
2271 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576) 2271 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2272 || (sc->sc_type == WM_T_82580) 2272 || (sc->sc_type == WM_T_82580)
2273 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) 2273 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
2274 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS) 2274 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
2275 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK; 2275 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
2276 else 2276 else
2277 sc->sc_funcid = 0; 2277 sc->sc_funcid = 0;
2278 2278
2279 /* 2279 /*
2280 * Determine a few things about the bus we're connected to. 2280 * Determine a few things about the bus we're connected to.
2281 */ 2281 */
2282 if (sc->sc_type < WM_T_82543) { 2282 if (sc->sc_type < WM_T_82543) {
2283 /* We don't really know the bus characteristics here. */ 2283 /* We don't really know the bus characteristics here. */
2284 sc->sc_bus_speed = 33; 2284 sc->sc_bus_speed = 33;
2285 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) { 2285 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
2286 /* 2286 /*
2287 * CSA (Communication Streaming Architecture) is about as fast 2287 * CSA (Communication Streaming Architecture) is about as fast
2288 * as a 32-bit 66MHz PCI Bus. 2288 * as a 32-bit 66MHz PCI Bus.
2289 */ 2289 */
2290 sc->sc_flags |= WM_F_CSA; 2290 sc->sc_flags |= WM_F_CSA;
2291 sc->sc_bus_speed = 66; 2291 sc->sc_bus_speed = 66;
2292 aprint_verbose_dev(sc->sc_dev, 2292 aprint_verbose_dev(sc->sc_dev,
2293 "Communication Streaming Architecture\n"); 2293 "Communication Streaming Architecture\n");
2294 if (sc->sc_type == WM_T_82547) { 2294 if (sc->sc_type == WM_T_82547) {
2295 callout_init(&sc->sc_txfifo_ch, WM_CALLOUT_FLAGS); 2295 callout_init(&sc->sc_txfifo_ch, WM_CALLOUT_FLAGS);
2296 callout_setfunc(&sc->sc_txfifo_ch, 2296 callout_setfunc(&sc->sc_txfifo_ch,
2297 wm_82547_txfifo_stall, sc); 2297 wm_82547_txfifo_stall, sc);
2298 aprint_verbose_dev(sc->sc_dev, 2298 aprint_verbose_dev(sc->sc_dev,
2299 "using 82547 Tx FIFO stall work-around\n"); 2299 "using 82547 Tx FIFO stall work-around\n");
2300 } 2300 }
2301 } else if (sc->sc_type >= WM_T_82571) { 2301 } else if (sc->sc_type >= WM_T_82571) {
2302 sc->sc_flags |= WM_F_PCIE; 2302 sc->sc_flags |= WM_F_PCIE;
2303 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9) 2303 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
2304 && (sc->sc_type != WM_T_ICH10) 2304 && (sc->sc_type != WM_T_ICH10)
2305 && (sc->sc_type != WM_T_PCH) 2305 && (sc->sc_type != WM_T_PCH)
2306 && (sc->sc_type != WM_T_PCH2) 2306 && (sc->sc_type != WM_T_PCH2)
2307 && (sc->sc_type != WM_T_PCH_LPT) 2307 && (sc->sc_type != WM_T_PCH_LPT)
2308 && (sc->sc_type != WM_T_PCH_SPT) 2308 && (sc->sc_type != WM_T_PCH_SPT)
2309 && (sc->sc_type != WM_T_PCH_CNP) 2309 && (sc->sc_type != WM_T_PCH_CNP)
2310 && (sc->sc_type != WM_T_PCH_TGP)) { 2310 && (sc->sc_type != WM_T_PCH_TGP)) {
2311 /* ICH* and PCH* have no PCIe capability registers */ 2311 /* ICH* and PCH* have no PCIe capability registers */
2312 if (pci_get_capability(pa->pa_pc, pa->pa_tag, 2312 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2313 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff, 2313 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
2314 NULL) == 0) 2314 NULL) == 0)
2315 aprint_error_dev(sc->sc_dev, 2315 aprint_error_dev(sc->sc_dev,
2316 "unable to find PCIe capability\n"); 2316 "unable to find PCIe capability\n");
2317 } 2317 }
2318 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n"); 2318 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
2319 } else { 2319 } else {
2320 reg = CSR_READ(sc, WMREG_STATUS); 2320 reg = CSR_READ(sc, WMREG_STATUS);
2321 if (reg & STATUS_BUS64) 2321 if (reg & STATUS_BUS64)
2322 sc->sc_flags |= WM_F_BUS64; 2322 sc->sc_flags |= WM_F_BUS64;
2323 if ((reg & STATUS_PCIX_MODE) != 0) { 2323 if ((reg & STATUS_PCIX_MODE) != 0) {
2324 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb; 2324 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
2325 2325
2326 sc->sc_flags |= WM_F_PCIX; 2326 sc->sc_flags |= WM_F_PCIX;
2327 if (pci_get_capability(pa->pa_pc, pa->pa_tag, 2327 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2328 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0) 2328 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
2329 aprint_error_dev(sc->sc_dev, 2329 aprint_error_dev(sc->sc_dev,
2330 "unable to find PCIX capability\n"); 2330 "unable to find PCIX capability\n");
2331 else if (sc->sc_type != WM_T_82545_3 && 2331 else if (sc->sc_type != WM_T_82545_3 &&
2332 sc->sc_type != WM_T_82546_3) { 2332 sc->sc_type != WM_T_82546_3) {
2333 /* 2333 /*
2334 * Work around a problem caused by the BIOS 2334 * Work around a problem caused by the BIOS
2335 * setting the max memory read byte count 2335 * setting the max memory read byte count
2336 * incorrectly. 2336 * incorrectly.
2337 */ 2337 */
2338 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag, 2338 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
2339 sc->sc_pcixe_capoff + PCIX_CMD); 2339 sc->sc_pcixe_capoff + PCIX_CMD);
2340 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag, 2340 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
2341 sc->sc_pcixe_capoff + PCIX_STATUS); 2341 sc->sc_pcixe_capoff + PCIX_STATUS);
2342 2342
2343 bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >> 2343 bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
2344 PCIX_CMD_BYTECNT_SHIFT; 2344 PCIX_CMD_BYTECNT_SHIFT;
2345 maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >> 2345 maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
2346 PCIX_STATUS_MAXB_SHIFT; 2346 PCIX_STATUS_MAXB_SHIFT;
2347 if (bytecnt > maxb) { 2347 if (bytecnt > maxb) {
2348 aprint_verbose_dev(sc->sc_dev, 2348 aprint_verbose_dev(sc->sc_dev,
2349 "resetting PCI-X MMRBC: %d -> %d\n", 2349 "resetting PCI-X MMRBC: %d -> %d\n",
2350 512 << bytecnt, 512 << maxb); 2350 512 << bytecnt, 512 << maxb);
2351 pcix_cmd = (pcix_cmd & 2351 pcix_cmd = (pcix_cmd &
2352 ~PCIX_CMD_BYTECNT_MASK) | 2352 ~PCIX_CMD_BYTECNT_MASK) |
2353 (maxb << PCIX_CMD_BYTECNT_SHIFT); 2353 (maxb << PCIX_CMD_BYTECNT_SHIFT);
2354 pci_conf_write(pa->pa_pc, pa->pa_tag, 2354 pci_conf_write(pa->pa_pc, pa->pa_tag,
2355 sc->sc_pcixe_capoff + PCIX_CMD, 2355 sc->sc_pcixe_capoff + PCIX_CMD,
2356 pcix_cmd); 2356 pcix_cmd);
2357 } 2357 }
2358 } 2358 }
2359 } 2359 }
2360 /* 2360 /*
2361 * The quad port adapter is special; it has a PCIX-PCIX 2361 * The quad port adapter is special; it has a PCIX-PCIX
2362 * bridge on the board, and can run the secondary bus at 2362 * bridge on the board, and can run the secondary bus at
2363 * a higher speed. 2363 * a higher speed.
2364 */ 2364 */
2365 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) { 2365 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
2366 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120 2366 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
2367 : 66; 2367 : 66;
2368 } else if (sc->sc_flags & WM_F_PCIX) { 2368 } else if (sc->sc_flags & WM_F_PCIX) {
2369 switch (reg & STATUS_PCIXSPD_MASK) { 2369 switch (reg & STATUS_PCIXSPD_MASK) {
2370 case STATUS_PCIXSPD_50_66: 2370 case STATUS_PCIXSPD_50_66:
2371 sc->sc_bus_speed = 66; 2371 sc->sc_bus_speed = 66;
2372 break; 2372 break;
2373 case STATUS_PCIXSPD_66_100: 2373 case STATUS_PCIXSPD_66_100:
2374 sc->sc_bus_speed = 100; 2374 sc->sc_bus_speed = 100;
2375 break; 2375 break;
2376 case STATUS_PCIXSPD_100_133: 2376 case STATUS_PCIXSPD_100_133:
2377 sc->sc_bus_speed = 133; 2377 sc->sc_bus_speed = 133;
2378 break; 2378 break;
2379 default: 2379 default:
2380 aprint_error_dev(sc->sc_dev, 2380 aprint_error_dev(sc->sc_dev,
2381 "unknown PCIXSPD %d; assuming 66MHz\n", 2381 "unknown PCIXSPD %d; assuming 66MHz\n",
2382 reg & STATUS_PCIXSPD_MASK); 2382 reg & STATUS_PCIXSPD_MASK);
2383 sc->sc_bus_speed = 66; 2383 sc->sc_bus_speed = 66;
2384 break; 2384 break;
2385 } 2385 }
2386 } else 2386 } else
2387 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33; 2387 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
2388 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n", 2388 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
2389 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed, 2389 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
2390 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI"); 2390 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
2391 } 2391 }
2392 2392
2393 /* clear interesting stat counters */ 2393 /* clear interesting stat counters */
2394 CSR_READ(sc, WMREG_COLC); 2394 CSR_READ(sc, WMREG_COLC);
2395 CSR_READ(sc, WMREG_RXERRC); 2395 CSR_READ(sc, WMREG_RXERRC);
2396 2396
2397 if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583) 2397 if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
2398 || (sc->sc_type >= WM_T_ICH8)) 2398 || (sc->sc_type >= WM_T_ICH8))
2399 sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); 2399 sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2400 if (sc->sc_type >= WM_T_ICH8) 2400 if (sc->sc_type >= WM_T_ICH8)
2401 sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); 2401 sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2402 2402
2403 /* Set PHY, NVM mutex related stuff */ 2403 /* Set PHY, NVM mutex related stuff */
2404 switch (sc->sc_type) { 2404 switch (sc->sc_type) {
2405 case WM_T_82542_2_0: 2405 case WM_T_82542_2_0:
2406 case WM_T_82542_2_1: 2406 case WM_T_82542_2_1:
2407 case WM_T_82543: 2407 case WM_T_82543:
2408 case WM_T_82544: 2408 case WM_T_82544:
2409 /* Microwire */ 2409 /* Microwire */
2410 sc->nvm.read = wm_nvm_read_uwire; 2410 sc->nvm.read = wm_nvm_read_uwire;
2411 sc->sc_nvm_wordsize = 64; 2411 sc->sc_nvm_wordsize = 64;
2412 sc->sc_nvm_addrbits = 6; 2412 sc->sc_nvm_addrbits = 6;
2413 break; 2413 break;
2414 case WM_T_82540: 2414 case WM_T_82540:
2415 case WM_T_82545: 2415 case WM_T_82545:
2416 case WM_T_82545_3: 2416 case WM_T_82545_3:
2417 case WM_T_82546: 2417 case WM_T_82546:
2418 case WM_T_82546_3: 2418 case WM_T_82546_3:
2419 /* Microwire */ 2419 /* Microwire */
2420 sc->nvm.read = wm_nvm_read_uwire; 2420 sc->nvm.read = wm_nvm_read_uwire;
2421 reg = CSR_READ(sc, WMREG_EECD); 2421 reg = CSR_READ(sc, WMREG_EECD);
2422 if (reg & EECD_EE_SIZE) { 2422 if (reg & EECD_EE_SIZE) {
2423 sc->sc_nvm_wordsize = 256; 2423 sc->sc_nvm_wordsize = 256;
2424 sc->sc_nvm_addrbits = 8; 2424 sc->sc_nvm_addrbits = 8;
2425 } else { 2425 } else {
2426 sc->sc_nvm_wordsize = 64; 2426 sc->sc_nvm_wordsize = 64;
2427 sc->sc_nvm_addrbits = 6; 2427 sc->sc_nvm_addrbits = 6;
2428 } 2428 }
2429 sc->sc_flags |= WM_F_LOCK_EECD; 2429 sc->sc_flags |= WM_F_LOCK_EECD;
2430 sc->nvm.acquire = wm_get_eecd; 2430 sc->nvm.acquire = wm_get_eecd;
2431 sc->nvm.release = wm_put_eecd; 2431 sc->nvm.release = wm_put_eecd;
2432 break; 2432 break;
2433 case WM_T_82541: 2433 case WM_T_82541:
2434 case WM_T_82541_2: 2434 case WM_T_82541_2:
2435 case WM_T_82547: 2435 case WM_T_82547:
2436 case WM_T_82547_2: 2436 case WM_T_82547_2:
2437 reg = CSR_READ(sc, WMREG_EECD); 2437 reg = CSR_READ(sc, WMREG_EECD);
2438 /* 2438 /*
2439 * wm_nvm_set_addrbits_size_eecd() accesses SPI in it only 2439 * wm_nvm_set_addrbits_size_eecd() accesses SPI in it only
2440 * on 8254[17], so set flags and functions before calling it. 2440 * on 8254[17], so set flags and functions before calling it.
2441 */ 2441 */
2442 sc->sc_flags |= WM_F_LOCK_EECD; 2442 sc->sc_flags |= WM_F_LOCK_EECD;
2443 sc->nvm.acquire = wm_get_eecd; 2443 sc->nvm.acquire = wm_get_eecd;
2444 sc->nvm.release = wm_put_eecd; 2444 sc->nvm.release = wm_put_eecd;
2445 if (reg & EECD_EE_TYPE) { 2445 if (reg & EECD_EE_TYPE) {
2446 /* SPI */ 2446 /* SPI */
2447 sc->nvm.read = wm_nvm_read_spi; 2447 sc->nvm.read = wm_nvm_read_spi;
2448 sc->sc_flags |= WM_F_EEPROM_SPI; 2448 sc->sc_flags |= WM_F_EEPROM_SPI;
2449 wm_nvm_set_addrbits_size_eecd(sc); 2449 wm_nvm_set_addrbits_size_eecd(sc);
2450 } else { 2450 } else {
2451 /* Microwire */ 2451 /* Microwire */
2452 sc->nvm.read = wm_nvm_read_uwire; 2452 sc->nvm.read = wm_nvm_read_uwire;
2453 if ((reg & EECD_EE_ABITS) != 0) { 2453 if ((reg & EECD_EE_ABITS) != 0) {
2454 sc->sc_nvm_wordsize = 256; 2454 sc->sc_nvm_wordsize = 256;
2455 sc->sc_nvm_addrbits = 8; 2455 sc->sc_nvm_addrbits = 8;
2456 } else { 2456 } else {
2457 sc->sc_nvm_wordsize = 64; 2457 sc->sc_nvm_wordsize = 64;
2458 sc->sc_nvm_addrbits = 6; 2458 sc->sc_nvm_addrbits = 6;
2459 } 2459 }
2460 } 2460 }
2461 break; 2461 break;
2462 case WM_T_82571: 2462 case WM_T_82571:
2463 case WM_T_82572: 2463 case WM_T_82572:
2464 /* SPI */ 2464 /* SPI */
2465 sc->nvm.read = wm_nvm_read_eerd; 2465 sc->nvm.read = wm_nvm_read_eerd;
2466 /* Not use WM_F_LOCK_EECD because we use EERD */ 2466 /* Not use WM_F_LOCK_EECD because we use EERD */
2467 sc->sc_flags |= WM_F_EEPROM_SPI; 2467 sc->sc_flags |= WM_F_EEPROM_SPI;
2468 wm_nvm_set_addrbits_size_eecd(sc); 2468 wm_nvm_set_addrbits_size_eecd(sc);
2469 sc->phy.acquire = wm_get_swsm_semaphore; 2469 sc->phy.acquire = wm_get_swsm_semaphore;
2470 sc->phy.release = wm_put_swsm_semaphore; 2470 sc->phy.release = wm_put_swsm_semaphore;
2471 sc->nvm.acquire = wm_get_nvm_82571; 2471 sc->nvm.acquire = wm_get_nvm_82571;
2472 sc->nvm.release = wm_put_nvm_82571; 2472 sc->nvm.release = wm_put_nvm_82571;
2473 break; 2473 break;
2474 case WM_T_82573: 2474 case WM_T_82573:
2475 case WM_T_82574: 2475 case WM_T_82574:
2476 case WM_T_82583: 2476 case WM_T_82583:
2477 sc->nvm.read = wm_nvm_read_eerd; 2477 sc->nvm.read = wm_nvm_read_eerd;
2478 /* Not use WM_F_LOCK_EECD because we use EERD */ 2478 /* Not use WM_F_LOCK_EECD because we use EERD */
2479 if (sc->sc_type == WM_T_82573) { 2479 if (sc->sc_type == WM_T_82573) {
2480 sc->phy.acquire = wm_get_swsm_semaphore; 2480 sc->phy.acquire = wm_get_swsm_semaphore;
2481 sc->phy.release = wm_put_swsm_semaphore; 2481 sc->phy.release = wm_put_swsm_semaphore;
2482 sc->nvm.acquire = wm_get_nvm_82571; 2482 sc->nvm.acquire = wm_get_nvm_82571;
2483 sc->nvm.release = wm_put_nvm_82571; 2483 sc->nvm.release = wm_put_nvm_82571;
2484 } else { 2484 } else {
2485 /* Both PHY and NVM use the same semaphore. */ 2485 /* Both PHY and NVM use the same semaphore. */
2486 sc->phy.acquire = sc->nvm.acquire 2486 sc->phy.acquire = sc->nvm.acquire
2487 = wm_get_swfwhw_semaphore; 2487 = wm_get_swfwhw_semaphore;
2488 sc->phy.release = sc->nvm.release 2488 sc->phy.release = sc->nvm.release
2489 = wm_put_swfwhw_semaphore; 2489 = wm_put_swfwhw_semaphore;
2490 } 2490 }
2491 if (wm_nvm_is_onboard_eeprom(sc) == 0) { 2491 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
2492 sc->sc_flags |= WM_F_EEPROM_FLASH; 2492 sc->sc_flags |= WM_F_EEPROM_FLASH;
2493 sc->sc_nvm_wordsize = 2048; 2493 sc->sc_nvm_wordsize = 2048;
2494 } else { 2494 } else {
2495 /* SPI */ 2495 /* SPI */
2496 sc->sc_flags |= WM_F_EEPROM_SPI; 2496 sc->sc_flags |= WM_F_EEPROM_SPI;
2497 wm_nvm_set_addrbits_size_eecd(sc); 2497 wm_nvm_set_addrbits_size_eecd(sc);
2498 } 2498 }
2499 break; 2499 break;
2500 case WM_T_82575: 2500 case WM_T_82575:
2501 case WM_T_82576: 2501 case WM_T_82576:
2502 case WM_T_82580: 2502 case WM_T_82580:
2503 case WM_T_I350: 2503 case WM_T_I350:
2504 case WM_T_I354: 2504 case WM_T_I354:
2505 case WM_T_80003: 2505 case WM_T_80003:
2506 /* SPI */ 2506 /* SPI */
2507 sc->sc_flags |= WM_F_EEPROM_SPI; 2507 sc->sc_flags |= WM_F_EEPROM_SPI;
2508 wm_nvm_set_addrbits_size_eecd(sc); 2508 wm_nvm_set_addrbits_size_eecd(sc);
2509 if ((sc->sc_type == WM_T_80003) 2509 if ((sc->sc_type == WM_T_80003)
2510 || (sc->sc_nvm_wordsize < (1 << 15))) { 2510 || (sc->sc_nvm_wordsize < (1 << 15))) {
2511 sc->nvm.read = wm_nvm_read_eerd; 2511 sc->nvm.read = wm_nvm_read_eerd;
2512 /* Don't use WM_F_LOCK_EECD because we use EERD */ 2512 /* Don't use WM_F_LOCK_EECD because we use EERD */
2513 } else { 2513 } else {
2514 sc->nvm.read = wm_nvm_read_spi; 2514 sc->nvm.read = wm_nvm_read_spi;
2515 sc->sc_flags |= WM_F_LOCK_EECD; 2515 sc->sc_flags |= WM_F_LOCK_EECD;
2516 } 2516 }
2517 sc->phy.acquire = wm_get_phy_82575; 2517 sc->phy.acquire = wm_get_phy_82575;
2518 sc->phy.release = wm_put_phy_82575; 2518 sc->phy.release = wm_put_phy_82575;
2519 sc->nvm.acquire = wm_get_nvm_80003; 2519 sc->nvm.acquire = wm_get_nvm_80003;
2520 sc->nvm.release = wm_put_nvm_80003; 2520 sc->nvm.release = wm_put_nvm_80003;
2521 break; 2521 break;
2522 case WM_T_ICH8: 2522 case WM_T_ICH8:
2523 case WM_T_ICH9: 2523 case WM_T_ICH9:
2524 case WM_T_ICH10: 2524 case WM_T_ICH10:
2525 case WM_T_PCH: 2525 case WM_T_PCH:
2526 case WM_T_PCH2: 2526 case WM_T_PCH2:
2527 case WM_T_PCH_LPT: 2527 case WM_T_PCH_LPT:
2528 sc->nvm.read = wm_nvm_read_ich8; 2528 sc->nvm.read = wm_nvm_read_ich8;
2529 /* FLASH */ 2529 /* FLASH */
2530 sc->sc_flags |= WM_F_EEPROM_FLASH; 2530 sc->sc_flags |= WM_F_EEPROM_FLASH;
2531 sc->sc_nvm_wordsize = 2048; 2531 sc->sc_nvm_wordsize = 2048;
2532 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH); 2532 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
2533 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0, 2533 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
2534 &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) { 2534 &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
2535 aprint_error_dev(sc->sc_dev, 2535 aprint_error_dev(sc->sc_dev,
2536 "can't map FLASH registers\n"); 2536 "can't map FLASH registers\n");
2537 goto out; 2537 goto out;
2538 } 2538 }
2539 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG); 2539 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
2540 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) * 2540 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
2541 ICH_FLASH_SECTOR_SIZE; 2541 ICH_FLASH_SECTOR_SIZE;
2542 sc->sc_ich8_flash_bank_size = 2542 sc->sc_ich8_flash_bank_size =
2543 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1; 2543 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
2544 sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK); 2544 sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
2545 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE; 2545 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
2546 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t); 2546 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
2547 sc->sc_flashreg_offset = 0; 2547 sc->sc_flashreg_offset = 0;
2548 sc->phy.acquire = wm_get_swflag_ich8lan; 2548 sc->phy.acquire = wm_get_swflag_ich8lan;
2549 sc->phy.release = wm_put_swflag_ich8lan; 2549 sc->phy.release = wm_put_swflag_ich8lan;
2550 sc->nvm.acquire = wm_get_nvm_ich8lan; 2550 sc->nvm.acquire = wm_get_nvm_ich8lan;
2551 sc->nvm.release = wm_put_nvm_ich8lan; 2551 sc->nvm.release = wm_put_nvm_ich8lan;
2552 break; 2552 break;
2553 case WM_T_PCH_SPT: 2553 case WM_T_PCH_SPT:
2554 case WM_T_PCH_CNP: 2554 case WM_T_PCH_CNP:
2555 case WM_T_PCH_TGP: 2555 case WM_T_PCH_TGP:
2556 sc->nvm.read = wm_nvm_read_spt; 2556 sc->nvm.read = wm_nvm_read_spt;
2557 /* SPT has no GFPREG; flash registers mapped through BAR0 */ 2557 /* SPT has no GFPREG; flash registers mapped through BAR0 */
2558 sc->sc_flags |= WM_F_EEPROM_FLASH; 2558 sc->sc_flags |= WM_F_EEPROM_FLASH;
2559 sc->sc_flasht = sc->sc_st; 2559 sc->sc_flasht = sc->sc_st;
2560 sc->sc_flashh = sc->sc_sh; 2560 sc->sc_flashh = sc->sc_sh;
2561 sc->sc_ich8_flash_base = 0; 2561 sc->sc_ich8_flash_base = 0;
2562 sc->sc_nvm_wordsize = 2562 sc->sc_nvm_wordsize =
2563 (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1) 2563 (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
2564 * NVM_SIZE_MULTIPLIER; 2564 * NVM_SIZE_MULTIPLIER;
2565 /* It is size in bytes, we want words */ 2565 /* It is size in bytes, we want words */
2566 sc->sc_nvm_wordsize /= 2; 2566 sc->sc_nvm_wordsize /= 2;
2567 /* Assume 2 banks */ 2567 /* Assume 2 banks */
2568 sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2; 2568 sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
2569 sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET; 2569 sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
2570 sc->phy.acquire = wm_get_swflag_ich8lan; 2570 sc->phy.acquire = wm_get_swflag_ich8lan;
2571 sc->phy.release = wm_put_swflag_ich8lan; 2571 sc->phy.release = wm_put_swflag_ich8lan;
2572 sc->nvm.acquire = wm_get_nvm_ich8lan; 2572 sc->nvm.acquire = wm_get_nvm_ich8lan;
2573 sc->nvm.release = wm_put_nvm_ich8lan; 2573 sc->nvm.release = wm_put_nvm_ich8lan;
2574 break; 2574 break;
2575 case WM_T_I210: 2575 case WM_T_I210:
2576 case WM_T_I211: 2576 case WM_T_I211:
2577 /* Allow a single clear of the SW semaphore on I210 and newer*/ 2577 /* Allow a single clear of the SW semaphore on I210 and newer*/
2578 sc->sc_flags |= WM_F_WA_I210_CLSEM; 2578 sc->sc_flags |= WM_F_WA_I210_CLSEM;
2579 if (wm_nvm_flash_presence_i210(sc)) { 2579 if (wm_nvm_flash_presence_i210(sc)) {
2580 sc->nvm.read = wm_nvm_read_eerd; 2580 sc->nvm.read = wm_nvm_read_eerd;
2581 /* Don't use WM_F_LOCK_EECD because we use EERD */ 2581 /* Don't use WM_F_LOCK_EECD because we use EERD */
2582 sc->sc_flags |= WM_F_EEPROM_FLASH_HW; 2582 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2583 wm_nvm_set_addrbits_size_eecd(sc); 2583 wm_nvm_set_addrbits_size_eecd(sc);
2584 } else { 2584 } else {
2585 sc->nvm.read = wm_nvm_read_invm; 2585 sc->nvm.read = wm_nvm_read_invm;
2586 sc->sc_flags |= WM_F_EEPROM_INVM; 2586 sc->sc_flags |= WM_F_EEPROM_INVM;
2587 sc->sc_nvm_wordsize = INVM_SIZE; 2587 sc->sc_nvm_wordsize = INVM_SIZE;
2588 } 2588 }
2589 sc->phy.acquire = wm_get_phy_82575; 2589 sc->phy.acquire = wm_get_phy_82575;
2590 sc->phy.release = wm_put_phy_82575; 2590 sc->phy.release = wm_put_phy_82575;
2591 sc->nvm.acquire = wm_get_nvm_80003; 2591 sc->nvm.acquire = wm_get_nvm_80003;
2592 sc->nvm.release = wm_put_nvm_80003; 2592 sc->nvm.release = wm_put_nvm_80003;
2593 break; 2593 break;
2594 default: 2594 default:
2595 break; 2595 break;
2596 } 2596 }
2597 2597
2598 /* Ensure the SMBI bit is clear before first NVM or PHY access */ 2598 /* Ensure the SMBI bit is clear before first NVM or PHY access */
2599 switch (sc->sc_type) { 2599 switch (sc->sc_type) {
2600 case WM_T_82571: 2600 case WM_T_82571:
2601 case WM_T_82572: 2601 case WM_T_82572:
2602 reg = CSR_READ(sc, WMREG_SWSM2); 2602 reg = CSR_READ(sc, WMREG_SWSM2);
2603 if ((reg & SWSM2_LOCK) == 0) { 2603 if ((reg & SWSM2_LOCK) == 0) {
2604 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK); 2604 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2605 force_clear_smbi = true; 2605 force_clear_smbi = true;
2606 } else 2606 } else
2607 force_clear_smbi = false; 2607 force_clear_smbi = false;
2608 break; 2608 break;
2609 case WM_T_82573: 2609 case WM_T_82573:
2610 case WM_T_82574: 2610 case WM_T_82574:
2611 case WM_T_82583: 2611 case WM_T_82583:
2612 force_clear_smbi = true; 2612 force_clear_smbi = true;
2613 break; 2613 break;
2614 default: 2614 default:
2615 force_clear_smbi = false; 2615 force_clear_smbi = false;
2616 break; 2616 break;
2617 } 2617 }
2618 if (force_clear_smbi) { 2618 if (force_clear_smbi) {
2619 reg = CSR_READ(sc, WMREG_SWSM); 2619 reg = CSR_READ(sc, WMREG_SWSM);
2620 if ((reg & SWSM_SMBI) != 0) 2620 if ((reg & SWSM_SMBI) != 0)
2621 aprint_error_dev(sc->sc_dev, 2621 aprint_error_dev(sc->sc_dev,
2622 "Please update the Bootagent\n"); 2622 "Please update the Bootagent\n");
2623 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI); 2623 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2624 } 2624 }
2625 2625
2626 /* 2626 /*
2627 * Defer printing the EEPROM type until after verifying the checksum 2627 * Defer printing the EEPROM type until after verifying the checksum
2628 * This allows the EEPROM type to be printed correctly in the case 2628 * This allows the EEPROM type to be printed correctly in the case
2629 * that no EEPROM is attached. 2629 * that no EEPROM is attached.
2630 */ 2630 */
2631 /* 2631 /*
2632 * Validate the EEPROM checksum. If the checksum fails, flag 2632 * Validate the EEPROM checksum. If the checksum fails, flag
2633 * this for later, so we can fail future reads from the EEPROM. 2633 * this for later, so we can fail future reads from the EEPROM.
2634 */ 2634 */
2635 if (wm_nvm_validate_checksum(sc)) { 2635 if (wm_nvm_validate_checksum(sc)) {
2636 /* 2636 /*
2637 * Read twice again because some PCI-e parts fail the 2637 * Read twice again because some PCI-e parts fail the
2638 * first check due to the link being in sleep state. 2638 * first check due to the link being in sleep state.
2639 */ 2639 */
2640 if (wm_nvm_validate_checksum(sc)) 2640 if (wm_nvm_validate_checksum(sc))
2641 sc->sc_flags |= WM_F_EEPROM_INVALID; 2641 sc->sc_flags |= WM_F_EEPROM_INVALID;
2642 } 2642 }
2643 2643
2644 if (sc->sc_flags & WM_F_EEPROM_INVALID) 2644 if (sc->sc_flags & WM_F_EEPROM_INVALID)
2645 aprint_verbose_dev(sc->sc_dev, "No EEPROM"); 2645 aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2646 else { 2646 else {
2647 aprint_verbose_dev(sc->sc_dev, "%u words ", 2647 aprint_verbose_dev(sc->sc_dev, "%u words ",
2648 sc->sc_nvm_wordsize); 2648 sc->sc_nvm_wordsize);
2649 if (sc->sc_flags & WM_F_EEPROM_INVM) 2649 if (sc->sc_flags & WM_F_EEPROM_INVM)
2650 aprint_verbose("iNVM"); 2650 aprint_verbose("iNVM");
2651 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) 2651 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2652 aprint_verbose("FLASH(HW)"); 2652 aprint_verbose("FLASH(HW)");
2653 else if (sc->sc_flags & WM_F_EEPROM_FLASH) 2653 else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2654 aprint_verbose("FLASH"); 2654 aprint_verbose("FLASH");
2655 else { 2655 else {
2656 if (sc->sc_flags & WM_F_EEPROM_SPI) 2656 if (sc->sc_flags & WM_F_EEPROM_SPI)
2657 eetype = "SPI"; 2657 eetype = "SPI";
2658 else 2658 else
2659 eetype = "MicroWire"; 2659 eetype = "MicroWire";
2660 aprint_verbose("(%d address bits) %s EEPROM", 2660 aprint_verbose("(%d address bits) %s EEPROM",
2661 sc->sc_nvm_addrbits, eetype); 2661 sc->sc_nvm_addrbits, eetype);
2662 } 2662 }
2663 } 2663 }
2664 wm_nvm_version(sc); 2664 wm_nvm_version(sc);
2665 aprint_verbose("\n"); 2665 aprint_verbose("\n");
2666 2666
2667 /* 2667 /*
2668 * XXX The first call of wm_gmii_setup_phytype. The result might be 2668 * XXX The first call of wm_gmii_setup_phytype. The result might be
2669 * incorrect. 2669 * incorrect.
2670 */ 2670 */
2671 wm_gmii_setup_phytype(sc, 0, 0); 2671 wm_gmii_setup_phytype(sc, 0, 0);
2672 2672
2673 /* Check for WM_F_WOL on some chips before wm_reset() */ 2673 /* Check for WM_F_WOL on some chips before wm_reset() */
2674 switch (sc->sc_type) { 2674 switch (sc->sc_type) {
2675 case WM_T_ICH8: 2675 case WM_T_ICH8:
2676 case WM_T_ICH9: 2676 case WM_T_ICH9:
2677 case WM_T_ICH10: 2677 case WM_T_ICH10:
2678 case WM_T_PCH: 2678 case WM_T_PCH:
2679 case WM_T_PCH2: 2679 case WM_T_PCH2:
2680 case WM_T_PCH_LPT: 2680 case WM_T_PCH_LPT:
2681 case WM_T_PCH_SPT: 2681 case WM_T_PCH_SPT:
2682 case WM_T_PCH_CNP: 2682 case WM_T_PCH_CNP:
2683 case WM_T_PCH_TGP: 2683 case WM_T_PCH_TGP:
2684 apme_mask = WUC_APME; 2684 apme_mask = WUC_APME;
2685 eeprom_data = CSR_READ(sc, WMREG_WUC); 2685 eeprom_data = CSR_READ(sc, WMREG_WUC);
2686 if ((eeprom_data & apme_mask) != 0) 2686 if ((eeprom_data & apme_mask) != 0)
2687 sc->sc_flags |= WM_F_WOL; 2687 sc->sc_flags |= WM_F_WOL;
2688 break; 2688 break;
2689 default: 2689 default:
2690 break; 2690 break;
2691 } 2691 }
2692 2692
2693 /* Reset the chip to a known state. */ 2693 /* Reset the chip to a known state. */
2694 wm_reset(sc); 2694 wm_reset(sc);
2695 2695
 2696 /* sc->sc_pba is set in wm_reset(). */
 2697 aprint_verbose_dev(sc->sc_dev, "RX packet buffer size: %uKB\n",
 2698 sc->sc_pba);
 2699
2696 /* 2700 /*
2697 * Check for I21[01] PLL workaround. 2701 * Check for I21[01] PLL workaround.
2698 * 2702 *
2699 * Three cases: 2703 * Three cases:
2700 * a) Chip is I211. 2704 * a) Chip is I211.
2701 * b) Chip is I210 and it uses INVM (not FLASH). 2705 * b) Chip is I210 and it uses INVM (not FLASH).
2702 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25 2706 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
2703 */ 2707 */
2704 if (sc->sc_type == WM_T_I211) 2708 if (sc->sc_type == WM_T_I211)
2705 sc->sc_flags |= WM_F_PLL_WA_I210; 2709 sc->sc_flags |= WM_F_PLL_WA_I210;
2706 if (sc->sc_type == WM_T_I210) { 2710 if (sc->sc_type == WM_T_I210) {
2707 if (!wm_nvm_flash_presence_i210(sc)) 2711 if (!wm_nvm_flash_presence_i210(sc))
2708 sc->sc_flags |= WM_F_PLL_WA_I210; 2712 sc->sc_flags |= WM_F_PLL_WA_I210;
2709 else if ((sc->sc_nvm_ver_major < 3) 2713 else if ((sc->sc_nvm_ver_major < 3)
2710 || ((sc->sc_nvm_ver_major == 3) 2714 || ((sc->sc_nvm_ver_major == 3)
2711 && (sc->sc_nvm_ver_minor < 25))) { 2715 && (sc->sc_nvm_ver_minor < 25))) {
2712 aprint_verbose_dev(sc->sc_dev, 2716 aprint_verbose_dev(sc->sc_dev,
2713 "ROM image version %d.%d is older than 3.25\n", 2717 "ROM image version %d.%d is older than 3.25\n",
2714 sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor); 2718 sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2715 sc->sc_flags |= WM_F_PLL_WA_I210; 2719 sc->sc_flags |= WM_F_PLL_WA_I210;
2716 } 2720 }
2717 } 2721 }
2718 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0) 2722 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2719 wm_pll_workaround_i210(sc); 2723 wm_pll_workaround_i210(sc);
2720 2724
2721 wm_get_wakeup(sc); 2725 wm_get_wakeup(sc);
2722 2726
2723 /* Non-AMT based hardware can now take control from firmware */ 2727 /* Non-AMT based hardware can now take control from firmware */
2724 if ((sc->sc_flags & WM_F_HAS_AMT) == 0) 2728 if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2725 wm_get_hw_control(sc); 2729 wm_get_hw_control(sc);
2726 2730
2727 /* 2731 /*
2728 * Read the Ethernet address from the EEPROM, if not first found 2732 * Read the Ethernet address from the EEPROM, if not first found
2729 * in device properties. 2733 * in device properties.
2730 */ 2734 */
2731 ea = prop_dictionary_get(dict, "mac-address"); 2735 ea = prop_dictionary_get(dict, "mac-address");
2732 if (ea != NULL) { 2736 if (ea != NULL) {
2733 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA); 2737 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2734 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN); 2738 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2735 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN); 2739 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
2736 } else { 2740 } else {
2737 if (wm_read_mac_addr(sc, enaddr) != 0) { 2741 if (wm_read_mac_addr(sc, enaddr) != 0) {
2738 aprint_error_dev(sc->sc_dev, 2742 aprint_error_dev(sc->sc_dev,
2739 "unable to read Ethernet address\n"); 2743 "unable to read Ethernet address\n");
2740 goto out; 2744 goto out;
2741 } 2745 }
2742 } 2746 }
2743 2747
2744 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n", 2748 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2745 ether_sprintf(enaddr)); 2749 ether_sprintf(enaddr));
2746 2750
2747 /* 2751 /*
2748 * Read the config info from the EEPROM, and set up various 2752 * Read the config info from the EEPROM, and set up various
2749 * bits in the control registers based on their contents. 2753 * bits in the control registers based on their contents.
2750 */ 2754 */
2751 pn = prop_dictionary_get(dict, "i82543-cfg1"); 2755 pn = prop_dictionary_get(dict, "i82543-cfg1");
2752 if (pn != NULL) { 2756 if (pn != NULL) {
2753 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER); 2757 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2754 cfg1 = (uint16_t) prop_number_integer_value(pn); 2758 cfg1 = (uint16_t) prop_number_integer_value(pn);
2755 } else { 2759 } else {
2756 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) { 2760 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2757 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n"); 2761 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2758 goto out; 2762 goto out;
2759 } 2763 }
2760 } 2764 }
2761 2765
2762 pn = prop_dictionary_get(dict, "i82543-cfg2"); 2766 pn = prop_dictionary_get(dict, "i82543-cfg2");
2763 if (pn != NULL) { 2767 if (pn != NULL) {
2764 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER); 2768 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2765 cfg2 = (uint16_t) prop_number_integer_value(pn); 2769 cfg2 = (uint16_t) prop_number_integer_value(pn);
2766 } else { 2770 } else {
2767 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) { 2771 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2768 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n"); 2772 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2769 goto out; 2773 goto out;
2770 } 2774 }
2771 } 2775 }
2772 2776
2773 /* check for WM_F_WOL */ 2777 /* check for WM_F_WOL */
2774 switch (sc->sc_type) { 2778 switch (sc->sc_type) {
2775 case WM_T_82542_2_0: 2779 case WM_T_82542_2_0:
2776 case WM_T_82542_2_1: 2780 case WM_T_82542_2_1:
2777 case WM_T_82543: 2781 case WM_T_82543:
2778 /* dummy? */ 2782 /* dummy? */
2779 eeprom_data = 0; 2783 eeprom_data = 0;
2780 apme_mask = NVM_CFG3_APME; 2784 apme_mask = NVM_CFG3_APME;
2781 break; 2785 break;
2782 case WM_T_82544: 2786 case WM_T_82544:
2783 apme_mask = NVM_CFG2_82544_APM_EN; 2787 apme_mask = NVM_CFG2_82544_APM_EN;
2784 eeprom_data = cfg2; 2788 eeprom_data = cfg2;
2785 break; 2789 break;
2786 case WM_T_82546: 2790 case WM_T_82546:
2787 case WM_T_82546_3: 2791 case WM_T_82546_3:
2788 case WM_T_82571: 2792 case WM_T_82571:
2789 case WM_T_82572: 2793 case WM_T_82572:
2790 case WM_T_82573: 2794 case WM_T_82573:
2791 case WM_T_82574: 2795 case WM_T_82574:
2792 case WM_T_82583: 2796 case WM_T_82583:
2793 case WM_T_80003: 2797 case WM_T_80003:
2794 case WM_T_82575: 2798 case WM_T_82575:
2795 case WM_T_82576: 2799 case WM_T_82576:
2796 apme_mask = NVM_CFG3_APME; 2800 apme_mask = NVM_CFG3_APME;
2797 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB 2801 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2798 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data); 2802 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2799 break; 2803 break;
2800 case WM_T_82580: 2804 case WM_T_82580:
2801 case WM_T_I350: 2805 case WM_T_I350:
2802 case WM_T_I354: 2806 case WM_T_I354:
2803 case WM_T_I210: 2807 case WM_T_I210:
2804 case WM_T_I211: 2808 case WM_T_I211:
2805 apme_mask = NVM_CFG3_APME; 2809 apme_mask = NVM_CFG3_APME;
2806 wm_nvm_read(sc, 2810 wm_nvm_read(sc,
2807 NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA, 2811 NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
2808 1, &eeprom_data); 2812 1, &eeprom_data);
2809 break; 2813 break;
2810 case WM_T_ICH8: 2814 case WM_T_ICH8:
2811 case WM_T_ICH9: 2815 case WM_T_ICH9:
2812 case WM_T_ICH10: 2816 case WM_T_ICH10:
2813 case WM_T_PCH: 2817 case WM_T_PCH:
2814 case WM_T_PCH2: 2818 case WM_T_PCH2:
2815 case WM_T_PCH_LPT: 2819 case WM_T_PCH_LPT:
2816 case WM_T_PCH_SPT: 2820 case WM_T_PCH_SPT:
2817 case WM_T_PCH_CNP: 2821 case WM_T_PCH_CNP:
2818 case WM_T_PCH_TGP: 2822 case WM_T_PCH_TGP:
2819 /* Already checked before wm_reset () */ 2823 /* Already checked before wm_reset () */
2820 apme_mask = eeprom_data = 0; 2824 apme_mask = eeprom_data = 0;
2821 break; 2825 break;
2822 default: /* XXX 82540 */ 2826 default: /* XXX 82540 */
2823 apme_mask = NVM_CFG3_APME; 2827 apme_mask = NVM_CFG3_APME;
2824 wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data); 2828 wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2825 break; 2829 break;
2826 } 2830 }
2827 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */ 2831 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2828 if ((eeprom_data & apme_mask) != 0) 2832 if ((eeprom_data & apme_mask) != 0)
2829 sc->sc_flags |= WM_F_WOL; 2833 sc->sc_flags |= WM_F_WOL;
2830 2834
2831 /* 2835 /*
2832 * We have the eeprom settings, now apply the special cases 2836 * We have the eeprom settings, now apply the special cases
2833 * where the eeprom may be wrong or the board won't support 2837 * where the eeprom may be wrong or the board won't support
2834 * wake on lan on a particular port 2838 * wake on lan on a particular port
2835 */ 2839 */
2836 switch (sc->sc_pcidevid) { 2840 switch (sc->sc_pcidevid) {
2837 case PCI_PRODUCT_INTEL_82546GB_PCIE: 2841 case PCI_PRODUCT_INTEL_82546GB_PCIE:
2838 sc->sc_flags &= ~WM_F_WOL; 2842 sc->sc_flags &= ~WM_F_WOL;
2839 break; 2843 break;
2840 case PCI_PRODUCT_INTEL_82546EB_FIBER: 2844 case PCI_PRODUCT_INTEL_82546EB_FIBER:
2841 case PCI_PRODUCT_INTEL_82546GB_FIBER: 2845 case PCI_PRODUCT_INTEL_82546GB_FIBER:
2842 /* Wake events only supported on port A for dual fiber 2846 /* Wake events only supported on port A for dual fiber
2843 * regardless of eeprom setting */ 2847 * regardless of eeprom setting */
2844 if (sc->sc_funcid == 1) 2848 if (sc->sc_funcid == 1)
2845 sc->sc_flags &= ~WM_F_WOL; 2849 sc->sc_flags &= ~WM_F_WOL;
2846 break; 2850 break;
2847 case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3: 2851 case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
2848 /* If quad port adapter, disable WoL on all but port A */ 2852 /* If quad port adapter, disable WoL on all but port A */
2849 if (sc->sc_funcid != 0) 2853 if (sc->sc_funcid != 0)
2850 sc->sc_flags &= ~WM_F_WOL; 2854 sc->sc_flags &= ~WM_F_WOL;
2851 break; 2855 break;
2852 case PCI_PRODUCT_INTEL_82571EB_FIBER: 2856 case PCI_PRODUCT_INTEL_82571EB_FIBER:
2853 /* Wake events only supported on port A for dual fiber 2857 /* Wake events only supported on port A for dual fiber
2854 * regardless of eeprom setting */ 2858 * regardless of eeprom setting */
2855 if (sc->sc_funcid == 1) 2859 if (sc->sc_funcid == 1)
2856 sc->sc_flags &= ~WM_F_WOL; 2860 sc->sc_flags &= ~WM_F_WOL;
2857 break; 2861 break;
2858 case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER: 2862 case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
2859 case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER: 2863 case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
2860 case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER: 2864 case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
2861 /* If quad port adapter, disable WoL on all but port A */ 2865 /* If quad port adapter, disable WoL on all but port A */
2862 if (sc->sc_funcid != 0) 2866 if (sc->sc_funcid != 0)
2863 sc->sc_flags &= ~WM_F_WOL; 2867 sc->sc_flags &= ~WM_F_WOL;
2864 break; 2868 break;
2865 } 2869 }
2866 2870
2867 if (sc->sc_type >= WM_T_82575) { 2871 if (sc->sc_type >= WM_T_82575) {
2868 if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) { 2872 if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2869 aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n", 2873 aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
2870 nvmword); 2874 nvmword);
2871 if ((sc->sc_type == WM_T_82575) || 2875 if ((sc->sc_type == WM_T_82575) ||
2872 (sc->sc_type == WM_T_82576)) { 2876 (sc->sc_type == WM_T_82576)) {
2873 /* Check NVM for autonegotiation */ 2877 /* Check NVM for autonegotiation */
2874 if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) 2878 if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
2875 != 0) 2879 != 0)
2876 sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO; 2880 sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2877 } 2881 }
2878 if ((sc->sc_type == WM_T_82575) || 2882 if ((sc->sc_type == WM_T_82575) ||
2879 (sc->sc_type == WM_T_I350)) { 2883 (sc->sc_type == WM_T_I350)) {
2880 if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid)) 2884 if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
2881 sc->sc_flags |= WM_F_MAS; 2885 sc->sc_flags |= WM_F_MAS;
2882 } 2886 }
2883 } 2887 }
2884 } 2888 }
2885 2889
2886 /* 2890 /*
2887 * XXX need special handling for some multiple port cards 2891 * XXX need special handling for some multiple port cards
2888 * to disable a paticular port. 2892 * to disable a paticular port.
2889 */ 2893 */
2890 2894
2891 if (sc->sc_type >= WM_T_82544) { 2895 if (sc->sc_type >= WM_T_82544) {
2892 pn = prop_dictionary_get(dict, "i82543-swdpin"); 2896 pn = prop_dictionary_get(dict, "i82543-swdpin");
2893 if (pn != NULL) { 2897 if (pn != NULL) {
2894 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER); 2898 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2895 swdpin = (uint16_t) prop_number_integer_value(pn); 2899 swdpin = (uint16_t) prop_number_integer_value(pn);
2896 } else { 2900 } else {
2897 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) { 2901 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2898 aprint_error_dev(sc->sc_dev, 2902 aprint_error_dev(sc->sc_dev,
2899 "unable to read SWDPIN\n"); 2903 "unable to read SWDPIN\n");
2900 goto out; 2904 goto out;
2901 } 2905 }
2902 } 2906 }
2903 } 2907 }
2904 2908
2905 if (cfg1 & NVM_CFG1_ILOS) 2909 if (cfg1 & NVM_CFG1_ILOS)
2906 sc->sc_ctrl |= CTRL_ILOS; 2910 sc->sc_ctrl |= CTRL_ILOS;
2907 2911
2908 /* 2912 /*
2909 * XXX 2913 * XXX
2910 * This code isn't correct because pin 2 and 3 are located 2914 * This code isn't correct because pin 2 and 3 are located
2911 * in different position on newer chips. Check all datasheet. 2915 * in different position on newer chips. Check all datasheet.
2912 * 2916 *
2913 * Until resolve this problem, check if a chip < 82580 2917 * Until resolve this problem, check if a chip < 82580
2914 */ 2918 */
2915 if (sc->sc_type <= WM_T_82580) { 2919 if (sc->sc_type <= WM_T_82580) {
2916 if (sc->sc_type >= WM_T_82544) { 2920 if (sc->sc_type >= WM_T_82544) {
2917 sc->sc_ctrl |= 2921 sc->sc_ctrl |=
2918 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) << 2922 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2919 CTRL_SWDPIO_SHIFT; 2923 CTRL_SWDPIO_SHIFT;
2920 sc->sc_ctrl |= 2924 sc->sc_ctrl |=
2921 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) << 2925 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2922 CTRL_SWDPINS_SHIFT; 2926 CTRL_SWDPINS_SHIFT;
2923 } else { 2927 } else {
2924 sc->sc_ctrl |= 2928 sc->sc_ctrl |=
2925 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) << 2929 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2926 CTRL_SWDPIO_SHIFT; 2930 CTRL_SWDPIO_SHIFT;
2927 } 2931 }
2928 } 2932 }
2929 2933
2930 if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) { 2934 if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
2931 wm_nvm_read(sc, 2935 wm_nvm_read(sc,
2932 NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA, 2936 NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
2933 1, &nvmword); 2937 1, &nvmword);
2934 if (nvmword & NVM_CFG3_ILOS) 2938 if (nvmword & NVM_CFG3_ILOS)
2935 sc->sc_ctrl |= CTRL_ILOS; 2939 sc->sc_ctrl |= CTRL_ILOS;
2936 } 2940 }
2937 2941
2938#if 0 2942#if 0
2939 if (sc->sc_type >= WM_T_82544) { 2943 if (sc->sc_type >= WM_T_82544) {
2940 if (cfg1 & NVM_CFG1_IPS0) 2944 if (cfg1 & NVM_CFG1_IPS0)
2941 sc->sc_ctrl_ext |= CTRL_EXT_IPS; 2945 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2942 if (cfg1 & NVM_CFG1_IPS1) 2946 if (cfg1 & NVM_CFG1_IPS1)
2943 sc->sc_ctrl_ext |= CTRL_EXT_IPS1; 2947 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2944 sc->sc_ctrl_ext |= 2948 sc->sc_ctrl_ext |=
2945 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) << 2949 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2946 CTRL_EXT_SWDPIO_SHIFT; 2950 CTRL_EXT_SWDPIO_SHIFT;
2947 sc->sc_ctrl_ext |= 2951 sc->sc_ctrl_ext |=
2948 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) << 2952 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2949 CTRL_EXT_SWDPINS_SHIFT; 2953 CTRL_EXT_SWDPINS_SHIFT;
2950 } else { 2954 } else {
2951 sc->sc_ctrl_ext |= 2955 sc->sc_ctrl_ext |=
2952 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) << 2956 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2953 CTRL_EXT_SWDPIO_SHIFT; 2957 CTRL_EXT_SWDPIO_SHIFT;
2954 } 2958 }
2955#endif 2959#endif
2956 2960
2957 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 2961 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2958#if 0 2962#if 0
2959 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext); 2963 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2960#endif 2964#endif
2961 2965
2962 if (sc->sc_type == WM_T_PCH) { 2966 if (sc->sc_type == WM_T_PCH) {
2963 uint16_t val; 2967 uint16_t val;
2964 2968
2965 /* Save the NVM K1 bit setting */ 2969 /* Save the NVM K1 bit setting */
2966 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val); 2970 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2967 2971
2968 if ((val & NVM_K1_CONFIG_ENABLE) != 0) 2972 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2969 sc->sc_nvm_k1_enabled = 1; 2973 sc->sc_nvm_k1_enabled = 1;
2970 else 2974 else
2971 sc->sc_nvm_k1_enabled = 0; 2975 sc->sc_nvm_k1_enabled = 0;
2972 } 2976 }
2973 2977
2974 /* Determine if we're GMII, TBI, SERDES or SGMII mode */ 2978 /* Determine if we're GMII, TBI, SERDES or SGMII mode */
2975 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9 2979 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2976 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH 2980 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2977 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT 2981 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2978 || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP 2982 || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
2979 || sc->sc_type == WM_T_PCH_TGP 2983 || sc->sc_type == WM_T_PCH_TGP
2980 || sc->sc_type == WM_T_82573 2984 || sc->sc_type == WM_T_82573
2981 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) { 2985 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2982 /* Copper only */ 2986 /* Copper only */
2983 } else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576) 2987 } else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2984 || (sc->sc_type ==WM_T_82580) || (sc->sc_type ==WM_T_I350) 2988 || (sc->sc_type ==WM_T_82580) || (sc->sc_type ==WM_T_I350)
2985 || (sc->sc_type ==WM_T_I354) || (sc->sc_type ==WM_T_I210) 2989 || (sc->sc_type ==WM_T_I354) || (sc->sc_type ==WM_T_I210)
2986 || (sc->sc_type ==WM_T_I211)) { 2990 || (sc->sc_type ==WM_T_I211)) {
2987 reg = CSR_READ(sc, WMREG_CTRL_EXT); 2991 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2988 link_mode = reg & CTRL_EXT_LINK_MODE_MASK; 2992 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2989 switch (link_mode) { 2993 switch (link_mode) {
2990 case CTRL_EXT_LINK_MODE_1000KX: 2994 case CTRL_EXT_LINK_MODE_1000KX:
2991 aprint_normal_dev(sc->sc_dev, "1000KX\n"); 2995 aprint_normal_dev(sc->sc_dev, "1000KX\n");
2992 sc->sc_mediatype = WM_MEDIATYPE_SERDES; 2996 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2993 break; 2997 break;
2994 case CTRL_EXT_LINK_MODE_SGMII: 2998 case CTRL_EXT_LINK_MODE_SGMII:
2995 if (wm_sgmii_uses_mdio(sc)) { 2999 if (wm_sgmii_uses_mdio(sc)) {
2996 aprint_normal_dev(sc->sc_dev, 3000 aprint_normal_dev(sc->sc_dev,
2997 "SGMII(MDIO)\n"); 3001 "SGMII(MDIO)\n");
2998 sc->sc_flags |= WM_F_SGMII; 3002 sc->sc_flags |= WM_F_SGMII;
2999 sc->sc_mediatype = WM_MEDIATYPE_COPPER; 3003 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
3000 break; 3004 break;
3001 } 3005 }
3002 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n"); 3006 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
3003 /*FALLTHROUGH*/ 3007 /*FALLTHROUGH*/
3004 case CTRL_EXT_LINK_MODE_PCIE_SERDES: 3008 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
3005 sc->sc_mediatype = wm_sfp_get_media_type(sc); 3009 sc->sc_mediatype = wm_sfp_get_media_type(sc);
3006 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) { 3010 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
3007 if (link_mode 3011 if (link_mode
3008 == CTRL_EXT_LINK_MODE_SGMII) { 3012 == CTRL_EXT_LINK_MODE_SGMII) {
3009 sc->sc_mediatype = WM_MEDIATYPE_COPPER; 3013 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
3010 sc->sc_flags |= WM_F_SGMII; 3014 sc->sc_flags |= WM_F_SGMII;
3011 aprint_verbose_dev(sc->sc_dev, 3015 aprint_verbose_dev(sc->sc_dev,
3012 "SGMII\n"); 3016 "SGMII\n");
3013 } else { 3017 } else {
3014 sc->sc_mediatype = WM_MEDIATYPE_SERDES; 3018 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
3015 aprint_verbose_dev(sc->sc_dev, 3019 aprint_verbose_dev(sc->sc_dev,
3016 "SERDES\n"); 3020 "SERDES\n");
3017 } 3021 }
3018 break; 3022 break;
3019 } 3023 }
3020 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) 3024 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
3021 aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n"); 3025 aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
3022 else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) { 3026 else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
3023 aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n"); 3027 aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
3024 sc->sc_flags |= WM_F_SGMII; 3028 sc->sc_flags |= WM_F_SGMII;
3025 } 3029 }
3026 /* Do not change link mode for 100BaseFX */ 3030 /* Do not change link mode for 100BaseFX */
3027 if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX) 3031 if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
3028 break; 3032 break;
3029 3033
3030 /* Change current link mode setting */ 3034 /* Change current link mode setting */
3031 reg &= ~CTRL_EXT_LINK_MODE_MASK; 3035 reg &= ~CTRL_EXT_LINK_MODE_MASK;
3032 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) 3036 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
3033 reg |= CTRL_EXT_LINK_MODE_SGMII; 3037 reg |= CTRL_EXT_LINK_MODE_SGMII;
3034 else 3038 else
3035 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES; 3039 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
3036 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 3040 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3037 break; 3041 break;
3038 case CTRL_EXT_LINK_MODE_GMII: 3042 case CTRL_EXT_LINK_MODE_GMII:
3039 default: 3043 default:
3040 aprint_normal_dev(sc->sc_dev, "Copper\n"); 3044 aprint_normal_dev(sc->sc_dev, "Copper\n");
3041 sc->sc_mediatype = WM_MEDIATYPE_COPPER; 3045 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
3042 break; 3046 break;
3043 } 3047 }
3044 3048
3045 reg &= ~CTRL_EXT_I2C_ENA; 3049 reg &= ~CTRL_EXT_I2C_ENA;
3046 if ((sc->sc_flags & WM_F_SGMII) != 0) 3050 if ((sc->sc_flags & WM_F_SGMII) != 0)
3047 reg |= CTRL_EXT_I2C_ENA; 3051 reg |= CTRL_EXT_I2C_ENA;
3048 else 3052 else
3049 reg &= ~CTRL_EXT_I2C_ENA; 3053 reg &= ~CTRL_EXT_I2C_ENA;
3050 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 3054 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3051 if ((sc->sc_flags & WM_F_SGMII) != 0) { 3055 if ((sc->sc_flags & WM_F_SGMII) != 0) {
3052 if (!wm_sgmii_uses_mdio(sc)) 3056 if (!wm_sgmii_uses_mdio(sc))
3053 wm_gmii_setup_phytype(sc, 0, 0); 3057 wm_gmii_setup_phytype(sc, 0, 0);
3054 wm_reset_mdicnfg_82580(sc); 3058 wm_reset_mdicnfg_82580(sc);
3055 } 3059 }
3056 } else if (sc->sc_type < WM_T_82543 || 3060 } else if (sc->sc_type < WM_T_82543 ||
3057 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) { 3061 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
3058 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) { 3062 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
3059 aprint_error_dev(sc->sc_dev, 3063 aprint_error_dev(sc->sc_dev,
3060 "WARNING: TBIMODE set on 1000BASE-T product!\n"); 3064 "WARNING: TBIMODE set on 1000BASE-T product!\n");
3061 sc->sc_mediatype = WM_MEDIATYPE_FIBER; 3065 sc->sc_mediatype = WM_MEDIATYPE_FIBER;
3062 } 3066 }
3063 } else { 3067 } else {
3064 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) { 3068 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
3065 aprint_error_dev(sc->sc_dev, 3069 aprint_error_dev(sc->sc_dev,
3066 "WARNING: TBIMODE clear on 1000BASE-X product!\n"); 3070 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
3067 sc->sc_mediatype = WM_MEDIATYPE_COPPER; 3071 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
3068 } 3072 }
3069 } 3073 }
3070 3074
3071 /* 3075 /*
3072 * The I350 has a bug where it always strips the CRC whether 3076 * The I350 has a bug where it always strips the CRC whether
3073 * asked to or not. So ask for stripped CRC here and cope in rxeof 3077 * asked to or not. So ask for stripped CRC here and cope in rxeof
3074 */ 3078 */
3075 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354) 3079 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3076 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) 3080 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3077 sc->sc_flags |= WM_F_CRC_STRIP; 3081 sc->sc_flags |= WM_F_CRC_STRIP;
3078 3082
3079 /* 3083 /*
3080 * Workaround for some chips to delay sending LINK_STATE_UP. 3084 * Workaround for some chips to delay sending LINK_STATE_UP.
3081 * Some systems can't send packet soon after linkup. See also 3085 * Some systems can't send packet soon after linkup. See also
3082 * wm_linkintr_gmii(), wm_tick() and wm_gmii_mediastatus(). 3086 * wm_linkintr_gmii(), wm_tick() and wm_gmii_mediastatus().
3083 */ 3087 */
3084 switch (sc->sc_type) { 3088 switch (sc->sc_type) {
3085 case WM_T_I350: 3089 case WM_T_I350:
3086 case WM_T_I354: 3090 case WM_T_I354:
3087 case WM_T_I210: 3091 case WM_T_I210:
3088 case WM_T_I211: 3092 case WM_T_I211:
3089 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) 3093 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
3090 sc->sc_flags |= WM_F_DELAY_LINKUP; 3094 sc->sc_flags |= WM_F_DELAY_LINKUP;
3091 break; 3095 break;
3092 default: 3096 default:
3093 break; 3097 break;
3094 } 3098 }
3095 3099
3096 /* Set device properties (macflags) */ 3100 /* Set device properties (macflags) */
3097 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags); 3101 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
3098 3102
3099 if (sc->sc_flags != 0) { 3103 if (sc->sc_flags != 0) {
3100 snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags); 3104 snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
3101 aprint_verbose_dev(sc->sc_dev, "%s\n", buf); 3105 aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
3102 } 3106 }
3103 3107
3104 /* Initialize the media structures accordingly. */ 3108 /* Initialize the media structures accordingly. */
3105 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) 3109 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
3106 wm_gmii_mediainit(sc, wmp->wmp_product); 3110 wm_gmii_mediainit(sc, wmp->wmp_product);
3107 else 3111 else
3108 wm_tbi_mediainit(sc); /* All others */ 3112 wm_tbi_mediainit(sc); /* All others */
3109 3113
3110 ifp = &sc->sc_ethercom.ec_if; 3114 ifp = &sc->sc_ethercom.ec_if;
3111 xname = device_xname(sc->sc_dev); 3115 xname = device_xname(sc->sc_dev);
3112 strlcpy(ifp->if_xname, xname, IFNAMSIZ); 3116 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
3113 ifp->if_softc = sc; 3117 ifp->if_softc = sc;
3114 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 3118 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3115#ifdef WM_MPSAFE 3119#ifdef WM_MPSAFE
3116 ifp->if_extflags = IFEF_MPSAFE; 3120 ifp->if_extflags = IFEF_MPSAFE;
3117#endif 3121#endif
3118 ifp->if_ioctl = wm_ioctl; 3122 ifp->if_ioctl = wm_ioctl;
3119 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { 3123 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3120 ifp->if_start = wm_nq_start; 3124 ifp->if_start = wm_nq_start;
3121 /* 3125 /*
3122 * When the number of CPUs is one and the controller can use 3126 * When the number of CPUs is one and the controller can use
3123 * MSI-X, wm(4) use MSI-X but *does not* use multiqueue. 3127 * MSI-X, wm(4) use MSI-X but *does not* use multiqueue.
3124 * That is, wm(4) use two interrupts, one is used for Tx/Rx 3128 * That is, wm(4) use two interrupts, one is used for Tx/Rx
3125 * and the other is used for link status changing. 3129 * and the other is used for link status changing.
3126 * In this situation, wm_nq_transmit() is disadvantageous 3130 * In this situation, wm_nq_transmit() is disadvantageous
3127 * because of wm_select_txqueue() and pcq(9) overhead. 3131 * because of wm_select_txqueue() and pcq(9) overhead.
3128 */ 3132 */
3129 if (wm_is_using_multiqueue(sc)) 3133 if (wm_is_using_multiqueue(sc))
3130 ifp->if_transmit = wm_nq_transmit; 3134 ifp->if_transmit = wm_nq_transmit;
3131 } else { 3135 } else {
3132 ifp->if_start = wm_start; 3136 ifp->if_start = wm_start;
3133 /* 3137 /*
3134 * wm_transmit() has the same disadvantages as wm_nq_transmit() 3138 * wm_transmit() has the same disadvantages as wm_nq_transmit()
3135 * described above. 3139 * described above.
3136 */ 3140 */
3137 if (wm_is_using_multiqueue(sc)) 3141 if (wm_is_using_multiqueue(sc))
3138 ifp->if_transmit = wm_transmit; 3142 ifp->if_transmit = wm_transmit;
3139 } 3143 }
3140 /* wm(4) doest not use ifp->if_watchdog, use wm_tick as watchdog. */ 3144 /* wm(4) doest not use ifp->if_watchdog, use wm_tick as watchdog. */
3141 ifp->if_init = wm_init; 3145 ifp->if_init = wm_init;
3142 ifp->if_stop = wm_stop; 3146 ifp->if_stop = wm_stop;
3143 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN)); 3147 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
3144 IFQ_SET_READY(&ifp->if_snd); 3148 IFQ_SET_READY(&ifp->if_snd);
3145 3149
3146 /* Check for jumbo frame */ 3150 /* Check for jumbo frame */
3147 switch (sc->sc_type) { 3151 switch (sc->sc_type) {
3148 case WM_T_82573: 3152 case WM_T_82573:
3149 /* XXX limited to 9234 if ASPM is disabled */ 3153 /* XXX limited to 9234 if ASPM is disabled */
3150 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword); 3154 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
3151 if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0) 3155 if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
3152 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 3156 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3153 break; 3157 break;
3154 case WM_T_82571: 3158 case WM_T_82571:
3155 case WM_T_82572: 3159 case WM_T_82572:
3156 case WM_T_82574: 3160 case WM_T_82574:
3157 case WM_T_82583: 3161 case WM_T_82583:
3158 case WM_T_82575: 3162 case WM_T_82575:
3159 case WM_T_82576: 3163 case WM_T_82576:
3160 case WM_T_82580: 3164 case WM_T_82580:
3161 case WM_T_I350: 3165 case WM_T_I350:
3162 case WM_T_I354: 3166 case WM_T_I354:
3163 case WM_T_I210: 3167 case WM_T_I210:
3164 case WM_T_I211: 3168 case WM_T_I211:
3165 case WM_T_80003: 3169 case WM_T_80003:
3166 case WM_T_ICH9: 3170 case WM_T_ICH9:
3167 case WM_T_ICH10: 3171 case WM_T_ICH10:
3168 case WM_T_PCH2: /* PCH2 supports 9K frame size */ 3172 case WM_T_PCH2: /* PCH2 supports 9K frame size */
3169 case WM_T_PCH_LPT: 3173 case WM_T_PCH_LPT:
3170 case WM_T_PCH_SPT: 3174 case WM_T_PCH_SPT:
3171 case WM_T_PCH_CNP: 3175 case WM_T_PCH_CNP:
3172 case WM_T_PCH_TGP: 3176 case WM_T_PCH_TGP:
3173 /* XXX limited to 9234 */ 3177 /* XXX limited to 9234 */
3174 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 3178 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3175 break; 3179 break;
3176 case WM_T_PCH: 3180 case WM_T_PCH:
3177 /* XXX limited to 4096 */ 3181 /* XXX limited to 4096 */
3178 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 3182 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3179 break; 3183 break;
3180 case WM_T_82542_2_0: 3184 case WM_T_82542_2_0:
3181 case WM_T_82542_2_1: 3185 case WM_T_82542_2_1:
3182 case WM_T_ICH8: 3186 case WM_T_ICH8:
3183 /* No support for jumbo frame */ 3187 /* No support for jumbo frame */
3184 break; 3188 break;
3185 default: 3189 default:
3186 /* ETHER_MAX_LEN_JUMBO */ 3190 /* ETHER_MAX_LEN_JUMBO */
3187 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 3191 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3188 break; 3192 break;
3189 } 3193 }
3190 3194
3191 /* If we're a i82543 or greater, we can support VLANs. */ 3195 /* If we're a i82543 or greater, we can support VLANs. */
3192 if (sc->sc_type >= WM_T_82543) 3196 if (sc->sc_type >= WM_T_82543)
3193 sc->sc_ethercom.ec_capabilities |= 3197 sc->sc_ethercom.ec_capabilities |=
3194 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING; 3198 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
3195 3199
3196 /* 3200 /*
3197 * We can perform TCPv4 and UDPv4 checksums in-bound. Only 3201 * We can perform TCPv4 and UDPv4 checksums in-bound. Only
3198 * on i82543 and later. 3202 * on i82543 and later.
3199 */ 3203 */
3200 if (sc->sc_type >= WM_T_82543) { 3204 if (sc->sc_type >= WM_T_82543) {
3201 ifp->if_capabilities |= 3205 ifp->if_capabilities |=
3202 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 3206 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
3203 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 3207 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
3204 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx | 3208 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
3205 IFCAP_CSUM_TCPv6_Tx | 3209 IFCAP_CSUM_TCPv6_Tx |
3206 IFCAP_CSUM_UDPv6_Tx; 3210 IFCAP_CSUM_UDPv6_Tx;
3207 } 3211 }
3208 3212
3209 /* 3213 /*
3210 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL. 3214 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
3211 * 3215 *
3212 * 82541GI (8086:1076) ... no 3216 * 82541GI (8086:1076) ... no
3213 * 82572EI (8086:10b9) ... yes 3217 * 82572EI (8086:10b9) ... yes
3214 */ 3218 */
3215 if (sc->sc_type >= WM_T_82571) { 3219 if (sc->sc_type >= WM_T_82571) {
3216 ifp->if_capabilities |= 3220 ifp->if_capabilities |=
3217 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx; 3221 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
3218 } 3222 }
3219 3223
3220 /* 3224 /*
3221 * If we're a i82544 or greater (except i82547), we can do 3225 * If we're a i82544 or greater (except i82547), we can do
3222 * TCP segmentation offload. 3226 * TCP segmentation offload.
3223 */ 3227 */
3224 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) 3228 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547)
3225 ifp->if_capabilities |= IFCAP_TSOv4; 3229 ifp->if_capabilities |= IFCAP_TSOv4;
3226 3230
3227 if (sc->sc_type >= WM_T_82571) 3231 if (sc->sc_type >= WM_T_82571)
3228 ifp->if_capabilities |= IFCAP_TSOv6; 3232 ifp->if_capabilities |= IFCAP_TSOv6;
3229 3233
3230 sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT; 3234 sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
3231 sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT; 3235 sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
3232 sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT; 3236 sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
3233 sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT; 3237 sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
3234 3238
3235#ifdef WM_MPSAFE 3239#ifdef WM_MPSAFE
3236 sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); 3240 sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
3237#else 3241#else
3238 sc->sc_core_lock = NULL; 3242 sc->sc_core_lock = NULL;
3239#endif 3243#endif
3240 3244
3241 /* Attach the interface. */ 3245 /* Attach the interface. */
3242 error = if_initialize(ifp); 3246 error = if_initialize(ifp);
3243 if (error != 0) { 3247 if (error != 0) {
3244 aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n", 3248 aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
3245 error); 3249 error);
3246 return; /* Error */ 3250 return; /* Error */
3247 } 3251 }
3248 sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if); 3252 sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
3249 ether_ifattach(ifp, enaddr); 3253 ether_ifattach(ifp, enaddr);
3250 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb); 3254 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
3251 if_register(ifp); 3255 if_register(ifp);
3252 3256
3253#ifdef WM_EVENT_COUNTERS 3257#ifdef WM_EVENT_COUNTERS
3254 /* Attach event counters. */ 3258 /* Attach event counters. */
3255 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR, 3259 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
3256 NULL, xname, "linkintr"); 3260 NULL, xname, "linkintr");
3257 3261
3258 evcnt_attach_dynamic(&sc->sc_ev_crcerrs, EVCNT_TYPE_MISC, 3262 evcnt_attach_dynamic(&sc->sc_ev_crcerrs, EVCNT_TYPE_MISC,
3259 NULL, xname, "CRC Error"); 3263 NULL, xname, "CRC Error");
3260 evcnt_attach_dynamic(&sc->sc_ev_symerrc, EVCNT_TYPE_MISC, 3264 evcnt_attach_dynamic(&sc->sc_ev_symerrc, EVCNT_TYPE_MISC,
3261 NULL, xname, "Symbol Error"); 3265 NULL, xname, "Symbol Error");
3262 evcnt_attach_dynamic(&sc->sc_ev_mpc, EVCNT_TYPE_MISC, 3266 evcnt_attach_dynamic(&sc->sc_ev_mpc, EVCNT_TYPE_MISC,
3263 NULL, xname, "Missed Packets"); 3267 NULL, xname, "Missed Packets");
3264 evcnt_attach_dynamic(&sc->sc_ev_colc, EVCNT_TYPE_MISC, 3268 evcnt_attach_dynamic(&sc->sc_ev_colc, EVCNT_TYPE_MISC,
3265 NULL, xname, "Collision"); 3269 NULL, xname, "Collision");
3266 evcnt_attach_dynamic(&sc->sc_ev_sec, EVCNT_TYPE_MISC, 3270 evcnt_attach_dynamic(&sc->sc_ev_sec, EVCNT_TYPE_MISC,
3267 NULL, xname, "Sequence Error"); 3271 NULL, xname, "Sequence Error");
3268 evcnt_attach_dynamic(&sc->sc_ev_rlec, EVCNT_TYPE_MISC, 3272 evcnt_attach_dynamic(&sc->sc_ev_rlec, EVCNT_TYPE_MISC,
3269 NULL, xname, "Receive Length Error"); 3273 NULL, xname, "Receive Length Error");
3270 3274
3271 if (sc->sc_type >= WM_T_82543) { 3275 if (sc->sc_type >= WM_T_82543) {
3272 evcnt_attach_dynamic(&sc->sc_ev_algnerrc, EVCNT_TYPE_MISC, 3276 evcnt_attach_dynamic(&sc->sc_ev_algnerrc, EVCNT_TYPE_MISC,
3273 NULL, xname, "Alignment Error"); 3277 NULL, xname, "Alignment Error");
3274 evcnt_attach_dynamic(&sc->sc_ev_rxerrc, EVCNT_TYPE_MISC, 3278 evcnt_attach_dynamic(&sc->sc_ev_rxerrc, EVCNT_TYPE_MISC,
3275 NULL, xname, "Receive Error"); 3279 NULL, xname, "Receive Error");
3276 /* XXX Does 82575 have HTDPMC? */ 3280 /* XXX Does 82575 have HTDPMC? */
3277 if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc)) 3281 if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
3278 evcnt_attach_dynamic(&sc->sc_ev_cexterr, 3282 evcnt_attach_dynamic(&sc->sc_ev_cexterr,
3279 EVCNT_TYPE_MISC, NULL, xname, 3283 EVCNT_TYPE_MISC, NULL, xname,
3280 "Carrier Extension Error"); 3284 "Carrier Extension Error");
3281 else 3285 else
3282 evcnt_attach_dynamic(&sc->sc_ev_htdpmc, 3286 evcnt_attach_dynamic(&sc->sc_ev_htdpmc,
3283 EVCNT_TYPE_MISC, NULL, xname, 3287 EVCNT_TYPE_MISC, NULL, xname,
3284 "Host Transmit Discarded Packets by MAC"); 3288 "Host Transmit Discarded Packets by MAC");
3285 3289
3286 evcnt_attach_dynamic(&sc->sc_ev_tncrs, EVCNT_TYPE_MISC, 3290 evcnt_attach_dynamic(&sc->sc_ev_tncrs, EVCNT_TYPE_MISC,
3287 NULL, xname, "Tx with No CRS"); 3291 NULL, xname, "Tx with No CRS");
3288 evcnt_attach_dynamic(&sc->sc_ev_tsctc, EVCNT_TYPE_MISC, 3292 evcnt_attach_dynamic(&sc->sc_ev_tsctc, EVCNT_TYPE_MISC,
3289 NULL, xname, "TCP Segmentation Context Tx"); 3293 NULL, xname, "TCP Segmentation Context Tx");
3290 if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc)) 3294 if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
3291 evcnt_attach_dynamic(&sc->sc_ev_tsctfc, 3295 evcnt_attach_dynamic(&sc->sc_ev_tsctfc,
3292 EVCNT_TYPE_MISC, NULL, xname, 3296 EVCNT_TYPE_MISC, NULL, xname,
3293 "TCP Segmentation Context Tx Fail"); 3297 "TCP Segmentation Context Tx Fail");
3294 else { 3298 else {
3295 /* XXX Is the circuit breaker only for 82576? */ 3299 /* XXX Is the circuit breaker only for 82576? */
3296 evcnt_attach_dynamic(&sc->sc_ev_cbrdpc, 3300 evcnt_attach_dynamic(&sc->sc_ev_cbrdpc,
3297 EVCNT_TYPE_MISC, NULL, xname, 3301 EVCNT_TYPE_MISC, NULL, xname,
3298 "Circuit Breaker Rx Dropped Packet"); 3302 "Circuit Breaker Rx Dropped Packet");
3299 evcnt_attach_dynamic(&sc->sc_ev_cbrmpc, 3303 evcnt_attach_dynamic(&sc->sc_ev_cbrmpc,
3300 EVCNT_TYPE_MISC, NULL, xname, 3304 EVCNT_TYPE_MISC, NULL, xname,
3301 "Circuit Breaker Rx Manageability Packet"); 3305 "Circuit Breaker Rx Manageability Packet");
3302 } 3306 }
3303 } 3307 }
3304 3308
3305 if (sc->sc_type >= WM_T_82542_2_1) { 3309 if (sc->sc_type >= WM_T_82542_2_1) {
3306 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC, 3310 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
3307 NULL, xname, "XOFF Transmitted"); 3311 NULL, xname, "XOFF Transmitted");
3308 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC, 3312 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
3309 NULL, xname, "XON Transmitted"); 3313 NULL, xname, "XON Transmitted");
3310 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC, 3314 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
3311 NULL, xname, "XOFF Received"); 3315 NULL, xname, "XOFF Received");
3312 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC, 3316 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
3313 NULL, xname, "XON Received"); 3317 NULL, xname, "XON Received");
3314 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC, 3318 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
3315 NULL, xname, "FC Received Unsupported"); 3319 NULL, xname, "FC Received Unsupported");
3316 } 3320 }
3317 3321
3318 evcnt_attach_dynamic(&sc->sc_ev_scc, EVCNT_TYPE_MISC, 3322 evcnt_attach_dynamic(&sc->sc_ev_scc, EVCNT_TYPE_MISC,
3319 NULL, xname, "Single Collision"); 3323 NULL, xname, "Single Collision");
3320 evcnt_attach_dynamic(&sc->sc_ev_ecol, EVCNT_TYPE_MISC, 3324 evcnt_attach_dynamic(&sc->sc_ev_ecol, EVCNT_TYPE_MISC,
3321 NULL, xname, "Excessive Collisions"); 3325 NULL, xname, "Excessive Collisions");
3322 evcnt_attach_dynamic(&sc->sc_ev_mcc, EVCNT_TYPE_MISC, 3326 evcnt_attach_dynamic(&sc->sc_ev_mcc, EVCNT_TYPE_MISC,
3323 NULL, xname, "Multiple Collision"); 3327 NULL, xname, "Multiple Collision");
3324 evcnt_attach_dynamic(&sc->sc_ev_latecol, EVCNT_TYPE_MISC, 3328 evcnt_attach_dynamic(&sc->sc_ev_latecol, EVCNT_TYPE_MISC,
3325 NULL, xname, "Late Collisions"); 3329 NULL, xname, "Late Collisions");
3326 3330
3327 if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) 3331 if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
3328 evcnt_attach_dynamic(&sc->sc_ev_cbtmpc, EVCNT_TYPE_MISC, 3332 evcnt_attach_dynamic(&sc->sc_ev_cbtmpc, EVCNT_TYPE_MISC,
3329 NULL, xname, "Circuit Breaker Tx Manageability Packet"); 3333 NULL, xname, "Circuit Breaker Tx Manageability Packet");
3330 3334
3331 evcnt_attach_dynamic(&sc->sc_ev_dc, EVCNT_TYPE_MISC, 3335 evcnt_attach_dynamic(&sc->sc_ev_dc, EVCNT_TYPE_MISC,
3332 NULL, xname, "Defer"); 3336 NULL, xname, "Defer");
3333 evcnt_attach_dynamic(&sc->sc_ev_prc64, EVCNT_TYPE_MISC, 3337 evcnt_attach_dynamic(&sc->sc_ev_prc64, EVCNT_TYPE_MISC,
3334 NULL, xname, "Packets Rx (64 bytes)"); 3338 NULL, xname, "Packets Rx (64 bytes)");
3335 evcnt_attach_dynamic(&sc->sc_ev_prc127, EVCNT_TYPE_MISC, 3339 evcnt_attach_dynamic(&sc->sc_ev_prc127, EVCNT_TYPE_MISC,
3336 NULL, xname, "Packets Rx (65-127 bytes)"); 3340 NULL, xname, "Packets Rx (65-127 bytes)");
3337 evcnt_attach_dynamic(&sc->sc_ev_prc255, EVCNT_TYPE_MISC, 3341 evcnt_attach_dynamic(&sc->sc_ev_prc255, EVCNT_TYPE_MISC,
3338 NULL, xname, "Packets Rx (128-255 bytes)"); 3342 NULL, xname, "Packets Rx (128-255 bytes)");
3339 evcnt_attach_dynamic(&sc->sc_ev_prc511, EVCNT_TYPE_MISC, 3343 evcnt_attach_dynamic(&sc->sc_ev_prc511, EVCNT_TYPE_MISC,
3340 NULL, xname, "Packets Rx (256-511 bytes)"); 3344 NULL, xname, "Packets Rx (256-511 bytes)");
3341 evcnt_attach_dynamic(&sc->sc_ev_prc1023, EVCNT_TYPE_MISC, 3345 evcnt_attach_dynamic(&sc->sc_ev_prc1023, EVCNT_TYPE_MISC,
3342 NULL, xname, "Packets Rx (512-1023 bytes)"); 3346 NULL, xname, "Packets Rx (512-1023 bytes)");
3343 evcnt_attach_dynamic(&sc->sc_ev_prc1522, EVCNT_TYPE_MISC, 3347 evcnt_attach_dynamic(&sc->sc_ev_prc1522, EVCNT_TYPE_MISC,
3344 NULL, xname, "Packets Rx (1024-1522 bytes)"); 3348 NULL, xname, "Packets Rx (1024-1522 bytes)");
3345 evcnt_attach_dynamic(&sc->sc_ev_gprc, EVCNT_TYPE_MISC, 3349 evcnt_attach_dynamic(&sc->sc_ev_gprc, EVCNT_TYPE_MISC,
3346 NULL, xname, "Good Packets Rx"); 3350 NULL, xname, "Good Packets Rx");
3347 evcnt_attach_dynamic(&sc->sc_ev_bprc, EVCNT_TYPE_MISC, 3351 evcnt_attach_dynamic(&sc->sc_ev_bprc, EVCNT_TYPE_MISC,
3348 NULL, xname, "Broadcast Packets Rx"); 3352 NULL, xname, "Broadcast Packets Rx");
3349 evcnt_attach_dynamic(&sc->sc_ev_mprc, EVCNT_TYPE_MISC, 3353 evcnt_attach_dynamic(&sc->sc_ev_mprc, EVCNT_TYPE_MISC,
3350 NULL, xname, "Multicast Packets Rx"); 3354 NULL, xname, "Multicast Packets Rx");
3351 evcnt_attach_dynamic(&sc->sc_ev_gptc, EVCNT_TYPE_MISC, 3355 evcnt_attach_dynamic(&sc->sc_ev_gptc, EVCNT_TYPE_MISC,
3352 NULL, xname, "Good Packets Tx"); 3356 NULL, xname, "Good Packets Tx");
3353 evcnt_attach_dynamic(&sc->sc_ev_gorc, EVCNT_TYPE_MISC, 3357 evcnt_attach_dynamic(&sc->sc_ev_gorc, EVCNT_TYPE_MISC,
3354 NULL, xname, "Good Octets Rx"); 3358 NULL, xname, "Good Octets Rx");
3355 evcnt_attach_dynamic(&sc->sc_ev_gotc, EVCNT_TYPE_MISC, 3359 evcnt_attach_dynamic(&sc->sc_ev_gotc, EVCNT_TYPE_MISC,
3356 NULL, xname, "Good Octets Tx"); 3360 NULL, xname, "Good Octets Tx");
3357 evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC, 3361 evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
3358 NULL, xname, "Rx No Buffers"); 3362 NULL, xname, "Rx No Buffers");
3359 evcnt_attach_dynamic(&sc->sc_ev_ruc, EVCNT_TYPE_MISC, 3363 evcnt_attach_dynamic(&sc->sc_ev_ruc, EVCNT_TYPE_MISC,
3360 NULL, xname, "Rx Undersize (valid CRC)"); 3364 NULL, xname, "Rx Undersize (valid CRC)");
3361 evcnt_attach_dynamic(&sc->sc_ev_rfc, EVCNT_TYPE_MISC, 3365 evcnt_attach_dynamic(&sc->sc_ev_rfc, EVCNT_TYPE_MISC,
3362 NULL, xname, "Rx Fragment (bad CRC)"); 3366 NULL, xname, "Rx Fragment (bad CRC)");
3363 evcnt_attach_dynamic(&sc->sc_ev_roc, EVCNT_TYPE_MISC, 3367 evcnt_attach_dynamic(&sc->sc_ev_roc, EVCNT_TYPE_MISC,
3364 NULL, xname, "Rx Oversize (valid CRC)"); 3368 NULL, xname, "Rx Oversize (valid CRC)");
3365 evcnt_attach_dynamic(&sc->sc_ev_rjc, EVCNT_TYPE_MISC, 3369 evcnt_attach_dynamic(&sc->sc_ev_rjc, EVCNT_TYPE_MISC,
3366 NULL, xname, "Rx Jabber (bad CRC)"); 3370 NULL, xname, "Rx Jabber (bad CRC)");
3367 if (sc->sc_type >= WM_T_82540) { 3371 if (sc->sc_type >= WM_T_82540) {
3368 evcnt_attach_dynamic(&sc->sc_ev_mgtprc, EVCNT_TYPE_MISC, 3372 evcnt_attach_dynamic(&sc->sc_ev_mgtprc, EVCNT_TYPE_MISC,
3369 NULL, xname, "Management Packets RX"); 3373 NULL, xname, "Management Packets RX");
3370 evcnt_attach_dynamic(&sc->sc_ev_mgtpdc, EVCNT_TYPE_MISC, 3374 evcnt_attach_dynamic(&sc->sc_ev_mgtpdc, EVCNT_TYPE_MISC,
3371 NULL, xname, "Management Packets Dropped"); 3375 NULL, xname, "Management Packets Dropped");
3372 evcnt_attach_dynamic(&sc->sc_ev_mgtptc, EVCNT_TYPE_MISC, 3376 evcnt_attach_dynamic(&sc->sc_ev_mgtptc, EVCNT_TYPE_MISC,
3373 NULL, xname, "Management Packets TX"); 3377 NULL, xname, "Management Packets TX");
3374 } 3378 }
3375 evcnt_attach_dynamic(&sc->sc_ev_tor, EVCNT_TYPE_MISC, 3379 evcnt_attach_dynamic(&sc->sc_ev_tor, EVCNT_TYPE_MISC,
3376 NULL, xname, "Total Octets Rx"); 3380 NULL, xname, "Total Octets Rx");
3377 evcnt_attach_dynamic(&sc->sc_ev_tot, EVCNT_TYPE_MISC, 3381 evcnt_attach_dynamic(&sc->sc_ev_tot, EVCNT_TYPE_MISC,
3378 NULL, xname, "Total Octets Tx"); 3382 NULL, xname, "Total Octets Tx");
3379 evcnt_attach_dynamic(&sc->sc_ev_tpr, EVCNT_TYPE_MISC, 3383 evcnt_attach_dynamic(&sc->sc_ev_tpr, EVCNT_TYPE_MISC,
3380 NULL, xname, "Total Packets Rx"); 3384 NULL, xname, "Total Packets Rx");
3381 evcnt_attach_dynamic(&sc->sc_ev_tpt, EVCNT_TYPE_MISC, 3385 evcnt_attach_dynamic(&sc->sc_ev_tpt, EVCNT_TYPE_MISC,
3382 NULL, xname, "Total Packets Tx"); 3386 NULL, xname, "Total Packets Tx");
3383 evcnt_attach_dynamic(&sc->sc_ev_ptc64, EVCNT_TYPE_MISC, 3387 evcnt_attach_dynamic(&sc->sc_ev_ptc64, EVCNT_TYPE_MISC,
3384 NULL, xname, "Packets Tx (64 bytes)"); 3388 NULL, xname, "Packets Tx (64 bytes)");
3385 evcnt_attach_dynamic(&sc->sc_ev_ptc127, EVCNT_TYPE_MISC, 3389 evcnt_attach_dynamic(&sc->sc_ev_ptc127, EVCNT_TYPE_MISC,
3386 NULL, xname, "Packets Tx (65-127 bytes)"); 3390 NULL, xname, "Packets Tx (65-127 bytes)");
3387 evcnt_attach_dynamic(&sc->sc_ev_ptc255, EVCNT_TYPE_MISC, 3391 evcnt_attach_dynamic(&sc->sc_ev_ptc255, EVCNT_TYPE_MISC,
3388 NULL, xname, "Packets Tx (128-255 bytes)"); 3392 NULL, xname, "Packets Tx (128-255 bytes)");
3389 evcnt_attach_dynamic(&sc->sc_ev_ptc511, EVCNT_TYPE_MISC, 3393 evcnt_attach_dynamic(&sc->sc_ev_ptc511, EVCNT_TYPE_MISC,
3390 NULL, xname, "Packets Tx (256-511 bytes)"); 3394 NULL, xname, "Packets Tx (256-511 bytes)");
3391 evcnt_attach_dynamic(&sc->sc_ev_ptc1023, EVCNT_TYPE_MISC, 3395 evcnt_attach_dynamic(&sc->sc_ev_ptc1023, EVCNT_TYPE_MISC,
3392 NULL, xname, "Packets Tx (512-1023 bytes)"); 3396 NULL, xname, "Packets Tx (512-1023 bytes)");
3393 evcnt_attach_dynamic(&sc->sc_ev_ptc1522, EVCNT_TYPE_MISC, 3397 evcnt_attach_dynamic(&sc->sc_ev_ptc1522, EVCNT_TYPE_MISC,
3394 NULL, xname, "Packets Tx (1024-1522 Bytes)"); 3398 NULL, xname, "Packets Tx (1024-1522 Bytes)");
3395 evcnt_attach_dynamic(&sc->sc_ev_mptc, EVCNT_TYPE_MISC, 3399 evcnt_attach_dynamic(&sc->sc_ev_mptc, EVCNT_TYPE_MISC,
3396 NULL, xname, "Multicast Packets Tx"); 3400 NULL, xname, "Multicast Packets Tx");
3397 evcnt_attach_dynamic(&sc->sc_ev_bptc, EVCNT_TYPE_MISC, 3401 evcnt_attach_dynamic(&sc->sc_ev_bptc, EVCNT_TYPE_MISC,
3398 NULL, xname, "Broadcast Packets Tx"); 3402 NULL, xname, "Broadcast Packets Tx");
3399 if (sc->sc_type >= WM_T_82571) /* PCIe, 80003 and ICH/PCHs */ 3403 if (sc->sc_type >= WM_T_82571) /* PCIe, 80003 and ICH/PCHs */
3400 evcnt_attach_dynamic(&sc->sc_ev_iac, EVCNT_TYPE_MISC, 3404 evcnt_attach_dynamic(&sc->sc_ev_iac, EVCNT_TYPE_MISC,
3401 NULL, xname, "Interrupt Assertion"); 3405 NULL, xname, "Interrupt Assertion");
3402 if (sc->sc_type < WM_T_82575) { 3406 if (sc->sc_type < WM_T_82575) {
3403 evcnt_attach_dynamic(&sc->sc_ev_icrxptc, EVCNT_TYPE_MISC, 3407 evcnt_attach_dynamic(&sc->sc_ev_icrxptc, EVCNT_TYPE_MISC,
3404 NULL, xname, "Intr. Cause Rx Pkt Timer Expire"); 3408 NULL, xname, "Intr. Cause Rx Pkt Timer Expire");
3405 evcnt_attach_dynamic(&sc->sc_ev_icrxatc, EVCNT_TYPE_MISC, 3409 evcnt_attach_dynamic(&sc->sc_ev_icrxatc, EVCNT_TYPE_MISC,
3406 NULL, xname, "Intr. Cause Rx Abs Timer Expire"); 3410 NULL, xname, "Intr. Cause Rx Abs Timer Expire");
3407 evcnt_attach_dynamic(&sc->sc_ev_ictxptc, EVCNT_TYPE_MISC, 3411 evcnt_attach_dynamic(&sc->sc_ev_ictxptc, EVCNT_TYPE_MISC,
3408 NULL, xname, "Intr. Cause Tx Pkt Timer Expire"); 3412 NULL, xname, "Intr. Cause Tx Pkt Timer Expire");
3409 evcnt_attach_dynamic(&sc->sc_ev_ictxatc, EVCNT_TYPE_MISC, 3413 evcnt_attach_dynamic(&sc->sc_ev_ictxatc, EVCNT_TYPE_MISC,
3410 NULL, xname, "Intr. Cause Tx Abs Timer Expire"); 3414 NULL, xname, "Intr. Cause Tx Abs Timer Expire");
3411 evcnt_attach_dynamic(&sc->sc_ev_ictxqec, EVCNT_TYPE_MISC, 3415 evcnt_attach_dynamic(&sc->sc_ev_ictxqec, EVCNT_TYPE_MISC,
3412 NULL, xname, "Intr. Cause Tx Queue Empty"); 3416 NULL, xname, "Intr. Cause Tx Queue Empty");
3413 evcnt_attach_dynamic(&sc->sc_ev_ictxqmtc, EVCNT_TYPE_MISC, 3417 evcnt_attach_dynamic(&sc->sc_ev_ictxqmtc, EVCNT_TYPE_MISC,
3414 NULL, xname, "Intr. Cause Tx Queue Min Thresh"); 3418 NULL, xname, "Intr. Cause Tx Queue Min Thresh");
3415 evcnt_attach_dynamic(&sc->sc_ev_rxdmtc, EVCNT_TYPE_MISC, 3419 evcnt_attach_dynamic(&sc->sc_ev_rxdmtc, EVCNT_TYPE_MISC,
3416 NULL, xname, "Intr. Cause Rx Desc Min Thresh"); 3420 NULL, xname, "Intr. Cause Rx Desc Min Thresh");
3417 3421
3418 /* XXX 82575 document says it has ICRXOC. Is that right? */ 3422 /* XXX 82575 document says it has ICRXOC. Is that right? */
3419 evcnt_attach_dynamic(&sc->sc_ev_icrxoc, EVCNT_TYPE_MISC, 3423 evcnt_attach_dynamic(&sc->sc_ev_icrxoc, EVCNT_TYPE_MISC,
3420 NULL, xname, "Interrupt Cause Receiver Overrun"); 3424 NULL, xname, "Interrupt Cause Receiver Overrun");
3421 } else if (!WM_IS_ICHPCH(sc)) { 3425 } else if (!WM_IS_ICHPCH(sc)) {
3422 /* 3426 /*
3423 * For 82575 and newer. 3427 * For 82575 and newer.
3424 * 3428 *
3425 * On 80003, ICHs and PCHs, it seems all of the following 3429 * On 80003, ICHs and PCHs, it seems all of the following
3426 * registers are zero. 3430 * registers are zero.
3427 */ 3431 */
3428 evcnt_attach_dynamic(&sc->sc_ev_rpthc, EVCNT_TYPE_MISC, 3432 evcnt_attach_dynamic(&sc->sc_ev_rpthc, EVCNT_TYPE_MISC,
3429 NULL, xname, "Rx Packets To Host"); 3433 NULL, xname, "Rx Packets To Host");
3430 evcnt_attach_dynamic(&sc->sc_ev_debug1, EVCNT_TYPE_MISC, 3434 evcnt_attach_dynamic(&sc->sc_ev_debug1, EVCNT_TYPE_MISC,
3431 NULL, xname, "Debug Counter 1"); 3435 NULL, xname, "Debug Counter 1");
3432 evcnt_attach_dynamic(&sc->sc_ev_debug2, EVCNT_TYPE_MISC, 3436 evcnt_attach_dynamic(&sc->sc_ev_debug2, EVCNT_TYPE_MISC,
3433 NULL, xname, "Debug Counter 2"); 3437 NULL, xname, "Debug Counter 2");
3434 evcnt_attach_dynamic(&sc->sc_ev_debug3, EVCNT_TYPE_MISC, 3438 evcnt_attach_dynamic(&sc->sc_ev_debug3, EVCNT_TYPE_MISC,
3435 NULL, xname, "Debug Counter 3"); 3439 NULL, xname, "Debug Counter 3");
3436 3440
3437 /* 3441 /*
3438 * 82575 datasheet says 0x4118 is for TXQEC(Tx Queue Empty). 3442 * 82575 datasheet says 0x4118 is for TXQEC(Tx Queue Empty).
3439 * I think it's wrong. The real count I observed is the same 3443 * I think it's wrong. The real count I observed is the same
3440 * as GPTC(Good Packets Tx) and TPT(Total Packets Tx). 3444 * as GPTC(Good Packets Tx) and TPT(Total Packets Tx).
3441 * It's HGPTC(Host Good Packets Tx) which is described in 3445 * It's HGPTC(Host Good Packets Tx) which is described in
3442 * 82576's datasheet. 3446 * 82576's datasheet.
3443 */ 3447 */
3444 evcnt_attach_dynamic(&sc->sc_ev_hgptc, EVCNT_TYPE_MISC, 3448 evcnt_attach_dynamic(&sc->sc_ev_hgptc, EVCNT_TYPE_MISC,
3445 NULL, xname, "Host Good Packets TX"); 3449 NULL, xname, "Host Good Packets TX");
3446 3450
3447 evcnt_attach_dynamic(&sc->sc_ev_debug4, EVCNT_TYPE_MISC, 3451 evcnt_attach_dynamic(&sc->sc_ev_debug4, EVCNT_TYPE_MISC,
3448 NULL, xname, "Debug Counter 4"); 3452 NULL, xname, "Debug Counter 4");
3449 evcnt_attach_dynamic(&sc->sc_ev_rxdmtc, EVCNT_TYPE_MISC, 3453 evcnt_attach_dynamic(&sc->sc_ev_rxdmtc, EVCNT_TYPE_MISC,
3450 NULL, xname, "Rx Desc Min Thresh"); 3454 NULL, xname, "Rx Desc Min Thresh");
3451 /* XXX Is the circuit breaker only for 82576? */ 3455 /* XXX Is the circuit breaker only for 82576? */
3452 evcnt_attach_dynamic(&sc->sc_ev_htcbdpc, EVCNT_TYPE_MISC, 3456 evcnt_attach_dynamic(&sc->sc_ev_htcbdpc, EVCNT_TYPE_MISC,
3453 NULL, xname, "Host Tx Circuit Breaker Dropped Packets"); 3457 NULL, xname, "Host Tx Circuit Breaker Dropped Packets");
3454 3458
3455 evcnt_attach_dynamic(&sc->sc_ev_hgorc, EVCNT_TYPE_MISC, 3459 evcnt_attach_dynamic(&sc->sc_ev_hgorc, EVCNT_TYPE_MISC,
3456 NULL, xname, "Host Good Octets Rx"); 3460 NULL, xname, "Host Good Octets Rx");
3457 evcnt_attach_dynamic(&sc->sc_ev_hgotc, EVCNT_TYPE_MISC, 3461 evcnt_attach_dynamic(&sc->sc_ev_hgotc, EVCNT_TYPE_MISC,
3458 NULL, xname, "Host Good Octets Tx"); 3462 NULL, xname, "Host Good Octets Tx");
3459 evcnt_attach_dynamic(&sc->sc_ev_lenerrs, EVCNT_TYPE_MISC, 3463 evcnt_attach_dynamic(&sc->sc_ev_lenerrs, EVCNT_TYPE_MISC,
3460 NULL, xname, "Length Errors (length/type <= 1500)"); 3464 NULL, xname, "Length Errors (length/type <= 1500)");
3461 evcnt_attach_dynamic(&sc->sc_ev_scvpc, EVCNT_TYPE_MISC, 3465 evcnt_attach_dynamic(&sc->sc_ev_scvpc, EVCNT_TYPE_MISC,
3462 NULL, xname, "SerDes/SGMII Code Violation Packet"); 3466 NULL, xname, "SerDes/SGMII Code Violation Packet");
3463 evcnt_attach_dynamic(&sc->sc_ev_hrmpc, EVCNT_TYPE_MISC, 3467 evcnt_attach_dynamic(&sc->sc_ev_hrmpc, EVCNT_TYPE_MISC,
3464 NULL, xname, "Header Redirection Missed Packet"); 3468 NULL, xname, "Header Redirection Missed Packet");
3465 } 3469 }
3466 if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) { 3470 if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
3467 evcnt_attach_dynamic(&sc->sc_ev_tlpic, EVCNT_TYPE_MISC, 3471 evcnt_attach_dynamic(&sc->sc_ev_tlpic, EVCNT_TYPE_MISC,
3468 NULL, xname, "EEE Tx LPI"); 3472 NULL, xname, "EEE Tx LPI");
3469 evcnt_attach_dynamic(&sc->sc_ev_rlpic, EVCNT_TYPE_MISC, 3473 evcnt_attach_dynamic(&sc->sc_ev_rlpic, EVCNT_TYPE_MISC,
3470 NULL, xname, "EEE Rx LPI"); 3474 NULL, xname, "EEE Rx LPI");
3471 evcnt_attach_dynamic(&sc->sc_ev_b2ogprc, EVCNT_TYPE_MISC, 3475 evcnt_attach_dynamic(&sc->sc_ev_b2ogprc, EVCNT_TYPE_MISC,
3472 NULL, xname, "BMC2OS Packets received by host"); 3476 NULL, xname, "BMC2OS Packets received by host");
3473 evcnt_attach_dynamic(&sc->sc_ev_o2bspc, EVCNT_TYPE_MISC, 3477 evcnt_attach_dynamic(&sc->sc_ev_o2bspc, EVCNT_TYPE_MISC,
3474 NULL, xname, "OS2BMC Packets transmitted by host"); 3478 NULL, xname, "OS2BMC Packets transmitted by host");
3475 evcnt_attach_dynamic(&sc->sc_ev_b2ospc, EVCNT_TYPE_MISC, 3479 evcnt_attach_dynamic(&sc->sc_ev_b2ospc, EVCNT_TYPE_MISC,
3476 NULL, xname, "BMC2OS Packets sent by BMC"); 3480 NULL, xname, "BMC2OS Packets sent by BMC");
3477 evcnt_attach_dynamic(&sc->sc_ev_o2bgptc, EVCNT_TYPE_MISC, 3481 evcnt_attach_dynamic(&sc->sc_ev_o2bgptc, EVCNT_TYPE_MISC,
3478 NULL, xname, "OS2BMC Packets received by BMC"); 3482 NULL, xname, "OS2BMC Packets received by BMC");
3479 } 3483 }
3480#endif /* WM_EVENT_COUNTERS */ 3484#endif /* WM_EVENT_COUNTERS */
3481 3485
3482 sc->sc_txrx_use_workqueue = false; 3486 sc->sc_txrx_use_workqueue = false;
3483 3487
3484 if (wm_phy_need_linkdown_discard(sc)) { 3488 if (wm_phy_need_linkdown_discard(sc)) {
3485 DPRINTF(sc, WM_DEBUG_LINK, 3489 DPRINTF(sc, WM_DEBUG_LINK,
3486 ("%s: %s: Set linkdown discard flag\n", 3490 ("%s: %s: Set linkdown discard flag\n",
3487 device_xname(sc->sc_dev), __func__)); 3491 device_xname(sc->sc_dev), __func__));
3488 wm_set_linkdown_discard(sc); 3492 wm_set_linkdown_discard(sc);
3489 } 3493 }
3490 3494
3491 wm_init_sysctls(sc); 3495 wm_init_sysctls(sc);
3492 3496
3493 if (pmf_device_register(self, wm_suspend, wm_resume)) 3497 if (pmf_device_register(self, wm_suspend, wm_resume))
3494 pmf_class_network_register(self, ifp); 3498 pmf_class_network_register(self, ifp);
3495 else 3499 else
3496 aprint_error_dev(self, "couldn't establish power handler\n"); 3500 aprint_error_dev(self, "couldn't establish power handler\n");
3497 3501
3498 sc->sc_flags |= WM_F_ATTACHED; 3502 sc->sc_flags |= WM_F_ATTACHED;
3499out: 3503out:
3500 return; 3504 return;
3501} 3505}
3502 3506
3503/* The detach function (ca_detach) */ 3507/* The detach function (ca_detach) */
3504static int 3508static int
3505wm_detach(device_t self, int flags __unused) 3509wm_detach(device_t self, int flags __unused)
3506{ 3510{
3507 struct wm_softc *sc = device_private(self); 3511 struct wm_softc *sc = device_private(self);
3508 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 3512 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3509 int i; 3513 int i;
3510 3514
3511 if ((sc->sc_flags & WM_F_ATTACHED) == 0) 3515 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
3512 return 0; 3516 return 0;
3513 3517
3514 /* Stop the interface. Callouts are stopped in it. */ 3518 /* Stop the interface. Callouts are stopped in it. */
3515 wm_stop(ifp, 1); 3519 wm_stop(ifp, 1);
3516 3520
3517 pmf_device_deregister(self); 3521 pmf_device_deregister(self);
3518 3522
3519 sysctl_teardown(&sc->sc_sysctllog); 3523 sysctl_teardown(&sc->sc_sysctllog);
3520 3524
3521#ifdef WM_EVENT_COUNTERS 3525#ifdef WM_EVENT_COUNTERS
3522 evcnt_detach(&sc->sc_ev_linkintr); 3526 evcnt_detach(&sc->sc_ev_linkintr);
3523 3527
3524 evcnt_detach(&sc->sc_ev_crcerrs); 3528 evcnt_detach(&sc->sc_ev_crcerrs);
3525 evcnt_detach(&sc->sc_ev_symerrc); 3529 evcnt_detach(&sc->sc_ev_symerrc);
3526 evcnt_detach(&sc->sc_ev_mpc); 3530 evcnt_detach(&sc->sc_ev_mpc);
3527 evcnt_detach(&sc->sc_ev_colc); 3531 evcnt_detach(&sc->sc_ev_colc);
3528 evcnt_detach(&sc->sc_ev_sec); 3532 evcnt_detach(&sc->sc_ev_sec);
3529 evcnt_detach(&sc->sc_ev_rlec); 3533 evcnt_detach(&sc->sc_ev_rlec);
3530 3534
3531 if (sc->sc_type >= WM_T_82543) { 3535 if (sc->sc_type >= WM_T_82543) {
3532 evcnt_detach(&sc->sc_ev_algnerrc); 3536 evcnt_detach(&sc->sc_ev_algnerrc);
3533 evcnt_detach(&sc->sc_ev_rxerrc); 3537 evcnt_detach(&sc->sc_ev_rxerrc);
3534 if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc)) 3538 if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
3535 evcnt_detach(&sc->sc_ev_cexterr); 3539 evcnt_detach(&sc->sc_ev_cexterr);
3536 else 3540 else
3537 evcnt_detach(&sc->sc_ev_htdpmc); 3541 evcnt_detach(&sc->sc_ev_htdpmc);
3538 3542
3539 evcnt_detach(&sc->sc_ev_tncrs); 3543 evcnt_detach(&sc->sc_ev_tncrs);
3540 evcnt_detach(&sc->sc_ev_tsctc); 3544 evcnt_detach(&sc->sc_ev_tsctc);
3541 if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc)) 3545 if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
3542 evcnt_detach(&sc->sc_ev_tsctfc); 3546 evcnt_detach(&sc->sc_ev_tsctfc);
3543 else { 3547 else {
3544 evcnt_detach(&sc->sc_ev_cbrdpc); 3548 evcnt_detach(&sc->sc_ev_cbrdpc);
3545 evcnt_detach(&sc->sc_ev_cbrmpc); 3549 evcnt_detach(&sc->sc_ev_cbrmpc);
3546 } 3550 }
3547 } 3551 }
3548 3552
3549 if (sc->sc_type >= WM_T_82542_2_1) { 3553 if (sc->sc_type >= WM_T_82542_2_1) {
3550 evcnt_detach(&sc->sc_ev_tx_xoff); 3554 evcnt_detach(&sc->sc_ev_tx_xoff);
3551 evcnt_detach(&sc->sc_ev_tx_xon); 3555 evcnt_detach(&sc->sc_ev_tx_xon);
3552 evcnt_detach(&sc->sc_ev_rx_xoff); 3556 evcnt_detach(&sc->sc_ev_rx_xoff);
3553 evcnt_detach(&sc->sc_ev_rx_xon); 3557 evcnt_detach(&sc->sc_ev_rx_xon);
3554 evcnt_detach(&sc->sc_ev_rx_macctl); 3558 evcnt_detach(&sc->sc_ev_rx_macctl);
3555 } 3559 }
3556 3560
3557 evcnt_detach(&sc->sc_ev_scc); 3561 evcnt_detach(&sc->sc_ev_scc);
3558 evcnt_detach(&sc->sc_ev_ecol); 3562 evcnt_detach(&sc->sc_ev_ecol);
3559 evcnt_detach(&sc->sc_ev_mcc); 3563 evcnt_detach(&sc->sc_ev_mcc);
3560 evcnt_detach(&sc->sc_ev_latecol); 3564 evcnt_detach(&sc->sc_ev_latecol);
3561 3565
3562 if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) 3566 if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
3563 evcnt_detach(&sc->sc_ev_cbtmpc); 3567 evcnt_detach(&sc->sc_ev_cbtmpc);
3564 3568
3565 evcnt_detach(&sc->sc_ev_dc); 3569 evcnt_detach(&sc->sc_ev_dc);
3566 evcnt_detach(&sc->sc_ev_prc64); 3570 evcnt_detach(&sc->sc_ev_prc64);
3567 evcnt_detach(&sc->sc_ev_prc127); 3571 evcnt_detach(&sc->sc_ev_prc127);
3568 evcnt_detach(&sc->sc_ev_prc255); 3572 evcnt_detach(&sc->sc_ev_prc255);
3569 evcnt_detach(&sc->sc_ev_prc511); 3573 evcnt_detach(&sc->sc_ev_prc511);
3570 evcnt_detach(&sc->sc_ev_prc1023); 3574 evcnt_detach(&sc->sc_ev_prc1023);
3571 evcnt_detach(&sc->sc_ev_prc1522); 3575 evcnt_detach(&sc->sc_ev_prc1522);
3572 evcnt_detach(&sc->sc_ev_gprc); 3576 evcnt_detach(&sc->sc_ev_gprc);
3573 evcnt_detach(&sc->sc_ev_bprc); 3577 evcnt_detach(&sc->sc_ev_bprc);
3574 evcnt_detach(&sc->sc_ev_mprc); 3578 evcnt_detach(&sc->sc_ev_mprc);
3575 evcnt_detach(&sc->sc_ev_gptc); 3579 evcnt_detach(&sc->sc_ev_gptc);
3576 evcnt_detach(&sc->sc_ev_gorc); 3580 evcnt_detach(&sc->sc_ev_gorc);
3577 evcnt_detach(&sc->sc_ev_gotc); 3581 evcnt_detach(&sc->sc_ev_gotc);
3578 evcnt_detach(&sc->sc_ev_rnbc); 3582 evcnt_detach(&sc->sc_ev_rnbc);
3579 evcnt_detach(&sc->sc_ev_ruc); 3583 evcnt_detach(&sc->sc_ev_ruc);
3580 evcnt_detach(&sc->sc_ev_rfc); 3584 evcnt_detach(&sc->sc_ev_rfc);
3581 evcnt_detach(&sc->sc_ev_roc); 3585 evcnt_detach(&sc->sc_ev_roc);
3582 evcnt_detach(&sc->sc_ev_rjc); 3586 evcnt_detach(&sc->sc_ev_rjc);
3583 if (sc->sc_type >= WM_T_82540) { 3587 if (sc->sc_type >= WM_T_82540) {
3584 evcnt_detach(&sc->sc_ev_mgtprc); 3588 evcnt_detach(&sc->sc_ev_mgtprc);
3585 evcnt_detach(&sc->sc_ev_mgtpdc); 3589 evcnt_detach(&sc->sc_ev_mgtpdc);
3586 evcnt_detach(&sc->sc_ev_mgtptc); 3590 evcnt_detach(&sc->sc_ev_mgtptc);
3587 } 3591 }
3588 evcnt_detach(&sc->sc_ev_tor); 3592 evcnt_detach(&sc->sc_ev_tor);
3589 evcnt_detach(&sc->sc_ev_tot); 3593 evcnt_detach(&sc->sc_ev_tot);
3590 evcnt_detach(&sc->sc_ev_tpr); 3594 evcnt_detach(&sc->sc_ev_tpr);
3591 evcnt_detach(&sc->sc_ev_tpt); 3595 evcnt_detach(&sc->sc_ev_tpt);
3592 evcnt_detach(&sc->sc_ev_ptc64); 3596 evcnt_detach(&sc->sc_ev_ptc64);
3593 evcnt_detach(&sc->sc_ev_ptc127); 3597 evcnt_detach(&sc->sc_ev_ptc127);
3594 evcnt_detach(&sc->sc_ev_ptc255); 3598 evcnt_detach(&sc->sc_ev_ptc255);
3595 evcnt_detach(&sc->sc_ev_ptc511); 3599 evcnt_detach(&sc->sc_ev_ptc511);
3596 evcnt_detach(&sc->sc_ev_ptc1023); 3600 evcnt_detach(&sc->sc_ev_ptc1023);
3597 evcnt_detach(&sc->sc_ev_ptc1522); 3601 evcnt_detach(&sc->sc_ev_ptc1522);
3598 evcnt_detach(&sc->sc_ev_mptc); 3602 evcnt_detach(&sc->sc_ev_mptc);
3599 evcnt_detach(&sc->sc_ev_bptc); 3603 evcnt_detach(&sc->sc_ev_bptc);
3600 if (sc->sc_type >= WM_T_82571) 3604 if (sc->sc_type >= WM_T_82571)
3601 evcnt_detach(&sc->sc_ev_iac); 3605 evcnt_detach(&sc->sc_ev_iac);
3602 if (sc->sc_type < WM_T_82575) { 3606 if (sc->sc_type < WM_T_82575) {
3603 evcnt_detach(&sc->sc_ev_icrxptc); 3607 evcnt_detach(&sc->sc_ev_icrxptc);
3604 evcnt_detach(&sc->sc_ev_icrxatc); 3608 evcnt_detach(&sc->sc_ev_icrxatc);
3605 evcnt_detach(&sc->sc_ev_ictxptc); 3609 evcnt_detach(&sc->sc_ev_ictxptc);
3606 evcnt_detach(&sc->sc_ev_ictxatc); 3610 evcnt_detach(&sc->sc_ev_ictxatc);
3607 evcnt_detach(&sc->sc_ev_ictxqec); 3611 evcnt_detach(&sc->sc_ev_ictxqec);
3608 evcnt_detach(&sc->sc_ev_ictxqmtc); 3612 evcnt_detach(&sc->sc_ev_ictxqmtc);
3609 evcnt_detach(&sc->sc_ev_rxdmtc); 3613 evcnt_detach(&sc->sc_ev_rxdmtc);
3610 evcnt_detach(&sc->sc_ev_icrxoc); 3614 evcnt_detach(&sc->sc_ev_icrxoc);
3611 } else if (!WM_IS_ICHPCH(sc)) { 3615 } else if (!WM_IS_ICHPCH(sc)) {
3612 evcnt_detach(&sc->sc_ev_rpthc); 3616 evcnt_detach(&sc->sc_ev_rpthc);
3613 evcnt_detach(&sc->sc_ev_debug1); 3617 evcnt_detach(&sc->sc_ev_debug1);
3614 evcnt_detach(&sc->sc_ev_debug2); 3618 evcnt_detach(&sc->sc_ev_debug2);
3615 evcnt_detach(&sc->sc_ev_debug3); 3619 evcnt_detach(&sc->sc_ev_debug3);
3616 evcnt_detach(&sc->sc_ev_hgptc); 3620 evcnt_detach(&sc->sc_ev_hgptc);
3617 evcnt_detach(&sc->sc_ev_debug4); 3621 evcnt_detach(&sc->sc_ev_debug4);
3618 evcnt_detach(&sc->sc_ev_rxdmtc); 3622 evcnt_detach(&sc->sc_ev_rxdmtc);
3619 evcnt_detach(&sc->sc_ev_htcbdpc); 3623 evcnt_detach(&sc->sc_ev_htcbdpc);
3620 3624
3621 evcnt_detach(&sc->sc_ev_hgorc); 3625 evcnt_detach(&sc->sc_ev_hgorc);
3622 evcnt_detach(&sc->sc_ev_hgotc); 3626 evcnt_detach(&sc->sc_ev_hgotc);
3623 evcnt_detach(&sc->sc_ev_lenerrs); 3627 evcnt_detach(&sc->sc_ev_lenerrs);
3624 evcnt_detach(&sc->sc_ev_scvpc); 3628 evcnt_detach(&sc->sc_ev_scvpc);
3625 evcnt_detach(&sc->sc_ev_hrmpc); 3629 evcnt_detach(&sc->sc_ev_hrmpc);
3626 } 3630 }
3627 if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) { 3631 if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
3628 evcnt_detach(&sc->sc_ev_tlpic); 3632 evcnt_detach(&sc->sc_ev_tlpic);
3629 evcnt_detach(&sc->sc_ev_rlpic); 3633 evcnt_detach(&sc->sc_ev_rlpic);
3630 evcnt_detach(&sc->sc_ev_b2ogprc); 3634 evcnt_detach(&sc->sc_ev_b2ogprc);
3631 evcnt_detach(&sc->sc_ev_o2bspc); 3635 evcnt_detach(&sc->sc_ev_o2bspc);
3632 evcnt_detach(&sc->sc_ev_b2ospc); 3636 evcnt_detach(&sc->sc_ev_b2ospc);
3633 evcnt_detach(&sc->sc_ev_o2bgptc); 3637 evcnt_detach(&sc->sc_ev_o2bgptc);
3634 } 3638 }
3635#endif /* WM_EVENT_COUNTERS */ 3639#endif /* WM_EVENT_COUNTERS */
3636 3640
3637 /* Tell the firmware about the release */ 3641 /* Tell the firmware about the release */
3638 WM_CORE_LOCK(sc); 3642 WM_CORE_LOCK(sc);
3639 wm_release_manageability(sc); 3643 wm_release_manageability(sc);
3640 wm_release_hw_control(sc); 3644 wm_release_hw_control(sc);
3641 wm_enable_wakeup(sc); 3645 wm_enable_wakeup(sc);
3642 WM_CORE_UNLOCK(sc); 3646 WM_CORE_UNLOCK(sc);
3643 3647
3644 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY); 3648 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
3645 3649
3646 /* Delete all remaining media. */ 3650 /* Delete all remaining media. */
3647 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY); 3651 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
3648 3652
3649 ether_ifdetach(ifp); 3653 ether_ifdetach(ifp);
3650 if_detach(ifp); 3654 if_detach(ifp);
3651 if_percpuq_destroy(sc->sc_ipq); 3655 if_percpuq_destroy(sc->sc_ipq);
3652 3656
3653 /* Unload RX dmamaps and free mbufs */ 3657 /* Unload RX dmamaps and free mbufs */
3654 for (i = 0; i < sc->sc_nqueues; i++) { 3658 for (i = 0; i < sc->sc_nqueues; i++) {
3655 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq; 3659 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
3656 mutex_enter(rxq->rxq_lock); 3660 mutex_enter(rxq->rxq_lock);
3657 wm_rxdrain(rxq); 3661 wm_rxdrain(rxq);
3658 mutex_exit(rxq->rxq_lock); 3662 mutex_exit(rxq->rxq_lock);
3659 } 3663 }
3660 /* Must unlock here */ 3664 /* Must unlock here */
3661 3665
3662 /* Disestablish the interrupt handler */ 3666 /* Disestablish the interrupt handler */
3663 for (i = 0; i < sc->sc_nintrs; i++) { 3667 for (i = 0; i < sc->sc_nintrs; i++) {
3664 if (sc->sc_ihs[i] != NULL) { 3668 if (sc->sc_ihs[i] != NULL) {
3665 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]); 3669 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
3666 sc->sc_ihs[i] = NULL; 3670 sc->sc_ihs[i] = NULL;
3667 } 3671 }
3668 } 3672 }
3669 pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs); 3673 pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
3670 3674
3671 /* wm_stop() ensured that the workqueue is stopped. */ 3675 /* wm_stop() ensured that the workqueue is stopped. */
3672 workqueue_destroy(sc->sc_queue_wq); 3676 workqueue_destroy(sc->sc_queue_wq);
3673 3677
3674 for (i = 0; i < sc->sc_nqueues; i++) 3678 for (i = 0; i < sc->sc_nqueues; i++)
3675 softint_disestablish(sc->sc_queue[i].wmq_si); 3679 softint_disestablish(sc->sc_queue[i].wmq_si);
3676 3680
3677 wm_free_txrx_queues(sc); 3681 wm_free_txrx_queues(sc);
3678 3682
3679 /* Unmap the registers */ 3683 /* Unmap the registers */
3680 if (sc->sc_ss) { 3684 if (sc->sc_ss) {
3681 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss); 3685 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
3682 sc->sc_ss = 0; 3686 sc->sc_ss = 0;
3683 } 3687 }
3684 if (sc->sc_ios) { 3688 if (sc->sc_ios) {
3685 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios); 3689 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
3686 sc->sc_ios = 0; 3690 sc->sc_ios = 0;
3687 } 3691 }
3688 if (sc->sc_flashs) { 3692 if (sc->sc_flashs) {
3689 bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs); 3693 bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
3690 sc->sc_flashs = 0; 3694 sc->sc_flashs = 0;
3691 } 3695 }
3692 3696
3693 if (sc->sc_core_lock) 3697 if (sc->sc_core_lock)
3694 mutex_obj_free(sc->sc_core_lock); 3698 mutex_obj_free(sc->sc_core_lock);
@@ -5530,3789 +5534,3821 @@ wm_reset(struct wm_softc *sc) @@ -5530,3789 +5534,3821 @@ wm_reset(struct wm_softc *sc)
5530 CSR_WRITE_FLUSH(sc); 5534 CSR_WRITE_FLUSH(sc);
5531 delay(5000); 5535 delay(5000);
5532 } 5536 }
5533 5537
5534 switch (sc->sc_type) { 5538 switch (sc->sc_type) {
5535 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */ 5539 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
5536 case WM_T_82541: 5540 case WM_T_82541:
5537 case WM_T_82541_2: 5541 case WM_T_82541_2:
5538 case WM_T_82547: 5542 case WM_T_82547:
5539 case WM_T_82547_2: 5543 case WM_T_82547_2:
5540 /* 5544 /*
5541 * On some chipsets, a reset through a memory-mapped write 5545 * On some chipsets, a reset through a memory-mapped write
5542 * cycle can cause the chip to reset before completing the 5546 * cycle can cause the chip to reset before completing the
5543 * write cycle. This causes major headache that can be avoided 5547 * write cycle. This causes major headache that can be avoided
5544 * by issuing the reset via indirect register writes through 5548 * by issuing the reset via indirect register writes through
5545 * I/O space. 5549 * I/O space.
5546 * 5550 *
5547 * So, if we successfully mapped the I/O BAR at attach time, 5551 * So, if we successfully mapped the I/O BAR at attach time,
5548 * use that. Otherwise, try our luck with a memory-mapped 5552 * use that. Otherwise, try our luck with a memory-mapped
5549 * reset. 5553 * reset.
5550 */ 5554 */
5551 if (sc->sc_flags & WM_F_IOH_VALID) 5555 if (sc->sc_flags & WM_F_IOH_VALID)
5552 wm_io_write(sc, WMREG_CTRL, CTRL_RST); 5556 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
5553 else 5557 else
5554 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST); 5558 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
5555 break; 5559 break;
5556 case WM_T_82545_3: 5560 case WM_T_82545_3:
5557 case WM_T_82546_3: 5561 case WM_T_82546_3:
5558 /* Use the shadow control register on these chips. */ 5562 /* Use the shadow control register on these chips. */
5559 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST); 5563 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
5560 break; 5564 break;
5561 case WM_T_80003: 5565 case WM_T_80003:
5562 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST; 5566 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
5563 if (sc->phy.acquire(sc) != 0) 5567 if (sc->phy.acquire(sc) != 0)
5564 break; 5568 break;
5565 CSR_WRITE(sc, WMREG_CTRL, reg); 5569 CSR_WRITE(sc, WMREG_CTRL, reg);
5566 sc->phy.release(sc); 5570 sc->phy.release(sc);
5567 break; 5571 break;
5568 case WM_T_ICH8: 5572 case WM_T_ICH8:
5569 case WM_T_ICH9: 5573 case WM_T_ICH9:
5570 case WM_T_ICH10: 5574 case WM_T_ICH10:
5571 case WM_T_PCH: 5575 case WM_T_PCH:
5572 case WM_T_PCH2: 5576 case WM_T_PCH2:
5573 case WM_T_PCH_LPT: 5577 case WM_T_PCH_LPT:
5574 case WM_T_PCH_SPT: 5578 case WM_T_PCH_SPT:
5575 case WM_T_PCH_CNP: 5579 case WM_T_PCH_CNP:
5576 case WM_T_PCH_TGP: 5580 case WM_T_PCH_TGP:
5577 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST; 5581 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
5578 if (wm_phy_resetisblocked(sc) == false) { 5582 if (wm_phy_resetisblocked(sc) == false) {
5579 /* 5583 /*
5580 * Gate automatic PHY configuration by hardware on 5584 * Gate automatic PHY configuration by hardware on
5581 * non-managed 82579 5585 * non-managed 82579
5582 */ 5586 */
5583 if ((sc->sc_type == WM_T_PCH2) 5587 if ((sc->sc_type == WM_T_PCH2)
5584 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) 5588 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
5585 == 0)) 5589 == 0))
5586 wm_gate_hw_phy_config_ich8lan(sc, true); 5590 wm_gate_hw_phy_config_ich8lan(sc, true);
5587 5591
5588 reg |= CTRL_PHY_RESET; 5592 reg |= CTRL_PHY_RESET;
5589 phy_reset = 1; 5593 phy_reset = 1;
5590 } else 5594 } else
5591 device_printf(sc->sc_dev, "XXX reset is blocked!!!\n"); 5595 device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
5592 if (sc->phy.acquire(sc) != 0) 5596 if (sc->phy.acquire(sc) != 0)
5593 break; 5597 break;
5594 CSR_WRITE(sc, WMREG_CTRL, reg); 5598 CSR_WRITE(sc, WMREG_CTRL, reg);
5595 /* Don't insert a completion barrier when reset */ 5599 /* Don't insert a completion barrier when reset */
5596 delay(20*1000); 5600 delay(20*1000);
5597 /* 5601 /*
5598 * The EXTCNFCTR_MDIO_SW_OWNERSHIP bit is cleared by the reset, 5602 * The EXTCNFCTR_MDIO_SW_OWNERSHIP bit is cleared by the reset,
5599 * so don't use sc->phy.release(sc). Release sc_ich_phymtx 5603 * so don't use sc->phy.release(sc). Release sc_ich_phymtx
5600 * only. See also wm_get_swflag_ich8lan(). 5604 * only. See also wm_get_swflag_ich8lan().
5601 */ 5605 */
5602 mutex_exit(sc->sc_ich_phymtx); 5606 mutex_exit(sc->sc_ich_phymtx);
5603 break; 5607 break;
5604 case WM_T_82580: 5608 case WM_T_82580:
5605 case WM_T_I350: 5609 case WM_T_I350:
5606 case WM_T_I354: 5610 case WM_T_I354:
5607 case WM_T_I210: 5611 case WM_T_I210:
5608 case WM_T_I211: 5612 case WM_T_I211:
5609 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST); 5613 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
5610 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII) 5614 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
5611 CSR_WRITE_FLUSH(sc); 5615 CSR_WRITE_FLUSH(sc);
5612 delay(5000); 5616 delay(5000);
5613 break; 5617 break;
5614 case WM_T_82542_2_0: 5618 case WM_T_82542_2_0:
5615 case WM_T_82542_2_1: 5619 case WM_T_82542_2_1:
5616 case WM_T_82543: 5620 case WM_T_82543:
5617 case WM_T_82540: 5621 case WM_T_82540:
5618 case WM_T_82545: 5622 case WM_T_82545:
5619 case WM_T_82546: 5623 case WM_T_82546:
5620 case WM_T_82571: 5624 case WM_T_82571:
5621 case WM_T_82572: 5625 case WM_T_82572:
5622 case WM_T_82573: 5626 case WM_T_82573:
5623 case WM_T_82574: 5627 case WM_T_82574:
5624 case WM_T_82575: 5628 case WM_T_82575:
5625 case WM_T_82576: 5629 case WM_T_82576:
5626 case WM_T_82583: 5630 case WM_T_82583:
5627 default: 5631 default:
5628 /* Everything else can safely use the documented method. */ 5632 /* Everything else can safely use the documented method. */
5629 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST); 5633 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
5630 break; 5634 break;
5631 } 5635 }
5632 5636
5633 /* Must release the MDIO ownership after MAC reset */ 5637 /* Must release the MDIO ownership after MAC reset */
5634 switch (sc->sc_type) { 5638 switch (sc->sc_type) {
5635 case WM_T_82573: 5639 case WM_T_82573:
5636 case WM_T_82574: 5640 case WM_T_82574:
5637 case WM_T_82583: 5641 case WM_T_82583:
5638 if (error == 0) 5642 if (error == 0)
5639 wm_put_hw_semaphore_82573(sc); 5643 wm_put_hw_semaphore_82573(sc);
5640 break; 5644 break;
5641 default: 5645 default:
5642 break; 5646 break;
5643 } 5647 }
5644 5648
5645 /* Set Phy Config Counter to 50msec */ 5649 /* Set Phy Config Counter to 50msec */
5646 if (sc->sc_type == WM_T_PCH2) { 5650 if (sc->sc_type == WM_T_PCH2) {
5647 reg = CSR_READ(sc, WMREG_FEXTNVM3); 5651 reg = CSR_READ(sc, WMREG_FEXTNVM3);
5648 reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK; 5652 reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
5649 reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS; 5653 reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
5650 CSR_WRITE(sc, WMREG_FEXTNVM3, reg); 5654 CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
5651 } 5655 }
5652 5656
5653 if (phy_reset != 0) 5657 if (phy_reset != 0)
5654 wm_get_cfg_done(sc); 5658 wm_get_cfg_done(sc);
5655 5659
5656 /* Reload EEPROM */ 5660 /* Reload EEPROM */
5657 switch (sc->sc_type) { 5661 switch (sc->sc_type) {
5658 case WM_T_82542_2_0: 5662 case WM_T_82542_2_0:
5659 case WM_T_82542_2_1: 5663 case WM_T_82542_2_1:
5660 case WM_T_82543: 5664 case WM_T_82543:
5661 case WM_T_82544: 5665 case WM_T_82544:
5662 delay(10); 5666 delay(10);
5663 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST; 5667 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
5664 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 5668 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5665 CSR_WRITE_FLUSH(sc); 5669 CSR_WRITE_FLUSH(sc);
5666 delay(2000); 5670 delay(2000);
5667 break; 5671 break;
5668 case WM_T_82540: 5672 case WM_T_82540:
5669 case WM_T_82545: 5673 case WM_T_82545:
5670 case WM_T_82545_3: 5674 case WM_T_82545_3:
5671 case WM_T_82546: 5675 case WM_T_82546:
5672 case WM_T_82546_3: 5676 case WM_T_82546_3:
5673 delay(5*1000); 5677 delay(5*1000);
5674 /* XXX Disable HW ARPs on ASF enabled adapters */ 5678 /* XXX Disable HW ARPs on ASF enabled adapters */
5675 break; 5679 break;
5676 case WM_T_82541: 5680 case WM_T_82541:
5677 case WM_T_82541_2: 5681 case WM_T_82541_2:
5678 case WM_T_82547: 5682 case WM_T_82547:
5679 case WM_T_82547_2: 5683 case WM_T_82547_2:
5680 delay(20000); 5684 delay(20000);
5681 /* XXX Disable HW ARPs on ASF enabled adapters */ 5685 /* XXX Disable HW ARPs on ASF enabled adapters */
5682 break; 5686 break;
5683 case WM_T_82571: 5687 case WM_T_82571:
5684 case WM_T_82572: 5688 case WM_T_82572:
5685 case WM_T_82573: 5689 case WM_T_82573:
5686 case WM_T_82574: 5690 case WM_T_82574:
5687 case WM_T_82583: 5691 case WM_T_82583:
5688 if (sc->sc_flags & WM_F_EEPROM_FLASH) { 5692 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
5689 delay(10); 5693 delay(10);
5690 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST; 5694 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
5691 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 5695 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5692 CSR_WRITE_FLUSH(sc); 5696 CSR_WRITE_FLUSH(sc);
5693 } 5697 }
5694 /* check EECD_EE_AUTORD */ 5698 /* check EECD_EE_AUTORD */
5695 wm_get_auto_rd_done(sc); 5699 wm_get_auto_rd_done(sc);
5696 /* 5700 /*
5697 * Phy configuration from NVM just starts after EECD_AUTO_RD 5701 * Phy configuration from NVM just starts after EECD_AUTO_RD
5698 * is set. 5702 * is set.
5699 */ 5703 */
5700 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574) 5704 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
5701 || (sc->sc_type == WM_T_82583)) 5705 || (sc->sc_type == WM_T_82583))
5702 delay(25*1000); 5706 delay(25*1000);
5703 break; 5707 break;
5704 case WM_T_82575: 5708 case WM_T_82575:
5705 case WM_T_82576: 5709 case WM_T_82576:
5706 case WM_T_82580: 5710 case WM_T_82580:
5707 case WM_T_I350: 5711 case WM_T_I350:
5708 case WM_T_I354: 5712 case WM_T_I354:
5709 case WM_T_I210: 5713 case WM_T_I210:
5710 case WM_T_I211: 5714 case WM_T_I211:
5711 case WM_T_80003: 5715 case WM_T_80003:
5712 /* check EECD_EE_AUTORD */ 5716 /* check EECD_EE_AUTORD */
5713 wm_get_auto_rd_done(sc); 5717 wm_get_auto_rd_done(sc);
5714 break; 5718 break;
5715 case WM_T_ICH8: 5719 case WM_T_ICH8:
5716 case WM_T_ICH9: 5720 case WM_T_ICH9:
5717 case WM_T_ICH10: 5721 case WM_T_ICH10:
5718 case WM_T_PCH: 5722 case WM_T_PCH:
5719 case WM_T_PCH2: 5723 case WM_T_PCH2:
5720 case WM_T_PCH_LPT: 5724 case WM_T_PCH_LPT:
5721 case WM_T_PCH_SPT: 5725 case WM_T_PCH_SPT:
5722 case WM_T_PCH_CNP: 5726 case WM_T_PCH_CNP:
5723 case WM_T_PCH_TGP: 5727 case WM_T_PCH_TGP:
5724 break; 5728 break;
5725 default: 5729 default:
5726 panic("%s: unknown type\n", __func__); 5730 panic("%s: unknown type\n", __func__);
5727 } 5731 }
5728 5732
5729 /* Check whether EEPROM is present or not */ 5733 /* Check whether EEPROM is present or not */
5730 switch (sc->sc_type) { 5734 switch (sc->sc_type) {
5731 case WM_T_82575: 5735 case WM_T_82575:
5732 case WM_T_82576: 5736 case WM_T_82576:
5733 case WM_T_82580: 5737 case WM_T_82580:
5734 case WM_T_I350: 5738 case WM_T_I350:
5735 case WM_T_I354: 5739 case WM_T_I354:
5736 case WM_T_ICH8: 5740 case WM_T_ICH8:
5737 case WM_T_ICH9: 5741 case WM_T_ICH9:
5738 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) { 5742 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
5739 /* Not found */ 5743 /* Not found */
5740 sc->sc_flags |= WM_F_EEPROM_INVALID; 5744 sc->sc_flags |= WM_F_EEPROM_INVALID;
5741 if (sc->sc_type == WM_T_82575) 5745 if (sc->sc_type == WM_T_82575)
5742 wm_reset_init_script_82575(sc); 5746 wm_reset_init_script_82575(sc);
5743 } 5747 }
5744 break; 5748 break;
5745 default: 5749 default:
5746 break; 5750 break;
5747 } 5751 }
5748 5752
5749 if (phy_reset != 0) 5753 if (phy_reset != 0)
5750 wm_phy_post_reset(sc); 5754 wm_phy_post_reset(sc);
5751 5755
5752 if ((sc->sc_type == WM_T_82580) 5756 if ((sc->sc_type == WM_T_82580)
5753 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) { 5757 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
5754 /* Clear global device reset status bit */ 5758 /* Clear global device reset status bit */
5755 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET); 5759 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
5756 } 5760 }
5757 5761
5758 /* Clear any pending interrupt events. */ 5762 /* Clear any pending interrupt events. */
5759 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); 5763 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5760 reg = CSR_READ(sc, WMREG_ICR); 5764 reg = CSR_READ(sc, WMREG_ICR);
5761 if (wm_is_using_msix(sc)) { 5765 if (wm_is_using_msix(sc)) {
5762 if (sc->sc_type != WM_T_82574) { 5766 if (sc->sc_type != WM_T_82574) {
5763 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU); 5767 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5764 CSR_WRITE(sc, WMREG_EIAC, 0); 5768 CSR_WRITE(sc, WMREG_EIAC, 0);
5765 } else 5769 } else
5766 CSR_WRITE(sc, WMREG_EIAC_82574, 0); 5770 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5767 } 5771 }
5768 5772
5769 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) 5773 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5770 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) 5774 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5771 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) 5775 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
5772 || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP) 5776 || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)
5773 || (sc->sc_type == WM_T_PCH_TGP)) { 5777 || (sc->sc_type == WM_T_PCH_TGP)) {
5774 reg = CSR_READ(sc, WMREG_KABGTXD); 5778 reg = CSR_READ(sc, WMREG_KABGTXD);
5775 reg |= KABGTXD_BGSQLBIAS; 5779 reg |= KABGTXD_BGSQLBIAS;
5776 CSR_WRITE(sc, WMREG_KABGTXD, reg); 5780 CSR_WRITE(sc, WMREG_KABGTXD, reg);
5777 } 5781 }
5778 5782
5779 /* Reload sc_ctrl */ 5783 /* Reload sc_ctrl */
5780 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL); 5784 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5781 5785
5782 if (sc->sc_type == WM_T_I354) { 5786 if (sc->sc_type == WM_T_I354) {
5783#if 0 5787#if 0
5784 /* I354 uses an external PHY */ 5788 /* I354 uses an external PHY */
5785 wm_set_eee_i354(sc); 5789 wm_set_eee_i354(sc);
5786#endif 5790#endif
5787 } else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)) 5791 } else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
5788 wm_set_eee_i350(sc); 5792 wm_set_eee_i350(sc);
5789 5793
5790 /* 5794 /*
5791 * For PCH, this write will make sure that any noise will be detected 5795 * For PCH, this write will make sure that any noise will be detected
5792 * as a CRC error and be dropped rather than show up as a bad packet 5796 * as a CRC error and be dropped rather than show up as a bad packet
5793 * to the DMA engine 5797 * to the DMA engine
5794 */ 5798 */
5795 if (sc->sc_type == WM_T_PCH) 5799 if (sc->sc_type == WM_T_PCH)
5796 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565); 5800 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
5797 5801
5798 if (sc->sc_type >= WM_T_82544) 5802 if (sc->sc_type >= WM_T_82544)
5799 CSR_WRITE(sc, WMREG_WUC, 0); 5803 CSR_WRITE(sc, WMREG_WUC, 0);
5800 5804
5801 if (sc->sc_type < WM_T_82575) 5805 if (sc->sc_type < WM_T_82575)
5802 wm_disable_aspm(sc); /* Workaround for some chips */ 5806 wm_disable_aspm(sc); /* Workaround for some chips */
5803 5807
5804 wm_reset_mdicnfg_82580(sc); 5808 wm_reset_mdicnfg_82580(sc);
5805 5809
5806 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0) 5810 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
5807 wm_pll_workaround_i210(sc); 5811 wm_pll_workaround_i210(sc);
5808 5812
5809 if (sc->sc_type == WM_T_80003) { 5813 if (sc->sc_type == WM_T_80003) {
5810 /* Default to TRUE to enable the MDIC W/A */ 5814 /* Default to TRUE to enable the MDIC W/A */
5811 sc->sc_flags |= WM_F_80003_MDIC_WA; 5815 sc->sc_flags |= WM_F_80003_MDIC_WA;
5812 5816
5813 rv = wm_kmrn_readreg(sc, 5817 rv = wm_kmrn_readreg(sc,
5814 KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg); 5818 KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
5815 if (rv == 0) { 5819 if (rv == 0) {
5816 if ((kmreg & KUMCTRLSTA_OPMODE_MASK) 5820 if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
5817 == KUMCTRLSTA_OPMODE_INBAND_MDIO) 5821 == KUMCTRLSTA_OPMODE_INBAND_MDIO)
5818 sc->sc_flags &= ~WM_F_80003_MDIC_WA; 5822 sc->sc_flags &= ~WM_F_80003_MDIC_WA;
5819 else 5823 else
5820 sc->sc_flags |= WM_F_80003_MDIC_WA; 5824 sc->sc_flags |= WM_F_80003_MDIC_WA;
5821 } 5825 }
5822 } 5826 }
5823} 5827}
5824 5828
/*
 * wm_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 *
 *	Allocates a fresh mbuf cluster, loads it into the slot's DMA map
 *	and (re)initializes the hardware RX descriptor.  Returns 0 on
 *	success or ENOBUFS if no mbuf/cluster could be allocated.
 *	Must be called with the RX queue lock held.
 */
static int
wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
{
	struct wm_softc *sc = rxq->rxq_sc;
	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
	struct mbuf *m;
	int error;

	KASSERT(mutex_owned(rxq->rxq_lock));

	/* Allocate an mbuf header; may fail under memory pressure. */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	/* Attach a cluster; without M_EXT there is no usable buffer. */
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	/* Unload any previous mapping before reusing the DMA map. */
	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	/* Make the whole cluster available to the hardware. */
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
	    BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error) {
		/* XXX XXX XXX */
		aprint_error_dev(sc->sc_dev,
		    "unable to load rx DMA map %d, error = %d\n", idx, error);
		panic("wm_add_rxbuf");
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	/*
	 * On NEWQUEUE (newer) chips only write the descriptor while the
	 * receiver is enabled; older chips always get the descriptor
	 * initialized here.
	 */
	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		if ((sc->sc_rctl & RCTL_EN) != 0)
			wm_init_rxdesc(rxq, idx);
	} else
		wm_init_rxdesc(rxq, idx);

	return 0;
}
5876 5880
5877/* 5881/*
5878 * wm_rxdrain: 5882 * wm_rxdrain:
5879 * 5883 *
5880 * Drain the receive queue. 5884 * Drain the receive queue.
5881 */ 5885 */
5882static void 5886static void
5883wm_rxdrain(struct wm_rxqueue *rxq) 5887wm_rxdrain(struct wm_rxqueue *rxq)
5884{ 5888{
5885 struct wm_softc *sc = rxq->rxq_sc; 5889 struct wm_softc *sc = rxq->rxq_sc;
5886 struct wm_rxsoft *rxs; 5890 struct wm_rxsoft *rxs;
5887 int i; 5891 int i;
5888 5892
5889 KASSERT(mutex_owned(rxq->rxq_lock)); 5893 KASSERT(mutex_owned(rxq->rxq_lock));
5890 5894
5891 for (i = 0; i < WM_NRXDESC; i++) { 5895 for (i = 0; i < WM_NRXDESC; i++) {
5892 rxs = &rxq->rxq_soft[i]; 5896 rxs = &rxq->rxq_soft[i];
5893 if (rxs->rxs_mbuf != NULL) { 5897 if (rxs->rxs_mbuf != NULL) {
5894 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 5898 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5895 m_freem(rxs->rxs_mbuf); 5899 m_freem(rxs->rxs_mbuf);
5896 rxs->rxs_mbuf = NULL; 5900 rxs->rxs_mbuf = NULL;
5897 } 5901 }
5898 } 5902 }
5899} 5903}
5900 5904
5901/* 5905/*
5902 * Setup registers for RSS. 5906 * Setup registers for RSS.
5903 * 5907 *
5904 * XXX not yet VMDq support 5908 * XXX not yet VMDq support
5905 */ 5909 */
5906static void 5910static void
5907wm_init_rss(struct wm_softc *sc) 5911wm_init_rss(struct wm_softc *sc)
5908{ 5912{
5909 uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS]; 5913 uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
5910 int i; 5914 int i;
5911 5915
5912 CTASSERT(sizeof(rss_key) == RSS_KEYSIZE); 5916 CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
5913 5917
5914 for (i = 0; i < RETA_NUM_ENTRIES; i++) { 5918 for (i = 0; i < RETA_NUM_ENTRIES; i++) {
5915 unsigned int qid, reta_ent; 5919 unsigned int qid, reta_ent;
5916 5920
5917 qid = i % sc->sc_nqueues; 5921 qid = i % sc->sc_nqueues;
5918 switch (sc->sc_type) { 5922 switch (sc->sc_type) {
5919 case WM_T_82574: 5923 case WM_T_82574:
5920 reta_ent = __SHIFTIN(qid, 5924 reta_ent = __SHIFTIN(qid,
5921 RETA_ENT_QINDEX_MASK_82574); 5925 RETA_ENT_QINDEX_MASK_82574);
5922 break; 5926 break;
5923 case WM_T_82575: 5927 case WM_T_82575:
5924 reta_ent = __SHIFTIN(qid, 5928 reta_ent = __SHIFTIN(qid,
5925 RETA_ENT_QINDEX1_MASK_82575); 5929 RETA_ENT_QINDEX1_MASK_82575);
5926 break; 5930 break;
5927 default: 5931 default:
5928 reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK); 5932 reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
5929 break; 5933 break;
5930 } 5934 }
5931 5935
5932 reta_reg = CSR_READ(sc, WMREG_RETA_Q(i)); 5936 reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
5933 reta_reg &= ~RETA_ENTRY_MASK_Q(i); 5937 reta_reg &= ~RETA_ENTRY_MASK_Q(i);
5934 reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i)); 5938 reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
5935 CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg); 5939 CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
5936 } 5940 }
5937 5941
5938 rss_getkey((uint8_t *)rss_key); 5942 rss_getkey((uint8_t *)rss_key);
5939 for (i = 0; i < RSSRK_NUM_REGS; i++) 5943 for (i = 0; i < RSSRK_NUM_REGS; i++)
5940 CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]); 5944 CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
5941 5945
5942 if (sc->sc_type == WM_T_82574) 5946 if (sc->sc_type == WM_T_82574)
5943 mrqc = MRQC_ENABLE_RSS_MQ_82574; 5947 mrqc = MRQC_ENABLE_RSS_MQ_82574;
5944 else 5948 else
5945 mrqc = MRQC_ENABLE_RSS_MQ; 5949 mrqc = MRQC_ENABLE_RSS_MQ;
5946 5950
5947 /* 5951 /*
5948 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata. 5952 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
5949 * See IPV6EXDIS bit in wm_initialize_hardware_bits(). 5953 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
5950 */ 5954 */
5951 mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP); 5955 mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
5952 mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP); 5956 mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
5953#if 0 5957#if 0
5954 mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP); 5958 mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
5955 mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX; 5959 mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
5956#endif 5960#endif
5957 mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX; 5961 mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
5958 5962
5959 CSR_WRITE(sc, WMREG_MRQC, mrqc); 5963 CSR_WRITE(sc, WMREG_MRQC, mrqc);
5960} 5964}
5961 5965
/*
 * Adjust the TX and RX queue numbers which the system actually uses.
 *
 * The numbers are affected by the below parameters.
 * - The number of hardware queues
 * - The number of MSI-X vectors (= "nvectors" argument)
 * - ncpu
 */
5970static void 5974static void
5971wm_adjust_qnum(struct wm_softc *sc, int nvectors) 5975wm_adjust_qnum(struct wm_softc *sc, int nvectors)
5972{ 5976{
5973 int hw_ntxqueues, hw_nrxqueues, hw_nqueues; 5977 int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
5974 5978
5975 if (nvectors < 2) { 5979 if (nvectors < 2) {
5976 sc->sc_nqueues = 1; 5980 sc->sc_nqueues = 1;
5977 return; 5981 return;
5978 } 5982 }
5979 5983
5980 switch (sc->sc_type) { 5984 switch (sc->sc_type) {
5981 case WM_T_82572: 5985 case WM_T_82572:
5982 hw_ntxqueues = 2; 5986 hw_ntxqueues = 2;
5983 hw_nrxqueues = 2; 5987 hw_nrxqueues = 2;
5984 break; 5988 break;
5985 case WM_T_82574: 5989 case WM_T_82574:
5986 hw_ntxqueues = 2; 5990 hw_ntxqueues = 2;
5987 hw_nrxqueues = 2; 5991 hw_nrxqueues = 2;
5988 break; 5992 break;
5989 case WM_T_82575: 5993 case WM_T_82575:
5990 hw_ntxqueues = 4; 5994 hw_ntxqueues = 4;
5991 hw_nrxqueues = 4; 5995 hw_nrxqueues = 4;
5992 break; 5996 break;
5993 case WM_T_82576: 5997 case WM_T_82576:
5994 hw_ntxqueues = 16; 5998 hw_ntxqueues = 16;
5995 hw_nrxqueues = 16; 5999 hw_nrxqueues = 16;
5996 break; 6000 break;
5997 case WM_T_82580: 6001 case WM_T_82580:
5998 case WM_T_I350: 6002 case WM_T_I350:
5999 case WM_T_I354: 6003 case WM_T_I354:
6000 hw_ntxqueues = 8; 6004 hw_ntxqueues = 8;
6001 hw_nrxqueues = 8; 6005 hw_nrxqueues = 8;
6002 break; 6006 break;
6003 case WM_T_I210: 6007 case WM_T_I210:
6004 hw_ntxqueues = 4; 6008 hw_ntxqueues = 4;
6005 hw_nrxqueues = 4; 6009 hw_nrxqueues = 4;
6006 break; 6010 break;
6007 case WM_T_I211: 6011 case WM_T_I211:
6008 hw_ntxqueues = 2; 6012 hw_ntxqueues = 2;
6009 hw_nrxqueues = 2; 6013 hw_nrxqueues = 2;
6010 break; 6014 break;
6011 /* 6015 /*
6012 * The below Ethernet controllers do not support MSI-X; 6016 * The below Ethernet controllers do not support MSI-X;
6013 * this driver doesn't let them use multiqueue. 6017 * this driver doesn't let them use multiqueue.
6014 * - WM_T_80003 6018 * - WM_T_80003
6015 * - WM_T_ICH8 6019 * - WM_T_ICH8
6016 * - WM_T_ICH9 6020 * - WM_T_ICH9
6017 * - WM_T_ICH10 6021 * - WM_T_ICH10
6018 * - WM_T_PCH 6022 * - WM_T_PCH
6019 * - WM_T_PCH2 6023 * - WM_T_PCH2
6020 * - WM_T_PCH_LPT 6024 * - WM_T_PCH_LPT
6021 */ 6025 */
6022 default: 6026 default:
6023 hw_ntxqueues = 1; 6027 hw_ntxqueues = 1;
6024 hw_nrxqueues = 1; 6028 hw_nrxqueues = 1;
6025 break; 6029 break;
6026 } 6030 }
6027 6031
6028 hw_nqueues = min(hw_ntxqueues, hw_nrxqueues); 6032 hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
6029 6033
6030 /* 6034 /*
6031 * As queues more than MSI-X vectors cannot improve scaling, we limit 6035 * As queues more than MSI-X vectors cannot improve scaling, we limit
6032 * the number of queues used actually. 6036 * the number of queues used actually.
6033 */ 6037 */
6034 if (nvectors < hw_nqueues + 1) 6038 if (nvectors < hw_nqueues + 1)
6035 sc->sc_nqueues = nvectors - 1; 6039 sc->sc_nqueues = nvectors - 1;
6036 else 6040 else
6037 sc->sc_nqueues = hw_nqueues; 6041 sc->sc_nqueues = hw_nqueues;
6038 6042
6039 /* 6043 /*
6040 * As queues more than CPUs cannot improve scaling, we limit 6044 * As queues more than CPUs cannot improve scaling, we limit
6041 * the number of queues used actually. 6045 * the number of queues used actually.
6042 */ 6046 */
6043 if (ncpu < sc->sc_nqueues) 6047 if (ncpu < sc->sc_nqueues)
6044 sc->sc_nqueues = ncpu; 6048 sc->sc_nqueues = ncpu;
6045} 6049}
6046 6050
6047static inline bool 6051static inline bool
6048wm_is_using_msix(struct wm_softc *sc) 6052wm_is_using_msix(struct wm_softc *sc)
6049{ 6053{
6050 6054
6051 return (sc->sc_nintrs > 1); 6055 return (sc->sc_nintrs > 1);
6052} 6056}
6053 6057
6054static inline bool 6058static inline bool
6055wm_is_using_multiqueue(struct wm_softc *sc) 6059wm_is_using_multiqueue(struct wm_softc *sc)
6056{ 6060{
6057 6061
6058 return (sc->sc_nqueues > 1); 6062 return (sc->sc_nqueues > 1);
6059} 6063}
6060 6064
6061static int 6065static int
6062wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx) 6066wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
6063{ 6067{
6064 struct wm_queue *wmq = &sc->sc_queue[qidx]; 6068 struct wm_queue *wmq = &sc->sc_queue[qidx];
6065 6069
6066 wmq->wmq_id = qidx; 6070 wmq->wmq_id = qidx;
6067 wmq->wmq_intr_idx = intr_idx; 6071 wmq->wmq_intr_idx = intr_idx;
6068 wmq->wmq_si = softint_establish(SOFTINT_NET | WM_SOFTINT_FLAGS, 6072 wmq->wmq_si = softint_establish(SOFTINT_NET | WM_SOFTINT_FLAGS,
6069 wm_handle_queue, wmq); 6073 wm_handle_queue, wmq);
6070 if (wmq->wmq_si != NULL) 6074 if (wmq->wmq_si != NULL)
6071 return 0; 6075 return 0;
6072 6076
6073 aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n", 6077 aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
6074 wmq->wmq_id); 6078 wmq->wmq_id);
6075 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]); 6079 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
6076 sc->sc_ihs[wmq->wmq_intr_idx] = NULL; 6080 sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
6077 return ENOMEM; 6081 return ENOMEM;
6078} 6082}
6079 6083
/*
 * Both single interrupt MSI and INTx can use this function.
 *
 * Allocates the TX/RX queue structures, establishes the single
 * interrupt handler (wm_intr_legacy) and its softint, and sets
 * sc_nintrs to 1. Returns 0 on success, ENOMEM on failure.
 *
 * NOTE(review): on pci_intr_establish_xname() failure the queues
 * allocated by wm_alloc_txrx_queues() are not freed here — presumably
 * the attach path handles that; confirm against the caller.
 */
static int
wm_setup_legacy(struct wm_softc *sc)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	const char *intrstr = NULL;
	char intrbuf[PCI_INTRSTR_LEN];
	int error;

	error = wm_alloc_txrx_queues(sc);
	if (error) {
		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
		    error);
		return ENOMEM;
	}
	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
	    sizeof(intrbuf));
#ifdef WM_MPSAFE
	/* Mark the handler MP-safe before establishing it. */
	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
#endif
	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
	if (sc->sc_ihs[0] == NULL) {
		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
		    (pci_intr_type(pc, sc->sc_intrs[0])
			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
		return ENOMEM;
	}

	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
	sc->sc_nintrs = 1;

	/* Single queue (qidx 0) on the single vector (intr_idx 0). */
	return wm_softint_establish_queue(sc, 0, 0);
}
6116 6120
/*
 * Set up MSI-X: one TX/RX vector per queue plus one LINK vector.
 *
 * Establishes a wm_txrxintr_msix handler and a softint per queue,
 * distributes each TX/RX vector to a CPU round-robin, then establishes
 * the wm_linkintr_msix handler on the last vector. Sets sc_nintrs to
 * sc_nqueues + 1 on success.
 *
 * Returns 0 on success, ENOMEM on failure; on failure every TX/RX
 * interrupt established so far is disestablished again.
 */
static int
wm_setup_msix(struct wm_softc *sc)
{
	void *vih;
	kcpuset_t *affinity;
	int qidx, error, intr_idx, txrx_established;
	pci_chipset_tag_t pc = sc->sc_pc;
	const char *intrstr = NULL;
	char intrbuf[PCI_INTRSTR_LEN];
	char intr_xname[INTRDEVNAMEBUF];

	if (sc->sc_nqueues < ncpu) {
		/*
		 * To avoid other devices' interrupts, the affinity of Tx/Rx
		 * interrupts starts from CPU#1.
		 */
		sc->sc_affinity_offset = 1;
	} else {
		/*
		 * In this case, this device uses all CPUs. So, we unify the
		 * affinitied cpu_index with the MSI-X vector number for
		 * readability.
		 */
		sc->sc_affinity_offset = 0;
	}

	error = wm_alloc_txrx_queues(sc);
	if (error) {
		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
		    error);
		return ENOMEM;
	}

	kcpuset_create(&affinity, false);
	intr_idx = 0;

	/*
	 * TX and RX
	 */
	txrx_established = 0;
	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
		struct wm_queue *wmq = &sc->sc_queue[qidx];
		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;

		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
		    sizeof(intrbuf));
#ifdef WM_MPSAFE
		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
		    PCI_INTR_MPSAFE, true);
#endif
		memset(intr_xname, 0, sizeof(intr_xname));
		/* Per-vector name, e.g. "wm0TXRX0", shown by intrctl(8). */
		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
		    device_xname(sc->sc_dev), qidx);
		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
		if (vih == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "unable to establish MSI-X(for TX and RX)%s%s\n",
			    intrstr ? " at " : "",
			    intrstr ? intrstr : "");

			goto fail;
		}
		kcpuset_zero(affinity);
		/* Round-robin affinity */
		kcpuset_set(affinity, affinity_to);
		error = interrupt_distribute(vih, affinity, NULL);
		if (error == 0) {
			aprint_normal_dev(sc->sc_dev,
			    "for TX and RX interrupting at %s affinity to %u\n",
			    intrstr, affinity_to);
		} else {
			/* Affinity is best-effort; continue without it. */
			aprint_normal_dev(sc->sc_dev,
			    "for TX and RX interrupting at %s\n", intrstr);
		}
		sc->sc_ihs[intr_idx] = vih;
		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
			goto fail;
		txrx_established++;
		intr_idx++;
	}

	/* LINK */
	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
	    sizeof(intrbuf));
#ifdef WM_MPSAFE
	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
#endif
	memset(intr_xname, 0, sizeof(intr_xname));
	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
	    device_xname(sc->sc_dev));
	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
	if (vih == NULL) {
		aprint_error_dev(sc->sc_dev,
		    "unable to establish MSI-X(for LINK)%s%s\n",
		    intrstr ? " at " : "",
		    intrstr ? intrstr : "");

		goto fail;
	}
	/* Keep default affinity to LINK interrupt */
	aprint_normal_dev(sc->sc_dev,
	    "for LINK interrupting at %s\n", intrstr);
	sc->sc_ihs[intr_idx] = vih;
	sc->sc_link_intr_idx = intr_idx;

	sc->sc_nintrs = sc->sc_nqueues + 1;
	kcpuset_destroy(affinity);
	return 0;

fail:
	/*
	 * Undo the TX/RX vectors established so far. A queue whose softint
	 * failed was already rolled back by wm_softint_establish_queue().
	 */
	for (qidx = 0; qidx < txrx_established; qidx++) {
		struct wm_queue *wmq = &sc->sc_queue[qidx];
		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
	}

	kcpuset_destroy(affinity);
	return ENOMEM;
}
6237 6241
/*
 * Clear the per-queue and core stopping flags so the TX/RX paths run
 * again. Each flag is flipped under its own queue lock; the core flag
 * is cleared last. Note the TX-before-RX, queues-before-core order is
 * the mirror image of wm_set_stopping_flags().
 */
static void
wm_unset_stopping_flags(struct wm_softc *sc)
{
	int i;

	KASSERT(WM_CORE_LOCKED(sc));

	/* Must unset stopping flags in ascending order. */
	for (i = 0; i < sc->sc_nqueues; i++) {
		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;

		mutex_enter(txq->txq_lock);
		txq->txq_stopping = false;
		mutex_exit(txq->txq_lock);

		mutex_enter(rxq->rxq_lock);
		rxq->rxq_stopping = false;
		mutex_exit(rxq->rxq_lock);
	}

	sc->sc_core_stopping = false;
}
6261 6265
/*
 * Set the core and per-queue stopping flags so the TX/RX paths wind
 * down. The core flag is set first, then each queue's RX and TX flags
 * under their own locks — the exact reverse of the order used by
 * wm_unset_stopping_flags().
 */
static void
wm_set_stopping_flags(struct wm_softc *sc)
{
	int i;

	KASSERT(WM_CORE_LOCKED(sc));

	sc->sc_core_stopping = true;

	/* Must set stopping flags in ascending order. */
	for (i = 0; i < sc->sc_nqueues; i++) {
		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;

		mutex_enter(rxq->rxq_lock);
		rxq->rxq_stopping = true;
		mutex_exit(rxq->rxq_lock);

		mutex_enter(txq->txq_lock);
		txq->txq_stopping = true;
		mutex_exit(txq->txq_lock);
	}
}
6285 6289
/*
 * Write interrupt interval value to ITR or EITR.
 *
 * Only writes the register when wmq_set_itr is pending (set by
 * wm_itrs_calculate()); clears the flag afterwards. The register and
 * encoding depend on the controller generation:
 * - NEWQUEUE chips use per-vector EITR,
 * - 82574 with MSI-X uses its own EITR variant,
 * - everything else uses the single legacy ITR register.
 */
static void
wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
{

	if (!wmq->wmq_set_itr)
		return;

	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);

		/*
		 * 82575 doesn't have CNT_INGR field.
		 * So, overwrite counter field by software.
		 */
		if (sc->sc_type == WM_T_82575)
			eitr |= __SHIFTIN(wmq->wmq_itr,
			    EITR_COUNTER_MASK_82575);
		else
			eitr |= EITR_CNT_INGR;

		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
		/*
		 * 82574 has both ITR and EITR. SET EITR when we use
		 * the multi queue function with MSI-X.
		 */
		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
	} else {
		/* Legacy single-queue path: only queue 0 may get here. */
		KASSERT(wmq->wmq_id == 0);
		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
	}

	wmq->wmq_set_itr = false;
}
6324 6328
/*
 * TODO
 * The dynamic itr calculation below is almost the same as Linux igb,
 * however it does not fit wm(4) well. So AIM stays disabled (the whole
 * body is under "#ifdef NOTYET") until we find an appropriate itr
 * calculation.
 */
/*
 * Calculate the interrupt interval value to be written to the register
 * by wm_itrs_writereg(). This function does not write the ITR/EITR
 * register itself; it only stores the new value in wmq_itr and raises
 * wmq_set_itr when the value changed.
 */
static void
wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
{
#ifdef NOTYET
	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
	struct wm_txqueue *txq = &wmq->wmq_txq;
	uint32_t avg_size = 0;
	uint32_t new_itr;

	/* Average frame size over the interval, larger of RX and TX. */
	if (rxq->rxq_packets)
		avg_size = rxq->rxq_bytes / rxq->rxq_packets;
	if (txq->txq_packets)
		avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);

	if (avg_size == 0) {
		new_itr = 450;	/* restore default value */
		goto out;
	}

	/* Add 24 bytes to size to account for CRC, preamble, and gap */
	avg_size += 24;

	/* Don't starve jumbo frames */
	avg_size = min(avg_size, 3000);

	/* Give a little boost to mid-size frames */
	if ((avg_size > 300) && (avg_size < 1200))
		new_itr = avg_size / 3;
	else
		new_itr = avg_size / 2;

out:
	/*
	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
	 */
	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
		new_itr *= 4;

	if (new_itr != wmq->wmq_itr) {
		wmq->wmq_itr = new_itr;
		wmq->wmq_set_itr = true;
	} else
		wmq->wmq_set_itr = false;

	/* Restart byte/packet accounting for the next interval. */
	rxq->rxq_packets = 0;
	rxq->rxq_bytes = 0;
	txq->txq_packets = 0;
	txq->txq_bytes = 0;
#endif
}
6386 6390
/*
 * Create the per-device sysctl tree: hw.<ifname> with a txrx_workqueue
 * knob, one node per queue exposing TX/RX queue state, and (under
 * WM_DEBUG) a debug_flags knob.
 *
 * Failures of the top-level nodes tear the whole tree down; a failure
 * inside the per-queue loop only breaks out of the loop, leaving the
 * nodes created so far in place (rv is not updated there, so no
 * teardown happens and no error is reported for those).
 */
static void
wm_init_sysctls(struct wm_softc *sc)
{
	struct sysctllog **log;
	const struct sysctlnode *rnode, *qnode, *cnode;
	int i, rv;
	const char *dvname;

	log = &sc->sc_sysctllog;
	dvname = device_xname(sc->sc_dev);

	/* Root node: hw.<ifname> */
	rv = sysctl_createv(log, 0, NULL, &rnode,
	    0, CTLTYPE_NODE, dvname,
	    SYSCTL_DESCR("wm information and settings"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
	if (rv != 0)
		goto err;

	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "txrx_workqueue",
	    SYSCTL_DESCR("Use workqueue for packet processing"),
	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
	if (rv != 0)
		goto teardown;

	/* One "qN" subtree per TX/RX queue pair. */
	for (i = 0; i < sc->sc_nqueues; i++) {
		struct wm_queue *wmq = &sc->sc_queue[i];
		struct wm_txqueue *txq = &wmq->wmq_txq;
		struct wm_rxqueue *rxq = &wmq->wmq_rxq;

		snprintf(sc->sc_queue[i].sysctlname,
		    sizeof(sc->sc_queue[i].sysctlname), "q%d", i);

		if (sysctl_createv(log, 0, &rnode, &qnode,
		    0, CTLTYPE_NODE,
		    sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &qnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txq_free", SYSCTL_DESCR("TX queue free"),
		    NULL, 0, &txq->txq_free,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;
		/* txd_head/txd_tail read the hardware registers on demand. */
		if (sysctl_createv(log, 0, &qnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_head", SYSCTL_DESCR("TX descriptor head"),
		    wm_sysctl_tdh_handler, 0, (void *)txq,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;
		if (sysctl_createv(log, 0, &qnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_tail", SYSCTL_DESCR("TX descriptor tail"),
		    wm_sysctl_tdt_handler, 0, (void *)txq,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;
		if (sysctl_createv(log, 0, &qnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txq_next", SYSCTL_DESCR("TX queue next"),
		    NULL, 0, &txq->txq_next,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;
		if (sysctl_createv(log, 0, &qnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txq_sfree", SYSCTL_DESCR("TX queue sfree"),
		    NULL, 0, &txq->txq_sfree,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;
		if (sysctl_createv(log, 0, &qnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txq_snext", SYSCTL_DESCR("TX queue snext"),
		    NULL, 0, &txq->txq_snext,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;
		if (sysctl_createv(log, 0, &qnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txq_sdirty", SYSCTL_DESCR("TX queue sdirty"),
		    NULL, 0, &txq->txq_sdirty,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;
		if (sysctl_createv(log, 0, &qnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txq_flags", SYSCTL_DESCR("TX queue flags"),
		    NULL, 0, &txq->txq_flags,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;
		if (sysctl_createv(log, 0, &qnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_BOOL,
		    "txq_stopping", SYSCTL_DESCR("TX queue stopping"),
		    NULL, 0, &txq->txq_stopping,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;
		if (sysctl_createv(log, 0, &qnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_BOOL,
		    "txq_sending", SYSCTL_DESCR("TX queue sending"),
		    NULL, 0, &txq->txq_sending,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &qnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
		    NULL, 0, &rxq->rxq_ptr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;
	}

#ifdef WM_DEBUG
	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "debug_flags",
	    SYSCTL_DESCR(
		    "Debug flags:\n"	\
		    "\t0x01 LINK\n"	\
		    "\t0x02 TX\n"	\
		    "\t0x04 RX\n"	\
		    "\t0x08 GMII\n"	\
		    "\t0x10 MANAGE\n"	\
		    "\t0x20 NVM\n"	\
		    "\t0x40 INIT\n"	\
		    "\t0x80 LOCK"),
	    wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
	if (rv != 0)
		goto teardown;
#endif

	return;

teardown:
	sysctl_teardown(log);
err:
	sc->sc_sysctllog = NULL;
	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
	    __func__, rv);
}
6522 6526
/*
 * wm_update_stats:
 *
 *	Harvest the MAC's hardware statistics registers into the driver's
 *	event counters and fold the summary counts into the generic
 *	interface statistics (if_collisions, if_ierrors, if_iqdrops).
 *
 *	NOTE(review): the read order is significant — these statistics
 *	registers appear to be clear-on-read (the RQDPC handling below
 *	relies on that for pre-I210 parts); confirm per-register against
 *	the datasheet before reordering anything here.
 *
 *	Which registers exist depends on the MAC generation, hence the
 *	sc_type / WM_IS_ICHPCH() checks throughout.
 */
static void
wm_update_stats(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint64_t crcerrs, algnerrc, symerrc, mpc, colc, sec, rlec, rxerrc,
	    cexterr;
	/* Sum of per-RX-queue descriptor-ring drops (RQDPC), all queues. */
	uint64_t total_qdrop = 0;

	/*
	 * These are read into locals (not added to the evcnts directly)
	 * because they are also folded into if_collisions/if_ierrors/
	 * if_iqdrops at the bottom of this function.
	 */
	crcerrs = CSR_READ(sc, WMREG_CRCERRS);
	symerrc = CSR_READ(sc, WMREG_SYMERRC);
	mpc = CSR_READ(sc, WMREG_MPC);
	colc = CSR_READ(sc, WMREG_COLC);
	sec = CSR_READ(sc, WMREG_SEC);
	rlec = CSR_READ(sc, WMREG_RLEC);

	WM_EVCNT_ADD(&sc->sc_ev_crcerrs, crcerrs);
	WM_EVCNT_ADD(&sc->sc_ev_symerrc, symerrc);
	WM_EVCNT_ADD(&sc->sc_ev_mpc, mpc);
	WM_EVCNT_ADD(&sc->sc_ev_colc, colc);
	WM_EVCNT_ADD(&sc->sc_ev_sec, sec);
	WM_EVCNT_ADD(&sc->sc_ev_rlec, rlec);

	if (sc->sc_type >= WM_T_82543) {
		algnerrc = CSR_READ(sc, WMREG_ALGNERRC);
		rxerrc = CSR_READ(sc, WMREG_RXERRC);
		WM_EVCNT_ADD(&sc->sc_ev_algnerrc, algnerrc);
		WM_EVCNT_ADD(&sc->sc_ev_rxerrc, rxerrc);
		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc)) {
			cexterr = CSR_READ(sc, WMREG_CEXTERR);
			WM_EVCNT_ADD(&sc->sc_ev_cexterr, cexterr);
		} else {
			cexterr = 0;
			/* Excessive collision + Link down */
			WM_EVCNT_ADD(&sc->sc_ev_htdpmc,
			    CSR_READ(sc, WMREG_HTDPMC));
		}

		WM_EVCNT_ADD(&sc->sc_ev_tncrs, CSR_READ(sc, WMREG_TNCRS));
		WM_EVCNT_ADD(&sc->sc_ev_tsctc, CSR_READ(sc, WMREG_TSCTC));
		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
			WM_EVCNT_ADD(&sc->sc_ev_tsctfc,
			    CSR_READ(sc, WMREG_TSCTFC));
		else {
			WM_EVCNT_ADD(&sc->sc_ev_cbrdpc,
			    CSR_READ(sc, WMREG_CBRDPC));
			WM_EVCNT_ADD(&sc->sc_ev_cbrmpc,
			    CSR_READ(sc, WMREG_CBRMPC));
		}
	} else
		algnerrc = rxerrc = cexterr = 0;

	/* Flow-control (XON/XOFF) frame counters. */
	if (sc->sc_type >= WM_T_82542_2_1) {
		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
	}

	WM_EVCNT_ADD(&sc->sc_ev_scc, CSR_READ(sc, WMREG_SCC));
	WM_EVCNT_ADD(&sc->sc_ev_ecol, CSR_READ(sc, WMREG_ECOL));
	WM_EVCNT_ADD(&sc->sc_ev_mcc, CSR_READ(sc, WMREG_MCC));
	WM_EVCNT_ADD(&sc->sc_ev_latecol, CSR_READ(sc, WMREG_LATECOL));

	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
		WM_EVCNT_ADD(&sc->sc_ev_cbtmpc, CSR_READ(sc, WMREG_CBTMPC));
	}

	/* RX/TX packet-size-bucket and good-packet/octet counters. */
	WM_EVCNT_ADD(&sc->sc_ev_dc, CSR_READ(sc, WMREG_DC));
	WM_EVCNT_ADD(&sc->sc_ev_prc64, CSR_READ(sc, WMREG_PRC64));
	WM_EVCNT_ADD(&sc->sc_ev_prc127, CSR_READ(sc, WMREG_PRC127));
	WM_EVCNT_ADD(&sc->sc_ev_prc255, CSR_READ(sc, WMREG_PRC255));
	WM_EVCNT_ADD(&sc->sc_ev_prc511, CSR_READ(sc, WMREG_PRC511));
	WM_EVCNT_ADD(&sc->sc_ev_prc1023, CSR_READ(sc, WMREG_PRC1023));
	WM_EVCNT_ADD(&sc->sc_ev_prc1522, CSR_READ(sc, WMREG_PRC1522));
	WM_EVCNT_ADD(&sc->sc_ev_gprc, CSR_READ(sc, WMREG_GPRC));
	WM_EVCNT_ADD(&sc->sc_ev_bprc, CSR_READ(sc, WMREG_BPRC));
	WM_EVCNT_ADD(&sc->sc_ev_mprc, CSR_READ(sc, WMREG_MPRC));
	WM_EVCNT_ADD(&sc->sc_ev_gptc, CSR_READ(sc, WMREG_GPTC));

	/* 64-bit octet counters are split over a low and a high register. */
	WM_EVCNT_ADD(&sc->sc_ev_gorc,
	    CSR_READ(sc, WMREG_GORCL) +
	    ((uint64_t)CSR_READ(sc, WMREG_GORCH) << 32));
	WM_EVCNT_ADD(&sc->sc_ev_gotc,
	    CSR_READ(sc, WMREG_GOTCL) +
	    ((uint64_t)CSR_READ(sc, WMREG_GOTCH) << 32));

	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
	WM_EVCNT_ADD(&sc->sc_ev_ruc, CSR_READ(sc, WMREG_RUC));
	WM_EVCNT_ADD(&sc->sc_ev_rfc, CSR_READ(sc, WMREG_RFC));
	WM_EVCNT_ADD(&sc->sc_ev_roc, CSR_READ(sc, WMREG_ROC));
	WM_EVCNT_ADD(&sc->sc_ev_rjc, CSR_READ(sc, WMREG_RJC));

	/* Management (BMC pass-through) packet counters. */
	if (sc->sc_type >= WM_T_82540) {
		WM_EVCNT_ADD(&sc->sc_ev_mgtprc, CSR_READ(sc, WMREG_MGTPRC));
		WM_EVCNT_ADD(&sc->sc_ev_mgtpdc, CSR_READ(sc, WMREG_MGTPDC));
		WM_EVCNT_ADD(&sc->sc_ev_mgtptc, CSR_READ(sc, WMREG_MGTPTC));
	}

	/*
	 * The TOR(L) register includes:
	 * - Error
	 * - Flow control
	 * - Broadcast rejected (This note is described in 82574 and newer
	 *   datasheets. What does "broadcast rejected" mean?)
	 */
	WM_EVCNT_ADD(&sc->sc_ev_tor,
	    CSR_READ(sc, WMREG_TORL) +
	    ((uint64_t)CSR_READ(sc, WMREG_TORH) << 32));
	WM_EVCNT_ADD(&sc->sc_ev_tot,
	    CSR_READ(sc, WMREG_TOTL) +
	    ((uint64_t)CSR_READ(sc, WMREG_TOTH) << 32));

	WM_EVCNT_ADD(&sc->sc_ev_tpr, CSR_READ(sc, WMREG_TPR));
	WM_EVCNT_ADD(&sc->sc_ev_tpt, CSR_READ(sc, WMREG_TPT));
	WM_EVCNT_ADD(&sc->sc_ev_ptc64, CSR_READ(sc, WMREG_PTC64));
	WM_EVCNT_ADD(&sc->sc_ev_ptc127, CSR_READ(sc, WMREG_PTC127));
	WM_EVCNT_ADD(&sc->sc_ev_ptc255, CSR_READ(sc, WMREG_PTC255));
	WM_EVCNT_ADD(&sc->sc_ev_ptc511, CSR_READ(sc, WMREG_PTC511));
	WM_EVCNT_ADD(&sc->sc_ev_ptc1023, CSR_READ(sc, WMREG_PTC1023));
	WM_EVCNT_ADD(&sc->sc_ev_ptc1522, CSR_READ(sc, WMREG_PTC1522));
	WM_EVCNT_ADD(&sc->sc_ev_mptc, CSR_READ(sc, WMREG_MPTC));
	WM_EVCNT_ADD(&sc->sc_ev_bptc, CSR_READ(sc, WMREG_BPTC));
	if (sc->sc_type >= WM_T_82571)
		WM_EVCNT_ADD(&sc->sc_ev_iac, CSR_READ(sc, WMREG_IAC));
	/* Interrupt-cause counters exist only on pre-82575 parts ... */
	if (sc->sc_type < WM_T_82575) {
		WM_EVCNT_ADD(&sc->sc_ev_icrxptc, CSR_READ(sc, WMREG_ICRXPTC));
		WM_EVCNT_ADD(&sc->sc_ev_icrxatc, CSR_READ(sc, WMREG_ICRXATC));
		WM_EVCNT_ADD(&sc->sc_ev_ictxptc, CSR_READ(sc, WMREG_ICTXPTC));
		WM_EVCNT_ADD(&sc->sc_ev_ictxatc, CSR_READ(sc, WMREG_ICTXATC));
		WM_EVCNT_ADD(&sc->sc_ev_ictxqec, CSR_READ(sc, WMREG_ICTXQEC));
		WM_EVCNT_ADD(&sc->sc_ev_ictxqmtc,
		    CSR_READ(sc, WMREG_ICTXQMTC));
		WM_EVCNT_ADD(&sc->sc_ev_rxdmtc,
		    CSR_READ(sc, WMREG_ICRXDMTC));
		WM_EVCNT_ADD(&sc->sc_ev_icrxoc, CSR_READ(sc, WMREG_ICRXOC));
	} else if (!WM_IS_ICHPCH(sc)) {
		/* ... 82575 and newer (non-ICH/PCH) have these instead. */
		WM_EVCNT_ADD(&sc->sc_ev_rpthc, CSR_READ(sc, WMREG_RPTHC));
		WM_EVCNT_ADD(&sc->sc_ev_debug1, CSR_READ(sc, WMREG_DEBUG1));
		WM_EVCNT_ADD(&sc->sc_ev_debug2, CSR_READ(sc, WMREG_DEBUG2));
		WM_EVCNT_ADD(&sc->sc_ev_debug3, CSR_READ(sc, WMREG_DEBUG3));
		WM_EVCNT_ADD(&sc->sc_ev_hgptc, CSR_READ(sc, WMREG_HGPTC));
		WM_EVCNT_ADD(&sc->sc_ev_debug4, CSR_READ(sc, WMREG_DEBUG4));
		WM_EVCNT_ADD(&sc->sc_ev_rxdmtc, CSR_READ(sc, WMREG_RXDMTC));
		WM_EVCNT_ADD(&sc->sc_ev_htcbdpc, CSR_READ(sc, WMREG_HTCBDPC));

		WM_EVCNT_ADD(&sc->sc_ev_hgorc,
		    CSR_READ(sc, WMREG_HGORCL) +
		    ((uint64_t)CSR_READ(sc, WMREG_HGORCH) << 32));
		WM_EVCNT_ADD(&sc->sc_ev_hgotc,
		    CSR_READ(sc, WMREG_HGOTCL) +
		    ((uint64_t)CSR_READ(sc, WMREG_HGOTCH) << 32));
		WM_EVCNT_ADD(&sc->sc_ev_lenerrs, CSR_READ(sc, WMREG_LENERRS));
		WM_EVCNT_ADD(&sc->sc_ev_scvpc, CSR_READ(sc, WMREG_SCVPC));
		WM_EVCNT_ADD(&sc->sc_ev_hrmpc, CSR_READ(sc, WMREG_HRMPC));
#ifdef WM_EVENT_COUNTERS
		/* Per-RX-queue descriptor-ring drop counts (RQDPC). */
		for (int i = 0; i < sc->sc_nqueues; i++) {
			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
			uint32_t rqdpc;

			rqdpc = CSR_READ(sc, WMREG_RQDPC(i));
			/*
			 * On I210 and newer devices, the RQDPC register is
			 * not cleared on read, so clear it explicitly to
			 * keep the per-interval accounting correct.
			 */
			if ((rqdpc != 0) && (sc->sc_type >= WM_T_I210))
				CSR_WRITE(sc, WMREG_RQDPC(i), 0);
			WM_Q_EVCNT_ADD(rxq, qdrop, rqdpc);
			total_qdrop += rqdpc;
		}
#endif
	}
	/* EEE (Energy-Efficient Ethernet) and BMC-to/from-OS counters. */
	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
		WM_EVCNT_ADD(&sc->sc_ev_tlpic, CSR_READ(sc, WMREG_TLPIC));
		WM_EVCNT_ADD(&sc->sc_ev_rlpic, CSR_READ(sc, WMREG_RLPIC));
		if ((CSR_READ(sc, WMREG_MANC) & MANC_EN_BMC2OS) != 0) {
			WM_EVCNT_ADD(&sc->sc_ev_b2ogprc,
			    CSR_READ(sc, WMREG_B2OGPRC));
			WM_EVCNT_ADD(&sc->sc_ev_o2bspc,
			    CSR_READ(sc, WMREG_O2BSPC));
			WM_EVCNT_ADD(&sc->sc_ev_b2ospc,
			    CSR_READ(sc, WMREG_B2OSPC));
			WM_EVCNT_ADD(&sc->sc_ev_o2bgptc,
			    CSR_READ(sc, WMREG_O2BGPTC));
		}
	}
	ifp->if_collisions += colc;
	ifp->if_ierrors +=
	    crcerrs + algnerrc + symerrc + rxerrc + sec + cexterr + rlec;

	/*
	 * WMREG_RNBC is incremented when there are no available buffers in
	 * host memory. It does not mean the number of dropped packets,
	 * because an Ethernet controller can receive packets in such a case
	 * if there is space in the PHY's FIFO.
	 *
	 * If you want to know the number of WMREG_RNBC events, you should
	 * use a dedicated EVCNT instead of if_iqdrops.
	 */
	ifp->if_iqdrops += mpc + total_qdrop;
}
6707 6728
/*
 * wm_clear_evcnt:
 *
 *	Reset all of the driver-maintained event counters to zero.
 *	The conditional structure mirrors wm_update_stats(): a counter is
 *	only cleared when the sc_type / WM_IS_ICHPCH() checks say it is
 *	maintained for this MAC generation.  Becomes a no-op when
 *	WM_EVENT_COUNTERS is not defined.
 */
void
wm_clear_evcnt(struct wm_softc *sc)
{
#ifdef WM_EVENT_COUNTERS
	int i;

	/* RX queues */
	for (i = 0; i < sc->sc_nqueues; i++) {
		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;

		WM_Q_EVCNT_STORE(rxq, intr, 0);
		WM_Q_EVCNT_STORE(rxq, defer, 0);
		WM_Q_EVCNT_STORE(rxq, ipsum, 0);
		WM_Q_EVCNT_STORE(rxq, tusum, 0);
		/* qdrop (RQDPC) only exists on >= 82575 non-ICH/PCH parts. */
		if ((sc->sc_type >= WM_T_82575) && !WM_IS_ICHPCH(sc))
			WM_Q_EVCNT_STORE(rxq, qdrop, 0);
	}

	/* TX queues */
	for (i = 0; i < sc->sc_nqueues; i++) {
		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
		int j;

		WM_Q_EVCNT_STORE(txq, txsstall, 0);
		WM_Q_EVCNT_STORE(txq, txdstall, 0);
		WM_Q_EVCNT_STORE(txq, fifo_stall, 0);
		WM_Q_EVCNT_STORE(txq, txdw, 0);
		WM_Q_EVCNT_STORE(txq, txqe, 0);
		WM_Q_EVCNT_STORE(txq, ipsum, 0);
		WM_Q_EVCNT_STORE(txq, tusum, 0);
		WM_Q_EVCNT_STORE(txq, tusum6, 0);
		WM_Q_EVCNT_STORE(txq, tso, 0);
		WM_Q_EVCNT_STORE(txq, tso6, 0);
		WM_Q_EVCNT_STORE(txq, tsopain, 0);

		/* Per-segment-count transmit histogram. */
		for (j = 0; j < WM_NTXSEGS; j++)
			WM_EVCNT_STORE(&txq->txq_ev_txseg[j], 0);

		WM_Q_EVCNT_STORE(txq, pcqdrop, 0);
		WM_Q_EVCNT_STORE(txq, descdrop, 0);
		WM_Q_EVCNT_STORE(txq, toomanyseg, 0);
		WM_Q_EVCNT_STORE(txq, defrag, 0);
		/* The TX FIFO underrun counter only exists on old parts. */
		if (sc->sc_type <= WM_T_82544)
			WM_Q_EVCNT_STORE(txq, underrun, 0);
		WM_Q_EVCNT_STORE(txq, skipcontext, 0);
	}

	/* Miscs */
	WM_EVCNT_STORE(&sc->sc_ev_linkintr, 0);

	WM_EVCNT_STORE(&sc->sc_ev_crcerrs, 0);
	WM_EVCNT_STORE(&sc->sc_ev_symerrc, 0);
	WM_EVCNT_STORE(&sc->sc_ev_mpc, 0);
	WM_EVCNT_STORE(&sc->sc_ev_colc, 0);
	WM_EVCNT_STORE(&sc->sc_ev_sec, 0);
	WM_EVCNT_STORE(&sc->sc_ev_rlec, 0);

	if (sc->sc_type >= WM_T_82543) {
		WM_EVCNT_STORE(&sc->sc_ev_algnerrc, 0);
		WM_EVCNT_STORE(&sc->sc_ev_rxerrc, 0);
		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
			WM_EVCNT_STORE(&sc->sc_ev_cexterr, 0);
		else
			WM_EVCNT_STORE(&sc->sc_ev_htdpmc, 0);

		WM_EVCNT_STORE(&sc->sc_ev_tncrs, 0);
		WM_EVCNT_STORE(&sc->sc_ev_tsctc, 0);
		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
			WM_EVCNT_STORE(&sc->sc_ev_tsctfc, 0);
		else {
			WM_EVCNT_STORE(&sc->sc_ev_cbrdpc, 0);
			WM_EVCNT_STORE(&sc->sc_ev_cbrmpc, 0);
		}
	}

	if (sc->sc_type >= WM_T_82542_2_1) {
		WM_EVCNT_STORE(&sc->sc_ev_tx_xoff, 0);
		WM_EVCNT_STORE(&sc->sc_ev_tx_xon, 0);
		WM_EVCNT_STORE(&sc->sc_ev_rx_xoff, 0);
		WM_EVCNT_STORE(&sc->sc_ev_rx_xon, 0);
		WM_EVCNT_STORE(&sc->sc_ev_rx_macctl, 0);
	}

	WM_EVCNT_STORE(&sc->sc_ev_scc, 0);
	WM_EVCNT_STORE(&sc->sc_ev_ecol, 0);
	WM_EVCNT_STORE(&sc->sc_ev_mcc, 0);
	WM_EVCNT_STORE(&sc->sc_ev_latecol, 0);

	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
		WM_EVCNT_STORE(&sc->sc_ev_cbtmpc, 0);

	WM_EVCNT_STORE(&sc->sc_ev_dc, 0);
	WM_EVCNT_STORE(&sc->sc_ev_prc64, 0);
	WM_EVCNT_STORE(&sc->sc_ev_prc127, 0);
	WM_EVCNT_STORE(&sc->sc_ev_prc255, 0);
	WM_EVCNT_STORE(&sc->sc_ev_prc511, 0);
	WM_EVCNT_STORE(&sc->sc_ev_prc1023, 0);
	WM_EVCNT_STORE(&sc->sc_ev_prc1522, 0);
	WM_EVCNT_STORE(&sc->sc_ev_gprc, 0);
	WM_EVCNT_STORE(&sc->sc_ev_bprc, 0);
	WM_EVCNT_STORE(&sc->sc_ev_mprc, 0);
	WM_EVCNT_STORE(&sc->sc_ev_gptc, 0);
	WM_EVCNT_STORE(&sc->sc_ev_gorc, 0);
	WM_EVCNT_STORE(&sc->sc_ev_gotc, 0);
	WM_EVCNT_STORE(&sc->sc_ev_rnbc, 0);
	WM_EVCNT_STORE(&sc->sc_ev_ruc, 0);
	WM_EVCNT_STORE(&sc->sc_ev_rfc, 0);
	WM_EVCNT_STORE(&sc->sc_ev_roc, 0);
	WM_EVCNT_STORE(&sc->sc_ev_rjc, 0);
	if (sc->sc_type >= WM_T_82540) {
		WM_EVCNT_STORE(&sc->sc_ev_mgtprc, 0);
		WM_EVCNT_STORE(&sc->sc_ev_mgtpdc, 0);
		WM_EVCNT_STORE(&sc->sc_ev_mgtptc, 0);
	}
	WM_EVCNT_STORE(&sc->sc_ev_tor, 0);
	WM_EVCNT_STORE(&sc->sc_ev_tot, 0);
	WM_EVCNT_STORE(&sc->sc_ev_tpr, 0);
	WM_EVCNT_STORE(&sc->sc_ev_tpt, 0);
	WM_EVCNT_STORE(&sc->sc_ev_ptc64, 0);
	WM_EVCNT_STORE(&sc->sc_ev_ptc127, 0);
	WM_EVCNT_STORE(&sc->sc_ev_ptc255, 0);
	WM_EVCNT_STORE(&sc->sc_ev_ptc511, 0);
	WM_EVCNT_STORE(&sc->sc_ev_ptc1023, 0);
	WM_EVCNT_STORE(&sc->sc_ev_ptc1522, 0);
	WM_EVCNT_STORE(&sc->sc_ev_mptc, 0);
	WM_EVCNT_STORE(&sc->sc_ev_bptc, 0);
	if (sc->sc_type >= WM_T_82571)
		WM_EVCNT_STORE(&sc->sc_ev_iac, 0);
	if (sc->sc_type < WM_T_82575) {
		WM_EVCNT_STORE(&sc->sc_ev_icrxptc, 0);
		WM_EVCNT_STORE(&sc->sc_ev_icrxatc, 0);
		WM_EVCNT_STORE(&sc->sc_ev_ictxptc, 0);
		WM_EVCNT_STORE(&sc->sc_ev_ictxatc, 0);
		WM_EVCNT_STORE(&sc->sc_ev_ictxqec, 0);
		WM_EVCNT_STORE(&sc->sc_ev_ictxqmtc, 0);
		WM_EVCNT_STORE(&sc->sc_ev_rxdmtc, 0);
		WM_EVCNT_STORE(&sc->sc_ev_icrxoc, 0);
	} else if (!WM_IS_ICHPCH(sc)) {
		WM_EVCNT_STORE(&sc->sc_ev_rpthc, 0);
		WM_EVCNT_STORE(&sc->sc_ev_debug1, 0);
		WM_EVCNT_STORE(&sc->sc_ev_debug2, 0);
		WM_EVCNT_STORE(&sc->sc_ev_debug3, 0);
		WM_EVCNT_STORE(&sc->sc_ev_hgptc, 0);
		WM_EVCNT_STORE(&sc->sc_ev_debug4, 0);
		WM_EVCNT_STORE(&sc->sc_ev_rxdmtc, 0);
		WM_EVCNT_STORE(&sc->sc_ev_htcbdpc, 0);

		WM_EVCNT_STORE(&sc->sc_ev_hgorc, 0);
		WM_EVCNT_STORE(&sc->sc_ev_hgotc, 0);
		WM_EVCNT_STORE(&sc->sc_ev_lenerrs, 0);
		WM_EVCNT_STORE(&sc->sc_ev_scvpc, 0);
		WM_EVCNT_STORE(&sc->sc_ev_hrmpc, 0);
	}
	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
		WM_EVCNT_STORE(&sc->sc_ev_tlpic, 0);
		WM_EVCNT_STORE(&sc->sc_ev_rlpic, 0);
		WM_EVCNT_STORE(&sc->sc_ev_b2ogprc, 0);
		WM_EVCNT_STORE(&sc->sc_ev_o2bspc, 0);
		WM_EVCNT_STORE(&sc->sc_ev_b2ospc, 0);
		WM_EVCNT_STORE(&sc->sc_ev_o2bgptc, 0);
	}
#endif
}
6869 6892
6870/* 6893/*
6871 * wm_init: [ifnet interface function] 6894 * wm_init: [ifnet interface function]
6872 * 6895 *
6873 * Initialize the interface. 6896 * Initialize the interface.
6874 */ 6897 */
6875static int 6898static int
6876wm_init(struct ifnet *ifp) 6899wm_init(struct ifnet *ifp)
6877{ 6900{
6878 struct wm_softc *sc = ifp->if_softc; 6901 struct wm_softc *sc = ifp->if_softc;
6879 int ret; 6902 int ret;
6880 6903
6881 WM_CORE_LOCK(sc); 6904 WM_CORE_LOCK(sc);
6882 ret = wm_init_locked(ifp); 6905 ret = wm_init_locked(ifp);
6883 WM_CORE_UNLOCK(sc); 6906 WM_CORE_UNLOCK(sc);
6884 6907
6885 return ret; 6908 return ret;
6886} 6909}
6887 6910
6888static int 6911static int
6889wm_init_locked(struct ifnet *ifp) 6912wm_init_locked(struct ifnet *ifp)
6890{ 6913{
6891 struct wm_softc *sc = ifp->if_softc; 6914 struct wm_softc *sc = ifp->if_softc;
6892 int i, j, trynum, error = 0; 6915 int i, j, trynum, error = 0;
6893 uint32_t reg, sfp_mask = 0; 6916 uint32_t reg, sfp_mask = 0;
6894 6917
6895 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 6918 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
6896 device_xname(sc->sc_dev), __func__)); 6919 device_xname(sc->sc_dev), __func__));
6897 KASSERT(WM_CORE_LOCKED(sc)); 6920 KASSERT(WM_CORE_LOCKED(sc));
6898 6921
6899 /* 6922 /*
6900 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGMENT is set. 6923 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGMENT is set.
6901 * There is a small but measurable benefit to avoiding the adjusment 6924 * There is a small but measurable benefit to avoiding the adjusment
6902 * of the descriptor so that the headers are aligned, for normal mtu, 6925 * of the descriptor so that the headers are aligned, for normal mtu,
6903 * on such platforms. One possibility is that the DMA itself is 6926 * on such platforms. One possibility is that the DMA itself is
6904 * slightly more efficient if the front of the entire packet (instead 6927 * slightly more efficient if the front of the entire packet (instead
6905 * of the front of the headers) is aligned. 6928 * of the front of the headers) is aligned.
6906 * 6929 *
6907 * Note we must always set align_tweak to 0 if we are using 6930 * Note we must always set align_tweak to 0 if we are using
6908 * jumbo frames. 6931 * jumbo frames.
6909 */ 6932 */
6910#ifdef __NO_STRICT_ALIGNMENT 6933#ifdef __NO_STRICT_ALIGNMENT
6911 sc->sc_align_tweak = 0; 6934 sc->sc_align_tweak = 0;
6912#else 6935#else
6913 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2)) 6936 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
6914 sc->sc_align_tweak = 0; 6937 sc->sc_align_tweak = 0;
6915 else 6938 else
6916 sc->sc_align_tweak = 2; 6939 sc->sc_align_tweak = 2;
6917#endif /* __NO_STRICT_ALIGNMENT */ 6940#endif /* __NO_STRICT_ALIGNMENT */
6918 6941
6919 /* Cancel any pending I/O. */ 6942 /* Cancel any pending I/O. */
6920 wm_stop_locked(ifp, 0); 6943 wm_stop_locked(ifp, 0);
6921 6944
6922 /* Update statistics before reset */ 6945 /* Update statistics before reset */
6923 ifp->if_collisions += CSR_READ(sc, WMREG_COLC); 6946 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
6924 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC); 6947 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
6925 6948
6926 /* >= PCH_SPT hardware workaround before reset. */ 6949 /* >= PCH_SPT hardware workaround before reset. */
6927 if (sc->sc_type >= WM_T_PCH_SPT) 6950 if (sc->sc_type >= WM_T_PCH_SPT)
6928 wm_flush_desc_rings(sc); 6951 wm_flush_desc_rings(sc);
6929 6952
6930 /* Reset the chip to a known state. */ 6953 /* Reset the chip to a known state. */
6931 wm_reset(sc); 6954 wm_reset(sc);
6932 6955
6933 /* 6956 /*
6934 * AMT based hardware can now take control from firmware 6957 * AMT based hardware can now take control from firmware
6935 * Do this after reset. 6958 * Do this after reset.
6936 */ 6959 */
6937 if ((sc->sc_flags & WM_F_HAS_AMT) != 0) 6960 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
6938 wm_get_hw_control(sc); 6961 wm_get_hw_control(sc);
6939 6962
6940 if ((sc->sc_type >= WM_T_PCH_SPT) && 6963 if ((sc->sc_type >= WM_T_PCH_SPT) &&
6941 pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX) 6964 pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
6942 wm_legacy_irq_quirk_spt(sc); 6965 wm_legacy_irq_quirk_spt(sc);
6943 6966
6944 /* Init hardware bits */ 6967 /* Init hardware bits */
6945 wm_initialize_hardware_bits(sc); 6968 wm_initialize_hardware_bits(sc);
6946 6969
6947 /* Reset the PHY. */ 6970 /* Reset the PHY. */
6948 if (sc->sc_flags & WM_F_HAS_MII) 6971 if (sc->sc_flags & WM_F_HAS_MII)
6949 wm_gmii_reset(sc); 6972 wm_gmii_reset(sc);
6950 6973
6951 if (sc->sc_type >= WM_T_ICH8) { 6974 if (sc->sc_type >= WM_T_ICH8) {
6952 reg = CSR_READ(sc, WMREG_GCR); 6975 reg = CSR_READ(sc, WMREG_GCR);
6953 /* 6976 /*
6954 * ICH8 No-snoop bits are opposite polarity. Set to snoop by 6977 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
6955 * default after reset. 6978 * default after reset.
6956 */ 6979 */
6957 if (sc->sc_type == WM_T_ICH8) 6980 if (sc->sc_type == WM_T_ICH8)
6958 reg |= GCR_NO_SNOOP_ALL; 6981 reg |= GCR_NO_SNOOP_ALL;
6959 else 6982 else
6960 reg &= ~GCR_NO_SNOOP_ALL; 6983 reg &= ~GCR_NO_SNOOP_ALL;
6961 CSR_WRITE(sc, WMREG_GCR, reg); 6984 CSR_WRITE(sc, WMREG_GCR, reg);
6962 } 6985 }
6963 6986
6964 /* Ungate DMA clock to avoid packet loss */ 6987 /* Ungate DMA clock to avoid packet loss */
6965 if (sc->sc_type >= WM_T_PCH_TGP) { 6988 if (sc->sc_type >= WM_T_PCH_TGP) {
6966 reg = CSR_READ(sc, WMREG_FFLT_DBG); 6989 reg = CSR_READ(sc, WMREG_FFLT_DBG);
6967 reg |= (1 << 12); 6990 reg |= (1 << 12);
6968 CSR_WRITE(sc, WMREG_FFLT_DBG, reg); 6991 CSR_WRITE(sc, WMREG_FFLT_DBG, reg);
6969 } 6992 }
6970 6993
6971 if ((sc->sc_type >= WM_T_ICH8) 6994 if ((sc->sc_type >= WM_T_ICH8)
6972 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER) 6995 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
6973 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) { 6996 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
6974 6997
6975 reg = CSR_READ(sc, WMREG_CTRL_EXT); 6998 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6976 reg |= CTRL_EXT_RO_DIS; 6999 reg |= CTRL_EXT_RO_DIS;
6977 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 7000 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6978 } 7001 }
6979 7002
6980 /* Calculate (E)ITR value */ 7003 /* Calculate (E)ITR value */
6981 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) { 7004 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
6982 /* 7005 /*
6983 * For NEWQUEUE's EITR (except for 82575). 7006 * For NEWQUEUE's EITR (except for 82575).
6984 * 82575's EITR should be set same throttling value as other 7007 * 82575's EITR should be set same throttling value as other
6985 * old controllers' ITR because the interrupt/sec calculation 7008 * old controllers' ITR because the interrupt/sec calculation
6986 * is the same, that is, 1,000,000,000 / (N * 256). 7009 * is the same, that is, 1,000,000,000 / (N * 256).
6987 * 7010 *
6988 * 82574's EITR should be set same throttling value as ITR. 7011 * 82574's EITR should be set same throttling value as ITR.
6989 * 7012 *
6990 * For N interrupts/sec, set this value to: 7013 * For N interrupts/sec, set this value to:
6991 * 1,000,000 / N in contrast to ITR throttling value. 7014 * 1,000,000 / N in contrast to ITR throttling value.
6992 */ 7015 */
6993 sc->sc_itr_init = 450; 7016 sc->sc_itr_init = 450;
6994 } else if (sc->sc_type >= WM_T_82543) { 7017 } else if (sc->sc_type >= WM_T_82543) {
6995 /* 7018 /*
6996 * Set up the interrupt throttling register (units of 256ns) 7019 * Set up the interrupt throttling register (units of 256ns)
6997 * Note that a footnote in Intel's documentation says this 7020 * Note that a footnote in Intel's documentation says this
6998 * ticker runs at 1/4 the rate when the chip is in 100Mbit 7021 * ticker runs at 1/4 the rate when the chip is in 100Mbit
6999 * or 10Mbit mode. Empirically, it appears to be the case 7022 * or 10Mbit mode. Empirically, it appears to be the case
7000 * that that is also true for the 1024ns units of the other 7023 * that that is also true for the 1024ns units of the other
7001 * interrupt-related timer registers -- so, really, we ought 7024 * interrupt-related timer registers -- so, really, we ought
7002 * to divide this value by 4 when the link speed is low. 7025 * to divide this value by 4 when the link speed is low.
7003 * 7026 *
7004 * XXX implement this division at link speed change! 7027 * XXX implement this division at link speed change!
7005 */ 7028 */
7006 7029
7007 /* 7030 /*
7008 * For N interrupts/sec, set this value to: 7031 * For N interrupts/sec, set this value to:
7009 * 1,000,000,000 / (N * 256). Note that we set the 7032 * 1,000,000,000 / (N * 256). Note that we set the
7010 * absolute and packet timer values to this value 7033 * absolute and packet timer values to this value
7011 * divided by 4 to get "simple timer" behavior. 7034 * divided by 4 to get "simple timer" behavior.
7012 */ 7035 */
7013 sc->sc_itr_init = 1500; /* 2604 ints/sec */ 7036 sc->sc_itr_init = 1500; /* 2604 ints/sec */
7014 } 7037 }
7015 7038
7016 error = wm_init_txrx_queues(sc); 7039 error = wm_init_txrx_queues(sc);
7017 if (error) 7040 if (error)
7018 goto out; 7041 goto out;
7019 7042
7020 if (((sc->sc_flags & WM_F_SGMII) == 0) && 7043 if (((sc->sc_flags & WM_F_SGMII) == 0) &&
7021 (sc->sc_mediatype == WM_MEDIATYPE_SERDES) && 7044 (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
7022 (sc->sc_type >= WM_T_82575)) 7045 (sc->sc_type >= WM_T_82575))
7023 wm_serdes_power_up_link_82575(sc); 7046 wm_serdes_power_up_link_82575(sc);
7024 7047
7025 /* Clear out the VLAN table -- we don't use it (yet). */ 7048 /* Clear out the VLAN table -- we don't use it (yet). */
7026 CSR_WRITE(sc, WMREG_VET, 0); 7049 CSR_WRITE(sc, WMREG_VET, 0);
7027 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) 7050 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
7028 trynum = 10; /* Due to hw errata */ 7051 trynum = 10; /* Due to hw errata */
7029 else 7052 else
7030 trynum = 1; 7053 trynum = 1;
7031 for (i = 0; i < WM_VLAN_TABSIZE; i++) 7054 for (i = 0; i < WM_VLAN_TABSIZE; i++)
7032 for (j = 0; j < trynum; j++) 7055 for (j = 0; j < trynum; j++)
7033 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0); 7056 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
7034 7057
7035 /* 7058 /*
7036 * Set up flow-control parameters. 7059 * Set up flow-control parameters.
7037 * 7060 *
7038 * XXX Values could probably stand some tuning. 7061 * XXX Values could probably stand some tuning.
7039 */ 7062 */
7040 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9) 7063 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
7041 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH) 7064 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
7042 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT) 7065 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
7043 && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP) 7066 && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)
7044 && (sc->sc_type != WM_T_PCH_TGP)) { 7067 && (sc->sc_type != WM_T_PCH_TGP)) {
7045 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST); 7068 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
7046 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST); 7069 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
7047 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL); 7070 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
7048 } 7071 }
7049 7072
7050 sc->sc_fcrtl = FCRTL_DFLT; 7073 sc->sc_fcrtl = FCRTL_DFLT;
7051 if (sc->sc_type < WM_T_82543) { 7074 if (sc->sc_type < WM_T_82543) {
7052 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT); 7075 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
7053 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl); 7076 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
7054 } else { 7077 } else {
7055 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT); 7078 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
7056 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl); 7079 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
7057 } 7080 }
7058 7081
7059 if (sc->sc_type == WM_T_80003) 7082 if (sc->sc_type == WM_T_80003)
7060 CSR_WRITE(sc, WMREG_FCTTV, 0xffff); 7083 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
7061 else 7084 else
7062 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT); 7085 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
7063 7086
7064 /* Writes the control register. */ 7087 /* Writes the control register. */
7065 wm_set_vlan(sc); 7088 wm_set_vlan(sc);
7066 7089
7067 if (sc->sc_flags & WM_F_HAS_MII) { 7090 if (sc->sc_flags & WM_F_HAS_MII) {
7068 uint16_t kmreg; 7091 uint16_t kmreg;
7069 7092
7070 switch (sc->sc_type) { 7093 switch (sc->sc_type) {
7071 case WM_T_80003: 7094 case WM_T_80003:
7072 case WM_T_ICH8: 7095 case WM_T_ICH8:
7073 case WM_T_ICH9: 7096 case WM_T_ICH9:
7074 case WM_T_ICH10: 7097 case WM_T_ICH10:
7075 case WM_T_PCH: 7098 case WM_T_PCH:
7076 case WM_T_PCH2: 7099 case WM_T_PCH2:
7077 case WM_T_PCH_LPT: 7100 case WM_T_PCH_LPT:
7078 case WM_T_PCH_SPT: 7101 case WM_T_PCH_SPT:
7079 case WM_T_PCH_CNP: 7102 case WM_T_PCH_CNP:
7080 case WM_T_PCH_TGP: 7103 case WM_T_PCH_TGP:
7081 /* 7104 /*
7082 * Set the mac to wait the maximum time between each 7105 * Set the mac to wait the maximum time between each
7083 * iteration and increase the max iterations when 7106 * iteration and increase the max iterations when
7084 * polling the phy; this fixes erroneous timeouts at 7107 * polling the phy; this fixes erroneous timeouts at
7085 * 10Mbps. 7108 * 10Mbps.
7086 */ 7109 */
7087 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS, 7110 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
7088 0xFFFF); 7111 0xFFFF);
7089 wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM, 7112 wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
7090 &kmreg); 7113 &kmreg);
7091 kmreg |= 0x3F; 7114 kmreg |= 0x3F;
7092 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM, 7115 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
7093 kmreg); 7116 kmreg);
7094 break; 7117 break;
7095 default: 7118 default:
7096 break; 7119 break;
7097 } 7120 }
7098 7121
7099 if (sc->sc_type == WM_T_80003) { 7122 if (sc->sc_type == WM_T_80003) {
7100 reg = CSR_READ(sc, WMREG_CTRL_EXT); 7123 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7101 reg &= ~CTRL_EXT_LINK_MODE_MASK; 7124 reg &= ~CTRL_EXT_LINK_MODE_MASK;
7102 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 7125 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7103 7126
7104 /* Bypass RX and TX FIFOs */ 7127 /* Bypass RX and TX FIFOs */
7105 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL, 7128 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
7106 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS 7129 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
7107 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS); 7130 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
7108 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL, 7131 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
7109 KUMCTRLSTA_INB_CTRL_DIS_PADDING | 7132 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
7110 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT); 7133 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
7111 } 7134 }
7112 } 7135 }
7113#if 0 7136#if 0
7114 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext); 7137 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
7115#endif 7138#endif
7116 7139
7117 /* Set up checksum offload parameters. */ 7140 /* Set up checksum offload parameters. */
7118 reg = CSR_READ(sc, WMREG_RXCSUM); 7141 reg = CSR_READ(sc, WMREG_RXCSUM);
7119 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL); 7142 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
7120 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) 7143 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
7121 reg |= RXCSUM_IPOFL; 7144 reg |= RXCSUM_IPOFL;
7122 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx)) 7145 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
7123 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL; 7146 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
7124 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx)) 7147 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
7125 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL; 7148 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
7126 CSR_WRITE(sc, WMREG_RXCSUM, reg); 7149 CSR_WRITE(sc, WMREG_RXCSUM, reg);
7127 7150
7128 /* Set registers about MSI-X */ 7151 /* Set registers about MSI-X */
7129 if (wm_is_using_msix(sc)) { 7152 if (wm_is_using_msix(sc)) {
7130 uint32_t ivar, qintr_idx; 7153 uint32_t ivar, qintr_idx;
7131 struct wm_queue *wmq; 7154 struct wm_queue *wmq;
7132 unsigned int qid; 7155 unsigned int qid;
7133 7156
7134 if (sc->sc_type == WM_T_82575) { 7157 if (sc->sc_type == WM_T_82575) {
7135 /* Interrupt control */ 7158 /* Interrupt control */
7136 reg = CSR_READ(sc, WMREG_CTRL_EXT); 7159 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7137 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR; 7160 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
7138 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 7161 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7139 7162
7140 /* TX and RX */ 7163 /* TX and RX */
7141 for (i = 0; i < sc->sc_nqueues; i++) { 7164 for (i = 0; i < sc->sc_nqueues; i++) {
7142 wmq = &sc->sc_queue[i]; 7165 wmq = &sc->sc_queue[i];
7143 CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx), 7166 CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
7144 EITR_TX_QUEUE(wmq->wmq_id) 7167 EITR_TX_QUEUE(wmq->wmq_id)
7145 | EITR_RX_QUEUE(wmq->wmq_id)); 7168 | EITR_RX_QUEUE(wmq->wmq_id));
7146 } 7169 }
7147 /* Link status */ 7170 /* Link status */
7148 CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx), 7171 CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
7149 EITR_OTHER); 7172 EITR_OTHER);
7150 } else if (sc->sc_type == WM_T_82574) { 7173 } else if (sc->sc_type == WM_T_82574) {
7151 /* Interrupt control */ 7174 /* Interrupt control */
7152 reg = CSR_READ(sc, WMREG_CTRL_EXT); 7175 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7153 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME; 7176 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
7154 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 7177 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7155 7178
7156 /* 7179 /*
7157 * Work around issue with spurious interrupts 7180 * Work around issue with spurious interrupts
7158 * in MSI-X mode. 7181 * in MSI-X mode.
7159 * At wm_initialize_hardware_bits(), sc_nintrs has not 7182 * At wm_initialize_hardware_bits(), sc_nintrs has not
7160 * initialized yet. So re-initialize WMREG_RFCTL here. 7183 * initialized yet. So re-initialize WMREG_RFCTL here.
7161 */ 7184 */
7162 reg = CSR_READ(sc, WMREG_RFCTL); 7185 reg = CSR_READ(sc, WMREG_RFCTL);
7163 reg |= WMREG_RFCTL_ACKDIS; 7186 reg |= WMREG_RFCTL_ACKDIS;
7164 CSR_WRITE(sc, WMREG_RFCTL, reg); 7187 CSR_WRITE(sc, WMREG_RFCTL, reg);
7165 7188
7166 ivar = 0; 7189 ivar = 0;
7167 /* TX and RX */ 7190 /* TX and RX */
7168 for (i = 0; i < sc->sc_nqueues; i++) { 7191 for (i = 0; i < sc->sc_nqueues; i++) {
7169 wmq = &sc->sc_queue[i]; 7192 wmq = &sc->sc_queue[i];
7170 qid = wmq->wmq_id; 7193 qid = wmq->wmq_id;
7171 qintr_idx = wmq->wmq_intr_idx; 7194 qintr_idx = wmq->wmq_intr_idx;
7172 7195
7173 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx), 7196 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
7174 IVAR_TX_MASK_Q_82574(qid)); 7197 IVAR_TX_MASK_Q_82574(qid));
7175 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx), 7198 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
7176 IVAR_RX_MASK_Q_82574(qid)); 7199 IVAR_RX_MASK_Q_82574(qid));
7177 } 7200 }
7178 /* Link status */ 7201 /* Link status */
7179 ivar |= __SHIFTIN((IVAR_VALID_82574 7202 ivar |= __SHIFTIN((IVAR_VALID_82574
7180 | sc->sc_link_intr_idx), IVAR_OTHER_MASK); 7203 | sc->sc_link_intr_idx), IVAR_OTHER_MASK);
7181 CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB); 7204 CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
7182 } else { 7205 } else {
7183 /* Interrupt control */ 7206 /* Interrupt control */
7184 CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX 7207 CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
7185 | GPIE_EIAME | GPIE_PBA); 7208 | GPIE_EIAME | GPIE_PBA);
7186 7209
7187 switch (sc->sc_type) { 7210 switch (sc->sc_type) {
7188 case WM_T_82580: 7211 case WM_T_82580:
7189 case WM_T_I350: 7212 case WM_T_I350:
7190 case WM_T_I354: 7213 case WM_T_I354:
7191 case WM_T_I210: 7214 case WM_T_I210:
7192 case WM_T_I211: 7215 case WM_T_I211:
7193 /* TX and RX */ 7216 /* TX and RX */
7194 for (i = 0; i < sc->sc_nqueues; i++) { 7217 for (i = 0; i < sc->sc_nqueues; i++) {
7195 wmq = &sc->sc_queue[i]; 7218 wmq = &sc->sc_queue[i];
7196 qid = wmq->wmq_id; 7219 qid = wmq->wmq_id;
7197 qintr_idx = wmq->wmq_intr_idx; 7220 qintr_idx = wmq->wmq_intr_idx;
7198 7221
7199 ivar = CSR_READ(sc, WMREG_IVAR_Q(qid)); 7222 ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
7200 ivar &= ~IVAR_TX_MASK_Q(qid); 7223 ivar &= ~IVAR_TX_MASK_Q(qid);
7201 ivar |= __SHIFTIN((qintr_idx 7224 ivar |= __SHIFTIN((qintr_idx
7202 | IVAR_VALID), 7225 | IVAR_VALID),
7203 IVAR_TX_MASK_Q(qid)); 7226 IVAR_TX_MASK_Q(qid));
7204 ivar &= ~IVAR_RX_MASK_Q(qid); 7227 ivar &= ~IVAR_RX_MASK_Q(qid);
7205 ivar |= __SHIFTIN((qintr_idx 7228 ivar |= __SHIFTIN((qintr_idx
7206 | IVAR_VALID), 7229 | IVAR_VALID),
7207 IVAR_RX_MASK_Q(qid)); 7230 IVAR_RX_MASK_Q(qid));
7208 CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar); 7231 CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
7209 } 7232 }
7210 break; 7233 break;
7211 case WM_T_82576: 7234 case WM_T_82576:
7212 /* TX and RX */ 7235 /* TX and RX */
7213 for (i = 0; i < sc->sc_nqueues; i++) { 7236 for (i = 0; i < sc->sc_nqueues; i++) {
7214 wmq = &sc->sc_queue[i]; 7237 wmq = &sc->sc_queue[i];
7215 qid = wmq->wmq_id; 7238 qid = wmq->wmq_id;
7216 qintr_idx = wmq->wmq_intr_idx; 7239 qintr_idx = wmq->wmq_intr_idx;
7217 7240
7218 ivar = CSR_READ(sc, 7241 ivar = CSR_READ(sc,
7219 WMREG_IVAR_Q_82576(qid)); 7242 WMREG_IVAR_Q_82576(qid));
7220 ivar &= ~IVAR_TX_MASK_Q_82576(qid); 7243 ivar &= ~IVAR_TX_MASK_Q_82576(qid);
7221 ivar |= __SHIFTIN((qintr_idx 7244 ivar |= __SHIFTIN((qintr_idx
7222 | IVAR_VALID), 7245 | IVAR_VALID),
7223 IVAR_TX_MASK_Q_82576(qid)); 7246 IVAR_TX_MASK_Q_82576(qid));
7224 ivar &= ~IVAR_RX_MASK_Q_82576(qid); 7247 ivar &= ~IVAR_RX_MASK_Q_82576(qid);
7225 ivar |= __SHIFTIN((qintr_idx 7248 ivar |= __SHIFTIN((qintr_idx
7226 | IVAR_VALID), 7249 | IVAR_VALID),
7227 IVAR_RX_MASK_Q_82576(qid)); 7250 IVAR_RX_MASK_Q_82576(qid));
7228 CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid), 7251 CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
7229 ivar); 7252 ivar);
7230 } 7253 }
7231 break; 7254 break;
7232 default: 7255 default:
7233 break; 7256 break;
7234 } 7257 }
7235 7258
7236 /* Link status */ 7259 /* Link status */
7237 ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID), 7260 ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
7238 IVAR_MISC_OTHER); 7261 IVAR_MISC_OTHER);
7239 CSR_WRITE(sc, WMREG_IVAR_MISC, ivar); 7262 CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
7240 } 7263 }
7241 7264
7242 if (wm_is_using_multiqueue(sc)) { 7265 if (wm_is_using_multiqueue(sc)) {
7243 wm_init_rss(sc); 7266 wm_init_rss(sc);
7244 7267
7245 /* 7268 /*
7246 ** NOTE: Receive Full-Packet Checksum Offload 7269 ** NOTE: Receive Full-Packet Checksum Offload
7247 ** is mutually exclusive with Multiqueue. However 7270 ** is mutually exclusive with Multiqueue. However
7248 ** this is not the same as TCP/IP checksums which 7271 ** this is not the same as TCP/IP checksums which
7249 ** still work. 7272 ** still work.
7250 */ 7273 */
7251 reg = CSR_READ(sc, WMREG_RXCSUM); 7274 reg = CSR_READ(sc, WMREG_RXCSUM);
7252 reg |= RXCSUM_PCSD; 7275 reg |= RXCSUM_PCSD;
7253 CSR_WRITE(sc, WMREG_RXCSUM, reg); 7276 CSR_WRITE(sc, WMREG_RXCSUM, reg);
7254 } 7277 }
7255 } 7278 }
7256 7279
7257 /* Set up the interrupt registers. */ 7280 /* Set up the interrupt registers. */
7258 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); 7281 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
7259 7282
7260 /* Enable SFP module insertion interrupt if it's required */ 7283 /* Enable SFP module insertion interrupt if it's required */
7261 if ((sc->sc_flags & WM_F_SFP) != 0) { 7284 if ((sc->sc_flags & WM_F_SFP) != 0) {
7262 sc->sc_ctrl |= CTRL_EXTLINK_EN; 7285 sc->sc_ctrl |= CTRL_EXTLINK_EN;
7263 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 7286 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7264 sfp_mask = ICR_GPI(0); 7287 sfp_mask = ICR_GPI(0);
7265 } 7288 }
7266 7289
7267 if (wm_is_using_msix(sc)) { 7290 if (wm_is_using_msix(sc)) {
7268 uint32_t mask; 7291 uint32_t mask;
7269 struct wm_queue *wmq; 7292 struct wm_queue *wmq;
7270 7293
7271 switch (sc->sc_type) { 7294 switch (sc->sc_type) {
7272 case WM_T_82574: 7295 case WM_T_82574:
7273 mask = 0; 7296 mask = 0;
7274 for (i = 0; i < sc->sc_nqueues; i++) { 7297 for (i = 0; i < sc->sc_nqueues; i++) {
7275 wmq = &sc->sc_queue[i]; 7298 wmq = &sc->sc_queue[i];
7276 mask |= ICR_TXQ(wmq->wmq_id); 7299 mask |= ICR_TXQ(wmq->wmq_id);
7277 mask |= ICR_RXQ(wmq->wmq_id); 7300 mask |= ICR_RXQ(wmq->wmq_id);
7278 } 7301 }
7279 mask |= ICR_OTHER; 7302 mask |= ICR_OTHER;
7280 CSR_WRITE(sc, WMREG_EIAC_82574, mask); 7303 CSR_WRITE(sc, WMREG_EIAC_82574, mask);
7281 CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC); 7304 CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
7282 break; 7305 break;
7283 default: 7306 default:
7284 if (sc->sc_type == WM_T_82575) { 7307 if (sc->sc_type == WM_T_82575) {
7285 mask = 0; 7308 mask = 0;
7286 for (i = 0; i < sc->sc_nqueues; i++) { 7309 for (i = 0; i < sc->sc_nqueues; i++) {
7287 wmq = &sc->sc_queue[i]; 7310 wmq = &sc->sc_queue[i];
7288 mask |= EITR_TX_QUEUE(wmq->wmq_id); 7311 mask |= EITR_TX_QUEUE(wmq->wmq_id);
7289 mask |= EITR_RX_QUEUE(wmq->wmq_id); 7312 mask |= EITR_RX_QUEUE(wmq->wmq_id);
7290 } 7313 }
7291 mask |= EITR_OTHER; 7314 mask |= EITR_OTHER;
7292 } else { 7315 } else {
7293 mask = 0; 7316 mask = 0;
7294 for (i = 0; i < sc->sc_nqueues; i++) { 7317 for (i = 0; i < sc->sc_nqueues; i++) {
7295 wmq = &sc->sc_queue[i]; 7318 wmq = &sc->sc_queue[i];
7296 mask |= 1 << wmq->wmq_intr_idx; 7319 mask |= 1 << wmq->wmq_intr_idx;
7297 } 7320 }
7298 mask |= 1 << sc->sc_link_intr_idx; 7321 mask |= 1 << sc->sc_link_intr_idx;
7299 } 7322 }
7300 CSR_WRITE(sc, WMREG_EIAC, mask); 7323 CSR_WRITE(sc, WMREG_EIAC, mask);
7301 CSR_WRITE(sc, WMREG_EIAM, mask); 7324 CSR_WRITE(sc, WMREG_EIAM, mask);
7302 CSR_WRITE(sc, WMREG_EIMS, mask); 7325 CSR_WRITE(sc, WMREG_EIMS, mask);
7303 7326
7304 /* For other interrupts */ 7327 /* For other interrupts */
7305 CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask); 7328 CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
7306 break; 7329 break;
7307 } 7330 }
7308 } else { 7331 } else {
7309 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 | 7332 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
7310 ICR_RXO | ICR_RXT0 | sfp_mask; 7333 ICR_RXO | ICR_RXT0 | sfp_mask;
7311 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr); 7334 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
7312 } 7335 }
7313 7336
7314 /* Set up the inter-packet gap. */ 7337 /* Set up the inter-packet gap. */
7315 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg); 7338 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7316 7339
7317 if (sc->sc_type >= WM_T_82543) { 7340 if (sc->sc_type >= WM_T_82543) {
7318 for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) { 7341 for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
7319 struct wm_queue *wmq = &sc->sc_queue[qidx]; 7342 struct wm_queue *wmq = &sc->sc_queue[qidx];
7320 wm_itrs_writereg(sc, wmq); 7343 wm_itrs_writereg(sc, wmq);
7321 } 7344 }
7322 /* 7345 /*
7323 * Link interrupts occur much less than TX 7346 * Link interrupts occur much less than TX
7324 * interrupts and RX interrupts. So, we don't 7347 * interrupts and RX interrupts. So, we don't
7325 * tune EINTR(WM_MSIX_LINKINTR_IDX) value like 7348 * tune EINTR(WM_MSIX_LINKINTR_IDX) value like
7326 * FreeBSD's if_igb. 7349 * FreeBSD's if_igb.
7327 */ 7350 */
7328 } 7351 }
7329 7352
7330 /* Set the VLAN EtherType. */ 7353 /* Set the VLAN EtherType. */
7331 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN); 7354 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
7332 7355
7333 /* 7356 /*
7334 * Set up the transmit control register; we start out with 7357 * Set up the transmit control register; we start out with
7335 * a collision distance suitable for FDX, but update it when 7358 * a collision distance suitable for FDX, but update it when
7336 * we resolve the media type. 7359 * we resolve the media type.
7337 */ 7360 */
7338 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC 7361 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
7339 | TCTL_CT(TX_COLLISION_THRESHOLD) 7362 | TCTL_CT(TX_COLLISION_THRESHOLD)
7340 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX); 7363 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7341 if (sc->sc_type >= WM_T_82571) 7364 if (sc->sc_type >= WM_T_82571)
7342 sc->sc_tctl |= TCTL_MULR; 7365 sc->sc_tctl |= TCTL_MULR;
7343 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); 7366 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7344 7367
7345 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { 7368 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
7346 /* Write TDT after TCTL.EN is set. See the document. */ 7369 /* Write TDT after TCTL.EN is set. See the document. */
7347 CSR_WRITE(sc, WMREG_TDT(0), 0); 7370 CSR_WRITE(sc, WMREG_TDT(0), 0);
7348 } 7371 }
7349 7372
7350 if (sc->sc_type == WM_T_80003) { 7373 if (sc->sc_type == WM_T_80003) {
7351 reg = CSR_READ(sc, WMREG_TCTL_EXT); 7374 reg = CSR_READ(sc, WMREG_TCTL_EXT);
7352 reg &= ~TCTL_EXT_GCEX_MASK; 7375 reg &= ~TCTL_EXT_GCEX_MASK;
7353 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX; 7376 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
7354 CSR_WRITE(sc, WMREG_TCTL_EXT, reg); 7377 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
7355 } 7378 }
7356 7379
7357 /* Set the media. */ 7380 /* Set the media. */
7358 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0) 7381 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
7359 goto out; 7382 goto out;
7360 7383
7361 /* Configure for OS presence */ 7384 /* Configure for OS presence */
7362 wm_init_manageability(sc); 7385 wm_init_manageability(sc);
7363 7386
7364 /* 7387 /*
7365 * Set up the receive control register; we actually program the 7388 * Set up the receive control register; we actually program the
7366 * register when we set the receive filter. Use multicast address 7389 * register when we set the receive filter. Use multicast address
7367 * offset type 0. 7390 * offset type 0.
7368 * 7391 *
7369 * Only the i82544 has the ability to strip the incoming CRC, so we 7392 * Only the i82544 has the ability to strip the incoming CRC, so we
7370 * don't enable that feature. 7393 * don't enable that feature.
7371 */ 7394 */
7372 sc->sc_mchash_type = 0; 7395 sc->sc_mchash_type = 0;
7373 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF 7396 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
7374 | __SHIFTIN(sc->sc_mchash_type, RCTL_MO); 7397 | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
7375 7398
7376 /* 82574 use one buffer extended Rx descriptor. */ 7399 /* 82574 use one buffer extended Rx descriptor. */
7377 if (sc->sc_type == WM_T_82574) 7400 if (sc->sc_type == WM_T_82574)
7378 sc->sc_rctl |= RCTL_DTYP_ONEBUF; 7401 sc->sc_rctl |= RCTL_DTYP_ONEBUF;
7379 7402
7380 if ((sc->sc_flags & WM_F_CRC_STRIP) != 0) 7403 if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
7381 sc->sc_rctl |= RCTL_SECRC; 7404 sc->sc_rctl |= RCTL_SECRC;
7382 7405
7383 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0) 7406 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
7384 && (ifp->if_mtu > ETHERMTU)) { 7407 && (ifp->if_mtu > ETHERMTU)) {
7385 sc->sc_rctl |= RCTL_LPE; 7408 sc->sc_rctl |= RCTL_LPE;
7386 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 7409 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7387 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO); 7410 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
7388 } 7411 }
7389 7412
7390 if (MCLBYTES == 2048) 7413 if (MCLBYTES == 2048)
7391 sc->sc_rctl |= RCTL_2k; 7414 sc->sc_rctl |= RCTL_2k;
7392 else { 7415 else {
7393 if (sc->sc_type >= WM_T_82543) { 7416 if (sc->sc_type >= WM_T_82543) {
7394 switch (MCLBYTES) { 7417 switch (MCLBYTES) {
7395 case 4096: 7418 case 4096:
7396 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k; 7419 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
7397 break; 7420 break;
7398 case 8192: 7421 case 8192:
7399 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k; 7422 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
7400 break; 7423 break;
7401 case 16384: 7424 case 16384:
7402 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k; 7425 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
7403 break; 7426 break;
7404 default: 7427 default:
7405 panic("wm_init: MCLBYTES %d unsupported", 7428 panic("wm_init: MCLBYTES %d unsupported",
7406 MCLBYTES); 7429 MCLBYTES);
7407 break; 7430 break;
7408 } 7431 }
7409 } else 7432 } else
7410 panic("wm_init: i82542 requires MCLBYTES = 2048"); 7433 panic("wm_init: i82542 requires MCLBYTES = 2048");
7411 } 7434 }
7412 7435
7413 /* Enable ECC */ 7436 /* Enable ECC */
7414 switch (sc->sc_type) { 7437 switch (sc->sc_type) {
7415 case WM_T_82571: 7438 case WM_T_82571:
7416 reg = CSR_READ(sc, WMREG_PBA_ECC); 7439 reg = CSR_READ(sc, WMREG_PBA_ECC);
7417 reg |= PBA_ECC_CORR_EN; 7440 reg |= PBA_ECC_CORR_EN;
7418 CSR_WRITE(sc, WMREG_PBA_ECC, reg); 7441 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
7419 break; 7442 break;
7420 case WM_T_PCH_LPT: 7443 case WM_T_PCH_LPT:
7421 case WM_T_PCH_SPT: 7444 case WM_T_PCH_SPT:
7422 case WM_T_PCH_CNP: 7445 case WM_T_PCH_CNP:
7423 case WM_T_PCH_TGP: 7446 case WM_T_PCH_TGP:
7424 reg = CSR_READ(sc, WMREG_PBECCSTS); 7447 reg = CSR_READ(sc, WMREG_PBECCSTS);
7425 reg |= PBECCSTS_UNCORR_ECC_ENABLE; 7448 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
7426 CSR_WRITE(sc, WMREG_PBECCSTS, reg); 7449 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
7427 7450
7428 sc->sc_ctrl |= CTRL_MEHE; 7451 sc->sc_ctrl |= CTRL_MEHE;
7429 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 7452 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7430 break; 7453 break;
7431 default: 7454 default:
7432 break; 7455 break;
7433 } 7456 }
7434 7457
7435 /* 7458 /*
7436 * Set the receive filter. 7459 * Set the receive filter.
7437 * 7460 *
7438 * For 82575 and 82576, the RX descriptors must be initialized after 7461 * For 82575 and 82576, the RX descriptors must be initialized after
7439 * the setting of RCTL.EN in wm_set_filter() 7462 * the setting of RCTL.EN in wm_set_filter()
7440 */ 7463 */
7441 wm_set_filter(sc); 7464 wm_set_filter(sc);
7442 7465
7443 /* On 575 and later set RDT only if RX enabled */ 7466 /* On 575 and later set RDT only if RX enabled */
7444 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { 7467 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
7445 int qidx; 7468 int qidx;
7446 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) { 7469 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
7447 struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq; 7470 struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
7448 for (i = 0; i < WM_NRXDESC; i++) { 7471 for (i = 0; i < WM_NRXDESC; i++) {
7449 mutex_enter(rxq->rxq_lock); 7472 mutex_enter(rxq->rxq_lock);
7450 wm_init_rxdesc(rxq, i); 7473 wm_init_rxdesc(rxq, i);
7451 mutex_exit(rxq->rxq_lock); 7474 mutex_exit(rxq->rxq_lock);
7452 7475
7453 } 7476 }
7454 } 7477 }
7455 } 7478 }
7456 7479
7457 wm_unset_stopping_flags(sc); 7480 wm_unset_stopping_flags(sc);
7458 7481
7459 /* Start the one second link check clock. */ 7482 /* Start the one second link check clock. */
7460 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc); 7483 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
7461 7484
7462 /* ...all done! */ 7485 /* ...all done! */
7463 ifp->if_flags |= IFF_RUNNING; 7486 ifp->if_flags |= IFF_RUNNING;
7464 ifp->if_flags &= ~IFF_OACTIVE; 7487 ifp->if_flags &= ~IFF_OACTIVE;
7465 7488
7466out: 7489out:
7467 sc->sc_if_flags = ifp->if_flags; 7490 sc->sc_if_flags = ifp->if_flags;
7468 if (error) 7491 if (error)
7469 log(LOG_ERR, "%s: interface not running\n", 7492 log(LOG_ERR, "%s: interface not running\n",
7470 device_xname(sc->sc_dev)); 7493 device_xname(sc->sc_dev));
7471 return error; 7494 return error;
7472} 7495}
7473 7496
7474/* 7497/*
7475 * wm_stop: [ifnet interface function] 7498 * wm_stop: [ifnet interface function]
7476 * 7499 *
7477 * Stop transmission on the interface. 7500 * Stop transmission on the interface.
7478 */ 7501 */
7479static void 7502static void
7480wm_stop(struct ifnet *ifp, int disable) 7503wm_stop(struct ifnet *ifp, int disable)
7481{ 7504{
7482 struct wm_softc *sc = ifp->if_softc; 7505 struct wm_softc *sc = ifp->if_softc;
7483 7506
7484 WM_CORE_LOCK(sc); 7507 WM_CORE_LOCK(sc);
7485 wm_stop_locked(ifp, disable); 7508 wm_stop_locked(ifp, disable);
7486 WM_CORE_UNLOCK(sc); 7509 WM_CORE_UNLOCK(sc);
7487 7510
7488 /* 7511 /*
7489 * After wm_set_stopping_flags(), it is guaranteed that 7512 * After wm_set_stopping_flags(), it is guaranteed that
7490 * wm_handle_queue_work() does not call workqueue_enqueue(). 7513 * wm_handle_queue_work() does not call workqueue_enqueue().
7491 * However, workqueue_wait() cannot call in wm_stop_locked() 7514 * However, workqueue_wait() cannot call in wm_stop_locked()
7492 * because it can sleep... 7515 * because it can sleep...
7493 * so, call workqueue_wait() here. 7516 * so, call workqueue_wait() here.
7494 */ 7517 */
7495 for (int i = 0; i < sc->sc_nqueues; i++) 7518 for (int i = 0; i < sc->sc_nqueues; i++)
7496 workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie); 7519 workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
7497} 7520}
7498 7521
7499static void 7522static void
7500wm_stop_locked(struct ifnet *ifp, int disable) 7523wm_stop_locked(struct ifnet *ifp, int disable)
7501{ 7524{
7502 struct wm_softc *sc = ifp->if_softc; 7525 struct wm_softc *sc = ifp->if_softc;
7503 struct wm_txsoft *txs; 7526 struct wm_txsoft *txs;
7504 int i, qidx; 7527 int i, qidx;
7505 7528
7506 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 7529 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
7507 device_xname(sc->sc_dev), __func__)); 7530 device_xname(sc->sc_dev), __func__));
7508 KASSERT(WM_CORE_LOCKED(sc)); 7531 KASSERT(WM_CORE_LOCKED(sc));
7509 7532
7510 wm_set_stopping_flags(sc); 7533 wm_set_stopping_flags(sc);
7511 7534
7512 /* Stop the one second clock. */ 7535 /* Stop the one second clock. */
7513 callout_stop(&sc->sc_tick_ch); 7536 callout_stop(&sc->sc_tick_ch);
7514 7537
7515 /* Stop the 82547 Tx FIFO stall check timer. */ 7538 /* Stop the 82547 Tx FIFO stall check timer. */
7516 if (sc->sc_type == WM_T_82547) 7539 if (sc->sc_type == WM_T_82547)
7517 callout_stop(&sc->sc_txfifo_ch); 7540 callout_stop(&sc->sc_txfifo_ch);
7518 7541
7519 if (sc->sc_flags & WM_F_HAS_MII) { 7542 if (sc->sc_flags & WM_F_HAS_MII) {
7520 /* Down the MII. */ 7543 /* Down the MII. */
7521 mii_down(&sc->sc_mii); 7544 mii_down(&sc->sc_mii);
7522 } else { 7545 } else {
7523#if 0 7546#if 0
7524 /* Should we clear PHY's status properly? */ 7547 /* Should we clear PHY's status properly? */
7525 wm_reset(sc); 7548 wm_reset(sc);
7526#endif 7549#endif
7527 } 7550 }
7528 7551
7529 /* Stop the transmit and receive processes. */ 7552 /* Stop the transmit and receive processes. */
7530 CSR_WRITE(sc, WMREG_TCTL, 0); 7553 CSR_WRITE(sc, WMREG_TCTL, 0);
7531 CSR_WRITE(sc, WMREG_RCTL, 0); 7554 CSR_WRITE(sc, WMREG_RCTL, 0);
7532 sc->sc_rctl &= ~RCTL_EN; 7555 sc->sc_rctl &= ~RCTL_EN;
7533 7556
7534 /* 7557 /*
7535 * Clear the interrupt mask to ensure the device cannot assert its 7558 * Clear the interrupt mask to ensure the device cannot assert its
7536 * interrupt line. 7559 * interrupt line.
7537 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to 7560 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
7538 * service any currently pending or shared interrupt. 7561 * service any currently pending or shared interrupt.
7539 */ 7562 */
7540 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); 7563 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
7541 sc->sc_icr = 0; 7564 sc->sc_icr = 0;
7542 if (wm_is_using_msix(sc)) { 7565 if (wm_is_using_msix(sc)) {
7543 if (sc->sc_type != WM_T_82574) { 7566 if (sc->sc_type != WM_T_82574) {
7544 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU); 7567 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
7545 CSR_WRITE(sc, WMREG_EIAC, 0); 7568 CSR_WRITE(sc, WMREG_EIAC, 0);
7546 } else 7569 } else
7547 CSR_WRITE(sc, WMREG_EIAC_82574, 0); 7570 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
7548 } 7571 }
7549 7572
7550 /* Release any queued transmit buffers. */ 7573 /* Release any queued transmit buffers. */
7551 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) { 7574 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
7552 struct wm_queue *wmq = &sc->sc_queue[qidx]; 7575 struct wm_queue *wmq = &sc->sc_queue[qidx];
7553 struct wm_txqueue *txq = &wmq->wmq_txq; 7576 struct wm_txqueue *txq = &wmq->wmq_txq;
7554 struct mbuf *m; 7577 struct mbuf *m;
7555 7578
7556 mutex_enter(txq->txq_lock); 7579 mutex_enter(txq->txq_lock);
7557 txq->txq_sending = false; /* Ensure watchdog disabled */ 7580 txq->txq_sending = false; /* Ensure watchdog disabled */
7558 for (i = 0; i < WM_TXQUEUELEN(txq); i++) { 7581 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7559 txs = &txq->txq_soft[i]; 7582 txs = &txq->txq_soft[i];
7560 if (txs->txs_mbuf != NULL) { 7583 if (txs->txs_mbuf != NULL) {
7561 bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap); 7584 bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
7562 m_freem(txs->txs_mbuf); 7585 m_freem(txs->txs_mbuf);
7563 txs->txs_mbuf = NULL; 7586 txs->txs_mbuf = NULL;
7564 } 7587 }
7565 } 7588 }
7566 /* Drain txq_interq */ 7589 /* Drain txq_interq */
7567 while ((m = pcq_get(txq->txq_interq)) != NULL) 7590 while ((m = pcq_get(txq->txq_interq)) != NULL)
7568 m_freem(m); 7591 m_freem(m);
7569 mutex_exit(txq->txq_lock); 7592 mutex_exit(txq->txq_lock);
7570 } 7593 }
7571 7594
7572 /* Mark the interface as down and cancel the watchdog timer. */ 7595 /* Mark the interface as down and cancel the watchdog timer. */
7573 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 7596 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
7574 7597
7575 if (disable) { 7598 if (disable) {
7576 for (i = 0; i < sc->sc_nqueues; i++) { 7599 for (i = 0; i < sc->sc_nqueues; i++) {
7577 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq; 7600 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7578 mutex_enter(rxq->rxq_lock); 7601 mutex_enter(rxq->rxq_lock);
7579 wm_rxdrain(rxq); 7602 wm_rxdrain(rxq);
7580 mutex_exit(rxq->rxq_lock); 7603 mutex_exit(rxq->rxq_lock);
7581 } 7604 }
7582 } 7605 }
7583 7606
7584#if 0 /* notyet */ 7607#if 0 /* notyet */
7585 if (sc->sc_type >= WM_T_82544) 7608 if (sc->sc_type >= WM_T_82544)
7586 CSR_WRITE(sc, WMREG_WUC, 0); 7609 CSR_WRITE(sc, WMREG_WUC, 0);
7587#endif 7610#endif
7588} 7611}
7589 7612
7590static void 7613static void
7591wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0) 7614wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
7592{ 7615{
7593 struct mbuf *m; 7616 struct mbuf *m;
7594 int i; 7617 int i;
7595 7618
7596 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev)); 7619 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
7597 for (m = m0, i = 0; m != NULL; m = m->m_next, i++) 7620 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
7598 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, " 7621 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
7599 "m_flags = 0x%08x\n", device_xname(sc->sc_dev), 7622 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
7600 m->m_data, m->m_len, m->m_flags); 7623 m->m_data, m->m_len, m->m_flags);
7601 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev), 7624 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
7602 i, i == 1 ? "" : "s"); 7625 i, i == 1 ? "" : "s");
7603} 7626}
7604 7627
7605/* 7628/*
7606 * wm_82547_txfifo_stall: 7629 * wm_82547_txfifo_stall:
7607 * 7630 *
7608 * Callout used to wait for the 82547 Tx FIFO to drain, 7631 * Callout used to wait for the 82547 Tx FIFO to drain,
7609 * reset the FIFO pointers, and restart packet transmission. 7632 * reset the FIFO pointers, and restart packet transmission.
7610 */ 7633 */
7611static void 7634static void
7612wm_82547_txfifo_stall(void *arg) 7635wm_82547_txfifo_stall(void *arg)
7613{ 7636{
7614 struct wm_softc *sc = arg; 7637 struct wm_softc *sc = arg;
7615 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq; 7638 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7616 7639
7617 mutex_enter(txq->txq_lock); 7640 mutex_enter(txq->txq_lock);
7618 7641
7619 if (txq->txq_stopping) 7642 if (txq->txq_stopping)
7620 goto out; 7643 goto out;
7621 7644
7622 if (txq->txq_fifo_stall) { 7645 if (txq->txq_fifo_stall) {
7623 if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) && 7646 if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
7624 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) && 7647 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
7625 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) { 7648 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
7626 /* 7649 /*
7627 * Packets have drained. Stop transmitter, reset 7650 * Packets have drained. Stop transmitter, reset
7628 * FIFO pointers, restart transmitter, and kick 7651 * FIFO pointers, restart transmitter, and kick
7629 * the packet queue. 7652 * the packet queue.
7630 */ 7653 */
7631 uint32_t tctl = CSR_READ(sc, WMREG_TCTL); 7654 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
7632 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN); 7655 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
7633 CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr); 7656 CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
7634 CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr); 7657 CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
7635 CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr); 7658 CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
7636 CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr); 7659 CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
7637 CSR_WRITE(sc, WMREG_TCTL, tctl); 7660 CSR_WRITE(sc, WMREG_TCTL, tctl);
7638 CSR_WRITE_FLUSH(sc); 7661 CSR_WRITE_FLUSH(sc);
7639 7662
7640 txq->txq_fifo_head = 0; 7663 txq->txq_fifo_head = 0;
7641 txq->txq_fifo_stall = 0; 7664 txq->txq_fifo_stall = 0;
7642 wm_start_locked(&sc->sc_ethercom.ec_if); 7665 wm_start_locked(&sc->sc_ethercom.ec_if);
7643 } else { 7666 } else {
7644 /* 7667 /*
7645 * Still waiting for packets to drain; try again in 7668 * Still waiting for packets to drain; try again in
7646 * another tick. 7669 * another tick.
7647 */ 7670 */
7648 callout_schedule(&sc->sc_txfifo_ch, 1); 7671 callout_schedule(&sc->sc_txfifo_ch, 1);
7649 } 7672 }
7650 } 7673 }
7651 7674
7652out: 7675out:
7653 mutex_exit(txq->txq_lock); 7676 mutex_exit(txq->txq_lock);
7654} 7677}
7655 7678
7656/* 7679/*
7657 * wm_82547_txfifo_bugchk: 7680 * wm_82547_txfifo_bugchk:
7658 * 7681 *
7659 * Check for bug condition in the 82547 Tx FIFO. We need to 7682 * Check for bug condition in the 82547 Tx FIFO. We need to
7660 * prevent enqueueing a packet that would wrap around the end 7683 * prevent enqueueing a packet that would wrap around the end
7661 * if the Tx FIFO ring buffer, otherwise the chip will croak. 7684 * if the Tx FIFO ring buffer, otherwise the chip will croak.
7662 * 7685 *
7663 * We do this by checking the amount of space before the end 7686 * We do this by checking the amount of space before the end
7664 * of the Tx FIFO buffer. If the packet will not fit, we "stall" 7687 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
7665 * the Tx FIFO, wait for all remaining packets to drain, reset 7688 * the Tx FIFO, wait for all remaining packets to drain, reset
7666 * the internal FIFO pointers to the beginning, and restart 7689 * the internal FIFO pointers to the beginning, and restart
7667 * transmission on the interface. 7690 * transmission on the interface.
7668 */ 7691 */
7669#define WM_FIFO_HDR 0x10 7692#define WM_FIFO_HDR 0x10
7670#define WM_82547_PAD_LEN 0x3e0 7693#define WM_82547_PAD_LEN 0x3e0
7671static int 7694static int
7672wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0) 7695wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
7673{ 7696{
7674 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq; 7697 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7675 int space = txq->txq_fifo_size - txq->txq_fifo_head; 7698 int space = txq->txq_fifo_size - txq->txq_fifo_head;
7676 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR); 7699 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
7677 7700
7678 /* Just return if already stalled. */ 7701 /* Just return if already stalled. */
7679 if (txq->txq_fifo_stall) 7702 if (txq->txq_fifo_stall)
7680 return 1; 7703 return 1;
7681 7704
7682 if (sc->sc_mii.mii_media_active & IFM_FDX) { 7705 if (sc->sc_mii.mii_media_active & IFM_FDX) {
7683 /* Stall only occurs in half-duplex mode. */ 7706 /* Stall only occurs in half-duplex mode. */
7684 goto send_packet; 7707 goto send_packet;
7685 } 7708 }
7686 7709
7687 if (len >= WM_82547_PAD_LEN + space) { 7710 if (len >= WM_82547_PAD_LEN + space) {
7688 txq->txq_fifo_stall = 1; 7711 txq->txq_fifo_stall = 1;
7689 callout_schedule(&sc->sc_txfifo_ch, 1); 7712 callout_schedule(&sc->sc_txfifo_ch, 1);
7690 return 1; 7713 return 1;
7691 } 7714 }
7692 7715
7693send_packet: 7716send_packet:
7694 txq->txq_fifo_head += len; 7717 txq->txq_fifo_head += len;
7695 if (txq->txq_fifo_head >= txq->txq_fifo_size) 7718 if (txq->txq_fifo_head >= txq->txq_fifo_size)
7696 txq->txq_fifo_head -= txq->txq_fifo_size; 7719 txq->txq_fifo_head -= txq->txq_fifo_size;
7697 7720
7698 return 0; 7721 return 0;
7699} 7722}
7700 7723
7701static int 7724static int
7702wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq) 7725wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
7703{ 7726{
7704 int error; 7727 int error;
7705 7728
7706 /* 7729 /*
7707 * Allocate the control data structures, and create and load the 7730 * Allocate the control data structures, and create and load the
7708 * DMA map for it. 7731 * DMA map for it.
7709 * 7732 *
7710 * NOTE: All Tx descriptors must be in the same 4G segment of 7733 * NOTE: All Tx descriptors must be in the same 4G segment of
7711 * memory. So must Rx descriptors. We simplify by allocating 7734 * memory. So must Rx descriptors. We simplify by allocating
7712 * both sets within the same 4G segment. 7735 * both sets within the same 4G segment.
7713 */ 7736 */
7714 if (sc->sc_type < WM_T_82544) 7737 if (sc->sc_type < WM_T_82544)
7715 WM_NTXDESC(txq) = WM_NTXDESC_82542; 7738 WM_NTXDESC(txq) = WM_NTXDESC_82542;
7716 else 7739 else
7717 WM_NTXDESC(txq) = WM_NTXDESC_82544; 7740 WM_NTXDESC(txq) = WM_NTXDESC_82544;
7718 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 7741 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7719 txq->txq_descsize = sizeof(nq_txdesc_t); 7742 txq->txq_descsize = sizeof(nq_txdesc_t);
7720 else 7743 else
7721 txq->txq_descsize = sizeof(wiseman_txdesc_t); 7744 txq->txq_descsize = sizeof(wiseman_txdesc_t);
7722 7745
7723 if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 7746 if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
7724 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg, 7747 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
7725 1, &txq->txq_desc_rseg, 0)) != 0) { 7748 1, &txq->txq_desc_rseg, 0)) != 0) {
7726 aprint_error_dev(sc->sc_dev, 7749 aprint_error_dev(sc->sc_dev,
7727 "unable to allocate TX control data, error = %d\n", 7750 "unable to allocate TX control data, error = %d\n",
7728 error); 7751 error);
7729 goto fail_0; 7752 goto fail_0;
7730 } 7753 }
7731 7754
7732 if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg, 7755 if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
7733 txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq), 7756 txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
7734 (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) { 7757 (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
7735 aprint_error_dev(sc->sc_dev, 7758 aprint_error_dev(sc->sc_dev,
7736 "unable to map TX control data, error = %d\n", error); 7759 "unable to map TX control data, error = %d\n", error);
7737 goto fail_1; 7760 goto fail_1;
7738 } 7761 }
7739 7762
7740 if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1, 7763 if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
7741 WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) { 7764 WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
7742 aprint_error_dev(sc->sc_dev, 7765 aprint_error_dev(sc->sc_dev,
7743 "unable to create TX control data DMA map, error = %d\n", 7766 "unable to create TX control data DMA map, error = %d\n",
7744 error); 7767 error);
7745 goto fail_2; 7768 goto fail_2;
7746 } 7769 }
7747 7770
7748 if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap, 7771 if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
7749 txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) { 7772 txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
7750 aprint_error_dev(sc->sc_dev, 7773 aprint_error_dev(sc->sc_dev,
7751 "unable to load TX control data DMA map, error = %d\n", 7774 "unable to load TX control data DMA map, error = %d\n",
7752 error); 7775 error);
7753 goto fail_3; 7776 goto fail_3;
7754 } 7777 }
7755 7778
7756 return 0; 7779 return 0;
7757 7780
7758fail_3: 7781fail_3:
7759 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap); 7782 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
7760fail_2: 7783fail_2:
7761 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u, 7784 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
7762 WM_TXDESCS_SIZE(txq)); 7785 WM_TXDESCS_SIZE(txq));
7763fail_1: 7786fail_1:
7764 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg); 7787 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
7765fail_0: 7788fail_0:
7766 return error; 7789 return error;
7767} 7790}
7768 7791
7769static void 7792static void
7770wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq) 7793wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
7771{ 7794{
7772 7795
7773 bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap); 7796 bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
7774 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap); 7797 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
7775 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u, 7798 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
7776 WM_TXDESCS_SIZE(txq)); 7799 WM_TXDESCS_SIZE(txq));
7777 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg); 7800 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
7778} 7801}
7779 7802
7780static int 7803static int
7781wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq) 7804wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
7782{ 7805{
7783 int error; 7806 int error;
7784 size_t rxq_descs_size; 7807 size_t rxq_descs_size;
7785 7808
7786 /* 7809 /*
7787 * Allocate the control data structures, and create and load the 7810 * Allocate the control data structures, and create and load the
7788 * DMA map for it. 7811 * DMA map for it.
7789 * 7812 *
7790 * NOTE: All Tx descriptors must be in the same 4G segment of 7813 * NOTE: All Tx descriptors must be in the same 4G segment of
7791 * memory. So must Rx descriptors. We simplify by allocating 7814 * memory. So must Rx descriptors. We simplify by allocating
7792 * both sets within the same 4G segment. 7815 * both sets within the same 4G segment.
7793 */ 7816 */
7794 rxq->rxq_ndesc = WM_NRXDESC; 7817 rxq->rxq_ndesc = WM_NRXDESC;
7795 if (sc->sc_type == WM_T_82574) 7818 if (sc->sc_type == WM_T_82574)
7796 rxq->rxq_descsize = sizeof(ext_rxdesc_t); 7819 rxq->rxq_descsize = sizeof(ext_rxdesc_t);
7797 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 7820 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7798 rxq->rxq_descsize = sizeof(nq_rxdesc_t); 7821 rxq->rxq_descsize = sizeof(nq_rxdesc_t);
7799 else 7822 else
7800 rxq->rxq_descsize = sizeof(wiseman_rxdesc_t); 7823 rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
7801 rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc; 7824 rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
7802 7825
7803 if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size, 7826 if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
7804 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg, 7827 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
7805 1, &rxq->rxq_desc_rseg, 0)) != 0) { 7828 1, &rxq->rxq_desc_rseg, 0)) != 0) {
7806 aprint_error_dev(sc->sc_dev, 7829 aprint_error_dev(sc->sc_dev,
7807 "unable to allocate RX control data, error = %d\n", 7830 "unable to allocate RX control data, error = %d\n",
7808 error); 7831 error);
7809 goto fail_0; 7832 goto fail_0;
7810 } 7833 }
7811 7834
7812 if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg, 7835 if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
7813 rxq->rxq_desc_rseg, rxq_descs_size, 7836 rxq->rxq_desc_rseg, rxq_descs_size,
7814 (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) { 7837 (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
7815 aprint_error_dev(sc->sc_dev, 7838 aprint_error_dev(sc->sc_dev,
7816 "unable to map RX control data, error = %d\n", error); 7839 "unable to map RX control data, error = %d\n", error);
7817 goto fail_1; 7840 goto fail_1;
7818 } 7841 }
7819 7842
7820 if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1, 7843 if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
7821 rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) { 7844 rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
7822 aprint_error_dev(sc->sc_dev, 7845 aprint_error_dev(sc->sc_dev,
7823 "unable to create RX control data DMA map, error = %d\n", 7846 "unable to create RX control data DMA map, error = %d\n",
7824 error); 7847 error);
7825 goto fail_2; 7848 goto fail_2;
7826 } 7849 }
7827 7850
7828 if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap, 7851 if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
7829 rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) { 7852 rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
7830 aprint_error_dev(sc->sc_dev, 7853 aprint_error_dev(sc->sc_dev,
7831 "unable to load RX control data DMA map, error = %d\n", 7854 "unable to load RX control data DMA map, error = %d\n",
7832 error); 7855 error);
7833 goto fail_3; 7856 goto fail_3;
7834 } 7857 }
7835 7858
7836 return 0; 7859 return 0;
7837 7860
7838 fail_3: 7861 fail_3:
7839 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap); 7862 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
7840 fail_2: 7863 fail_2:
7841 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u, 7864 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
7842 rxq_descs_size); 7865 rxq_descs_size);
7843 fail_1: 7866 fail_1:
7844 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg); 7867 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
7845 fail_0: 7868 fail_0:
7846 return error; 7869 return error;
7847} 7870}
7848 7871
7849static void 7872static void
7850wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq) 7873wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
7851{ 7874{
7852 7875
7853 bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap); 7876 bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
7854 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap); 7877 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
7855 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u, 7878 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
7856 rxq->rxq_descsize * rxq->rxq_ndesc); 7879 rxq->rxq_descsize * rxq->rxq_ndesc);
7857 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg); 7880 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
7858} 7881}
7859 7882
7860 7883
7861static int 7884static int
7862wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq) 7885wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
7863{ 7886{
7864 int i, error; 7887 int i, error;
7865 7888
7866 /* Create the transmit buffer DMA maps. */ 7889 /* Create the transmit buffer DMA maps. */
7867 WM_TXQUEUELEN(txq) = 7890 WM_TXQUEUELEN(txq) =
7868 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ? 7891 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
7869 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX; 7892 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
7870 for (i = 0; i < WM_TXQUEUELEN(txq); i++) { 7893 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7871 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA, 7894 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
7872 WM_NTXSEGS, WTX_MAX_LEN, 0, 0, 7895 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
7873 &txq->txq_soft[i].txs_dmamap)) != 0) { 7896 &txq->txq_soft[i].txs_dmamap)) != 0) {
7874 aprint_error_dev(sc->sc_dev, 7897 aprint_error_dev(sc->sc_dev,
7875 "unable to create Tx DMA map %d, error = %d\n", 7898 "unable to create Tx DMA map %d, error = %d\n",
7876 i, error); 7899 i, error);
7877 goto fail; 7900 goto fail;
7878 } 7901 }
7879 } 7902 }
7880 7903
7881 return 0; 7904 return 0;
7882 7905
7883fail: 7906fail:
7884 for (i = 0; i < WM_TXQUEUELEN(txq); i++) { 7907 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7885 if (txq->txq_soft[i].txs_dmamap != NULL) 7908 if (txq->txq_soft[i].txs_dmamap != NULL)
7886 bus_dmamap_destroy(sc->sc_dmat, 7909 bus_dmamap_destroy(sc->sc_dmat,
7887 txq->txq_soft[i].txs_dmamap); 7910 txq->txq_soft[i].txs_dmamap);
7888 } 7911 }
7889 return error; 7912 return error;
7890} 7913}
7891 7914
7892static void 7915static void
7893wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq) 7916wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
7894{ 7917{
7895 int i; 7918 int i;
7896 7919
7897 for (i = 0; i < WM_TXQUEUELEN(txq); i++) { 7920 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7898 if (txq->txq_soft[i].txs_dmamap != NULL) 7921 if (txq->txq_soft[i].txs_dmamap != NULL)
7899 bus_dmamap_destroy(sc->sc_dmat, 7922 bus_dmamap_destroy(sc->sc_dmat,
7900 txq->txq_soft[i].txs_dmamap); 7923 txq->txq_soft[i].txs_dmamap);
7901 } 7924 }
7902} 7925}
7903 7926
7904static int 7927static int
7905wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq) 7928wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
7906{ 7929{
7907 int i, error; 7930 int i, error;
7908 7931
7909 /* Create the receive buffer DMA maps. */ 7932 /* Create the receive buffer DMA maps. */
7910 for (i = 0; i < rxq->rxq_ndesc; i++) { 7933 for (i = 0; i < rxq->rxq_ndesc; i++) {
7911 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 7934 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
7912 MCLBYTES, 0, 0, 7935 MCLBYTES, 0, 0,
7913 &rxq->rxq_soft[i].rxs_dmamap)) != 0) { 7936 &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
7914 aprint_error_dev(sc->sc_dev, 7937 aprint_error_dev(sc->sc_dev,
7915 "unable to create Rx DMA map %d error = %d\n", 7938 "unable to create Rx DMA map %d error = %d\n",
7916 i, error); 7939 i, error);
7917 goto fail; 7940 goto fail;
7918 } 7941 }
7919 rxq->rxq_soft[i].rxs_mbuf = NULL; 7942 rxq->rxq_soft[i].rxs_mbuf = NULL;
7920 } 7943 }
7921 7944
7922 return 0; 7945 return 0;
7923 7946
7924 fail: 7947 fail:
7925 for (i = 0; i < rxq->rxq_ndesc; i++) { 7948 for (i = 0; i < rxq->rxq_ndesc; i++) {
7926 if (rxq->rxq_soft[i].rxs_dmamap != NULL) 7949 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
7927 bus_dmamap_destroy(sc->sc_dmat, 7950 bus_dmamap_destroy(sc->sc_dmat,
7928 rxq->rxq_soft[i].rxs_dmamap); 7951 rxq->rxq_soft[i].rxs_dmamap);
7929 } 7952 }
7930 return error; 7953 return error;
7931} 7954}
7932 7955
7933static void 7956static void
7934wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq) 7957wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
7935{ 7958{
7936 int i; 7959 int i;
7937 7960
7938 for (i = 0; i < rxq->rxq_ndesc; i++) { 7961 for (i = 0; i < rxq->rxq_ndesc; i++) {
7939 if (rxq->rxq_soft[i].rxs_dmamap != NULL) 7962 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
7940 bus_dmamap_destroy(sc->sc_dmat, 7963 bus_dmamap_destroy(sc->sc_dmat,
7941 rxq->rxq_soft[i].rxs_dmamap); 7964 rxq->rxq_soft[i].rxs_dmamap);
7942 } 7965 }
7943} 7966}
7944 7967
7945/* 7968/*
7946 * wm_alloc_quques: 7969 * wm_alloc_quques:
7947 * Allocate {tx,rx}descs and {tx,rx} buffers 7970 * Allocate {tx,rx}descs and {tx,rx} buffers
7948 */ 7971 */
7949static int 7972static int
7950wm_alloc_txrx_queues(struct wm_softc *sc) 7973wm_alloc_txrx_queues(struct wm_softc *sc)
7951{ 7974{
7952 int i, error, tx_done, rx_done; 7975 int i, error, tx_done, rx_done;
7953 7976
7954 sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues, 7977 sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
7955 KM_SLEEP); 7978 KM_SLEEP);
7956 if (sc->sc_queue == NULL) { 7979 if (sc->sc_queue == NULL) {
7957 aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n"); 7980 aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
7958 error = ENOMEM; 7981 error = ENOMEM;
7959 goto fail_0; 7982 goto fail_0;
7960 } 7983 }
7961 7984
7962 /* For transmission */ 7985 /* For transmission */
7963 error = 0; 7986 error = 0;
7964 tx_done = 0; 7987 tx_done = 0;
7965 for (i = 0; i < sc->sc_nqueues; i++) { 7988 for (i = 0; i < sc->sc_nqueues; i++) {
7966#ifdef WM_EVENT_COUNTERS 7989#ifdef WM_EVENT_COUNTERS
7967 int j; 7990 int j;
7968 const char *xname; 7991 const char *xname;
7969#endif 7992#endif
7970 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq; 7993 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7971 txq->txq_sc = sc; 7994 txq->txq_sc = sc;
7972 txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); 7995 txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
7973 7996
7974 error = wm_alloc_tx_descs(sc, txq); 7997 error = wm_alloc_tx_descs(sc, txq);
7975 if (error) 7998 if (error)
7976 break; 7999 break;
7977 error = wm_alloc_tx_buffer(sc, txq); 8000 error = wm_alloc_tx_buffer(sc, txq);
7978 if (error) { 8001 if (error) {
7979 wm_free_tx_descs(sc, txq); 8002 wm_free_tx_descs(sc, txq);
7980 break; 8003 break;
7981 } 8004 }
7982 txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP); 8005 txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
7983 if (txq->txq_interq == NULL) { 8006 if (txq->txq_interq == NULL) {
7984 wm_free_tx_descs(sc, txq); 8007 wm_free_tx_descs(sc, txq);
7985 wm_free_tx_buffer(sc, txq); 8008 wm_free_tx_buffer(sc, txq);
7986 error = ENOMEM; 8009 error = ENOMEM;
7987 break; 8010 break;
7988 } 8011 }
7989 8012
7990#ifdef WM_EVENT_COUNTERS 8013#ifdef WM_EVENT_COUNTERS
7991 xname = device_xname(sc->sc_dev); 8014 xname = device_xname(sc->sc_dev);
7992 8015
7993 WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname); 8016 WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
7994 WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname); 8017 WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
7995 WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname); 8018 WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
7996 WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname); 8019 WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
7997 WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname); 8020 WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
7998 WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname); 8021 WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
7999 WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname); 8022 WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
8000 WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname); 8023 WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
8001 WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname); 8024 WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
8002 WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname); 8025 WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
8003 WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname); 8026 WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
8004 8027
8005 for (j = 0; j < WM_NTXSEGS; j++) { 8028 for (j = 0; j < WM_NTXSEGS; j++) {
8006 snprintf(txq->txq_txseg_evcnt_names[j], 8029 snprintf(txq->txq_txseg_evcnt_names[j],
8007 sizeof(txq->txq_txseg_evcnt_names[j]), 8030 sizeof(txq->txq_txseg_evcnt_names[j]),
8008 "txq%02dtxseg%d", i, j); 8031 "txq%02dtxseg%d", i, j);
8009 evcnt_attach_dynamic(&txq->txq_ev_txseg[j], 8032 evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
8010 EVCNT_TYPE_MISC, 8033 EVCNT_TYPE_MISC,
8011 NULL, xname, txq->txq_txseg_evcnt_names[j]); 8034 NULL, xname, txq->txq_txseg_evcnt_names[j]);
8012 } 8035 }
8013 8036
8014 WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname); 8037 WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
8015 WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname); 8038 WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
8016 WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname); 8039 WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
8017 WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname); 8040 WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
8018 /* Only for 82544 (and earlier?) */ 8041 /* Only for 82544 (and earlier?) */
8019 if (sc->sc_type <= WM_T_82544) 8042 if (sc->sc_type <= WM_T_82544)
8020 WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname); 8043 WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
8021 WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname); 8044 WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
8022#endif /* WM_EVENT_COUNTERS */ 8045#endif /* WM_EVENT_COUNTERS */
8023 8046
8024 tx_done++; 8047 tx_done++;
8025 } 8048 }
8026 if (error) 8049 if (error)
8027 goto fail_1; 8050 goto fail_1;
8028 8051
8029 /* For receive */ 8052 /* For receive */
8030 error = 0; 8053 error = 0;
8031 rx_done = 0; 8054 rx_done = 0;
8032 for (i = 0; i < sc->sc_nqueues; i++) { 8055 for (i = 0; i < sc->sc_nqueues; i++) {
8033#ifdef WM_EVENT_COUNTERS 8056#ifdef WM_EVENT_COUNTERS
8034 const char *xname; 8057 const char *xname;
8035#endif 8058#endif
8036 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq; 8059 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
8037 rxq->rxq_sc = sc; 8060 rxq->rxq_sc = sc;
8038 rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); 8061 rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
8039 8062
8040 error = wm_alloc_rx_descs(sc, rxq); 8063 error = wm_alloc_rx_descs(sc, rxq);
8041 if (error) 8064 if (error)
8042 break; 8065 break;
8043 8066
8044 error = wm_alloc_rx_buffer(sc, rxq); 8067 error = wm_alloc_rx_buffer(sc, rxq);
8045 if (error) { 8068 if (error) {
8046 wm_free_rx_descs(sc, rxq); 8069 wm_free_rx_descs(sc, rxq);
8047 break; 8070 break;
8048 } 8071 }
8049 8072
8050#ifdef WM_EVENT_COUNTERS 8073#ifdef WM_EVENT_COUNTERS
8051 xname = device_xname(sc->sc_dev); 8074 xname = device_xname(sc->sc_dev);
8052 8075
8053 WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname); 8076 WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
8054 WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname); 8077 WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
8055 
8056 WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname); 8078 WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
8057 WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname); 8079 WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
 8080 if ((sc->sc_type >= WM_T_82575) && !WM_IS_ICHPCH(sc))
 8081 WM_Q_MISC_EVCNT_ATTACH(rxq, qdrop, rxq, i, xname);
8058#endif /* WM_EVENT_COUNTERS */ 8082#endif /* WM_EVENT_COUNTERS */
8059 8083
8060 rx_done++; 8084 rx_done++;
8061 } 8085 }
8062 if (error) 8086 if (error)
8063 goto fail_2; 8087 goto fail_2;
8064 8088
8065 for (i = 0; i < sc->sc_nqueues; i++) { 8089 for (i = 0; i < sc->sc_nqueues; i++) {
8066 char rndname[16]; 8090 char rndname[16];
8067 8091
8068 snprintf(rndname, sizeof(rndname), "%sTXRX%d", 8092 snprintf(rndname, sizeof(rndname), "%sTXRX%d",
8069 device_xname(sc->sc_dev), i); 8093 device_xname(sc->sc_dev), i);
8070 rnd_attach_source(&sc->sc_queue[i].rnd_source, rndname, 8094 rnd_attach_source(&sc->sc_queue[i].rnd_source, rndname,
8071 RND_TYPE_NET, RND_FLAG_DEFAULT); 8095 RND_TYPE_NET, RND_FLAG_DEFAULT);
8072 } 8096 }
8073 8097
8074 return 0; 8098 return 0;
8075 8099
8076fail_2: 8100fail_2:
8077 for (i = 0; i < rx_done; i++) { 8101 for (i = 0; i < rx_done; i++) {
8078 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq; 8102 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
8079 wm_free_rx_buffer(sc, rxq); 8103 wm_free_rx_buffer(sc, rxq);
8080 wm_free_rx_descs(sc, rxq); 8104 wm_free_rx_descs(sc, rxq);
8081 if (rxq->rxq_lock) 8105 if (rxq->rxq_lock)
8082 mutex_obj_free(rxq->rxq_lock); 8106 mutex_obj_free(rxq->rxq_lock);
8083 } 8107 }
8084fail_1: 8108fail_1:
8085 for (i = 0; i < tx_done; i++) { 8109 for (i = 0; i < tx_done; i++) {
8086 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq; 8110 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
8087 pcq_destroy(txq->txq_interq); 8111 pcq_destroy(txq->txq_interq);
8088 wm_free_tx_buffer(sc, txq); 8112 wm_free_tx_buffer(sc, txq);
8089 wm_free_tx_descs(sc, txq); 8113 wm_free_tx_descs(sc, txq);
8090 if (txq->txq_lock) 8114 if (txq->txq_lock)
8091 mutex_obj_free(txq->txq_lock); 8115 mutex_obj_free(txq->txq_lock);
8092 } 8116 }
8093 8117
8094 kmem_free(sc->sc_queue, 8118 kmem_free(sc->sc_queue,
8095 sizeof(struct wm_queue) * sc->sc_nqueues); 8119 sizeof(struct wm_queue) * sc->sc_nqueues);
8096fail_0: 8120fail_0:
8097 return error; 8121 return error;
8098} 8122}
8099 8123
8100/* 8124/*
8101 * wm_free_quques: 8125 * wm_free_quques:
8102 * Free {tx,rx}descs and {tx,rx} buffers 8126 * Free {tx,rx}descs and {tx,rx} buffers
8103 */ 8127 */
8104static void 8128static void
8105wm_free_txrx_queues(struct wm_softc *sc) 8129wm_free_txrx_queues(struct wm_softc *sc)
8106{ 8130{
8107 int i; 8131 int i;
8108 8132
8109 for (i = 0; i < sc->sc_nqueues; i++) 8133 for (i = 0; i < sc->sc_nqueues; i++)
8110 rnd_detach_source(&sc->sc_queue[i].rnd_source); 8134 rnd_detach_source(&sc->sc_queue[i].rnd_source);
8111 8135
8112 for (i = 0; i < sc->sc_nqueues; i++) { 8136 for (i = 0; i < sc->sc_nqueues; i++) {
8113 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq; 8137 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
8114 8138
8115#ifdef WM_EVENT_COUNTERS 8139#ifdef WM_EVENT_COUNTERS
8116 WM_Q_EVCNT_DETACH(rxq, intr, rxq, i); 8140 WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
8117 WM_Q_EVCNT_DETACH(rxq, defer, rxq, i); 8141 WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
8118 WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i); 8142 WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
8119 WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i); 8143 WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
 8144 if ((sc->sc_type >= WM_T_82575) && !WM_IS_ICHPCH(sc))
 8145 WM_Q_EVCNT_DETACH(rxq, qdrop, rxq, i);
8120#endif /* WM_EVENT_COUNTERS */ 8146#endif /* WM_EVENT_COUNTERS */
8121 8147
8122 wm_free_rx_buffer(sc, rxq); 8148 wm_free_rx_buffer(sc, rxq);
8123 wm_free_rx_descs(sc, rxq); 8149 wm_free_rx_descs(sc, rxq);
8124 if (rxq->rxq_lock) 8150 if (rxq->rxq_lock)
8125 mutex_obj_free(rxq->rxq_lock); 8151 mutex_obj_free(rxq->rxq_lock);
8126 } 8152 }
8127 8153
8128 for (i = 0; i < sc->sc_nqueues; i++) { 8154 for (i = 0; i < sc->sc_nqueues; i++) {
8129 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq; 8155 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
8130 struct mbuf *m; 8156 struct mbuf *m;
8131#ifdef WM_EVENT_COUNTERS 8157#ifdef WM_EVENT_COUNTERS
8132 int j; 8158 int j;
8133 8159
8134 WM_Q_EVCNT_DETACH(txq, txsstall, txq, i); 8160 WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
8135 WM_Q_EVCNT_DETACH(txq, txdstall, txq, i); 8161 WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
8136 WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i); 8162 WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
8137 WM_Q_EVCNT_DETACH(txq, txdw, txq, i); 8163 WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
8138 WM_Q_EVCNT_DETACH(txq, txqe, txq, i); 8164 WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
8139 WM_Q_EVCNT_DETACH(txq, ipsum, txq, i); 8165 WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
8140 WM_Q_EVCNT_DETACH(txq, tusum, txq, i); 8166 WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
8141 WM_Q_EVCNT_DETACH(txq, tusum6, txq, i); 8167 WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
8142 WM_Q_EVCNT_DETACH(txq, tso, txq, i); 8168 WM_Q_EVCNT_DETACH(txq, tso, txq, i);
8143 WM_Q_EVCNT_DETACH(txq, tso6, txq, i); 8169 WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
8144 WM_Q_EVCNT_DETACH(txq, tsopain, txq, i); 8170 WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
8145 8171
8146 for (j = 0; j < WM_NTXSEGS; j++) 8172 for (j = 0; j < WM_NTXSEGS; j++)
8147 evcnt_detach(&txq->txq_ev_txseg[j]); 8173 evcnt_detach(&txq->txq_ev_txseg[j]);
8148 8174
8149 WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i); 8175 WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
8150 WM_Q_EVCNT_DETACH(txq, descdrop, txq, i); 8176 WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
8151 WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i); 8177 WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
8152 WM_Q_EVCNT_DETACH(txq, defrag, txq, i); 8178 WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
8153 if (sc->sc_type <= WM_T_82544) 8179 if (sc->sc_type <= WM_T_82544)
8154 WM_Q_EVCNT_DETACH(txq, underrun, txq, i); 8180 WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
8155 WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i); 8181 WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
8156#endif /* WM_EVENT_COUNTERS */ 8182#endif /* WM_EVENT_COUNTERS */
8157 8183
8158 /* Drain txq_interq */ 8184 /* Drain txq_interq */
8159 while ((m = pcq_get(txq->txq_interq)) != NULL) 8185 while ((m = pcq_get(txq->txq_interq)) != NULL)
8160 m_freem(m); 8186 m_freem(m);
8161 pcq_destroy(txq->txq_interq); 8187 pcq_destroy(txq->txq_interq);
8162 8188
8163 wm_free_tx_buffer(sc, txq); 8189 wm_free_tx_buffer(sc, txq);
8164 wm_free_tx_descs(sc, txq); 8190 wm_free_tx_descs(sc, txq);
8165 if (txq->txq_lock) 8191 if (txq->txq_lock)
8166 mutex_obj_free(txq->txq_lock); 8192 mutex_obj_free(txq->txq_lock);
8167 } 8193 }
8168 8194
8169 kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues); 8195 kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
8170} 8196}
8171 8197
8172static void 8198static void
8173wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq) 8199wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
8174{ 8200{
8175 8201
8176 KASSERT(mutex_owned(txq->txq_lock)); 8202 KASSERT(mutex_owned(txq->txq_lock));
8177 8203
8178 /* Initialize the transmit descriptor ring. */ 8204 /* Initialize the transmit descriptor ring. */
8179 memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq)); 8205 memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
8180 wm_cdtxsync(txq, 0, WM_NTXDESC(txq), 8206 wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
8181 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 8207 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
8182 txq->txq_free = WM_NTXDESC(txq); 8208 txq->txq_free = WM_NTXDESC(txq);
8183 txq->txq_next = 0; 8209 txq->txq_next = 0;
8184} 8210}
8185 8211
8186static void 8212static void
8187wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq, 8213wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
8188 struct wm_txqueue *txq) 8214 struct wm_txqueue *txq)
8189{ 8215{
8190 8216
8191 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 8217 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
8192 device_xname(sc->sc_dev), __func__)); 8218 device_xname(sc->sc_dev), __func__));
8193 KASSERT(mutex_owned(txq->txq_lock)); 8219 KASSERT(mutex_owned(txq->txq_lock));
8194 8220
8195 if (sc->sc_type < WM_T_82543) { 8221 if (sc->sc_type < WM_T_82543) {
8196 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0)); 8222 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
8197 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0)); 8223 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
8198 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq)); 8224 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
8199 CSR_WRITE(sc, WMREG_OLD_TDH, 0); 8225 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
8200 CSR_WRITE(sc, WMREG_OLD_TDT, 0); 8226 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
8201 CSR_WRITE(sc, WMREG_OLD_TIDV, 128); 8227 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
8202 } else { 8228 } else {
8203 int qid = wmq->wmq_id; 8229 int qid = wmq->wmq_id;
8204 8230
8205 CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0)); 8231 CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
8206 CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0)); 8232 CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
8207 CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq)); 8233 CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
8208 CSR_WRITE(sc, WMREG_TDH(qid), 0); 8234 CSR_WRITE(sc, WMREG_TDH(qid), 0);
8209 8235
8210 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 8236 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8211 /* 8237 /*
8212 * Don't write TDT before TCTL.EN is set. 8238 * Don't write TDT before TCTL.EN is set.
8213 * See the document. 8239 * See the document.
8214 */ 8240 */
8215 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE 8241 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
8216 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0) 8242 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
8217 | TXDCTL_WTHRESH(0)); 8243 | TXDCTL_WTHRESH(0));
8218 else { 8244 else {
8219 /* XXX should update with AIM? */ 8245 /* XXX should update with AIM? */
8220 CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4); 8246 CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
8221 if (sc->sc_type >= WM_T_82540) { 8247 if (sc->sc_type >= WM_T_82540) {
8222 /* Should be the same */ 8248 /* Should be the same */
8223 CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4); 8249 CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
8224 } 8250 }
8225 8251
8226 CSR_WRITE(sc, WMREG_TDT(qid), 0); 8252 CSR_WRITE(sc, WMREG_TDT(qid), 0);
8227 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) | 8253 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
8228 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0)); 8254 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
8229 } 8255 }
8230 } 8256 }
8231} 8257}
8232 8258
8233static void 8259static void
8234wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq) 8260wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
8235{ 8261{
8236 int i; 8262 int i;
8237 8263
8238 KASSERT(mutex_owned(txq->txq_lock)); 8264 KASSERT(mutex_owned(txq->txq_lock));
8239 8265
8240 /* Initialize the transmit job descriptors. */ 8266 /* Initialize the transmit job descriptors. */
8241 for (i = 0; i < WM_TXQUEUELEN(txq); i++) 8267 for (i = 0; i < WM_TXQUEUELEN(txq); i++)
8242 txq->txq_soft[i].txs_mbuf = NULL; 8268 txq->txq_soft[i].txs_mbuf = NULL;
8243 txq->txq_sfree = WM_TXQUEUELEN(txq); 8269 txq->txq_sfree = WM_TXQUEUELEN(txq);
8244 txq->txq_snext = 0; 8270 txq->txq_snext = 0;
8245 txq->txq_sdirty = 0; 8271 txq->txq_sdirty = 0;
8246} 8272}
8247 8273
8248static void 8274static void
8249wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq, 8275wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
8250 struct wm_txqueue *txq) 8276 struct wm_txqueue *txq)
8251{ 8277{
8252 8278
8253 KASSERT(mutex_owned(txq->txq_lock)); 8279 KASSERT(mutex_owned(txq->txq_lock));
8254 8280
8255 /* 8281 /*
8256 * Set up some register offsets that are different between 8282 * Set up some register offsets that are different between
8257 * the i82542 and the i82543 and later chips. 8283 * the i82542 and the i82543 and later chips.
8258 */ 8284 */
8259 if (sc->sc_type < WM_T_82543) 8285 if (sc->sc_type < WM_T_82543)
8260 txq->txq_tdt_reg = WMREG_OLD_TDT; 8286 txq->txq_tdt_reg = WMREG_OLD_TDT;
8261 else 8287 else
8262 txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id); 8288 txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
8263 8289
8264 wm_init_tx_descs(sc, txq); 8290 wm_init_tx_descs(sc, txq);
8265 wm_init_tx_regs(sc, wmq, txq); 8291 wm_init_tx_regs(sc, wmq, txq);
8266 wm_init_tx_buffer(sc, txq); 8292 wm_init_tx_buffer(sc, txq);
8267 8293
8268 /* Clear other than WM_TXQ_LINKDOWN_DISCARD */ 8294 /* Clear other than WM_TXQ_LINKDOWN_DISCARD */
8269 txq->txq_flags &= WM_TXQ_LINKDOWN_DISCARD; 8295 txq->txq_flags &= WM_TXQ_LINKDOWN_DISCARD;
8270 8296
8271 txq->txq_sending = false; 8297 txq->txq_sending = false;
8272} 8298}
8273 8299
8274static void 8300static void
8275wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq, 8301wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
8276 struct wm_rxqueue *rxq) 8302 struct wm_rxqueue *rxq)
8277{ 8303{
8278 8304
8279 KASSERT(mutex_owned(rxq->rxq_lock)); 8305 KASSERT(mutex_owned(rxq->rxq_lock));
8280 8306
8281 /* 8307 /*
8282 * Initialize the receive descriptor and receive job 8308 * Initialize the receive descriptor and receive job
8283 * descriptor rings. 8309 * descriptor rings.
8284 */ 8310 */
8285 if (sc->sc_type < WM_T_82543) { 8311 if (sc->sc_type < WM_T_82543) {
8286 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0)); 8312 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
8287 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0)); 8313 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
8288 CSR_WRITE(sc, WMREG_OLD_RDLEN0, 8314 CSR_WRITE(sc, WMREG_OLD_RDLEN0,
8289 rxq->rxq_descsize * rxq->rxq_ndesc); 8315 rxq->rxq_descsize * rxq->rxq_ndesc);
8290 CSR_WRITE(sc, WMREG_OLD_RDH0, 0); 8316 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
8291 CSR_WRITE(sc, WMREG_OLD_RDT0, 0); 8317 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
8292 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD); 8318 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
8293 8319
8294 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0); 8320 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
8295 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0); 8321 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
8296 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0); 8322 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
8297 CSR_WRITE(sc, WMREG_OLD_RDH1, 0); 8323 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
8298 CSR_WRITE(sc, WMREG_OLD_RDT1, 0); 8324 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
8299 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0); 8325 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
8300 } else { 8326 } else {
8301 int qid = wmq->wmq_id; 8327 int qid = wmq->wmq_id;
8302 8328
8303 CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0)); 8329 CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
8304 CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0)); 8330 CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
8305 CSR_WRITE(sc, WMREG_RDLEN(qid), 8331 CSR_WRITE(sc, WMREG_RDLEN(qid),
8306 rxq->rxq_descsize * rxq->rxq_ndesc); 8332 rxq->rxq_descsize * rxq->rxq_ndesc);
8307 8333
8308 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { 8334 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
 8335 uint32_t srrctl;
 8336
8309 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1)) 8337 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
8310 panic("%s: MCLBYTES %d unsupported for 82575 " 8338 panic("%s: MCLBYTES %d unsupported for 82575 "
8311 "or higher\n", __func__, MCLBYTES); 8339 "or higher\n", __func__, MCLBYTES);
8312 8340
8313 /* 8341 /*
8314 * Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF 8342 * Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF
8315 * only. 8343 * only.
8316 */ 8344 */
8317 CSR_WRITE(sc, WMREG_SRRCTL(qid), 8345 srrctl = SRRCTL_DESCTYPE_ADV_ONEBUF
8318 SRRCTL_DESCTYPE_ADV_ONEBUF 8346 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT);
8319 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT)); 8347 /*
 8348 * Drop frames if the RX descriptor ring has no room.
 8349 * This is enabled only on multiqueue system to avoid
 8350 * bad influence to other queues.
 8351 */
 8352 if (sc->sc_nqueues > 1)
 8353 srrctl |= SRRCTL_DROP_EN;
 8354 CSR_WRITE(sc, WMREG_SRRCTL(qid), srrctl);
 8355
8320 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE 8356 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
8321 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8) 8357 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
8322 | RXDCTL_WTHRESH(1)); 8358 | RXDCTL_WTHRESH(1));
8323 CSR_WRITE(sc, WMREG_RDH(qid), 0); 8359 CSR_WRITE(sc, WMREG_RDH(qid), 0);
8324 CSR_WRITE(sc, WMREG_RDT(qid), 0); 8360 CSR_WRITE(sc, WMREG_RDT(qid), 0);
8325 } else { 8361 } else {
8326 CSR_WRITE(sc, WMREG_RDH(qid), 0); 8362 CSR_WRITE(sc, WMREG_RDH(qid), 0);
8327 CSR_WRITE(sc, WMREG_RDT(qid), 0); 8363 CSR_WRITE(sc, WMREG_RDT(qid), 0);
8328 /* XXX should update with AIM? */ 8364 /* XXX should update with AIM? */
8329 CSR_WRITE(sc, WMREG_RDTR, 8365 CSR_WRITE(sc, WMREG_RDTR,
8330 (wmq->wmq_itr / 4) | RDTR_FPD); 8366 (wmq->wmq_itr / 4) | RDTR_FPD);
8331 /* MUST be same */ 8367 /* MUST be same */
8332 CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4); 8368 CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
8333 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) | 8369 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
8334 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1)); 8370 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
8335 } 8371 }
8336 } 8372 }
8337} 8373}
8338 8374
8339static int 8375static int
8340wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq) 8376wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
8341{ 8377{
8342 struct wm_rxsoft *rxs; 8378 struct wm_rxsoft *rxs;
8343 int error, i; 8379 int error, i;
8344 8380
8345 KASSERT(mutex_owned(rxq->rxq_lock)); 8381 KASSERT(mutex_owned(rxq->rxq_lock));
8346 8382
8347 for (i = 0; i < rxq->rxq_ndesc; i++) { 8383 for (i = 0; i < rxq->rxq_ndesc; i++) {
8348 rxs = &rxq->rxq_soft[i]; 8384 rxs = &rxq->rxq_soft[i];
8349 if (rxs->rxs_mbuf == NULL) { 8385 if (rxs->rxs_mbuf == NULL) {
8350 if ((error = wm_add_rxbuf(rxq, i)) != 0) { 8386 if ((error = wm_add_rxbuf(rxq, i)) != 0) {
8351 log(LOG_ERR, "%s: unable to allocate or map " 8387 log(LOG_ERR, "%s: unable to allocate or map "
8352 "rx buffer %d, error = %d\n", 8388 "rx buffer %d, error = %d\n",
8353 device_xname(sc->sc_dev), i, error); 8389 device_xname(sc->sc_dev), i, error);
8354 /* 8390 /*
8355 * XXX Should attempt to run with fewer receive 8391 * XXX Should attempt to run with fewer receive
8356 * XXX buffers instead of just failing. 8392 * XXX buffers instead of just failing.
8357 */ 8393 */
8358 wm_rxdrain(rxq); 8394 wm_rxdrain(rxq);
8359 return ENOMEM; 8395 return ENOMEM;
8360 } 8396 }
8361 } else { 8397 } else {
8362 /* 8398 /*
8363 * For 82575 and 82576, the RX descriptors must be 8399 * For 82575 and 82576, the RX descriptors must be
8364 * initialized after the setting of RCTL.EN in 8400 * initialized after the setting of RCTL.EN in
8365 * wm_set_filter() 8401 * wm_set_filter()
8366 */ 8402 */
8367 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0) 8403 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
8368 wm_init_rxdesc(rxq, i); 8404 wm_init_rxdesc(rxq, i);
8369 } 8405 }
8370 } 8406 }
8371 rxq->rxq_ptr = 0; 8407 rxq->rxq_ptr = 0;
8372 rxq->rxq_discard = 0; 8408 rxq->rxq_discard = 0;
8373 WM_RXCHAIN_RESET(rxq); 8409 WM_RXCHAIN_RESET(rxq);
8374 8410
8375 return 0; 8411 return 0;
8376} 8412}
8377 8413
8378static int 8414static int
8379wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq, 8415wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
8380 struct wm_rxqueue *rxq) 8416 struct wm_rxqueue *rxq)
8381{ 8417{
8382 8418
8383 KASSERT(mutex_owned(rxq->rxq_lock)); 8419 KASSERT(mutex_owned(rxq->rxq_lock));
8384 8420
8385 /* 8421 /*
8386 * Set up some register offsets that are different between 8422 * Set up some register offsets that are different between
8387 * the i82542 and the i82543 and later chips. 8423 * the i82542 and the i82543 and later chips.
8388 */ 8424 */
8389 if (sc->sc_type < WM_T_82543) 8425 if (sc->sc_type < WM_T_82543)
8390 rxq->rxq_rdt_reg = WMREG_OLD_RDT0; 8426 rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
8391 else 8427 else
8392 rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id); 8428 rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
8393 8429
8394 wm_init_rx_regs(sc, wmq, rxq); 8430 wm_init_rx_regs(sc, wmq, rxq);
8395 return wm_init_rx_buffer(sc, rxq); 8431 return wm_init_rx_buffer(sc, rxq);
8396} 8432}
8397 8433
8398/* 8434/*
8399 * wm_init_quques: 8435 * wm_init_quques:
8400 * Initialize {tx,rx}descs and {tx,rx} buffers 8436 * Initialize {tx,rx}descs and {tx,rx} buffers
8401 */ 8437 */
8402static int 8438static int
8403wm_init_txrx_queues(struct wm_softc *sc) 8439wm_init_txrx_queues(struct wm_softc *sc)
8404{ 8440{
8405 int i, error = 0; 8441 int i, error = 0;
8406 8442
8407 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 8443 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
8408 device_xname(sc->sc_dev), __func__)); 8444 device_xname(sc->sc_dev), __func__));
8409 8445
8410 for (i = 0; i < sc->sc_nqueues; i++) { 8446 for (i = 0; i < sc->sc_nqueues; i++) {
8411 struct wm_queue *wmq = &sc->sc_queue[i]; 8447 struct wm_queue *wmq = &sc->sc_queue[i];
8412 struct wm_txqueue *txq = &wmq->wmq_txq; 8448 struct wm_txqueue *txq = &wmq->wmq_txq;
8413 struct wm_rxqueue *rxq = &wmq->wmq_rxq; 8449 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
8414 8450
8415 /* 8451 /*
8416 * TODO 8452 * TODO
8417 * Currently, use constant variable instead of AIM. 8453 * Currently, use constant variable instead of AIM.
8418 * Furthermore, the interrupt interval of multiqueue which use 8454 * Furthermore, the interrupt interval of multiqueue which use
8419 * polling mode is less than default value. 8455 * polling mode is less than default value.
8420 * More tuning and AIM are required. 8456 * More tuning and AIM are required.
8421 */ 8457 */
8422 if (wm_is_using_multiqueue(sc)) 8458 if (wm_is_using_multiqueue(sc))
8423 wmq->wmq_itr = 50; 8459 wmq->wmq_itr = 50;
8424 else 8460 else
8425 wmq->wmq_itr = sc->sc_itr_init; 8461 wmq->wmq_itr = sc->sc_itr_init;
8426 wmq->wmq_set_itr = true; 8462 wmq->wmq_set_itr = true;
8427 8463
8428 mutex_enter(txq->txq_lock); 8464 mutex_enter(txq->txq_lock);
8429 wm_init_tx_queue(sc, wmq, txq); 8465 wm_init_tx_queue(sc, wmq, txq);
8430 mutex_exit(txq->txq_lock); 8466 mutex_exit(txq->txq_lock);
8431 8467
8432 mutex_enter(rxq->rxq_lock); 8468 mutex_enter(rxq->rxq_lock);
8433 error = wm_init_rx_queue(sc, wmq, rxq); 8469 error = wm_init_rx_queue(sc, wmq, rxq);
8434 mutex_exit(rxq->rxq_lock); 8470 mutex_exit(rxq->rxq_lock);
8435 if (error) 8471 if (error)
8436 break; 8472 break;
8437 } 8473 }
8438 8474
8439 return error; 8475 return error;
8440} 8476}
8441 8477
8442/* 8478/*
8443 * wm_tx_offload: 8479 * wm_tx_offload:
8444 * 8480 *
8445 * Set up TCP/IP checksumming parameters for the 8481 * Set up TCP/IP checksumming parameters for the
8446 * specified packet. 8482 * specified packet.
8447 */ 8483 */
8448static void 8484static void
8449wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq, 8485wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
8450 struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp) 8486 struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
8451{ 8487{
8452 struct mbuf *m0 = txs->txs_mbuf; 8488 struct mbuf *m0 = txs->txs_mbuf;
8453 struct livengood_tcpip_ctxdesc *t; 8489 struct livengood_tcpip_ctxdesc *t;
8454 uint32_t ipcs, tucs, cmd, cmdlen, seg; 8490 uint32_t ipcs, tucs, cmd, cmdlen, seg;
8455 uint32_t ipcse; 8491 uint32_t ipcse;
8456 struct ether_header *eh; 8492 struct ether_header *eh;
8457 int offset, iphl; 8493 int offset, iphl;
8458 uint8_t fields; 8494 uint8_t fields;
8459 8495
8460 /* 8496 /*
8461 * XXX It would be nice if the mbuf pkthdr had offset 8497 * XXX It would be nice if the mbuf pkthdr had offset
8462 * fields for the protocol headers. 8498 * fields for the protocol headers.
8463 */ 8499 */
8464 8500
8465 eh = mtod(m0, struct ether_header *); 8501 eh = mtod(m0, struct ether_header *);
8466 switch (htons(eh->ether_type)) { 8502 switch (htons(eh->ether_type)) {
8467 case ETHERTYPE_IP: 8503 case ETHERTYPE_IP:
8468 case ETHERTYPE_IPV6: 8504 case ETHERTYPE_IPV6:
8469 offset = ETHER_HDR_LEN; 8505 offset = ETHER_HDR_LEN;
8470 break; 8506 break;
8471 8507
8472 case ETHERTYPE_VLAN: 8508 case ETHERTYPE_VLAN:
8473 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 8509 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
8474 break; 8510 break;
8475 8511
8476 default: 8512 default:
8477 /* Don't support this protocol or encapsulation. */ 8513 /* Don't support this protocol or encapsulation. */
8478 txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0; 8514 txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
8479 txq->txq_last_hw_ipcs = 0; 8515 txq->txq_last_hw_ipcs = 0;
8480 txq->txq_last_hw_tucs = 0; 8516 txq->txq_last_hw_tucs = 0;
8481 *fieldsp = 0; 8517 *fieldsp = 0;
8482 *cmdp = 0; 8518 *cmdp = 0;
8483 return; 8519 return;
8484 } 8520 }
8485 8521
8486 if ((m0->m_pkthdr.csum_flags & 8522 if ((m0->m_pkthdr.csum_flags &
8487 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) { 8523 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
8488 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data); 8524 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
8489 } else 8525 } else
8490 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data); 8526 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
8491 8527
8492 ipcse = offset + iphl - 1; 8528 ipcse = offset + iphl - 1;
8493 8529
8494 cmd = WTX_CMD_DEXT | WTX_DTYP_D; 8530 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
8495 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE; 8531 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
8496 seg = 0; 8532 seg = 0;
8497 fields = 0; 8533 fields = 0;
8498 8534
8499 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) { 8535 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
8500 int hlen = offset + iphl; 8536 int hlen = offset + iphl;
8501 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0; 8537 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
8502 8538
8503 if (__predict_false(m0->m_len < 8539 if (__predict_false(m0->m_len <
8504 (hlen + sizeof(struct tcphdr)))) { 8540 (hlen + sizeof(struct tcphdr)))) {
8505 /* 8541 /*
8506 * TCP/IP headers are not in the first mbuf; we need 8542 * TCP/IP headers are not in the first mbuf; we need
8507 * to do this the slow and painful way. Let's just 8543 * to do this the slow and painful way. Let's just
8508 * hope this doesn't happen very often. 8544 * hope this doesn't happen very often.
8509 */ 8545 */
8510 struct tcphdr th; 8546 struct tcphdr th;
8511 8547
8512 WM_Q_EVCNT_INCR(txq, tsopain); 8548 WM_Q_EVCNT_INCR(txq, tsopain);
8513 8549
8514 m_copydata(m0, hlen, sizeof(th), &th); 8550 m_copydata(m0, hlen, sizeof(th), &th);
8515 if (v4) { 8551 if (v4) {
8516 struct ip ip; 8552 struct ip ip;
8517 8553
8518 m_copydata(m0, offset, sizeof(ip), &ip); 8554 m_copydata(m0, offset, sizeof(ip), &ip);
8519 ip.ip_len = 0; 8555 ip.ip_len = 0;
8520 m_copyback(m0, 8556 m_copyback(m0,
8521 offset + offsetof(struct ip, ip_len), 8557 offset + offsetof(struct ip, ip_len),
8522 sizeof(ip.ip_len), &ip.ip_len); 8558 sizeof(ip.ip_len), &ip.ip_len);
8523 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr, 8559 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
8524 ip.ip_dst.s_addr, htons(IPPROTO_TCP)); 8560 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
8525 } else { 8561 } else {
8526 struct ip6_hdr ip6; 8562 struct ip6_hdr ip6;
8527 8563
8528 m_copydata(m0, offset, sizeof(ip6), &ip6); 8564 m_copydata(m0, offset, sizeof(ip6), &ip6);
8529 ip6.ip6_plen = 0; 8565 ip6.ip6_plen = 0;
8530 m_copyback(m0, 8566 m_copyback(m0,
8531 offset + offsetof(struct ip6_hdr, ip6_plen), 8567 offset + offsetof(struct ip6_hdr, ip6_plen),
8532 sizeof(ip6.ip6_plen), &ip6.ip6_plen); 8568 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
8533 th.th_sum = in6_cksum_phdr(&ip6.ip6_src, 8569 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
8534 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP)); 8570 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
8535 } 8571 }
8536 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum), 8572 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
8537 sizeof(th.th_sum), &th.th_sum); 8573 sizeof(th.th_sum), &th.th_sum);
8538 8574
8539 hlen += th.th_off << 2; 8575 hlen += th.th_off << 2;
8540 } else { 8576 } else {
8541 /* 8577 /*
8542 * TCP/IP headers are in the first mbuf; we can do 8578 * TCP/IP headers are in the first mbuf; we can do
8543 * this the easy way. 8579 * this the easy way.
8544 */ 8580 */
8545 struct tcphdr *th; 8581 struct tcphdr *th;
8546 8582
8547 if (v4) { 8583 if (v4) {
8548 struct ip *ip = 8584 struct ip *ip =
8549 (void *)(mtod(m0, char *) + offset); 8585 (void *)(mtod(m0, char *) + offset);
8550 th = (void *)(mtod(m0, char *) + hlen); 8586 th = (void *)(mtod(m0, char *) + hlen);
8551 8587
8552 ip->ip_len = 0; 8588 ip->ip_len = 0;
8553 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, 8589 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
8554 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 8590 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
8555 } else { 8591 } else {
8556 struct ip6_hdr *ip6 = 8592 struct ip6_hdr *ip6 =
8557 (void *)(mtod(m0, char *) + offset); 8593 (void *)(mtod(m0, char *) + offset);
8558 th = (void *)(mtod(m0, char *) + hlen); 8594 th = (void *)(mtod(m0, char *) + hlen);
8559 8595
8560 ip6->ip6_plen = 0; 8596 ip6->ip6_plen = 0;
8561 th->th_sum = in6_cksum_phdr(&ip6->ip6_src, 8597 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
8562 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP)); 8598 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
8563 } 8599 }
8564 hlen += th->th_off << 2; 8600 hlen += th->th_off << 2;
8565 } 8601 }
8566 8602
8567 if (v4) { 8603 if (v4) {
8568 WM_Q_EVCNT_INCR(txq, tso); 8604 WM_Q_EVCNT_INCR(txq, tso);
8569 cmdlen |= WTX_TCPIP_CMD_IP; 8605 cmdlen |= WTX_TCPIP_CMD_IP;
8570 } else { 8606 } else {
8571 WM_Q_EVCNT_INCR(txq, tso6); 8607 WM_Q_EVCNT_INCR(txq, tso6);
8572 ipcse = 0; 8608 ipcse = 0;
8573 } 8609 }
8574 cmd |= WTX_TCPIP_CMD_TSE; 8610 cmd |= WTX_TCPIP_CMD_TSE;
8575 cmdlen |= WTX_TCPIP_CMD_TSE | 8611 cmdlen |= WTX_TCPIP_CMD_TSE |
8576 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen); 8612 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
8577 seg = WTX_TCPIP_SEG_HDRLEN(hlen) | 8613 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
8578 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz); 8614 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
8579 } 8615 }
8580 8616
8581 /* 8617 /*
8582 * NOTE: Even if we're not using the IP or TCP/UDP checksum 8618 * NOTE: Even if we're not using the IP or TCP/UDP checksum
8583 * offload feature, if we load the context descriptor, we 8619 * offload feature, if we load the context descriptor, we
8584 * MUST provide valid values for IPCSS and TUCSS fields. 8620 * MUST provide valid values for IPCSS and TUCSS fields.
8585 */ 8621 */
8586 8622
8587 ipcs = WTX_TCPIP_IPCSS(offset) | 8623 ipcs = WTX_TCPIP_IPCSS(offset) |
8588 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) | 8624 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
8589 WTX_TCPIP_IPCSE(ipcse); 8625 WTX_TCPIP_IPCSE(ipcse);
8590 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) { 8626 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
8591 WM_Q_EVCNT_INCR(txq, ipsum); 8627 WM_Q_EVCNT_INCR(txq, ipsum);
8592 fields |= WTX_IXSM; 8628 fields |= WTX_IXSM;
8593 } 8629 }
8594 8630
8595 offset += iphl; 8631 offset += iphl;
8596 8632
8597 if (m0->m_pkthdr.csum_flags & 8633 if (m0->m_pkthdr.csum_flags &
8598 (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) { 8634 (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
8599 WM_Q_EVCNT_INCR(txq, tusum); 8635 WM_Q_EVCNT_INCR(txq, tusum);
8600 fields |= WTX_TXSM; 8636 fields |= WTX_TXSM;
8601 tucs = WTX_TCPIP_TUCSS(offset) | 8637 tucs = WTX_TCPIP_TUCSS(offset) |
8602 WTX_TCPIP_TUCSO(offset + 8638 WTX_TCPIP_TUCSO(offset +
8603 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) | 8639 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
8604 WTX_TCPIP_TUCSE(0) /* Rest of packet */; 8640 WTX_TCPIP_TUCSE(0) /* Rest of packet */;
8605 } else if ((m0->m_pkthdr.csum_flags & 8641 } else if ((m0->m_pkthdr.csum_flags &
8606 (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) { 8642 (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
8607 WM_Q_EVCNT_INCR(txq, tusum6); 8643 WM_Q_EVCNT_INCR(txq, tusum6);
8608 fields |= WTX_TXSM; 8644 fields |= WTX_TXSM;
8609 tucs = WTX_TCPIP_TUCSS(offset) | 8645 tucs = WTX_TCPIP_TUCSS(offset) |
8610 WTX_TCPIP_TUCSO(offset + 8646 WTX_TCPIP_TUCSO(offset +
8611 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) | 8647 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
8612 WTX_TCPIP_TUCSE(0) /* Rest of packet */; 8648 WTX_TCPIP_TUCSE(0) /* Rest of packet */;
8613 } else { 8649 } else {
8614 /* Just initialize it to a valid TCP context. */ 8650 /* Just initialize it to a valid TCP context. */
8615 tucs = WTX_TCPIP_TUCSS(offset) | 8651 tucs = WTX_TCPIP_TUCSS(offset) |
8616 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) | 8652 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
8617 WTX_TCPIP_TUCSE(0) /* Rest of packet */; 8653 WTX_TCPIP_TUCSE(0) /* Rest of packet */;
8618 } 8654 }
8619 8655
8620 *cmdp = cmd; 8656 *cmdp = cmd;
8621 *fieldsp = fields; 8657 *fieldsp = fields;
8622 8658
8623 /* 8659 /*
8624 * We don't have to write context descriptor for every packet 8660 * We don't have to write context descriptor for every packet
8625 * except for 82574. For 82574, we must write context descriptor 8661 * except for 82574. For 82574, we must write context descriptor
8626 * for every packet when we use two descriptor queues. 8662 * for every packet when we use two descriptor queues.
8627 * 8663 *
8628 * The 82574L can only remember the *last* context used 8664 * The 82574L can only remember the *last* context used
8629 * regardless of queue that it was use for. We cannot reuse 8665 * regardless of queue that it was use for. We cannot reuse
8630 * contexts on this hardware platform and must generate a new 8666 * contexts on this hardware platform and must generate a new
8631 * context every time. 82574L hardware spec, section 7.2.6, 8667 * context every time. 82574L hardware spec, section 7.2.6,
8632 * second note. 8668 * second note.
8633 */ 8669 */
8634 if (sc->sc_nqueues < 2) { 8670 if (sc->sc_nqueues < 2) {
8635 /* 8671 /*
8636 * Setting up new checksum offload context for every 8672 * Setting up new checksum offload context for every
8637 * frames takes a lot of processing time for hardware. 8673 * frames takes a lot of processing time for hardware.
8638 * This also reduces performance a lot for small sized 8674 * This also reduces performance a lot for small sized
8639 * frames so avoid it if driver can use previously 8675 * frames so avoid it if driver can use previously
8640 * configured checksum offload context. 8676 * configured checksum offload context.
8641 * For TSO, in theory we can use the same TSO context only if 8677 * For TSO, in theory we can use the same TSO context only if
8642 * frame is the same type(IP/TCP) and the same MSS. However 8678 * frame is the same type(IP/TCP) and the same MSS. However
8643 * checking whether a frame has the same IP/TCP structure is a 8679 * checking whether a frame has the same IP/TCP structure is a
8644 * hard thing so just ignore that and always restablish a 8680 * hard thing so just ignore that and always restablish a
8645 * new TSO context. 8681 * new TSO context.
8646 */ 8682 */
8647 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) 8683 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
8648 == 0) { 8684 == 0) {
8649 if (txq->txq_last_hw_cmd == cmd && 8685 if (txq->txq_last_hw_cmd == cmd &&
8650 txq->txq_last_hw_fields == fields && 8686 txq->txq_last_hw_fields == fields &&
8651 txq->txq_last_hw_ipcs == (ipcs & 0xffff) && 8687 txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
8652 txq->txq_last_hw_tucs == (tucs & 0xffff)) { 8688 txq->txq_last_hw_tucs == (tucs & 0xffff)) {
8653 WM_Q_EVCNT_INCR(txq, skipcontext); 8689 WM_Q_EVCNT_INCR(txq, skipcontext);
8654 return; 8690 return;
8655 } 8691 }
8656 } 8692 }
8657 8693
8658 txq->txq_last_hw_cmd = cmd; 8694 txq->txq_last_hw_cmd = cmd;
8659 txq->txq_last_hw_fields = fields; 8695 txq->txq_last_hw_fields = fields;
8660 txq->txq_last_hw_ipcs = (ipcs & 0xffff); 8696 txq->txq_last_hw_ipcs = (ipcs & 0xffff);
8661 txq->txq_last_hw_tucs = (tucs & 0xffff); 8697 txq->txq_last_hw_tucs = (tucs & 0xffff);
8662 } 8698 }
8663 8699
8664 /* Fill in the context descriptor. */ 8700 /* Fill in the context descriptor. */
8665 t = (struct livengood_tcpip_ctxdesc *) 8701 t = (struct livengood_tcpip_ctxdesc *)
8666 &txq->txq_descs[txq->txq_next]; 8702 &txq->txq_descs[txq->txq_next];
8667 t->tcpip_ipcs = htole32(ipcs); 8703 t->tcpip_ipcs = htole32(ipcs);
8668 t->tcpip_tucs = htole32(tucs); 8704 t->tcpip_tucs = htole32(tucs);
8669 t->tcpip_cmdlen = htole32(cmdlen); 8705 t->tcpip_cmdlen = htole32(cmdlen);
8670 t->tcpip_seg = htole32(seg); 8706 t->tcpip_seg = htole32(seg);
8671 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE); 8707 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
8672 8708
8673 txq->txq_next = WM_NEXTTX(txq, txq->txq_next); 8709 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
8674 txs->txs_ndesc++; 8710 txs->txs_ndesc++;
8675} 8711}
8676 8712
8677static inline int 8713static inline int
8678wm_select_txqueue(struct ifnet *ifp, struct mbuf *m) 8714wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
8679{ 8715{
8680 struct wm_softc *sc = ifp->if_softc; 8716 struct wm_softc *sc = ifp->if_softc;
8681 u_int cpuid = cpu_index(curcpu()); 8717 u_int cpuid = cpu_index(curcpu());
8682 8718
8683 /* 8719 /*
8684 * Currently, simple distribute strategy. 8720 * Currently, simple distribute strategy.
8685 * TODO: 8721 * TODO:
8686 * distribute by flowid(RSS has value). 8722 * distribute by flowid(RSS has value).
8687 */ 8723 */
8688 return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues; 8724 return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
8689} 8725}
8690 8726
8691static inline bool 8727static inline bool
8692wm_linkdown_discard(struct wm_txqueue *txq) 8728wm_linkdown_discard(struct wm_txqueue *txq)
8693{ 8729{
8694 8730
8695 if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0) 8731 if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
8696 return true; 8732 return true;
8697 8733
8698 return false; 8734 return false;
8699} 8735}
8700 8736
/*
 * wm_start: [ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	/* The legacy (non-multiqueue) start path always uses queue 0. */
	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;

#ifdef WM_MPSAFE
	KASSERT(if_is_mpsafe(ifp));
#endif
	/*
	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
	 */

	/* Serialize against the softint/interrupt paths for this queue. */
	mutex_enter(txq->txq_lock);
	if (!txq->txq_stopping)
		wm_start_locked(ifp);
	mutex_exit(txq->txq_lock);
}
8724 8760
/* Locked variant of wm_start(); caller holds queue 0's txq_lock. */
static void
wm_start_locked(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;

	/* is_transmit == false: drain ifp->if_snd, honoring IFF_OACTIVE. */
	wm_send_common_locked(ifp, txq, false);
}
8733 8769
8734static int 8770static int
8735wm_transmit(struct ifnet *ifp, struct mbuf *m) 8771wm_transmit(struct ifnet *ifp, struct mbuf *m)
8736{ 8772{
8737 int qid; 8773 int qid;
8738 struct wm_softc *sc = ifp->if_softc; 8774 struct wm_softc *sc = ifp->if_softc;
8739 struct wm_txqueue *txq; 8775 struct wm_txqueue *txq;
8740 8776
8741 qid = wm_select_txqueue(ifp, m); 8777 qid = wm_select_txqueue(ifp, m);
8742 txq = &sc->sc_queue[qid].wmq_txq; 8778 txq = &sc->sc_queue[qid].wmq_txq;
8743 8779
8744 if (__predict_false(!pcq_put(txq->txq_interq, m))) { 8780 if (__predict_false(!pcq_put(txq->txq_interq, m))) {
8745 m_freem(m); 8781 m_freem(m);
8746 WM_Q_EVCNT_INCR(txq, pcqdrop); 8782 WM_Q_EVCNT_INCR(txq, pcqdrop);
8747 return ENOBUFS; 8783 return ENOBUFS;
8748 } 8784 }
8749 8785
8750 /* XXX NOMPSAFE: ifp->if_data should be percpu. */ 8786 /* XXX NOMPSAFE: ifp->if_data should be percpu. */
8751 ifp->if_obytes += m->m_pkthdr.len; 8787 ifp->if_obytes += m->m_pkthdr.len;
8752 if (m->m_flags & M_MCAST) 8788 if (m->m_flags & M_MCAST)
8753 ifp->if_omcasts++; 8789 ifp->if_omcasts++;
8754 8790
8755 if (mutex_tryenter(txq->txq_lock)) { 8791 if (mutex_tryenter(txq->txq_lock)) {
8756 if (!txq->txq_stopping) 8792 if (!txq->txq_stopping)
8757 wm_transmit_locked(ifp, txq); 8793 wm_transmit_locked(ifp, txq);
8758 mutex_exit(txq->txq_lock); 8794 mutex_exit(txq->txq_lock);
8759 } 8795 }
8760 8796
8761 return 0; 8797 return 0;
8762} 8798}
8763 8799
/* Locked variant of wm_transmit(); caller holds txq->txq_lock. */
static void
wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
{

	/* is_transmit == true: drain this queue's pcq, ignore IFF_OACTIVE. */
	wm_send_common_locked(ifp, txq, true);
}
8770 8806
/*
 * wm_send_common_locked:
 *
 *	Common transmit path shared by wm_start_locked() (legacy if_snd
 *	queue, is_transmit == false) and wm_transmit_locked() (per-queue
 *	pcq, is_transmit == true).  Dequeues packets, DMA-maps them, fills
 *	legacy Tx descriptors (with optional checksum/TSO context via
 *	wm_tx_offload()), and notifies the chip through the TDT register.
 *
 *	Called with txq->txq_lock held.
 */
static void
wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
    bool is_transmit)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
	bus_addr_t curaddr;
	bus_size_t seglen, curlen;
	uint32_t cksumcmd;
	uint8_t cksumfields;
	bool remap = true;	/* allow one m_defrag() retry per packet */

	KASSERT(mutex_owned(txq->txq_lock));

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;
	/* IFF_OACTIVE only gates the legacy single-queue path. */
	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
		return;
	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
		return;

	/* Link is down and we're configured to discard: drain and count. */
	if (__predict_false(wm_linkdown_discard(txq))) {
		do {
			if (is_transmit)
				m0 = pcq_get(txq->txq_interq);
			else
				IFQ_DEQUEUE(&ifp->if_snd, m0);
			/*
			 * increment successful packet counter as in the case
			 * which the packet is discarded by link down PHY.
			 */
			if (m0 != NULL) {
				ifp->if_opackets++;
				m_freem(m0);
			}
		} while (m0 != NULL);
		return;
	}

	/* Remember the previous number of free descriptors. */
	ofree = txq->txq_free;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		m0 = NULL;

		/* Get a work queue entry. */
		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
			/* Reclaim completed jobs before giving up. */
			wm_txeof(txq, UINT_MAX);
			if (txq->txq_sfree == 0) {
				DPRINTF(sc, WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
					device_xname(sc->sc_dev)));
				WM_Q_EVCNT_INCR(txq, txsstall);
				break;
			}
		}

		/* Grab a packet off the queue. */
		if (is_transmit)
			m0 = pcq_get(txq->txq_interq);
		else
			IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(sc, WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
			device_xname(sc->sc_dev), m0));

		txs = &txq->txq_soft[txq->txq_snext];
		dmamap = txs->txs_dmamap;

		use_tso = (m0->m_pkthdr.csum_flags &
		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;

		/*
		 * So says the Linux driver:
		 * The controller does a simple calculation to make sure
		 * there is enough room in the FIFO before initiating the
		 * DMA for each buffer. The calc is:
		 *	4 = ceil(buffer len / MSS)
		 * To make sure we don't overrun the FIFO, adjust the max
		 * buffer len if the MSS drops.
		 */
		dmamap->dm_maxsegsz =
		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
		    ? m0->m_pkthdr.segsz << 2
		    : WTX_MAX_LEN;

		/*
		 * Load the DMA map. If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources. For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
retry:
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
		if (__predict_false(error)) {
			if (error == EFBIG) {
				/* One defrag attempt, then drop. */
				if (remap == true) {
					struct mbuf *m;

					remap = false;
					m = m_defrag(m0, M_NOWAIT);
					if (m != NULL) {
						WM_Q_EVCNT_INCR(txq, defrag);
						m0 = m;
						goto retry;
					}
				}
				WM_Q_EVCNT_INCR(txq, toomanyseg);
				log(LOG_ERR, "%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    device_xname(sc->sc_dev));
				wm_dump_mbuf_chain(sc, m0);
				m_freem(m0);
				continue;
			}
			/* Short on resources, just stop for now. */
			DPRINTF(sc, WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
				device_xname(sc->sc_dev), error));
			break;
		}

		segs_needed = dmamap->dm_nsegs;
		if (use_tso) {
			/* For sentinel descriptor; see below. */
			segs_needed++;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet. Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to load offload context.
		 */
		if (segs_needed > txq->txq_free - 2) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet. We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt. Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(sc, WM_DEBUG_TX,
			    ("%s: TX: need %d (%d) descriptors, have %d\n",
				device_xname(sc->sc_dev), dmamap->dm_nsegs,
				segs_needed, txq->txq_free - 1));
			if (!is_transmit)
				ifp->if_flags |= IFF_OACTIVE;
			txq->txq_flags |= WM_TXQ_NO_SPACE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_Q_EVCNT_INCR(txq, txdstall);
			break;
		}

		/*
		 * Check for 82547 Tx FIFO bug. We need to do this
		 * once we know we can transmit the packet, since we
		 * do some internal FIFO space accounting here.
		 */
		if (sc->sc_type == WM_T_82547 &&
		    wm_82547_txfifo_bugchk(sc, m0)) {
			DPRINTF(sc, WM_DEBUG_TX,
			    ("%s: TX: 82547 Tx FIFO bug detected\n",
				device_xname(sc->sc_dev)));
			if (!is_transmit)
				ifp->if_flags |= IFF_OACTIVE;
			txq->txq_flags |= WM_TXQ_NO_SPACE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_Q_EVCNT_INCR(txq, fifo_stall);
			break;
		}

		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */

		DPRINTF(sc, WM_DEBUG_TX,
		    ("%s: TX: packet has %d (%d) DMA segments\n",
		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));

		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses the number of DMA segments. This may be
		 * incremented by 1 if we do checksum offload (a descriptor
		 * is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = txq->txq_next;
		txs->txs_ndesc = segs_needed;

		/* Set up offload parameters for this packet. */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
		} else {
			/* No offload: invalidate the cached context. */
			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
			cksumcmd = 0;
			cksumfields = 0;
		}

		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/* Initialize the transmit descriptor. */
		for (nexttx = txq->txq_next, seg = 0;
		     seg < dmamap->dm_nsegs; seg++) {
			for (seglen = dmamap->dm_segs[seg].ds_len,
			     curaddr = dmamap->dm_segs[seg].ds_addr;
			     seglen != 0;
			     curaddr += curlen, seglen -= curlen,
			     nexttx = WM_NEXTTX(txq, nexttx)) {
				curlen = seglen;

				/*
				 * So says the Linux driver:
				 * Work around for premature descriptor
				 * write-backs in TSO mode. Append a
				 * 4-byte sentinel descriptor.
				 */
				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
				    curlen > 8)
					curlen -= 4;

				wm_set_dma_addr(
				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
				txq->txq_descs[nexttx].wtx_cmdlen
				    = htole32(cksumcmd | curlen);
				txq->txq_descs[nexttx].wtx_fields.wtxu_status
				    = 0;
				txq->txq_descs[nexttx].wtx_fields.wtxu_options
				    = cksumfields;
				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
				lasttx = nexttx;

				DPRINTF(sc, WM_DEBUG_TX,
				    ("%s: TX: desc %d: low %#" PRIx64 ", "
				    "len %#04zx\n",
				    device_xname(sc->sc_dev), nexttx,
				    (uint64_t)curaddr, curlen));
			}
		}

		KASSERT(lasttx != -1);

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet. If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		txq->txq_descs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_RS);

		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if (vlan_has_tag(m0)) {
			txq->txq_descs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
			    = htole16(vlan_get_tag(m0));
		}

		txs->txs_lastdesc = lasttx;

		DPRINTF(sc, WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
			device_xname(sc->sc_dev),
			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));

		/* Sync the descriptors we're using. */
		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);

		DPRINTF(sc, WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));

		DPRINTF(sc, WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
			device_xname(sc->sc_dev), txq->txq_snext));

		/* Advance the tx pointer. */
		txq->txq_free -= txs->txs_ndesc;
		txq->txq_next = nexttx;

		txq->txq_sfree--;
		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);

		/* Pass the packet to any BPF listeners. */
		bpf_mtap(ifp, m0);
	}

	/* A packet was dequeued but could not be queued to the hardware. */
	if (m0 != NULL) {
		if (!is_transmit)
			ifp->if_flags |= IFF_OACTIVE;
		txq->txq_flags |= WM_TXQ_NO_SPACE;
		WM_Q_EVCNT_INCR(txq, descdrop);
		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
		    __func__));
		m_freem(m0);
	}

	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
		/* No more slots; notify upper layer. */
		if (!is_transmit)
			ifp->if_flags |= IFF_OACTIVE;
		txq->txq_flags |= WM_TXQ_NO_SPACE;
	}

	if (txq->txq_free != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		txq->txq_lastsent = time_uptime;
		txq->txq_sending = true;
	}
}
9115 9151
9116/* 9152/*
9117 * wm_nq_tx_offload: 9153 * wm_nq_tx_offload:
9118 * 9154 *
9119 * Set up TCP/IP checksumming parameters for the 9155 * Set up TCP/IP checksumming parameters for the
9120 * specified packet, for NEWQUEUE devices 9156 * specified packet, for NEWQUEUE devices
9121 */ 9157 */
9122static void 9158static void
9123wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq, 9159wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
9124 struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum) 9160 struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
9125{ 9161{
9126 struct mbuf *m0 = txs->txs_mbuf; 9162 struct mbuf *m0 = txs->txs_mbuf;
9127 uint32_t vl_len, mssidx, cmdc; 9163 uint32_t vl_len, mssidx, cmdc;
9128 struct ether_header *eh; 9164 struct ether_header *eh;
9129 int offset, iphl; 9165 int offset, iphl;
9130 9166
9131 /* 9167 /*
9132 * XXX It would be nice if the mbuf pkthdr had offset 9168 * XXX It would be nice if the mbuf pkthdr had offset
9133 * fields for the protocol headers. 9169 * fields for the protocol headers.
9134 */ 9170 */
9135 *cmdlenp = 0; 9171 *cmdlenp = 0;
9136 *fieldsp = 0; 9172 *fieldsp = 0;
9137 9173
9138 eh = mtod(m0, struct ether_header *); 9174 eh = mtod(m0, struct ether_header *);
9139 switch (htons(eh->ether_type)) { 9175 switch (htons(eh->ether_type)) {
9140 case ETHERTYPE_IP: 9176 case ETHERTYPE_IP:
9141 case ETHERTYPE_IPV6: 9177 case ETHERTYPE_IPV6:
9142 offset = ETHER_HDR_LEN; 9178 offset = ETHER_HDR_LEN;
9143 break; 9179 break;
9144 9180
9145 case ETHERTYPE_VLAN: 9181 case ETHERTYPE_VLAN:
9146 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 9182 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
9147 break; 9183 break;
9148 9184
9149 default: 9185 default:
9150 /* Don't support this protocol or encapsulation. */ 9186 /* Don't support this protocol or encapsulation. */
9151 *do_csum = false; 9187 *do_csum = false;
9152 return; 9188 return;
9153 } 9189 }
9154 *do_csum = true; 9190 *do_csum = true;
9155 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS; 9191 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
9156 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT; 9192 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
9157 9193
9158 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT); 9194 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
9159 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0); 9195 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
9160 9196
9161 if ((m0->m_pkthdr.csum_flags & 9197 if ((m0->m_pkthdr.csum_flags &
9162 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) { 9198 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
9163 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data); 9199 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
9164 } else { 9200 } else {
9165 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data); 9201 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
9166 } 9202 }
9167 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT); 9203 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
9168 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0); 9204 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
9169 9205
9170 if (vlan_has_tag(m0)) { 9206 if (vlan_has_tag(m0)) {
9171 vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK) 9207 vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
9172 << NQTXC_VLLEN_VLAN_SHIFT); 9208 << NQTXC_VLLEN_VLAN_SHIFT);
9173 *cmdlenp |= NQTX_CMD_VLE; 9209 *cmdlenp |= NQTX_CMD_VLE;
9174 } 9210 }
9175 9211
9176 mssidx = 0; 9212 mssidx = 0;
9177 9213
9178 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) { 9214 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
9179 int hlen = offset + iphl; 9215 int hlen = offset + iphl;
9180 int tcp_hlen; 9216 int tcp_hlen;
9181 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0; 9217 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
9182 9218
9183 if (__predict_false(m0->m_len < 9219 if (__predict_false(m0->m_len <
9184 (hlen + sizeof(struct tcphdr)))) { 9220 (hlen + sizeof(struct tcphdr)))) {
9185 /* 9221 /*
9186 * TCP/IP headers are not in the first mbuf; we need 9222 * TCP/IP headers are not in the first mbuf; we need
9187 * to do this the slow and painful way. Let's just 9223 * to do this the slow and painful way. Let's just
9188 * hope this doesn't happen very often. 9224 * hope this doesn't happen very often.
9189 */ 9225 */
9190 struct tcphdr th; 9226 struct tcphdr th;
9191 9227
9192 WM_Q_EVCNT_INCR(txq, tsopain); 9228 WM_Q_EVCNT_INCR(txq, tsopain);
9193 9229
9194 m_copydata(m0, hlen, sizeof(th), &th); 9230 m_copydata(m0, hlen, sizeof(th), &th);
9195 if (v4) { 9231 if (v4) {
9196 struct ip ip; 9232 struct ip ip;
9197 9233
9198 m_copydata(m0, offset, sizeof(ip), &ip); 9234 m_copydata(m0, offset, sizeof(ip), &ip);
9199 ip.ip_len = 0; 9235 ip.ip_len = 0;
9200 m_copyback(m0, 9236 m_copyback(m0,
9201 offset + offsetof(struct ip, ip_len), 9237 offset + offsetof(struct ip, ip_len),
9202 sizeof(ip.ip_len), &ip.ip_len); 9238 sizeof(ip.ip_len), &ip.ip_len);
9203 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr, 9239 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
9204 ip.ip_dst.s_addr, htons(IPPROTO_TCP)); 9240 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
9205 } else { 9241 } else {
9206 struct ip6_hdr ip6; 9242 struct ip6_hdr ip6;
9207 9243
9208 m_copydata(m0, offset, sizeof(ip6), &ip6); 9244 m_copydata(m0, offset, sizeof(ip6), &ip6);
9209 ip6.ip6_plen = 0; 9245 ip6.ip6_plen = 0;
9210 m_copyback(m0, 9246 m_copyback(m0,
9211 offset + offsetof(struct ip6_hdr, ip6_plen), 9247 offset + offsetof(struct ip6_hdr, ip6_plen),
9212 sizeof(ip6.ip6_plen), &ip6.ip6_plen); 9248 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
9213 th.th_sum = in6_cksum_phdr(&ip6.ip6_src, 9249 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
9214 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP)); 9250 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
9215 } 9251 }
9216 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum), 9252 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
9217 sizeof(th.th_sum), &th.th_sum); 9253 sizeof(th.th_sum), &th.th_sum);
9218 9254
9219 tcp_hlen = th.th_off << 2; 9255 tcp_hlen = th.th_off << 2;
9220 } else { 9256 } else {
9221 /* 9257 /*
9222 * TCP/IP headers are in the first mbuf; we can do 9258 * TCP/IP headers are in the first mbuf; we can do
9223 * this the easy way. 9259 * this the easy way.
9224 */ 9260 */
9225 struct tcphdr *th; 9261 struct tcphdr *th;
9226 9262
9227 if (v4) { 9263 if (v4) {
9228 struct ip *ip = 9264 struct ip *ip =
9229 (void *)(mtod(m0, char *) + offset); 9265 (void *)(mtod(m0, char *) + offset);
9230 th = (void *)(mtod(m0, char *) + hlen); 9266 th = (void *)(mtod(m0, char *) + hlen);
9231 9267
9232 ip->ip_len = 0; 9268 ip->ip_len = 0;
9233 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, 9269 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
9234 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 9270 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
9235 } else { 9271 } else {
9236 struct ip6_hdr *ip6 = 9272 struct ip6_hdr *ip6 =
9237 (void *)(mtod(m0, char *) + offset); 9273 (void *)(mtod(m0, char *) + offset);
9238 th = (void *)(mtod(m0, char *) + hlen); 9274 th = (void *)(mtod(m0, char *) + hlen);
9239 9275
9240 ip6->ip6_plen = 0; 9276 ip6->ip6_plen = 0;
9241 th->th_sum = in6_cksum_phdr(&ip6->ip6_src, 9277 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
9242 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP)); 9278 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
9243 } 9279 }
9244 tcp_hlen = th->th_off << 2; 9280 tcp_hlen = th->th_off << 2;
9245 } 9281 }
9246 hlen += tcp_hlen; 9282 hlen += tcp_hlen;
9247 *cmdlenp |= NQTX_CMD_TSE; 9283 *cmdlenp |= NQTX_CMD_TSE;
9248 9284
9249 if (v4) { 9285 if (v4) {
9250 WM_Q_EVCNT_INCR(txq, tso); 9286 WM_Q_EVCNT_INCR(txq, tso);
9251 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM; 9287 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
9252 } else { 9288 } else {
9253 WM_Q_EVCNT_INCR(txq, tso6); 9289 WM_Q_EVCNT_INCR(txq, tso6);
9254 *fieldsp |= NQTXD_FIELDS_TUXSM; 9290 *fieldsp |= NQTXD_FIELDS_TUXSM;
9255 } 9291 }
9256 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT); 9292 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
9257 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0); 9293 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
9258 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT); 9294 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
9259 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0); 9295 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
9260 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT); 9296 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
9261 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0); 9297 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
9262 } else { 9298 } else {
9263 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT); 9299 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
9264 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0); 9300 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
9265 } 9301 }
9266 9302
9267 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) { 9303 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
9268 *fieldsp |= NQTXD_FIELDS_IXSM; 9304 *fieldsp |= NQTXD_FIELDS_IXSM;
9269 cmdc |= NQTXC_CMD_IP4; 9305 cmdc |= NQTXC_CMD_IP4;
9270 } 9306 }
9271 9307
9272 if (m0->m_pkthdr.csum_flags & 9308 if (m0->m_pkthdr.csum_flags &
9273 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) { 9309 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
9274 WM_Q_EVCNT_INCR(txq, tusum); 9310 WM_Q_EVCNT_INCR(txq, tusum);
9275 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) 9311 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
9276 cmdc |= NQTXC_CMD_TCP; 9312 cmdc |= NQTXC_CMD_TCP;
9277 else 9313 else
9278 cmdc |= NQTXC_CMD_UDP; 9314 cmdc |= NQTXC_CMD_UDP;
9279 9315
9280 cmdc |= NQTXC_CMD_IP4; 9316 cmdc |= NQTXC_CMD_IP4;
9281 *fieldsp |= NQTXD_FIELDS_TUXSM; 9317 *fieldsp |= NQTXD_FIELDS_TUXSM;
9282 } 9318 }
9283 if (m0->m_pkthdr.csum_flags & 9319 if (m0->m_pkthdr.csum_flags &
9284 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) { 9320 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
9285 WM_Q_EVCNT_INCR(txq, tusum6); 9321 WM_Q_EVCNT_INCR(txq, tusum6);
9286 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) 9322 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
9287 cmdc |= NQTXC_CMD_TCP; 9323 cmdc |= NQTXC_CMD_TCP;
9288 else 9324 else
9289 cmdc |= NQTXC_CMD_UDP; 9325 cmdc |= NQTXC_CMD_UDP;
9290 9326
9291 cmdc |= NQTXC_CMD_IP6; 9327 cmdc |= NQTXC_CMD_IP6;
9292 *fieldsp |= NQTXD_FIELDS_TUXSM; 9328 *fieldsp |= NQTXD_FIELDS_TUXSM;
9293 } 9329 }
9294 9330
9295 /* 9331 /*
9296 * We don't have to write context descriptor for every packet to 9332 * We don't have to write context descriptor for every packet to
9297 * NEWQUEUE controllers, that is 82575, 82576, 82580, I350, I354, 9333 * NEWQUEUE controllers, that is 82575, 82576, 82580, I350, I354,
9298 * I210 and I211. It is enough to write once per a Tx queue for these 9334 * I210 and I211. It is enough to write once per a Tx queue for these
9299 * controllers. 9335 * controllers.
9300 * It would be overhead to write context descriptor for every packet, 9336 * It would be overhead to write context descriptor for every packet,
9301 * however it does not cause problems. 9337 * however it does not cause problems.
9302 */ 9338 */
9303 /* Fill in the context descriptor. */ 9339 /* Fill in the context descriptor. */
9304 txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_vl_len = 9340 txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_vl_len =
9305 htole32(vl_len); 9341 htole32(vl_len);
9306 txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_sn = 0; 9342 txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_sn = 0;
9307 txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_cmd = 9343 txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_cmd =
9308 htole32(cmdc); 9344 htole32(cmdc);
9309 txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_mssidx = 9345 txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_mssidx =
9310 htole32(mssidx); 9346 htole32(mssidx);
9311 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE); 9347 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
9312 DPRINTF(sc, WM_DEBUG_TX, 9348 DPRINTF(sc, WM_DEBUG_TX,
9313 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev), 9349 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
9314 txq->txq_next, 0, vl_len)); 9350 txq->txq_next, 0, vl_len));
9315 DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc)); 9351 DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
9316 txq->txq_next = WM_NEXTTX(txq, txq->txq_next); 9352 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
9317 txs->txs_ndesc++; 9353 txs->txs_ndesc++;
9318} 9354}
@@ -9947,2001 +9983,2001 @@ wm_rxdesc_is_set_status(struct wm_softc  @@ -9947,2001 +9983,2001 @@ wm_rxdesc_is_set_status(struct wm_softc
9947{ 9983{
9948 9984
9949 if (sc->sc_type == WM_T_82574) 9985 if (sc->sc_type == WM_T_82574)
9950 return (status & ext_bit) != 0; 9986 return (status & ext_bit) != 0;
9951 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 9987 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9952 return (status & nq_bit) != 0; 9988 return (status & nq_bit) != 0;
9953 else 9989 else
9954 return (status & legacy_bit) != 0; 9990 return (status & legacy_bit) != 0;
9955} 9991}
9956 9992
9957static inline bool 9993static inline bool
9958wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error, 9994wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
9959 uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit) 9995 uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
9960{ 9996{
9961 9997
9962 if (sc->sc_type == WM_T_82574) 9998 if (sc->sc_type == WM_T_82574)
9963 return (error & ext_bit) != 0; 9999 return (error & ext_bit) != 0;
9964 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 10000 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9965 return (error & nq_bit) != 0; 10001 return (error & nq_bit) != 0;
9966 else 10002 else
9967 return (error & legacy_bit) != 0; 10003 return (error & legacy_bit) != 0;
9968} 10004}
9969 10005
9970static inline bool 10006static inline bool
9971wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status) 10007wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
9972{ 10008{
9973 10009
9974 if (wm_rxdesc_is_set_status(rxq->rxq_sc, status, 10010 if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
9975 WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP)) 10011 WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
9976 return true; 10012 return true;
9977 else 10013 else
9978 return false; 10014 return false;
9979} 10015}
9980 10016
9981static inline bool 10017static inline bool
9982wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors) 10018wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
9983{ 10019{
9984 struct wm_softc *sc = rxq->rxq_sc; 10020 struct wm_softc *sc = rxq->rxq_sc;
9985 10021
9986 /* XXX missing error bit for newqueue? */ 10022 /* XXX missing error bit for newqueue? */
9987 if (wm_rxdesc_is_set_error(sc, errors, 10023 if (wm_rxdesc_is_set_error(sc, errors,
9988 WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE, 10024 WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
9989 EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ 10025 EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
9990 | EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE, 10026 | EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
9991 NQRXC_ERROR_RXE)) { 10027 NQRXC_ERROR_RXE)) {
9992 if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, 10028 if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
9993 EXTRXC_ERROR_SE, 0)) 10029 EXTRXC_ERROR_SE, 0))
9994 log(LOG_WARNING, "%s: symbol error\n", 10030 log(LOG_WARNING, "%s: symbol error\n",
9995 device_xname(sc->sc_dev)); 10031 device_xname(sc->sc_dev));
9996 else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, 10032 else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
9997 EXTRXC_ERROR_SEQ, 0)) 10033 EXTRXC_ERROR_SEQ, 0))
9998 log(LOG_WARNING, "%s: receive sequence error\n", 10034 log(LOG_WARNING, "%s: receive sequence error\n",
9999 device_xname(sc->sc_dev)); 10035 device_xname(sc->sc_dev));
10000 else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, 10036 else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
10001 EXTRXC_ERROR_CE, 0)) 10037 EXTRXC_ERROR_CE, 0))
10002 log(LOG_WARNING, "%s: CRC error\n", 10038 log(LOG_WARNING, "%s: CRC error\n",
10003 device_xname(sc->sc_dev)); 10039 device_xname(sc->sc_dev));
10004 return true; 10040 return true;
10005 } 10041 }
10006 10042
10007 return false; 10043 return false;
10008} 10044}
10009 10045
10010static inline bool 10046static inline bool
10011wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status) 10047wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
10012{ 10048{
10013 struct wm_softc *sc = rxq->rxq_sc; 10049 struct wm_softc *sc = rxq->rxq_sc;
10014 10050
10015 if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD, 10051 if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
10016 NQRXC_STATUS_DD)) { 10052 NQRXC_STATUS_DD)) {
10017 /* We have processed all of the receive descriptors. */ 10053 /* We have processed all of the receive descriptors. */
10018 wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD); 10054 wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
10019 return false; 10055 return false;
10020 } 10056 }
10021 10057
10022 return true; 10058 return true;
10023} 10059}
10024 10060
10025static inline bool 10061static inline bool
10026wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, 10062wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
10027 uint16_t vlantag, struct mbuf *m) 10063 uint16_t vlantag, struct mbuf *m)
10028{ 10064{
10029 10065
10030 if (wm_rxdesc_is_set_status(rxq->rxq_sc, status, 10066 if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
10031 WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) { 10067 WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
10032 vlan_set_tag(m, le16toh(vlantag)); 10068 vlan_set_tag(m, le16toh(vlantag));
10033 } 10069 }
10034 10070
10035 return true; 10071 return true;
10036} 10072}
10037 10073
10038static inline void 10074static inline void
10039wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status, 10075wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
10040 uint32_t errors, struct mbuf *m) 10076 uint32_t errors, struct mbuf *m)
10041{ 10077{
10042 struct wm_softc *sc = rxq->rxq_sc; 10078 struct wm_softc *sc = rxq->rxq_sc;
10043 10079
10044 if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) { 10080 if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
10045 if (wm_rxdesc_is_set_status(sc, status, 10081 if (wm_rxdesc_is_set_status(sc, status,
10046 WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) { 10082 WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
10047 WM_Q_EVCNT_INCR(rxq, ipsum); 10083 WM_Q_EVCNT_INCR(rxq, ipsum);
10048 m->m_pkthdr.csum_flags |= M_CSUM_IPv4; 10084 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
10049 if (wm_rxdesc_is_set_error(sc, errors, 10085 if (wm_rxdesc_is_set_error(sc, errors,
10050 WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE)) 10086 WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
10051 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 10087 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
10052 } 10088 }
10053 if (wm_rxdesc_is_set_status(sc, status, 10089 if (wm_rxdesc_is_set_status(sc, status,
10054 WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) { 10090 WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
10055 /* 10091 /*
10056 * Note: we don't know if this was TCP or UDP, 10092 * Note: we don't know if this was TCP or UDP,
10057 * so we just set both bits, and expect the 10093 * so we just set both bits, and expect the
10058 * upper layers to deal. 10094 * upper layers to deal.
10059 */ 10095 */
10060 WM_Q_EVCNT_INCR(rxq, tusum); 10096 WM_Q_EVCNT_INCR(rxq, tusum);
10061 m->m_pkthdr.csum_flags |= 10097 m->m_pkthdr.csum_flags |=
10062 M_CSUM_TCPv4 | M_CSUM_UDPv4 | 10098 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
10063 M_CSUM_TCPv6 | M_CSUM_UDPv6; 10099 M_CSUM_TCPv6 | M_CSUM_UDPv6;
10064 if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE, 10100 if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
10065 EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E)) 10101 EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
10066 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD; 10102 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
10067 } 10103 }
10068 } 10104 }
10069} 10105}
10070 10106
10071/* 10107/*
10072 * wm_rxeof: 10108 * wm_rxeof:
10073 * 10109 *
10074 * Helper; handle receive interrupts. 10110 * Helper; handle receive interrupts.
10075 */ 10111 */
10076static bool 10112static bool
10077wm_rxeof(struct wm_rxqueue *rxq, u_int limit) 10113wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
10078{ 10114{
10079 struct wm_softc *sc = rxq->rxq_sc; 10115 struct wm_softc *sc = rxq->rxq_sc;
10080 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 10116 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10081 struct wm_rxsoft *rxs; 10117 struct wm_rxsoft *rxs;
10082 struct mbuf *m; 10118 struct mbuf *m;
10083 int i, len; 10119 int i, len;
10084 int count = 0; 10120 int count = 0;
10085 uint32_t status, errors; 10121 uint32_t status, errors;
10086 uint16_t vlantag; 10122 uint16_t vlantag;
10087 bool more = false; 10123 bool more = false;
10088 10124
10089 KASSERT(mutex_owned(rxq->rxq_lock)); 10125 KASSERT(mutex_owned(rxq->rxq_lock));
10090 10126
10091 for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) { 10127 for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
10092 rxs = &rxq->rxq_soft[i]; 10128 rxs = &rxq->rxq_soft[i];
10093 10129
10094 DPRINTF(sc, WM_DEBUG_RX, 10130 DPRINTF(sc, WM_DEBUG_RX,
10095 ("%s: RX: checking descriptor %d\n", 10131 ("%s: RX: checking descriptor %d\n",
10096 device_xname(sc->sc_dev), i)); 10132 device_xname(sc->sc_dev), i));
10097 wm_cdrxsync(rxq, i, 10133 wm_cdrxsync(rxq, i,
10098 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 10134 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
10099 10135
10100 status = wm_rxdesc_get_status(rxq, i); 10136 status = wm_rxdesc_get_status(rxq, i);
10101 errors = wm_rxdesc_get_errors(rxq, i); 10137 errors = wm_rxdesc_get_errors(rxq, i);
10102 len = le16toh(wm_rxdesc_get_pktlen(rxq, i)); 10138 len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
10103 vlantag = wm_rxdesc_get_vlantag(rxq, i); 10139 vlantag = wm_rxdesc_get_vlantag(rxq, i);
10104#ifdef WM_DEBUG 10140#ifdef WM_DEBUG
10105 uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i)); 10141 uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
10106 uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i); 10142 uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
10107#endif 10143#endif
10108 10144
10109 if (!wm_rxdesc_dd(rxq, i, status)) 10145 if (!wm_rxdesc_dd(rxq, i, status))
10110 break; 10146 break;
10111 10147
10112 if (limit-- == 0) { 10148 if (limit-- == 0) {
10113 more = true; 10149 more = true;
10114 DPRINTF(sc, WM_DEBUG_RX, 10150 DPRINTF(sc, WM_DEBUG_RX,
10115 ("%s: RX: loop limited, descriptor %d is not processed\n", 10151 ("%s: RX: loop limited, descriptor %d is not processed\n",
10116 device_xname(sc->sc_dev), i)); 10152 device_xname(sc->sc_dev), i));
10117 break; 10153 break;
10118 } 10154 }
10119 10155
10120 count++; 10156 count++;
10121 if (__predict_false(rxq->rxq_discard)) { 10157 if (__predict_false(rxq->rxq_discard)) {
10122 DPRINTF(sc, WM_DEBUG_RX, 10158 DPRINTF(sc, WM_DEBUG_RX,
10123 ("%s: RX: discarding contents of descriptor %d\n", 10159 ("%s: RX: discarding contents of descriptor %d\n",
10124 device_xname(sc->sc_dev), i)); 10160 device_xname(sc->sc_dev), i));
10125 wm_init_rxdesc(rxq, i); 10161 wm_init_rxdesc(rxq, i);
10126 if (wm_rxdesc_is_eop(rxq, status)) { 10162 if (wm_rxdesc_is_eop(rxq, status)) {
10127 /* Reset our state. */ 10163 /* Reset our state. */
10128 DPRINTF(sc, WM_DEBUG_RX, 10164 DPRINTF(sc, WM_DEBUG_RX,
10129 ("%s: RX: resetting rxdiscard -> 0\n", 10165 ("%s: RX: resetting rxdiscard -> 0\n",
10130 device_xname(sc->sc_dev))); 10166 device_xname(sc->sc_dev)));
10131 rxq->rxq_discard = 0; 10167 rxq->rxq_discard = 0;
10132 } 10168 }
10133 continue; 10169 continue;
10134 } 10170 }
10135 10171
10136 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 10172 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
10137 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 10173 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
10138 10174
10139 m = rxs->rxs_mbuf; 10175 m = rxs->rxs_mbuf;
10140 10176
10141 /* 10177 /*
10142 * Add a new receive buffer to the ring, unless of 10178 * Add a new receive buffer to the ring, unless of
10143 * course the length is zero. Treat the latter as a 10179 * course the length is zero. Treat the latter as a
10144 * failed mapping. 10180 * failed mapping.
10145 */ 10181 */
10146 if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) { 10182 if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
10147 /* 10183 /*
10148 * Failed, throw away what we've done so 10184 * Failed, throw away what we've done so
10149 * far, and discard the rest of the packet. 10185 * far, and discard the rest of the packet.
10150 */ 10186 */
10151 ifp->if_ierrors++; 10187 ifp->if_ierrors++;
10152 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 10188 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
10153 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 10189 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
10154 wm_init_rxdesc(rxq, i); 10190 wm_init_rxdesc(rxq, i);
10155 if (!wm_rxdesc_is_eop(rxq, status)) 10191 if (!wm_rxdesc_is_eop(rxq, status))
10156 rxq->rxq_discard = 1; 10192 rxq->rxq_discard = 1;
10157 if (rxq->rxq_head != NULL) 10193 if (rxq->rxq_head != NULL)
10158 m_freem(rxq->rxq_head); 10194 m_freem(rxq->rxq_head);
10159 WM_RXCHAIN_RESET(rxq); 10195 WM_RXCHAIN_RESET(rxq);
10160 DPRINTF(sc, WM_DEBUG_RX, 10196 DPRINTF(sc, WM_DEBUG_RX,
10161 ("%s: RX: Rx buffer allocation failed, " 10197 ("%s: RX: Rx buffer allocation failed, "
10162 "dropping packet%s\n", device_xname(sc->sc_dev), 10198 "dropping packet%s\n", device_xname(sc->sc_dev),
10163 rxq->rxq_discard ? " (discard)" : "")); 10199 rxq->rxq_discard ? " (discard)" : ""));
10164 continue; 10200 continue;
10165 } 10201 }
10166 10202
10167 m->m_len = len; 10203 m->m_len = len;
10168 rxq->rxq_len += len; 10204 rxq->rxq_len += len;
10169 DPRINTF(sc, WM_DEBUG_RX, 10205 DPRINTF(sc, WM_DEBUG_RX,
10170 ("%s: RX: buffer at %p len %d\n", 10206 ("%s: RX: buffer at %p len %d\n",
10171 device_xname(sc->sc_dev), m->m_data, len)); 10207 device_xname(sc->sc_dev), m->m_data, len));
10172 10208
10173 /* If this is not the end of the packet, keep looking. */ 10209 /* If this is not the end of the packet, keep looking. */
10174 if (!wm_rxdesc_is_eop(rxq, status)) { 10210 if (!wm_rxdesc_is_eop(rxq, status)) {
10175 WM_RXCHAIN_LINK(rxq, m); 10211 WM_RXCHAIN_LINK(rxq, m);
10176 DPRINTF(sc, WM_DEBUG_RX, 10212 DPRINTF(sc, WM_DEBUG_RX,
10177 ("%s: RX: not yet EOP, rxlen -> %d\n", 10213 ("%s: RX: not yet EOP, rxlen -> %d\n",
10178 device_xname(sc->sc_dev), rxq->rxq_len)); 10214 device_xname(sc->sc_dev), rxq->rxq_len));
10179 continue; 10215 continue;
10180 } 10216 }
10181 10217
10182 /* 10218 /*
10183 * Okay, we have the entire packet now. The chip is 10219 * Okay, we have the entire packet now. The chip is
10184 * configured to include the FCS except I35[04], I21[01]. 10220 * configured to include the FCS except I35[04], I21[01].
10185 * (not all chips can be configured to strip it), so we need 10221 * (not all chips can be configured to strip it), so we need
10186 * to trim it. Those chips have an eratta, the RCTL_SECRC bit 10222 * to trim it. Those chips have an eratta, the RCTL_SECRC bit
10187 * in RCTL register is always set, so we don't trim it. 10223 * in RCTL register is always set, so we don't trim it.
10188 * PCH2 and newer chip also not include FCS when jumbo 10224 * PCH2 and newer chip also not include FCS when jumbo
10189 * frame is used to do workaround an errata. 10225 * frame is used to do workaround an errata.
10190 * May need to adjust length of previous mbuf in the 10226 * May need to adjust length of previous mbuf in the
10191 * chain if the current mbuf is too short. 10227 * chain if the current mbuf is too short.
10192 */ 10228 */
10193 if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) { 10229 if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
10194 if (m->m_len < ETHER_CRC_LEN) { 10230 if (m->m_len < ETHER_CRC_LEN) {
10195 rxq->rxq_tail->m_len 10231 rxq->rxq_tail->m_len
10196 -= (ETHER_CRC_LEN - m->m_len); 10232 -= (ETHER_CRC_LEN - m->m_len);
10197 m->m_len = 0; 10233 m->m_len = 0;
10198 } else 10234 } else
10199 m->m_len -= ETHER_CRC_LEN; 10235 m->m_len -= ETHER_CRC_LEN;
10200 len = rxq->rxq_len - ETHER_CRC_LEN; 10236 len = rxq->rxq_len - ETHER_CRC_LEN;
10201 } else 10237 } else
10202 len = rxq->rxq_len; 10238 len = rxq->rxq_len;
10203 10239
10204 WM_RXCHAIN_LINK(rxq, m); 10240 WM_RXCHAIN_LINK(rxq, m);
10205 10241
10206 *rxq->rxq_tailp = NULL; 10242 *rxq->rxq_tailp = NULL;
10207 m = rxq->rxq_head; 10243 m = rxq->rxq_head;
10208 10244
10209 WM_RXCHAIN_RESET(rxq); 10245 WM_RXCHAIN_RESET(rxq);
10210 10246
10211 DPRINTF(sc, WM_DEBUG_RX, 10247 DPRINTF(sc, WM_DEBUG_RX,
10212 ("%s: RX: have entire packet, len -> %d\n", 10248 ("%s: RX: have entire packet, len -> %d\n",
10213 device_xname(sc->sc_dev), len)); 10249 device_xname(sc->sc_dev), len));
10214 10250
10215 /* If an error occurred, update stats and drop the packet. */ 10251 /* If an error occurred, update stats and drop the packet. */
10216 if (wm_rxdesc_has_errors(rxq, errors)) { 10252 if (wm_rxdesc_has_errors(rxq, errors)) {
10217 m_freem(m); 10253 m_freem(m);
10218 continue; 10254 continue;
10219 } 10255 }
10220 10256
10221 /* No errors. Receive the packet. */ 10257 /* No errors. Receive the packet. */
10222 m_set_rcvif(m, ifp); 10258 m_set_rcvif(m, ifp);
10223 m->m_pkthdr.len = len; 10259 m->m_pkthdr.len = len;
10224 /* 10260 /*
10225 * TODO 10261 * TODO
10226 * should be save rsshash and rsstype to this mbuf. 10262 * should be save rsshash and rsstype to this mbuf.
10227 */ 10263 */
10228 DPRINTF(sc, WM_DEBUG_RX, 10264 DPRINTF(sc, WM_DEBUG_RX,
10229 ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n", 10265 ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
10230 device_xname(sc->sc_dev), rsstype, rsshash)); 10266 device_xname(sc->sc_dev), rsstype, rsshash));
10231 10267
10232 /* 10268 /*
10233 * If VLANs are enabled, VLAN packets have been unwrapped 10269 * If VLANs are enabled, VLAN packets have been unwrapped
10234 * for us. Associate the tag with the packet. 10270 * for us. Associate the tag with the packet.
10235 */ 10271 */
10236 if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m)) 10272 if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
10237 continue; 10273 continue;
10238 10274
10239 /* Set up checksum info for this packet. */ 10275 /* Set up checksum info for this packet. */
10240 wm_rxdesc_ensure_checksum(rxq, status, errors, m); 10276 wm_rxdesc_ensure_checksum(rxq, status, errors, m);
10241 10277
10242 rxq->rxq_packets++; 10278 rxq->rxq_packets++;
10243 rxq->rxq_bytes += len; 10279 rxq->rxq_bytes += len;
10244 /* Pass it on. */ 10280 /* Pass it on. */
10245 if_percpuq_enqueue(sc->sc_ipq, m); 10281 if_percpuq_enqueue(sc->sc_ipq, m);
10246 10282
10247 if (rxq->rxq_stopping) 10283 if (rxq->rxq_stopping)
10248 break; 10284 break;
10249 } 10285 }
10250 rxq->rxq_ptr = i; 10286 rxq->rxq_ptr = i;
10251 10287
10252 DPRINTF(sc, WM_DEBUG_RX, 10288 DPRINTF(sc, WM_DEBUG_RX,
10253 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i)); 10289 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
10254 10290
10255 return more; 10291 return more;
10256} 10292}
10257 10293
/*
 * wm_linkintr_gmii:
 *
 *	Helper; handle link interrupts for GMII.
 *
 *	Called with the core lock held.  Reads the MAC STATUS register to
 *	determine the new link state, updates the linkdown-discard and
 *	delayed-linkup bookkeeping, polls the PHY (unless delayed), and
 *	then applies a series of chip-specific errata workarounds.
 */
static void
wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
{
	uint32_t status, reg;
	bool link;
	bool dopoll = true;

	KASSERT(WM_CORE_LOCKED(sc));

	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
		__func__));

	/* Only LSC (link status change) is handled below. */
	if ((icr & ICR_LSC) == 0) {
		if (icr & ICR_RXSEQ)
			DPRINTF(sc, WM_DEBUG_LINK,
			    ("%s: LINK Receive sequence error\n",
				device_xname(sc->sc_dev)));
		return;
	}

	/* Link status changed */
	status = CSR_READ(sc, WMREG_STATUS);
	link = status & STATUS_LU;
	if (link) {
		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
			device_xname(sc->sc_dev),
			(status & STATUS_FD) ? "FDX" : "HDX"));
		if (wm_phy_need_linkdown_discard(sc)) {
			DPRINTF(sc, WM_DEBUG_LINK,
			    ("%s: linkintr: Clear linkdown discard flag\n",
				device_xname(sc->sc_dev)));
			wm_clear_linkdown_discard(sc);
		}
	} else {
		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			device_xname(sc->sc_dev)));
		if (wm_phy_need_linkdown_discard(sc)) {
			DPRINTF(sc, WM_DEBUG_LINK,
			    ("%s: linkintr: Set linkdown discard flag\n",
				device_xname(sc->sc_dev)));
			wm_set_linkdown_discard(sc);
		}
	}
	/* ICH8 errata workarounds on link down. */
	if ((sc->sc_type == WM_T_ICH8) && (link == false))
		wm_gig_downshift_workaround_ich8lan(sc);

	if ((sc->sc_type == WM_T_ICH8) && (sc->sc_phytype == WMPHY_IGP_3))
		wm_kmrn_lock_loss_workaround_ich8lan(sc);

	DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
		device_xname(sc->sc_dev)));
	if ((sc->sc_flags & WM_F_DELAY_LINKUP) != 0) {
		if (link) {
			/* Wait 1 second. */
			dopoll = false;
			getmicrotime(&sc->sc_linkup_delay_time);
			sc->sc_linkup_delay_time.tv_sec += 1;
		} else if (sc->sc_linkup_delay_time.tv_sec != 0) {
			/*
			 * Simplify by checking tv_sec only. It's enough.
			 *
			 * Currently, it's not required to clear the time.
			 * It's just to know the timer is stopped
			 * (for debugging).
			 */

			sc->sc_linkup_delay_time.tv_sec = 0;
			sc->sc_linkup_delay_time.tv_usec = 0;
		}
	}

	/*
	 * Call mii_pollstat().
	 *
	 * Some (not all) systems use I35[04] or I21[01] don't send packet soon
	 * after linkup. The MAC send a packet to the PHY and any error is not
	 * observed. This behavior causes a problem that gratuitous ARP and/or
	 * IPv6 DAD packet are silently dropped. To avoid this problem, don't
	 * call mii_pollstat() here which will send LINK_STATE_UP notification
	 * to the upper layer. Instead, mii_pollstat() will be called in
	 * wm_gmii_mediastatus() or mii_tick() will be called in wm_tick().
	 */
	if (dopoll)
		mii_pollstat(&sc->sc_mii);

	/* Do some workarounds soon after link status is changed. */

	if (sc->sc_type == WM_T_82543) {
		int miistatus, active;

		/*
		 * With 82543, we need to force speed and
		 * duplex on the MAC equal to what the PHY
		 * speed and duplex configuration is.
		 */
		miistatus = sc->sc_mii.mii_media_status;

		if (miistatus & IFM_ACTIVE) {
			active = sc->sc_mii.mii_media_active;
			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
			switch (IFM_SUBTYPE(active)) {
			case IFM_10_T:
				sc->sc_ctrl |= CTRL_SPEED_10;
				break;
			case IFM_100_TX:
				sc->sc_ctrl |= CTRL_SPEED_100;
				break;
			case IFM_1000_T:
				sc->sc_ctrl |= CTRL_SPEED_1000;
				break;
			default:
				/*
				 * Fiber?
				 * Should not enter here.
				 */
				device_printf(sc->sc_dev,
				    "unknown media (%x)\n", active);
				break;
			}
			if (active & IFM_FDX)
				sc->sc_ctrl |= CTRL_FD;
			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		}
	} else if (sc->sc_type == WM_T_PCH) {
		wm_k1_gig_workaround_hv(sc,
		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
	}

	/*
	 * I217 Packet Loss issue:
	 * ensure that FEXTNVM4 Beacon Duration is set correctly
	 * on power up.
	 * Set the Beacon Duration for I217 to 8 usec
	 */
	if (sc->sc_type >= WM_T_PCH_LPT) {
		reg = CSR_READ(sc, WMREG_FEXTNVM4);
		reg &= ~FEXTNVM4_BEACON_DURATION;
		reg |= FEXTNVM4_BEACON_DURATION_8US;
		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
	}

	/* Work-around I218 hang issue */
	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
		wm_k1_workaround_lpt_lp(sc, link);

	if (sc->sc_type >= WM_T_PCH_LPT) {
		/*
		 * Set platform power management values for Latency
		 * Tolerance Reporting (LTR)
		 */
		wm_platform_pm_pch_lpt(sc,
		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
	}

	/* FEXTNVM6 K1-off workaround */
	if (sc->sc_type == WM_T_PCH_SPT) {
		reg = CSR_READ(sc, WMREG_FEXTNVM6);
		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
			reg |= FEXTNVM6_K1_OFF_ENABLE;
		else
			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
	}

	/* The remaining workarounds apply only when the link is up. */
	if (!link)
		return;

	switch (sc->sc_type) {
	case WM_T_PCH2:
		wm_k1_workaround_lv(sc);
		/* FALLTHROUGH */
	case WM_T_PCH:
		if (sc->sc_phytype == WMPHY_82578)
			wm_link_stall_workaround_hv(sc);
		break;
	default:
		break;
	}
}
10445 10481
/*
 * wm_linkintr_tbi:
 *
 *	Helper; handle link interrupts for TBI mode.
 *
 *	On link up, re-reads CTRL (the hardware updates TFCE/RFCE during
 *	autonegotiation) and reprograms the collision distance and XON
 *	flow-control bits to match the negotiated duplex, then notifies
 *	the network stack of the link state change.
 */
static void
wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t status;

	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
		__func__));

	status = CSR_READ(sc, WMREG_STATUS);
	if (icr & ICR_LSC) {
		wm_check_for_link(sc);
		if (status & STATUS_LU) {
			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
				device_xname(sc->sc_dev),
				(status & STATUS_FD) ? "FDX" : "HDX"));
			/*
			 * NOTE: CTRL will update TFCE and RFCE automatically,
			 * so we should update sc->sc_ctrl
			 */

			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			sc->sc_fcrtl &= ~FCRTL_XONE;
			/* Collision distance depends on the duplex mode. */
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			if (sc->sc_ctrl & CTRL_TFCE)
				sc->sc_fcrtl |= FCRTL_XONE;
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			/* Pre-82543 chips have the FCRTL at a different offset. */
			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
			sc->sc_tbi_linkup = 1;
			if_link_state_change(ifp, LINK_STATE_UP);
		} else {
			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
				device_xname(sc->sc_dev)));
			sc->sc_tbi_linkup = 0;
			if_link_state_change(ifp, LINK_STATE_DOWN);
		}
		/* Update LED */
		wm_tbi_serdes_set_linkled(sc);
	} else if (icr & ICR_RXSEQ)
		DPRINTF(sc, WM_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
			device_xname(sc->sc_dev)));
}
10501 10537
/*
 * wm_linkintr_serdes:
 *
 *	Helper; handle link interrupts for SERDES mode.
 *
 *	Reads PCS link status to determine link state, derives
 *	duplex and flow-control settings from the PCS autonegotiation
 *	registers when the media is set to autoselect, and notifies
 *	the network stack of the link state change.
 */
static void
wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data *mii = &sc->sc_mii;
	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
	uint32_t pcs_adv, pcs_lpab, reg;

	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
		__func__));

	if (icr & ICR_LSC) {
		/* Check PCS */
		reg = CSR_READ(sc, WMREG_PCS_LSTS);
		if ((reg & PCS_LSTS_LINKOK) != 0) {
			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
				device_xname(sc->sc_dev)));
			mii->mii_media_status |= IFM_ACTIVE;
			sc->sc_tbi_linkup = 1;
			if_link_state_change(ifp, LINK_STATE_UP);
		} else {
			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
				device_xname(sc->sc_dev)));
			mii->mii_media_status |= IFM_NONE;
			sc->sc_tbi_linkup = 0;
			if_link_state_change(ifp, LINK_STATE_DOWN);
			wm_tbi_serdes_set_linkled(sc);
			return;
		}
		mii->mii_media_active |= IFM_1000_SX;
		if ((reg & PCS_LSTS_FDX) != 0)
			mii->mii_media_active |= IFM_FDX;
		else
			mii->mii_media_active |= IFM_HDX;
		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
			/* Check flow */
			reg = CSR_READ(sc, WMREG_PCS_LSTS);
			if ((reg & PCS_LSTS_AN_COMP) == 0) {
				DPRINTF(sc, WM_DEBUG_LINK,
				    ("XXX LINKOK but not ACOMP\n"));
				return;
			}
			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
			DPRINTF(sc, WM_DEBUG_LINK,
			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
			/*
			 * Resolve flow control from the local advertisement
			 * and the link partner ability (symmetric/asymmetric
			 * pause bits), per the standard resolution table.
			 */
			if ((pcs_adv & TXCW_SYM_PAUSE)
			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
				mii->mii_media_active |= IFM_FLOW
				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
			    && (pcs_adv & TXCW_ASYM_PAUSE)
			    && (pcs_lpab & TXCW_SYM_PAUSE)
			    && (pcs_lpab & TXCW_ASYM_PAUSE))
				mii->mii_media_active |= IFM_FLOW
				    | IFM_ETH_TXPAUSE;
			else if ((pcs_adv & TXCW_SYM_PAUSE)
			    && (pcs_adv & TXCW_ASYM_PAUSE)
			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
			    && (pcs_lpab & TXCW_ASYM_PAUSE))
				mii->mii_media_active |= IFM_FLOW
				    | IFM_ETH_RXPAUSE;
		}
		/* Update LED */
		wm_tbi_serdes_set_linkled(sc);
	} else
		DPRINTF(sc, WM_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
			device_xname(sc->sc_dev)));
}
10577 10613
10578/* 10614/*
10579 * wm_linkintr: 10615 * wm_linkintr:
10580 * 10616 *
10581 * Helper; handle link interrupts. 10617 * Helper; handle link interrupts.
10582 */ 10618 */
10583static void 10619static void
10584wm_linkintr(struct wm_softc *sc, uint32_t icr) 10620wm_linkintr(struct wm_softc *sc, uint32_t icr)
10585{ 10621{
10586 10622
10587 KASSERT(WM_CORE_LOCKED(sc)); 10623 KASSERT(WM_CORE_LOCKED(sc));
10588 10624
10589 if (sc->sc_flags & WM_F_HAS_MII) 10625 if (sc->sc_flags & WM_F_HAS_MII)
10590 wm_linkintr_gmii(sc, icr); 10626 wm_linkintr_gmii(sc, icr);
10591 else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES) 10627 else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
10592 && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))) 10628 && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
10593 wm_linkintr_serdes(sc, icr); 10629 wm_linkintr_serdes(sc, icr);
10594 else 10630 else
10595 wm_linkintr_tbi(sc, icr); 10631 wm_linkintr_tbi(sc, icr);
10596} 10632}
10597 10633
10598 10634
10599static inline void 10635static inline void
10600wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq) 10636wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
10601{ 10637{
10602 10638
10603 if (wmq->wmq_txrx_use_workqueue) { 10639 if (wmq->wmq_txrx_use_workqueue) {
10604 if (!wmq->wmq_wq_enqueued) { 10640 if (!wmq->wmq_wq_enqueued) {
10605 wmq->wmq_wq_enqueued = true; 10641 wmq->wmq_wq_enqueued = true;
10606 workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, 10642 workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie,
10607 curcpu()); 10643 curcpu());
10608 } 10644 }
10609 } else 10645 } else
10610 softint_schedule(wmq->wmq_si); 10646 softint_schedule(wmq->wmq_si);
10611} 10647}
10612 10648
/* Mask (disable) all interrupt causes for INTx/MSI operation. */
static inline void
wm_legacy_intr_disable(struct wm_softc *sc)
{

	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
}
10619 10655
/* Unmask the interrupt causes the driver has selected (sc_icr). */
static inline void
wm_legacy_intr_enable(struct wm_softc *sc)
{

	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
}
10626 10662
10627/* 10663/*
10628 * wm_intr_legacy: 10664 * wm_intr_legacy:
10629 * 10665 *
10630 * Interrupt service routine for INTx and MSI. 10666 * Interrupt service routine for INTx and MSI.
10631 */ 10667 */
10632static int 10668static int
10633wm_intr_legacy(void *arg) 10669wm_intr_legacy(void *arg)
10634{ 10670{
10635 struct wm_softc *sc = arg; 10671 struct wm_softc *sc = arg;
10636 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 10672 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10637 struct wm_queue *wmq = &sc->sc_queue[0]; 10673 struct wm_queue *wmq = &sc->sc_queue[0];
10638 struct wm_txqueue *txq = &wmq->wmq_txq; 10674 struct wm_txqueue *txq = &wmq->wmq_txq;
10639 struct wm_rxqueue *rxq = &wmq->wmq_rxq; 10675 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
10640 u_int txlimit = sc->sc_tx_intr_process_limit; 10676 u_int txlimit = sc->sc_tx_intr_process_limit;
10641 u_int rxlimit = sc->sc_rx_intr_process_limit; 10677 u_int rxlimit = sc->sc_rx_intr_process_limit;
10642 uint32_t icr, rndval = 0; 10678 uint32_t icr, rndval = 0;
10643 bool more = false; 10679 bool more = false;
10644 10680
10645 icr = CSR_READ(sc, WMREG_ICR); 10681 icr = CSR_READ(sc, WMREG_ICR);
10646 if ((icr & sc->sc_icr) == 0) 10682 if ((icr & sc->sc_icr) == 0)
10647 return 0; 10683 return 0;
10648 10684
10649 DPRINTF(sc, WM_DEBUG_TX, 10685 DPRINTF(sc, WM_DEBUG_TX,
10650 ("%s: INTx: got intr\n",device_xname(sc->sc_dev))); 10686 ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
10651 if (rndval == 0) 10687 if (rndval == 0)
10652 rndval = icr; 10688 rndval = icr;
10653 10689
10654 mutex_enter(txq->txq_lock); 10690 mutex_enter(txq->txq_lock);
10655 10691
10656 if (txq->txq_stopping) { 10692 if (txq->txq_stopping) {
10657 mutex_exit(txq->txq_lock); 10693 mutex_exit(txq->txq_lock);
10658 return 1; 10694 return 1;
10659 } 10695 }
10660 10696
10661#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS) 10697#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
10662 if (icr & ICR_TXDW) { 10698 if (icr & ICR_TXDW) {
10663 DPRINTF(sc, WM_DEBUG_TX, 10699 DPRINTF(sc, WM_DEBUG_TX,
10664 ("%s: TX: got TXDW interrupt\n", 10700 ("%s: TX: got TXDW interrupt\n",
10665 device_xname(sc->sc_dev))); 10701 device_xname(sc->sc_dev)));
10666 WM_Q_EVCNT_INCR(txq, txdw); 10702 WM_Q_EVCNT_INCR(txq, txdw);
10667 } 10703 }
10668#endif 10704#endif
10669 if (txlimit > 0) { 10705 if (txlimit > 0) {
10670 more |= wm_txeof(txq, txlimit); 10706 more |= wm_txeof(txq, txlimit);
10671 if (!IF_IS_EMPTY(&ifp->if_snd)) 10707 if (!IF_IS_EMPTY(&ifp->if_snd))
10672 more = true; 10708 more = true;
10673 } else 10709 } else
10674 more = true; 10710 more = true;
10675 mutex_exit(txq->txq_lock); 10711 mutex_exit(txq->txq_lock);
10676 10712
10677 mutex_enter(rxq->rxq_lock); 10713 mutex_enter(rxq->rxq_lock);
10678 10714
10679 if (rxq->rxq_stopping) { 10715 if (rxq->rxq_stopping) {
10680 mutex_exit(rxq->rxq_lock); 10716 mutex_exit(rxq->rxq_lock);
10681 return 1; 10717 return 1;
10682 } 10718 }
10683 10719
10684#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS) 10720#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
10685 if (icr & (ICR_RXDMT0 | ICR_RXT0)) { 10721 if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
10686 DPRINTF(sc, WM_DEBUG_RX, 10722 DPRINTF(sc, WM_DEBUG_RX,
10687 ("%s: RX: got Rx intr %#" __PRIxBIT "\n", 10723 ("%s: RX: got Rx intr %#" __PRIxBIT "\n",
10688 device_xname(sc->sc_dev), 10724 device_xname(sc->sc_dev),
10689 icr & (ICR_RXDMT0 | ICR_RXT0))); 10725 icr & (ICR_RXDMT0 | ICR_RXT0)));
10690 WM_Q_EVCNT_INCR(rxq, intr); 10726 WM_Q_EVCNT_INCR(rxq, intr);
10691 } 10727 }
10692#endif 10728#endif
10693 if (rxlimit > 0) { 10729 if (rxlimit > 0) {
10694 /* 10730 /*
10695 * wm_rxeof() does *not* call upper layer functions directly, 10731 * wm_rxeof() does *not* call upper layer functions directly,
10696 * as if_percpuq_enqueue() just call softint_schedule(). 10732 * as if_percpuq_enqueue() just call softint_schedule().
10697 * So, we can call wm_rxeof() in interrupt context. 10733 * So, we can call wm_rxeof() in interrupt context.
10698 */ 10734 */
10699 more = wm_rxeof(rxq, rxlimit); 10735 more = wm_rxeof(rxq, rxlimit);
10700 } else 10736 } else
10701 more = true; 10737 more = true;
10702 10738
10703 /* Fill lower bits with RX index. See below for the upper. */ 10739 /* Fill lower bits with RX index. See below for the upper. */
10704 rndval |= rxq->rxq_ptr & WM_NRXDESC_MASK; 10740 rndval |= rxq->rxq_ptr & WM_NRXDESC_MASK;
10705 10741
10706 mutex_exit(rxq->rxq_lock); 10742 mutex_exit(rxq->rxq_lock);
10707 10743
10708 WM_CORE_LOCK(sc); 10744 WM_CORE_LOCK(sc);
10709 10745
10710 if (sc->sc_core_stopping) { 10746 if (sc->sc_core_stopping) {
10711 WM_CORE_UNLOCK(sc); 10747 WM_CORE_UNLOCK(sc);
10712 return 1; 10748 return 1;
10713 } 10749 }
10714 10750
10715 if (icr & (ICR_LSC | ICR_RXSEQ)) { 10751 if (icr & (ICR_LSC | ICR_RXSEQ)) {
10716 WM_EVCNT_INCR(&sc->sc_ev_linkintr); 10752 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
10717 wm_linkintr(sc, icr); 10753 wm_linkintr(sc, icr);
10718 } 10754 }
10719 if ((icr & ICR_GPI(0)) != 0) 10755 if ((icr & ICR_GPI(0)) != 0)
10720 device_printf(sc->sc_dev, "got module interrupt\n"); 10756 device_printf(sc->sc_dev, "got module interrupt\n");
10721 10757
10722 WM_CORE_UNLOCK(sc); 10758 WM_CORE_UNLOCK(sc);
10723 10759
10724 if (icr & ICR_RXO) { 10760 if (icr & ICR_RXO) {
10725#if defined(WM_DEBUG) 10761#if defined(WM_DEBUG)
10726 log(LOG_WARNING, "%s: Receive overrun\n", 10762 log(LOG_WARNING, "%s: Receive overrun\n",
10727 device_xname(sc->sc_dev)); 10763 device_xname(sc->sc_dev));
10728#endif /* defined(WM_DEBUG) */ 10764#endif /* defined(WM_DEBUG) */
10729 } 10765 }
10730 10766
10731 rnd_add_uint32(&sc->sc_queue[0].rnd_source, rndval); 10767 rnd_add_uint32(&sc->sc_queue[0].rnd_source, rndval);
10732 10768
10733 if (more) { 10769 if (more) {
10734 /* Try to get more packets going. */ 10770 /* Try to get more packets going. */
10735 wm_legacy_intr_disable(sc); 10771 wm_legacy_intr_disable(sc);
10736 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue; 10772 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
10737 wm_sched_handle_queue(sc, wmq); 10773 wm_sched_handle_queue(sc, wmq);
10738 } 10774 }
10739 10775
10740 return 1; 10776 return 1;
10741} 10777}
10742 10778
10743static inline void 10779static inline void
10744wm_txrxintr_disable(struct wm_queue *wmq) 10780wm_txrxintr_disable(struct wm_queue *wmq)
10745{ 10781{
10746 struct wm_softc *sc = wmq->wmq_txq.txq_sc; 10782 struct wm_softc *sc = wmq->wmq_txq.txq_sc;
10747 10783
10748 if (__predict_false(!wm_is_using_msix(sc))) { 10784 if (__predict_false(!wm_is_using_msix(sc))) {
10749 wm_legacy_intr_disable(sc); 10785 wm_legacy_intr_disable(sc);
10750 return; 10786 return;
10751 } 10787 }
10752 10788
10753 if (sc->sc_type == WM_T_82574) 10789 if (sc->sc_type == WM_T_82574)
10754 CSR_WRITE(sc, WMREG_IMC, 10790 CSR_WRITE(sc, WMREG_IMC,
10755 ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id)); 10791 ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
10756 else if (sc->sc_type == WM_T_82575) 10792 else if (sc->sc_type == WM_T_82575)
10757 CSR_WRITE(sc, WMREG_EIMC, 10793 CSR_WRITE(sc, WMREG_EIMC,
10758 EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id)); 10794 EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
10759 else 10795 else
10760 CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx); 10796 CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
10761} 10797}
10762 10798
10763static inline void 10799static inline void
10764wm_txrxintr_enable(struct wm_queue *wmq) 10800wm_txrxintr_enable(struct wm_queue *wmq)
10765{ 10801{
10766 struct wm_softc *sc = wmq->wmq_txq.txq_sc; 10802 struct wm_softc *sc = wmq->wmq_txq.txq_sc;
10767 10803
10768 wm_itrs_calculate(sc, wmq); 10804 wm_itrs_calculate(sc, wmq);
10769 10805
10770 if (__predict_false(!wm_is_using_msix(sc))) { 10806 if (__predict_false(!wm_is_using_msix(sc))) {
10771 wm_legacy_intr_enable(sc); 10807 wm_legacy_intr_enable(sc);
10772 return; 10808 return;
10773 } 10809 }
10774 10810
10775 /* 10811 /*
10776 * ICR_OTHER which is disabled in wm_linkintr_msix() is enabled here. 10812 * ICR_OTHER which is disabled in wm_linkintr_msix() is enabled here.
10777 * There is no need to care about which of RXQ(0) and RXQ(1) enable 10813 * There is no need to care about which of RXQ(0) and RXQ(1) enable
10778 * ICR_OTHER in first, because each RXQ/TXQ interrupt is disabled 10814 * ICR_OTHER in first, because each RXQ/TXQ interrupt is disabled
10779 * while each wm_handle_queue(wmq) is runnig. 10815 * while each wm_handle_queue(wmq) is runnig.
10780 */ 10816 */
10781 if (sc->sc_type == WM_T_82574) 10817 if (sc->sc_type == WM_T_82574)
10782 CSR_WRITE(sc, WMREG_IMS, 10818 CSR_WRITE(sc, WMREG_IMS,
10783 ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER); 10819 ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
10784 else if (sc->sc_type == WM_T_82575) 10820 else if (sc->sc_type == WM_T_82575)
10785 CSR_WRITE(sc, WMREG_EIMS, 10821 CSR_WRITE(sc, WMREG_EIMS,
10786 EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id)); 10822 EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
10787 else 10823 else
10788 CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx); 10824 CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
10789} 10825}
10790 10826
10791static int 10827static int
10792wm_txrxintr_msix(void *arg) 10828wm_txrxintr_msix(void *arg)
10793{ 10829{
10794 struct wm_queue *wmq = arg; 10830 struct wm_queue *wmq = arg;
10795 struct wm_txqueue *txq = &wmq->wmq_txq; 10831 struct wm_txqueue *txq = &wmq->wmq_txq;
10796 struct wm_rxqueue *rxq = &wmq->wmq_rxq; 10832 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
10797 struct wm_softc *sc = txq->txq_sc; 10833 struct wm_softc *sc = txq->txq_sc;
10798 u_int txlimit = sc->sc_tx_intr_process_limit; 10834 u_int txlimit = sc->sc_tx_intr_process_limit;
10799 u_int rxlimit = sc->sc_rx_intr_process_limit; 10835 u_int rxlimit = sc->sc_rx_intr_process_limit;
10800 uint32_t rndval = 0; 10836 uint32_t rndval = 0;
10801 bool txmore; 10837 bool txmore;
10802 bool rxmore; 10838 bool rxmore;
10803 10839
10804 KASSERT(wmq->wmq_intr_idx == wmq->wmq_id); 10840 KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
10805 10841
10806 DPRINTF(sc, WM_DEBUG_TX, 10842 DPRINTF(sc, WM_DEBUG_TX,
10807 ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev))); 10843 ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
10808 10844
10809 wm_txrxintr_disable(wmq); 10845 wm_txrxintr_disable(wmq);
10810 10846
10811 mutex_enter(txq->txq_lock); 10847 mutex_enter(txq->txq_lock);
10812 10848
10813 if (txq->txq_stopping) { 10849 if (txq->txq_stopping) {
10814 mutex_exit(txq->txq_lock); 10850 mutex_exit(txq->txq_lock);
10815 return 1; 10851 return 1;
10816 } 10852 }
10817 10853
10818 WM_Q_EVCNT_INCR(txq, txdw); 10854 WM_Q_EVCNT_INCR(txq, txdw);
10819 /* Fill upper bits with TX index. See below for the lower. */ 10855 /* Fill upper bits with TX index. See below for the lower. */
10820 rndval = txq->txq_next * WM_NRXDESC; 10856 rndval = txq->txq_next * WM_NRXDESC;
10821 if (txlimit > 0) { 10857 if (txlimit > 0) {
10822 txmore = wm_txeof(txq, txlimit); 10858 txmore = wm_txeof(txq, txlimit);
10823 /* wm_deferred start() is done in wm_handle_queue(). */ 10859 /* wm_deferred start() is done in wm_handle_queue(). */
10824 } else 10860 } else
10825 txmore = true; 10861 txmore = true;
10826 mutex_exit(txq->txq_lock); 10862 mutex_exit(txq->txq_lock);
10827 10863
10828 DPRINTF(sc, WM_DEBUG_RX, 10864 DPRINTF(sc, WM_DEBUG_RX,
10829 ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev))); 10865 ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
10830 mutex_enter(rxq->rxq_lock); 10866 mutex_enter(rxq->rxq_lock);
10831 10867
10832 if (rxq->rxq_stopping) { 10868 if (rxq->rxq_stopping) {
10833 mutex_exit(rxq->rxq_lock); 10869 mutex_exit(rxq->rxq_lock);
10834 return 1; 10870 return 1;
10835 } 10871 }
10836 10872
10837 WM_Q_EVCNT_INCR(rxq, intr); 10873 WM_Q_EVCNT_INCR(rxq, intr);
10838 if (rxlimit > 0) { 10874 if (rxlimit > 0) {
10839 rxmore = wm_rxeof(rxq, rxlimit); 10875 rxmore = wm_rxeof(rxq, rxlimit);
10840 } else 10876 } else
10841 rxmore = true; 10877 rxmore = true;
10842 10878
10843 /* Fill lower bits with RX index. See above for the upper. */ 10879 /* Fill lower bits with RX index. See above for the upper. */
10844 rndval |= rxq->rxq_ptr & WM_NRXDESC_MASK; 10880 rndval |= rxq->rxq_ptr & WM_NRXDESC_MASK;
10845 mutex_exit(rxq->rxq_lock); 10881 mutex_exit(rxq->rxq_lock);
10846 10882
10847 wm_itrs_writereg(sc, wmq); 10883 wm_itrs_writereg(sc, wmq);
10848 10884
10849 /* 10885 /*
10850 * This function is called in the hardware interrupt context and 10886 * This function is called in the hardware interrupt context and
10851 * per-CPU, so it's not required to take a lock. 10887 * per-CPU, so it's not required to take a lock.
10852 */ 10888 */
10853 if (rndval != 0) 10889 if (rndval != 0)
10854 rnd_add_uint32(&sc->sc_queue[wmq->wmq_id].rnd_source, rndval); 10890 rnd_add_uint32(&sc->sc_queue[wmq->wmq_id].rnd_source, rndval);
10855 10891
10856 if (txmore || rxmore) { 10892 if (txmore || rxmore) {
10857 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue; 10893 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
10858 wm_sched_handle_queue(sc, wmq); 10894 wm_sched_handle_queue(sc, wmq);
10859 } else 10895 } else
10860 wm_txrxintr_enable(wmq); 10896 wm_txrxintr_enable(wmq);
10861 10897
10862 return 1; 10898 return 1;
10863} 10899}
10864 10900
10865static void 10901static void
10866wm_handle_queue(void *arg) 10902wm_handle_queue(void *arg)
10867{ 10903{
10868 struct wm_queue *wmq = arg; 10904 struct wm_queue *wmq = arg;
10869 struct wm_txqueue *txq = &wmq->wmq_txq; 10905 struct wm_txqueue *txq = &wmq->wmq_txq;
10870 struct wm_rxqueue *rxq = &wmq->wmq_rxq; 10906 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
10871 struct wm_softc *sc = txq->txq_sc; 10907 struct wm_softc *sc = txq->txq_sc;
10872 u_int txlimit = sc->sc_tx_process_limit; 10908 u_int txlimit = sc->sc_tx_process_limit;
10873 u_int rxlimit = sc->sc_rx_process_limit; 10909 u_int rxlimit = sc->sc_rx_process_limit;
10874 bool txmore; 10910 bool txmore;
10875 bool rxmore; 10911 bool rxmore;
10876 10912
10877 mutex_enter(txq->txq_lock); 10913 mutex_enter(txq->txq_lock);
10878 if (txq->txq_stopping) { 10914 if (txq->txq_stopping) {
10879 mutex_exit(txq->txq_lock); 10915 mutex_exit(txq->txq_lock);
10880 return; 10916 return;
10881 } 10917 }
10882 txmore = wm_txeof(txq, txlimit); 10918 txmore = wm_txeof(txq, txlimit);
10883 wm_deferred_start_locked(txq); 10919 wm_deferred_start_locked(txq);
10884 mutex_exit(txq->txq_lock); 10920 mutex_exit(txq->txq_lock);
10885 10921
10886 mutex_enter(rxq->rxq_lock); 10922 mutex_enter(rxq->rxq_lock);
10887 if (rxq->rxq_stopping) { 10923 if (rxq->rxq_stopping) {
10888 mutex_exit(rxq->rxq_lock); 10924 mutex_exit(rxq->rxq_lock);
10889 return; 10925 return;
10890 } 10926 }
10891 WM_Q_EVCNT_INCR(rxq, defer); 10927 WM_Q_EVCNT_INCR(rxq, defer);
10892 rxmore = wm_rxeof(rxq, rxlimit); 10928 rxmore = wm_rxeof(rxq, rxlimit);
10893 mutex_exit(rxq->rxq_lock); 10929 mutex_exit(rxq->rxq_lock);
10894 10930
10895 if (txmore || rxmore) { 10931 if (txmore || rxmore) {
10896 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue; 10932 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
10897 wm_sched_handle_queue(sc, wmq); 10933 wm_sched_handle_queue(sc, wmq);
10898 } else 10934 } else
10899 wm_txrxintr_enable(wmq); 10935 wm_txrxintr_enable(wmq);
10900} 10936}
10901 10937
10902static void 10938static void
10903wm_handle_queue_work(struct work *wk, void *context) 10939wm_handle_queue_work(struct work *wk, void *context)
10904{ 10940{
10905 struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie); 10941 struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
10906 10942
10907 /* 10943 /*
10908 * Some qemu environment workaround. They don't stop interrupt 10944 * Some qemu environment workaround. They don't stop interrupt
10909 * immediately. 10945 * immediately.
10910 */ 10946 */
10911 wmq->wmq_wq_enqueued = false; 10947 wmq->wmq_wq_enqueued = false;
10912 wm_handle_queue(wmq); 10948 wm_handle_queue(wmq);
10913} 10949}
10914 10950
10915/* 10951/*
10916 * wm_linkintr_msix: 10952 * wm_linkintr_msix:
10917 * 10953 *
10918 * Interrupt service routine for link status change for MSI-X. 10954 * Interrupt service routine for link status change for MSI-X.
10919 */ 10955 */
10920static int 10956static int
10921wm_linkintr_msix(void *arg) 10957wm_linkintr_msix(void *arg)
10922{ 10958{
10923 struct wm_softc *sc = arg; 10959 struct wm_softc *sc = arg;
10924 uint32_t reg; 10960 uint32_t reg;
10925 bool has_rxo; 10961 bool has_rxo;
10926 10962
10927 reg = CSR_READ(sc, WMREG_ICR); 10963 reg = CSR_READ(sc, WMREG_ICR);
10928 WM_CORE_LOCK(sc); 10964 WM_CORE_LOCK(sc);
10929 DPRINTF(sc, WM_DEBUG_LINK, 10965 DPRINTF(sc, WM_DEBUG_LINK,
10930 ("%s: LINK: got link intr. ICR = %08x\n", 10966 ("%s: LINK: got link intr. ICR = %08x\n",
10931 device_xname(sc->sc_dev), reg)); 10967 device_xname(sc->sc_dev), reg));
10932 10968
10933 if (sc->sc_core_stopping) 10969 if (sc->sc_core_stopping)
10934 goto out; 10970 goto out;
10935 10971
10936 if ((reg & ICR_LSC) != 0) { 10972 if ((reg & ICR_LSC) != 0) {
10937 WM_EVCNT_INCR(&sc->sc_ev_linkintr); 10973 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
10938 wm_linkintr(sc, ICR_LSC); 10974 wm_linkintr(sc, ICR_LSC);
10939 } 10975 }
10940 if ((reg & ICR_GPI(0)) != 0) 10976 if ((reg & ICR_GPI(0)) != 0)
10941 device_printf(sc->sc_dev, "got module interrupt\n"); 10977 device_printf(sc->sc_dev, "got module interrupt\n");
10942 10978
10943 /* 10979 /*
10944 * XXX 82574 MSI-X mode workaround 10980 * XXX 82574 MSI-X mode workaround
10945 * 10981 *
10946 * 82574 MSI-X mode causes receive overrun(RXO) interrupt as ICR_OTHER 10982 * 82574 MSI-X mode causes a receive overrun(RXO) interrupt as an
10947 * MSI-X vector, furthermore it does not cause neigher ICR_RXQ(0) nor 10983 * ICR_OTHER MSI-X vector; furthermore it causes neither ICR_RXQ(0)
10948 * ICR_RXQ(1) vector. So, we generate ICR_RXQ(0) and ICR_RXQ(1) 10984 * nor ICR_RXQ(1) vectors. So, we generate ICR_RXQ(0) and ICR_RXQ(1)
10949 * interrupts by writing WMREG_ICS to process receive packets. 10985 * interrupts by writing WMREG_ICS to process receive packets.
10950 */ 10986 */
10951 if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) { 10987 if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
10952#if defined(WM_DEBUG) 10988#if defined(WM_DEBUG)
10953 log(LOG_WARNING, "%s: Receive overrun\n", 10989 log(LOG_WARNING, "%s: Receive overrun\n",
10954 device_xname(sc->sc_dev)); 10990 device_xname(sc->sc_dev));
10955#endif /* defined(WM_DEBUG) */ 10991#endif /* defined(WM_DEBUG) */
10956 10992
10957 has_rxo = true; 10993 has_rxo = true;
10958 /* 10994 /*
10959 * The RXO interrupt is very high rate when receive traffic is 10995 * The RXO interrupt is very high rate when receive traffic is
10960 * high rate. We use polling mode for ICR_OTHER like Tx/Rx 10996 * high rate. We use polling mode for ICR_OTHER like Tx/Rx
10961 * interrupts. ICR_OTHER will be enabled at the end of 10997 * interrupts. ICR_OTHER will be enabled at the end of
10962 * wm_txrxintr_msix() which is kicked by both ICR_RXQ(0) and 10998 * wm_txrxintr_msix() which is kicked by both ICR_RXQ(0) and
10963 * ICR_RXQ(1) interrupts. 10999 * ICR_RXQ(1) interrupts.
10964 */ 11000 */
10965 CSR_WRITE(sc, WMREG_IMC, ICR_OTHER); 11001 CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
10966 11002
10967 CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1)); 11003 CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
10968 } 11004 }
10969 11005
10970 11006
10971 11007
10972out: 11008out:
10973 WM_CORE_UNLOCK(sc); 11009 WM_CORE_UNLOCK(sc);
10974 11010
10975 if (sc->sc_type == WM_T_82574) { 11011 if (sc->sc_type == WM_T_82574) {
10976 if (!has_rxo) 11012 if (!has_rxo)
10977 CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC); 11013 CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
10978 else 11014 else
10979 CSR_WRITE(sc, WMREG_IMS, ICR_LSC); 11015 CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
10980 } else if (sc->sc_type == WM_T_82575) 11016 } else if (sc->sc_type == WM_T_82575)
10981 CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER); 11017 CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
10982 else 11018 else
10983 CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx); 11019 CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
10984 11020
10985 return 1; 11021 return 1;
10986} 11022}
10987 11023
10988/* 11024/*
10989 * Media related. 11025 * Media related.
10990 * GMII, SGMII, TBI (and SERDES) 11026 * GMII, SGMII, TBI (and SERDES)
10991 */ 11027 */
10992 11028
10993/* Common */ 11029/* Common */
10994 11030
10995/* 11031/*
10996 * wm_tbi_serdes_set_linkled: 11032 * wm_tbi_serdes_set_linkled:
10997 * 11033 *
10998 * Update the link LED on TBI and SERDES devices. 11034 * Update the link LED on TBI and SERDES devices.
10999 */ 11035 */
11000static void 11036static void
11001wm_tbi_serdes_set_linkled(struct wm_softc *sc) 11037wm_tbi_serdes_set_linkled(struct wm_softc *sc)
11002{ 11038{
11003 11039
11004 if (sc->sc_tbi_linkup) 11040 if (sc->sc_tbi_linkup)
11005 sc->sc_ctrl |= CTRL_SWDPIN(0); 11041 sc->sc_ctrl |= CTRL_SWDPIN(0);
11006 else 11042 else
11007 sc->sc_ctrl &= ~CTRL_SWDPIN(0); 11043 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
11008 11044
11009 /* 82540 or newer devices are active low */ 11045 /* 82540 or newer devices are active low */
11010 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0; 11046 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
11011 11047
11012 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 11048 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11013} 11049}
11014 11050
11015/* GMII related */ 11051/* GMII related */
11016 11052
11017/* 11053/*
11018 * wm_gmii_reset: 11054 * wm_gmii_reset:
11019 * 11055 *
11020 * Reset the PHY. 11056 * Reset the PHY.
11021 */ 11057 */
11022static void 11058static void
11023wm_gmii_reset(struct wm_softc *sc) 11059wm_gmii_reset(struct wm_softc *sc)
11024{ 11060{
11025 uint32_t reg; 11061 uint32_t reg;
11026 int rv; 11062 int rv;
11027 11063
11028 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 11064 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
11029 device_xname(sc->sc_dev), __func__)); 11065 device_xname(sc->sc_dev), __func__));
11030 11066
11031 rv = sc->phy.acquire(sc); 11067 rv = sc->phy.acquire(sc);
11032 if (rv != 0) { 11068 if (rv != 0) {
11033 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 11069 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
11034 __func__); 11070 __func__);
11035 return; 11071 return;
11036 } 11072 }
11037 11073
11038 switch (sc->sc_type) { 11074 switch (sc->sc_type) {
11039 case WM_T_82542_2_0: 11075 case WM_T_82542_2_0:
11040 case WM_T_82542_2_1: 11076 case WM_T_82542_2_1:
11041 /* null */ 11077 /* null */
11042 break; 11078 break;
11043 case WM_T_82543: 11079 case WM_T_82543:
11044 /* 11080 /*
11045 * With 82543, we need to force speed and duplex on the MAC 11081 * With 82543, we need to force speed and duplex on the MAC
11046 * equal to what the PHY speed and duplex configuration is. 11082 * equal to what the PHY speed and duplex configuration is.
11047 * In addition, we need to perform a hardware reset on the PHY 11083 * In addition, we need to perform a hardware reset on the PHY
11048 * to take it out of reset. 11084 * to take it out of reset.
11049 */ 11085 */
11050 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX; 11086 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
11051 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 11087 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11052 11088
11053 /* The PHY reset pin is active-low. */ 11089 /* The PHY reset pin is active-low. */
11054 reg = CSR_READ(sc, WMREG_CTRL_EXT); 11090 reg = CSR_READ(sc, WMREG_CTRL_EXT);
11055 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) | 11091 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
11056 CTRL_EXT_SWDPIN(4)); 11092 CTRL_EXT_SWDPIN(4));
11057 reg |= CTRL_EXT_SWDPIO(4); 11093 reg |= CTRL_EXT_SWDPIO(4);
11058 11094
11059 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 11095 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
11060 CSR_WRITE_FLUSH(sc); 11096 CSR_WRITE_FLUSH(sc);
11061 delay(10*1000); 11097 delay(10*1000);
11062 11098
11063 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4)); 11099 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
11064 CSR_WRITE_FLUSH(sc); 11100 CSR_WRITE_FLUSH(sc);
11065 delay(150); 11101 delay(150);
11066#if 0 11102#if 0
11067 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4); 11103 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
11068#endif 11104#endif
11069 delay(20*1000); /* XXX extra delay to get PHY ID? */ 11105 delay(20*1000); /* XXX extra delay to get PHY ID? */
11070 break; 11106 break;
11071 case WM_T_82544: /* Reset 10000us */ 11107 case WM_T_82544: /* Reset 10000us */
11072 case WM_T_82540: 11108 case WM_T_82540:
11073 case WM_T_82545: 11109 case WM_T_82545:
11074 case WM_T_82545_3: 11110 case WM_T_82545_3:
11075 case WM_T_82546: 11111 case WM_T_82546:
11076 case WM_T_82546_3: 11112 case WM_T_82546_3:
11077 case WM_T_82541: 11113 case WM_T_82541:
11078 case WM_T_82541_2: 11114 case WM_T_82541_2:
11079 case WM_T_82547: 11115 case WM_T_82547:
11080 case WM_T_82547_2: 11116 case WM_T_82547_2:
11081 case WM_T_82571: /* Reset 100us */ 11117 case WM_T_82571: /* Reset 100us */
11082 case WM_T_82572: 11118 case WM_T_82572:
11083 case WM_T_82573: 11119 case WM_T_82573:
11084 case WM_T_82574: 11120 case WM_T_82574:
11085 case WM_T_82575: 11121 case WM_T_82575:
11086 case WM_T_82576: 11122 case WM_T_82576:
11087 case WM_T_82580: 11123 case WM_T_82580:
11088 case WM_T_I350: 11124 case WM_T_I350:
11089 case WM_T_I354: 11125 case WM_T_I354:
11090 case WM_T_I210: 11126 case WM_T_I210:
11091 case WM_T_I211: 11127 case WM_T_I211:
11092 case WM_T_82583: 11128 case WM_T_82583:
11093 case WM_T_80003: 11129 case WM_T_80003:
11094 /* Generic reset */ 11130 /* Generic reset */
11095 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET); 11131 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
11096 CSR_WRITE_FLUSH(sc); 11132 CSR_WRITE_FLUSH(sc);
11097 delay(20000); 11133 delay(20000);
11098 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 11134 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11099 CSR_WRITE_FLUSH(sc); 11135 CSR_WRITE_FLUSH(sc);
11100 delay(20000); 11136 delay(20000);
11101 11137
11102 if ((sc->sc_type == WM_T_82541) 11138 if ((sc->sc_type == WM_T_82541)
11103 || (sc->sc_type == WM_T_82541_2) 11139 || (sc->sc_type == WM_T_82541_2)
11104 || (sc->sc_type == WM_T_82547) 11140 || (sc->sc_type == WM_T_82547)
11105 || (sc->sc_type == WM_T_82547_2)) { 11141 || (sc->sc_type == WM_T_82547_2)) {
11106 /* Workaround for igp are done in igp_reset() */ 11142 /* Workaround for igp are done in igp_reset() */
11107 /* XXX add code to set LED after phy reset */ 11143 /* XXX add code to set LED after phy reset */
11108 } 11144 }
11109 break; 11145 break;
11110 case WM_T_ICH8: 11146 case WM_T_ICH8:
11111 case WM_T_ICH9: 11147 case WM_T_ICH9:
11112 case WM_T_ICH10: 11148 case WM_T_ICH10:
11113 case WM_T_PCH: 11149 case WM_T_PCH:
11114 case WM_T_PCH2: 11150 case WM_T_PCH2:
11115 case WM_T_PCH_LPT: 11151 case WM_T_PCH_LPT:
11116 case WM_T_PCH_SPT: 11152 case WM_T_PCH_SPT:
11117 case WM_T_PCH_CNP: 11153 case WM_T_PCH_CNP:
11118 case WM_T_PCH_TGP: 11154 case WM_T_PCH_TGP:
11119 /* Generic reset */ 11155 /* Generic reset */
11120 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET); 11156 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
11121 CSR_WRITE_FLUSH(sc); 11157 CSR_WRITE_FLUSH(sc);
11122 delay(100); 11158 delay(100);
11123 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 11159 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11124 CSR_WRITE_FLUSH(sc); 11160 CSR_WRITE_FLUSH(sc);
11125 delay(150); 11161 delay(150);
11126 break; 11162 break;
11127 default: 11163 default:
11128 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev), 11164 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
11129 __func__); 11165 __func__);
11130 break; 11166 break;
11131 } 11167 }
11132 11168
11133 sc->phy.release(sc); 11169 sc->phy.release(sc);
11134 11170
11135 /* get_cfg_done */ 11171 /* get_cfg_done */
11136 wm_get_cfg_done(sc); 11172 wm_get_cfg_done(sc);
11137 11173
11138 /* Extra setup */ 11174 /* Extra setup */
11139 switch (sc->sc_type) { 11175 switch (sc->sc_type) {
11140 case WM_T_82542_2_0: 11176 case WM_T_82542_2_0:
11141 case WM_T_82542_2_1: 11177 case WM_T_82542_2_1:
11142 case WM_T_82543: 11178 case WM_T_82543:
11143 case WM_T_82544: 11179 case WM_T_82544:
11144 case WM_T_82540: 11180 case WM_T_82540:
11145 case WM_T_82545: 11181 case WM_T_82545:
11146 case WM_T_82545_3: 11182 case WM_T_82545_3:
11147 case WM_T_82546: 11183 case WM_T_82546:
11148 case WM_T_82546_3: 11184 case WM_T_82546_3:
11149 case WM_T_82541_2: 11185 case WM_T_82541_2:
11150 case WM_T_82547_2: 11186 case WM_T_82547_2:
11151 case WM_T_82571: 11187 case WM_T_82571:
11152 case WM_T_82572: 11188 case WM_T_82572:
11153 case WM_T_82573: 11189 case WM_T_82573:
11154 case WM_T_82574: 11190 case WM_T_82574:
11155 case WM_T_82583: 11191 case WM_T_82583:
11156 case WM_T_82575: 11192 case WM_T_82575:
11157 case WM_T_82576: 11193 case WM_T_82576:
11158 case WM_T_82580: 11194 case WM_T_82580:
11159 case WM_T_I350: 11195 case WM_T_I350:
11160 case WM_T_I354: 11196 case WM_T_I354:
11161 case WM_T_I210: 11197 case WM_T_I210:
11162 case WM_T_I211: 11198 case WM_T_I211:
11163 case WM_T_80003: 11199 case WM_T_80003:
11164 /* Null */ 11200 /* Null */
11165 break; 11201 break;
11166 case WM_T_82541: 11202 case WM_T_82541:
11167 case WM_T_82547: 11203 case WM_T_82547:
11168 /* XXX Configure actively LED after PHY reset */ 11204 /* XXX Configure actively LED after PHY reset */
11169 break; 11205 break;
11170 case WM_T_ICH8: 11206 case WM_T_ICH8:
11171 case WM_T_ICH9: 11207 case WM_T_ICH9:
11172 case WM_T_ICH10: 11208 case WM_T_ICH10:
11173 case WM_T_PCH: 11209 case WM_T_PCH:
11174 case WM_T_PCH2: 11210 case WM_T_PCH2:
11175 case WM_T_PCH_LPT: 11211 case WM_T_PCH_LPT:
11176 case WM_T_PCH_SPT: 11212 case WM_T_PCH_SPT:
11177 case WM_T_PCH_CNP: 11213 case WM_T_PCH_CNP:
11178 case WM_T_PCH_TGP: 11214 case WM_T_PCH_TGP:
11179 wm_phy_post_reset(sc); 11215 wm_phy_post_reset(sc);
11180 break; 11216 break;
11181 default: 11217 default:
11182 panic("%s: unknown type\n", __func__); 11218 panic("%s: unknown type\n", __func__);
11183 break; 11219 break;
11184 } 11220 }
11185} 11221}
11186 11222
11187/* 11223/*
11188 * Set up sc_phytype and mii_{read|write}reg. 11224 * Set up sc_phytype and mii_{read|write}reg.
11189 * 11225 *
11190 * To identify PHY type, correct read/write function should be selected. 11226 * To identify PHY type, correct read/write function should be selected.
11191 * To select correct read/write function, PCI ID or MAC type are required 11227 * To select correct read/write function, PCI ID or MAC type are required
11192 * without accessing PHY registers. 11228 * without accessing PHY registers.
11193 * 11229 *
11194 * On the first call of this function, PHY ID is not known yet. Check 11230 * On the first call of this function, PHY ID is not known yet. Check
11195 * PCI ID or MAC type. The list of the PCI ID may not be perfect, so the 11231 * PCI ID or MAC type. The list of the PCI ID may not be perfect, so the
11196 * result might be incorrect. 11232 * result might be incorrect.
11197 * 11233 *
11198 * In the second call, PHY OUI and model is used to identify PHY type. 11234 * In the second call, PHY OUI and model is used to identify PHY type.
11199 * It might not be perfect because of the lack of compared entry, but it 11235 * It might not be perfect because of the lack of compared entry, but it
11200 * would be better than the first call. 11236 * would be better than the first call.
11201 * 11237 *
11202 * If the detected new result and previous assumption is different, 11238 * If the detected new result and previous assumption is different,
11203 * a diagnostic message will be printed. 11239 * a diagnostic message will be printed.
11204 */ 11240 */
11205static void 11241static void
11206wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui, 11242wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
11207 uint16_t phy_model) 11243 uint16_t phy_model)
11208{ 11244{
11209 device_t dev = sc->sc_dev; 11245 device_t dev = sc->sc_dev;
11210 struct mii_data *mii = &sc->sc_mii; 11246 struct mii_data *mii = &sc->sc_mii;
11211 uint16_t new_phytype = WMPHY_UNKNOWN; 11247 uint16_t new_phytype = WMPHY_UNKNOWN;
11212 uint16_t doubt_phytype = WMPHY_UNKNOWN; 11248 uint16_t doubt_phytype = WMPHY_UNKNOWN;
11213 mii_readreg_t new_readreg; 11249 mii_readreg_t new_readreg;
11214 mii_writereg_t new_writereg; 11250 mii_writereg_t new_writereg;
11215 bool dodiag = true; 11251 bool dodiag = true;
11216 11252
11217 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 11253 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
11218 device_xname(sc->sc_dev), __func__)); 11254 device_xname(sc->sc_dev), __func__));
11219 11255
11220 /* 11256 /*
11221 * 1000BASE-T SFP uses SGMII and the first asumed PHY type is always 11257 * 1000BASE-T SFP uses SGMII and the first asumed PHY type is always
11222 * incorrect. So don't print diag output when it's 2nd call. 11258 * incorrect. So don't print diag output when it's 2nd call.
11223 */ 11259 */
11224 if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0)) 11260 if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
11225 dodiag = false; 11261 dodiag = false;
11226 11262
11227 if (mii->mii_readreg == NULL) { 11263 if (mii->mii_readreg == NULL) {
11228 /* 11264 /*
11229 * This is the first call of this function. For ICH and PCH 11265 * This is the first call of this function. For ICH and PCH
11230 * variants, it's difficult to determine the PHY access method 11266 * variants, it's difficult to determine the PHY access method
11231 * by sc_type, so use the PCI product ID for some devices. 11267 * by sc_type, so use the PCI product ID for some devices.
11232 */ 11268 */
11233 11269
11234 switch (sc->sc_pcidevid) { 11270 switch (sc->sc_pcidevid) {
11235 case PCI_PRODUCT_INTEL_PCH_M_LM: 11271 case PCI_PRODUCT_INTEL_PCH_M_LM:
11236 case PCI_PRODUCT_INTEL_PCH_M_LC: 11272 case PCI_PRODUCT_INTEL_PCH_M_LC:
11237 /* 82577 */ 11273 /* 82577 */
11238 new_phytype = WMPHY_82577; 11274 new_phytype = WMPHY_82577;
11239 break; 11275 break;
11240 case PCI_PRODUCT_INTEL_PCH_D_DM: 11276 case PCI_PRODUCT_INTEL_PCH_D_DM:
11241 case PCI_PRODUCT_INTEL_PCH_D_DC: 11277 case PCI_PRODUCT_INTEL_PCH_D_DC:
11242 /* 82578 */ 11278 /* 82578 */
11243 new_phytype = WMPHY_82578; 11279 new_phytype = WMPHY_82578;
11244 break; 11280 break;
11245 case PCI_PRODUCT_INTEL_PCH2_LV_LM: 11281 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
11246 case PCI_PRODUCT_INTEL_PCH2_LV_V: 11282 case PCI_PRODUCT_INTEL_PCH2_LV_V:
11247 /* 82579 */ 11283 /* 82579 */
11248 new_phytype = WMPHY_82579; 11284 new_phytype = WMPHY_82579;
11249 break; 11285 break;
11250 case PCI_PRODUCT_INTEL_82801H_82567V_3: 11286 case PCI_PRODUCT_INTEL_82801H_82567V_3:
11251 case PCI_PRODUCT_INTEL_82801I_BM: 11287 case PCI_PRODUCT_INTEL_82801I_BM:
11252 case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */ 11288 case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
11253 case PCI_PRODUCT_INTEL_82801J_R_BM_LM: 11289 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
11254 case PCI_PRODUCT_INTEL_82801J_R_BM_LF: 11290 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
11255 case PCI_PRODUCT_INTEL_82801J_D_BM_LM: 11291 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
11256 case PCI_PRODUCT_INTEL_82801J_D_BM_LF: 11292 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
11257 case PCI_PRODUCT_INTEL_82801J_R_BM_V: 11293 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
11258 /* ICH8, 9, 10 with 82567 */ 11294 /* ICH8, 9, 10 with 82567 */
11259 new_phytype = WMPHY_BM; 11295 new_phytype = WMPHY_BM;
11260 break; 11296 break;
11261 default: 11297 default:
11262 break; 11298 break;
11263 } 11299 }
11264 } else { 11300 } else {
11265 /* It's not the first call. Use PHY OUI and model */ 11301 /* It's not the first call. Use PHY OUI and model */
11266 switch (phy_oui) { 11302 switch (phy_oui) {
11267 case MII_OUI_ATTANSIC: /* atphy(4) */ 11303 case MII_OUI_ATTANSIC: /* atphy(4) */
11268 switch (phy_model) { 11304 switch (phy_model) {
11269 case MII_MODEL_ATTANSIC_AR8021: 11305 case MII_MODEL_ATTANSIC_AR8021:
11270 new_phytype = WMPHY_82578; 11306 new_phytype = WMPHY_82578;
11271 break; 11307 break;
11272 default: 11308 default:
11273 break; 11309 break;
11274 } 11310 }
11275 break; 11311 break;
11276 case MII_OUI_xxMARVELL: 11312 case MII_OUI_xxMARVELL:
11277 switch (phy_model) { 11313 switch (phy_model) {
11278 case MII_MODEL_xxMARVELL_I210: 11314 case MII_MODEL_xxMARVELL_I210:
11279 new_phytype = WMPHY_I210; 11315 new_phytype = WMPHY_I210;
11280 break; 11316 break;
11281 case MII_MODEL_xxMARVELL_E1011: 11317 case MII_MODEL_xxMARVELL_E1011:
11282 case MII_MODEL_xxMARVELL_E1000_3: 11318 case MII_MODEL_xxMARVELL_E1000_3:
11283 case MII_MODEL_xxMARVELL_E1000_5: 11319 case MII_MODEL_xxMARVELL_E1000_5:
11284 case MII_MODEL_xxMARVELL_E1112: 11320 case MII_MODEL_xxMARVELL_E1112:
11285 new_phytype = WMPHY_M88; 11321 new_phytype = WMPHY_M88;
11286 break; 11322 break;
11287 case MII_MODEL_xxMARVELL_E1149: 11323 case MII_MODEL_xxMARVELL_E1149:
11288 new_phytype = WMPHY_BM; 11324 new_phytype = WMPHY_BM;
11289 break; 11325 break;
11290 case MII_MODEL_xxMARVELL_E1111: 11326 case MII_MODEL_xxMARVELL_E1111:
11291 case MII_MODEL_xxMARVELL_I347: 11327 case MII_MODEL_xxMARVELL_I347:
11292 case MII_MODEL_xxMARVELL_E1512: 11328 case MII_MODEL_xxMARVELL_E1512:
11293 case MII_MODEL_xxMARVELL_E1340M: 11329 case MII_MODEL_xxMARVELL_E1340M:
11294 case MII_MODEL_xxMARVELL_E1543: 11330 case MII_MODEL_xxMARVELL_E1543:
11295 new_phytype = WMPHY_M88; 11331 new_phytype = WMPHY_M88;
11296 break; 11332 break;
11297 case MII_MODEL_xxMARVELL_I82563: 11333 case MII_MODEL_xxMARVELL_I82563:
11298 new_phytype = WMPHY_GG82563; 11334 new_phytype = WMPHY_GG82563;
11299 break; 11335 break;
11300 default: 11336 default:
11301 break; 11337 break;
11302 } 11338 }
11303 break; 11339 break;
11304 case MII_OUI_INTEL: 11340 case MII_OUI_INTEL:
11305 switch (phy_model) { 11341 switch (phy_model) {
11306 case MII_MODEL_INTEL_I82577: 11342 case MII_MODEL_INTEL_I82577:
11307 new_phytype = WMPHY_82577; 11343 new_phytype = WMPHY_82577;
11308 break; 11344 break;
11309 case MII_MODEL_INTEL_I82579: 11345 case MII_MODEL_INTEL_I82579:
11310 new_phytype = WMPHY_82579; 11346 new_phytype = WMPHY_82579;
11311 break; 11347 break;
11312 case MII_MODEL_INTEL_I217: 11348 case MII_MODEL_INTEL_I217:
11313 new_phytype = WMPHY_I217; 11349 new_phytype = WMPHY_I217;
11314 break; 11350 break;
11315 case MII_MODEL_INTEL_I82580: 11351 case MII_MODEL_INTEL_I82580:
11316 new_phytype = WMPHY_82580; 11352 new_phytype = WMPHY_82580;
11317 break; 11353 break;
11318 case MII_MODEL_INTEL_I350: 11354 case MII_MODEL_INTEL_I350:
11319 new_phytype = WMPHY_I350; 11355 new_phytype = WMPHY_I350;
11320 break; 11356 break;
11321 default: 11357 default:
11322 break; 11358 break;
11323 } 11359 }
11324 break; 11360 break;
11325 case MII_OUI_yyINTEL: 11361 case MII_OUI_yyINTEL:
11326 switch (phy_model) { 11362 switch (phy_model) {
11327 case MII_MODEL_yyINTEL_I82562G: 11363 case MII_MODEL_yyINTEL_I82562G:
11328 case MII_MODEL_yyINTEL_I82562EM: 11364 case MII_MODEL_yyINTEL_I82562EM:
11329 case MII_MODEL_yyINTEL_I82562ET: 11365 case MII_MODEL_yyINTEL_I82562ET:
11330 new_phytype = WMPHY_IFE; 11366 new_phytype = WMPHY_IFE;
11331 break; 11367 break;
11332 case MII_MODEL_yyINTEL_IGP01E1000: 11368 case MII_MODEL_yyINTEL_IGP01E1000:
11333 new_phytype = WMPHY_IGP; 11369 new_phytype = WMPHY_IGP;
11334 break; 11370 break;
11335 case MII_MODEL_yyINTEL_I82566: 11371 case MII_MODEL_yyINTEL_I82566:
11336 new_phytype = WMPHY_IGP_3; 11372 new_phytype = WMPHY_IGP_3;
11337 break; 11373 break;
11338 default: 11374 default:
11339 break; 11375 break;
11340 } 11376 }
11341 break; 11377 break;
11342 default: 11378 default:
11343 break; 11379 break;
11344 } 11380 }
11345 11381
11346 if (dodiag) { 11382 if (dodiag) {
11347 if (new_phytype == WMPHY_UNKNOWN) 11383 if (new_phytype == WMPHY_UNKNOWN)
11348 aprint_verbose_dev(dev, 11384 aprint_verbose_dev(dev,
11349 "%s: Unknown PHY model. OUI=%06x, " 11385 "%s: Unknown PHY model. OUI=%06x, "
11350 "model=%04x\n", __func__, phy_oui, 11386 "model=%04x\n", __func__, phy_oui,
11351 phy_model); 11387 phy_model);
11352 11388
11353 if ((sc->sc_phytype != WMPHY_UNKNOWN) 11389 if ((sc->sc_phytype != WMPHY_UNKNOWN)
11354 && (sc->sc_phytype != new_phytype)) { 11390 && (sc->sc_phytype != new_phytype)) {
11355 aprint_error_dev(dev, "Previously assumed PHY " 11391 aprint_error_dev(dev, "Previously assumed PHY "
11356 "type(%u) was incorrect. PHY type from PHY" 11392 "type(%u) was incorrect. PHY type from PHY"
11357 "ID = %u\n", sc->sc_phytype, new_phytype); 11393 "ID = %u\n", sc->sc_phytype, new_phytype);
11358 } 11394 }
11359 } 11395 }
11360 } 11396 }
11361 11397
11362 /* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */ 11398 /* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
11363 if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) { 11399 if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
11364 /* SGMII */ 11400 /* SGMII */
11365 new_readreg = wm_sgmii_readreg; 11401 new_readreg = wm_sgmii_readreg;
11366 new_writereg = wm_sgmii_writereg; 11402 new_writereg = wm_sgmii_writereg;
11367 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){ 11403 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
11368 /* BM2 (phyaddr == 1) */ 11404 /* BM2 (phyaddr == 1) */
11369 if ((sc->sc_phytype != WMPHY_UNKNOWN) 11405 if ((sc->sc_phytype != WMPHY_UNKNOWN)
11370 && (new_phytype != WMPHY_BM) 11406 && (new_phytype != WMPHY_BM)
11371 && (new_phytype != WMPHY_UNKNOWN)) 11407 && (new_phytype != WMPHY_UNKNOWN))
11372 doubt_phytype = new_phytype; 11408 doubt_phytype = new_phytype;
11373 new_phytype = WMPHY_BM; 11409 new_phytype = WMPHY_BM;
11374 new_readreg = wm_gmii_bm_readreg; 11410 new_readreg = wm_gmii_bm_readreg;
11375 new_writereg = wm_gmii_bm_writereg; 11411 new_writereg = wm_gmii_bm_writereg;
11376 } else if (sc->sc_type >= WM_T_PCH) { 11412 } else if (sc->sc_type >= WM_T_PCH) {
11377 /* All PCH* use _hv_ */ 11413 /* All PCH* use _hv_ */
11378 new_readreg = wm_gmii_hv_readreg; 11414 new_readreg = wm_gmii_hv_readreg;
11379 new_writereg = wm_gmii_hv_writereg; 11415 new_writereg = wm_gmii_hv_writereg;
11380 } else if (sc->sc_type >= WM_T_ICH8) { 11416 } else if (sc->sc_type >= WM_T_ICH8) {
11381 /* non-82567 ICH8, 9 and 10 */ 11417 /* non-82567 ICH8, 9 and 10 */
11382 new_readreg = wm_gmii_i82544_readreg; 11418 new_readreg = wm_gmii_i82544_readreg;
11383 new_writereg = wm_gmii_i82544_writereg; 11419 new_writereg = wm_gmii_i82544_writereg;
11384 } else if (sc->sc_type >= WM_T_80003) { 11420 } else if (sc->sc_type >= WM_T_80003) {
11385 /* 80003 */ 11421 /* 80003 */
11386 if ((sc->sc_phytype != WMPHY_UNKNOWN) 11422 if ((sc->sc_phytype != WMPHY_UNKNOWN)
11387 && (new_phytype != WMPHY_GG82563) 11423 && (new_phytype != WMPHY_GG82563)
11388 && (new_phytype != WMPHY_UNKNOWN)) 11424 && (new_phytype != WMPHY_UNKNOWN))
11389 doubt_phytype = new_phytype; 11425 doubt_phytype = new_phytype;
11390 new_phytype = WMPHY_GG82563; 11426 new_phytype = WMPHY_GG82563;
11391 new_readreg = wm_gmii_i80003_readreg; 11427 new_readreg = wm_gmii_i80003_readreg;
11392 new_writereg = wm_gmii_i80003_writereg; 11428 new_writereg = wm_gmii_i80003_writereg;
11393 } else if (sc->sc_type >= WM_T_I210) { 11429 } else if (sc->sc_type >= WM_T_I210) {
11394 /* I210 and I211 */ 11430 /* I210 and I211 */
11395 if ((sc->sc_phytype != WMPHY_UNKNOWN) 11431 if ((sc->sc_phytype != WMPHY_UNKNOWN)
11396 && (new_phytype != WMPHY_I210) 11432 && (new_phytype != WMPHY_I210)
11397 && (new_phytype != WMPHY_UNKNOWN)) 11433 && (new_phytype != WMPHY_UNKNOWN))
11398 doubt_phytype = new_phytype; 11434 doubt_phytype = new_phytype;
11399 new_phytype = WMPHY_I210; 11435 new_phytype = WMPHY_I210;
11400 new_readreg = wm_gmii_gs40g_readreg; 11436 new_readreg = wm_gmii_gs40g_readreg;
11401 new_writereg = wm_gmii_gs40g_writereg; 11437 new_writereg = wm_gmii_gs40g_writereg;
11402 } else if (sc->sc_type >= WM_T_82580) { 11438 } else if (sc->sc_type >= WM_T_82580) {
11403 /* 82580, I350 and I354 */ 11439 /* 82580, I350 and I354 */
11404 new_readreg = wm_gmii_82580_readreg; 11440 new_readreg = wm_gmii_82580_readreg;
11405 new_writereg = wm_gmii_82580_writereg; 11441 new_writereg = wm_gmii_82580_writereg;
11406 } else if (sc->sc_type >= WM_T_82544) { 11442 } else if (sc->sc_type >= WM_T_82544) {
11407 /* 82544, 0, [56], [17], 8257[1234] and 82583 */ 11443 /* 82544, 0, [56], [17], 8257[1234] and 82583 */
11408 new_readreg = wm_gmii_i82544_readreg; 11444 new_readreg = wm_gmii_i82544_readreg;
11409 new_writereg = wm_gmii_i82544_writereg; 11445 new_writereg = wm_gmii_i82544_writereg;
11410 } else { 11446 } else {
11411 new_readreg = wm_gmii_i82543_readreg; 11447 new_readreg = wm_gmii_i82543_readreg;
11412 new_writereg = wm_gmii_i82543_writereg; 11448 new_writereg = wm_gmii_i82543_writereg;
11413 } 11449 }
11414 11450
11415 if (new_phytype == WMPHY_BM) { 11451 if (new_phytype == WMPHY_BM) {
11416 /* All BM use _bm_ */ 11452 /* All BM use _bm_ */
11417 new_readreg = wm_gmii_bm_readreg; 11453 new_readreg = wm_gmii_bm_readreg;
11418 new_writereg = wm_gmii_bm_writereg; 11454 new_writereg = wm_gmii_bm_writereg;
11419 } 11455 }
11420 if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_TGP)) { 11456 if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_TGP)) {
11421 /* All PCH* use _hv_ */ 11457 /* All PCH* use _hv_ */
11422 new_readreg = wm_gmii_hv_readreg; 11458 new_readreg = wm_gmii_hv_readreg;
11423 new_writereg = wm_gmii_hv_writereg; 11459 new_writereg = wm_gmii_hv_writereg;
11424 } 11460 }
11425 11461
11426 /* Diag output */ 11462 /* Diag output */
11427 if (dodiag) { 11463 if (dodiag) {
11428 if (doubt_phytype != WMPHY_UNKNOWN) 11464 if (doubt_phytype != WMPHY_UNKNOWN)
11429 aprint_error_dev(dev, "Assumed new PHY type was " 11465 aprint_error_dev(dev, "Assumed new PHY type was "
11430 "incorrect. old = %u, new = %u\n", sc->sc_phytype, 11466 "incorrect. old = %u, new = %u\n", sc->sc_phytype,
11431 new_phytype); 11467 new_phytype);
11432 else if ((sc->sc_phytype != WMPHY_UNKNOWN) 11468 else if ((sc->sc_phytype != WMPHY_UNKNOWN)
11433 && (sc->sc_phytype != new_phytype)) 11469 && (sc->sc_phytype != new_phytype))
11434 aprint_error_dev(dev, "Previously assumed PHY type(%u)" 11470 aprint_error_dev(dev, "Previously assumed PHY type(%u)"
11435 "was incorrect. New PHY type = %u\n", 11471 "was incorrect. New PHY type = %u\n",
11436 sc->sc_phytype, new_phytype); 11472 sc->sc_phytype, new_phytype);
11437 11473
11438 if ((mii->mii_readreg != NULL) && 11474 if ((mii->mii_readreg != NULL) &&
11439 (new_phytype == WMPHY_UNKNOWN)) 11475 (new_phytype == WMPHY_UNKNOWN))
11440 aprint_error_dev(dev, "PHY type is still unknown.\n"); 11476 aprint_error_dev(dev, "PHY type is still unknown.\n");
11441 11477
11442 if ((mii->mii_readreg != NULL) && 11478 if ((mii->mii_readreg != NULL) &&
11443 (mii->mii_readreg != new_readreg)) 11479 (mii->mii_readreg != new_readreg))
11444 aprint_error_dev(dev, "Previously assumed PHY " 11480 aprint_error_dev(dev, "Previously assumed PHY "
11445 "read/write function was incorrect.\n"); 11481 "read/write function was incorrect.\n");
11446 } 11482 }
11447 11483
11448 /* Update now */ 11484 /* Update now */
11449 sc->sc_phytype = new_phytype; 11485 sc->sc_phytype = new_phytype;
11450 mii->mii_readreg = new_readreg; 11486 mii->mii_readreg = new_readreg;
11451 mii->mii_writereg = new_writereg; 11487 mii->mii_writereg = new_writereg;
11452 if (new_readreg == wm_gmii_hv_readreg) { 11488 if (new_readreg == wm_gmii_hv_readreg) {
11453 sc->phy.readreg_locked = wm_gmii_hv_readreg_locked; 11489 sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
11454 sc->phy.writereg_locked = wm_gmii_hv_writereg_locked; 11490 sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
11455 } else if (new_readreg == wm_gmii_i82544_readreg) { 11491 } else if (new_readreg == wm_gmii_i82544_readreg) {
11456 sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked; 11492 sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
11457 sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked; 11493 sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
11458 } 11494 }
11459} 11495}
11460 11496
11461/* 11497/*
11462 * wm_get_phy_id_82575: 11498 * wm_get_phy_id_82575:
11463 * 11499 *
11464 * Return PHY ID. Return -1 if it failed. 11500 * Return PHY ID. Return -1 if it failed.
11465 */ 11501 */
11466static int 11502static int
11467wm_get_phy_id_82575(struct wm_softc *sc) 11503wm_get_phy_id_82575(struct wm_softc *sc)
11468{ 11504{
11469 uint32_t reg; 11505 uint32_t reg;
11470 int phyid = -1; 11506 int phyid = -1;
11471 11507
11472 /* XXX */ 11508 /* XXX */
11473 if ((sc->sc_flags & WM_F_SGMII) == 0) 11509 if ((sc->sc_flags & WM_F_SGMII) == 0)
11474 return -1; 11510 return -1;
11475 11511
11476 if (wm_sgmii_uses_mdio(sc)) { 11512 if (wm_sgmii_uses_mdio(sc)) {
11477 switch (sc->sc_type) { 11513 switch (sc->sc_type) {
11478 case WM_T_82575: 11514 case WM_T_82575:
11479 case WM_T_82576: 11515 case WM_T_82576:
11480 reg = CSR_READ(sc, WMREG_MDIC); 11516 reg = CSR_READ(sc, WMREG_MDIC);
11481 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT; 11517 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
11482 break; 11518 break;
11483 case WM_T_82580: 11519 case WM_T_82580:
11484 case WM_T_I350: 11520 case WM_T_I350:
11485 case WM_T_I354: 11521 case WM_T_I354:
11486 case WM_T_I210: 11522 case WM_T_I210:
11487 case WM_T_I211: 11523 case WM_T_I211:
11488 reg = CSR_READ(sc, WMREG_MDICNFG); 11524 reg = CSR_READ(sc, WMREG_MDICNFG);
11489 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT; 11525 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
11490 break; 11526 break;
11491 default: 11527 default:
11492 return -1; 11528 return -1;
11493 } 11529 }
11494 } 11530 }
11495 11531
11496 return phyid; 11532 return phyid;
11497} 11533}
11498 11534
11499/* 11535/*
11500 * wm_gmii_mediainit: 11536 * wm_gmii_mediainit:
11501 * 11537 *
11502 * Initialize media for use on 1000BASE-T devices. 11538 * Initialize media for use on 1000BASE-T devices.
11503 */ 11539 */
11504static void 11540static void
11505wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid) 11541wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
11506{ 11542{
11507 device_t dev = sc->sc_dev; 11543 device_t dev = sc->sc_dev;
11508 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 11544 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
11509 struct mii_data *mii = &sc->sc_mii; 11545 struct mii_data *mii = &sc->sc_mii;
11510 11546
11511 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n", 11547 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
11512 device_xname(sc->sc_dev), __func__)); 11548 device_xname(sc->sc_dev), __func__));
11513 11549
11514 /* We have GMII. */ 11550 /* We have GMII. */
11515 sc->sc_flags |= WM_F_HAS_MII; 11551 sc->sc_flags |= WM_F_HAS_MII;
11516 11552
11517 if (sc->sc_type == WM_T_80003) 11553 if (sc->sc_type == WM_T_80003)
11518 sc->sc_tipg = TIPG_1000T_80003_DFLT; 11554 sc->sc_tipg = TIPG_1000T_80003_DFLT;
11519 else 11555 else
11520 sc->sc_tipg = TIPG_1000T_DFLT; 11556 sc->sc_tipg = TIPG_1000T_DFLT;
11521 11557
11522 /* 11558 /*
11523 * Let the chip set speed/duplex on its own based on 11559 * Let the chip set speed/duplex on its own based on
11524 * signals from the PHY. 11560 * signals from the PHY.
11525 * XXXbouyer - I'm not sure this is right for the 80003, 11561 * XXXbouyer - I'm not sure this is right for the 80003,
11526 * the em driver only sets CTRL_SLU here - but it seems to work. 11562 * the em driver only sets CTRL_SLU here - but it seems to work.
11527 */ 11563 */
11528 sc->sc_ctrl |= CTRL_SLU; 11564 sc->sc_ctrl |= CTRL_SLU;
11529 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 11565 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11530 11566
11531 /* Initialize our media structures and probe the GMII. */ 11567 /* Initialize our media structures and probe the GMII. */
11532 mii->mii_ifp = ifp; 11568 mii->mii_ifp = ifp;
11533 11569
11534 mii->mii_statchg = wm_gmii_statchg; 11570 mii->mii_statchg = wm_gmii_statchg;
11535 11571
11536 /* get PHY control from SMBus to PCIe */ 11572 /* get PHY control from SMBus to PCIe */
11537 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2) 11573 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
11538 || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT) 11574 || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
11539 || (sc->sc_type == WM_T_PCH_CNP) || (sc->sc_type == WM_T_PCH_TGP)) 11575 || (sc->sc_type == WM_T_PCH_CNP) || (sc->sc_type == WM_T_PCH_TGP))
11540 wm_init_phy_workarounds_pchlan(sc); 11576 wm_init_phy_workarounds_pchlan(sc);
11541 11577
11542 wm_gmii_reset(sc); 11578 wm_gmii_reset(sc);
11543 11579
11544 sc->sc_ethercom.ec_mii = &sc->sc_mii; 11580 sc->sc_ethercom.ec_mii = &sc->sc_mii;
11545 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange, 11581 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
11546 wm_gmii_mediastatus); 11582 wm_gmii_mediastatus);
11547 11583
11548 /* Setup internal SGMII PHY for SFP */ 11584 /* Setup internal SGMII PHY for SFP */
11549 wm_sgmii_sfp_preconfig(sc); 11585 wm_sgmii_sfp_preconfig(sc);
11550 11586
11551 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576) 11587 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
11552 || (sc->sc_type == WM_T_82580) 11588 || (sc->sc_type == WM_T_82580)
11553 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354) 11589 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
11554 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) { 11590 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
11555 if ((sc->sc_flags & WM_F_SGMII) == 0) { 11591 if ((sc->sc_flags & WM_F_SGMII) == 0) {
11556 /* Attach only one port */ 11592 /* Attach only one port */
11557 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1, 11593 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
11558 MII_OFFSET_ANY, MIIF_DOPAUSE); 11594 MII_OFFSET_ANY, MIIF_DOPAUSE);
11559 } else { 11595 } else {
11560 int i, id; 11596 int i, id;
11561 uint32_t ctrl_ext; 11597 uint32_t ctrl_ext;
11562 11598
11563 id = wm_get_phy_id_82575(sc); 11599 id = wm_get_phy_id_82575(sc);
11564 if (id != -1) { 11600 if (id != -1) {
11565 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 11601 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
11566 id, MII_OFFSET_ANY, MIIF_DOPAUSE); 11602 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
11567 } 11603 }
11568 if ((id == -1) 11604 if ((id == -1)
11569 || (LIST_FIRST(&mii->mii_phys) == NULL)) { 11605 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
11570 /* Power on sgmii phy if it is disabled */ 11606 /* Power on sgmii phy if it is disabled */
11571 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT); 11607 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
11572 CSR_WRITE(sc, WMREG_CTRL_EXT, 11608 CSR_WRITE(sc, WMREG_CTRL_EXT,
11573 ctrl_ext &~ CTRL_EXT_SWDPIN(3)); 11609 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
11574 CSR_WRITE_FLUSH(sc); 11610 CSR_WRITE_FLUSH(sc);
11575 delay(300*1000); /* XXX too long */ 11611 delay(300*1000); /* XXX too long */
11576 11612
11577 /* 11613 /*
11578 * From 1 to 8. 11614 * From 1 to 8.
11579 * 11615 *
11580 * I2C access fails with I2C register's ERROR 11616 * I2C access fails with I2C register's ERROR
11581 * bit set, so prevent error message while 11617 * bit set, so prevent error message while
11582 * scanning. 11618 * scanning.
11583 */ 11619 */
11584 sc->phy.no_errprint = true; 11620 sc->phy.no_errprint = true;
11585 for (i = 1; i < 8; i++) 11621 for (i = 1; i < 8; i++)
11586 mii_attach(sc->sc_dev, &sc->sc_mii, 11622 mii_attach(sc->sc_dev, &sc->sc_mii,
11587 0xffffffff, i, MII_OFFSET_ANY, 11623 0xffffffff, i, MII_OFFSET_ANY,
11588 MIIF_DOPAUSE); 11624 MIIF_DOPAUSE);
11589 sc->phy.no_errprint = false; 11625 sc->phy.no_errprint = false;
11590 11626
11591 /* Restore previous sfp cage power state */ 11627 /* Restore previous sfp cage power state */
11592 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext); 11628 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
11593 } 11629 }
11594 } 11630 }
11595 } else 11631 } else
11596 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 11632 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
11597 MII_OFFSET_ANY, MIIF_DOPAUSE); 11633 MII_OFFSET_ANY, MIIF_DOPAUSE);
11598 11634
11599 /* 11635 /*
11600 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call 11636 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
11601 * wm_set_mdio_slow_mode_hv() for a workaround and retry. 11637 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
11602 */ 11638 */
11603 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) || 11639 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
11604 (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP) 11640 (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)
11605 || (sc->sc_type == WM_T_PCH_TGP)) 11641 || (sc->sc_type == WM_T_PCH_TGP))
11606 && (LIST_FIRST(&mii->mii_phys) == NULL)) { 11642 && (LIST_FIRST(&mii->mii_phys) == NULL)) {
11607 wm_set_mdio_slow_mode_hv(sc); 11643 wm_set_mdio_slow_mode_hv(sc);
11608 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 11644 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
11609 MII_OFFSET_ANY, MIIF_DOPAUSE); 11645 MII_OFFSET_ANY, MIIF_DOPAUSE);
11610 } 11646 }
11611 11647
11612 /* 11648 /*
11613 * (For ICH8 variants) 11649 * (For ICH8 variants)
11614 * If PHY detection failed, use BM's r/w function and retry. 11650 * If PHY detection failed, use BM's r/w function and retry.
11615 */ 11651 */
11616 if (LIST_FIRST(&mii->mii_phys) == NULL) { 11652 if (LIST_FIRST(&mii->mii_phys) == NULL) {
11617 /* if failed, retry with *_bm_* */ 11653 /* if failed, retry with *_bm_* */
11618 aprint_verbose_dev(dev, "Assumed PHY access function " 11654 aprint_verbose_dev(dev, "Assumed PHY access function "
11619 "(type = %d) might be incorrect. Use BM and retry.\n", 11655 "(type = %d) might be incorrect. Use BM and retry.\n",
11620 sc->sc_phytype); 11656 sc->sc_phytype);
11621 sc->sc_phytype = WMPHY_BM; 11657 sc->sc_phytype = WMPHY_BM;
11622 mii->mii_readreg = wm_gmii_bm_readreg; 11658 mii->mii_readreg = wm_gmii_bm_readreg;
11623 mii->mii_writereg = wm_gmii_bm_writereg; 11659 mii->mii_writereg = wm_gmii_bm_writereg;
11624 11660
11625 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 11661 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
11626 MII_OFFSET_ANY, MIIF_DOPAUSE); 11662 MII_OFFSET_ANY, MIIF_DOPAUSE);
11627 } 11663 }
11628 11664
11629 if (LIST_FIRST(&mii->mii_phys) == NULL) { 11665 if (LIST_FIRST(&mii->mii_phys) == NULL) {
11630 /* Any PHY wasn't found */ 11666 /* Any PHY wasn't found */
11631 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL); 11667 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
11632 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE); 11668 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
11633 sc->sc_phytype = WMPHY_NONE; 11669 sc->sc_phytype = WMPHY_NONE;
11634 } else { 11670 } else {
11635 struct mii_softc *child = LIST_FIRST(&mii->mii_phys); 11671 struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
11636 11672
11637 /* 11673 /*
11638 * PHY found! Check PHY type again by the second call of 11674 * PHY found! Check PHY type again by the second call of
11639 * wm_gmii_setup_phytype. 11675 * wm_gmii_setup_phytype.
11640 */ 11676 */
11641 wm_gmii_setup_phytype(sc, child->mii_mpd_oui, 11677 wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
11642 child->mii_mpd_model); 11678 child->mii_mpd_model);
11643 11679
11644 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO); 11680 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
11645 } 11681 }
11646} 11682}
11647 11683
11648/* 11684/*
11649 * wm_gmii_mediachange: [ifmedia interface function] 11685 * wm_gmii_mediachange: [ifmedia interface function]
11650 * 11686 *
11651 * Set hardware to newly-selected media on a 1000BASE-T device. 11687 * Set hardware to newly-selected media on a 1000BASE-T device.
11652 */ 11688 */
11653static int 11689static int
11654wm_gmii_mediachange(struct ifnet *ifp) 11690wm_gmii_mediachange(struct ifnet *ifp)
11655{ 11691{
11656 struct wm_softc *sc = ifp->if_softc; 11692 struct wm_softc *sc = ifp->if_softc;
11657 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur; 11693 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
11658 uint32_t reg; 11694 uint32_t reg;
11659 int rc; 11695 int rc;
11660 11696
11661 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n", 11697 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
11662 device_xname(sc->sc_dev), __func__)); 11698 device_xname(sc->sc_dev), __func__));
11663 if ((ifp->if_flags & IFF_UP) == 0) 11699 if ((ifp->if_flags & IFF_UP) == 0)
11664 return 0; 11700 return 0;
11665 11701
11666 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */ 11702 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
11667 if ((sc->sc_type == WM_T_82580) 11703 if ((sc->sc_type == WM_T_82580)
11668 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210) 11704 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
11669 || (sc->sc_type == WM_T_I211)) { 11705 || (sc->sc_type == WM_T_I211)) {
11670 reg = CSR_READ(sc, WMREG_PHPM); 11706 reg = CSR_READ(sc, WMREG_PHPM);
11671 reg &= ~PHPM_GO_LINK_D; 11707 reg &= ~PHPM_GO_LINK_D;
11672 CSR_WRITE(sc, WMREG_PHPM, reg); 11708 CSR_WRITE(sc, WMREG_PHPM, reg);
11673 } 11709 }
11674 11710
11675 /* Disable D0 LPLU. */ 11711 /* Disable D0 LPLU. */
11676 wm_lplu_d0_disable(sc); 11712 wm_lplu_d0_disable(sc);
11677 11713
11678 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD); 11714 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
11679 sc->sc_ctrl |= CTRL_SLU; 11715 sc->sc_ctrl |= CTRL_SLU;
11680 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) 11716 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
11681 || (sc->sc_type > WM_T_82543)) { 11717 || (sc->sc_type > WM_T_82543)) {
11682 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX); 11718 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
11683 } else { 11719 } else {
11684 sc->sc_ctrl &= ~CTRL_ASDE; 11720 sc->sc_ctrl &= ~CTRL_ASDE;
11685 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX; 11721 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
11686 if (ife->ifm_media & IFM_FDX) 11722 if (ife->ifm_media & IFM_FDX)
11687 sc->sc_ctrl |= CTRL_FD; 11723 sc->sc_ctrl |= CTRL_FD;
11688 switch (IFM_SUBTYPE(ife->ifm_media)) { 11724 switch (IFM_SUBTYPE(ife->ifm_media)) {
11689 case IFM_10_T: 11725 case IFM_10_T:
11690 sc->sc_ctrl |= CTRL_SPEED_10; 11726 sc->sc_ctrl |= CTRL_SPEED_10;
11691 break; 11727 break;
11692 case IFM_100_TX: 11728 case IFM_100_TX:
11693 sc->sc_ctrl |= CTRL_SPEED_100; 11729 sc->sc_ctrl |= CTRL_SPEED_100;
11694 break; 11730 break;
11695 case IFM_1000_T: 11731 case IFM_1000_T:
11696 sc->sc_ctrl |= CTRL_SPEED_1000; 11732 sc->sc_ctrl |= CTRL_SPEED_1000;
11697 break; 11733 break;
11698 case IFM_NONE: 11734 case IFM_NONE:
11699 /* There is no specific setting for IFM_NONE */ 11735 /* There is no specific setting for IFM_NONE */
11700 break; 11736 break;
11701 default: 11737 default:
11702 panic("wm_gmii_mediachange: bad media 0x%x", 11738 panic("wm_gmii_mediachange: bad media 0x%x",
11703 ife->ifm_media); 11739 ife->ifm_media);
11704 } 11740 }
11705 } 11741 }
11706 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 11742 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11707 CSR_WRITE_FLUSH(sc); 11743 CSR_WRITE_FLUSH(sc);
11708 11744
11709 if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) 11745 if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
11710 wm_serdes_mediachange(ifp); 11746 wm_serdes_mediachange(ifp);
11711 11747
11712 if (sc->sc_type <= WM_T_82543) 11748 if (sc->sc_type <= WM_T_82543)
11713 wm_gmii_reset(sc); 11749 wm_gmii_reset(sc);
11714 else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211) 11750 else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
11715 && ((sc->sc_flags & WM_F_SGMII) != 0)) { 11751 && ((sc->sc_flags & WM_F_SGMII) != 0)) {
11716 /* allow time for SFP cage time to power up phy */ 11752 /* allow time for SFP cage time to power up phy */
11717 delay(300 * 1000); 11753 delay(300 * 1000);
11718 wm_gmii_reset(sc); 11754 wm_gmii_reset(sc);
11719 } 11755 }
11720 11756
11721 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO) 11757 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
11722 return 0; 11758 return 0;
11723 return rc; 11759 return rc;
11724} 11760}
11725 11761
11726/* 11762/*
11727 * wm_gmii_mediastatus: [ifmedia interface function] 11763 * wm_gmii_mediastatus: [ifmedia interface function]
11728 * 11764 *
11729 * Get the current interface media status on a 1000BASE-T device. 11765 * Get the current interface media status on a 1000BASE-T device.
11730 */ 11766 */
11731static void 11767static void
11732wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 11768wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
11733{ 11769{
11734 struct wm_softc *sc = ifp->if_softc; 11770 struct wm_softc *sc = ifp->if_softc;
11735 struct ethercom *ec = &sc->sc_ethercom; 11771 struct ethercom *ec = &sc->sc_ethercom;
11736 struct mii_data *mii; 11772 struct mii_data *mii;
11737 bool dopoll = true; 11773 bool dopoll = true;
11738 11774
11739 KASSERT(ec->ec_mii != NULL); 11775 KASSERT(ec->ec_mii != NULL);
11740 mii = ec->ec_mii; 11776 mii = ec->ec_mii;
11741 if ((sc->sc_flags & WM_F_DELAY_LINKUP) != 0) { 11777 if ((sc->sc_flags & WM_F_DELAY_LINKUP) != 0) {
11742 struct timeval now; 11778 struct timeval now;
11743 11779
11744 getmicrotime(&now); 11780 getmicrotime(&now);
11745 if (timercmp(&now, &sc->sc_linkup_delay_time, <)) 11781 if (timercmp(&now, &sc->sc_linkup_delay_time, <))
11746 dopoll = false; 11782 dopoll = false;
11747 else if (sc->sc_linkup_delay_time.tv_sec != 0) { 11783 else if (sc->sc_linkup_delay_time.tv_sec != 0) {
11748 /* Simplify by checking tv_sec only. It's enough. */ 11784 /* Simplify by checking tv_sec only. It's enough. */
11749 11785
11750 sc->sc_linkup_delay_time.tv_sec = 0; 11786 sc->sc_linkup_delay_time.tv_sec = 0;
11751 sc->sc_linkup_delay_time.tv_usec = 0; 11787 sc->sc_linkup_delay_time.tv_usec = 0;
11752 } 11788 }
11753 } 11789 }
11754 11790
11755 /* 11791 /*
11756 * Don't call mii_pollstat() while doing workaround. 11792 * Don't call mii_pollstat() while doing workaround.
11757 * See also wm_linkintr_gmii() and wm_tick(). 11793 * See also wm_linkintr_gmii() and wm_tick().
11758 */ 11794 */
11759 if (dopoll) 11795 if (dopoll)
11760 mii_pollstat(mii); 11796 mii_pollstat(mii);
11761 ifmr->ifm_active = mii->mii_media_active; 11797 ifmr->ifm_active = mii->mii_media_active;
11762 ifmr->ifm_status = mii->mii_media_status; 11798 ifmr->ifm_status = mii->mii_media_status;
11763 11799
11764 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) 11800 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
11765 | sc->sc_flowflags; 11801 | sc->sc_flowflags;
11766} 11802}
11767 11803
11768#define MDI_IO CTRL_SWDPIN(2) 11804#define MDI_IO CTRL_SWDPIN(2)
11769#define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */ 11805#define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
11770#define MDI_CLK CTRL_SWDPIN(3) 11806#define MDI_CLK CTRL_SWDPIN(3)
11771 11807
11772static void 11808static void
11773wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits) 11809wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
11774{ 11810{
11775 uint32_t i, v; 11811 uint32_t i, v;
11776 11812
11777 v = CSR_READ(sc, WMREG_CTRL); 11813 v = CSR_READ(sc, WMREG_CTRL);
11778 v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT)); 11814 v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
11779 v |= MDI_DIR | CTRL_SWDPIO(3); 11815 v |= MDI_DIR | CTRL_SWDPIO(3);
11780 11816
11781 for (i = __BIT(nbits - 1); i != 0; i >>= 1) { 11817 for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
11782 if (data & i) 11818 if (data & i)
11783 v |= MDI_IO; 11819 v |= MDI_IO;
11784 else 11820 else
11785 v &= ~MDI_IO; 11821 v &= ~MDI_IO;
11786 CSR_WRITE(sc, WMREG_CTRL, v); 11822 CSR_WRITE(sc, WMREG_CTRL, v);
11787 CSR_WRITE_FLUSH(sc); 11823 CSR_WRITE_FLUSH(sc);
11788 delay(10); 11824 delay(10);
11789 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); 11825 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11790 CSR_WRITE_FLUSH(sc); 11826 CSR_WRITE_FLUSH(sc);
11791 delay(10); 11827 delay(10);
11792 CSR_WRITE(sc, WMREG_CTRL, v); 11828 CSR_WRITE(sc, WMREG_CTRL, v);
11793 CSR_WRITE_FLUSH(sc); 11829 CSR_WRITE_FLUSH(sc);
11794 delay(10); 11830 delay(10);
11795 } 11831 }
11796} 11832}
11797 11833
11798static uint32_t 11834static uint32_t
11799wm_i82543_mii_recvbits(struct wm_softc *sc) 11835wm_i82543_mii_recvbits(struct wm_softc *sc)
11800{ 11836{
11801 uint32_t v, i, data = 0; 11837 uint32_t v, i, data = 0;
11802 11838
11803 v = CSR_READ(sc, WMREG_CTRL); 11839 v = CSR_READ(sc, WMREG_CTRL);
11804 v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT)); 11840 v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
11805 v |= CTRL_SWDPIO(3); 11841 v |= CTRL_SWDPIO(3);
11806 11842
11807 CSR_WRITE(sc, WMREG_CTRL, v); 11843 CSR_WRITE(sc, WMREG_CTRL, v);
11808 CSR_WRITE_FLUSH(sc); 11844 CSR_WRITE_FLUSH(sc);
11809 delay(10); 11845 delay(10);
11810 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); 11846 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11811 CSR_WRITE_FLUSH(sc); 11847 CSR_WRITE_FLUSH(sc);
11812 delay(10); 11848 delay(10);
11813 CSR_WRITE(sc, WMREG_CTRL, v); 11849 CSR_WRITE(sc, WMREG_CTRL, v);
11814 CSR_WRITE_FLUSH(sc); 11850 CSR_WRITE_FLUSH(sc);
11815 delay(10); 11851 delay(10);
11816 11852
11817 for (i = 0; i < 16; i++) { 11853 for (i = 0; i < 16; i++) {
11818 data <<= 1; 11854 data <<= 1;
11819 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); 11855 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11820 CSR_WRITE_FLUSH(sc); 11856 CSR_WRITE_FLUSH(sc);
11821 delay(10); 11857 delay(10);
11822 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO) 11858 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
11823 data |= 1; 11859 data |= 1;
11824 CSR_WRITE(sc, WMREG_CTRL, v); 11860 CSR_WRITE(sc, WMREG_CTRL, v);
11825 CSR_WRITE_FLUSH(sc); 11861 CSR_WRITE_FLUSH(sc);
11826 delay(10); 11862 delay(10);
11827 } 11863 }
11828 11864
11829 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); 11865 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11830 CSR_WRITE_FLUSH(sc); 11866 CSR_WRITE_FLUSH(sc);
11831 delay(10); 11867 delay(10);
11832 CSR_WRITE(sc, WMREG_CTRL, v); 11868 CSR_WRITE(sc, WMREG_CTRL, v);
11833 CSR_WRITE_FLUSH(sc); 11869 CSR_WRITE_FLUSH(sc);
11834 delay(10); 11870 delay(10);
11835 11871
11836 return data; 11872 return data;
11837} 11873}
11838 11874
11839#undef MDI_IO 11875#undef MDI_IO
11840#undef MDI_DIR 11876#undef MDI_DIR
11841#undef MDI_CLK 11877#undef MDI_CLK
11842 11878
11843/* 11879/*
11844 * wm_gmii_i82543_readreg: [mii interface function] 11880 * wm_gmii_i82543_readreg: [mii interface function]
11845 * 11881 *
11846 * Read a PHY register on the GMII (i82543 version). 11882 * Read a PHY register on the GMII (i82543 version).
11847 */ 11883 */
11848static int 11884static int
11849wm_gmii_i82543_readreg(device_t dev, int phy, int reg) 11885wm_gmii_i82543_readreg(device_t dev, int phy, int reg)
11850{ 11886{
11851 struct wm_softc *sc = device_private(dev); 11887 struct wm_softc *sc = device_private(dev);
11852 int rv; 11888 int rv;
11853 11889
11854 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32); 11890 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
11855 wm_i82543_mii_sendbits(sc, reg | (phy << 5) | 11891 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
11856 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14); 11892 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
11857 rv = wm_i82543_mii_recvbits(sc) & 0xffff; 11893 rv = wm_i82543_mii_recvbits(sc) & 0xffff;
11858 11894
11859 DPRINTF(sc, WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n", 11895 DPRINTF(sc, WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
11860 device_xname(dev), phy, reg, rv)); 11896 device_xname(dev), phy, reg, rv));
11861 11897
11862 return rv; 11898 return rv;
11863} 11899}
11864 11900
11865/* 11901/*
11866 * wm_gmii_i82543_writereg: [mii interface function] 11902 * wm_gmii_i82543_writereg: [mii interface function]
11867 * 11903 *
11868 * Write a PHY register on the GMII (i82543 version). 11904 * Write a PHY register on the GMII (i82543 version).
11869 */ 11905 */
11870static void 11906static void
11871wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val) 11907wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val)
11872{ 11908{
11873 struct wm_softc *sc = device_private(dev); 11909 struct wm_softc *sc = device_private(dev);
11874 11910
11875 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32); 11911 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
11876 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) | 11912 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
11877 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) | 11913 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
11878 (MII_COMMAND_START << 30), 32); 11914 (MII_COMMAND_START << 30), 32);
11879} 11915}
11880 11916
11881/* 11917/*
11882 * wm_gmii_mdic_readreg: [mii interface function] 11918 * wm_gmii_mdic_readreg: [mii interface function]
11883 * 11919 *
11884 * Read a PHY register on the GMII. 11920 * Read a PHY register on the GMII.
11885 */ 11921 */
11886static int 11922static int
11887wm_gmii_mdic_readreg(device_t dev, int phy, int reg) 11923wm_gmii_mdic_readreg(device_t dev, int phy, int reg)
11888{ 11924{
11889 struct wm_softc *sc = device_private(dev); 11925 struct wm_softc *sc = device_private(dev);
11890 uint32_t mdic = 0; 11926 uint32_t mdic = 0;
11891 int i, rv; 11927 int i, rv;
11892 11928
11893 if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217) 11929 if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
11894 && (reg > MII_ADDRMASK)) { 11930 && (reg > MII_ADDRMASK)) {
11895 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n", 11931 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
11896 __func__, sc->sc_phytype, reg); 11932 __func__, sc->sc_phytype, reg);
11897 reg &= MII_ADDRMASK; 11933 reg &= MII_ADDRMASK;
11898 } 11934 }
11899 11935
11900 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) | 11936 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
11901 MDIC_REGADD(reg)); 11937 MDIC_REGADD(reg));
11902 11938
11903 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) { 11939 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
11904 delay(50); 11940 delay(50);
11905 mdic = CSR_READ(sc, WMREG_MDIC); 11941 mdic = CSR_READ(sc, WMREG_MDIC);
11906 if (mdic & MDIC_READY) 11942 if (mdic & MDIC_READY)
11907 break; 11943 break;
11908 } 11944 }
11909 11945
11910 if ((mdic & MDIC_READY) == 0) { 11946 if ((mdic & MDIC_READY) == 0) {
11911 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n", 11947 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
11912 device_xname(dev), phy, reg); 11948 device_xname(dev), phy, reg);
11913 return 0; 11949 return 0;
11914 } else if (mdic & MDIC_E) { 11950 } else if (mdic & MDIC_E) {
11915#if 0 /* This is normal if no PHY is present. */ 11951#if 0 /* This is normal if no PHY is present. */
11916 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n", 11952 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
11917 device_xname(dev), phy, reg); 11953 device_xname(dev), phy, reg);
11918#endif 11954#endif
11919 return 0; 11955 return 0;
11920 } else { 11956 } else {
11921 rv = MDIC_DATA(mdic); 11957 rv = MDIC_DATA(mdic);
11922 if (rv == 0xffff) 11958 if (rv == 0xffff)
11923 rv = 0; 11959 rv = 0;
11924 } 11960 }
11925 11961
11926 /* 11962 /*
11927 * Allow some time after each MDIC transaction to avoid 11963 * Allow some time after each MDIC transaction to avoid
11928 * reading duplicate data in the next MDIC transaction. 11964 * reading duplicate data in the next MDIC transaction.
11929 */ 11965 */
11930 if (sc->sc_type == WM_T_PCH2) 11966 if (sc->sc_type == WM_T_PCH2)
11931 delay(100); 11967 delay(100);
11932 11968
11933 return rv; 11969 return rv;
11934} 11970}
11935 11971
11936/* 11972/*
11937 * wm_gmii_mdic_writereg: [mii interface function] 11973 * wm_gmii_mdic_writereg: [mii interface function]
11938 * 11974 *
11939 * Write a PHY register on the GMII. 11975 * Write a PHY register on the GMII.
11940 */ 11976 */
11941static void 11977static void
11942wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val) 11978wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val)
11943{ 11979{
11944 struct wm_softc *sc = device_private(dev); 11980 struct wm_softc *sc = device_private(dev);
11945 uint32_t mdic = 0; 11981 uint32_t mdic = 0;
11946 int i; 11982 int i;
11947 11983
@@ -13977,2008 +14013,2009 @@ wm_nvm_set_addrbits_size_eecd(struct wm_ @@ -13977,2008 +14013,2009 @@ wm_nvm_set_addrbits_size_eecd(struct wm_
13977 * Wait for a SPI EEPROM to be ready for commands. 14013 * Wait for a SPI EEPROM to be ready for commands.
13978 */ 14014 */
static int
wm_nvm_ready_spi(struct wm_softc *sc)
{
	uint32_t val;
	int usec;

	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));

	/*
	 * Poll the EEPROM status register (RDSR opcode) until the
	 * SPI_SR_RDY bit reads as clear, pausing 5us between attempts,
	 * for at most SPI_MAX_RETRIES microseconds total.
	 */
	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
		wm_eeprom_recvbits(sc, &val, 8);
		if ((val & SPI_SR_RDY) == 0)
			break;
	}
	/* Return 0 when the EEPROM became ready, -1 on timeout. */
	if (usec >= SPI_MAX_RETRIES) {
		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
		return -1;
	}
	return 0;
}
14000 14036
/*
 * wm_nvm_read_spi:
 *
 *	Read a word (or words) from the EEPROM using the SPI protocol.
 *
 *	Acquires the NVM via sc->nvm.acquire(), bit-bangs a READ command
 *	through the EECD register, stores wordcnt 16-bit words (byte-swapped
 *	from wire order) into data[], and releases the NVM.  Returns 0 on
 *	success or a non-zero error from acquire/ready polling.
 */
static int
wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	uint32_t reg, val;
	int i;
	uint8_t opc;
	int rv;

	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));

	rv = sc->nvm.acquire(sc);
	if (rv != 0)
		return rv;

	/* Clear SK and CS. */
	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
	CSR_WRITE(sc, WMREG_EECD, reg);
	CSR_WRITE_FLUSH(sc);
	delay(2);

	if ((rv = wm_nvm_ready_spi(sc)) != 0)
		goto out;

	/* Toggle CS to flush commands. */
	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
	CSR_WRITE_FLUSH(sc);
	delay(2);
	CSR_WRITE(sc, WMREG_EECD, reg);
	CSR_WRITE_FLUSH(sc);
	delay(2);

	/*
	 * Small-address (8-bit) parts encode the 9th address bit in the
	 * opcode (A8), so set it when reading the upper half.
	 */
	opc = SPI_OPC_READ;
	if (sc->sc_nvm_addrbits == 8 && word >= 128)
		opc |= SPI_OPC_A8;

	/* Byte-granular device: word index is shifted to a byte address. */
	wm_eeprom_sendbits(sc, opc, 8);
	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);

	for (i = 0; i < wordcnt; i++) {
		wm_eeprom_recvbits(sc, &val, 16);
		/* Swap to host order: data arrives MSB first. */
		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
	}

	/* Raise CS and clear SK. */
	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
	CSR_WRITE(sc, WMREG_EECD, reg);
	CSR_WRITE_FLUSH(sc);
	delay(2);

out:
	sc->nvm.release(sc);
	return rv;
}
14060 14096
14061/* Using with EERD */ 14097/* Using with EERD */
14062 14098
14063static int 14099static int
14064wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw) 14100wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
14065{ 14101{
14066 uint32_t attempts = 100000; 14102 uint32_t attempts = 100000;
14067 uint32_t i, reg = 0; 14103 uint32_t i, reg = 0;
14068 int32_t done = -1; 14104 int32_t done = -1;
14069 14105
14070 for (i = 0; i < attempts; i++) { 14106 for (i = 0; i < attempts; i++) {
14071 reg = CSR_READ(sc, rw); 14107 reg = CSR_READ(sc, rw);
14072 14108
14073 if (reg & EERD_DONE) { 14109 if (reg & EERD_DONE) {
14074 done = 0; 14110 done = 0;
14075 break; 14111 break;
14076 } 14112 }
14077 delay(5); 14113 delay(5);
14078 } 14114 }
14079 14115
14080 return done; 14116 return done;
14081} 14117}
14082 14118
/*
 * wm_nvm_read_eerd:
 *
 *	Read wordcnt 16-bit words from the NVM starting at 'offset' using
 *	the EERD (EEPROM read) register interface.  Returns 0 on success
 *	or non-zero on acquire failure or EERD polling timeout.
 */
static int
wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
{
	int i, eerd = 0;
	int rv;

	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));

	rv = sc->nvm.acquire(sc);
	if (rv != 0)
		return rv;

	for (i = 0; i < wordcnt; i++) {
		/* Kick off one word read, then wait for DONE. */
		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
		CSR_WRITE(sc, WMREG_EERD, eerd);
		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
			break;
		}
		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
	}

	sc->nvm.release(sc);
	return rv;
}
14111 14147
14112/* Flash */ 14148/* Flash */
14113 14149
14114static int 14150static int
14115wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank) 14151wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
14116{ 14152{
14117 uint32_t eecd; 14153 uint32_t eecd;
14118 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1; 14154 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
14119 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t); 14155 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
14120 uint32_t nvm_dword = 0; 14156 uint32_t nvm_dword = 0;
14121 uint8_t sig_byte = 0; 14157 uint8_t sig_byte = 0;
14122 int rv; 14158 int rv;
14123 14159
14124 switch (sc->sc_type) { 14160 switch (sc->sc_type) {
14125 case WM_T_PCH_SPT: 14161 case WM_T_PCH_SPT:
14126 case WM_T_PCH_CNP: 14162 case WM_T_PCH_CNP:
14127 case WM_T_PCH_TGP: 14163 case WM_T_PCH_TGP:
14128 bank1_offset = sc->sc_ich8_flash_bank_size * 2; 14164 bank1_offset = sc->sc_ich8_flash_bank_size * 2;
14129 act_offset = ICH_NVM_SIG_WORD * 2; 14165 act_offset = ICH_NVM_SIG_WORD * 2;
14130 14166
14131 /* Set bank to 0 in case flash read fails. */ 14167 /* Set bank to 0 in case flash read fails. */
14132 *bank = 0; 14168 *bank = 0;
14133 14169
14134 /* Check bank 0 */ 14170 /* Check bank 0 */
14135 rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword); 14171 rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
14136 if (rv != 0) 14172 if (rv != 0)
14137 return rv; 14173 return rv;
14138 sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8); 14174 sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
14139 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) { 14175 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
14140 *bank = 0; 14176 *bank = 0;
14141 return 0; 14177 return 0;
14142 } 14178 }
14143 14179
14144 /* Check bank 1 */ 14180 /* Check bank 1 */
14145 rv = wm_read_ich8_dword(sc, act_offset + bank1_offset, 14181 rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
14146 &nvm_dword); 14182 &nvm_dword);
14147 sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8); 14183 sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
14148 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) { 14184 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
14149 *bank = 1; 14185 *bank = 1;
14150 return 0; 14186 return 0;
14151 } 14187 }
14152 aprint_error_dev(sc->sc_dev, 14188 aprint_error_dev(sc->sc_dev,
14153 "%s: no valid NVM bank present (%u)\n", __func__, *bank); 14189 "%s: no valid NVM bank present (%u)\n", __func__, *bank);
14154 return -1; 14190 return -1;
14155 case WM_T_ICH8: 14191 case WM_T_ICH8:
14156 case WM_T_ICH9: 14192 case WM_T_ICH9:
14157 eecd = CSR_READ(sc, WMREG_EECD); 14193 eecd = CSR_READ(sc, WMREG_EECD);
14158 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) { 14194 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
14159 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0; 14195 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
14160 return 0; 14196 return 0;
14161 } 14197 }
14162 /* FALLTHROUGH */ 14198 /* FALLTHROUGH */
14163 default: 14199 default:
14164 /* Default to 0 */ 14200 /* Default to 0 */
14165 *bank = 0; 14201 *bank = 0;
14166 14202
14167 /* Check bank 0 */ 14203 /* Check bank 0 */
14168 wm_read_ich8_byte(sc, act_offset, &sig_byte); 14204 wm_read_ich8_byte(sc, act_offset, &sig_byte);
14169 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) { 14205 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
14170 *bank = 0; 14206 *bank = 0;
14171 return 0; 14207 return 0;
14172 } 14208 }
14173 14209
14174 /* Check bank 1 */ 14210 /* Check bank 1 */
14175 wm_read_ich8_byte(sc, act_offset + bank1_offset, 14211 wm_read_ich8_byte(sc, act_offset + bank1_offset,
14176 &sig_byte); 14212 &sig_byte);
14177 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) { 14213 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
14178 *bank = 1; 14214 *bank = 1;
14179 return 0; 14215 return 0;
14180 } 14216 }
14181 } 14217 }
14182 14218
14183 DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n", 14219 DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
14184 device_xname(sc->sc_dev))); 14220 device_xname(sc->sc_dev)));
14185 return -1; 14221 return -1;
14186} 14222}
14187 14223
/******************************************************************************
 * This function does initial flash setup so that a new read/write/erase cycle
 * can be started: it clears stale error/done status and either claims the
 * flash controller (no cycle in progress) or waits for the current cycle
 * to finish.  Returns 0 on success, 1 on failure (descriptor invalid or
 * a previous cycle never completed).
 *
 * sc - The pointer to the hw structure
 ****************************************************************************/
static int32_t
wm_ich8_cycle_init(struct wm_softc *sc)
{
	uint16_t hsfsts;
	int32_t error = 1;
	int32_t i = 0;

	/*
	 * On PCH_SPT and newer the HSFSTS/HSFCTL pair is a single 32-bit
	 * register in LAN memory space (HSFSTS in the low 16 bits); older
	 * parts use a 16-bit flash-space register.
	 */
	if (sc->sc_type >= WM_T_PCH_SPT)
		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
	else
		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);

	/* Check the Flash Descriptor Valid bit in HW status. */
	if ((hsfsts & HSFSTS_FLDVAL) == 0)
		return error;

	/* Clear FCERR in Hw status by writing 1 */
	/* Clear DAEL in Hw status by writing a 1 */
	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;

	if (sc->sc_type >= WM_T_PCH_SPT)
		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
	else
		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);

	/*
	 * Either we should have a hardware SPI cycle in progress bit to check
	 * against, in order to start a new cycle or FDONE bit should be
	 * changed in the hardware so that it is 1 after hardware reset, which
	 * can then be used as an indication whether a cycle is in progress or
	 * has been completed .. we should also have some software semaphore
	 * mechanism to guard FDONE or the cycle in progress bit so that two
	 * threads access to those bits can be sequentiallized or a way so that
	 * 2 threads don't start the cycle at the same time
	 */

	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
		/*
		 * There is no cycle running at present, so we can start a
		 * cycle
		 */

		/* Begin by setting Flash Cycle Done. */
		hsfsts |= HSFSTS_DONE;
		if (sc->sc_type >= WM_T_PCH_SPT)
			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
			    hsfsts & 0xffffUL);
		else
			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
		error = 0;
	} else {
		/*
		 * Otherwise poll for sometime so the current cycle has a
		 * chance to end before giving up.
		 */
		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
			if (sc->sc_type >= WM_T_PCH_SPT)
				hsfsts = ICH8_FLASH_READ32(sc,
				    ICH_FLASH_HSFSTS) & 0xffffUL;
			else
				hsfsts = ICH8_FLASH_READ16(sc,
				    ICH_FLASH_HSFSTS);
			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
				error = 0;
				break;
			}
			delay(1);
		}
		if (error == 0) {
			/*
			 * Successful in waiting for previous cycle to timeout,
			 * now set the Flash Cycle Done.
			 */
			hsfsts |= HSFSTS_DONE;
			if (sc->sc_type >= WM_T_PCH_SPT)
				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
				    hsfsts & 0xffffUL);
			else
				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
				    hsfsts);
		}
	}
	return error;
}
14278 14314
/******************************************************************************
 * This function starts a flash cycle and waits for its completion.
 * Returns 0 when the cycle completed without error, 1 on timeout or
 * flash cycle error.
 *
 * sc - The pointer to the hw structure
 * timeout - maximum number of 1us polls to wait for FDONE
 ****************************************************************************/
static int32_t
wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
{
	uint16_t hsflctl;
	uint16_t hsfsts;
	int32_t error = 1;
	uint32_t i = 0;

	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
	/* On SPT+, HSFCTL is the upper 16 bits of the 32-bit HSFSTS reg. */
	if (sc->sc_type >= WM_T_PCH_SPT)
		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
	else
		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
	hsflctl |= HSFCTL_GO;
	if (sc->sc_type >= WM_T_PCH_SPT)
		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
		    (uint32_t)hsflctl << 16);
	else
		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);

	/* Wait till FDONE bit is set to 1 */
	do {
		if (sc->sc_type >= WM_T_PCH_SPT)
			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
			    & 0xffffUL;
		else
			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
		if (hsfsts & HSFSTS_DONE)
			break;
		delay(1);
		i++;
	} while (i < timeout);
	/*
	 * NOTE(review): "== 1" only works because HSFSTS_DONE appears to be
	 * bit 0; "!= 0" would be the robust form -- confirm against
	 * if_wmreg.h before changing.
	 */
	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
		error = 0;

	return error;
}
14321 14357
/******************************************************************************
 * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
 * Retries the full cycle up to ICH_FLASH_CYCLE_REPEAT_COUNT times on flash
 * cycle errors.  Returns 0 on success, non-zero on failure.
 *
 * sc - The pointer to the hw structure
 * index - The index of the byte or word to read.
 * size - Size of data to read, 1=byte 2=word, 4=dword
 * data - Pointer to the word to store the value read.
 *****************************************************************************/
static int32_t
wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
    uint32_t size, uint32_t *data)
{
	uint16_t hsfsts;
	uint16_t hsflctl;
	uint32_t flash_linear_address;
	uint32_t flash_data = 0;
	int32_t error = 1;
	int32_t count = 0;

	/* Validate arguments: size 1..4, non-NULL output, in-range index. */
	if (size < 1 || size > 4 || data == 0x0 ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
		return error;

	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
	    sc->sc_ich8_flash_base;

	do {
		delay(1);
		/* Steps */
		error = wm_ich8_cycle_init(sc);
		if (error)
			break;

		/* On SPT+, HSFCTL lives in the upper half of HSFSTS. */
		if (sc->sc_type >= WM_T_PCH_SPT)
			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
			    >> 16;
		else
			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
		hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
		    & HSFCTL_BCOUNT_MASK;
		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
		if (sc->sc_type >= WM_T_PCH_SPT) {
			/*
			 * In SPT, This register is in Lan memory space, not
			 * flash. Therefore, only 32 bit access is supported.
			 */
			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
			    (uint32_t)hsflctl << 16);
		} else
			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);

		/*
		 * Write the last 24 bits of index into Flash Linear address
		 * field in Flash Address
		 */
		/* TODO: TBD maybe check the index against the size of flash */

		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);

		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);

		/*
		 * Check if FCERR is set to 1, if set to 1, clear it and try
		 * the whole sequence a few more times, else read in (shift in)
		 * the Flash Data0, the order is least significant byte first
		 * msb to lsb
		 */
		if (error == 0) {
			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
			if (size == 1)
				*data = (uint8_t)(flash_data & 0x000000FF);
			else if (size == 2)
				*data = (uint16_t)(flash_data & 0x0000FFFF);
			else if (size == 4)
				*data = (uint32_t)flash_data;
			break;
		} else {
			/*
			 * If we've gotten here, then things are probably
			 * completely hosed, but if the error condition is
			 * detected, it won't hurt to give it another try...
			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
			 */
			if (sc->sc_type >= WM_T_PCH_SPT)
				hsfsts = ICH8_FLASH_READ32(sc,
				    ICH_FLASH_HSFSTS) & 0xffffUL;
			else
				hsfsts = ICH8_FLASH_READ16(sc,
				    ICH_FLASH_HSFSTS);

			if (hsfsts & HSFSTS_ERR) {
				/* Repeat for some time before giving up. */
				continue;
			} else if ((hsfsts & HSFSTS_DONE) == 0)
				break;
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return error;
}
14423 14459
14424/****************************************************************************** 14460/******************************************************************************
14425 * Reads a single byte from the NVM using the ICH8 flash access registers. 14461 * Reads a single byte from the NVM using the ICH8 flash access registers.
14426 * 14462 *
14427 * sc - pointer to wm_hw structure 14463 * sc - pointer to wm_hw structure
14428 * index - The index of the byte to read. 14464 * index - The index of the byte to read.
14429 * data - Pointer to a byte to store the value read. 14465 * data - Pointer to a byte to store the value read.
14430 *****************************************************************************/ 14466 *****************************************************************************/
14431static int32_t 14467static int32_t
14432wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data) 14468wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
14433{ 14469{
14434 int32_t status; 14470 int32_t status;
14435 uint32_t word = 0; 14471 uint32_t word = 0;
14436 14472
14437 status = wm_read_ich8_data(sc, index, 1, &word); 14473 status = wm_read_ich8_data(sc, index, 1, &word);
14438 if (status == 0) 14474 if (status == 0)
14439 *data = (uint8_t)word; 14475 *data = (uint8_t)word;
14440 else 14476 else
14441 *data = 0; 14477 *data = 0;
14442 14478
14443 return status; 14479 return status;
14444} 14480}
14445 14481
14446/****************************************************************************** 14482/******************************************************************************
14447 * Reads a word from the NVM using the ICH8 flash access registers. 14483 * Reads a word from the NVM using the ICH8 flash access registers.
14448 * 14484 *
14449 * sc - pointer to wm_hw structure 14485 * sc - pointer to wm_hw structure
14450 * index - The starting byte index of the word to read. 14486 * index - The starting byte index of the word to read.
14451 * data - Pointer to a word to store the value read. 14487 * data - Pointer to a word to store the value read.
14452 *****************************************************************************/ 14488 *****************************************************************************/
14453static int32_t 14489static int32_t
14454wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data) 14490wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
14455{ 14491{
14456 int32_t status; 14492 int32_t status;
14457 uint32_t word = 0; 14493 uint32_t word = 0;
14458 14494
14459 status = wm_read_ich8_data(sc, index, 2, &word); 14495 status = wm_read_ich8_data(sc, index, 2, &word);
14460 if (status == 0) 14496 if (status == 0)
14461 *data = (uint16_t)word; 14497 *data = (uint16_t)word;
14462 else 14498 else
14463 *data = 0; 14499 *data = 0;
14464 14500
14465 return status; 14501 return status;
14466} 14502}
14467 14503
14468/****************************************************************************** 14504/******************************************************************************
14469 * Reads a dword from the NVM using the ICH8 flash access registers. 14505 * Reads a dword from the NVM using the ICH8 flash access registers.
14470 * 14506 *
14471 * sc - pointer to wm_hw structure 14507 * sc - pointer to wm_hw structure
14472 * index - The starting byte index of the word to read. 14508 * index - The starting byte index of the word to read.
14473 * data - Pointer to a word to store the value read. 14509 * data - Pointer to a word to store the value read.
14474 *****************************************************************************/ 14510 *****************************************************************************/
14475static int32_t 14511static int32_t
14476wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data) 14512wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
14477{ 14513{
14478 int32_t status; 14514 int32_t status;
14479 14515
14480 status = wm_read_ich8_data(sc, index, 4, data); 14516 status = wm_read_ich8_data(sc, index, 4, data);
14481 return status; 14517 return status;
14482} 14518}
14483 14519
14484/****************************************************************************** 14520/******************************************************************************
14485 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access 14521 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
14486 * register. 14522 * register.
14487 * 14523 *
14488 * sc - Struct containing variables accessed by shared code 14524 * sc - Struct containing variables accessed by shared code
14489 * offset - offset of word in the EEPROM to read 14525 * offset - offset of word in the EEPROM to read
14490 * data - word read from the EEPROM 14526 * data - word read from the EEPROM
14491 * words - number of words to read 14527 * words - number of words to read
14492 *****************************************************************************/ 14528 *****************************************************************************/
14493static int 14529static int
14494wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data) 14530wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
14495{ 14531{
14496 int rv; 14532 int rv;
14497 uint32_t flash_bank = 0; 14533 uint32_t flash_bank = 0;
14498 uint32_t act_offset = 0; 14534 uint32_t act_offset = 0;
14499 uint32_t bank_offset = 0; 14535 uint32_t bank_offset = 0;
14500 uint16_t word = 0; 14536 uint16_t word = 0;
14501 uint16_t i = 0; 14537 uint16_t i = 0;
14502 14538
14503 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n", 14539 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14504 device_xname(sc->sc_dev), __func__)); 14540 device_xname(sc->sc_dev), __func__));
14505 14541
14506 rv = sc->nvm.acquire(sc); 14542 rv = sc->nvm.acquire(sc);
14507 if (rv != 0) 14543 if (rv != 0)
14508 return rv; 14544 return rv;
14509 14545
14510 /* 14546 /*
14511 * We need to know which is the valid flash bank. In the event 14547 * We need to know which is the valid flash bank. In the event
14512 * that we didn't allocate eeprom_shadow_ram, we may not be 14548 * that we didn't allocate eeprom_shadow_ram, we may not be
14513 * managing flash_bank. So it cannot be trusted and needs 14549 * managing flash_bank. So it cannot be trusted and needs
14514 * to be updated with each read. 14550 * to be updated with each read.
14515 */ 14551 */
14516 rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank); 14552 rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
14517 if (rv) { 14553 if (rv) {
14518 DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n", 14554 DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
14519 device_xname(sc->sc_dev))); 14555 device_xname(sc->sc_dev)));
14520 flash_bank = 0; 14556 flash_bank = 0;
14521 } 14557 }
14522 14558
14523 /* 14559 /*
14524 * Adjust offset appropriately if we're on bank 1 - adjust for word 14560 * Adjust offset appropriately if we're on bank 1 - adjust for word
14525 * size 14561 * size
14526 */ 14562 */
14527 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2); 14563 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
14528 14564
14529 for (i = 0; i < words; i++) { 14565 for (i = 0; i < words; i++) {
14530 /* The NVM part needs a byte offset, hence * 2 */ 14566 /* The NVM part needs a byte offset, hence * 2 */
14531 act_offset = bank_offset + ((offset + i) * 2); 14567 act_offset = bank_offset + ((offset + i) * 2);
14532 rv = wm_read_ich8_word(sc, act_offset, &word); 14568 rv = wm_read_ich8_word(sc, act_offset, &word);
14533 if (rv) { 14569 if (rv) {
14534 aprint_error_dev(sc->sc_dev, 14570 aprint_error_dev(sc->sc_dev,
14535 "%s: failed to read NVM\n", __func__); 14571 "%s: failed to read NVM\n", __func__);
14536 break; 14572 break;
14537 } 14573 }
14538 data[i] = word; 14574 data[i] = word;
14539 } 14575 }
14540 14576
14541 sc->nvm.release(sc); 14577 sc->nvm.release(sc);
14542 return rv; 14578 return rv;
14543} 14579}
14544 14580
14545/****************************************************************************** 14581/******************************************************************************
14546 * Reads a 16 bit word or words from the EEPROM using the SPT's flash access 14582 * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
14547 * register. 14583 * register.
14548 * 14584 *
14549 * sc - Struct containing variables accessed by shared code 14585 * sc - Struct containing variables accessed by shared code
14550 * offset - offset of word in the EEPROM to read 14586 * offset - offset of word in the EEPROM to read
14551 * data - word read from the EEPROM 14587 * data - word read from the EEPROM
14552 * words - number of words to read 14588 * words - number of words to read
14553 *****************************************************************************/ 14589 *****************************************************************************/
14554static int 14590static int
14555wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data) 14591wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
14556{ 14592{
14557 int rv; 14593 int rv;
14558 uint32_t flash_bank = 0; 14594 uint32_t flash_bank = 0;
14559 uint32_t act_offset = 0; 14595 uint32_t act_offset = 0;
14560 uint32_t bank_offset = 0; 14596 uint32_t bank_offset = 0;
14561 uint32_t dword = 0; 14597 uint32_t dword = 0;
14562 uint16_t i = 0; 14598 uint16_t i = 0;
14563 14599
14564 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n", 14600 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14565 device_xname(sc->sc_dev), __func__)); 14601 device_xname(sc->sc_dev), __func__));
14566 14602
14567 rv = sc->nvm.acquire(sc); 14603 rv = sc->nvm.acquire(sc);
14568 if (rv != 0) 14604 if (rv != 0)
14569 return rv; 14605 return rv;
14570 14606
14571 /* 14607 /*
14572 * We need to know which is the valid flash bank. In the event 14608 * We need to know which is the valid flash bank. In the event
14573 * that we didn't allocate eeprom_shadow_ram, we may not be 14609 * that we didn't allocate eeprom_shadow_ram, we may not be
14574 * managing flash_bank. So it cannot be trusted and needs 14610 * managing flash_bank. So it cannot be trusted and needs
14575 * to be updated with each read. 14611 * to be updated with each read.
14576 */ 14612 */
14577 rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank); 14613 rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
14578 if (rv) { 14614 if (rv) {
14579 DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n", 14615 DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
14580 device_xname(sc->sc_dev))); 14616 device_xname(sc->sc_dev)));
14581 flash_bank = 0; 14617 flash_bank = 0;
14582 } 14618 }
14583 14619
14584 /* 14620 /*
14585 * Adjust offset appropriately if we're on bank 1 - adjust for word 14621 * Adjust offset appropriately if we're on bank 1 - adjust for word
14586 * size 14622 * size
14587 */ 14623 */
14588 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2); 14624 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
14589 14625
14590 for (i = 0; i < words; i++) { 14626 for (i = 0; i < words; i++) {
14591 /* The NVM part needs a byte offset, hence * 2 */ 14627 /* The NVM part needs a byte offset, hence * 2 */
14592 act_offset = bank_offset + ((offset + i) * 2); 14628 act_offset = bank_offset + ((offset + i) * 2);
14593 /* but we must read dword aligned, so mask ... */ 14629 /* but we must read dword aligned, so mask ... */
14594 rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword); 14630 rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
14595 if (rv) { 14631 if (rv) {
14596 aprint_error_dev(sc->sc_dev, 14632 aprint_error_dev(sc->sc_dev,
14597 "%s: failed to read NVM\n", __func__); 14633 "%s: failed to read NVM\n", __func__);
14598 break; 14634 break;
14599 } 14635 }
14600 /* ... and pick out low or high word */ 14636 /* ... and pick out low or high word */
14601 if ((act_offset & 0x2) == 0) 14637 if ((act_offset & 0x2) == 0)
14602 data[i] = (uint16_t)(dword & 0xFFFF); 14638 data[i] = (uint16_t)(dword & 0xFFFF);
14603 else 14639 else
14604 data[i] = (uint16_t)((dword >> 16) & 0xFFFF); 14640 data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
14605 } 14641 }
14606 14642
14607 sc->nvm.release(sc); 14643 sc->nvm.release(sc);
14608 return rv; 14644 return rv;
14609} 14645}
14610 14646
14611/* iNVM */ 14647/* iNVM */
14612 14648
14613static int 14649static int
14614wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data) 14650wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
14615{ 14651{
14616 int32_t rv = 0; 14652 int32_t rv = 0;
14617 uint32_t invm_dword; 14653 uint32_t invm_dword;
14618 uint16_t i; 14654 uint16_t i;
14619 uint8_t record_type, word_address; 14655 uint8_t record_type, word_address;
14620 14656
14621 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n", 14657 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14622 device_xname(sc->sc_dev), __func__)); 14658 device_xname(sc->sc_dev), __func__));
14623 14659
14624 for (i = 0; i < INVM_SIZE; i++) { 14660 for (i = 0; i < INVM_SIZE; i++) {
14625 invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i)); 14661 invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
14626 /* Get record type */ 14662 /* Get record type */
14627 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword); 14663 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
14628 if (record_type == INVM_UNINITIALIZED_STRUCTURE) 14664 if (record_type == INVM_UNINITIALIZED_STRUCTURE)
14629 break; 14665 break;
14630 if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE) 14666 if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
14631 i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS; 14667 i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
14632 if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE) 14668 if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
14633 i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS; 14669 i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
14634 if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) { 14670 if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
14635 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword); 14671 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
14636 if (word_address == address) { 14672 if (word_address == address) {
14637 *data = INVM_DWORD_TO_WORD_DATA(invm_dword); 14673 *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
14638 rv = 0; 14674 rv = 0;
14639 break; 14675 break;
14640 } 14676 }
14641 } 14677 }
14642 } 14678 }
14643 14679
14644 return rv; 14680 return rv;
14645} 14681}
14646 14682
14647static int 14683static int
14648wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data) 14684wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
14649{ 14685{
14650 int i, rv; 14686 int i, rv;
14651 14687
14652 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n", 14688 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14653 device_xname(sc->sc_dev), __func__)); 14689 device_xname(sc->sc_dev), __func__));
14654 14690
14655 rv = sc->nvm.acquire(sc); 14691 rv = sc->nvm.acquire(sc);
14656 if (rv != 0) 14692 if (rv != 0)
14657 return rv; 14693 return rv;
14658 14694
14659 for (i = 0; i < words; i++) { 14695 for (i = 0; i < words; i++) {
14660 switch (offset + i) { 14696 switch (offset + i) {
14661 case NVM_OFF_MACADDR: 14697 case NVM_OFF_MACADDR:
14662 case NVM_OFF_MACADDR1: 14698 case NVM_OFF_MACADDR1:
14663 case NVM_OFF_MACADDR2: 14699 case NVM_OFF_MACADDR2:
14664 rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]); 14700 rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
14665 if (rv != 0) { 14701 if (rv != 0) {
14666 data[i] = 0xffff; 14702 data[i] = 0xffff;
14667 rv = -1; 14703 rv = -1;
14668 } 14704 }
14669 break; 14705 break;
14670 case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */ 14706 case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
14671 rv = wm_nvm_read_word_invm(sc, offset, data); 14707 rv = wm_nvm_read_word_invm(sc, offset, data);
14672 if (rv != 0) { 14708 if (rv != 0) {
14673 *data = INVM_DEFAULT_AL; 14709 *data = INVM_DEFAULT_AL;
14674 rv = 0; 14710 rv = 0;
14675 } 14711 }
14676 break; 14712 break;
14677 case NVM_OFF_CFG2: 14713 case NVM_OFF_CFG2:
14678 rv = wm_nvm_read_word_invm(sc, offset, data); 14714 rv = wm_nvm_read_word_invm(sc, offset, data);
14679 if (rv != 0) { 14715 if (rv != 0) {
14680 *data = NVM_INIT_CTRL_2_DEFAULT_I211; 14716 *data = NVM_INIT_CTRL_2_DEFAULT_I211;
14681 rv = 0; 14717 rv = 0;
14682 } 14718 }
14683 break; 14719 break;
14684 case NVM_OFF_CFG4: 14720 case NVM_OFF_CFG4:
14685 rv = wm_nvm_read_word_invm(sc, offset, data); 14721 rv = wm_nvm_read_word_invm(sc, offset, data);
14686 if (rv != 0) { 14722 if (rv != 0) {
14687 *data = NVM_INIT_CTRL_4_DEFAULT_I211; 14723 *data = NVM_INIT_CTRL_4_DEFAULT_I211;
14688 rv = 0; 14724 rv = 0;
14689 } 14725 }
14690 break; 14726 break;
14691 case NVM_OFF_LED_1_CFG: 14727 case NVM_OFF_LED_1_CFG:
14692 rv = wm_nvm_read_word_invm(sc, offset, data); 14728 rv = wm_nvm_read_word_invm(sc, offset, data);
14693 if (rv != 0) { 14729 if (rv != 0) {
14694 *data = NVM_LED_1_CFG_DEFAULT_I211; 14730 *data = NVM_LED_1_CFG_DEFAULT_I211;
14695 rv = 0; 14731 rv = 0;
14696 } 14732 }
14697 break; 14733 break;
14698 case NVM_OFF_LED_0_2_CFG: 14734 case NVM_OFF_LED_0_2_CFG:
14699 rv = wm_nvm_read_word_invm(sc, offset, data); 14735 rv = wm_nvm_read_word_invm(sc, offset, data);
14700 if (rv != 0) { 14736 if (rv != 0) {
14701 *data = NVM_LED_0_2_CFG_DEFAULT_I211; 14737 *data = NVM_LED_0_2_CFG_DEFAULT_I211;
14702 rv = 0; 14738 rv = 0;
14703 } 14739 }
14704 break; 14740 break;
14705 case NVM_OFF_ID_LED_SETTINGS: 14741 case NVM_OFF_ID_LED_SETTINGS:
14706 rv = wm_nvm_read_word_invm(sc, offset, data); 14742 rv = wm_nvm_read_word_invm(sc, offset, data);
14707 if (rv != 0) { 14743 if (rv != 0) {
14708 *data = ID_LED_RESERVED_FFFF; 14744 *data = ID_LED_RESERVED_FFFF;
14709 rv = 0; 14745 rv = 0;
14710 } 14746 }
14711 break; 14747 break;
14712 default: 14748 default:
14713 DPRINTF(sc, WM_DEBUG_NVM, 14749 DPRINTF(sc, WM_DEBUG_NVM,
14714 ("NVM word 0x%02x is not mapped.\n", offset)); 14750 ("NVM word 0x%02x is not mapped.\n", offset));
14715 *data = NVM_RESERVED_WORD; 14751 *data = NVM_RESERVED_WORD;
14716 break; 14752 break;
14717 } 14753 }
14718 } 14754 }
14719 14755
14720 sc->nvm.release(sc); 14756 sc->nvm.release(sc);
14721 return rv; 14757 return rv;
14722} 14758}
14723 14759
14724/* Lock, detecting NVM type, validate checksum, version and read */ 14760/* Lock, detecting NVM type, validate checksum, version and read */
14725 14761
14726static int 14762static int
14727wm_nvm_is_onboard_eeprom(struct wm_softc *sc) 14763wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
14728{ 14764{
14729 uint32_t eecd = 0; 14765 uint32_t eecd = 0;
14730 14766
14731 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574 14767 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
14732 || sc->sc_type == WM_T_82583) { 14768 || sc->sc_type == WM_T_82583) {
14733 eecd = CSR_READ(sc, WMREG_EECD); 14769 eecd = CSR_READ(sc, WMREG_EECD);
14734 14770
14735 /* Isolate bits 15 & 16 */ 14771 /* Isolate bits 15 & 16 */
14736 eecd = ((eecd >> 15) & 0x03); 14772 eecd = ((eecd >> 15) & 0x03);
14737 14773
14738 /* If both bits are set, device is Flash type */ 14774 /* If both bits are set, device is Flash type */
14739 if (eecd == 0x03) 14775 if (eecd == 0x03)
14740 return 0; 14776 return 0;
14741 } 14777 }
14742 return 1; 14778 return 1;
14743} 14779}
14744 14780
14745static int 14781static int
14746wm_nvm_flash_presence_i210(struct wm_softc *sc) 14782wm_nvm_flash_presence_i210(struct wm_softc *sc)
14747{ 14783{
14748 uint32_t eec; 14784 uint32_t eec;
14749 14785
14750 eec = CSR_READ(sc, WMREG_EEC); 14786 eec = CSR_READ(sc, WMREG_EEC);
14751 if ((eec & EEC_FLASH_DETECTED) != 0) 14787 if ((eec & EEC_FLASH_DETECTED) != 0)
14752 return 1; 14788 return 1;
14753 14789
14754 return 0; 14790 return 0;
14755} 14791}
14756 14792
14757/* 14793/*
14758 * wm_nvm_validate_checksum 14794 * wm_nvm_validate_checksum
14759 * 14795 *
14760 * The checksum is defined as the sum of the first 64 (16 bit) words. 14796 * The checksum is defined as the sum of the first 64 (16 bit) words.
14761 */ 14797 */
14762static int 14798static int
14763wm_nvm_validate_checksum(struct wm_softc *sc) 14799wm_nvm_validate_checksum(struct wm_softc *sc)
14764{ 14800{
14765 uint16_t checksum; 14801 uint16_t checksum;
14766 uint16_t eeprom_data; 14802 uint16_t eeprom_data;
14767#ifdef WM_DEBUG 14803#ifdef WM_DEBUG
14768 uint16_t csum_wordaddr, valid_checksum; 14804 uint16_t csum_wordaddr, valid_checksum;
14769#endif 14805#endif
14770 int i; 14806 int i;
14771 14807
14772 checksum = 0; 14808 checksum = 0;
14773 14809
14774 /* Don't check for I211 */ 14810 /* Don't check for I211 */
14775 if (sc->sc_type == WM_T_I211) 14811 if (sc->sc_type == WM_T_I211)
14776 return 0; 14812 return 0;
14777 14813
14778#ifdef WM_DEBUG 14814#ifdef WM_DEBUG
14779 if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT) || 14815 if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT) ||
14780 (sc->sc_type == WM_T_PCH_CNP) || (sc->sc_type == WM_T_PCH_TGP)) { 14816 (sc->sc_type == WM_T_PCH_CNP) || (sc->sc_type == WM_T_PCH_TGP)) {
14781 csum_wordaddr = NVM_OFF_COMPAT; 14817 csum_wordaddr = NVM_OFF_COMPAT;
14782 valid_checksum = NVM_COMPAT_VALID_CHECKSUM; 14818 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
14783 } else { 14819 } else {
14784 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1; 14820 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
14785 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM; 14821 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
14786 } 14822 }
14787 14823
14788 /* Dump EEPROM image for debug */ 14824 /* Dump EEPROM image for debug */
14789 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) 14825 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
14790 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) 14826 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
14791 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) { 14827 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
14792 /* XXX PCH_SPT? */ 14828 /* XXX PCH_SPT? */
14793 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data); 14829 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
14794 if ((eeprom_data & valid_checksum) == 0) 14830 if ((eeprom_data & valid_checksum) == 0)
14795 DPRINTF(sc, WM_DEBUG_NVM, 14831 DPRINTF(sc, WM_DEBUG_NVM,
14796 ("%s: NVM need to be updated (%04x != %04x)\n", 14832 ("%s: NVM need to be updated (%04x != %04x)\n",
14797 device_xname(sc->sc_dev), eeprom_data, 14833 device_xname(sc->sc_dev), eeprom_data,
14798 valid_checksum)); 14834 valid_checksum));
14799 } 14835 }
14800 14836
14801 if ((sc->sc_debug & WM_DEBUG_NVM) != 0) { 14837 if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
14802 printf("%s: NVM dump:\n", device_xname(sc->sc_dev)); 14838 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
14803 for (i = 0; i < NVM_SIZE; i++) { 14839 for (i = 0; i < NVM_SIZE; i++) {
14804 if (wm_nvm_read(sc, i, 1, &eeprom_data)) 14840 if (wm_nvm_read(sc, i, 1, &eeprom_data))
14805 printf("XXXX "); 14841 printf("XXXX ");
14806 else 14842 else
14807 printf("%04hx ", eeprom_data); 14843 printf("%04hx ", eeprom_data);
14808 if (i % 8 == 7) 14844 if (i % 8 == 7)
14809 printf("\n"); 14845 printf("\n");
14810 } 14846 }
14811 } 14847 }
14812 14848
14813#endif /* WM_DEBUG */ 14849#endif /* WM_DEBUG */
14814 14850
14815 for (i = 0; i < NVM_SIZE; i++) { 14851 for (i = 0; i < NVM_SIZE; i++) {
14816 if (wm_nvm_read(sc, i, 1, &eeprom_data)) 14852 if (wm_nvm_read(sc, i, 1, &eeprom_data))
14817 return -1; 14853 return -1;
14818 checksum += eeprom_data; 14854 checksum += eeprom_data;
14819 } 14855 }
14820 14856
14821 if (checksum != (uint16_t) NVM_CHECKSUM) { 14857 if (checksum != (uint16_t) NVM_CHECKSUM) {
14822#ifdef WM_DEBUG 14858#ifdef WM_DEBUG
14823 printf("%s: NVM checksum mismatch (%04x != %04x)\n", 14859 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
14824 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM); 14860 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
14825#endif 14861#endif
14826 } 14862 }
14827 14863
14828 return 0; 14864 return 0;
14829} 14865}
14830 14866
14831static void 14867static void
14832wm_nvm_version_invm(struct wm_softc *sc) 14868wm_nvm_version_invm(struct wm_softc *sc)
14833{ 14869{
14834 uint32_t dword; 14870 uint32_t dword;
14835 14871
14836 /* 14872 /*
14837 * Linux's code to decode version is very strange, so we don't 14873 * Linux's code to decode version is very strange, so we don't
14838 * obey that algorithm and just use word 61 as the document. 14874 * obey that algorithm and just use word 61 as the document.
14839 * Perhaps it's not perfect though... 14875 * Perhaps it's not perfect though...
14840 * 14876 *
14841 * Example: 14877 * Example:
14842 * 14878 *
14843 * Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6) 14879 * Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
14844 */ 14880 */
14845 dword = CSR_READ(sc, WM_INVM_DATA_REG(61)); 14881 dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
14846 dword = __SHIFTOUT(dword, INVM_VER_1); 14882 dword = __SHIFTOUT(dword, INVM_VER_1);
14847 sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR); 14883 sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
14848 sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR); 14884 sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
14849} 14885}
14850 14886
/*
 * wm_nvm_version:
 *
 *	Decode the NVM image version (plus, when available, the option
 *	ROM version and the Image Unique ID), store the version in
 *	sc->sc_nvm_ver_{major,minor,build} and print everything with
 *	aprint_verbose().
 */
static void
wm_nvm_version(struct wm_softc *sc)
{
	uint16_t major, minor, build, patch;
	uint16_t uid0, uid1;
	uint16_t nvm_data;
	uint16_t off;
	bool check_version = false;
	bool check_optionrom = false;
	bool have_build = false;
	bool have_uid = true;

	/*
	 * Version format:
	 *
	 * XYYZ
	 * X0YZ
	 * X0YY
	 *
	 * Example:
	 *
	 * 82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
	 * 82571	0x50a6	5.10.6?
	 * 82572	0x506a	5.6.10?
	 * 82572EI	0x5069	5.6.9?
	 * 82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
	 *		0x2013	2.1.3?
	 * 82583	0x10a0	1.10.0? (document says it's default value)
	 * ICH8+82567	0x0040	0.4.0?
	 * ICH9+82566	0x1040	1.4.0?
	 *ICH10+82567	0x0043	0.4.3?
	 * PCH+82577	0x00c1	0.12.1?
	 * PCH2+82579	0x00d3	0.13.3?
	 *		0x00d4	0.13.4?
	 * LPT+I218	0x0023	0.2.3?
	 * SPT+I219	0x0084	0.8.4?
	 * CNP+I219	0x0054	0.5.4?
	 */

	/*
	 * XXX
	 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
	 * I've never seen real 82574 hardware with such small SPI ROM.
	 */
	/* uid1 is also the upper 16 bits of the Image Unique ID below. */
	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
		have_uid = false;

	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82574:
	case WM_T_82583:
		check_version = true;
		check_optionrom = true;
		have_build = true;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
	case WM_T_PCH_SPT:
	case WM_T_PCH_CNP:
	case WM_T_PCH_TGP:
		check_version = true;
		have_build = true;
		have_uid = false;
		break;
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
			check_version = true;
		break;
	case WM_T_I211:
		/* I211 keeps its version in the iNVM instead. */
		wm_nvm_version_invm(sc);
		have_uid = false;
		goto printver;
	case WM_T_I210:
		if (!wm_nvm_flash_presence_i210(sc)) {
			/* Flash-less I210: version comes from the iNVM. */
			wm_nvm_version_invm(sc);
			have_uid = false;
			goto printver;
		}
		/* FALLTHROUGH */
	case WM_T_I350:
	case WM_T_I354:
		check_version = true;
		check_optionrom = true;
		break;
	default:
		return;
	}
	if (check_version
	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
			build = nvm_data & NVM_BUILD_MASK;
			have_build = true;
		} else
			minor = nvm_data & 0x00ff;

		/* Decimal */
		minor = (minor / 16) * 10 + (minor % 16);
		sc->sc_nvm_ver_major = major;
		sc->sc_nvm_ver_minor = minor;

printver:	/* Entered directly for iNVM parts (I210 flash-less, I211). */
		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
		    sc->sc_nvm_ver_minor);
		if (have_build) {
			sc->sc_nvm_ver_build = build;
			aprint_verbose(".%d", build);
		}
	}

	/* Assume the Option ROM area is at above NVM_SIZE */
	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
		/* Option ROM Version */
		if ((off != 0x0000) && (off != 0xffff)) {
			int rv;
			/*
			 * Separate variables: uid1 (read above) must not
			 * be clobbered here, since it supplies the upper
			 * 16 bits of the Image Unique ID printed below.
			 */
			uint16_t oid0, oid1;

			off += NVM_COMBO_VER_OFF;
			rv = wm_nvm_read(sc, off + 1, 1, &oid1);
			rv |= wm_nvm_read(sc, off, 1, &oid0);
			if ((rv == 0) && (oid0 != 0) && (oid0 != 0xffff)
			    && (oid1 != 0) && (oid1 != 0xffff)) {
				/* 16bits */
				major = oid0 >> 8;
				build = (oid0 << 8) | (oid1 >> 8);
				patch = oid1 & 0x00ff;
				aprint_verbose(", option ROM Version %d.%d.%d",
				    major, build, patch);
			}
		}
	}

	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
		aprint_verbose(", Image Unique ID %08x",
		    ((uint32_t)uid1 << 16) | uid0);
}
14996 15033
14997/* 15034/*
14998 * wm_nvm_read: 15035 * wm_nvm_read:
14999 * 15036 *
15000 * Read data from the serial EEPROM. 15037 * Read data from the serial EEPROM.
15001 */ 15038 */
15002static int 15039static int
15003wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data) 15040wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
15004{ 15041{
15005 int rv; 15042 int rv;
15006 15043
15007 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n", 15044 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
15008 device_xname(sc->sc_dev), __func__)); 15045 device_xname(sc->sc_dev), __func__));
15009 15046
15010 if (sc->sc_flags & WM_F_EEPROM_INVALID) 15047 if (sc->sc_flags & WM_F_EEPROM_INVALID)
15011 return -1; 15048 return -1;
15012 15049
15013 rv = sc->nvm.read(sc, word, wordcnt, data); 15050 rv = sc->nvm.read(sc, word, wordcnt, data);
15014 15051
15015 return rv; 15052 return rv;
15016} 15053}
15017 15054
15018/* 15055/*
15019 * Hardware semaphores. 15056 * Hardware semaphores.
15020 * Very complexed... 15057 * Very complexed...
15021 */ 15058 */
15022 15059
15023static int 15060static int
15024wm_get_null(struct wm_softc *sc) 15061wm_get_null(struct wm_softc *sc)
15025{ 15062{
15026 15063
15027 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n", 15064 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15028 device_xname(sc->sc_dev), __func__)); 15065 device_xname(sc->sc_dev), __func__));
15029 return 0; 15066 return 0;
15030} 15067}
15031 15068
15032static void 15069static void
15033wm_put_null(struct wm_softc *sc) 15070wm_put_null(struct wm_softc *sc)
15034{ 15071{
15035 15072
15036 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n", 15073 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15037 device_xname(sc->sc_dev), __func__)); 15074 device_xname(sc->sc_dev), __func__));
15038 return; 15075 return;
15039} 15076}
15040 15077
/*
 * wm_get_eecd:
 *
 *	Request direct software access to the EEPROM through the EECD
 *	register and poll for the hardware grant.  Returns 0 on success,
 *	-1 if the grant was never observed (in which case the request
 *	bit is withdrawn again before returning).
 */
static int
wm_get_eecd(struct wm_softc *sc)
{
	uint32_t reg;
	int x;

	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));

	reg = CSR_READ(sc, WMREG_EECD);

	/* Request EEPROM access. */
	reg |= EECD_EE_REQ;
	CSR_WRITE(sc, WMREG_EECD, reg);

	/* ..and wait for it to be granted. */
	for (x = 0; x < 1000; x++) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_GNT)
			break;
		delay(5);	/* up to 1000 polls, 5us apart */
	}
	if ((reg & EECD_EE_GNT) == 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not acquire EEPROM GNT\n");
		/* Withdraw our request so firmware can use the EEPROM. */
		reg &= ~EECD_EE_REQ;
		CSR_WRITE(sc, WMREG_EECD, reg);
		return -1;
	}

	return 0;
}
15073 15110
15074static void 15111static void
15075wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd) 15112wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
15076{ 15113{
15077 15114
15078 *eecd |= EECD_SK; 15115 *eecd |= EECD_SK;
15079 CSR_WRITE(sc, WMREG_EECD, *eecd); 15116 CSR_WRITE(sc, WMREG_EECD, *eecd);
15080 CSR_WRITE_FLUSH(sc); 15117 CSR_WRITE_FLUSH(sc);
15081 if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) 15118 if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
15082 delay(1); 15119 delay(1);
15083 else 15120 else
15084 delay(50); 15121 delay(50);
15085} 15122}
15086 15123
15087static void 15124static void
15088wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd) 15125wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
15089{ 15126{
15090 15127
15091 *eecd &= ~EECD_SK; 15128 *eecd &= ~EECD_SK;
15092 CSR_WRITE(sc, WMREG_EECD, *eecd); 15129 CSR_WRITE(sc, WMREG_EECD, *eecd);
15093 CSR_WRITE_FLUSH(sc); 15130 CSR_WRITE_FLUSH(sc);
15094 if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) 15131 if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
15095 delay(1); 15132 delay(1);
15096 else 15133 else
15097 delay(50); 15134 delay(50);
15098} 15135}
15099 15136
/*
 * wm_put_eecd:
 *
 *	Deselect the EEPROM (SPI: CS high; Microwire: CS low plus one
 *	clock pulse) and then drop the software request bit so firmware
 *	may access the part again.  Counterpart of wm_get_eecd().
 */
static void
wm_put_eecd(struct wm_softc *sc)
{
	uint32_t reg;

	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));

	/* Stop nvm */
	reg = CSR_READ(sc, WMREG_EECD);
	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
		/* Pull CS high */
		reg |= EECD_CS;
		wm_nvm_eec_clock_lower(sc, &reg);
	} else {
		/* CS on Microwire is active-high */
		reg &= ~(EECD_CS | EECD_DI);
		CSR_WRITE(sc, WMREG_EECD, reg);
		wm_nvm_eec_clock_raise(sc, &reg);
		wm_nvm_eec_clock_lower(sc, &reg);
	}

	/* Release our access request so firmware can use the EEPROM. */
	reg = CSR_READ(sc, WMREG_EECD);
	reg &= ~EECD_EE_REQ;
	CSR_WRITE(sc, WMREG_EECD, reg);

	return;
}
15128 15165
/*
 * Get hardware semaphore.
 * Same as e1000_get_hw_semaphore_generic()
 *
 * Acquires first the SW bit (SMBI) and then the SW/FW bit (SWESMBI)
 * in the SWSM register.  Returns 0 on success, -1 on timeout; on
 * failure nothing is left held.
 */
static int
wm_get_swsm_semaphore(struct wm_softc *sc)
{
	int32_t timeout;
	uint32_t swsm;

	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));
	/* The poll counts below are scaled by the NVM size. */
	KASSERT(sc->sc_nvm_wordsize > 0);

retry:
	/* Get the SW semaphore. */
	timeout = sc->sc_nvm_wordsize + 1;
	while (timeout) {
		swsm = CSR_READ(sc, WMREG_SWSM);

		/* SMBI reading back as 0 means the semaphore is free. */
		if ((swsm & SWSM_SMBI) == 0)
			break;

		delay(50);
		timeout--;
	}

	if (timeout == 0) {
		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
			/*
			 * In rare circumstances, the SW semaphore may already
			 * be held unintentionally. Clear the semaphore once
			 * before giving up.
			 */
			sc->sc_flags &= ~WM_F_WA_I210_CLSEM; /* one-shot */
			wm_put_swsm_semaphore(sc);
			goto retry;
		}
		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
		return -1;
	}

	/* Get the FW semaphore. */
	timeout = sc->sc_nvm_wordsize + 1;
	while (timeout) {
		swsm = CSR_READ(sc, WMREG_SWSM);
		swsm |= SWSM_SWESMBI;
		CSR_WRITE(sc, WMREG_SWSM, swsm);
		/* If we managed to set the bit we got the semaphore. */
		swsm = CSR_READ(sc, WMREG_SWSM);
		if (swsm & SWSM_SWESMBI)
			break;

		delay(50);
		timeout--;
	}

	if (timeout == 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not acquire SWSM SWESMBI\n");
		/* Release semaphores */
		wm_put_swsm_semaphore(sc);
		return -1;
	}
	return 0;
}
15195 15232
15196/* 15233/*
15197 * Put hardware semaphore. 15234 * Put hardware semaphore.
15198 * Same as e1000_put_hw_semaphore_generic() 15235 * Same as e1000_put_hw_semaphore_generic()
15199 */ 15236 */
15200static void 15237static void
15201wm_put_swsm_semaphore(struct wm_softc *sc) 15238wm_put_swsm_semaphore(struct wm_softc *sc)
15202{ 15239{
15203 uint32_t swsm; 15240 uint32_t swsm;
15204 15241
15205 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n", 15242 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15206 device_xname(sc->sc_dev), __func__)); 15243 device_xname(sc->sc_dev), __func__));
15207 15244
15208 swsm = CSR_READ(sc, WMREG_SWSM); 15245 swsm = CSR_READ(sc, WMREG_SWSM);
15209 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI); 15246 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
15210 CSR_WRITE(sc, WMREG_SWSM, swsm); 15247 CSR_WRITE(sc, WMREG_SWSM, swsm);
15211} 15248}
15212 15249
/*
 * Get SW/FW semaphore.
 * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
 *
 * "mask" selects the shared resource; both its SW and FW bits in
 * SW_FW_SYNC must be clear before we may claim the SW bit.  Access
 * to SW_FW_SYNC itself is protected by the SWSM semaphore.
 * Returns 0 on success, -1 on failure.
 */
static int
wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
{
	uint32_t swfw_sync;
	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;

	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));

	if (sc->sc_type == WM_T_80003)
		timeout = 50;
	else
		timeout = 200;

	while (timeout) {
		if (wm_get_swsm_semaphore(sc)) {
			aprint_error_dev(sc->sc_dev,
			    "%s: failed to get semaphore\n",
			    __func__);
			return -1;
		}
		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
		if ((swfw_sync & (swmask | fwmask)) == 0) {
			/* Resource is free; claim our SW bit. */
			swfw_sync |= swmask;
			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
			wm_put_swsm_semaphore(sc);
			return 0;
		}
		/* Busy: drop SWSM while we back off, then retry. */
		wm_put_swsm_semaphore(sc);
		delay(5000);
		timeout--;
	}
	device_printf(sc->sc_dev,
	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
	    mask, swfw_sync);
	return -1;
}
15256 15293
/*
 * wm_put_swfw_semaphore:
 *
 *	Release the SW bit of "mask" in SW_FW_SYNC.  Spins without
 *	bound on the SWSM semaphore: a release must not fail, so we
 *	keep retrying until we can update the register.
 */
static void
wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
{
	uint32_t swfw_sync;

	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));

	while (wm_get_swsm_semaphore(sc) != 0)
		continue;

	/* Clear only our SW bit; firmware's bits are left alone. */
	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);

	wm_put_swsm_semaphore(sc);
}
15274 15311
/*
 * wm_get_nvm_80003:
 *
 *	Acquire NVM access for 80003-class chips: first the SW/FW
 *	EEPROM semaphore, then (when WM_F_LOCK_EECD is set) the EECD
 *	grant.  Returns 0 on success, non-zero on failure with nothing
 *	left held.
 */
static int
wm_get_nvm_80003(struct wm_softc *sc)
{
	int rv;

	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));

	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "%s: failed to get semaphore(SWFW)\n", __func__);
		return rv;
	}

	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
	    && (rv = wm_get_eecd(sc)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "%s: failed to get semaphore(EECD)\n", __func__);
		/* Don't hold the SWFW semaphore on a partial failure. */
		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
		return rv;
	}

	return 0;
}
15299 15336
/*
 * wm_put_nvm_80003:
 *
 *	Release NVM access in reverse order of acquisition: the EECD
 *	grant (if taken), then the SW/FW EEPROM semaphore.
 */
static void
wm_put_nvm_80003(struct wm_softc *sc)
{

	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));

	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
		wm_put_eecd(sc);
	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
}
15311 15348
15312static int 15349static int
15313wm_get_nvm_82571(struct wm_softc *sc) 15350wm_get_nvm_82571(struct wm_softc *sc)
15314{ 15351{
15315 int rv; 15352 int rv;
15316 15353
15317 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n", 15354 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15318 device_xname(sc->sc_dev), __func__)); 15355 device_xname(sc->sc_dev), __func__));
15319 15356
15320 if ((rv = wm_get_swsm_semaphore(sc)) != 0) 15357 if ((rv = wm_get_swsm_semaphore(sc)) != 0)
15321 return rv; 15358 return rv;
15322 15359
15323 switch (sc->sc_type) { 15360 switch (sc->sc_type) {
15324 case WM_T_82573: 15361 case WM_T_82573:
15325 break; 15362 break;
15326 default: 15363 default:
15327 if ((sc->sc_flags & WM_F_LOCK_EECD) != 0) 15364 if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15328 rv = wm_get_eecd(sc); 15365 rv = wm_get_eecd(sc);
15329 break; 15366 break;
15330 } 15367 }
15331 15368
15332 if (rv != 0) { 15369 if (rv != 0) {
15333 aprint_error_dev(sc->sc_dev, 15370 aprint_error_dev(sc->sc_dev,
15334 "%s: failed to get semaphore\n", 15371 "%s: failed to get semaphore\n",
15335 __func__); 15372 __func__);
15336 wm_put_swsm_semaphore(sc); 15373 wm_put_swsm_semaphore(sc);
15337 } 15374 }
15338 15375
15339 return rv; 15376 return rv;
15340} 15377}
15341 15378
15342static void 15379static void
15343wm_put_nvm_82571(struct wm_softc *sc) 15380wm_put_nvm_82571(struct wm_softc *sc)
15344{ 15381{
15345 15382
15346 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n", 15383 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15347 device_xname(sc->sc_dev), __func__)); 15384 device_xname(sc->sc_dev), __func__));
15348 15385
15349 switch (sc->sc_type) { 15386 switch (sc->sc_type) {
15350 case WM_T_82573: 15387 case WM_T_82573:
15351 break; 15388 break;
15352 default: 15389 default:
15353 if ((sc->sc_flags & WM_F_LOCK_EECD) != 0) 15390 if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15354 wm_put_eecd(sc); 15391 wm_put_eecd(sc);
15355 break; 15392 break;
15356 } 15393 }
15357 15394
15358 wm_put_swsm_semaphore(sc); 15395 wm_put_swsm_semaphore(sc);
15359} 15396}
15360 15397
/*
 * wm_get_phy_82575:
 *
 *	Acquire the PHY semaphore for this port; the SW_FW_SYNC mask
 *	is selected by the PCI function id.  Returns 0 on success.
 */
static int
wm_get_phy_82575(struct wm_softc *sc)
{

	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));
	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
}
15369 15406
/*
 * wm_put_phy_82575:
 *
 *	Release the per-function PHY semaphore taken by
 *	wm_get_phy_82575().
 */
static void
wm_put_phy_82575(struct wm_softc *sc)
{

	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));
	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
}
15378 15415
15379static int 15416static int
15380wm_get_swfwhw_semaphore(struct wm_softc *sc) 15417wm_get_swfwhw_semaphore(struct wm_softc *sc)
15381{ 15418{
15382 uint32_t ext_ctrl; 15419 uint32_t ext_ctrl;
15383 int timeout = 200; 15420 int timeout = 200;
15384 15421
15385 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n", 15422 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15386 device_xname(sc->sc_dev), __func__)); 15423 device_xname(sc->sc_dev), __func__));
15387 15424
15388 mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */ 15425 mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
15389 for (timeout = 0; timeout < 200; timeout++) { 15426 for (timeout = 0; timeout < 200; timeout++) {
15390 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR); 15427 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15391 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP; 15428 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
15392 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl); 15429 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15393 15430
15394 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR); 15431 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15395 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) 15432 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
15396 return 0; 15433 return 0;
15397 delay(5000); 15434 delay(5000);
15398 } 15435 }
15399 device_printf(sc->sc_dev, 15436 device_printf(sc->sc_dev,
15400 "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl); 15437 "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
15401 mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */ 15438 mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
15402 return -1; 15439 return -1;
15403} 15440}
15404 15441
/*
 * wm_put_swfwhw_semaphore:
 *
 *	Drop MDIO software ownership in EXTCNFCTR and release the
 *	shared ICH PHY mutex taken by wm_get_swfwhw_semaphore().
 */
static void
wm_put_swfwhw_semaphore(struct wm_softc *sc)
{
	uint32_t ext_ctrl;

	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));

	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);

	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
}
15419 15456
/*
 * wm_get_swflag_ich8lan:
 *
 *	Acquire the ICH8+ software flag: take the PHY mutex, wait for
 *	any current MDIO owner to release, then claim ownership and
 *	verify the bit sticks.  Returns 0 with the mutex held on
 *	success; -1 with the mutex released on failure.
 */
static int
wm_get_swflag_ich8lan(struct wm_softc *sc)
{
	uint32_t ext_ctrl;
	int timeout;

	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));
	mutex_enter(sc->sc_ich_phymtx);
	/* Wait for the current owner (if any) to release. */
	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
			break;
		delay(1000);
	}
	if (timeout >= WM_PHY_CFG_TIMEOUT) {
		device_printf(sc->sc_dev,
		    "SW has already locked the resource\n");
		goto out;
	}

	/* Claim ownership; poll until the hardware reflects it. */
	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
	for (timeout = 0; timeout < 1000; timeout++) {
		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
			break;
		delay(1000);
	}
	if (timeout >= 1000) {
		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
		/* Undo our claim before failing. */
		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
		goto out;
	}
	return 0;

out:
	mutex_exit(sc->sc_ich_phymtx);
	return -1;
}
15461 15498
/*
 * wm_put_swflag_ich8lan:
 *
 *	Release the ICH8+ software flag: clear MDIO ownership (warning
 *	if something else already cleared it out from under us) and
 *	drop the PHY mutex.
 */
static void
wm_put_swflag_ich8lan(struct wm_softc *sc)
{
	uint32_t ext_ctrl;

	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));
	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
	} else
		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");

	mutex_exit(sc->sc_ich_phymtx);
}
15478 15515
15479static int 15516static int
15480wm_get_nvm_ich8lan(struct wm_softc *sc) 15517wm_get_nvm_ich8lan(struct wm_softc *sc)
15481{ 15518{
15482 15519
15483 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n", 15520 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15484 device_xname(sc->sc_dev), __func__)); 15521 device_xname(sc->sc_dev), __func__));
15485 mutex_enter(sc->sc_ich_nvmmtx); 15522 mutex_enter(sc->sc_ich_nvmmtx);
15486 15523
15487 return 0; 15524 return 0;
15488} 15525}
15489 15526
15490static void 15527static void
15491wm_put_nvm_ich8lan(struct wm_softc *sc) 15528wm_put_nvm_ich8lan(struct wm_softc *sc)
15492{ 15529{
15493 15530
15494 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n", 15531 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15495 device_xname(sc->sc_dev), __func__)); 15532 device_xname(sc->sc_dev), __func__));
15496 mutex_exit(sc->sc_ich_nvmmtx); 15533 mutex_exit(sc->sc_ich_nvmmtx);
15497} 15534}
15498 15535
/*
 * wm_get_hw_semaphore_82573:
 *
 *	Claim MDIO software ownership in EXTCNFCTR for 82573-class
 *	chips, retrying up to WM_MDIO_OWNERSHIP_TIMEOUT times 2ms
 *	apart.  Returns 0 on success, -1 (after releasing and logging)
 *	on timeout.
 */
static int
wm_get_hw_semaphore_82573(struct wm_softc *sc)
{
	int i = 0;
	uint32_t reg;

	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));

	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
	do {
		/* Try to set the ownership bit; re-read to see if it stuck. */
		CSR_WRITE(sc, WMREG_EXTCNFCTR,
		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
			break;
		delay(2*1000);
		i++;
	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);

	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
		/* Make sure the bit is not left set before failing. */
		wm_put_hw_semaphore_82573(sc);
		log(LOG_ERR, "%s: Driver can't access the PHY\n",
		    device_xname(sc->sc_dev));
		return -1;
	}

	return 0;
}
15528 15565
15529static void 15566static void
15530wm_put_hw_semaphore_82573(struct wm_softc *sc) 15567wm_put_hw_semaphore_82573(struct wm_softc *sc)
15531{ 15568{
15532 uint32_t reg; 15569 uint32_t reg;
15533 15570
15534 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n", 15571 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15535 device_xname(sc->sc_dev), __func__)); 15572 device_xname(sc->sc_dev), __func__));
15536 15573
15537 reg = CSR_READ(sc, WMREG_EXTCNFCTR); 15574 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15538 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP; 15575 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15539 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg); 15576 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
15540} 15577}
15541 15578
15542/* 15579/*
15543 * Management mode and power management related subroutines. 15580 * Management mode and power management related subroutines.
15544 * BMC, AMT, suspend/resume and EEE. 15581 * BMC, AMT, suspend/resume and EEE.
15545 */ 15582 */
15546 15583
15547#ifdef WM_WOL 15584#ifdef WM_WOL
/*
 * wm_check_mng_mode:
 *
 *	Dispatch to the chip-family-specific manageability check.
 *	Returns non-zero when management firmware is active.
 */
static int
wm_check_mng_mode(struct wm_softc *sc)
{
	int rv;

	switch (sc->sc_type) {
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
	case WM_T_PCH_SPT:
	case WM_T_PCH_CNP:
	case WM_T_PCH_TGP:
		rv = wm_check_mng_mode_ich8lan(sc);
		break;
	case WM_T_82574:
	case WM_T_82583:
		rv = wm_check_mng_mode_82574(sc);
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_80003:
		rv = wm_check_mng_mode_generic(sc);
		break;
	default:
		/* Nothing to do */
		rv = 0;
		break;
	}

	return rv;
}
15583 15620
15584static int 15621static int
15585wm_check_mng_mode_ich8lan(struct wm_softc *sc) 15622wm_check_mng_mode_ich8lan(struct wm_softc *sc)
15586{ 15623{
15587 uint32_t fwsm; 15624 uint32_t fwsm;
15588 15625
15589 fwsm = CSR_READ(sc, WMREG_FWSM); 15626 fwsm = CSR_READ(sc, WMREG_FWSM);
15590 15627
15591 if (((fwsm & FWSM_FW_VALID) != 0) 15628 if (((fwsm & FWSM_FW_VALID) != 0)
15592 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE)) 15629 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
15593 return 1; 15630 return 1;
15594 15631
15595 return 0; 15632 return 0;
15596} 15633}
15597 15634
15598static int 15635static int
15599wm_check_mng_mode_82574(struct wm_softc *sc) 15636wm_check_mng_mode_82574(struct wm_softc *sc)
15600{ 15637{
15601 uint16_t data; 15638 uint16_t data;
15602 15639
15603 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data); 15640 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
15604 15641
15605 if ((data & NVM_CFG2_MNGM_MASK) != 0) 15642 if ((data & NVM_CFG2_MNGM_MASK) != 0)
15606 return 1; 15643 return 1;
15607 15644
15608 return 0; 15645 return 0;
15609} 15646}
15610 15647
15611static int 15648static int
15612wm_check_mng_mode_generic(struct wm_softc *sc) 15649wm_check_mng_mode_generic(struct wm_softc *sc)
15613{ 15650{
15614 uint32_t fwsm; 15651 uint32_t fwsm;
15615 15652
15616 fwsm = CSR_READ(sc, WMREG_FWSM); 15653 fwsm = CSR_READ(sc, WMREG_FWSM);
15617 15654
15618 if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE) 15655 if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
15619 return 1; 15656 return 1;
15620 15657
15621 return 0; 15658 return 0;
15622} 15659}
15623#endif /* WM_WOL */ 15660#endif /* WM_WOL */
15624 15661
15625static int 15662static int
15626wm_enable_mng_pass_thru(struct wm_softc *sc) 15663wm_enable_mng_pass_thru(struct wm_softc *sc)
15627{ 15664{
15628 uint32_t manc, fwsm, factps; 15665 uint32_t manc, fwsm, factps;
15629 15666
15630 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0) 15667 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
15631 return 0; 15668 return 0;
15632 15669
15633 manc = CSR_READ(sc, WMREG_MANC); 15670 manc = CSR_READ(sc, WMREG_MANC);
15634 15671
15635 DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n", 15672 DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
15636 device_xname(sc->sc_dev), manc)); 15673 device_xname(sc->sc_dev), manc));
15637 if ((manc & MANC_RECV_TCO_EN) == 0) 15674 if ((manc & MANC_RECV_TCO_EN) == 0)
15638 return 0; 15675 return 0;
15639 15676
15640 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) { 15677 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
15641 fwsm = CSR_READ(sc, WMREG_FWSM); 15678 fwsm = CSR_READ(sc, WMREG_FWSM);
15642 factps = CSR_READ(sc, WMREG_FACTPS); 15679 factps = CSR_READ(sc, WMREG_FACTPS);
15643 if (((factps & FACTPS_MNGCG) == 0) 15680 if (((factps & FACTPS_MNGCG) == 0)
15644 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE)) 15681 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
15645 return 1; 15682 return 1;
15646 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){ 15683 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
15647 uint16_t data; 15684 uint16_t data;
15648 15685
15649 factps = CSR_READ(sc, WMREG_FACTPS); 15686 factps = CSR_READ(sc, WMREG_FACTPS);
15650 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data); 15687 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
15651 DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n", 15688 DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
15652 device_xname(sc->sc_dev), factps, data)); 15689 device_xname(sc->sc_dev), factps, data));
15653 if (((factps & FACTPS_MNGCG) == 0) 15690 if (((factps & FACTPS_MNGCG) == 0)
15654 && ((data & NVM_CFG2_MNGM_MASK) 15691 && ((data & NVM_CFG2_MNGM_MASK)
15655 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT))) 15692 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
15656 return 1; 15693 return 1;
15657 } else if (((manc & MANC_SMBUS_EN) != 0) 15694 } else if (((manc & MANC_SMBUS_EN) != 0)
15658 && ((manc & MANC_ASF_EN) == 0)) 15695 && ((manc & MANC_ASF_EN) == 0))
15659 return 1; 15696 return 1;
15660 15697
15661 return 0; 15698 return 0;
15662} 15699}
15663 15700
15664static bool 15701static bool
15665wm_phy_resetisblocked(struct wm_softc *sc) 15702wm_phy_resetisblocked(struct wm_softc *sc)
15666{ 15703{
15667 bool blocked = false; 15704 bool blocked = false;
15668 uint32_t reg; 15705 uint32_t reg;
15669 int i = 0; 15706 int i = 0;
15670 15707
15671 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 15708 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15672 device_xname(sc->sc_dev), __func__)); 15709 device_xname(sc->sc_dev), __func__));
15673 15710
15674 switch (sc->sc_type) { 15711 switch (sc->sc_type) {
15675 case WM_T_ICH8: 15712 case WM_T_ICH8:
15676 case WM_T_ICH9: 15713 case WM_T_ICH9:
15677 case WM_T_ICH10: 15714 case WM_T_ICH10:
15678 case WM_T_PCH: 15715 case WM_T_PCH:
15679 case WM_T_PCH2: 15716 case WM_T_PCH2:
15680 case WM_T_PCH_LPT: 15717 case WM_T_PCH_LPT:
15681 case WM_T_PCH_SPT: 15718 case WM_T_PCH_SPT:
15682 case WM_T_PCH_CNP: 15719 case WM_T_PCH_CNP:
15683 case WM_T_PCH_TGP: 15720 case WM_T_PCH_TGP:
15684 do { 15721 do {
15685 reg = CSR_READ(sc, WMREG_FWSM); 15722 reg = CSR_READ(sc, WMREG_FWSM);
15686 if ((reg & FWSM_RSPCIPHY) == 0) { 15723 if ((reg & FWSM_RSPCIPHY) == 0) {
15687 blocked = true; 15724 blocked = true;
15688 delay(10*1000); 15725 delay(10*1000);
15689 continue; 15726 continue;
15690 } 15727 }
15691 blocked = false; 15728 blocked = false;
15692 } while (blocked && (i++ < 30)); 15729 } while (blocked && (i++ < 30));
15693 return blocked; 15730 return blocked;
15694 break; 15731 break;
15695 case WM_T_82571: 15732 case WM_T_82571:
15696 case WM_T_82572: 15733 case WM_T_82572:
15697 case WM_T_82573: 15734 case WM_T_82573:
15698 case WM_T_82574: 15735 case WM_T_82574:
15699 case WM_T_82583: 15736 case WM_T_82583:
15700 case WM_T_80003: 15737 case WM_T_80003:
15701 reg = CSR_READ(sc, WMREG_MANC); 15738 reg = CSR_READ(sc, WMREG_MANC);
15702 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0) 15739 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
15703 return true; 15740 return true;
15704 else 15741 else
15705 return false; 15742 return false;
15706 break; 15743 break;
15707 default: 15744 default:
15708 /* No problem */ 15745 /* No problem */
15709 break; 15746 break;
15710 } 15747 }
15711 15748
15712 return false; 15749 return false;
15713} 15750}
15714 15751
15715static void 15752static void
15716wm_get_hw_control(struct wm_softc *sc) 15753wm_get_hw_control(struct wm_softc *sc)
15717{ 15754{
15718 uint32_t reg; 15755 uint32_t reg;
15719 15756
15720 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n", 15757 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15721 device_xname(sc->sc_dev), __func__)); 15758 device_xname(sc->sc_dev), __func__));
15722 15759
15723 if (sc->sc_type == WM_T_82573) { 15760 if (sc->sc_type == WM_T_82573) {
15724 reg = CSR_READ(sc, WMREG_SWSM); 15761 reg = CSR_READ(sc, WMREG_SWSM);
15725 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD); 15762 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
15726 } else if (sc->sc_type >= WM_T_82571) { 15763 } else if (sc->sc_type >= WM_T_82571) {
15727 reg = CSR_READ(sc, WMREG_CTRL_EXT); 15764 reg = CSR_READ(sc, WMREG_CTRL_EXT);
15728 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD); 15765 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
15729 } 15766 }
15730} 15767}
15731 15768
15732static void 15769static void
15733wm_release_hw_control(struct wm_softc *sc) 15770wm_release_hw_control(struct wm_softc *sc)
15734{ 15771{
15735 uint32_t reg; 15772 uint32_t reg;
15736 15773
15737 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n", 15774 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15738 device_xname(sc->sc_dev), __func__)); 15775 device_xname(sc->sc_dev), __func__));
15739 15776
15740 if (sc->sc_type == WM_T_82573) { 15777 if (sc->sc_type == WM_T_82573) {
15741 reg = CSR_READ(sc, WMREG_SWSM); 15778 reg = CSR_READ(sc, WMREG_SWSM);
15742 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD); 15779 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
15743 } else if (sc->sc_type >= WM_T_82571) { 15780 } else if (sc->sc_type >= WM_T_82571) {
15744 reg = CSR_READ(sc, WMREG_CTRL_EXT); 15781 reg = CSR_READ(sc, WMREG_CTRL_EXT);
15745 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD); 15782 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
15746 } 15783 }
15747} 15784}
15748 15785
15749static void 15786static void
15750wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate) 15787wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
15751{ 15788{
15752 uint32_t reg; 15789 uint32_t reg;
15753 15790
15754 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 15791 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15755 device_xname(sc->sc_dev), __func__)); 15792 device_xname(sc->sc_dev), __func__));
15756 15793
15757 if (sc->sc_type < WM_T_PCH2) 15794 if (sc->sc_type < WM_T_PCH2)
15758 return; 15795 return;
15759 15796
15760 reg = CSR_READ(sc, WMREG_EXTCNFCTR); 15797 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15761 15798
15762 if (gate) 15799 if (gate)
15763 reg |= EXTCNFCTR_GATE_PHY_CFG; 15800 reg |= EXTCNFCTR_GATE_PHY_CFG;
15764 else 15801 else
15765 reg &= ~EXTCNFCTR_GATE_PHY_CFG; 15802 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
15766 15803
15767 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg); 15804 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
15768} 15805}
15769 15806
15770static int 15807static int
15771wm_init_phy_workarounds_pchlan(struct wm_softc *sc) 15808wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
15772{ 15809{
15773 uint32_t fwsm, reg; 15810 uint32_t fwsm, reg;
15774 int rv; 15811 int rv;
15775 15812
15776 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 15813 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15777 device_xname(sc->sc_dev), __func__)); 15814 device_xname(sc->sc_dev), __func__));
15778 15815
15779 /* Gate automatic PHY configuration by hardware on non-managed 82579 */ 15816 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
15780 wm_gate_hw_phy_config_ich8lan(sc, true); 15817 wm_gate_hw_phy_config_ich8lan(sc, true);
15781 15818
15782 /* Disable ULP */ 15819 /* Disable ULP */
15783 wm_ulp_disable(sc); 15820 wm_ulp_disable(sc);
15784 15821
15785 /* Acquire PHY semaphore */ 15822 /* Acquire PHY semaphore */
15786 rv = sc->phy.acquire(sc); 15823 rv = sc->phy.acquire(sc);
15787 if (rv != 0) { 15824 if (rv != 0) {
15788 DPRINTF(sc, WM_DEBUG_INIT, 15825 DPRINTF(sc, WM_DEBUG_INIT,
15789 ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__)); 15826 ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
15790 return rv; 15827 return rv;
15791 } 15828 }
15792 15829
15793 /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is 15830 /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is
15794 * inaccessible and resetting the PHY is not blocked, toggle the 15831 * inaccessible and resetting the PHY is not blocked, toggle the
15795 * LANPHYPC Value bit to force the interconnect to PCIe mode. 15832 * LANPHYPC Value bit to force the interconnect to PCIe mode.
15796 */ 15833 */
15797 fwsm = CSR_READ(sc, WMREG_FWSM); 15834 fwsm = CSR_READ(sc, WMREG_FWSM);
15798 switch (sc->sc_type) { 15835 switch (sc->sc_type) {
15799 case WM_T_PCH_LPT: 15836 case WM_T_PCH_LPT:
15800 case WM_T_PCH_SPT: 15837 case WM_T_PCH_SPT:
15801 case WM_T_PCH_CNP: 15838 case WM_T_PCH_CNP:
15802 case WM_T_PCH_TGP: 15839 case WM_T_PCH_TGP:
15803 if (wm_phy_is_accessible_pchlan(sc)) 15840 if (wm_phy_is_accessible_pchlan(sc))
15804 break; 15841 break;
15805 15842
15806 /* Before toggling LANPHYPC, see if PHY is accessible by 15843 /* Before toggling LANPHYPC, see if PHY is accessible by
15807 * forcing MAC to SMBus mode first. 15844 * forcing MAC to SMBus mode first.
15808 */ 15845 */
15809 reg = CSR_READ(sc, WMREG_CTRL_EXT); 15846 reg = CSR_READ(sc, WMREG_CTRL_EXT);
15810 reg |= CTRL_EXT_FORCE_SMBUS; 15847 reg |= CTRL_EXT_FORCE_SMBUS;
15811 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 15848 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15812#if 0 15849#if 0
15813 /* XXX Isn't this required??? */ 15850 /* XXX Isn't this required??? */
15814 CSR_WRITE_FLUSH(sc); 15851 CSR_WRITE_FLUSH(sc);
15815#endif 15852#endif
15816 /* Wait 50 milliseconds for MAC to finish any retries 15853 /* Wait 50 milliseconds for MAC to finish any retries
15817 * that it might be trying to perform from previous 15854 * that it might be trying to perform from previous
15818 * attempts to acknowledge any phy read requests. 15855 * attempts to acknowledge any phy read requests.
15819 */ 15856 */
15820 delay(50 * 1000); 15857 delay(50 * 1000);
15821 /* FALLTHROUGH */ 15858 /* FALLTHROUGH */
15822 case WM_T_PCH2: 15859 case WM_T_PCH2:
15823 if (wm_phy_is_accessible_pchlan(sc) == true) 15860 if (wm_phy_is_accessible_pchlan(sc) == true)
15824 break; 15861 break;
15825 /* FALLTHROUGH */ 15862 /* FALLTHROUGH */
15826 case WM_T_PCH: 15863 case WM_T_PCH:
15827 if (sc->sc_type == WM_T_PCH) 15864 if (sc->sc_type == WM_T_PCH)
15828 if ((fwsm & FWSM_FW_VALID) != 0) 15865 if ((fwsm & FWSM_FW_VALID) != 0)
15829 break; 15866 break;
15830 15867
15831 if (wm_phy_resetisblocked(sc) == true) { 15868 if (wm_phy_resetisblocked(sc) == true) {
15832 device_printf(sc->sc_dev, "XXX reset is blocked(2)\n"); 15869 device_printf(sc->sc_dev, "XXX reset is blocked(2)\n");
15833 break; 15870 break;
15834 } 15871 }
15835 15872
15836 /* Toggle LANPHYPC Value bit */ 15873 /* Toggle LANPHYPC Value bit */
15837 wm_toggle_lanphypc_pch_lpt(sc); 15874 wm_toggle_lanphypc_pch_lpt(sc);
15838 15875
15839 if (sc->sc_type >= WM_T_PCH_LPT) { 15876 if (sc->sc_type >= WM_T_PCH_LPT) {
15840 if (wm_phy_is_accessible_pchlan(sc) == true) 15877 if (wm_phy_is_accessible_pchlan(sc) == true)
15841 break; 15878 break;
15842 15879
15843 /* Toggling LANPHYPC brings the PHY out of SMBus mode 15880 /* Toggling LANPHYPC brings the PHY out of SMBus mode
15844 * so ensure that the MAC is also out of SMBus mode 15881 * so ensure that the MAC is also out of SMBus mode
15845 */ 15882 */
15846 reg = CSR_READ(sc, WMREG_CTRL_EXT); 15883 reg = CSR_READ(sc, WMREG_CTRL_EXT);
15847 reg &= ~CTRL_EXT_FORCE_SMBUS; 15884 reg &= ~CTRL_EXT_FORCE_SMBUS;
15848 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 15885 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15849 15886
15850 if (wm_phy_is_accessible_pchlan(sc) == true) 15887 if (wm_phy_is_accessible_pchlan(sc) == true)
15851 break; 15888 break;
15852 rv = -1; 15889 rv = -1;
15853 } 15890 }
15854 break; 15891 break;
15855 default: 15892 default:
15856 break; 15893 break;
15857 } 15894 }
15858 15895
15859 /* Release semaphore */ 15896 /* Release semaphore */
15860 sc->phy.release(sc); 15897 sc->phy.release(sc);
15861 15898
15862 if (rv == 0) { 15899 if (rv == 0) {
15863 /* Check to see if able to reset PHY. Print error if not */ 15900 /* Check to see if able to reset PHY. Print error if not */
15864 if (wm_phy_resetisblocked(sc)) { 15901 if (wm_phy_resetisblocked(sc)) {
15865 device_printf(sc->sc_dev, "XXX reset is blocked(3)\n"); 15902 device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
15866 goto out; 15903 goto out;
15867 } 15904 }
15868 15905
15869 /* Reset the PHY before any access to it. Doing so, ensures 15906 /* Reset the PHY before any access to it. Doing so, ensures
15870 * that the PHY is in a known good state before we read/write 15907 * that the PHY is in a known good state before we read/write
15871 * PHY registers. The generic reset is sufficient here, 15908 * PHY registers. The generic reset is sufficient here,
15872 * because we haven't determined the PHY type yet. 15909 * because we haven't determined the PHY type yet.
15873 */ 15910 */
15874 if (wm_reset_phy(sc) != 0) 15911 if (wm_reset_phy(sc) != 0)
15875 goto out; 15912 goto out;
15876 15913
15877 /* On a successful reset, possibly need to wait for the PHY 15914 /* On a successful reset, possibly need to wait for the PHY
15878 * to quiesce to an accessible state before returning control 15915 * to quiesce to an accessible state before returning control
15879 * to the calling function. If the PHY does not quiesce, then 15916 * to the calling function. If the PHY does not quiesce, then
15880 * return E1000E_BLK_PHY_RESET, as this is the condition that 15917 * return E1000E_BLK_PHY_RESET, as this is the condition that
15881 * the PHY is in. 15918 * the PHY is in.
15882 */ 15919 */
15883 if (wm_phy_resetisblocked(sc)) 15920 if (wm_phy_resetisblocked(sc))
15884 device_printf(sc->sc_dev, "XXX reset is blocked(4)\n"); 15921 device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
15885 } 15922 }
15886 15923
15887out: 15924out:
15888 /* Ungate automatic PHY configuration on non-managed 82579 */ 15925 /* Ungate automatic PHY configuration on non-managed 82579 */
15889 if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) { 15926 if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
15890 delay(10*1000); 15927 delay(10*1000);
15891 wm_gate_hw_phy_config_ich8lan(sc, false); 15928 wm_gate_hw_phy_config_ich8lan(sc, false);
15892 } 15929 }
15893 15930
15894 return 0; 15931 return 0;
15895} 15932}
15896 15933
15897static void 15934static void
15898wm_init_manageability(struct wm_softc *sc) 15935wm_init_manageability(struct wm_softc *sc)
15899{ 15936{
15900 15937
15901 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 15938 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15902 device_xname(sc->sc_dev), __func__)); 15939 device_xname(sc->sc_dev), __func__));
15903 if (sc->sc_flags & WM_F_HAS_MANAGE) { 15940 if (sc->sc_flags & WM_F_HAS_MANAGE) {
15904 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H); 15941 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
15905 uint32_t manc = CSR_READ(sc, WMREG_MANC); 15942 uint32_t manc = CSR_READ(sc, WMREG_MANC);
15906 15943
15907 /* Disable hardware interception of ARP */ 15944 /* Disable hardware interception of ARP */
15908 manc &= ~MANC_ARP_EN; 15945 manc &= ~MANC_ARP_EN;
15909 15946
15910 /* Enable receiving management packets to the host */ 15947 /* Enable receiving management packets to the host */
15911 if (sc->sc_type >= WM_T_82571) { 15948 if (sc->sc_type >= WM_T_82571) {
15912 manc |= MANC_EN_MNG2HOST; 15949 manc |= MANC_EN_MNG2HOST;
15913 manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624; 15950 manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
15914 CSR_WRITE(sc, WMREG_MANC2H, manc2h); 15951 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
15915 } 15952 }
15916 15953
15917 CSR_WRITE(sc, WMREG_MANC, manc); 15954 CSR_WRITE(sc, WMREG_MANC, manc);
15918 } 15955 }
15919} 15956}
15920 15957
15921static void 15958static void
15922wm_release_manageability(struct wm_softc *sc) 15959wm_release_manageability(struct wm_softc *sc)
15923{ 15960{
15924 15961
15925 if (sc->sc_flags & WM_F_HAS_MANAGE) { 15962 if (sc->sc_flags & WM_F_HAS_MANAGE) {
15926 uint32_t manc = CSR_READ(sc, WMREG_MANC); 15963 uint32_t manc = CSR_READ(sc, WMREG_MANC);
15927 15964
15928 manc |= MANC_ARP_EN; 15965 manc |= MANC_ARP_EN;
15929 if (sc->sc_type >= WM_T_82571) 15966 if (sc->sc_type >= WM_T_82571)
15930 manc &= ~MANC_EN_MNG2HOST; 15967 manc &= ~MANC_EN_MNG2HOST;
15931 15968
15932 CSR_WRITE(sc, WMREG_MANC, manc); 15969 CSR_WRITE(sc, WMREG_MANC, manc);
15933 } 15970 }
15934} 15971}
15935 15972
15936static void 15973static void
15937wm_get_wakeup(struct wm_softc *sc) 15974wm_get_wakeup(struct wm_softc *sc)
15938{ 15975{
15939 15976
15940 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */ 15977 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
15941 switch (sc->sc_type) { 15978 switch (sc->sc_type) {
15942 case WM_T_82573: 15979 case WM_T_82573:
15943 case WM_T_82583: 15980 case WM_T_82583:
15944 sc->sc_flags |= WM_F_HAS_AMT; 15981 sc->sc_flags |= WM_F_HAS_AMT;
15945 /* FALLTHROUGH */ 15982 /* FALLTHROUGH */
15946 case WM_T_80003: 15983 case WM_T_80003:
15947 case WM_T_82575: 15984 case WM_T_82575:
15948 case WM_T_82576: 15985 case WM_T_82576:
15949 case WM_T_82580: 15986 case WM_T_82580:
15950 case WM_T_I350: 15987 case WM_T_I350:
15951 case WM_T_I354: 15988 case WM_T_I354:
15952 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0) 15989 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
15953 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID; 15990 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
15954 /* FALLTHROUGH */ 15991 /* FALLTHROUGH */
15955 case WM_T_82541: 15992 case WM_T_82541:
15956 case WM_T_82541_2: 15993 case WM_T_82541_2:
15957 case WM_T_82547: 15994 case WM_T_82547:
15958 case WM_T_82547_2: 15995 case WM_T_82547_2:
15959 case WM_T_82571: 15996 case WM_T_82571:
15960 case WM_T_82572: 15997 case WM_T_82572:
15961 case WM_T_82574: 15998 case WM_T_82574:
15962 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES; 15999 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
15963 break; 16000 break;
15964 case WM_T_ICH8: 16001 case WM_T_ICH8:
15965 case WM_T_ICH9: 16002 case WM_T_ICH9:
15966 case WM_T_ICH10: 16003 case WM_T_ICH10:
15967 case WM_T_PCH: 16004 case WM_T_PCH:
15968 case WM_T_PCH2: 16005 case WM_T_PCH2:
15969 case WM_T_PCH_LPT: 16006 case WM_T_PCH_LPT:
15970 case WM_T_PCH_SPT: 16007 case WM_T_PCH_SPT:
15971 case WM_T_PCH_CNP: 16008 case WM_T_PCH_CNP:
15972 case WM_T_PCH_TGP: 16009 case WM_T_PCH_TGP:
15973 sc->sc_flags |= WM_F_HAS_AMT; 16010 sc->sc_flags |= WM_F_HAS_AMT;
15974 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES; 16011 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
15975 break; 16012 break;
15976 default: 16013 default:
15977 break; 16014 break;
15978 } 16015 }
15979 16016
15980 /* 1: HAS_MANAGE */ 16017 /* 1: HAS_MANAGE */
15981 if (wm_enable_mng_pass_thru(sc) != 0) 16018 if (wm_enable_mng_pass_thru(sc) != 0)
15982 sc->sc_flags |= WM_F_HAS_MANAGE; 16019 sc->sc_flags |= WM_F_HAS_MANAGE;
15983 16020
15984 /* 16021 /*

cvs diff -r1.98.6.18 -r1.98.6.19 src/sys/dev/pci/if_wmreg.h (switch to unified diff)

--- src/sys/dev/pci/if_wmreg.h 2023/10/18 14:41:54 1.98.6.18
+++ src/sys/dev/pci/if_wmreg.h 2024/02/29 10:46:28 1.98.6.19
@@ -1,1785 +1,1788 @@ @@ -1,1785 +1,1788 @@
1/* $NetBSD: if_wmreg.h,v 1.98.6.18 2023/10/18 14:41:54 martin Exp $ */ 1/* $NetBSD: if_wmreg.h,v 1.98.6.19 2024/02/29 10:46:28 martin Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2001 Wasabi Systems, Inc. 4 * Copyright (c) 2001 Wasabi Systems, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc. 7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the 15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution. 16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software 17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement: 18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by 19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc. 20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior 22 * or promote products derived from this software without specific prior
23 * written permission. 23 * written permission.
24 * 24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE. 35 * POSSIBILITY OF SUCH DAMAGE.
36 */ 36 */
37 37
38/****************************************************************************** 38/******************************************************************************
39 39
40 Copyright (c) 2001-2012, Intel Corporation 40 Copyright (c) 2001-2012, Intel Corporation
41 All rights reserved. 41 All rights reserved.
42 42
43 Redistribution and use in source and binary forms, with or without 43 Redistribution and use in source and binary forms, with or without
44 modification, are permitted provided that the following conditions are met: 44 modification, are permitted provided that the following conditions are met:
45 45
46 1. Redistributions of source code must retain the above copyright notice, 46 1. Redistributions of source code must retain the above copyright notice,
47 this list of conditions and the following disclaimer. 47 this list of conditions and the following disclaimer.
48 48
49 2. Redistributions in binary form must reproduce the above copyright 49 2. Redistributions in binary form must reproduce the above copyright
50 notice, this list of conditions and the following disclaimer in the 50 notice, this list of conditions and the following disclaimer in the
51 documentation and/or other materials provided with the distribution. 51 documentation and/or other materials provided with the distribution.
52 52
53 3. Neither the name of the Intel Corporation nor the names of its 53 3. Neither the name of the Intel Corporation nor the names of its
54 contributors may be used to endorse or promote products derived from 54 contributors may be used to endorse or promote products derived from
55 this software without specific prior written permission. 55 this software without specific prior written permission.
56 56
57 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 57 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 58 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 59 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 60 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 61 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 62 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 63 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 64 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 65 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 66 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67 POSSIBILITY OF SUCH DAMAGE. 67 POSSIBILITY OF SUCH DAMAGE.
68 68
69******************************************************************************/ 69******************************************************************************/
70 70
71/* 71/*
72 * Register description for the Intel i82542 (``Wiseman''), 72 * Register description for the Intel i82542 (``Wiseman''),
73 * i82543 (``Livengood''), and i82544 (``Cordova'') Gigabit 73 * i82543 (``Livengood''), and i82544 (``Cordova'') Gigabit
74 * Ethernet chips. 74 * Ethernet chips.
75 */ 75 */
76 76
77/* 77/*
78 * The wiseman supports 64-bit PCI addressing. This structure 78 * The wiseman supports 64-bit PCI addressing. This structure
79 * describes the address in descriptors. 79 * describes the address in descriptors.
80 */ 80 */
81typedef struct wiseman_addr { 81typedef struct wiseman_addr {
82 uint32_t wa_low; /* low-order 32 bits */ 82 uint32_t wa_low; /* low-order 32 bits */
83 uint32_t wa_high; /* high-order 32 bits */ 83 uint32_t wa_high; /* high-order 32 bits */
84} __packed wiseman_addr_t; 84} __packed wiseman_addr_t;
85 85
86/* 86/*
87 * The Wiseman receive descriptor. 87 * The Wiseman receive descriptor.
88 * 88 *
89 * The receive descriptor ring must be aligned to a 4K boundary, 89 * The receive descriptor ring must be aligned to a 4K boundary,
90 * and there must be an even multiple of 8 descriptors in the ring. 90 * and there must be an even multiple of 8 descriptors in the ring.
91 */ 91 */
92typedef struct wiseman_rxdesc { 92typedef struct wiseman_rxdesc {
93 volatile wiseman_addr_t wrx_addr; /* buffer address */ 93 volatile wiseman_addr_t wrx_addr; /* buffer address */
94 94
95 volatile uint16_t wrx_len; /* buffer length */ 95 volatile uint16_t wrx_len; /* buffer length */
96 volatile uint16_t wrx_cksum; /* checksum (starting at PCSS)*/ 96 volatile uint16_t wrx_cksum; /* checksum (starting at PCSS)*/
97 97
98 volatile uint8_t wrx_status; /* Rx status */ 98 volatile uint8_t wrx_status; /* Rx status */
99 volatile uint8_t wrx_errors; /* Rx errors */ 99 volatile uint8_t wrx_errors; /* Rx errors */
100 volatile uint16_t wrx_special; /* special field (VLAN, etc.) */ 100 volatile uint16_t wrx_special; /* special field (VLAN, etc.) */
101} __packed wiseman_rxdesc_t; 101} __packed wiseman_rxdesc_t;
102 102
103/* wrx_status bits */ 103/* wrx_status bits */
104#define WRX_ST_DD __BIT(0) /* descriptor done */ 104#define WRX_ST_DD __BIT(0) /* descriptor done */
105#define WRX_ST_EOP __BIT(1) /* end of packet */ 105#define WRX_ST_EOP __BIT(1) /* end of packet */
106#define WRX_ST_IXSM __BIT(2) /* ignore checksum indication */ 106#define WRX_ST_IXSM __BIT(2) /* ignore checksum indication */
107#define WRX_ST_VP __BIT(3) /* VLAN packet */ 107#define WRX_ST_VP __BIT(3) /* VLAN packet */
108#define WRX_ST_BPDU __BIT(4) /* ??? */ 108#define WRX_ST_BPDU __BIT(4) /* ??? */
109#define WRX_ST_TCPCS __BIT(5) /* TCP checksum performed */ 109#define WRX_ST_TCPCS __BIT(5) /* TCP checksum performed */
110#define WRX_ST_IPCS __BIT(6) /* IP checksum performed */ 110#define WRX_ST_IPCS __BIT(6) /* IP checksum performed */
111#define WRX_ST_PIF __BIT(7) /* passed in-exact filter */ 111#define WRX_ST_PIF __BIT(7) /* passed in-exact filter */
112 112
113/* wrx_error bits */ 113/* wrx_error bits */
114#define WRX_ER_CE __BIT(0) /* CRC error */ 114#define WRX_ER_CE __BIT(0) /* CRC error */
115#define WRX_ER_SE __BIT(1) /* symbol error */ 115#define WRX_ER_SE __BIT(1) /* symbol error */
116#define WRX_ER_SEQ __BIT(2) /* sequence error */ 116#define WRX_ER_SEQ __BIT(2) /* sequence error */
117#define WRX_ER_ICE __BIT(3) /* ??? */ 117#define WRX_ER_ICE __BIT(3) /* ??? */
118#define WRX_ER_CXE __BIT(4) /* carrier extension error */ 118#define WRX_ER_CXE __BIT(4) /* carrier extension error */
119#define WRX_ER_TCPE __BIT(5) /* TCP checksum error */ 119#define WRX_ER_TCPE __BIT(5) /* TCP checksum error */
120#define WRX_ER_IPE __BIT(6) /* IP checksum error */ 120#define WRX_ER_IPE __BIT(6) /* IP checksum error */
121#define WRX_ER_RXE __BIT(7) /* Rx data error */ 121#define WRX_ER_RXE __BIT(7) /* Rx data error */
122 122
123/* wrx_special field for VLAN packets */ 123/* wrx_special field for VLAN packets */
124#define WRX_VLAN_ID(x) ((x) & 0x0fff) /* VLAN identifier */ 124#define WRX_VLAN_ID(x) ((x) & 0x0fff) /* VLAN identifier */
125#define WRX_VLAN_CFI __BIT(12) /* Canonical Form Indicator */ 125#define WRX_VLAN_CFI __BIT(12) /* Canonical Form Indicator */
126#define WRX_VLAN_PRI(x) (((x) >> 13) & 7)/* VLAN priority field */ 126#define WRX_VLAN_PRI(x) (((x) >> 13) & 7)/* VLAN priority field */
127 127
128/* extended RX descriptor for 82574 */ 128/* extended RX descriptor for 82574 */
129typedef union ext_rxdesc { 129typedef union ext_rxdesc {
130 struct { 130 struct {
131 uint64_t erxd_addr; /* Packet Buffer Address */ 131 uint64_t erxd_addr; /* Packet Buffer Address */
132 uint64_t erxd_dd; /* 63:1 reserved, 0 DD */ 132 uint64_t erxd_dd; /* 63:1 reserved, 0 DD */
133 } erx_data; 133 } erx_data;
134 struct { 134 struct {
135 uint32_t erxc_mrq; /* 135 uint32_t erxc_mrq; /*
136 * 31:13 reserved 136 * 31:13 reserved
137 * 12:8 Rx queue associated with the packet 137 * 12:8 Rx queue associated with the packet
138 * 7:4 reserved 3:0 RSS Type 138 * 7:4 reserved 3:0 RSS Type
139 */ 139 */
140 uint32_t erxc_rsshash; /* RSS Hash or {Fragment Checksum, IP identification } */ 140 uint32_t erxc_rsshash; /* RSS Hash or {Fragment Checksum, IP identification } */
 141 uint32_t erxc_err_stat; /* 31:20 Extended Error, 19:0 Extended Status */ 141 uint32_t erxc_err_stat; /* 31:20 Extended Error, 19:0 Extended Status */
142 uint16_t erxc_pktlen; /* PKT_LEN */ 142 uint16_t erxc_pktlen; /* PKT_LEN */
143 uint16_t erxc_vlan; /* VLAN Tag */ 143 uint16_t erxc_vlan; /* VLAN Tag */
144 } erx_ctx; 144 } erx_ctx;
145} __packed ext_rxdesc_t; 145} __packed ext_rxdesc_t;
146 146
147#define EXTRXD_DD_MASK __BIT(0) 147#define EXTRXD_DD_MASK __BIT(0)
148 148
149/* 149/*
150 * erxc_rsshash is used for below 2 patterns 150 * erxc_rsshash is used for below 2 patterns
151 * (1) Fragment Checksum and IP identification 151 * (1) Fragment Checksum and IP identification
152 * - Fragment Checksum is valid 152 * - Fragment Checksum is valid
153 * when RXCSUM.PCSD cleared and RXCSUM.IPPCSE bit is set 153 * when RXCSUM.PCSD cleared and RXCSUM.IPPCSE bit is set
154 * - IP identification is valid 154 * - IP identification is valid
155 * when RXCSUM.PCSD cleared and RXCSUM.IPPCSE bit is set 155 * when RXCSUM.PCSD cleared and RXCSUM.IPPCSE bit is set
156 * (2) RSS Hash 156 * (2) RSS Hash
157 * when RXCSUM.PCSD bit is set 157 * when RXCSUM.PCSD bit is set
158 */ 158 */
159#define EXTRXC_IP_ID_MASK __BITS(15,0) 159#define EXTRXC_IP_ID_MASK __BITS(15,0)
160#define EXTRXC_FRAG_CSUM_MASK __BITS(31,16) 160#define EXTRXC_FRAG_CSUM_MASK __BITS(31,16)
161#define EXTRXC_IP_ID(rsshash) __SHIFTOUT(rsshash,ERXC_IP_ID_MASK) 161#define EXTRXC_IP_ID(rsshash) __SHIFTOUT(rsshash,ERXC_IP_ID_MASK)
162#define EXTRXC_FRAG_CSUM(rsshash) __SHIFTOUT(rsshash,ERXC_FRAG_CSUM_MASK) 162#define EXTRXC_FRAG_CSUM(rsshash) __SHIFTOUT(rsshash,ERXC_FRAG_CSUM_MASK)
163 163
/* macros for nrxc_mrq */
#define EXTRXC_RSS_TYPE_MASK	__BITS(3,0)
/* __BITS(7,4) is reserved */
#define EXTRXC_QUEUE_MASK	__BITS(12,8)
/* __BITS(31,13) is reserved */
#define EXTRXC_RSS_TYPE(mrq)	__SHIFTOUT(mrq,EXTRXC_RSS_TYPE_MASK)
#define EXTRXC_QUEUE(mrq)	__SHIFTOUT(mrq,EXTRXC_QUEUE_MASK)

/* Values of the RSS Type field (EXTRXC_RSS_TYPE_MASK). */
#define EXTRXC_RSS_TYPE_NONE	0x0	/* No hash computation done. */
#define EXTRXC_RSS_TYPE_TCP_IPV4	0x1
#define EXTRXC_RSS_TYPE_IPV4	0x2
#define EXTRXC_RSS_TYPE_TCP_IPV6	0x3
#define EXTRXC_RSS_TYPE_IPV6_EX	0x4
#define EXTRXC_RSS_TYPE_IPV6	0x5
/* 0x6:0xF is reserved. */

/* nrxc_err_stat is split into a status part and an error part. */
#define EXTRXC_STATUS_MASK	__BITS(19,0)
#define EXTRXC_ERROR_MASK	__BITS(31,20)
#define EXTRXC_STATUS(err_stat)	__SHIFTOUT(err_stat,EXTRXC_STATUS_MASK)
#define EXTRXC_ERROR(err_stat)	__SHIFTOUT(err_stat,EXTRXC_ERROR_MASK)

/* Extended error bits. */
/* 3:0 is reserved. */
#define EXTRXC_ERROR_CE		__BIT(4)	/* The same as WRX_ER_CE. */
#define EXTRXC_ERROR_SE		__BIT(5)	/* The same as WRX_ER_SE. */
#define EXTRXC_ERROR_SEQ	__BIT(6)	/* The same as WRX_ER_SEQ. */
/* 7 is reserved. */
#define EXTRXC_ERROR_CXE	__BIT(8)	/* The same as WRX_ER_CXE. */
#define EXTRXC_ERROR_TCPE	__BIT(9)	/* The same as WRX_ER_TCPE. */
#define EXTRXC_ERROR_IPE	__BIT(10)	/* The same as WRX_ER_IPE. */
#define EXTRXC_ERROR_RXE	__BIT(11)	/* The same as WRX_ER_RXE. */

/* Extended status bits. */
#define EXTRXC_STATUS_DD	__BIT(0)	/* The same as WRX_ST_DD. */
#define EXTRXC_STATUS_EOP	__BIT(1)	/* The same as WRX_ST_EOP. */
/* 2 is reserved. */
#define EXTRXC_STATUS_VP	__BIT(3)	/* The same as WRX_ST_VP. */
#define EXTRXC_STATUS_UDPCS	__BIT(4)	/* UDP checksum calculated on packet. */
#define EXTRXC_STATUS_TCPCS	__BIT(5)	/* The same as WRX_ST_TCPCS. */
#define EXTRXC_STATUS_IPCS	__BIT(6)	/* The same as WRX_ST_IPCS. */
/* 7 is reserved. */
#define EXTRXC_STATUS_TST	__BIT(8)	/* Time stamp taken. */
#define EXTRXC_STATUS_IPIDV	__BIT(9)	/* IP identification valid. */
#define EXTRXC_STATUS_UDPV	__BIT(10)	/* Valid UDP XSUM. */
/* 14:11 is reserved. */
#define EXTRXC_STATUS_ACK	__BIT(15)	/* ACK packet indication. */
#define EXTRXC_STATUS_PKTTYPE_MASK __BITS(19,16)
#define EXTRXC_STATUS_PKTTYPE(status) __SHIFTOUT(status,EXTRXC_STATUS_PKTTYPE_MASK)
210 210
/* advanced RX descriptor for 82575 and newer */
typedef union nq_rxdesc {
	/* Buffer-address view (addresses programmed by the driver). */
	struct {
		uint64_t nrxd_paddr;	/* 63:1 Packet Buffer Address, 0 A0/NSE */
		uint64_t nrxd_haddr;	/* 63:1 Header Buffer Address, 0 DD */
	} nqrx_data;
	/* Completion view (written back by the hardware on receive). */
	struct {
		uint32_t nrxc_misc;	/*
					 * 31: SPH, 30:21 HDR_LEN[9:0],
					 * 20:19 HDR_LEN[11:10], 18:17 RSV,
					 * 16:4 Packet Type, 3:0 RSS Type
					 */
		uint32_t nrxc_rsshash;	/* RSS Hash or
					 * {Fragment Checksum, IP identification} */
		uint32_t nrxc_err_stat;	/* 31:20 Extended Error,
					 * 19:0 Extended Status */
		uint16_t nrxc_pktlen;	/* PKT_LEN */
		uint16_t nrxc_vlan;	/* VLAN Tag */
	} nqrx_ctx;
} __packed nq_rxdesc_t;

/* for nrxd_paddr macros */
#define NQRXD_A0_MASK		__BIT(0)
#define NQRXD_NSE_MASK		__BIT(0)
#define NQRXD_ADDR_MASK		__BITS(63,1)
/* for nrxd_haddr macros */
#define NQRXD_DD_MASK		__BIT(0)
236 236
/*
 * nrxc_rsshash is used for below 2 patterns
 * (1) Fragment Checksum and IP identification
 *  - Fragment Checksum is valid
 *    when RXCSUM.PCSD cleared and RXCSUM.IPPCSE bit is set
 *  - IP identification is valid
 *    when RXCSUM.PCSD cleared and RXCSUM.IPPCSE bit is set
 * (2) RSS Hash
 *  when RXCSUM.PCSD bit is set
 */
#define NQRXC_IP_ID_MASK	__BITS(15,0)
#define NQRXC_FRAG_CSUM_MASK	__BITS(31,16)
/*
 * Field accessors for nrxc_rsshash.  These previously referenced the
 * nonexistent NRXC_* mask names (typo); use the NQRXC_* masks above.
 */
#define NQRXC_IP_ID(rsshash)	__SHIFTOUT(rsshash,NQRXC_IP_ID_MASK)
#define NQRXC_FRAG_CSUM(rsshash) __SHIFTOUT(rsshash,NQRXC_FRAG_CSUM_MASK)
251 251
/* macros for nrxc_misc */
#define NQRXC_RSS_TYPE_MASK	__BITS(3,0)
#define NQRXC_PKT_TYPE_ID_MASK	__BITS(11,4)
#define NQRXC_PKT_TYPE_ETQF_INDEX_MASK __BITS(11,4)
#define NQRXC_PKT_TYPE_ETQF_VALID_MASK __BIT(15)
#define NQRXC_PKT_TYPE_VLAN_MASK __BIT(16)
#define NQRXC_PKT_TYPE_MASK	__BITS(16,4)
/* __BITS(18,17) is reserved */
#define NQRXC_HDRLEN_HIGH_MASK	__BITS(20,19)
#define NQRXC_HDRLEN_LOW_MASK	__BITS(30,21)
#define NQRXC_SPH_MASK		__BIT(31)

#define NQRXC_RSS_TYPE(misc)	__SHIFTOUT(misc,NQRXC_RSS_TYPE_MASK)
#define NQRXC_PKT_TYPE_ID(pkttype) \
	__SHIFTOUT(pkttype,NQRXC_PKT_TYPE_ID_MASK)
#define NQRXC_PKT_TYPE(misc)	__SHIFTOUT(misc,NQRXC_PKT_TYPE_MASK)
#define NQRXC_PKT_TYPE_ETQF_INDEX(pkttype) \
	__SHIFTOUT(pkttype,NQRXC_PKT_TYPE_ETQF_INDEX_MASK)
#define NQRXC_PKT_TYPE_ETQF_VALID NQRXC_PKT_TYPE_ETQF_VALID_MASK
#define NQRXC_PKT_TYPE_VLAN	NQRXC_PKT_TYPE_VLAN_MASK
/* HDR_LEN is split: [9:0] in bits 30:21 and [11:10] in bits 20:19. */
#define NQRXC_HEADER_LEN(misc)	(__SHIFTOUT(misc,NQRXC_HDRLEN_LOW_MASK) \
	    | __SHIFTOUT(misc,NQRXC_HDRLEN_HIGH_MASK) << 10)
#define NQRXC_SPH		NQRXC_SPH_MASK

/* Values of the RSS Type field (NQRXC_RSS_TYPE_MASK). */
#define NQRXC_RSS_TYPE_NONE	0x0	/* No hash computation done. */
#define NQRXC_RSS_TYPE_TCP_IPV4	0x1
#define NQRXC_RSS_TYPE_IPV4	0x2
#define NQRXC_RSS_TYPE_TCP_IPV6	0x3
#define NQRXC_RSS_TYPE_IPV6_EX	0x4
#define NQRXC_RSS_TYPE_IPV6	0x5
#define NQRXC_RSS_TYPE_TCP_IPV6_EX 0x6
#define NQRXC_RSS_TYPE_UDP_IPV4	0x7
#define NQRXC_RSS_TYPE_UDP_IPV6	0x8
#define NQRXC_RSS_TYPE_UDP_IPV6_EX 0x9
/* 0xA:0xF is reserved. */

/* Packet Type bits (within NQRXC_PKT_TYPE_MASK). */
#define NQRXC_PKT_TYPE_IPV4	__BIT(0)
#define NQRXC_PKT_TYPE_IPV4E	__BIT(1)
#define NQRXC_PKT_TYPE_IPV6	__BIT(2)
#define NQRXC_PKT_TYPE_IPV6E	__BIT(3)
#define NQRXC_PKT_TYPE_TCP	__BIT(4)
#define NQRXC_PKT_TYPE_UDP	__BIT(5)
#define NQRXC_PKT_TYPE_SCTP	__BIT(6)
#define NQRXC_PKT_TYPE_NFS	__BIT(7)

/* nrxc_err_stat is split into a status part and an error part. */
#define NQRXC_STATUS_MASK	__BITS(19,0)
#define NQRXC_ERROR_MASK	__BITS(31,20)
#define NQRXC_STATUS(err_stat)	__SHIFTOUT(err_stat,NQRXC_STATUS_MASK)
#define NQRXC_ERROR(err_stat)	__SHIFTOUT(err_stat,NQRXC_ERROR_MASK)

/* Extended error bits. */
/* 2:0 is reserved. */
#define NQRXC_ERROR_HB0	__BIT(3)	/* Header Buffer Overflow. */
/* 6:4 is reserved. */
/* 8:7 is reserved. */
#define NQRXC_ERROR_L4E	__BIT(9)	/* L4 error indication. */
#define NQRXC_ERROR_IPE	__BIT(10)	/* The same as WRX_ER_IPE. */
#define NQRXC_ERROR_RXE	__BIT(11)	/* The same as WRX_ER_RXE. */
/* XXX Where is WRX_ER_CE, WRX_ER_SE, WRX_ER_SEQ, WRX_ER_CXE error? */

/* Extended status bits. */
#define NQRXC_STATUS_DD	__BIT(0)	/* The same as WRX_ST_DD. */
#define NQRXC_STATUS_EOP __BIT(1)	/* The same as WRX_ST_EOP. */
/* 2 is reserved */
#define NQRXC_STATUS_VP	__BIT(3)	/* The same as WRX_ST_VP. */
#define NQRXC_STATUS_UDPCS __BIT(4)	/* UDP checksum or IP payload checksum. */
	/* XXX in I210 spec, this bit is the same as WRX_ST_BPDU(is "???" comment) */
#define NQRXC_STATUS_L4I __BIT(5)	/* L4 integrity check was done. */
#define NQRXC_STATUS_IPCS __BIT(6)	/* The same as WRX_ST_IPCS. */
#define NQRXC_STATUS_PIF __BIT(7)	/* The same as WRX_ST_PIF. */
/* 8 is reserved */
#define NQRXC_STATUS_VEXT __BIT(9)	/* First VLAN is found on a double VLAN packet. */
#define NQRXC_STATUS_UDPV __BIT(10)	/* The packet contains a valid checksum field in a first fragment UDP IPv4 packet. */
#define NQRXC_STATUS_LLINT __BIT(11)	/* The packet caused an immediate interrupt. */
#define NQRXC_STATUS_STRIPCRC __BIT(12)	/* Ethernet CRC is stripped. */
/* 14:13 is reserved */
#define NQRXC_STATUS_TSIP __BIT(15)	/* Timestamp in packet. */
#define NQRXC_STATUS_TS	__BIT(16)	/* Time stamped packet. */
/* 17 is reserved */
#define NQRXC_STATUS_LB	__BIT(18)	/* Sent by a local virtual machine (VM to VM switch indication). */
#define NQRXC_STATUS_MC	__BIT(19)	/* Packet received from Manageability Controller */
	/* "MBC" in i350 spec */
332 332
/*
 * The Wiseman transmit descriptor.
 *
 * The transmit descriptor ring must be aligned to a 4K boundary,
 * and there must be an even multiple of 8 descriptors in the ring.
 */
typedef struct wiseman_tx_fields {
	uint8_t wtxu_status;	/* Tx status */
	uint8_t wtxu_options;	/* options */
	uint16_t wtxu_vlan;	/* VLAN info */
} __packed wiseman_txfields_t;
typedef struct wiseman_txdesc {
	wiseman_addr_t wtx_addr;	/* buffer address */
	uint32_t wtx_cmdlen;		/* command and length */
	wiseman_txfields_t wtx_fields;	/* fields; see below */
} __packed wiseman_txdesc_t;

/* Commands for wtx_cmdlen */
#define WTX_CMD_EOP	__BIT(24)	/* end of packet */
#define WTX_CMD_IFCS	__BIT(25)	/* insert FCS */
#define WTX_CMD_RS	__BIT(27)	/* report status */
#define WTX_CMD_RPS	__BIT(28)	/* report packet sent */
#define WTX_CMD_DEXT	__BIT(29)	/* descriptor extension */
#define WTX_CMD_VLE	__BIT(30)	/* VLAN enable */
#define WTX_CMD_IDE	__BIT(31)	/* interrupt delay enable */

/* Descriptor types (if DEXT is set) */
#define WTX_DTYP_MASK	__BIT(20)
#define WTX_DTYP_C	__SHIFTIN(0, WTX_DTYP_MASK)	/* context */
#define WTX_DTYP_D	__SHIFTIN(1, WTX_DTYP_MASK)	/* data */

/* wtx_fields status bits */
#define WTX_ST_DD	__BIT(0)	/* descriptor done */
#define WTX_ST_EC	__BIT(1)	/* excessive collisions */
#define WTX_ST_LC	__BIT(2)	/* late collision */
#define WTX_ST_TU	__BIT(3)	/* transmit underrun */

/* wtx_fields option bits for IP/TCP/UDP checksum offload */
#define WTX_IXSM	__BIT(0)	/* IP checksum offload */
#define WTX_TXSM	__BIT(1)	/* TCP/UDP checksum offload */

/* Maximum payload per Tx descriptor */
#define WTX_MAX_LEN	4096
376 376
/*
 * The Livengood TCP/IP context descriptor.
 */
struct livengood_tcpip_ctxdesc {
	uint32_t tcpip_ipcs;	/* IP checksum context */
	uint32_t tcpip_tucs;	/* TCP/UDP checksum context */
	uint32_t tcpip_cmdlen;	/* command and length */
	uint32_t tcpip_seg;	/* TCP segmentation context */
};

/* commands for context descriptors */
#define WTX_TCPIP_CMD_TCP	__BIT(24)	/* 1 = TCP, 0 = UDP */
#define WTX_TCPIP_CMD_IP	__BIT(25)	/* 1 = IPv4, 0 = IPv6 */
#define WTX_TCPIP_CMD_TSE	__BIT(26)	/* segmentation context valid */

/* tcpip_ipcs fields */
#define WTX_TCPIP_IPCSS(x)	((x) << 0)	/* checksum start */
#define WTX_TCPIP_IPCSO(x)	((x) << 8)	/* checksum value offset */
#define WTX_TCPIP_IPCSE(x)	((x) << 16)	/* checksum end */

/* tcpip_tucs fields */
#define WTX_TCPIP_TUCSS(x)	((x) << 0)	/* checksum start */
#define WTX_TCPIP_TUCSO(x)	((x) << 8)	/* checksum value offset */
#define WTX_TCPIP_TUCSE(x)	((x) << 16)	/* checksum end */

/* tcpip_seg fields */
#define WTX_TCPIP_SEG_STATUS(x)	((x) << 0)
#define WTX_TCPIP_SEG_HDRLEN(x)	((x) << 8)
#define WTX_TCPIP_SEG_MSS(x)	((x) << 16)
403 403
/*
 * PCI config registers used by the Wiseman.
 */
#define WM_PCI_MMBA	PCI_MAPREG_START	/* memory-mapped base address */
/* registers for FLASH access on ICH8 */
#define WM_ICH8_FLASH	0x0014

/* LTR (Latency Tolerance Reporting) capability offset on LPT */
#define WM_PCI_LTR_CAP_LPT	0xa8

/* XXX Only for PCH_SPT? */
#define WM_PCI_DESCRING_STATUS	0xe4
#define DESCRING_STATUS_FLUSH_REQ	__BIT(8)
416 416
/*
 * Wiseman Control/Status Registers.
 *
 * NOTE(review): some bit positions below are defined twice (e.g. bit 2,
 * bit 16); the duplicate defines reflect different meanings on different
 * MAC generations — which one applies depends on the chip.
 */
#define WMREG_CTRL	0x0000	/* Device Control Register */
#define CTRL_FD		__BIT(0)	/* full duplex */
#define CTRL_BEM	__BIT(1)	/* big-endian mode */
#define CTRL_PRIOR	__BIT(2)	/* 0 = receive, 1 = fair */
#define CTRL_GIO_M_DIS	__BIT(2)	/* disable PCI master access */
#define CTRL_LRST	__BIT(3)	/* link reset */
#define CTRL_ASDE	__BIT(5)	/* auto speed detect enable */
#define CTRL_SLU	__BIT(6)	/* set link up */
#define CTRL_ILOS	__BIT(7)	/* invert loss of signal */
#define CTRL_SPEED(x)	((x) << 8)	/* speed (Livengood) */
#define CTRL_SPEED_10	CTRL_SPEED(0)
#define CTRL_SPEED_100	CTRL_SPEED(1)
#define CTRL_SPEED_1000	CTRL_SPEED(2)
#define CTRL_SPEED_MASK	CTRL_SPEED(3)
#define CTRL_FRCSPD	__BIT(11)	/* force speed (Livengood) */
#define CTRL_FRCFDX	__BIT(12)	/* force full-duplex (Livengood) */
#define CTRL_D_UD_EN	__BIT(13)	/* Dock/Undock enable */
#define CTRL_D_UD_POL	__BIT(14)	/* Defined polarity of Dock/Undock indication in SDP[0] */
#define CTRL_F_PHY_R	__BIT(15)	/* Reset both PHY ports, through PHYRST_N pin */
#define CTRL_EXTLINK_EN	__BIT(16)	/* enable link status from external LINK_0 and LINK_1 pins */
#define CTRL_LANPHYPC_OVERRIDE __BIT(16) /* SW control of LANPHYPC */
#define CTRL_LANPHYPC_VALUE __BIT(17)	/* SW value of LANPHYPC */
#define CTRL_SWDPINS_SHIFT	18
#define CTRL_SWDPINS_MASK	0x0f
#define CTRL_SWDPIN(x)	(1U << (CTRL_SWDPINS_SHIFT + (x)))
#define CTRL_SWDPIO_SHIFT	22
#define CTRL_SWDPIO_MASK	0x0f
#define CTRL_SWDPIO(x)	(1U << (CTRL_SWDPIO_SHIFT + (x)))
#define CTRL_MEHE	__BIT(19)	/* Memory Error Handling Enable (I217) */
#define CTRL_RST	__BIT(26)	/* device reset */
#define CTRL_RFCE	__BIT(27)	/* Rx flow control enable */
#define CTRL_TFCE	__BIT(28)	/* Tx flow control enable */
#define CTRL_VME	__BIT(30)	/* VLAN Mode Enable */
#define CTRL_PHY_RESET	__BIT(31)	/* PHY reset (Cordova) */

#define WMREG_CTRL_SHADOW 0x0004	/* Device Control Register (shadow) */
456 456
#define WMREG_STATUS	0x0008	/* Device Status Register */
#define STATUS_FD	__BIT(0)	/* full duplex */
#define STATUS_LU	__BIT(1)	/* link up */
#define STATUS_TCKOK	__BIT(2)	/* Tx clock running */
#define STATUS_RBCOK	__BIT(3)	/* Rx clock running */
#define STATUS_FUNCID_SHIFT 2		/* 82546 function ID */
#define STATUS_FUNCID_MASK 3		/* ... */
#define STATUS_TXOFF	__BIT(4)	/* Tx paused */
#define STATUS_TBIMODE	__BIT(5)	/* fiber mode (Livengood) */
#define STATUS_SPEED	__BITS(7, 6)	/* speed indication */
#define STATUS_SPEED_10		0
#define STATUS_SPEED_100	1
#define STATUS_SPEED_1000	2
#define STATUS_ASDV(x)	((x) << 8)	/* auto speed det. val. (Livengood) */
#define STATUS_LAN_INIT_DONE __BIT(9)	/* Lan Init Completion by NVM */
/* NOTE(review): bits 10, 12 and 13 are generation-dependent, hence the
 * duplicate defines below. */
#define STATUS_MTXCKOK	__BIT(10)	/* MTXD clock running */
#define STATUS_PHYRA	__BIT(10)	/* PHY Reset Asserted (PCH) */
#define STATUS_PCI66	__BIT(11)	/* 66MHz bus (Livengood) */
#define STATUS_BUS64	__BIT(12)	/* 64-bit bus (Livengood) */
#define STATUS_2P5_SKU	__BIT(12)	/* Value of the 2.5GBE SKU strap */
#define STATUS_PCIX_MODE __BIT(13)	/* PCIX mode (Cordova) */
#define STATUS_2P5_SKU_OVER __BIT(13)	/* Value of the 2.5GBE SKU override */
#define STATUS_PCIXSPD(x) ((x) << 14)	/* PCIX speed indication (Cordova) */
#define STATUS_PCIXSPD_50_66	STATUS_PCIXSPD(0)
#define STATUS_PCIXSPD_66_100	STATUS_PCIXSPD(1)
#define STATUS_PCIXSPD_100_133	STATUS_PCIXSPD(2)
#define STATUS_PCIXSPD_MASK	STATUS_PCIXSPD(3)
#define STATUS_GIO_M_ENA __BIT(19)	/* GIO master enable */
#define STATUS_DEV_RST_SET __BIT(20)	/* Device Reset Set */

/* Strapping Option Register (PCH_SPT and newer) */
/*
 * NOTE(review): the __BITS() arguments below are written (lo, hi),
 * opposite to the (hi, lo) convention used elsewhere in this file.
 * NetBSD's __BITS() accepts either order, so the masks come out the
 * same; left as-is to match upstream.
 */
#define WMREG_STRAP	0x000c
#define STRAP_NVMSIZE	__BITS(1, 6)
#define STRAP_FREQ	__BITS(12, 13)
#define STRAP_SMBUSADDR	__BITS(17, 23)
492 492
#define WMREG_EECD	0x0010	/* EEPROM Control Register */
#define EECD_SK		__BIT(0)	/* clock */
#define EECD_CS		__BIT(1)	/* chip select */
#define EECD_DI		__BIT(2)	/* data in */
#define EECD_DO		__BIT(3)	/* data out */
#define EECD_FWE(x)	((x) << 4)	/* flash write enable control */
#define EECD_FWE_DISABLED EECD_FWE(1)
#define EECD_FWE_ENABLED EECD_FWE(2)
#define EECD_EE_REQ	__BIT(6)	/* (shared) EEPROM request */
#define EECD_EE_GNT	__BIT(7)	/* (shared) EEPROM grant */
#define EECD_EE_PRES	__BIT(8)	/* EEPROM present */
#define EECD_EE_SIZE	__BIT(9)	/* EEPROM size
					   (0 = 64 word, 1 = 256 word) */
#define EECD_EE_AUTORD	__BIT(9)	/* auto read done */
#define EECD_EE_ABITS	__BIT(10)	/* EEPROM address bits
					   (based on type) */
#define EECD_EE_SIZE_EX_MASK __BITS(14,11)	/* EEPROM size for new devices */
#define EECD_EE_TYPE	__BIT(13)	/* EEPROM type
					   (0 = Microwire, 1 = SPI) */
#define EECD_SEC1VAL	__BIT(22)	/* Sector One Valid */
#define EECD_SEC1VAL_VALMASK (EECD_EE_AUTORD | EECD_EE_PRES) /* Valid Mask */

/*
 * Future Extended NVM 6.
 * NOTE(review): same offset (0x0010) as WMREG_EECD above — presumably
 * which register exists at this offset depends on the MAC type; confirm
 * against the callers in if_wm.c.
 */
#define WMREG_FEXTNVM6	0x0010
#define FEXTNVM6_REQ_PLL_CLK	__BIT(8)
#define FEXTNVM6_ENABLE_K1_ENTRY_CONDITION __BIT(9)
#define FEXTNVM6_K1_OFF_ENABLE	__BIT(31)

#define WMREG_EERD	0x0014	/* EEPROM read */
#define EERD_DONE	0x02	/* done bit */
#define EERD_START	0x01	/* First bit for telling part to start operation */
#define EERD_ADDR_SHIFT	2	/* Shift to the address bits */
#define EERD_DATA_SHIFT	16	/* Offset to data in EEPROM read/write registers */
525 525
#define WMREG_CTRL_EXT	0x0018	/* Extended Device Control Register */
#define CTRL_EXT_NSICR	__BIT(0)	/* Non Interrupt clear on read */
#define CTRL_EXT_GPI_EN(x) (1U << (x))	/* gpin interrupt enable */
#define CTRL_EXT_NVMVS	__BITS(0, 1)	/* NVM valid sector */
#define CTRL_EXT_LPCD	__BIT(2)	/* LCD Power Cycle Done */
#define CTRL_EXT_SWDPINS_SHIFT	4
#define CTRL_EXT_SWDPINS_MASK	0x0d
/* The bit order of the SW Definable pin is not 6543 but 3654! */
#define CTRL_EXT_SWDPIN(x)	(1U << (CTRL_EXT_SWDPINS_SHIFT \
		+ ((x) == 3 ? 3 : ((x) - 4))))
#define CTRL_EXT_SWDPIO_SHIFT	8
#define CTRL_EXT_SWDPIO_MASK	0x0d
/* Same 3654 pin numbering as CTRL_EXT_SWDPIN above. */
#define CTRL_EXT_SWDPIO(x)	(1U << (CTRL_EXT_SWDPIO_SHIFT \
		+ ((x) == 3 ? 3 : ((x) - 4))))
#define CTRL_EXT_FORCE_SMBUS	__BIT(11)	/* Force SMBus mode */
#define CTRL_EXT_ASDCHK	__BIT(12)	/* ASD check */
#define CTRL_EXT_EE_RST	__BIT(13)	/* EEPROM reset */
#define CTRL_EXT_IPS	__BIT(14)	/* invert power state bit 0 */
#define CTRL_EXT_SPD_BYPS __BIT(15)	/* speed select bypass */
#define CTRL_EXT_IPS1	__BIT(16)	/* invert power state bit 1 */
#define CTRL_EXT_RO_DIS	__BIT(17)	/* relaxed ordering disabled */
#define CTRL_EXT_SDLPE	__BIT(18)	/* SerDes Low Power Enable */
#define CTRL_EXT_DMA_DYN_CLK __BIT(19)	/* DMA Dynamic Gating Enable */
#define CTRL_EXT_PHYPDEN __BIT(20)
550#define CTRL_EXT_LINK_MODE_MASK 0x00c00000 550#define CTRL_EXT_LINK_MODE_MASK 0x00c00000
551#define CTRL_EXT_LINK_MODE_GMII 0x00000000 551#define CTRL_EXT_LINK_MODE_GMII 0x00000000
552#define CTRL_EXT_LINK_MODE_KMRN 0x00000000 552#define CTRL_EXT_LINK_MODE_KMRN 0x00000000
553#define CTRL_EXT_LINK_MODE_1000KX 0x00400000 553#define CTRL_EXT_LINK_MODE_1000KX 0x00400000
554#define CTRL_EXT_LINK_MODE_SGMII 0x00800000 554#define CTRL_EXT_LINK_MODE_SGMII 0x00800000
555#define CTRL_EXT_LINK_MODE_PCIX_SERDES 0x00800000 555#define CTRL_EXT_LINK_MODE_PCIX_SERDES 0x00800000
556#define CTRL_EXT_LINK_MODE_TBI 0x00c00000 556#define CTRL_EXT_LINK_MODE_TBI 0x00c00000
557#define CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00c00000 557#define CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00c00000
558#define CTRL_EXT_EIAME __BIT(24) /* Extended Interrupt Auto Mask En */ 558#define CTRL_EXT_EIAME __BIT(24) /* Extended Interrupt Auto Mask En */
559#define CTRL_EXT_I2C_ENA 0x02000000 /* I2C enable */ 559#define CTRL_EXT_I2C_ENA 0x02000000 /* I2C enable */
560#define CTRL_EXT_DRV_LOAD 0x10000000 560#define CTRL_EXT_DRV_LOAD 0x10000000
561#define CTRL_EXT_PBA __BIT(31) /* PBA Support */ 561#define CTRL_EXT_PBA __BIT(31) /* PBA Support */
562 562
563#define WMREG_MDIC 0x0020 /* MDI Control Register */ 563#define WMREG_MDIC 0x0020 /* MDI Control Register */
564#define MDIC_DATA(x) ((x) & 0xffff) 564#define MDIC_DATA(x) ((x) & 0xffff)
565#define MDIC_REGADD(x) ((x) << 16) 565#define MDIC_REGADD(x) ((x) << 16)
566#define MDIC_PHY_SHIFT 21 566#define MDIC_PHY_SHIFT 21
567#define MDIC_PHY_MASK __BITS(25, 21) 567#define MDIC_PHY_MASK __BITS(25, 21)
568#define MDIC_PHYADD(x) ((x) << 21) 568#define MDIC_PHYADD(x) ((x) << 21)
569 569
570#define MDIC_OP_RW_MASK __BITS(27, 26) 570#define MDIC_OP_RW_MASK __BITS(27, 26)
571#define MDIC_OP_WRITE __SHIFTIN(1, MDIC_OP_RW_MASK) 571#define MDIC_OP_WRITE __SHIFTIN(1, MDIC_OP_RW_MASK)
572#define MDIC_OP_READ __SHIFTIN(2, MDIC_OP_RW_MASK) 572#define MDIC_OP_READ __SHIFTIN(2, MDIC_OP_RW_MASK)
573#define MDIC_READY __BIT(28) 573#define MDIC_READY __BIT(28)
574#define MDIC_I __BIT(29) /* interrupt on MDI complete */ 574#define MDIC_I __BIT(29) /* interrupt on MDI complete */
575#define MDIC_E __BIT(30) /* MDI error */ 575#define MDIC_E __BIT(30) /* MDI error */
576#define MDIC_DEST __BIT(31) /* Destination */ 576#define MDIC_DEST __BIT(31) /* Destination */
577 577
578#define WMREG_SCTL 0x0024 /* SerDes Control - RW */ 578#define WMREG_SCTL 0x0024 /* SerDes Control - RW */
579/* 579/*
580 * These 4 macros are also used for other 8bit control registers on the 580 * These 4 macros are also used for other 8bit control registers on the
581 * 82575 581 * 82575
582 */ 582 */
583#define SCTL_CTL_READY __BIT(31) 583#define SCTL_CTL_READY __BIT(31)
584#define SCTL_CTL_DATA_MASK 0x000000ff 584#define SCTL_CTL_DATA_MASK 0x000000ff
585#define SCTL_CTL_ADDR_SHIFT 8 585#define SCTL_CTL_ADDR_SHIFT 8
586#define SCTL_CTL_POLL_TIMEOUT 640 586#define SCTL_CTL_POLL_TIMEOUT 640
587#define SCTL_DISABLE_SERDES_LOOPBACK 0x0400 587#define SCTL_DISABLE_SERDES_LOOPBACK 0x0400
588 588
589#define WMREG_FEXTNVM4 0x0024 /* Future Extended NVM 4 - RW */ 589#define WMREG_FEXTNVM4 0x0024 /* Future Extended NVM 4 - RW */
590#define FEXTNVM4_BEACON_DURATION __BITS(2, 0) 590#define FEXTNVM4_BEACON_DURATION __BITS(2, 0)
591#define FEXTNVM4_BEACON_DURATION_8US 0x7 591#define FEXTNVM4_BEACON_DURATION_8US 0x7
592#define FEXTNVM4_BEACON_DURATION_16US 0x3 592#define FEXTNVM4_BEACON_DURATION_16US 0x3
593 593
594#define WMREG_FCAL 0x0028 /* Flow Control Address Low */ 594#define WMREG_FCAL 0x0028 /* Flow Control Address Low */
595#define FCAL_CONST 0x00c28001 /* Flow Control MAC addr low */ 595#define FCAL_CONST 0x00c28001 /* Flow Control MAC addr low */
596 596
597#define WMREG_FEXTNVM 0x0028 /* Future Extended NVM register */ 597#define WMREG_FEXTNVM 0x0028 /* Future Extended NVM register */
598#define FEXTNVM_SW_CONFIG __BIT(0) /* SW PHY Config En (ICH8 B0) */ 598#define FEXTNVM_SW_CONFIG __BIT(0) /* SW PHY Config En (ICH8 B0) */
599#define FEXTNVM_SW_CONFIG_ICH8M __BIT(27) /* SW PHY Config En (>= ICH8 B1) */ 599#define FEXTNVM_SW_CONFIG_ICH8M __BIT(27) /* SW PHY Config En (>= ICH8 B1) */
600 600
601#define WMREG_FCAH 0x002c /* Flow Control Address High */ 601#define WMREG_FCAH 0x002c /* Flow Control Address High */
602#define FCAH_CONST 0x00000100 /* Flow Control MAC addr high */ 602#define FCAH_CONST 0x00000100 /* Flow Control MAC addr high */
603 603
604#define WMREG_FCT 0x0030 /* Flow Control Type */ 604#define WMREG_FCT 0x0030 /* Flow Control Type */
605 605
606#define WMREG_KUMCTRLSTA 0x0034 /* MAC-PHY interface - RW */ 606#define WMREG_KUMCTRLSTA 0x0034 /* MAC-PHY interface - RW */
607#define KUMCTRLSTA_MASK 0x0000ffff 607#define KUMCTRLSTA_MASK 0x0000ffff
608#define KUMCTRLSTA_OFFSET 0x001f0000 608#define KUMCTRLSTA_OFFSET 0x001f0000
609#define KUMCTRLSTA_OFFSET_SHIFT 16 609#define KUMCTRLSTA_OFFSET_SHIFT 16
610#define KUMCTRLSTA_REN 0x00200000 610#define KUMCTRLSTA_REN 0x00200000
611 611
612#define KUMCTRLSTA_OFFSET_FIFO_CTRL 0x00000000 612#define KUMCTRLSTA_OFFSET_FIFO_CTRL 0x00000000
613#define KUMCTRLSTA_OFFSET_CTRL 0x00000001 613#define KUMCTRLSTA_OFFSET_CTRL 0x00000001
614#define KUMCTRLSTA_OFFSET_INB_CTRL 0x00000002 614#define KUMCTRLSTA_OFFSET_INB_CTRL 0x00000002
615#define KUMCTRLSTA_OFFSET_DIAG 0x00000003 615#define KUMCTRLSTA_OFFSET_DIAG 0x00000003
616#define KUMCTRLSTA_OFFSET_TIMEOUTS 0x00000004 616#define KUMCTRLSTA_OFFSET_TIMEOUTS 0x00000004
617#define KUMCTRLSTA_OFFSET_K1_CONFIG 0x00000007 617#define KUMCTRLSTA_OFFSET_K1_CONFIG 0x00000007
618#define KUMCTRLSTA_OFFSET_INB_PARAM 0x00000009 618#define KUMCTRLSTA_OFFSET_INB_PARAM 0x00000009
619#define KUMCTRLSTA_OFFSET_HD_CTRL 0x00000010 619#define KUMCTRLSTA_OFFSET_HD_CTRL 0x00000010
620#define KUMCTRLSTA_OFFSET_M2P_SERDES 0x0000001e 620#define KUMCTRLSTA_OFFSET_M2P_SERDES 0x0000001e
621#define KUMCTRLSTA_OFFSET_M2P_MODES 0x0000001f 621#define KUMCTRLSTA_OFFSET_M2P_MODES 0x0000001f
622 622
623/* FIFO Control */ 623/* FIFO Control */
624#define KUMCTRLSTA_FIFO_CTRL_RX_BYPASS 0x0008 624#define KUMCTRLSTA_FIFO_CTRL_RX_BYPASS 0x0008
625#define KUMCTRLSTA_FIFO_CTRL_TX_BYPASS 0x0800 625#define KUMCTRLSTA_FIFO_CTRL_TX_BYPASS 0x0800
626 626
627/* In-Band Control */ 627/* In-Band Control */
628#define KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT 0x0500 628#define KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT 0x0500
629#define KUMCTRLSTA_INB_CTRL_DIS_PADDING 0x0010 629#define KUMCTRLSTA_INB_CTRL_DIS_PADDING 0x0010
630 630
631/* Diag */ 631/* Diag */
632#define KUMCTRLSTA_DIAG_NELPBK 0x1000 632#define KUMCTRLSTA_DIAG_NELPBK 0x1000
633 633
634/* K1 Config */ 634/* K1 Config */
635#define KUMCTRLSTA_K1_ENABLE 0x0002 635#define KUMCTRLSTA_K1_ENABLE 0x0002
636 636
637/* Half-Duplex Control */ 637/* Half-Duplex Control */
638#define KUMCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004 638#define KUMCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004
639#define KUMCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000 639#define KUMCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000
640 640
641/* M2P Modes */ 641/* M2P Modes */
642#define KUMCTRLSTA_OPMODE_MASK 0x000c 642#define KUMCTRLSTA_OPMODE_MASK 0x000c
643#define KUMCTRLSTA_OPMODE_INBAND_MDIO 0x0004 643#define KUMCTRLSTA_OPMODE_INBAND_MDIO 0x0004
644 644
645#define WMREG_CONNSW 0x0034 /* Copper/Fiber Switch Control (>= 82575) */ 645#define WMREG_CONNSW 0x0034 /* Copper/Fiber Switch Control (>= 82575) */
646#define CONNSW_AUTOSENSE_EN __BIT(0) /* Auto Sense Enable */ 646#define CONNSW_AUTOSENSE_EN __BIT(0) /* Auto Sense Enable */
647#define CONNSW_AUTOSENSE_CONF __BIT(1) /* Auto Sense Config Mode */ 647#define CONNSW_AUTOSENSE_CONF __BIT(1) /* Auto Sense Config Mode */
648#define CONNSW_ENRGSRC __BIT(2) /* SerDes Energy Detect Src */ 648#define CONNSW_ENRGSRC __BIT(2) /* SerDes Energy Detect Src */
649#define CONNSW_SERDESD __BIT(9) /* SerDes Signal Detect Ind. */ 649#define CONNSW_SERDESD __BIT(9) /* SerDes Signal Detect Ind. */
650#define CONNSW_PHYSD __BIT(10) /* PHY Signal Detect Ind. */ 650#define CONNSW_PHYSD __BIT(10) /* PHY Signal Detect Ind. */
651#define CONNSW_PHY_PDN __BIT(11) /* Internal PHY in powerdown */ 651#define CONNSW_PHY_PDN __BIT(11) /* Internal PHY in powerdown */
652 652
653#define WMREG_VET 0x0038 /* VLAN Ethertype */ 653#define WMREG_VET 0x0038 /* VLAN Ethertype */
654#define WMREG_MDPHYA 0x003c /* PHY address - RW */ 654#define WMREG_MDPHYA 0x003c /* PHY address - RW */
655 655
656#define WMREG_FEXTNVM3 0x003c /* Future Extended NVM 3 */ 656#define WMREG_FEXTNVM3 0x003c /* Future Extended NVM 3 */
657#define FEXTNVM3_PHY_CFG_COUNTER_MASK __BITS(27, 26) 657#define FEXTNVM3_PHY_CFG_COUNTER_MASK __BITS(27, 26)
658#define FEXTNVM3_PHY_CFG_COUNTER_50MS __BIT(27) 658#define FEXTNVM3_PHY_CFG_COUNTER_50MS __BIT(27)
659 659
660#define WMREG_RAL(x) (0x0040 + ((x) * 8)) /* Receive Address List */ 660#define WMREG_RAL(x) (0x0040 + ((x) * 8)) /* Receive Address List */
661#define WMREG_RAH(x) (WMREG_RAL(x) + 4) 661#define WMREG_RAH(x) (WMREG_RAL(x) + 4)
662#define WMREG_CORDOVA_RAL(x) (((x) <= 15) ? (0x5400 + ((x) * 8)) : \ 662#define WMREG_CORDOVA_RAL(x) (((x) <= 15) ? (0x5400 + ((x) * 8)) : \
663 (0x54e0 + (((x) - 16) * 8))) 663 (0x54e0 + (((x) - 16) * 8)))
664#define WMREG_CORDOVA_RAH(x) (WMREG_CORDOVA_RAL(x) + 4) 664#define WMREG_CORDOVA_RAH(x) (WMREG_CORDOVA_RAL(x) + 4)
665#define WMREG_SHRAL(x) (0x5438 + ((x) * 8)) 665#define WMREG_SHRAL(x) (0x5438 + ((x) * 8))
666#define WMREG_SHRAH(x) (WMREG_PCH_LPT_SHRAL(x) + 4) 666#define WMREG_SHRAH(x) (WMREG_PCH_LPT_SHRAL(x) + 4)
667#define WMREG_PCH_LPT_SHRAL(x) (0x5408 + ((x) * 8)) 667#define WMREG_PCH_LPT_SHRAL(x) (0x5408 + ((x) * 8))
668#define WMREG_PCH_LPT_SHRAH(x) (WMREG_PCH_LPT_SHRAL(x) + 4) 668#define WMREG_PCH_LPT_SHRAH(x) (WMREG_PCH_LPT_SHRAL(x) + 4)
669#define WMREG_RAL_LO(b, x) ((b) + ((x) << 3)) 669#define WMREG_RAL_LO(b, x) ((b) + ((x) << 3))
670#define WMREG_RAL_HI(b, x) (WMREG_RAL_LO(b, x) + 4) 670#define WMREG_RAL_HI(b, x) (WMREG_RAL_LO(b, x) + 4)
671 /* 671 /*
672 * Receive Address List: The LO part is the low-order 32-bits 672 * Receive Address List: The LO part is the low-order 32-bits
673 * of the MAC address. The HI part is the high-order 16-bits 673 * of the MAC address. The HI part is the high-order 16-bits
674 * along with a few control bits. 674 * along with a few control bits.
675 */ 675 */
676#define RAL_AS(x) ((x) << 16) /* address select */ 676#define RAL_AS(x) ((x) << 16) /* address select */
677#define RAL_AS_DEST RAL_AS(0) /* (cordova?) */ 677#define RAL_AS_DEST RAL_AS(0) /* (cordova?) */
678#define RAL_AS_SOURCE RAL_AS(1) /* (cordova?) */ 678#define RAL_AS_SOURCE RAL_AS(1) /* (cordova?) */
679#define RAL_RDR1 __BIT(30) /* put packet in alt. rx ring */ 679#define RAL_RDR1 __BIT(30) /* put packet in alt. rx ring */
680#define RAL_AV __BIT(31) /* entry is valid */ 680#define RAL_AV __BIT(31) /* entry is valid */
681 681
682#define WM_RAL_TABSIZE 15 /* RAL size for old devices */ 682#define WM_RAL_TABSIZE 15 /* RAL size for old devices */
683#define WM_RAL_TABSIZE_ICH8 7 /* RAL size for ICH* and PCH* */ 683#define WM_RAL_TABSIZE_ICH8 7 /* RAL size for ICH* and PCH* */
684#define WM_RAL_TABSIZE_PCH2 5 /* RAL size for PCH2 */ 684#define WM_RAL_TABSIZE_PCH2 5 /* RAL size for PCH2 */
685#define WM_RAL_TABSIZE_PCH_LPT 12 /* RAL size for PCH_LPT */ 685#define WM_RAL_TABSIZE_PCH_LPT 12 /* RAL size for PCH_LPT */
686#define WM_RAL_TABSIZE_82575 16 /* RAL size for 82575 */ 686#define WM_RAL_TABSIZE_82575 16 /* RAL size for 82575 */
687#define WM_RAL_TABSIZE_82576 24 /* RAL size for 82576 and 82580 */ 687#define WM_RAL_TABSIZE_82576 24 /* RAL size for 82576 and 82580 */
688#define WM_RAL_TABSIZE_I350 32 /* RAL size for I350 */ 688#define WM_RAL_TABSIZE_I350 32 /* RAL size for I350 */
689 689
690#define WMREG_ICR 0x00c0 /* Interrupt Cause Register */ 690#define WMREG_ICR 0x00c0 /* Interrupt Cause Register */
691#define ICR_TXDW __BIT(0) /* Tx desc written back */ 691#define ICR_TXDW __BIT(0) /* Tx desc written back */
692#define ICR_TXQE __BIT(1) /* Tx queue empty */ 692#define ICR_TXQE __BIT(1) /* Tx queue empty */
693#define ICR_LSC __BIT(2) /* link status change */ 693#define ICR_LSC __BIT(2) /* link status change */
694#define ICR_RXSEQ __BIT(3) /* receive sequence error */ 694#define ICR_RXSEQ __BIT(3) /* receive sequence error */
695#define ICR_RXDMT0 __BIT(4) /* Rx ring 0 nearly empty */ 695#define ICR_RXDMT0 __BIT(4) /* Rx ring 0 nearly empty */
696#define ICR_RXO __BIT(6) /* Rx overrun */ 696#define ICR_RXO __BIT(6) /* Rx overrun */
697#define ICR_RXT0 __BIT(7) /* Rx ring 0 timer */ 697#define ICR_RXT0 __BIT(7) /* Rx ring 0 timer */
698#define ICR_MDAC __BIT(9) /* MDIO access complete */ 698#define ICR_MDAC __BIT(9) /* MDIO access complete */
699#define ICR_RXCFG __BIT(10) /* Receiving /C/ */ 699#define ICR_RXCFG __BIT(10) /* Receiving /C/ */
700#define ICR_GPI(x) __BIT(11+(x)) /* general purpose interrupts */ 700#define ICR_GPI(x) __BIT(11+(x)) /* general purpose interrupts */
701#define ICR_RXQ(x) __BIT(20+(x)) /* 82574: Rx queue x interrupt x=0,1 */ 701#define ICR_RXQ(x) __BIT(20+(x)) /* 82574: Rx queue x interrupt x=0,1 */
702#define ICR_TXQ(x) __BIT(22+(x)) /* 82574: Tx queue x interrupt x=0,1 */ 702#define ICR_TXQ(x) __BIT(22+(x)) /* 82574: Tx queue x interrupt x=0,1 */
703#define ICR_OTHER __BIT(24) /* 82574: Other interrupt */ 703#define ICR_OTHER __BIT(24) /* 82574: Other interrupt */
704#define ICR_INT __BIT(31) /* device generated an interrupt */ 704#define ICR_INT __BIT(31) /* device generated an interrupt */
705 705
706#define WMREG_ITR 0x00c4 /* Interrupt Throttling Register */ 706#define WMREG_ITR 0x00c4 /* Interrupt Throttling Register */
707#define ITR_IVAL_MASK 0xffff /* Interval mask */ 707#define ITR_IVAL_MASK 0xffff /* Interval mask */
708#define ITR_IVAL_SHIFT 0 /* Interval shift */ 708#define ITR_IVAL_SHIFT 0 /* Interval shift */
709 709
710#define WMREG_ICS 0x00c8 /* Interrupt Cause Set Register */ 710#define WMREG_ICS 0x00c8 /* Interrupt Cause Set Register */
711 /* See ICR bits. */ 711 /* See ICR bits. */
712 712
713#define WMREG_IMS 0x00d0 /* Interrupt Mask Set Register */ 713#define WMREG_IMS 0x00d0 /* Interrupt Mask Set Register */
714 /* See ICR bits. */ 714 /* See ICR bits. */
715 715
716#define WMREG_IMC 0x00d8 /* Interrupt Mask Clear Register */ 716#define WMREG_IMC 0x00d8 /* Interrupt Mask Clear Register */
717 /* See ICR bits. */ 717 /* See ICR bits. */
718 718
719#define WMREG_EIAC_82574 0x00dc /* Interrupt Auto Clear Register */ 719#define WMREG_EIAC_82574 0x00dc /* Interrupt Auto Clear Register */
720#define WMREG_EIAC_82574_MSIX_MASK (ICR_RXQ(0) | ICR_RXQ(1) \ 720#define WMREG_EIAC_82574_MSIX_MASK (ICR_RXQ(0) | ICR_RXQ(1) \
721 | ICR_TXQ(0) | ICR_TXQ(1) | ICR_OTHER) 721 | ICR_TXQ(0) | ICR_TXQ(1) | ICR_OTHER)
722 722
723#define WMREG_FEXTNVM7 0x00e4 /* Future Extended NVM 7 */ 723#define WMREG_FEXTNVM7 0x00e4 /* Future Extended NVM 7 */
724#define FEXTNVM7_SIDE_CLK_UNGATE __BIT(2) 724#define FEXTNVM7_SIDE_CLK_UNGATE __BIT(2)
725#define FEXTNVM7_DIS_SMB_PERST __BIT(5) 725#define FEXTNVM7_DIS_SMB_PERST __BIT(5)
726#define FEXTNVM7_DIS_PB_READ __BIT(18) 726#define FEXTNVM7_DIS_PB_READ __BIT(18)
727 727
728#define WMREG_IVAR 0x00e4 /* Interrupt Vector Allocation Register */ 728#define WMREG_IVAR 0x00e4 /* Interrupt Vector Allocation Register */
729#define WMREG_IVAR0 0x01700 /* Interrupt Vector Allocation */ 729#define WMREG_IVAR0 0x01700 /* Interrupt Vector Allocation */
730#define IVAR_ALLOC_MASK __BITS(0, 6) /* Bit 5 and 6 are reserved */ 730#define IVAR_ALLOC_MASK __BITS(0, 6) /* Bit 5 and 6 are reserved */
731#define IVAR_VALID __BIT(7) 731#define IVAR_VALID __BIT(7)
732/* IVAR definitions for 82580 and newer */ 732/* IVAR definitions for 82580 and newer */
733#define WMREG_IVAR_Q(x) (WMREG_IVAR0 + ((x) / 2) * 4) 733#define WMREG_IVAR_Q(x) (WMREG_IVAR0 + ((x) / 2) * 4)
734#define IVAR_TX_MASK_Q(x) (0x000000ffUL << (((x) % 2) == 0 ? 8 : 24)) 734#define IVAR_TX_MASK_Q(x) (0x000000ffUL << (((x) % 2) == 0 ? 8 : 24))
735#define IVAR_RX_MASK_Q(x) (0x000000ffUL << (((x) % 2) == 0 ? 0 : 16)) 735#define IVAR_RX_MASK_Q(x) (0x000000ffUL << (((x) % 2) == 0 ? 0 : 16))
736/* IVAR definitions for 82576 */ 736/* IVAR definitions for 82576 */
737#define WMREG_IVAR_Q_82576(x) (WMREG_IVAR0 + ((x) & 0x7) * 4) 737#define WMREG_IVAR_Q_82576(x) (WMREG_IVAR0 + ((x) & 0x7) * 4)
738#define IVAR_TX_MASK_Q_82576(x) (0x000000ffUL << (((x) / 8) == 0 ? 8 : 24)) 738#define IVAR_TX_MASK_Q_82576(x) (0x000000ffUL << (((x) / 8) == 0 ? 8 : 24))
739#define IVAR_RX_MASK_Q_82576(x) (0x000000ffUL << (((x) / 8) == 0 ? 0 : 16)) 739#define IVAR_RX_MASK_Q_82576(x) (0x000000ffUL << (((x) / 8) == 0 ? 0 : 16))
740/* IVAR definitions for 82574 */ 740/* IVAR definitions for 82574 */
741#define IVAR_ALLOC_MASK_82574 __BITS(0, 2) 741#define IVAR_ALLOC_MASK_82574 __BITS(0, 2)
742#define IVAR_VALID_82574 __BIT(3) 742#define IVAR_VALID_82574 __BIT(3)
743#define IVAR_TX_MASK_Q_82574(x) (0x0000000fUL << ((x) == 0 ? 8 : 12)) 743#define IVAR_TX_MASK_Q_82574(x) (0x0000000fUL << ((x) == 0 ? 8 : 12))
744#define IVAR_RX_MASK_Q_82574(x) (0x0000000fUL << ((x) == 0 ? 0 : 4)) 744#define IVAR_RX_MASK_Q_82574(x) (0x0000000fUL << ((x) == 0 ? 0 : 4))
745#define IVAR_OTHER_MASK __BITS(16, 19) 745#define IVAR_OTHER_MASK __BITS(16, 19)
746#define IVAR_INT_ON_ALL_WB __BIT(31) 746#define IVAR_INT_ON_ALL_WB __BIT(31)
747 747
748#define WMREG_IVAR_MISC 0x01740 /* IVAR for other causes */ 748#define WMREG_IVAR_MISC 0x01740 /* IVAR for other causes */
749#define IVAR_MISC_TCPTIMER __BITS(0, 7) 749#define IVAR_MISC_TCPTIMER __BITS(0, 7)
750#define IVAR_MISC_OTHER __BITS(8, 15) 750#define IVAR_MISC_OTHER __BITS(8, 15)
751 751
752#define WMREG_SVCR 0x00f0 752#define WMREG_SVCR 0x00f0
753#define SVCR_OFF_EN __BIT(0) 753#define SVCR_OFF_EN __BIT(0)
754#define SVCR_OFF_MASKINT __BIT(12) 754#define SVCR_OFF_MASKINT __BIT(12)
755 755
756#define WMREG_SVT 0x00f4 756#define WMREG_SVT 0x00f4
757#define SVT_OFF_HWM __BITS(4, 0) 757#define SVT_OFF_HWM __BITS(4, 0)
758 758
759#define WMREG_LTRV 0x00f8 /* Latency Tolerance Reporting */ 759#define WMREG_LTRV 0x00f8 /* Latency Tolerance Reporting */
760#define LTRV_VALUE __BITS(9, 0) 760#define LTRV_VALUE __BITS(9, 0)
761#define LTRV_SCALE __BITS(12, 10) 761#define LTRV_SCALE __BITS(12, 10)
762#define LTRV_SCALE_MAX 5 762#define LTRV_SCALE_MAX 5
763#define LTRV_SNOOP_REQ __BIT(15) 763#define LTRV_SNOOP_REQ __BIT(15)
764#define LTRV_SEND __BIT(30) 764#define LTRV_SEND __BIT(30)
765#define LTRV_NONSNOOP __BITS(31, 16) 765#define LTRV_NONSNOOP __BITS(31, 16)
766#define LTRV_NONSNOOP_REQ __BIT(31) 766#define LTRV_NONSNOOP_REQ __BIT(31)
767 767
768#define WMREG_RCTL 0x0100 /* Receive Control */ 768#define WMREG_RCTL 0x0100 /* Receive Control */
769#define RCTL_EN __BIT(1) /* receiver enable */ 769#define RCTL_EN __BIT(1) /* receiver enable */
770#define RCTL_SBP __BIT(2) /* store bad packets */ 770#define RCTL_SBP __BIT(2) /* store bad packets */
771#define RCTL_UPE __BIT(3) /* unicast promisc. enable */ 771#define RCTL_UPE __BIT(3) /* unicast promisc. enable */
772#define RCTL_MPE __BIT(4) /* multicast promisc. enable */ 772#define RCTL_MPE __BIT(4) /* multicast promisc. enable */
773#define RCTL_LPE __BIT(5) /* large packet enable */ 773#define RCTL_LPE __BIT(5) /* large packet enable */
774#define RCTL_LBM(x) ((x) << 6) /* loopback mode */ 774#define RCTL_LBM(x) ((x) << 6) /* loopback mode */
775#define RCTL_LBM_NONE RCTL_LBM(0) 775#define RCTL_LBM_NONE RCTL_LBM(0)
776#define RCTL_LBM_PHY RCTL_LBM(3) 776#define RCTL_LBM_PHY RCTL_LBM(3)
777#define RCTL_RDMTS(x) ((x) << 8) /* receive desc. min thresh size */ 777#define RCTL_RDMTS(x) ((x) << 8) /* receive desc. min thresh size */
778#define RCTL_RDMTS_1_2 RCTL_RDMTS(0) 778#define RCTL_RDMTS_1_2 RCTL_RDMTS(0)
779#define RCTL_RDMTS_1_4 RCTL_RDMTS(1) 779#define RCTL_RDMTS_1_4 RCTL_RDMTS(1)
780#define RCTL_RDMTS_1_8 RCTL_RDMTS(2) 780#define RCTL_RDMTS_1_8 RCTL_RDMTS(2)
781#define RCTL_RDMTS_MASK RCTL_RDMTS(3) 781#define RCTL_RDMTS_MASK RCTL_RDMTS(3)
782#define RCTL_DTYP_MASK __BITS(11,10) /* descriptor type. 82574 only */ 782#define RCTL_DTYP_MASK __BITS(11,10) /* descriptor type. 82574 only */
783#define RCTL_DTYP(x) __SHIFTIN(x,RCTL_DTYP_MASK) 783#define RCTL_DTYP(x) __SHIFTIN(x,RCTL_DTYP_MASK)
784#define RCTL_DTYP_ONEBUF RCTL_DTYP(0) /* use one buffer(not split header). */ 784#define RCTL_DTYP_ONEBUF RCTL_DTYP(0) /* use one buffer(not split header). */
785#define RCTL_DTYP_SPH RCTL_DTYP(1) /* split header buffer. */ 785#define RCTL_DTYP_SPH RCTL_DTYP(1) /* split header buffer. */
786 /* RCTL_DTYP(2) and RCTL_DTYP(3) are reserved. */ 786 /* RCTL_DTYP(2) and RCTL_DTYP(3) are reserved. */
787#define RCTL_MO __BITS(13, 12) /* multicast offset */ 787#define RCTL_MO __BITS(13, 12) /* multicast offset */
788#define RCTL_BAM __BIT(15) /* broadcast accept mode */ 788#define RCTL_BAM __BIT(15) /* broadcast accept mode */
789#define RCTL_RDMTS_HEX __BIT(16) 789#define RCTL_RDMTS_HEX __BIT(16)
790#define RCTL_2k (0 << 16) /* 2k Rx buffers */ 790#define RCTL_2k (0 << 16) /* 2k Rx buffers */
791#define RCTL_1k (1 << 16) /* 1k Rx buffers */ 791#define RCTL_1k (1 << 16) /* 1k Rx buffers */
792#define RCTL_512 (2 << 16) /* 512 byte Rx buffers */ 792#define RCTL_512 (2 << 16) /* 512 byte Rx buffers */
793#define RCTL_256 (3 << 16) /* 256 byte Rx buffers */ 793#define RCTL_256 (3 << 16) /* 256 byte Rx buffers */
794#define RCTL_BSEX_16k (1 << 16) /* 16k Rx buffers (BSEX) */ 794#define RCTL_BSEX_16k (1 << 16) /* 16k Rx buffers (BSEX) */
795#define RCTL_BSEX_8k (2 << 16) /* 8k Rx buffers (BSEX) */ 795#define RCTL_BSEX_8k (2 << 16) /* 8k Rx buffers (BSEX) */
796#define RCTL_BSEX_4k (3 << 16) /* 4k Rx buffers (BSEX) */ 796#define RCTL_BSEX_4k (3 << 16) /* 4k Rx buffers (BSEX) */
797#define RCTL_DPF __BIT(22) /* discard pause frames */ 797#define RCTL_DPF __BIT(22) /* discard pause frames */
798#define RCTL_PMCF __BIT(23) /* pass MAC control frames */ 798#define RCTL_PMCF __BIT(23) /* pass MAC control frames */
799#define RCTL_BSEX __BIT(25) /* buffer size extension (Livengood) */ 799#define RCTL_BSEX __BIT(25) /* buffer size extension (Livengood) */
800#define RCTL_SECRC __BIT(26) /* strip Ethernet CRC */ 800#define RCTL_SECRC __BIT(26) /* strip Ethernet CRC */
801 801
802#define WMREG_OLD_RDTR0 0x0108 /* Receive Delay Timer (ring 0) */ 802#define WMREG_OLD_RDTR0 0x0108 /* Receive Delay Timer (ring 0) */
803#define WMREG_RDTR 0x2820 803#define WMREG_RDTR 0x2820
804#define RDTR_FPD __BIT(31) /* flush partial descriptor */ 804#define RDTR_FPD __BIT(31) /* flush partial descriptor */
805 805
806#define WMREG_LTRC 0x01a0 /* Latency Tolerance Reporting Control */ 806#define WMREG_LTRC 0x01a0 /* Latency Tolerance Reporting Control */
807 807
808#define WMREG_OLD_RDBAL0 0x0110 /* Receive Descriptor Base Low (ring 0) */ 808#define WMREG_OLD_RDBAL0 0x0110 /* Receive Descriptor Base Low (ring 0) */
809#define WMREG_RDBAL(x) \ 809#define WMREG_RDBAL(x) \
810 ((x) < 4 ? (0x02800 + ((x) * 0x100)) : \ 810 ((x) < 4 ? (0x02800 + ((x) * 0x100)) : \
811 (0x0c000 + ((x) * 0x40))) 811 (0x0c000 + ((x) * 0x40)))
812 812
813#define WMREG_OLD_RDBAH0 0x0114 /* Receive Descriptor Base High (ring 0) */ 813#define WMREG_OLD_RDBAH0 0x0114 /* Receive Descriptor Base High (ring 0) */
814#define WMREG_RDBAH(x) \ 814#define WMREG_RDBAH(x) \
815 ((x) < 4 ? (0x02804 + ((x) * 0x100)) : \ 815 ((x) < 4 ? (0x02804 + ((x) * 0x100)) : \
816 (0x0c004 + ((x) * 0x40))) 816 (0x0c004 + ((x) * 0x40)))
817 817
818#define WMREG_OLD_RDLEN0 0x0118 /* Receive Descriptor Length (ring 0) */ 818#define WMREG_OLD_RDLEN0 0x0118 /* Receive Descriptor Length (ring 0) */
819#define WMREG_RDLEN(x) \ 819#define WMREG_RDLEN(x) \
820 ((x) < 4 ? (0x02808 + ((x) * 0x100)) : \ 820 ((x) < 4 ? (0x02808 + ((x) * 0x100)) : \
821 (0x0c008 + ((x) * 0x40))) 821 (0x0c008 + ((x) * 0x40)))
822 822
823#define WMREG_SRRCTL(x) \ 823#define WMREG_SRRCTL(x) \
824 ((x) < 4 ? (0x0280c + ((x) * 0x100)) : \ 824 ((x) < 4 ? (0x0280c + ((x) * 0x100)) : \
825 (0x0c00c + ((x) * 0x40))) /* additional recv control used in 82575 ... */ 825 (0x0c00c + ((x) * 0x40))) /* additional recv control used in 82575 ... */
826#define SRRCTL_BSIZEPKT_MASK 0x0000007f 826#define SRRCTL_BSIZEPKT_MASK 0x0000007f
827#define SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ 827#define SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
828#define SRRCTL_BSIZEHDRSIZE_MASK 0x00000f00 828#define SRRCTL_BSIZEHDRSIZE_MASK 0x00000f00
829#define SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ 829#define SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */
830#define SRRCTL_DESCTYPE_LEGACY 0x00000000 830#define SRRCTL_DESCTYPE_LEGACY 0x00000000
831#define SRRCTL_DESCTYPE_ADV_ONEBUF (1U << 25) 831#define SRRCTL_DESCTYPE_ADV_ONEBUF (1U << 25)
832#define SRRCTL_DESCTYPE_HDR_SPLIT (2U << 25) 832#define SRRCTL_DESCTYPE_HDR_SPLIT (2U << 25)
833#define SRRCTL_DESCTYPE_HDR_REPLICATION (3U << 25) 833#define SRRCTL_DESCTYPE_HDR_REPLICATION (3U << 25)
834#define SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT (4U << 25) 834#define SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT (4U << 25)
835#define SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS (5U << 25) /* 82575 only */ 835#define SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS (5U << 25) /* 82575 only */
836#define SRRCTL_DESCTYPE_MASK (7U << 25) 836#define SRRCTL_DESCTYPE_MASK (7U << 25)
837#define SRRCTL_DROP_EN 0x80000000 837#define SRRCTL_DROP_EN 0x80000000
838 838
839#define WMREG_OLD_RDH0 0x0120 /* Receive Descriptor Head (ring 0) */ 839#define WMREG_OLD_RDH0 0x0120 /* Receive Descriptor Head (ring 0) */
840#define WMREG_RDH(x) \ 840#define WMREG_RDH(x) \
841 ((x) < 4 ? (0x02810 + ((x) * 0x100)) : \ 841 ((x) < 4 ? (0x02810 + ((x) * 0x100)) : \
842 (0x0c010 + ((x) * 0x40))) 842 (0x0c010 + ((x) * 0x40)))
843 843
844#define WMREG_OLD_RDT0 0x0128 /* Receive Descriptor Tail (ring 0) */ 844#define WMREG_OLD_RDT0 0x0128 /* Receive Descriptor Tail (ring 0) */
845#define WMREG_RDT(x) \ 845#define WMREG_RDT(x) \
846 ((x) < 4 ? (0x02818 + ((x) * 0x100)) : \ 846 ((x) < 4 ? (0x02818 + ((x) * 0x100)) : \
847 (0x0c018 + ((x) * 0x40))) 847 (0x0c018 + ((x) * 0x40)))
848 848
849#define WMREG_RXDCTL(x) \ 849#define WMREG_RXDCTL(x) \
850 ((x) < 4 ? (0x02828 + ((x) * 0x100)) : \ 850 ((x) < 4 ? (0x02828 + ((x) * 0x100)) : \
851 (0x0c028 + ((x) * 0x40))) /* Receive Descriptor Control */ 851 (0x0c028 + ((x) * 0x40))) /* Receive Descriptor Control */
852#define RXDCTL_PTHRESH(x) ((x) << 0) /* prefetch threshold */ 852#define RXDCTL_PTHRESH(x) ((x) << 0) /* prefetch threshold */
853#define RXDCTL_HTHRESH(x) ((x) << 8) /* host threshold */ 853#define RXDCTL_HTHRESH(x) ((x) << 8) /* host threshold */
854#define RXDCTL_WTHRESH(x) ((x) << 16) /* write back threshold */ 854#define RXDCTL_WTHRESH(x) ((x) << 16) /* write back threshold */
855#define RXDCTL_GRAN __BIT(24) /* 0 = cacheline, 1 = descriptor */ 855#define RXDCTL_GRAN __BIT(24) /* 0 = cacheline, 1 = descriptor */
856/* flags used starting with 82575 ... */ 856/* flags used starting with 82575 ... */
857#define RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */ 857#define RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */
858#define RXDCTL_SWFLSH 0x04000000 /* Rx Desc. write-back flushing */ 858#define RXDCTL_SWFLSH 0x04000000 /* Rx Desc. write-back flushing */
859 859
 860#define WMREG_RQDPC(x) (((x) < 4) ? (0x2830 + (0x100 * (x))) : \
 861 (0xc030 + (0x40 * (x)))) /* Receive Queue Drop Packet Count */
 862
860#define WMREG_OLD_RDTR1 0x0130 /* Receive Delay Timer (ring 1) */ 863#define WMREG_OLD_RDTR1 0x0130 /* Receive Delay Timer (ring 1) */
861#define WMREG_OLD_RDBA1_LO 0x0138 /* Receive Descriptor Base Low (ring 1) */ 864#define WMREG_OLD_RDBA1_LO 0x0138 /* Receive Descriptor Base Low (ring 1) */
862#define WMREG_OLD_RDBA1_HI 0x013c /* Receive Descriptor Base High (ring 1) */ 865#define WMREG_OLD_RDBA1_HI 0x013c /* Receive Descriptor Base High (ring 1) */
863#define WMREG_OLD_RDLEN1 0x0140 /* Receive Descriptor Length (ring 1) */ 866#define WMREG_OLD_RDLEN1 0x0140 /* Receive Descriptor Length (ring 1) */
864#define WMREG_OLD_RDH1 0x0148 867#define WMREG_OLD_RDH1 0x0148
865#define WMREG_OLD_RDT1 0x0150 868#define WMREG_OLD_RDT1 0x0150
866#define WMREG_OLD_FCRTH 0x0160 /* Flow Control Rx Threshold Hi (OLD) */ 869#define WMREG_OLD_FCRTH 0x0160 /* Flow Control Rx Threshold Hi (OLD) */
867#define WMREG_FCRTH 0x2168 /* Flow Control Rx Threshold Hi */ 870#define WMREG_FCRTH 0x2168 /* Flow Control Rx Threshold Hi */
868#define FCRTH_DFLT 0x00008000 871#define FCRTH_DFLT 0x00008000
869 872
870#define WMREG_OLD_FCRTL 0x0168 /* Flow Control Rx Threshold Lo (OLD) */ 873#define WMREG_OLD_FCRTL 0x0168 /* Flow Control Rx Threshold Lo (OLD) */
871#define WMREG_FCRTL 0x2160 /* Flow Control Rx Threshold Lo */ 874#define WMREG_FCRTL 0x2160 /* Flow Control Rx Threshold Lo */
872#define FCRTL_DFLT 0x00004000 875#define FCRTL_DFLT 0x00004000
873#define FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ 876#define FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
874 877
875#define WMREG_FCTTV 0x0170 /* Flow Control Transmit Timer Value */ 878#define WMREG_FCTTV 0x0170 /* Flow Control Transmit Timer Value */
876#define FCTTV_DFLT 0x00000600 879#define FCTTV_DFLT 0x00000600
877 880
878#define WMREG_TXCW 0x0178 /* Transmit Configuration Word (TBI mode) */ 881#define WMREG_TXCW 0x0178 /* Transmit Configuration Word (TBI mode) */
879 /* See MII ANAR_X bits. */ 882 /* See MII ANAR_X bits. */
880#define TXCW_FD __BIT(5) /* Full Duplex */ 883#define TXCW_FD __BIT(5) /* Full Duplex */
881#define TXCW_HD __BIT(6) /* Half Duplex */ 884#define TXCW_HD __BIT(6) /* Half Duplex */
882#define TXCW_SYM_PAUSE __BIT(7) /* sym pause request */ 885#define TXCW_SYM_PAUSE __BIT(7) /* sym pause request */
883#define TXCW_ASYM_PAUSE __BIT(8) /* asym pause request */ 886#define TXCW_ASYM_PAUSE __BIT(8) /* asym pause request */
884#define TXCW_TxConfig __BIT(30) /* Tx Config */ 887#define TXCW_TxConfig __BIT(30) /* Tx Config */
885#define TXCW_ANE __BIT(31) /* Autonegotiate */ 888#define TXCW_ANE __BIT(31) /* Autonegotiate */
886 889
887#define WMREG_RXCW 0x0180 /* Receive Configuration Word (TBI mode) */ 890#define WMREG_RXCW 0x0180 /* Receive Configuration Word (TBI mode) */
888 /* See MII ANLPAR_X bits. */ 891 /* See MII ANLPAR_X bits. */
889#define RXCW_NC __BIT(26) /* no carrier */ 892#define RXCW_NC __BIT(26) /* no carrier */
890#define RXCW_IV __BIT(27) /* config invalid */ 893#define RXCW_IV __BIT(27) /* config invalid */
891#define RXCW_CC __BIT(28) /* config change */ 894#define RXCW_CC __BIT(28) /* config change */
892#define RXCW_C __BIT(29) /* /C/ reception */ 895#define RXCW_C __BIT(29) /* /C/ reception */
893#define RXCW_SYNCH __BIT(30) /* synchronized */ 896#define RXCW_SYNCH __BIT(30) /* synchronized */
894#define RXCW_ANC __BIT(31) /* autonegotiation complete */ 897#define RXCW_ANC __BIT(31) /* autonegotiation complete */
895 898
896#define WMREG_MTA 0x0200 /* Multicast Table Array */ 899#define WMREG_MTA 0x0200 /* Multicast Table Array */
897#define WMREG_CORDOVA_MTA 0x5200 900#define WMREG_CORDOVA_MTA 0x5200
898 901
899#define WMREG_TCTL 0x0400 /* Transmit Control Register */ 902#define WMREG_TCTL 0x0400 /* Transmit Control Register */
900#define TCTL_EN __BIT(1) /* transmitter enable */ 903#define TCTL_EN __BIT(1) /* transmitter enable */
901#define TCTL_PSP __BIT(3) /* pad short packets */ 904#define TCTL_PSP __BIT(3) /* pad short packets */
902#define TCTL_CT(x) (((x) & 0xff) << 4) /* 4:11 - collision threshold */ 905#define TCTL_CT(x) (((x) & 0xff) << 4) /* 4:11 - collision threshold */
903#define TCTL_COLD(x) (((x) & 0x3ff) << 12) /* 12:21 - collision distance */ 906#define TCTL_COLD(x) (((x) & 0x3ff) << 12) /* 12:21 - collision distance */
904#define TCTL_SWXOFF __BIT(22) /* software XOFF */ 907#define TCTL_SWXOFF __BIT(22) /* software XOFF */
905#define TCTL_RTLC __BIT(24) /* retransmit on late collision */ 908#define TCTL_RTLC __BIT(24) /* retransmit on late collision */
906#define TCTL_NRTU __BIT(25) /* no retransmit on underrun */ 909#define TCTL_NRTU __BIT(25) /* no retransmit on underrun */
907#define TCTL_MULR __BIT(28) /* multiple request */ 910#define TCTL_MULR __BIT(28) /* multiple request */
908 911
909#define TX_COLLISION_THRESHOLD 15 912#define TX_COLLISION_THRESHOLD 15
910#define TX_COLLISION_DISTANCE_HDX 512 913#define TX_COLLISION_DISTANCE_HDX 512
911#define TX_COLLISION_DISTANCE_FDX 64 914#define TX_COLLISION_DISTANCE_FDX 64
912 915
913#define WMREG_TCTL_EXT 0x0404 /* Transmit Control Register Extended */ 916#define WMREG_TCTL_EXT 0x0404 /* Transmit Control Register Extended */
914#define TCTL_EXT_BST_MASK 0x000003ff /* Backoff Slot Time */ 917#define TCTL_EXT_BST_MASK 0x000003ff /* Backoff Slot Time */
915#define TCTL_EXT_GCEX_MASK 0x000ffc00 /* Gigabit Carry Extend Padding */ 918#define TCTL_EXT_GCEX_MASK 0x000ffc00 /* Gigabit Carry Extend Padding */
916 919
917#define DEFAULT_80003ES2LAN_TCTL_EXT_GCEX 0x00010000 920#define DEFAULT_80003ES2LAN_TCTL_EXT_GCEX 0x00010000
918 921
919#define WMREG_TIPG 0x0410 /* Transmit IPG Register */ 922#define WMREG_TIPG 0x0410 /* Transmit IPG Register */
920#define TIPG_IPGT(x) (x) /* IPG transmit time */ 923#define TIPG_IPGT(x) (x) /* IPG transmit time */
921#define TIPG_IPGR1(x) ((x) << 10) /* IPG receive time 1 */ 924#define TIPG_IPGR1(x) ((x) << 10) /* IPG receive time 1 */
922#define TIPG_IPGR2(x) ((x) << 20) /* IPG receive time 2 */ 925#define TIPG_IPGR2(x) ((x) << 20) /* IPG receive time 2 */
923#define TIPG_WM_DFLT (TIPG_IPGT(0x0a) | TIPG_IPGR1(0x02) | TIPG_IPGR2(0x0a)) 926#define TIPG_WM_DFLT (TIPG_IPGT(0x0a) | TIPG_IPGR1(0x02) | TIPG_IPGR2(0x0a))
924#define TIPG_LG_DFLT (TIPG_IPGT(0x06) | TIPG_IPGR1(0x08) | TIPG_IPGR2(0x06)) 927#define TIPG_LG_DFLT (TIPG_IPGT(0x06) | TIPG_IPGR1(0x08) | TIPG_IPGR2(0x06))
925#define TIPG_1000T_DFLT (TIPG_IPGT(0x08) | TIPG_IPGR1(0x08) | TIPG_IPGR2(0x06)) 928#define TIPG_1000T_DFLT (TIPG_IPGT(0x08) | TIPG_IPGR1(0x08) | TIPG_IPGR2(0x06))
926#define TIPG_1000T_80003_DFLT \ 929#define TIPG_1000T_80003_DFLT \
927 (TIPG_IPGT(0x08) | TIPG_IPGR1(0x02) | TIPG_IPGR2(0x07)) 930 (TIPG_IPGT(0x08) | TIPG_IPGR1(0x02) | TIPG_IPGR2(0x07))
928#define TIPG_10_100_80003_DFLT \ 931#define TIPG_10_100_80003_DFLT \
929 (TIPG_IPGT(0x09) | TIPG_IPGR1(0x02) | TIPG_IPGR2(0x07)) 932 (TIPG_IPGT(0x09) | TIPG_IPGR1(0x02) | TIPG_IPGR2(0x07))
930 933
931#define WMREG_TQC 0x0418 934#define WMREG_TQC 0x0418
932 935
933#define WMREG_OLD_TDBAL 0x0420 /* Transmit Descriptor Base Lo */ 936#define WMREG_OLD_TDBAL 0x0420 /* Transmit Descriptor Base Lo */
934#define WMREG_TDBAL(x) \ 937#define WMREG_TDBAL(x) \
935 ((x) < 4 ? (0x03800 + ((x) * 0x100)) : \ 938 ((x) < 4 ? (0x03800 + ((x) * 0x100)) : \
936 (0x0e000 + ((x) * 0x40))) 939 (0x0e000 + ((x) * 0x40)))
937 940
938#define WMREG_OLD_TDBAH 0x0424 /* Transmit Descriptor Base Hi */ 941#define WMREG_OLD_TDBAH 0x0424 /* Transmit Descriptor Base Hi */
939#define WMREG_TDBAH(x)\ 942#define WMREG_TDBAH(x)\
940 ((x) < 4 ? (0x03804 + ((x) * 0x100)) : \ 943 ((x) < 4 ? (0x03804 + ((x) * 0x100)) : \
941 (0x0e004 + ((x) * 0x40))) 944 (0x0e004 + ((x) * 0x40)))
942 945
943#define WMREG_OLD_TDLEN 0x0428 /* Transmit Descriptor Length */ 946#define WMREG_OLD_TDLEN 0x0428 /* Transmit Descriptor Length */
944#define WMREG_TDLEN(x) \ 947#define WMREG_TDLEN(x) \
945 ((x) < 4 ? (0x03808 + ((x) * 0x100)) : \ 948 ((x) < 4 ? (0x03808 + ((x) * 0x100)) : \
946 (0x0e008 + ((x) * 0x40))) 949 (0x0e008 + ((x) * 0x40)))
947 950
948#define WMREG_OLD_TDH 0x0430 /* Transmit Descriptor Head */ 951#define WMREG_OLD_TDH 0x0430 /* Transmit Descriptor Head */
949#define WMREG_TDH(x) \ 952#define WMREG_TDH(x) \
950 ((x) < 4 ? (0x03810 + ((x) * 0x100)) : \ 953 ((x) < 4 ? (0x03810 + ((x) * 0x100)) : \
951 (0x0e010 + ((x) * 0x40))) 954 (0x0e010 + ((x) * 0x40)))
952 955
953#define WMREG_OLD_TDT 0x0438 /* Transmit Descriptor Tail */ 956#define WMREG_OLD_TDT 0x0438 /* Transmit Descriptor Tail */
954#define WMREG_TDT(x) \ 957#define WMREG_TDT(x) \
955 ((x) < 4 ? (0x03818 + ((x) * 0x100)) : \ 958 ((x) < 4 ? (0x03818 + ((x) * 0x100)) : \
956 (0x0e018 + ((x) * 0x40))) 959 (0x0e018 + ((x) * 0x40)))
957 960
958#define WMREG_OLD_TIDV 0x0440 /* Transmit Delay Interrupt Value */ 961#define WMREG_OLD_TIDV 0x0440 /* Transmit Delay Interrupt Value */
959#define WMREG_TIDV 0x3820 962#define WMREG_TIDV 0x3820
960 963
961#define WMREG_AIT 0x0458 /* Adaptive IFS Throttle */ 964#define WMREG_AIT 0x0458 /* Adaptive IFS Throttle */
962#define WMREG_VFTA 0x0600 965#define WMREG_VFTA 0x0600
963 966
964#define WMREG_LEDCTL 0x0e00 /* LED Control - RW */ 967#define WMREG_LEDCTL 0x0e00 /* LED Control - RW */
965 968
966#define WMREG_MDICNFG 0x0e04 /* MDC/MDIO Configuration Register */ 969#define WMREG_MDICNFG 0x0e04 /* MDC/MDIO Configuration Register */
967#define MDICNFG_PHY_SHIFT 21 970#define MDICNFG_PHY_SHIFT 21
968#define MDICNFG_PHY_MASK __BITS(25, 21) 971#define MDICNFG_PHY_MASK __BITS(25, 21)
969#define MDICNFG_COM_MDIO __BIT(30) 972#define MDICNFG_COM_MDIO __BIT(30)
970#define MDICNFG_DEST __BIT(31) 973#define MDICNFG_DEST __BIT(31)
971 974
972#define WM_MC_TABSIZE 128 975#define WM_MC_TABSIZE 128
973#define WM_ICH8_MC_TABSIZE 32 976#define WM_ICH8_MC_TABSIZE 32
974#define WM_VLAN_TABSIZE 128 977#define WM_VLAN_TABSIZE 128
975 978
976#define WMREG_PHPM 0x0e14 /* PHY Power Management */ 979#define WMREG_PHPM 0x0e14 /* PHY Power Management */
977#define PHPM_SPD_EN __BIT(0) /* Smart Power Down */ 980#define PHPM_SPD_EN __BIT(0) /* Smart Power Down */
978#define PHPM_D0A_LPLU __BIT(1) /* D0 Low Power Link Up */ 981#define PHPM_D0A_LPLU __BIT(1) /* D0 Low Power Link Up */
979#define PHPM_NOND0A_LPLU __BIT(2) /* Non-D0a LPLU */ 982#define PHPM_NOND0A_LPLU __BIT(2) /* Non-D0a LPLU */
980#define PHPM_NOND0A_GBE_DIS __BIT(3) /* Disable 1G in non-D0a */ 983#define PHPM_NOND0A_GBE_DIS __BIT(3) /* Disable 1G in non-D0a */
981#define PHPM_GO_LINK_D __BIT(5) /* Go Link Disconnect */ 984#define PHPM_GO_LINK_D __BIT(5) /* Go Link Disconnect */
982 985
983#define WMREG_EEER 0x0e30 /* Energy Efficiency Ethernet "EEE" */ 986#define WMREG_EEER 0x0e30 /* Energy Efficiency Ethernet "EEE" */
984#define EEER_TX_LPI_EN 0x00010000 /* EEER Tx LPI Enable */ 987#define EEER_TX_LPI_EN 0x00010000 /* EEER Tx LPI Enable */
985#define EEER_RX_LPI_EN 0x00020000 /* EEER Rx LPI Enable */ 988#define EEER_RX_LPI_EN 0x00020000 /* EEER Rx LPI Enable */
986#define EEER_LPI_FC 0x00040000 /* EEER Ena on Flow Cntrl */ 989#define EEER_LPI_FC 0x00040000 /* EEER Ena on Flow Cntrl */
987#define EEER_EEER_NEG 0x20000000 /* EEER capability nego */ 990#define EEER_EEER_NEG 0x20000000 /* EEER capability nego */
988#define EEER_EEER_RX_LPI_STATUS 0x40000000 /* EEER Rx in LPI state */ 991#define EEER_EEER_RX_LPI_STATUS 0x40000000 /* EEER Rx in LPI state */
989#define EEER_EEER_TX_LPI_STATUS 0x80000000 /* EEER Tx in LPI state */ 992#define EEER_EEER_TX_LPI_STATUS 0x80000000 /* EEER Tx in LPI state */
990#define WMREG_EEE_SU 0x0e34 /* EEE Setup */ 993#define WMREG_EEE_SU 0x0e34 /* EEE Setup */
991#define WMREG_IPCNFG 0x0e38 /* Internal PHY Configuration */ 994#define WMREG_IPCNFG 0x0e38 /* Internal PHY Configuration */
992#define IPCNFG_10BASE_TE 0x00000002 /* IPCNFG 10BASE-Te low power op. */ 995#define IPCNFG_10BASE_TE 0x00000002 /* IPCNFG 10BASE-Te low power op. */
993#define IPCNFG_EEE_100M_AN 0x00000004 /* IPCNFG EEE Ena 100M AN */ 996#define IPCNFG_EEE_100M_AN 0x00000004 /* IPCNFG EEE Ena 100M AN */
994#define IPCNFG_EEE_1G_AN 0x00000008 /* IPCNFG EEE Ena 1G AN */ 997#define IPCNFG_EEE_1G_AN 0x00000008 /* IPCNFG EEE Ena 1G AN */
995 998
996#define WMREG_EXTCNFCTR 0x0f00 /* Extended Configuration Control */ 999#define WMREG_EXTCNFCTR 0x0f00 /* Extended Configuration Control */
997#define EXTCNFCTR_PCIE_WRITE_ENABLE 0x00000001 1000#define EXTCNFCTR_PCIE_WRITE_ENABLE 0x00000001
998#define EXTCNFCTR_OEM_WRITE_ENABLE 0x00000008 1001#define EXTCNFCTR_OEM_WRITE_ENABLE 0x00000008
999#define EXTCNFCTR_MDIO_SW_OWNERSHIP 0x00000020 1002#define EXTCNFCTR_MDIO_SW_OWNERSHIP 0x00000020
1000#define EXTCNFCTR_MDIO_HW_OWNERSHIP 0x00000040 1003#define EXTCNFCTR_MDIO_HW_OWNERSHIP 0x00000040
1001#define EXTCNFCTR_GATE_PHY_CFG 0x00000080 1004#define EXTCNFCTR_GATE_PHY_CFG 0x00000080
1002#define EXTCNFCTR_EXT_CNF_POINTER 0x0fff0000 1005#define EXTCNFCTR_EXT_CNF_POINTER 0x0fff0000
1003 1006
1004#define WMREG_EXTCNFSIZE 0x0f08 /* Extended Configuration Size */ 1007#define WMREG_EXTCNFSIZE 0x0f08 /* Extended Configuration Size */
1005#define EXTCNFSIZE_LENGTH __BITS(23, 16) 1008#define EXTCNFSIZE_LENGTH __BITS(23, 16)
1006 1009
1007#define WMREG_PHY_CTRL 0x0f10 /* PHY control */ 1010#define WMREG_PHY_CTRL 0x0f10 /* PHY control */
1008#define PHY_CTRL_SPD_EN (1 << 0) 1011#define PHY_CTRL_SPD_EN (1 << 0)
1009#define PHY_CTRL_D0A_LPLU (1 << 1) 1012#define PHY_CTRL_D0A_LPLU (1 << 1)
1010#define PHY_CTRL_NOND0A_LPLU (1 << 2) 1013#define PHY_CTRL_NOND0A_LPLU (1 << 2)
1011#define PHY_CTRL_NOND0A_GBE_DIS (1 << 3) 1014#define PHY_CTRL_NOND0A_GBE_DIS (1 << 3)
1012#define PHY_CTRL_GBE_DIS (1 << 6) 1015#define PHY_CTRL_GBE_DIS (1 << 6)
1013 1016
1014#define WMREG_PCIEANACFG 0x0f18 /* PCIE Analog Config */ 1017#define WMREG_PCIEANACFG 0x0f18 /* PCIE Analog Config */
1015 1018
1016#define WMREG_IOSFPC 0x0f28 /* Tx corrupted data */ 1019#define WMREG_IOSFPC 0x0f28 /* Tx corrupted data */
1017 1020
1018#define WMREG_PBA 0x1000 /* Packet Buffer Allocation */ 1021#define WMREG_PBA 0x1000 /* Packet Buffer Allocation */
1019#define PBA_BYTE_SHIFT 10 /* KB -> bytes */ 1022#define PBA_BYTE_SHIFT 10 /* KB -> bytes */
1020#define PBA_ADDR_SHIFT 7 /* KB -> quadwords */ 1023#define PBA_ADDR_SHIFT 7 /* KB -> quadwords */
1021#define PBA_8K 0x0008 1024#define PBA_8K 0x0008
1022#define PBA_10K 0x000a 1025#define PBA_10K 0x000a
1023#define PBA_12K 0x000c 1026#define PBA_12K 0x000c
1024#define PBA_14K 0x000e 1027#define PBA_14K 0x000e
1025#define PBA_16K 0x0010 /* 16K, default Tx allocation */ 1028#define PBA_16K 0x0010 /* 16K, default Tx allocation */
1026#define PBA_20K 0x0014 1029#define PBA_20K 0x0014
1027#define PBA_22K 0x0016 1030#define PBA_22K 0x0016
1028#define PBA_24K 0x0018 1031#define PBA_24K 0x0018
1029#define PBA_26K 0x001a 1032#define PBA_26K 0x001a
1030#define PBA_30K 0x001e 1033#define PBA_30K 0x001e
1031#define PBA_32K 0x0020 1034#define PBA_32K 0x0020
1032#define PBA_34K 0x0022 1035#define PBA_34K 0x0022
1033#define PBA_35K 0x0023 1036#define PBA_35K 0x0023
1034#define PBA_40K 0x0028 1037#define PBA_40K 0x0028
1035#define PBA_48K 0x0030 /* 48K, default Rx allocation */ 1038#define PBA_48K 0x0030 /* 48K, default Rx allocation */
1036#define PBA_64K 0x0040 1039#define PBA_64K 0x0040
1037#define PBA_RXA_MASK __BITS(15, 0) 1040#define PBA_RXA_MASK __BITS(15, 0)
1038 1041
1039#define WMREG_PBS 0x1008 /* Packet Buffer Size (ICH) */ 1042#define WMREG_PBS 0x1008 /* Packet Buffer Size (ICH) */
1040 1043
1041#define WMREG_PBECCSTS 0x100c /* Packet Buffer ECC Status (PCH_LPT) */ 1044#define WMREG_PBECCSTS 0x100c /* Packet Buffer ECC Status (PCH_LPT) */
1042#define PBECCSTS_CORR_ERR_CNT_MASK 0x000000ff 1045#define PBECCSTS_CORR_ERR_CNT_MASK 0x000000ff
1043#define PBECCSTS_UNCORR_ERR_CNT_MASK 0x0000ff00 1046#define PBECCSTS_UNCORR_ERR_CNT_MASK 0x0000ff00
1044#define PBECCSTS_UNCORR_ECC_ENABLE 0x00010000 1047#define PBECCSTS_UNCORR_ECC_ENABLE 0x00010000
1045 1048
1046#define WMREG_EEMNGCTL 0x1010 /* MNG EEprom Control */ 1049#define WMREG_EEMNGCTL 0x1010 /* MNG EEprom Control */
1047#define EEMNGCTL_CFGDONE_0 0x040000 /* MNG config cycle done */ 1050#define EEMNGCTL_CFGDONE_0 0x040000 /* MNG config cycle done */
1048#define EEMNGCTL_CFGDONE_1 0x080000 /* 2nd port */ 1051#define EEMNGCTL_CFGDONE_1 0x080000 /* 2nd port */
1049 1052
1050#define WMREG_I2CCMD 0x1028 /* SFPI2C Command Register - RW */ 1053#define WMREG_I2CCMD 0x1028 /* SFPI2C Command Register - RW */
1051#define I2CCMD_REG_ADDR_SHIFT 16 1054#define I2CCMD_REG_ADDR_SHIFT 16
1052#define I2CCMD_REG_ADDR 0x00ff0000 1055#define I2CCMD_REG_ADDR 0x00ff0000
1053#define I2CCMD_PHY_ADDR_SHIFT 24 1056#define I2CCMD_PHY_ADDR_SHIFT 24
1054#define I2CCMD_PHY_ADDR 0x07000000 1057#define I2CCMD_PHY_ADDR 0x07000000
1055#define I2CCMD_OPCODE_READ 0x08000000 1058#define I2CCMD_OPCODE_READ 0x08000000
1056#define I2CCMD_OPCODE_WRITE 0x00000000 1059#define I2CCMD_OPCODE_WRITE 0x00000000
1057#define I2CCMD_RESET 0x10000000 1060#define I2CCMD_RESET 0x10000000
1058#define I2CCMD_READY 0x20000000 1061#define I2CCMD_READY 0x20000000
1059#define I2CCMD_INTERRUPT_ENA 0x40000000 1062#define I2CCMD_INTERRUPT_ENA 0x40000000
1060#define I2CCMD_ERROR 0x80000000 1063#define I2CCMD_ERROR 0x80000000
1061#define MAX_SGMII_PHY_REG_ADDR 255 1064#define MAX_SGMII_PHY_REG_ADDR 255
1062#define I2CCMD_PHY_TIMEOUT 200 1065#define I2CCMD_PHY_TIMEOUT 200
1063 1066
1064#define WMREG_EEWR 0x102c /* EEPROM write */ 1067#define WMREG_EEWR 0x102c /* EEPROM write */
1065 1068
1066#define WMREG_PBA_ECC 0x01100 /* PBA ECC */ 1069#define WMREG_PBA_ECC 0x01100 /* PBA ECC */
1067#define PBA_ECC_COUNTER_MASK 0xfff00000 /* ECC counter mask */ 1070#define PBA_ECC_COUNTER_MASK 0xfff00000 /* ECC counter mask */
1068#define PBA_ECC_COUNTER_SHIFT 20 /* ECC counter shift value */ 1071#define PBA_ECC_COUNTER_SHIFT 20 /* ECC counter shift value */
1069#define PBA_ECC_CORR_EN 0x00000001 /* Enable ECC error correction */ 1072#define PBA_ECC_CORR_EN 0x00000001 /* Enable ECC error correction */
1070#define PBA_ECC_STAT_CLR 0x00000002 /* Clear ECC error counter */ 1073#define PBA_ECC_STAT_CLR 0x00000002 /* Clear ECC error counter */
1071#define PBA_ECC_INT_EN 0x00000004 /* Enable ICR bit 5 on ECC error */ 1074#define PBA_ECC_INT_EN 0x00000004 /* Enable ICR bit 5 on ECC error */
1072 1075
1073#define WMREG_GPIE 0x01514 /* General Purpose Interrupt Enable */ 1076#define WMREG_GPIE 0x01514 /* General Purpose Interrupt Enable */
1074#define GPIE_NSICR __BIT(0) /* Non Selective Interrupt Clear */ 1077#define GPIE_NSICR __BIT(0) /* Non Selective Interrupt Clear */
1075#define GPIE_MULTI_MSIX __BIT(4) /* Multiple MSIX */ 1078#define GPIE_MULTI_MSIX __BIT(4) /* Multiple MSIX */
1076#define GPIE_EIAME __BIT(30) /* Extended Interrupt Auto Mask Ena. */ 1079#define GPIE_EIAME __BIT(30) /* Extended Interrupt Auto Mask Ena. */
1077#define GPIE_PBA __BIT(31) /* PBA support */ 1080#define GPIE_PBA __BIT(31) /* PBA support */
1078 1081
1079#define WMREG_EICS 0x01520 /* Ext. Interrupt Cause Set - WO */ 1082#define WMREG_EICS 0x01520 /* Ext. Interrupt Cause Set - WO */
1080#define WMREG_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ 1083#define WMREG_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */
1081#define WMREG_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ 1084#define WMREG_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */
1082#define WMREG_EIAC 0x0152c /* Ext. Interrupt Auto Clear - RW */ 1085#define WMREG_EIAC 0x0152c /* Ext. Interrupt Auto Clear - RW */
1083#define WMREG_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */ 1086#define WMREG_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */
1084 1087
1085#define WMREG_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ 1088#define WMREG_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */
1086 1089
1087#define WMREG_MSIXBM(x) (0x1600 + (x) * 4) /* MSI-X Allocation */ 1090#define WMREG_MSIXBM(x) (0x1600 + (x) * 4) /* MSI-X Allocation */
1088 1091
1089#define EITR_RX_QUEUE(x) __BIT(0+(x)) /* Rx Queue x Interrupt x=[0-3] */ 1092#define EITR_RX_QUEUE(x) __BIT(0+(x)) /* Rx Queue x Interrupt x=[0-3] */
1090#define EITR_TX_QUEUE(x) __BIT(8+(x)) /* Tx Queue x Interrupt x=[0-3] */ 1093#define EITR_TX_QUEUE(x) __BIT(8+(x)) /* Tx Queue x Interrupt x=[0-3] */
1091#define EITR_TCP_TIMER 0x40000000 /* TCP Timer */ 1094#define EITR_TCP_TIMER 0x40000000 /* TCP Timer */
1092#define EITR_OTHER 0x80000000 /* Interrupt Cause Active */ 1095#define EITR_OTHER 0x80000000 /* Interrupt Cause Active */
1093 1096
1094#define WMREG_EITR(x) (0x01680 + (0x4 * (x))) 1097#define WMREG_EITR(x) (0x01680 + (0x4 * (x)))
1095#define EITR_ITR_INT_MASK __BITS(14,2) 1098#define EITR_ITR_INT_MASK __BITS(14,2)
1096#define EITR_COUNTER_MASK_82575 __BITS(31,16) 1099#define EITR_COUNTER_MASK_82575 __BITS(31,16)
1097#define EITR_CNT_INGR __BIT(31) /* does not overwrite counter */ 1100#define EITR_CNT_INGR __BIT(31) /* does not overwrite counter */
1098 1101
1099#define WMREG_EITR_82574(x) (0x000e8 + (0x4 * (x))) 1102#define WMREG_EITR_82574(x) (0x000e8 + (0x4 * (x)))
1100#define EITR_ITR_INT_MASK_82574 __BITS(15, 0) 1103#define EITR_ITR_INT_MASK_82574 __BITS(15, 0)
1101 1104
1102#define WMREG_RXPBS 0x2404 /* Rx Packet Buffer Size */ 1105#define WMREG_RXPBS 0x2404 /* Rx Packet Buffer Size */
1103#define RXPBS_SIZE_MASK_82576 0x0000007f 1106#define RXPBS_SIZE_MASK_82576 0x0000007f
1104 1107
1105#define WMREG_RDFH 0x2410 /* Receive Data FIFO Head */ 1108#define WMREG_RDFH 0x2410 /* Receive Data FIFO Head */
1106#define WMREG_RDFT 0x2418 /* Receive Data FIFO Tail */ 1109#define WMREG_RDFT 0x2418 /* Receive Data FIFO Tail */
1107#define WMREG_RDFHS 0x2420 /* Receive Data FIFO Head Saved */ 1110#define WMREG_RDFHS 0x2420 /* Receive Data FIFO Head Saved */
1108#define WMREG_RDFTS 0x2428 /* Receive Data FIFO Tail Saved */ 1111#define WMREG_RDFTS 0x2428 /* Receive Data FIFO Tail Saved */
1109#define WMREG_RADV 0x282c /* Receive Interrupt Absolute Delay Timer */ 1112#define WMREG_RADV 0x282c /* Receive Interrupt Absolute Delay Timer */
1110 1113
1111#define WMREG_TXDMAC 0x3000 /* Transfer DMA Control */ 1114#define WMREG_TXDMAC 0x3000 /* Transfer DMA Control */
1112#define TXDMAC_DPP __BIT(0) /* disable packet prefetch */ 1115#define TXDMAC_DPP __BIT(0) /* disable packet prefetch */
1113 1116
1114#define WMREG_KABGTXD 0x3004 /* AFE and Gap Transmit Ref Data */ 1117#define WMREG_KABGTXD 0x3004 /* AFE and Gap Transmit Ref Data */
1115#define KABGTXD_BGSQLBIAS 0x00050000 1118#define KABGTXD_BGSQLBIAS 0x00050000
1116 1119
1117#define WMREG_TDFH 0x3410 /* Transmit Data FIFO Head */ 1120#define WMREG_TDFH 0x3410 /* Transmit Data FIFO Head */
1118#define WMREG_TDFT 0x3418 /* Transmit Data FIFO Tail */ 1121#define WMREG_TDFT 0x3418 /* Transmit Data FIFO Tail */
1119#define WMREG_TDFHS 0x3420 /* Transmit Data FIFO Head Saved */ 1122#define WMREG_TDFHS 0x3420 /* Transmit Data FIFO Head Saved */
1120#define WMREG_TDFTS 0x3428 /* Transmit Data FIFO Tail Saved */ 1123#define WMREG_TDFTS 0x3428 /* Transmit Data FIFO Tail Saved */
1121#define WMREG_TDFPC 0x3430 /* Transmit Data FIFO Packet Count */ 1124#define WMREG_TDFPC 0x3430 /* Transmit Data FIFO Packet Count */
1122 1125
1123#define WMREG_TXDCTL(n) /* Transmit Descriptor Control */ \ 1126#define WMREG_TXDCTL(n) /* Transmit Descriptor Control */ \
1124 (((n) < 4) ? (0x3828 + ((n) * 0x100)) : (0xe028 + ((n) * 0x40))) 1127 (((n) < 4) ? (0x3828 + ((n) * 0x100)) : (0xe028 + ((n) * 0x40)))
1125#define TXDCTL_PTHRESH(x) ((x) << 0) /* prefetch threshold */ 1128#define TXDCTL_PTHRESH(x) ((x) << 0) /* prefetch threshold */
1126#define TXDCTL_HTHRESH(x) ((x) << 8) /* host threshold */ 1129#define TXDCTL_HTHRESH(x) ((x) << 8) /* host threshold */
1127#define TXDCTL_WTHRESH(x) ((x) << 16) /* write back threshold */ 1130#define TXDCTL_WTHRESH(x) ((x) << 16) /* write back threshold */
1128/* flags used starting with 82575 ... */ 1131/* flags used starting with 82575 ... */
1129#define TXDCTL_COUNT_DESC __BIT(22) /* Enable the counting of desc. 1132#define TXDCTL_COUNT_DESC __BIT(22) /* Enable the counting of desc.
1130 still to be processed. */ 1133 still to be processed. */
1131#define TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */ 1134#define TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */
1132#define TXDCTL_SWFLSH 0x04000000 /* Tx Desc. write-back flushing */ 1135#define TXDCTL_SWFLSH 0x04000000 /* Tx Desc. write-back flushing */
1133#define TXDCTL_PRIORITY 0x08000000 1136#define TXDCTL_PRIORITY 0x08000000
1134 1137
1135#define WMREG_TADV 0x382c /* Transmit Absolute Interrupt Delay Timer */ 1138#define WMREG_TADV 0x382c /* Transmit Absolute Interrupt Delay Timer */
1136#define WMREG_TSPMT 0x3830 /* TCP Segmentation Pad and Minimum 1139#define WMREG_TSPMT 0x3830 /* TCP Segmentation Pad and Minimum
1137 Threshold (Cordova) */ 1140 Threshold (Cordova) */
1138#define TSPMT_TSMT(x) (x) /* TCP seg min transfer */ 1141#define TSPMT_TSMT(x) (x) /* TCP seg min transfer */
1139#define TSPMT_TSPBP(x) ((x) << 16) /* TCP seg pkt buf padding */ 1142#define TSPMT_TSPBP(x) ((x) << 16) /* TCP seg pkt buf padding */
1140 1143
1141#define WMREG_TARC0 0x3840 /* Tx arbitration count (0) */ 1144#define WMREG_TARC0 0x3840 /* Tx arbitration count (0) */
1142#define WMREG_TARC1 0x3940 /* Tx arbitration count (1) */ 1145#define WMREG_TARC1 0x3940 /* Tx arbitration count (1) */
1143 1146
1144#define WMREG_CRCERRS 0x4000 /* CRC Error Count */ 1147#define WMREG_CRCERRS 0x4000 /* CRC Error Count */
1145#define WMREG_ALGNERRC 0x4004 /* Alignment Error Count */ 1148#define WMREG_ALGNERRC 0x4004 /* Alignment Error Count */
1146#define WMREG_SYMERRC 0x4008 /* Symbol Error Count */ 1149#define WMREG_SYMERRC 0x4008 /* Symbol Error Count */
1147#define WMREG_RXERRC 0x400c /* Receive error Count - R/clr */ 1150#define WMREG_RXERRC 0x400c /* Receive error Count - R/clr */
1148#define WMREG_MPC 0x4010 /* Missed Packets Count - R/clr */ 1151#define WMREG_MPC 0x4010 /* Missed Packets Count - R/clr */
1149#define WMREG_SCC 0x4014 /* Single Collision Count - R/clr */ 1152#define WMREG_SCC 0x4014 /* Single Collision Count - R/clr */
1150#define WMREG_ECOL 0x4018 /* Excessive Collisions Count - R/clr */ 1153#define WMREG_ECOL 0x4018 /* Excessive Collisions Count - R/clr */
1151#define WMREG_MCC 0x401c /* Multiple Collision Count - R/clr */ 1154#define WMREG_MCC 0x401c /* Multiple Collision Count - R/clr */
1152#define WMREG_LATECOL 0x4020 /* Late Collisions Count - R/clr */ 1155#define WMREG_LATECOL 0x4020 /* Late Collisions Count - R/clr */
1153#define WMREG_COLC 0x4028 /* Collision Count - R/clr */ 1156#define WMREG_COLC 0x4028 /* Collision Count - R/clr */
1154#define WMREG_CBTMPC 0x402c /* Circuit Breaker Tx Manageability Packet */ 1157#define WMREG_CBTMPC 0x402c /* Circuit Breaker Tx Manageability Packet */
1155#define WMREG_DC 0x4030 /* Defer Count - R/clr */ 1158#define WMREG_DC 0x4030 /* Defer Count - R/clr */
1156#define WMREG_TNCRS 0x4034 /* Tx with No CRS - R/clr */ 1159#define WMREG_TNCRS 0x4034 /* Tx with No CRS - R/clr */
1157#define WMREG_SEC 0x4038 /* Sequence Error Count */ 1160#define WMREG_SEC 0x4038 /* Sequence Error Count */
1158 1161
1159/* Old */ 1162/* Old */
1160#define WMREG_CEXTERR 0x403c /* Carrier Extension Error Count */ 1163#define WMREG_CEXTERR 0x403c /* Carrier Extension Error Count */
1161/* New */ 1164/* New */
1162#define WMREG_HTDPMC 0x403c /* Host Tx Discarded Packets by MAC Count */ 1165#define WMREG_HTDPMC 0x403c /* Host Tx Discarded Packets by MAC Count */
1163 1166
1164#define WMREG_RLEC 0x4040 /* Receive Length Error Count */ 1167#define WMREG_RLEC 0x4040 /* Receive Length Error Count */
1165#define WMREG_CBRDPC 0x4044 /* Circuit Breaker Rx Dropped Packet Count */ 1168#define WMREG_CBRDPC 0x4044 /* Circuit Breaker Rx Dropped Packet Count */
1166#define WMREG_XONRXC 0x4048 /* XON Rx Count - R/clr */ 1169#define WMREG_XONRXC 0x4048 /* XON Rx Count - R/clr */
1167#define WMREG_XONTXC 0x404c /* XON Tx Count - R/clr */ 1170#define WMREG_XONTXC 0x404c /* XON Tx Count - R/clr */
1168#define WMREG_XOFFRXC 0x4050 /* XOFF Rx Count - R/clr */ 1171#define WMREG_XOFFRXC 0x4050 /* XOFF Rx Count - R/clr */
1169#define WMREG_XOFFTXC 0x4054 /* XOFF Tx Count - R/clr */ 1172#define WMREG_XOFFTXC 0x4054 /* XOFF Tx Count - R/clr */
1170#define WMREG_FCRUC 0x4058 /* Flow Control Rx Unsupported Count - R/clr */ 1173#define WMREG_FCRUC 0x4058 /* Flow Control Rx Unsupported Count - R/clr */
1171#define WMREG_PRC64 0x405c /* Packets Rx (64 bytes) - R/clr */ 1174#define WMREG_PRC64 0x405c /* Packets Rx (64 bytes) - R/clr */
1172#define WMREG_PRC127 0x4060 /* Packets Rx (65-127 bytes) - R/clr */ 1175#define WMREG_PRC127 0x4060 /* Packets Rx (65-127 bytes) - R/clr */
1173#define WMREG_PRC255 0x4064 /* Packets Rx (128-255 bytes) - R/clr */ 1176#define WMREG_PRC255 0x4064 /* Packets Rx (128-255 bytes) - R/clr */
1174#define WMREG_PRC511 0x4068 /* Packets Rx (255-511 bytes) - R/clr */ 1177#define WMREG_PRC511 0x4068 /* Packets Rx (255-511 bytes) - R/clr */
1175#define WMREG_PRC1023 0x406c /* Packets Rx (512-1023 bytes) - R/clr */ 1178#define WMREG_PRC1023 0x406c /* Packets Rx (512-1023 bytes) - R/clr */
1176#define WMREG_PRC1522 0x4070 /* Packets Rx (1024-1522 bytes) - R/clr */ 1179#define WMREG_PRC1522 0x4070 /* Packets Rx (1024-1522 bytes) - R/clr */
1177#define WMREG_GPRC 0x4074 /* Good Packets Rx Count - R/clr */ 1180#define WMREG_GPRC 0x4074 /* Good Packets Rx Count - R/clr */
1178#define WMREG_BPRC 0x4078 /* Broadcast Packets Rx Count - R/clr */ 1181#define WMREG_BPRC 0x4078 /* Broadcast Packets Rx Count - R/clr */
1179#define WMREG_MPRC 0x407c /* Multicast Packets Rx Count - R/clr */ 1182#define WMREG_MPRC 0x407c /* Multicast Packets Rx Count - R/clr */
1180#define WMREG_GPTC 0x4080 /* Good Packets Tx Count - R/clr */ 1183#define WMREG_GPTC 0x4080 /* Good Packets Tx Count - R/clr */
1181#define WMREG_GORCL 0x4088 /* Good Octets Rx Count Low - R/clr */ 1184#define WMREG_GORCL 0x4088 /* Good Octets Rx Count Low - R/clr */
1182#define WMREG_GORCH 0x408c /* Good Octets Rx Count High - R/clr */ 1185#define WMREG_GORCH 0x408c /* Good Octets Rx Count High - R/clr */
1183#define WMREG_GOTCL 0x4090 /* Good Octets Tx Count Low - R/clr */ 1186#define WMREG_GOTCL 0x4090 /* Good Octets Tx Count Low - R/clr */
1184#define WMREG_GOTCH 0x4094 /* Good Octets Tx Count High - R/clr */ 1187#define WMREG_GOTCH 0x4094 /* Good Octets Tx Count High - R/clr */
1185#define WMREG_RNBC 0x40a0 /* Receive No Buffers Count */ 1188#define WMREG_RNBC 0x40a0 /* Receive No Buffers Count */
1186#define WMREG_RUC 0x40a4 /* Rx Undersize Count - R/clr */ 1189#define WMREG_RUC 0x40a4 /* Rx Undersize Count - R/clr */
1187#define WMREG_RFC 0x40a8 /* Rx Fragment Count - R/clr */ 1190#define WMREG_RFC 0x40a8 /* Rx Fragment Count - R/clr */
1188#define WMREG_ROC 0x40ac /* Rx Oversize Count - R/clr */ 1191#define WMREG_ROC 0x40ac /* Rx Oversize Count - R/clr */
1189#define WMREG_RJC 0x40b0 /* Rx Jabber Count - R/clr */ 1192#define WMREG_RJC 0x40b0 /* Rx Jabber Count - R/clr */
1190#define WMREG_MGTPRC 0x40b4 /* Management Packets RX Count - R/clr */ 1193#define WMREG_MGTPRC 0x40b4 /* Management Packets RX Count - R/clr */
1191#define WMREG_MGTPDC 0x40b8 /* Management Packets Dropped Count - R/clr */ 1194#define WMREG_MGTPDC 0x40b8 /* Management Packets Dropped Count - R/clr */
1192#define WMREG_MGTPTC 0x40bc /* Management Packets TX Count - R/clr */ 1195#define WMREG_MGTPTC 0x40bc /* Management Packets TX Count - R/clr */
1193#define WMREG_TORL 0x40c0 /* Total Octets Rx Low - R/clr */ 1196#define WMREG_TORL 0x40c0 /* Total Octets Rx Low - R/clr */
1194#define WMREG_TORH 0x40c4 /* Total Octets Rx High - R/clr */ 1197#define WMREG_TORH 0x40c4 /* Total Octets Rx High - R/clr */
1195#define WMREG_TOTL 0x40c8 /* Total Octets Tx Low - R/clr */ 1198#define WMREG_TOTL 0x40c8 /* Total Octets Tx Low - R/clr */
1196#define WMREG_TOTH 0x40cc /* Total Octets Tx High - R/clr */ 1199#define WMREG_TOTH 0x40cc /* Total Octets Tx High - R/clr */
1197#define WMREG_TPR 0x40d0 /* Total Packets Rx - R/clr */ 1200#define WMREG_TPR 0x40d0 /* Total Packets Rx - R/clr */
1198#define WMREG_TPT 0x40d4 /* Total Packets Tx - R/clr */ 1201#define WMREG_TPT 0x40d4 /* Total Packets Tx - R/clr */
1199#define WMREG_PTC64 0x40d8 /* Packets Tx (64 bytes) - R/clr */ 1202#define WMREG_PTC64 0x40d8 /* Packets Tx (64 bytes) - R/clr */
1200#define WMREG_PTC127 0x40dc /* Packets Tx (65-127 bytes) - R/clr */ 1203#define WMREG_PTC127 0x40dc /* Packets Tx (65-127 bytes) - R/clr */
1201#define WMREG_PTC255 0x40e0 /* Packets Tx (128-255 bytes) - R/clr */ 1204#define WMREG_PTC255 0x40e0 /* Packets Tx (128-255 bytes) - R/clr */
1202#define WMREG_PTC511 0x40e4 /* Packets Tx (256-511 bytes) - R/clr */ 1205#define WMREG_PTC511 0x40e4 /* Packets Tx (256-511 bytes) - R/clr */
1203#define WMREG_PTC1023 0x40e8 /* Packets Tx (512-1023 bytes) - R/clr */ 1206#define WMREG_PTC1023 0x40e8 /* Packets Tx (512-1023 bytes) - R/clr */
1204#define WMREG_PTC1522 0x40ec /* Packets Tx (1024-1522 Bytes) - R/clr */ 1207#define WMREG_PTC1522 0x40ec /* Packets Tx (1024-1522 Bytes) - R/clr */
1205#define WMREG_MPTC 0x40f0 /* Multicast Packets Tx Count - R/clr */ 1208#define WMREG_MPTC 0x40f0 /* Multicast Packets Tx Count - R/clr */
1206#define WMREG_BPTC 0x40f4 /* Broadcast Packets Tx Count */ 1209#define WMREG_BPTC 0x40f4 /* Broadcast Packets Tx Count */
1207#define WMREG_TSCTC 0x40f8 /* TCP Segmentation Context Tx */ 1210#define WMREG_TSCTC 0x40f8 /* TCP Segmentation Context Tx */
1208 1211
1209/* Old */ 1212/* Old */
1210#define WMREG_TSCTFC 0x40fc /* TCP Segmentation Context Tx Fail */ 1213#define WMREG_TSCTFC 0x40fc /* TCP Segmentation Context Tx Fail */
1211/* New */ 1214/* New */
1212#define WMREG_CBRMPC 0x40fc /* Circuit Breaker Rx Manageability Packet */ 1215#define WMREG_CBRMPC 0x40fc /* Circuit Breaker Rx Manageability Packet */
1213 1216
1214#define WMREG_IAC 0x4100 /* Interrupt Assertion Count */ 1217#define WMREG_IAC 0x4100 /* Interrupt Assertion Count */
1215 1218
1216/* Old */ 1219/* Old */
1217#define WMREG_ICRXPTC 0x4104 /* Interrupt Cause Rx Pkt Timer Expire Count */ 1220#define WMREG_ICRXPTC 0x4104 /* Interrupt Cause Rx Pkt Timer Expire Count */
1218#define WMREG_ICRXATC 0x4108 /* Interrupt Cause Rx Abs Timer Expire Count */ 1221#define WMREG_ICRXATC 0x4108 /* Interrupt Cause Rx Abs Timer Expire Count */
1219#define WMREG_ICTXPTC 0x410c /* Interrupt Cause Tx Pkt Timer Expire Count */ 1222#define WMREG_ICTXPTC 0x410c /* Interrupt Cause Tx Pkt Timer Expire Count */
1220#define WMREG_ICTXATC 0x4110 /* Interrupt Cause Tx Abs Timer Expire Count */ 1223#define WMREG_ICTXATC 0x4110 /* Interrupt Cause Tx Abs Timer Expire Count */
1221#define WMREG_ICTXQEC 0x4118 /* Interrupt Cause Tx Queue Empty Count */ 1224#define WMREG_ICTXQEC 0x4118 /* Interrupt Cause Tx Queue Empty Count */
1222#define WMREG_ICTXQMTC 0x411c /* Interrupt Cause Tx Queue Min Thresh Count */ 1225#define WMREG_ICTXQMTC 0x411c /* Interrupt Cause Tx Queue Min Thresh Count */
1223#define WMREG_ICRXDMTC 0x4120 /* Interrupt Cause Rx Desc Min Thresh Count */ 1226#define WMREG_ICRXDMTC 0x4120 /* Interrupt Cause Rx Desc Min Thresh Count */
1224#define WMREG_ICRXOC 0x4124 /* Interrupt Cause Receiver Overrun Count */ 1227#define WMREG_ICRXOC 0x4124 /* Interrupt Cause Receiver Overrun Count */
1225/* New */ 1228/* New */
1226#define WMREG_RPTHC 0x4104 /* Rx Pkt To Host Count */ 1229#define WMREG_RPTHC 0x4104 /* Rx Pkt To Host Count */
1227#define WMREG_DEBUG1 0x4108 /* Debug Counter 1 */ 1230#define WMREG_DEBUG1 0x4108 /* Debug Counter 1 */
1228#define WMREG_DEBUG2 0x410c /* Debug Counter 2 */ 1231#define WMREG_DEBUG2 0x410c /* Debug Counter 2 */
1229#define WMREG_DEBUG3 0x4110 /* Debug Counter 3 */ 1232#define WMREG_DEBUG3 0x4110 /* Debug Counter 3 */
1230#define WMREG_HGPTC 0x4118 /* Host Good Packets Tx Count (>=82576?) */ 1233#define WMREG_HGPTC 0x4118 /* Host Good Packets Tx Count (>=82576?) */
1231#define WMREG_DEBUG4 0x411c /* Debug Counter 4 */ 1234#define WMREG_DEBUG4 0x411c /* Debug Counter 4 */
1232#define WMREG_RXDMTC 0x4120 /* Rx Desc Min Thresh Count */ 1235#define WMREG_RXDMTC 0x4120 /* Rx Desc Min Thresh Count */
1233#define WMREG_HTCBDPC 0x4124 /* Host Tx Circuit Breaker Dropped Pkt. Cnt. */ 1236#define WMREG_HTCBDPC 0x4124 /* Host Tx Circuit Breaker Dropped Pkt. Cnt. */
1234#define WMREG_HGORCL 0x4128 /* Host Good Octets Rx Count Low (>=82576?) */ 1237#define WMREG_HGORCL 0x4128 /* Host Good Octets Rx Count Low (>=82576?) */
1235#define WMREG_HGORCH 0x412c /* Host Good Octets Rx Count High (>=82576?) */ 1238#define WMREG_HGORCH 0x412c /* Host Good Octets Rx Count High (>=82576?) */
1236#define WMREG_HGOTCL 0x4130 /* Host Good Octets Tx Count Low (>=82576?) */ 1239#define WMREG_HGOTCL 0x4130 /* Host Good Octets Tx Count Low (>=82576?) */
1237#define WMREG_HGOTCH 0x4134 /* Host Good Octets Tx Count High (>=82576?) */ 1240#define WMREG_HGOTCH 0x4134 /* Host Good Octets Tx Count High (>=82576?) */
1238#define WMREG_LENERRS 0x4138 /* Length Errors Count (>=82576?) */ 1241#define WMREG_LENERRS 0x4138 /* Length Errors Count (>=82576?) */
1239 1242
1240#define WMREG_TLPIC 0x4148 /* EEE Tx LPI Count */ 1243#define WMREG_TLPIC 0x4148 /* EEE Tx LPI Count */
1241#define WMREG_RLPIC 0x414c /* EEE Rx LPI Count */ 1244#define WMREG_RLPIC 0x414c /* EEE Rx LPI Count */
1242#define WMREG_B2OGPRC 0x4158 /* BMC2OS packets received by host */ 1245#define WMREG_B2OGPRC 0x4158 /* BMC2OS packets received by host */
1243#define WMREG_O2BSPC 0x415c /* OS2BMC packets transmitted by host */ 1246#define WMREG_O2BSPC 0x415c /* OS2BMC packets transmitted by host */
1244 1247
1245#define WMREG_PCS_CFG 0x4200 /* PCS Configuration */ 1248#define WMREG_PCS_CFG 0x4200 /* PCS Configuration */
1246#define PCS_CFG_PCS_EN __BIT(3) 1249#define PCS_CFG_PCS_EN __BIT(3)
1247 1250
1248#define WMREG_PCS_LCTL 0x4208 /* PCS Link Control */ 1251#define WMREG_PCS_LCTL 0x4208 /* PCS Link Control */
1249#define PCS_LCTL_FLV_LINK_UP __BIT(0) /* Forced Link Value */ 1252#define PCS_LCTL_FLV_LINK_UP __BIT(0) /* Forced Link Value */
1250#define PCS_LCTL_FSV_MASK __BITS(2, 1) /* Forced Speed Value */ 1253#define PCS_LCTL_FSV_MASK __BITS(2, 1) /* Forced Speed Value */
1251#define PCS_LCTL_FSV_10 0 /* 10Mbps */ 1254#define PCS_LCTL_FSV_10 0 /* 10Mbps */
1252#define PCS_LCTL_FSV_100 __BIT(1) /* 100Mbps */ 1255#define PCS_LCTL_FSV_100 __BIT(1) /* 100Mbps */
1253#define PCS_LCTL_FSV_1000 __BIT(2) /* 1Gbps */ 1256#define PCS_LCTL_FSV_1000 __BIT(2) /* 1Gbps */
1254#define PCS_LCTL_FDV_FULL __BIT(3) /* Force Duplex Value */ 1257#define PCS_LCTL_FDV_FULL __BIT(3) /* Force Duplex Value */
1255#define PCS_LCTL_FSD __BIT(4) /* Force Speed and Duplex */ 1258#define PCS_LCTL_FSD __BIT(4) /* Force Speed and Duplex */
1256#define PCS_LCTL_FORCE_LINK __BIT(5) /* Force Link */ 1259#define PCS_LCTL_FORCE_LINK __BIT(5) /* Force Link */
1257#define PCS_LCTL_LINK_LATCH_LOW __BIT(6) /* Link Latch Low */ 1260#define PCS_LCTL_LINK_LATCH_LOW __BIT(6) /* Link Latch Low */
1258#define PCS_LCTL_FORCE_FC __BIT(7) /* Force Flow Control */ 1261#define PCS_LCTL_FORCE_FC __BIT(7) /* Force Flow Control */
1259#define PCS_LCTL_AN_ENABLE __BIT(16) /* AN enable */ 1262#define PCS_LCTL_AN_ENABLE __BIT(16) /* AN enable */
1260#define PCS_LCTL_AN_RESTART __BIT(17) /* AN restart */ 1263#define PCS_LCTL_AN_RESTART __BIT(17) /* AN restart */
1261#define PCS_LCTL_AN_TIMEOUT __BIT(18) /* AN Timeout Enable */ 1264#define PCS_LCTL_AN_TIMEOUT __BIT(18) /* AN Timeout Enable */
1262#define PCS_LCTL_AN_SGMII_BYP __BIT(19) /* AN SGMII Bypass */ 1265#define PCS_LCTL_AN_SGMII_BYP __BIT(19) /* AN SGMII Bypass */
1263#define PCS_LCTL_AN_SGMII_TRIG __BIT(20) /* AN SGMII Trigger */ 1266#define PCS_LCTL_AN_SGMII_TRIG __BIT(20) /* AN SGMII Trigger */
1264#define PCS_LCTL_FAST_LINKTIMER __BIT(24) /* Fast Link Timer */ 1267#define PCS_LCTL_FAST_LINKTIMER __BIT(24) /* Fast Link Timer */
1265#define PCS_LCTL_LINK_OK_FIX_EN __BIT(25) /* Link OK Fix Enable */ 1268#define PCS_LCTL_LINK_OK_FIX_EN __BIT(25) /* Link OK Fix Enable */
1266 1269
1267#define WMREG_PCS_LSTS 0x420c /* PCS Link Status */ 1270#define WMREG_PCS_LSTS 0x420c /* PCS Link Status */
1268#define PCS_LSTS_LINKOK __BIT(0) 1271#define PCS_LSTS_LINKOK __BIT(0)
1269#define PCS_LSTS_SPEED __BITS(2, 1) 1272#define PCS_LSTS_SPEED __BITS(2, 1)
1270#define PCS_LSTS_SPEED_10 0 1273#define PCS_LSTS_SPEED_10 0
1271#define PCS_LSTS_SPEED_100 1 1274#define PCS_LSTS_SPEED_100 1
1272#define PCS_LSTS_SPEED_1000 2 1275#define PCS_LSTS_SPEED_1000 2
1273#define PCS_LSTS_FDX __BIT(3) 1276#define PCS_LSTS_FDX __BIT(3)
1274#define PCS_LSTS_AN_COMP __BIT(16) 1277#define PCS_LSTS_AN_COMP __BIT(16)
1275 1278
1276#define WMREG_PCS_ANADV 0x4218 /* AN Advertisement */ 1279#define WMREG_PCS_ANADV 0x4218 /* AN Advertisement */
1277#define WMREG_PCS_LPAB 0x421c /* Link Partner Ability */ 1280#define WMREG_PCS_LPAB 0x421c /* Link Partner Ability */
1278#define WMREG_PCS_NPTX 0x4220 /* Next Page Transmit */ 1281#define WMREG_PCS_NPTX 0x4220 /* Next Page Transmit */
1279#define WMREG_SCVPC 0x4228 /* SerDes/SGMII Code Violation Packet Count */ 1282#define WMREG_SCVPC 0x4228 /* SerDes/SGMII Code Violation Packet Count */
1280 1283
1281#define WMREG_RXCSUM 0x5000 /* Receive Checksum register */ 1284#define WMREG_RXCSUM 0x5000 /* Receive Checksum register */
1282#define RXCSUM_PCSS 0x000000ff /* Packet Checksum Start */ 1285#define RXCSUM_PCSS 0x000000ff /* Packet Checksum Start */
1283#define RXCSUM_IPOFL __BIT(8) /* IP checksum offload */ 1286#define RXCSUM_IPOFL __BIT(8) /* IP checksum offload */
1284#define RXCSUM_TUOFL __BIT(9) /* TCP/UDP checksum offload */ 1287#define RXCSUM_TUOFL __BIT(9) /* TCP/UDP checksum offload */
1285#define RXCSUM_IPV6OFL __BIT(10) /* IPv6 checksum offload */ 1288#define RXCSUM_IPV6OFL __BIT(10) /* IPv6 checksum offload */
1286#define RXCSUM_CRCOFL __BIT(11) /* SCTP CRC32 checksum offload */ 1289#define RXCSUM_CRCOFL __BIT(11) /* SCTP CRC32 checksum offload */
1287#define RXCSUM_IPPCSE __BIT(12) /* IP payload checksum enable */ 1290#define RXCSUM_IPPCSE __BIT(12) /* IP payload checksum enable */
1288#define RXCSUM_PCSD __BIT(13) /* packet checksum disabled */ 1291#define RXCSUM_PCSD __BIT(13) /* packet checksum disabled */
1289 1292
1290#define WMREG_RLPML 0x5004 /* Rx Long Packet Max Length */ 1293#define WMREG_RLPML 0x5004 /* Rx Long Packet Max Length */
1291 1294
1292#define WMREG_RFCTL 0x5008 /* Receive Filter Control */ 1295#define WMREG_RFCTL 0x5008 /* Receive Filter Control */
1293#define WMREG_RFCTL_NFSWDIS __BIT(6) /* NFS Write Disable */ 1296#define WMREG_RFCTL_NFSWDIS __BIT(6) /* NFS Write Disable */
1294#define WMREG_RFCTL_NFSRDIS __BIT(7) /* NFS Read Disable */ 1297#define WMREG_RFCTL_NFSRDIS __BIT(7) /* NFS Read Disable */
1295#define WMREG_RFCTL_ACKDIS __BIT(12) /* ACK Accelerate Disable */ 1298#define WMREG_RFCTL_ACKDIS __BIT(12) /* ACK Accelerate Disable */
1296#define WMREG_RFCTL_ACKD_DIS __BIT(13) /* ACK data Disable */ 1299#define WMREG_RFCTL_ACKD_DIS __BIT(13) /* ACK data Disable */
1297#define WMREG_RFCTL_EXSTEN __BIT(15) /* Extended status Enable. 82574 only. */ 1300#define WMREG_RFCTL_EXSTEN __BIT(15) /* Extended status Enable. 82574 only. */
1298#define WMREG_RFCTL_IPV6EXDIS __BIT(16) /* IPv6 Extension Header Disable */ 1301#define WMREG_RFCTL_IPV6EXDIS __BIT(16) /* IPv6 Extension Header Disable */
1299#define WMREG_RFCTL_NEWIPV6EXDIS __BIT(17) /* New IPv6 Extension Header */ 1302#define WMREG_RFCTL_NEWIPV6EXDIS __BIT(17) /* New IPv6 Extension Header */
1300 1303
1301#define WMREG_WUC 0x5800 /* Wakeup Control */ 1304#define WMREG_WUC 0x5800 /* Wakeup Control */
1302#define WUC_APME 0x00000001 /* APM Enable */ 1305#define WUC_APME 0x00000001 /* APM Enable */
1303#define WUC_PME_EN 0x00000002 /* PME Enable */ 1306#define WUC_PME_EN 0x00000002 /* PME Enable */
1304#define WUC_PME_STATUS 0x00000004 /* PME Status */ 1307#define WUC_PME_STATUS 0x00000004 /* PME Status */
1305#define WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */ 1308#define WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */
1306#define WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */ 1309#define WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */
1307 1310
1308#define WMREG_WUFC 0x5808 /* Wakeup Filter Control */ 1311#define WMREG_WUFC 0x5808 /* Wakeup Filter Control */
1309#define WUFC_LNKC __BIT(0) /* Link Status Change Wakeup Enable */ 1312#define WUFC_LNKC __BIT(0) /* Link Status Change Wakeup Enable */
1310#define WUFC_MAG __BIT(1) /* Magic Packet Wakeup Enable */ 1313#define WUFC_MAG __BIT(1) /* Magic Packet Wakeup Enable */
1311#define WUFC_EX __BIT(2) /* Directed Exact Wakeup Enable */ 1314#define WUFC_EX __BIT(2) /* Directed Exact Wakeup Enable */
1312#define WUFC_MC __BIT(3) /* Directed Multicast Wakeup En */ 1315#define WUFC_MC __BIT(3) /* Directed Multicast Wakeup En */
1313#define WUFC_BC __BIT(4) /* Broadcast Wakeup Enable */ 1316#define WUFC_BC __BIT(4) /* Broadcast Wakeup Enable */
1314#define WUFC_ARPDIR __BIT(5) /* ARP Request Packet Wakeup En */ 1317#define WUFC_ARPDIR __BIT(5) /* ARP Request Packet Wakeup En */
1315#define WUFC_IPV4 __BIT(6) /* Directed IPv4 Packet Wakeup En */ 1318#define WUFC_IPV4 __BIT(6) /* Directed IPv4 Packet Wakeup En */
1316#define WUFC_IPV6 __BIT(7) /* Directed IPv6 Packet Wakeup En */ 1319#define WUFC_IPV6 __BIT(7) /* Directed IPv6 Packet Wakeup En */
1317#define WUFC_NS __BIT(9) /* NS Wakeup En */ 1320#define WUFC_NS __BIT(9) /* NS Wakeup En */
1318#define WUFC_NSDIR __BIT(10) /* NS Directed En */ 1321#define WUFC_NSDIR __BIT(10) /* NS Directed En */
1319#define WUFC_ARP __BIT(11) /* ARP request En */ 1322#define WUFC_ARP __BIT(11) /* ARP request En */
1320#define WUFC_FLEX_HQ __BIT(14) /* Flex Filters Host Queueing En */ 1323#define WUFC_FLEX_HQ __BIT(14) /* Flex Filters Host Queueing En */
1321#define WUFC_NOTCO __BIT(15) /* ? */ 1324#define WUFC_NOTCO __BIT(15) /* ? */
1322#define WUFC_FLX __BITS(23, 16) /* Flexible Filter [0-7] En */ 1325#define WUFC_FLX __BITS(23, 16) /* Flexible Filter [0-7] En */
1323#define WUFC_FLXACT __BITS(27, 24) /* Flexible Filter [0-3] Action */ 1326#define WUFC_FLXACT __BITS(27, 24) /* Flexible Filter [0-3] Action */
1324#define WUFC_FW_RST_WK __BIT(31) /* Wake on Firmware Reset Assert En */ 1327#define WUFC_FW_RST_WK __BIT(31) /* Wake on Firmware Reset Assert En */
1325 1328
1326#define WMREG_WUS 0x5810 /* Wakeup Status (R/W1C) */ 1329#define WMREG_WUS 0x5810 /* Wakeup Status (R/W1C) */
1327 /* Bit 30-24 and 15-12 are reserved */ 1330 /* Bit 30-24 and 15-12 are reserved */
1328#define WUS_MNG __BIT(8) /* Manageability event */ 1331#define WUS_MNG __BIT(8) /* Manageability event */
1329#define WUS_FLAGS "\20" \ 1332#define WUS_FLAGS "\20" \
1330 "\1LINKC" "\2MAG" "\3EX" "\4MC" \ 1333 "\1LINKC" "\2MAG" "\3EX" "\4MC" \
1331 "\5BC" "\6ARPDIR" "\7IPV4" "\10IPV6" \ 1334 "\5BC" "\6ARPDIR" "\7IPV4" "\10IPV6" \
1332 "\11MNG" "\12NS" "\13NSDIR" "\14ARP" \ 1335 "\11MNG" "\12NS" "\13NSDIR" "\14ARP" \
1333 "\21FLX0" "\22FLX1" "\23FLX2" "\24FLX3" \ 1336 "\21FLX0" "\22FLX1" "\23FLX2" "\24FLX3" \
1334 "\25FLX4" "\26FLX5" "\27FLX6" "\30FLX7" \ 1337 "\25FLX4" "\26FLX5" "\27FLX6" "\30FLX7" \
1335 "\40FW_RST_WK" 1338 "\40FW_RST_WK"
1336 1339
1337#define WMREG_MRQC 0x5818 /* Multiple Receive Queues Command */ 1340#define WMREG_MRQC 0x5818 /* Multiple Receive Queues Command */
1338#define MRQC_DISABLE_RSS 0x00000000 1341#define MRQC_DISABLE_RSS 0x00000000
1339#define MRQC_ENABLE_RSS_MQ_82574 __BIT(0) /* enable RSS for 82574 */ 1342#define MRQC_ENABLE_RSS_MQ_82574 __BIT(0) /* enable RSS for 82574 */
1340#define MRQC_ENABLE_RSS_MQ __BIT(1) /* enable hardware max RSS without VMDq */ 1343#define MRQC_ENABLE_RSS_MQ __BIT(1) /* enable hardware max RSS without VMDq */
1341#define MRQC_ENABLE_RSS_VMDQ __BITS(1, 0) /* enable RSS with VMDq */ 1344#define MRQC_ENABLE_RSS_VMDQ __BITS(1, 0) /* enable RSS with VMDq */
1342#define MRQC_DEFQ_MASK __BITS(5, 3) 1345#define MRQC_DEFQ_MASK __BITS(5, 3)
1343 /* 1346 /*
1344 * Defines the default queue in non VMDq 1347 * Defines the default queue in non VMDq
1345 * mode according to value of the Multiple Receive 1348 * mode according to value of the Multiple Receive
1346 * Queues Enable field. 1349 * Queues Enable field.
1347 */ 1350 */
1348#define MRQC_DEFQ_NOT_RSS_FLT __SHFTIN(__BIT(1), MRQC_DEFQ_MASK) 1351#define MRQC_DEFQ_NOT_RSS_FLT __SHFTIN(__BIT(1), MRQC_DEFQ_MASK)
1349 /* 1352 /*
1350 * the destination of all packets 1353 * the destination of all packets
1351 * not forwarded by RSS or filters 1354 * not forwarded by RSS or filters
1352 */ 1355 */
1353#define MRQC_DEFQ_NOT_MAC_ETH __SHFTIN(__BITS(1, 0), MRQC_DEFQ_MASK) 1356#define MRQC_DEFQ_NOT_MAC_ETH __SHFTIN(__BITS(1, 0), MRQC_DEFQ_MASK)
1354 /* 1357 /*
1355 * Def_Q field is ignored. Queueing 1358 * Def_Q field is ignored. Queueing
1356 * decision of all packets not forwarded 1359 * decision of all packets not forwarded
1357 * by MAC address and Ether-type filters 1360 * by MAC address and Ether-type filters
1358 * is according to VT_CTL.DEF_PL field. 1361 * is according to VT_CTL.DEF_PL field.
1359 */ 1362 */
1360#define MRQC_DEFQ_IGNORED1 __SHFTIN(__BIT(2), MRQC_DEFQ_MASK) 1363#define MRQC_DEFQ_IGNORED1 __SHFTIN(__BIT(2), MRQC_DEFQ_MASK)
1361 /* Def_Q field is ignored */ 1364 /* Def_Q field is ignored */
1362#define MRQC_DEFQ_IGNORED2 __SHFTIN(__BIT(2)|__BIT(0), MRQC_DEFQ_MASK) 1365#define MRQC_DEFQ_IGNORED2 __SHFTIN(__BIT(2)|__BIT(0), MRQC_DEFQ_MASK)
1363 /* Def_Q field is ignored */ 1366 /* Def_Q field is ignored */
1364#define MRQC_DEFQ_VMDQ __SHFTIN(__BITS(2, 1), MRQC_DEFQ_MASK) 1367#define MRQC_DEFQ_VMDQ __SHFTIN(__BITS(2, 1), MRQC_DEFQ_MASK)
1365 /* for VMDq mode */ 1368 /* for VMDq mode */
1366#define MRQC_RSS_FIELD_IPV4_TCP __BIT(16) 1369#define MRQC_RSS_FIELD_IPV4_TCP __BIT(16)
1367#define MRQC_RSS_FIELD_IPV4 __BIT(17) 1370#define MRQC_RSS_FIELD_IPV4 __BIT(17)
1368#define MRQC_RSS_FIELD_IPV6_TCP_EX __BIT(18) 1371#define MRQC_RSS_FIELD_IPV6_TCP_EX __BIT(18)
1369#define MRQC_RSS_FIELD_IPV6_EX __BIT(19) 1372#define MRQC_RSS_FIELD_IPV6_EX __BIT(19)
1370#define MRQC_RSS_FIELD_IPV6 __BIT(20) 1373#define MRQC_RSS_FIELD_IPV6 __BIT(20)
1371#define MRQC_RSS_FIELD_IPV6_TCP __BIT(21) 1374#define MRQC_RSS_FIELD_IPV6_TCP __BIT(21)
1372#define MRQC_RSS_FIELD_IPV4_UDP __BIT(22) 1375#define MRQC_RSS_FIELD_IPV4_UDP __BIT(22)
1373#define MRQC_RSS_FIELD_IPV6_UDP __BIT(23) 1376#define MRQC_RSS_FIELD_IPV6_UDP __BIT(23)
1374#define MRQC_RSS_FIELD_IPV6_UDP_EX __BIT(24) 1377#define MRQC_RSS_FIELD_IPV6_UDP_EX __BIT(24)
1375 1378
1376#define WMREG_RETA_Q(x) (0x5c00 + ((x) >> 2) * 4) /* Redirection Table */ 1379#define WMREG_RETA_Q(x) (0x5c00 + ((x) >> 2) * 4) /* Redirection Table */
1377#define RETA_NUM_ENTRIES 128 1380#define RETA_NUM_ENTRIES 128
1378#define RETA_ENTRY_MASK_Q(x) (0x000000ffUL << (((x) % 4) * 8)) /* Redirection Table */ 1381#define RETA_ENTRY_MASK_Q(x) (0x000000ffUL << (((x) % 4) * 8)) /* Redirection Table */
1379#define RETA_ENT_QINDEX_MASK __BITS(3,0) /*queue index for 82580 and newer */ 1382#define RETA_ENT_QINDEX_MASK __BITS(3,0) /*queue index for 82580 and newer */
1380#define RETA_ENT_QINDEX0_MASK_82575 __BITS(3,2) /*queue index for pool0 */ 1383#define RETA_ENT_QINDEX0_MASK_82575 __BITS(3,2) /*queue index for pool0 */
1381#define RETA_ENT_QINDEX1_MASK_82575 __BITS(7,6) /*queue index for pool1 and regular RSS */ 1384#define RETA_ENT_QINDEX1_MASK_82575 __BITS(7,6) /*queue index for pool1 and regular RSS */
1382#define RETA_ENT_QINDEX_MASK_82574 __BIT(7) /*queue index for 82574 */ 1385#define RETA_ENT_QINDEX_MASK_82574 __BIT(7) /*queue index for 82574 */
1383 1386
1384#define WMREG_RSSRK(x) (0x5c80 + (x) * 4) /* RSS Random Key Register */ 1387#define WMREG_RSSRK(x) (0x5c80 + (x) * 4) /* RSS Random Key Register */
1385#define RSSRK_NUM_REGS 10 1388#define RSSRK_NUM_REGS 10
1386 1389
1387#define WMREG_MANC 0x5820 /* Management Control */ 1390#define WMREG_MANC 0x5820 /* Management Control */
1388#define MANC_SMBUS_EN __BIT(0) 1391#define MANC_SMBUS_EN __BIT(0)
1389#define MANC_ASF_EN __BIT(1) 1392#define MANC_ASF_EN __BIT(1)
1390#define MANC_ARP_EN __BIT(13) 1393#define MANC_ARP_EN __BIT(13)
1391#define MANC_RECV_TCO_RESET __BIT(16) 1394#define MANC_RECV_TCO_RESET __BIT(16)
1392#define MANC_RECV_TCO_EN __BIT(17) 1395#define MANC_RECV_TCO_EN __BIT(17)
1393#define MANC_BLK_PHY_RST_ON_IDE __BIT(18) 1396#define MANC_BLK_PHY_RST_ON_IDE __BIT(18)
1394#define MANC_RECV_ALL __BIT(19) 1397#define MANC_RECV_ALL __BIT(19)
1395#define MANC_EN_MAC_ADDR_FILTER __BIT(20) 1398#define MANC_EN_MAC_ADDR_FILTER __BIT(20)
1396#define MANC_EN_MNG2HOST __BIT(21) 1399#define MANC_EN_MNG2HOST __BIT(21)
1397#define MANC_EN_BMC2OS __BIT(28) 1400#define MANC_EN_BMC2OS __BIT(28)
1398 1401
1399#define WMREG_MANC2H 0x5860 /* Management Control To Host - RW */ 1402#define WMREG_MANC2H 0x5860 /* Management Control To Host - RW */
1400#define MANC2H_PORT_623 (1 << 5) 1403#define MANC2H_PORT_623 (1 << 5)
1401#define MANC2H_PORT_624 (1 << 6) 1404#define MANC2H_PORT_624 (1 << 6)
1402 1405
1403#define WMREG_GCR 0x5b00 /* PCIe Control */ 1406#define WMREG_GCR 0x5b00 /* PCIe Control */
1404#define GCR_RXD_NO_SNOOP 0x00000001 1407#define GCR_RXD_NO_SNOOP 0x00000001
1405#define GCR_RXDSCW_NO_SNOOP 0x00000002 1408#define GCR_RXDSCW_NO_SNOOP 0x00000002
1406#define GCR_RXDSCR_NO_SNOOP 0x00000004 1409#define GCR_RXDSCR_NO_SNOOP 0x00000004
1407#define GCR_TXD_NO_SNOOP 0x00000008 1410#define GCR_TXD_NO_SNOOP 0x00000008
1408#define GCR_TXDSCW_NO_SNOOP 0x00000010 1411#define GCR_TXDSCW_NO_SNOOP 0x00000010
1409#define GCR_TXDSCR_NO_SNOOP 0x00000020 1412#define GCR_TXDSCR_NO_SNOOP 0x00000020
1410#define GCR_CMPL_TMOUT_MASK 0x0000f000 1413#define GCR_CMPL_TMOUT_MASK 0x0000f000
1411#define GCR_CMPL_TMOUT_10MS 0x00001000 1414#define GCR_CMPL_TMOUT_10MS 0x00001000
1412#define GCR_CMPL_TMOUT_RESEND 0x00010000 1415#define GCR_CMPL_TMOUT_RESEND 0x00010000
1413#define GCR_CAP_VER2 0x00040000 1416#define GCR_CAP_VER2 0x00040000
1414#define GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 1417#define GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
1415#define GCR_NO_SNOOP_ALL (GCR_RXD_NO_SNOOP | \ 1418#define GCR_NO_SNOOP_ALL (GCR_RXD_NO_SNOOP | \
1416 GCR_RXDSCW_NO_SNOOP | \ 1419 GCR_RXDSCW_NO_SNOOP | \
1417 GCR_RXDSCR_NO_SNOOP | \ 1420 GCR_RXDSCR_NO_SNOOP | \
1418 GCR_TXD_NO_SNOOP | \ 1421 GCR_TXD_NO_SNOOP | \
1419 GCR_TXDSCW_NO_SNOOP | \ 1422 GCR_TXDSCW_NO_SNOOP | \
1420 GCR_TXDSCR_NO_SNOOP) 1423 GCR_TXDSCR_NO_SNOOP)
1421 1424
1422#define WMREG_FACTPS 0x5b30 /* Function Active and Power State to MNG */ 1425#define WMREG_FACTPS 0x5b30 /* Function Active and Power State to MNG */
1423#define FACTPS_MNGCG 0x20000000 1426#define FACTPS_MNGCG 0x20000000
1424#define FACTPS_LFS 0x40000000 /* LAN Function Select */ 1427#define FACTPS_LFS 0x40000000 /* LAN Function Select */
1425 1428
1426#define WMREG_GIOCTL 0x5b44 /* GIO Analog Control Register */ 1429#define WMREG_GIOCTL 0x5b44 /* GIO Analog Control Register */
1427#define WMREG_CCMCTL 0x5b48 /* CCM Control Register */ 1430#define WMREG_CCMCTL 0x5b48 /* CCM Control Register */
1428#define WMREG_SCCTL 0x5b4c /* PCIc PLL Configuration Register */ 1431#define WMREG_SCCTL 0x5b4c /* PCIc PLL Configuration Register */
1429 1432
1430#define WMREG_SWSM 0x5b50 /* SW Semaphore */ 1433#define WMREG_SWSM 0x5b50 /* SW Semaphore */
1431#define SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ 1434#define SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
1432#define SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ 1435#define SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
1433#define SWSM_WMNG 0x00000004 /* Wake MNG Clock */ 1436#define SWSM_WMNG 0x00000004 /* Wake MNG Clock */
1434#define SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ 1437#define SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */
1435/* Intel driver defines H2ME register at 0x5b50 */ 1438/* Intel driver defines H2ME register at 0x5b50 */
1436#define WMREG_H2ME 0x5b50 /* SW Semaphore */ 1439#define WMREG_H2ME 0x5b50 /* SW Semaphore */
1437#define H2ME_ULP __BIT(11) 1440#define H2ME_ULP __BIT(11)
1438#define H2ME_ENFORCE_SETTINGS __BIT(12) 1441#define H2ME_ENFORCE_SETTINGS __BIT(12)
1439 1442
1440#define WMREG_FWSM 0x5b54 /* FW Semaphore */ 1443#define WMREG_FWSM 0x5b54 /* FW Semaphore */
1441#define FWSM_MODE __BITS(1, 3) 1444#define FWSM_MODE __BITS(1, 3)
1442#define MNG_ICH_IAMT_MODE 0x2 /* PT mode? */ 1445#define MNG_ICH_IAMT_MODE 0x2 /* PT mode? */
1443#define MNG_IAMT_MODE 0x3 1446#define MNG_IAMT_MODE 0x3
1444#define FWSM_RSPCIPHY __BIT(6) /* Reset PHY on PCI reset */ 1447#define FWSM_RSPCIPHY __BIT(6) /* Reset PHY on PCI reset */
1445#define FWSM_WLOCK_MAC __BITS(7, 9) 1448#define FWSM_WLOCK_MAC __BITS(7, 9)
1446#define FWSM_ULP_CFG_DONE __BIT(10) 1449#define FWSM_ULP_CFG_DONE __BIT(10)
1447#define FWSM_FW_VALID __BIT(15) /* FW established a valid mode */ 1450#define FWSM_FW_VALID __BIT(15) /* FW established a valid mode */
1448 1451
1449#define WMREG_SWSM2 0x5b58 /* SW Semaphore 2 */ 1452#define WMREG_SWSM2 0x5b58 /* SW Semaphore 2 */
1450#define SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */ 1453#define SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */
1451 1454
1452#define WMREG_SW_FW_SYNC 0x5b5c /* software-firmware semaphore */ 1455#define WMREG_SW_FW_SYNC 0x5b5c /* software-firmware semaphore */
1453#define SWFW_EEP_SM 0x0001 /* eeprom access */ 1456#define SWFW_EEP_SM 0x0001 /* eeprom access */
1454#define SWFW_PHY0_SM 0x0002 /* first ctrl phy access */ 1457#define SWFW_PHY0_SM 0x0002 /* first ctrl phy access */
1455#define SWFW_PHY1_SM 0x0004 /* second ctrl phy access */ 1458#define SWFW_PHY1_SM 0x0004 /* second ctrl phy access */
1456#define SWFW_MAC_CSR_SM 0x0008 1459#define SWFW_MAC_CSR_SM 0x0008
1457#define SWFW_PHY2_SM 0x0020 /* first ctrl phy access */ 1460#define SWFW_PHY2_SM 0x0020 /* first ctrl phy access */
1458#define SWFW_PHY3_SM 0x0040 /* first ctrl phy access */ 1461#define SWFW_PHY3_SM 0x0040 /* first ctrl phy access */
1459#define SWFW_SOFT_SHIFT 0 /* software semaphores */ 1462#define SWFW_SOFT_SHIFT 0 /* software semaphores */
1460#define SWFW_FIRM_SHIFT 16 /* firmware semaphores */ 1463#define SWFW_FIRM_SHIFT 16 /* firmware semaphores */
1461 1464
1462#define WMREG_GCR2 0x5b64 /* 3GPIO Control Register 2 */ 1465#define WMREG_GCR2 0x5b64 /* 3GPIO Control Register 2 */
1463#define WMREG_FEXTNVM9 0x5bb4 /* Future Extended NVM 9 */ 1466#define WMREG_FEXTNVM9 0x5bb4 /* Future Extended NVM 9 */
1464#define FEXTNVM9_IOSFSB_CLKGATE_DIS __BIT(11) 1467#define FEXTNVM9_IOSFSB_CLKGATE_DIS __BIT(11)
1465#define FEXTNVM9_IOSFSB_CLKREQ_DIS __BIT(12) 1468#define FEXTNVM9_IOSFSB_CLKREQ_DIS __BIT(12)
1466#define WMREG_FEXTNVM11 0x5bbc /* Future Extended NVM 11 */ 1469#define WMREG_FEXTNVM11 0x5bbc /* Future Extended NVM 11 */
1467#define FEXTNVM11_DIS_MULRFIX __BIT(13) /* Disable MULR fix */ 1470#define FEXTNVM11_DIS_MULRFIX __BIT(13) /* Disable MULR fix */
1468 1471
1469#define WMREG_FFLT_DBG 0x05F04 /* Debug Register */ 1472#define WMREG_FFLT_DBG 0x05F04 /* Debug Register */
1470 1473
1471#define WMREG_CRC_OFFSET 0x5f50 1474#define WMREG_CRC_OFFSET 0x5f50
1472 1475
1473#define WMREG_B2OSPC 0x8fe0 /* BMC2OS packets sent by BMC */ 1476#define WMREG_B2OSPC 0x8fe0 /* BMC2OS packets sent by BMC */
1474#define WMREG_O2BGPTC 0x8fe4 /* OS2BMC packets received by BMC */ 1477#define WMREG_O2BGPTC 0x8fe4 /* OS2BMC packets received by BMC */
1475 1478
1476#define WMREG_HRMPC 0xa018 /* Header Redirection Missed Packet Count */ 1479#define WMREG_HRMPC 0xa018 /* Header Redirection Missed Packet Count */
1477 1480
1478#define WMREG_EEC 0x12010 1481#define WMREG_EEC 0x12010
1479#define EEC_FLASH_DETECTED __BIT(19) /* FLASH */ 1482#define EEC_FLASH_DETECTED __BIT(19) /* FLASH */
1480#define EEC_FLUPD __BIT(23) /* Update FLASH */ 1483#define EEC_FLUPD __BIT(23) /* Update FLASH */
1481 1484
1482#define WMREG_EEARBC_I210 0x12024 1485#define WMREG_EEARBC_I210 0x12024
1483 1486
1484/* 1487/*
1485 * NVM related values. 1488 * NVM related values.
1486 * Microwire, SPI, and flash 1489 * Microwire, SPI, and flash
1487 */ 1490 */
1488#define UWIRE_OPC_ERASE 0x04 /* MicroWire "erase" opcode */ 1491#define UWIRE_OPC_ERASE 0x04 /* MicroWire "erase" opcode */
1489#define UWIRE_OPC_WRITE 0x05 /* MicroWire "write" opcode */ 1492#define UWIRE_OPC_WRITE 0x05 /* MicroWire "write" opcode */
1490#define UWIRE_OPC_READ 0x06 /* MicroWire "read" opcode */ 1493#define UWIRE_OPC_READ 0x06 /* MicroWire "read" opcode */
1491 1494
1492#define SPI_OPC_WRITE 0x02 /* SPI "write" opcode */ 1495#define SPI_OPC_WRITE 0x02 /* SPI "write" opcode */
1493#define SPI_OPC_READ 0x03 /* SPI "read" opcode */ 1496#define SPI_OPC_READ 0x03 /* SPI "read" opcode */
1494#define SPI_OPC_A8 0x08 /* opcode bit 3 == address bit 8 */ 1497#define SPI_OPC_A8 0x08 /* opcode bit 3 == address bit 8 */
1495#define SPI_OPC_WREN 0x06 /* SPI "set write enable" opcode */ 1498#define SPI_OPC_WREN 0x06 /* SPI "set write enable" opcode */
1496#define SPI_OPC_WRDI 0x04 /* SPI "clear write enable" opcode */ 1499#define SPI_OPC_WRDI 0x04 /* SPI "clear write enable" opcode */
1497#define SPI_OPC_RDSR 0x05 /* SPI "read status" opcode */ 1500#define SPI_OPC_RDSR 0x05 /* SPI "read status" opcode */
1498#define SPI_OPC_WRSR 0x01 /* SPI "write status" opcode */ 1501#define SPI_OPC_WRSR 0x01 /* SPI "write status" opcode */
1499#define SPI_MAX_RETRIES 5000 /* max wait of 5ms for RDY signal */ 1502#define SPI_MAX_RETRIES 5000 /* max wait of 5ms for RDY signal */
1500 1503
1501#define SPI_SR_RDY 0x01 1504#define SPI_SR_RDY 0x01
1502#define SPI_SR_WEN 0x02 1505#define SPI_SR_WEN 0x02
1503#define SPI_SR_BP0 0x04 1506#define SPI_SR_BP0 0x04
1504#define SPI_SR_BP1 0x08 1507#define SPI_SR_BP1 0x08
1505#define SPI_SR_WPEN 0x80 1508#define SPI_SR_WPEN 0x80
1506 1509
1507#define NVM_CHECKSUM 0xBABA 1510#define NVM_CHECKSUM 0xBABA
1508#define NVM_SIZE 0x0040 1511#define NVM_SIZE 0x0040
1509#define NVM_WORD_SIZE_BASE_SHIFT 6 1512#define NVM_WORD_SIZE_BASE_SHIFT 6
1510 1513
1511#define NVM_OFF_MACADDR 0x0000 /* MAC address offset 0 */ 1514#define NVM_OFF_MACADDR 0x0000 /* MAC address offset 0 */
1512#define NVM_OFF_MACADDR1 0x0001 /* MAC address offset 1 */ 1515#define NVM_OFF_MACADDR1 0x0001 /* MAC address offset 1 */
1513#define NVM_OFF_MACADDR2 0x0002 /* MAC address offset 2 */ 1516#define NVM_OFF_MACADDR2 0x0002 /* MAC address offset 2 */
1514#define NVM_OFF_COMPAT 0x0003 1517#define NVM_OFF_COMPAT 0x0003
1515#define NVM_OFF_ID_LED_SETTINGS 0x0004 1518#define NVM_OFF_ID_LED_SETTINGS 0x0004
1516#define NVM_OFF_VERSION 0x0005 1519#define NVM_OFF_VERSION 0x0005
1517#define NVM_OFF_CFG1 0x000a /* config word 1 */ 1520#define NVM_OFF_CFG1 0x000a /* config word 1 */
1518#define NVM_OFF_CFG2 0x000f /* config word 2 */ 1521#define NVM_OFF_CFG2 0x000f /* config word 2 */
1519#define NVM_OFF_EEPROM_SIZE 0x0012 /* NVM SIZE */ 1522#define NVM_OFF_EEPROM_SIZE 0x0012 /* NVM SIZE */
1520#define NVM_OFF_CFG4 0x0013 /* config word 4 */ 1523#define NVM_OFF_CFG4 0x0013 /* config word 4 */
1521#define NVM_OFF_CFG3_PORTB 0x0014 /* config word 3 */ 1524#define NVM_OFF_CFG3_PORTB 0x0014 /* config word 3 */
1522#define NVM_OFF_FUTURE_INIT_WORD1 0x0019 1525#define NVM_OFF_FUTURE_INIT_WORD1 0x0019
1523#define NVM_OFF_INIT_3GIO_3 0x001a /* PCIe Initial Configuration Word 3 */ 1526#define NVM_OFF_INIT_3GIO_3 0x001a /* PCIe Initial Configuration Word 3 */
1524#define NVM_OFF_K1_CONFIG 0x001b /* NVM K1 Config */ 1527#define NVM_OFF_K1_CONFIG 0x001b /* NVM K1 Config */
1525#define NVM_OFF_LED_1_CFG 0x001c 1528#define NVM_OFF_LED_1_CFG 0x001c
1526#define NVM_OFF_LED_0_2_CFG 0x001f 1529#define NVM_OFF_LED_0_2_CFG 0x001f
1527#define NVM_OFF_SWDPIN 0x0020 /* SWD Pins (Cordova) */ 1530#define NVM_OFF_SWDPIN 0x0020 /* SWD Pins (Cordova) */
1528#define NVM_OFF_CFG3_PORTA 0x0024 /* config word 3 */ 1531#define NVM_OFF_CFG3_PORTA 0x0024 /* config word 3 */
1529#define NVM_OFF_ALT_MAC_ADDR_PTR 0x0037 /* to the alternative MAC addresses */ 1532#define NVM_OFF_ALT_MAC_ADDR_PTR 0x0037 /* to the alternative MAC addresses */
1530#define NVM_OFF_COMB_VER_PTR 0x003d 1533#define NVM_OFF_COMB_VER_PTR 0x003d
1531#define NVM_OFF_IMAGE_UID0 0x0042 1534#define NVM_OFF_IMAGE_UID0 0x0042
1532#define NVM_OFF_IMAGE_UID1 0x0043 1535#define NVM_OFF_IMAGE_UID1 0x0043
1533 1536
1534#define NVM_COMPAT_VALID_CHECKSUM 0x0001 1537#define NVM_COMPAT_VALID_CHECKSUM 0x0001
1535 1538
1536#define NVM_CFG1_LVDID __BIT(0) 1539#define NVM_CFG1_LVDID __BIT(0)
1537#define NVM_CFG1_LSSID __BIT(1) 1540#define NVM_CFG1_LSSID __BIT(1)
1538#define NVM_CFG1_PME_CLOCK __BIT(2) 1541#define NVM_CFG1_PME_CLOCK __BIT(2)
1539#define NVM_CFG1_PM __BIT(3) 1542#define NVM_CFG1_PM __BIT(3)
1540#define NVM_CFG1_ILOS __BIT(4) /* Invert loss of signal */ 1543#define NVM_CFG1_ILOS __BIT(4) /* Invert loss of signal */
1541#define NVM_CFG1_SWDPIO_SHIFT 5 1544#define NVM_CFG1_SWDPIO_SHIFT 5
1542#define NVM_CFG1_SWDPIO_MASK (0xf << NVM_CFG1_SWDPIO_SHIFT) 1545#define NVM_CFG1_SWDPIO_MASK (0xf << NVM_CFG1_SWDPIO_SHIFT)
1543#define NVM_CFG1_IPS1 __BIT(8) 1546#define NVM_CFG1_IPS1 __BIT(8)
1544#define NVM_CFG1_LRST __BIT(9) 1547#define NVM_CFG1_LRST __BIT(9)
1545#define NVM_CFG1_FD __BIT(10) 1548#define NVM_CFG1_FD __BIT(10)
1546#define NVM_CFG1_FRCSPD __BIT(11) 1549#define NVM_CFG1_FRCSPD __BIT(11)
1547#define NVM_CFG1_IPS0 __BIT(12) 1550#define NVM_CFG1_IPS0 __BIT(12)
1548#define NVM_CFG1_64_32_BAR __BIT(13) 1551#define NVM_CFG1_64_32_BAR __BIT(13)
1549 1552
1550#define NVM_CFG2_CSR_RD_SPLIT __BIT(1) 1553#define NVM_CFG2_CSR_RD_SPLIT __BIT(1)
1551#define NVM_CFG2_82544_APM_EN __BIT(2) 1554#define NVM_CFG2_82544_APM_EN __BIT(2)
1552#define NVM_CFG2_64_BIT __BIT(3) 1555#define NVM_CFG2_64_BIT __BIT(3)
1553#define NVM_CFG2_MAX_READ __BIT(4) 1556#define NVM_CFG2_MAX_READ __BIT(4)
1554#define NVM_CFG2_DMCR_MAP __BIT(5) 1557#define NVM_CFG2_DMCR_MAP __BIT(5)
1555#define NVM_CFG2_133_CAP __BIT(6) 1558#define NVM_CFG2_133_CAP __BIT(6)
1556#define NVM_CFG2_MSI_DIS __BIT(7) 1559#define NVM_CFG2_MSI_DIS __BIT(7)
1557#define NVM_CFG2_FLASH_DIS __BIT(8) 1560#define NVM_CFG2_FLASH_DIS __BIT(8)
1558#define NVM_CFG2_FLASH_SIZE(x) (((x) & 3) >> 9) 1561#define NVM_CFG2_FLASH_SIZE(x) (((x) & 3) >> 9)
1559#define NVM_CFG2_APM_EN __BIT(10) 1562#define NVM_CFG2_APM_EN __BIT(10)
1560#define NVM_CFG2_ANE __BIT(11) 1563#define NVM_CFG2_ANE __BIT(11)
1561#define NVM_CFG2_PAUSE(x) (((x) & 3) >> 12) 1564#define NVM_CFG2_PAUSE(x) (((x) & 3) >> 12)
1562#define NVM_CFG2_ASDE __BIT(14) 1565#define NVM_CFG2_ASDE __BIT(14)
1563#define NVM_CFG2_APM_PME __BIT(15) 1566#define NVM_CFG2_APM_PME __BIT(15)
1564#define NVM_CFG2_SWDPIO_SHIFT 4 1567#define NVM_CFG2_SWDPIO_SHIFT 4
1565#define NVM_CFG2_SWDPIO_MASK (0xf << NVM_CFG2_SWDPIO_SHIFT) 1568#define NVM_CFG2_SWDPIO_MASK (0xf << NVM_CFG2_SWDPIO_SHIFT)
1566#define NVM_CFG2_MNGM_SHIFT 13 /* Manageability Operation mode */ 1569#define NVM_CFG2_MNGM_SHIFT 13 /* Manageability Operation mode */
1567#define NVM_CFG2_MNGM_MASK (3U << NVM_CFG2_MNGM_SHIFT) 1570#define NVM_CFG2_MNGM_MASK (3U << NVM_CFG2_MNGM_SHIFT)
1568#define NVM_CFG2_MNGM_DIS 0 1571#define NVM_CFG2_MNGM_DIS 0
1569#define NVM_CFG2_MNGM_NCSI 1 1572#define NVM_CFG2_MNGM_NCSI 1
1570#define NVM_CFG2_MNGM_PT 2 1573#define NVM_CFG2_MNGM_PT 2
1571 1574
1572#define NVM_COMPAT_MAS_EN(x) __BIT(x) /* Media Auto Sense Enable */ 1575#define NVM_COMPAT_MAS_EN(x) __BIT(x) /* Media Auto Sense Enable */
1573#define NVM_COMPAT_SERDES_FORCE_MODE __BIT(14) /* Don't use autonego */ 1576#define NVM_COMPAT_SERDES_FORCE_MODE __BIT(14) /* Don't use autonego */
1574 1577
1575#define NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM 0x0040 1578#define NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM 0x0040
1576 1579
1577#define NVM_K1_CONFIG_ENABLE 0x01 1580#define NVM_K1_CONFIG_ENABLE 0x01
1578 1581
1579#define NVM_SWDPIN_MASK 0xdf 1582#define NVM_SWDPIN_MASK 0xdf
1580#define NVM_SWDPIN_SWDPIN_SHIFT 0 1583#define NVM_SWDPIN_SWDPIN_SHIFT 0
1581#define NVM_SWDPIN_SWDPIO_SHIFT 8 1584#define NVM_SWDPIN_SWDPIO_SHIFT 8
1582 1585
1583#define NVM_3GIO_3_ASPM_MASK (0x3 << 2) /* Active State PM Support */ 1586#define NVM_3GIO_3_ASPM_MASK (0x3 << 2) /* Active State PM Support */
1584 1587
1585#define NVM_CFG3_PORTA_EXT_MDIO __BIT(2) /* External MDIO Interface */ 1588#define NVM_CFG3_PORTA_EXT_MDIO __BIT(2) /* External MDIO Interface */
1586#define NVM_CFG3_PORTA_COM_MDIO __BIT(3) /* MDIO Interface is shared */ 1589#define NVM_CFG3_PORTA_COM_MDIO __BIT(3) /* MDIO Interface is shared */
1587#define NVM_CFG3_APME __BIT(10) /* APM Enable */ 1590#define NVM_CFG3_APME __BIT(10) /* APM Enable */
1588#define NVM_CFG3_ILOS __BIT(13) /* Invert loss of signal */ 1591#define NVM_CFG3_ILOS __BIT(13) /* Invert loss of signal */
1589 1592
1590#define NVM_OFF_MACADDR_82571(x) (3 * (x)) 1593#define NVM_OFF_MACADDR_82571(x) (3 * (x))
1591 1594
1592/* 1595/*
1593 * EEPROM Partitioning. See Table 6-1, "EEPROM Top Level Partitioning" 1596 * EEPROM Partitioning. See Table 6-1, "EEPROM Top Level Partitioning"
1594 * in 82580's datasheet. 1597 * in 82580's datasheet.
1595 */ 1598 */
1596#define NVM_OFF_LAN_FUNC_82580(x) ((x) ? (0x40 + (0x40 * (x))) : 0) 1599#define NVM_OFF_LAN_FUNC_82580(x) ((x) ? (0x40 + (0x40 * (x))) : 0)
1597 1600
1598#define NVM_COMBO_VER_OFF 0x0083 1601#define NVM_COMBO_VER_OFF 0x0083
1599 1602
1600#define NVM_MAJOR_MASK 0xf000 1603#define NVM_MAJOR_MASK 0xf000
1601#define NVM_MAJOR_SHIFT 12 1604#define NVM_MAJOR_SHIFT 12
1602#define NVM_MINOR_MASK 0x0ff0 1605#define NVM_MINOR_MASK 0x0ff0
1603#define NVM_MINOR_SHIFT 4 1606#define NVM_MINOR_SHIFT 4
1604#define NVM_BUILD_MASK 0x000f 1607#define NVM_BUILD_MASK 0x000f
1605#define NVM_UID_VALID 0x8000 1608#define NVM_UID_VALID 0x8000
1606 1609
1607/* iNVM Registers for i21[01] */ 1610/* iNVM Registers for i21[01] */
1608#define WM_INVM_DATA_REG(reg) (0x12120 + 4*(reg)) 1611#define WM_INVM_DATA_REG(reg) (0x12120 + 4*(reg))
1609#define INVM_SIZE 64 /* Number of INVM Data Registers */ 1612#define INVM_SIZE 64 /* Number of INVM Data Registers */
1610 1613
1611/* iNVM default value */ 1614/* iNVM default value */
1612#define NVM_INIT_CTRL_2_DEFAULT_I211 0x7243 1615#define NVM_INIT_CTRL_2_DEFAULT_I211 0x7243
1613#define NVM_INIT_CTRL_4_DEFAULT_I211 0x00c1 1616#define NVM_INIT_CTRL_4_DEFAULT_I211 0x00c1
1614#define NVM_LED_1_CFG_DEFAULT_I211 0x0184 1617#define NVM_LED_1_CFG_DEFAULT_I211 0x0184
1615#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200c 1618#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200c
1616#define NVM_RESERVED_WORD 0xffff 1619#define NVM_RESERVED_WORD 0xffff
1617 1620
1618#define INVM_DWORD_TO_RECORD_TYPE(dword) ((dword) & 0x7) 1621#define INVM_DWORD_TO_RECORD_TYPE(dword) ((dword) & 0x7)
1619#define INVM_DWORD_TO_WORD_ADDRESS(dword) (((dword) & 0x0000FE00) >> 9) 1622#define INVM_DWORD_TO_WORD_ADDRESS(dword) (((dword) & 0x0000FE00) >> 9)
1620#define INVM_DWORD_TO_WORD_DATA(dword) (((dword) & 0xFFFF0000) >> 16) 1623#define INVM_DWORD_TO_WORD_DATA(dword) (((dword) & 0xFFFF0000) >> 16)
1621 1624
1622#define INVM_UNINITIALIZED_STRUCTURE 0x0 1625#define INVM_UNINITIALIZED_STRUCTURE 0x0
1623#define INVM_WORD_AUTOLOAD_STRUCTURE 0x1 1626#define INVM_WORD_AUTOLOAD_STRUCTURE 0x1
1624#define INVM_CSR_AUTOLOAD_STRUCTURE 0x2 1627#define INVM_CSR_AUTOLOAD_STRUCTURE 0x2
1625#define INVM_PHY_REGISTER_AUTOLOAD_STRUCTURE 0x3 1628#define INVM_PHY_REGISTER_AUTOLOAD_STRUCTURE 0x3
1626#define INVM_RSA_KEY_SHA256_STRUCTURE 0x4 1629#define INVM_RSA_KEY_SHA256_STRUCTURE 0x4
1627#define INVM_INVALIDATED_STRUCTURE 0xf 1630#define INVM_INVALIDATED_STRUCTURE 0xf
1628 1631
1629#define INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8 1632#define INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8
1630#define INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1 1633#define INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1
1631 1634
1632#define INVM_DEFAULT_AL 0x202f 1635#define INVM_DEFAULT_AL 0x202f
1633#define INVM_AUTOLOAD 0x0a 1636#define INVM_AUTOLOAD 0x0a
1634#define INVM_PLL_WO_VAL 0x0010 1637#define INVM_PLL_WO_VAL 0x0010
1635 1638
1636/* Version and Image Type field */ 1639/* Version and Image Type field */
1637#define INVM_VER_1 __BITS(12,3) 1640#define INVM_VER_1 __BITS(12,3)
1638#define INVM_VER_2 __BITS(22,13) 1641#define INVM_VER_2 __BITS(22,13)
1639#define INVM_IMGTYPE __BITS(28,23) 1642#define INVM_IMGTYPE __BITS(28,23)
1640#define INVM_MINOR __BITS(3,0) 1643#define INVM_MINOR __BITS(3,0)
1641#define INVM_MAJOR __BITS(9,4) 1644#define INVM_MAJOR __BITS(9,4)
1642 1645
1643/* Word definitions for ID LED Settings */ 1646/* Word definitions for ID LED Settings */
1644#define ID_LED_RESERVED_FFFF 0xffff 1647#define ID_LED_RESERVED_FFFF 0xffff
1645 1648
1646/* ich8 flash control */ 1649/* ich8 flash control */
1647#define ICH_FLASH_COMMAND_TIMEOUT 5000 /* 5000 uSecs - adjusted */ 1650#define ICH_FLASH_COMMAND_TIMEOUT 5000 /* 5000 uSecs - adjusted */
1648#define ICH_FLASH_ERASE_TIMEOUT 3000000 /* Up to 3 seconds - worst case */ 1651#define ICH_FLASH_ERASE_TIMEOUT 3000000 /* Up to 3 seconds - worst case */
1649#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 /* 10 cycles */ 1652#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 /* 10 cycles */
1650#define ICH_FLASH_SEG_SIZE_256 256 1653#define ICH_FLASH_SEG_SIZE_256 256
1651#define ICH_FLASH_SEG_SIZE_4K 4096 1654#define ICH_FLASH_SEG_SIZE_4K 4096
1652#define ICH_FLASH_SEG_SIZE_64K 65536 1655#define ICH_FLASH_SEG_SIZE_64K 65536
1653 1656
1654#define ICH_CYCLE_READ 0x0 1657#define ICH_CYCLE_READ 0x0
1655#define ICH_CYCLE_RESERVED 0x1 1658#define ICH_CYCLE_RESERVED 0x1
1656#define ICH_CYCLE_WRITE 0x2 1659#define ICH_CYCLE_WRITE 0x2
1657#define ICH_CYCLE_ERASE 0x3 1660#define ICH_CYCLE_ERASE 0x3
1658 1661
1659#define ICH_FLASH_GFPREG 0x0000 1662#define ICH_FLASH_GFPREG 0x0000
1660#define ICH_FLASH_HSFSTS 0x0004 /* Flash Status Register */ 1663#define ICH_FLASH_HSFSTS 0x0004 /* Flash Status Register */
1661#define HSFSTS_DONE 0x0001 /* Flash Cycle Done */ 1664#define HSFSTS_DONE 0x0001 /* Flash Cycle Done */
1662#define HSFSTS_ERR 0x0002 /* Flash Cycle Error */ 1665#define HSFSTS_ERR 0x0002 /* Flash Cycle Error */
1663#define HSFSTS_DAEL 0x0004 /* Direct Access error Log */ 1666#define HSFSTS_DAEL 0x0004 /* Direct Access error Log */
1664#define HSFSTS_ERSZ_MASK 0x0018 /* Block/Sector Erase Size */ 1667#define HSFSTS_ERSZ_MASK 0x0018 /* Block/Sector Erase Size */
1665#define HSFSTS_ERSZ_SHIFT 3 1668#define HSFSTS_ERSZ_SHIFT 3
1666#define HSFSTS_FLINPRO 0x0020 /* flash SPI cycle in Progress */ 1669#define HSFSTS_FLINPRO 0x0020 /* flash SPI cycle in Progress */
1667#define HSFSTS_FLDVAL 0x4000 /* Flash Descriptor Valid */ 1670#define HSFSTS_FLDVAL 0x4000 /* Flash Descriptor Valid */
1668#define HSFSTS_FLLK 0x8000 /* Flash Configuration Lock-Down */ 1671#define HSFSTS_FLLK 0x8000 /* Flash Configuration Lock-Down */
1669#define ICH_FLASH_HSFCTL 0x0006 /* Flash control Register */ 1672#define ICH_FLASH_HSFCTL 0x0006 /* Flash control Register */
1670#define HSFCTL_GO 0x0001 /* Flash Cycle Go */ 1673#define HSFCTL_GO 0x0001 /* Flash Cycle Go */
1671#define HSFCTL_CYCLE_MASK 0x0006 /* Flash Cycle */ 1674#define HSFCTL_CYCLE_MASK 0x0006 /* Flash Cycle */
1672#define HSFCTL_CYCLE_SHIFT 1 1675#define HSFCTL_CYCLE_SHIFT 1
1673#define HSFCTL_BCOUNT_MASK 0x0300 /* Data Byte Count */ 1676#define HSFCTL_BCOUNT_MASK 0x0300 /* Data Byte Count */
1674#define HSFCTL_BCOUNT_SHIFT 8 1677#define HSFCTL_BCOUNT_SHIFT 8
1675#define ICH_FLASH_FADDR 0x0008 1678#define ICH_FLASH_FADDR 0x0008
1676#define ICH_FLASH_FDATA0 0x0010 1679#define ICH_FLASH_FDATA0 0x0010
1677#define ICH_FLASH_FRACC 0x0050 1680#define ICH_FLASH_FRACC 0x0050
1678#define ICH_FLASH_FREG0 0x0054 1681#define ICH_FLASH_FREG0 0x0054
1679#define ICH_FLASH_FREG1 0x0058 1682#define ICH_FLASH_FREG1 0x0058
1680#define ICH_FLASH_FREG2 0x005c 1683#define ICH_FLASH_FREG2 0x005c
1681#define ICH_FLASH_FREG3 0x0060 1684#define ICH_FLASH_FREG3 0x0060
1682#define ICH_FLASH_FPR0 0x0074 1685#define ICH_FLASH_FPR0 0x0074
1683#define ICH_FLASH_FPR1 0x0078 1686#define ICH_FLASH_FPR1 0x0078
1684#define ICH_FLASH_SSFSTS 0x0090 1687#define ICH_FLASH_SSFSTS 0x0090
1685#define ICH_FLASH_SSFCTL 0x0092 1688#define ICH_FLASH_SSFCTL 0x0092
1686#define ICH_FLASH_PREOP 0x0094 1689#define ICH_FLASH_PREOP 0x0094
1687#define ICH_FLASH_OPTYPE 0x0096 1690#define ICH_FLASH_OPTYPE 0x0096
1688#define ICH_FLASH_OPMENU 0x0098 1691#define ICH_FLASH_OPMENU 0x0098
1689 1692
1690#define ICH_FLASH_REG_MAPSIZE 0x00a0 1693#define ICH_FLASH_REG_MAPSIZE 0x00a0
1691#define ICH_FLASH_SECTOR_SIZE 4096 1694#define ICH_FLASH_SECTOR_SIZE 4096
1692#define ICH_GFPREG_BASE_MASK 0x1fff 1695#define ICH_GFPREG_BASE_MASK 0x1fff
1693#define ICH_FLASH_LINEAR_ADDR_MASK 0x00ffffff 1696#define ICH_FLASH_LINEAR_ADDR_MASK 0x00ffffff
1694 1697
1695#define ICH_NVM_SIG_WORD 0x13 1698#define ICH_NVM_SIG_WORD 0x13
1696#define ICH_NVM_SIG_MASK 0xc000 1699#define ICH_NVM_SIG_MASK 0xc000
1697#define ICH_NVM_VALID_SIG_MASK 0xc0 1700#define ICH_NVM_VALID_SIG_MASK 0xc0
1698#define ICH_NVM_SIG_VALUE 0x80 1701#define ICH_NVM_SIG_VALUE 0x80
1699 1702
1700#define NVM_SIZE_MULTIPLIER 4096 /* multiplier for NVMS field */ 1703#define NVM_SIZE_MULTIPLIER 4096 /* multiplier for NVMS field */
1701#define WM_PCH_SPT_FLASHOFFSET 0xe000 /* offset of NVM access regs(PCH_SPT)*/ 1704#define WM_PCH_SPT_FLASHOFFSET 0xe000 /* offset of NVM access regs(PCH_SPT)*/
1702 1705
1703/* for PCI express Capability registers */ 1706/* for PCI express Capability registers */
1704#define WM_PCIE_DCSR2_16MS 0x00000005 1707#define WM_PCIE_DCSR2_16MS 0x00000005
1705 1708
1706/* SFF SFP ROM data */ 1709/* SFF SFP ROM data */
1707#define SFF_SFP_ID_OFF 0x00 1710#define SFF_SFP_ID_OFF 0x00
1708#define SFF_SFP_ID_UNKNOWN 0x00 /* Unknown */ 1711#define SFF_SFP_ID_UNKNOWN 0x00 /* Unknown */
1709#define SFF_SFP_ID_SFF 0x02 /* Module soldered to motherboard */ 1712#define SFF_SFP_ID_SFF 0x02 /* Module soldered to motherboard */
1710#define SFF_SFP_ID_SFP 0x03 /* SFP transceiver */ 1713#define SFF_SFP_ID_SFP 0x03 /* SFP transceiver */
1711 1714
1712#define SFF_SFP_ETH_FLAGS_OFF 0x06 1715#define SFF_SFP_ETH_FLAGS_OFF 0x06
1713#define SFF_SFP_ETH_FLAGS_1000SX 0x01 1716#define SFF_SFP_ETH_FLAGS_1000SX 0x01
1714#define SFF_SFP_ETH_FLAGS_1000LX 0x02 1717#define SFF_SFP_ETH_FLAGS_1000LX 0x02
1715#define SFF_SFP_ETH_FLAGS_1000CX 0x04 1718#define SFF_SFP_ETH_FLAGS_1000CX 0x04
1716#define SFF_SFP_ETH_FLAGS_1000T 0x08 1719#define SFF_SFP_ETH_FLAGS_1000T 0x08
1717#define SFF_SFP_ETH_FLAGS_100LX 0x10 1720#define SFF_SFP_ETH_FLAGS_100LX 0x10
1718#define SFF_SFP_ETH_FLAGS_100FX 0x20 1721#define SFF_SFP_ETH_FLAGS_100FX 0x20
1719 1722
1720/* I21[01] PHY related definitions */ 1723/* I21[01] PHY related definitions */
1721#define GS40G_PAGE_SELECT 0x16 1724#define GS40G_PAGE_SELECT 0x16
1722#define GS40G_PAGE_SHIFT 16 1725#define GS40G_PAGE_SHIFT 16
1723#define GS40G_OFFSET_MASK 0xffff 1726#define GS40G_OFFSET_MASK 0xffff
1724#define GS40G_PHY_PLL_FREQ_PAGE 0xfc0000 1727#define GS40G_PHY_PLL_FREQ_PAGE 0xfc0000
1725#define GS40G_PHY_PLL_FREQ_REG 0x000e 1728#define GS40G_PHY_PLL_FREQ_REG 0x000e
1726#define GS40G_PHY_PLL_UNCONF 0xff 1729#define GS40G_PHY_PLL_UNCONF 0xff
1727 1730
1728/* advanced TX descriptor for 82575 and newer */ 1731/* advanced TX descriptor for 82575 and newer */
/*
 * NOTE(review): this region is a side-by-side CVS diff rendering; each
 * line below appears twice (old- and new-revision columns) with the
 * per-revision line numbers fused into the text.  All original tokens
 * are preserved byte-identically.
 *
 * nq_txdesc: advanced TX descriptor for the 82575 and newer devices
 * (per the comment preceding this typedef).  The union overlays two
 * 16-byte views of the same descriptor slot:
 *  - nqtx_data: data descriptor — 64-bit buffer address (nqtxd_addr),
 *    a command/length word (nqtxd_cmdlen, see NQTX_CMD_* below), and a
 *    fields word (nqtxd_fields, see NQTXD_FIELDS_* below).
 *  - nqtx_ctx: context descriptor — VLAN/header-length word
 *    (nqtxc_vl_len, see NQTXC_VLLEN_*), sequence number (nqtxc_sn),
 *    command word (nqtxc_cmd, see NQTXC_CMD_*), and MSS/index word
 *    (nqtxc_mssidx, see NQTXC_MSSIDX_*).
 * Marked __packed so no padding is inserted — presumably to keep the
 * in-memory layout exactly as the NIC's DMA engine reads it (standard
 * for descriptor rings; confirm against the 82575 datasheet).
 */
1729typedef union nq_txdesc { 1732typedef union nq_txdesc {
1730 struct { 1733 struct {
1731 uint64_t nqtxd_addr; 1734 uint64_t nqtxd_addr;
1732 uint32_t nqtxd_cmdlen; 1735 uint32_t nqtxd_cmdlen;
1733 uint32_t nqtxd_fields; 1736 uint32_t nqtxd_fields;
1734 } nqtx_data; 1737 } nqtx_data;
1735 struct { 1738 struct {
1736 uint32_t nqtxc_vl_len; 1739 uint32_t nqtxc_vl_len;
1737 uint32_t nqtxc_sn; 1740 uint32_t nqtxc_sn;
1738 uint32_t nqtxc_cmd; 1741 uint32_t nqtxc_cmd;
1739 uint32_t nqtxc_mssidx; 1742 uint32_t nqtxc_mssidx;
1740 } nqtx_ctx; 1743 } nqtx_ctx;
1741} __packed nq_txdesc_t; 1744} __packed nq_txdesc_t;
1742 1745
1743 1746
1744/* Commands for nqtxd_cmdlen and nqtxc_cmd */ 1747/* Commands for nqtxd_cmdlen and nqtxc_cmd */
1745#define NQTX_CMD_EOP __BIT(24) /* end of packet */ 1748#define NQTX_CMD_EOP __BIT(24) /* end of packet */
1746#define NQTX_CMD_IFCS __BIT(25) /* insert FCS */ 1749#define NQTX_CMD_IFCS __BIT(25) /* insert FCS */
1747#define NQTX_CMD_RS __BIT(27) /* report status */ 1750#define NQTX_CMD_RS __BIT(27) /* report status */
1748#define NQTX_CMD_DEXT __BIT(29) /* descriptor extension */ 1751#define NQTX_CMD_DEXT __BIT(29) /* descriptor extension */
1749#define NQTX_CMD_VLE __BIT(30) /* VLAN enable */ 1752#define NQTX_CMD_VLE __BIT(30) /* VLAN enable */
1750#define NQTX_CMD_TSE __BIT(31) /* TCP segmentation enable */ 1753#define NQTX_CMD_TSE __BIT(31) /* TCP segmentation enable */
1751 1754
1752/* Descriptor types (if DEXT is set) */ 1755/* Descriptor types (if DEXT is set) */
1753#define NQTX_DTYP_C (2U << 20) /* context */ 1756#define NQTX_DTYP_C (2U << 20) /* context */
1754#define NQTX_DTYP_D (3U << 20) /* data */ 1757#define NQTX_DTYP_D (3U << 20) /* data */
1755 1758
1756#define NQTXD_FIELDS_IDX_SHIFT 4 /* context index shift */ 1759#define NQTXD_FIELDS_IDX_SHIFT 4 /* context index shift */
1757#define NQTXD_FIELDS_IDX_MASK 0xf 1760#define NQTXD_FIELDS_IDX_MASK 0xf
1758#define NQTXD_FIELDS_PAYLEN_SHIFT 14 /* payload len shift */ 1761#define NQTXD_FIELDS_PAYLEN_SHIFT 14 /* payload len shift */
1759#define NQTXD_FIELDS_PAYLEN_MASK 0x3ffff 1762#define NQTXD_FIELDS_PAYLEN_MASK 0x3ffff
1760 1763
1761#define NQTXD_FIELDS_IXSM __BIT(8) /* do IP checksum */ 1764#define NQTXD_FIELDS_IXSM __BIT(8) /* do IP checksum */
1762#define NQTXD_FIELDS_TUXSM __BIT(9) /* do TCP/UDP checksum */ 1765#define NQTXD_FIELDS_TUXSM __BIT(9) /* do TCP/UDP checksum */
1763 1766
1764#define NQTXC_VLLEN_IPLEN_SHIFT 0 /* IP header len */ 1767#define NQTXC_VLLEN_IPLEN_SHIFT 0 /* IP header len */
1765#define NQTXC_VLLEN_IPLEN_MASK 0x1ff 1768#define NQTXC_VLLEN_IPLEN_MASK 0x1ff
1766#define NQTXC_VLLEN_MACLEN_SHIFT 9 /* MAC header len */ 1769#define NQTXC_VLLEN_MACLEN_SHIFT 9 /* MAC header len */
1767#define NQTXC_VLLEN_MACLEN_MASK 0x7f 1770#define NQTXC_VLLEN_MACLEN_MASK 0x7f
1768#define NQTXC_VLLEN_VLAN_SHIFT 16 /* vlan number */ 1771#define NQTXC_VLLEN_VLAN_SHIFT 16 /* vlan number */
1769#define NQTXC_VLLEN_VLAN_MASK 0xffff 1772#define NQTXC_VLLEN_VLAN_MASK 0xffff
1770 1773
1771#define NQTXC_CMD_MKRLOC_SHIFT 0 /* IP checksum offset */ 1774#define NQTXC_CMD_MKRLOC_SHIFT 0 /* IP checksum offset */
1772#define NQTXC_CMD_MKRLOC_MASK 0x1ff 1775#define NQTXC_CMD_MKRLOC_MASK 0x1ff
1773#define NQTXC_CMD_SNAP __BIT(9) 1776#define NQTXC_CMD_SNAP __BIT(9)
1774#define NQTXC_CMD_IPV_MASK __BIT(10) 1777#define NQTXC_CMD_IPV_MASK __BIT(10)
1775#define NQTXC_CMD_IP4 __SHIFTIN(1, NQTXC_CMD_IPV_MASK) 1778#define NQTXC_CMD_IP4 __SHIFTIN(1, NQTXC_CMD_IPV_MASK)
1776#define NQTXC_CMD_IP6 __SHIFTIN(0, NQTXC_CMD_IPV_MASK) 1779#define NQTXC_CMD_IP6 __SHIFTIN(0, NQTXC_CMD_IPV_MASK)
1777#define NQTXC_CMD_TP_MASK __BIT(11) 1780#define NQTXC_CMD_TP_MASK __BIT(11)
1778#define NQTXC_CMD_TCP __SHIFTIN(1, NQTXC_CMD_TP_MASK) 1781#define NQTXC_CMD_TCP __SHIFTIN(1, NQTXC_CMD_TP_MASK)
1779#define NQTXC_CMD_UDP __SHIFTIN(0, NQTXC_CMD_TP_MASK) 1782#define NQTXC_CMD_UDP __SHIFTIN(0, NQTXC_CMD_TP_MASK)
1780#define NQTXC_MSSIDX_IDX_SHIFT 4 /* context index shift */ 1783#define NQTXC_MSSIDX_IDX_SHIFT 4 /* context index shift */
1781#define NQTXC_MSSIDX_IDX_MASK 0xf 1784#define NQTXC_MSSIDX_IDX_MASK 0xf
1782#define NQTXC_MSSIDX_L4LEN_SHIFT 8 /* L4 header len shift */ 1785#define NQTXC_MSSIDX_L4LEN_SHIFT 8 /* L4 header len shift */
1783#define NQTXC_MSSIDX_L4LEN_MASK 0xff 1786#define NQTXC_MSSIDX_L4LEN_MASK 0xff
1784#define NQTXC_MSSIDX_MSS_SHIFT 16 /* MSS */ 1787#define NQTXC_MSSIDX_MSS_SHIFT 16 /* MSS */
1785#define NQTXC_MSSIDX_MSS_MASK 0xffff 1788#define NQTXC_MSSIDX_MSS_MASK 0xffff