Mon Jan 30 09:33:38 2017 UTC
Fix endianness of "rsshash" (currently used only for a debug message) and tabify.


(knakahara)
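For context: the change itself lands later in the file (presumably in wm_rxeof(),
the Rx completion path, which is beyond this excerpt). The RSS hash field of the
extended/NEWQUEUE Rx descriptors is written little-endian by the hardware, so it
must be converted to host byte order before it is printed on big-endian machines.
A minimal sketch of the pattern, using a hypothetical helper name
(wm_rxdesc_rsshash is illustrative, not the driver's actual code):

	/*
	 * Sketch only, not the verbatim change.  Rx descriptor fields are
	 * stored little-endian, so byte-swap before use; le32toh() is a
	 * no-op on little-endian hosts.  "rsshash_le" stands for the raw
	 * 32-bit RSS hash field read out of the Rx descriptor.
	 */
	static inline uint32_t
	wm_rxdesc_rsshash(uint32_t rsshash_le)
	{

		return le32toh(rsshash_le);
	}

The debug message then prints the host-order value, e.g.:

	DPRINTF(WM_DEBUG_RX, ("%s: RX: RSS hash = 0x%08x\n",
	    device_xname(sc->sc_dev), wm_rxdesc_rsshash(rsshash_le)));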
diff -r1.470 -r1.471 src/sys/dev/pci/if_wm.c

cvs diff -r1.470 -r1.471 src/sys/dev/pci/if_wm.c

--- src/sys/dev/pci/if_wm.c 2017/01/27 05:04:47 1.470
+++ src/sys/dev/pci/if_wm.c 2017/01/30 09:33:38 1.471
@@ -1,1086 +1,1086 @@
-/*	$NetBSD: if_wm.c,v 1.470 2017/01/27 05:04:47 knakahara Exp $	*/
+/*	$NetBSD: if_wm.c,v 1.471 2017/01/30 09:33:38 knakahara Exp $	*/
 
 /*
  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
  * All rights reserved.
  *
  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. All advertising materials mentioning features or use of this software
  *    must display the following acknowledgement:
  *	This product includes software developed for the NetBSD Project by
  *	Wasabi Systems, Inc.
  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
  *    or promote products derived from this software without specific prior
  *    written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 /*******************************************************************************
 
   Copyright (c) 2001-2005, Intel Corporation
   All rights reserved.
 
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:
 
    1. Redistributions of source code must retain the above copyright notice,
       this list of conditions and the following disclaimer.
 
    2. Redistributions in binary form must reproduce the above copyright
       notice, this list of conditions and the following disclaimer in the
       documentation and/or other materials provided with the distribution.
 
    3. Neither the name of the Intel Corporation nor the names of its
       contributors may be used to endorse or promote products derived from
       this software without specific prior written permission.
 
   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   POSSIBILITY OF SUCH DAMAGE.
 
 *******************************************************************************/
 /*
  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
  *
  * TODO (in order of importance):
  *
  *	- Check XXX'ed comments
  *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
  *	- TX Multi queue improvement (refine queue selection logic)
  *	- Split header buffer for newer descriptors
  *	- EEE (Energy Efficiency Ethernet)
  *	- Virtual Function
  *	- Set LED correctly (based on contents in EEPROM)
  *	- Rework how parameters are loaded from the EEPROM.
  *	- Image Unique ID
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.470 2017/01/27 05:04:47 knakahara Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.471 2017/01/30 09:33:38 knakahara Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_net_mpsafe.h"
 #endif
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/callout.h>
 #include <sys/mbuf.h>
 #include <sys/malloc.h>
 #include <sys/kmem.h>
 #include <sys/kernel.h>
 #include <sys/socket.h>
 #include <sys/ioctl.h>
 #include <sys/errno.h>
 #include <sys/device.h>
 #include <sys/queue.h>
 #include <sys/syslog.h>
 #include <sys/interrupt.h>
 #include <sys/cpu.h>
 #include <sys/pcq.h>
 
 #include <sys/rndsource.h>
 
 #include <net/if.h>
 #include <net/if_dl.h>
 #include <net/if_media.h>
 #include <net/if_ether.h>
 
 #include <net/bpf.h>
 
 #include <netinet/in.h>			/* XXX for struct ip */
 #include <netinet/in_systm.h>		/* XXX for struct ip */
 #include <netinet/ip.h>			/* XXX for struct ip */
 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
 
 #include <sys/bus.h>
 #include <sys/intr.h>
 #include <machine/endian.h>
 
 #include <dev/mii/mii.h>
 #include <dev/mii/miivar.h>
 #include <dev/mii/miidevs.h>
 #include <dev/mii/mii_bitbang.h>
 #include <dev/mii/ikphyreg.h>
 #include <dev/mii/igphyreg.h>
 #include <dev/mii/igphyvar.h>
 #include <dev/mii/inbmphyreg.h>
 
 #include <dev/pci/pcireg.h>
 #include <dev/pci/pcivar.h>
 #include <dev/pci/pcidevs.h>
 
 #include <dev/pci/if_wmreg.h>
 #include <dev/pci/if_wmvar.h>
 
 #ifdef WM_DEBUG
 #define	WM_DEBUG_LINK		__BIT(0)
 #define	WM_DEBUG_TX		__BIT(1)
 #define	WM_DEBUG_RX		__BIT(2)
 #define	WM_DEBUG_GMII		__BIT(3)
 #define	WM_DEBUG_MANAGE		__BIT(4)
 #define	WM_DEBUG_NVM		__BIT(5)
 #define	WM_DEBUG_INIT		__BIT(6)
 #define	WM_DEBUG_LOCK		__BIT(7)
 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
 
 #define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
 #else
 #define	DPRINTF(x, y)	/* nothing */
 #endif /* WM_DEBUG */
 
 #ifdef NET_MPSAFE
 #define	WM_MPSAFE	1
 #endif
 
 /*
  * Maximum number of interrupts used by this device driver.
  */
 #define	WM_MAX_NQUEUEINTR	16
 #define	WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
 
 /*
  * Transmit descriptor list size. Due to errata, we can only have
  * 256 hardware descriptors in the ring on < 82544, but we use 4096
  * on >= 82544. We tell the upper layers that they can queue a lot
  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
  * of them at a time.
  *
  * We allow up to 256 (!) DMA segments per packet. Pathological packet
  * chains containing many small mbufs have been observed in zero-copy
  * situations with jumbo frames.
  */
 #define	WM_NTXSEGS		256
 #define	WM_IFQUEUELEN		256
 #define	WM_TXQUEUELEN_MAX	64
 #define	WM_TXQUEUELEN_MAX_82547	16
 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
 #define	WM_NTXDESC_82542	256
 #define	WM_NTXDESC_82544	4096
 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
 
 #define	WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */
 
 #define	WM_TXINTERQSIZE		256
 
 /*
  * Receive descriptor list size. We have one Rx buffer for normal
  * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized
  * packet. We allocate 256 receive descriptors, each with a 2k
  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
  */
 #define	WM_NRXDESC		256
 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
 
 typedef union txdescs {
 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
 	nq_txdesc_t sctxu_nq_txdescs[WM_NTXDESC_82544];
 } txdescs_t;
 
 typedef union rxdescs {
 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
 	ext_rxdesc_t sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
 	nq_rxdesc_t sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
 } rxdescs_t;
 
 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
 
 /*
  * Software state for transmit jobs.
  */
 struct wm_txsoft {
 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
 	bus_dmamap_t txs_dmamap;	/* our DMA map */
 	int txs_firstdesc;		/* first descriptor in packet */
 	int txs_lastdesc;		/* last descriptor in packet */
 	int txs_ndesc;			/* # of descriptors used */
 };
 
 /*
  * Software state for receive buffers. Each descriptor gets a
  * 2k (MCLBYTES) buffer and a DMA map. For packets which fill
  * more than one buffer, we chain them together.
  */
 struct wm_rxsoft {
 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
 };
 
 #define	WM_LINKUP_TIMEOUT	50
 
 static uint16_t swfwphysem[] = {
 	SWFW_PHY0_SM,
 	SWFW_PHY1_SM,
 	SWFW_PHY2_SM,
 	SWFW_PHY3_SM
 };
 
 static const uint32_t wm_82580_rxpbs_table[] = {
 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
 };
 
 struct wm_softc;
 
 #ifdef WM_EVENT_COUNTERS
 #define	WM_Q_EVCNT_DEFINE(qname, evname)				\
 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
 	struct evcnt qname##_ev_##evname;
 
 #define	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
 	do{								\
 		snprintf((q)->qname##_##evname##_evcnt_name,		\
 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
 		    "%s%02d%s", #qname, (qnum), #evname);		\
 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
 		    (evtype), NULL, (xname),				\
 		    (q)->qname##_##evname##_evcnt_name);		\
 	}while(0)
 
 #define	WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
 
 #define	WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
 #endif /* WM_EVENT_COUNTERS */
 
 struct wm_txqueue {
 	kmutex_t *txq_lock;		/* lock for tx operations */
 
 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
 
 	/* Software state for the transmit descriptors. */
 	int txq_num;			/* must be a power of two */
 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
 
 	/* TX control data structures. */
 	int txq_ndesc;			/* must be a power of two */
 	size_t txq_descsize;		/* a tx descriptor size */
 	txdescs_t *txq_descs_u;
 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
 	int txq_desc_rseg;		/* real number of control segments */
 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
 #define	txq_descs	txq_descs_u->sctxu_txdescs
 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
 
 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
 
 	int txq_free;			/* number of free Tx descriptors */
 	int txq_next;			/* next ready Tx descriptor */
 
 	int txq_sfree;			/* number of free Tx jobs */
 	int txq_snext;			/* next free Tx job */
 	int txq_sdirty;			/* dirty Tx jobs */
 
 	/* These 4 variables are used only on the 82547. */
 	int txq_fifo_size;		/* Tx FIFO size */
 	int txq_fifo_head;		/* current head of FIFO */
 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
 	int txq_fifo_stall;		/* Tx FIFO is stalled */
 
 	/*
 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
 	 * CPUs. This queue mediates between them without blocking.
 	 */
 	pcq_t *txq_interq;
 
 	/*
 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
 	 * to manage the Tx H/W queue's busy flag.
 	 */
 	int txq_flags;			/* flags for H/W queue, see below */
 #define	WM_TXQ_NO_SPACE	0x1
 
 	bool txq_stopping;
 
 #ifdef WM_EVENT_COUNTERS
 	WM_Q_EVCNT_DEFINE(txq, txsstall) /* Tx stalled due to no txs */
 	WM_Q_EVCNT_DEFINE(txq, txdstall) /* Tx stalled due to no txd */
 	WM_Q_EVCNT_DEFINE(txq, txfifo_stall) /* Tx FIFO stalls (82547) */
 	WM_Q_EVCNT_DEFINE(txq, txdw)	/* Tx descriptor interrupts */
 	WM_Q_EVCNT_DEFINE(txq, txqe)	/* Tx queue empty interrupts */
 					/* XXX not used? */
 
 	WM_Q_EVCNT_DEFINE(txq, txipsum)	/* IP checksums comp. out-bound */
 	WM_Q_EVCNT_DEFINE(txq, txtusum)	/* TCP/UDP cksums comp. out-bound */
 	WM_Q_EVCNT_DEFINE(txq, txtusum6) /* TCP/UDP v6 cksums comp. out-bound */
 	WM_Q_EVCNT_DEFINE(txq, txtso)	/* TCP seg offload out-bound (IPv4) */
 	WM_Q_EVCNT_DEFINE(txq, txtso6)	/* TCP seg offload out-bound (IPv6) */
 	WM_Q_EVCNT_DEFINE(txq, txtsopain) /* painful header manip. for TSO */
 
 	WM_Q_EVCNT_DEFINE(txq, txdrop)	/* Tx packets dropped(too many segs) */
 
 	WM_Q_EVCNT_DEFINE(txq, tu)	/* Tx underrun */
 
 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
 #endif /* WM_EVENT_COUNTERS */
 };
 
 struct wm_rxqueue {
 	kmutex_t *rxq_lock;		/* lock for rx operations */
 
 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
 
 	/* Software state for the receive descriptors. */
 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
 
 	/* RX control data structures. */
 	int rxq_ndesc;			/* must be a power of two */
 	size_t rxq_descsize;		/* a rx descriptor size */
 	rxdescs_t *rxq_descs_u;
 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
 	int rxq_desc_rseg;		/* real number of control segments */
 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
 
 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
 
 	int rxq_ptr;			/* next ready Rx desc/queue ent */
 	int rxq_discard;
 	int rxq_len;
 	struct mbuf *rxq_head;
 	struct mbuf *rxq_tail;
 	struct mbuf **rxq_tailp;
 
 	bool rxq_stopping;
 
 #ifdef WM_EVENT_COUNTERS
 	WM_Q_EVCNT_DEFINE(rxq, rxintr);	/* Rx interrupts */
 
 	WM_Q_EVCNT_DEFINE(rxq, rxipsum); /* IP checksums checked in-bound */
 	WM_Q_EVCNT_DEFINE(rxq, rxtusum); /* TCP/UDP cksums checked in-bound */
 #endif
 };
 
 struct wm_queue {
 	int wmq_id;			/* index of transmit and receive queues */
 	int wmq_intr_idx;		/* index of MSI-X tables */
 
 	struct wm_txqueue wmq_txq;
 	struct wm_rxqueue wmq_rxq;
 };
 
 struct wm_phyop {
 	int (*acquire)(struct wm_softc *);
 	void (*release)(struct wm_softc *);
 	int reset_delay_us;
 };
 
 /*
  * Software state per device.
  */
 struct wm_softc {
 	device_t sc_dev;		/* generic device information */
 	bus_space_tag_t sc_st;		/* bus space tag */
 	bus_space_handle_t sc_sh;	/* bus space handle */
 	bus_size_t sc_ss;		/* bus space size */
 	bus_space_tag_t sc_iot;		/* I/O space tag */
 	bus_space_handle_t sc_ioh;	/* I/O space handle */
 	bus_size_t sc_ios;		/* I/O space size */
 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
 	bus_size_t sc_flashs;		/* flash registers space size */
 	off_t sc_flashreg_offset;	/*
 					 * offset to flash registers from
 					 * start of BAR
 					 */
 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
 
 	struct ethercom sc_ethercom;	/* ethernet common data */
 	struct mii_data sc_mii;		/* MII/media information */
 
 	pci_chipset_tag_t sc_pc;
 	pcitag_t sc_pcitag;
 	int sc_bus_speed;		/* PCI/PCIX bus speed */
 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
 
 	uint16_t sc_pcidevid;		/* PCI device ID */
 	wm_chip_type sc_type;		/* MAC type */
 	int sc_rev;			/* MAC revision */
 	wm_phy_type sc_phytype;		/* PHY type */
 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
 #define	WM_MEDIATYPE_UNKNOWN		0x00
 #define	WM_MEDIATYPE_FIBER		0x01
 #define	WM_MEDIATYPE_COPPER		0x02
 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
 	int sc_funcid;			/* unit number of the chip (0 to 3) */
 	int sc_flags;			/* flags; see below */
 	int sc_if_flags;		/* last if_flags */
 	int sc_flowflags;		/* 802.3x flow control flags */
 	int sc_align_tweak;
 
 	void *sc_ihs[WM_MAX_NINTR];	/*
 					 * interrupt cookie.
 					 * legacy and msi use sc_ihs[0].
 					 */
 	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
 	int sc_nintrs;			/* number of interrupts */
 
 	int sc_link_intr_idx;		/* index of MSI-X tables */
 
 	callout_t sc_tick_ch;		/* tick callout */
 	bool sc_core_stopping;
 
 	int sc_nvm_ver_major;
 	int sc_nvm_ver_minor;
 	int sc_nvm_ver_build;
 	int sc_nvm_addrbits;		/* NVM address bits */
 	unsigned int sc_nvm_wordsize;	/* NVM word size */
 	int sc_ich8_flash_base;
 	int sc_ich8_flash_bank_size;
 	int sc_nvm_k1_enabled;
 
 	int sc_nqueues;
 	struct wm_queue *sc_queue;
 
 	int sc_affinity_offset;
 
 #ifdef WM_EVENT_COUNTERS
 	/* Event counters. */
 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
 
 	/* WM_T_82542_2_1 only */
 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
 #endif /* WM_EVENT_COUNTERS */
 
 	/* This variable is used only on the 82547. */
494 callout_t sc_txfifo_ch; /* Tx FIFO stall work-around timer */ 494 callout_t sc_txfifo_ch; /* Tx FIFO stall work-around timer */
495 495
496 uint32_t sc_ctrl; /* prototype CTRL register */ 496 uint32_t sc_ctrl; /* prototype CTRL register */
497#if 0 497#if 0
498 uint32_t sc_ctrl_ext; /* prototype CTRL_EXT register */ 498 uint32_t sc_ctrl_ext; /* prototype CTRL_EXT register */
499#endif 499#endif
500 uint32_t sc_icr; /* prototype interrupt bits */ 500 uint32_t sc_icr; /* prototype interrupt bits */
501 uint32_t sc_itr; /* prototype intr throttling reg */ 501 uint32_t sc_itr; /* prototype intr throttling reg */
502 uint32_t sc_tctl; /* prototype TCTL register */ 502 uint32_t sc_tctl; /* prototype TCTL register */
503 uint32_t sc_rctl; /* prototype RCTL register */ 503 uint32_t sc_rctl; /* prototype RCTL register */
504 uint32_t sc_txcw; /* prototype TXCW register */ 504 uint32_t sc_txcw; /* prototype TXCW register */
505 uint32_t sc_tipg; /* prototype TIPG register */ 505 uint32_t sc_tipg; /* prototype TIPG register */
506 uint32_t sc_fcrtl; /* prototype FCRTL register */ 506 uint32_t sc_fcrtl; /* prototype FCRTL register */
507 uint32_t sc_pba; /* prototype PBA register */ 507 uint32_t sc_pba; /* prototype PBA register */
508 508
509 int sc_tbi_linkup; /* TBI link status */ 509 int sc_tbi_linkup; /* TBI link status */
510 int sc_tbi_serdes_anegticks; /* autonegotiation ticks */ 510 int sc_tbi_serdes_anegticks; /* autonegotiation ticks */
511 int sc_tbi_serdes_ticks; /* tbi ticks */ 511 int sc_tbi_serdes_ticks; /* tbi ticks */
512 512
513 int sc_mchash_type; /* multicast filter offset */ 513 int sc_mchash_type; /* multicast filter offset */
514 514
515 krndsource_t rnd_source; /* random source */ 515 krndsource_t rnd_source; /* random source */
516 516
517 struct if_percpuq *sc_ipq; /* softint-based input queues */ 517 struct if_percpuq *sc_ipq; /* softint-based input queues */
518 518
519 kmutex_t *sc_core_lock; /* lock for softc operations */ 519 kmutex_t *sc_core_lock; /* lock for softc operations */
520 kmutex_t *sc_ich_phymtx; /* 520 kmutex_t *sc_ich_phymtx; /*
521 * 82574/82583/ICH/PCH specific PHY 521 * 82574/82583/ICH/PCH specific PHY
522 * mutex. For 82574/82583, the mutex 522 * mutex. For 82574/82583, the mutex
523 * is used for both PHY and NVM. 523 * is used for both PHY and NVM.
524 */ 524 */
525 kmutex_t *sc_ich_nvmmtx; /* ICH/PCH specific NVM mutex */ 525 kmutex_t *sc_ich_nvmmtx; /* ICH/PCH specific NVM mutex */
526 526
527 struct wm_phyop phy; 527 struct wm_phyop phy;
528}; 528};
529 529
530#define WM_CORE_LOCK(_sc) if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock) 530#define WM_CORE_LOCK(_sc) if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
531#define WM_CORE_UNLOCK(_sc) if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock) 531#define WM_CORE_UNLOCK(_sc) if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
532#define WM_CORE_LOCKED(_sc) (!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock)) 532#define WM_CORE_LOCKED(_sc) (!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
533 533
534#ifdef WM_MPSAFE 534#ifdef WM_MPSAFE
535#define CALLOUT_FLAGS CALLOUT_MPSAFE 535#define CALLOUT_FLAGS CALLOUT_MPSAFE
536#else 536#else
537#define CALLOUT_FLAGS 0 537#define CALLOUT_FLAGS 0
538#endif 538#endif
539 539
540#define WM_RXCHAIN_RESET(rxq) \ 540#define WM_RXCHAIN_RESET(rxq) \
541do { \ 541do { \
542 (rxq)->rxq_tailp = &(rxq)->rxq_head; \ 542 (rxq)->rxq_tailp = &(rxq)->rxq_head; \
543 *(rxq)->rxq_tailp = NULL; \ 543 *(rxq)->rxq_tailp = NULL; \
544 (rxq)->rxq_len = 0; \ 544 (rxq)->rxq_len = 0; \
545} while (/*CONSTCOND*/0) 545} while (/*CONSTCOND*/0)
546 546
547#define WM_RXCHAIN_LINK(rxq, m) \ 547#define WM_RXCHAIN_LINK(rxq, m) \
548do { \ 548do { \
549 *(rxq)->rxq_tailp = (rxq)->rxq_tail = (m); \ 549 *(rxq)->rxq_tailp = (rxq)->rxq_tail = (m); \
550 (rxq)->rxq_tailp = &(m)->m_next; \ 550 (rxq)->rxq_tailp = &(m)->m_next; \
551} while (/*CONSTCOND*/0) 551} while (/*CONSTCOND*/0)
552 552
553#ifdef WM_EVENT_COUNTERS 553#ifdef WM_EVENT_COUNTERS
554#define WM_EVCNT_INCR(ev) (ev)->ev_count++ 554#define WM_EVCNT_INCR(ev) (ev)->ev_count++
555#define WM_EVCNT_ADD(ev, val) (ev)->ev_count += (val) 555#define WM_EVCNT_ADD(ev, val) (ev)->ev_count += (val)
556 556
557#define WM_Q_EVCNT_INCR(qname, evname) \ 557#define WM_Q_EVCNT_INCR(qname, evname) \
558 WM_EVCNT_INCR(&(qname)->qname##_ev_##evname) 558 WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
559#define WM_Q_EVCNT_ADD(qname, evname, val) \ 559#define WM_Q_EVCNT_ADD(qname, evname, val) \
560 WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val)) 560 WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
561#else /* !WM_EVENT_COUNTERS */ 561#else /* !WM_EVENT_COUNTERS */
562#define WM_EVCNT_INCR(ev) /* nothing */ 562#define WM_EVCNT_INCR(ev) /* nothing */
563#define WM_EVCNT_ADD(ev, val) /* nothing */ 563#define WM_EVCNT_ADD(ev, val) /* nothing */
564 564
565#define WM_Q_EVCNT_INCR(qname, evname) /* nothing */ 565#define WM_Q_EVCNT_INCR(qname, evname) /* nothing */
566#define WM_Q_EVCNT_ADD(qname, evname, val) /* nothing */ 566#define WM_Q_EVCNT_ADD(qname, evname, val) /* nothing */
567#endif /* !WM_EVENT_COUNTERS */ 567#endif /* !WM_EVENT_COUNTERS */
568 568
569#define CSR_READ(sc, reg) \ 569#define CSR_READ(sc, reg) \
570 bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg)) 570 bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
571#define CSR_WRITE(sc, reg, val) \ 571#define CSR_WRITE(sc, reg, val) \
572 bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val)) 572 bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
573#define CSR_WRITE_FLUSH(sc) \ 573#define CSR_WRITE_FLUSH(sc) \
574 (void) CSR_READ((sc), WMREG_STATUS) 574 (void) CSR_READ((sc), WMREG_STATUS)
575 575
576#define ICH8_FLASH_READ32(sc, reg) \ 576#define ICH8_FLASH_READ32(sc, reg) \
577 bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, \ 577 bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, \
578 (reg) + sc->sc_flashreg_offset) 578 (reg) + sc->sc_flashreg_offset)
579#define ICH8_FLASH_WRITE32(sc, reg, data) \ 579#define ICH8_FLASH_WRITE32(sc, reg, data) \
580 bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, \ 580 bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, \
581 (reg) + sc->sc_flashreg_offset, (data)) 581 (reg) + sc->sc_flashreg_offset, (data))
582 582
583#define ICH8_FLASH_READ16(sc, reg) \ 583#define ICH8_FLASH_READ16(sc, reg) \
584 bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, \ 584 bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, \
585 (reg) + sc->sc_flashreg_offset) 585 (reg) + sc->sc_flashreg_offset)
586#define ICH8_FLASH_WRITE16(sc, reg, data) \ 586#define ICH8_FLASH_WRITE16(sc, reg, data) \
587 bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, \ 587 bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, \
588 (reg) + sc->sc_flashreg_offset, (data)) 588 (reg) + sc->sc_flashreg_offset, (data))
589 589
590#define WM_CDTXADDR(txq, x) ((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x))) 590#define WM_CDTXADDR(txq, x) ((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
591#define WM_CDRXADDR(rxq, x) ((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x))) 591#define WM_CDRXADDR(rxq, x) ((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
592 592
593#define WM_CDTXADDR_LO(txq, x) (WM_CDTXADDR((txq), (x)) & 0xffffffffU) 593#define WM_CDTXADDR_LO(txq, x) (WM_CDTXADDR((txq), (x)) & 0xffffffffU)
594#define WM_CDTXADDR_HI(txq, x) \ 594#define WM_CDTXADDR_HI(txq, x) \
595 (sizeof(bus_addr_t) == 8 ? \ 595 (sizeof(bus_addr_t) == 8 ? \
596 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0) 596 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
597 597
598#define WM_CDRXADDR_LO(rxq, x) (WM_CDRXADDR((rxq), (x)) & 0xffffffffU) 598#define WM_CDRXADDR_LO(rxq, x) (WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
599#define WM_CDRXADDR_HI(rxq, x) \ 599#define WM_CDRXADDR_HI(rxq, x) \
600 (sizeof(bus_addr_t) == 8 ? \ 600 (sizeof(bus_addr_t) == 8 ? \
601 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0) 601 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
602 602
603/* 603/*
604 * Register read/write functions. 604 * Register read/write functions.
605 * Other than CSR_{READ|WRITE}(). 605 * Other than CSR_{READ|WRITE}().
606 */ 606 */
607#if 0 607#if 0
608static inline uint32_t wm_io_read(struct wm_softc *, int); 608static inline uint32_t wm_io_read(struct wm_softc *, int);
609#endif 609#endif
610static inline void wm_io_write(struct wm_softc *, int, uint32_t); 610static inline void wm_io_write(struct wm_softc *, int, uint32_t);
611static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t, 611static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
612 uint32_t, uint32_t); 612 uint32_t, uint32_t);
613static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t); 613static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
614 614
615/* 615/*
616 * Descriptor sync/init functions. 616 * Descriptor sync/init functions.
617 */ 617 */
618static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int); 618static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
619static inline void wm_cdrxsync(struct wm_rxqueue *, int, int); 619static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
620static inline void wm_init_rxdesc(struct wm_rxqueue *, int); 620static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
621 621
622/* 622/*
623 * Device driver interface functions and commonly used functions. 623 * Device driver interface functions and commonly used functions.
624 * match, attach, detach, init, start, stop, ioctl, watchdog and so on. 624 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
625 */ 625 */
626static const struct wm_product *wm_lookup(const struct pci_attach_args *); 626static const struct wm_product *wm_lookup(const struct pci_attach_args *);
627static int wm_match(device_t, cfdata_t, void *); 627static int wm_match(device_t, cfdata_t, void *);
628static void wm_attach(device_t, device_t, void *); 628static void wm_attach(device_t, device_t, void *);
629static int wm_detach(device_t, int); 629static int wm_detach(device_t, int);
630static bool wm_suspend(device_t, const pmf_qual_t *); 630static bool wm_suspend(device_t, const pmf_qual_t *);
631static bool wm_resume(device_t, const pmf_qual_t *); 631static bool wm_resume(device_t, const pmf_qual_t *);
632static void wm_watchdog(struct ifnet *); 632static void wm_watchdog(struct ifnet *);
633static void wm_watchdog_txq(struct ifnet *, struct wm_txqueue *); 633static void wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
634static void wm_tick(void *); 634static void wm_tick(void *);
635static int wm_ifflags_cb(struct ethercom *); 635static int wm_ifflags_cb(struct ethercom *);
636static int wm_ioctl(struct ifnet *, u_long, void *); 636static int wm_ioctl(struct ifnet *, u_long, void *);
637/* MAC address related */ 637/* MAC address related */
638static uint16_t wm_check_alt_mac_addr(struct wm_softc *); 638static uint16_t wm_check_alt_mac_addr(struct wm_softc *);
639static int wm_read_mac_addr(struct wm_softc *, uint8_t *); 639static int wm_read_mac_addr(struct wm_softc *, uint8_t *);
640static void wm_set_ral(struct wm_softc *, const uint8_t *, int); 640static void wm_set_ral(struct wm_softc *, const uint8_t *, int);
641static uint32_t wm_mchash(struct wm_softc *, const uint8_t *); 641static uint32_t wm_mchash(struct wm_softc *, const uint8_t *);
642static void wm_set_filter(struct wm_softc *); 642static void wm_set_filter(struct wm_softc *);
643/* Reset and init related */ 643/* Reset and init related */
644static void wm_set_vlan(struct wm_softc *); 644static void wm_set_vlan(struct wm_softc *);
645static void wm_set_pcie_completion_timeout(struct wm_softc *); 645static void wm_set_pcie_completion_timeout(struct wm_softc *);
646static void wm_get_auto_rd_done(struct wm_softc *); 646static void wm_get_auto_rd_done(struct wm_softc *);
647static void wm_lan_init_done(struct wm_softc *); 647static void wm_lan_init_done(struct wm_softc *);
648static void wm_get_cfg_done(struct wm_softc *); 648static void wm_get_cfg_done(struct wm_softc *);
649static void wm_initialize_hardware_bits(struct wm_softc *); 649static void wm_initialize_hardware_bits(struct wm_softc *);
650static uint32_t wm_rxpbs_adjust_82580(uint32_t); 650static uint32_t wm_rxpbs_adjust_82580(uint32_t);
651static void wm_reset_phy(struct wm_softc *); 651static void wm_reset_phy(struct wm_softc *);
652static void wm_flush_desc_rings(struct wm_softc *); 652static void wm_flush_desc_rings(struct wm_softc *);
653static void wm_reset(struct wm_softc *); 653static void wm_reset(struct wm_softc *);
654static int wm_add_rxbuf(struct wm_rxqueue *, int); 654static int wm_add_rxbuf(struct wm_rxqueue *, int);
655static void wm_rxdrain(struct wm_rxqueue *); 655static void wm_rxdrain(struct wm_rxqueue *);
656static void wm_rss_getkey(uint8_t *); 656static void wm_rss_getkey(uint8_t *);
657static void wm_init_rss(struct wm_softc *); 657static void wm_init_rss(struct wm_softc *);
658static void wm_adjust_qnum(struct wm_softc *, int); 658static void wm_adjust_qnum(struct wm_softc *, int);
659static int wm_setup_legacy(struct wm_softc *); 659static int wm_setup_legacy(struct wm_softc *);
660static int wm_setup_msix(struct wm_softc *); 660static int wm_setup_msix(struct wm_softc *);
661static int wm_init(struct ifnet *); 661static int wm_init(struct ifnet *);
662static int wm_init_locked(struct ifnet *); 662static int wm_init_locked(struct ifnet *);
663static void wm_turnon(struct wm_softc *); 663static void wm_turnon(struct wm_softc *);
664static void wm_turnoff(struct wm_softc *); 664static void wm_turnoff(struct wm_softc *);
665static void wm_stop(struct ifnet *, int); 665static void wm_stop(struct ifnet *, int);
666static void wm_stop_locked(struct ifnet *, int); 666static void wm_stop_locked(struct ifnet *, int);
667static void wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *); 667static void wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
668static void wm_82547_txfifo_stall(void *); 668static void wm_82547_txfifo_stall(void *);
669static int wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *); 669static int wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
670/* DMA related */ 670/* DMA related */
671static int wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *); 671static int wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
672static void wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *); 672static void wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
673static void wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *); 673static void wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
674static void wm_init_tx_regs(struct wm_softc *, struct wm_queue *, 674static void wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
675 struct wm_txqueue *); 675 struct wm_txqueue *);
676static int wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *); 676static int wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
677static void wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *); 677static void wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
678static void wm_init_rx_regs(struct wm_softc *, struct wm_queue *, 678static void wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
679 struct wm_rxqueue *); 679 struct wm_rxqueue *);
680static int wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *); 680static int wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
681static void wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *); 681static void wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
682static void wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *); 682static void wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
683static int wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *); 683static int wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
684static void wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *); 684static void wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
685static int wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *); 685static int wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
686static void wm_init_tx_queue(struct wm_softc *, struct wm_queue *, 686static void wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
687 struct wm_txqueue *); 687 struct wm_txqueue *);
688static int wm_init_rx_queue(struct wm_softc *, struct wm_queue *, 688static int wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
689 struct wm_rxqueue *); 689 struct wm_rxqueue *);
690static int wm_alloc_txrx_queues(struct wm_softc *); 690static int wm_alloc_txrx_queues(struct wm_softc *);
691static void wm_free_txrx_queues(struct wm_softc *); 691static void wm_free_txrx_queues(struct wm_softc *);
692static int wm_init_txrx_queues(struct wm_softc *); 692static int wm_init_txrx_queues(struct wm_softc *);
693/* Start */ 693/* Start */
694static int wm_tx_offload(struct wm_softc *, struct wm_txsoft *, 694static int wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
695 uint32_t *, uint8_t *); 695 uint32_t *, uint8_t *);
696static inline int wm_select_txqueue(struct ifnet *, struct mbuf *); 696static inline int wm_select_txqueue(struct ifnet *, struct mbuf *);
697static void wm_start(struct ifnet *); 697static void wm_start(struct ifnet *);
698static void wm_start_locked(struct ifnet *); 698static void wm_start_locked(struct ifnet *);
699static int wm_transmit(struct ifnet *, struct mbuf *); 699static int wm_transmit(struct ifnet *, struct mbuf *);
700static void wm_transmit_locked(struct ifnet *, struct wm_txqueue *); 700static void wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
701static void wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool); 701static void wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
702static int wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *, 702static int wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
703 struct wm_txsoft *, uint32_t *, uint32_t *, bool *); 703 struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
704static void wm_nq_start(struct ifnet *); 704static void wm_nq_start(struct ifnet *);
705static void wm_nq_start_locked(struct ifnet *); 705static void wm_nq_start_locked(struct ifnet *);
706static int wm_nq_transmit(struct ifnet *, struct mbuf *); 706static int wm_nq_transmit(struct ifnet *, struct mbuf *);
707static void wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *); 707static void wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
708static void wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool); 708static void wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
709static void wm_deferred_start(struct ifnet *); 709static void wm_deferred_start(struct ifnet *);
710/* Interrupt */ 710/* Interrupt */
711static int wm_txeof(struct wm_softc *, struct wm_txqueue *); 711static int wm_txeof(struct wm_softc *, struct wm_txqueue *);
712static void wm_rxeof(struct wm_rxqueue *); 712static void wm_rxeof(struct wm_rxqueue *);
713static void wm_linkintr_gmii(struct wm_softc *, uint32_t); 713static void wm_linkintr_gmii(struct wm_softc *, uint32_t);
714static void wm_linkintr_tbi(struct wm_softc *, uint32_t); 714static void wm_linkintr_tbi(struct wm_softc *, uint32_t);
715static void wm_linkintr_serdes(struct wm_softc *, uint32_t); 715static void wm_linkintr_serdes(struct wm_softc *, uint32_t);
716static void wm_linkintr(struct wm_softc *, uint32_t); 716static void wm_linkintr(struct wm_softc *, uint32_t);
717static int wm_intr_legacy(void *); 717static int wm_intr_legacy(void *);
718static int wm_txrxintr_msix(void *); 718static int wm_txrxintr_msix(void *);
719static int wm_linkintr_msix(void *); 719static int wm_linkintr_msix(void *);
720 720
721/* 721/*
722 * Media related. 722 * Media related.
723 * GMII, SGMII, TBI, SERDES and SFP. 723 * GMII, SGMII, TBI, SERDES and SFP.
724 */ 724 */
725/* Common */ 725/* Common */
726static void wm_tbi_serdes_set_linkled(struct wm_softc *); 726static void wm_tbi_serdes_set_linkled(struct wm_softc *);
727/* GMII related */ 727/* GMII related */
728static void wm_gmii_reset(struct wm_softc *); 728static void wm_gmii_reset(struct wm_softc *);
729static int wm_get_phy_id_82575(struct wm_softc *); 729static int wm_get_phy_id_82575(struct wm_softc *);
730static void wm_gmii_mediainit(struct wm_softc *, pci_product_id_t); 730static void wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
731static int wm_gmii_mediachange(struct ifnet *); 731static int wm_gmii_mediachange(struct ifnet *);
732static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *); 732static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
733static void wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int); 733static void wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
734static uint32_t wm_i82543_mii_recvbits(struct wm_softc *); 734static uint32_t wm_i82543_mii_recvbits(struct wm_softc *);
735static int wm_gmii_i82543_readreg(device_t, int, int); 735static int wm_gmii_i82543_readreg(device_t, int, int);
736static void wm_gmii_i82543_writereg(device_t, int, int, int); 736static void wm_gmii_i82543_writereg(device_t, int, int, int);
737static int wm_gmii_mdic_readreg(device_t, int, int); 737static int wm_gmii_mdic_readreg(device_t, int, int);
738static void wm_gmii_mdic_writereg(device_t, int, int, int); 738static void wm_gmii_mdic_writereg(device_t, int, int, int);
739static int wm_gmii_i82544_readreg(device_t, int, int); 739static int wm_gmii_i82544_readreg(device_t, int, int);
740static void wm_gmii_i82544_writereg(device_t, int, int, int); 740static void wm_gmii_i82544_writereg(device_t, int, int, int);
741static int wm_gmii_i80003_readreg(device_t, int, int); 741static int wm_gmii_i80003_readreg(device_t, int, int);
742static void wm_gmii_i80003_writereg(device_t, int, int, int); 742static void wm_gmii_i80003_writereg(device_t, int, int, int);
743static int wm_gmii_bm_readreg(device_t, int, int); 743static int wm_gmii_bm_readreg(device_t, int, int);
744static void wm_gmii_bm_writereg(device_t, int, int, int); 744static void wm_gmii_bm_writereg(device_t, int, int, int);
745static void wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int); 745static void wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
746static int wm_gmii_hv_readreg(device_t, int, int); 746static int wm_gmii_hv_readreg(device_t, int, int);
747static int wm_gmii_hv_readreg_locked(device_t, int, int); 747static int wm_gmii_hv_readreg_locked(device_t, int, int);
748static void wm_gmii_hv_writereg(device_t, int, int, int); 748static void wm_gmii_hv_writereg(device_t, int, int, int);
749static void wm_gmii_hv_writereg_locked(device_t, int, int, int); 749static void wm_gmii_hv_writereg_locked(device_t, int, int, int);
750static int wm_gmii_82580_readreg(device_t, int, int); 750static int wm_gmii_82580_readreg(device_t, int, int);
751static void wm_gmii_82580_writereg(device_t, int, int, int); 751static void wm_gmii_82580_writereg(device_t, int, int, int);
752static int wm_gmii_gs40g_readreg(device_t, int, int); 752static int wm_gmii_gs40g_readreg(device_t, int, int);
753static void wm_gmii_gs40g_writereg(device_t, int, int, int); 753static void wm_gmii_gs40g_writereg(device_t, int, int, int);
754static void wm_gmii_statchg(struct ifnet *); 754static void wm_gmii_statchg(struct ifnet *);
755/* 755/*
756 * Kumeran related (80003, ICH* and PCH*). 756 * Kumeran related (80003, ICH* and PCH*).
757 * These functions are not for accessing MII registers but for accessing 757 * These functions are not for accessing MII registers but for accessing
758 * Kumeran-specific registers. 758 * Kumeran-specific registers.
759 */ 759 */
760static int wm_kmrn_readreg(struct wm_softc *, int); 760static int wm_kmrn_readreg(struct wm_softc *, int);
761static int wm_kmrn_readreg_locked(struct wm_softc *, int); 761static int wm_kmrn_readreg_locked(struct wm_softc *, int);
762static void wm_kmrn_writereg(struct wm_softc *, int, int); 762static void wm_kmrn_writereg(struct wm_softc *, int, int);
763static void wm_kmrn_writereg_locked(struct wm_softc *, int, int); 763static void wm_kmrn_writereg_locked(struct wm_softc *, int, int);
764/* SGMII */ 764/* SGMII */
765static bool wm_sgmii_uses_mdio(struct wm_softc *); 765static bool wm_sgmii_uses_mdio(struct wm_softc *);
766static int wm_sgmii_readreg(device_t, int, int); 766static int wm_sgmii_readreg(device_t, int, int);
767static void wm_sgmii_writereg(device_t, int, int, int); 767static void wm_sgmii_writereg(device_t, int, int, int);
768/* TBI related */ 768/* TBI related */
769static void wm_tbi_mediainit(struct wm_softc *); 769static void wm_tbi_mediainit(struct wm_softc *);
770static int wm_tbi_mediachange(struct ifnet *); 770static int wm_tbi_mediachange(struct ifnet *);
771static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *); 771static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
772static int wm_check_for_link(struct wm_softc *); 772static int wm_check_for_link(struct wm_softc *);
773static void wm_tbi_tick(struct wm_softc *); 773static void wm_tbi_tick(struct wm_softc *);
774/* SERDES related */ 774/* SERDES related */
775static void wm_serdes_power_up_link_82575(struct wm_softc *); 775static void wm_serdes_power_up_link_82575(struct wm_softc *);
776static int wm_serdes_mediachange(struct ifnet *); 776static int wm_serdes_mediachange(struct ifnet *);
777static void wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *); 777static void wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
778static void wm_serdes_tick(struct wm_softc *); 778static void wm_serdes_tick(struct wm_softc *);
779/* SFP related */ 779/* SFP related */
780static int wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *); 780static int wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
781static uint32_t wm_sfp_get_media_type(struct wm_softc *); 781static uint32_t wm_sfp_get_media_type(struct wm_softc *);
782 782
783/* 783/*
784 * NVM related. 784 * NVM related.
785 * Microwire, SPI (w/wo EERD) and Flash. 785 * Microwire, SPI (w/wo EERD) and Flash.
786 */ 786 */
787/* Misc functions */ 787/* Misc functions */
788static void wm_eeprom_sendbits(struct wm_softc *, uint32_t, int); 788static void wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
789static void wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int); 789static void wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
790static int wm_nvm_set_addrbits_size_eecd(struct wm_softc *); 790static int wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
791/* Microwire */ 791/* Microwire */
792static int wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *); 792static int wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
793/* SPI */ 793/* SPI */
794static int wm_nvm_ready_spi(struct wm_softc *); 794static int wm_nvm_ready_spi(struct wm_softc *);
795static int wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *); 795static int wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
796/* Used with EERD */ 796/* Used with EERD */
797static int wm_poll_eerd_eewr_done(struct wm_softc *, int); 797static int wm_poll_eerd_eewr_done(struct wm_softc *, int);
798static int wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *); 798static int wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
799/* Flash */ 799/* Flash */
800static int wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *, 800static int wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
801 unsigned int *); 801 unsigned int *);
802static int32_t wm_ich8_cycle_init(struct wm_softc *); 802static int32_t wm_ich8_cycle_init(struct wm_softc *);
803static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t); 803static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
804static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t, 804static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
805 uint32_t *); 805 uint32_t *);
806static int32_t wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *); 806static int32_t wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
807static int32_t wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *); 807static int32_t wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
808static int32_t wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *); 808static int32_t wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
809static int wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *); 809static int wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
810static int wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *); 810static int wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
811/* iNVM */ 811/* iNVM */
812static int wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *); 812static int wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
813static int wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *); 813static int wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
814/* Lock, detect NVM type, validate checksum and read */ 814/* Lock, detect NVM type, validate checksum and read */
815static int wm_nvm_acquire(struct wm_softc *); 815static int wm_nvm_acquire(struct wm_softc *);
816static void wm_nvm_release(struct wm_softc *); 816static void wm_nvm_release(struct wm_softc *);
817static int wm_nvm_is_onboard_eeprom(struct wm_softc *); 817static int wm_nvm_is_onboard_eeprom(struct wm_softc *);
818static int wm_nvm_get_flash_presence_i210(struct wm_softc *); 818static int wm_nvm_get_flash_presence_i210(struct wm_softc *);
819static int wm_nvm_validate_checksum(struct wm_softc *); 819static int wm_nvm_validate_checksum(struct wm_softc *);
820static void wm_nvm_version_invm(struct wm_softc *); 820static void wm_nvm_version_invm(struct wm_softc *);
821static void wm_nvm_version(struct wm_softc *); 821static void wm_nvm_version(struct wm_softc *);
822static int wm_nvm_read(struct wm_softc *, int, int, uint16_t *); 822static int wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
823 823
824/* 824/*
825 * Hardware semaphores. 825 * Hardware semaphores.
826 * Very complex... 826 * Very complex...
827 */ 827 */
828static int wm_get_null(struct wm_softc *); 828static int wm_get_null(struct wm_softc *);
829static void wm_put_null(struct wm_softc *); 829static void wm_put_null(struct wm_softc *);
830static int wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */ 830static int wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
831static void wm_put_swsm_semaphore(struct wm_softc *); 831static void wm_put_swsm_semaphore(struct wm_softc *);
832static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t); 832static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
833static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t); 833static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
834static int wm_get_phy_82575(struct wm_softc *); 834static int wm_get_phy_82575(struct wm_softc *);
835static void wm_put_phy_82575(struct wm_softc *); 835static void wm_put_phy_82575(struct wm_softc *);
836static int wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */ 836static int wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
837static void wm_put_swfwhw_semaphore(struct wm_softc *); 837static void wm_put_swfwhw_semaphore(struct wm_softc *);
838static int wm_get_swflag_ich8lan(struct wm_softc *); /* For PHY */ 838static int wm_get_swflag_ich8lan(struct wm_softc *); /* For PHY */
839static void wm_put_swflag_ich8lan(struct wm_softc *); 839static void wm_put_swflag_ich8lan(struct wm_softc *);
840static int wm_get_nvm_ich8lan(struct wm_softc *); /* For NVM */ 840static int wm_get_nvm_ich8lan(struct wm_softc *); /* For NVM */
841static void wm_put_nvm_ich8lan(struct wm_softc *); 841static void wm_put_nvm_ich8lan(struct wm_softc *);
842static int wm_get_hw_semaphore_82573(struct wm_softc *); 842static int wm_get_hw_semaphore_82573(struct wm_softc *);
843static void wm_put_hw_semaphore_82573(struct wm_softc *); 843static void wm_put_hw_semaphore_82573(struct wm_softc *);
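
Every wm_get_* routine above is paired with a wm_put_* routine, and hardware access is meant to be bracketed between the two. A minimal usage sketch (hypothetical caller, not driver code; it assumes the get routines return 0 on success, as the driver's error paths suggest):

	static int
	wm_example_swsm_user(struct wm_softc *sc)
	{

		if (wm_get_swsm_semaphore(sc) != 0)
			return 1;	/* not acquired: do not touch the hardware */
		/* ... access hardware guarded by the SWSM semaphore ... */
		wm_put_swsm_semaphore(sc);	/* release on every path */
		return 0;
	}
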
844 844
845/* 845/*
846 * Management mode and power management related subroutines. 846 * Management mode and power management related subroutines.
847 * BMC, AMT, suspend/resume and EEE. 847 * BMC, AMT, suspend/resume and EEE.
848 */ 848 */
849#if 0 849#if 0
850static int wm_check_mng_mode(struct wm_softc *); 850static int wm_check_mng_mode(struct wm_softc *);
851static int wm_check_mng_mode_ich8lan(struct wm_softc *); 851static int wm_check_mng_mode_ich8lan(struct wm_softc *);
852static int wm_check_mng_mode_82574(struct wm_softc *); 852static int wm_check_mng_mode_82574(struct wm_softc *);
853static int wm_check_mng_mode_generic(struct wm_softc *); 853static int wm_check_mng_mode_generic(struct wm_softc *);
854#endif 854#endif
855static int wm_enable_mng_pass_thru(struct wm_softc *); 855static int wm_enable_mng_pass_thru(struct wm_softc *);
856static bool wm_phy_resetisblocked(struct wm_softc *); 856static bool wm_phy_resetisblocked(struct wm_softc *);
857static void wm_get_hw_control(struct wm_softc *); 857static void wm_get_hw_control(struct wm_softc *);
858static void wm_release_hw_control(struct wm_softc *); 858static void wm_release_hw_control(struct wm_softc *);
859static void wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool); 859static void wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
860static void wm_smbustopci(struct wm_softc *); 860static void wm_smbustopci(struct wm_softc *);
861static void wm_init_manageability(struct wm_softc *); 861static void wm_init_manageability(struct wm_softc *);
862static void wm_release_manageability(struct wm_softc *); 862static void wm_release_manageability(struct wm_softc *);
863static void wm_get_wakeup(struct wm_softc *); 863static void wm_get_wakeup(struct wm_softc *);
864static void wm_ulp_disable(struct wm_softc *); 864static void wm_ulp_disable(struct wm_softc *);
865static void wm_enable_phy_wakeup(struct wm_softc *); 865static void wm_enable_phy_wakeup(struct wm_softc *);
866static void wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *); 866static void wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
867static void wm_enable_wakeup(struct wm_softc *); 867static void wm_enable_wakeup(struct wm_softc *);
868/* LPLU (Low Power Link Up) */ 868/* LPLU (Low Power Link Up) */
869static void wm_lplu_d0_disable(struct wm_softc *); 869static void wm_lplu_d0_disable(struct wm_softc *);
870static void wm_lplu_d0_disable_pch(struct wm_softc *); 870static void wm_lplu_d0_disable_pch(struct wm_softc *);
871/* EEE */ 871/* EEE */
872static void wm_set_eee_i350(struct wm_softc *); 872static void wm_set_eee_i350(struct wm_softc *);
873 873
874/* 874/*
875 * Workarounds (mainly PHY related). 875 * Workarounds (mainly PHY related).
876 * Basically, PHY workarounds are in the PHY drivers. 876 * Basically, PHY workarounds are in the PHY drivers.
877 */ 877 */
878static void wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *); 878static void wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
879static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *); 879static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
880static void wm_hv_phy_workaround_ich8lan(struct wm_softc *); 880static void wm_hv_phy_workaround_ich8lan(struct wm_softc *);
881static void wm_lv_phy_workaround_ich8lan(struct wm_softc *); 881static void wm_lv_phy_workaround_ich8lan(struct wm_softc *);
882static int wm_k1_gig_workaround_hv(struct wm_softc *, int); 882static int wm_k1_gig_workaround_hv(struct wm_softc *, int);
883static void wm_set_mdio_slow_mode_hv(struct wm_softc *); 883static void wm_set_mdio_slow_mode_hv(struct wm_softc *);
884static void wm_configure_k1_ich8lan(struct wm_softc *, int); 884static void wm_configure_k1_ich8lan(struct wm_softc *, int);
885static void wm_reset_init_script_82575(struct wm_softc *); 885static void wm_reset_init_script_82575(struct wm_softc *);
886static void wm_reset_mdicnfg_82580(struct wm_softc *); 886static void wm_reset_mdicnfg_82580(struct wm_softc *);
887static bool wm_phy_is_accessible_pchlan(struct wm_softc *); 887static bool wm_phy_is_accessible_pchlan(struct wm_softc *);
888static void wm_toggle_lanphypc_pch_lpt(struct wm_softc *); 888static void wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
889static int wm_platform_pm_pch_lpt(struct wm_softc *, bool); 889static int wm_platform_pm_pch_lpt(struct wm_softc *, bool);
890static void wm_pll_workaround_i210(struct wm_softc *); 890static void wm_pll_workaround_i210(struct wm_softc *);
891 891
892CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc), 892CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
893 wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN); 893 wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
894 894
895/* 895/*
896 * Devices supported by this driver. 896 * Devices supported by this driver.
897 */ 897 */
898static const struct wm_product { 898static const struct wm_product {
899 pci_vendor_id_t wmp_vendor; 899 pci_vendor_id_t wmp_vendor;
900 pci_product_id_t wmp_product; 900 pci_product_id_t wmp_product;
901 const char *wmp_name; 901 const char *wmp_name;
902 wm_chip_type wmp_type; 902 wm_chip_type wmp_type;
903 uint32_t wmp_flags; 903 uint32_t wmp_flags;
904#define WMP_F_UNKNOWN WM_MEDIATYPE_UNKNOWN 904#define WMP_F_UNKNOWN WM_MEDIATYPE_UNKNOWN
905#define WMP_F_FIBER WM_MEDIATYPE_FIBER 905#define WMP_F_FIBER WM_MEDIATYPE_FIBER
906#define WMP_F_COPPER WM_MEDIATYPE_COPPER 906#define WMP_F_COPPER WM_MEDIATYPE_COPPER
907#define WMP_F_SERDES WM_MEDIATYPE_SERDES 907#define WMP_F_SERDES WM_MEDIATYPE_SERDES
908#define WMP_MEDIATYPE(x) ((x) & 0x03) 908#define WMP_MEDIATYPE(x) ((x) & 0x03)
909} wm_products[] = { 909} wm_products[] = {
910 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542, 910 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
911 "Intel i82542 1000BASE-X Ethernet", 911 "Intel i82542 1000BASE-X Ethernet",
912 WM_T_82542_2_1, WMP_F_FIBER }, 912 WM_T_82542_2_1, WMP_F_FIBER },
913 913
914 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER, 914 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER,
915 "Intel i82543GC 1000BASE-X Ethernet", 915 "Intel i82543GC 1000BASE-X Ethernet",
916 WM_T_82543, WMP_F_FIBER }, 916 WM_T_82543, WMP_F_FIBER },
917 917
918 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER, 918 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER,
919 "Intel i82543GC 1000BASE-T Ethernet", 919 "Intel i82543GC 1000BASE-T Ethernet",
920 WM_T_82543, WMP_F_COPPER }, 920 WM_T_82543, WMP_F_COPPER },
921 921
922 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER, 922 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER,
923 "Intel i82544EI 1000BASE-T Ethernet", 923 "Intel i82544EI 1000BASE-T Ethernet",
924 WM_T_82544, WMP_F_COPPER }, 924 WM_T_82544, WMP_F_COPPER },
925 925
926 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER, 926 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER,
927 "Intel i82544EI 1000BASE-X Ethernet", 927 "Intel i82544EI 1000BASE-X Ethernet",
928 WM_T_82544, WMP_F_FIBER }, 928 WM_T_82544, WMP_F_FIBER },
929 929
930 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER, 930 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER,
931 "Intel i82544GC 1000BASE-T Ethernet", 931 "Intel i82544GC 1000BASE-T Ethernet",
932 WM_T_82544, WMP_F_COPPER }, 932 WM_T_82544, WMP_F_COPPER },
933 933
934 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM, 934 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM,
935 "Intel i82544GC (LOM) 1000BASE-T Ethernet", 935 "Intel i82544GC (LOM) 1000BASE-T Ethernet",
936 WM_T_82544, WMP_F_COPPER }, 936 WM_T_82544, WMP_F_COPPER },
937 937
938 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM, 938 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM,
939 "Intel i82540EM 1000BASE-T Ethernet", 939 "Intel i82540EM 1000BASE-T Ethernet",
940 WM_T_82540, WMP_F_COPPER }, 940 WM_T_82540, WMP_F_COPPER },
941 941
942 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM, 942 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM,
943 "Intel i82540EM (LOM) 1000BASE-T Ethernet", 943 "Intel i82540EM (LOM) 1000BASE-T Ethernet",
944 WM_T_82540, WMP_F_COPPER }, 944 WM_T_82540, WMP_F_COPPER },
945 945
946 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM, 946 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM,
947 "Intel i82540EP 1000BASE-T Ethernet", 947 "Intel i82540EP 1000BASE-T Ethernet",
948 WM_T_82540, WMP_F_COPPER }, 948 WM_T_82540, WMP_F_COPPER },
949 949
950 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP, 950 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP,
951 "Intel i82540EP 1000BASE-T Ethernet", 951 "Intel i82540EP 1000BASE-T Ethernet",
952 WM_T_82540, WMP_F_COPPER }, 952 WM_T_82540, WMP_F_COPPER },
953 953
954 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP, 954 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP,
955 "Intel i82540EP 1000BASE-T Ethernet", 955 "Intel i82540EP 1000BASE-T Ethernet",
956 WM_T_82540, WMP_F_COPPER }, 956 WM_T_82540, WMP_F_COPPER },
957 957
958 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER, 958 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER,
959 "Intel i82545EM 1000BASE-T Ethernet", 959 "Intel i82545EM 1000BASE-T Ethernet",
960 WM_T_82545, WMP_F_COPPER }, 960 WM_T_82545, WMP_F_COPPER },
961 961
962 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER, 962 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER,
963 "Intel i82545GM 1000BASE-T Ethernet", 963 "Intel i82545GM 1000BASE-T Ethernet",
964 WM_T_82545_3, WMP_F_COPPER }, 964 WM_T_82545_3, WMP_F_COPPER },
965 965
966 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER, 966 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER,
967 "Intel i82545GM 1000BASE-X Ethernet", 967 "Intel i82545GM 1000BASE-X Ethernet",
968 WM_T_82545_3, WMP_F_FIBER }, 968 WM_T_82545_3, WMP_F_FIBER },
969 969
970 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES, 970 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES,
971 "Intel i82545GM Gigabit Ethernet (SERDES)", 971 "Intel i82545GM Gigabit Ethernet (SERDES)",
972 WM_T_82545_3, WMP_F_SERDES }, 972 WM_T_82545_3, WMP_F_SERDES },
973 973
974 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER, 974 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER,
975 "Intel i82546EB 1000BASE-T Ethernet", 975 "Intel i82546EB 1000BASE-T Ethernet",
976 WM_T_82546, WMP_F_COPPER }, 976 WM_T_82546, WMP_F_COPPER },
977 977
978 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD, 978 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD,
979 "Intel i82546EB 1000BASE-T Ethernet", 979 "Intel i82546EB 1000BASE-T Ethernet",
980 WM_T_82546, WMP_F_COPPER }, 980 WM_T_82546, WMP_F_COPPER },
981 981
982 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER, 982 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER,
983 "Intel i82545EM 1000BASE-X Ethernet", 983 "Intel i82545EM 1000BASE-X Ethernet",
984 WM_T_82545, WMP_F_FIBER }, 984 WM_T_82545, WMP_F_FIBER },
985 985
986 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER, 986 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER,
987 "Intel i82546EB 1000BASE-X Ethernet", 987 "Intel i82546EB 1000BASE-X Ethernet",
988 WM_T_82546, WMP_F_FIBER }, 988 WM_T_82546, WMP_F_FIBER },
989 989
990 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER, 990 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER,
991 "Intel i82546GB 1000BASE-T Ethernet", 991 "Intel i82546GB 1000BASE-T Ethernet",
992 WM_T_82546_3, WMP_F_COPPER }, 992 WM_T_82546_3, WMP_F_COPPER },
993 993
994 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER, 994 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER,
995 "Intel i82546GB 1000BASE-X Ethernet", 995 "Intel i82546GB 1000BASE-X Ethernet",
996 WM_T_82546_3, WMP_F_FIBER }, 996 WM_T_82546_3, WMP_F_FIBER },
997 997
998 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES, 998 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES,
999 "Intel i82546GB Gigabit Ethernet (SERDES)", 999 "Intel i82546GB Gigabit Ethernet (SERDES)",
1000 WM_T_82546_3, WMP_F_SERDES }, 1000 WM_T_82546_3, WMP_F_SERDES },
1001 1001
1002 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER, 1002 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
1003 "i82546GB quad-port Gigabit Ethernet", 1003 "i82546GB quad-port Gigabit Ethernet",
1004 WM_T_82546_3, WMP_F_COPPER }, 1004 WM_T_82546_3, WMP_F_COPPER },
1005 1005
1006 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3, 1006 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
1007 "i82546GB quad-port Gigabit Ethernet (KSP3)", 1007 "i82546GB quad-port Gigabit Ethernet (KSP3)",
1008 WM_T_82546_3, WMP_F_COPPER }, 1008 WM_T_82546_3, WMP_F_COPPER },
1009 1009
1010 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE, 1010 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE,
1011 "Intel PRO/1000MT (82546GB)", 1011 "Intel PRO/1000MT (82546GB)",
1012 WM_T_82546_3, WMP_F_COPPER }, 1012 WM_T_82546_3, WMP_F_COPPER },
1013 1013
1014 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI, 1014 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI,
1015 "Intel i82541EI 1000BASE-T Ethernet", 1015 "Intel i82541EI 1000BASE-T Ethernet",
1016 WM_T_82541, WMP_F_COPPER }, 1016 WM_T_82541, WMP_F_COPPER },
1017 1017
1018 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM, 1018 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM,
1019 "Intel i82541ER (LOM) 1000BASE-T Ethernet", 1019 "Intel i82541ER (LOM) 1000BASE-T Ethernet",
1020 WM_T_82541, WMP_F_COPPER }, 1020 WM_T_82541, WMP_F_COPPER },
1021 1021
1022 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE, 1022 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE,
1023 "Intel i82541EI Mobile 1000BASE-T Ethernet", 1023 "Intel i82541EI Mobile 1000BASE-T Ethernet",
1024 WM_T_82541, WMP_F_COPPER }, 1024 WM_T_82541, WMP_F_COPPER },
1025 1025
1026 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER, 1026 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER,
1027 "Intel i82541ER 1000BASE-T Ethernet", 1027 "Intel i82541ER 1000BASE-T Ethernet",
1028 WM_T_82541_2, WMP_F_COPPER }, 1028 WM_T_82541_2, WMP_F_COPPER },
1029 1029
1030 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI, 1030 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI,
1031 "Intel i82541GI 1000BASE-T Ethernet", 1031 "Intel i82541GI 1000BASE-T Ethernet",
1032 WM_T_82541_2, WMP_F_COPPER }, 1032 WM_T_82541_2, WMP_F_COPPER },
1033 1033
1034 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE, 1034 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE,
1035 "Intel i82541GI Mobile 1000BASE-T Ethernet", 1035 "Intel i82541GI Mobile 1000BASE-T Ethernet",
1036 WM_T_82541_2, WMP_F_COPPER }, 1036 WM_T_82541_2, WMP_F_COPPER },
1037 1037
1038 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI, 1038 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI,
1039 "Intel i82541PI 1000BASE-T Ethernet", 1039 "Intel i82541PI 1000BASE-T Ethernet",
1040 WM_T_82541_2, WMP_F_COPPER }, 1040 WM_T_82541_2, WMP_F_COPPER },
1041 1041
1042 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI, 1042 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI,
1043 "Intel i82547EI 1000BASE-T Ethernet", 1043 "Intel i82547EI 1000BASE-T Ethernet",
1044 WM_T_82547, WMP_F_COPPER }, 1044 WM_T_82547, WMP_F_COPPER },
1045 1045
1046 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE, 1046 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE,
1047 "Intel i82547EI Mobile 1000BASE-T Ethernet", 1047 "Intel i82547EI Mobile 1000BASE-T Ethernet",
1048 WM_T_82547, WMP_F_COPPER }, 1048 WM_T_82547, WMP_F_COPPER },
1049 1049
1050 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI, 1050 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI,
1051 "Intel i82547GI 1000BASE-T Ethernet", 1051 "Intel i82547GI 1000BASE-T Ethernet",
1052 WM_T_82547_2, WMP_F_COPPER }, 1052 WM_T_82547_2, WMP_F_COPPER },
1053 1053
1054 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER, 1054 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER,
1055 "Intel PRO/1000 PT (82571EB)", 1055 "Intel PRO/1000 PT (82571EB)",
1056 WM_T_82571, WMP_F_COPPER }, 1056 WM_T_82571, WMP_F_COPPER },
1057 1057
1058 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER, 1058 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER,
1059 "Intel PRO/1000 PF (82571EB)", 1059 "Intel PRO/1000 PF (82571EB)",
1060 WM_T_82571, WMP_F_FIBER }, 1060 WM_T_82571, WMP_F_FIBER },
1061 1061
1062 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES, 1062 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES,
1063 "Intel PRO/1000 PB (82571EB)", 1063 "Intel PRO/1000 PB (82571EB)",
1064 WM_T_82571, WMP_F_SERDES }, 1064 WM_T_82571, WMP_F_SERDES },
1065 1065
1066 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER, 1066 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
1067 "Intel PRO/1000 QT (82571EB)", 1067 "Intel PRO/1000 QT (82571EB)",
1068 WM_T_82571, WMP_F_COPPER }, 1068 WM_T_82571, WMP_F_COPPER },
1069 1069
1070 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER, 1070 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
1071 "Intel PRO/1000 PT Quad Port Server Adapter", 1071 "Intel PRO/1000 PT Quad Port Server Adapter",
1072 WM_T_82571, WMP_F_COPPER, }, 1072 WM_T_82571, WMP_F_COPPER, },
1073 1073
1074 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER, 1074 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
1075 "Intel Gigabit PT Quad Port Server ExpressModule", 1075 "Intel Gigabit PT Quad Port Server ExpressModule",
1076 WM_T_82571, WMP_F_COPPER, }, 1076 WM_T_82571, WMP_F_COPPER, },
1077 1077
1078 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES, 1078 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
1079 "Intel 82571EB Dual Gigabit Ethernet (SERDES)", 1079 "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
1080 WM_T_82571, WMP_F_SERDES, }, 1080 WM_T_82571, WMP_F_SERDES, },
1081 1081
1082 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES, 1082 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
1083 "Intel 82571EB Quad Gigabit Ethernet (SERDES)", 1083 "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
1084 WM_T_82571, WMP_F_SERDES, }, 1084 WM_T_82571, WMP_F_SERDES, },
1085 1085
1086 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER, 1086 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
@@ -6730,2117 +6730,2117 @@ wm_send_common_locked(struct ifnet *ifp, @@ -6730,2117 +6730,2117 @@ wm_send_common_locked(struct ifnet *ifp,
6730 6730
6731 DPRINTF(WM_DEBUG_TX, 6731 DPRINTF(WM_DEBUG_TX,
6732 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx)); 6732 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
6733 6733
6734 DPRINTF(WM_DEBUG_TX, 6734 DPRINTF(WM_DEBUG_TX,
6735 ("%s: TX: finished transmitting packet, job %d\n", 6735 ("%s: TX: finished transmitting packet, job %d\n",
6736 device_xname(sc->sc_dev), txq->txq_snext)); 6736 device_xname(sc->sc_dev), txq->txq_snext));
6737 6737
6738 /* Advance the tx pointer. */ 6738 /* Advance the tx pointer. */
6739 txq->txq_free -= txs->txs_ndesc; 6739 txq->txq_free -= txs->txs_ndesc;
6740 txq->txq_next = nexttx; 6740 txq->txq_next = nexttx;
6741 6741
6742 txq->txq_sfree--; 6742 txq->txq_sfree--;
6743 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext); 6743 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
6744 6744
6745 /* Pass the packet to any BPF listeners. */ 6745 /* Pass the packet to any BPF listeners. */
6746 bpf_mtap(ifp, m0); 6746 bpf_mtap(ifp, m0);
6747 } 6747 }
6748 6748
6749 if (m0 != NULL) { 6749 if (m0 != NULL) {
6750 ifp->if_flags |= IFF_OACTIVE; 6750 ifp->if_flags |= IFF_OACTIVE;
6751 WM_Q_EVCNT_INCR(txq, txdrop); 6751 WM_Q_EVCNT_INCR(txq, txdrop);
6752 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", 6752 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
6753 __func__)); 6753 __func__));
6754 m_freem(m0); 6754 m_freem(m0);
6755 } 6755 }
6756 6756
6757 if (txq->txq_sfree == 0 || txq->txq_free <= 2) { 6757 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
6758 /* No more slots; notify upper layer. */ 6758 /* No more slots; notify upper layer. */
6759 ifp->if_flags |= IFF_OACTIVE; 6759 ifp->if_flags |= IFF_OACTIVE;
6760 } 6760 }
6761 6761
6762 if (txq->txq_free != ofree) { 6762 if (txq->txq_free != ofree) {
6763 /* Set a watchdog timer in case the chip flakes out. */ 6763 /* Set a watchdog timer in case the chip flakes out. */
6764 ifp->if_timer = 5; 6764 ifp->if_timer = 5;
6765 } 6765 }
6766} 6766}
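
The five-second if_timer armed above follows the classic ifnet watchdog convention: the stack decrements if_timer once per second and calls the interface's watchdog routine when it reaches zero without a TX completion rearming it. A schematic of that routine's usual shape (illustration only; wm's real watchdog handling lives elsewhere in this driver):

	static void
	example_watchdog(struct ifnet *ifp)
	{

		/* Reached only if if_timer counted down to zero. */
		log(LOG_ERR, "%s: device timeout\n", ifp->if_xname);
		ifp->if_oerrors++;
		/* A real handler would reset and reinitialize the chip. */
	}
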
6767 6767
6768/* 6768/*
6769 * wm_nq_tx_offload: 6769 * wm_nq_tx_offload:
6770 * 6770 *
6771 * Set up TCP/IP checksumming parameters for the 6771 * Set up TCP/IP checksumming parameters for the
6772 * specified packet, for NEWQUEUE devices. 6772 * specified packet, for NEWQUEUE devices.
6773 */ 6773 */
6774static int 6774static int
6775wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq, 6775wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
6776 struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum) 6776 struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
6777{ 6777{
6778 struct mbuf *m0 = txs->txs_mbuf; 6778 struct mbuf *m0 = txs->txs_mbuf;
6779 struct m_tag *mtag; 6779 struct m_tag *mtag;
6780 uint32_t vl_len, mssidx, cmdc; 6780 uint32_t vl_len, mssidx, cmdc;
6781 struct ether_header *eh; 6781 struct ether_header *eh;
6782 int offset, iphl; 6782 int offset, iphl;
6783 6783
6784 /* 6784 /*
6785 * XXX It would be nice if the mbuf pkthdr had offset 6785 * XXX It would be nice if the mbuf pkthdr had offset
6786 * fields for the protocol headers. 6786 * fields for the protocol headers.
6787 */ 6787 */
6788 *cmdlenp = 0; 6788 *cmdlenp = 0;
6789 *fieldsp = 0; 6789 *fieldsp = 0;
6790 6790
6791 eh = mtod(m0, struct ether_header *); 6791 eh = mtod(m0, struct ether_header *);
6792 switch (htons(eh->ether_type)) { 6792 switch (htons(eh->ether_type)) {
6793 case ETHERTYPE_IP: 6793 case ETHERTYPE_IP:
6794 case ETHERTYPE_IPV6: 6794 case ETHERTYPE_IPV6:
6795 offset = ETHER_HDR_LEN; 6795 offset = ETHER_HDR_LEN;
6796 break; 6796 break;
6797 6797
6798 case ETHERTYPE_VLAN: 6798 case ETHERTYPE_VLAN:
6799 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 6799 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
6800 break; 6800 break;
6801 6801
6802 default: 6802 default:
6803 /* Don't support this protocol or encapsulation. */ 6803 /* Don't support this protocol or encapsulation. */
6804 *do_csum = false; 6804 *do_csum = false;
6805 return 0; 6805 return 0;
6806 } 6806 }
6807 *do_csum = true; 6807 *do_csum = true;
6808 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS; 6808 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
6809 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT; 6809 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
6810 6810
6811 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT); 6811 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
6812 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0); 6812 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
6813 6813
6814 if ((m0->m_pkthdr.csum_flags & 6814 if ((m0->m_pkthdr.csum_flags &
6815 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) { 6815 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
6816 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data); 6816 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
6817 } else { 6817 } else {
6818 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data); 6818 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
6819 } 6819 }
6820 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT); 6820 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
6821 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0); 6821 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
6822 6822
6823 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) { 6823 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
6824 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK) 6824 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
6825 << NQTXC_VLLEN_VLAN_SHIFT); 6825 << NQTXC_VLLEN_VLAN_SHIFT);
6826 *cmdlenp |= NQTX_CMD_VLE; 6826 *cmdlenp |= NQTX_CMD_VLE;
6827 } 6827 }
6828 6828
6829 mssidx = 0; 6829 mssidx = 0;
6830 6830
6831 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) { 6831 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
6832 int hlen = offset + iphl; 6832 int hlen = offset + iphl;
6833 int tcp_hlen; 6833 int tcp_hlen;
6834 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0; 6834 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
6835 6835
6836 if (__predict_false(m0->m_len < 6836 if (__predict_false(m0->m_len <
6837 (hlen + sizeof(struct tcphdr)))) { 6837 (hlen + sizeof(struct tcphdr)))) {
6838 /* 6838 /*
6839 * TCP/IP headers are not in the first mbuf; we need 6839 * TCP/IP headers are not in the first mbuf; we need
6840 * to do this the slow and painful way. Let's just 6840 * to do this the slow and painful way. Let's just
6841 * hope this doesn't happen very often. 6841 * hope this doesn't happen very often.
6842 */ 6842 */
6843 struct tcphdr th; 6843 struct tcphdr th;
6844 6844
6845 WM_Q_EVCNT_INCR(txq, txtsopain); 6845 WM_Q_EVCNT_INCR(txq, txtsopain);
6846 6846
6847 m_copydata(m0, hlen, sizeof(th), &th); 6847 m_copydata(m0, hlen, sizeof(th), &th);
6848 if (v4) { 6848 if (v4) {
6849 struct ip ip; 6849 struct ip ip;
6850 6850
6851 m_copydata(m0, offset, sizeof(ip), &ip); 6851 m_copydata(m0, offset, sizeof(ip), &ip);
6852 ip.ip_len = 0; 6852 ip.ip_len = 0;
6853 m_copyback(m0, 6853 m_copyback(m0,
6854 offset + offsetof(struct ip, ip_len), 6854 offset + offsetof(struct ip, ip_len),
6855 sizeof(ip.ip_len), &ip.ip_len); 6855 sizeof(ip.ip_len), &ip.ip_len);
6856 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr, 6856 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
6857 ip.ip_dst.s_addr, htons(IPPROTO_TCP)); 6857 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
6858 } else { 6858 } else {
6859 struct ip6_hdr ip6; 6859 struct ip6_hdr ip6;
6860 6860
6861 m_copydata(m0, offset, sizeof(ip6), &ip6); 6861 m_copydata(m0, offset, sizeof(ip6), &ip6);
6862 ip6.ip6_plen = 0; 6862 ip6.ip6_plen = 0;
6863 m_copyback(m0, 6863 m_copyback(m0,
6864 offset + offsetof(struct ip6_hdr, ip6_plen), 6864 offset + offsetof(struct ip6_hdr, ip6_plen),
6865 sizeof(ip6.ip6_plen), &ip6.ip6_plen); 6865 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
6866 th.th_sum = in6_cksum_phdr(&ip6.ip6_src, 6866 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
6867 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP)); 6867 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
6868 } 6868 }
6869 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum), 6869 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
6870 sizeof(th.th_sum), &th.th_sum); 6870 sizeof(th.th_sum), &th.th_sum);
6871 6871
6872 tcp_hlen = th.th_off << 2; 6872 tcp_hlen = th.th_off << 2;
6873 } else { 6873 } else {
6874 /* 6874 /*
6875 * TCP/IP headers are in the first mbuf; we can do 6875 * TCP/IP headers are in the first mbuf; we can do
6876 * this the easy way. 6876 * this the easy way.
6877 */ 6877 */
6878 struct tcphdr *th; 6878 struct tcphdr *th;
6879 6879
6880 if (v4) { 6880 if (v4) {
6881 struct ip *ip = 6881 struct ip *ip =
6882 (void *)(mtod(m0, char *) + offset); 6882 (void *)(mtod(m0, char *) + offset);
6883 th = (void *)(mtod(m0, char *) + hlen); 6883 th = (void *)(mtod(m0, char *) + hlen);
6884 6884
6885 ip->ip_len = 0; 6885 ip->ip_len = 0;
6886 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, 6886 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
6887 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 6887 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
6888 } else { 6888 } else {
6889 struct ip6_hdr *ip6 = 6889 struct ip6_hdr *ip6 =
6890 (void *)(mtod(m0, char *) + offset); 6890 (void *)(mtod(m0, char *) + offset);
6891 th = (void *)(mtod(m0, char *) + hlen); 6891 th = (void *)(mtod(m0, char *) + hlen);
6892 6892
6893 ip6->ip6_plen = 0; 6893 ip6->ip6_plen = 0;
6894 th->th_sum = in6_cksum_phdr(&ip6->ip6_src, 6894 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
6895 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP)); 6895 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
6896 } 6896 }
6897 tcp_hlen = th->th_off << 2; 6897 tcp_hlen = th->th_off << 2;
6898 } 6898 }
6899 hlen += tcp_hlen; 6899 hlen += tcp_hlen;
6900 *cmdlenp |= NQTX_CMD_TSE; 6900 *cmdlenp |= NQTX_CMD_TSE;
6901 6901
6902 if (v4) { 6902 if (v4) {
6903 WM_Q_EVCNT_INCR(txq, txtso); 6903 WM_Q_EVCNT_INCR(txq, txtso);
6904 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM; 6904 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
6905 } else { 6905 } else {
6906 WM_Q_EVCNT_INCR(txq, txtso6); 6906 WM_Q_EVCNT_INCR(txq, txtso6);
6907 *fieldsp |= NQTXD_FIELDS_TUXSM; 6907 *fieldsp |= NQTXD_FIELDS_TUXSM;
6908 } 6908 }
6909 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT); 6909 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
6910 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0); 6910 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
6911 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT); 6911 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
6912 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0); 6912 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
6913 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT); 6913 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
6914 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0); 6914 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
6915 } else { 6915 } else {
6916 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT); 6916 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
6917 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0); 6917 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
6918 } 6918 }
6919 6919
6920 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) { 6920 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
6921 *fieldsp |= NQTXD_FIELDS_IXSM; 6921 *fieldsp |= NQTXD_FIELDS_IXSM;
6922 cmdc |= NQTXC_CMD_IP4; 6922 cmdc |= NQTXC_CMD_IP4;
6923 } 6923 }
6924 6924
6925 if (m0->m_pkthdr.csum_flags & 6925 if (m0->m_pkthdr.csum_flags &
6926 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) { 6926 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
6927 WM_Q_EVCNT_INCR(txq, txtusum); 6927 WM_Q_EVCNT_INCR(txq, txtusum);
6928 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) { 6928 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
6929 cmdc |= NQTXC_CMD_TCP; 6929 cmdc |= NQTXC_CMD_TCP;
6930 } else { 6930 } else {
6931 cmdc |= NQTXC_CMD_UDP; 6931 cmdc |= NQTXC_CMD_UDP;
6932 } 6932 }
6933 cmdc |= NQTXC_CMD_IP4; 6933 cmdc |= NQTXC_CMD_IP4;
6934 *fieldsp |= NQTXD_FIELDS_TUXSM; 6934 *fieldsp |= NQTXD_FIELDS_TUXSM;
6935 } 6935 }
6936 if (m0->m_pkthdr.csum_flags & 6936 if (m0->m_pkthdr.csum_flags &
6937 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) { 6937 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
6938 WM_Q_EVCNT_INCR(txq, txtusum6); 6938 WM_Q_EVCNT_INCR(txq, txtusum6);
6939 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) { 6939 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
6940 cmdc |= NQTXC_CMD_TCP; 6940 cmdc |= NQTXC_CMD_TCP;
6941 } else { 6941 } else {
6942 cmdc |= NQTXC_CMD_UDP; 6942 cmdc |= NQTXC_CMD_UDP;
6943 } 6943 }
6944 cmdc |= NQTXC_CMD_IP6; 6944 cmdc |= NQTXC_CMD_IP6;
6945 *fieldsp |= NQTXD_FIELDS_TUXSM; 6945 *fieldsp |= NQTXD_FIELDS_TUXSM;
6946 } 6946 }
6947 6947
6948 /* Fill in the context descriptor. */ 6948 /* Fill in the context descriptor. */
6949 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len = 6949 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
6950 htole32(vl_len); 6950 htole32(vl_len);
6951 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0; 6951 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
6952 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd = 6952 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
6953 htole32(cmdc); 6953 htole32(cmdc);
6954 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx = 6954 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
6955 htole32(mssidx); 6955 htole32(mssidx);
6956 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE); 6956 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
6957 DPRINTF(WM_DEBUG_TX, 6957 DPRINTF(WM_DEBUG_TX,
6958 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev), 6958 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
6959 txq->txq_next, 0, vl_len)); 6959 txq->txq_next, 0, vl_len));
6960 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc)); 6960 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
6961 txq->txq_next = WM_NEXTTX(txq, txq->txq_next); 6961 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
6962 txs->txs_ndesc++; 6962 txs->txs_ndesc++;
6963 return 0; 6963 return 0;
6964} 6964}
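
To make the field packing in wm_nq_tx_offload() concrete: for an untagged IPv4/TCP packet, offset is ETHER_HDR_LEN and iphl comes from the checksum metadata, and both are folded into the single vl_len word of the context descriptor. A minimal sketch (illustration only; the NQTXC_* shift constants are the driver's own, defined in its register header):

	uint32_t vl_len = 0;
	int offset = ETHER_HDR_LEN;	/* 14 bytes: no VLAN encapsulation */
	int iphl = sizeof(struct ip);	/* 20 bytes: no IP options */

	vl_len |= offset << NQTXC_VLLEN_MACLEN_SHIFT;	/* MAC header length */
	vl_len |= iphl << NQTXC_VLLEN_IPLEN_SHIFT;	/* IP header length */
	/* A VLAN-tagged frame would also fold the tag value into vl_len
	 * at NQTXC_VLLEN_VLAN_SHIFT and set NQTX_CMD_VLE in *cmdlenp. */
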
6965 6965
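
One step of the TSO path above deserves a standalone restatement: the IP length field is zeroed and th_sum is pre-seeded with a pseudo-header checksum that deliberately omits the length, so the hardware can complete the sum for each segment it generates. A condensed IPv4 sketch (hypothetical helper; assumes the headers are contiguous and writable, i.e. the "easy way" case above):

	static void
	example_tso_seed_v4(struct ip *ip, struct tcphdr *th)
	{

		ip->ip_len = 0;		/* hardware rewrites the per-segment length */
		th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(IPPROTO_TCP));	/* length-free pseudo-header sum */
	}
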
6966/* 6966/*
6967 * wm_nq_start: [ifnet interface function] 6967 * wm_nq_start: [ifnet interface function]
6968 * 6968 *
6969 * Start packet transmission on the interface for NEWQUEUE devices. 6969 * Start packet transmission on the interface for NEWQUEUE devices.
6970 */ 6970 */
6971static void 6971static void
6972wm_nq_start(struct ifnet *ifp) 6972wm_nq_start(struct ifnet *ifp)
6973{ 6973{
6974 struct wm_softc *sc = ifp->if_softc; 6974 struct wm_softc *sc = ifp->if_softc;
6975 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq; 6975 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6976 6976
6977 KASSERT(ifp->if_extflags & IFEF_START_MPSAFE); 6977 KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
6978 6978
6979 /* 6979 /*
6980 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c. 6980 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
6981 */ 6981 */
6982 6982
6983 mutex_enter(txq->txq_lock); 6983 mutex_enter(txq->txq_lock);
6984 if (!txq->txq_stopping) 6984 if (!txq->txq_stopping)
6985 wm_nq_start_locked(ifp); 6985 wm_nq_start_locked(ifp);
6986 mutex_exit(txq->txq_lock); 6986 mutex_exit(txq->txq_lock);
6987} 6987}
6988 6988
6989static void 6989static void
6990wm_nq_start_locked(struct ifnet *ifp) 6990wm_nq_start_locked(struct ifnet *ifp)
6991{ 6991{
6992 struct wm_softc *sc = ifp->if_softc; 6992 struct wm_softc *sc = ifp->if_softc;
6993 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq; 6993 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6994 6994
6995 wm_nq_send_common_locked(ifp, txq, false); 6995 wm_nq_send_common_locked(ifp, txq, false);
6996} 6996}
6997 6997
6998static int 6998static int
6999wm_nq_transmit(struct ifnet *ifp, struct mbuf *m) 6999wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
7000{ 7000{
7001 int qid; 7001 int qid;
7002 struct wm_softc *sc = ifp->if_softc; 7002 struct wm_softc *sc = ifp->if_softc;
7003 struct wm_txqueue *txq; 7003 struct wm_txqueue *txq;
7004 7004
7005 qid = wm_select_txqueue(ifp, m); 7005 qid = wm_select_txqueue(ifp, m);
7006 txq = &sc->sc_queue[qid].wmq_txq; 7006 txq = &sc->sc_queue[qid].wmq_txq;
7007 7007
7008 if (__predict_false(!pcq_put(txq->txq_interq, m))) { 7008 if (__predict_false(!pcq_put(txq->txq_interq, m))) {
7009 m_freem(m); 7009 m_freem(m);
7010 WM_Q_EVCNT_INCR(txq, txdrop); 7010 WM_Q_EVCNT_INCR(txq, txdrop);
7011 return ENOBUFS; 7011 return ENOBUFS;
7012 } 7012 }
7013 7013
7014 /* 7014 /*
7015 * XXXX NOMPSAFE: ifp->if_data should be percpu. 7015 * XXXX NOMPSAFE: ifp->if_data should be percpu.
7016 */ 7016 */
7017 ifp->if_obytes += m->m_pkthdr.len; 7017 ifp->if_obytes += m->m_pkthdr.len;
7018 if (m->m_flags & M_MCAST) 7018 if (m->m_flags & M_MCAST)
7019 ifp->if_omcasts++; 7019 ifp->if_omcasts++;
7020 7020
7021 /* 7021 /*
7022 * There are two situations in which this mutex_tryenter() can 7022 * There are two situations in which this mutex_tryenter() can
7023 * fail at run time: 7023 * fail at run time:
7024 * (1) contention with the interrupt handler (wm_txrxintr_msix()) 7024 * (1) contention with the interrupt handler (wm_txrxintr_msix())
7025 * (2) contention with the deferred if_start softint (wm_deferred_start()) 7025 * (2) contention with the deferred if_start softint (wm_deferred_start())
7026 * In case (1), the last packet enqueued to txq->txq_interq is 7026 * In case (1), the last packet enqueued to txq->txq_interq is
7027 * dequeued by wm_deferred_start(), so it does not get stuck. 7027 * dequeued by wm_deferred_start(), so it does not get stuck.
7028 * In case (2), the last packet enqueued to txq->txq_interq is likewise 7028 * In case (2), the last packet enqueued to txq->txq_interq is likewise
7029 * dequeued by wm_deferred_start(), so it does not get stuck either. 7029 * dequeued by wm_deferred_start(), so it does not get stuck either.
7030 */ 7030 */
7031 if (mutex_tryenter(txq->txq_lock)) { 7031 if (mutex_tryenter(txq->txq_lock)) {
7032 if (!txq->txq_stopping) 7032 if (!txq->txq_stopping)
7033 wm_nq_transmit_locked(ifp, txq); 7033 wm_nq_transmit_locked(ifp, txq);
7034 mutex_exit(txq->txq_lock); 7034 mutex_exit(txq->txq_lock);
7035 } 7035 }
7036 7036
7037 return 0; 7037 return 0;
7038} 7038}
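
The comment in wm_nq_transmit() explains why a failed mutex_tryenter() is harmless; the underlying pattern is: enqueue first, then take the TX lock only opportunistically, and rely on whoever does hold the lock to drain the queue before releasing it. A schematic sketch (illustration only; example_drain() is a hypothetical stand-in for the locked send path):

	static int
	example_transmit(pcq_t *interq, kmutex_t *txlock, struct mbuf *m)
	{

		if (!pcq_put(interq, m)) {
			m_freem(m);		/* queue full: drop */
			return ENOBUFS;
		}
		if (mutex_tryenter(txlock)) {	/* best-effort kick */
			example_drain(interq);	/* hypothetical drain routine */
			mutex_exit(txlock);
		}
		/* On tryenter failure the current lock holder dequeues this
		 * packet before releasing the lock, so it is not stranded. */
		return 0;
	}
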
7039 7039
7040static void 7040static void
7041wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq) 7041wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
7042{ 7042{
7043 7043
7044 wm_nq_send_common_locked(ifp, txq, true); 7044 wm_nq_send_common_locked(ifp, txq, true);
7045} 7045}
7046 7046
7047static void 7047static void
7048wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq, 7048wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
7049 bool is_transmit) 7049 bool is_transmit)
7050{ 7050{
7051 struct wm_softc *sc = ifp->if_softc; 7051 struct wm_softc *sc = ifp->if_softc;
7052 struct mbuf *m0; 7052 struct mbuf *m0;
7053 struct m_tag *mtag; 7053 struct m_tag *mtag;
7054 struct wm_txsoft *txs; 7054 struct wm_txsoft *txs;
7055 bus_dmamap_t dmamap; 7055 bus_dmamap_t dmamap;
7056 int error, nexttx, lasttx = -1, seg, segs_needed; 7056 int error, nexttx, lasttx = -1, seg, segs_needed;
7057 bool do_csum, sent; 7057 bool do_csum, sent;
7058 7058
7059 KASSERT(mutex_owned(txq->txq_lock)); 7059 KASSERT(mutex_owned(txq->txq_lock));
7060 7060
7061 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 7061 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
7062 return; 7062 return;
7063 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0) 7063 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
7064 return; 7064 return;
7065 7065
7066 sent = false; 7066 sent = false;
7067 7067
7068 /* 7068 /*
7069 * Loop through the send queue, setting up transmit descriptors 7069 * Loop through the send queue, setting up transmit descriptors
7070 * until we drain the queue, or use up all available transmit 7070 * until we drain the queue, or use up all available transmit
7071 * descriptors. 7071 * descriptors.
7072 */ 7072 */
7073 for (;;) { 7073 for (;;) {
7074 m0 = NULL; 7074 m0 = NULL;
7075 7075
7076 /* Get a work queue entry. */ 7076 /* Get a work queue entry. */
7077 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) { 7077 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
7078 wm_txeof(sc, txq); 7078 wm_txeof(sc, txq);
7079 if (txq->txq_sfree == 0) { 7079 if (txq->txq_sfree == 0) {
7080 DPRINTF(WM_DEBUG_TX, 7080 DPRINTF(WM_DEBUG_TX,
7081 ("%s: TX: no free job descriptors\n", 7081 ("%s: TX: no free job descriptors\n",
7082 device_xname(sc->sc_dev))); 7082 device_xname(sc->sc_dev)));
7083 WM_Q_EVCNT_INCR(txq, txsstall); 7083 WM_Q_EVCNT_INCR(txq, txsstall);
7084 break; 7084 break;
7085 } 7085 }
7086 } 7086 }
7087 7087
7088 /* Grab a packet off the queue. */ 7088 /* Grab a packet off the queue. */
7089 if (is_transmit) 7089 if (is_transmit)
7090 m0 = pcq_get(txq->txq_interq); 7090 m0 = pcq_get(txq->txq_interq);
7091 else 7091 else
7092 IFQ_DEQUEUE(&ifp->if_snd, m0); 7092 IFQ_DEQUEUE(&ifp->if_snd, m0);
7093 if (m0 == NULL) 7093 if (m0 == NULL)
7094 break; 7094 break;
7095 7095
7096 DPRINTF(WM_DEBUG_TX, 7096 DPRINTF(WM_DEBUG_TX,
7097 ("%s: TX: have packet to transmit: %p\n", 7097 ("%s: TX: have packet to transmit: %p\n",
7098 device_xname(sc->sc_dev), m0)); 7098 device_xname(sc->sc_dev), m0));
7099 7099
7100 txs = &txq->txq_soft[txq->txq_snext]; 7100 txs = &txq->txq_soft[txq->txq_snext];
7101 dmamap = txs->txs_dmamap; 7101 dmamap = txs->txs_dmamap;
7102 7102
7103 /* 7103 /*
7104 * Load the DMA map. If this fails, the packet either 7104 * Load the DMA map. If this fails, the packet either
7105 * didn't fit in the allotted number of segments, or we 7105 * didn't fit in the allotted number of segments, or we
7106 * were short on resources. For the too-many-segments 7106 * were short on resources. For the too-many-segments
7107 * case, we simply report an error and drop the packet, 7107 * case, we simply report an error and drop the packet,
7108 * since we can't sanely copy a jumbo packet to a single 7108 * since we can't sanely copy a jumbo packet to a single
7109 * buffer. 7109 * buffer.
7110 */ 7110 */
7111 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, 7111 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
7112 BUS_DMA_WRITE | BUS_DMA_NOWAIT); 7112 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
7113 if (error) { 7113 if (error) {
7114 if (error == EFBIG) { 7114 if (error == EFBIG) {
7115 WM_Q_EVCNT_INCR(txq, txdrop); 7115 WM_Q_EVCNT_INCR(txq, txdrop);
7116 log(LOG_ERR, "%s: Tx packet consumes too many " 7116 log(LOG_ERR, "%s: Tx packet consumes too many "
7117 "DMA segments, dropping...\n", 7117 "DMA segments, dropping...\n",
7118 device_xname(sc->sc_dev)); 7118 device_xname(sc->sc_dev));
7119 wm_dump_mbuf_chain(sc, m0); 7119 wm_dump_mbuf_chain(sc, m0);
7120 m_freem(m0); 7120 m_freem(m0);
7121 continue; 7121 continue;
7122 } 7122 }
7123 /* Short on resources, just stop for now. */ 7123 /* Short on resources, just stop for now. */
7124 DPRINTF(WM_DEBUG_TX, 7124 DPRINTF(WM_DEBUG_TX,
7125 ("%s: TX: dmamap load failed: %d\n", 7125 ("%s: TX: dmamap load failed: %d\n",
7126 device_xname(sc->sc_dev), error)); 7126 device_xname(sc->sc_dev), error));
7127 break; 7127 break;
7128 } 7128 }
7129 7129
7130 segs_needed = dmamap->dm_nsegs; 7130 segs_needed = dmamap->dm_nsegs;
7131 7131
7132 /* 7132 /*
7133 * Ensure we have enough descriptors free to describe 7133 * Ensure we have enough descriptors free to describe
7134 * the packet. Note, we always reserve one descriptor 7134 * the packet. Note, we always reserve one descriptor
7135 * at the end of the ring due to the semantics of the 7135 * at the end of the ring due to the semantics of the
7136 * TDT register, plus one more in the event we need 7136 * TDT register, plus one more in the event we need
7137 * to load offload context. 7137 * to load offload context.
7138 */ 7138 */
7139 if (segs_needed > txq->txq_free - 2) { 7139 if (segs_needed > txq->txq_free - 2) {
7140 /* 7140 /*
7141 * Not enough free descriptors to transmit this 7141 * Not enough free descriptors to transmit this
7142 * packet. We haven't committed anything yet, 7142 * packet. We haven't committed anything yet,
7143 * so just unload the DMA map, put the packet 7143 * so just unload the DMA map, put the packet
7144 * back on the queue, and punt. Notify the upper 7144 * back on the queue, and punt. Notify the upper
7145 * layer that there are no more slots left. 7145 * layer that there are no more slots left.
7146 */ 7146 */
7147 DPRINTF(WM_DEBUG_TX, 7147 DPRINTF(WM_DEBUG_TX,
7148 ("%s: TX: need %d (%d) descriptors, have %d\n", 7148 ("%s: TX: need %d (%d) descriptors, have %d\n",
7149 device_xname(sc->sc_dev), dmamap->dm_nsegs, 7149 device_xname(sc->sc_dev), dmamap->dm_nsegs,
7150 segs_needed, txq->txq_free - 1)); 7150 segs_needed, txq->txq_free - 1));
7151 txq->txq_flags |= WM_TXQ_NO_SPACE; 7151 txq->txq_flags |= WM_TXQ_NO_SPACE;
7152 bus_dmamap_unload(sc->sc_dmat, dmamap); 7152 bus_dmamap_unload(sc->sc_dmat, dmamap);
7153 WM_Q_EVCNT_INCR(txq, txdstall); 7153 WM_Q_EVCNT_INCR(txq, txdstall);
7154 break; 7154 break;
7155 } 7155 }
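
A worked example of the reservation arithmetic above, with hypothetical numbers: if txq_free is 10, a 9-segment packet trips the check (9 > 10 - 2) and is deferred, while an 8-segment packet proceeds, leaving one descriptor for the TDT sentinel and one for a possible offload context descriptor.
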
7156 7156
7157 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */ 7157 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
7158 7158
7159 DPRINTF(WM_DEBUG_TX, 7159 DPRINTF(WM_DEBUG_TX,
7160 ("%s: TX: packet has %d (%d) DMA segments\n", 7160 ("%s: TX: packet has %d (%d) DMA segments\n",
7161 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed)); 7161 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
7162 7162
7163 WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]); 7163 WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
7164 7164
7165 /* 7165 /*
7166 * Store a pointer to the packet so that we can free it 7166 * Store a pointer to the packet so that we can free it
7167 * later. 7167 * later.
7168 * 7168 *
7169 * Initially, we consider the number of descriptors the 7169 * Initially, we consider the number of descriptors the
7170 * packet uses to be the number of DMA segments. This may be 7170 * packet uses to be the number of DMA segments. This may be
7171 * incremented by 1 if we do checksum offload (a descriptor 7171 * incremented by 1 if we do checksum offload (a descriptor
7172 * is used to set the checksum context). 7172 * is used to set the checksum context).
7173 */ 7173 */
7174 txs->txs_mbuf = m0; 7174 txs->txs_mbuf = m0;
7175 txs->txs_firstdesc = txq->txq_next; 7175 txs->txs_firstdesc = txq->txq_next;
7176 txs->txs_ndesc = segs_needed; 7176 txs->txs_ndesc = segs_needed;
7177 7177
7178 /* Set up offload parameters for this packet. */ 7178 /* Set up offload parameters for this packet. */
7179 uint32_t cmdlen, fields, dcmdlen; 7179 uint32_t cmdlen, fields, dcmdlen;
7180 if (m0->m_pkthdr.csum_flags &  7180 if (m0->m_pkthdr.csum_flags &
7181 (M_CSUM_TSOv4 | M_CSUM_TSOv6 | 7181 (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
7182 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 | 7182 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7183 M_CSUM_TCPv6 | M_CSUM_UDPv6)) { 7183 M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
7184 if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields, 7184 if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
7185 &do_csum) != 0) { 7185 &do_csum) != 0) {
7186 /* Error message already displayed. */ 7186 /* Error message already displayed. */
7187 bus_dmamap_unload(sc->sc_dmat, dmamap); 7187 bus_dmamap_unload(sc->sc_dmat, dmamap);
7188 continue; 7188 continue;
7189 } 7189 }
7190 } else { 7190 } else {
7191 do_csum = false; 7191 do_csum = false;
7192 cmdlen = 0; 7192 cmdlen = 0;
7193 fields = 0; 7193 fields = 0;
7194 } 7194 }
7195 7195
7196 /* Sync the DMA map. */ 7196 /* Sync the DMA map. */
7197 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, 7197 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
7198 BUS_DMASYNC_PREWRITE); 7198 BUS_DMASYNC_PREWRITE);
7199 7199
7200 /* Initialize the first transmit descriptor. */ 7200 /* Initialize the first transmit descriptor. */
7201 nexttx = txq->txq_next; 7201 nexttx = txq->txq_next;
7202 if (!do_csum) { 7202 if (!do_csum) {
7203 /* setup a legacy descriptor */ 7203 /* setup a legacy descriptor */
7204 wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr, 7204 wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
7205 dmamap->dm_segs[0].ds_addr); 7205 dmamap->dm_segs[0].ds_addr);
7206 txq->txq_descs[nexttx].wtx_cmdlen = 7206 txq->txq_descs[nexttx].wtx_cmdlen =
7207 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len); 7207 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
7208 txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0; 7208 txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
7209 txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0; 7209 txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
7210 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != 7210 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
7211 NULL) { 7211 NULL) {
7212 txq->txq_descs[nexttx].wtx_cmdlen |= 7212 txq->txq_descs[nexttx].wtx_cmdlen |=
7213 htole32(WTX_CMD_VLE); 7213 htole32(WTX_CMD_VLE);
7214 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 7214 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
7215 htole16(VLAN_TAG_VALUE(mtag) & 0xffff); 7215 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
7216 } else { 7216 } else {
7217 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0; 7217 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
7218 } 7218 }
7219 dcmdlen = 0; 7219 dcmdlen = 0;
7220 } else { 7220 } else {
7221 /* setup an advanced data descriptor */ 7221 /* setup an advanced data descriptor */
7222 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr = 7222 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
7223 htole64(dmamap->dm_segs[0].ds_addr); 7223 htole64(dmamap->dm_segs[0].ds_addr);
7224 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0); 7224 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
7225 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen = 7225 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
7226 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen); 7226 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
7227 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 7227 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
7228 htole32(fields); 7228 htole32(fields);
7229 DPRINTF(WM_DEBUG_TX, 7229 DPRINTF(WM_DEBUG_TX,
7230 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n", 7230 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
7231 device_xname(sc->sc_dev), nexttx, 7231 device_xname(sc->sc_dev), nexttx,
7232 (uint64_t)dmamap->dm_segs[0].ds_addr)); 7232 (uint64_t)dmamap->dm_segs[0].ds_addr));
7233 DPRINTF(WM_DEBUG_TX, 7233 DPRINTF(WM_DEBUG_TX,
7234 ("\t 0x%08x%08x\n", fields, 7234 ("\t 0x%08x%08x\n", fields,
7235 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen)); 7235 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
7236 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT; 7236 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
7237 } 7237 }
7238 7238
7239 lasttx = nexttx; 7239 lasttx = nexttx;
7240 nexttx = WM_NEXTTX(txq, nexttx); 7240 nexttx = WM_NEXTTX(txq, nexttx);
7241 /* 7241 /*
7242 		 * Fill in the next descriptors. Legacy or advanced format 7242 		 * Fill in the next descriptors. Legacy or advanced format
7243 		 * is the same here. 7243 		 * is the same here.
7244 */ 7244 */
7245 for (seg = 1; seg < dmamap->dm_nsegs; 7245 for (seg = 1; seg < dmamap->dm_nsegs;
7246 seg++, nexttx = WM_NEXTTX(txq, nexttx)) { 7246 seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
7247 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr = 7247 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
7248 htole64(dmamap->dm_segs[seg].ds_addr); 7248 htole64(dmamap->dm_segs[seg].ds_addr);
7249 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen = 7249 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
7250 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len); 7250 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
7251 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0); 7251 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
7252 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0; 7252 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
7253 lasttx = nexttx; 7253 lasttx = nexttx;
7254 7254
7255 DPRINTF(WM_DEBUG_TX, 7255 DPRINTF(WM_DEBUG_TX,
7256 ("%s: TX: desc %d: %#" PRIx64 ", " 7256 ("%s: TX: desc %d: %#" PRIx64 ", "
7257 "len %#04zx\n", 7257 "len %#04zx\n",
7258 device_xname(sc->sc_dev), nexttx, 7258 device_xname(sc->sc_dev), nexttx,
7259 (uint64_t)dmamap->dm_segs[seg].ds_addr, 7259 (uint64_t)dmamap->dm_segs[seg].ds_addr,
7260 dmamap->dm_segs[seg].ds_len)); 7260 dmamap->dm_segs[seg].ds_len));
7261 } 7261 }
7262 7262
7263 KASSERT(lasttx != -1); 7263 KASSERT(lasttx != -1);
7264 7264
7265 /* 7265 /*
7266 * Set up the command byte on the last descriptor of 7266 * Set up the command byte on the last descriptor of
7267 * the packet. If we're in the interrupt delay window, 7267 * the packet. If we're in the interrupt delay window,
7268 * delay the interrupt. 7268 * delay the interrupt.
7269 */ 7269 */
7270 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) == 7270 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
7271 (NQTX_CMD_EOP | NQTX_CMD_RS)); 7271 (NQTX_CMD_EOP | NQTX_CMD_RS));
7272 txq->txq_descs[lasttx].wtx_cmdlen |= 7272 txq->txq_descs[lasttx].wtx_cmdlen |=
7273 htole32(WTX_CMD_EOP | WTX_CMD_RS); 7273 htole32(WTX_CMD_EOP | WTX_CMD_RS);
7274 7274
7275 txs->txs_lastdesc = lasttx; 7275 txs->txs_lastdesc = lasttx;
7276 7276
7277 DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n", 7277 DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
7278 device_xname(sc->sc_dev), 7278 device_xname(sc->sc_dev),
7279 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen))); 7279 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
7280 7280
7281 /* Sync the descriptors we're using. */ 7281 /* Sync the descriptors we're using. */
7282 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc, 7282 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
7283 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 7283 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
7284 7284
7285 /* Give the packet to the chip. */ 7285 /* Give the packet to the chip. */
7286 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx); 7286 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
7287 sent = true; 7287 sent = true;
7288 7288
7289 DPRINTF(WM_DEBUG_TX, 7289 DPRINTF(WM_DEBUG_TX,
7290 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx)); 7290 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
7291 7291
7292 DPRINTF(WM_DEBUG_TX, 7292 DPRINTF(WM_DEBUG_TX,
7293 ("%s: TX: finished transmitting packet, job %d\n", 7293 ("%s: TX: finished transmitting packet, job %d\n",
7294 device_xname(sc->sc_dev), txq->txq_snext)); 7294 device_xname(sc->sc_dev), txq->txq_snext));
7295 7295
7296 /* Advance the tx pointer. */ 7296 /* Advance the tx pointer. */
7297 txq->txq_free -= txs->txs_ndesc; 7297 txq->txq_free -= txs->txs_ndesc;
7298 txq->txq_next = nexttx; 7298 txq->txq_next = nexttx;
7299 7299
7300 txq->txq_sfree--; 7300 txq->txq_sfree--;
7301 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext); 7301 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
7302 7302
7303 /* Pass the packet to any BPF listeners. */ 7303 /* Pass the packet to any BPF listeners. */
7304 bpf_mtap(ifp, m0); 7304 bpf_mtap(ifp, m0);
7305 } 7305 }
7306 7306
7307 if (m0 != NULL) { 7307 if (m0 != NULL) {
7308 txq->txq_flags |= WM_TXQ_NO_SPACE; 7308 txq->txq_flags |= WM_TXQ_NO_SPACE;
7309 WM_Q_EVCNT_INCR(txq, txdrop); 7309 WM_Q_EVCNT_INCR(txq, txdrop);
7310 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", 7310 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
7311 __func__)); 7311 __func__));
7312 m_freem(m0); 7312 m_freem(m0);
7313 } 7313 }
7314 7314
7315 if (txq->txq_sfree == 0 || txq->txq_free <= 2) { 7315 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
7316 /* No more slots; notify upper layer. */ 7316 /* No more slots; notify upper layer. */
7317 txq->txq_flags |= WM_TXQ_NO_SPACE; 7317 txq->txq_flags |= WM_TXQ_NO_SPACE;
7318 } 7318 }
7319 7319
7320 if (sent) { 7320 if (sent) {
7321 /* Set a watchdog timer in case the chip flakes out. */ 7321 /* Set a watchdog timer in case the chip flakes out. */
7322 ifp->if_timer = 5; 7322 ifp->if_timer = 5;
7323 } 7323 }
7324} 7324}
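
The transmit path above follows the usual descriptor-ring discipline: load the mbuf chain into a DMA map, verify the segments fit while holding two descriptors in reserve (one for the TDT semantics, one for a possible offload-context slot), fill the descriptors, and only then advance the tail register. A minimal userland sketch of that accounting, with hypothetical names (struct ring, ring_enqueue) rather than the driver's real structures:

/*
 * Minimal sketch of the TX-ring accounting used above.  Hypothetical
 * userland code, not the driver's real structures.
 */
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE	8			/* descriptors in the ring */
#define RING_NEXT(i)	(((i) + 1) % RING_SIZE)

struct ring {
	int next;	/* first descriptor we may fill next */
	int free;	/* descriptors not owned by the hardware */
};

/*
 * Mirror the check above: refuse the packet unless its segments fit
 * while two descriptors stay in reserve.
 */
static bool
ring_enqueue(struct ring *r, int segs)
{
	int s;

	if (segs > r->free - 2)
		return false;	/* the driver sets WM_TXQ_NO_SPACE here */
	for (s = 0; s < segs; s++) {
		printf("fill desc %d (segment %d)\n", r->next, s);
		r->next = RING_NEXT(r->next);
	}
	r->free -= segs;
	/* The driver would now write r->next to the TDT register. */
	return true;
}

int
main(void)
{
	struct ring r = { 0, RING_SIZE };

	printf("3-segment packet: %s\n", ring_enqueue(&r, 3) ? "ok" : "stall");
	printf("4-segment packet: %s\n", ring_enqueue(&r, 4) ? "ok" : "stall");
	return 0;
}

The second call stalls because 4 > 5 - 2, which is exactly the txdstall path above.
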
7325 7325
7326static void 7326static void
7327wm_deferred_start(struct ifnet *ifp) 7327wm_deferred_start(struct ifnet *ifp)
7328{ 7328{
7329 struct wm_softc *sc = ifp->if_softc; 7329 struct wm_softc *sc = ifp->if_softc;
7330 int qid = 0; 7330 int qid = 0;
7331 7331
7332 /* 7332 /*
7333 	 * Try to transmit on all Tx queues. It might be better to 7333 	 * Try to transmit on all Tx queues. It might be better to
7334 	 * pass a txq somehow and transmit only on that queue. 7334 	 * pass a txq somehow and transmit only on that queue.
7335 */ 7335 */
7336restart: 7336restart:
7337 WM_CORE_LOCK(sc); 7337 WM_CORE_LOCK(sc);
7338 if (sc->sc_core_stopping) 7338 if (sc->sc_core_stopping)
7339 goto out; 7339 goto out;
7340 7340
7341 for (; qid < sc->sc_nqueues; qid++) { 7341 for (; qid < sc->sc_nqueues; qid++) {
7342 struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq; 7342 struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
7343 7343
7344 if (!mutex_tryenter(txq->txq_lock)) 7344 if (!mutex_tryenter(txq->txq_lock))
7345 continue; 7345 continue;
7346 7346
7347 if (txq->txq_stopping) { 7347 if (txq->txq_stopping) {
7348 mutex_exit(txq->txq_lock); 7348 mutex_exit(txq->txq_lock);
7349 continue; 7349 continue;
7350 } 7350 }
7351 WM_CORE_UNLOCK(sc); 7351 WM_CORE_UNLOCK(sc);
7352 7352
7353 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { 7353 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
7354 			/* XXX needed for ALTQ */ 7354 			/* XXX needed for ALTQ */
7355 if (qid == 0) 7355 if (qid == 0)
7356 wm_nq_start_locked(ifp); 7356 wm_nq_start_locked(ifp);
7357 wm_nq_transmit_locked(ifp, txq); 7357 wm_nq_transmit_locked(ifp, txq);
7358 } else { 7358 } else {
7359 			/* XXX needed for ALTQ */ 7359 			/* XXX needed for ALTQ */
7360 if (qid == 0) 7360 if (qid == 0)
7361 wm_start_locked(ifp); 7361 wm_start_locked(ifp);
7362 wm_transmit_locked(ifp, txq); 7362 wm_transmit_locked(ifp, txq);
7363 } 7363 }
7364 mutex_exit(txq->txq_lock); 7364 mutex_exit(txq->txq_lock);
7365 7365
7366 qid++; 7366 qid++;
7367 goto restart; 7367 goto restart;
7368 } 7368 }
7369out: 7369out:
7370 WM_CORE_UNLOCK(sc); 7370 WM_CORE_UNLOCK(sc);
7371} 7371}
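
The scan above relies on mutex_tryenter() so a queue that is already being serviced is simply skipped, and on re-taking the core lock from the top after each queue is handled. A simplified pthreads rendition of the same skip-and-restart pattern (userland sketch, not the kernel mutex(9) API):

/*
 * Skip-and-restart queue scan: never block on a busy queue, never
 * hold the core lock while working on one queue.
 */
#include <pthread.h>
#include <stdio.h>

#define NQUEUES	4

static pthread_mutex_t core_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t q_lock[NQUEUES] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
};

static void
deferred_start(void)
{
	int qid = 0;

restart:
	pthread_mutex_lock(&core_lock);
	for (; qid < NQUEUES; qid++) {
		/* A busy queue is being serviced already: skip it. */
		if (pthread_mutex_trylock(&q_lock[qid]) != 0)
			continue;
		/* Drop the core lock before doing per-queue work. */
		pthread_mutex_unlock(&core_lock);
		printf("transmit on queue %d\n", qid);
		pthread_mutex_unlock(&q_lock[qid]);
		qid++;
		goto restart;	/* re-take core_lock, resume the scan */
	}
	pthread_mutex_unlock(&core_lock);
}

int
main(void)
{
	deferred_start();
	return 0;
}
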
7372 7372
7373/* Interrupt */ 7373/* Interrupt */
7374 7374
7375/* 7375/*
7376 * wm_txeof: 7376 * wm_txeof:
7377 * 7377 *
7378 * Helper; handle transmit interrupts. 7378 * Helper; handle transmit interrupts.
7379 */ 7379 */
7380static int 7380static int
7381wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq) 7381wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
7382{ 7382{
7383 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 7383 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7384 struct wm_txsoft *txs; 7384 struct wm_txsoft *txs;
7385 bool processed = false; 7385 bool processed = false;
7386 int count = 0; 7386 int count = 0;
7387 int i; 7387 int i;
7388 uint8_t status; 7388 uint8_t status;
7389 7389
7390 KASSERT(mutex_owned(txq->txq_lock)); 7390 KASSERT(mutex_owned(txq->txq_lock));
7391 7391
7392 if (txq->txq_stopping) 7392 if (txq->txq_stopping)
7393 return 0; 7393 return 0;
7394 7394
7395 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 7395 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7396 txq->txq_flags &= ~WM_TXQ_NO_SPACE; 7396 txq->txq_flags &= ~WM_TXQ_NO_SPACE;
7397 else 7397 else
7398 ifp->if_flags &= ~IFF_OACTIVE; 7398 ifp->if_flags &= ~IFF_OACTIVE;
7399 7399
7400 /* 7400 /*
7401 * Go through the Tx list and free mbufs for those 7401 * Go through the Tx list and free mbufs for those
7402 * frames which have been transmitted. 7402 * frames which have been transmitted.
7403 */ 7403 */
7404 for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq); 7404 for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
7405 i = WM_NEXTTXS(txq, i), txq->txq_sfree++) { 7405 i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
7406 txs = &txq->txq_soft[i]; 7406 txs = &txq->txq_soft[i];
7407 7407
7408 DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n", 7408 DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
7409 device_xname(sc->sc_dev), i)); 7409 device_xname(sc->sc_dev), i));
7410 7410
7411 wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc, 7411 wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
7412 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 7412 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
7413 7413
7414 status = 7414 status =
7415 txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status; 7415 txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
7416 if ((status & WTX_ST_DD) == 0) { 7416 if ((status & WTX_ST_DD) == 0) {
7417 wm_cdtxsync(txq, txs->txs_lastdesc, 1, 7417 wm_cdtxsync(txq, txs->txs_lastdesc, 1,
7418 BUS_DMASYNC_PREREAD); 7418 BUS_DMASYNC_PREREAD);
7419 break; 7419 break;
7420 } 7420 }
7421 7421
7422 processed = true; 7422 processed = true;
7423 count++; 7423 count++;
7424 DPRINTF(WM_DEBUG_TX, 7424 DPRINTF(WM_DEBUG_TX,
7425 ("%s: TX: job %d done: descs %d..%d\n", 7425 ("%s: TX: job %d done: descs %d..%d\n",
7426 device_xname(sc->sc_dev), i, txs->txs_firstdesc, 7426 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
7427 txs->txs_lastdesc)); 7427 txs->txs_lastdesc));
7428 7428
7429 /* 7429 /*
7430 * XXX We should probably be using the statistics 7430 * XXX We should probably be using the statistics
7431 * XXX registers, but I don't know if they exist 7431 * XXX registers, but I don't know if they exist
7432 * XXX on chips before the i82544. 7432 * XXX on chips before the i82544.
7433 */ 7433 */
7434 7434
7435#ifdef WM_EVENT_COUNTERS 7435#ifdef WM_EVENT_COUNTERS
7436 if (status & WTX_ST_TU) 7436 if (status & WTX_ST_TU)
7437 WM_Q_EVCNT_INCR(txq, tu); 7437 WM_Q_EVCNT_INCR(txq, tu);
7438#endif /* WM_EVENT_COUNTERS */ 7438#endif /* WM_EVENT_COUNTERS */
7439 7439
7440 if (status & (WTX_ST_EC | WTX_ST_LC)) { 7440 if (status & (WTX_ST_EC | WTX_ST_LC)) {
7441 ifp->if_oerrors++; 7441 ifp->if_oerrors++;
7442 if (status & WTX_ST_LC) 7442 if (status & WTX_ST_LC)
7443 log(LOG_WARNING, "%s: late collision\n", 7443 log(LOG_WARNING, "%s: late collision\n",
7444 device_xname(sc->sc_dev)); 7444 device_xname(sc->sc_dev));
7445 else if (status & WTX_ST_EC) { 7445 else if (status & WTX_ST_EC) {
7446 ifp->if_collisions += 16; 7446 ifp->if_collisions += 16;
7447 log(LOG_WARNING, "%s: excessive collisions\n", 7447 log(LOG_WARNING, "%s: excessive collisions\n",
7448 device_xname(sc->sc_dev)); 7448 device_xname(sc->sc_dev));
7449 } 7449 }
7450 } else 7450 } else
7451 ifp->if_opackets++; 7451 ifp->if_opackets++;
7452 7452
7453 txq->txq_free += txs->txs_ndesc; 7453 txq->txq_free += txs->txs_ndesc;
7454 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap, 7454 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
7455 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 7455 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
7456 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 7456 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
7457 m_freem(txs->txs_mbuf); 7457 m_freem(txs->txs_mbuf);
7458 txs->txs_mbuf = NULL; 7458 txs->txs_mbuf = NULL;
7459 } 7459 }
7460 7460
7461 /* Update the dirty transmit buffer pointer. */ 7461 /* Update the dirty transmit buffer pointer. */
7462 txq->txq_sdirty = i; 7462 txq->txq_sdirty = i;
7463 DPRINTF(WM_DEBUG_TX, 7463 DPRINTF(WM_DEBUG_TX,
7464 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i)); 7464 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
7465 7465
7466 if (count != 0) 7466 if (count != 0)
7467 rnd_add_uint32(&sc->rnd_source, count); 7467 rnd_add_uint32(&sc->rnd_source, count);
7468 7468
7469 /* 7469 /*
7470 * If there are no more pending transmissions, cancel the watchdog 7470 * If there are no more pending transmissions, cancel the watchdog
7471 * timer. 7471 * timer.
7472 */ 7472 */
7473 if (txq->txq_sfree == WM_TXQUEUELEN(txq)) 7473 if (txq->txq_sfree == WM_TXQUEUELEN(txq))
7474 ifp->if_timer = 0; 7474 ifp->if_timer = 0;
7475 7475
7476 return processed; 7476 return processed;
7477} 7477}
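
Reclaim stops at the first job whose last descriptor lacks the DD (descriptor done) write-back bit; since completion happens in ring order, nothing beyond that point can be done either. A toy version of the loop (ST_DD is an illustrative value, not the real WTX_ST_DD):

#include <stdint.h>
#include <stdio.h>

#define ST_DD	0x01	/* "descriptor done" status bit */
#define NJOBS	4

int
main(void)
{
	/* Pretend the NIC finished the first two jobs only. */
	uint8_t status[NJOBS] = { ST_DD, ST_DD, 0, 0 };
	int freed = 0;
	int i;

	for (i = 0; i < NJOBS; i++) {
		if ((status[i] & ST_DD) == 0)
			break;	/* nothing after this can be done yet */
		printf("job %d done, freeing its mbuf\n", i);
		freed++;
	}
	printf("reclaimed %d job(s); watchdog %s\n", freed,
	    freed == NJOBS ? "cancelled" : "left armed");
	return 0;
}
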
7478 7478
7479static inline uint32_t 7479static inline uint32_t
7480wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx) 7480wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
7481{ 7481{
7482 struct wm_softc *sc = rxq->rxq_sc; 7482 struct wm_softc *sc = rxq->rxq_sc;
7483 7483
7484 if (sc->sc_type == WM_T_82574) 7484 if (sc->sc_type == WM_T_82574)
7485 return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat); 7485 return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
7486 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 7486 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7487 return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat); 7487 return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
7488 else 7488 else
7489 return rxq->rxq_descs[idx].wrx_status; 7489 return rxq->rxq_descs[idx].wrx_status;
7490} 7490}
7491 7491
7492static inline uint32_t 7492static inline uint32_t
7493wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx) 7493wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
7494{ 7494{
7495 struct wm_softc *sc = rxq->rxq_sc; 7495 struct wm_softc *sc = rxq->rxq_sc;
7496 7496
7497 if (sc->sc_type == WM_T_82574) 7497 if (sc->sc_type == WM_T_82574)
7498 return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat); 7498 return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
7499 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 7499 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7500 return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat); 7500 return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
7501 else 7501 else
7502 return rxq->rxq_descs[idx].wrx_errors; 7502 return rxq->rxq_descs[idx].wrx_errors;
7503} 7503}
7504 7504
7505static inline uint16_t 7505static inline uint16_t
7506wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx) 7506wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
7507{ 7507{
7508 struct wm_softc *sc = rxq->rxq_sc; 7508 struct wm_softc *sc = rxq->rxq_sc;
7509 7509
7510 if (sc->sc_type == WM_T_82574) 7510 if (sc->sc_type == WM_T_82574)
7511 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan; 7511 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
7512 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 7512 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7513 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan; 7513 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
7514 else 7514 else
7515 return rxq->rxq_descs[idx].wrx_special; 7515 return rxq->rxq_descs[idx].wrx_special;
7516} 7516}
7517 7517
7518static inline int 7518static inline int
7519wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx) 7519wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
7520{ 7520{
7521 struct wm_softc *sc = rxq->rxq_sc; 7521 struct wm_softc *sc = rxq->rxq_sc;
7522 7522
7523 if (sc->sc_type == WM_T_82574) 7523 if (sc->sc_type == WM_T_82574)
7524 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen; 7524 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
7525 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 7525 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7526 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen; 7526 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
7527 else 7527 else
7528 return rxq->rxq_descs[idx].wrx_len; 7528 return rxq->rxq_descs[idx].wrx_len;
7529} 7529}
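
These accessors exist because the same ring memory is interpreted as one of three descriptor layouts, legacy, 82574 extended, or newqueue, chosen once per chip; every field read therefore dispatches on the chip type over a union. A compressed sketch of the idea with invented layouts (not the real wiseman descriptor formats):

#include <stdint.h>
#include <stdio.h>

enum fmt { FMT_LEGACY, FMT_EXT, FMT_NQ };

union rxdesc {
	struct { uint16_t len; } legacy;	/* pre-82574 style */
	struct { uint16_t pktlen; } ext;	/* 82574 extended */
	struct { uint16_t pktlen; } nq;		/* newqueue (82575+) */
};

static int
rxdesc_get_pktlen(enum fmt f, const union rxdesc *d)
{
	switch (f) {
	case FMT_EXT:
		return d->ext.pktlen;
	case FMT_NQ:
		return d->nq.pktlen;
	default:
		return d->legacy.len;
	}
}

int
main(void)
{
	union rxdesc d = { .legacy = { .len = 1514 } };

	printf("pktlen = %d\n", rxdesc_get_pktlen(FMT_LEGACY, &d));
	return 0;
}
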
7530 7530
7531#ifdef WM_DEBUG 7531#ifdef WM_DEBUG
7532static inline uint32_t 7532static inline uint32_t
7533wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx) 7533wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
7534{ 7534{
7535 struct wm_softc *sc = rxq->rxq_sc; 7535 struct wm_softc *sc = rxq->rxq_sc;
7536 7536
7537 if (sc->sc_type == WM_T_82574) 7537 if (sc->sc_type == WM_T_82574)
7538 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash; 7538 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
7539 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 7539 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7540 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash; 7540 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
7541 else 7541 else
7542 return 0; 7542 return 0;
7543} 7543}
7544 7544
7545static inline uint8_t 7545static inline uint8_t
7546wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx) 7546wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
7547{ 7547{
7548 struct wm_softc *sc = rxq->rxq_sc; 7548 struct wm_softc *sc = rxq->rxq_sc;
7549 7549
7550 if (sc->sc_type == WM_T_82574) 7550 if (sc->sc_type == WM_T_82574)
7551 return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq); 7551 return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
7552 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 7552 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7553 return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc); 7553 return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
7554 else 7554 else
7555 return 0; 7555 return 0;
7556} 7556}
7557#endif /* WM_DEBUG */ 7557#endif /* WM_DEBUG */
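
Note that the hardware writes descriptor fields in little-endian byte order, which is why the caller in wm_rxeof() below now wraps this accessor in le32toh(), the change this revision makes. A standalone illustration of the decode (le32dec_sketch() is hand-rolled here; the kernel simply applies le32toh() from <sys/endian.h>):

#include <stdint.h>
#include <stdio.h>

/* Decode 4 bytes as little-endian regardless of host byte order. */
static uint32_t
le32dec_sketch(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	    ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int
main(void)
{
	/* The hardware wrote RSS hash 0x12345678 into the descriptor: */
	uint8_t desc[4] = { 0x78, 0x56, 0x34, 0x12 };

	/*
	 * A big-endian host reading the field raw would see 0x78563412;
	 * decoding as little-endian recovers the true value.
	 */
	printf("rsshash = 0x%08x\n", le32dec_sketch(desc));
	return 0;
}
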
7558 7558
7559static inline bool 7559static inline bool
7560wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status, 7560wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
7561 uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit) 7561 uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
7562{ 7562{
7563 7563
7564 if (sc->sc_type == WM_T_82574) 7564 if (sc->sc_type == WM_T_82574)
7565 return (status & ext_bit) != 0; 7565 return (status & ext_bit) != 0;
7566 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 7566 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7567 return (status & nq_bit) != 0; 7567 return (status & nq_bit) != 0;
7568 else 7568 else
7569 return (status & legacy_bit) != 0; 7569 return (status & legacy_bit) != 0;
7570} 7570}
7571 7571
7572static inline bool 7572static inline bool
7573wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error, 7573wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
7574 uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit) 7574 uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
7575{ 7575{
7576 7576
7577 if (sc->sc_type == WM_T_82574) 7577 if (sc->sc_type == WM_T_82574)
7578 return (error & ext_bit) != 0; 7578 return (error & ext_bit) != 0;
7579 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 7579 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7580 return (error & nq_bit) != 0; 7580 return (error & nq_bit) != 0;
7581 else 7581 else
7582 return (error & legacy_bit) != 0; 7582 return (error & legacy_bit) != 0;
7583} 7583}
7584 7584
7585static inline bool 7585static inline bool
7586wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status) 7586wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
7587{ 7587{
7588 7588
7589 if (wm_rxdesc_is_set_status(rxq->rxq_sc, status, 7589 if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
7590 WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP)) 7590 WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
7591 return true; 7591 return true;
7592 else 7592 else
7593 return false; 7593 return false;
7594} 7594}
7595 7595
7596static inline bool 7596static inline bool
7597wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors) 7597wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
7598{ 7598{
7599 struct wm_softc *sc = rxq->rxq_sc; 7599 struct wm_softc *sc = rxq->rxq_sc;
7600 7600
7601 /* XXXX missing error bit for newqueue? */ 7601 /* XXXX missing error bit for newqueue? */
7602 if (wm_rxdesc_is_set_error(sc, errors, 7602 if (wm_rxdesc_is_set_error(sc, errors,
7603 WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE, 7603 WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE,
7604 EXTRXC_ERROR_CE|EXTRXC_ERROR_SE|EXTRXC_ERROR_SEQ|EXTRXC_ERROR_CXE|EXTRXC_ERROR_RXE, 7604 EXTRXC_ERROR_CE|EXTRXC_ERROR_SE|EXTRXC_ERROR_SEQ|EXTRXC_ERROR_CXE|EXTRXC_ERROR_RXE,
7605 NQRXC_ERROR_RXE)) { 7605 NQRXC_ERROR_RXE)) {
7606 if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, EXTRXC_ERROR_SE, 0)) 7606 if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, EXTRXC_ERROR_SE, 0))
7607 log(LOG_WARNING, "%s: symbol error\n", 7607 log(LOG_WARNING, "%s: symbol error\n",
7608 device_xname(sc->sc_dev)); 7608 device_xname(sc->sc_dev));
7609 else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, EXTRXC_ERROR_SEQ, 0)) 7609 else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, EXTRXC_ERROR_SEQ, 0))
7610 log(LOG_WARNING, "%s: receive sequence error\n", 7610 log(LOG_WARNING, "%s: receive sequence error\n",
7611 device_xname(sc->sc_dev)); 7611 device_xname(sc->sc_dev));
7612 else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, EXTRXC_ERROR_CE, 0)) 7612 else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, EXTRXC_ERROR_CE, 0))
7613 log(LOG_WARNING, "%s: CRC error\n", 7613 log(LOG_WARNING, "%s: CRC error\n",
7614 device_xname(sc->sc_dev)); 7614 device_xname(sc->sc_dev));
7615 return true; 7615 return true;
7616 } 7616 }
7617 7617
7618 return false; 7618 return false;
7619} 7619}
7620 7620
7621static inline bool 7621static inline bool
7622wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status) 7622wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
7623{ 7623{
7624 struct wm_softc *sc = rxq->rxq_sc; 7624 struct wm_softc *sc = rxq->rxq_sc;
7625 7625
7626 if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD, 7626 if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
7627 NQRXC_STATUS_DD)) { 7627 NQRXC_STATUS_DD)) {
7628 /* We have processed all of the receive descriptors. */ 7628 /* We have processed all of the receive descriptors. */
7629 struct wm_rxsoft *rxs = &rxq->rxq_soft[idx]; 7629 struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
7630 7630
7631 if (sc->sc_type == WM_T_82574) { 7631 if (sc->sc_type == WM_T_82574) {
7632 rxq->rxq_ext_descs[idx].erx_data.erxd_addr = 7632 rxq->rxq_ext_descs[idx].erx_data.erxd_addr =
7633 htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr 7633 htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr
7634 + sc->sc_align_tweak); 7634 + sc->sc_align_tweak);
7635 } else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { 7635 } else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
7636 rxq->rxq_nq_descs[idx].nqrx_data.nrxd_paddr = 7636 rxq->rxq_nq_descs[idx].nqrx_data.nrxd_paddr =
7637 htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr 7637 htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr
7638 + sc->sc_align_tweak); 7638 + sc->sc_align_tweak);
7639 } 7639 }
7640 7640
7641 wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD); 7641 wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
7642 return false; 7642 return false;
7643 } 7643 }
7644 7644
7645 return true; 7645 return true;
7646} 7646}
7647 7647
7648static inline bool 7648static inline bool
7649wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag, 7649wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag,
7650 struct mbuf *m) 7650 struct mbuf *m)
7651{ 7651{
7652 struct ifnet *ifp = &rxq->rxq_sc->sc_ethercom.ec_if; 7652 struct ifnet *ifp = &rxq->rxq_sc->sc_ethercom.ec_if;
7653 7653
7654 if (wm_rxdesc_is_set_status(rxq->rxq_sc, status, 7654 if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
7655 WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) { 7655 WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
7656 VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), return false); 7656 VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), return false);
7657 } 7657 }
7658 7658
7659 return true; 7659 return true;
7660} 7660}
7661 7661
7662static inline void 7662static inline void
7663wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status, 7663wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
7664 uint32_t errors, struct mbuf *m) 7664 uint32_t errors, struct mbuf *m)
7665{ 7665{
7666 struct wm_softc *sc = rxq->rxq_sc; 7666 struct wm_softc *sc = rxq->rxq_sc;
7667 7667
7668 if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) { 7668 if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
7669 if (wm_rxdesc_is_set_status(sc, status, 7669 if (wm_rxdesc_is_set_status(sc, status,
7670 WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) { 7670 WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
7671 WM_Q_EVCNT_INCR(rxq, rxipsum); 7671 WM_Q_EVCNT_INCR(rxq, rxipsum);
7672 m->m_pkthdr.csum_flags |= M_CSUM_IPv4; 7672 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
7673 if (wm_rxdesc_is_set_error(sc, errors, 7673 if (wm_rxdesc_is_set_error(sc, errors,
7674 WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE)) 7674 WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
7675 m->m_pkthdr.csum_flags |= 7675 m->m_pkthdr.csum_flags |=
7676 M_CSUM_IPv4_BAD; 7676 M_CSUM_IPv4_BAD;
7677 } 7677 }
7678 if (wm_rxdesc_is_set_status(sc, status, 7678 if (wm_rxdesc_is_set_status(sc, status,
7679 WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) { 7679 WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
7680 /* 7680 /*
7681 * Note: we don't know if this was TCP or UDP, 7681 * Note: we don't know if this was TCP or UDP,
7682 * so we just set both bits, and expect the 7682 * so we just set both bits, and expect the
7683 * upper layers to deal. 7683 * upper layers to deal.
7684 */ 7684 */
7685 WM_Q_EVCNT_INCR(rxq, rxtusum); 7685 WM_Q_EVCNT_INCR(rxq, rxtusum);
7686 m->m_pkthdr.csum_flags |= 7686 m->m_pkthdr.csum_flags |=
7687 M_CSUM_TCPv4 | M_CSUM_UDPv4 | 7687 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7688 M_CSUM_TCPv6 | M_CSUM_UDPv6; 7688 M_CSUM_TCPv6 | M_CSUM_UDPv6;
7689 if (wm_rxdesc_is_set_error(sc, errors, 7689 if (wm_rxdesc_is_set_error(sc, errors,
7690 WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E)) 7690 WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
7691 m->m_pkthdr.csum_flags |= 7691 m->m_pkthdr.csum_flags |=
7692 M_CSUM_TCP_UDP_BAD; 7692 M_CSUM_TCP_UDP_BAD;
7693 } 7693 }
7694 } 7694 }
7695} 7695}
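
The contract here is that the driver records in m_pkthdr.csum_flags which checksums the hardware already verified, adding a _BAD flag on failure; the network stack then skips its own software check or drops the packet. A self-contained sketch of that convention (flag values invented, not NetBSD's real M_CSUM_* constants):

#include <stdint.h>
#include <stdio.h>

#define CSUM_IPv4		0x01
#define CSUM_TCP_UDP		0x02	/* TCP or UDP: can't tell, set both */
#define CSUM_IPv4_BAD		0x04
#define CSUM_TCP_UDP_BAD	0x08

static void
consume(uint32_t flags)
{
	if (flags & CSUM_IPv4)
		printf("IP header: hw-checked, %s\n",
		    (flags & CSUM_IPv4_BAD) ?
		    "bad -> drop" : "good -> skip sw check");
	if (flags & CSUM_TCP_UDP)
		printf("L4 checksum: hw-checked, %s\n",
		    (flags & CSUM_TCP_UDP_BAD) ?
		    "bad -> drop" : "good -> skip sw check");
}

int
main(void)
{
	consume(CSUM_IPv4 | CSUM_TCP_UDP);			/* clean */
	consume(CSUM_IPv4 | CSUM_TCP_UDP | CSUM_TCP_UDP_BAD);	/* bad L4 */
	return 0;
}
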
7696 7696
7697/* 7697/*
7698 * wm_rxeof: 7698 * wm_rxeof:
7699 * 7699 *
7700 * Helper; handle receive interrupts. 7700 * Helper; handle receive interrupts.
7701 */ 7701 */
7702static void 7702static void
7703wm_rxeof(struct wm_rxqueue *rxq) 7703wm_rxeof(struct wm_rxqueue *rxq)
7704{ 7704{
7705 struct wm_softc *sc = rxq->rxq_sc; 7705 struct wm_softc *sc = rxq->rxq_sc;
7706 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 7706 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7707 struct wm_rxsoft *rxs; 7707 struct wm_rxsoft *rxs;
7708 struct mbuf *m; 7708 struct mbuf *m;
7709 int i, len; 7709 int i, len;
7710 int count = 0; 7710 int count = 0;
7711 uint32_t status, errors; 7711 uint32_t status, errors;
7712 uint16_t vlantag; 7712 uint16_t vlantag;
7713 7713
7714 KASSERT(mutex_owned(rxq->rxq_lock)); 7714 KASSERT(mutex_owned(rxq->rxq_lock));
7715 7715
7716 for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) { 7716 for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
7717 rxs = &rxq->rxq_soft[i]; 7717 rxs = &rxq->rxq_soft[i];
7718 7718
7719 DPRINTF(WM_DEBUG_RX, 7719 DPRINTF(WM_DEBUG_RX,
7720 ("%s: RX: checking descriptor %d\n", 7720 ("%s: RX: checking descriptor %d\n",
7721 device_xname(sc->sc_dev), i)); 7721 device_xname(sc->sc_dev), i));
7722 		wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 7722 		wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
7723 7723
7724 status = wm_rxdesc_get_status(rxq, i); 7724 status = wm_rxdesc_get_status(rxq, i);
7725 errors = wm_rxdesc_get_errors(rxq, i); 7725 errors = wm_rxdesc_get_errors(rxq, i);
7726 len = le16toh(wm_rxdesc_get_pktlen(rxq, i)); 7726 len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
7727 vlantag = wm_rxdesc_get_vlantag(rxq, i); 7727 vlantag = wm_rxdesc_get_vlantag(rxq, i);
7728#ifdef WM_DEBUG 7728#ifdef WM_DEBUG
7729 uint32_t rsshash = wm_rxdesc_get_rsshash(rxq, i); 7729 uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
7730 uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i); 7730 uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
7731#endif 7731#endif
7732 7732
7733 if (!wm_rxdesc_dd(rxq, i, status)) 7733 if (!wm_rxdesc_dd(rxq, i, status))
7734 break; 7734 break;
7735 7735
7736 count++; 7736 count++;
7737 if (__predict_false(rxq->rxq_discard)) { 7737 if (__predict_false(rxq->rxq_discard)) {
7738 DPRINTF(WM_DEBUG_RX, 7738 DPRINTF(WM_DEBUG_RX,
7739 ("%s: RX: discarding contents of descriptor %d\n", 7739 ("%s: RX: discarding contents of descriptor %d\n",
7740 device_xname(sc->sc_dev), i)); 7740 device_xname(sc->sc_dev), i));
7741 wm_init_rxdesc(rxq, i); 7741 wm_init_rxdesc(rxq, i);
7742 if (wm_rxdesc_is_eop(rxq, status)) { 7742 if (wm_rxdesc_is_eop(rxq, status)) {
7743 /* Reset our state. */ 7743 /* Reset our state. */
7744 DPRINTF(WM_DEBUG_RX, 7744 DPRINTF(WM_DEBUG_RX,
7745 ("%s: RX: resetting rxdiscard -> 0\n", 7745 ("%s: RX: resetting rxdiscard -> 0\n",
7746 device_xname(sc->sc_dev))); 7746 device_xname(sc->sc_dev)));
7747 rxq->rxq_discard = 0; 7747 rxq->rxq_discard = 0;
7748 } 7748 }
7749 continue; 7749 continue;
7750 } 7750 }
7751 7751
7752 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 7752 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
7753 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 7753 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
7754 7754
7755 m = rxs->rxs_mbuf; 7755 m = rxs->rxs_mbuf;
7756 7756
7757 /* 7757 /*
7758 * Add a new receive buffer to the ring, unless of 7758 * Add a new receive buffer to the ring, unless of
7759 * course the length is zero. Treat the latter as a 7759 * course the length is zero. Treat the latter as a
7760 * failed mapping. 7760 * failed mapping.
7761 */ 7761 */
7762 if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) { 7762 if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
7763 /* 7763 /*
7764 * Failed, throw away what we've done so 7764 * Failed, throw away what we've done so
7765 * far, and discard the rest of the packet. 7765 * far, and discard the rest of the packet.
7766 */ 7766 */
7767 ifp->if_ierrors++; 7767 ifp->if_ierrors++;
7768 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 7768 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
7769 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 7769 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
7770 wm_init_rxdesc(rxq, i); 7770 wm_init_rxdesc(rxq, i);
7771 if (!wm_rxdesc_is_eop(rxq, status)) 7771 if (!wm_rxdesc_is_eop(rxq, status))
7772 rxq->rxq_discard = 1; 7772 rxq->rxq_discard = 1;
7773 if (rxq->rxq_head != NULL) 7773 if (rxq->rxq_head != NULL)
7774 m_freem(rxq->rxq_head); 7774 m_freem(rxq->rxq_head);
7775 WM_RXCHAIN_RESET(rxq); 7775 WM_RXCHAIN_RESET(rxq);
7776 DPRINTF(WM_DEBUG_RX, 7776 DPRINTF(WM_DEBUG_RX,
7777 ("%s: RX: Rx buffer allocation failed, " 7777 ("%s: RX: Rx buffer allocation failed, "
7778 "dropping packet%s\n", device_xname(sc->sc_dev), 7778 "dropping packet%s\n", device_xname(sc->sc_dev),
7779 rxq->rxq_discard ? " (discard)" : "")); 7779 rxq->rxq_discard ? " (discard)" : ""));
7780 continue; 7780 continue;
7781 } 7781 }
7782 7782
7783 m->m_len = len; 7783 m->m_len = len;
7784 rxq->rxq_len += len; 7784 rxq->rxq_len += len;
7785 DPRINTF(WM_DEBUG_RX, 7785 DPRINTF(WM_DEBUG_RX,
7786 ("%s: RX: buffer at %p len %d\n", 7786 ("%s: RX: buffer at %p len %d\n",
7787 device_xname(sc->sc_dev), m->m_data, len)); 7787 device_xname(sc->sc_dev), m->m_data, len));
7788 7788
7789 /* If this is not the end of the packet, keep looking. */ 7789 /* If this is not the end of the packet, keep looking. */
7790 if (!wm_rxdesc_is_eop(rxq, status)) { 7790 if (!wm_rxdesc_is_eop(rxq, status)) {
7791 WM_RXCHAIN_LINK(rxq, m); 7791 WM_RXCHAIN_LINK(rxq, m);
7792 DPRINTF(WM_DEBUG_RX, 7792 DPRINTF(WM_DEBUG_RX,
7793 ("%s: RX: not yet EOP, rxlen -> %d\n", 7793 ("%s: RX: not yet EOP, rxlen -> %d\n",
7794 device_xname(sc->sc_dev), rxq->rxq_len)); 7794 device_xname(sc->sc_dev), rxq->rxq_len));
7795 continue; 7795 continue;
7796 } 7796 }
7797 7797
7798 /* 7798 /*
7799 * Okay, we have the entire packet now. The chip is 7799 * Okay, we have the entire packet now. The chip is
7800 		 * configured to include the FCS except I350, I354 and I21[01] 7800 		 * configured to include the FCS except I350, I354 and I21[01]
7801 * (not all chips can be configured to strip it), 7801 * (not all chips can be configured to strip it),
7802 * so we need to trim it. 7802 * so we need to trim it.
7803 * May need to adjust length of previous mbuf in the 7803 * May need to adjust length of previous mbuf in the
7804 * chain if the current mbuf is too short. 7804 * chain if the current mbuf is too short.
7805 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL register 7805 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL register
7806 		 * is always set on I350, so we don't trim the CRC there. 7806 		 * is always set on I350, so we don't trim the CRC there.
7807 */ 7807 */
7808 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354) 7808 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
7809 && (sc->sc_type != WM_T_I210) 7809 && (sc->sc_type != WM_T_I210)
7810 && (sc->sc_type != WM_T_I211)) { 7810 && (sc->sc_type != WM_T_I211)) {
7811 if (m->m_len < ETHER_CRC_LEN) { 7811 if (m->m_len < ETHER_CRC_LEN) {
7812 rxq->rxq_tail->m_len 7812 rxq->rxq_tail->m_len
7813 -= (ETHER_CRC_LEN - m->m_len); 7813 -= (ETHER_CRC_LEN - m->m_len);
7814 m->m_len = 0; 7814 m->m_len = 0;
7815 } else 7815 } else
7816 m->m_len -= ETHER_CRC_LEN; 7816 m->m_len -= ETHER_CRC_LEN;
7817 len = rxq->rxq_len - ETHER_CRC_LEN; 7817 len = rxq->rxq_len - ETHER_CRC_LEN;
7818 } else 7818 } else
7819 len = rxq->rxq_len; 7819 len = rxq->rxq_len;
7820 7820
7821 WM_RXCHAIN_LINK(rxq, m); 7821 WM_RXCHAIN_LINK(rxq, m);
7822 7822
7823 *rxq->rxq_tailp = NULL; 7823 *rxq->rxq_tailp = NULL;
7824 m = rxq->rxq_head; 7824 m = rxq->rxq_head;
7825 7825
7826 WM_RXCHAIN_RESET(rxq); 7826 WM_RXCHAIN_RESET(rxq);
7827 7827
7828 DPRINTF(WM_DEBUG_RX, 7828 DPRINTF(WM_DEBUG_RX,
7829 ("%s: RX: have entire packet, len -> %d\n", 7829 ("%s: RX: have entire packet, len -> %d\n",
7830 device_xname(sc->sc_dev), len)); 7830 device_xname(sc->sc_dev), len));
7831 7831
7832 /* If an error occurred, update stats and drop the packet. */ 7832 /* If an error occurred, update stats and drop the packet. */
7833 if (wm_rxdesc_has_errors(rxq, errors)) { 7833 if (wm_rxdesc_has_errors(rxq, errors)) {
7834 m_freem(m); 7834 m_freem(m);
7835 continue; 7835 continue;
7836 } 7836 }
7837 7837
7838 /* No errors. Receive the packet. */ 7838 /* No errors. Receive the packet. */
7839 m_set_rcvif(m, ifp); 7839 m_set_rcvif(m, ifp);
7840 m->m_pkthdr.len = len; 7840 m->m_pkthdr.len = len;
7841 /* 7841 /*
7842 * TODO 7842 * TODO
7843 		 * We should save the rsshash and rsstype in this mbuf. 7843 		 * We should save the rsshash and rsstype in this mbuf.
7844 */ 7844 */
7845 DPRINTF(WM_DEBUG_RX, 7845 DPRINTF(WM_DEBUG_RX,
7846 ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n", 7846 ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
7847 device_xname(sc->sc_dev), rsstype, rsshash)); 7847 device_xname(sc->sc_dev), rsstype, rsshash));
7848 7848
7849 /* 7849 /*
7850 * If VLANs are enabled, VLAN packets have been unwrapped 7850 * If VLANs are enabled, VLAN packets have been unwrapped
7851 * for us. Associate the tag with the packet. 7851 * for us. Associate the tag with the packet.
7852 */ 7852 */
7853 /* XXXX should check for i350 and i354 */ 7853 /* XXXX should check for i350 and i354 */
7854 if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m)) 7854 if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
7855 continue; 7855 continue;
7856 7856
7857 /* Set up checksum info for this packet. */ 7857 /* Set up checksum info for this packet. */
7858 wm_rxdesc_ensure_checksum(rxq, status, errors, m); 7858 wm_rxdesc_ensure_checksum(rxq, status, errors, m);
7859 7859
7860 mutex_exit(rxq->rxq_lock); 7860 mutex_exit(rxq->rxq_lock);
7861 7861
7862 /* Pass it on. */ 7862 /* Pass it on. */
7863 if_percpuq_enqueue(sc->sc_ipq, m); 7863 if_percpuq_enqueue(sc->sc_ipq, m);
7864 7864
7865 mutex_enter(rxq->rxq_lock); 7865 mutex_enter(rxq->rxq_lock);
7866 7866
7867 if (rxq->rxq_stopping) 7867 if (rxq->rxq_stopping)
7868 break; 7868 break;
7869 } 7869 }
7870 7870
7871 /* Update the receive pointer. */ 7871 /* Update the receive pointer. */
7872 rxq->rxq_ptr = i; 7872 rxq->rxq_ptr = i;
7873 if (count != 0) 7873 if (count != 0)
7874 rnd_add_uint32(&sc->rnd_source, count); 7874 rnd_add_uint32(&sc->rnd_source, count);
7875 7875
7876 DPRINTF(WM_DEBUG_RX, 7876 DPRINTF(WM_DEBUG_RX,
7877 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i)); 7877 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
7878} 7878}
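
The FCS-trim step above has one subtlety: the 4-byte CRC may straddle the last two mbufs of the chain, so when the final mbuf is shorter than ETHER_CRC_LEN the remainder must come off the previous one. The arithmetic in isolation (two-buffer toy model):

#include <stdio.h>

#define ETHER_CRC_LEN	4

int
main(void)
{
	int prev_len = 60;	/* next-to-last buffer of the chain */
	int tail_len = 2;	/* last buffer: 2 bytes, all of them CRC */

	if (tail_len < ETHER_CRC_LEN) {
		/* Shave the part of the CRC that spilled backwards. */
		prev_len -= ETHER_CRC_LEN - tail_len;
		tail_len = 0;
	} else
		tail_len -= ETHER_CRC_LEN;

	printf("prev=%d tail=%d: 4 CRC bytes removed in total\n",
	    prev_len, tail_len);
	return 0;
}
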
7879 7879
7880/* 7880/*
7881 * wm_linkintr_gmii: 7881 * wm_linkintr_gmii:
7882 * 7882 *
7883 * Helper; handle link interrupts for GMII. 7883 * Helper; handle link interrupts for GMII.
7884 */ 7884 */
7885static void 7885static void
7886wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr) 7886wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
7887{ 7887{
7888 7888
7889 KASSERT(WM_CORE_LOCKED(sc)); 7889 KASSERT(WM_CORE_LOCKED(sc));
7890 7890
7891 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev), 7891 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7892 __func__)); 7892 __func__));
7893 7893
7894 if (icr & ICR_LSC) { 7894 if (icr & ICR_LSC) {
7895 uint32_t reg; 7895 uint32_t reg;
7896 uint32_t status = CSR_READ(sc, WMREG_STATUS); 7896 uint32_t status = CSR_READ(sc, WMREG_STATUS);
7897 7897
7898 if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0)) 7898 if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
7899 wm_gig_downshift_workaround_ich8lan(sc); 7899 wm_gig_downshift_workaround_ich8lan(sc);
7900 7900
7901 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n", 7901 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
7902 device_xname(sc->sc_dev))); 7902 device_xname(sc->sc_dev)));
7903 mii_pollstat(&sc->sc_mii); 7903 mii_pollstat(&sc->sc_mii);
7904 if (sc->sc_type == WM_T_82543) { 7904 if (sc->sc_type == WM_T_82543) {
7905 int miistatus, active; 7905 int miistatus, active;
7906 7906
7907 /* 7907 /*
7908 * With 82543, we need to force speed and 7908 * With 82543, we need to force speed and
7909 * duplex on the MAC equal to what the PHY 7909 * duplex on the MAC equal to what the PHY
7910 * speed and duplex configuration is. 7910 * speed and duplex configuration is.
7911 */ 7911 */
7912 miistatus = sc->sc_mii.mii_media_status; 7912 miistatus = sc->sc_mii.mii_media_status;
7913 7913
7914 if (miistatus & IFM_ACTIVE) { 7914 if (miistatus & IFM_ACTIVE) {
7915 active = sc->sc_mii.mii_media_active; 7915 active = sc->sc_mii.mii_media_active;
7916 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD); 7916 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
7917 switch (IFM_SUBTYPE(active)) { 7917 switch (IFM_SUBTYPE(active)) {
7918 case IFM_10_T: 7918 case IFM_10_T:
7919 sc->sc_ctrl |= CTRL_SPEED_10; 7919 sc->sc_ctrl |= CTRL_SPEED_10;
7920 break; 7920 break;
7921 case IFM_100_TX: 7921 case IFM_100_TX:
7922 sc->sc_ctrl |= CTRL_SPEED_100; 7922 sc->sc_ctrl |= CTRL_SPEED_100;
7923 break; 7923 break;
7924 case IFM_1000_T: 7924 case IFM_1000_T:
7925 sc->sc_ctrl |= CTRL_SPEED_1000; 7925 sc->sc_ctrl |= CTRL_SPEED_1000;
7926 break; 7926 break;
7927 default: 7927 default:
7928 /* 7928 /*
7929 * fiber? 7929 * fiber?
7930 					 * Should not enter here. 7930 					 * Should not enter here.
7931 */ 7931 */
7932 printf("unknown media (%x)\n", active); 7932 printf("unknown media (%x)\n", active);
7933 break; 7933 break;
7934 } 7934 }
7935 if (active & IFM_FDX) 7935 if (active & IFM_FDX)
7936 sc->sc_ctrl |= CTRL_FD; 7936 sc->sc_ctrl |= CTRL_FD;
7937 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 7937 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7938 } 7938 }
7939 } else if ((sc->sc_type == WM_T_ICH8) 7939 } else if ((sc->sc_type == WM_T_ICH8)
7940 && (sc->sc_phytype == WMPHY_IGP_3)) { 7940 && (sc->sc_phytype == WMPHY_IGP_3)) {
7941 wm_kmrn_lock_loss_workaround_ich8lan(sc); 7941 wm_kmrn_lock_loss_workaround_ich8lan(sc);
7942 } else if (sc->sc_type == WM_T_PCH) { 7942 } else if (sc->sc_type == WM_T_PCH) {
7943 wm_k1_gig_workaround_hv(sc, 7943 wm_k1_gig_workaround_hv(sc,
7944 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0)); 7944 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
7945 } 7945 }
7946 7946
7947 if ((sc->sc_phytype == WMPHY_82578) 7947 if ((sc->sc_phytype == WMPHY_82578)
7948 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active) 7948 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
7949 == IFM_1000_T)) { 7949 == IFM_1000_T)) {
7950 7950
7951 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) { 7951 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
7952 delay(200*1000); /* XXX too big */ 7952 delay(200*1000); /* XXX too big */
7953 7953
7954 /* Link stall fix for link up */ 7954 /* Link stall fix for link up */
7955 wm_gmii_hv_writereg(sc->sc_dev, 1, 7955 wm_gmii_hv_writereg(sc->sc_dev, 1,
7956 HV_MUX_DATA_CTRL, 7956 HV_MUX_DATA_CTRL,
7957 HV_MUX_DATA_CTRL_GEN_TO_MAC 7957 HV_MUX_DATA_CTRL_GEN_TO_MAC
7958 | HV_MUX_DATA_CTRL_FORCE_SPEED); 7958 | HV_MUX_DATA_CTRL_FORCE_SPEED);
7959 wm_gmii_hv_writereg(sc->sc_dev, 1, 7959 wm_gmii_hv_writereg(sc->sc_dev, 1,
7960 HV_MUX_DATA_CTRL, 7960 HV_MUX_DATA_CTRL,
7961 HV_MUX_DATA_CTRL_GEN_TO_MAC); 7961 HV_MUX_DATA_CTRL_GEN_TO_MAC);
7962 } 7962 }
7963 } 7963 }
7964 /* 7964 /*
7965 * I217 Packet Loss issue: 7965 * I217 Packet Loss issue:
7966 * ensure that FEXTNVM4 Beacon Duration is set correctly 7966 * ensure that FEXTNVM4 Beacon Duration is set correctly
7967 * on power up. 7967 * on power up.
7968 * Set the Beacon Duration for I217 to 8 usec 7968 * Set the Beacon Duration for I217 to 8 usec
7969 */ 7969 */
7970 if ((sc->sc_type == WM_T_PCH_LPT) 7970 if ((sc->sc_type == WM_T_PCH_LPT)
7971 || (sc->sc_type == WM_T_PCH_SPT)) { 7971 || (sc->sc_type == WM_T_PCH_SPT)) {
7972 reg = CSR_READ(sc, WMREG_FEXTNVM4); 7972 reg = CSR_READ(sc, WMREG_FEXTNVM4);
7973 reg &= ~FEXTNVM4_BEACON_DURATION; 7973 reg &= ~FEXTNVM4_BEACON_DURATION;
7974 reg |= FEXTNVM4_BEACON_DURATION_8US; 7974 reg |= FEXTNVM4_BEACON_DURATION_8US;
7975 CSR_WRITE(sc, WMREG_FEXTNVM4, reg); 7975 CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
7976 } 7976 }
7977 7977
7978 /* XXX Work-around I218 hang issue */ 7978 /* XXX Work-around I218 hang issue */
7979 /* e1000_k1_workaround_lpt_lp() */ 7979 /* e1000_k1_workaround_lpt_lp() */
7980 7980
7981 if ((sc->sc_type == WM_T_PCH_LPT) 7981 if ((sc->sc_type == WM_T_PCH_LPT)
7982 || (sc->sc_type == WM_T_PCH_SPT)) { 7982 || (sc->sc_type == WM_T_PCH_SPT)) {
7983 /* 7983 /*
7984 * Set platform power management values for Latency 7984 * Set platform power management values for Latency
7985 * Tolerance Reporting (LTR) 7985 * Tolerance Reporting (LTR)
7986 */ 7986 */
7987 wm_platform_pm_pch_lpt(sc, 7987 wm_platform_pm_pch_lpt(sc,
7988 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) 7988 ((sc->sc_mii.mii_media_status & IFM_ACTIVE)
7989 != 0)); 7989 != 0));
7990 } 7990 }
7991 7991
7992 /* FEXTNVM6 K1-off workaround */ 7992 /* FEXTNVM6 K1-off workaround */
7993 if (sc->sc_type == WM_T_PCH_SPT) { 7993 if (sc->sc_type == WM_T_PCH_SPT) {
7994 reg = CSR_READ(sc, WMREG_FEXTNVM6); 7994 reg = CSR_READ(sc, WMREG_FEXTNVM6);
7995 if (CSR_READ(sc, WMREG_PCIEANACFG) 7995 if (CSR_READ(sc, WMREG_PCIEANACFG)
7996 & FEXTNVM6_K1_OFF_ENABLE) 7996 & FEXTNVM6_K1_OFF_ENABLE)
7997 reg |= FEXTNVM6_K1_OFF_ENABLE; 7997 reg |= FEXTNVM6_K1_OFF_ENABLE;
7998 else 7998 else
7999 reg &= ~FEXTNVM6_K1_OFF_ENABLE; 7999 reg &= ~FEXTNVM6_K1_OFF_ENABLE;
8000 CSR_WRITE(sc, WMREG_FEXTNVM6, reg); 8000 CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
8001 } 8001 }
8002 } else if (icr & ICR_RXSEQ) { 8002 } else if (icr & ICR_RXSEQ) {
8003 DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n", 8003 DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
8004 device_xname(sc->sc_dev))); 8004 device_xname(sc->sc_dev)));
8005 } 8005 }
8006} 8006}
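
For the 82543 case above, the MAC does not pick up speed and duplex from the PHY on its own, so the resolved media word has to be translated into CTRL register bits by hand. A sketch of that mapping with illustrative constants (stand-ins for the real CTRL_SPEED_x and CTRL_FD values):

#include <stdint.h>
#include <stdio.h>

#define CTRL_SPEED_10	0x000
#define CTRL_SPEED_100	0x100
#define CTRL_SPEED_1000	0x200
#define CTRL_FD		0x001

enum media { M_10_T, M_100_TX, M_1000_T };

static uint32_t
ctrl_from_phy(enum media m, int fdx)
{
	uint32_t ctrl = 0;

	switch (m) {
	case M_10_T:
		ctrl |= CTRL_SPEED_10;
		break;
	case M_100_TX:
		ctrl |= CTRL_SPEED_100;
		break;
	case M_1000_T:
		ctrl |= CTRL_SPEED_1000;
		break;
	}
	if (fdx)
		ctrl |= CTRL_FD;
	return ctrl;
}

int
main(void)
{
	printf("CTRL bits for 1000baseT FDX: 0x%03x\n",
	    ctrl_from_phy(M_1000_T, 1));
	return 0;
}
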
8007 8007
8008/* 8008/*
8009 * wm_linkintr_tbi: 8009 * wm_linkintr_tbi:
8010 * 8010 *
8011 * Helper; handle link interrupts for TBI mode. 8011 * Helper; handle link interrupts for TBI mode.
8012 */ 8012 */
8013static void 8013static void
8014wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr) 8014wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
8015{ 8015{
8016 uint32_t status; 8016 uint32_t status;
8017 8017
8018 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev), 8018 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
8019 __func__)); 8019 __func__));
8020 8020
8021 status = CSR_READ(sc, WMREG_STATUS); 8021 status = CSR_READ(sc, WMREG_STATUS);
8022 if (icr & ICR_LSC) { 8022 if (icr & ICR_LSC) {
8023 if (status & STATUS_LU) { 8023 if (status & STATUS_LU) {
8024 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n", 8024 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
8025 device_xname(sc->sc_dev), 8025 device_xname(sc->sc_dev),
8026 (status & STATUS_FD) ? "FDX" : "HDX")); 8026 (status & STATUS_FD) ? "FDX" : "HDX"));
8027 /* 8027 /*
8028 * NOTE: CTRL will update TFCE and RFCE automatically, 8028 * NOTE: CTRL will update TFCE and RFCE automatically,
8029 * so we should update sc->sc_ctrl 8029 * so we should update sc->sc_ctrl
8030 */ 8030 */
8031 8031
8032 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL); 8032 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
8033 sc->sc_tctl &= ~TCTL_COLD(0x3ff); 8033 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
8034 sc->sc_fcrtl &= ~FCRTL_XONE; 8034 sc->sc_fcrtl &= ~FCRTL_XONE;
8035 if (status & STATUS_FD) 8035 if (status & STATUS_FD)
8036 sc->sc_tctl |= 8036 sc->sc_tctl |=
8037 TCTL_COLD(TX_COLLISION_DISTANCE_FDX); 8037 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
8038 else 8038 else
8039 sc->sc_tctl |= 8039 sc->sc_tctl |=
8040 TCTL_COLD(TX_COLLISION_DISTANCE_HDX); 8040 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
8041 if (sc->sc_ctrl & CTRL_TFCE) 8041 if (sc->sc_ctrl & CTRL_TFCE)
8042 sc->sc_fcrtl |= FCRTL_XONE; 8042 sc->sc_fcrtl |= FCRTL_XONE;
8043 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); 8043 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
8044 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? 8044 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
8045 WMREG_OLD_FCRTL : WMREG_FCRTL, 8045 WMREG_OLD_FCRTL : WMREG_FCRTL,
8046 sc->sc_fcrtl); 8046 sc->sc_fcrtl);
8047 sc->sc_tbi_linkup = 1; 8047 sc->sc_tbi_linkup = 1;
8048 } else { 8048 } else {
8049 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n", 8049 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
8050 device_xname(sc->sc_dev))); 8050 device_xname(sc->sc_dev)));
8051 sc->sc_tbi_linkup = 0; 8051 sc->sc_tbi_linkup = 0;
8052 } 8052 }
8053 /* Update LED */ 8053 /* Update LED */
8054 wm_tbi_serdes_set_linkled(sc); 8054 wm_tbi_serdes_set_linkled(sc);
8055 } else if (icr & ICR_RXSEQ) { 8055 } else if (icr & ICR_RXSEQ) {
8056 DPRINTF(WM_DEBUG_LINK, 8056 DPRINTF(WM_DEBUG_LINK,
8057 ("%s: LINK: Receive sequence error\n", 8057 ("%s: LINK: Receive sequence error\n",
8058 device_xname(sc->sc_dev))); 8058 device_xname(sc->sc_dev)));
8059 } 8059 }
8060} 8060}
8061 8061
8062/* 8062/*
8063 * wm_linkintr_serdes: 8063 * wm_linkintr_serdes:
8064 * 8064 *
8065 * Helper; handle link interrupts for SERDES mode. 8065 * Helper; handle link interrupts for SERDES mode.
8066 */ 8066 */
8067static void 8067static void
8068wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr) 8068wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
8069{ 8069{
8070 struct mii_data *mii = &sc->sc_mii; 8070 struct mii_data *mii = &sc->sc_mii;
8071 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur; 8071 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8072 uint32_t pcs_adv, pcs_lpab, reg; 8072 uint32_t pcs_adv, pcs_lpab, reg;
8073 8073
8074 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev), 8074 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
8075 __func__)); 8075 __func__));
8076 8076
8077 if (icr & ICR_LSC) { 8077 if (icr & ICR_LSC) {
8078 /* Check PCS */ 8078 /* Check PCS */
8079 reg = CSR_READ(sc, WMREG_PCS_LSTS); 8079 reg = CSR_READ(sc, WMREG_PCS_LSTS);
8080 if ((reg & PCS_LSTS_LINKOK) != 0) { 8080 if ((reg & PCS_LSTS_LINKOK) != 0) {
8081 mii->mii_media_status |= IFM_ACTIVE; 8081 mii->mii_media_status |= IFM_ACTIVE;
8082 sc->sc_tbi_linkup = 1; 8082 sc->sc_tbi_linkup = 1;
8083 } else { 8083 } else {
8084 mii->mii_media_status |= IFM_NONE; 8084 mii->mii_media_status |= IFM_NONE;
8085 sc->sc_tbi_linkup = 0; 8085 sc->sc_tbi_linkup = 0;
8086 wm_tbi_serdes_set_linkled(sc); 8086 wm_tbi_serdes_set_linkled(sc);
8087 return; 8087 return;
8088 } 8088 }
8089 mii->mii_media_active |= IFM_1000_SX; 8089 mii->mii_media_active |= IFM_1000_SX;
8090 if ((reg & PCS_LSTS_FDX) != 0) 8090 if ((reg & PCS_LSTS_FDX) != 0)
8091 mii->mii_media_active |= IFM_FDX; 8091 mii->mii_media_active |= IFM_FDX;
8092 else 8092 else
8093 mii->mii_media_active |= IFM_HDX; 8093 mii->mii_media_active |= IFM_HDX;
8094 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) { 8094 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
8095 /* Check flow */ 8095 /* Check flow */
8096 reg = CSR_READ(sc, WMREG_PCS_LSTS); 8096 reg = CSR_READ(sc, WMREG_PCS_LSTS);
8097 if ((reg & PCS_LSTS_AN_COMP) == 0) { 8097 if ((reg & PCS_LSTS_AN_COMP) == 0) {
8098 DPRINTF(WM_DEBUG_LINK, 8098 DPRINTF(WM_DEBUG_LINK,
8099 ("XXX LINKOK but not ACOMP\n")); 8099 ("XXX LINKOK but not ACOMP\n"));
8100 return; 8100 return;
8101 } 8101 }
8102 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV); 8102 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
8103 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB); 8103 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
8104 DPRINTF(WM_DEBUG_LINK, 8104 DPRINTF(WM_DEBUG_LINK,
8105 ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab)); 8105 ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
8106 if ((pcs_adv & TXCW_SYM_PAUSE) 8106 if ((pcs_adv & TXCW_SYM_PAUSE)
8107 && (pcs_lpab & TXCW_SYM_PAUSE)) { 8107 && (pcs_lpab & TXCW_SYM_PAUSE)) {
8108 mii->mii_media_active |= IFM_FLOW 8108 mii->mii_media_active |= IFM_FLOW
8109 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 8109 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
8110 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0) 8110 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
8111 && (pcs_adv & TXCW_ASYM_PAUSE) 8111 && (pcs_adv & TXCW_ASYM_PAUSE)
8112 && (pcs_lpab & TXCW_SYM_PAUSE) 8112 && (pcs_lpab & TXCW_SYM_PAUSE)
8113 && (pcs_lpab & TXCW_ASYM_PAUSE)) 8113 && (pcs_lpab & TXCW_ASYM_PAUSE))
8114 mii->mii_media_active |= IFM_FLOW 8114 mii->mii_media_active |= IFM_FLOW
8115 | IFM_ETH_TXPAUSE; 8115 | IFM_ETH_TXPAUSE;
8116 else if ((pcs_adv & TXCW_SYM_PAUSE) 8116 else if ((pcs_adv & TXCW_SYM_PAUSE)
8117 && (pcs_adv & TXCW_ASYM_PAUSE) 8117 && (pcs_adv & TXCW_ASYM_PAUSE)
8118 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0) 8118 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
8119 && (pcs_lpab & TXCW_ASYM_PAUSE)) 8119 && (pcs_lpab & TXCW_ASYM_PAUSE))
8120 mii->mii_media_active |= IFM_FLOW 8120 mii->mii_media_active |= IFM_FLOW
8121 | IFM_ETH_RXPAUSE; 8121 | IFM_ETH_RXPAUSE;
8122 } 8122 }
8123 /* Update LED */ 8123 /* Update LED */
8124 wm_tbi_serdes_set_linkled(sc); 8124 wm_tbi_serdes_set_linkled(sc);
8125 } else { 8125 } else {
8126 DPRINTF(WM_DEBUG_LINK, 8126 DPRINTF(WM_DEBUG_LINK,
8127 ("%s: LINK: Receive sequence error\n", 8127 ("%s: LINK: Receive sequence error\n",
8128 device_xname(sc->sc_dev))); 8128 device_xname(sc->sc_dev)));
8129 } 8129 }
8130} 8130}
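
The flow-control block above is the standard IEEE 802.3 Annex 28B pause resolution: symmetric pause when both sides advertise it, otherwise an asymmetric pause when the SYM/ASYM bits pair up the right way. The same truth table in a few lines (bit values illustrative, not the real TXCW_*_PAUSE constants):

#include <stdint.h>
#include <stdio.h>

#define SYM	0x1
#define ASYM	0x2

static void
resolve(uint8_t adv, uint8_t lpab)
{
	if ((adv & SYM) && (lpab & SYM))
		printf("TX+RX pause (symmetric)\n");
	else if (!(adv & SYM) && (adv & ASYM) &&
	    (lpab & SYM) && (lpab & ASYM))
		printf("TX pause only (we may pause the partner)\n");
	else if ((adv & SYM) && (adv & ASYM) &&
	    !(lpab & SYM) && (lpab & ASYM))
		printf("RX pause only (the partner may pause us)\n");
	else
		printf("no pause\n");
}

int
main(void)
{
	resolve(SYM, SYM);		/* both advertise symmetric */
	resolve(ASYM, SYM | ASYM);	/* asymmetric toward the partner */
	resolve(SYM | ASYM, ASYM);	/* asymmetric toward us */
	return 0;
}
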

/*
 * wm_linkintr:
 *
 *	Helper; handle link interrupts.
 */
static void
wm_linkintr(struct wm_softc *sc, uint32_t icr)
{

	KASSERT(WM_CORE_LOCKED(sc));

	if (sc->sc_flags & WM_F_HAS_MII)
		wm_linkintr_gmii(sc, icr);
	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
	    && (sc->sc_type >= WM_T_82575))
		wm_linkintr_serdes(sc, icr);
	else
		wm_linkintr_tbi(sc, icr);
}

/*
 * wm_intr_legacy:
 *
 *	Interrupt service routine for INTx and MSI.
 */
static int
wm_intr_legacy(void *arg)
{
	struct wm_softc *sc = arg;
	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
	struct wm_rxqueue *rxq = &sc->sc_queue[0].wmq_rxq;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr, rndval = 0;
	int handled = 0;

	DPRINTF(WM_DEBUG_TX,
	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
	while (1 /* CONSTCOND */) {
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;
		if (rndval == 0)
			rndval = icr;
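		/* The first ICR value is mixed into the entropy pool below. */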

		mutex_enter(rxq->rxq_lock);

		if (rxq->rxq_stopping) {
			mutex_exit(rxq->rxq_lock);
			break;
		}

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
				device_xname(sc->sc_dev),
				icr & (ICR_RXDMT0 | ICR_RXT0)));
			WM_Q_EVCNT_INCR(rxq, rxintr);
		}
#endif
		wm_rxeof(rxq);

		mutex_exit(rxq->rxq_lock);
		mutex_enter(txq->txq_lock);

		if (txq->txq_stopping) {
			mutex_exit(txq->txq_lock);
			break;
		}

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TXDW interrupt\n",
				device_xname(sc->sc_dev)));
			WM_Q_EVCNT_INCR(txq, txdw);
		}
#endif
		wm_txeof(sc, txq);

		mutex_exit(txq->txq_lock);
		WM_CORE_LOCK(sc);

		if (sc->sc_core_stopping) {
			WM_CORE_UNLOCK(sc);
			break;
		}

		if (icr & (ICR_LSC | ICR_RXSEQ)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		WM_CORE_UNLOCK(sc);

		if (icr & ICR_RXO) {
#if defined(WM_DEBUG)
			log(LOG_WARNING, "%s: Receive overrun\n",
			    device_xname(sc->sc_dev));
#endif /* defined(WM_DEBUG) */
		}
	}

	rnd_add_uint32(&sc->rnd_source, rndval);

	if (handled) {
		/* Try to get more packets going. */
		if_schedule_deferred_start(ifp);
	}

	return handled;
}

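/*
 * wm_txrxintr_msix:
 *
 *	Interrupt service routine for a Tx/Rx queue pair for MSI-X.
 */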
static int
wm_txrxintr_msix(void *arg)
{
	struct wm_queue *wmq = arg;
	struct wm_txqueue *txq = &wmq->wmq_txq;
	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
	struct wm_softc *sc = txq->txq_sc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);

	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));

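	/*
	 * Mask this queue's interrupt while we service it: the 82574 uses
	 * per-queue bits in IMC, the 82575 uses EITR queue bits in EIMC,
	 * and newer MACs use one EIMC bit per MSI-X vector.
	 */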
	if (sc->sc_type == WM_T_82574)
		CSR_WRITE(sc, WMREG_IMC,
		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
	else if (sc->sc_type == WM_T_82575)
		CSR_WRITE(sc, WMREG_EIMC,
		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
	else
		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);

	mutex_enter(txq->txq_lock);

	if (txq->txq_stopping) {
		mutex_exit(txq->txq_lock);
		return 0;
	}

	WM_Q_EVCNT_INCR(txq, txdw);
	wm_txeof(sc, txq);

	/* Try to get more packets going. */
	if (pcq_peek(txq->txq_interq) != NULL)
		if_schedule_deferred_start(ifp);
	/*
	 * Some upper-layer code paths (e.g. ALTQ) still call
	 * ifp->if_start() directly.
	 */
	if (wmq->wmq_id == 0)
		if_schedule_deferred_start(ifp);

	mutex_exit(txq->txq_lock);

	DPRINTF(WM_DEBUG_RX,
	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
	mutex_enter(rxq->rxq_lock);

	if (rxq->rxq_stopping) {
		mutex_exit(rxq->rxq_lock);
		return 0;
	}

	WM_Q_EVCNT_INCR(rxq, rxintr);
	wm_rxeof(rxq);
	mutex_exit(rxq->rxq_lock);

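	/* Re-enable the interrupt we masked at entry. */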
	if (sc->sc_type == WM_T_82574)
		CSR_WRITE(sc, WMREG_IMS,
		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
	else if (sc->sc_type == WM_T_82575)
		CSR_WRITE(sc, WMREG_EIMS,
		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
	else
		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);

	return 1;
}

/*
 * wm_linkintr_msix:
 *
 *	Interrupt service routine for link status change for MSI-X.
 */
static int
wm_linkintr_msix(void *arg)
{
	struct wm_softc *sc = arg;
	uint32_t reg;

	DPRINTF(WM_DEBUG_LINK,
	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));

	reg = CSR_READ(sc, WMREG_ICR);
	WM_CORE_LOCK(sc);
	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
		goto out;

	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
	wm_linkintr(sc, ICR_LSC);

out:
	WM_CORE_UNLOCK(sc);

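	/*
	 * Re-enable the link interrupt; as in wm_txrxintr_msix(), the
	 * mask register and bit layout depend on the MAC type.
	 */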
	if (sc->sc_type == WM_T_82574)
		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
	else if (sc->sc_type == WM_T_82575)
		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
	else
		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);

	return 1;
}

/*
 * Media related.
 * GMII, SGMII, TBI (and SERDES)
 */

/* Common */

/*
 * wm_tbi_serdes_set_linkled:
 *
 *	Update the link LED on TBI and SERDES devices.
 */
static void
wm_tbi_serdes_set_linkled(struct wm_softc *sc)
{

	if (sc->sc_tbi_linkup)
		sc->sc_ctrl |= CTRL_SWDPIN(0);
	else
		sc->sc_ctrl &= ~CTRL_SWDPIN(0);

	/* 82540 or newer devices are active low */
	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
}

/* GMII related */

/*
 * wm_gmii_reset:
 *
 *	Reset the PHY.
 */
static void
wm_gmii_reset(struct wm_softc *sc)
{
	uint32_t reg;
	int rv;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	rv = sc->phy.acquire(sc);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return;
	}

	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
		/* null */
		break;
	case WM_T_82543:
		/*
		 * With the 82543, we must force the MAC's speed and duplex
		 * to match the PHY's configuration. In addition, we need
		 * to perform a hardware reset on the PHY to take it out
		 * of reset.
		 */
		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

		/* The PHY reset pin is active-low. */
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
		    CTRL_EXT_SWDPIN(4));
		reg |= CTRL_EXT_SWDPIO(4);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		CSR_WRITE_FLUSH(sc);
		delay(10*1000);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		CSR_WRITE_FLUSH(sc);
		delay(150);
#if 0
		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
#endif
		delay(20*1000);	/* XXX extra delay to get PHY ID? */
		break;
	case WM_T_82544:	/* reset 10000us */
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
	case WM_T_82571:	/* reset 100us */
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_I350:
	case WM_T_I354:
	case WM_T_I210:
	case WM_T_I211:
	case WM_T_82583:
	case WM_T_80003:
		/* generic reset */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
		CSR_WRITE_FLUSH(sc);
		delay(20000);
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		CSR_WRITE_FLUSH(sc);
		delay(20000);

		if ((sc->sc_type == WM_T_82541)
		    || (sc->sc_type == WM_T_82541_2)
		    || (sc->sc_type == WM_T_82547)
		    || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for igp are done in igp_reset() */
			/* XXX add code to set LED after PHY reset */
		}
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
	case WM_T_PCH_SPT:
		/* generic reset */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
		CSR_WRITE_FLUSH(sc);
		delay(100);
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		CSR_WRITE_FLUSH(sc);
		delay(150);
		break;
	default:
		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
		    __func__);
		break;
	}

	sc->phy.release(sc);

	/* get_cfg_done */
	wm_get_cfg_done(sc);

	/* extra setup */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82544:
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
	case WM_T_82541_2:
	case WM_T_82547_2:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_I350:
	case WM_T_I354:
	case WM_T_I210:
	case WM_T_I211:
	case WM_T_80003:
		/* null */
		break;
	case WM_T_82574:
	case WM_T_82583:
		wm_lplu_d0_disable(sc);
		break;
	case WM_T_82541:
	case WM_T_82547:
		/* XXX Configure activity LED after PHY reset */
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
	case WM_T_PCH_SPT:
		/* Allow time for h/w to get to a quiescent state after reset */
		delay(10*1000);

		if (sc->sc_type == WM_T_PCH)
			wm_hv_phy_workaround_ich8lan(sc);

		if (sc->sc_type == WM_T_PCH2)
			wm_lv_phy_workaround_ich8lan(sc);

		/* Clear the host wakeup bit after lcd reset */
		if (sc->sc_type >= WM_T_PCH) {
			reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
			    BM_PORT_GEN_CFG);
			reg &= ~BM_WUC_HOST_WU_BIT;
			wm_gmii_hv_writereg(sc->sc_dev, 2,
			    BM_PORT_GEN_CFG, reg);
		}

		/*
		 * XXX Configure the LCD with the extended configuration
		 * region in NVM
		 */

		/* Disable D0 LPLU. */
		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
			wm_lplu_d0_disable_pch(sc);
		else
			wm_lplu_d0_disable(sc);	/* ICH* */
		break;
	default:
		panic("%s: unknown type\n", __func__);
		break;
	}
}

/*
 * wm_get_phy_id_82575:
 *
 *	Return PHY ID. Return -1 if it failed.
 */
static int
wm_get_phy_id_82575(struct wm_softc *sc)
{
	uint32_t reg;
	int phyid = -1;

	/* XXX */
	if ((sc->sc_flags & WM_F_SGMII) == 0)
		return -1;

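	/*
	 * The default PHY address is kept in a different register
	 * depending on the MAC: MDIC on the 82575/82576, MDICNFG on
	 * the 82580 and newer.
	 */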
	if (wm_sgmii_uses_mdio(sc)) {
		switch (sc->sc_type) {
		case WM_T_82575:
		case WM_T_82576:
			reg = CSR_READ(sc, WMREG_MDIC);
			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
			break;
		case WM_T_82580:
		case WM_T_I350:
		case WM_T_I354:
		case WM_T_I210:
		case WM_T_I211:
			reg = CSR_READ(sc, WMREG_MDICNFG);
			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
			break;
		default:
			return -1;
		}
	}

	return phyid;
}


/*
 * wm_gmii_mediainit:
 *
 *	Initialize media for use on 1000BASE-T devices.
 */
static void
wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t reg;

	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	/* We have GMII. */
	sc->sc_flags |= WM_F_HAS_MII;

	if (sc->sc_type == WM_T_80003)
		sc->sc_tipg = TIPG_1000T_80003_DFLT;
	else
		sc->sc_tipg = TIPG_1000T_DFLT;

	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
	if ((sc->sc_type == WM_T_82580)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
	    || (sc->sc_type == WM_T_I211)) {
		reg = CSR_READ(sc, WMREG_PHPM);
		reg &= ~PHPM_GO_LINK_D;
		CSR_WRITE(sc, WMREG_PHPM, reg);
	}

	/*
	 * Let the chip set speed/duplex on its own based on
	 * signals from the PHY.
	 * XXXbouyer - I'm not sure this is right for the 80003,
	 * the em driver only sets CTRL_SLU here - but it seems to work.
	 */
	sc->sc_ctrl |= CTRL_SLU;
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	/* Initialize our media structures and probe the GMII. */
	mii->mii_ifp = ifp;

	/*
	 * Determine the PHY access method.
	 *
	 * For SGMII, use the SGMII-specific method.
	 *
	 * For some devices, we can determine the PHY access method
	 * from sc_type.
	 *
	 * For ICH and PCH variants, it's difficult to determine the PHY
	 * access method by sc_type, so use the PCI product ID for some
	 * devices.
	 * For other ICH8 variants, try igp's method first. If the PHY
	 * can't be detected that way, fall back to bm's method.
	 */
	switch (prodid) {
	case PCI_PRODUCT_INTEL_PCH_M_LM:
	case PCI_PRODUCT_INTEL_PCH_M_LC:
		/* 82577 */
		sc->sc_phytype = WMPHY_82577;
		break;
	case PCI_PRODUCT_INTEL_PCH_D_DM:
	case PCI_PRODUCT_INTEL_PCH_D_DC:
		/* 82578 */
		sc->sc_phytype = WMPHY_82578;
		break;
	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
	case PCI_PRODUCT_INTEL_PCH2_LV_V:
		/* 82579 */
		sc->sc_phytype = WMPHY_82579;
		break;
	case PCI_PRODUCT_INTEL_82801H_82567V_3:
	case PCI_PRODUCT_INTEL_82801I_BM:
	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
		/* ICH8, 9, 10 with 82567 */
		sc->sc_phytype = WMPHY_BM;
		mii->mii_readreg = wm_gmii_bm_readreg;
		mii->mii_writereg = wm_gmii_bm_writereg;
		break;
	default:
		if (((sc->sc_flags & WM_F_SGMII) != 0)
		    && !wm_sgmii_uses_mdio(sc)) {
			/* SGMII */
			mii->mii_readreg = wm_sgmii_readreg;
			mii->mii_writereg = wm_sgmii_writereg;
		} else if ((sc->sc_type == WM_T_82574)
		    || (sc->sc_type == WM_T_82583)) {
			/* BM2 (phyaddr == 1) */
			sc->sc_phytype = WMPHY_BM;
			mii->mii_readreg = wm_gmii_bm_readreg;
			mii->mii_writereg = wm_gmii_bm_writereg;
		} else if (sc->sc_type >= WM_T_ICH8) {
			/* non-82567 ICH8, 9 and 10 */
			mii->mii_readreg = wm_gmii_i82544_readreg;
			mii->mii_writereg = wm_gmii_i82544_writereg;
		} else if (sc->sc_type >= WM_T_80003) {
			/* 80003 */
			sc->sc_phytype = WMPHY_GG82563;
			mii->mii_readreg = wm_gmii_i80003_readreg;
			mii->mii_writereg = wm_gmii_i80003_writereg;
		} else if (sc->sc_type >= WM_T_I210) {
			/* I210 and I211 */
			sc->sc_phytype = WMPHY_210;
			mii->mii_readreg = wm_gmii_gs40g_readreg;
			mii->mii_writereg = wm_gmii_gs40g_writereg;
		} else if (sc->sc_type >= WM_T_82580) {
			/* 82580, I350 and I354 */
			sc->sc_phytype = WMPHY_82580;
			mii->mii_readreg = wm_gmii_82580_readreg;
			mii->mii_writereg = wm_gmii_82580_writereg;
		} else if (sc->sc_type >= WM_T_82544) {
			/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
			mii->mii_readreg = wm_gmii_i82544_readreg;
			mii->mii_writereg = wm_gmii_i82544_writereg;
		} else {
			mii->mii_readreg = wm_gmii_i82543_readreg;
			mii->mii_writereg = wm_gmii_i82543_writereg;
		}
		break;
	}
	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
		/* All PCH* use _hv_ */
		mii->mii_readreg = wm_gmii_hv_readreg;
		mii->mii_writereg = wm_gmii_hv_writereg;
	}
	mii->mii_statchg = wm_gmii_statchg;

	/* Get PHY control from SMBus to PCIe */
	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
		wm_smbustopci(sc);

	wm_gmii_reset(sc);

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
	    wm_gmii_mediastatus);

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
		if ((sc->sc_flags & WM_F_SGMII) == 0) {
			/* Attach only one port */
			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
			    MII_OFFSET_ANY, MIIF_DOPAUSE);
		} else {
			int i, id;
			uint32_t ctrl_ext;

			id = wm_get_phy_id_82575(sc);
			if (id != -1) {
				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
			}
			if ((id == -1)
			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
				/* Power on the SGMII PHY if it is disabled */
				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
				CSR_WRITE(sc, WMREG_CTRL_EXT,
				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
				CSR_WRITE_FLUSH(sc);
				delay(300*1000);	/* XXX too long */

				/* Try PHY addresses 1 to 7 */
				for (i = 1; i < 8; i++)
					mii_attach(sc->sc_dev, &sc->sc_mii,
					    0xffffffff, i, MII_OFFSET_ANY,
					    MIIF_DOPAUSE);

				/* Restore previous SFP cage power state */
				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
			}
		}
	} else {
		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, MIIF_DOPAUSE);
	}

	/*
	 * If the MAC is PCH2 or PCH_LPT and failed to detect an MII PHY,
	 * call wm_set_mdio_slow_mode_hv() as a workaround and retry.
	 */
	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
		wm_set_mdio_slow_mode_hv(sc);
		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, MIIF_DOPAUSE);
	}

	/*
	 * (For ICH8 variants)
	 * If PHY detection failed, use BM's r/w function and retry.
	 */
	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* If failed, retry with *_bm_* */
		mii->mii_readreg = wm_gmii_bm_readreg;
		mii->mii_writereg = wm_gmii_bm_writereg;

		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, MIIF_DOPAUSE);
	}

	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
		sc->sc_phytype = WMPHY_NONE;
	} else {
		/*
		 * PHY found!
		 * Check the PHY type.
		 */
		uint32_t model;
		struct mii_softc *child;

		child = LIST_FIRST(&mii->mii_phys);
		model = child->mii_mpd_model;
		if (model == MII_MODEL_yyINTEL_I82566)
			sc->sc_phytype = WMPHY_IGP_3;

		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
	}
}

/*
 * wm_gmii_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media on a 1000BASE-T device.