Wed Oct 28 07:08:08 2020 UTC
 Add missing drain for pcq in wm_stop_locked(). OK'd by knakahara.

This change fixes two problems:
 1. If the pcq is full when the watchdog timer fires, the pcq is left
    full and wm_transmit() keeps returning ENOBUFS.
 2. ifconfig down doesn't free the mbufs held in the pcq.


(msaitoh)
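
The change itself lands in wm_stop_locked(), beyond the excerpt of the diff
shown below. As a rough sketch of the pattern (not the verbatim committed
hunk), draining a pcq on the stop path with the standard pcq(9)/mbuf(9)
interfaces looks like the following, where txq names the per-queue Tx state
from the driver (see struct wm_txqueue below) and the queue's txq_lock is
assumed to be held, as it is for the rest of the per-queue teardown:

	struct mbuf *m;

	/* Free every packet still sitting in the software Tx queue. */
	while ((m = pcq_get(txq->txq_interq)) != NULL)
		m_freem(m);

Since pcq_get() removes one packet per call, looping until it returns NULL
empties the queue: a full pcq can no longer outlive a watchdog reset, and no
mbufs are leaked on ifconfig down, addressing both problems at once.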
diff -r1.691 -r1.692 src/sys/dev/pci/if_wm.c

cvs diff -r1.691 -r1.692 src/sys/dev/pci/if_wm.c

--- src/sys/dev/pci/if_wm.c 2020/10/16 05:53:39 1.691
+++ src/sys/dev/pci/if_wm.c 2020/10/28 07:08:08 1.692
@@ -1,1084 +1,1084 @@
-/* $NetBSD: if_wm.c,v 1.691 2020/10/16 05:53:39 msaitoh Exp $ */
+/* $NetBSD: if_wm.c,v 1.692 2020/10/28 07:08:08 msaitoh Exp $ */
 
 /*
  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
  * All rights reserved.
  *
  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. All advertising materials mentioning features or use of this software
  *    must display the following acknowledgement:
  *    This product includes software developed for the NetBSD Project by
  *    Wasabi Systems, Inc.
  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
  *    or promote products derived from this software without specific prior
  *    written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 /*******************************************************************************
 
   Copyright (c) 2001-2005, Intel Corporation
   All rights reserved.
 
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:
 
    1. Redistributions of source code must retain the above copyright notice,
       this list of conditions and the following disclaimer.
 
    2. Redistributions in binary form must reproduce the above copyright
       notice, this list of conditions and the following disclaimer in the
       documentation and/or other materials provided with the distribution.
 
    3. Neither the name of the Intel Corporation nor the names of its
       contributors may be used to endorse or promote products derived from
       this software without specific prior written permission.
 
   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   POSSIBILITY OF SUCH DAMAGE.
 
 *******************************************************************************/
 /*
  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
  *
  * TODO (in order of importance):
  *
  *	- Check XXX'ed comments
  *	- TX Multi queue improvement (refine queue selection logic)
  *	- Split header buffer for newer descriptors
  *	- EEE (Energy Efficiency Ethernet) for I354
  *	- Virtual Function
  *	- Set LED correctly (based on contents in EEPROM)
  *	- Rework how parameters are loaded from the EEPROM.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.691 2020/10/16 05:53:39 msaitoh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.692 2020/10/28 07:08:08 msaitoh Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_net_mpsafe.h"
 #include "opt_if_wm.h"
 #endif
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/callout.h>
 #include <sys/mbuf.h>
 #include <sys/malloc.h>
 #include <sys/kmem.h>
 #include <sys/kernel.h>
 #include <sys/socket.h>
 #include <sys/ioctl.h>
 #include <sys/errno.h>
 #include <sys/device.h>
 #include <sys/queue.h>
 #include <sys/syslog.h>
 #include <sys/interrupt.h>
 #include <sys/cpu.h>
 #include <sys/pcq.h>
 #include <sys/sysctl.h>
 #include <sys/workqueue.h>
 
 #include <sys/rndsource.h>
 
 #include <net/if.h>
 #include <net/if_dl.h>
 #include <net/if_media.h>
 #include <net/if_ether.h>
 
 #include <net/bpf.h>
 
 #include <net/rss_config.h>
 
 #include <netinet/in.h>			/* XXX for struct ip */
 #include <netinet/in_systm.h>		/* XXX for struct ip */
 #include <netinet/ip.h>			/* XXX for struct ip */
 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
 
 #include <sys/bus.h>
 #include <sys/intr.h>
 #include <machine/endian.h>
 
 #include <dev/mii/mii.h>
 #include <dev/mii/mdio.h>
 #include <dev/mii/miivar.h>
 #include <dev/mii/miidevs.h>
 #include <dev/mii/mii_bitbang.h>
 #include <dev/mii/ikphyreg.h>
 #include <dev/mii/igphyreg.h>
 #include <dev/mii/igphyvar.h>
 #include <dev/mii/inbmphyreg.h>
 #include <dev/mii/ihphyreg.h>
 #include <dev/mii/makphyreg.h>
 
 #include <dev/pci/pcireg.h>
 #include <dev/pci/pcivar.h>
 #include <dev/pci/pcidevs.h>
 
 #include <dev/pci/if_wmreg.h>
 #include <dev/pci/if_wmvar.h>
 
 #ifdef WM_DEBUG
 #define WM_DEBUG_LINK		__BIT(0)
 #define WM_DEBUG_TX		__BIT(1)
 #define WM_DEBUG_RX		__BIT(2)
 #define WM_DEBUG_GMII		__BIT(3)
 #define WM_DEBUG_MANAGE		__BIT(4)
 #define WM_DEBUG_NVM		__BIT(5)
 #define WM_DEBUG_INIT		__BIT(6)
 #define WM_DEBUG_LOCK		__BIT(7)
 int wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
 #define DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
 #else
 #define DPRINTF(x, y)	__nothing
 #endif /* WM_DEBUG */
 
 #ifdef NET_MPSAFE
 #define WM_MPSAFE	1
 #define WM_CALLOUT_FLAGS	CALLOUT_MPSAFE
 #define WM_SOFTINT_FLAGS	SOFTINT_MPSAFE
 #define WM_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
 #else
 #define WM_CALLOUT_FLAGS	0
 #define WM_SOFTINT_FLAGS	0
 #define WM_WORKQUEUE_FLAGS	WQ_PERCPU
 #endif
 
 #define WM_WORKQUEUE_PRI PRI_SOFTNET
 
 /*
  * This device driver's max interrupt numbers.
  */
 #define WM_MAX_NQUEUEINTR	16
 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
 
 #ifndef WM_DISABLE_MSI
 #define WM_DISABLE_MSI 0
 #endif
 #ifndef WM_DISABLE_MSIX
 #define WM_DISABLE_MSIX 0
 #endif
 
 int wm_disable_msi = WM_DISABLE_MSI;
 int wm_disable_msix = WM_DISABLE_MSIX;
 
 #ifndef WM_WATCHDOG_TIMEOUT
 #define WM_WATCHDOG_TIMEOUT 5
 #endif
 static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;
 
 /*
  * Transmit descriptor list size.  Due to errata, we can only have
  * 256 hardware descriptors in the ring on < 82544, but we use 4096
  * on >= 82544.  We tell the upper layers that they can queue a lot
  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
  * of them at a time.
  *
  * We allow up to 64 DMA segments per packet.  Pathological packet
  * chains containing many small mbufs have been observed in zero-copy
  * situations with jumbo frames. If a mbuf chain has more than 64 DMA segments,
  * m_defrag() is called to reduce it.
  */
 #define WM_NTXSEGS		64
 #define WM_IFQUEUELEN		256
 #define WM_TXQUEUELEN_MAX	64
 #define WM_TXQUEUELEN_MAX_82547	16
 #define WM_TXQUEUELEN(txq)	((txq)->txq_num)
 #define WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
 #define WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
 #define WM_NTXDESC_82542	256
 #define WM_NTXDESC_82544	4096
 #define WM_NTXDESC(txq)		((txq)->txq_ndesc)
 #define WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
 #define WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
 #define WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
 #define WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
 
 #define WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
 
 #define WM_TXINTERQSIZE		256
 
 #ifndef WM_TX_PROCESS_LIMIT_DEFAULT
 #define WM_TX_PROCESS_LIMIT_DEFAULT		100U
 #endif
 #ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
 #define WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
 #endif
 
 /*
  * Receive descriptor list size.  We have one Rx buffer for normal
  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
  * packet.  We allocate 256 receive descriptors, each with a 2k
  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
  */
 #define WM_NRXDESC		256U
 #define WM_NRXDESC_MASK		(WM_NRXDESC - 1)
 #define WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
 #define WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
 
 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
 #define WM_RX_PROCESS_LIMIT_DEFAULT		100U
 #endif
 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
 #define WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
 #endif
 
 typedef union txdescs {
 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
 	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
 } txdescs_t;
 
 typedef union rxdescs {
 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
 	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
 	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
 } rxdescs_t;
 
 #define WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
 #define WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
 
 /*
  * Software state for transmit jobs.
  */
 struct wm_txsoft {
 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
 	bus_dmamap_t txs_dmamap;	/* our DMA map */
 	int txs_firstdesc;		/* first descriptor in packet */
 	int txs_lastdesc;		/* last descriptor in packet */
 	int txs_ndesc;			/* # of descriptors used */
 };
 
 /*
  * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
  * buffer and a DMA map. For packets which fill more than one buffer, we chain
  * them together.
  */
 struct wm_rxsoft {
 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
 };
 
 #define WM_LINKUP_TIMEOUT	50
 
 static uint16_t swfwphysem[] = {
 	SWFW_PHY0_SM,
 	SWFW_PHY1_SM,
 	SWFW_PHY2_SM,
 	SWFW_PHY3_SM
 };
 
 static const uint32_t wm_82580_rxpbs_table[] = {
 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
 };
 
 struct wm_softc;
 
 #ifdef WM_EVENT_COUNTERS
 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
 	struct evcnt qname##_ev_##evname;
 
 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
 	do {								\
 		snprintf((q)->qname##_##evname##_evcnt_name,		\
 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
 		    "%s%02d%s", #qname, (qnum), #evname);		\
 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
 		    (evtype), NULL, (xname),				\
 		    (q)->qname##_##evname##_evcnt_name);		\
 	} while (0)
 
 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
 
 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
 
 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)			\
 	evcnt_detach(&(q)->qname##_ev_##evname);
 #endif /* WM_EVENT_COUNTERS */
 
 struct wm_txqueue {
 	kmutex_t *txq_lock;		/* lock for tx operations */
 
 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
 
 	/* Software state for the transmit descriptors. */
 	int txq_num;			/* must be a power of two */
 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
 
 	/* TX control data structures. */
 	int txq_ndesc;			/* must be a power of two */
 	size_t txq_descsize;		/* a tx descriptor size */
 	txdescs_t *txq_descs_u;
 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
 	int txq_desc_rseg;		/* real number of control segment */
 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
 #define	txq_descs	txq_descs_u->sctxu_txdescs
 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
 
 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
 
 	int txq_free;			/* number of free Tx descriptors */
 	int txq_next;			/* next ready Tx descriptor */
 
 	int txq_sfree;			/* number of free Tx jobs */
 	int txq_snext;			/* next free Tx job */
 	int txq_sdirty;			/* dirty Tx jobs */
 
 	/* These 4 variables are used only on the 82547. */
 	int txq_fifo_size;		/* Tx FIFO size */
 	int txq_fifo_head;		/* current head of FIFO */
 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
 	int txq_fifo_stall;		/* Tx FIFO is stalled */
 
 	/*
 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
 	 * CPUs. This queue intermediate them without block.
 	 */
 	pcq_t *txq_interq;
 
 	/*
 	 * NEWQUEUE devices must use not ifp->if_flags but txq->txq_flags
 	 * to manage Tx H/W queue's busy flag.
 	 */
 	int txq_flags;			/* flags for H/W queue, see below */
 #define	WM_TXQ_NO_SPACE	0x1
 
 	bool txq_stopping;
 
 	bool txq_sending;
 	time_t txq_lastsent;
 
 	/* Checksum flags used for previous packet */
 	uint32_t	txq_last_hw_cmd;
 	uint8_t		txq_last_hw_fields;
 	uint16_t	txq_last_hw_ipcs;
 	uint16_t	txq_last_hw_tucs;
 
 	uint32_t txq_packets;		/* for AIM */
 	uint32_t txq_bytes;		/* for AIM */
 #ifdef WM_EVENT_COUNTERS
 	/* TX event counters */
 	WM_Q_EVCNT_DEFINE(txq, txsstall)   /* Stalled due to no txs */
 	WM_Q_EVCNT_DEFINE(txq, txdstall)   /* Stalled due to no txd */
 	WM_Q_EVCNT_DEFINE(txq, fifo_stall) /* FIFO stalls (82547) */
 	WM_Q_EVCNT_DEFINE(txq, txdw)	   /* Tx descriptor interrupts */
 	WM_Q_EVCNT_DEFINE(txq, txqe)	   /* Tx queue empty interrupts */
 					   /* XXX not used? */
 
 	WM_Q_EVCNT_DEFINE(txq, ipsum)	   /* IP checksums comp. */
 	WM_Q_EVCNT_DEFINE(txq, tusum)	   /* TCP/UDP cksums comp. */
 	WM_Q_EVCNT_DEFINE(txq, tusum6)	   /* TCP/UDP v6 cksums comp. */
 	WM_Q_EVCNT_DEFINE(txq, tso)	   /* TCP seg offload (IPv4) */
 	WM_Q_EVCNT_DEFINE(txq, tso6)	   /* TCP seg offload (IPv6) */
 	WM_Q_EVCNT_DEFINE(txq, tsopain)	   /* Painful header manip. for TSO */
 	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	   /* Pkt dropped in pcq */
 	WM_Q_EVCNT_DEFINE(txq, descdrop)   /* Pkt dropped in MAC desc ring */
 					   /* other than toomanyseg */
 
 	WM_Q_EVCNT_DEFINE(txq, toomanyseg) /* Pkt dropped(toomany DMA segs) */
 	WM_Q_EVCNT_DEFINE(txq, defrag)	   /* m_defrag() */
 	WM_Q_EVCNT_DEFINE(txq, underrun)   /* Tx underrun */
 	WM_Q_EVCNT_DEFINE(txq, skipcontext) /* Tx skip wring cksum context */
 
 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
 #endif /* WM_EVENT_COUNTERS */
 };
 
 struct wm_rxqueue {
 	kmutex_t *rxq_lock;		/* lock for rx operations */
 
 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
 
 	/* Software state for the receive descriptors. */
 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
 
 	/* RX control data structures. */
 	int rxq_ndesc;			/* must be a power of two */
 	size_t rxq_descsize;		/* a rx descriptor size */
 	rxdescs_t *rxq_descs_u;
 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
 	int rxq_desc_rseg;		/* real number of control segment */
 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
 
 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
 
 	int rxq_ptr;			/* next ready Rx desc/queue ent */
 	int rxq_discard;
 	int rxq_len;
 	struct mbuf *rxq_head;
 	struct mbuf *rxq_tail;
 	struct mbuf **rxq_tailp;
 
 	bool rxq_stopping;
 
 	uint32_t rxq_packets;		/* for AIM */
 	uint32_t rxq_bytes;		/* for AIM */
 #ifdef WM_EVENT_COUNTERS
 	/* RX event counters */
 	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
 	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */
 
 	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
 	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
 #endif
 };
 
 struct wm_queue {
 	int wmq_id;			/* index of TX/RX queues */
 	int wmq_intr_idx;		/* index of MSI-X tables */
 
 	uint32_t wmq_itr;		/* interrupt interval per queue. */
 	bool wmq_set_itr;
 
 	struct wm_txqueue wmq_txq;
 	struct wm_rxqueue wmq_rxq;
 
 	bool wmq_txrx_use_workqueue;
 	struct work wmq_cookie;
 	void *wmq_si;
 };
 
 struct wm_phyop {
 	int (*acquire)(struct wm_softc *);
 	void (*release)(struct wm_softc *);
 	int (*readreg_locked)(device_t, int, int, uint16_t *);
 	int (*writereg_locked)(device_t, int, int, uint16_t);
 	int reset_delay_us;
 	bool no_errprint;
 };
 
 struct wm_nvmop {
 	int (*acquire)(struct wm_softc *);
 	void (*release)(struct wm_softc *);
 	int (*read)(struct wm_softc *, int, int, uint16_t *);
 };
 
 /*
  * Software state per device.
  */
 struct wm_softc {
 	device_t sc_dev;		/* generic device information */
 	bus_space_tag_t sc_st;		/* bus space tag */
 	bus_space_handle_t sc_sh;	/* bus space handle */
 	bus_size_t sc_ss;		/* bus space size */
 	bus_space_tag_t sc_iot;		/* I/O space tag */
 	bus_space_handle_t sc_ioh;	/* I/O space handle */
 	bus_size_t sc_ios;		/* I/O space size */
 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
 	bus_size_t sc_flashs;		/* flash registers space size */
 	off_t sc_flashreg_offset;	/*
 					 * offset to flash registers from
 					 * start of BAR
 					 */
 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
 
 	struct ethercom sc_ethercom;	/* ethernet common data */
 	struct mii_data sc_mii;		/* MII/media information */
 
 	pci_chipset_tag_t sc_pc;
 	pcitag_t sc_pcitag;
 	int sc_bus_speed;		/* PCI/PCIX bus speed */
 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
 
 	uint16_t sc_pcidevid;		/* PCI device ID */
 	wm_chip_type sc_type;		/* MAC type */
 	int sc_rev;			/* MAC revision */
 	wm_phy_type sc_phytype;		/* PHY type */
 	uint8_t sc_sfptype;		/* SFP type */
 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
 #define	WM_MEDIATYPE_UNKNOWN		0x00
 #define	WM_MEDIATYPE_FIBER		0x01
 #define	WM_MEDIATYPE_COPPER		0x02
 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
 	int sc_funcid;			/* unit number of the chip (0 to 3) */
 	int sc_flags;			/* flags; see below */
 	u_short sc_if_flags;		/* last if_flags */
 	int sc_ec_capenable;		/* last ec_capenable */
 	int sc_flowflags;		/* 802.3x flow control flags */
 	uint16_t eee_lp_ability;	/* EEE link partner's ability */
 	int sc_align_tweak;
 
 	void *sc_ihs[WM_MAX_NINTR];	/*
 					 * interrupt cookie.
 					 * - legacy and msi use sc_ihs[0] only
 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
 					 */
 	pci_intr_handle_t *sc_intrs;	/*
 					 * legacy and msi use sc_intrs[0] only
 					 * msix use sc_intrs[0] to sc_ihs[nintrs-1]
 					 */
 	int sc_nintrs;			/* number of interrupts */
 
 	int sc_link_intr_idx;		/* index of MSI-X tables */
 
 	callout_t sc_tick_ch;		/* tick callout */
 	bool sc_core_stopping;
 
 	int sc_nvm_ver_major;
 	int sc_nvm_ver_minor;
 	int sc_nvm_ver_build;
 	int sc_nvm_addrbits;		/* NVM address bits */
 	unsigned int sc_nvm_wordsize;	/* NVM word size */
 	int sc_ich8_flash_base;
 	int sc_ich8_flash_bank_size;
 	int sc_nvm_k1_enabled;
 
 	int sc_nqueues;
 	struct wm_queue *sc_queue;
 	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
 	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
 	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
 	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
 	struct workqueue *sc_queue_wq;
 	bool sc_txrx_use_workqueue;
 
 	int sc_affinity_offset;
 
 #ifdef WM_EVENT_COUNTERS
 	/* Event counters. */
 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
 
 	/* WM_T_82542_2_1 only */
 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
 #endif /* WM_EVENT_COUNTERS */
 
 	struct sysctllog *sc_sysctllog;
 
 	/* This variable are used only on the 82547. */
 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
 
 	uint32_t sc_ctrl;		/* prototype CTRL register */
 #if 0
 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
 #endif
 	uint32_t sc_icr;		/* prototype interrupt bits */
 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
 	uint32_t sc_tctl;		/* prototype TCTL register */
 	uint32_t sc_rctl;		/* prototype RCTL register */
 	uint32_t sc_txcw;		/* prototype TXCW register */
 	uint32_t sc_tipg;		/* prototype TIPG register */
 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
 	uint32_t sc_pba;		/* prototype PBA register */
 
 	int sc_tbi_linkup;		/* TBI link status */
 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
 	int sc_tbi_serdes_ticks;	/* tbi ticks */
 
 	int sc_mchash_type;		/* multicast filter offset */
 
 	krndsource_t rnd_source;	/* random source */
 
 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
 
 	kmutex_t *sc_core_lock;		/* lock for softc operations */
 	kmutex_t *sc_ich_phymtx;	/*
 					 * 82574/82583/ICH/PCH specific PHY
 					 * mutex. For 82574/82583, the mutex
 					 * is used for both PHY and NVM.
 					 */
 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
 
 	struct wm_phyop phy;
 	struct wm_nvmop nvm;
 };
 
 #define WM_CORE_LOCK(_sc)						\
 	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
 #define WM_CORE_UNLOCK(_sc)						\
 	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
 #define WM_CORE_LOCKED(_sc)						\
 	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
 
 #define WM_RXCHAIN_RESET(rxq)						\
 do {									\
 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
 	*(rxq)->rxq_tailp = NULL;					\
 	(rxq)->rxq_len = 0;						\
 } while (/*CONSTCOND*/0)
 
 #define WM_RXCHAIN_LINK(rxq, m)						\
 do {									\
 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
 	(rxq)->rxq_tailp = &(m)->m_next;				\
 } while (/*CONSTCOND*/0)
 
 #ifdef WM_EVENT_COUNTERS
 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
 
 #define WM_Q_EVCNT_INCR(qname, evname)			\
 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
 #else /* !WM_EVENT_COUNTERS */
 #define	WM_EVCNT_INCR(ev)	/* nothing */
 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
 
 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
 #endif /* !WM_EVENT_COUNTERS */
 
 #define	CSR_READ(sc, reg)						\
 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
 #define	CSR_WRITE(sc, reg, val)						\
 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
 #define	CSR_WRITE_FLUSH(sc)						\
 	(void)CSR_READ((sc), WMREG_STATUS)
 
 #define ICH8_FLASH_READ32(sc, reg)					\
 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
 	    (reg) + sc->sc_flashreg_offset)
 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
 	    (reg) + sc->sc_flashreg_offset, (data))
 
 #define ICH8_FLASH_READ16(sc, reg)					\
 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
 	    (reg) + sc->sc_flashreg_offset)
 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
 	    (reg) + sc->sc_flashreg_offset, (data))
 
 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
 
 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
 #define	WM_CDTXADDR_HI(txq, x)						\
 	(sizeof(bus_addr_t) == 8 ?					\
 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
 
 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
 #define	WM_CDRXADDR_HI(rxq, x)						\
 	(sizeof(bus_addr_t) == 8 ?					\
 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
 
 /*
  * Register read/write functions.
  * Other than CSR_{READ|WRITE}().
  */
 #if 0
 static inline uint32_t wm_io_read(struct wm_softc *, int);
 #endif
 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
     uint32_t, uint32_t);
 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
 
 /*
  * Descriptor sync/init functions.
  */
 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
 
 /*
  * Device driver interface functions and commonly used functions.
  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
  */
 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
 static int	wm_match(device_t, cfdata_t, void *);
 static void	wm_attach(device_t, device_t, void *);
 static int	wm_detach(device_t, int);
 static bool	wm_suspend(device_t, const pmf_qual_t *);
 static bool	wm_resume(device_t, const pmf_qual_t *);
 static void	wm_watchdog(struct ifnet *);
 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
     uint16_t *);
 static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
     uint16_t *);
 static void	wm_tick(void *);
 static int	wm_ifflags_cb(struct ethercom *);
 static int	wm_ioctl(struct ifnet *, u_long, void *);
 /* MAC address related */
 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
 static int	wm_rar_count(struct wm_softc *);
 static void	wm_set_filter(struct wm_softc *);
 /* Reset and init related */
 static void	wm_set_vlan(struct wm_softc *);
 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
 static void	wm_get_auto_rd_done(struct wm_softc *);
 static void	wm_lan_init_done(struct wm_softc *);
 static void	wm_get_cfg_done(struct wm_softc *);
 static int	wm_phy_post_reset(struct wm_softc *);
 static int	wm_write_smbus_addr(struct wm_softc *);
 static int	wm_init_lcd_from_nvm(struct wm_softc *);
 static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
 static void	wm_initialize_hardware_bits(struct wm_softc *);
 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
 static int	wm_reset_phy(struct wm_softc *);
 static void	wm_flush_desc_rings(struct wm_softc *);
 static void	wm_reset(struct wm_softc *);
 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
 static void	wm_rxdrain(struct wm_rxqueue *);
 static void	wm_init_rss(struct wm_softc *);
 static void	wm_adjust_qnum(struct wm_softc *, int);
 static inline bool	wm_is_using_msix(struct wm_softc *);
 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
 static int	wm_softint_establish_queue(struct wm_softc *, int, int);
 static int	wm_setup_legacy(struct wm_softc *);
 static int	wm_setup_msix(struct wm_softc *);
 static int	wm_init(struct ifnet *);
 static int	wm_init_locked(struct ifnet *);
 static void	wm_init_sysctls(struct wm_softc *);
 static void	wm_unset_stopping_flags(struct wm_softc *);
 static void	wm_set_stopping_flags(struct wm_softc *);
 static void	wm_stop(struct ifnet *, int);
 static void	wm_stop_locked(struct ifnet *, bool, bool);
 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
 static void	wm_82547_txfifo_stall(void *);
 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
 /* DMA related */
 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
     struct wm_txqueue *);
 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
     struct wm_rxqueue *);
 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
792static void wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *); 792static void wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
793static int wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *); 793static int wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
794static void wm_init_tx_queue(struct wm_softc *, struct wm_queue *, 794static void wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
795 struct wm_txqueue *); 795 struct wm_txqueue *);
796static int wm_init_rx_queue(struct wm_softc *, struct wm_queue *, 796static int wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
797 struct wm_rxqueue *); 797 struct wm_rxqueue *);
798static int wm_alloc_txrx_queues(struct wm_softc *); 798static int wm_alloc_txrx_queues(struct wm_softc *);
799static void wm_free_txrx_queues(struct wm_softc *); 799static void wm_free_txrx_queues(struct wm_softc *);
800static int wm_init_txrx_queues(struct wm_softc *); 800static int wm_init_txrx_queues(struct wm_softc *);
801/* Start */ 801/* Start */
802static void wm_tx_offload(struct wm_softc *, struct wm_txqueue *, 802static void wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
803 struct wm_txsoft *, uint32_t *, uint8_t *); 803 struct wm_txsoft *, uint32_t *, uint8_t *);
804static inline int wm_select_txqueue(struct ifnet *, struct mbuf *); 804static inline int wm_select_txqueue(struct ifnet *, struct mbuf *);
805static void wm_start(struct ifnet *); 805static void wm_start(struct ifnet *);
806static void wm_start_locked(struct ifnet *); 806static void wm_start_locked(struct ifnet *);
807static int wm_transmit(struct ifnet *, struct mbuf *); 807static int wm_transmit(struct ifnet *, struct mbuf *);
808static void wm_transmit_locked(struct ifnet *, struct wm_txqueue *); 808static void wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
809static void wm_send_common_locked(struct ifnet *, struct wm_txqueue *, 809static void wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
810 bool); 810 bool);
811static void wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *, 811static void wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
812 struct wm_txsoft *, uint32_t *, uint32_t *, bool *); 812 struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
813static void wm_nq_start(struct ifnet *); 813static void wm_nq_start(struct ifnet *);
814static void wm_nq_start_locked(struct ifnet *); 814static void wm_nq_start_locked(struct ifnet *);
815static int wm_nq_transmit(struct ifnet *, struct mbuf *); 815static int wm_nq_transmit(struct ifnet *, struct mbuf *);
816static void wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *); 816static void wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
817static void wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, 817static void wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
818 bool); 818 bool);
819static void wm_deferred_start_locked(struct wm_txqueue *); 819static void wm_deferred_start_locked(struct wm_txqueue *);
820static void wm_handle_queue(void *); 820static void wm_handle_queue(void *);
821static void wm_handle_queue_work(struct work *, void *); 821static void wm_handle_queue_work(struct work *, void *);
822/* Interrupt */ 822/* Interrupt */
823static bool wm_txeof(struct wm_txqueue *, u_int); 823static bool wm_txeof(struct wm_txqueue *, u_int);
824static bool wm_rxeof(struct wm_rxqueue *, u_int); 824static bool wm_rxeof(struct wm_rxqueue *, u_int);
825static void wm_linkintr_gmii(struct wm_softc *, uint32_t); 825static void wm_linkintr_gmii(struct wm_softc *, uint32_t);
826static void wm_linkintr_tbi(struct wm_softc *, uint32_t); 826static void wm_linkintr_tbi(struct wm_softc *, uint32_t);
827static void wm_linkintr_serdes(struct wm_softc *, uint32_t); 827static void wm_linkintr_serdes(struct wm_softc *, uint32_t);
828static void wm_linkintr(struct wm_softc *, uint32_t); 828static void wm_linkintr(struct wm_softc *, uint32_t);
829static int wm_intr_legacy(void *); 829static int wm_intr_legacy(void *);
830static inline void wm_txrxintr_disable(struct wm_queue *); 830static inline void wm_txrxintr_disable(struct wm_queue *);
831static inline void wm_txrxintr_enable(struct wm_queue *); 831static inline void wm_txrxintr_enable(struct wm_queue *);
832static void wm_itrs_calculate(struct wm_softc *, struct wm_queue *); 832static void wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
833static int wm_txrxintr_msix(void *); 833static int wm_txrxintr_msix(void *);
834static int wm_linkintr_msix(void *); 834static int wm_linkintr_msix(void *);
835 835
836/* 836/*
837 * Media related. 837 * Media related.
838 * GMII, SGMII, TBI, SERDES and SFP. 838 * GMII, SGMII, TBI, SERDES and SFP.
839 */ 839 */
840/* Common */ 840/* Common */
841static void wm_tbi_serdes_set_linkled(struct wm_softc *); 841static void wm_tbi_serdes_set_linkled(struct wm_softc *);
842/* GMII related */ 842/* GMII related */
843static void wm_gmii_reset(struct wm_softc *); 843static void wm_gmii_reset(struct wm_softc *);
844static void wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t); 844static void wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
845static int wm_get_phy_id_82575(struct wm_softc *); 845static int wm_get_phy_id_82575(struct wm_softc *);
846static void wm_gmii_mediainit(struct wm_softc *, pci_product_id_t); 846static void wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
847static int wm_gmii_mediachange(struct ifnet *); 847static int wm_gmii_mediachange(struct ifnet *);
848static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *); 848static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
849static void wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int); 849static void wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
850static uint16_t wm_i82543_mii_recvbits(struct wm_softc *); 850static uint16_t wm_i82543_mii_recvbits(struct wm_softc *);
851static int wm_gmii_i82543_readreg(device_t, int, int, uint16_t *); 851static int wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
852static int wm_gmii_i82543_writereg(device_t, int, int, uint16_t); 852static int wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
853static int wm_gmii_mdic_readreg(device_t, int, int, uint16_t *); 853static int wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
854static int wm_gmii_mdic_writereg(device_t, int, int, uint16_t); 854static int wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
855static int wm_gmii_i82544_readreg(device_t, int, int, uint16_t *); 855static int wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
856static int wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *); 856static int wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
857static int wm_gmii_i82544_writereg(device_t, int, int, uint16_t); 857static int wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
858static int wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t); 858static int wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
859static int wm_gmii_i80003_readreg(device_t, int, int, uint16_t *); 859static int wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
860static int wm_gmii_i80003_writereg(device_t, int, int, uint16_t); 860static int wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
861static int wm_gmii_bm_readreg(device_t, int, int, uint16_t *); 861static int wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
862static int wm_gmii_bm_writereg(device_t, int, int, uint16_t); 862static int wm_gmii_bm_writereg(device_t, int, int, uint16_t);
863static int wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *); 863static int wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
864static int wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *); 864static int wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
865static int wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int, 865static int wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
866 bool); 866 bool);
867static int wm_gmii_hv_readreg(device_t, int, int, uint16_t *); 867static int wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
868static int wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *); 868static int wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
869static int wm_gmii_hv_writereg(device_t, int, int, uint16_t); 869static int wm_gmii_hv_writereg(device_t, int, int, uint16_t);
870static int wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t); 870static int wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
871static int wm_gmii_82580_readreg(device_t, int, int, uint16_t *); 871static int wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
872static int wm_gmii_82580_writereg(device_t, int, int, uint16_t); 872static int wm_gmii_82580_writereg(device_t, int, int, uint16_t);
873static int wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *); 873static int wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
874static int wm_gmii_gs40g_writereg(device_t, int, int, uint16_t); 874static int wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
875static void wm_gmii_statchg(struct ifnet *); 875static void wm_gmii_statchg(struct ifnet *);
876/* 876/*
 877 * Kumeran related (80003, ICH* and PCH*). 877 * Kumeran related (80003, ICH* and PCH*).
878 * These functions are not for accessing MII registers but for accessing 878 * These functions are not for accessing MII registers but for accessing
 879 * Kumeran-specific registers. 879 * Kumeran-specific registers.
880 */ 880 */
881static int wm_kmrn_readreg(struct wm_softc *, int, uint16_t *); 881static int wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
882static int wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *); 882static int wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
883static int wm_kmrn_writereg(struct wm_softc *, int, uint16_t); 883static int wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
884static int wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t); 884static int wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
885/* EMI register related */ 885/* EMI register related */
886static int wm_access_emi_reg_locked(device_t, int, uint16_t *, bool); 886static int wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
887static int wm_read_emi_reg_locked(device_t, int, uint16_t *); 887static int wm_read_emi_reg_locked(device_t, int, uint16_t *);
888static int wm_write_emi_reg_locked(device_t, int, uint16_t); 888static int wm_write_emi_reg_locked(device_t, int, uint16_t);
889/* SGMII */ 889/* SGMII */
890static bool wm_sgmii_uses_mdio(struct wm_softc *); 890static bool wm_sgmii_uses_mdio(struct wm_softc *);
891static void wm_sgmii_sfp_preconfig(struct wm_softc *); 891static void wm_sgmii_sfp_preconfig(struct wm_softc *);
892static int wm_sgmii_readreg(device_t, int, int, uint16_t *); 892static int wm_sgmii_readreg(device_t, int, int, uint16_t *);
893static int wm_sgmii_readreg_locked(device_t, int, int, uint16_t *); 893static int wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
894static int wm_sgmii_writereg(device_t, int, int, uint16_t); 894static int wm_sgmii_writereg(device_t, int, int, uint16_t);
895static int wm_sgmii_writereg_locked(device_t, int, int, uint16_t); 895static int wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
896/* TBI related */ 896/* TBI related */
897static bool wm_tbi_havesignal(struct wm_softc *, uint32_t); 897static bool wm_tbi_havesignal(struct wm_softc *, uint32_t);
898static void wm_tbi_mediainit(struct wm_softc *); 898static void wm_tbi_mediainit(struct wm_softc *);
899static int wm_tbi_mediachange(struct ifnet *); 899static int wm_tbi_mediachange(struct ifnet *);
900static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *); 900static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
901static int wm_check_for_link(struct wm_softc *); 901static int wm_check_for_link(struct wm_softc *);
902static void wm_tbi_tick(struct wm_softc *); 902static void wm_tbi_tick(struct wm_softc *);
903/* SERDES related */ 903/* SERDES related */
904static void wm_serdes_power_up_link_82575(struct wm_softc *); 904static void wm_serdes_power_up_link_82575(struct wm_softc *);
905static int wm_serdes_mediachange(struct ifnet *); 905static int wm_serdes_mediachange(struct ifnet *);
906static void wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *); 906static void wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
907static void wm_serdes_tick(struct wm_softc *); 907static void wm_serdes_tick(struct wm_softc *);
908/* SFP related */ 908/* SFP related */
909static int wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *); 909static int wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
910static uint32_t wm_sfp_get_media_type(struct wm_softc *); 910static uint32_t wm_sfp_get_media_type(struct wm_softc *);
911 911
912/* 912/*
913 * NVM related. 913 * NVM related.
914 * Microwire, SPI (w/wo EERD) and Flash. 914 * Microwire, SPI (w/wo EERD) and Flash.
915 */ 915 */
916/* Misc functions */ 916/* Misc functions */
917static void wm_eeprom_sendbits(struct wm_softc *, uint32_t, int); 917static void wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
918static void wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int); 918static void wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
919static int wm_nvm_set_addrbits_size_eecd(struct wm_softc *); 919static int wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
920/* Microwire */ 920/* Microwire */
921static int wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *); 921static int wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
922/* SPI */ 922/* SPI */
923static int wm_nvm_ready_spi(struct wm_softc *); 923static int wm_nvm_ready_spi(struct wm_softc *);
924static int wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *); 924static int wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
 925/* Used with EERD */ 925/* Used with EERD */
926static int wm_poll_eerd_eewr_done(struct wm_softc *, int); 926static int wm_poll_eerd_eewr_done(struct wm_softc *, int);
927static int wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *); 927static int wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
928/* Flash */ 928/* Flash */
929static int wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *, 929static int wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
930 unsigned int *); 930 unsigned int *);
931static int32_t wm_ich8_cycle_init(struct wm_softc *); 931static int32_t wm_ich8_cycle_init(struct wm_softc *);
932static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t); 932static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
933static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t, 933static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
934 uint32_t *); 934 uint32_t *);
935static int32_t wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *); 935static int32_t wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
936static int32_t wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *); 936static int32_t wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
937static int32_t wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *); 937static int32_t wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
938static int wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *); 938static int wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
939static int wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *); 939static int wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
940/* iNVM */ 940/* iNVM */
941static int wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *); 941static int wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
942static int wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *); 942static int wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
 943/* Lock, detect NVM type, validate checksum and read */ 943/* Lock, detect NVM type, validate checksum and read */
944static int wm_nvm_is_onboard_eeprom(struct wm_softc *); 944static int wm_nvm_is_onboard_eeprom(struct wm_softc *);
945static int wm_nvm_flash_presence_i210(struct wm_softc *); 945static int wm_nvm_flash_presence_i210(struct wm_softc *);
946static int wm_nvm_validate_checksum(struct wm_softc *); 946static int wm_nvm_validate_checksum(struct wm_softc *);
947static void wm_nvm_version_invm(struct wm_softc *); 947static void wm_nvm_version_invm(struct wm_softc *);
948static void wm_nvm_version(struct wm_softc *); 948static void wm_nvm_version(struct wm_softc *);
949static int wm_nvm_read(struct wm_softc *, int, int, uint16_t *); 949static int wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
950 950
951/* 951/*
952 * Hardware semaphores. 952 * Hardware semaphores.
 953 * Very complex... 953 * Very complex...
954 */ 954 */
955static int wm_get_null(struct wm_softc *); 955static int wm_get_null(struct wm_softc *);
956static void wm_put_null(struct wm_softc *); 956static void wm_put_null(struct wm_softc *);
957static int wm_get_eecd(struct wm_softc *); 957static int wm_get_eecd(struct wm_softc *);
958static void wm_put_eecd(struct wm_softc *); 958static void wm_put_eecd(struct wm_softc *);
959static int wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */ 959static int wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
960static void wm_put_swsm_semaphore(struct wm_softc *); 960static void wm_put_swsm_semaphore(struct wm_softc *);
961static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t); 961static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
962static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t); 962static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
963static int wm_get_nvm_80003(struct wm_softc *); 963static int wm_get_nvm_80003(struct wm_softc *);
964static void wm_put_nvm_80003(struct wm_softc *); 964static void wm_put_nvm_80003(struct wm_softc *);
965static int wm_get_nvm_82571(struct wm_softc *); 965static int wm_get_nvm_82571(struct wm_softc *);
966static void wm_put_nvm_82571(struct wm_softc *); 966static void wm_put_nvm_82571(struct wm_softc *);
967static int wm_get_phy_82575(struct wm_softc *); 967static int wm_get_phy_82575(struct wm_softc *);
968static void wm_put_phy_82575(struct wm_softc *); 968static void wm_put_phy_82575(struct wm_softc *);
969static int wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */ 969static int wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
970static void wm_put_swfwhw_semaphore(struct wm_softc *); 970static void wm_put_swfwhw_semaphore(struct wm_softc *);
971static int wm_get_swflag_ich8lan(struct wm_softc *); /* For PHY */ 971static int wm_get_swflag_ich8lan(struct wm_softc *); /* For PHY */
972static void wm_put_swflag_ich8lan(struct wm_softc *); 972static void wm_put_swflag_ich8lan(struct wm_softc *);
973static int wm_get_nvm_ich8lan(struct wm_softc *); 973static int wm_get_nvm_ich8lan(struct wm_softc *);
974static void wm_put_nvm_ich8lan(struct wm_softc *); 974static void wm_put_nvm_ich8lan(struct wm_softc *);
975static int wm_get_hw_semaphore_82573(struct wm_softc *); 975static int wm_get_hw_semaphore_82573(struct wm_softc *);
976static void wm_put_hw_semaphore_82573(struct wm_softc *); 976static void wm_put_hw_semaphore_82573(struct wm_softc *);
977 977
978/* 978/*
979 * Management mode and power management related subroutines. 979 * Management mode and power management related subroutines.
980 * BMC, AMT, suspend/resume and EEE. 980 * BMC, AMT, suspend/resume and EEE.
981 */ 981 */
982#if 0 982#if 0
983static int wm_check_mng_mode(struct wm_softc *); 983static int wm_check_mng_mode(struct wm_softc *);
984static int wm_check_mng_mode_ich8lan(struct wm_softc *); 984static int wm_check_mng_mode_ich8lan(struct wm_softc *);
985static int wm_check_mng_mode_82574(struct wm_softc *); 985static int wm_check_mng_mode_82574(struct wm_softc *);
986static int wm_check_mng_mode_generic(struct wm_softc *); 986static int wm_check_mng_mode_generic(struct wm_softc *);
987#endif 987#endif
988static int wm_enable_mng_pass_thru(struct wm_softc *); 988static int wm_enable_mng_pass_thru(struct wm_softc *);
989static bool wm_phy_resetisblocked(struct wm_softc *); 989static bool wm_phy_resetisblocked(struct wm_softc *);
990static void wm_get_hw_control(struct wm_softc *); 990static void wm_get_hw_control(struct wm_softc *);
991static void wm_release_hw_control(struct wm_softc *); 991static void wm_release_hw_control(struct wm_softc *);
992static void wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool); 992static void wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
993static int wm_init_phy_workarounds_pchlan(struct wm_softc *); 993static int wm_init_phy_workarounds_pchlan(struct wm_softc *);
994static void wm_init_manageability(struct wm_softc *); 994static void wm_init_manageability(struct wm_softc *);
995static void wm_release_manageability(struct wm_softc *); 995static void wm_release_manageability(struct wm_softc *);
996static void wm_get_wakeup(struct wm_softc *); 996static void wm_get_wakeup(struct wm_softc *);
997static int wm_ulp_disable(struct wm_softc *); 997static int wm_ulp_disable(struct wm_softc *);
998static int wm_enable_phy_wakeup(struct wm_softc *); 998static int wm_enable_phy_wakeup(struct wm_softc *);
999static void wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *); 999static void wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
1000static void wm_suspend_workarounds_ich8lan(struct wm_softc *); 1000static void wm_suspend_workarounds_ich8lan(struct wm_softc *);
1001static int wm_resume_workarounds_pchlan(struct wm_softc *); 1001static int wm_resume_workarounds_pchlan(struct wm_softc *);
1002static void wm_enable_wakeup(struct wm_softc *); 1002static void wm_enable_wakeup(struct wm_softc *);
1003static void wm_disable_aspm(struct wm_softc *); 1003static void wm_disable_aspm(struct wm_softc *);
1004/* LPLU (Low Power Link Up) */ 1004/* LPLU (Low Power Link Up) */
1005static void wm_lplu_d0_disable(struct wm_softc *); 1005static void wm_lplu_d0_disable(struct wm_softc *);
1006/* EEE */ 1006/* EEE */
1007static int wm_set_eee_i350(struct wm_softc *); 1007static int wm_set_eee_i350(struct wm_softc *);
1008static int wm_set_eee_pchlan(struct wm_softc *); 1008static int wm_set_eee_pchlan(struct wm_softc *);
1009static int wm_set_eee(struct wm_softc *); 1009static int wm_set_eee(struct wm_softc *);
1010 1010
1011/* 1011/*
1012 * Workarounds (mainly PHY related). 1012 * Workarounds (mainly PHY related).
1013 * Basically, PHY's workarounds are in the PHY drivers. 1013 * Basically, PHY's workarounds are in the PHY drivers.
1014 */ 1014 */
1015static int wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *); 1015static int wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
1016static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *); 1016static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
1017static int wm_hv_phy_workarounds_ich8lan(struct wm_softc *); 1017static int wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
1018static void wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *); 1018static void wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
1019static void wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *); 1019static void wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
1020static int wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool); 1020static int wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
1021static int wm_lv_phy_workarounds_ich8lan(struct wm_softc *); 1021static int wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
1022static int wm_k1_workaround_lpt_lp(struct wm_softc *, bool); 1022static int wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
1023static int wm_k1_gig_workaround_hv(struct wm_softc *, int); 1023static int wm_k1_gig_workaround_hv(struct wm_softc *, int);
1024static int wm_k1_workaround_lv(struct wm_softc *); 1024static int wm_k1_workaround_lv(struct wm_softc *);
1025static int wm_link_stall_workaround_hv(struct wm_softc *); 1025static int wm_link_stall_workaround_hv(struct wm_softc *);
1026static int wm_set_mdio_slow_mode_hv(struct wm_softc *); 1026static int wm_set_mdio_slow_mode_hv(struct wm_softc *);
1027static void wm_configure_k1_ich8lan(struct wm_softc *, int); 1027static void wm_configure_k1_ich8lan(struct wm_softc *, int);
1028static void wm_reset_init_script_82575(struct wm_softc *); 1028static void wm_reset_init_script_82575(struct wm_softc *);
1029static void wm_reset_mdicnfg_82580(struct wm_softc *); 1029static void wm_reset_mdicnfg_82580(struct wm_softc *);
1030static bool wm_phy_is_accessible_pchlan(struct wm_softc *); 1030static bool wm_phy_is_accessible_pchlan(struct wm_softc *);
1031static void wm_toggle_lanphypc_pch_lpt(struct wm_softc *); 1031static void wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
1032static int wm_platform_pm_pch_lpt(struct wm_softc *, bool); 1032static int wm_platform_pm_pch_lpt(struct wm_softc *, bool);
1033static int wm_pll_workaround_i210(struct wm_softc *); 1033static int wm_pll_workaround_i210(struct wm_softc *);
1034static void wm_legacy_irq_quirk_spt(struct wm_softc *); 1034static void wm_legacy_irq_quirk_spt(struct wm_softc *);
1035 1035
1036CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc), 1036CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
1037 wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN); 1037 wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
1038 1038
1039/* 1039/*
1040 * Devices supported by this driver. 1040 * Devices supported by this driver.
1041 */ 1041 */
1042static const struct wm_product { 1042static const struct wm_product {
1043 pci_vendor_id_t wmp_vendor; 1043 pci_vendor_id_t wmp_vendor;
1044 pci_product_id_t wmp_product; 1044 pci_product_id_t wmp_product;
1045 const char *wmp_name; 1045 const char *wmp_name;
1046 wm_chip_type wmp_type; 1046 wm_chip_type wmp_type;
1047 uint32_t wmp_flags; 1047 uint32_t wmp_flags;
1048#define WMP_F_UNKNOWN WM_MEDIATYPE_UNKNOWN 1048#define WMP_F_UNKNOWN WM_MEDIATYPE_UNKNOWN
1049#define WMP_F_FIBER WM_MEDIATYPE_FIBER 1049#define WMP_F_FIBER WM_MEDIATYPE_FIBER
1050#define WMP_F_COPPER WM_MEDIATYPE_COPPER 1050#define WMP_F_COPPER WM_MEDIATYPE_COPPER
1051#define WMP_F_SERDES WM_MEDIATYPE_SERDES 1051#define WMP_F_SERDES WM_MEDIATYPE_SERDES
1052#define WMP_MEDIATYPE(x) ((x) & 0x03) 1052#define WMP_MEDIATYPE(x) ((x) & 0x03)
1053} wm_products[] = { 1053} wm_products[] = {
1054 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542, 1054 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
1055 "Intel i82542 1000BASE-X Ethernet", 1055 "Intel i82542 1000BASE-X Ethernet",
1056 WM_T_82542_2_1, WMP_F_FIBER }, 1056 WM_T_82542_2_1, WMP_F_FIBER },
1057 1057
1058 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER, 1058 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER,
1059 "Intel i82543GC 1000BASE-X Ethernet", 1059 "Intel i82543GC 1000BASE-X Ethernet",
1060 WM_T_82543, WMP_F_FIBER }, 1060 WM_T_82543, WMP_F_FIBER },
1061 1061
1062 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER, 1062 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER,
1063 "Intel i82543GC 1000BASE-T Ethernet", 1063 "Intel i82543GC 1000BASE-T Ethernet",
1064 WM_T_82543, WMP_F_COPPER }, 1064 WM_T_82543, WMP_F_COPPER },
1065 1065
1066 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER, 1066 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER,
1067 "Intel i82544EI 1000BASE-T Ethernet", 1067 "Intel i82544EI 1000BASE-T Ethernet",
1068 WM_T_82544, WMP_F_COPPER }, 1068 WM_T_82544, WMP_F_COPPER },
1069 1069
1070 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER, 1070 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER,
1071 "Intel i82544EI 1000BASE-X Ethernet", 1071 "Intel i82544EI 1000BASE-X Ethernet",
1072 WM_T_82544, WMP_F_FIBER }, 1072 WM_T_82544, WMP_F_FIBER },
1073 1073
1074 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER, 1074 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER,
1075 "Intel i82544GC 1000BASE-T Ethernet", 1075 "Intel i82544GC 1000BASE-T Ethernet",
1076 WM_T_82544, WMP_F_COPPER }, 1076 WM_T_82544, WMP_F_COPPER },
1077 1077
1078 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM, 1078 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM,
1079 "Intel i82544GC (LOM) 1000BASE-T Ethernet", 1079 "Intel i82544GC (LOM) 1000BASE-T Ethernet",
1080 WM_T_82544, WMP_F_COPPER }, 1080 WM_T_82544, WMP_F_COPPER },
1081 1081
1082 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM, 1082 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM,
1083 "Intel i82540EM 1000BASE-T Ethernet", 1083 "Intel i82540EM 1000BASE-T Ethernet",
1084 WM_T_82540, WMP_F_COPPER }, 1084 WM_T_82540, WMP_F_COPPER },
@@ -5527,2008 +5527,2013 @@ wm_setup_legacy(struct wm_softc *sc) @@ -5527,2008 +5527,2013 @@ wm_setup_legacy(struct wm_softc *sc)
5527 sc->sc_nintrs = 1; 5527 sc->sc_nintrs = 1;
5528 5528
5529 return wm_softint_establish_queue(sc, 0, 0); 5529 return wm_softint_establish_queue(sc, 0, 0);
5530} 5530}
5531 5531
5532static int 5532static int
5533wm_setup_msix(struct wm_softc *sc) 5533wm_setup_msix(struct wm_softc *sc)
5534{ 5534{
5535 void *vih; 5535 void *vih;
5536 kcpuset_t *affinity; 5536 kcpuset_t *affinity;
5537 int qidx, error, intr_idx, txrx_established; 5537 int qidx, error, intr_idx, txrx_established;
5538 pci_chipset_tag_t pc = sc->sc_pc; 5538 pci_chipset_tag_t pc = sc->sc_pc;
5539 const char *intrstr = NULL; 5539 const char *intrstr = NULL;
5540 char intrbuf[PCI_INTRSTR_LEN]; 5540 char intrbuf[PCI_INTRSTR_LEN];
5541 char intr_xname[INTRDEVNAMEBUF]; 5541 char intr_xname[INTRDEVNAMEBUF];
5542 5542
5543 if (sc->sc_nqueues < ncpu) { 5543 if (sc->sc_nqueues < ncpu) {
5544 /* 5544 /*
5545 * To avoid other devices' interrupts, the affinity of Tx/Rx 5545 * To avoid other devices' interrupts, the affinity of Tx/Rx
 5546 * interrupts starts from CPU#1. 5546 * interrupts starts from CPU#1.
5547 */ 5547 */
5548 sc->sc_affinity_offset = 1; 5548 sc->sc_affinity_offset = 1;
5549 } else { 5549 } else {
5550 /* 5550 /*
 5551 * In this case, this device uses all CPUs, so we unify the 5551 * In this case, this device uses all CPUs, so we unify the
 5552 * affinity cpu_index with the MSI-X vector number for readability. 5552 * affinity cpu_index with the MSI-X vector number for readability.
5553 */ 5553 */
5554 sc->sc_affinity_offset = 0; 5554 sc->sc_affinity_offset = 0;
5555 } 5555 }
5556 5556
5557 error = wm_alloc_txrx_queues(sc); 5557 error = wm_alloc_txrx_queues(sc);
5558 if (error) { 5558 if (error) {
5559 aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n", 5559 aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
5560 error); 5560 error);
5561 return ENOMEM; 5561 return ENOMEM;
5562 } 5562 }
5563 5563
5564 kcpuset_create(&affinity, false); 5564 kcpuset_create(&affinity, false);
5565 intr_idx = 0; 5565 intr_idx = 0;
5566 5566
5567 /* 5567 /*
5568 * TX and RX 5568 * TX and RX
5569 */ 5569 */
5570 txrx_established = 0; 5570 txrx_established = 0;
5571 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) { 5571 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5572 struct wm_queue *wmq = &sc->sc_queue[qidx]; 5572 struct wm_queue *wmq = &sc->sc_queue[qidx];
5573 int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu; 5573 int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
5574 5574
5575 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf, 5575 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
5576 sizeof(intrbuf)); 5576 sizeof(intrbuf));
5577#ifdef WM_MPSAFE 5577#ifdef WM_MPSAFE
5578 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], 5578 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
5579 PCI_INTR_MPSAFE, true); 5579 PCI_INTR_MPSAFE, true);
5580#endif 5580#endif
5581 memset(intr_xname, 0, sizeof(intr_xname)); 5581 memset(intr_xname, 0, sizeof(intr_xname));
5582 snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d", 5582 snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
5583 device_xname(sc->sc_dev), qidx); 5583 device_xname(sc->sc_dev), qidx);
5584 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx], 5584 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
5585 IPL_NET, wm_txrxintr_msix, wmq, intr_xname); 5585 IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
5586 if (vih == NULL) { 5586 if (vih == NULL) {
5587 aprint_error_dev(sc->sc_dev, 5587 aprint_error_dev(sc->sc_dev,
5588 "unable to establish MSI-X(for TX and RX)%s%s\n", 5588 "unable to establish MSI-X(for TX and RX)%s%s\n",
5589 intrstr ? " at " : "", 5589 intrstr ? " at " : "",
5590 intrstr ? intrstr : ""); 5590 intrstr ? intrstr : "");
5591 5591
5592 goto fail; 5592 goto fail;
5593 } 5593 }
5594 kcpuset_zero(affinity); 5594 kcpuset_zero(affinity);
5595 /* Round-robin affinity */ 5595 /* Round-robin affinity */
5596 kcpuset_set(affinity, affinity_to); 5596 kcpuset_set(affinity, affinity_to);
5597 error = interrupt_distribute(vih, affinity, NULL); 5597 error = interrupt_distribute(vih, affinity, NULL);
5598 if (error == 0) { 5598 if (error == 0) {
5599 aprint_normal_dev(sc->sc_dev, 5599 aprint_normal_dev(sc->sc_dev,
5600 "for TX and RX interrupting at %s affinity to %u\n", 5600 "for TX and RX interrupting at %s affinity to %u\n",
5601 intrstr, affinity_to); 5601 intrstr, affinity_to);
5602 } else { 5602 } else {
5603 aprint_normal_dev(sc->sc_dev, 5603 aprint_normal_dev(sc->sc_dev,
5604 "for TX and RX interrupting at %s\n", intrstr); 5604 "for TX and RX interrupting at %s\n", intrstr);
5605 } 5605 }
5606 sc->sc_ihs[intr_idx] = vih; 5606 sc->sc_ihs[intr_idx] = vih;
5607 if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0) 5607 if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
5608 goto fail; 5608 goto fail;
5609 txrx_established++; 5609 txrx_established++;
5610 intr_idx++; 5610 intr_idx++;
5611 } 5611 }
5612 5612
5613 /* LINK */ 5613 /* LINK */
5614 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf, 5614 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
5615 sizeof(intrbuf)); 5615 sizeof(intrbuf));
5616#ifdef WM_MPSAFE 5616#ifdef WM_MPSAFE
5617 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true); 5617 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
5618#endif 5618#endif
5619 memset(intr_xname, 0, sizeof(intr_xname)); 5619 memset(intr_xname, 0, sizeof(intr_xname));
5620 snprintf(intr_xname, sizeof(intr_xname), "%sLINK", 5620 snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
5621 device_xname(sc->sc_dev)); 5621 device_xname(sc->sc_dev));
5622 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx], 5622 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
5623 IPL_NET, wm_linkintr_msix, sc, intr_xname); 5623 IPL_NET, wm_linkintr_msix, sc, intr_xname);
5624 if (vih == NULL) { 5624 if (vih == NULL) {
5625 aprint_error_dev(sc->sc_dev, 5625 aprint_error_dev(sc->sc_dev,
5626 "unable to establish MSI-X(for LINK)%s%s\n", 5626 "unable to establish MSI-X(for LINK)%s%s\n",
5627 intrstr ? " at " : "", 5627 intrstr ? " at " : "",
5628 intrstr ? intrstr : ""); 5628 intrstr ? intrstr : "");
5629 5629
5630 goto fail; 5630 goto fail;
5631 } 5631 }
5632 /* Keep default affinity to LINK interrupt */ 5632 /* Keep default affinity to LINK interrupt */
5633 aprint_normal_dev(sc->sc_dev, 5633 aprint_normal_dev(sc->sc_dev,
5634 "for LINK interrupting at %s\n", intrstr); 5634 "for LINK interrupting at %s\n", intrstr);
5635 sc->sc_ihs[intr_idx] = vih; 5635 sc->sc_ihs[intr_idx] = vih;
5636 sc->sc_link_intr_idx = intr_idx; 5636 sc->sc_link_intr_idx = intr_idx;
5637 5637
5638 sc->sc_nintrs = sc->sc_nqueues + 1; 5638 sc->sc_nintrs = sc->sc_nqueues + 1;
5639 kcpuset_destroy(affinity); 5639 kcpuset_destroy(affinity);
5640 return 0; 5640 return 0;
5641 5641
5642 fail: 5642 fail:
5643 for (qidx = 0; qidx < txrx_established; qidx++) { 5643 for (qidx = 0; qidx < txrx_established; qidx++) {
5644 struct wm_queue *wmq = &sc->sc_queue[qidx]; 5644 struct wm_queue *wmq = &sc->sc_queue[qidx];
5645 pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]); 5645 pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
5646 sc->sc_ihs[wmq->wmq_intr_idx] = NULL; 5646 sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
5647 } 5647 }
5648 5648
5649 kcpuset_destroy(affinity); 5649 kcpuset_destroy(affinity);
5650 return ENOMEM; 5650 return ENOMEM;
5651} 5651}
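
With N queues, the function above consumes N + 1 MSI-X vectors: one per TX/RX queue pair, plus a trailing link vector that keeps the default affinity. An illustrative layout, assuming four queues on an eight-CPU machine (so sc_affinity_offset is 1) and a device attached as wm0; the real table is of course built at runtime:

	/*
	 * vector 0: "wm0TXRX0" -> wm_txrxintr_msix, affinity CPU#1
	 * vector 1: "wm0TXRX1" -> wm_txrxintr_msix, affinity CPU#2
	 * vector 2: "wm0TXRX2" -> wm_txrxintr_msix, affinity CPU#3
	 * vector 3: "wm0TXRX3" -> wm_txrxintr_msix, affinity CPU#4
	 * vector 4: "wm0LINK"  -> wm_linkintr_msix, default affinity
	 */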
5652 5652
5653static void 5653static void
5654wm_unset_stopping_flags(struct wm_softc *sc) 5654wm_unset_stopping_flags(struct wm_softc *sc)
5655{ 5655{
5656 int i; 5656 int i;
5657 5657
5658 KASSERT(WM_CORE_LOCKED(sc)); 5658 KASSERT(WM_CORE_LOCKED(sc));
5659 5659
5660 /* Must unset stopping flags in ascending order. */ 5660 /* Must unset stopping flags in ascending order. */
5661 for (i = 0; i < sc->sc_nqueues; i++) { 5661 for (i = 0; i < sc->sc_nqueues; i++) {
5662 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq; 5662 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5663 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq; 5663 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5664 5664
5665 mutex_enter(txq->txq_lock); 5665 mutex_enter(txq->txq_lock);
5666 txq->txq_stopping = false; 5666 txq->txq_stopping = false;
5667 mutex_exit(txq->txq_lock); 5667 mutex_exit(txq->txq_lock);
5668 5668
5669 mutex_enter(rxq->rxq_lock); 5669 mutex_enter(rxq->rxq_lock);
5670 rxq->rxq_stopping = false; 5670 rxq->rxq_stopping = false;
5671 mutex_exit(rxq->rxq_lock); 5671 mutex_exit(rxq->rxq_lock);
5672 } 5672 }
5673 5673
5674 sc->sc_core_stopping = false; 5674 sc->sc_core_stopping = false;
5675} 5675}
5676 5676
5677static void 5677static void
5678wm_set_stopping_flags(struct wm_softc *sc) 5678wm_set_stopping_flags(struct wm_softc *sc)
5679{ 5679{
5680 int i; 5680 int i;
5681 5681
5682 KASSERT(WM_CORE_LOCKED(sc)); 5682 KASSERT(WM_CORE_LOCKED(sc));
5683 5683
5684 sc->sc_core_stopping = true; 5684 sc->sc_core_stopping = true;
5685 5685
5686 /* Must set stopping flags in ascending order. */ 5686 /* Must set stopping flags in ascending order. */
5687 for (i = 0; i < sc->sc_nqueues; i++) { 5687 for (i = 0; i < sc->sc_nqueues; i++) {
5688 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq; 5688 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5689 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq; 5689 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5690 5690
5691 mutex_enter(rxq->rxq_lock); 5691 mutex_enter(rxq->rxq_lock);
5692 rxq->rxq_stopping = true; 5692 rxq->rxq_stopping = true;
5693 mutex_exit(rxq->rxq_lock); 5693 mutex_exit(rxq->rxq_lock);
5694 5694
5695 mutex_enter(txq->txq_lock); 5695 mutex_enter(txq->txq_lock);
5696 txq->txq_stopping = true; 5696 txq->txq_stopping = true;
5697 mutex_exit(txq->txq_lock); 5697 mutex_exit(txq->txq_lock);
5698 } 5698 }
5699} 5699}
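
These flags are read under the matching queue lock before touching the hardware. A minimal consumer sketch (the function name is hypothetical, but the check mirrors the pattern the driver's queue handlers use):

	/* Hypothetical consumer: bail out once teardown has begun. */
	static void
	example_handle_txq(struct wm_txqueue *txq)
	{

		mutex_enter(txq->txq_lock);
		if (txq->txq_stopping) {
			/* wm_set_stopping_flags() won the race. */
			mutex_exit(txq->txq_lock);
			return;
		}
		/* ... safe to touch the TX ring here ... */
		mutex_exit(txq->txq_lock);
	}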
5700 5700
5701/* 5701/*
5702 * Write interrupt interval value to ITR or EITR 5702 * Write interrupt interval value to ITR or EITR
5703 */ 5703 */
5704static void 5704static void
5705wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq) 5705wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
5706{ 5706{
5707 5707
5708 if (!wmq->wmq_set_itr) 5708 if (!wmq->wmq_set_itr)
5709 return; 5709 return;
5710 5710
5711 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { 5711 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5712 uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK); 5712 uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
5713 5713
5714 /* 5714 /*
5715 * 82575 doesn't have CNT_INGR field. 5715 * 82575 doesn't have CNT_INGR field.
5716 * So, overwrite counter field by software. 5716 * So, overwrite counter field by software.
5717 */ 5717 */
5718 if (sc->sc_type == WM_T_82575) 5718 if (sc->sc_type == WM_T_82575)
5719 eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575); 5719 eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
5720 else 5720 else
5721 eitr |= EITR_CNT_INGR; 5721 eitr |= EITR_CNT_INGR;
5722 5722
5723 CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr); 5723 CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
5724 } else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) { 5724 } else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
5725 /* 5725 /*
 5726 * 82574 has both ITR and EITR. Set EITR when we use 5726 * 82574 has both ITR and EITR. Set EITR when we use
 5727 * the multiqueue function with MSI-X. 5727 * the multiqueue function with MSI-X.
5728 */ 5728 */
5729 CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx), 5729 CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
5730 wmq->wmq_itr & EITR_ITR_INT_MASK_82574); 5730 wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
5731 } else { 5731 } else {
5732 KASSERT(wmq->wmq_id == 0); 5732 KASSERT(wmq->wmq_id == 0);
5733 CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr); 5733 CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
5734 } 5734 }
5735 5735
5736 wmq->wmq_set_itr = false; 5736 wmq->wmq_set_itr = false;
5737} 5737}
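
wm_itrs_writereg() is the second half of a two-phase protocol with wm_itrs_calculate() below: the calculation only latches wmq_itr and raises wmq_set_itr, and this function performs the single deferred register write. A sketch of the protocol (not the driver's literal interrupt-handler sequence):

	wm_itrs_calculate(sc, wmq);	/* may latch a new wmq_itr */
	/* ... wm_txeof()/wm_rxeof() descriptor processing ... */
	wm_itrs_writereg(sc, wmq);	/* no-op unless wmq_set_itr is set */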
5738 5738
5739/* 5739/*
5740 * TODO 5740 * TODO
 5741 * The dynamic ITR calculation below is almost the same as Linux igb's; 5741 * The dynamic ITR calculation below is almost the same as Linux igb's;
 5742 * however, it does not fit wm(4), so AIM stays disabled until we 5742 * however, it does not fit wm(4), so AIM stays disabled until we
 5743 * find an appropriate ITR calculation. 5743 * find an appropriate ITR calculation.
5744 */ 5744 */
5745/* 5745/*
 5746 * Calculate the interrupt interval value to be written to the register 5746 * Calculate the interrupt interval value to be written to the register
 5747 * by wm_itrs_writereg(). This function does not write ITR/EITR itself. 5747 * by wm_itrs_writereg(). This function does not write ITR/EITR itself.
5748 */ 5748 */
5749static void 5749static void
5750wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq) 5750wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
5751{ 5751{
5752#ifdef NOTYET 5752#ifdef NOTYET
5753 struct wm_rxqueue *rxq = &wmq->wmq_rxq; 5753 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
5754 struct wm_txqueue *txq = &wmq->wmq_txq; 5754 struct wm_txqueue *txq = &wmq->wmq_txq;
5755 uint32_t avg_size = 0; 5755 uint32_t avg_size = 0;
5756 uint32_t new_itr; 5756 uint32_t new_itr;
5757 5757
5758 if (rxq->rxq_packets) 5758 if (rxq->rxq_packets)
5759 avg_size = rxq->rxq_bytes / rxq->rxq_packets; 5759 avg_size = rxq->rxq_bytes / rxq->rxq_packets;
5760 if (txq->txq_packets) 5760 if (txq->txq_packets)
5761 avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets); 5761 avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
5762 5762
5763 if (avg_size == 0) { 5763 if (avg_size == 0) {
5764 new_itr = 450; /* restore default value */ 5764 new_itr = 450; /* restore default value */
5765 goto out; 5765 goto out;
5766 } 5766 }
5767 5767
5768 /* Add 24 bytes to size to account for CRC, preamble, and gap */ 5768 /* Add 24 bytes to size to account for CRC, preamble, and gap */
5769 avg_size += 24; 5769 avg_size += 24;
5770 5770
5771 /* Don't starve jumbo frames */ 5771 /* Don't starve jumbo frames */
5772 avg_size = uimin(avg_size, 3000); 5772 avg_size = uimin(avg_size, 3000);
5773 5773
5774 /* Give a little boost to mid-size frames */ 5774 /* Give a little boost to mid-size frames */
5775 if ((avg_size > 300) && (avg_size < 1200)) 5775 if ((avg_size > 300) && (avg_size < 1200))
5776 new_itr = avg_size / 3; 5776 new_itr = avg_size / 3;
5777 else 5777 else
5778 new_itr = avg_size / 2; 5778 new_itr = avg_size / 2;
5779 5779
5780out: 5780out:
5781 /* 5781 /*
 5782 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE 5782 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
5783 * controllers. See sc->sc_itr_init setting in wm_init_locked(). 5783 * controllers. See sc->sc_itr_init setting in wm_init_locked().
5784 */ 5784 */
5785 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575) 5785 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
5786 new_itr *= 4; 5786 new_itr *= 4;
5787 5787
5788 if (new_itr != wmq->wmq_itr) { 5788 if (new_itr != wmq->wmq_itr) {
5789 wmq->wmq_itr = new_itr; 5789 wmq->wmq_itr = new_itr;
5790 wmq->wmq_set_itr = true; 5790 wmq->wmq_set_itr = true;
5791 } else 5791 } else
5792 wmq->wmq_set_itr = false; 5792 wmq->wmq_set_itr = false;
5793 5793
5794 rxq->rxq_packets = 0; 5794 rxq->rxq_packets = 0;
5795 rxq->rxq_bytes = 0; 5795 rxq->rxq_bytes = 0;
5796 txq->txq_packets = 0; 5796 txq->txq_packets = 0;
5797 txq->txq_bytes = 0; 5797 txq->txq_bytes = 0;
5798#endif 5798#endif
5799} 5799}
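
Although the block is compiled out (NOTYET), the arithmetic deserves a worked example. Assuming an average packet size of 576 bytes:

	/*
	 * avg_size = 576 + 24 = 600	(CRC/preamble/gap adjustment)
	 * 300 < 600 < 1200		-> mid-size boost branch
	 * new_itr = 600 / 3 = 200
	 * any chip but the 82575	-> new_itr = 200 * 4 = 800
	 */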
5800 5800
5801static void 5801static void
5802wm_init_sysctls(struct wm_softc *sc) 5802wm_init_sysctls(struct wm_softc *sc)
5803{ 5803{
5804 struct sysctllog **log; 5804 struct sysctllog **log;
5805 const struct sysctlnode *rnode, *cnode; 5805 const struct sysctlnode *rnode, *cnode;
5806 int rv; 5806 int rv;
5807 const char *dvname; 5807 const char *dvname;
5808 5808
5809 log = &sc->sc_sysctllog; 5809 log = &sc->sc_sysctllog;
5810 dvname = device_xname(sc->sc_dev); 5810 dvname = device_xname(sc->sc_dev);
5811 5811
5812 rv = sysctl_createv(log, 0, NULL, &rnode, 5812 rv = sysctl_createv(log, 0, NULL, &rnode,
5813 0, CTLTYPE_NODE, dvname, 5813 0, CTLTYPE_NODE, dvname,
5814 SYSCTL_DESCR("wm information and settings"), 5814 SYSCTL_DESCR("wm information and settings"),
5815 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL); 5815 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
5816 if (rv != 0) 5816 if (rv != 0)
5817 goto err; 5817 goto err;
5818 5818
5819 rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE, 5819 rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
5820 CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"), 5820 CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
5821 NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL); 5821 NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
5822 if (rv != 0) 5822 if (rv != 0)
5823 goto teardown; 5823 goto teardown;
5824 5824
5825 return; 5825 return;
5826 5826
5827teardown: 5827teardown:
5828 sysctl_teardown(log); 5828 sysctl_teardown(log);
5829err: 5829err:
5830 sc->sc_sysctllog = NULL; 5830 sc->sc_sysctllog = NULL;
5831 device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n", 5831 device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
5832 __func__, rv); 5832 __func__, rv);
5833} 5833}
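
The node is created under CTL_HW with the device's name as the first component, so, assuming the interface attached as wm0, the knob above should be reachable at runtime as hw.wm0.txrx_workqueue (e.g. "sysctl -w hw.wm0.txrx_workqueue=1").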
5834 5834
5835/* 5835/*
5836 * wm_init: [ifnet interface function] 5836 * wm_init: [ifnet interface function]
5837 * 5837 *
5838 * Initialize the interface. 5838 * Initialize the interface.
5839 */ 5839 */
5840static int 5840static int
5841wm_init(struct ifnet *ifp) 5841wm_init(struct ifnet *ifp)
5842{ 5842{
5843 struct wm_softc *sc = ifp->if_softc; 5843 struct wm_softc *sc = ifp->if_softc;
5844 int ret; 5844 int ret;
5845 5845
5846 WM_CORE_LOCK(sc); 5846 WM_CORE_LOCK(sc);
5847 ret = wm_init_locked(ifp); 5847 ret = wm_init_locked(ifp);
5848 WM_CORE_UNLOCK(sc); 5848 WM_CORE_UNLOCK(sc);
5849 5849
5850 return ret; 5850 return ret;
5851} 5851}
5852 5852
5853static int 5853static int
5854wm_init_locked(struct ifnet *ifp) 5854wm_init_locked(struct ifnet *ifp)
5855{ 5855{
5856 struct wm_softc *sc = ifp->if_softc; 5856 struct wm_softc *sc = ifp->if_softc;
5857 struct ethercom *ec = &sc->sc_ethercom; 5857 struct ethercom *ec = &sc->sc_ethercom;
5858 int i, j, trynum, error = 0; 5858 int i, j, trynum, error = 0;
5859 uint32_t reg, sfp_mask = 0; 5859 uint32_t reg, sfp_mask = 0;
5860 5860
5861 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", 5861 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
5862 device_xname(sc->sc_dev), __func__)); 5862 device_xname(sc->sc_dev), __func__));
5863 KASSERT(WM_CORE_LOCKED(sc)); 5863 KASSERT(WM_CORE_LOCKED(sc));
5864 5864
5865 /* 5865 /*
 5866 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set. 5866 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
 5867 * There is a small but measurable benefit to avoiding the adjustment 5867 * There is a small but measurable benefit to avoiding the adjustment
5868 * of the descriptor so that the headers are aligned, for normal mtu, 5868 * of the descriptor so that the headers are aligned, for normal mtu,
5869 * on such platforms. One possibility is that the DMA itself is 5869 * on such platforms. One possibility is that the DMA itself is
5870 * slightly more efficient if the front of the entire packet (instead 5870 * slightly more efficient if the front of the entire packet (instead
5871 * of the front of the headers) is aligned. 5871 * of the front of the headers) is aligned.
5872 * 5872 *
5873 * Note we must always set align_tweak to 0 if we are using 5873 * Note we must always set align_tweak to 0 if we are using
5874 * jumbo frames. 5874 * jumbo frames.
5875 */ 5875 */
5876#ifdef __NO_STRICT_ALIGNMENT 5876#ifdef __NO_STRICT_ALIGNMENT
5877 sc->sc_align_tweak = 0; 5877 sc->sc_align_tweak = 0;
5878#else 5878#else
5879 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2)) 5879 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
5880 sc->sc_align_tweak = 0; 5880 sc->sc_align_tweak = 0;
5881 else 5881 else
5882 sc->sc_align_tweak = 2; 5882 sc->sc_align_tweak = 2;
5883#endif /* __NO_STRICT_ALIGNMENT */ 5883#endif /* __NO_STRICT_ALIGNMENT */
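
The value 2 is the usual Ethernet receive-alignment trick; spelled out (an explanatory note, not from the source):

	/*
	 * Untweaked, the 14-byte Ethernet header starts at offset 0 and
	 * the IP header lands at offset 14, which is not 4-byte aligned.
	 * Starting the frame at offset 2 moves the IP header to offset
	 * 16.  The tweak costs 2 bytes of cluster space, which is why
	 * the (MCLBYTES - 2) test above falls back to 0 for large MTUs.
	 */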
5884 5884
5885 /* Cancel any pending I/O. */ 5885 /* Cancel any pending I/O. */
5886 wm_stop_locked(ifp, false, false); 5886 wm_stop_locked(ifp, false, false);
5887 5887
5888 /* Update statistics before reset */ 5888 /* Update statistics before reset */
5889 if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC), 5889 if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
5890 if_ierrors, CSR_READ(sc, WMREG_RXERRC)); 5890 if_ierrors, CSR_READ(sc, WMREG_RXERRC));
5891 5891
5892 /* PCH_SPT hardware workaround */ 5892 /* PCH_SPT hardware workaround */
5893 if (sc->sc_type == WM_T_PCH_SPT) 5893 if (sc->sc_type == WM_T_PCH_SPT)
5894 wm_flush_desc_rings(sc); 5894 wm_flush_desc_rings(sc);
5895 5895
5896 /* Reset the chip to a known state. */ 5896 /* Reset the chip to a known state. */
5897 wm_reset(sc); 5897 wm_reset(sc);
5898 5898
5899 /* 5899 /*
5900 * AMT based hardware can now take control from firmware 5900 * AMT based hardware can now take control from firmware
5901 * Do this after reset. 5901 * Do this after reset.
5902 */ 5902 */
5903 if ((sc->sc_flags & WM_F_HAS_AMT) != 0) 5903 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
5904 wm_get_hw_control(sc); 5904 wm_get_hw_control(sc);
5905 5905
5906 if ((sc->sc_type >= WM_T_PCH_SPT) && 5906 if ((sc->sc_type >= WM_T_PCH_SPT) &&
5907 pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX) 5907 pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
5908 wm_legacy_irq_quirk_spt(sc); 5908 wm_legacy_irq_quirk_spt(sc);
5909 5909
5910 /* Init hardware bits */ 5910 /* Init hardware bits */
5911 wm_initialize_hardware_bits(sc); 5911 wm_initialize_hardware_bits(sc);
5912 5912
5913 /* Reset the PHY. */ 5913 /* Reset the PHY. */
5914 if (sc->sc_flags & WM_F_HAS_MII) 5914 if (sc->sc_flags & WM_F_HAS_MII)
5915 wm_gmii_reset(sc); 5915 wm_gmii_reset(sc);
5916 5916
5917 if (sc->sc_type >= WM_T_ICH8) { 5917 if (sc->sc_type >= WM_T_ICH8) {
5918 reg = CSR_READ(sc, WMREG_GCR); 5918 reg = CSR_READ(sc, WMREG_GCR);
5919 /* 5919 /*
5920 * ICH8 No-snoop bits are opposite polarity. Set to snoop by 5920 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
5921 * default after reset. 5921 * default after reset.
5922 */ 5922 */
5923 if (sc->sc_type == WM_T_ICH8) 5923 if (sc->sc_type == WM_T_ICH8)
5924 reg |= GCR_NO_SNOOP_ALL; 5924 reg |= GCR_NO_SNOOP_ALL;
5925 else 5925 else
5926 reg &= ~GCR_NO_SNOOP_ALL; 5926 reg &= ~GCR_NO_SNOOP_ALL;
5927 CSR_WRITE(sc, WMREG_GCR, reg); 5927 CSR_WRITE(sc, WMREG_GCR, reg);
5928 } 5928 }
5929 5929
5930 if ((sc->sc_type >= WM_T_ICH8) 5930 if ((sc->sc_type >= WM_T_ICH8)
5931 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER) 5931 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
5932 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) { 5932 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
5933 5933
5934 reg = CSR_READ(sc, WMREG_CTRL_EXT); 5934 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5935 reg |= CTRL_EXT_RO_DIS; 5935 reg |= CTRL_EXT_RO_DIS;
5936 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 5936 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5937 } 5937 }
5938 5938
5939 /* Calculate (E)ITR value */ 5939 /* Calculate (E)ITR value */
5940 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) { 5940 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
5941 /* 5941 /*
5942 * For NEWQUEUE's EITR (except for 82575). 5942 * For NEWQUEUE's EITR (except for 82575).
 5943 * 82575's EITR should be set to the same throttling value as other 5943 * 82575's EITR should be set to the same throttling value as other
5944 * old controllers' ITR because the interrupt/sec calculation 5944 * old controllers' ITR because the interrupt/sec calculation
5945 * is the same, that is, 1,000,000,000 / (N * 256). 5945 * is the same, that is, 1,000,000,000 / (N * 256).
5946 * 5946 *
5947 * 82574's EITR should be set to the same throttling value as ITR. 5947 * 82574's EITR should be set to the same throttling value as ITR.
5948 * 5948 *
5949 * For N interrupts/sec, set this value to: 5949 * For N interrupts/sec, set this value to:
5950 * 1,000,000 / N in contrast to the ITR throttling value. 5950 * 1,000,000 / N in contrast to the ITR throttling value.
5951 */ 5951 */
5952 sc->sc_itr_init = 450; 5952 sc->sc_itr_init = 450;
5953 } else if (sc->sc_type >= WM_T_82543) { 5953 } else if (sc->sc_type >= WM_T_82543) {
5954 /* 5954 /*
5955 * Set up the interrupt throttling register (units of 256ns) 5955 * Set up the interrupt throttling register (units of 256ns)
5956 * Note that a footnote in Intel's documentation says this 5956 * Note that a footnote in Intel's documentation says this
5957 * ticker runs at 1/4 the rate when the chip is in 100Mbit 5957 * ticker runs at 1/4 the rate when the chip is in 100Mbit
5958 * or 10Mbit mode. Empirically, it appears to be the case 5958 * or 10Mbit mode. Empirically, it appears to be the case
5959 * that that is also true for the 1024ns units of the other 5959 * that that is also true for the 1024ns units of the other
5960 * interrupt-related timer registers -- so, really, we ought 5960 * interrupt-related timer registers -- so, really, we ought
5961 * to divide this value by 4 when the link speed is low. 5961 * to divide this value by 4 when the link speed is low.
5962 * 5962 *
5963 * XXX implement this division at link speed change! 5963 * XXX implement this division at link speed change!
5964 */ 5964 */
5965 5965
5966 /* 5966 /*
5967 * For N interrupts/sec, set this value to: 5967 * For N interrupts/sec, set this value to:
5968 * 1,000,000,000 / (N * 256). Note that we set the 5968 * 1,000,000,000 / (N * 256). Note that we set the
5969 * absolute and packet timer values to this value 5969 * absolute and packet timer values to this value
5970 * divided by 4 to get "simple timer" behavior. 5970 * divided by 4 to get "simple timer" behavior.
5971 */ 5971 */
5972 sc->sc_itr_init = 1500; /* 2604 ints/sec */ 5972 sc->sc_itr_init = 1500; /* 2604 ints/sec */
5973 } 5973 }
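
[Editor's note: as a worked check of the two formulas quoted in the comments above, here is an illustrative sketch; the helper names itr_rate/eitr_rate are ours, not part of if_wm.c. They invert sc_itr_init back into interrupts/sec.]

/*
 * Illustrative helpers only -- not driver code.
 */
static inline unsigned int
itr_rate(unsigned int itr)	/* legacy ITR, 256 ns units */
{
	/* value = 1,000,000,000 / (N * 256)  =>  N = 1e9 / (value * 256) */
	return 1000000000U / (itr * 256U);	/* 1500 -> ~2604 ints/sec */
}

static inline unsigned int
eitr_rate(unsigned int eitr)	/* NEWQUEUE-style EITR */
{
	/* value = 1,000,000 / N  =>  N = 1e6 / value */
	return 1000000U / eitr;			/* 450 -> ~2222 ints/sec */
}
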
5974 5974
5975 error = wm_init_txrx_queues(sc); 5975 error = wm_init_txrx_queues(sc);
5976 if (error) 5976 if (error)
5977 goto out; 5977 goto out;
5978 5978
5979 if (((sc->sc_flags & WM_F_SGMII) == 0) && 5979 if (((sc->sc_flags & WM_F_SGMII) == 0) &&
5980 (sc->sc_mediatype == WM_MEDIATYPE_SERDES) && 5980 (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
5981 (sc->sc_type >= WM_T_82575)) 5981 (sc->sc_type >= WM_T_82575))
5982 wm_serdes_power_up_link_82575(sc); 5982 wm_serdes_power_up_link_82575(sc);
5983 5983
5984 /* Clear out the VLAN table -- we don't use it (yet). */ 5984 /* Clear out the VLAN table -- we don't use it (yet). */
5985 CSR_WRITE(sc, WMREG_VET, 0); 5985 CSR_WRITE(sc, WMREG_VET, 0);
5986 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) 5986 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
5987 trynum = 10; /* Due to hw errata */ 5987 trynum = 10; /* Due to hw errata */
5988 else 5988 else
5989 trynum = 1; 5989 trynum = 1;
5990 for (i = 0; i < WM_VLAN_TABSIZE; i++) 5990 for (i = 0; i < WM_VLAN_TABSIZE; i++)
5991 for (j = 0; j < trynum; j++) 5991 for (j = 0; j < trynum; j++)
5992 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0); 5992 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
5993 5993
5994 /* 5994 /*
5995 * Set up flow-control parameters. 5995 * Set up flow-control parameters.
5996 * 5996 *
5997 * XXX Values could probably stand some tuning. 5997 * XXX Values could probably stand some tuning.
5998 */ 5998 */
5999 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9) 5999 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
6000 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH) 6000 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
6001 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT) 6001 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
6002 && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){ 6002 && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
6003 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST); 6003 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
6004 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST); 6004 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
6005 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL); 6005 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
6006 } 6006 }
6007 6007
6008 sc->sc_fcrtl = FCRTL_DFLT; 6008 sc->sc_fcrtl = FCRTL_DFLT;
6009 if (sc->sc_type < WM_T_82543) { 6009 if (sc->sc_type < WM_T_82543) {
6010 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT); 6010 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
6011 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl); 6011 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
6012 } else { 6012 } else {
6013 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT); 6013 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
6014 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl); 6014 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
6015 } 6015 }
6016 6016
6017 if (sc->sc_type == WM_T_80003) 6017 if (sc->sc_type == WM_T_80003)
6018 CSR_WRITE(sc, WMREG_FCTTV, 0xffff); 6018 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
6019 else 6019 else
6020 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT); 6020 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
6021 6021
6022 /* Writes the control register. */ 6022 /* Writes the control register. */
6023 wm_set_vlan(sc); 6023 wm_set_vlan(sc);
6024 6024
6025 if (sc->sc_flags & WM_F_HAS_MII) { 6025 if (sc->sc_flags & WM_F_HAS_MII) {
6026 uint16_t kmreg; 6026 uint16_t kmreg;
6027 6027
6028 switch (sc->sc_type) { 6028 switch (sc->sc_type) {
6029 case WM_T_80003: 6029 case WM_T_80003:
6030 case WM_T_ICH8: 6030 case WM_T_ICH8:
6031 case WM_T_ICH9: 6031 case WM_T_ICH9:
6032 case WM_T_ICH10: 6032 case WM_T_ICH10:
6033 case WM_T_PCH: 6033 case WM_T_PCH:
6034 case WM_T_PCH2: 6034 case WM_T_PCH2:
6035 case WM_T_PCH_LPT: 6035 case WM_T_PCH_LPT:
6036 case WM_T_PCH_SPT: 6036 case WM_T_PCH_SPT:
6037 case WM_T_PCH_CNP: 6037 case WM_T_PCH_CNP:
6038 /* 6038 /*
6039 * Set the mac to wait the maximum time between each 6039 * Set the mac to wait the maximum time between each
6040 * iteration and increase the max iterations when 6040 * iteration and increase the max iterations when
6041 * polling the phy; this fixes erroneous timeouts at 6041 * polling the phy; this fixes erroneous timeouts at
6042 * 10Mbps. 6042 * 10Mbps.
6043 */ 6043 */
6044 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS, 6044 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
6045 0xFFFF); 6045 0xFFFF);
6046 wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM, 6046 wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
6047 &kmreg); 6047 &kmreg);
6048 kmreg |= 0x3F; 6048 kmreg |= 0x3F;
6049 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM, 6049 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
6050 kmreg); 6050 kmreg);
6051 break; 6051 break;
6052 default: 6052 default:
6053 break; 6053 break;
6054 } 6054 }
6055 6055
6056 if (sc->sc_type == WM_T_80003) { 6056 if (sc->sc_type == WM_T_80003) {
6057 reg = CSR_READ(sc, WMREG_CTRL_EXT); 6057 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6058 reg &= ~CTRL_EXT_LINK_MODE_MASK; 6058 reg &= ~CTRL_EXT_LINK_MODE_MASK;
6059 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 6059 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6060 6060
6061 /* Bypass RX and TX FIFO's */ 6061 /* Bypass RX and TX FIFO's */
6062 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL, 6062 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
6063 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS 6063 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
6064 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS); 6064 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
6065 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL, 6065 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
6066 KUMCTRLSTA_INB_CTRL_DIS_PADDING | 6066 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
6067 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT); 6067 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
6068 } 6068 }
6069 } 6069 }
6070#if 0 6070#if 0
6071 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext); 6071 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
6072#endif 6072#endif
6073 6073
6074 /* Set up checksum offload parameters. */ 6074 /* Set up checksum offload parameters. */
6075 reg = CSR_READ(sc, WMREG_RXCSUM); 6075 reg = CSR_READ(sc, WMREG_RXCSUM);
6076 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL); 6076 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
6077 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) 6077 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
6078 reg |= RXCSUM_IPOFL; 6078 reg |= RXCSUM_IPOFL;
6079 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx)) 6079 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
6080 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL; 6080 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
6081 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx)) 6081 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
6082 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL; 6082 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
6083 CSR_WRITE(sc, WMREG_RXCSUM, reg); 6083 CSR_WRITE(sc, WMREG_RXCSUM, reg);
6084 6084
6085 /* Set registers about MSI-X */ 6085 /* Set registers about MSI-X */
6086 if (wm_is_using_msix(sc)) { 6086 if (wm_is_using_msix(sc)) {
6087 uint32_t ivar, qintr_idx; 6087 uint32_t ivar, qintr_idx;
6088 struct wm_queue *wmq; 6088 struct wm_queue *wmq;
6089 unsigned int qid; 6089 unsigned int qid;
6090 6090
6091 if (sc->sc_type == WM_T_82575) { 6091 if (sc->sc_type == WM_T_82575) {
6092 /* Interrupt control */ 6092 /* Interrupt control */
6093 reg = CSR_READ(sc, WMREG_CTRL_EXT); 6093 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6094 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR; 6094 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
6095 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 6095 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6096 6096
6097 /* TX and RX */ 6097 /* TX and RX */
6098 for (i = 0; i < sc->sc_nqueues; i++) { 6098 for (i = 0; i < sc->sc_nqueues; i++) {
6099 wmq = &sc->sc_queue[i]; 6099 wmq = &sc->sc_queue[i];
6100 CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx), 6100 CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
6101 EITR_TX_QUEUE(wmq->wmq_id) 6101 EITR_TX_QUEUE(wmq->wmq_id)
6102 | EITR_RX_QUEUE(wmq->wmq_id)); 6102 | EITR_RX_QUEUE(wmq->wmq_id));
6103 } 6103 }
6104 /* Link status */ 6104 /* Link status */
6105 CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx), 6105 CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
6106 EITR_OTHER); 6106 EITR_OTHER);
6107 } else if (sc->sc_type == WM_T_82574) { 6107 } else if (sc->sc_type == WM_T_82574) {
6108 /* Interrupt control */ 6108 /* Interrupt control */
6109 reg = CSR_READ(sc, WMREG_CTRL_EXT); 6109 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6110 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME; 6110 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
6111 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 6111 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6112 6112
6113 /* 6113 /*
6114 * Workaround issue with spurious interrupts 6114 * Workaround issue with spurious interrupts
6115 * in MSI-X mode. 6115 * in MSI-X mode.
6116 * At wm_initialize_hardware_bits(), sc_nintrs has not been 6116 * At wm_initialize_hardware_bits(), sc_nintrs has not been
6117 * initialized yet, so re-initialize WMREG_RFCTL here. 6117 * initialized yet, so re-initialize WMREG_RFCTL here.
6118 */ 6118 */
6119 reg = CSR_READ(sc, WMREG_RFCTL); 6119 reg = CSR_READ(sc, WMREG_RFCTL);
6120 reg |= WMREG_RFCTL_ACKDIS; 6120 reg |= WMREG_RFCTL_ACKDIS;
6121 CSR_WRITE(sc, WMREG_RFCTL, reg); 6121 CSR_WRITE(sc, WMREG_RFCTL, reg);
6122 6122
6123 ivar = 0; 6123 ivar = 0;
6124 /* TX and RX */ 6124 /* TX and RX */
6125 for (i = 0; i < sc->sc_nqueues; i++) { 6125 for (i = 0; i < sc->sc_nqueues; i++) {
6126 wmq = &sc->sc_queue[i]; 6126 wmq = &sc->sc_queue[i];
6127 qid = wmq->wmq_id; 6127 qid = wmq->wmq_id;
6128 qintr_idx = wmq->wmq_intr_idx; 6128 qintr_idx = wmq->wmq_intr_idx;
6129 6129
6130 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx), 6130 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
6131 IVAR_TX_MASK_Q_82574(qid)); 6131 IVAR_TX_MASK_Q_82574(qid));
6132 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx), 6132 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
6133 IVAR_RX_MASK_Q_82574(qid)); 6133 IVAR_RX_MASK_Q_82574(qid));
6134 } 6134 }
6135 /* Link status */ 6135 /* Link status */
6136 ivar |= __SHIFTIN((IVAR_VALID_82574 6136 ivar |= __SHIFTIN((IVAR_VALID_82574
6137 | sc->sc_link_intr_idx), IVAR_OTHER_MASK); 6137 | sc->sc_link_intr_idx), IVAR_OTHER_MASK);
6138 CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB); 6138 CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
6139 } else { 6139 } else {
6140 /* Interrupt control */ 6140 /* Interrupt control */
6141 CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX 6141 CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
6142 | GPIE_EIAME | GPIE_PBA); 6142 | GPIE_EIAME | GPIE_PBA);
6143 6143
6144 switch (sc->sc_type) { 6144 switch (sc->sc_type) {
6145 case WM_T_82580: 6145 case WM_T_82580:
6146 case WM_T_I350: 6146 case WM_T_I350:
6147 case WM_T_I354: 6147 case WM_T_I354:
6148 case WM_T_I210: 6148 case WM_T_I210:
6149 case WM_T_I211: 6149 case WM_T_I211:
6150 /* TX and RX */ 6150 /* TX and RX */
6151 for (i = 0; i < sc->sc_nqueues; i++) { 6151 for (i = 0; i < sc->sc_nqueues; i++) {
6152 wmq = &sc->sc_queue[i]; 6152 wmq = &sc->sc_queue[i];
6153 qid = wmq->wmq_id; 6153 qid = wmq->wmq_id;
6154 qintr_idx = wmq->wmq_intr_idx; 6154 qintr_idx = wmq->wmq_intr_idx;
6155 6155
6156 ivar = CSR_READ(sc, WMREG_IVAR_Q(qid)); 6156 ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
6157 ivar &= ~IVAR_TX_MASK_Q(qid); 6157 ivar &= ~IVAR_TX_MASK_Q(qid);
6158 ivar |= __SHIFTIN((qintr_idx 6158 ivar |= __SHIFTIN((qintr_idx
6159 | IVAR_VALID), 6159 | IVAR_VALID),
6160 IVAR_TX_MASK_Q(qid)); 6160 IVAR_TX_MASK_Q(qid));
6161 ivar &= ~IVAR_RX_MASK_Q(qid); 6161 ivar &= ~IVAR_RX_MASK_Q(qid);
6162 ivar |= __SHIFTIN((qintr_idx 6162 ivar |= __SHIFTIN((qintr_idx
6163 | IVAR_VALID), 6163 | IVAR_VALID),
6164 IVAR_RX_MASK_Q(qid)); 6164 IVAR_RX_MASK_Q(qid));
6165 CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar); 6165 CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
6166 } 6166 }
6167 break; 6167 break;
6168 case WM_T_82576: 6168 case WM_T_82576:
6169 /* TX and RX */ 6169 /* TX and RX */
6170 for (i = 0; i < sc->sc_nqueues; i++) { 6170 for (i = 0; i < sc->sc_nqueues; i++) {
6171 wmq = &sc->sc_queue[i]; 6171 wmq = &sc->sc_queue[i];
6172 qid = wmq->wmq_id; 6172 qid = wmq->wmq_id;
6173 qintr_idx = wmq->wmq_intr_idx; 6173 qintr_idx = wmq->wmq_intr_idx;
6174 6174
6175 ivar = CSR_READ(sc, 6175 ivar = CSR_READ(sc,
6176 WMREG_IVAR_Q_82576(qid)); 6176 WMREG_IVAR_Q_82576(qid));
6177 ivar &= ~IVAR_TX_MASK_Q_82576(qid); 6177 ivar &= ~IVAR_TX_MASK_Q_82576(qid);
6178 ivar |= __SHIFTIN((qintr_idx 6178 ivar |= __SHIFTIN((qintr_idx
6179 | IVAR_VALID), 6179 | IVAR_VALID),
6180 IVAR_TX_MASK_Q_82576(qid)); 6180 IVAR_TX_MASK_Q_82576(qid));
6181 ivar &= ~IVAR_RX_MASK_Q_82576(qid); 6181 ivar &= ~IVAR_RX_MASK_Q_82576(qid);
6182 ivar |= __SHIFTIN((qintr_idx 6182 ivar |= __SHIFTIN((qintr_idx
6183 | IVAR_VALID), 6183 | IVAR_VALID),
6184 IVAR_RX_MASK_Q_82576(qid)); 6184 IVAR_RX_MASK_Q_82576(qid));
6185 CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid), 6185 CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
6186 ivar); 6186 ivar);
6187 } 6187 }
6188 break; 6188 break;
6189 default: 6189 default:
6190 break; 6190 break;
6191 } 6191 }
6192 6192
6193 /* Link status */ 6193 /* Link status */
6194 ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID), 6194 ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
6195 IVAR_MISC_OTHER); 6195 IVAR_MISC_OTHER);
6196 CSR_WRITE(sc, WMREG_IVAR_MISC, ivar); 6196 CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
6197 } 6197 }
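
[Editor's note: the IVAR programming above leans on __SHIFTIN() from NetBSD's <sys/cdefs.h>, which shifts a value up into the register field selected by a mask. A minimal illustration follows; EXAMPLE_FIELD is a hypothetical field, not a real register bit definition.]

#include <sys/cdefs.h>
#include <sys/types.h>

#define EXAMPLE_FIELD	__BITS(15, 8)	/* hypothetical 8-bit field, mask 0xff00 */

/* __SHIFTIN(v, mask) places v into the field selected by mask. */
uint32_t example_reg = __SHIFTIN(0x2a, EXAMPLE_FIELD);	/* == 0x2a00 */
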
6198 6198
6199 if (wm_is_using_multiqueue(sc)) { 6199 if (wm_is_using_multiqueue(sc)) {
6200 wm_init_rss(sc); 6200 wm_init_rss(sc);
6201 6201
6202 /* 6202 /*
6203 ** NOTE: Receive Full-Packet Checksum Offload 6203 ** NOTE: Receive Full-Packet Checksum Offload
6204 ** is mutually exclusive with Multiqueue. However 6204 ** is mutually exclusive with Multiqueue. However
6205 ** this is not the same as TCP/IP checksums which 6205 ** this is not the same as TCP/IP checksums which
6206 ** still work. 6206 ** still work.
6207 */ 6207 */
6208 reg = CSR_READ(sc, WMREG_RXCSUM); 6208 reg = CSR_READ(sc, WMREG_RXCSUM);
6209 reg |= RXCSUM_PCSD; 6209 reg |= RXCSUM_PCSD;
6210 CSR_WRITE(sc, WMREG_RXCSUM, reg); 6210 CSR_WRITE(sc, WMREG_RXCSUM, reg);
6211 } 6211 }
6212 } 6212 }
6213 6213
6214 /* Set up the interrupt registers. */ 6214 /* Set up the interrupt registers. */
6215 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); 6215 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
6216 6216
6217 /* Enable SFP module insertion interrupt if it's required */ 6217 /* Enable SFP module insertion interrupt if it's required */
6218 if ((sc->sc_flags & WM_F_SFP) != 0) { 6218 if ((sc->sc_flags & WM_F_SFP) != 0) {
6219 sc->sc_ctrl |= CTRL_EXTLINK_EN; 6219 sc->sc_ctrl |= CTRL_EXTLINK_EN;
6220 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 6220 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6221 sfp_mask = ICR_GPI(0); 6221 sfp_mask = ICR_GPI(0);
6222 } 6222 }
6223 6223
6224 if (wm_is_using_msix(sc)) { 6224 if (wm_is_using_msix(sc)) {
6225 uint32_t mask; 6225 uint32_t mask;
6226 struct wm_queue *wmq; 6226 struct wm_queue *wmq;
6227 6227
6228 switch (sc->sc_type) { 6228 switch (sc->sc_type) {
6229 case WM_T_82574: 6229 case WM_T_82574:
6230 mask = 0; 6230 mask = 0;
6231 for (i = 0; i < sc->sc_nqueues; i++) { 6231 for (i = 0; i < sc->sc_nqueues; i++) {
6232 wmq = &sc->sc_queue[i]; 6232 wmq = &sc->sc_queue[i];
6233 mask |= ICR_TXQ(wmq->wmq_id); 6233 mask |= ICR_TXQ(wmq->wmq_id);
6234 mask |= ICR_RXQ(wmq->wmq_id); 6234 mask |= ICR_RXQ(wmq->wmq_id);
6235 } 6235 }
6236 mask |= ICR_OTHER; 6236 mask |= ICR_OTHER;
6237 CSR_WRITE(sc, WMREG_EIAC_82574, mask); 6237 CSR_WRITE(sc, WMREG_EIAC_82574, mask);
6238 CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC); 6238 CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
6239 break; 6239 break;
6240 default: 6240 default:
6241 if (sc->sc_type == WM_T_82575) { 6241 if (sc->sc_type == WM_T_82575) {
6242 mask = 0; 6242 mask = 0;
6243 for (i = 0; i < sc->sc_nqueues; i++) { 6243 for (i = 0; i < sc->sc_nqueues; i++) {
6244 wmq = &sc->sc_queue[i]; 6244 wmq = &sc->sc_queue[i];
6245 mask |= EITR_TX_QUEUE(wmq->wmq_id); 6245 mask |= EITR_TX_QUEUE(wmq->wmq_id);
6246 mask |= EITR_RX_QUEUE(wmq->wmq_id); 6246 mask |= EITR_RX_QUEUE(wmq->wmq_id);
6247 } 6247 }
6248 mask |= EITR_OTHER; 6248 mask |= EITR_OTHER;
6249 } else { 6249 } else {
6250 mask = 0; 6250 mask = 0;
6251 for (i = 0; i < sc->sc_nqueues; i++) { 6251 for (i = 0; i < sc->sc_nqueues; i++) {
6252 wmq = &sc->sc_queue[i]; 6252 wmq = &sc->sc_queue[i];
6253 mask |= 1 << wmq->wmq_intr_idx; 6253 mask |= 1 << wmq->wmq_intr_idx;
6254 } 6254 }
6255 mask |= 1 << sc->sc_link_intr_idx; 6255 mask |= 1 << sc->sc_link_intr_idx;
6256 } 6256 }
6257 CSR_WRITE(sc, WMREG_EIAC, mask); 6257 CSR_WRITE(sc, WMREG_EIAC, mask);
6258 CSR_WRITE(sc, WMREG_EIAM, mask); 6258 CSR_WRITE(sc, WMREG_EIAM, mask);
6259 CSR_WRITE(sc, WMREG_EIMS, mask); 6259 CSR_WRITE(sc, WMREG_EIMS, mask);
6260 6260
6261 /* For other interrupts */ 6261 /* For other interrupts */
6262 CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask); 6262 CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
6263 break; 6263 break;
6264 } 6264 }
6265 } else { 6265 } else {
6266 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 | 6266 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
6267 ICR_RXO | ICR_RXT0 | sfp_mask; 6267 ICR_RXO | ICR_RXT0 | sfp_mask;
6268 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr); 6268 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
6269 } 6269 }
6270 6270
6271 /* Set up the inter-packet gap. */ 6271 /* Set up the inter-packet gap. */
6272 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg); 6272 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
6273 6273
6274 if (sc->sc_type >= WM_T_82543) { 6274 if (sc->sc_type >= WM_T_82543) {
6275 for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) { 6275 for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6276 struct wm_queue *wmq = &sc->sc_queue[qidx]; 6276 struct wm_queue *wmq = &sc->sc_queue[qidx];
6277 wm_itrs_writereg(sc, wmq); 6277 wm_itrs_writereg(sc, wmq);
6278 } 6278 }
6279 /* 6279 /*
6280 * Link interrupts occur much less than TX 6280 * Link interrupts occur much less than TX
6281 * interrupts and RX interrupts. So, we don't 6281 * interrupts and RX interrupts. So, we don't
6282 * tune EINTR(WM_MSIX_LINKINTR_IDX) value like 6282 * tune EINTR(WM_MSIX_LINKINTR_IDX) value like
6283 * FreeBSD's if_igb. 6283 * FreeBSD's if_igb.
6284 */ 6284 */
6285 } 6285 }
6286 6286
6287 /* Set the VLAN ethernetype. */ 6287 /* Set the VLAN ethernetype. */
6288 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN); 6288 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
6289 6289
6290 /* 6290 /*
6291 * Set up the transmit control register; we start out with 6291 * Set up the transmit control register; we start out with
6292 * a collision distance suitable for FDX, but update it when 6292 * a collision distance suitable for FDX, but update it when
6293 * we resolve the media type. 6293 * we resolve the media type.
6294 */ 6294 */
6295 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC 6295 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
6296 | TCTL_CT(TX_COLLISION_THRESHOLD) 6296 | TCTL_CT(TX_COLLISION_THRESHOLD)
6297 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX); 6297 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6298 if (sc->sc_type >= WM_T_82571) 6298 if (sc->sc_type >= WM_T_82571)
6299 sc->sc_tctl |= TCTL_MULR; 6299 sc->sc_tctl |= TCTL_MULR;
6300 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); 6300 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6301 6301
6302 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { 6302 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
6303 /* Write TDT after TCTL.EN is set. See the document. */ 6303 /* Write TDT after TCTL.EN is set. See the document. */
6304 CSR_WRITE(sc, WMREG_TDT(0), 0); 6304 CSR_WRITE(sc, WMREG_TDT(0), 0);
6305 } 6305 }
6306 6306
6307 if (sc->sc_type == WM_T_80003) { 6307 if (sc->sc_type == WM_T_80003) {
6308 reg = CSR_READ(sc, WMREG_TCTL_EXT); 6308 reg = CSR_READ(sc, WMREG_TCTL_EXT);
6309 reg &= ~TCTL_EXT_GCEX_MASK; 6309 reg &= ~TCTL_EXT_GCEX_MASK;
6310 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX; 6310 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
6311 CSR_WRITE(sc, WMREG_TCTL_EXT, reg); 6311 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
6312 } 6312 }
6313 6313
6314 /* Set the media. */ 6314 /* Set the media. */
6315 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0) 6315 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
6316 goto out; 6316 goto out;
6317 6317
6318 /* Configure for OS presence */ 6318 /* Configure for OS presence */
6319 wm_init_manageability(sc); 6319 wm_init_manageability(sc);
6320 6320
6321 /* 6321 /*
6322 * Set up the receive control register; we actually program the 6322 * Set up the receive control register; we actually program the
6323 * register when we set the receive filter. Use multicast address 6323 * register when we set the receive filter. Use multicast address
6324 * offset type 0. 6324 * offset type 0.
6325 * 6325 *
6326 * Only the i82544 has the ability to strip the incoming CRC, so we 6326 * Only the i82544 has the ability to strip the incoming CRC, so we
6327 * don't enable that feature. 6327 * don't enable that feature.
6328 */ 6328 */
6329 sc->sc_mchash_type = 0; 6329 sc->sc_mchash_type = 0;
6330 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF 6330 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
6331 | __SHIFTIN(sc->sc_mchash_type, RCTL_MO); 6331 | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
6332 6332
6333 /* The 82574 uses the one-buffer extended Rx descriptor. */ 6333 /* The 82574 uses the one-buffer extended Rx descriptor. */
6334 if (sc->sc_type == WM_T_82574) 6334 if (sc->sc_type == WM_T_82574)
6335 sc->sc_rctl |= RCTL_DTYP_ONEBUF; 6335 sc->sc_rctl |= RCTL_DTYP_ONEBUF;
6336 6336
6337 if ((sc->sc_flags & WM_F_CRC_STRIP) != 0) 6337 if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
6338 sc->sc_rctl |= RCTL_SECRC; 6338 sc->sc_rctl |= RCTL_SECRC;
6339 6339
6340 if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0) 6340 if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
6341 && (ifp->if_mtu > ETHERMTU)) { 6341 && (ifp->if_mtu > ETHERMTU)) {
6342 sc->sc_rctl |= RCTL_LPE; 6342 sc->sc_rctl |= RCTL_LPE;
6343 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 6343 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6344 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO); 6344 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
6345 } 6345 }
6346 6346
6347 if (MCLBYTES == 2048) 6347 if (MCLBYTES == 2048)
6348 sc->sc_rctl |= RCTL_2k; 6348 sc->sc_rctl |= RCTL_2k;
6349 else { 6349 else {
6350 if (sc->sc_type >= WM_T_82543) { 6350 if (sc->sc_type >= WM_T_82543) {
6351 switch (MCLBYTES) { 6351 switch (MCLBYTES) {
6352 case 4096: 6352 case 4096:
6353 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k; 6353 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
6354 break; 6354 break;
6355 case 8192: 6355 case 8192:
6356 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k; 6356 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
6357 break; 6357 break;
6358 case 16384: 6358 case 16384:
6359 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k; 6359 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
6360 break; 6360 break;
6361 default: 6361 default:
6362 panic("wm_init: MCLBYTES %d unsupported", 6362 panic("wm_init: MCLBYTES %d unsupported",
6363 MCLBYTES); 6363 MCLBYTES);
6364 break; 6364 break;
6365 } 6365 }
6366 } else 6366 } else
6367 panic("wm_init: i82542 requires MCLBYTES = 2048"); 6367 panic("wm_init: i82542 requires MCLBYTES = 2048");
6368 } 6368 }
6369 6369
6370 /* Enable ECC */ 6370 /* Enable ECC */
6371 switch (sc->sc_type) { 6371 switch (sc->sc_type) {
6372 case WM_T_82571: 6372 case WM_T_82571:
6373 reg = CSR_READ(sc, WMREG_PBA_ECC); 6373 reg = CSR_READ(sc, WMREG_PBA_ECC);
6374 reg |= PBA_ECC_CORR_EN; 6374 reg |= PBA_ECC_CORR_EN;
6375 CSR_WRITE(sc, WMREG_PBA_ECC, reg); 6375 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
6376 break; 6376 break;
6377 case WM_T_PCH_LPT: 6377 case WM_T_PCH_LPT:
6378 case WM_T_PCH_SPT: 6378 case WM_T_PCH_SPT:
6379 case WM_T_PCH_CNP: 6379 case WM_T_PCH_CNP:
6380 reg = CSR_READ(sc, WMREG_PBECCSTS); 6380 reg = CSR_READ(sc, WMREG_PBECCSTS);
6381 reg |= PBECCSTS_UNCORR_ECC_ENABLE; 6381 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
6382 CSR_WRITE(sc, WMREG_PBECCSTS, reg); 6382 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
6383 6383
6384 sc->sc_ctrl |= CTRL_MEHE; 6384 sc->sc_ctrl |= CTRL_MEHE;
6385 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 6385 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6386 break; 6386 break;
6387 default: 6387 default:
6388 break; 6388 break;
6389 } 6389 }
6390 6390
6391 /* 6391 /*
6392 * Set the receive filter. 6392 * Set the receive filter.
6393 * 6393 *
6394 * For 82575 and 82576, the RX descriptors must be initialized after 6394 * For 82575 and 82576, the RX descriptors must be initialized after
6395 * the setting of RCTL.EN in wm_set_filter() 6395 * the setting of RCTL.EN in wm_set_filter()
6396 */ 6396 */
6397 wm_set_filter(sc); 6397 wm_set_filter(sc);
6398 6398
6399 /* On 575 and later set RDT only if RX enabled */ 6399 /* On 575 and later set RDT only if RX enabled */
6400 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { 6400 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
6401 int qidx; 6401 int qidx;
6402 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) { 6402 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6403 struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq; 6403 struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
6404 for (i = 0; i < WM_NRXDESC; i++) { 6404 for (i = 0; i < WM_NRXDESC; i++) {
6405 mutex_enter(rxq->rxq_lock); 6405 mutex_enter(rxq->rxq_lock);
6406 wm_init_rxdesc(rxq, i); 6406 wm_init_rxdesc(rxq, i);
6407 mutex_exit(rxq->rxq_lock); 6407 mutex_exit(rxq->rxq_lock);
6408 6408
6409 } 6409 }
6410 } 6410 }
6411 } 6411 }
6412 6412
6413 wm_unset_stopping_flags(sc); 6413 wm_unset_stopping_flags(sc);
6414 6414
6415 /* Start the one second link check clock. */ 6415 /* Start the one second link check clock. */
6416 callout_schedule(&sc->sc_tick_ch, hz); 6416 callout_schedule(&sc->sc_tick_ch, hz);
6417 6417
6418 /* ...all done! */ 6418 /* ...all done! */
6419 ifp->if_flags |= IFF_RUNNING; 6419 ifp->if_flags |= IFF_RUNNING;
6420 6420
6421 out: 6421 out:
6422 /* Save last flags for the callback */ 6422 /* Save last flags for the callback */
6423 sc->sc_if_flags = ifp->if_flags; 6423 sc->sc_if_flags = ifp->if_flags;
6424 sc->sc_ec_capenable = ec->ec_capenable; 6424 sc->sc_ec_capenable = ec->ec_capenable;
6425 if (error) 6425 if (error)
6426 log(LOG_ERR, "%s: interface not running\n", 6426 log(LOG_ERR, "%s: interface not running\n",
6427 device_xname(sc->sc_dev)); 6427 device_xname(sc->sc_dev));
6428 return error; 6428 return error;
6429} 6429}
6430 6430
6431/* 6431/*
6432 * wm_stop: [ifnet interface function] 6432 * wm_stop: [ifnet interface function]
6433 * 6433 *
6434 * Stop transmission on the interface. 6434 * Stop transmission on the interface.
6435 */ 6435 */
6436static void 6436static void
6437wm_stop(struct ifnet *ifp, int disable) 6437wm_stop(struct ifnet *ifp, int disable)
6438{ 6438{
6439 struct wm_softc *sc = ifp->if_softc; 6439 struct wm_softc *sc = ifp->if_softc;
6440 6440
6441 ASSERT_SLEEPABLE(); 6441 ASSERT_SLEEPABLE();
6442 6442
6443 WM_CORE_LOCK(sc); 6443 WM_CORE_LOCK(sc);
6444 wm_stop_locked(ifp, disable ? true : false, true); 6444 wm_stop_locked(ifp, disable ? true : false, true);
6445 WM_CORE_UNLOCK(sc); 6445 WM_CORE_UNLOCK(sc);
6446 6446
6447 /* 6447 /*
6448 * After wm_set_stopping_flags(), it is guaranteed 6448 * After wm_set_stopping_flags(), it is guaranteed
6449 * wm_handle_queue_work() does not call workqueue_enqueue(). 6449 * wm_handle_queue_work() does not call workqueue_enqueue().
6450 * However, workqueue_wait() cannot be called in wm_stop_locked() 6450 * However, workqueue_wait() cannot be called in wm_stop_locked()
6451 * because it can sleep, 6451 * because it can sleep,
6452 * so call workqueue_wait() here. 6452 * so call workqueue_wait() here.
6453 */ 6453 */
6454 for (int i = 0; i < sc->sc_nqueues; i++) 6454 for (int i = 0; i < sc->sc_nqueues; i++)
6455 workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie); 6455 workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
6456} 6456}
6457 6457
6458static void 6458static void
6459wm_stop_locked(struct ifnet *ifp, bool disable, bool wait) 6459wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
6460{ 6460{
6461 struct wm_softc *sc = ifp->if_softc; 6461 struct wm_softc *sc = ifp->if_softc;
6462 struct wm_txsoft *txs; 6462 struct wm_txsoft *txs;
6463 int i, qidx; 6463 int i, qidx;
6464 6464
6465 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", 6465 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
6466 device_xname(sc->sc_dev), __func__)); 6466 device_xname(sc->sc_dev), __func__));
6467 KASSERT(WM_CORE_LOCKED(sc)); 6467 KASSERT(WM_CORE_LOCKED(sc));
6468 6468
6469 wm_set_stopping_flags(sc); 6469 wm_set_stopping_flags(sc);
6470 6470
6471 if (sc->sc_flags & WM_F_HAS_MII) { 6471 if (sc->sc_flags & WM_F_HAS_MII) {
6472 /* Down the MII. */ 6472 /* Down the MII. */
6473 mii_down(&sc->sc_mii); 6473 mii_down(&sc->sc_mii);
6474 } else { 6474 } else {
6475#if 0 6475#if 0
6476 /* Should we clear PHY's status properly? */ 6476 /* Should we clear PHY's status properly? */
6477 wm_reset(sc); 6477 wm_reset(sc);
6478#endif 6478#endif
6479 } 6479 }
6480 6480
6481 /* Stop the transmit and receive processes. */ 6481 /* Stop the transmit and receive processes. */
6482 CSR_WRITE(sc, WMREG_TCTL, 0); 6482 CSR_WRITE(sc, WMREG_TCTL, 0);
6483 CSR_WRITE(sc, WMREG_RCTL, 0); 6483 CSR_WRITE(sc, WMREG_RCTL, 0);
6484 sc->sc_rctl &= ~RCTL_EN; 6484 sc->sc_rctl &= ~RCTL_EN;
6485 6485
6486 /* 6486 /*
6487 * Clear the interrupt mask to ensure the device cannot assert its 6487 * Clear the interrupt mask to ensure the device cannot assert its
6488 * interrupt line. 6488 * interrupt line.
6489 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to 6489 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
6490 * service any currently pending or shared interrupt. 6490 * service any currently pending or shared interrupt.
6491 */ 6491 */
6492 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); 6492 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
6493 sc->sc_icr = 0; 6493 sc->sc_icr = 0;
6494 if (wm_is_using_msix(sc)) { 6494 if (wm_is_using_msix(sc)) {
6495 if (sc->sc_type != WM_T_82574) { 6495 if (sc->sc_type != WM_T_82574) {
6496 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU); 6496 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
6497 CSR_WRITE(sc, WMREG_EIAC, 0); 6497 CSR_WRITE(sc, WMREG_EIAC, 0);
6498 } else 6498 } else
6499 CSR_WRITE(sc, WMREG_EIAC_82574, 0); 6499 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
6500 } 6500 }
6501 6501
6502 /* 6502 /*
6503 * Stop callouts after interrupts are disabled; if we have 6503 * Stop callouts after interrupts are disabled; if we have
6504 * to wait for them, we will be releasing the CORE_LOCK 6504 * to wait for them, we will be releasing the CORE_LOCK
6505 * briefly, which will unblock interrupts on the current CPU. 6505 * briefly, which will unblock interrupts on the current CPU.
6506 */ 6506 */
6507 6507
6508 /* Stop the one second clock. */ 6508 /* Stop the one second clock. */
6509 if (wait) 6509 if (wait)
6510 callout_halt(&sc->sc_tick_ch, sc->sc_core_lock); 6510 callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
6511 else 6511 else
6512 callout_stop(&sc->sc_tick_ch); 6512 callout_stop(&sc->sc_tick_ch);
6513 6513
6514 /* Stop the 82547 Tx FIFO stall check timer. */ 6514 /* Stop the 82547 Tx FIFO stall check timer. */
6515 if (sc->sc_type == WM_T_82547) { 6515 if (sc->sc_type == WM_T_82547) {
6516 if (wait) 6516 if (wait)
6517 callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock); 6517 callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
6518 else 6518 else
6519 callout_stop(&sc->sc_txfifo_ch); 6519 callout_stop(&sc->sc_txfifo_ch);
6520 } 6520 }
6521 6521
6522 /* Release any queued transmit buffers. */ 6522 /* Release any queued transmit buffers. */
6523 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) { 6523 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6524 struct wm_queue *wmq = &sc->sc_queue[qidx]; 6524 struct wm_queue *wmq = &sc->sc_queue[qidx];
6525 struct wm_txqueue *txq = &wmq->wmq_txq; 6525 struct wm_txqueue *txq = &wmq->wmq_txq;
 6526 struct mbuf *m;
 6527
6526 mutex_enter(txq->txq_lock); 6528 mutex_enter(txq->txq_lock);
6527 txq->txq_sending = false; /* Ensure watchdog disabled */ 6529 txq->txq_sending = false; /* Ensure watchdog disabled */
6528 for (i = 0; i < WM_TXQUEUELEN(txq); i++) { 6530 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6529 txs = &txq->txq_soft[i]; 6531 txs = &txq->txq_soft[i];
6530 if (txs->txs_mbuf != NULL) { 6532 if (txs->txs_mbuf != NULL) {
6531 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 6533 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
6532 m_freem(txs->txs_mbuf); 6534 m_freem(txs->txs_mbuf);
6533 txs->txs_mbuf = NULL; 6535 txs->txs_mbuf = NULL;
6534 } 6536 }
6535 } 6537 }
 6538 /* Drain txq_interq */
 6539 while ((m = pcq_get(txq->txq_interq)) != NULL)
 6540 m_freem(m);
6536 mutex_exit(txq->txq_lock); 6541 mutex_exit(txq->txq_lock);
6537 } 6542 }
6538 6543
6539 /* Mark the interface as down and cancel the watchdog timer. */ 6544 /* Mark the interface as down and cancel the watchdog timer. */
6540 ifp->if_flags &= ~IFF_RUNNING; 6545 ifp->if_flags &= ~IFF_RUNNING;
6541 6546
6542 if (disable) { 6547 if (disable) {
6543 for (i = 0; i < sc->sc_nqueues; i++) { 6548 for (i = 0; i < sc->sc_nqueues; i++) {
6544 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq; 6549 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6545 mutex_enter(rxq->rxq_lock); 6550 mutex_enter(rxq->rxq_lock);
6546 wm_rxdrain(rxq); 6551 wm_rxdrain(rxq);
6547 mutex_exit(rxq->rxq_lock); 6552 mutex_exit(rxq->rxq_lock);
6548 } 6553 }
6549 } 6554 }
6550 6555
6551#if 0 /* notyet */ 6556#if 0 /* notyet */
6552 if (sc->sc_type >= WM_T_82544) 6557 if (sc->sc_type >= WM_T_82544)
6553 CSR_WRITE(sc, WMREG_WUC, 0); 6558 CSR_WRITE(sc, WMREG_WUC, 0);
6554#endif 6559#endif
6555} 6560}
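
[Editor's note: the drain loop added above pairs with the producer side that fills txq_interq via pcq_put(). A minimal standalone sketch of that pcq(9) pattern follows; example_enqueue/example_drain are hypothetical names, not driver functions.]

#include <sys/errno.h>
#include <sys/mbuf.h>
#include <sys/pcq.h>

/* Producer side: hand an mbuf to the per-queue pcq. */
static int
example_enqueue(pcq_t *q, struct mbuf *m)
{
	if (!pcq_put(q, m)) {
		/* Queue full: caller still owns the mbuf, so free it. */
		m_freem(m);
		return ENOBUFS;
	}
	return 0;
}

/* Consumer/drain side, mirroring the loop in wm_stop_locked(): */
static void
example_drain(pcq_t *q)
{
	struct mbuf *m;

	while ((m = pcq_get(q)) != NULL)
		m_freem(m);
}
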
6556 6561
6557static void 6562static void
6558wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0) 6563wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
6559{ 6564{
6560 struct mbuf *m; 6565 struct mbuf *m;
6561 int i; 6566 int i;
6562 6567
6563 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev)); 6568 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
6564 for (m = m0, i = 0; m != NULL; m = m->m_next, i++) 6569 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
6565 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, " 6570 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
6566 "m_flags = 0x%08x\n", device_xname(sc->sc_dev), 6571 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
6567 m->m_data, m->m_len, m->m_flags); 6572 m->m_data, m->m_len, m->m_flags);
6568 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev), 6573 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
6569 i, i == 1 ? "" : "s"); 6574 i, i == 1 ? "" : "s");
6570} 6575}
6571 6576
6572/* 6577/*
6573 * wm_82547_txfifo_stall: 6578 * wm_82547_txfifo_stall:
6574 * 6579 *
6575 * Callout used to wait for the 82547 Tx FIFO to drain, 6580 * Callout used to wait for the 82547 Tx FIFO to drain,
6576 * reset the FIFO pointers, and restart packet transmission. 6581 * reset the FIFO pointers, and restart packet transmission.
6577 */ 6582 */
6578static void 6583static void
6579wm_82547_txfifo_stall(void *arg) 6584wm_82547_txfifo_stall(void *arg)
6580{ 6585{
6581 struct wm_softc *sc = arg; 6586 struct wm_softc *sc = arg;
6582 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq; 6587 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6583 6588
6584 mutex_enter(txq->txq_lock); 6589 mutex_enter(txq->txq_lock);
6585 6590
6586 if (txq->txq_stopping) 6591 if (txq->txq_stopping)
6587 goto out; 6592 goto out;
6588 6593
6589 if (txq->txq_fifo_stall) { 6594 if (txq->txq_fifo_stall) {
6590 if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) && 6595 if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
6591 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) && 6596 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
6592 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) { 6597 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
6593 /* 6598 /*
6594 * Packets have drained. Stop transmitter, reset 6599 * Packets have drained. Stop transmitter, reset
6595 * FIFO pointers, restart transmitter, and kick 6600 * FIFO pointers, restart transmitter, and kick
6596 * the packet queue. 6601 * the packet queue.
6597 */ 6602 */
6598 uint32_t tctl = CSR_READ(sc, WMREG_TCTL); 6603 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
6599 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN); 6604 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
6600 CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr); 6605 CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
6601 CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr); 6606 CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
6602 CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr); 6607 CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
6603 CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr); 6608 CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
6604 CSR_WRITE(sc, WMREG_TCTL, tctl); 6609 CSR_WRITE(sc, WMREG_TCTL, tctl);
6605 CSR_WRITE_FLUSH(sc); 6610 CSR_WRITE_FLUSH(sc);
6606 6611
6607 txq->txq_fifo_head = 0; 6612 txq->txq_fifo_head = 0;
6608 txq->txq_fifo_stall = 0; 6613 txq->txq_fifo_stall = 0;
6609 wm_start_locked(&sc->sc_ethercom.ec_if); 6614 wm_start_locked(&sc->sc_ethercom.ec_if);
6610 } else { 6615 } else {
6611 /* 6616 /*
6612 * Still waiting for packets to drain; try again in 6617 * Still waiting for packets to drain; try again in
6613 * another tick. 6618 * another tick.
6614 */ 6619 */
6615 callout_schedule(&sc->sc_txfifo_ch, 1); 6620 callout_schedule(&sc->sc_txfifo_ch, 1);
6616 } 6621 }
6617 } 6622 }
6618 6623
6619out: 6624out:
6620 mutex_exit(txq->txq_lock); 6625 mutex_exit(txq->txq_lock);
6621} 6626}
6622 6627
6623/* 6628/*
6624 * wm_82547_txfifo_bugchk: 6629 * wm_82547_txfifo_bugchk:
6625 * 6630 *
6626 * Check for bug condition in the 82547 Tx FIFO. We need to 6631 * Check for bug condition in the 82547 Tx FIFO. We need to
6627 * prevent enqueueing a packet that would wrap around the end 6632 * prevent enqueueing a packet that would wrap around the end
6628 * of the Tx FIFO ring buffer, otherwise the chip will croak. 6633 * of the Tx FIFO ring buffer, otherwise the chip will croak.
6629 * 6634 *
6630 * We do this by checking the amount of space before the end 6635 * We do this by checking the amount of space before the end
6631 * of the Tx FIFO buffer. If the packet will not fit, we "stall" 6636 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
6632 * the Tx FIFO, wait for all remaining packets to drain, reset 6637 * the Tx FIFO, wait for all remaining packets to drain, reset
6633 * the internal FIFO pointers to the beginning, and restart 6638 * the internal FIFO pointers to the beginning, and restart
6634 * transmission on the interface. 6639 * transmission on the interface.
6635 */ 6640 */
6636#define WM_FIFO_HDR 0x10 6641#define WM_FIFO_HDR 0x10
6637#define WM_82547_PAD_LEN 0x3e0 6642#define WM_82547_PAD_LEN 0x3e0
6638static int 6643static int
6639wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0) 6644wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
6640{ 6645{
6641 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq; 6646 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6642 int space = txq->txq_fifo_size - txq->txq_fifo_head; 6647 int space = txq->txq_fifo_size - txq->txq_fifo_head;
6643 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR); 6648 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
6644 6649
6645 /* Just return if already stalled. */ 6650 /* Just return if already stalled. */
6646 if (txq->txq_fifo_stall) 6651 if (txq->txq_fifo_stall)
6647 return 1; 6652 return 1;
6648 6653
6649 if (sc->sc_mii.mii_media_active & IFM_FDX) { 6654 if (sc->sc_mii.mii_media_active & IFM_FDX) {
6650 /* Stall only occurs in half-duplex mode. */ 6655 /* Stall only occurs in half-duplex mode. */
6651 goto send_packet; 6656 goto send_packet;
6652 } 6657 }
6653 6658
6654 if (len >= WM_82547_PAD_LEN + space) { 6659 if (len >= WM_82547_PAD_LEN + space) {
6655 txq->txq_fifo_stall = 1; 6660 txq->txq_fifo_stall = 1;
6656 callout_schedule(&sc->sc_txfifo_ch, 1); 6661 callout_schedule(&sc->sc_txfifo_ch, 1);
6657 return 1; 6662 return 1;
6658 } 6663 }
6659 6664
6660 send_packet: 6665 send_packet:
6661 txq->txq_fifo_head += len; 6666 txq->txq_fifo_head += len;
6662 if (txq->txq_fifo_head >= txq->txq_fifo_size) 6667 if (txq->txq_fifo_head >= txq->txq_fifo_size)
6663 txq->txq_fifo_head -= txq->txq_fifo_size; 6668 txq->txq_fifo_head -= txq->txq_fifo_size;
6664 6669
6665 return 0; 6670 return 0;
6666} 6671}
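
[Editor's note: a worked instance of the check above, with hypothetical FIFO numbers; the real txq_fifo_size comes from chip setup elsewhere in the driver.]

/*
 * Suppose txq_fifo_size = 0x2000 and txq_fifo_head = 0x1e00,
 * so space = 0x200. For a 518-byte packet:
 *   len = roundup(518 + WM_FIFO_HDR, WM_FIFO_HDR)
 *       = roundup(0x216, 0x10) = 0x220
 * Stall test: len >= WM_82547_PAD_LEN + space,
 *   i.e. 0x220 >= 0x3e0 + 0x200 = 0x5e0?  No -> send the packet.
 * Head then advances to 0x1e00 + 0x220 = 0x2020, which is past
 * txq_fifo_size and wraps to 0x0020.
 */
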
6667 6672
6668static int 6673static int
6669wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq) 6674wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
6670{ 6675{
6671 int error; 6676 int error;
6672 6677
6673 /* 6678 /*
6674 * Allocate the control data structures, and create and load the 6679 * Allocate the control data structures, and create and load the
6675 * DMA map for it. 6680 * DMA map for it.
6676 * 6681 *
6677 * NOTE: All Tx descriptors must be in the same 4G segment of 6682 * NOTE: All Tx descriptors must be in the same 4G segment of
6678 * memory. So must Rx descriptors. We simplify by allocating 6683 * memory. So must Rx descriptors. We simplify by allocating
6679 * both sets within the same 4G segment. 6684 * both sets within the same 4G segment.
6680 */ 6685 */
6681 if (sc->sc_type < WM_T_82544) 6686 if (sc->sc_type < WM_T_82544)
6682 WM_NTXDESC(txq) = WM_NTXDESC_82542; 6687 WM_NTXDESC(txq) = WM_NTXDESC_82542;
6683 else 6688 else
6684 WM_NTXDESC(txq) = WM_NTXDESC_82544; 6689 WM_NTXDESC(txq) = WM_NTXDESC_82544;
6685 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 6690 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6686 txq->txq_descsize = sizeof(nq_txdesc_t); 6691 txq->txq_descsize = sizeof(nq_txdesc_t);
6687 else 6692 else
6688 txq->txq_descsize = sizeof(wiseman_txdesc_t); 6693 txq->txq_descsize = sizeof(wiseman_txdesc_t);
6689 6694
6690 if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 6695 if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
6691 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg, 6696 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
6692 1, &txq->txq_desc_rseg, 0)) != 0) { 6697 1, &txq->txq_desc_rseg, 0)) != 0) {
6693 aprint_error_dev(sc->sc_dev, 6698 aprint_error_dev(sc->sc_dev,
6694 "unable to allocate TX control data, error = %d\n", 6699 "unable to allocate TX control data, error = %d\n",
6695 error); 6700 error);
6696 goto fail_0; 6701 goto fail_0;
6697 } 6702 }
6698 6703
6699 if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg, 6704 if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
6700 txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq), 6705 txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
6701 (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) { 6706 (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
6702 aprint_error_dev(sc->sc_dev, 6707 aprint_error_dev(sc->sc_dev,
6703 "unable to map TX control data, error = %d\n", error); 6708 "unable to map TX control data, error = %d\n", error);
6704 goto fail_1; 6709 goto fail_1;
6705 } 6710 }
6706 6711
6707 if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1, 6712 if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
6708 WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) { 6713 WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
6709 aprint_error_dev(sc->sc_dev, 6714 aprint_error_dev(sc->sc_dev,
6710 "unable to create TX control data DMA map, error = %d\n", 6715 "unable to create TX control data DMA map, error = %d\n",
6711 error); 6716 error);
6712 goto fail_2; 6717 goto fail_2;
6713 } 6718 }
6714 6719
6715 if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap, 6720 if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
6716 txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) { 6721 txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
6717 aprint_error_dev(sc->sc_dev, 6722 aprint_error_dev(sc->sc_dev,
6718 "unable to load TX control data DMA map, error = %d\n", 6723 "unable to load TX control data DMA map, error = %d\n",
6719 error); 6724 error);
6720 goto fail_3; 6725 goto fail_3;
6721 } 6726 }
6722 6727
6723 return 0; 6728 return 0;
6724 6729
6725 fail_3: 6730 fail_3:
6726 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap); 6731 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
6727 fail_2: 6732 fail_2:
6728 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u, 6733 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
6729 WM_TXDESCS_SIZE(txq)); 6734 WM_TXDESCS_SIZE(txq));
6730 fail_1: 6735 fail_1:
6731 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg); 6736 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
6732 fail_0: 6737 fail_0:
6733 return error; 6738 return error;
6734} 6739}
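
[Editor's note: for reference, the bus_dma(9) prototype behind the allocation above. The fourth (boundary) argument of 0x100000000ULL is what keeps the whole descriptor block from crossing a 4 GiB address boundary, satisfying the "same 4G segment" NOTE.]

/* From bus_dma(9): */
int	bus_dmamem_alloc(bus_dma_tag_t tag, bus_size_t size,
	    bus_size_t alignment, bus_size_t boundary,
	    bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags);
/*
 * A nonzero boundary means no returned segment may cross an address
 * that is a multiple of that boundary; 0x100000000ULL = 4 GiB.
 */
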
6735 6740
6736static void 6741static void
6737wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq) 6742wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
6738{ 6743{
6739 6744
6740 bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap); 6745 bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
6741 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap); 6746 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
6742 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u, 6747 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
6743 WM_TXDESCS_SIZE(txq)); 6748 WM_TXDESCS_SIZE(txq));
6744 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg); 6749 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
6745} 6750}
6746 6751
6747static int 6752static int
6748wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq) 6753wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
6749{ 6754{
6750 int error; 6755 int error;
6751 size_t rxq_descs_size; 6756 size_t rxq_descs_size;
6752 6757
6753 /* 6758 /*
6754 * Allocate the control data structures, and create and load the 6759 * Allocate the control data structures, and create and load the
6755 * DMA map for it. 6760 * DMA map for it.
6756 * 6761 *
6757 * NOTE: All Tx descriptors must be in the same 4G segment of 6762 * NOTE: All Tx descriptors must be in the same 4G segment of
6758 * memory. So must Rx descriptors. We simplify by allocating 6763 * memory. So must Rx descriptors. We simplify by allocating
6759 * both sets within the same 4G segment. 6764 * both sets within the same 4G segment.
6760 */ 6765 */
6761 rxq->rxq_ndesc = WM_NRXDESC; 6766 rxq->rxq_ndesc = WM_NRXDESC;
6762 if (sc->sc_type == WM_T_82574) 6767 if (sc->sc_type == WM_T_82574)
6763 rxq->rxq_descsize = sizeof(ext_rxdesc_t); 6768 rxq->rxq_descsize = sizeof(ext_rxdesc_t);
6764 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 6769 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6765 rxq->rxq_descsize = sizeof(nq_rxdesc_t); 6770 rxq->rxq_descsize = sizeof(nq_rxdesc_t);
6766 else 6771 else
6767 rxq->rxq_descsize = sizeof(wiseman_rxdesc_t); 6772 rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
6768 rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc; 6773 rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
6769 6774
6770 if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size, 6775 if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
6771 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg, 6776 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
6772 1, &rxq->rxq_desc_rseg, 0)) != 0) { 6777 1, &rxq->rxq_desc_rseg, 0)) != 0) {
6773 aprint_error_dev(sc->sc_dev, 6778 aprint_error_dev(sc->sc_dev,
6774 "unable to allocate RX control data, error = %d\n", 6779 "unable to allocate RX control data, error = %d\n",
6775 error); 6780 error);
6776 goto fail_0; 6781 goto fail_0;
6777 } 6782 }
6778 6783
6779 if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg, 6784 if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
6780 rxq->rxq_desc_rseg, rxq_descs_size, 6785 rxq->rxq_desc_rseg, rxq_descs_size,
6781 (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) { 6786 (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
6782 aprint_error_dev(sc->sc_dev, 6787 aprint_error_dev(sc->sc_dev,
6783 "unable to map RX control data, error = %d\n", error); 6788 "unable to map RX control data, error = %d\n", error);
6784 goto fail_1; 6789 goto fail_1;
6785 } 6790 }
6786 6791
6787 if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1, 6792 if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
6788 rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) { 6793 rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
6789 aprint_error_dev(sc->sc_dev, 6794 aprint_error_dev(sc->sc_dev,
6790 "unable to create RX control data DMA map, error = %d\n", 6795 "unable to create RX control data DMA map, error = %d\n",
6791 error); 6796 error);
6792 goto fail_2; 6797 goto fail_2;
6793 } 6798 }
6794 6799
6795 if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap, 6800 if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
6796 rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) { 6801 rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
6797 aprint_error_dev(sc->sc_dev, 6802 aprint_error_dev(sc->sc_dev,
6798 "unable to load RX control data DMA map, error = %d\n", 6803 "unable to load RX control data DMA map, error = %d\n",
6799 error); 6804 error);
6800 goto fail_3; 6805 goto fail_3;
6801 } 6806 }
6802 6807
6803 return 0; 6808 return 0;
6804 6809
6805 fail_3: 6810 fail_3:
6806 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap); 6811 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
6807 fail_2: 6812 fail_2:
6808 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u, 6813 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
6809 rxq_descs_size); 6814 rxq_descs_size);
6810 fail_1: 6815 fail_1:
6811 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg); 6816 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
6812 fail_0: 6817 fail_0:
6813 return error; 6818 return error;
6814} 6819}
6815 6820
6816static void 6821static void
6817wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq) 6822wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
6818{ 6823{
6819 6824
6820 bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap); 6825 bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
6821 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap); 6826 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
6822 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u, 6827 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
6823 rxq->rxq_descsize * rxq->rxq_ndesc); 6828 rxq->rxq_descsize * rxq->rxq_ndesc);
6824 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg); 6829 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
6825} 6830}
6826 6831
6827 6832
6828static int 6833static int
6829wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq) 6834wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
6830{ 6835{
6831 int i, error; 6836 int i, error;
6832 6837
6833 /* Create the transmit buffer DMA maps. */ 6838 /* Create the transmit buffer DMA maps. */
6834 WM_TXQUEUELEN(txq) = 6839 WM_TXQUEUELEN(txq) =
6835 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ? 6840 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
6836 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX; 6841 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
6837 for (i = 0; i < WM_TXQUEUELEN(txq); i++) { 6842 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6838 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA, 6843 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
6839 WM_NTXSEGS, WTX_MAX_LEN, 0, 0, 6844 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
6840 &txq->txq_soft[i].txs_dmamap)) != 0) { 6845 &txq->txq_soft[i].txs_dmamap)) != 0) {
6841 aprint_error_dev(sc->sc_dev, 6846 aprint_error_dev(sc->sc_dev,
6842 "unable to create Tx DMA map %d, error = %d\n", 6847 "unable to create Tx DMA map %d, error = %d\n",
6843 i, error); 6848 i, error);
6844 goto fail; 6849 goto fail;
6845 } 6850 }
6846 } 6851 }
6847 6852
6848 return 0; 6853 return 0;
6849 6854
6850 fail: 6855 fail:
6851 for (i = 0; i < WM_TXQUEUELEN(txq); i++) { 6856 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6852 if (txq->txq_soft[i].txs_dmamap != NULL) 6857 if (txq->txq_soft[i].txs_dmamap != NULL)
6853 bus_dmamap_destroy(sc->sc_dmat, 6858 bus_dmamap_destroy(sc->sc_dmat,
6854 txq->txq_soft[i].txs_dmamap); 6859 txq->txq_soft[i].txs_dmamap);
6855 } 6860 }
6856 return error; 6861 return error;
6857} 6862}
6858 6863
6859static void 6864static void
6860wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq) 6865wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
6861{ 6866{
6862 int i; 6867 int i;
6863 6868
6864 for (i = 0; i < WM_TXQUEUELEN(txq); i++) { 6869 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6865 if (txq->txq_soft[i].txs_dmamap != NULL) 6870 if (txq->txq_soft[i].txs_dmamap != NULL)
6866 bus_dmamap_destroy(sc->sc_dmat, 6871 bus_dmamap_destroy(sc->sc_dmat,
6867 txq->txq_soft[i].txs_dmamap); 6872 txq->txq_soft[i].txs_dmamap);
6868 } 6873 }
6869} 6874}
6870 6875
6871static int 6876static int
6872wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq) 6877wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
6873{ 6878{
6874 int i, error; 6879 int i, error;
6875 6880
6876 /* Create the receive buffer DMA maps. */ 6881 /* Create the receive buffer DMA maps. */
6877 for (i = 0; i < rxq->rxq_ndesc; i++) { 6882 for (i = 0; i < rxq->rxq_ndesc; i++) {
6878 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 6883 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
6879 MCLBYTES, 0, 0, 6884 MCLBYTES, 0, 0,
6880 &rxq->rxq_soft[i].rxs_dmamap)) != 0) { 6885 &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
6881 aprint_error_dev(sc->sc_dev, 6886 aprint_error_dev(sc->sc_dev,
6882 "unable to create Rx DMA map %d error = %d\n", 6887 "unable to create Rx DMA map %d error = %d\n",
6883 i, error); 6888 i, error);
6884 goto fail; 6889 goto fail;
6885 } 6890 }
6886 rxq->rxq_soft[i].rxs_mbuf = NULL; 6891 rxq->rxq_soft[i].rxs_mbuf = NULL;
6887 } 6892 }
6888 6893
6889 return 0; 6894 return 0;
6890 6895
6891 fail: 6896 fail:
6892 for (i = 0; i < rxq->rxq_ndesc; i++) { 6897 for (i = 0; i < rxq->rxq_ndesc; i++) {
6893 if (rxq->rxq_soft[i].rxs_dmamap != NULL) 6898 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
6894 bus_dmamap_destroy(sc->sc_dmat, 6899 bus_dmamap_destroy(sc->sc_dmat,
6895 rxq->rxq_soft[i].rxs_dmamap); 6900 rxq->rxq_soft[i].rxs_dmamap);
6896 } 6901 }
6897 return error; 6902 return error;
6898} 6903}
6899 6904
6900static void 6905static void
6901wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq) 6906wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
6902{ 6907{
6903 int i; 6908 int i;
6904 6909
6905 for (i = 0; i < rxq->rxq_ndesc; i++) { 6910 for (i = 0; i < rxq->rxq_ndesc; i++) {
6906 if (rxq->rxq_soft[i].rxs_dmamap != NULL) 6911 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
6907 bus_dmamap_destroy(sc->sc_dmat, 6912 bus_dmamap_destroy(sc->sc_dmat,
6908 rxq->rxq_soft[i].rxs_dmamap); 6913 rxq->rxq_soft[i].rxs_dmamap);
6909 } 6914 }
6910} 6915}
6911 6916
6912/* 6917/*
6913 * wm_alloc_txrx_queues: 6918 * wm_alloc_txrx_queues:
6914 * Allocate {tx,rx}descs and {tx,rx} buffers 6919 * Allocate {tx,rx}descs and {tx,rx} buffers
6915 */ 6920 */
6916static int 6921static int
6917wm_alloc_txrx_queues(struct wm_softc *sc) 6922wm_alloc_txrx_queues(struct wm_softc *sc)
6918{ 6923{
6919 int i, error, tx_done, rx_done; 6924 int i, error, tx_done, rx_done;
6920 6925
6921 sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues, 6926 sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
6922 KM_SLEEP); 6927 KM_SLEEP);
6923 if (sc->sc_queue == NULL) { 6928 if (sc->sc_queue == NULL) {
6924 aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n"); 6929 aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
6925 error = ENOMEM; 6930 error = ENOMEM;
6926 goto fail_0; 6931 goto fail_0;
6927 } 6932 }
6928 6933
6929 /* For transmission */ 6934 /* For transmission */
6930 error = 0; 6935 error = 0;
6931 tx_done = 0; 6936 tx_done = 0;
6932 for (i = 0; i < sc->sc_nqueues; i++) { 6937 for (i = 0; i < sc->sc_nqueues; i++) {
6933#ifdef WM_EVENT_COUNTERS 6938#ifdef WM_EVENT_COUNTERS
6934 int j; 6939 int j;
6935 const char *xname; 6940 const char *xname;
6936#endif 6941#endif
6937 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq; 6942 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6938 txq->txq_sc = sc; 6943 txq->txq_sc = sc;
6939 txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); 6944 txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
6940 6945
6941 error = wm_alloc_tx_descs(sc, txq); 6946 error = wm_alloc_tx_descs(sc, txq);
6942 if (error) 6947 if (error)
6943 break; 6948 break;
6944 error = wm_alloc_tx_buffer(sc, txq); 6949 error = wm_alloc_tx_buffer(sc, txq);
6945 if (error) { 6950 if (error) {
6946 wm_free_tx_descs(sc, txq); 6951 wm_free_tx_descs(sc, txq);
6947 break; 6952 break;
6948 } 6953 }
6949 txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP); 6954 txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
6950 if (txq->txq_interq == NULL) { 6955 if (txq->txq_interq == NULL) {
6951 wm_free_tx_descs(sc, txq); 6956 wm_free_tx_descs(sc, txq);
6952 wm_free_tx_buffer(sc, txq); 6957 wm_free_tx_buffer(sc, txq);
6953 error = ENOMEM; 6958 error = ENOMEM;
6954 break; 6959 break;
6955 } 6960 }
6956 6961
6957#ifdef WM_EVENT_COUNTERS 6962#ifdef WM_EVENT_COUNTERS
6958 xname = device_xname(sc->sc_dev); 6963 xname = device_xname(sc->sc_dev);
6959 6964
6960 WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname); 6965 WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
6961 WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname); 6966 WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
6962 WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname); 6967 WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
6963 WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname); 6968 WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
6964 WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname); 6969 WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
6965 WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname); 6970 WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
6966 WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname); 6971 WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
6967 WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname); 6972 WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
6968 WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname); 6973 WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
6969 WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname); 6974 WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
6970 WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname); 6975 WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
6971 6976
6972 for (j = 0; j < WM_NTXSEGS; j++) { 6977 for (j = 0; j < WM_NTXSEGS; j++) {
6973 snprintf(txq->txq_txseg_evcnt_names[j], 6978 snprintf(txq->txq_txseg_evcnt_names[j],
6974 sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j); 6979 sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
6975 evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC, 6980 evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
6976 NULL, xname, txq->txq_txseg_evcnt_names[j]); 6981 NULL, xname, txq->txq_txseg_evcnt_names[j]);
6977 } 6982 }
6978 6983
6979 WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname); 6984 WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
6980 WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname); 6985 WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
6981 WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname); 6986 WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
6982 WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname); 6987 WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
6983 WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname); 6988 WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
6984 WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname); 6989 WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
6985#endif /* WM_EVENT_COUNTERS */ 6990#endif /* WM_EVENT_COUNTERS */
6986 6991
6987 tx_done++; 6992 tx_done++;
6988 } 6993 }
6989 if (error) 6994 if (error)
6990 goto fail_1; 6995 goto fail_1;
6991 6996
6992 /* For receive */ 6997 /* For receive */
6993 error = 0; 6998 error = 0;
6994 rx_done = 0; 6999 rx_done = 0;
6995 for (i = 0; i < sc->sc_nqueues; i++) { 7000 for (i = 0; i < sc->sc_nqueues; i++) {
6996#ifdef WM_EVENT_COUNTERS 7001#ifdef WM_EVENT_COUNTERS
6997 const char *xname; 7002 const char *xname;
6998#endif 7003#endif
6999 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq; 7004 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7000 rxq->rxq_sc = sc; 7005 rxq->rxq_sc = sc;
7001 rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); 7006 rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
7002 7007
7003 error = wm_alloc_rx_descs(sc, rxq); 7008 error = wm_alloc_rx_descs(sc, rxq);
7004 if (error) 7009 if (error)
7005 break; 7010 break;
7006 7011
7007 error = wm_alloc_rx_buffer(sc, rxq); 7012 error = wm_alloc_rx_buffer(sc, rxq);
7008 if (error) { 7013 if (error) {
7009 wm_free_rx_descs(sc, rxq); 7014 wm_free_rx_descs(sc, rxq);
7010 break; 7015 break;
7011 } 7016 }
7012 7017
7013#ifdef WM_EVENT_COUNTERS 7018#ifdef WM_EVENT_COUNTERS
7014 xname = device_xname(sc->sc_dev); 7019 xname = device_xname(sc->sc_dev);
7015 7020
7016 WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname); 7021 WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
7017 WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname); 7022 WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
7018 7023
7019 WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname); 7024 WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
7020 WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname); 7025 WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
7021#endif /* WM_EVENT_COUNTERS */ 7026#endif /* WM_EVENT_COUNTERS */
7022 7027
7023 rx_done++; 7028 rx_done++;
7024 } 7029 }
7025 if (error) 7030 if (error)
7026 goto fail_2; 7031 goto fail_2;
7027 7032
7028 return 0; 7033 return 0;
7029 7034
7030 fail_2: 7035 fail_2:
7031 for (i = 0; i < rx_done; i++) { 7036 for (i = 0; i < rx_done; i++) {
7032 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq; 7037 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7033 wm_free_rx_buffer(sc, rxq); 7038 wm_free_rx_buffer(sc, rxq);
7034 wm_free_rx_descs(sc, rxq); 7039 wm_free_rx_descs(sc, rxq);
7035 if (rxq->rxq_lock) 7040 if (rxq->rxq_lock)
7036 mutex_obj_free(rxq->rxq_lock); 7041 mutex_obj_free(rxq->rxq_lock);
7037 } 7042 }
7038 fail_1: 7043 fail_1:
7039 for (i = 0; i < tx_done; i++) { 7044 for (i = 0; i < tx_done; i++) {
7040 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq; 7045 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7041 pcq_destroy(txq->txq_interq); 7046 pcq_destroy(txq->txq_interq);
7042 wm_free_tx_buffer(sc, txq); 7047 wm_free_tx_buffer(sc, txq);
7043 wm_free_tx_descs(sc, txq); 7048 wm_free_tx_descs(sc, txq);
7044 if (txq->txq_lock) 7049 if (txq->txq_lock)
7045 mutex_obj_free(txq->txq_lock); 7050 mutex_obj_free(txq->txq_lock);
7046 } 7051 }
7047 7052
7048 kmem_free(sc->sc_queue, 7053 kmem_free(sc->sc_queue,
7049 sizeof(struct wm_queue) * sc->sc_nqueues); 7054 sizeof(struct wm_queue) * sc->sc_nqueues);
7050 fail_0: 7055 fail_0:
7051 return error; 7056 return error;
7052} 7057}
7053 7058
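The tx_done/rx_done counters above implement a common partial-failure pattern: count how many per-queue setups completed, then unwind exactly that many on error. A stripped-down sketch with hypothetical ex_* names:

struct ex_q { int id; };		/* stand-in for struct wm_queue */

static int  ex_setup_one(struct ex_q *);
static void ex_teardown_one(struct ex_q *);

static int
ex_setup_all(struct ex_q *q, int nq)
{
	int i, done, error = 0;

	for (done = 0; done < nq; done++)
		if ((error = ex_setup_one(&q[done])) != 0)
			break;
	if (error != 0) {
		/* Unwind only the queues that were fully set up. */
		for (i = 0; i < done; i++)
			ex_teardown_one(&q[i]);
	}
	return error;
}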
7054/* 7059/*
7055 * wm_free_txrx_queues: 7060 * wm_free_txrx_queues:
7056 * Free {tx,rx}descs and {tx,rx} buffers 7061 * Free {tx,rx}descs and {tx,rx} buffers
7057 */ 7062 */
7058static void 7063static void
7059wm_free_txrx_queues(struct wm_softc *sc) 7064wm_free_txrx_queues(struct wm_softc *sc)
7060{ 7065{
7061 int i; 7066 int i;
7062 7067
7063 for (i = 0; i < sc->sc_nqueues; i++) { 7068 for (i = 0; i < sc->sc_nqueues; i++) {
7064 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq; 7069 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7065 7070
7066#ifdef WM_EVENT_COUNTERS 7071#ifdef WM_EVENT_COUNTERS
7067 WM_Q_EVCNT_DETACH(rxq, intr, rxq, i); 7072 WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
7068 WM_Q_EVCNT_DETACH(rxq, defer, rxq, i); 7073 WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
7069 WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i); 7074 WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
7070 WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i); 7075 WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
7071#endif /* WM_EVENT_COUNTERS */ 7076#endif /* WM_EVENT_COUNTERS */
7072 7077
7073 wm_free_rx_buffer(sc, rxq); 7078 wm_free_rx_buffer(sc, rxq);
7074 wm_free_rx_descs(sc, rxq); 7079 wm_free_rx_descs(sc, rxq);
7075 if (rxq->rxq_lock) 7080 if (rxq->rxq_lock)
7076 mutex_obj_free(rxq->rxq_lock); 7081 mutex_obj_free(rxq->rxq_lock);
7077 } 7082 }
7078 7083
7079 for (i = 0; i < sc->sc_nqueues; i++) { 7084 for (i = 0; i < sc->sc_nqueues; i++) {
7080 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq; 7085 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7081 struct mbuf *m; 7086 struct mbuf *m;
7082#ifdef WM_EVENT_COUNTERS 7087#ifdef WM_EVENT_COUNTERS
7083 int j; 7088 int j;
7084 7089
7085 WM_Q_EVCNT_DETACH(txq, txsstall, txq, i); 7090 WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
7086 WM_Q_EVCNT_DETACH(txq, txdstall, txq, i); 7091 WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
7087 WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i); 7092 WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
7088 WM_Q_EVCNT_DETACH(txq, txdw, txq, i); 7093 WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
7089 WM_Q_EVCNT_DETACH(txq, txqe, txq, i); 7094 WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
7090 WM_Q_EVCNT_DETACH(txq, ipsum, txq, i); 7095 WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
7091 WM_Q_EVCNT_DETACH(txq, tusum, txq, i); 7096 WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
7092 WM_Q_EVCNT_DETACH(txq, tusum6, txq, i); 7097 WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
7093 WM_Q_EVCNT_DETACH(txq, tso, txq, i); 7098 WM_Q_EVCNT_DETACH(txq, tso, txq, i);
7094 WM_Q_EVCNT_DETACH(txq, tso6, txq, i); 7099 WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
7095 WM_Q_EVCNT_DETACH(txq, tsopain, txq, i); 7100 WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
7096 7101
7097 for (j = 0; j < WM_NTXSEGS; j++) 7102 for (j = 0; j < WM_NTXSEGS; j++)
7098 evcnt_detach(&txq->txq_ev_txseg[j]); 7103 evcnt_detach(&txq->txq_ev_txseg[j]);
7099 7104
7100 WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i); 7105 WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
7101 WM_Q_EVCNT_DETACH(txq, descdrop, txq, i); 7106 WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
7102 WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i); 7107 WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
7103 WM_Q_EVCNT_DETACH(txq, defrag, txq, i); 7108 WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
7104 WM_Q_EVCNT_DETACH(txq, underrun, txq, i); 7109 WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
7105 WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i); 7110 WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
7106#endif /* WM_EVENT_COUNTERS */ 7111#endif /* WM_EVENT_COUNTERS */
7107 7112
7108 /* Drain txq_interq */ 7113 /* Drain txq_interq */
7109 while ((m = pcq_get(txq->txq_interq)) != NULL) 7114 while ((m = pcq_get(txq->txq_interq)) != NULL)
7110 m_freem(m); 7115 m_freem(m);
7111 pcq_destroy(txq->txq_interq); 7116 pcq_destroy(txq->txq_interq);
7112 7117
7113 wm_free_tx_buffer(sc, txq); 7118 wm_free_tx_buffer(sc, txq);
7114 wm_free_tx_descs(sc, txq); 7119 wm_free_tx_descs(sc, txq);
7115 if (txq->txq_lock) 7120 if (txq->txq_lock)
7116 mutex_obj_free(txq->txq_lock); 7121 mutex_obj_free(txq->txq_lock);
7117 } 7122 }
7118 7123
7119 kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues); 7124 kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
7120} 7125}
7121 7126
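The drain loop above is the same idiom this commit adds to wm_stop_locked(): pcq(9) stores opaque pointers, so pcq_destroy() cannot free whatever is still enqueued, and the caller must empty the queue first. A minimal sketch:

#include <sys/mbuf.h>
#include <sys/pcq.h>

static void
ex_pcq_drain_and_destroy(pcq_t *q)
{
	struct mbuf *m;

	while ((m = pcq_get(q)) != NULL)
		m_freem(m);	/* free each packet still queued */
	pcq_destroy(q);		/* then destroy the now-empty queue */
}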
7122static void 7127static void
7123wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq) 7128wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
7124{ 7129{
7125 7130
7126 KASSERT(mutex_owned(txq->txq_lock)); 7131 KASSERT(mutex_owned(txq->txq_lock));
7127 7132
7128 /* Initialize the transmit descriptor ring. */ 7133 /* Initialize the transmit descriptor ring. */
7129 memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq)); 7134 memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
7130 wm_cdtxsync(txq, 0, WM_NTXDESC(txq), 7135 wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
7131 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 7136 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
7132 txq->txq_free = WM_NTXDESC(txq); 7137 txq->txq_free = WM_NTXDESC(txq);
7133 txq->txq_next = 0; 7138 txq->txq_next = 0;
7134} 7139}
7135 7140
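wm_cdtxsync() above is, in essence, a bus_dmamap_sync(9) call covering a range of descriptors. A sketch under that assumption (hypothetical ex_* helper; the full-ring sync used here starts at descriptor 0, so it never wraps):

#include <sys/bus.h>

static void
ex_cdsync(bus_dma_tag_t tag, bus_dmamap_t map, int start, int num,
    size_t descsize, int ops)
{
	/* Sync num contiguous descriptors beginning at index start. */
	bus_dmamap_sync(tag, map, start * descsize, num * descsize, ops);
}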
7136static void 7141static void
7137wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq, 7142wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
7138 struct wm_txqueue *txq) 7143 struct wm_txqueue *txq)
7139{ 7144{
7140 7145
7141 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", 7146 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
7142 device_xname(sc->sc_dev), __func__)); 7147 device_xname(sc->sc_dev), __func__));
7143 KASSERT(mutex_owned(txq->txq_lock)); 7148 KASSERT(mutex_owned(txq->txq_lock));
7144 7149
7145 if (sc->sc_type < WM_T_82543) { 7150 if (sc->sc_type < WM_T_82543) {
7146 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0)); 7151 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
7147 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0)); 7152 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
7148 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq)); 7153 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
7149 CSR_WRITE(sc, WMREG_OLD_TDH, 0); 7154 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
7150 CSR_WRITE(sc, WMREG_OLD_TDT, 0); 7155 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
7151 CSR_WRITE(sc, WMREG_OLD_TIDV, 128); 7156 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
7152 } else { 7157 } else {
7153 int qid = wmq->wmq_id; 7158 int qid = wmq->wmq_id;
7154 7159
7155 CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0)); 7160 CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
7156 CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0)); 7161 CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
7157 CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq)); 7162 CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
7158 CSR_WRITE(sc, WMREG_TDH(qid), 0); 7163 CSR_WRITE(sc, WMREG_TDH(qid), 0);
7159 7164
7160 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 7165 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7161 /* 7166 /*
7162 * Don't write TDT before TCTL.EN is set. 7167 * Don't write TDT before TCTL.EN is set.
7163 * See the datasheet. 7168 * See the datasheet.
7164 */ 7169 */
7165 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE 7170 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
7166 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0) 7171 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
7167 | TXDCTL_WTHRESH(0)); 7172 | TXDCTL_WTHRESH(0));
7168 else { 7173 else {
7169 /* XXX should update with AIM? */ 7174 /* XXX should update with AIM? */
7170 CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4); 7175 CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
7171 if (sc->sc_type >= WM_T_82540) { 7176 if (sc->sc_type >= WM_T_82540) {
7172 /* Should be the same */ 7177 /* Should be the same */
7173 CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4); 7178 CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
7174 } 7179 }
7175 7180
7176 CSR_WRITE(sc, WMREG_TDT(qid), 0); 7181 CSR_WRITE(sc, WMREG_TDT(qid), 0);
7177 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) | 7182 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
7178 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0)); 7183 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
7179 } 7184 }
7180 } 7185 }
7181} 7186}
7182 7187
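The NEWQUEUE branch above deliberately leaves TDT alone: on 82575-class hardware the tail register may only be written once transmit is enabled. A sketch of the required ordering, assuming the usual CSR_READ/CSR_WRITE accessors and the WMREG_TCTL/TCTL_EN definitions from if_wmreg.h (in if_wm.c the TDT write happens later, after TCTL.EN has been set elsewhere):

static void
ex_enable_then_tail(struct wm_softc *sc, int qid)
{
	uint32_t tctl = CSR_READ(sc, WMREG_TCTL);

	CSR_WRITE(sc, WMREG_TCTL, tctl | TCTL_EN);	/* enable first */
	CSR_WRITE(sc, WMREG_TDT(qid), 0);		/* then the tail */
}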
7183static void 7188static void
7184wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq) 7189wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
7185{ 7190{
7186 int i; 7191 int i;
7187 7192
7188 KASSERT(mutex_owned(txq->txq_lock)); 7193 KASSERT(mutex_owned(txq->txq_lock));
7189 7194
7190 /* Initialize the transmit job descriptors. */ 7195 /* Initialize the transmit job descriptors. */
7191 for (i = 0; i < WM_TXQUEUELEN(txq); i++) 7196 for (i = 0; i < WM_TXQUEUELEN(txq); i++)
7192 txq->txq_soft[i].txs_mbuf = NULL; 7197 txq->txq_soft[i].txs_mbuf = NULL;
7193 txq->txq_sfree = WM_TXQUEUELEN(txq); 7198 txq->txq_sfree = WM_TXQUEUELEN(txq);
7194 txq->txq_snext = 0; 7199 txq->txq_snext = 0;
7195 txq->txq_sdirty = 0; 7200 txq->txq_sdirty = 0;
7196} 7201}
7197 7202
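txq_sfree, txq_snext and txq_sdirty above form the usual producer/consumer ring bookkeeping: snext is the slot for the next job, sdirty the oldest in-flight job, and sfree the number of free slots. The advance step is a modular increment (hypothetical helper):

static inline int
ex_nexttxs(int idx, int qlen)
{
	return (idx + 1) % qlen;	/* wrap around the job ring */
}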
7198static void 7203static void
7199wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq, 7204wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
7200 struct wm_txqueue *txq) 7205 struct wm_txqueue *txq)
7201{ 7206{
7202 7207
7203 KASSERT(mutex_owned(txq->txq_lock)); 7208 KASSERT(mutex_owned(txq->txq_lock));
7204 7209
7205 /* 7210 /*
7206 * Set up some register offsets that are different between 7211 * Set up some register offsets that are different between
7207 * the i82542 and the i82543 and later chips. 7212 * the i82542 and the i82543 and later chips.
7208 */ 7213 */
7209 if (sc->sc_type < WM_T_82543) 7214 if (sc->sc_type < WM_T_82543)
7210 txq->txq_tdt_reg = WMREG_OLD_TDT; 7215 txq->txq_tdt_reg = WMREG_OLD_TDT;
7211 else 7216 else
7212 txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id); 7217 txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
7213 7218
7214 wm_init_tx_descs(sc, txq); 7219 wm_init_tx_descs(sc, txq);
7215 wm_init_tx_regs(sc, wmq, txq); 7220 wm_init_tx_regs(sc, wmq, txq);
7216 wm_init_tx_buffer(sc, txq); 7221 wm_init_tx_buffer(sc, txq);
7217 7222
7218 txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */ 7223 txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
7219 txq->txq_sending = false; 7224 txq->txq_sending = false;
7220} 7225}
7221 7226
7222static void 7227static void
7223wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq, 7228wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
7224 struct wm_rxqueue *rxq) 7229 struct wm_rxqueue *rxq)
7225{ 7230{
7226 7231
7227 KASSERT(mutex_owned(rxq->rxq_lock)); 7232 KASSERT(mutex_owned(rxq->rxq_lock));
7228 7233
7229 /* 7234 /*
7230 * Initialize the receive descriptor and receive job 7235 * Initialize the receive descriptor and receive job
7231 * descriptor rings. 7236 * descriptor rings.
7232 */ 7237 */
7233 if (sc->sc_type < WM_T_82543) { 7238 if (sc->sc_type < WM_T_82543) {
7234 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0)); 7239 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
7235 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0)); 7240 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
7236 CSR_WRITE(sc, WMREG_OLD_RDLEN0, 7241 CSR_WRITE(sc, WMREG_OLD_RDLEN0,
7237 rxq->rxq_descsize * rxq->rxq_ndesc); 7242 rxq->rxq_descsize * rxq->rxq_ndesc);
7238 CSR_WRITE(sc, WMREG_OLD_RDH0, 0); 7243 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
7239 CSR_WRITE(sc, WMREG_OLD_RDT0, 0); 7244 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
7240 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD); 7245 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
7241 7246
7242 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0); 7247 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
7243 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0); 7248 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
7244 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0); 7249 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
7245 CSR_WRITE(sc, WMREG_OLD_RDH1, 0); 7250 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
7246 CSR_WRITE(sc, WMREG_OLD_RDT1, 0); 7251 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
7247 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0); 7252 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
7248 } else { 7253 } else {
7249 int qid = wmq->wmq_id; 7254 int qid = wmq->wmq_id;
7250 7255
7251 CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0)); 7256 CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
7252 CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0)); 7257 CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
7253 CSR_WRITE(sc, WMREG_RDLEN(qid), 7258 CSR_WRITE(sc, WMREG_RDLEN(qid),
7254 rxq->rxq_descsize * rxq->rxq_ndesc); 7259 rxq->rxq_descsize * rxq->rxq_ndesc);
7255 7260
7256 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { 7261 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
7257 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1)) 7262 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
7258 panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES); 7263 panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
7259 7264
7260 /* Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF only. */ 7265 /* Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF only. */
7261 CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF 7266 CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
7262 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT)); 7267 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
7263 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE 7268 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
7264 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8) 7269 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
7265 | RXDCTL_WTHRESH(1)); 7270 | RXDCTL_WTHRESH(1));
7266 CSR_WRITE(sc, WMREG_RDH(qid), 0); 7271 CSR_WRITE(sc, WMREG_RDH(qid), 0);
7267 CSR_WRITE(sc, WMREG_RDT(qid), 0); 7272 CSR_WRITE(sc, WMREG_RDT(qid), 0);
7268 } else { 7273 } else {
7269 CSR_WRITE(sc, WMREG_RDH(qid), 0); 7274 CSR_WRITE(sc, WMREG_RDH(qid), 0);
7270 CSR_WRITE(sc, WMREG_RDT(qid), 0); 7275 CSR_WRITE(sc, WMREG_RDT(qid), 0);
7271 /* XXX should update with AIM? */ 7276 /* XXX should update with AIM? */
7272 CSR_WRITE(sc, WMREG_RDTR, 7277 CSR_WRITE(sc, WMREG_RDTR,
7273 (wmq->wmq_itr / 4) | RDTR_FPD); 7278 (wmq->wmq_itr / 4) | RDTR_FPD);
7274 /* MUST be the same */ 7279 /* MUST be the same */
7275 CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4); 7280 CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
7276 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) | 7281 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
7277 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1)); 7282 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
7278 } 7283 }
7279 } 7284 }
7280} 7285}
7281 7286
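The panic in wm_init_rx_regs() above guards the SRRCTL packet-buffer-size encoding: the size is programmed in units of (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, so MCLBYTES must be a multiple of that unit. Assuming the shift is 10 (1 KiB units), the same invariant could be expressed at compile time:

#include <sys/cdefs.h>			/* __CTASSERT */
#include <sys/param.h>			/* MCLBYTES */

#define EX_BSIZEPKT_SHIFT	10	/* assumed SRRCTL unit: 1 KiB */

__CTASSERT((MCLBYTES & ((1 << EX_BSIZEPKT_SHIFT) - 1)) == 0);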
7282static int 7287static int
7283wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq) 7288wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
7284{ 7289{
7285 struct wm_rxsoft *rxs; 7290 struct wm_rxsoft *rxs;
7286 int error, i; 7291 int error, i;
7287 7292
7288 KASSERT(mutex_owned(rxq->rxq_lock)); 7293 KASSERT(mutex_owned(rxq->rxq_lock));
7289 7294
7290 for (i = 0; i < rxq->rxq_ndesc; i++) { 7295 for (i = 0; i < rxq->rxq_ndesc; i++) {
7291 rxs = &rxq->rxq_soft[i]; 7296 rxs = &rxq->rxq_soft[i];
7292 if (rxs->rxs_mbuf == NULL) { 7297 if (rxs->rxs_mbuf == NULL) {
7293 if ((error = wm_add_rxbuf(rxq, i)) != 0) { 7298 if ((error = wm_add_rxbuf(rxq, i)) != 0) {
7294 log(LOG_ERR, "%s: unable to allocate or map " 7299 log(LOG_ERR, "%s: unable to allocate or map "
7295 "rx buffer %d, error = %d\n", 7300 "rx buffer %d, error = %d\n",
7296 device_xname(sc->sc_dev), i, error); 7301 device_xname(sc->sc_dev), i, error);
7297 /* 7302 /*
7298 * XXX Should attempt to run with fewer receive 7303 * XXX Should attempt to run with fewer receive
7299 * XXX buffers instead of just failing. 7304 * XXX buffers instead of just failing.
7300 */ 7305 */
7301 wm_rxdrain(rxq); 7306 wm_rxdrain(rxq);
7302 return ENOMEM; 7307 return ENOMEM;
7303 } 7308 }
7304 } else { 7309 } else {
7305 /* 7310 /*
7306 * For 82575 and 82576, the RX descriptors must be 7311 * For 82575 and 82576, the RX descriptors must be
7307 * initialized after RCTL.EN is set in 7312 * initialized after RCTL.EN is set in
7308 * wm_set_filter(). 7313 * wm_set_filter().
7309 */ 7314 */
7310 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0) 7315 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
7311 wm_init_rxdesc(rxq, i); 7316 wm_init_rxdesc(rxq, i);
7312 } 7317 }
7313 } 7318 }
7314 rxq->rxq_ptr = 0; 7319 rxq->rxq_ptr = 0;
7315 rxq->rxq_discard = 0; 7320 rxq->rxq_discard = 0;
7316 WM_RXCHAIN_RESET(rxq); 7321 WM_RXCHAIN_RESET(rxq);
7317 7322
7318 return 0; 7323 return 0;
7319} 7324}
7320 7325
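The XXX above suggests degrading gracefully instead of failing outright. A sketch of that alternative, reusing the driver's own wm_add_rxbuf() and wm_rxdrain(); it is hypothetical, and a real version would also have to shrink the descriptor ring length programmed into RDLEN, which is why the change is not trivial:

static int
ex_init_rx_buffer_tolerant(struct wm_softc *sc __unused,
    struct wm_rxqueue *rxq)
{
	int i, nalloc = 0;

	for (i = 0; i < rxq->rxq_ndesc; i++) {
		if (wm_add_rxbuf(rxq, i) != 0)
			break;
		nalloc++;
	}
	if (nalloc == 0) {
		/* Nothing could be allocated; that is still fatal. */
		wm_rxdrain(rxq);
		return ENOMEM;
	}
	/* Otherwise run with a shortened ring of nalloc buffers. */
	return 0;
}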
7321static int 7326static int
7322wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq, 7327wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
7323 struct wm_rxqueue *rxq) 7328 struct wm_rxqueue *rxq)
7324{ 7329{
7325 7330
7326 KASSERT(mutex_owned(rxq->rxq_lock)); 7331 KASSERT(mutex_owned(rxq->rxq_lock));
7327 7332
7328 /* 7333 /*
7329 * Set up some register offsets that are different between 7334 * Set up some register offsets that are different between
7330 * the i82542 and the i82543 and later chips. 7335 * the i82542 and the i82543 and later chips.
7331 */ 7336 */
7332 if (sc->sc_type < WM_T_82543) 7337 if (sc->sc_type < WM_T_82543)
7333 rxq->rxq_rdt_reg = WMREG_OLD_RDT0; 7338 rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
7334 else 7339 else
7335 rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id); 7340 rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
7336 7341
7337 wm_init_rx_regs(sc, wmq, rxq); 7342 wm_init_rx_regs(sc, wmq, rxq);
7338 return wm_init_rx_buffer(sc, rxq); 7343 return wm_init_rx_buffer(sc, rxq);
7339} 7344}
7340 7345
7341/* 7346/*
7342 * wm_init_txrx_queues: 7347 * wm_init_txrx_queues:
7343 * Initialize {tx,rx}descs and {tx,rx} buffers 7348 * Initialize {tx,rx}descs and {tx,rx} buffers
7344 */ 7349 */
7345static int 7350static int
7346wm_init_txrx_queues(struct wm_softc *sc) 7351wm_init_txrx_queues(struct wm_softc *sc)
7347{ 7352{
7348 int i, error = 0; 7353 int i, error = 0;
7349 7354
7350 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", 7355 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
7351 device_xname(sc->sc_dev), __func__)); 7356 device_xname(sc->sc_dev), __func__));
7352 7357
7353 for (i = 0; i < sc->sc_nqueues; i++) { 7358 for (i = 0; i < sc->sc_nqueues; i++) {
7354 struct wm_queue *wmq = &sc->sc_queue[i]; 7359 struct wm_queue *wmq = &sc->sc_queue[i];
7355 struct wm_txqueue *txq = &wmq->wmq_txq; 7360 struct wm_txqueue *txq = &wmq->wmq_txq;
7356 struct wm_rxqueue *rxq = &wmq->wmq_rxq; 7361 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
7357 7362
7358 /* 7363 /*
7359 * TODO 7364 * TODO
7360 * Currently, a constant value is used instead of AIM. 7365 * Currently, a constant value is used instead of AIM.
7361 * Furthermore, the interrupt interval for multiqueue (which 7366 * Furthermore, the interrupt interval for multiqueue (which
7362 * uses polling mode) is shorter than the default value. 7367 * uses polling mode) is shorter than the default value.
7363 * More tuning and AIM support are required. 7368 * More tuning and AIM support are required.
7364 */ 7369 */
7365 if (wm_is_using_multiqueue(sc)) 7370 if (wm_is_using_multiqueue(sc))
7366 wmq->wmq_itr = 50; 7371 wmq->wmq_itr = 50;
7367 else 7372 else
7368 wmq->wmq_itr = sc->sc_itr_init; 7373 wmq->wmq_itr = sc->sc_itr_init;
7369 wmq->wmq_set_itr = true; 7374 wmq->wmq_set_itr = true;
7370 7375
7371 mutex_enter(txq->txq_lock); 7376 mutex_enter(txq->txq_lock);
7372 wm_init_tx_queue(sc, wmq, txq); 7377 wm_init_tx_queue(sc, wmq, txq);
7373 mutex_exit(txq->txq_lock); 7378 mutex_exit(txq->txq_lock);
7374 7379
7375 mutex_enter(rxq->rxq_lock); 7380 mutex_enter(rxq->rxq_lock);
7376 error = wm_init_rx_queue(sc, wmq, rxq); 7381 error = wm_init_rx_queue(sc, wmq, rxq);
7377 mutex_exit(rxq->rxq_lock); 7382 mutex_exit(rxq->rxq_lock);
7378 if (error) 7383 if (error)
7379 break; 7384 break;
7380 } 7385 }
7381 7386
7382 return error; 7387 return error;
7383} 7388}
7384 7389
7385/* 7390/*
7386 * wm_tx_offload: 7391 * wm_tx_offload:
7387 * 7392 *
7388 * Set up TCP/IP checksumming parameters for the 7393 * Set up TCP/IP checksumming parameters for the
7389 * specified packet. 7394 * specified packet.
7390 */ 7395 */
7391static void 7396static void
7392wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq, 7397wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
7393 struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp) 7398 struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
7394{ 7399{
7395 struct mbuf *m0 = txs->txs_mbuf; 7400 struct mbuf *m0 = txs->txs_mbuf;
7396 struct livengood_tcpip_ctxdesc *t; 7401 struct livengood_tcpip_ctxdesc *t;
7397 uint32_t ipcs, tucs, cmd, cmdlen, seg; 7402 uint32_t ipcs, tucs, cmd, cmdlen, seg;
7398 uint32_t ipcse; 7403 uint32_t ipcse;
7399 struct ether_header *eh; 7404 struct ether_header *eh;
7400 int offset, iphl; 7405 int offset, iphl;
7401 uint8_t fields; 7406 uint8_t fields;
7402 7407
7403 /* 7408 /*
7404 * XXX It would be nice if the mbuf pkthdr had offset 7409 * XXX It would be nice if the mbuf pkthdr had offset
7405 * fields for the protocol headers. 7410 * fields for the protocol headers.
7406 */ 7411 */
7407 7412
7408 eh = mtod(m0, struct ether_header *); 7413 eh = mtod(m0, struct ether_header *);
7409 switch (ntohs(eh->ether_type)) { 7414 switch (ntohs(eh->ether_type)) {
7410 case ETHERTYPE_IP: 7415 case ETHERTYPE_IP:
7411 case ETHERTYPE_IPV6: 7416 case ETHERTYPE_IPV6:
7412 offset = ETHER_HDR_LEN; 7417 offset = ETHER_HDR_LEN;
7413 break; 7418 break;
7414 7419
7415 case ETHERTYPE_VLAN: 7420 case ETHERTYPE_VLAN:
7416 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 7421 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
7417 break; 7422 break;
7418 7423
7419 default: 7424 default:
7420 /* Don't support this protocol or encapsulation. */ 7425 /* Don't support this protocol or encapsulation. */
7421 txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0; 7426 txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
7422 txq->txq_last_hw_ipcs = 0; 7427 txq->txq_last_hw_ipcs = 0;
7423 txq->txq_last_hw_tucs = 0; 7428 txq->txq_last_hw_tucs = 0;
7424 *fieldsp = 0; 7429 *fieldsp = 0;
7425 *cmdp = 0; 7430 *cmdp = 0;
7426 return; 7431 return;
7427 } 7432 }
7428 7433
7429 if ((m0->m_pkthdr.csum_flags & 7434 if ((m0->m_pkthdr.csum_flags &
7430 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) { 7435 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
7431 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data); 7436 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
7432 } else 7437 } else
7433 iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data); 7438 iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
7434 7439
7435 ipcse = offset + iphl - 1; 7440 ipcse = offset + iphl - 1;
7436 7441
7437 cmd = WTX_CMD_DEXT | WTX_DTYP_D; 7442 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
7438 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE; 7443 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
7439 seg = 0; 7444 seg = 0;
7440 fields = 0; 7445 fields = 0;
7441 7446
7442 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) { 7447 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
7443 int hlen = offset + iphl; 7448 int hlen = offset + iphl;
7444 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0; 7449 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
7445 7450
7446 if (__predict_false(m0->m_len < 7451 if (__predict_false(m0->m_len <
7447 (hlen + sizeof(struct tcphdr)))) { 7452 (hlen + sizeof(struct tcphdr)))) {
7448 /* 7453 /*
7449 * TCP/IP headers are not in the first mbuf; we need 7454 * TCP/IP headers are not in the first mbuf; we need
7450 * to do this the slow and painful way. Let's just 7455 * to do this the slow and painful way. Let's just
7451 * hope this doesn't happen very often. 7456 * hope this doesn't happen very often.
7452 */ 7457 */
7453 struct tcphdr th; 7458 struct tcphdr th;
7454 7459
7455 WM_Q_EVCNT_INCR(txq, tsopain); 7460 WM_Q_EVCNT_INCR(txq, tsopain);
7456 7461
7457 m_copydata(m0, hlen, sizeof(th), &th); 7462 m_copydata(m0, hlen, sizeof(th), &th);
7458 if (v4) { 7463 if (v4) {
7459 struct ip ip; 7464 struct ip ip;
7460 7465
7461 m_copydata(m0, offset, sizeof(ip), &ip); 7466 m_copydata(m0, offset, sizeof(ip), &ip);
7462 ip.ip_len = 0; 7467 ip.ip_len = 0;
7463 m_copyback(m0, 7468 m_copyback(m0,
7464 offset + offsetof(struct ip, ip_len), 7469 offset + offsetof(struct ip, ip_len),
7465 sizeof(ip.ip_len), &ip.ip_len); 7470 sizeof(ip.ip_len), &ip.ip_len);
7466 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr, 7471 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
7467 ip.ip_dst.s_addr, htons(IPPROTO_TCP)); 7472 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
7468 } else { 7473 } else {
7469 struct ip6_hdr ip6; 7474 struct ip6_hdr ip6;
7470 7475
7471 m_copydata(m0, offset, sizeof(ip6), &ip6); 7476 m_copydata(m0, offset, sizeof(ip6), &ip6);
7472 ip6.ip6_plen = 0; 7477 ip6.ip6_plen = 0;
7473 m_copyback(m0, 7478 m_copyback(m0,
7474 offset + offsetof(struct ip6_hdr, ip6_plen), 7479 offset + offsetof(struct ip6_hdr, ip6_plen),
7475 sizeof(ip6.ip6_plen), &ip6.ip6_plen); 7480 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
7476 th.th_sum = in6_cksum_phdr(&ip6.ip6_src, 7481 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
7477 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP)); 7482 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
7478 } 7483 }
7479 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum), 7484 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
7480 sizeof(th.th_sum), &th.th_sum); 7485 sizeof(th.th_sum), &th.th_sum);
7481 7486
7482 hlen += th.th_off << 2; 7487 hlen += th.th_off << 2;
7483 } else { 7488 } else {
7484 /* 7489 /*
7485 * TCP/IP headers are in the first mbuf; we can do 7490 * TCP/IP headers are in the first mbuf; we can do
7486 * this the easy way. 7491 * this the easy way.
7487 */ 7492 */
7488 struct tcphdr *th; 7493 struct tcphdr *th;
7489 7494
7490 if (v4) { 7495 if (v4) {
7491 struct ip *ip = 7496 struct ip *ip =
7492 (void *)(mtod(m0, char *) + offset); 7497 (void *)(mtod(m0, char *) + offset);
7493 th = (void *)(mtod(m0, char *) + hlen); 7498 th = (void *)(mtod(m0, char *) + hlen);
7494 7499
7495 ip->ip_len = 0; 7500 ip->ip_len = 0;
7496 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, 7501 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
7497 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 7502 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
7498 } else { 7503 } else {
7499 struct ip6_hdr *ip6 = 7504 struct ip6_hdr *ip6 =
7500 (void *)(mtod(m0, char *) + offset); 7505 (void *)(mtod(m0, char *) + offset);
7501 th = (void *)(mtod(m0, char *) + hlen); 7506 th = (void *)(mtod(m0, char *) + hlen);
7502 7507
7503 ip6->ip6_plen = 0; 7508 ip6->ip6_plen = 0;
7504 th->th_sum = in6_cksum_phdr(&ip6->ip6_src, 7509 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
7505 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP)); 7510 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
7506 } 7511 }
7507 hlen += th->th_off << 2; 7512 hlen += th->th_off << 2;
7508 } 7513 }
7509 7514
7510 if (v4) { 7515 if (v4) {
7511 WM_Q_EVCNT_INCR(txq, tso); 7516 WM_Q_EVCNT_INCR(txq, tso);
7512 cmdlen |= WTX_TCPIP_CMD_IP; 7517 cmdlen |= WTX_TCPIP_CMD_IP;
7513 } else { 7518 } else {
7514 WM_Q_EVCNT_INCR(txq, tso6); 7519 WM_Q_EVCNT_INCR(txq, tso6);
7515 ipcse = 0; 7520 ipcse = 0;
7516 } 7521 }
7517 cmd |= WTX_TCPIP_CMD_TSE; 7522 cmd |= WTX_TCPIP_CMD_TSE;
7518 cmdlen |= WTX_TCPIP_CMD_TSE | 7523 cmdlen |= WTX_TCPIP_CMD_TSE |
7519 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen); 7524 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
7520 seg = WTX_TCPIP_SEG_HDRLEN(hlen) | 7525 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
7521 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz); 7526 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
7522 } 7527 }
7523 7528
7524 /* 7529 /*
7525 * NOTE: Even if we're not using the IP or TCP/UDP checksum 7530 * NOTE: Even if we're not using the IP or TCP/UDP checksum
7526 * offload feature, if we load the context descriptor, we 7531 * offload feature, if we load the context descriptor, we
7527 * MUST provide valid values for IPCSS and TUCSS fields. 7532 * MUST provide valid values for IPCSS and TUCSS fields.
7528 */ 7533 */
7529 7534
7530 ipcs = WTX_TCPIP_IPCSS(offset) | 7535 ipcs = WTX_TCPIP_IPCSS(offset) |
7531 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) | 7536 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
7532 WTX_TCPIP_IPCSE(ipcse); 7537 WTX_TCPIP_IPCSE(ipcse);
7533 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) { 7538 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
7534 WM_Q_EVCNT_INCR(txq, ipsum); 7539 WM_Q_EVCNT_INCR(txq, ipsum);