Tue Dec 18 18:24:09 2018 UTC
Pull up following revision(s) (requested by knakahara in ticket #1138):

	sys/dev/pci/if_wm.c: revision 1.606

Fix txqueue assignment. Pointed out by yamaguchi@n.o, thanks.

E.g., when ncpu is six and nqueue is four, a sequence error occurs.
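
(Editorial illustration, not part of the pulled-up change: a queue-selection
helper of the kind wm(4) uses, written with hypothetical names, to show how
the CPU-to-queue mapping wraps.)

	/* Map the running CPU to one of nqueues Tx queues. */
	static inline unsigned int
	select_txqueue(unsigned int cpuid, unsigned int affinity_offset,
	    unsigned int ncpu, unsigned int nqueues)
	{
		/* Wrap within ncpu first, then fold onto the queue range. */
		return ((cpuid + ncpu - affinity_offset) % ncpu) % nqueues;
	}

With ncpu = 6 and nqueues = 4, CPUs 0-3 map to queues 0-3 and CPUs 4-5 fold
back onto queues 0-1; if this mapping is not computed consistently, packets of
one flow can be spread across queues and be transmitted out of order.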

XXX pullup-8


(martin)
diff -r1.508.4.27 -r1.508.4.28 src/sys/dev/pci/if_wm.c

cvs diff -r1.508.4.27 -r1.508.4.28 src/sys/dev/pci/if_wm.c

--- src/sys/dev/pci/if_wm.c 2018/12/04 11:21:32 1.508.4.27
+++ src/sys/dev/pci/if_wm.c 2018/12/18 18:24:09 1.508.4.28
@@ -1,1085 +1,1085 @@
-/* $NetBSD: if_wm.c,v 1.508.4.27 2018/12/04 11:21:32 martin Exp $ */
+/* $NetBSD: if_wm.c,v 1.508.4.28 2018/12/18 18:24:09 martin Exp $ */

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficiency Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.508.4.27 2018/12/04 11:21:32 martin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.508.4.28 2018/12/18 18:24:09 martin Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>		/* XXX for struct ip */
#include <netinet/in_systm.h>	/* XXX for struct ip */
#include <netinet/ip.h>		/* XXX for struct ip */
#include <netinet/ip6.h>	/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>	/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define WM_DEBUG_LINK	__BIT(0)
#define WM_DEBUG_TX	__BIT(1)
#define WM_DEBUG_RX	__BIT(2)
#define WM_DEBUG_GMII	__BIT(3)
#define WM_DEBUG_MANAGE	__BIT(4)
#define WM_DEBUG_NVM	__BIT(5)
#define WM_DEBUG_INIT	__BIT(6)
#define WM_DEBUG_LOCK	__BIT(7)
int wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * This device driver's max interrupt numbers.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size. Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544. We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet. Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames. If a mbuf chain has more than 64 DMA segments,
 * m_defrag() is called to reduce it.
 */
#define WM_NTXSEGS		64
#define WM_IFQUEUELEN		256
#define WM_TXQUEUELEN_MAX	64
#define WM_TXQUEUELEN_MAX_82547	16
#define WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define WM_NTXDESC_82542	256
#define WM_NTXDESC_82544	4096
#define WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))

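/*
 * Illustrative sketch (not part of if_wm.c): the EFBIG/m_defrag() fallback
 * described in the comment above, written as a stand-alone helper with
 * hypothetical names. On overflow the chain is coalesced once and reloaded.
 */
static int
example_load_tx_mbuf(bus_dma_tag_t dmat, bus_dmamap_t dmamap, struct mbuf *m0)
{
	int error;

	/* First attempt: map the chain as-is. */
	error = bus_dmamap_load_mbuf(dmat, dmamap, m0,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error != EFBIG)
		return error;

	/* Too many DMA segments: coalesce the chain and retry once. */
	if (m_defrag(m0, M_NOWAIT) == NULL)
		return EFBIG;

	return bus_dmamap_load_mbuf(dmat, dmamap, m0,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
}
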
#define WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */

#define WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size. We have one Rx buffer for normal
 * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized
 * packet. We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define WM_NRXDESC		256
#define WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname) \
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype) \
	do {							\
		snprintf((q)->qname##_##evname##_evcnt_name,	\
		    sizeof((q)->qname##_##evname##_evcnt_name),	\
		    "%s%02d%s", #qname, (qnum), #evname);	\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,	\
		    (evtype), NULL, (xname),			\
		    (q)->qname##_##evname##_evcnt_name);	\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname) \
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname) \
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum) \
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
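
/*
 * Illustrative note, not part of if_wm.c: WM_Q_EVCNT_DEFINE(txq, txdw)
 * expands to a name buffer plus a counter,
 *	char txq_txdw_evcnt_name[...];
 *	struct evcnt txq_ev_txdw;
 * and WM_Q_EVCNT_ATTACH() formats the name (e.g. "txq00txdw" for queue 0)
 * before registering it with evcnt_attach_dynamic().
 */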

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* a tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segment */
#define txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define txq_descs	txq_descs_u->sctxu_txdescs
#define txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)	/* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
						/* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)		/* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)		/* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)		/* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)		/* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)		/* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)		/* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)		/* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)	/* Pkt dropped in MAC desc ring */
						/* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)	/* Pkt dropped(toomany DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)		/* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)	/* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* a rx descriptor size */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segment */
#define rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define rxq_descs	rxq_descs_u->sctxu_rxdescs
#define rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define WM_MEDIATYPE_UNKNOWN		0x00
#define WM_MEDIATYPE_FIBER		0x01
#define WM_MEDIATYPE_COPPER		0x02
#define WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx processing repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx processing repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#define WM_RXCHAIN_RESET(rxq)					\
do {								\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;			\
	*(rxq)->rxq_tailp = NULL;				\
	(rxq)->rxq_len = 0;					\
} while (/*CONSTCOND*/0)

#define WM_RXCHAIN_LINK(rxq, m)					\
do {								\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);		\
	(rxq)->rxq_tailp = &(m)->m_next;			\
} while (/*CONSTCOND*/0)
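
/*
 * Illustrative note, not part of if_wm.c: rxq_tailp always points at the
 * m_next field of the last mbuf in the chain (or at rxq_head when the chain
 * is empty), so WM_RXCHAIN_LINK() appends in O(1):
 *	WM_RXCHAIN_RESET(rxq);		rxq_head == NULL
 *	WM_RXCHAIN_LINK(rxq, m1);	rxq_head == m1, rxq_tail == m1
 *	WM_RXCHAIN_LINK(rxq, m2);	m1->m_next == m2, rxq_tail == m2
 */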

#ifdef WM_EVENT_COUNTERS
#define WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname) \
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val) \
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define WM_EVCNT_INCR(ev)	/* nothing */
#define WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define CSR_READ(sc, reg) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define CSR_WRITE(sc, reg, val) \
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define CSR_WRITE_FLUSH(sc) \
	(void) CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg) \
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, \
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data) \
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, \
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg) \
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, \
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data) \
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, \
	    (reg) + sc->sc_flashreg_offset, (data))

#define WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define WM_CDTXADDR_HI(txq, x) \
	(sizeof(bus_addr_t) == 8 ? \
	    (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define WM_CDRXADDR_HI(rxq, x) \
	(sizeof(bus_addr_t) == 8 ? \
	    (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static void	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
781static int wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *, 781static int wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
782 struct wm_txsoft *, uint32_t *, uint32_t *, bool *); 782 struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
783static void wm_nq_start(struct ifnet *); 783static void wm_nq_start(struct ifnet *);
784static void wm_nq_start_locked(struct ifnet *); 784static void wm_nq_start_locked(struct ifnet *);
785static int wm_nq_transmit(struct ifnet *, struct mbuf *); 785static int wm_nq_transmit(struct ifnet *, struct mbuf *);
786static void wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *); 786static void wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
787static void wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, 787static void wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
788 bool); 788 bool);
789static void wm_deferred_start_locked(struct wm_txqueue *); 789static void wm_deferred_start_locked(struct wm_txqueue *);
790static void wm_handle_queue(void *); 790static void wm_handle_queue(void *);
791/* Interrupt */ 791/* Interrupt */
792static bool wm_txeof(struct wm_txqueue *, u_int); 792static bool wm_txeof(struct wm_txqueue *, u_int);
793static bool wm_rxeof(struct wm_rxqueue *, u_int); 793static bool wm_rxeof(struct wm_rxqueue *, u_int);
794static void wm_linkintr_gmii(struct wm_softc *, uint32_t); 794static void wm_linkintr_gmii(struct wm_softc *, uint32_t);
795static void wm_linkintr_tbi(struct wm_softc *, uint32_t); 795static void wm_linkintr_tbi(struct wm_softc *, uint32_t);
796static void wm_linkintr_serdes(struct wm_softc *, uint32_t); 796static void wm_linkintr_serdes(struct wm_softc *, uint32_t);
797static void wm_linkintr(struct wm_softc *, uint32_t); 797static void wm_linkintr(struct wm_softc *, uint32_t);
798static int wm_intr_legacy(void *); 798static int wm_intr_legacy(void *);
799static inline void wm_txrxintr_disable(struct wm_queue *); 799static inline void wm_txrxintr_disable(struct wm_queue *);
800static inline void wm_txrxintr_enable(struct wm_queue *); 800static inline void wm_txrxintr_enable(struct wm_queue *);
801static void wm_itrs_calculate(struct wm_softc *, struct wm_queue *); 801static void wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
802static int wm_txrxintr_msix(void *); 802static int wm_txrxintr_msix(void *);
803static int wm_linkintr_msix(void *); 803static int wm_linkintr_msix(void *);
804 804
805/* 805/*
806 * Media related. 806 * Media related.
807 * GMII, SGMII, TBI, SERDES and SFP. 807 * GMII, SGMII, TBI, SERDES and SFP.
808 */ 808 */
809/* Common */ 809/* Common */
810static void wm_tbi_serdes_set_linkled(struct wm_softc *); 810static void wm_tbi_serdes_set_linkled(struct wm_softc *);
811/* GMII related */ 811/* GMII related */
812static void wm_gmii_reset(struct wm_softc *); 812static void wm_gmii_reset(struct wm_softc *);
813static void wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t); 813static void wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
814static int wm_get_phy_id_82575(struct wm_softc *); 814static int wm_get_phy_id_82575(struct wm_softc *);
815static void wm_gmii_mediainit(struct wm_softc *, pci_product_id_t); 815static void wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
816static int wm_gmii_mediachange(struct ifnet *); 816static int wm_gmii_mediachange(struct ifnet *);
817static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *); 817static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
818static void wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int); 818static void wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
819static uint32_t wm_i82543_mii_recvbits(struct wm_softc *); 819static uint32_t wm_i82543_mii_recvbits(struct wm_softc *);
820static int wm_gmii_i82543_readreg(device_t, int, int); 820static int wm_gmii_i82543_readreg(device_t, int, int);
821static void wm_gmii_i82543_writereg(device_t, int, int, int); 821static void wm_gmii_i82543_writereg(device_t, int, int, int);
822static int wm_gmii_mdic_readreg(device_t, int, int); 822static int wm_gmii_mdic_readreg(device_t, int, int);
823static void wm_gmii_mdic_writereg(device_t, int, int, int); 823static void wm_gmii_mdic_writereg(device_t, int, int, int);
824static int wm_gmii_i82544_readreg(device_t, int, int); 824static int wm_gmii_i82544_readreg(device_t, int, int);
825static int wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *); 825static int wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
826static void wm_gmii_i82544_writereg(device_t, int, int, int); 826static void wm_gmii_i82544_writereg(device_t, int, int, int);
827static int wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t); 827static int wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
828static int wm_gmii_i80003_readreg(device_t, int, int); 828static int wm_gmii_i80003_readreg(device_t, int, int);
829static void wm_gmii_i80003_writereg(device_t, int, int, int); 829static void wm_gmii_i80003_writereg(device_t, int, int, int);
830static int wm_gmii_bm_readreg(device_t, int, int); 830static int wm_gmii_bm_readreg(device_t, int, int);
831static void wm_gmii_bm_writereg(device_t, int, int, int); 831static void wm_gmii_bm_writereg(device_t, int, int, int);
832static void wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int); 832static void wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
833static int wm_gmii_hv_readreg(device_t, int, int); 833static int wm_gmii_hv_readreg(device_t, int, int);
834static int wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *); 834static int wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
835static void wm_gmii_hv_writereg(device_t, int, int, int); 835static void wm_gmii_hv_writereg(device_t, int, int, int);
836static int wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t); 836static int wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
837static int wm_gmii_82580_readreg(device_t, int, int); 837static int wm_gmii_82580_readreg(device_t, int, int);
838static void wm_gmii_82580_writereg(device_t, int, int, int); 838static void wm_gmii_82580_writereg(device_t, int, int, int);
839static int wm_gmii_gs40g_readreg(device_t, int, int); 839static int wm_gmii_gs40g_readreg(device_t, int, int);
840static void wm_gmii_gs40g_writereg(device_t, int, int, int); 840static void wm_gmii_gs40g_writereg(device_t, int, int, int);
841static void wm_gmii_statchg(struct ifnet *); 841static void wm_gmii_statchg(struct ifnet *);
842/* 842/*
843 * kumeran related (80003, ICH* and PCH*). 843 * kumeran related (80003, ICH* and PCH*).
844 * These functions are not for accessing MII registers but for accessing 844 * These functions are not for accessing MII registers but for accessing
845 * kumeran specific registers. 845 * kumeran specific registers.
846 */ 846 */
847static int wm_kmrn_readreg(struct wm_softc *, int, uint16_t *); 847static int wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
848static int wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *); 848static int wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
849static int wm_kmrn_writereg(struct wm_softc *, int, uint16_t); 849static int wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
850static int wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t); 850static int wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
851/* SGMII */ 851/* SGMII */
852static bool wm_sgmii_uses_mdio(struct wm_softc *); 852static bool wm_sgmii_uses_mdio(struct wm_softc *);
853static int wm_sgmii_readreg(device_t, int, int); 853static int wm_sgmii_readreg(device_t, int, int);
854static void wm_sgmii_writereg(device_t, int, int, int); 854static void wm_sgmii_writereg(device_t, int, int, int);
855/* TBI related */ 855/* TBI related */
856static bool wm_tbi_havesignal(struct wm_softc *, uint32_t); 856static bool wm_tbi_havesignal(struct wm_softc *, uint32_t);
857static void wm_tbi_mediainit(struct wm_softc *); 857static void wm_tbi_mediainit(struct wm_softc *);
858static int wm_tbi_mediachange(struct ifnet *); 858static int wm_tbi_mediachange(struct ifnet *);
859static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *); 859static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
860static int wm_check_for_link(struct wm_softc *); 860static int wm_check_for_link(struct wm_softc *);
861static void wm_tbi_tick(struct wm_softc *); 861static void wm_tbi_tick(struct wm_softc *);
862/* SERDES related */ 862/* SERDES related */
863static void wm_serdes_power_up_link_82575(struct wm_softc *); 863static void wm_serdes_power_up_link_82575(struct wm_softc *);
864static int wm_serdes_mediachange(struct ifnet *); 864static int wm_serdes_mediachange(struct ifnet *);
865static void wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *); 865static void wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
866static void wm_serdes_tick(struct wm_softc *); 866static void wm_serdes_tick(struct wm_softc *);
867/* SFP related */ 867/* SFP related */
868static int wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *); 868static int wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
869static uint32_t wm_sfp_get_media_type(struct wm_softc *); 869static uint32_t wm_sfp_get_media_type(struct wm_softc *);
870 870
871/* 871/*
872 * NVM related. 872 * NVM related.
873 * Microwire, SPI (w/wo EERD) and Flash. 873 * Microwire, SPI (w/wo EERD) and Flash.
874 */ 874 */
875/* Misc functions */ 875/* Misc functions */
876static void wm_eeprom_sendbits(struct wm_softc *, uint32_t, int); 876static void wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
877static void wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int); 877static void wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
878static int wm_nvm_set_addrbits_size_eecd(struct wm_softc *); 878static int wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
879/* Microwire */ 879/* Microwire */
880static int wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *); 880static int wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
881/* SPI */ 881/* SPI */
882static int wm_nvm_ready_spi(struct wm_softc *); 882static int wm_nvm_ready_spi(struct wm_softc *);
883static int wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *); 883static int wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
884/* Using with EERD */ 884/* Using with EERD */
885static int wm_poll_eerd_eewr_done(struct wm_softc *, int); 885static int wm_poll_eerd_eewr_done(struct wm_softc *, int);
886static int wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *); 886static int wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
887/* Flash */ 887/* Flash */
888static int wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *, 888static int wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
889 unsigned int *); 889 unsigned int *);
890static int32_t wm_ich8_cycle_init(struct wm_softc *); 890static int32_t wm_ich8_cycle_init(struct wm_softc *);
891static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t); 891static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
892static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t, 892static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
893 uint32_t *); 893 uint32_t *);
894static int32_t wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *); 894static int32_t wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
895static int32_t wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *); 895static int32_t wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
896static int32_t wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *); 896static int32_t wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
897static int wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *); 897static int wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
898static int wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *); 898static int wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
899/* iNVM */ 899/* iNVM */
900static int wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *); 900static int wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
901static int wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *); 901static int wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
902/* Lock, detect NVM type, validate checksum and read 902/* Lock, detect NVM type, validate checksum and read
903static int wm_nvm_is_onboard_eeprom(struct wm_softc *); 903static int wm_nvm_is_onboard_eeprom(struct wm_softc *);
904static int wm_nvm_flash_presence_i210(struct wm_softc *); 904static int wm_nvm_flash_presence_i210(struct wm_softc *);
905static int wm_nvm_validate_checksum(struct wm_softc *); 905static int wm_nvm_validate_checksum(struct wm_softc *);
906static void wm_nvm_version_invm(struct wm_softc *); 906static void wm_nvm_version_invm(struct wm_softc *);
907static void wm_nvm_version(struct wm_softc *); 907static void wm_nvm_version(struct wm_softc *);
908static int wm_nvm_read(struct wm_softc *, int, int, uint16_t *); 908static int wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
909 909
910/* 910/*
911 * Hardware semaphores. 911 * Hardware semaphores.
912 * Very complex... 912 * Very complex...
913 */ 913 */
914static int wm_get_null(struct wm_softc *); 914static int wm_get_null(struct wm_softc *);
915static void wm_put_null(struct wm_softc *); 915static void wm_put_null(struct wm_softc *);
916static int wm_get_eecd(struct wm_softc *); 916static int wm_get_eecd(struct wm_softc *);
917static void wm_put_eecd(struct wm_softc *); 917static void wm_put_eecd(struct wm_softc *);
918static int wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */ 918static int wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
919static void wm_put_swsm_semaphore(struct wm_softc *); 919static void wm_put_swsm_semaphore(struct wm_softc *);
920static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t); 920static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
921static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t); 921static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
922static int wm_get_nvm_80003(struct wm_softc *); 922static int wm_get_nvm_80003(struct wm_softc *);
923static void wm_put_nvm_80003(struct wm_softc *); 923static void wm_put_nvm_80003(struct wm_softc *);
924static int wm_get_nvm_82571(struct wm_softc *); 924static int wm_get_nvm_82571(struct wm_softc *);
925static void wm_put_nvm_82571(struct wm_softc *); 925static void wm_put_nvm_82571(struct wm_softc *);
926static int wm_get_phy_82575(struct wm_softc *); 926static int wm_get_phy_82575(struct wm_softc *);
927static void wm_put_phy_82575(struct wm_softc *); 927static void wm_put_phy_82575(struct wm_softc *);
928static int wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */ 928static int wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
929static void wm_put_swfwhw_semaphore(struct wm_softc *); 929static void wm_put_swfwhw_semaphore(struct wm_softc *);
930static int wm_get_swflag_ich8lan(struct wm_softc *); /* For PHY */ 930static int wm_get_swflag_ich8lan(struct wm_softc *); /* For PHY */
931static void wm_put_swflag_ich8lan(struct wm_softc *); 931static void wm_put_swflag_ich8lan(struct wm_softc *);
932static int wm_get_nvm_ich8lan(struct wm_softc *); 932static int wm_get_nvm_ich8lan(struct wm_softc *);
933static void wm_put_nvm_ich8lan(struct wm_softc *); 933static void wm_put_nvm_ich8lan(struct wm_softc *);
934static int wm_get_hw_semaphore_82573(struct wm_softc *); 934static int wm_get_hw_semaphore_82573(struct wm_softc *);
935static void wm_put_hw_semaphore_82573(struct wm_softc *); 935static void wm_put_hw_semaphore_82573(struct wm_softc *);
936 936
937/* 937/*
938 * Management mode and power management related subroutines. 938 * Management mode and power management related subroutines.
939 * BMC, AMT, suspend/resume and EEE. 939 * BMC, AMT, suspend/resume and EEE.
940 */ 940 */
941#if 0 941#if 0
942static int wm_check_mng_mode(struct wm_softc *); 942static int wm_check_mng_mode(struct wm_softc *);
943static int wm_check_mng_mode_ich8lan(struct wm_softc *); 943static int wm_check_mng_mode_ich8lan(struct wm_softc *);
944static int wm_check_mng_mode_82574(struct wm_softc *); 944static int wm_check_mng_mode_82574(struct wm_softc *);
945static int wm_check_mng_mode_generic(struct wm_softc *); 945static int wm_check_mng_mode_generic(struct wm_softc *);
946#endif 946#endif
947static int wm_enable_mng_pass_thru(struct wm_softc *); 947static int wm_enable_mng_pass_thru(struct wm_softc *);
948static bool wm_phy_resetisblocked(struct wm_softc *); 948static bool wm_phy_resetisblocked(struct wm_softc *);
949static void wm_get_hw_control(struct wm_softc *); 949static void wm_get_hw_control(struct wm_softc *);
950static void wm_release_hw_control(struct wm_softc *); 950static void wm_release_hw_control(struct wm_softc *);
951static void wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool); 951static void wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
952static void wm_smbustopci(struct wm_softc *); 952static void wm_smbustopci(struct wm_softc *);
953static void wm_init_manageability(struct wm_softc *); 953static void wm_init_manageability(struct wm_softc *);
954static void wm_release_manageability(struct wm_softc *); 954static void wm_release_manageability(struct wm_softc *);
955static void wm_get_wakeup(struct wm_softc *); 955static void wm_get_wakeup(struct wm_softc *);
956static int wm_ulp_disable(struct wm_softc *); 956static int wm_ulp_disable(struct wm_softc *);
957static void wm_enable_phy_wakeup(struct wm_softc *); 957static void wm_enable_phy_wakeup(struct wm_softc *);
958static void wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *); 958static void wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
959static void wm_suspend_workarounds_ich8lan(struct wm_softc *); 959static void wm_suspend_workarounds_ich8lan(struct wm_softc *);
960static void wm_enable_wakeup(struct wm_softc *); 960static void wm_enable_wakeup(struct wm_softc *);
961static void wm_disable_aspm(struct wm_softc *); 961static void wm_disable_aspm(struct wm_softc *);
962/* LPLU (Low Power Link Up) */ 962/* LPLU (Low Power Link Up) */
963static void wm_lplu_d0_disable(struct wm_softc *); 963static void wm_lplu_d0_disable(struct wm_softc *);
964/* EEE */ 964/* EEE */
965static void wm_set_eee_i350(struct wm_softc *); 965static void wm_set_eee_i350(struct wm_softc *);
966 966
967/* 967/*
968 * Workarounds (mainly PHY related). 968 * Workarounds (mainly PHY related).
969 * Basically, PHY's workarounds are in the PHY drivers. 969 * Basically, PHY's workarounds are in the PHY drivers.
970 */ 970 */
971static void wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *); 971static void wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
972static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *); 972static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
973static void wm_hv_phy_workaround_ich8lan(struct wm_softc *); 973static void wm_hv_phy_workaround_ich8lan(struct wm_softc *);
974static void wm_lv_phy_workaround_ich8lan(struct wm_softc *); 974static void wm_lv_phy_workaround_ich8lan(struct wm_softc *);
975static int wm_k1_workaround_lpt_lp(struct wm_softc *, bool); 975static int wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
976static int wm_k1_gig_workaround_hv(struct wm_softc *, int); 976static int wm_k1_gig_workaround_hv(struct wm_softc *, int);
977static int wm_k1_workaround_lv(struct wm_softc *); 977static int wm_k1_workaround_lv(struct wm_softc *);
978static int wm_link_stall_workaround_hv(struct wm_softc *); 978static int wm_link_stall_workaround_hv(struct wm_softc *);
979static void wm_set_mdio_slow_mode_hv(struct wm_softc *); 979static void wm_set_mdio_slow_mode_hv(struct wm_softc *);
980static void wm_configure_k1_ich8lan(struct wm_softc *, int); 980static void wm_configure_k1_ich8lan(struct wm_softc *, int);
981static void wm_reset_init_script_82575(struct wm_softc *); 981static void wm_reset_init_script_82575(struct wm_softc *);
982static void wm_reset_mdicnfg_82580(struct wm_softc *); 982static void wm_reset_mdicnfg_82580(struct wm_softc *);
983static bool wm_phy_is_accessible_pchlan(struct wm_softc *); 983static bool wm_phy_is_accessible_pchlan(struct wm_softc *);
984static void wm_toggle_lanphypc_pch_lpt(struct wm_softc *); 984static void wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
985static int wm_platform_pm_pch_lpt(struct wm_softc *, bool); 985static int wm_platform_pm_pch_lpt(struct wm_softc *, bool);
986static void wm_pll_workaround_i210(struct wm_softc *); 986static void wm_pll_workaround_i210(struct wm_softc *);
987static void wm_legacy_irq_quirk_spt(struct wm_softc *); 987static void wm_legacy_irq_quirk_spt(struct wm_softc *);
988 988
989CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc), 989CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
990 wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN); 990 wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
991 991
992/* 992/*
993 * Devices supported by this driver. 993 * Devices supported by this driver.
994 */ 994 */
995static const struct wm_product { 995static const struct wm_product {
996 pci_vendor_id_t wmp_vendor; 996 pci_vendor_id_t wmp_vendor;
997 pci_product_id_t wmp_product; 997 pci_product_id_t wmp_product;
998 const char *wmp_name; 998 const char *wmp_name;
999 wm_chip_type wmp_type; 999 wm_chip_type wmp_type;
1000 uint32_t wmp_flags; 1000 uint32_t wmp_flags;
1001#define WMP_F_UNKNOWN WM_MEDIATYPE_UNKNOWN 1001#define WMP_F_UNKNOWN WM_MEDIATYPE_UNKNOWN
1002#define WMP_F_FIBER WM_MEDIATYPE_FIBER 1002#define WMP_F_FIBER WM_MEDIATYPE_FIBER
1003#define WMP_F_COPPER WM_MEDIATYPE_COPPER 1003#define WMP_F_COPPER WM_MEDIATYPE_COPPER
1004#define WMP_F_SERDES WM_MEDIATYPE_SERDES 1004#define WMP_F_SERDES WM_MEDIATYPE_SERDES
1005#define WMP_MEDIATYPE(x) ((x) & 0x03) 1005#define WMP_MEDIATYPE(x) ((x) & 0x03)
1006} wm_products[] = { 1006} wm_products[] = {
1007 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542, 1007 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
1008 "Intel i82542 1000BASE-X Ethernet", 1008 "Intel i82542 1000BASE-X Ethernet",
1009 WM_T_82542_2_1, WMP_F_FIBER }, 1009 WM_T_82542_2_1, WMP_F_FIBER },
1010 1010
1011 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER, 1011 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER,
1012 "Intel i82543GC 1000BASE-X Ethernet", 1012 "Intel i82543GC 1000BASE-X Ethernet",
1013 WM_T_82543, WMP_F_FIBER }, 1013 WM_T_82543, WMP_F_FIBER },
1014 1014
1015 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER, 1015 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER,
1016 "Intel i82543GC 1000BASE-T Ethernet", 1016 "Intel i82543GC 1000BASE-T Ethernet",
1017 WM_T_82543, WMP_F_COPPER }, 1017 WM_T_82543, WMP_F_COPPER },
1018 1018
1019 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER, 1019 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER,
1020 "Intel i82544EI 1000BASE-T Ethernet", 1020 "Intel i82544EI 1000BASE-T Ethernet",
1021 WM_T_82544, WMP_F_COPPER }, 1021 WM_T_82544, WMP_F_COPPER },
1022 1022
1023 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER, 1023 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER,
1024 "Intel i82544EI 1000BASE-X Ethernet", 1024 "Intel i82544EI 1000BASE-X Ethernet",
1025 WM_T_82544, WMP_F_FIBER }, 1025 WM_T_82544, WMP_F_FIBER },
1026 1026
1027 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER, 1027 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER,
1028 "Intel i82544GC 1000BASE-T Ethernet", 1028 "Intel i82544GC 1000BASE-T Ethernet",
1029 WM_T_82544, WMP_F_COPPER }, 1029 WM_T_82544, WMP_F_COPPER },
1030 1030
1031 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM, 1031 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM,
1032 "Intel i82544GC (LOM) 1000BASE-T Ethernet", 1032 "Intel i82544GC (LOM) 1000BASE-T Ethernet",
1033 WM_T_82544, WMP_F_COPPER }, 1033 WM_T_82544, WMP_F_COPPER },
1034 1034
1035 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM, 1035 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM,
1036 "Intel i82540EM 1000BASE-T Ethernet", 1036 "Intel i82540EM 1000BASE-T Ethernet",
1037 WM_T_82540, WMP_F_COPPER }, 1037 WM_T_82540, WMP_F_COPPER },
1038 1038
1039 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM, 1039 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM,
1040 "Intel i82540EM (LOM) 1000BASE-T Ethernet", 1040 "Intel i82540EM (LOM) 1000BASE-T Ethernet",
1041 WM_T_82540, WMP_F_COPPER }, 1041 WM_T_82540, WMP_F_COPPER },
1042 1042
1043 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM, 1043 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM,
1044 "Intel i82540EP 1000BASE-T Ethernet", 1044 "Intel i82540EP 1000BASE-T Ethernet",
1045 WM_T_82540, WMP_F_COPPER }, 1045 WM_T_82540, WMP_F_COPPER },
1046 1046
1047 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP, 1047 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP,
1048 "Intel i82540EP 1000BASE-T Ethernet", 1048 "Intel i82540EP 1000BASE-T Ethernet",
1049 WM_T_82540, WMP_F_COPPER }, 1049 WM_T_82540, WMP_F_COPPER },
1050 1050
1051 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP, 1051 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP,
1052 "Intel i82540EP 1000BASE-T Ethernet", 1052 "Intel i82540EP 1000BASE-T Ethernet",
1053 WM_T_82540, WMP_F_COPPER }, 1053 WM_T_82540, WMP_F_COPPER },
1054 1054
1055 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER, 1055 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER,
1056 "Intel i82545EM 1000BASE-T Ethernet", 1056 "Intel i82545EM 1000BASE-T Ethernet",
1057 WM_T_82545, WMP_F_COPPER }, 1057 WM_T_82545, WMP_F_COPPER },
1058 1058
1059 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER, 1059 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER,
1060 "Intel i82545GM 1000BASE-T Ethernet", 1060 "Intel i82545GM 1000BASE-T Ethernet",
1061 WM_T_82545_3, WMP_F_COPPER }, 1061 WM_T_82545_3, WMP_F_COPPER },
1062 1062
1063 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER, 1063 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER,
1064 "Intel i82545GM 1000BASE-X Ethernet", 1064 "Intel i82545GM 1000BASE-X Ethernet",
1065 WM_T_82545_3, WMP_F_FIBER }, 1065 WM_T_82545_3, WMP_F_FIBER },
1066 1066
1067 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES, 1067 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES,
1068 "Intel i82545GM Gigabit Ethernet (SERDES)", 1068 "Intel i82545GM Gigabit Ethernet (SERDES)",
1069 WM_T_82545_3, WMP_F_SERDES }, 1069 WM_T_82545_3, WMP_F_SERDES },
1070 1070
1071 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER, 1071 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER,
1072 "Intel i82546EB 1000BASE-T Ethernet", 1072 "Intel i82546EB 1000BASE-T Ethernet",
1073 WM_T_82546, WMP_F_COPPER }, 1073 WM_T_82546, WMP_F_COPPER },
1074 1074
1075 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD, 1075 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD,
1076 "Intel i82546EB 1000BASE-T Ethernet", 1076 "Intel i82546EB 1000BASE-T Ethernet",
1077 WM_T_82546, WMP_F_COPPER }, 1077 WM_T_82546, WMP_F_COPPER },
1078 1078
1079 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER, 1079 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER,
1080 "Intel i82545EM 1000BASE-X Ethernet", 1080 "Intel i82545EM 1000BASE-X Ethernet",
1081 WM_T_82545, WMP_F_FIBER }, 1081 WM_T_82545, WMP_F_FIBER },
1082 1082
1083 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER, 1083 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER,
1084 "Intel i82546EB 1000BASE-X Ethernet", 1084 "Intel i82546EB 1000BASE-X Ethernet",
1085 WM_T_82546, WMP_F_FIBER }, 1085 WM_T_82546, WMP_F_FIBER },
@@ -6215,1999 +6215,1999 @@ wm_82547_txfifo_stall(void *arg) @@ -6215,1999 +6215,1999 @@ wm_82547_txfifo_stall(void *arg)
6215 CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr); 6215 CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
6216 CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr); 6216 CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
6217 CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr); 6217 CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
6218 CSR_WRITE(sc, WMREG_TCTL, tctl); 6218 CSR_WRITE(sc, WMREG_TCTL, tctl);
6219 CSR_WRITE_FLUSH(sc); 6219 CSR_WRITE_FLUSH(sc);
6220 6220
6221 txq->txq_fifo_head = 0; 6221 txq->txq_fifo_head = 0;
6222 txq->txq_fifo_stall = 0; 6222 txq->txq_fifo_stall = 0;
6223 wm_start_locked(&sc->sc_ethercom.ec_if); 6223 wm_start_locked(&sc->sc_ethercom.ec_if);
6224 } else { 6224 } else {
6225 /* 6225 /*
6226 * Still waiting for packets to drain; try again in 6226 * Still waiting for packets to drain; try again in
6227 * another tick. 6227 * another tick.
6228 */ 6228 */
6229 callout_schedule(&sc->sc_txfifo_ch, 1); 6229 callout_schedule(&sc->sc_txfifo_ch, 1);
6230 } 6230 }
6231 } 6231 }
6232 6232
6233out: 6233out:
6234 mutex_exit(txq->txq_lock); 6234 mutex_exit(txq->txq_lock);
6235} 6235}
6236 6236
6237/* 6237/*
6238 * wm_82547_txfifo_bugchk: 6238 * wm_82547_txfifo_bugchk:
6239 * 6239 *
6240 * Check for bug condition in the 82547 Tx FIFO. We need to 6240 * Check for bug condition in the 82547 Tx FIFO. We need to
6241 * prevent enqueueing a packet that would wrap around the end 6241 * prevent enqueueing a packet that would wrap around the end
6242 * of the Tx FIFO ring buffer, otherwise the chip will croak. 6242 * of the Tx FIFO ring buffer, otherwise the chip will croak.
6243 * 6243 *
6244 * We do this by checking the amount of space before the end 6244 * We do this by checking the amount of space before the end
6245 * of the Tx FIFO buffer. If the packet will not fit, we "stall" 6245 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
6246 * the Tx FIFO, wait for all remaining packets to drain, reset 6246 * the Tx FIFO, wait for all remaining packets to drain, reset
6247 * the internal FIFO pointers to the beginning, and restart 6247 * the internal FIFO pointers to the beginning, and restart
6248 * transmission on the interface. 6248 * transmission on the interface.
6249 */ 6249 */
6250#define WM_FIFO_HDR 0x10 6250#define WM_FIFO_HDR 0x10
6251#define WM_82547_PAD_LEN 0x3e0 6251#define WM_82547_PAD_LEN 0x3e0
6252static int 6252static int
6253wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0) 6253wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
6254{ 6254{
6255 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq; 6255 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6256 int space = txq->txq_fifo_size - txq->txq_fifo_head; 6256 int space = txq->txq_fifo_size - txq->txq_fifo_head;
6257 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR); 6257 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
6258 6258
6259 /* Just return if already stalled. */ 6259 /* Just return if already stalled. */
6260 if (txq->txq_fifo_stall) 6260 if (txq->txq_fifo_stall)
6261 return 1; 6261 return 1;
6262 6262
6263 if (sc->sc_mii.mii_media_active & IFM_FDX) { 6263 if (sc->sc_mii.mii_media_active & IFM_FDX) {
6264 /* Stall only occurs in half-duplex mode. */ 6264 /* Stall only occurs in half-duplex mode. */
6265 goto send_packet; 6265 goto send_packet;
6266 } 6266 }
6267 6267
6268 if (len >= WM_82547_PAD_LEN + space) { 6268 if (len >= WM_82547_PAD_LEN + space) {
6269 txq->txq_fifo_stall = 1; 6269 txq->txq_fifo_stall = 1;
6270 callout_schedule(&sc->sc_txfifo_ch, 1); 6270 callout_schedule(&sc->sc_txfifo_ch, 1);
6271 return 1; 6271 return 1;
6272 } 6272 }
6273 6273
6274 send_packet: 6274 send_packet:
6275 txq->txq_fifo_head += len; 6275 txq->txq_fifo_head += len;
6276 if (txq->txq_fifo_head >= txq->txq_fifo_size) 6276 if (txq->txq_fifo_head >= txq->txq_fifo_size)
6277 txq->txq_fifo_head -= txq->txq_fifo_size; 6277 txq->txq_fifo_head -= txq->txq_fifo_size;
6278 6278
6279 return 0; 6279 return 0;
6280} 6280}
6281 6281
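A minimal worked sketch of the check described above, using hypothetical FIFO geometry (txq_fifo_size and txq_fifo_head are maintained elsewhere in the driver; roundup() is the macro from <sys/param.h>):

	/* Hypothetical values, only to illustrate the wm_82547_txfifo_bugchk() math. */
	int fifo_size = 0x2000;			/* assumed Tx FIFO size */
	int fifo_head = 0x1f00;			/* assumed current FIFO head */
	int pkt_len   = 1514;			/* full-sized Ethernet frame */

	int space = fifo_size - fifo_head;	/* 0x100 bytes left before the end */
	int len = roundup(pkt_len + WM_FIFO_HDR, WM_FIFO_HDR);	/* 1536 (0x600) */

	if (len >= WM_82547_PAD_LEN + space) {	/* 0x600 >= 0x3e0 + 0x100 -> stall */
		/* Packet could wrap: mark the FIFO stalled and wait for it to drain. */
	} else {
		/* Safe to enqueue: advance the head, wrapping modulo fifo_size. */
		fifo_head += len;
		if (fifo_head >= fifo_size)
			fifo_head -= fifo_size;
	}

As in the function above, only the half-duplex case performs this check; full-duplex links jump straight to send_packet.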
6282static int 6282static int
6283wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq) 6283wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
6284{ 6284{
6285 int error; 6285 int error;
6286 6286
6287 /* 6287 /*
6288 * Allocate the control data structures, and create and load the 6288 * Allocate the control data structures, and create and load the
6289 * DMA map for it. 6289 * DMA map for it.
6290 * 6290 *
6291 * NOTE: All Tx descriptors must be in the same 4G segment of 6291 * NOTE: All Tx descriptors must be in the same 4G segment of
6292 * memory. So must Rx descriptors. We simplify by allocating 6292 * memory. So must Rx descriptors. We simplify by allocating
6293 * both sets within the same 4G segment. 6293 * both sets within the same 4G segment.
6294 */ 6294 */
6295 if (sc->sc_type < WM_T_82544) 6295 if (sc->sc_type < WM_T_82544)
6296 WM_NTXDESC(txq) = WM_NTXDESC_82542; 6296 WM_NTXDESC(txq) = WM_NTXDESC_82542;
6297 else 6297 else
6298 WM_NTXDESC(txq) = WM_NTXDESC_82544; 6298 WM_NTXDESC(txq) = WM_NTXDESC_82544;
6299 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 6299 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6300 txq->txq_descsize = sizeof(nq_txdesc_t); 6300 txq->txq_descsize = sizeof(nq_txdesc_t);
6301 else 6301 else
6302 txq->txq_descsize = sizeof(wiseman_txdesc_t); 6302 txq->txq_descsize = sizeof(wiseman_txdesc_t);
6303 6303
6304 if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 6304 if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
6305 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg, 6305 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
6306 1, &txq->txq_desc_rseg, 0)) != 0) { 6306 1, &txq->txq_desc_rseg, 0)) != 0) {
6307 aprint_error_dev(sc->sc_dev, 6307 aprint_error_dev(sc->sc_dev,
6308 "unable to allocate TX control data, error = %d\n", 6308 "unable to allocate TX control data, error = %d\n",
6309 error); 6309 error);
6310 goto fail_0; 6310 goto fail_0;
6311 } 6311 }
6312 6312
6313 if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg, 6313 if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
6314 txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq), 6314 txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
6315 (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) { 6315 (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
6316 aprint_error_dev(sc->sc_dev, 6316 aprint_error_dev(sc->sc_dev,
6317 "unable to map TX control data, error = %d\n", error); 6317 "unable to map TX control data, error = %d\n", error);
6318 goto fail_1; 6318 goto fail_1;
6319 } 6319 }
6320 6320
6321 if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1, 6321 if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
6322 WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) { 6322 WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
6323 aprint_error_dev(sc->sc_dev, 6323 aprint_error_dev(sc->sc_dev,
6324 "unable to create TX control data DMA map, error = %d\n", 6324 "unable to create TX control data DMA map, error = %d\n",
6325 error); 6325 error);
6326 goto fail_2; 6326 goto fail_2;
6327 } 6327 }
6328 6328
6329 if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap, 6329 if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
6330 txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) { 6330 txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
6331 aprint_error_dev(sc->sc_dev, 6331 aprint_error_dev(sc->sc_dev,
6332 "unable to load TX control data DMA map, error = %d\n", 6332 "unable to load TX control data DMA map, error = %d\n",
6333 error); 6333 error);
6334 goto fail_3; 6334 goto fail_3;
6335 } 6335 }
6336 6336
6337 return 0; 6337 return 0;
6338 6338
6339 fail_3: 6339 fail_3:
6340 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap); 6340 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
6341 fail_2: 6341 fail_2:
6342 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u, 6342 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
6343 WM_TXDESCS_SIZE(txq)); 6343 WM_TXDESCS_SIZE(txq));
6344 fail_1: 6344 fail_1:
6345 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg); 6345 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
6346 fail_0: 6346 fail_0:
6347 return error; 6347 return error;
6348} 6348}
6349 6349
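The NOTE above about keeping all descriptors within one 4G segment appears to be enforced by the boundary argument passed to bus_dmamem_alloc(9); the same call made in wm_alloc_tx_descs() is re-shown here with explanatory comments added for illustration:

	error = bus_dmamem_alloc(sc->sc_dmat,
	    WM_TXDESCS_SIZE(txq),		/* size of the Tx descriptor ring */
	    PAGE_SIZE,				/* alignment */
	    (bus_size_t)0x100000000ULL,		/* boundary: segment may not cross 4 GB */
	    &txq->txq_desc_seg, 1, &txq->txq_desc_rseg, 0);

With a non-zero boundary, bus_dmamem_alloc(9) guarantees that the returned segment does not cross an address that is a multiple of that boundary, so the whole ring ends up inside a single 4 GB region; wm_alloc_rx_descs() below uses the same boundary for the Rx descriptor ring.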
6350static void 6350static void
6351wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq) 6351wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
6352{ 6352{
6353 6353
6354 bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap); 6354 bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
6355 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap); 6355 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
6356 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u, 6356 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
6357 WM_TXDESCS_SIZE(txq)); 6357 WM_TXDESCS_SIZE(txq));
6358 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg); 6358 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
6359} 6359}
6360 6360
6361static int 6361static int
6362wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq) 6362wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
6363{ 6363{
6364 int error; 6364 int error;
6365 size_t rxq_descs_size; 6365 size_t rxq_descs_size;
6366 6366
6367 /* 6367 /*
6368 * Allocate the control data structures, and create and load the 6368 * Allocate the control data structures, and create and load the
6369 * DMA map for it. 6369 * DMA map for it.
6370 * 6370 *
6371 * NOTE: All Tx descriptors must be in the same 4G segment of 6371 * NOTE: All Tx descriptors must be in the same 4G segment of
6372 * memory. So must Rx descriptors. We simplify by allocating 6372 * memory. So must Rx descriptors. We simplify by allocating
6373 * both sets within the same 4G segment. 6373 * both sets within the same 4G segment.
6374 */ 6374 */
6375 rxq->rxq_ndesc = WM_NRXDESC; 6375 rxq->rxq_ndesc = WM_NRXDESC;
6376 if (sc->sc_type == WM_T_82574) 6376 if (sc->sc_type == WM_T_82574)
6377 rxq->rxq_descsize = sizeof(ext_rxdesc_t); 6377 rxq->rxq_descsize = sizeof(ext_rxdesc_t);
6378 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 6378 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6379 rxq->rxq_descsize = sizeof(nq_rxdesc_t); 6379 rxq->rxq_descsize = sizeof(nq_rxdesc_t);
6380 else 6380 else
6381 rxq->rxq_descsize = sizeof(wiseman_rxdesc_t); 6381 rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
6382 rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc; 6382 rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
6383 6383
6384 if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size, 6384 if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
6385 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg, 6385 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
6386 1, &rxq->rxq_desc_rseg, 0)) != 0) { 6386 1, &rxq->rxq_desc_rseg, 0)) != 0) {
6387 aprint_error_dev(sc->sc_dev, 6387 aprint_error_dev(sc->sc_dev,
6388 "unable to allocate RX control data, error = %d\n", 6388 "unable to allocate RX control data, error = %d\n",
6389 error); 6389 error);
6390 goto fail_0; 6390 goto fail_0;
6391 } 6391 }
6392 6392
6393 if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg, 6393 if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
6394 rxq->rxq_desc_rseg, rxq_descs_size, 6394 rxq->rxq_desc_rseg, rxq_descs_size,
6395 (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) { 6395 (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
6396 aprint_error_dev(sc->sc_dev, 6396 aprint_error_dev(sc->sc_dev,
6397 "unable to map RX control data, error = %d\n", error); 6397 "unable to map RX control data, error = %d\n", error);
6398 goto fail_1; 6398 goto fail_1;
6399 } 6399 }
6400 6400
6401 if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1, 6401 if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
6402 rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) { 6402 rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
6403 aprint_error_dev(sc->sc_dev, 6403 aprint_error_dev(sc->sc_dev,
6404 "unable to create RX control data DMA map, error = %d\n", 6404 "unable to create RX control data DMA map, error = %d\n",
6405 error); 6405 error);
6406 goto fail_2; 6406 goto fail_2;
6407 } 6407 }
6408 6408
6409 if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap, 6409 if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
6410 rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) { 6410 rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
6411 aprint_error_dev(sc->sc_dev, 6411 aprint_error_dev(sc->sc_dev,
6412 "unable to load RX control data DMA map, error = %d\n", 6412 "unable to load RX control data DMA map, error = %d\n",
6413 error); 6413 error);
6414 goto fail_3; 6414 goto fail_3;
6415 } 6415 }
6416 6416
6417 return 0; 6417 return 0;
6418 6418
6419 fail_3: 6419 fail_3:
6420 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap); 6420 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
6421 fail_2: 6421 fail_2:
6422 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u, 6422 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
6423 rxq_descs_size); 6423 rxq_descs_size);
6424 fail_1: 6424 fail_1:
6425 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg); 6425 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
6426 fail_0: 6426 fail_0:
6427 return error; 6427 return error;
6428} 6428}
6429 6429
6430static void 6430static void
6431wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq) 6431wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
6432{ 6432{
6433 6433
6434 bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap); 6434 bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
6435 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap); 6435 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
6436 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u, 6436 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
6437 rxq->rxq_descsize * rxq->rxq_ndesc); 6437 rxq->rxq_descsize * rxq->rxq_ndesc);
6438 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg); 6438 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
6439} 6439}
6440 6440
6441 6441
6442static int 6442static int
6443wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq) 6443wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
6444{ 6444{
6445 int i, error; 6445 int i, error;
6446 6446
6447 /* Create the transmit buffer DMA maps. */ 6447 /* Create the transmit buffer DMA maps. */
6448 WM_TXQUEUELEN(txq) = 6448 WM_TXQUEUELEN(txq) =
6449 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ? 6449 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
6450 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX; 6450 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
6451 for (i = 0; i < WM_TXQUEUELEN(txq); i++) { 6451 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6452 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA, 6452 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
6453 WM_NTXSEGS, WTX_MAX_LEN, 0, 0, 6453 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
6454 &txq->txq_soft[i].txs_dmamap)) != 0) { 6454 &txq->txq_soft[i].txs_dmamap)) != 0) {
6455 aprint_error_dev(sc->sc_dev, 6455 aprint_error_dev(sc->sc_dev,
6456 "unable to create Tx DMA map %d, error = %d\n", 6456 "unable to create Tx DMA map %d, error = %d\n",
6457 i, error); 6457 i, error);
6458 goto fail; 6458 goto fail;
6459 } 6459 }
6460 } 6460 }
6461 6461
6462 return 0; 6462 return 0;
6463 6463
6464 fail: 6464 fail:
6465 for (i = 0; i < WM_TXQUEUELEN(txq); i++) { 6465 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6466 if (txq->txq_soft[i].txs_dmamap != NULL) 6466 if (txq->txq_soft[i].txs_dmamap != NULL)
6467 bus_dmamap_destroy(sc->sc_dmat, 6467 bus_dmamap_destroy(sc->sc_dmat,
6468 txq->txq_soft[i].txs_dmamap); 6468 txq->txq_soft[i].txs_dmamap);
6469 } 6469 }
6470 return error; 6470 return error;
6471} 6471}
6472 6472
6473static void 6473static void
6474wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq) 6474wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
6475{ 6475{
6476 int i; 6476 int i;
6477 6477
6478 for (i = 0; i < WM_TXQUEUELEN(txq); i++) { 6478 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6479 if (txq->txq_soft[i].txs_dmamap != NULL) 6479 if (txq->txq_soft[i].txs_dmamap != NULL)
6480 bus_dmamap_destroy(sc->sc_dmat, 6480 bus_dmamap_destroy(sc->sc_dmat,
6481 txq->txq_soft[i].txs_dmamap); 6481 txq->txq_soft[i].txs_dmamap);
6482 } 6482 }
6483} 6483}
6484 6484
6485static int 6485static int
6486wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq) 6486wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
6487{ 6487{
6488 int i, error; 6488 int i, error;
6489 6489
6490 /* Create the receive buffer DMA maps. */ 6490 /* Create the receive buffer DMA maps. */
6491 for (i = 0; i < rxq->rxq_ndesc; i++) { 6491 for (i = 0; i < rxq->rxq_ndesc; i++) {
6492 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 6492 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
6493 MCLBYTES, 0, 0, 6493 MCLBYTES, 0, 0,
6494 &rxq->rxq_soft[i].rxs_dmamap)) != 0) { 6494 &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
6495 aprint_error_dev(sc->sc_dev, 6495 aprint_error_dev(sc->sc_dev,
6496 "unable to create Rx DMA map %d error = %d\n", 6496 "unable to create Rx DMA map %d error = %d\n",
6497 i, error); 6497 i, error);
6498 goto fail; 6498 goto fail;
6499 } 6499 }
6500 rxq->rxq_soft[i].rxs_mbuf = NULL; 6500 rxq->rxq_soft[i].rxs_mbuf = NULL;
6501 } 6501 }
6502 6502
6503 return 0; 6503 return 0;
6504 6504
6505 fail: 6505 fail:
6506 for (i = 0; i < rxq->rxq_ndesc; i++) { 6506 for (i = 0; i < rxq->rxq_ndesc; i++) {
6507 if (rxq->rxq_soft[i].rxs_dmamap != NULL) 6507 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
6508 bus_dmamap_destroy(sc->sc_dmat, 6508 bus_dmamap_destroy(sc->sc_dmat,
6509 rxq->rxq_soft[i].rxs_dmamap); 6509 rxq->rxq_soft[i].rxs_dmamap);
6510 } 6510 }
6511 return error; 6511 return error;
6512} 6512}
6513 6513
6514static void 6514static void
6515wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq) 6515wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
6516{ 6516{
6517 int i; 6517 int i;
6518 6518
6519 for (i = 0; i < rxq->rxq_ndesc; i++) { 6519 for (i = 0; i < rxq->rxq_ndesc; i++) {
6520 if (rxq->rxq_soft[i].rxs_dmamap != NULL) 6520 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
6521 bus_dmamap_destroy(sc->sc_dmat, 6521 bus_dmamap_destroy(sc->sc_dmat,
6522 rxq->rxq_soft[i].rxs_dmamap); 6522 rxq->rxq_soft[i].rxs_dmamap);
6523 } 6523 }
6524} 6524}
6525 6525
6526/* 6526/*
6527 * wm_alloc_txrx_queues: 6527 * wm_alloc_txrx_queues:
6528 * Allocate {tx,rx}descs and {tx,rx} buffers 6528 * Allocate {tx,rx}descs and {tx,rx} buffers
6529 */ 6529 */
6530static int 6530static int
6531wm_alloc_txrx_queues(struct wm_softc *sc) 6531wm_alloc_txrx_queues(struct wm_softc *sc)
6532{ 6532{
6533 int i, error, tx_done, rx_done; 6533 int i, error, tx_done, rx_done;
6534 6534
6535 sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues, 6535 sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
6536 KM_SLEEP); 6536 KM_SLEEP);
6537 if (sc->sc_queue == NULL) { 6537 if (sc->sc_queue == NULL) {
6538 aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n"); 6538 aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
6539 error = ENOMEM; 6539 error = ENOMEM;
6540 goto fail_0; 6540 goto fail_0;
6541 } 6541 }
6542 6542
6543 /* 6543 /*
6544 * For transmission 6544 * For transmission
6545 */ 6545 */
6546 error = 0; 6546 error = 0;
6547 tx_done = 0; 6547 tx_done = 0;
6548 for (i = 0; i < sc->sc_nqueues; i++) { 6548 for (i = 0; i < sc->sc_nqueues; i++) {
6549#ifdef WM_EVENT_COUNTERS 6549#ifdef WM_EVENT_COUNTERS
6550 int j; 6550 int j;
6551 const char *xname; 6551 const char *xname;
6552#endif 6552#endif
6553 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq; 6553 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6554 txq->txq_sc = sc; 6554 txq->txq_sc = sc;
6555 txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); 6555 txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
6556 6556
6557 error = wm_alloc_tx_descs(sc, txq); 6557 error = wm_alloc_tx_descs(sc, txq);
6558 if (error) 6558 if (error)
6559 break; 6559 break;
6560 error = wm_alloc_tx_buffer(sc, txq); 6560 error = wm_alloc_tx_buffer(sc, txq);
6561 if (error) { 6561 if (error) {
6562 wm_free_tx_descs(sc, txq); 6562 wm_free_tx_descs(sc, txq);
6563 break; 6563 break;
6564 } 6564 }
6565 txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP); 6565 txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
6566 if (txq->txq_interq == NULL) { 6566 if (txq->txq_interq == NULL) {
6567 wm_free_tx_descs(sc, txq); 6567 wm_free_tx_descs(sc, txq);
6568 wm_free_tx_buffer(sc, txq); 6568 wm_free_tx_buffer(sc, txq);
6569 error = ENOMEM; 6569 error = ENOMEM;
6570 break; 6570 break;
6571 } 6571 }
6572 6572
6573#ifdef WM_EVENT_COUNTERS 6573#ifdef WM_EVENT_COUNTERS
6574 xname = device_xname(sc->sc_dev); 6574 xname = device_xname(sc->sc_dev);
6575 6575
6576 WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname); 6576 WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
6577 WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname); 6577 WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
6578 WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname); 6578 WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
6579 WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname); 6579 WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
6580 WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname); 6580 WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
6581 WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname); 6581 WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
6582 WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname); 6582 WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
6583 WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname); 6583 WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
6584 WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname); 6584 WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
6585 WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname); 6585 WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
6586 WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname); 6586 WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
6587 6587
6588 for (j = 0; j < WM_NTXSEGS; j++) { 6588 for (j = 0; j < WM_NTXSEGS; j++) {
6589 snprintf(txq->txq_txseg_evcnt_names[j], 6589 snprintf(txq->txq_txseg_evcnt_names[j],
6590 sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j); 6590 sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
6591 evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC, 6591 evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
6592 NULL, xname, txq->txq_txseg_evcnt_names[j]); 6592 NULL, xname, txq->txq_txseg_evcnt_names[j]);
6593 } 6593 }
6594 6594
6595 WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname); 6595 WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
6596 WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname); 6596 WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
6597 WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname); 6597 WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
6598 WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname); 6598 WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
6599 WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname); 6599 WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
6600#endif /* WM_EVENT_COUNTERS */ 6600#endif /* WM_EVENT_COUNTERS */
6601 6601
6602 tx_done++; 6602 tx_done++;
6603 } 6603 }
6604 if (error) 6604 if (error)
6605 goto fail_1; 6605 goto fail_1;
6606 6606
6607 /* 6607 /*
6608 * For receive 6608 * For receive
6609 */ 6609 */
6610 error = 0; 6610 error = 0;
6611 rx_done = 0; 6611 rx_done = 0;
6612 for (i = 0; i < sc->sc_nqueues; i++) { 6612 for (i = 0; i < sc->sc_nqueues; i++) {
6613#ifdef WM_EVENT_COUNTERS 6613#ifdef WM_EVENT_COUNTERS
6614 const char *xname; 6614 const char *xname;
6615#endif 6615#endif
6616 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq; 6616 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6617 rxq->rxq_sc = sc; 6617 rxq->rxq_sc = sc;
6618 rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); 6618 rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
6619 6619
6620 error = wm_alloc_rx_descs(sc, rxq); 6620 error = wm_alloc_rx_descs(sc, rxq);
6621 if (error) 6621 if (error)
6622 break; 6622 break;
6623 6623
6624 error = wm_alloc_rx_buffer(sc, rxq); 6624 error = wm_alloc_rx_buffer(sc, rxq);
6625 if (error) { 6625 if (error) {
6626 wm_free_rx_descs(sc, rxq); 6626 wm_free_rx_descs(sc, rxq);
6627 break; 6627 break;
6628 } 6628 }
6629 6629
6630#ifdef WM_EVENT_COUNTERS 6630#ifdef WM_EVENT_COUNTERS
6631 xname = device_xname(sc->sc_dev); 6631 xname = device_xname(sc->sc_dev);
6632 6632
6633 WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname); 6633 WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
6634 WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname); 6634 WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
6635 6635
6636 WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname); 6636 WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
6637 WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname); 6637 WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
6638#endif /* WM_EVENT_COUNTERS */ 6638#endif /* WM_EVENT_COUNTERS */
6639 6639
6640 rx_done++; 6640 rx_done++;
6641 } 6641 }
6642 if (error) 6642 if (error)
6643 goto fail_2; 6643 goto fail_2;
6644 6644
6645 return 0; 6645 return 0;
6646 6646
6647 fail_2: 6647 fail_2:
6648 for (i = 0; i < rx_done; i++) { 6648 for (i = 0; i < rx_done; i++) {
6649 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq; 6649 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6650 wm_free_rx_buffer(sc, rxq); 6650 wm_free_rx_buffer(sc, rxq);
6651 wm_free_rx_descs(sc, rxq); 6651 wm_free_rx_descs(sc, rxq);
6652 if (rxq->rxq_lock) 6652 if (rxq->rxq_lock)
6653 mutex_obj_free(rxq->rxq_lock); 6653 mutex_obj_free(rxq->rxq_lock);
6654 } 6654 }
6655 fail_1: 6655 fail_1:
6656 for (i = 0; i < tx_done; i++) { 6656 for (i = 0; i < tx_done; i++) {
6657 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq; 6657 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6658 pcq_destroy(txq->txq_interq); 6658 pcq_destroy(txq->txq_interq);
6659 wm_free_tx_buffer(sc, txq); 6659 wm_free_tx_buffer(sc, txq);
6660 wm_free_tx_descs(sc, txq); 6660 wm_free_tx_descs(sc, txq);
6661 if (txq->txq_lock) 6661 if (txq->txq_lock)
6662 mutex_obj_free(txq->txq_lock); 6662 mutex_obj_free(txq->txq_lock);
6663 } 6663 }
6664 6664
6665 kmem_free(sc->sc_queue, 6665 kmem_free(sc->sc_queue,
6666 sizeof(struct wm_queue) * sc->sc_nqueues); 6666 sizeof(struct wm_queue) * sc->sc_nqueues);
6667 fail_0: 6667 fail_0:
6668 return error; 6668 return error;
6669} 6669}
6670 6670
6671/* 6671/*
 6672 * wm_free_txrx_queues: 6672 * wm_free_txrx_queues:
6673 * Free {tx,rx}descs and {tx,rx} buffers 6673 * Free {tx,rx}descs and {tx,rx} buffers
6674 */ 6674 */
6675static void 6675static void
6676wm_free_txrx_queues(struct wm_softc *sc) 6676wm_free_txrx_queues(struct wm_softc *sc)
6677{ 6677{
6678 int i; 6678 int i;
6679 6679
6680 for (i = 0; i < sc->sc_nqueues; i++) { 6680 for (i = 0; i < sc->sc_nqueues; i++) {
6681 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq; 6681 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6682 6682
6683#ifdef WM_EVENT_COUNTERS 6683#ifdef WM_EVENT_COUNTERS
6684 WM_Q_EVCNT_DETACH(rxq, intr, rxq, i); 6684 WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
6685 WM_Q_EVCNT_DETACH(rxq, defer, rxq, i); 6685 WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
6686 WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i); 6686 WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
6687 WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i); 6687 WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
6688#endif /* WM_EVENT_COUNTERS */ 6688#endif /* WM_EVENT_COUNTERS */
6689 6689
6690 wm_free_rx_buffer(sc, rxq); 6690 wm_free_rx_buffer(sc, rxq);
6691 wm_free_rx_descs(sc, rxq); 6691 wm_free_rx_descs(sc, rxq);
6692 if (rxq->rxq_lock) 6692 if (rxq->rxq_lock)
6693 mutex_obj_free(rxq->rxq_lock); 6693 mutex_obj_free(rxq->rxq_lock);
6694 } 6694 }
6695 6695
6696 for (i = 0; i < sc->sc_nqueues; i++) { 6696 for (i = 0; i < sc->sc_nqueues; i++) {
6697 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq; 6697 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6698 struct mbuf *m; 6698 struct mbuf *m;
6699#ifdef WM_EVENT_COUNTERS 6699#ifdef WM_EVENT_COUNTERS
6700 int j; 6700 int j;
6701 6701
6702 WM_Q_EVCNT_DETACH(txq, txsstall, txq, i); 6702 WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
6703 WM_Q_EVCNT_DETACH(txq, txdstall, txq, i); 6703 WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
6704 WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i); 6704 WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
6705 WM_Q_EVCNT_DETACH(txq, txdw, txq, i); 6705 WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
6706 WM_Q_EVCNT_DETACH(txq, txqe, txq, i); 6706 WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
6707 WM_Q_EVCNT_DETACH(txq, ipsum, txq, i); 6707 WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
6708 WM_Q_EVCNT_DETACH(txq, tusum, txq, i); 6708 WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
6709 WM_Q_EVCNT_DETACH(txq, tusum6, txq, i); 6709 WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
6710 WM_Q_EVCNT_DETACH(txq, tso, txq, i); 6710 WM_Q_EVCNT_DETACH(txq, tso, txq, i);
6711 WM_Q_EVCNT_DETACH(txq, tso6, txq, i); 6711 WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
6712 WM_Q_EVCNT_DETACH(txq, tsopain, txq, i); 6712 WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
6713 6713
6714 for (j = 0; j < WM_NTXSEGS; j++) 6714 for (j = 0; j < WM_NTXSEGS; j++)
6715 evcnt_detach(&txq->txq_ev_txseg[j]); 6715 evcnt_detach(&txq->txq_ev_txseg[j]);
6716 6716
6717 WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i); 6717 WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
6718 WM_Q_EVCNT_DETACH(txq, descdrop, txq, i); 6718 WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
6719 WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i); 6719 WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
6720 WM_Q_EVCNT_DETACH(txq, defrag, txq, i); 6720 WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
6721 WM_Q_EVCNT_DETACH(txq, underrun, txq, i); 6721 WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
6722#endif /* WM_EVENT_COUNTERS */ 6722#endif /* WM_EVENT_COUNTERS */
6723 6723
6724 /* drain txq_interq */ 6724 /* drain txq_interq */
6725 while ((m = pcq_get(txq->txq_interq)) != NULL) 6725 while ((m = pcq_get(txq->txq_interq)) != NULL)
6726 m_freem(m); 6726 m_freem(m);
6727 pcq_destroy(txq->txq_interq); 6727 pcq_destroy(txq->txq_interq);
6728 6728
6729 wm_free_tx_buffer(sc, txq); 6729 wm_free_tx_buffer(sc, txq);
6730 wm_free_tx_descs(sc, txq); 6730 wm_free_tx_descs(sc, txq);
6731 if (txq->txq_lock) 6731 if (txq->txq_lock)
6732 mutex_obj_free(txq->txq_lock); 6732 mutex_obj_free(txq->txq_lock);
6733 } 6733 }
6734 6734
6735 kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues); 6735 kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
6736} 6736}
6737 6737
6738static void 6738static void
6739wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq) 6739wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
6740{ 6740{
6741 6741
6742 KASSERT(mutex_owned(txq->txq_lock)); 6742 KASSERT(mutex_owned(txq->txq_lock));
6743 6743
6744 /* Initialize the transmit descriptor ring. */ 6744 /* Initialize the transmit descriptor ring. */
6745 memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq)); 6745 memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
6746 wm_cdtxsync(txq, 0, WM_NTXDESC(txq), 6746 wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
6747 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 6747 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
6748 txq->txq_free = WM_NTXDESC(txq); 6748 txq->txq_free = WM_NTXDESC(txq);
6749 txq->txq_next = 0; 6749 txq->txq_next = 0;
6750} 6750}
6751 6751
6752static void 6752static void
6753wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq, 6753wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
6754 struct wm_txqueue *txq) 6754 struct wm_txqueue *txq)
6755{ 6755{
6756 6756
6757 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", 6757 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
6758 device_xname(sc->sc_dev), __func__)); 6758 device_xname(sc->sc_dev), __func__));
6759 KASSERT(mutex_owned(txq->txq_lock)); 6759 KASSERT(mutex_owned(txq->txq_lock));
6760 6760
6761 if (sc->sc_type < WM_T_82543) { 6761 if (sc->sc_type < WM_T_82543) {
6762 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0)); 6762 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
6763 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0)); 6763 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
6764 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq)); 6764 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
6765 CSR_WRITE(sc, WMREG_OLD_TDH, 0); 6765 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
6766 CSR_WRITE(sc, WMREG_OLD_TDT, 0); 6766 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
6767 CSR_WRITE(sc, WMREG_OLD_TIDV, 128); 6767 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
6768 } else { 6768 } else {
6769 int qid = wmq->wmq_id; 6769 int qid = wmq->wmq_id;
6770 6770
6771 CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0)); 6771 CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
6772 CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0)); 6772 CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
6773 CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq)); 6773 CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
6774 CSR_WRITE(sc, WMREG_TDH(qid), 0); 6774 CSR_WRITE(sc, WMREG_TDH(qid), 0);
6775 6775
6776 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 6776 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6777 /* 6777 /*
6778 * Don't write TDT before TCTL.EN is set. 6778 * Don't write TDT before TCTL.EN is set.
6779 * See the document. 6779 * See the document.
6780 */ 6780 */
6781 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE 6781 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
6782 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0) 6782 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
6783 | TXDCTL_WTHRESH(0)); 6783 | TXDCTL_WTHRESH(0));
6784 else { 6784 else {
6785 /* XXX should update with AIM? */ 6785 /* XXX should update with AIM? */
6786 CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4); 6786 CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
6787 if (sc->sc_type >= WM_T_82540) { 6787 if (sc->sc_type >= WM_T_82540) {
6788 /* should be same */ 6788 /* should be same */
6789 CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4); 6789 CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
6790 } 6790 }
6791 6791
6792 CSR_WRITE(sc, WMREG_TDT(qid), 0); 6792 CSR_WRITE(sc, WMREG_TDT(qid), 0);
6793 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) | 6793 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
6794 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0)); 6794 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
6795 } 6795 }
6796 } 6796 }
6797} 6797}
6798 6798
6799static void 6799static void
6800wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq) 6800wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
6801{ 6801{
6802 int i; 6802 int i;
6803 6803
6804 KASSERT(mutex_owned(txq->txq_lock)); 6804 KASSERT(mutex_owned(txq->txq_lock));
6805 6805
6806 /* Initialize the transmit job descriptors. */ 6806 /* Initialize the transmit job descriptors. */
6807 for (i = 0; i < WM_TXQUEUELEN(txq); i++) 6807 for (i = 0; i < WM_TXQUEUELEN(txq); i++)
6808 txq->txq_soft[i].txs_mbuf = NULL; 6808 txq->txq_soft[i].txs_mbuf = NULL;
6809 txq->txq_sfree = WM_TXQUEUELEN(txq); 6809 txq->txq_sfree = WM_TXQUEUELEN(txq);
6810 txq->txq_snext = 0; 6810 txq->txq_snext = 0;
6811 txq->txq_sdirty = 0; 6811 txq->txq_sdirty = 0;
6812} 6812}
6813 6813
6814static void 6814static void
6815wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq, 6815wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
6816 struct wm_txqueue *txq) 6816 struct wm_txqueue *txq)
6817{ 6817{
6818 6818
6819 KASSERT(mutex_owned(txq->txq_lock)); 6819 KASSERT(mutex_owned(txq->txq_lock));
6820 6820
6821 /* 6821 /*
6822 * Set up some register offsets that are different between 6822 * Set up some register offsets that are different between
6823 * the i82542 and the i82543 and later chips. 6823 * the i82542 and the i82543 and later chips.
6824 */ 6824 */
6825 if (sc->sc_type < WM_T_82543) 6825 if (sc->sc_type < WM_T_82543)
6826 txq->txq_tdt_reg = WMREG_OLD_TDT; 6826 txq->txq_tdt_reg = WMREG_OLD_TDT;
6827 else 6827 else
6828 txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id); 6828 txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
6829 6829
6830 wm_init_tx_descs(sc, txq); 6830 wm_init_tx_descs(sc, txq);
6831 wm_init_tx_regs(sc, wmq, txq); 6831 wm_init_tx_regs(sc, wmq, txq);
6832 wm_init_tx_buffer(sc, txq); 6832 wm_init_tx_buffer(sc, txq);
6833 6833
6834 txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */ 6834 txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
6835 txq->txq_sending = false; 6835 txq->txq_sending = false;
6836} 6836}
6837 6837
6838static void 6838static void
6839wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq, 6839wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
6840 struct wm_rxqueue *rxq) 6840 struct wm_rxqueue *rxq)
6841{ 6841{
6842 6842
6843 KASSERT(mutex_owned(rxq->rxq_lock)); 6843 KASSERT(mutex_owned(rxq->rxq_lock));
6844 6844
6845 /* 6845 /*
6846 * Initialize the receive descriptor and receive job 6846 * Initialize the receive descriptor and receive job
6847 * descriptor rings. 6847 * descriptor rings.
6848 */ 6848 */
6849 if (sc->sc_type < WM_T_82543) { 6849 if (sc->sc_type < WM_T_82543) {
6850 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0)); 6850 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
6851 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0)); 6851 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
6852 CSR_WRITE(sc, WMREG_OLD_RDLEN0, 6852 CSR_WRITE(sc, WMREG_OLD_RDLEN0,
6853 rxq->rxq_descsize * rxq->rxq_ndesc); 6853 rxq->rxq_descsize * rxq->rxq_ndesc);
6854 CSR_WRITE(sc, WMREG_OLD_RDH0, 0); 6854 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
6855 CSR_WRITE(sc, WMREG_OLD_RDT0, 0); 6855 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
6856 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD); 6856 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
6857 6857
6858 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0); 6858 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
6859 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0); 6859 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
6860 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0); 6860 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
6861 CSR_WRITE(sc, WMREG_OLD_RDH1, 0); 6861 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
6862 CSR_WRITE(sc, WMREG_OLD_RDT1, 0); 6862 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
6863 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0); 6863 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
6864 } else { 6864 } else {
6865 int qid = wmq->wmq_id; 6865 int qid = wmq->wmq_id;
6866 6866
6867 CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0)); 6867 CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
6868 CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0)); 6868 CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
6869 CSR_WRITE(sc, WMREG_RDLEN(qid), 6869 CSR_WRITE(sc, WMREG_RDLEN(qid),
6870 rxq->rxq_descsize * rxq->rxq_ndesc); 6870 rxq->rxq_descsize * rxq->rxq_ndesc);
6871 6871
6872 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { 6872 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
6873 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1)) 6873 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
6874 panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES); 6874 panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
6875 6875
 6876 /* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */ 6876 /* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
6877 CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF 6877 CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
6878 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT)); 6878 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
6879 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE 6879 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
6880 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8) 6880 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
6881 | RXDCTL_WTHRESH(1)); 6881 | RXDCTL_WTHRESH(1));
6882 CSR_WRITE(sc, WMREG_RDH(qid), 0); 6882 CSR_WRITE(sc, WMREG_RDH(qid), 0);
6883 CSR_WRITE(sc, WMREG_RDT(qid), 0); 6883 CSR_WRITE(sc, WMREG_RDT(qid), 0);
6884 } else { 6884 } else {
6885 CSR_WRITE(sc, WMREG_RDH(qid), 0); 6885 CSR_WRITE(sc, WMREG_RDH(qid), 0);
6886 CSR_WRITE(sc, WMREG_RDT(qid), 0); 6886 CSR_WRITE(sc, WMREG_RDT(qid), 0);
6887 /* XXX should update with AIM? */ 6887 /* XXX should update with AIM? */
6888 CSR_WRITE(sc, WMREG_RDTR, 6888 CSR_WRITE(sc, WMREG_RDTR,
6889 (wmq->wmq_itr / 4) | RDTR_FPD); 6889 (wmq->wmq_itr / 4) | RDTR_FPD);
6890 /* MUST be same */ 6890 /* MUST be same */
6891 CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4); 6891 CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
6892 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) | 6892 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
6893 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1)); 6893 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
6894 } 6894 }
6895 } 6895 }
6896} 6896}
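
The panic in wm_init_rx_regs() above encodes a simple invariant: on 82575-and-newer (NEWQUEUE) parts, SRRCTL carries the packet buffer size in fixed units, so MCLBYTES must be a multiple of (1 << SRRCTL_BSIZEPKT_SHIFT). A minimal stand-alone sketch of that arithmetic follows; the shift value of 10 (1 KB units) and MCLBYTES of 2048 are assumptions for illustration, not values taken from the driver headers.

/*
 * Sketch only: SRRCTL_BSIZEPKT_SHIFT == 10 (1 KB units) and
 * MCLBYTES == 2048 are assumed here for illustration.
 */
#include <assert.h>
#include <stdio.h>

#define SRRCTL_BSIZEPKT_SHIFT	10
#define MCLBYTES		2048

int
main(void)
{
	/* Same test as the panic above: the low bits must be zero. */
	assert((MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1)) == 0);

	/* SRRCTL is then programmed with the size in 1 KB units: 2048 >> 10 == 2. */
	printf("SRRCTL bsizepkt field = %d\n", MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT);
	return 0;
}
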
6897 6897
6898static int 6898static int
6899wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq) 6899wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
6900{ 6900{
6901 struct wm_rxsoft *rxs; 6901 struct wm_rxsoft *rxs;
6902 int error, i; 6902 int error, i;
6903 6903
6904 KASSERT(mutex_owned(rxq->rxq_lock)); 6904 KASSERT(mutex_owned(rxq->rxq_lock));
6905 6905
6906 for (i = 0; i < rxq->rxq_ndesc; i++) { 6906 for (i = 0; i < rxq->rxq_ndesc; i++) {
6907 rxs = &rxq->rxq_soft[i]; 6907 rxs = &rxq->rxq_soft[i];
6908 if (rxs->rxs_mbuf == NULL) { 6908 if (rxs->rxs_mbuf == NULL) {
6909 if ((error = wm_add_rxbuf(rxq, i)) != 0) { 6909 if ((error = wm_add_rxbuf(rxq, i)) != 0) {
6910 log(LOG_ERR, "%s: unable to allocate or map " 6910 log(LOG_ERR, "%s: unable to allocate or map "
6911 "rx buffer %d, error = %d\n", 6911 "rx buffer %d, error = %d\n",
6912 device_xname(sc->sc_dev), i, error); 6912 device_xname(sc->sc_dev), i, error);
6913 /* 6913 /*
6914 * XXX Should attempt to run with fewer receive 6914 * XXX Should attempt to run with fewer receive
6915 * XXX buffers instead of just failing. 6915 * XXX buffers instead of just failing.
6916 */ 6916 */
6917 wm_rxdrain(rxq); 6917 wm_rxdrain(rxq);
6918 return ENOMEM; 6918 return ENOMEM;
6919 } 6919 }
6920 } else { 6920 } else {
6921 /* 6921 /*
6922 * For 82575 and 82576, the RX descriptors must be 6922 * For 82575 and 82576, the RX descriptors must be
6923 * initialized after the setting of RCTL.EN in 6923 * initialized after the setting of RCTL.EN in
6924 * wm_set_filter() 6924 * wm_set_filter()
6925 */ 6925 */
6926 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0) 6926 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
6927 wm_init_rxdesc(rxq, i); 6927 wm_init_rxdesc(rxq, i);
6928 } 6928 }
6929 } 6929 }
6930 rxq->rxq_ptr = 0; 6930 rxq->rxq_ptr = 0;
6931 rxq->rxq_discard = 0; 6931 rxq->rxq_discard = 0;
6932 WM_RXCHAIN_RESET(rxq); 6932 WM_RXCHAIN_RESET(rxq);
6933 6933
6934 return 0; 6934 return 0;
6935} 6935}
6936 6936
6937static int 6937static int
6938wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq, 6938wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
6939 struct wm_rxqueue *rxq) 6939 struct wm_rxqueue *rxq)
6940{ 6940{
6941 6941
6942 KASSERT(mutex_owned(rxq->rxq_lock)); 6942 KASSERT(mutex_owned(rxq->rxq_lock));
6943 6943
6944 /* 6944 /*
6945 * Set up some register offsets that are different between 6945 * Set up some register offsets that are different between
6946 * the i82542 and the i82543 and later chips. 6946 * the i82542 and the i82543 and later chips.
6947 */ 6947 */
6948 if (sc->sc_type < WM_T_82543) 6948 if (sc->sc_type < WM_T_82543)
6949 rxq->rxq_rdt_reg = WMREG_OLD_RDT0; 6949 rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
6950 else 6950 else
6951 rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id); 6951 rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
6952 6952
6953 wm_init_rx_regs(sc, wmq, rxq); 6953 wm_init_rx_regs(sc, wmq, rxq);
6954 return wm_init_rx_buffer(sc, rxq); 6954 return wm_init_rx_buffer(sc, rxq);
6955} 6955}
6956 6956
6957/* 6957/*
 6958 * wm_init_txrx_queues: 6958 * wm_init_txrx_queues:
6959 * Initialize {tx,rx}descs and {tx,rx} buffers 6959 * Initialize {tx,rx}descs and {tx,rx} buffers
6960 */ 6960 */
6961static int 6961static int
6962wm_init_txrx_queues(struct wm_softc *sc) 6962wm_init_txrx_queues(struct wm_softc *sc)
6963{ 6963{
6964 int i, error = 0; 6964 int i, error = 0;
6965 6965
6966 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", 6966 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
6967 device_xname(sc->sc_dev), __func__)); 6967 device_xname(sc->sc_dev), __func__));
6968 6968
6969 for (i = 0; i < sc->sc_nqueues; i++) { 6969 for (i = 0; i < sc->sc_nqueues; i++) {
6970 struct wm_queue *wmq = &sc->sc_queue[i]; 6970 struct wm_queue *wmq = &sc->sc_queue[i];
6971 struct wm_txqueue *txq = &wmq->wmq_txq; 6971 struct wm_txqueue *txq = &wmq->wmq_txq;
6972 struct wm_rxqueue *rxq = &wmq->wmq_rxq; 6972 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
6973 6973
6974 /* 6974 /*
6975 * TODO 6975 * TODO
 6976 * Currently, a constant value is used instead of AIM. 6976 * Currently, a constant value is used instead of AIM.
 6977 * Furthermore, the interrupt interval of multiqueue, which uses 6977 * Furthermore, the interrupt interval of multiqueue, which uses
 6978 * polling mode, is lower than the default value. 6978 * polling mode, is lower than the default value.
6979 * More tuning and AIM are required. 6979 * More tuning and AIM are required.
6980 */ 6980 */
6981 if (wm_is_using_multiqueue(sc)) 6981 if (wm_is_using_multiqueue(sc))
6982 wmq->wmq_itr = 50; 6982 wmq->wmq_itr = 50;
6983 else 6983 else
6984 wmq->wmq_itr = sc->sc_itr_init; 6984 wmq->wmq_itr = sc->sc_itr_init;
6985 wmq->wmq_set_itr = true; 6985 wmq->wmq_set_itr = true;
6986 6986
6987 mutex_enter(txq->txq_lock); 6987 mutex_enter(txq->txq_lock);
6988 wm_init_tx_queue(sc, wmq, txq); 6988 wm_init_tx_queue(sc, wmq, txq);
6989 mutex_exit(txq->txq_lock); 6989 mutex_exit(txq->txq_lock);
6990 6990
6991 mutex_enter(rxq->rxq_lock); 6991 mutex_enter(rxq->rxq_lock);
6992 error = wm_init_rx_queue(sc, wmq, rxq); 6992 error = wm_init_rx_queue(sc, wmq, rxq);
6993 mutex_exit(rxq->rxq_lock); 6993 mutex_exit(rxq->rxq_lock);
6994 if (error) 6994 if (error)
6995 break; 6995 break;
6996 } 6996 }
6997 6997
6998 return error; 6998 return error;
6999} 6999}
7000 7000
7001/* 7001/*
7002 * wm_tx_offload: 7002 * wm_tx_offload:
7003 * 7003 *
7004 * Set up TCP/IP checksumming parameters for the 7004 * Set up TCP/IP checksumming parameters for the
7005 * specified packet. 7005 * specified packet.
7006 */ 7006 */
7007static int 7007static int
7008wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq, 7008wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
7009 struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp) 7009 struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
7010{ 7010{
7011 struct mbuf *m0 = txs->txs_mbuf; 7011 struct mbuf *m0 = txs->txs_mbuf;
7012 struct livengood_tcpip_ctxdesc *t; 7012 struct livengood_tcpip_ctxdesc *t;
7013 uint32_t ipcs, tucs, cmd, cmdlen, seg; 7013 uint32_t ipcs, tucs, cmd, cmdlen, seg;
7014 uint32_t ipcse; 7014 uint32_t ipcse;
7015 struct ether_header *eh; 7015 struct ether_header *eh;
7016 int offset, iphl; 7016 int offset, iphl;
7017 uint8_t fields; 7017 uint8_t fields;
7018 7018
7019 /* 7019 /*
7020 * XXX It would be nice if the mbuf pkthdr had offset 7020 * XXX It would be nice if the mbuf pkthdr had offset
7021 * fields for the protocol headers. 7021 * fields for the protocol headers.
7022 */ 7022 */
7023 7023
7024 eh = mtod(m0, struct ether_header *); 7024 eh = mtod(m0, struct ether_header *);
7025 switch (htons(eh->ether_type)) { 7025 switch (htons(eh->ether_type)) {
7026 case ETHERTYPE_IP: 7026 case ETHERTYPE_IP:
7027 case ETHERTYPE_IPV6: 7027 case ETHERTYPE_IPV6:
7028 offset = ETHER_HDR_LEN; 7028 offset = ETHER_HDR_LEN;
7029 break; 7029 break;
7030 7030
7031 case ETHERTYPE_VLAN: 7031 case ETHERTYPE_VLAN:
7032 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 7032 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
7033 break; 7033 break;
7034 7034
7035 default: 7035 default:
7036 /* 7036 /*
7037 * Don't support this protocol or encapsulation. 7037 * Don't support this protocol or encapsulation.
7038 */ 7038 */
7039 *fieldsp = 0; 7039 *fieldsp = 0;
7040 *cmdp = 0; 7040 *cmdp = 0;
7041 return 0; 7041 return 0;
7042 } 7042 }
7043 7043
7044 if ((m0->m_pkthdr.csum_flags & 7044 if ((m0->m_pkthdr.csum_flags &
7045 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) { 7045 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
7046 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data); 7046 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
7047 } else 7047 } else
7048 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data); 7048 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
7049 7049
7050 ipcse = offset + iphl - 1; 7050 ipcse = offset + iphl - 1;
7051 7051
7052 cmd = WTX_CMD_DEXT | WTX_DTYP_D; 7052 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
7053 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE; 7053 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
7054 seg = 0; 7054 seg = 0;
7055 fields = 0; 7055 fields = 0;
7056 7056
7057 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) { 7057 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
7058 int hlen = offset + iphl; 7058 int hlen = offset + iphl;
7059 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0; 7059 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
7060 7060
7061 if (__predict_false(m0->m_len < 7061 if (__predict_false(m0->m_len <
7062 (hlen + sizeof(struct tcphdr)))) { 7062 (hlen + sizeof(struct tcphdr)))) {
7063 /* 7063 /*
7064 * TCP/IP headers are not in the first mbuf; we need 7064 * TCP/IP headers are not in the first mbuf; we need
7065 * to do this the slow and painful way. Let's just 7065 * to do this the slow and painful way. Let's just
7066 * hope this doesn't happen very often. 7066 * hope this doesn't happen very often.
7067 */ 7067 */
7068 struct tcphdr th; 7068 struct tcphdr th;
7069 7069
7070 WM_Q_EVCNT_INCR(txq, tsopain); 7070 WM_Q_EVCNT_INCR(txq, tsopain);
7071 7071
7072 m_copydata(m0, hlen, sizeof(th), &th); 7072 m_copydata(m0, hlen, sizeof(th), &th);
7073 if (v4) { 7073 if (v4) {
7074 struct ip ip; 7074 struct ip ip;
7075 7075
7076 m_copydata(m0, offset, sizeof(ip), &ip); 7076 m_copydata(m0, offset, sizeof(ip), &ip);
7077 ip.ip_len = 0; 7077 ip.ip_len = 0;
7078 m_copyback(m0, 7078 m_copyback(m0,
7079 offset + offsetof(struct ip, ip_len), 7079 offset + offsetof(struct ip, ip_len),
7080 sizeof(ip.ip_len), &ip.ip_len); 7080 sizeof(ip.ip_len), &ip.ip_len);
7081 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr, 7081 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
7082 ip.ip_dst.s_addr, htons(IPPROTO_TCP)); 7082 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
7083 } else { 7083 } else {
7084 struct ip6_hdr ip6; 7084 struct ip6_hdr ip6;
7085 7085
7086 m_copydata(m0, offset, sizeof(ip6), &ip6); 7086 m_copydata(m0, offset, sizeof(ip6), &ip6);
7087 ip6.ip6_plen = 0; 7087 ip6.ip6_plen = 0;
7088 m_copyback(m0, 7088 m_copyback(m0,
7089 offset + offsetof(struct ip6_hdr, ip6_plen), 7089 offset + offsetof(struct ip6_hdr, ip6_plen),
7090 sizeof(ip6.ip6_plen), &ip6.ip6_plen); 7090 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
7091 th.th_sum = in6_cksum_phdr(&ip6.ip6_src, 7091 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
7092 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP)); 7092 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
7093 } 7093 }
7094 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum), 7094 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
7095 sizeof(th.th_sum), &th.th_sum); 7095 sizeof(th.th_sum), &th.th_sum);
7096 7096
7097 hlen += th.th_off << 2; 7097 hlen += th.th_off << 2;
7098 } else { 7098 } else {
7099 /* 7099 /*
7100 * TCP/IP headers are in the first mbuf; we can do 7100 * TCP/IP headers are in the first mbuf; we can do
7101 * this the easy way. 7101 * this the easy way.
7102 */ 7102 */
7103 struct tcphdr *th; 7103 struct tcphdr *th;
7104 7104
7105 if (v4) { 7105 if (v4) {
7106 struct ip *ip = 7106 struct ip *ip =
7107 (void *)(mtod(m0, char *) + offset); 7107 (void *)(mtod(m0, char *) + offset);
7108 th = (void *)(mtod(m0, char *) + hlen); 7108 th = (void *)(mtod(m0, char *) + hlen);
7109 7109
7110 ip->ip_len = 0; 7110 ip->ip_len = 0;
7111 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, 7111 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
7112 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 7112 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
7113 } else { 7113 } else {
7114 struct ip6_hdr *ip6 = 7114 struct ip6_hdr *ip6 =
7115 (void *)(mtod(m0, char *) + offset); 7115 (void *)(mtod(m0, char *) + offset);
7116 th = (void *)(mtod(m0, char *) + hlen); 7116 th = (void *)(mtod(m0, char *) + hlen);
7117 7117
7118 ip6->ip6_plen = 0; 7118 ip6->ip6_plen = 0;
7119 th->th_sum = in6_cksum_phdr(&ip6->ip6_src, 7119 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
7120 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP)); 7120 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
7121 } 7121 }
7122 hlen += th->th_off << 2; 7122 hlen += th->th_off << 2;
7123 } 7123 }
7124 7124
7125 if (v4) { 7125 if (v4) {
7126 WM_Q_EVCNT_INCR(txq, tso); 7126 WM_Q_EVCNT_INCR(txq, tso);
7127 cmdlen |= WTX_TCPIP_CMD_IP; 7127 cmdlen |= WTX_TCPIP_CMD_IP;
7128 } else { 7128 } else {
7129 WM_Q_EVCNT_INCR(txq, tso6); 7129 WM_Q_EVCNT_INCR(txq, tso6);
7130 ipcse = 0; 7130 ipcse = 0;
7131 } 7131 }
7132 cmd |= WTX_TCPIP_CMD_TSE; 7132 cmd |= WTX_TCPIP_CMD_TSE;
7133 cmdlen |= WTX_TCPIP_CMD_TSE | 7133 cmdlen |= WTX_TCPIP_CMD_TSE |
7134 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen); 7134 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
7135 seg = WTX_TCPIP_SEG_HDRLEN(hlen) | 7135 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
7136 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz); 7136 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
7137 } 7137 }
7138 7138
7139 /* 7139 /*
7140 * NOTE: Even if we're not using the IP or TCP/UDP checksum 7140 * NOTE: Even if we're not using the IP or TCP/UDP checksum
7141 * offload feature, if we load the context descriptor, we 7141 * offload feature, if we load the context descriptor, we
7142 * MUST provide valid values for IPCSS and TUCSS fields. 7142 * MUST provide valid values for IPCSS and TUCSS fields.
7143 */ 7143 */
7144 7144
7145 ipcs = WTX_TCPIP_IPCSS(offset) | 7145 ipcs = WTX_TCPIP_IPCSS(offset) |
7146 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) | 7146 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
7147 WTX_TCPIP_IPCSE(ipcse); 7147 WTX_TCPIP_IPCSE(ipcse);
7148 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) { 7148 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
7149 WM_Q_EVCNT_INCR(txq, ipsum); 7149 WM_Q_EVCNT_INCR(txq, ipsum);
7150 fields |= WTX_IXSM; 7150 fields |= WTX_IXSM;
7151 } 7151 }
7152 7152
7153 offset += iphl; 7153 offset += iphl;
7154 7154
7155 if (m0->m_pkthdr.csum_flags & 7155 if (m0->m_pkthdr.csum_flags &
7156 (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) { 7156 (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
7157 WM_Q_EVCNT_INCR(txq, tusum); 7157 WM_Q_EVCNT_INCR(txq, tusum);
7158 fields |= WTX_TXSM; 7158 fields |= WTX_TXSM;
7159 tucs = WTX_TCPIP_TUCSS(offset) | 7159 tucs = WTX_TCPIP_TUCSS(offset) |
7160 WTX_TCPIP_TUCSO(offset + 7160 WTX_TCPIP_TUCSO(offset +
7161 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) | 7161 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
7162 WTX_TCPIP_TUCSE(0) /* rest of packet */; 7162 WTX_TCPIP_TUCSE(0) /* rest of packet */;
7163 } else if ((m0->m_pkthdr.csum_flags & 7163 } else if ((m0->m_pkthdr.csum_flags &
7164 (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) { 7164 (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
7165 WM_Q_EVCNT_INCR(txq, tusum6); 7165 WM_Q_EVCNT_INCR(txq, tusum6);
7166 fields |= WTX_TXSM; 7166 fields |= WTX_TXSM;
7167 tucs = WTX_TCPIP_TUCSS(offset) | 7167 tucs = WTX_TCPIP_TUCSS(offset) |
7168 WTX_TCPIP_TUCSO(offset + 7168 WTX_TCPIP_TUCSO(offset +
7169 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) | 7169 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
7170 WTX_TCPIP_TUCSE(0) /* rest of packet */; 7170 WTX_TCPIP_TUCSE(0) /* rest of packet */;
7171 } else { 7171 } else {
7172 /* Just initialize it to a valid TCP context. */ 7172 /* Just initialize it to a valid TCP context. */
7173 tucs = WTX_TCPIP_TUCSS(offset) | 7173 tucs = WTX_TCPIP_TUCSS(offset) |
7174 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) | 7174 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
7175 WTX_TCPIP_TUCSE(0) /* rest of packet */; 7175 WTX_TCPIP_TUCSE(0) /* rest of packet */;
7176 } 7176 }
7177 7177
7178 /* 7178 /*
 7179 * We don't have to write a context descriptor for every packet, 7179 * We don't have to write a context descriptor for every packet,
 7180 * except on the 82574: there, a context descriptor must be written 7180 * except on the 82574: there, a context descriptor must be written
 7181 * for every packet when two descriptor queues are used. 7181 * for every packet when two descriptor queues are used.
 7182 * Writing a context descriptor for every packet adds overhead, 7182 * Writing a context descriptor for every packet adds overhead,
 7183 * but it does not cause problems. 7183 * but it does not cause problems.
7184 */ 7184 */
7185 /* Fill in the context descriptor. */ 7185 /* Fill in the context descriptor. */
7186 t = (struct livengood_tcpip_ctxdesc *) 7186 t = (struct livengood_tcpip_ctxdesc *)
7187 &txq->txq_descs[txq->txq_next]; 7187 &txq->txq_descs[txq->txq_next];
7188 t->tcpip_ipcs = htole32(ipcs); 7188 t->tcpip_ipcs = htole32(ipcs);
7189 t->tcpip_tucs = htole32(tucs); 7189 t->tcpip_tucs = htole32(tucs);
7190 t->tcpip_cmdlen = htole32(cmdlen); 7190 t->tcpip_cmdlen = htole32(cmdlen);
7191 t->tcpip_seg = htole32(seg); 7191 t->tcpip_seg = htole32(seg);
7192 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE); 7192 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
7193 7193
7194 txq->txq_next = WM_NEXTTX(txq, txq->txq_next); 7194 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
7195 txs->txs_ndesc++; 7195 txs->txs_ndesc++;
7196 7196
7197 *cmdp = cmd; 7197 *cmdp = cmd;
7198 *fieldsp = fields; 7198 *fieldsp = fields;
7199 7199
7200 return 0; 7200 return 0;
7201} 7201}
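
As a worked example of the offset arithmetic in wm_tx_offload() above (hand-computed, not driver output): for an untagged Ethernet + IPv4 + TCP frame with a 20-byte IP header, offset is ETHER_HDR_LEN and the context-descriptor fields come out as below. The point of the NOTE above is that these IPCSS/TUCSS-style values must be valid even when no checksum offload bit is actually requested for the packet.

/*
 * Worked example only: plain Ethernet + IPv4 (20 bytes) + TCP, no VLAN
 * tag. The names mirror the driver's fields; the values are computed by
 * hand from standard header layouts.
 */
#include <stdio.h>

int
main(void)
{
	int offset = 14;		/* ETHER_HDR_LEN */
	int iphl = 20;			/* IP header length from csum_data */
	int ipcse = offset + iphl - 1;	/* 33: last byte of the IP checksum span */
	int ipcso = offset + 10;	/* 24: offsetof(struct ip, ip_sum) */
	int tucss = offset + iphl;	/* 34: start of the TCP header */
	int tucso = tucss + 16;		/* 50: offsetof(struct tcphdr, th_sum) */

	printf("IPCSS %d IPCSO %d IPCSE %d TUCSS %d TUCSO %d\n",
	    offset, ipcso, ipcse, tucss, tucso);
	return 0;
}
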
7202 7202
7203static inline int 7203static inline int
7204wm_select_txqueue(struct ifnet *ifp, struct mbuf *m) 7204wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
7205{ 7205{
7206 struct wm_softc *sc = ifp->if_softc; 7206 struct wm_softc *sc = ifp->if_softc;
7207 u_int cpuid = cpu_index(curcpu()); 7207 u_int cpuid = cpu_index(curcpu());
7208 7208
7209 /* 7209 /*
 7210 * Currently, a simple distribution strategy. 7210 * Currently, a simple distribution strategy.
 7211 * TODO: 7211 * TODO:
 7212 * distribute by flowid (RSS hash value). 7212 * distribute by flowid (RSS hash value).
7213 */ 7213 */
7214 return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues; 7214 return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
7215} 7215}
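
The added inner "% ncpu" in wm_select_txqueue() is the substance of this pullup. Below is a stand-alone sketch of its effect, using the ncpu = 6, nqueue = 4 case from the commit message and assuming an affinity offset of 0 and that queue i's interrupt is bound to CPU (i + offset) % ncpu, which is what the offset arithmetic suggests.

/* Sketch with assumed values: ncpu = 6, nqueues = 4, affinity offset = 0. */
#include <stdio.h>

int
main(void)
{
	const unsigned ncpu = 6, nqueues = 4, off = 0;

	for (unsigned cpuid = 0; cpuid < ncpu; cpuid++) {
		unsigned old = (cpuid + ncpu - off) % nqueues;
		unsigned fixed = ((cpuid + ncpu - off) % ncpu) % nqueues;
		printf("cpu%u: old queue %u, fixed queue %u\n",
		    cpuid, old, fixed);
	}
	/*
	 * Old formula:   2 3 0 1 2 3 -- CPU 0 does not get queue 0 even
	 * though queue 0's interrupt is bound to CPU 0 (offset 0).
	 * Fixed formula: 0 1 2 3 0 1 -- the queue index follows the CPU.
	 */
	return 0;
}

Without the intermediate reduction modulo ncpu, the "+ ncpu" that is only meant to keep the subtraction non-negative leaks into the queue index whenever ncpu is not a multiple of nqueues.
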
7216 7216
7217/* 7217/*
7218 * wm_start: [ifnet interface function] 7218 * wm_start: [ifnet interface function]
7219 * 7219 *
7220 * Start packet transmission on the interface. 7220 * Start packet transmission on the interface.
7221 */ 7221 */
7222static void 7222static void
7223wm_start(struct ifnet *ifp) 7223wm_start(struct ifnet *ifp)
7224{ 7224{
7225 struct wm_softc *sc = ifp->if_softc; 7225 struct wm_softc *sc = ifp->if_softc;
7226 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq; 7226 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7227 7227
7228#ifdef WM_MPSAFE 7228#ifdef WM_MPSAFE
7229 KASSERT(if_is_mpsafe(ifp)); 7229 KASSERT(if_is_mpsafe(ifp));
7230#endif 7230#endif
7231 /* 7231 /*
7232 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c. 7232 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
7233 */ 7233 */
7234 7234
7235 mutex_enter(txq->txq_lock); 7235 mutex_enter(txq->txq_lock);
7236 if (!txq->txq_stopping) 7236 if (!txq->txq_stopping)
7237 wm_start_locked(ifp); 7237 wm_start_locked(ifp);
7238 mutex_exit(txq->txq_lock); 7238 mutex_exit(txq->txq_lock);
7239} 7239}
7240 7240
7241static void 7241static void
7242wm_start_locked(struct ifnet *ifp) 7242wm_start_locked(struct ifnet *ifp)
7243{ 7243{
7244 struct wm_softc *sc = ifp->if_softc; 7244 struct wm_softc *sc = ifp->if_softc;
7245 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq; 7245 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7246 7246
7247 wm_send_common_locked(ifp, txq, false); 7247 wm_send_common_locked(ifp, txq, false);
7248} 7248}
7249 7249
7250static int 7250static int
7251wm_transmit(struct ifnet *ifp, struct mbuf *m) 7251wm_transmit(struct ifnet *ifp, struct mbuf *m)
7252{ 7252{
7253 int qid; 7253 int qid;
7254 struct wm_softc *sc = ifp->if_softc; 7254 struct wm_softc *sc = ifp->if_softc;
7255 struct wm_txqueue *txq; 7255 struct wm_txqueue *txq;
7256 7256
7257 qid = wm_select_txqueue(ifp, m); 7257 qid = wm_select_txqueue(ifp, m);
7258 txq = &sc->sc_queue[qid].wmq_txq; 7258 txq = &sc->sc_queue[qid].wmq_txq;
7259 7259
7260 if (__predict_false(!pcq_put(txq->txq_interq, m))) { 7260 if (__predict_false(!pcq_put(txq->txq_interq, m))) {
7261 m_freem(m); 7261 m_freem(m);
7262 WM_Q_EVCNT_INCR(txq, pcqdrop); 7262 WM_Q_EVCNT_INCR(txq, pcqdrop);
7263 return ENOBUFS; 7263 return ENOBUFS;
7264 } 7264 }
7265 7265
7266 /* 7266 /*
7267 * XXXX NOMPSAFE: ifp->if_data should be percpu. 7267 * XXXX NOMPSAFE: ifp->if_data should be percpu.
7268 */ 7268 */
7269 ifp->if_obytes += m->m_pkthdr.len; 7269 ifp->if_obytes += m->m_pkthdr.len;
7270 if (m->m_flags & M_MCAST) 7270 if (m->m_flags & M_MCAST)
7271 ifp->if_omcasts++; 7271 ifp->if_omcasts++;
7272 7272
7273 if (mutex_tryenter(txq->txq_lock)) { 7273 if (mutex_tryenter(txq->txq_lock)) {
7274 if (!txq->txq_stopping) 7274 if (!txq->txq_stopping)
7275 wm_transmit_locked(ifp, txq); 7275 wm_transmit_locked(ifp, txq);
7276 mutex_exit(txq->txq_lock); 7276 mutex_exit(txq->txq_lock);
7277 } 7277 }
7278 7278
7279 return 0; 7279 return 0;
7280} 7280}
7281 7281
7282static void 7282static void
7283wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq) 7283wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
7284{ 7284{
7285 7285
7286 wm_send_common_locked(ifp, txq, true); 7286 wm_send_common_locked(ifp, txq, true);
7287} 7287}
7288 7288
7289static void 7289static void
7290wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq, 7290wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
7291 bool is_transmit) 7291 bool is_transmit)
7292{ 7292{
7293 struct wm_softc *sc = ifp->if_softc; 7293 struct wm_softc *sc = ifp->if_softc;
7294 struct mbuf *m0; 7294 struct mbuf *m0;
7295 struct wm_txsoft *txs; 7295 struct wm_txsoft *txs;
7296 bus_dmamap_t dmamap; 7296 bus_dmamap_t dmamap;
7297 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso; 7297 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
7298 bus_addr_t curaddr; 7298 bus_addr_t curaddr;
7299 bus_size_t seglen, curlen; 7299 bus_size_t seglen, curlen;
7300 uint32_t cksumcmd; 7300 uint32_t cksumcmd;
7301 uint8_t cksumfields; 7301 uint8_t cksumfields;
7302 bool remap = true; 7302 bool remap = true;
7303 7303
7304 KASSERT(mutex_owned(txq->txq_lock)); 7304 KASSERT(mutex_owned(txq->txq_lock));
7305 7305
7306 if ((ifp->if_flags & IFF_RUNNING) == 0) 7306 if ((ifp->if_flags & IFF_RUNNING) == 0)
7307 return; 7307 return;
7308 if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit) 7308 if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
7309 return; 7309 return;
7310 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0) 7310 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
7311 return; 7311 return;
7312 7312
7313 /* Remember the previous number of free descriptors. */ 7313 /* Remember the previous number of free descriptors. */
7314 ofree = txq->txq_free; 7314 ofree = txq->txq_free;
7315 7315
7316 /* 7316 /*
7317 * Loop through the send queue, setting up transmit descriptors 7317 * Loop through the send queue, setting up transmit descriptors
7318 * until we drain the queue, or use up all available transmit 7318 * until we drain the queue, or use up all available transmit
7319 * descriptors. 7319 * descriptors.
7320 */ 7320 */
7321 for (;;) { 7321 for (;;) {
7322 m0 = NULL; 7322 m0 = NULL;
7323 7323
7324 /* Get a work queue entry. */ 7324 /* Get a work queue entry. */
7325 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) { 7325 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
7326 wm_txeof(txq, UINT_MAX); 7326 wm_txeof(txq, UINT_MAX);
7327 if (txq->txq_sfree == 0) { 7327 if (txq->txq_sfree == 0) {
7328 DPRINTF(WM_DEBUG_TX, 7328 DPRINTF(WM_DEBUG_TX,
7329 ("%s: TX: no free job descriptors\n", 7329 ("%s: TX: no free job descriptors\n",
7330 device_xname(sc->sc_dev))); 7330 device_xname(sc->sc_dev)));
7331 WM_Q_EVCNT_INCR(txq, txsstall); 7331 WM_Q_EVCNT_INCR(txq, txsstall);
7332 break; 7332 break;
7333 } 7333 }
7334 } 7334 }
7335 7335
7336 /* Grab a packet off the queue. */ 7336 /* Grab a packet off the queue. */
7337 if (is_transmit) 7337 if (is_transmit)
7338 m0 = pcq_get(txq->txq_interq); 7338 m0 = pcq_get(txq->txq_interq);
7339 else 7339 else
7340 IFQ_DEQUEUE(&ifp->if_snd, m0); 7340 IFQ_DEQUEUE(&ifp->if_snd, m0);
7341 if (m0 == NULL) 7341 if (m0 == NULL)
7342 break; 7342 break;
7343 7343
7344 DPRINTF(WM_DEBUG_TX, 7344 DPRINTF(WM_DEBUG_TX,
7345 ("%s: TX: have packet to transmit: %p\n", 7345 ("%s: TX: have packet to transmit: %p\n",
7346 device_xname(sc->sc_dev), m0)); 7346 device_xname(sc->sc_dev), m0));
7347 7347
7348 txs = &txq->txq_soft[txq->txq_snext]; 7348 txs = &txq->txq_soft[txq->txq_snext];
7349 dmamap = txs->txs_dmamap; 7349 dmamap = txs->txs_dmamap;
7350 7350
7351 use_tso = (m0->m_pkthdr.csum_flags & 7351 use_tso = (m0->m_pkthdr.csum_flags &
7352 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0; 7352 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
7353 7353
7354 /* 7354 /*
7355 * So says the Linux driver: 7355 * So says the Linux driver:
7356 * The controller does a simple calculation to make sure 7356 * The controller does a simple calculation to make sure
7357 * there is enough room in the FIFO before initiating the 7357 * there is enough room in the FIFO before initiating the
7358 * DMA for each buffer. The calc is: 7358 * DMA for each buffer. The calc is:
7359 * 4 = ceil(buffer len / MSS) 7359 * 4 = ceil(buffer len / MSS)
7360 * To make sure we don't overrun the FIFO, adjust the max 7360 * To make sure we don't overrun the FIFO, adjust the max
7361 * buffer len if the MSS drops. 7361 * buffer len if the MSS drops.
7362 */ 7362 */
7363 dmamap->dm_maxsegsz = 7363 dmamap->dm_maxsegsz =
7364 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN) 7364 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
7365 ? m0->m_pkthdr.segsz << 2 7365 ? m0->m_pkthdr.segsz << 2
7366 : WTX_MAX_LEN; 7366 : WTX_MAX_LEN;
7367 7367
7368 /* 7368 /*
7369 * Load the DMA map. If this fails, the packet either 7369 * Load the DMA map. If this fails, the packet either
7370 * didn't fit in the allotted number of segments, or we 7370 * didn't fit in the allotted number of segments, or we
7371 * were short on resources. For the too-many-segments 7371 * were short on resources. For the too-many-segments
7372 * case, we simply report an error and drop the packet, 7372 * case, we simply report an error and drop the packet,
7373 * since we can't sanely copy a jumbo packet to a single 7373 * since we can't sanely copy a jumbo packet to a single
7374 * buffer. 7374 * buffer.
7375 */ 7375 */
7376retry: 7376retry:
7377 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, 7377 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
7378 BUS_DMA_WRITE | BUS_DMA_NOWAIT); 7378 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
7379 if (__predict_false(error)) { 7379 if (__predict_false(error)) {
7380 if (error == EFBIG) { 7380 if (error == EFBIG) {
7381 if (remap == true) { 7381 if (remap == true) {
7382 struct mbuf *m; 7382 struct mbuf *m;
7383 7383
7384 remap = false; 7384 remap = false;
7385 m = m_defrag(m0, M_NOWAIT); 7385 m = m_defrag(m0, M_NOWAIT);
7386 if (m != NULL) { 7386 if (m != NULL) {
7387 WM_Q_EVCNT_INCR(txq, defrag); 7387 WM_Q_EVCNT_INCR(txq, defrag);
7388 m0 = m; 7388 m0 = m;
7389 goto retry; 7389 goto retry;
7390 } 7390 }
7391 } 7391 }
7392 WM_Q_EVCNT_INCR(txq, toomanyseg); 7392 WM_Q_EVCNT_INCR(txq, toomanyseg);
7393 log(LOG_ERR, "%s: Tx packet consumes too many " 7393 log(LOG_ERR, "%s: Tx packet consumes too many "
7394 "DMA segments, dropping...\n", 7394 "DMA segments, dropping...\n",
7395 device_xname(sc->sc_dev)); 7395 device_xname(sc->sc_dev));
7396 wm_dump_mbuf_chain(sc, m0); 7396 wm_dump_mbuf_chain(sc, m0);
7397 m_freem(m0); 7397 m_freem(m0);
7398 continue; 7398 continue;
7399 } 7399 }
7400 /* Short on resources, just stop for now. */ 7400 /* Short on resources, just stop for now. */
7401 DPRINTF(WM_DEBUG_TX, 7401 DPRINTF(WM_DEBUG_TX,
7402 ("%s: TX: dmamap load failed: %d\n", 7402 ("%s: TX: dmamap load failed: %d\n",
7403 device_xname(sc->sc_dev), error)); 7403 device_xname(sc->sc_dev), error));
7404 break; 7404 break;
7405 } 7405 }
7406 7406
7407 segs_needed = dmamap->dm_nsegs; 7407 segs_needed = dmamap->dm_nsegs;
7408 if (use_tso) { 7408 if (use_tso) {
7409 /* For sentinel descriptor; see below. */ 7409 /* For sentinel descriptor; see below. */
7410 segs_needed++; 7410 segs_needed++;
7411 } 7411 }
7412 7412
7413 /* 7413 /*
7414 * Ensure we have enough descriptors free to describe 7414 * Ensure we have enough descriptors free to describe
7415 * the packet. Note, we always reserve one descriptor 7415 * the packet. Note, we always reserve one descriptor
7416 * at the end of the ring due to the semantics of the 7416 * at the end of the ring due to the semantics of the
7417 * TDT register, plus one more in the event we need 7417 * TDT register, plus one more in the event we need
7418 * to load offload context. 7418 * to load offload context.
7419 */ 7419 */
7420 if (segs_needed > txq->txq_free - 2) { 7420 if (segs_needed > txq->txq_free - 2) {
7421 /* 7421 /*
7422 * Not enough free descriptors to transmit this 7422 * Not enough free descriptors to transmit this
7423 * packet. We haven't committed anything yet, 7423 * packet. We haven't committed anything yet,
7424 * so just unload the DMA map, put the packet 7424 * so just unload the DMA map, put the packet
7425 * pack on the queue, and punt. Notify the upper 7425 * pack on the queue, and punt. Notify the upper
7426 * layer that there are no more slots left. 7426 * layer that there are no more slots left.
7427 */ 7427 */
7428 DPRINTF(WM_DEBUG_TX, 7428 DPRINTF(WM_DEBUG_TX,
7429 ("%s: TX: need %d (%d) descriptors, have %d\n", 7429 ("%s: TX: need %d (%d) descriptors, have %d\n",
7430 device_xname(sc->sc_dev), dmamap->dm_nsegs, 7430 device_xname(sc->sc_dev), dmamap->dm_nsegs,
7431 segs_needed, txq->txq_free - 1)); 7431 segs_needed, txq->txq_free - 1));
7432 if (!is_transmit) 7432 if (!is_transmit)
7433 ifp->if_flags |= IFF_OACTIVE; 7433 ifp->if_flags |= IFF_OACTIVE;
7434 txq->txq_flags |= WM_TXQ_NO_SPACE; 7434 txq->txq_flags |= WM_TXQ_NO_SPACE;
7435 bus_dmamap_unload(sc->sc_dmat, dmamap); 7435 bus_dmamap_unload(sc->sc_dmat, dmamap);
7436 WM_Q_EVCNT_INCR(txq, txdstall); 7436 WM_Q_EVCNT_INCR(txq, txdstall);
7437 break; 7437 break;
7438 } 7438 }
7439 7439
7440 /* 7440 /*
7441 * Check for 82547 Tx FIFO bug. We need to do this 7441 * Check for 82547 Tx FIFO bug. We need to do this
7442 * once we know we can transmit the packet, since we 7442 * once we know we can transmit the packet, since we
7443 * do some internal FIFO space accounting here. 7443 * do some internal FIFO space accounting here.
7444 */ 7444 */
7445 if (sc->sc_type == WM_T_82547 && 7445 if (sc->sc_type == WM_T_82547 &&
7446 wm_82547_txfifo_bugchk(sc, m0)) { 7446 wm_82547_txfifo_bugchk(sc, m0)) {
7447 DPRINTF(WM_DEBUG_TX, 7447 DPRINTF(WM_DEBUG_TX,
7448 ("%s: TX: 82547 Tx FIFO bug detected\n", 7448 ("%s: TX: 82547 Tx FIFO bug detected\n",
7449 device_xname(sc->sc_dev))); 7449 device_xname(sc->sc_dev)));
7450 if (!is_transmit) 7450 if (!is_transmit)
7451 ifp->if_flags |= IFF_OACTIVE; 7451 ifp->if_flags |= IFF_OACTIVE;
7452 txq->txq_flags |= WM_TXQ_NO_SPACE; 7452 txq->txq_flags |= WM_TXQ_NO_SPACE;
7453 bus_dmamap_unload(sc->sc_dmat, dmamap); 7453 bus_dmamap_unload(sc->sc_dmat, dmamap);
7454 WM_Q_EVCNT_INCR(txq, fifo_stall); 7454 WM_Q_EVCNT_INCR(txq, fifo_stall);
7455 break; 7455 break;
7456 } 7456 }
7457 7457
7458 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */ 7458 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
7459 7459
7460 DPRINTF(WM_DEBUG_TX, 7460 DPRINTF(WM_DEBUG_TX,
7461 ("%s: TX: packet has %d (%d) DMA segments\n", 7461 ("%s: TX: packet has %d (%d) DMA segments\n",
7462 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed)); 7462 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
7463 7463
7464 WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]); 7464 WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
7465 7465
7466 /* 7466 /*
7467 * Store a pointer to the packet so that we can free it 7467 * Store a pointer to the packet so that we can free it
7468 * later. 7468 * later.
7469 * 7469 *
7470 * Initially, we consider the number of descriptors the 7470 * Initially, we consider the number of descriptors the
 7471 * packet uses to be the number of DMA segments. This may be 7471 * packet uses to be the number of DMA segments. This may be
7472 * incremented by 1 if we do checksum offload (a descriptor 7472 * incremented by 1 if we do checksum offload (a descriptor
7473 * is used to set the checksum context). 7473 * is used to set the checksum context).
7474 */ 7474 */
7475 txs->txs_mbuf = m0; 7475 txs->txs_mbuf = m0;
7476 txs->txs_firstdesc = txq->txq_next; 7476 txs->txs_firstdesc = txq->txq_next;
7477 txs->txs_ndesc = segs_needed; 7477 txs->txs_ndesc = segs_needed;
7478 7478
7479 /* Set up offload parameters for this packet. */ 7479 /* Set up offload parameters for this packet. */
7480 if (m0->m_pkthdr.csum_flags & 7480 if (m0->m_pkthdr.csum_flags &
7481 (M_CSUM_TSOv4 | M_CSUM_TSOv6 | 7481 (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
7482 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 | 7482 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7483 M_CSUM_TCPv6 | M_CSUM_UDPv6)) { 7483 M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
7484 if (wm_tx_offload(sc, txq, txs, &cksumcmd, 7484 if (wm_tx_offload(sc, txq, txs, &cksumcmd,
7485 &cksumfields) != 0) { 7485 &cksumfields) != 0) {
7486 /* Error message already displayed. */ 7486 /* Error message already displayed. */
7487 bus_dmamap_unload(sc->sc_dmat, dmamap); 7487 bus_dmamap_unload(sc->sc_dmat, dmamap);
7488 continue; 7488 continue;
7489 } 7489 }
7490 } else { 7490 } else {
7491 cksumcmd = 0; 7491 cksumcmd = 0;
7492 cksumfields = 0; 7492 cksumfields = 0;
7493 } 7493 }
7494 7494
7495 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS; 7495 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
7496 7496
7497 /* Sync the DMA map. */ 7497 /* Sync the DMA map. */
7498 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, 7498 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
7499 BUS_DMASYNC_PREWRITE); 7499 BUS_DMASYNC_PREWRITE);
7500 7500
7501 /* Initialize the transmit descriptor. */ 7501 /* Initialize the transmit descriptor. */
7502 for (nexttx = txq->txq_next, seg = 0; 7502 for (nexttx = txq->txq_next, seg = 0;
7503 seg < dmamap->dm_nsegs; seg++) { 7503 seg < dmamap->dm_nsegs; seg++) {
7504 for (seglen = dmamap->dm_segs[seg].ds_len, 7504 for (seglen = dmamap->dm_segs[seg].ds_len,
7505 curaddr = dmamap->dm_segs[seg].ds_addr; 7505 curaddr = dmamap->dm_segs[seg].ds_addr;
7506 seglen != 0; 7506 seglen != 0;
7507 curaddr += curlen, seglen -= curlen, 7507 curaddr += curlen, seglen -= curlen,
7508 nexttx = WM_NEXTTX(txq, nexttx)) { 7508 nexttx = WM_NEXTTX(txq, nexttx)) {
7509 curlen = seglen; 7509 curlen = seglen;
7510 7510
7511 /* 7511 /*
7512 * So says the Linux driver: 7512 * So says the Linux driver:
7513 * Work around for premature descriptor 7513 * Work around for premature descriptor
7514 * write-backs in TSO mode. Append a 7514 * write-backs in TSO mode. Append a
7515 * 4-byte sentinel descriptor. 7515 * 4-byte sentinel descriptor.
7516 */ 7516 */
7517 if (use_tso && seg == dmamap->dm_nsegs - 1 && 7517 if (use_tso && seg == dmamap->dm_nsegs - 1 &&
7518 curlen > 8) 7518 curlen > 8)
7519 curlen -= 4; 7519 curlen -= 4;
7520 7520
7521 wm_set_dma_addr( 7521 wm_set_dma_addr(
7522 &txq->txq_descs[nexttx].wtx_addr, curaddr); 7522 &txq->txq_descs[nexttx].wtx_addr, curaddr);
7523 txq->txq_descs[nexttx].wtx_cmdlen 7523 txq->txq_descs[nexttx].wtx_cmdlen
7524 = htole32(cksumcmd | curlen); 7524 = htole32(cksumcmd | curlen);
7525 txq->txq_descs[nexttx].wtx_fields.wtxu_status 7525 txq->txq_descs[nexttx].wtx_fields.wtxu_status
7526 = 0; 7526 = 0;
7527 txq->txq_descs[nexttx].wtx_fields.wtxu_options 7527 txq->txq_descs[nexttx].wtx_fields.wtxu_options
7528 = cksumfields; 7528 = cksumfields;
7529 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0; 7529 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
7530 lasttx = nexttx; 7530 lasttx = nexttx;
7531 7531
7532 DPRINTF(WM_DEBUG_TX, 7532 DPRINTF(WM_DEBUG_TX,
7533 ("%s: TX: desc %d: low %#" PRIx64 ", " 7533 ("%s: TX: desc %d: low %#" PRIx64 ", "
7534 "len %#04zx\n", 7534 "len %#04zx\n",
7535 device_xname(sc->sc_dev), nexttx, 7535 device_xname(sc->sc_dev), nexttx,
7536 (uint64_t)curaddr, curlen)); 7536 (uint64_t)curaddr, curlen));
7537 } 7537 }
7538 } 7538 }
7539 7539
7540 KASSERT(lasttx != -1); 7540 KASSERT(lasttx != -1);
7541 7541
7542 /* 7542 /*
7543 * Set up the command byte on the last descriptor of 7543 * Set up the command byte on the last descriptor of
7544 * the packet. If we're in the interrupt delay window, 7544 * the packet. If we're in the interrupt delay window,
7545 * delay the interrupt. 7545 * delay the interrupt.
7546 */ 7546 */
7547 txq->txq_descs[lasttx].wtx_cmdlen |= 7547 txq->txq_descs[lasttx].wtx_cmdlen |=
7548 htole32(WTX_CMD_EOP | WTX_CMD_RS); 7548 htole32(WTX_CMD_EOP | WTX_CMD_RS);
7549 7549
7550 /* 7550 /*
7551 * If VLANs are enabled and the packet has a VLAN tag, set 7551 * If VLANs are enabled and the packet has a VLAN tag, set
7552 * up the descriptor to encapsulate the packet for us. 7552 * up the descriptor to encapsulate the packet for us.
7553 * 7553 *
7554 * This is only valid on the last descriptor of the packet. 7554 * This is only valid on the last descriptor of the packet.
7555 */ 7555 */
7556 if (vlan_has_tag(m0)) { 7556 if (vlan_has_tag(m0)) {
7557 txq->txq_descs[lasttx].wtx_cmdlen |= 7557 txq->txq_descs[lasttx].wtx_cmdlen |=
7558 htole32(WTX_CMD_VLE); 7558 htole32(WTX_CMD_VLE);
7559 txq->txq_descs[lasttx].wtx_fields.wtxu_vlan 7559 txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
7560 = htole16(vlan_get_tag(m0)); 7560 = htole16(vlan_get_tag(m0));
7561 } 7561 }
7562 7562
7563 txs->txs_lastdesc = lasttx; 7563 txs->txs_lastdesc = lasttx;
7564 7564
7565 DPRINTF(WM_DEBUG_TX, 7565 DPRINTF(WM_DEBUG_TX,
7566 ("%s: TX: desc %d: cmdlen 0x%08x\n", 7566 ("%s: TX: desc %d: cmdlen 0x%08x\n",
7567 device_xname(sc->sc_dev), 7567 device_xname(sc->sc_dev),
7568 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen))); 7568 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
7569 7569
7570 /* Sync the descriptors we're using. */ 7570 /* Sync the descriptors we're using. */
7571 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc, 7571 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
7572 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 7572 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
7573 7573
7574 /* Give the packet to the chip. */ 7574 /* Give the packet to the chip. */
7575 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx); 7575 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
7576 7576
7577 DPRINTF(WM_DEBUG_TX, 7577 DPRINTF(WM_DEBUG_TX,
7578 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx)); 7578 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
7579 7579
7580 DPRINTF(WM_DEBUG_TX, 7580 DPRINTF(WM_DEBUG_TX,
7581 ("%s: TX: finished transmitting packet, job %d\n", 7581 ("%s: TX: finished transmitting packet, job %d\n",
7582 device_xname(sc->sc_dev), txq->txq_snext)); 7582 device_xname(sc->sc_dev), txq->txq_snext));
7583 7583
7584 /* Advance the tx pointer. */ 7584 /* Advance the tx pointer. */
7585 txq->txq_free -= txs->txs_ndesc; 7585 txq->txq_free -= txs->txs_ndesc;
7586 txq->txq_next = nexttx; 7586 txq->txq_next = nexttx;
7587 7587
7588 txq->txq_sfree--; 7588 txq->txq_sfree--;
7589 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext); 7589 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
7590 7590
7591 /* Pass the packet to any BPF listeners. */ 7591 /* Pass the packet to any BPF listeners. */
7592 bpf_mtap(ifp, m0); 7592 bpf_mtap(ifp, m0);
7593 } 7593 }
7594 7594
7595 if (m0 != NULL) { 7595 if (m0 != NULL) {
7596 if (!is_transmit) 7596 if (!is_transmit)
7597 ifp->if_flags |= IFF_OACTIVE; 7597 ifp->if_flags |= IFF_OACTIVE;
7598 txq->txq_flags |= WM_TXQ_NO_SPACE; 7598 txq->txq_flags |= WM_TXQ_NO_SPACE;
7599 WM_Q_EVCNT_INCR(txq, descdrop); 7599 WM_Q_EVCNT_INCR(txq, descdrop);
7600 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", 7600 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
7601 __func__)); 7601 __func__));
7602 m_freem(m0); 7602 m_freem(m0);
7603 } 7603 }
7604 7604
7605 if (txq->txq_sfree == 0 || txq->txq_free <= 2) { 7605 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
7606 /* No more slots; notify upper layer. */ 7606 /* No more slots; notify upper layer. */
7607 if (!is_transmit) 7607 if (!is_transmit)
7608 ifp->if_flags |= IFF_OACTIVE; 7608 ifp->if_flags |= IFF_OACTIVE;
7609 txq->txq_flags |= WM_TXQ_NO_SPACE; 7609 txq->txq_flags |= WM_TXQ_NO_SPACE;
7610 } 7610 }
7611 7611
7612 if (txq->txq_free != ofree) { 7612 if (txq->txq_free != ofree) {
7613 /* Set a watchdog timer in case the chip flakes out. */ 7613 /* Set a watchdog timer in case the chip flakes out. */
7614 txq->txq_lastsent = time_uptime; 7614 txq->txq_lastsent = time_uptime;
7615 txq->txq_sending = true; 7615 txq->txq_sending = true;
7616 } 7616 }
7617} 7617}
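
The "txq->txq_free - 2" test in the loop above reserves one descriptor for the TDT register semantics and one for a possible checksum/offload context descriptor, while segs_needed already includes the extra TSO sentinel descriptor. A small sketch of that accounting, with illustrative numbers only:

/* Sketch of the free-descriptor check above; the numbers are made up. */
#include <stdbool.h>
#include <stdio.h>

/* Returns true if the packet must wait for wm_txeof() to free descriptors. */
static bool
must_defer(int dm_nsegs, bool use_tso, int txq_free)
{
	int segs_needed = dm_nsegs + (use_tso ? 1 : 0);	/* TSO sentinel */

	/* Keep 1 descriptor for TDT semantics + 1 for a context descriptor. */
	return segs_needed > txq_free - 2;
}

int
main(void)
{
	/* 9 segments, no TSO, 10 free: 9 > 8, so the packet is deferred. */
	printf("%d\n", must_defer(9, false, 10));
	/* 7 segments with TSO, 10 free: 8 <= 8, so it can be queued. */
	printf("%d\n", must_defer(7, true, 10));
	return 0;
}
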
7618 7618
7619/* 7619/*
7620 * wm_nq_tx_offload: 7620 * wm_nq_tx_offload:
7621 * 7621 *
7622 * Set up TCP/IP checksumming parameters for the 7622 * Set up TCP/IP checksumming parameters for the
7623 * specified packet, for NEWQUEUE devices 7623 * specified packet, for NEWQUEUE devices
7624 */ 7624 */
7625static int 7625static int
7626wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq, 7626wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
7627 struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum) 7627 struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
7628{ 7628{
7629 struct mbuf *m0 = txs->txs_mbuf; 7629 struct mbuf *m0 = txs->txs_mbuf;
7630 uint32_t vl_len, mssidx, cmdc; 7630 uint32_t vl_len, mssidx, cmdc;
7631 struct ether_header *eh; 7631 struct ether_header *eh;
7632 int offset, iphl; 7632 int offset, iphl;
7633 7633
7634 /* 7634 /*
7635 * XXX It would be nice if the mbuf pkthdr had offset 7635 * XXX It would be nice if the mbuf pkthdr had offset
7636 * fields for the protocol headers. 7636 * fields for the protocol headers.
7637 */ 7637 */
7638 *cmdlenp = 0; 7638 *cmdlenp = 0;
7639 *fieldsp = 0; 7639 *fieldsp = 0;
7640 7640
7641 eh = mtod(m0, struct ether_header *); 7641 eh = mtod(m0, struct ether_header *);
7642 switch (htons(eh->ether_type)) { 7642 switch (htons(eh->ether_type)) {
7643 case ETHERTYPE_IP: 7643 case ETHERTYPE_IP:
7644 case ETHERTYPE_IPV6: 7644 case ETHERTYPE_IPV6:
7645 offset = ETHER_HDR_LEN; 7645 offset = ETHER_HDR_LEN;
7646 break; 7646 break;
7647 7647
7648 case ETHERTYPE_VLAN: 7648 case ETHERTYPE_VLAN:
7649 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 7649 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
7650 break; 7650 break;
7651 7651
7652 default: 7652 default:
7653 /* Don't support this protocol or encapsulation. */ 7653 /* Don't support this protocol or encapsulation. */
7654 *do_csum = false; 7654 *do_csum = false;
7655 return 0; 7655 return 0;
7656 } 7656 }
7657 *do_csum = true; 7657 *do_csum = true;
7658 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS; 7658 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
7659 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT; 7659 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
7660 7660
7661 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT); 7661 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
7662 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0); 7662 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
7663 7663
7664 if ((m0->m_pkthdr.csum_flags & 7664 if ((m0->m_pkthdr.csum_flags &
7665 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) { 7665 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
7666 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data); 7666 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
7667 } else { 7667 } else {
7668 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data); 7668 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
7669 } 7669 }
7670 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT); 7670 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
7671 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0); 7671 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
7672 7672
7673 if (vlan_has_tag(m0)) { 7673 if (vlan_has_tag(m0)) {
7674 vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK) 7674 vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
7675 << NQTXC_VLLEN_VLAN_SHIFT); 7675 << NQTXC_VLLEN_VLAN_SHIFT);
7676 *cmdlenp |= NQTX_CMD_VLE; 7676 *cmdlenp |= NQTX_CMD_VLE;
7677 } 7677 }
7678 7678
7679 mssidx = 0; 7679 mssidx = 0;
7680 7680
7681 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) { 7681 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
7682 int hlen = offset + iphl; 7682 int hlen = offset + iphl;
7683 int tcp_hlen; 7683 int tcp_hlen;
7684 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0; 7684 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
7685 7685
7686 if (__predict_false(m0->m_len < 7686 if (__predict_false(m0->m_len <
7687 (hlen + sizeof(struct tcphdr)))) { 7687 (hlen + sizeof(struct tcphdr)))) {
7688 /* 7688 /*
7689 * TCP/IP headers are not in the first mbuf; we need 7689 * TCP/IP headers are not in the first mbuf; we need
7690 * to do this the slow and painful way. Let's just 7690 * to do this the slow and painful way. Let's just
7691 * hope this doesn't happen very often. 7691 * hope this doesn't happen very often.
7692 */ 7692 */
7693 struct tcphdr th; 7693 struct tcphdr th;
7694 7694
7695 WM_Q_EVCNT_INCR(txq, tsopain); 7695 WM_Q_EVCNT_INCR(txq, tsopain);
7696 7696
7697 m_copydata(m0, hlen, sizeof(th), &th); 7697 m_copydata(m0, hlen, sizeof(th), &th);
7698 if (v4) { 7698 if (v4) {
7699 struct ip ip; 7699 struct ip ip;
7700 7700
7701 m_copydata(m0, offset, sizeof(ip), &ip); 7701 m_copydata(m0, offset, sizeof(ip), &ip);
7702 ip.ip_len = 0; 7702 ip.ip_len = 0;
7703 m_copyback(m0, 7703 m_copyback(m0,
7704 offset + offsetof(struct ip, ip_len), 7704 offset + offsetof(struct ip, ip_len),
7705 sizeof(ip.ip_len), &ip.ip_len); 7705 sizeof(ip.ip_len), &ip.ip_len);
7706 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr, 7706 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
7707 ip.ip_dst.s_addr, htons(IPPROTO_TCP)); 7707 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
7708 } else { 7708 } else {
7709 struct ip6_hdr ip6; 7709 struct ip6_hdr ip6;
7710 7710
7711 m_copydata(m0, offset, sizeof(ip6), &ip6); 7711 m_copydata(m0, offset, sizeof(ip6), &ip6);
7712 ip6.ip6_plen = 0; 7712 ip6.ip6_plen = 0;
7713 m_copyback(m0, 7713 m_copyback(m0,
7714 offset + offsetof(struct ip6_hdr, ip6_plen), 7714 offset + offsetof(struct ip6_hdr, ip6_plen),
7715 sizeof(ip6.ip6_plen), &ip6.ip6_plen); 7715 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
7716 th.th_sum = in6_cksum_phdr(&ip6.ip6_src, 7716 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
7717 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP)); 7717 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
7718 } 7718 }
7719 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum), 7719 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
7720 sizeof(th.th_sum), &th.th_sum); 7720 sizeof(th.th_sum), &th.th_sum);
7721 7721
7722 tcp_hlen = th.th_off << 2; 7722 tcp_hlen = th.th_off << 2;
7723 } else { 7723 } else {
7724 /* 7724 /*
7725 * TCP/IP headers are in the first mbuf; we can do 7725 * TCP/IP headers are in the first mbuf; we can do
7726 * this the easy way. 7726 * this the easy way.
7727 */ 7727 */
7728 struct tcphdr *th; 7728 struct tcphdr *th;
7729 7729
7730 if (v4) { 7730 if (v4) {
7731 struct ip *ip = 7731 struct ip *ip =
7732 (void *)(mtod(m0, char *) + offset); 7732 (void *)(mtod(m0, char *) + offset);
7733 th = (void *)(mtod(m0, char *) + hlen); 7733 th = (void *)(mtod(m0, char *) + hlen);
7734 7734
7735 ip->ip_len = 0; 7735 ip->ip_len = 0;
7736 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, 7736 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
7737 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 7737 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
7738 } else { 7738 } else {
7739 struct ip6_hdr *ip6 = 7739 struct ip6_hdr *ip6 =
7740 (void *)(mtod(m0, char *) + offset); 7740 (void *)(mtod(m0, char *) + offset);
7741 th = (void *)(mtod(m0, char *) + hlen); 7741 th = (void *)(mtod(m0, char *) + hlen);
7742 7742
7743 ip6->ip6_plen = 0; 7743 ip6->ip6_plen = 0;
7744 th->th_sum = in6_cksum_phdr(&ip6->ip6_src, 7744 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
7745 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP)); 7745 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
7746 } 7746 }
7747 tcp_hlen = th->th_off << 2; 7747 tcp_hlen = th->th_off << 2;
7748 } 7748 }
7749 hlen += tcp_hlen; 7749 hlen += tcp_hlen;
7750 *cmdlenp |= NQTX_CMD_TSE; 7750 *cmdlenp |= NQTX_CMD_TSE;
7751 7751
7752 if (v4) { 7752 if (v4) {
7753 WM_Q_EVCNT_INCR(txq, tso); 7753 WM_Q_EVCNT_INCR(txq, tso);
7754 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM; 7754 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
7755 } else { 7755 } else {
7756 WM_Q_EVCNT_INCR(txq, tso6); 7756 WM_Q_EVCNT_INCR(txq, tso6);
7757 *fieldsp |= NQTXD_FIELDS_TUXSM; 7757 *fieldsp |= NQTXD_FIELDS_TUXSM;
7758 } 7758 }
7759 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT); 7759 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
7760 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0); 7760 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
7761 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT); 7761 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
7762 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0); 7762 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
7763 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT); 7763 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
7764 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0); 7764 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
7765 } else { 7765 } else {
7766 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT); 7766 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
7767 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0); 7767 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
7768 } 7768 }
7769 7769
7770 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) { 7770 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
7771 *fieldsp |= NQTXD_FIELDS_IXSM; 7771 *fieldsp |= NQTXD_FIELDS_IXSM;
7772 cmdc |= NQTXC_CMD_IP4; 7772 cmdc |= NQTXC_CMD_IP4;
7773 } 7773 }
7774 7774
7775 if (m0->m_pkthdr.csum_flags & 7775 if (m0->m_pkthdr.csum_flags &
7776 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) { 7776 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
7777 WM_Q_EVCNT_INCR(txq, tusum); 7777 WM_Q_EVCNT_INCR(txq, tusum);
7778 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) 7778 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
7779 cmdc |= NQTXC_CMD_TCP; 7779 cmdc |= NQTXC_CMD_TCP;
7780 else 7780 else
7781 cmdc |= NQTXC_CMD_UDP; 7781 cmdc |= NQTXC_CMD_UDP;
7782 7782
7783 cmdc |= NQTXC_CMD_IP4; 7783 cmdc |= NQTXC_CMD_IP4;
7784 *fieldsp |= NQTXD_FIELDS_TUXSM; 7784 *fieldsp |= NQTXD_FIELDS_TUXSM;
7785 } 7785 }
7786 if (m0->m_pkthdr.csum_flags & 7786 if (m0->m_pkthdr.csum_flags &
7787 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) { 7787 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
7788 WM_Q_EVCNT_INCR(txq, tusum6); 7788 WM_Q_EVCNT_INCR(txq, tusum6);
7789 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) 7789 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
7790 cmdc |= NQTXC_CMD_TCP; 7790 cmdc |= NQTXC_CMD_TCP;
7791 else 7791 else
7792 cmdc |= NQTXC_CMD_UDP; 7792 cmdc |= NQTXC_CMD_UDP;
7793 7793
7794 cmdc |= NQTXC_CMD_IP6; 7794 cmdc |= NQTXC_CMD_IP6;
7795 *fieldsp |= NQTXD_FIELDS_TUXSM; 7795 *fieldsp |= NQTXD_FIELDS_TUXSM;
7796 } 7796 }
7797 7797
 7798 /* 7798 /*
 7799 * We don't have to write a context descriptor for every packet on 7799 * We don't have to write a context descriptor for every packet on
 7800 * NEWQUEUE controllers, that is 82575, 82576, 82580, I350, I354, 7800 * NEWQUEUE controllers, that is 82575, 82576, 82580, I350, I354,
 7801 * I210 and I211; writing one per Tx queue is enough for these 7801 * I210 and I211; writing one per Tx queue is enough for these
 7802 * controllers. Writing a context descriptor for every packet adds 7802 * controllers. Writing a context descriptor for every packet adds
 7803 * overhead, but it does not cause problems. (A hypothetical caching 7803 * overhead, but it does not cause problems. (A hypothetical caching
 7804 * sketch follows this function.) 7804 * sketch follows this function.)
 7805 */ 7805 */
7806 /* Fill in the context descriptor. */ 7806 /* Fill in the context descriptor. */
7807 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len = 7807 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
7808 htole32(vl_len); 7808 htole32(vl_len);
7809 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0; 7809 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
7810 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd = 7810 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
7811 htole32(cmdc); 7811 htole32(cmdc);
7812 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx = 7812 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
7813 htole32(mssidx); 7813 htole32(mssidx);
7814 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE); 7814 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
7815 DPRINTF(WM_DEBUG_TX, 7815 DPRINTF(WM_DEBUG_TX,
7816 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev), 7816 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
7817 txq->txq_next, 0, vl_len)); 7817 txq->txq_next, 0, vl_len));
7818 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc)); 7818 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
7819 txq->txq_next = WM_NEXTTX(txq, txq->txq_next); 7819 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
7820 txs->txs_ndesc++; 7820 txs->txs_ndesc++;
7821 return 0; 7821 return 0;
7822} 7822}
7823 7823
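The comment in wm_nq_tx_offload() notes that NEWQUEUE controllers only need a context descriptor once per Tx queue, although the driver writes one for every offloaded packet. Below is a minimal sketch of how such a write could be skipped by caching the last-programmed context; struct nq_ctx_cache and nq_ctx_changed() are hypothetical illustrations, not part of if_wm.c.

#include <stdbool.h>
#include <stdint.h>

/*
 * Hypothetical per-queue cache of the last context descriptor written.
 * The fields mirror the vl_len, cmdc and mssidx values computed in
 * wm_nq_tx_offload() above.
 */
struct nq_ctx_cache {
	uint32_t last_vl_len;
	uint32_t last_cmd;
	uint32_t last_mssidx;
	bool	 valid;
};

/*
 * Return true when a fresh context descriptor must be written, i.e.
 * when the offload parameters differ from the ones already programmed
 * into the queue; otherwise the existing context can be reused.
 */
static bool
nq_ctx_changed(struct nq_ctx_cache *c, uint32_t vl_len, uint32_t cmd,
    uint32_t mssidx)
{
	if (c->valid && c->last_vl_len == vl_len && c->last_cmd == cmd &&
	    c->last_mssidx == mssidx)
		return false;
	c->last_vl_len = vl_len;
	c->last_cmd = cmd;
	c->last_mssidx = mssidx;
	c->valid = true;
	return true;
}

A caller would test nq_ctx_changed() with the freshly computed values and only emit the context descriptor (and bump txs_ndesc) when it returns true.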
7824/* 7824/*
7825 * wm_nq_start: [ifnet interface function] 7825 * wm_nq_start: [ifnet interface function]
7826 * 7826 *
7827 * Start packet transmission on the interface for NEWQUEUE devices 7827 * Start packet transmission on the interface for NEWQUEUE devices
7828 */ 7828 */
7829static void 7829static void
7830wm_nq_start(struct ifnet *ifp) 7830wm_nq_start(struct ifnet *ifp)
7831{ 7831{
7832 struct wm_softc *sc = ifp->if_softc; 7832 struct wm_softc *sc = ifp->if_softc;
7833 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq; 7833 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7834 7834
7835#ifdef WM_MPSAFE 7835#ifdef WM_MPSAFE
7836 KASSERT(if_is_mpsafe(ifp)); 7836 KASSERT(if_is_mpsafe(ifp));
7837#endif 7837#endif
7838 /* 7838 /*
7839 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c. 7839 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
7840 */ 7840 */
7841 7841
7842 mutex_enter(txq->txq_lock); 7842 mutex_enter(txq->txq_lock);
7843 if (!txq->txq_stopping) 7843 if (!txq->txq_stopping)
7844 wm_nq_start_locked(ifp); 7844 wm_nq_start_locked(ifp);
7845 mutex_exit(txq->txq_lock); 7845 mutex_exit(txq->txq_lock);
7846} 7846}
7847 7847
7848static void 7848static void
7849wm_nq_start_locked(struct ifnet *ifp) 7849wm_nq_start_locked(struct ifnet *ifp)
7850{ 7850{
7851 struct wm_softc *sc = ifp->if_softc; 7851 struct wm_softc *sc = ifp->if_softc;
7852 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq; 7852 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7853 7853
7854 wm_nq_send_common_locked(ifp, txq, false); 7854 wm_nq_send_common_locked(ifp, txq, false);
7855} 7855}
7856 7856
7857static int 7857static int
7858wm_nq_transmit(struct ifnet *ifp, struct mbuf *m) 7858wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
7859{ 7859{
7860 int qid; 7860 int qid;
7861 struct wm_softc *sc = ifp->if_softc; 7861 struct wm_softc *sc = ifp->if_softc;
7862 struct wm_txqueue *txq; 7862 struct wm_txqueue *txq;
7863 7863
7864 qid = wm_select_txqueue(ifp, m); 7864 qid = wm_select_txqueue(ifp, m);
7865 txq = &sc->sc_queue[qid].wmq_txq; 7865 txq = &sc->sc_queue[qid].wmq_txq;
7866 7866
7867 if (__predict_false(!pcq_put(txq->txq_interq, m))) { 7867 if (__predict_false(!pcq_put(txq->txq_interq, m))) {
7868 m_freem(m); 7868 m_freem(m);
7869 WM_Q_EVCNT_INCR(txq, pcqdrop); 7869 WM_Q_EVCNT_INCR(txq, pcqdrop);
7870 return ENOBUFS; 7870 return ENOBUFS;
7871 } 7871 }
7872 7872
7873 /* 7873 /*
7874 * XXXX NOMPSAFE: ifp->if_data should be percpu. 7874 * XXXX NOMPSAFE: ifp->if_data should be percpu.
7875 */ 7875 */
7876 ifp->if_obytes += m->m_pkthdr.len; 7876 ifp->if_obytes += m->m_pkthdr.len;
7877 if (m->m_flags & M_MCAST) 7877 if (m->m_flags & M_MCAST)
7878 ifp->if_omcasts++; 7878 ifp->if_omcasts++;
7879 7879
 7880 /* 7880 /*
 7881 * There are two situations in which this mutex_tryenter() can fail 7881 * There are two situations in which this mutex_tryenter() can fail
 7882 * at run time: 7882 * at run time:
 7883 * (1) contention with the interrupt handler (wm_txrxintr_msix()) 7883 * (1) contention with the interrupt handler (wm_txrxintr_msix())
 7884 * (2) contention with the deferred if_start softint (wm_handle_queue()) 7884 * (2) contention with the deferred if_start softint (wm_handle_queue())
 7885 * In case (1), the last packet enqueued to txq->txq_interq is 7885 * In case (1), the last packet enqueued to txq->txq_interq is
 7886 * dequeued by wm_deferred_start_locked(), so it does not get stuck. 7886 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
 7887 * In case (2), the last packet enqueued to txq->txq_interq is also 7887 * In case (2), the last packet enqueued to txq->txq_interq is also
 7888 * dequeued by wm_deferred_start_locked(), so it does not get stuck 7888 * dequeued by wm_deferred_start_locked(), so it does not get stuck
 7889 * either. (A hypothetical try-lock sketch follows this function.) 7889 * either. (A hypothetical try-lock sketch follows this function.)
 7890 */ 7890 */
7891 if (mutex_tryenter(txq->txq_lock)) { 7891 if (mutex_tryenter(txq->txq_lock)) {
7892 if (!txq->txq_stopping) 7892 if (!txq->txq_stopping)
7893 wm_nq_transmit_locked(ifp, txq); 7893 wm_nq_transmit_locked(ifp, txq);
7894 mutex_exit(txq->txq_lock); 7894 mutex_exit(txq->txq_lock);
7895 } 7895 }
7896 7896
7897 return 0; 7897 return 0;
7898} 7898}
7899 7899
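The comment in wm_nq_transmit() explains why a failed mutex_tryenter() is harmless: the packet is already on txq_interq, and whichever context holds the lock drains that queue before releasing it. The sketch below condenses that enqueue-then-try-lock pattern into plain C, with pthreads standing in for the kernel mutex; the sketch_* names and the stubs are placeholders, not if_wm.c code.

#include <pthread.h>
#include <stdbool.h>

/* Placeholder Tx queue; only the lock matters for this sketch. */
struct sketch_txq {
	pthread_mutex_t lock;
};

/* Stub enqueue: stands in for pcq_put() on txq->txq_interq. */
static bool
enqueue_pkt(struct sketch_txq *q, void *pkt)
{
	(void)q; (void)pkt;
	return true;
}

/* Stub drain: stands in for wm_nq_transmit_locked()/deferred start. */
static void
start_locked(struct sketch_txq *q)
{
	(void)q;
}

/*
 * Enqueue first, then only *try* to take the Tx lock.  If the lock is
 * busy, the current holder (interrupt handler or deferred-start
 * softint) drains the queue before releasing the lock, so the packet
 * just enqueued cannot get stuck.
 */
static int
sketch_transmit(struct sketch_txq *q, void *pkt)
{
	if (!enqueue_pkt(q, pkt))
		return -1;	/* queue full; caller frees the packet */

	if (pthread_mutex_trylock(&q->lock) == 0) {
		start_locked(q);
		pthread_mutex_unlock(&q->lock);
	}
	return 0;
}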
7900static void 7900static void
7901wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq) 7901wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
7902{ 7902{
7903 7903
7904 wm_nq_send_common_locked(ifp, txq, true); 7904 wm_nq_send_common_locked(ifp, txq, true);
7905} 7905}
7906 7906
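wm_nq_transmit() selects a Tx queue with wm_select_txqueue() before enqueueing the packet. A common way to implement such a mapping is to fold the sending CPU's index onto the number of configured queues; the sketch below only illustrates that idea and is not the actual wm_select_txqueue() body.

#include <stdio.h>

/*
 * Hypothetical CPU-to-queue mapping: fold the CPU index onto the number
 * of configured Tx queues.  Several CPUs share a queue whenever the CPU
 * count is not a multiple of the queue count.
 */
static unsigned int
select_txqueue(unsigned int cpu_idx, unsigned int nqueues)
{
	return cpu_idx % nqueues;
}

int
main(void)
{
	for (unsigned int cpu = 0; cpu < 8; cpu++)
		printf("cpu %u -> txq %u\n", cpu, select_txqueue(cpu, 4));
	return 0;
}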
7907static void 7907static void
7908wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq, 7908wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
7909 bool is_transmit) 7909 bool is_transmit)
7910{ 7910{
7911 struct wm_softc *sc = ifp->if_softc; 7911 struct wm_softc *sc = ifp->if_softc;
7912 struct mbuf *m0; 7912 struct mbuf *m0;
7913 struct wm_txsoft *txs; 7913 struct wm_txsoft *txs;
7914 bus_dmamap_t dmamap; 7914 bus_dmamap_t dmamap;
7915 int error, nexttx, lasttx = -1, seg, segs_needed; 7915 int error, nexttx, lasttx = -1, seg, segs_needed;
7916 bool do_csum, sent; 7916 bool do_csum, sent;
7917 bool remap = true; 7917 bool remap = true;
7918 7918
7919 KASSERT(mutex_owned(txq->txq_lock)); 7919 KASSERT(mutex_owned(txq->txq_lock));
7920 7920
7921 if ((ifp->if_flags & IFF_RUNNING) == 0) 7921 if ((ifp->if_flags & IFF_RUNNING) == 0)
7922 return; 7922 return;
7923 if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit) 7923 if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
7924 return; 7924 return;
7925 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0) 7925 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
7926 return; 7926 return;
7927 7927
7928 sent = false; 7928 sent = false;
7929 7929
7930 /* 7930 /*
7931 * Loop through the send queue, setting up transmit descriptors 7931 * Loop through the send queue, setting up transmit descriptors
7932 * until we drain the queue, or use up all available transmit 7932 * until we drain the queue, or use up all available transmit
7933 * descriptors. 7933 * descriptors.
7934 */ 7934 */
7935 for (;;) { 7935 for (;;) {
7936 m0 = NULL; 7936 m0 = NULL;
7937 7937
7938 /* Get a work queue entry. */ 7938 /* Get a work queue entry. */
7939 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) { 7939 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
7940 wm_txeof(txq, UINT_MAX); 7940 wm_txeof(txq, UINT_MAX);
7941 if (txq->txq_sfree == 0) { 7941 if (txq->txq_sfree == 0) {
7942 DPRINTF(WM_DEBUG_TX, 7942 DPRINTF(WM_DEBUG_TX,
7943 ("%s: TX: no free job descriptors\n", 7943 ("%s: TX: no free job descriptors\n",
7944 device_xname(sc->sc_dev))); 7944 device_xname(sc->sc_dev)));
7945 WM_Q_EVCNT_INCR(txq, txsstall); 7945 WM_Q_EVCNT_INCR(txq, txsstall);
7946 break; 7946 break;
7947 } 7947 }
7948 } 7948 }
7949 7949
7950 /* Grab a packet off the queue. */ 7950 /* Grab a packet off the queue. */
7951 if (is_transmit) 7951 if (is_transmit)
7952 m0 = pcq_get(txq->txq_interq); 7952 m0 = pcq_get(txq->txq_interq);
7953 else 7953 else
7954 IFQ_DEQUEUE(&ifp->if_snd, m0); 7954 IFQ_DEQUEUE(&ifp->if_snd, m0);
7955 if (m0 == NULL) 7955 if (m0 == NULL)
7956 break; 7956 break;
7957 7957
7958 DPRINTF(WM_DEBUG_TX, 7958 DPRINTF(WM_DEBUG_TX,
7959 ("%s: TX: have packet to transmit: %p\n", 7959 ("%s: TX: have packet to transmit: %p\n",
7960 device_xname(sc->sc_dev), m0)); 7960 device_xname(sc->sc_dev), m0));
7961 7961
7962 txs = &txq->txq_soft[txq->txq_snext]; 7962 txs = &txq->txq_soft[txq->txq_snext];
7963 dmamap = txs->txs_dmamap; 7963 dmamap = txs->txs_dmamap;
7964 7964
7965 /* 7965 /*
7966 * Load the DMA map. If this fails, the packet either 7966 * Load the DMA map. If this fails, the packet either
7967 * didn't fit in the allotted number of segments, or we 7967 * didn't fit in the allotted number of segments, or we
7968 * were short on resources. For the too-many-segments 7968 * were short on resources. For the too-many-segments
7969 * case, we simply report an error and drop the packet, 7969 * case, we simply report an error and drop the packet,
7970 * since we can't sanely copy a jumbo packet to a single 7970 * since we can't sanely copy a jumbo packet to a single
7971 * buffer. 7971 * buffer.
7972 */ 7972 */
7973retry: 7973retry:
7974 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, 7974 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
7975 BUS_DMA_WRITE | BUS_DMA_NOWAIT); 7975 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
7976 if (__predict_false(error)) { 7976 if (__predict_false(error)) {
7977 if (error == EFBIG) { 7977 if (error == EFBIG) {
7978 if (remap == true) { 7978 if (remap == true) {
7979 struct mbuf *m; 7979 struct mbuf *m;
7980 7980
7981 remap = false; 7981 remap = false;
7982 m = m_defrag(m0, M_NOWAIT); 7982 m = m_defrag(m0, M_NOWAIT);
7983 if (m != NULL) { 7983 if (m != NULL) {
7984 WM_Q_EVCNT_INCR(txq, defrag); 7984 WM_Q_EVCNT_INCR(txq, defrag);
7985 m0 = m; 7985 m0 = m;
7986 goto retry; 7986 goto retry;
7987 } 7987 }
7988 } 7988 }
7989 WM_Q_EVCNT_INCR(txq, toomanyseg); 7989 WM_Q_EVCNT_INCR(txq, toomanyseg);
7990 log(LOG_ERR, "%s: Tx packet consumes too many " 7990 log(LOG_ERR, "%s: Tx packet consumes too many "
7991 "DMA segments, dropping...\n", 7991 "DMA segments, dropping...\n",
7992 device_xname(sc->sc_dev)); 7992 device_xname(sc->sc_dev));
7993 wm_dump_mbuf_chain(sc, m0); 7993 wm_dump_mbuf_chain(sc, m0);
7994 m_freem(m0); 7994 m_freem(m0);
7995 continue; 7995 continue;
7996 } 7996 }
7997 /* Short on resources, just stop for now. */ 7997 /* Short on resources, just stop for now. */
7998 DPRINTF(WM_DEBUG_TX, 7998 DPRINTF(WM_DEBUG_TX,
7999 ("%s: TX: dmamap load failed: %d\n", 7999 ("%s: TX: dmamap load failed: %d\n",
8000 device_xname(sc->sc_dev), error)); 8000 device_xname(sc->sc_dev), error));
8001 break; 8001 break;
8002 } 8002 }
8003 8003
8004 segs_needed = dmamap->dm_nsegs; 8004 segs_needed = dmamap->dm_nsegs;
8005 8005
8006 /* 8006 /*
8007 * Ensure we have enough descriptors free to describe 8007 * Ensure we have enough descriptors free to describe
8008 * the packet. Note, we always reserve one descriptor 8008 * the packet. Note, we always reserve one descriptor
8009 * at the end of the ring due to the semantics of the 8009 * at the end of the ring due to the semantics of the
8010 * TDT register, plus one more in the event we need 8010 * TDT register, plus one more in the event we need
8011 * to load offload context. 8011 * to load offload context.
8012 */ 8012 */
8013 if (segs_needed > txq->txq_free - 2) { 8013 if (segs_needed > txq->txq_free - 2) {
8014 /* 8014 /*
8015 * Not enough free descriptors to transmit this 8015 * Not enough free descriptors to transmit this
8016 * packet. We haven't committed anything yet, 8016 * packet. We haven't committed anything yet,
8017 * so just unload the DMA map, put the packet 8017 * so just unload the DMA map, put the packet
 8018 * back on the queue, and punt. Notify the upper 8018 * back on the queue, and punt. Notify the upper
8019 * layer that there are no more slots left. 8019 * layer that there are no more slots left.
8020 */ 8020 */
8021 DPRINTF(WM_DEBUG_TX, 8021 DPRINTF(WM_DEBUG_TX,
8022 ("%s: TX: need %d (%d) descriptors, have %d\n", 8022 ("%s: TX: need %d (%d) descriptors, have %d\n",
8023 device_xname(sc->sc_dev), dmamap->dm_nsegs, 8023 device_xname(sc->sc_dev), dmamap->dm_nsegs,
8024 segs_needed, txq->txq_free - 1)); 8024 segs_needed, txq->txq_free - 1));
8025 if (!is_transmit) 8025 if (!is_transmit)
8026 ifp->if_flags |= IFF_OACTIVE; 8026 ifp->if_flags |= IFF_OACTIVE;
8027 txq->txq_flags |= WM_TXQ_NO_SPACE; 8027 txq->txq_flags |= WM_TXQ_NO_SPACE;
8028 bus_dmamap_unload(sc->sc_dmat, dmamap); 8028 bus_dmamap_unload(sc->sc_dmat, dmamap);
8029 WM_Q_EVCNT_INCR(txq, txdstall); 8029 WM_Q_EVCNT_INCR(txq, txdstall);
8030 break; 8030 break;
8031 } 8031 }
8032 8032
8033 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */ 8033 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
8034 8034
8035 DPRINTF(WM_DEBUG_TX, 8035 DPRINTF(WM_DEBUG_TX,
8036 ("%s: TX: packet has %d (%d) DMA segments\n", 8036 ("%s: TX: packet has %d (%d) DMA segments\n",
8037 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed)); 8037 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
8038 8038
8039 WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]); 8039 WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
8040 8040
8041 /* 8041 /*
8042 * Store a pointer to the packet so that we can free it 8042 * Store a pointer to the packet so that we can free it
8043 * later. 8043 * later.
8044 * 8044 *
 8045 * Initially, we take the number of descriptors the 8045 * Initially, we take the number of descriptors the
 8046 * packet uses to be the number of DMA segments. This may be 8046 * packet uses to be the number of DMA segments. This may be
8047 * incremented by 1 if we do checksum offload (a descriptor 8047 * incremented by 1 if we do checksum offload (a descriptor
8048 * is used to set the checksum context). 8048 * is used to set the checksum context).
8049 */ 8049 */
8050 txs->txs_mbuf = m0; 8050 txs->txs_mbuf = m0;
8051 txs->txs_firstdesc = txq->txq_next; 8051 txs->txs_firstdesc = txq->txq_next;
8052 txs->txs_ndesc = segs_needed; 8052 txs->txs_ndesc = segs_needed;
8053 8053
8054 /* Set up offload parameters for this packet. */ 8054 /* Set up offload parameters for this packet. */
8055 uint32_t cmdlen, fields, dcmdlen; 8055 uint32_t cmdlen, fields, dcmdlen;
8056 if (m0->m_pkthdr.csum_flags &  8056 if (m0->m_pkthdr.csum_flags &
8057 (M_CSUM_TSOv4 | M_CSUM_TSOv6 | 8057 (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
8058 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 | 8058 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
8059 M_CSUM_TCPv6 | M_CSUM_UDPv6)) { 8059 M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
8060 if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields, 8060 if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
8061 &do_csum) != 0) { 8061 &do_csum) != 0) {
8062 /* Error message already displayed. */ 8062 /* Error message already displayed. */
8063 bus_dmamap_unload(sc->sc_dmat, dmamap); 8063 bus_dmamap_unload(sc->sc_dmat, dmamap);
8064 continue; 8064 continue;
8065 } 8065 }
8066 } else { 8066 } else {
8067 do_csum = false; 8067 do_csum = false;
8068 cmdlen = 0; 8068 cmdlen = 0;
8069 fields = 0; 8069 fields = 0;
8070 } 8070 }
8071 8071
8072 /* Sync the DMA map. */ 8072 /* Sync the DMA map. */
8073 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, 8073 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
8074 BUS_DMASYNC_PREWRITE); 8074 BUS_DMASYNC_PREWRITE);
8075 8075
8076 /* Initialize the first transmit descriptor. */ 8076 /* Initialize the first transmit descriptor. */
8077 nexttx = txq->txq_next; 8077 nexttx = txq->txq_next;
8078 if (!do_csum) { 8078 if (!do_csum) {
8079 /* setup a legacy descriptor */ 8079 /* setup a legacy descriptor */
8080 wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr, 8080 wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
8081 dmamap->dm_segs[0].ds_addr); 8081 dmamap->dm_segs[0].ds_addr);
8082 txq->txq_descs[nexttx].wtx_cmdlen = 8082 txq->txq_descs[nexttx].wtx_cmdlen =
8083 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len); 8083 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
8084 txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0; 8084 txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
8085 txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0; 8085 txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
8086 if (vlan_has_tag(m0)) { 8086 if (vlan_has_tag(m0)) {
8087 txq->txq_descs[nexttx].wtx_cmdlen |= 8087 txq->txq_descs[nexttx].wtx_cmdlen |=
8088 htole32(WTX_CMD_VLE); 8088 htole32(WTX_CMD_VLE);
8089 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 8089 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
8090 htole16(vlan_get_tag(m0)); 8090 htole16(vlan_get_tag(m0));
8091 } else 8091 } else
 8092 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0; 8092 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
8093 8093
8094 dcmdlen = 0; 8094 dcmdlen = 0;
8095 } else { 8095 } else {
8096 /* setup an advanced data descriptor */ 8096 /* setup an advanced data descriptor */
8097 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr = 8097 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
8098 htole64(dmamap->dm_segs[0].ds_addr); 8098 htole64(dmamap->dm_segs[0].ds_addr);
8099 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0); 8099 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
8100 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen = 8100 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
8101 htole32(dmamap->dm_segs[0].ds_len | cmdlen ); 8101 htole32(dmamap->dm_segs[0].ds_len | cmdlen );
8102 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 8102 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
8103 htole32(fields); 8103 htole32(fields);
8104 DPRINTF(WM_DEBUG_TX, 8104 DPRINTF(WM_DEBUG_TX,
8105 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n", 8105 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
8106 device_xname(sc->sc_dev), nexttx, 8106 device_xname(sc->sc_dev), nexttx,
8107 (uint64_t)dmamap->dm_segs[0].ds_addr)); 8107 (uint64_t)dmamap->dm_segs[0].ds_addr));
8108 DPRINTF(WM_DEBUG_TX, 8108 DPRINTF(WM_DEBUG_TX,
8109 ("\t 0x%08x%08x\n", fields, 8109 ("\t 0x%08x%08x\n", fields,
8110 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen)); 8110 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
8111 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT; 8111 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
8112 } 8112 }
8113 8113
8114 lasttx = nexttx; 8114 lasttx = nexttx;
8115 nexttx = WM_NEXTTX(txq, nexttx); 8115 nexttx = WM_NEXTTX(txq, nexttx);
8116 /* 8116 /*
 8117 * Fill in the next descriptors. The layout is the same here 8117 * Fill in the next descriptors. The layout is the same here
 8118 * for the legacy and advanced formats. 8118 * for the legacy and advanced formats.
8119 */ 8119 */
8120 for (seg = 1; seg < dmamap->dm_nsegs; 8120 for (seg = 1; seg < dmamap->dm_nsegs;
8121 seg++, nexttx = WM_NEXTTX(txq, nexttx)) { 8121 seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
8122 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr = 8122 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
8123 htole64(dmamap->dm_segs[seg].ds_addr); 8123 htole64(dmamap->dm_segs[seg].ds_addr);
8124 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen = 8124 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
8125 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len); 8125 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
8126 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0); 8126 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
8127 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0; 8127 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
8128 lasttx = nexttx; 8128 lasttx = nexttx;
8129 8129
8130 DPRINTF(WM_DEBUG_TX, 8130 DPRINTF(WM_DEBUG_TX,
8131 ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n", 8131 ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
8132 device_xname(sc->sc_dev), nexttx, 8132 device_xname(sc->sc_dev), nexttx,
8133 (uint64_t)dmamap->dm_segs[seg].ds_addr, 8133 (uint64_t)dmamap->dm_segs[seg].ds_addr,
8134 dmamap->dm_segs[seg].ds_len)); 8134 dmamap->dm_segs[seg].ds_len));
8135 } 8135 }
8136 8136
8137 KASSERT(lasttx != -1); 8137 KASSERT(lasttx != -1);
8138 8138
8139 /* 8139 /*
8140 * Set up the command byte on the last descriptor of 8140 * Set up the command byte on the last descriptor of
8141 * the packet. If we're in the interrupt delay window, 8141 * the packet. If we're in the interrupt delay window,
8142 * delay the interrupt. 8142 * delay the interrupt.
8143 */ 8143 */
8144 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) == 8144 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
8145 (NQTX_CMD_EOP | NQTX_CMD_RS)); 8145 (NQTX_CMD_EOP | NQTX_CMD_RS));
8146 txq->txq_descs[lasttx].wtx_cmdlen |= 8146 txq->txq_descs[lasttx].wtx_cmdlen |=
8147 htole32(WTX_CMD_EOP | WTX_CMD_RS); 8147 htole32(WTX_CMD_EOP | WTX_CMD_RS);
8148 8148
8149 txs->txs_lastdesc = lasttx; 8149 txs->txs_lastdesc = lasttx;
8150 8150
8151 DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n", 8151 DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
8152 device_xname(sc->sc_dev), 8152 device_xname(sc->sc_dev),
8153 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen))); 8153 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
8154 8154
8155 /* Sync the descriptors we're using. */ 8155 /* Sync the descriptors we're using. */
8156 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc, 8156 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
8157 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 8157 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
8158 8158
8159 /* Give the packet to the chip. */ 8159 /* Give the packet to the chip. */
8160 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx); 8160 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
8161 sent = true; 8161 sent = true;
8162 8162
8163 DPRINTF(WM_DEBUG_TX, 8163 DPRINTF(WM_DEBUG_TX,
8164 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx)); 8164 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
8165 8165
8166 DPRINTF(WM_DEBUG_TX, 8166 DPRINTF(WM_DEBUG_TX,
8167 ("%s: TX: finished transmitting packet, job %d\n", 8167 ("%s: TX: finished transmitting packet, job %d\n",
8168 device_xname(sc->sc_dev), txq->txq_snext)); 8168 device_xname(sc->sc_dev), txq->txq_snext));
8169 8169
8170 /* Advance the tx pointer. */ 8170 /* Advance the tx pointer. */
8171 txq->txq_free -= txs->txs_ndesc; 8171 txq->txq_free -= txs->txs_ndesc;
8172 txq->txq_next = nexttx; 8172 txq->txq_next = nexttx;
8173 8173
8174 txq->txq_sfree--; 8174 txq->txq_sfree--;
8175 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext); 8175 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
8176 8176
8177 /* Pass the packet to any BPF listeners. */ 8177 /* Pass the packet to any BPF listeners. */
8178 bpf_mtap(ifp, m0); 8178 bpf_mtap(ifp, m0);
8179 } 8179 }
8180 8180
8181 if (m0 != NULL) { 8181 if (m0 != NULL) {
8182 if (!is_transmit) 8182 if (!is_transmit)
8183 ifp->if_flags |= IFF_OACTIVE; 8183 ifp->if_flags |= IFF_OACTIVE;
8184 txq->txq_flags |= WM_TXQ_NO_SPACE; 8184 txq->txq_flags |= WM_TXQ_NO_SPACE;
8185 WM_Q_EVCNT_INCR(txq, descdrop); 8185 WM_Q_EVCNT_INCR(txq, descdrop);
8186 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", 8186 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
8187 __func__)); 8187 __func__));
8188 m_freem(m0); 8188 m_freem(m0);
8189 } 8189 }
8190 8190
8191 if (txq->txq_sfree == 0 || txq->txq_free <= 2) { 8191 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
8192 /* No more slots; notify upper layer. */ 8192 /* No more slots; notify upper layer. */
8193 if (!is_transmit) 8193 if (!is_transmit)
8194 ifp->if_flags |= IFF_OACTIVE; 8194 ifp->if_flags |= IFF_OACTIVE;
8195 txq->txq_flags |= WM_TXQ_NO_SPACE; 8195 txq->txq_flags |= WM_TXQ_NO_SPACE;
8196 } 8196 }
8197 8197
8198 if (sent) { 8198 if (sent) {
8199 /* Set a watchdog timer in case the chip flakes out. */ 8199 /* Set a watchdog timer in case the chip flakes out. */
8200 txq->txq_lastsent = time_uptime; 8200 txq->txq_lastsent = time_uptime;
8201 txq->txq_sending = true; 8201 txq->txq_sending = true;
8202 } 8202 }
8203} 8203}
8204 8204
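wm_nq_send_common_locked() refuses a packet when segs_needed > txq->txq_free - 2: one descriptor is always held back due to the semantics of the TDT register, and one more in case an offload context descriptor has to be written. A minimal arithmetic sketch of that reservation follows; TXQ_NRESERVED and ring_has_room() are hypothetical names, not if_wm.c identifiers.

#include <stdbool.h>

/*
 * Hypothetical free-space check for a Tx descriptor ring.  Two entries
 * are always reserved: one because of the TDT register semantics, and
 * one for a possible offload context descriptor.
 */
#define TXQ_NRESERVED	2

static bool
ring_has_room(unsigned int ring_free, unsigned int segs_needed)
{
	if (ring_free <= TXQ_NRESERVED)
		return false;
	return segs_needed <= ring_free - TXQ_NRESERVED;
}

For example, with 6 free descriptors a 4-segment packet fits (4 <= 6 - 2), while a 5-segment packet does not, and the queue is then marked WM_TXQ_NO_SPACE.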
8205static void 8205static void
8206wm_deferred_start_locked(struct wm_txqueue *txq) 8206wm_deferred_start_locked(struct wm_txqueue *txq)
8207{ 8207{
8208 struct wm_softc *sc = txq->txq_sc; 8208 struct wm_softc *sc = txq->txq_sc;
8209 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 8209 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8210 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq); 8210 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
8211 int qid = wmq->wmq_id; 8211 int qid = wmq->wmq_id;
8212 8212
8213 KASSERT(mutex_owned(txq->txq_lock)); 8213 KASSERT(mutex_owned(txq->txq_lock));