Wed Feb 16 03:15:27 2022 UTC ()
Remove duplicated break. No functional change.


(msaitoh)
diff -r1.726 -r1.727 src/sys/dev/pci/if_wm.c

cvs diff -r1.726 -r1.727 src/sys/dev/pci/if_wm.c (switch to unified diff)

--- src/sys/dev/pci/if_wm.c 2021/12/31 14:25:23 1.726
+++ src/sys/dev/pci/if_wm.c 2022/02/16 03:15:27 1.727
@@ -1,1084 +1,1084 @@ @@ -1,1084 +1,1084 @@
1/* $NetBSD: if_wm.c,v 1.726 2021/12/31 14:25:23 riastradh Exp $ */ 1/* $NetBSD: if_wm.c,v 1.727 2022/02/16 03:15:27 msaitoh Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc. 4 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc. 7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the 15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution. 16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software 17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement: 18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by 19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc. 20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior 22 * or promote products derived from this software without specific prior
23 * written permission. 23 * written permission.
24 * 24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE. 35 * POSSIBILITY OF SUCH DAMAGE.
36 */ 36 */
37 37
38/******************************************************************************* 38/*******************************************************************************
39 39
40 Copyright (c) 2001-2005, Intel Corporation 40 Copyright (c) 2001-2005, Intel Corporation
41 All rights reserved. 41 All rights reserved.
42 42
43 Redistribution and use in source and binary forms, with or without 43 Redistribution and use in source and binary forms, with or without
44 modification, are permitted provided that the following conditions are met: 44 modification, are permitted provided that the following conditions are met:
45 45
46 1. Redistributions of source code must retain the above copyright notice, 46 1. Redistributions of source code must retain the above copyright notice,
47 this list of conditions and the following disclaimer. 47 this list of conditions and the following disclaimer.
48 48
49 2. Redistributions in binary form must reproduce the above copyright 49 2. Redistributions in binary form must reproduce the above copyright
50 notice, this list of conditions and the following disclaimer in the 50 notice, this list of conditions and the following disclaimer in the
51 documentation and/or other materials provided with the distribution. 51 documentation and/or other materials provided with the distribution.
52 52
53 3. Neither the name of the Intel Corporation nor the names of its 53 3. Neither the name of the Intel Corporation nor the names of its
54 contributors may be used to endorse or promote products derived from 54 contributors may be used to endorse or promote products derived from
55 this software without specific prior written permission. 55 this software without specific prior written permission.
56 56
57 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 57 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 58 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 59 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 60 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 61 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 62 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 63 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 64 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 65 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 66 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67 POSSIBILITY OF SUCH DAMAGE. 67 POSSIBILITY OF SUCH DAMAGE.
68 68
69*******************************************************************************/ 69*******************************************************************************/
70/* 70/*
71 * Device driver for the Intel i8254x family of Gigabit Ethernet chips. 71 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72 * 72 *
73 * TODO (in order of importance): 73 * TODO (in order of importance):
74 * 74 *
75 * - Check XXX'ed comments 75 * - Check XXX'ed comments
76 * - TX Multi queue improvement (refine queue selection logic) 76 * - TX Multi queue improvement (refine queue selection logic)
77 * - Split header buffer for newer descriptors 77 * - Split header buffer for newer descriptors
78 * - EEE (Energy Efficiency Ethernet) for I354 78 * - EEE (Energy Efficiency Ethernet) for I354
79 * - Virtual Function 79 * - Virtual Function
80 * - Set LED correctly (based on contents in EEPROM) 80 * - Set LED correctly (based on contents in EEPROM)
81 * - Rework how parameters are loaded from the EEPROM. 81 * - Rework how parameters are loaded from the EEPROM.
82 */ 82 */
83 83
84#include <sys/cdefs.h> 84#include <sys/cdefs.h>
85__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.726 2021/12/31 14:25:23 riastradh Exp $"); 85__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.727 2022/02/16 03:15:27 msaitoh Exp $");
86 86
87#ifdef _KERNEL_OPT 87#ifdef _KERNEL_OPT
88#include "opt_net_mpsafe.h" 88#include "opt_net_mpsafe.h"
89#include "opt_if_wm.h" 89#include "opt_if_wm.h"
90#endif 90#endif
91 91
92#include <sys/param.h> 92#include <sys/param.h>
93#include <sys/systm.h> 93#include <sys/systm.h>
94#include <sys/callout.h> 94#include <sys/callout.h>
95#include <sys/mbuf.h> 95#include <sys/mbuf.h>
96#include <sys/malloc.h> 96#include <sys/malloc.h>
97#include <sys/kmem.h> 97#include <sys/kmem.h>
98#include <sys/kernel.h> 98#include <sys/kernel.h>
99#include <sys/socket.h> 99#include <sys/socket.h>
100#include <sys/ioctl.h> 100#include <sys/ioctl.h>
101#include <sys/errno.h> 101#include <sys/errno.h>
102#include <sys/device.h> 102#include <sys/device.h>
103#include <sys/queue.h> 103#include <sys/queue.h>
104#include <sys/syslog.h> 104#include <sys/syslog.h>
105#include <sys/interrupt.h> 105#include <sys/interrupt.h>
106#include <sys/cpu.h> 106#include <sys/cpu.h>
107#include <sys/pcq.h> 107#include <sys/pcq.h>
108#include <sys/sysctl.h> 108#include <sys/sysctl.h>
109#include <sys/workqueue.h> 109#include <sys/workqueue.h>
110#include <sys/atomic.h> 110#include <sys/atomic.h>
111 111
112#include <sys/rndsource.h> 112#include <sys/rndsource.h>
113 113
114#include <net/if.h> 114#include <net/if.h>
115#include <net/if_dl.h> 115#include <net/if_dl.h>
116#include <net/if_media.h> 116#include <net/if_media.h>
117#include <net/if_ether.h> 117#include <net/if_ether.h>
118 118
119#include <net/bpf.h> 119#include <net/bpf.h>
120 120
121#include <net/rss_config.h> 121#include <net/rss_config.h>
122 122
123#include <netinet/in.h> /* XXX for struct ip */ 123#include <netinet/in.h> /* XXX for struct ip */
124#include <netinet/in_systm.h> /* XXX for struct ip */ 124#include <netinet/in_systm.h> /* XXX for struct ip */
125#include <netinet/ip.h> /* XXX for struct ip */ 125#include <netinet/ip.h> /* XXX for struct ip */
126#include <netinet/ip6.h> /* XXX for struct ip6_hdr */ 126#include <netinet/ip6.h> /* XXX for struct ip6_hdr */
127#include <netinet/tcp.h> /* XXX for struct tcphdr */ 127#include <netinet/tcp.h> /* XXX for struct tcphdr */
128 128
129#include <sys/bus.h> 129#include <sys/bus.h>
130#include <sys/intr.h> 130#include <sys/intr.h>
131#include <machine/endian.h> 131#include <machine/endian.h>
132 132
133#include <dev/mii/mii.h> 133#include <dev/mii/mii.h>
134#include <dev/mii/mdio.h> 134#include <dev/mii/mdio.h>
135#include <dev/mii/miivar.h> 135#include <dev/mii/miivar.h>
136#include <dev/mii/miidevs.h> 136#include <dev/mii/miidevs.h>
137#include <dev/mii/mii_bitbang.h> 137#include <dev/mii/mii_bitbang.h>
138#include <dev/mii/ikphyreg.h> 138#include <dev/mii/ikphyreg.h>
139#include <dev/mii/igphyreg.h> 139#include <dev/mii/igphyreg.h>
140#include <dev/mii/igphyvar.h> 140#include <dev/mii/igphyvar.h>
141#include <dev/mii/inbmphyreg.h> 141#include <dev/mii/inbmphyreg.h>
142#include <dev/mii/ihphyreg.h> 142#include <dev/mii/ihphyreg.h>
143#include <dev/mii/makphyreg.h> 143#include <dev/mii/makphyreg.h>
144 144
145#include <dev/pci/pcireg.h> 145#include <dev/pci/pcireg.h>
146#include <dev/pci/pcivar.h> 146#include <dev/pci/pcivar.h>
147#include <dev/pci/pcidevs.h> 147#include <dev/pci/pcidevs.h>
148 148
149#include <dev/pci/if_wmreg.h> 149#include <dev/pci/if_wmreg.h>
150#include <dev/pci/if_wmvar.h> 150#include <dev/pci/if_wmvar.h>
151 151
152#ifdef WM_DEBUG 152#ifdef WM_DEBUG
153#define WM_DEBUG_LINK __BIT(0) 153#define WM_DEBUG_LINK __BIT(0)
154#define WM_DEBUG_TX __BIT(1) 154#define WM_DEBUG_TX __BIT(1)
155#define WM_DEBUG_RX __BIT(2) 155#define WM_DEBUG_RX __BIT(2)
156#define WM_DEBUG_GMII __BIT(3) 156#define WM_DEBUG_GMII __BIT(3)
157#define WM_DEBUG_MANAGE __BIT(4) 157#define WM_DEBUG_MANAGE __BIT(4)
158#define WM_DEBUG_NVM __BIT(5) 158#define WM_DEBUG_NVM __BIT(5)
159#define WM_DEBUG_INIT __BIT(6) 159#define WM_DEBUG_INIT __BIT(6)
160#define WM_DEBUG_LOCK __BIT(7) 160#define WM_DEBUG_LOCK __BIT(7)
161 161
162#if 0 162#if 0
163#define WM_DEBUG_DEFAULT WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \ 163#define WM_DEBUG_DEFAULT WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
164 WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | \ 164 WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | \
165 WM_DEBUG_LOCK 165 WM_DEBUG_LOCK
166#endif 166#endif
167 167
168#define DPRINTF(sc, x, y) \ 168#define DPRINTF(sc, x, y) \
169 do { \ 169 do { \
170 if ((sc)->sc_debug & (x)) \ 170 if ((sc)->sc_debug & (x)) \
171 printf y; \ 171 printf y; \
172 } while (0) 172 } while (0)
173#else 173#else
174#define DPRINTF(sc, x, y) __nothing 174#define DPRINTF(sc, x, y) __nothing
175#endif /* WM_DEBUG */ 175#endif /* WM_DEBUG */
176 176
177#ifdef NET_MPSAFE 177#ifdef NET_MPSAFE
178#define WM_MPSAFE 1 178#define WM_MPSAFE 1
179#define WM_CALLOUT_FLAGS CALLOUT_MPSAFE 179#define WM_CALLOUT_FLAGS CALLOUT_MPSAFE
180#define WM_SOFTINT_FLAGS SOFTINT_MPSAFE 180#define WM_SOFTINT_FLAGS SOFTINT_MPSAFE
181#define WM_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE 181#define WM_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE
182#else 182#else
183#define WM_CALLOUT_FLAGS 0 183#define WM_CALLOUT_FLAGS 0
184#define WM_SOFTINT_FLAGS 0 184#define WM_SOFTINT_FLAGS 0
185#define WM_WORKQUEUE_FLAGS WQ_PERCPU 185#define WM_WORKQUEUE_FLAGS WQ_PERCPU
186#endif 186#endif
187 187
188#define WM_WORKQUEUE_PRI PRI_SOFTNET 188#define WM_WORKQUEUE_PRI PRI_SOFTNET
189 189
190/* 190/*
191 * This device driver's max interrupt numbers. 191 * This device driver's max interrupt numbers.
192 */ 192 */
193#define WM_MAX_NQUEUEINTR 16 193#define WM_MAX_NQUEUEINTR 16
194#define WM_MAX_NINTR (WM_MAX_NQUEUEINTR + 1) 194#define WM_MAX_NINTR (WM_MAX_NQUEUEINTR + 1)
195 195
196#ifndef WM_DISABLE_MSI 196#ifndef WM_DISABLE_MSI
197#define WM_DISABLE_MSI 0 197#define WM_DISABLE_MSI 0
198#endif 198#endif
199#ifndef WM_DISABLE_MSIX 199#ifndef WM_DISABLE_MSIX
200#define WM_DISABLE_MSIX 0 200#define WM_DISABLE_MSIX 0
201#endif 201#endif
202 202
203int wm_disable_msi = WM_DISABLE_MSI; 203int wm_disable_msi = WM_DISABLE_MSI;
204int wm_disable_msix = WM_DISABLE_MSIX; 204int wm_disable_msix = WM_DISABLE_MSIX;
205 205
206#ifndef WM_WATCHDOG_TIMEOUT 206#ifndef WM_WATCHDOG_TIMEOUT
207#define WM_WATCHDOG_TIMEOUT 5 207#define WM_WATCHDOG_TIMEOUT 5
208#endif 208#endif
209static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT; 209static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;
210 210
211/* 211/*
212 * Transmit descriptor list size. Due to errata, we can only have 212 * Transmit descriptor list size. Due to errata, we can only have
213 * 256 hardware descriptors in the ring on < 82544, but we use 4096 213 * 256 hardware descriptors in the ring on < 82544, but we use 4096
214 * on >= 82544. We tell the upper layers that they can queue a lot 214 * on >= 82544. We tell the upper layers that they can queue a lot
215 * of packets, and we go ahead and manage up to 64 (16 for the i82547) 215 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
216 * of them at a time. 216 * of them at a time.
217 * 217 *
218 * We allow up to 64 DMA segments per packet. Pathological packet 218 * We allow up to 64 DMA segments per packet. Pathological packet
219 * chains containing many small mbufs have been observed in zero-copy 219 * chains containing many small mbufs have been observed in zero-copy
220 * situations with jumbo frames. If a mbuf chain has more than 64 DMA segments, 220 * situations with jumbo frames. If a mbuf chain has more than 64 DMA segments,
221 * m_defrag() is called to reduce it. 221 * m_defrag() is called to reduce it.
222 */ 222 */
223#define WM_NTXSEGS 64 223#define WM_NTXSEGS 64
224#define WM_IFQUEUELEN 256 224#define WM_IFQUEUELEN 256
225#define WM_TXQUEUELEN_MAX 64 225#define WM_TXQUEUELEN_MAX 64
226#define WM_TXQUEUELEN_MAX_82547 16 226#define WM_TXQUEUELEN_MAX_82547 16
227#define WM_TXQUEUELEN(txq) ((txq)->txq_num) 227#define WM_TXQUEUELEN(txq) ((txq)->txq_num)
228#define WM_TXQUEUELEN_MASK(txq) (WM_TXQUEUELEN(txq) - 1) 228#define WM_TXQUEUELEN_MASK(txq) (WM_TXQUEUELEN(txq) - 1)
229#define WM_TXQUEUE_GC(txq) (WM_TXQUEUELEN(txq) / 8) 229#define WM_TXQUEUE_GC(txq) (WM_TXQUEUELEN(txq) / 8)
230#define WM_NTXDESC_82542 256 230#define WM_NTXDESC_82542 256
231#define WM_NTXDESC_82544 4096 231#define WM_NTXDESC_82544 4096
232#define WM_NTXDESC(txq) ((txq)->txq_ndesc) 232#define WM_NTXDESC(txq) ((txq)->txq_ndesc)
233#define WM_NTXDESC_MASK(txq) (WM_NTXDESC(txq) - 1) 233#define WM_NTXDESC_MASK(txq) (WM_NTXDESC(txq) - 1)
234#define WM_TXDESCS_SIZE(txq) (WM_NTXDESC(txq) * (txq)->txq_descsize) 234#define WM_TXDESCS_SIZE(txq) (WM_NTXDESC(txq) * (txq)->txq_descsize)
235#define WM_NEXTTX(txq, x) (((x) + 1) & WM_NTXDESC_MASK(txq)) 235#define WM_NEXTTX(txq, x) (((x) + 1) & WM_NTXDESC_MASK(txq))
236#define WM_NEXTTXS(txq, x) (((x) + 1) & WM_TXQUEUELEN_MASK(txq)) 236#define WM_NEXTTXS(txq, x) (((x) + 1) & WM_TXQUEUELEN_MASK(txq))
237 237
238#define WM_MAXTXDMA (2 * round_page(IP_MAXPACKET)) /* for TSO */ 238#define WM_MAXTXDMA (2 * round_page(IP_MAXPACKET)) /* for TSO */
239 239
240#define WM_TXINTERQSIZE 256 240#define WM_TXINTERQSIZE 256
241 241
242#ifndef WM_TX_PROCESS_LIMIT_DEFAULT 242#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
243#define WM_TX_PROCESS_LIMIT_DEFAULT 100U 243#define WM_TX_PROCESS_LIMIT_DEFAULT 100U
244#endif 244#endif
245#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT 245#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
246#define WM_TX_INTR_PROCESS_LIMIT_DEFAULT 0U 246#define WM_TX_INTR_PROCESS_LIMIT_DEFAULT 0U
247#endif 247#endif
248 248
249/* 249/*
250 * Receive descriptor list size. We have one Rx buffer for normal 250 * Receive descriptor list size. We have one Rx buffer for normal
251 * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized 251 * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized
252 * packet. We allocate 256 receive descriptors, each with a 2k 252 * packet. We allocate 256 receive descriptors, each with a 2k
253 * buffer (MCLBYTES), which gives us room for 50 jumbo packets. 253 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
254 */ 254 */
255#define WM_NRXDESC 256U 255#define WM_NRXDESC 256U
256#define WM_NRXDESC_MASK (WM_NRXDESC - 1) 256#define WM_NRXDESC_MASK (WM_NRXDESC - 1)
257#define WM_NEXTRX(x) (((x) + 1) & WM_NRXDESC_MASK) 257#define WM_NEXTRX(x) (((x) + 1) & WM_NRXDESC_MASK)
258#define WM_PREVRX(x) (((x) - 1) & WM_NRXDESC_MASK) 258#define WM_PREVRX(x) (((x) - 1) & WM_NRXDESC_MASK)
259 259
260#ifndef WM_RX_PROCESS_LIMIT_DEFAULT 260#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
261#define WM_RX_PROCESS_LIMIT_DEFAULT 100U 261#define WM_RX_PROCESS_LIMIT_DEFAULT 100U
262#endif 262#endif
263#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT 263#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
264#define WM_RX_INTR_PROCESS_LIMIT_DEFAULT 0U 264#define WM_RX_INTR_PROCESS_LIMIT_DEFAULT 0U
265#endif 265#endif
266 266
267typedef union txdescs { 267typedef union txdescs {
268 wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544]; 268 wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
269 nq_txdesc_t sctxu_nq_txdescs[WM_NTXDESC_82544]; 269 nq_txdesc_t sctxu_nq_txdescs[WM_NTXDESC_82544];
270} txdescs_t; 270} txdescs_t;
271 271
272typedef union rxdescs { 272typedef union rxdescs {
273 wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC]; 273 wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
274 ext_rxdesc_t sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */ 274 ext_rxdesc_t sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
275 nq_rxdesc_t sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */ 275 nq_rxdesc_t sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
276} rxdescs_t; 276} rxdescs_t;
277 277
278#define WM_CDTXOFF(txq, x) ((txq)->txq_descsize * (x)) 278#define WM_CDTXOFF(txq, x) ((txq)->txq_descsize * (x))
279#define WM_CDRXOFF(rxq, x) ((rxq)->rxq_descsize * (x)) 279#define WM_CDRXOFF(rxq, x) ((rxq)->rxq_descsize * (x))
280 280
281/* 281/*
282 * Software state for transmit jobs. 282 * Software state for transmit jobs.
283 */ 283 */
284struct wm_txsoft { 284struct wm_txsoft {
285 struct mbuf *txs_mbuf; /* head of our mbuf chain */ 285 struct mbuf *txs_mbuf; /* head of our mbuf chain */
286 bus_dmamap_t txs_dmamap; /* our DMA map */ 286 bus_dmamap_t txs_dmamap; /* our DMA map */
287 int txs_firstdesc; /* first descriptor in packet */ 287 int txs_firstdesc; /* first descriptor in packet */
288 int txs_lastdesc; /* last descriptor in packet */ 288 int txs_lastdesc; /* last descriptor in packet */
289 int txs_ndesc; /* # of descriptors used */ 289 int txs_ndesc; /* # of descriptors used */
290}; 290};
291 291
292/* 292/*
293 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES) 293 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
294 * buffer and a DMA map. For packets which fill more than one buffer, we chain 294 * buffer and a DMA map. For packets which fill more than one buffer, we chain
295 * them together. 295 * them together.
296 */ 296 */
297struct wm_rxsoft { 297struct wm_rxsoft {
298 struct mbuf *rxs_mbuf; /* head of our mbuf chain */ 298 struct mbuf *rxs_mbuf; /* head of our mbuf chain */
299 bus_dmamap_t rxs_dmamap; /* our DMA map */ 299 bus_dmamap_t rxs_dmamap; /* our DMA map */
300}; 300};
301 301
302#define WM_LINKUP_TIMEOUT 50 302#define WM_LINKUP_TIMEOUT 50
303 303
304static uint16_t swfwphysem[] = { 304static uint16_t swfwphysem[] = {
305 SWFW_PHY0_SM, 305 SWFW_PHY0_SM,
306 SWFW_PHY1_SM, 306 SWFW_PHY1_SM,
307 SWFW_PHY2_SM, 307 SWFW_PHY2_SM,
308 SWFW_PHY3_SM 308 SWFW_PHY3_SM
309}; 309};
310 310
311static const uint32_t wm_82580_rxpbs_table[] = { 311static const uint32_t wm_82580_rxpbs_table[] = {
312 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 312 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
313}; 313};
314 314
315struct wm_softc; 315struct wm_softc;
316 316
317#if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS) 317#if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
318#if !defined(WM_EVENT_COUNTERS) 318#if !defined(WM_EVENT_COUNTERS)
319#define WM_EVENT_COUNTERS 1 319#define WM_EVENT_COUNTERS 1
320#endif 320#endif
321#endif 321#endif
322 322
323#ifdef WM_EVENT_COUNTERS 323#ifdef WM_EVENT_COUNTERS
324#define WM_Q_EVCNT_DEFINE(qname, evname) \ 324#define WM_Q_EVCNT_DEFINE(qname, evname) \
325 char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \ 325 char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
326 struct evcnt qname##_ev_##evname; 326 struct evcnt qname##_ev_##evname;
327 327
328#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype) \ 328#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype) \
329 do { \ 329 do { \
330 snprintf((q)->qname##_##evname##_evcnt_name, \ 330 snprintf((q)->qname##_##evname##_evcnt_name, \
331 sizeof((q)->qname##_##evname##_evcnt_name), \ 331 sizeof((q)->qname##_##evname##_evcnt_name), \
332 "%s%02d%s", #qname, (qnum), #evname); \ 332 "%s%02d%s", #qname, (qnum), #evname); \
333 evcnt_attach_dynamic(&(q)->qname##_ev_##evname, \ 333 evcnt_attach_dynamic(&(q)->qname##_ev_##evname, \
334 (evtype), NULL, (xname), \ 334 (evtype), NULL, (xname), \
335 (q)->qname##_##evname##_evcnt_name); \ 335 (q)->qname##_##evname##_evcnt_name); \
336 } while (0) 336 } while (0)
337 337
338#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname) \ 338#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname) \
339 WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC) 339 WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
340 340
341#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname) \ 341#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname) \
342 WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR) 342 WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
343 343
344#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum) \ 344#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum) \
345 evcnt_detach(&(q)->qname##_ev_##evname); 345 evcnt_detach(&(q)->qname##_ev_##evname);
346#endif /* WM_EVENT_COUNTERS */ 346#endif /* WM_EVENT_COUNTERS */
347 347
348struct wm_txqueue { 348struct wm_txqueue {
349 kmutex_t *txq_lock; /* lock for tx operations */ 349 kmutex_t *txq_lock; /* lock for tx operations */
350 350
351 struct wm_softc *txq_sc; /* shortcut (skip struct wm_queue) */ 351 struct wm_softc *txq_sc; /* shortcut (skip struct wm_queue) */
352 352
353 /* Software state for the transmit descriptors. */ 353 /* Software state for the transmit descriptors. */
354 int txq_num; /* must be a power of two */ 354 int txq_num; /* must be a power of two */
355 struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX]; 355 struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
356 356
357 /* TX control data structures. */ 357 /* TX control data structures. */
358 int txq_ndesc; /* must be a power of two */ 358 int txq_ndesc; /* must be a power of two */
359 size_t txq_descsize; /* a tx descriptor size */ 359 size_t txq_descsize; /* a tx descriptor size */
360 txdescs_t *txq_descs_u; 360 txdescs_t *txq_descs_u;
361 bus_dmamap_t txq_desc_dmamap; /* control data DMA map */ 361 bus_dmamap_t txq_desc_dmamap; /* control data DMA map */
362 bus_dma_segment_t txq_desc_seg; /* control data segment */ 362 bus_dma_segment_t txq_desc_seg; /* control data segment */
363 int txq_desc_rseg; /* real number of control segment */ 363 int txq_desc_rseg; /* real number of control segment */
364#define txq_desc_dma txq_desc_dmamap->dm_segs[0].ds_addr 364#define txq_desc_dma txq_desc_dmamap->dm_segs[0].ds_addr
365#define txq_descs txq_descs_u->sctxu_txdescs 365#define txq_descs txq_descs_u->sctxu_txdescs
366#define txq_nq_descs txq_descs_u->sctxu_nq_txdescs 366#define txq_nq_descs txq_descs_u->sctxu_nq_txdescs
367 367
368 bus_addr_t txq_tdt_reg; /* offset of TDT register */ 368 bus_addr_t txq_tdt_reg; /* offset of TDT register */
369 369
370 int txq_free; /* number of free Tx descriptors */ 370 int txq_free; /* number of free Tx descriptors */
371 int txq_next; /* next ready Tx descriptor */ 371 int txq_next; /* next ready Tx descriptor */
372 372
373 int txq_sfree; /* number of free Tx jobs */ 373 int txq_sfree; /* number of free Tx jobs */
374 int txq_snext; /* next free Tx job */ 374 int txq_snext; /* next free Tx job */
375 int txq_sdirty; /* dirty Tx jobs */ 375 int txq_sdirty; /* dirty Tx jobs */
376 376
377 /* These 4 variables are used only on the 82547. */ 377 /* These 4 variables are used only on the 82547. */
378 int txq_fifo_size; /* Tx FIFO size */ 378 int txq_fifo_size; /* Tx FIFO size */
379 int txq_fifo_head; /* current head of FIFO */ 379 int txq_fifo_head; /* current head of FIFO */
380 uint32_t txq_fifo_addr; /* internal address of start of FIFO */ 380 uint32_t txq_fifo_addr; /* internal address of start of FIFO */
381 int txq_fifo_stall; /* Tx FIFO is stalled */ 381 int txq_fifo_stall; /* Tx FIFO is stalled */
382 382
383 /* 383 /*
384 * When ncpu > number of Tx queues, a Tx queue is shared by multiple 384 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
385 * CPUs. This queue intermediate them without block. 385 * CPUs. This queue intermediate them without block.
386 */ 386 */
387 pcq_t *txq_interq; 387 pcq_t *txq_interq;
388 388
389 /* 389 /*
390 * NEWQUEUE devices must use not ifp->if_flags but txq->txq_flags 390 * NEWQUEUE devices must use not ifp->if_flags but txq->txq_flags
391 * to manage Tx H/W queue's busy flag. 391 * to manage Tx H/W queue's busy flag.
392 */ 392 */
393 int txq_flags; /* flags for H/W queue, see below */ 393 int txq_flags; /* flags for H/W queue, see below */
394#define WM_TXQ_NO_SPACE 0x1 394#define WM_TXQ_NO_SPACE 0x1
395#define WM_TXQ_LINKDOWN_DISCARD 0x2 395#define WM_TXQ_LINKDOWN_DISCARD 0x2
396 396
397 bool txq_stopping; 397 bool txq_stopping;
398 398
399 bool txq_sending; 399 bool txq_sending;
400 time_t txq_lastsent; 400 time_t txq_lastsent;
401 401
402 /* Checksum flags used for previous packet */ 402 /* Checksum flags used for previous packet */
403 uint32_t txq_last_hw_cmd; 403 uint32_t txq_last_hw_cmd;
404 uint8_t txq_last_hw_fields; 404 uint8_t txq_last_hw_fields;
405 uint16_t txq_last_hw_ipcs; 405 uint16_t txq_last_hw_ipcs;
406 uint16_t txq_last_hw_tucs; 406 uint16_t txq_last_hw_tucs;
407 407
408 uint32_t txq_packets; /* for AIM */ 408 uint32_t txq_packets; /* for AIM */
409 uint32_t txq_bytes; /* for AIM */ 409 uint32_t txq_bytes; /* for AIM */
410#ifdef WM_EVENT_COUNTERS 410#ifdef WM_EVENT_COUNTERS
411 /* TX event counters */ 411 /* TX event counters */
412 WM_Q_EVCNT_DEFINE(txq, txsstall) /* Stalled due to no txs */ 412 WM_Q_EVCNT_DEFINE(txq, txsstall) /* Stalled due to no txs */
413 WM_Q_EVCNT_DEFINE(txq, txdstall) /* Stalled due to no txd */ 413 WM_Q_EVCNT_DEFINE(txq, txdstall) /* Stalled due to no txd */
414 WM_Q_EVCNT_DEFINE(txq, fifo_stall) /* FIFO stalls (82547) */ 414 WM_Q_EVCNT_DEFINE(txq, fifo_stall) /* FIFO stalls (82547) */
415 WM_Q_EVCNT_DEFINE(txq, txdw) /* Tx descriptor interrupts */ 415 WM_Q_EVCNT_DEFINE(txq, txdw) /* Tx descriptor interrupts */
416 WM_Q_EVCNT_DEFINE(txq, txqe) /* Tx queue empty interrupts */ 416 WM_Q_EVCNT_DEFINE(txq, txqe) /* Tx queue empty interrupts */
417 /* XXX not used? */ 417 /* XXX not used? */
418 418
419 WM_Q_EVCNT_DEFINE(txq, ipsum) /* IP checksums comp. */ 419 WM_Q_EVCNT_DEFINE(txq, ipsum) /* IP checksums comp. */
420 WM_Q_EVCNT_DEFINE(txq, tusum) /* TCP/UDP cksums comp. */ 420 WM_Q_EVCNT_DEFINE(txq, tusum) /* TCP/UDP cksums comp. */
421 WM_Q_EVCNT_DEFINE(txq, tusum6) /* TCP/UDP v6 cksums comp. */ 421 WM_Q_EVCNT_DEFINE(txq, tusum6) /* TCP/UDP v6 cksums comp. */
422 WM_Q_EVCNT_DEFINE(txq, tso) /* TCP seg offload (IPv4) */ 422 WM_Q_EVCNT_DEFINE(txq, tso) /* TCP seg offload (IPv4) */
423 WM_Q_EVCNT_DEFINE(txq, tso6) /* TCP seg offload (IPv6) */ 423 WM_Q_EVCNT_DEFINE(txq, tso6) /* TCP seg offload (IPv6) */
424 WM_Q_EVCNT_DEFINE(txq, tsopain) /* Painful header manip. for TSO */ 424 WM_Q_EVCNT_DEFINE(txq, tsopain) /* Painful header manip. for TSO */
425 WM_Q_EVCNT_DEFINE(txq, pcqdrop) /* Pkt dropped in pcq */ 425 WM_Q_EVCNT_DEFINE(txq, pcqdrop) /* Pkt dropped in pcq */
426 WM_Q_EVCNT_DEFINE(txq, descdrop) /* Pkt dropped in MAC desc ring */ 426 WM_Q_EVCNT_DEFINE(txq, descdrop) /* Pkt dropped in MAC desc ring */
427 /* other than toomanyseg */ 427 /* other than toomanyseg */
428 428
429 WM_Q_EVCNT_DEFINE(txq, toomanyseg) /* Pkt dropped(toomany DMA segs) */ 429 WM_Q_EVCNT_DEFINE(txq, toomanyseg) /* Pkt dropped(toomany DMA segs) */
430 WM_Q_EVCNT_DEFINE(txq, defrag) /* m_defrag() */ 430 WM_Q_EVCNT_DEFINE(txq, defrag) /* m_defrag() */
431 WM_Q_EVCNT_DEFINE(txq, underrun) /* Tx underrun */ 431 WM_Q_EVCNT_DEFINE(txq, underrun) /* Tx underrun */
432 WM_Q_EVCNT_DEFINE(txq, skipcontext) /* Tx skip wring cksum context */ 432 WM_Q_EVCNT_DEFINE(txq, skipcontext) /* Tx skip wring cksum context */
433 433
434 char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")]; 434 char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
435 struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */ 435 struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
436#endif /* WM_EVENT_COUNTERS */ 436#endif /* WM_EVENT_COUNTERS */
437}; 437};
438 438
439struct wm_rxqueue { 439struct wm_rxqueue {
440 kmutex_t *rxq_lock; /* lock for rx operations */ 440 kmutex_t *rxq_lock; /* lock for rx operations */
441 441
442 struct wm_softc *rxq_sc; /* shortcut (skip struct wm_queue) */ 442 struct wm_softc *rxq_sc; /* shortcut (skip struct wm_queue) */
443 443
444 /* Software state for the receive descriptors. */ 444 /* Software state for the receive descriptors. */
445 struct wm_rxsoft rxq_soft[WM_NRXDESC]; 445 struct wm_rxsoft rxq_soft[WM_NRXDESC];
446 446
447 /* RX control data structures. */ 447 /* RX control data structures. */
448 int rxq_ndesc; /* must be a power of two */ 448 int rxq_ndesc; /* must be a power of two */
449 size_t rxq_descsize; /* a rx descriptor size */ 449 size_t rxq_descsize; /* a rx descriptor size */
450 rxdescs_t *rxq_descs_u; 450 rxdescs_t *rxq_descs_u;
451 bus_dmamap_t rxq_desc_dmamap; /* control data DMA map */ 451 bus_dmamap_t rxq_desc_dmamap; /* control data DMA map */
452 bus_dma_segment_t rxq_desc_seg; /* control data segment */ 452 bus_dma_segment_t rxq_desc_seg; /* control data segment */
453 int rxq_desc_rseg; /* real number of control segment */ 453 int rxq_desc_rseg; /* real number of control segment */
454#define rxq_desc_dma rxq_desc_dmamap->dm_segs[0].ds_addr 454#define rxq_desc_dma rxq_desc_dmamap->dm_segs[0].ds_addr
455#define rxq_descs rxq_descs_u->sctxu_rxdescs 455#define rxq_descs rxq_descs_u->sctxu_rxdescs
456#define rxq_ext_descs rxq_descs_u->sctxu_ext_rxdescs 456#define rxq_ext_descs rxq_descs_u->sctxu_ext_rxdescs
457#define rxq_nq_descs rxq_descs_u->sctxu_nq_rxdescs 457#define rxq_nq_descs rxq_descs_u->sctxu_nq_rxdescs
458 458
459 bus_addr_t rxq_rdt_reg; /* offset of RDT register */ 459 bus_addr_t rxq_rdt_reg; /* offset of RDT register */
460 460
461 int rxq_ptr; /* next ready Rx desc/queue ent */ 461 int rxq_ptr; /* next ready Rx desc/queue ent */
462 int rxq_discard; 462 int rxq_discard;
463 int rxq_len; 463 int rxq_len;
464 struct mbuf *rxq_head; 464 struct mbuf *rxq_head;
465 struct mbuf *rxq_tail; 465 struct mbuf *rxq_tail;
466 struct mbuf **rxq_tailp; 466 struct mbuf **rxq_tailp;
467 467
468 bool rxq_stopping; 468 bool rxq_stopping;
469 469
470 uint32_t rxq_packets; /* for AIM */ 470 uint32_t rxq_packets; /* for AIM */
471 uint32_t rxq_bytes; /* for AIM */ 471 uint32_t rxq_bytes; /* for AIM */
472#ifdef WM_EVENT_COUNTERS 472#ifdef WM_EVENT_COUNTERS
473 /* RX event counters */ 473 /* RX event counters */
474 WM_Q_EVCNT_DEFINE(rxq, intr); /* Interrupts */ 474 WM_Q_EVCNT_DEFINE(rxq, intr); /* Interrupts */
475 WM_Q_EVCNT_DEFINE(rxq, defer); /* Rx deferred processing */ 475 WM_Q_EVCNT_DEFINE(rxq, defer); /* Rx deferred processing */
476 476
477 WM_Q_EVCNT_DEFINE(rxq, ipsum); /* IP checksums checked */ 477 WM_Q_EVCNT_DEFINE(rxq, ipsum); /* IP checksums checked */
478 WM_Q_EVCNT_DEFINE(rxq, tusum); /* TCP/UDP cksums checked */ 478 WM_Q_EVCNT_DEFINE(rxq, tusum); /* TCP/UDP cksums checked */
479#endif 479#endif
480}; 480};
481 481
482struct wm_queue { 482struct wm_queue {
483 int wmq_id; /* index of TX/RX queues */ 483 int wmq_id; /* index of TX/RX queues */
484 int wmq_intr_idx; /* index of MSI-X tables */ 484 int wmq_intr_idx; /* index of MSI-X tables */
485 485
486 uint32_t wmq_itr; /* interrupt interval per queue. */ 486 uint32_t wmq_itr; /* interrupt interval per queue. */
487 bool wmq_set_itr; 487 bool wmq_set_itr;
488 488
489 struct wm_txqueue wmq_txq; 489 struct wm_txqueue wmq_txq;
490 struct wm_rxqueue wmq_rxq; 490 struct wm_rxqueue wmq_rxq;
491 char sysctlname[32]; /* Name for sysctl */ 491 char sysctlname[32]; /* Name for sysctl */
492 492
493 bool wmq_txrx_use_workqueue; 493 bool wmq_txrx_use_workqueue;
494 struct work wmq_cookie; 494 struct work wmq_cookie;
495 void *wmq_si; 495 void *wmq_si;
496}; 496};
497 497
498struct wm_phyop { 498struct wm_phyop {
499 int (*acquire)(struct wm_softc *); 499 int (*acquire)(struct wm_softc *);
500 void (*release)(struct wm_softc *); 500 void (*release)(struct wm_softc *);
501 int (*readreg_locked)(device_t, int, int, uint16_t *); 501 int (*readreg_locked)(device_t, int, int, uint16_t *);
502 int (*writereg_locked)(device_t, int, int, uint16_t); 502 int (*writereg_locked)(device_t, int, int, uint16_t);
503 int reset_delay_us; 503 int reset_delay_us;
504 bool no_errprint; 504 bool no_errprint;
505}; 505};
506 506
507struct wm_nvmop { 507struct wm_nvmop {
508 int (*acquire)(struct wm_softc *); 508 int (*acquire)(struct wm_softc *);
509 void (*release)(struct wm_softc *); 509 void (*release)(struct wm_softc *);
510 int (*read)(struct wm_softc *, int, int, uint16_t *); 510 int (*read)(struct wm_softc *, int, int, uint16_t *);
511}; 511};
512 512
513/* 513/*
514 * Software state per device. 514 * Software state per device.
515 */ 515 */
516struct wm_softc { 516struct wm_softc {
517 device_t sc_dev; /* generic device information */ 517 device_t sc_dev; /* generic device information */
518 bus_space_tag_t sc_st; /* bus space tag */ 518 bus_space_tag_t sc_st; /* bus space tag */
519 bus_space_handle_t sc_sh; /* bus space handle */ 519 bus_space_handle_t sc_sh; /* bus space handle */
520 bus_size_t sc_ss; /* bus space size */ 520 bus_size_t sc_ss; /* bus space size */
521 bus_space_tag_t sc_iot; /* I/O space tag */ 521 bus_space_tag_t sc_iot; /* I/O space tag */
522 bus_space_handle_t sc_ioh; /* I/O space handle */ 522 bus_space_handle_t sc_ioh; /* I/O space handle */
523 bus_size_t sc_ios; /* I/O space size */ 523 bus_size_t sc_ios; /* I/O space size */
524 bus_space_tag_t sc_flasht; /* flash registers space tag */ 524 bus_space_tag_t sc_flasht; /* flash registers space tag */
525 bus_space_handle_t sc_flashh; /* flash registers space handle */ 525 bus_space_handle_t sc_flashh; /* flash registers space handle */
526 bus_size_t sc_flashs; /* flash registers space size */ 526 bus_size_t sc_flashs; /* flash registers space size */
527 off_t sc_flashreg_offset; /* 527 off_t sc_flashreg_offset; /*
528 * offset to flash registers from 528 * offset to flash registers from
529 * start of BAR 529 * start of BAR
530 */ 530 */
531 bus_dma_tag_t sc_dmat; /* bus DMA tag */ 531 bus_dma_tag_t sc_dmat; /* bus DMA tag */
532 532
533 struct ethercom sc_ethercom; /* ethernet common data */ 533 struct ethercom sc_ethercom; /* ethernet common data */
534 struct mii_data sc_mii; /* MII/media information */ 534 struct mii_data sc_mii; /* MII/media information */
535 535
536 pci_chipset_tag_t sc_pc; 536 pci_chipset_tag_t sc_pc;
537 pcitag_t sc_pcitag; 537 pcitag_t sc_pcitag;
538 int sc_bus_speed; /* PCI/PCIX bus speed */ 538 int sc_bus_speed; /* PCI/PCIX bus speed */
539 int sc_pcixe_capoff; /* PCI[Xe] capability reg offset */ 539 int sc_pcixe_capoff; /* PCI[Xe] capability reg offset */
540 540
541 uint16_t sc_pcidevid; /* PCI device ID */ 541 uint16_t sc_pcidevid; /* PCI device ID */
542 wm_chip_type sc_type; /* MAC type */ 542 wm_chip_type sc_type; /* MAC type */
543 int sc_rev; /* MAC revision */ 543 int sc_rev; /* MAC revision */
544 wm_phy_type sc_phytype; /* PHY type */ 544 wm_phy_type sc_phytype; /* PHY type */
545 uint8_t sc_sfptype; /* SFP type */ 545 uint8_t sc_sfptype; /* SFP type */
546 uint32_t sc_mediatype; /* Media type (Copper, Fiber, SERDES)*/ 546 uint32_t sc_mediatype; /* Media type (Copper, Fiber, SERDES)*/
547#define WM_MEDIATYPE_UNKNOWN 0x00 547#define WM_MEDIATYPE_UNKNOWN 0x00
548#define WM_MEDIATYPE_FIBER 0x01 548#define WM_MEDIATYPE_FIBER 0x01
549#define WM_MEDIATYPE_COPPER 0x02 549#define WM_MEDIATYPE_COPPER 0x02
550#define WM_MEDIATYPE_SERDES 0x03 /* Internal SERDES */ 550#define WM_MEDIATYPE_SERDES 0x03 /* Internal SERDES */
551 int sc_funcid; /* unit number of the chip (0 to 3) */ 551 int sc_funcid; /* unit number of the chip (0 to 3) */
552 int sc_flags; /* flags; see below */ 552 int sc_flags; /* flags; see below */
553 u_short sc_if_flags; /* last if_flags */ 553 u_short sc_if_flags; /* last if_flags */
554 int sc_ec_capenable; /* last ec_capenable */ 554 int sc_ec_capenable; /* last ec_capenable */
555 int sc_flowflags; /* 802.3x flow control flags */ 555 int sc_flowflags; /* 802.3x flow control flags */
556 uint16_t eee_lp_ability; /* EEE link partner's ability */ 556 uint16_t eee_lp_ability; /* EEE link partner's ability */
557 int sc_align_tweak; 557 int sc_align_tweak;
558 558
559 void *sc_ihs[WM_MAX_NINTR]; /* 559 void *sc_ihs[WM_MAX_NINTR]; /*
560 * interrupt cookie. 560 * interrupt cookie.
561 * - legacy and msi use sc_ihs[0] only 561 * - legacy and msi use sc_ihs[0] only
562 * - msix use sc_ihs[0] to sc_ihs[nintrs-1] 562 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
563 */ 563 */
564 pci_intr_handle_t *sc_intrs; /* 564 pci_intr_handle_t *sc_intrs; /*
565 * legacy and msi use sc_intrs[0] only 565 * legacy and msi use sc_intrs[0] only
566 * msix use sc_intrs[0] to sc_ihs[nintrs-1] 566 * msix use sc_intrs[0] to sc_ihs[nintrs-1]
567 */ 567 */
568 int sc_nintrs; /* number of interrupts */ 568 int sc_nintrs; /* number of interrupts */
569 569
570 int sc_link_intr_idx; /* index of MSI-X tables */ 570 int sc_link_intr_idx; /* index of MSI-X tables */
571 571
572 callout_t sc_tick_ch; /* tick callout */ 572 callout_t sc_tick_ch; /* tick callout */
573 bool sc_core_stopping; 573 bool sc_core_stopping;
574 574
575 int sc_nvm_ver_major; 575 int sc_nvm_ver_major;
576 int sc_nvm_ver_minor; 576 int sc_nvm_ver_minor;
577 int sc_nvm_ver_build; 577 int sc_nvm_ver_build;
578 int sc_nvm_addrbits; /* NVM address bits */ 578 int sc_nvm_addrbits; /* NVM address bits */
579 unsigned int sc_nvm_wordsize; /* NVM word size */ 579 unsigned int sc_nvm_wordsize; /* NVM word size */
580 int sc_ich8_flash_base; 580 int sc_ich8_flash_base;
581 int sc_ich8_flash_bank_size; 581 int sc_ich8_flash_bank_size;
582 int sc_nvm_k1_enabled; 582 int sc_nvm_k1_enabled;
583 583
584 int sc_nqueues; 584 int sc_nqueues;
585 struct wm_queue *sc_queue; 585 struct wm_queue *sc_queue;
586 u_int sc_tx_process_limit; /* Tx proc. repeat limit in softint */ 586 u_int sc_tx_process_limit; /* Tx proc. repeat limit in softint */
587 u_int sc_tx_intr_process_limit; /* Tx proc. repeat limit in H/W intr */ 587 u_int sc_tx_intr_process_limit; /* Tx proc. repeat limit in H/W intr */
588 u_int sc_rx_process_limit; /* Rx proc. repeat limit in softint */ 588 u_int sc_rx_process_limit; /* Rx proc. repeat limit in softint */
589 u_int sc_rx_intr_process_limit; /* Rx proc. repeat limit in H/W intr */ 589 u_int sc_rx_intr_process_limit; /* Rx proc. repeat limit in H/W intr */
590 struct workqueue *sc_queue_wq; 590 struct workqueue *sc_queue_wq;
591 bool sc_txrx_use_workqueue; 591 bool sc_txrx_use_workqueue;
592 592
593 int sc_affinity_offset; 593 int sc_affinity_offset;
594 594
595#ifdef WM_EVENT_COUNTERS 595#ifdef WM_EVENT_COUNTERS
596 /* Event counters. */ 596 /* Event counters. */
597 struct evcnt sc_ev_linkintr; /* Link interrupts */ 597 struct evcnt sc_ev_linkintr; /* Link interrupts */
598 598
599 /* WM_T_82542_2_1 only */ 599 /* WM_T_82542_2_1 only */
600 struct evcnt sc_ev_tx_xoff; /* Tx PAUSE(!0) frames */ 600 struct evcnt sc_ev_tx_xoff; /* Tx PAUSE(!0) frames */
601 struct evcnt sc_ev_tx_xon; /* Tx PAUSE(0) frames */ 601 struct evcnt sc_ev_tx_xon; /* Tx PAUSE(0) frames */
602 struct evcnt sc_ev_rx_xoff; /* Rx PAUSE(!0) frames */ 602 struct evcnt sc_ev_rx_xoff; /* Rx PAUSE(!0) frames */
603 struct evcnt sc_ev_rx_xon; /* Rx PAUSE(0) frames */ 603 struct evcnt sc_ev_rx_xon; /* Rx PAUSE(0) frames */
604 struct evcnt sc_ev_rx_macctl; /* Rx Unsupported */ 604 struct evcnt sc_ev_rx_macctl; /* Rx Unsupported */
605#endif /* WM_EVENT_COUNTERS */ 605#endif /* WM_EVENT_COUNTERS */
606 606
607 struct sysctllog *sc_sysctllog; 607 struct sysctllog *sc_sysctllog;
608 608
609 /* This variable are used only on the 82547. */ 609 /* This variable are used only on the 82547. */
610 callout_t sc_txfifo_ch; /* Tx FIFO stall work-around timer */ 610 callout_t sc_txfifo_ch; /* Tx FIFO stall work-around timer */
611 611
612 uint32_t sc_ctrl; /* prototype CTRL register */ 612 uint32_t sc_ctrl; /* prototype CTRL register */
613#if 0 613#if 0
614 uint32_t sc_ctrl_ext; /* prototype CTRL_EXT register */ 614 uint32_t sc_ctrl_ext; /* prototype CTRL_EXT register */
615#endif 615#endif
616 uint32_t sc_icr; /* prototype interrupt bits */ 616 uint32_t sc_icr; /* prototype interrupt bits */
617 uint32_t sc_itr_init; /* prototype intr throttling reg */ 617 uint32_t sc_itr_init; /* prototype intr throttling reg */
618 uint32_t sc_tctl; /* prototype TCTL register */ 618 uint32_t sc_tctl; /* prototype TCTL register */
619 uint32_t sc_rctl; /* prototype RCTL register */ 619 uint32_t sc_rctl; /* prototype RCTL register */
620 uint32_t sc_txcw; /* prototype TXCW register */ 620 uint32_t sc_txcw; /* prototype TXCW register */
621 uint32_t sc_tipg; /* prototype TIPG register */ 621 uint32_t sc_tipg; /* prototype TIPG register */
622 uint32_t sc_fcrtl; /* prototype FCRTL register */ 622 uint32_t sc_fcrtl; /* prototype FCRTL register */
623 uint32_t sc_pba; /* prototype PBA register */ 623 uint32_t sc_pba; /* prototype PBA register */
624 624
625 int sc_tbi_linkup; /* TBI link status */ 625 int sc_tbi_linkup; /* TBI link status */
626 int sc_tbi_serdes_anegticks; /* autonegotiation ticks */ 626 int sc_tbi_serdes_anegticks; /* autonegotiation ticks */
627 int sc_tbi_serdes_ticks; /* tbi ticks */ 627 int sc_tbi_serdes_ticks; /* tbi ticks */
628 628
629 int sc_mchash_type; /* multicast filter offset */ 629 int sc_mchash_type; /* multicast filter offset */
630 630
631 krndsource_t rnd_source; /* random source */ 631 krndsource_t rnd_source; /* random source */
632 632
633 struct if_percpuq *sc_ipq; /* softint-based input queues */ 633 struct if_percpuq *sc_ipq; /* softint-based input queues */
634 634
635 kmutex_t *sc_core_lock; /* lock for softc operations */ 635 kmutex_t *sc_core_lock; /* lock for softc operations */
636 kmutex_t *sc_ich_phymtx; /* 636 kmutex_t *sc_ich_phymtx; /*
637 * 82574/82583/ICH/PCH specific PHY 637 * 82574/82583/ICH/PCH specific PHY
638 * mutex. For 82574/82583, the mutex 638 * mutex. For 82574/82583, the mutex
639 * is used for both PHY and NVM. 639 * is used for both PHY and NVM.
640 */ 640 */
641 kmutex_t *sc_ich_nvmmtx; /* ICH/PCH specific NVM mutex */ 641 kmutex_t *sc_ich_nvmmtx; /* ICH/PCH specific NVM mutex */
642 642
643 struct wm_phyop phy; 643 struct wm_phyop phy;
644 struct wm_nvmop nvm; 644 struct wm_nvmop nvm;
645#ifdef WM_DEBUG 645#ifdef WM_DEBUG
646 uint32_t sc_debug; 646 uint32_t sc_debug;
647#endif 647#endif
648}; 648};
649 649
/*
 * Core lock helpers: the core lock may be NULL before attach completes,
 * so every operation is conditional on the pointer being set.
 */
#define	WM_CORE_LOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define	WM_CORE_UNLOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define	WM_CORE_LOCKED(_sc)						\
	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

/* Reset the in-progress Rx mbuf chain to empty. */
#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

/* Append mbuf (m) to the in-progress Rx chain. */
#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
669 669
/*
 * Event counter update macros.  When 64-bit relaxed atomics are
 * available the counter is updated with a load/store pair (tear-free
 * but not atomic read-modify-write); otherwise a plain increment/add
 * is used.  Compiled away entirely without WM_EVENT_COUNTERS.
 */
#ifdef WM_EVENT_COUNTERS
#ifdef __HAVE_ATOMIC64_LOADSTORE
#define	WM_EVCNT_INCR(ev)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + 1)
#define	WM_EVCNT_ADD(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + (val))
#else
#define	WM_EVCNT_INCR(ev)						\
	((ev)->ev_count)++
#define	WM_EVCNT_ADD(ev, val)						\
	(ev)->ev_count += (val)
#endif

/* Per-queue variants; paste the queue prefix onto the counter name. */
#define	WM_Q_EVCNT_INCR(qname, evname)					\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define	WM_Q_EVCNT_ADD(qname, evname, val)				\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define	WM_Q_EVCNT_INCR(qname, evname)	/* nothing */
#define	WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */
696 696
/* MAC register access; CSR_WRITE_FLUSH posts writes by reading STATUS. */
#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)

/* ICH8+ flash access, offset by sc_flashreg_offset within the BAR. */
#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

/* Bus address of descriptor x within a queue's control-data DMA map. */
#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

/* Low/high 32-bit halves; high half is 0 on 32-bit bus_addr_t. */
#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	    (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	    (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
730 730
731/* 731/*
732 * Register read/write functions. 732 * Register read/write functions.
733 * Other than CSR_{READ|WRITE}(). 733 * Other than CSR_{READ|WRITE}().
734 */ 734 */
735#if 0 735#if 0
736static inline uint32_t wm_io_read(struct wm_softc *, int); 736static inline uint32_t wm_io_read(struct wm_softc *, int);
737#endif 737#endif
738static inline void wm_io_write(struct wm_softc *, int, uint32_t); 738static inline void wm_io_write(struct wm_softc *, int, uint32_t);
739static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t, 739static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
740 uint32_t, uint32_t); 740 uint32_t, uint32_t);
741static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t); 741static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
742 742
/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
749 749
750/* 750/*
751 * Device driver interface functions and commonly used functions. 751 * Device driver interface functions and commonly used functions.
752 * match, attach, detach, init, start, stop, ioctl, watchdog and so on. 752 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
753 */ 753 */
754static const struct wm_product *wm_lookup(const struct pci_attach_args *); 754static const struct wm_product *wm_lookup(const struct pci_attach_args *);
755static int wm_match(device_t, cfdata_t, void *); 755static int wm_match(device_t, cfdata_t, void *);
756static void wm_attach(device_t, device_t, void *); 756static void wm_attach(device_t, device_t, void *);
757static int wm_detach(device_t, int); 757static int wm_detach(device_t, int);
758static bool wm_suspend(device_t, const pmf_qual_t *); 758static bool wm_suspend(device_t, const pmf_qual_t *);
759static bool wm_resume(device_t, const pmf_qual_t *); 759static bool wm_resume(device_t, const pmf_qual_t *);
760static void wm_watchdog(struct ifnet *); 760static void wm_watchdog(struct ifnet *);
761static void wm_watchdog_txq(struct ifnet *, struct wm_txqueue *, 761static void wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
762 uint16_t *); 762 uint16_t *);
763static void wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *, 763static void wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
764 uint16_t *); 764 uint16_t *);
765static void wm_tick(void *); 765static void wm_tick(void *);
766static int wm_ifflags_cb(struct ethercom *); 766static int wm_ifflags_cb(struct ethercom *);
767static int wm_ioctl(struct ifnet *, u_long, void *); 767static int wm_ioctl(struct ifnet *, u_long, void *);
768/* MAC address related */ 768/* MAC address related */
769static uint16_t wm_check_alt_mac_addr(struct wm_softc *); 769static uint16_t wm_check_alt_mac_addr(struct wm_softc *);
770static int wm_read_mac_addr(struct wm_softc *, uint8_t *); 770static int wm_read_mac_addr(struct wm_softc *, uint8_t *);
771static void wm_set_ral(struct wm_softc *, const uint8_t *, int); 771static void wm_set_ral(struct wm_softc *, const uint8_t *, int);
772static uint32_t wm_mchash(struct wm_softc *, const uint8_t *); 772static uint32_t wm_mchash(struct wm_softc *, const uint8_t *);
773static int wm_rar_count(struct wm_softc *); 773static int wm_rar_count(struct wm_softc *);
774static void wm_set_filter(struct wm_softc *); 774static void wm_set_filter(struct wm_softc *);
775/* Reset and init related */ 775/* Reset and init related */
776static void wm_set_vlan(struct wm_softc *); 776static void wm_set_vlan(struct wm_softc *);
777static void wm_set_pcie_completion_timeout(struct wm_softc *); 777static void wm_set_pcie_completion_timeout(struct wm_softc *);
778static void wm_get_auto_rd_done(struct wm_softc *); 778static void wm_get_auto_rd_done(struct wm_softc *);
779static void wm_lan_init_done(struct wm_softc *); 779static void wm_lan_init_done(struct wm_softc *);
780static void wm_get_cfg_done(struct wm_softc *); 780static void wm_get_cfg_done(struct wm_softc *);
781static int wm_phy_post_reset(struct wm_softc *); 781static int wm_phy_post_reset(struct wm_softc *);
782static int wm_write_smbus_addr(struct wm_softc *); 782static int wm_write_smbus_addr(struct wm_softc *);
783static int wm_init_lcd_from_nvm(struct wm_softc *); 783static int wm_init_lcd_from_nvm(struct wm_softc *);
784static int wm_oem_bits_config_ich8lan(struct wm_softc *, bool); 784static int wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
785static void wm_initialize_hardware_bits(struct wm_softc *); 785static void wm_initialize_hardware_bits(struct wm_softc *);
786static uint32_t wm_rxpbs_adjust_82580(uint32_t); 786static uint32_t wm_rxpbs_adjust_82580(uint32_t);
787static int wm_reset_phy(struct wm_softc *); 787static int wm_reset_phy(struct wm_softc *);
788static void wm_flush_desc_rings(struct wm_softc *); 788static void wm_flush_desc_rings(struct wm_softc *);
789static void wm_reset(struct wm_softc *); 789static void wm_reset(struct wm_softc *);
790static int wm_add_rxbuf(struct wm_rxqueue *, int); 790static int wm_add_rxbuf(struct wm_rxqueue *, int);
791static void wm_rxdrain(struct wm_rxqueue *); 791static void wm_rxdrain(struct wm_rxqueue *);
792static void wm_init_rss(struct wm_softc *); 792static void wm_init_rss(struct wm_softc *);
793static void wm_adjust_qnum(struct wm_softc *, int); 793static void wm_adjust_qnum(struct wm_softc *, int);
794static inline bool wm_is_using_msix(struct wm_softc *); 794static inline bool wm_is_using_msix(struct wm_softc *);
795static inline bool wm_is_using_multiqueue(struct wm_softc *); 795static inline bool wm_is_using_multiqueue(struct wm_softc *);
796static int wm_softint_establish_queue(struct wm_softc *, int, int); 796static int wm_softint_establish_queue(struct wm_softc *, int, int);
797static int wm_setup_legacy(struct wm_softc *); 797static int wm_setup_legacy(struct wm_softc *);
798static int wm_setup_msix(struct wm_softc *); 798static int wm_setup_msix(struct wm_softc *);
799static int wm_init(struct ifnet *); 799static int wm_init(struct ifnet *);
800static int wm_init_locked(struct ifnet *); 800static int wm_init_locked(struct ifnet *);
801static void wm_init_sysctls(struct wm_softc *); 801static void wm_init_sysctls(struct wm_softc *);
802static void wm_unset_stopping_flags(struct wm_softc *); 802static void wm_unset_stopping_flags(struct wm_softc *);
803static void wm_set_stopping_flags(struct wm_softc *); 803static void wm_set_stopping_flags(struct wm_softc *);
804static void wm_stop(struct ifnet *, int); 804static void wm_stop(struct ifnet *, int);
805static void wm_stop_locked(struct ifnet *, bool, bool); 805static void wm_stop_locked(struct ifnet *, bool, bool);
806static void wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *); 806static void wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
807static void wm_82547_txfifo_stall(void *); 807static void wm_82547_txfifo_stall(void *);
808static int wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *); 808static int wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
809static void wm_itrs_writereg(struct wm_softc *, struct wm_queue *); 809static void wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
833/* Start */ 833/* Start */
834static void wm_tx_offload(struct wm_softc *, struct wm_txqueue *, 834static void wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
835 struct wm_txsoft *, uint32_t *, uint8_t *); 835 struct wm_txsoft *, uint32_t *, uint8_t *);
836static inline int wm_select_txqueue(struct ifnet *, struct mbuf *); 836static inline int wm_select_txqueue(struct ifnet *, struct mbuf *);
837static void wm_start(struct ifnet *); 837static void wm_start(struct ifnet *);
838static void wm_start_locked(struct ifnet *); 838static void wm_start_locked(struct ifnet *);
839static int wm_transmit(struct ifnet *, struct mbuf *); 839static int wm_transmit(struct ifnet *, struct mbuf *);
840static void wm_transmit_locked(struct ifnet *, struct wm_txqueue *); 840static void wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
841static void wm_send_common_locked(struct ifnet *, struct wm_txqueue *, 841static void wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
842 bool); 842 bool);
843static void wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *, 843static void wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
844 struct wm_txsoft *, uint32_t *, uint32_t *, bool *); 844 struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
845static void wm_nq_start(struct ifnet *); 845static void wm_nq_start(struct ifnet *);
846static void wm_nq_start_locked(struct ifnet *); 846static void wm_nq_start_locked(struct ifnet *);
847static int wm_nq_transmit(struct ifnet *, struct mbuf *); 847static int wm_nq_transmit(struct ifnet *, struct mbuf *);
848static void wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *); 848static void wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
849static void wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, 849static void wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
850 bool); 850 bool);
851static void wm_deferred_start_locked(struct wm_txqueue *); 851static void wm_deferred_start_locked(struct wm_txqueue *);
852static void wm_handle_queue(void *); 852static void wm_handle_queue(void *);
853static void wm_handle_queue_work(struct work *, void *); 853static void wm_handle_queue_work(struct work *, void *);
854/* Interrupt */ 854/* Interrupt */
855static bool wm_txeof(struct wm_txqueue *, u_int); 855static bool wm_txeof(struct wm_txqueue *, u_int);
856static bool wm_rxeof(struct wm_rxqueue *, u_int); 856static bool wm_rxeof(struct wm_rxqueue *, u_int);
857static void wm_linkintr_gmii(struct wm_softc *, uint32_t); 857static void wm_linkintr_gmii(struct wm_softc *, uint32_t);
858static void wm_linkintr_tbi(struct wm_softc *, uint32_t); 858static void wm_linkintr_tbi(struct wm_softc *, uint32_t);
859static void wm_linkintr_serdes(struct wm_softc *, uint32_t); 859static void wm_linkintr_serdes(struct wm_softc *, uint32_t);
860static void wm_linkintr(struct wm_softc *, uint32_t); 860static void wm_linkintr(struct wm_softc *, uint32_t);
861static int wm_intr_legacy(void *); 861static int wm_intr_legacy(void *);
862static inline void wm_txrxintr_disable(struct wm_queue *); 862static inline void wm_txrxintr_disable(struct wm_queue *);
863static inline void wm_txrxintr_enable(struct wm_queue *); 863static inline void wm_txrxintr_enable(struct wm_queue *);
864static void wm_itrs_calculate(struct wm_softc *, struct wm_queue *); 864static void wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
865static int wm_txrxintr_msix(void *); 865static int wm_txrxintr_msix(void *);
866static int wm_linkintr_msix(void *); 866static int wm_linkintr_msix(void *);
867 867
868/* 868/*
869 * Media related. 869 * Media related.
870 * GMII, SGMII, TBI, SERDES and SFP. 870 * GMII, SGMII, TBI, SERDES and SFP.
871 */ 871 */
872/* Common */ 872/* Common */
873static void wm_tbi_serdes_set_linkled(struct wm_softc *); 873static void wm_tbi_serdes_set_linkled(struct wm_softc *);
874/* GMII related */ 874/* GMII related */
875static void wm_gmii_reset(struct wm_softc *); 875static void wm_gmii_reset(struct wm_softc *);
876static void wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t); 876static void wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
877static int wm_get_phy_id_82575(struct wm_softc *); 877static int wm_get_phy_id_82575(struct wm_softc *);
878static void wm_gmii_mediainit(struct wm_softc *, pci_product_id_t); 878static void wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
879static int wm_gmii_mediachange(struct ifnet *); 879static int wm_gmii_mediachange(struct ifnet *);
880static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *); 880static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
881static void wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int); 881static void wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
882static uint16_t wm_i82543_mii_recvbits(struct wm_softc *); 882static uint16_t wm_i82543_mii_recvbits(struct wm_softc *);
883static int wm_gmii_i82543_readreg(device_t, int, int, uint16_t *); 883static int wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
884static int wm_gmii_i82543_writereg(device_t, int, int, uint16_t); 884static int wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
885static int wm_gmii_mdic_readreg(device_t, int, int, uint16_t *); 885static int wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
886static int wm_gmii_mdic_writereg(device_t, int, int, uint16_t); 886static int wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
887static int wm_gmii_i82544_readreg(device_t, int, int, uint16_t *); 887static int wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
888static int wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *); 888static int wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
889static int wm_gmii_i82544_writereg(device_t, int, int, uint16_t); 889static int wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
890static int wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t); 890static int wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
891static int wm_gmii_i80003_readreg(device_t, int, int, uint16_t *); 891static int wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
892static int wm_gmii_i80003_writereg(device_t, int, int, uint16_t); 892static int wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
893static int wm_gmii_bm_readreg(device_t, int, int, uint16_t *); 893static int wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
894static int wm_gmii_bm_writereg(device_t, int, int, uint16_t); 894static int wm_gmii_bm_writereg(device_t, int, int, uint16_t);
895static int wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *); 895static int wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
896static int wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *); 896static int wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
897static int wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int, 897static int wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
898 bool); 898 bool);
899static int wm_gmii_hv_readreg(device_t, int, int, uint16_t *); 899static int wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
900static int wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *); 900static int wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
901static int wm_gmii_hv_writereg(device_t, int, int, uint16_t); 901static int wm_gmii_hv_writereg(device_t, int, int, uint16_t);
902static int wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t); 902static int wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
903static int wm_gmii_82580_readreg(device_t, int, int, uint16_t *); 903static int wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
904static int wm_gmii_82580_writereg(device_t, int, int, uint16_t); 904static int wm_gmii_82580_writereg(device_t, int, int, uint16_t);
905static int wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *); 905static int wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
906static int wm_gmii_gs40g_writereg(device_t, int, int, uint16_t); 906static int wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
907static void wm_gmii_statchg(struct ifnet *); 907static void wm_gmii_statchg(struct ifnet *);
908/* 908/*
909 * kumeran related (80003, ICH* and PCH*). 909 * kumeran related (80003, ICH* and PCH*).
910 * These functions are not for accessing MII registers but for accessing 910 * These functions are not for accessing MII registers but for accessing
911 * kumeran specific registers. 911 * kumeran specific registers.
912 */ 912 */
913static int wm_kmrn_readreg(struct wm_softc *, int, uint16_t *); 913static int wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
914static int wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *); 914static int wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
915static int wm_kmrn_writereg(struct wm_softc *, int, uint16_t); 915static int wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
916static int wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t); 916static int wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
917/* EMI register related */ 917/* EMI register related */
918static int wm_access_emi_reg_locked(device_t, int, uint16_t *, bool); 918static int wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
919static int wm_read_emi_reg_locked(device_t, int, uint16_t *); 919static int wm_read_emi_reg_locked(device_t, int, uint16_t *);
920static int wm_write_emi_reg_locked(device_t, int, uint16_t); 920static int wm_write_emi_reg_locked(device_t, int, uint16_t);
921/* SGMII */ 921/* SGMII */
922static bool wm_sgmii_uses_mdio(struct wm_softc *); 922static bool wm_sgmii_uses_mdio(struct wm_softc *);
923static void wm_sgmii_sfp_preconfig(struct wm_softc *); 923static void wm_sgmii_sfp_preconfig(struct wm_softc *);
924static int wm_sgmii_readreg(device_t, int, int, uint16_t *); 924static int wm_sgmii_readreg(device_t, int, int, uint16_t *);
925static int wm_sgmii_readreg_locked(device_t, int, int, uint16_t *); 925static int wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
926static int wm_sgmii_writereg(device_t, int, int, uint16_t); 926static int wm_sgmii_writereg(device_t, int, int, uint16_t);
927static int wm_sgmii_writereg_locked(device_t, int, int, uint16_t); 927static int wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
928/* TBI related */ 928/* TBI related */
929static bool wm_tbi_havesignal(struct wm_softc *, uint32_t); 929static bool wm_tbi_havesignal(struct wm_softc *, uint32_t);
930static void wm_tbi_mediainit(struct wm_softc *); 930static void wm_tbi_mediainit(struct wm_softc *);
931static int wm_tbi_mediachange(struct ifnet *); 931static int wm_tbi_mediachange(struct ifnet *);
932static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *); 932static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
933static int wm_check_for_link(struct wm_softc *); 933static int wm_check_for_link(struct wm_softc *);
934static void wm_tbi_tick(struct wm_softc *); 934static void wm_tbi_tick(struct wm_softc *);
935/* SERDES related */ 935/* SERDES related */
936static void wm_serdes_power_up_link_82575(struct wm_softc *); 936static void wm_serdes_power_up_link_82575(struct wm_softc *);
937static int wm_serdes_mediachange(struct ifnet *); 937static int wm_serdes_mediachange(struct ifnet *);
938static void wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *); 938static void wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
939static void wm_serdes_tick(struct wm_softc *); 939static void wm_serdes_tick(struct wm_softc *);
940/* SFP related */ 940/* SFP related */
941static int wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *); 941static int wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
942static uint32_t wm_sfp_get_media_type(struct wm_softc *); 942static uint32_t wm_sfp_get_media_type(struct wm_softc *);
943 943
944/* 944/*
945 * NVM related. 945 * NVM related.
946 * Microwire, SPI (w/wo EERD) and Flash. 946 * Microwire, SPI (w/wo EERD) and Flash.
947 */ 947 */
948/* Misc functions */ 948/* Misc functions */
949static void wm_eeprom_sendbits(struct wm_softc *, uint32_t, int); 949static void wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
950static void wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int); 950static void wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
951static int wm_nvm_set_addrbits_size_eecd(struct wm_softc *); 951static int wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
952/* Microwire */ 952/* Microwire */
953static int wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *); 953static int wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
954/* SPI */ 954/* SPI */
955static int wm_nvm_ready_spi(struct wm_softc *); 955static int wm_nvm_ready_spi(struct wm_softc *);
956static int wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *); 956static int wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
957/* Using with EERD */ 957/* Using with EERD */
958static int wm_poll_eerd_eewr_done(struct wm_softc *, int); 958static int wm_poll_eerd_eewr_done(struct wm_softc *, int);
959static int wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *); 959static int wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
960/* Flash */ 960/* Flash */
961static int wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *, 961static int wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
962 unsigned int *); 962 unsigned int *);
963static int32_t wm_ich8_cycle_init(struct wm_softc *); 963static int32_t wm_ich8_cycle_init(struct wm_softc *);
964static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t); 964static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
965static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t, 965static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
966 uint32_t *); 966 uint32_t *);
967static int32_t wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *); 967static int32_t wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
968static int32_t wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *); 968static int32_t wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
969static int32_t wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *); 969static int32_t wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
970static int wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *); 970static int wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
971static int wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *); 971static int wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
972/* iNVM */ 972/* iNVM */
973static int wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *); 973static int wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
974static int wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *); 974static int wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
975/* Lock, detecting NVM type, validate checksum and read */ 975/* Lock, detecting NVM type, validate checksum and read */
976static int wm_nvm_is_onboard_eeprom(struct wm_softc *); 976static int wm_nvm_is_onboard_eeprom(struct wm_softc *);
977static int wm_nvm_flash_presence_i210(struct wm_softc *); 977static int wm_nvm_flash_presence_i210(struct wm_softc *);
978static int wm_nvm_validate_checksum(struct wm_softc *); 978static int wm_nvm_validate_checksum(struct wm_softc *);
979static void wm_nvm_version_invm(struct wm_softc *); 979static void wm_nvm_version_invm(struct wm_softc *);
980static void wm_nvm_version(struct wm_softc *); 980static void wm_nvm_version(struct wm_softc *);
981static int wm_nvm_read(struct wm_softc *, int, int, uint16_t *); 981static int wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
982 982
983/* 983/*
984 * Hardware semaphores. 984 * Hardware semaphores.
 985 * Very complex... 985 * Very complex...
986 */ 986 */
987static int wm_get_null(struct wm_softc *); 987static int wm_get_null(struct wm_softc *);
988static void wm_put_null(struct wm_softc *); 988static void wm_put_null(struct wm_softc *);
989static int wm_get_eecd(struct wm_softc *); 989static int wm_get_eecd(struct wm_softc *);
990static void wm_put_eecd(struct wm_softc *); 990static void wm_put_eecd(struct wm_softc *);
991static int wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */ 991static int wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
992static void wm_put_swsm_semaphore(struct wm_softc *); 992static void wm_put_swsm_semaphore(struct wm_softc *);
993static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t); 993static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
994static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t); 994static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
995static int wm_get_nvm_80003(struct wm_softc *); 995static int wm_get_nvm_80003(struct wm_softc *);
996static void wm_put_nvm_80003(struct wm_softc *); 996static void wm_put_nvm_80003(struct wm_softc *);
997static int wm_get_nvm_82571(struct wm_softc *); 997static int wm_get_nvm_82571(struct wm_softc *);
998static void wm_put_nvm_82571(struct wm_softc *); 998static void wm_put_nvm_82571(struct wm_softc *);
999static int wm_get_phy_82575(struct wm_softc *); 999static int wm_get_phy_82575(struct wm_softc *);
1000static void wm_put_phy_82575(struct wm_softc *); 1000static void wm_put_phy_82575(struct wm_softc *);
1001static int wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */ 1001static int wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
1002static void wm_put_swfwhw_semaphore(struct wm_softc *); 1002static void wm_put_swfwhw_semaphore(struct wm_softc *);
1003static int wm_get_swflag_ich8lan(struct wm_softc *); /* For PHY */ 1003static int wm_get_swflag_ich8lan(struct wm_softc *); /* For PHY */
1004static void wm_put_swflag_ich8lan(struct wm_softc *); 1004static void wm_put_swflag_ich8lan(struct wm_softc *);
1005static int wm_get_nvm_ich8lan(struct wm_softc *); 1005static int wm_get_nvm_ich8lan(struct wm_softc *);
1006static void wm_put_nvm_ich8lan(struct wm_softc *); 1006static void wm_put_nvm_ich8lan(struct wm_softc *);
1007static int wm_get_hw_semaphore_82573(struct wm_softc *); 1007static int wm_get_hw_semaphore_82573(struct wm_softc *);
1008static void wm_put_hw_semaphore_82573(struct wm_softc *); 1008static void wm_put_hw_semaphore_82573(struct wm_softc *);
1009 1009
1010/* 1010/*
1011 * Management mode and power management related subroutines. 1011 * Management mode and power management related subroutines.
1012 * BMC, AMT, suspend/resume and EEE. 1012 * BMC, AMT, suspend/resume and EEE.
1013 */ 1013 */
1014#if 0 1014#if 0
1015static int wm_check_mng_mode(struct wm_softc *); 1015static int wm_check_mng_mode(struct wm_softc *);
1016static int wm_check_mng_mode_ich8lan(struct wm_softc *); 1016static int wm_check_mng_mode_ich8lan(struct wm_softc *);
1017static int wm_check_mng_mode_82574(struct wm_softc *); 1017static int wm_check_mng_mode_82574(struct wm_softc *);
1018static int wm_check_mng_mode_generic(struct wm_softc *); 1018static int wm_check_mng_mode_generic(struct wm_softc *);
1019#endif 1019#endif
1020static int wm_enable_mng_pass_thru(struct wm_softc *); 1020static int wm_enable_mng_pass_thru(struct wm_softc *);
1021static bool wm_phy_resetisblocked(struct wm_softc *); 1021static bool wm_phy_resetisblocked(struct wm_softc *);
1022static void wm_get_hw_control(struct wm_softc *); 1022static void wm_get_hw_control(struct wm_softc *);
1023static void wm_release_hw_control(struct wm_softc *); 1023static void wm_release_hw_control(struct wm_softc *);
1024static void wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool); 1024static void wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
1025static int wm_init_phy_workarounds_pchlan(struct wm_softc *); 1025static int wm_init_phy_workarounds_pchlan(struct wm_softc *);
1026static void wm_init_manageability(struct wm_softc *); 1026static void wm_init_manageability(struct wm_softc *);
1027static void wm_release_manageability(struct wm_softc *); 1027static void wm_release_manageability(struct wm_softc *);
1028static void wm_get_wakeup(struct wm_softc *); 1028static void wm_get_wakeup(struct wm_softc *);
1029static int wm_ulp_disable(struct wm_softc *); 1029static int wm_ulp_disable(struct wm_softc *);
1030static int wm_enable_phy_wakeup(struct wm_softc *); 1030static int wm_enable_phy_wakeup(struct wm_softc *);
1031static void wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *); 1031static void wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
1032static void wm_suspend_workarounds_ich8lan(struct wm_softc *); 1032static void wm_suspend_workarounds_ich8lan(struct wm_softc *);
1033static int wm_resume_workarounds_pchlan(struct wm_softc *); 1033static int wm_resume_workarounds_pchlan(struct wm_softc *);
1034static void wm_enable_wakeup(struct wm_softc *); 1034static void wm_enable_wakeup(struct wm_softc *);
1035static void wm_disable_aspm(struct wm_softc *); 1035static void wm_disable_aspm(struct wm_softc *);
1036/* LPLU (Low Power Link Up) */ 1036/* LPLU (Low Power Link Up) */
1037static void wm_lplu_d0_disable(struct wm_softc *); 1037static void wm_lplu_d0_disable(struct wm_softc *);
1038/* EEE */ 1038/* EEE */
1039static int wm_set_eee_i350(struct wm_softc *); 1039static int wm_set_eee_i350(struct wm_softc *);
1040static int wm_set_eee_pchlan(struct wm_softc *); 1040static int wm_set_eee_pchlan(struct wm_softc *);
1041static int wm_set_eee(struct wm_softc *); 1041static int wm_set_eee(struct wm_softc *);
1042 1042
1043/* 1043/*
1044 * Workarounds (mainly PHY related). 1044 * Workarounds (mainly PHY related).
1045 * Basically, PHY's workarounds are in the PHY drivers. 1045 * Basically, PHY's workarounds are in the PHY drivers.
1046 */ 1046 */
1047static int wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *); 1047static int wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
1048static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *); 1048static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
1049static int wm_hv_phy_workarounds_ich8lan(struct wm_softc *); 1049static int wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
1050static void wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *); 1050static void wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
1051static void wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *); 1051static void wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
1052static int wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool); 1052static int wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
1053static int wm_lv_phy_workarounds_ich8lan(struct wm_softc *); 1053static int wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
1054static int wm_k1_workaround_lpt_lp(struct wm_softc *, bool); 1054static int wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
1055static int wm_k1_gig_workaround_hv(struct wm_softc *, int); 1055static int wm_k1_gig_workaround_hv(struct wm_softc *, int);
1056static int wm_k1_workaround_lv(struct wm_softc *); 1056static int wm_k1_workaround_lv(struct wm_softc *);
1057static int wm_link_stall_workaround_hv(struct wm_softc *); 1057static int wm_link_stall_workaround_hv(struct wm_softc *);
1058static int wm_set_mdio_slow_mode_hv(struct wm_softc *); 1058static int wm_set_mdio_slow_mode_hv(struct wm_softc *);
1059static void wm_configure_k1_ich8lan(struct wm_softc *, int); 1059static void wm_configure_k1_ich8lan(struct wm_softc *, int);
1060static void wm_reset_init_script_82575(struct wm_softc *); 1060static void wm_reset_init_script_82575(struct wm_softc *);
1061static void wm_reset_mdicnfg_82580(struct wm_softc *); 1061static void wm_reset_mdicnfg_82580(struct wm_softc *);
1062static bool wm_phy_is_accessible_pchlan(struct wm_softc *); 1062static bool wm_phy_is_accessible_pchlan(struct wm_softc *);
1063static void wm_toggle_lanphypc_pch_lpt(struct wm_softc *); 1063static void wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
1064static int wm_platform_pm_pch_lpt(struct wm_softc *, bool); 1064static int wm_platform_pm_pch_lpt(struct wm_softc *, bool);
1065static int wm_pll_workaround_i210(struct wm_softc *); 1065static int wm_pll_workaround_i210(struct wm_softc *);
1066static void wm_legacy_irq_quirk_spt(struct wm_softc *); 1066static void wm_legacy_irq_quirk_spt(struct wm_softc *);
1067static bool wm_phy_need_linkdown_discard(struct wm_softc *); 1067static bool wm_phy_need_linkdown_discard(struct wm_softc *);
1068static void wm_set_linkdown_discard(struct wm_softc *); 1068static void wm_set_linkdown_discard(struct wm_softc *);
1069static void wm_clear_linkdown_discard(struct wm_softc *); 1069static void wm_clear_linkdown_discard(struct wm_softc *);
1070 1070
1071static int wm_sysctl_tdh_handler(SYSCTLFN_PROTO); 1071static int wm_sysctl_tdh_handler(SYSCTLFN_PROTO);
1072static int wm_sysctl_tdt_handler(SYSCTLFN_PROTO); 1072static int wm_sysctl_tdt_handler(SYSCTLFN_PROTO);
1073#ifdef WM_DEBUG 1073#ifdef WM_DEBUG
1074static int wm_sysctl_debug(SYSCTLFN_PROTO); 1074static int wm_sysctl_debug(SYSCTLFN_PROTO);
1075#endif 1075#endif
1076 1076
1077CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc), 1077CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
1078 wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN); 1078 wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
1079 1079
1080/* 1080/*
1081 * Devices supported by this driver. 1081 * Devices supported by this driver.
1082 */ 1082 */
1083static const struct wm_product { 1083static const struct wm_product {
1084 pci_vendor_id_t wmp_vendor; 1084 pci_vendor_id_t wmp_vendor;
@@ -9576,1999 +9576,1998 @@ wm_linkintr_gmii(struct wm_softc *sc, ui @@ -9576,1999 +9576,1998 @@ wm_linkintr_gmii(struct wm_softc *sc, ui
9576 uint32_t speed = __SHIFTOUT(status, STATUS_SPEED); 9576 uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
9577 bool fdx; 9577 bool fdx;
9578 uint16_t emi_addr, emi_val; 9578 uint16_t emi_addr, emi_val;
9579 9579
9580 tipg_reg = CSR_READ(sc, WMREG_TIPG); 9580 tipg_reg = CSR_READ(sc, WMREG_TIPG);
9581 tipg_reg &= ~TIPG_IPGT_MASK; 9581 tipg_reg &= ~TIPG_IPGT_MASK;
9582 fdx = status & STATUS_FD; 9582 fdx = status & STATUS_FD;
9583 9583
9584 if (!fdx && (speed == STATUS_SPEED_10)) { 9584 if (!fdx && (speed == STATUS_SPEED_10)) {
9585 tipg_reg |= 0xff; 9585 tipg_reg |= 0xff;
9586 /* Reduce Rx latency in analog PHY */ 9586 /* Reduce Rx latency in analog PHY */
9587 emi_val = 0; 9587 emi_val = 0;
9588 } else if ((sc->sc_type >= WM_T_PCH_SPT) && 9588 } else if ((sc->sc_type >= WM_T_PCH_SPT) &&
9589 fdx && speed != STATUS_SPEED_1000) { 9589 fdx && speed != STATUS_SPEED_1000) {
9590 tipg_reg |= 0xc; 9590 tipg_reg |= 0xc;
9591 emi_val = 1; 9591 emi_val = 1;
9592 } else { 9592 } else {
9593 /* Roll back the default values */ 9593 /* Roll back the default values */
9594 tipg_reg |= 0x08; 9594 tipg_reg |= 0x08;
9595 emi_val = 1; 9595 emi_val = 1;
9596 } 9596 }
9597 9597
9598 CSR_WRITE(sc, WMREG_TIPG, tipg_reg); 9598 CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
9599 9599
9600 rv = sc->phy.acquire(sc); 9600 rv = sc->phy.acquire(sc);
9601 if (rv) 9601 if (rv)
9602 return; 9602 return;
9603 9603
9604 if (sc->sc_type == WM_T_PCH2) 9604 if (sc->sc_type == WM_T_PCH2)
9605 emi_addr = I82579_RX_CONFIG; 9605 emi_addr = I82579_RX_CONFIG;
9606 else 9606 else
9607 emi_addr = I217_RX_CONFIG; 9607 emi_addr = I217_RX_CONFIG;
9608 rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val); 9608 rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
9609 9609
9610 if (sc->sc_type >= WM_T_PCH_LPT) { 9610 if (sc->sc_type >= WM_T_PCH_LPT) {
9611 uint16_t phy_reg; 9611 uint16_t phy_reg;
9612 9612
9613 sc->phy.readreg_locked(dev, 2, 9613 sc->phy.readreg_locked(dev, 2,
9614 I217_PLL_CLOCK_GATE_REG, &phy_reg); 9614 I217_PLL_CLOCK_GATE_REG, &phy_reg);
9615 phy_reg &= ~I217_PLL_CLOCK_GATE_MASK; 9615 phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
9616 if (speed == STATUS_SPEED_100 9616 if (speed == STATUS_SPEED_100
9617 || speed == STATUS_SPEED_10) 9617 || speed == STATUS_SPEED_10)
9618 phy_reg |= 0x3e8; 9618 phy_reg |= 0x3e8;
9619 else 9619 else
9620 phy_reg |= 0xfa; 9620 phy_reg |= 0xfa;
9621 sc->phy.writereg_locked(dev, 2, 9621 sc->phy.writereg_locked(dev, 2,
9622 I217_PLL_CLOCK_GATE_REG, phy_reg); 9622 I217_PLL_CLOCK_GATE_REG, phy_reg);
9623 9623
9624 if (speed == STATUS_SPEED_1000) { 9624 if (speed == STATUS_SPEED_1000) {
9625 sc->phy.readreg_locked(dev, 2, 9625 sc->phy.readreg_locked(dev, 2,
9626 HV_PM_CTRL, &phy_reg); 9626 HV_PM_CTRL, &phy_reg);
9627 9627
9628 phy_reg |= HV_PM_CTRL_K1_CLK_REQ; 9628 phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
9629 9629
9630 sc->phy.writereg_locked(dev, 2, 9630 sc->phy.writereg_locked(dev, 2,
9631 HV_PM_CTRL, phy_reg); 9631 HV_PM_CTRL, phy_reg);
9632 } 9632 }
9633 } 9633 }
9634 sc->phy.release(sc); 9634 sc->phy.release(sc);
9635 9635
9636 if (rv) 9636 if (rv)
9637 return; 9637 return;
9638 9638
9639 if (sc->sc_type >= WM_T_PCH_SPT) { 9639 if (sc->sc_type >= WM_T_PCH_SPT) {
9640 uint16_t data, ptr_gap; 9640 uint16_t data, ptr_gap;
9641 9641
9642 if (speed == STATUS_SPEED_1000) { 9642 if (speed == STATUS_SPEED_1000) {
9643 rv = sc->phy.acquire(sc); 9643 rv = sc->phy.acquire(sc);
9644 if (rv) 9644 if (rv)
9645 return; 9645 return;
9646 9646
9647 rv = sc->phy.readreg_locked(dev, 2, 9647 rv = sc->phy.readreg_locked(dev, 2,
9648 I82579_UNKNOWN1, &data); 9648 I82579_UNKNOWN1, &data);
9649 if (rv) { 9649 if (rv) {
9650 sc->phy.release(sc); 9650 sc->phy.release(sc);
9651 return; 9651 return;
9652 } 9652 }
9653 9653
9654 ptr_gap = (data & (0x3ff << 2)) >> 2; 9654 ptr_gap = (data & (0x3ff << 2)) >> 2;
9655 if (ptr_gap < 0x18) { 9655 if (ptr_gap < 0x18) {
9656 data &= ~(0x3ff << 2); 9656 data &= ~(0x3ff << 2);
9657 data |= (0x18 << 2); 9657 data |= (0x18 << 2);
9658 rv = sc->phy.writereg_locked(dev, 9658 rv = sc->phy.writereg_locked(dev,
9659 2, I82579_UNKNOWN1, data); 9659 2, I82579_UNKNOWN1, data);
9660 } 9660 }
9661 sc->phy.release(sc); 9661 sc->phy.release(sc);
9662 if (rv) 9662 if (rv)
9663 return; 9663 return;
9664 } else { 9664 } else {
9665 rv = sc->phy.acquire(sc); 9665 rv = sc->phy.acquire(sc);
9666 if (rv) 9666 if (rv)
9667 return; 9667 return;
9668 9668
9669 rv = sc->phy.writereg_locked(dev, 2, 9669 rv = sc->phy.writereg_locked(dev, 2,
9670 I82579_UNKNOWN1, 0xc023); 9670 I82579_UNKNOWN1, 0xc023);
9671 sc->phy.release(sc); 9671 sc->phy.release(sc);
9672 if (rv) 9672 if (rv)
9673 return; 9673 return;
9674 9674
9675 } 9675 }
9676 } 9676 }
9677 } 9677 }
9678 9678
9679 /* 9679 /*
9680 * I217 Packet Loss issue: 9680 * I217 Packet Loss issue:
9681 * ensure that FEXTNVM4 Beacon Duration is set correctly 9681 * ensure that FEXTNVM4 Beacon Duration is set correctly
9682 * on power up. 9682 * on power up.
9683 * Set the Beacon Duration for I217 to 8 usec 9683 * Set the Beacon Duration for I217 to 8 usec
9684 */ 9684 */
9685 if (sc->sc_type >= WM_T_PCH_LPT) { 9685 if (sc->sc_type >= WM_T_PCH_LPT) {
9686 reg = CSR_READ(sc, WMREG_FEXTNVM4); 9686 reg = CSR_READ(sc, WMREG_FEXTNVM4);
9687 reg &= ~FEXTNVM4_BEACON_DURATION; 9687 reg &= ~FEXTNVM4_BEACON_DURATION;
9688 reg |= FEXTNVM4_BEACON_DURATION_8US; 9688 reg |= FEXTNVM4_BEACON_DURATION_8US;
9689 CSR_WRITE(sc, WMREG_FEXTNVM4, reg); 9689 CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
9690 } 9690 }
9691 9691
9692 /* Work-around I218 hang issue */ 9692 /* Work-around I218 hang issue */
9693 if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) || 9693 if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
9694 (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) || 9694 (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
9695 (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) || 9695 (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
9696 (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3)) 9696 (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
9697 wm_k1_workaround_lpt_lp(sc, link); 9697 wm_k1_workaround_lpt_lp(sc, link);
9698 9698
9699 if (sc->sc_type >= WM_T_PCH_LPT) { 9699 if (sc->sc_type >= WM_T_PCH_LPT) {
9700 /* 9700 /*
9701 * Set platform power management values for Latency 9701 * Set platform power management values for Latency
9702 * Tolerance Reporting (LTR) 9702 * Tolerance Reporting (LTR)
9703 */ 9703 */
9704 wm_platform_pm_pch_lpt(sc, 9704 wm_platform_pm_pch_lpt(sc,
9705 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0)); 9705 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
9706 } 9706 }
9707 9707
9708 /* Clear link partner's EEE ability */ 9708 /* Clear link partner's EEE ability */
9709 sc->eee_lp_ability = 0; 9709 sc->eee_lp_ability = 0;
9710 9710
9711 /* FEXTNVM6 K1-off workaround */ 9711 /* FEXTNVM6 K1-off workaround */
9712 if (sc->sc_type == WM_T_PCH_SPT) { 9712 if (sc->sc_type == WM_T_PCH_SPT) {
9713 reg = CSR_READ(sc, WMREG_FEXTNVM6); 9713 reg = CSR_READ(sc, WMREG_FEXTNVM6);
9714 if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE) 9714 if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
9715 reg |= FEXTNVM6_K1_OFF_ENABLE; 9715 reg |= FEXTNVM6_K1_OFF_ENABLE;
9716 else 9716 else
9717 reg &= ~FEXTNVM6_K1_OFF_ENABLE; 9717 reg &= ~FEXTNVM6_K1_OFF_ENABLE;
9718 CSR_WRITE(sc, WMREG_FEXTNVM6, reg); 9718 CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
9719 } 9719 }
9720 9720
9721 if (!link) 9721 if (!link)
9722 return; 9722 return;
9723 9723
9724 switch (sc->sc_type) { 9724 switch (sc->sc_type) {
9725 case WM_T_PCH2: 9725 case WM_T_PCH2:
9726 wm_k1_workaround_lv(sc); 9726 wm_k1_workaround_lv(sc);
9727 /* FALLTHROUGH */ 9727 /* FALLTHROUGH */
9728 case WM_T_PCH: 9728 case WM_T_PCH:
9729 if (sc->sc_phytype == WMPHY_82578) 9729 if (sc->sc_phytype == WMPHY_82578)
9730 wm_link_stall_workaround_hv(sc); 9730 wm_link_stall_workaround_hv(sc);
9731 break; 9731 break;
9732 default: 9732 default:
9733 break; 9733 break;
9734 } 9734 }
9735 9735
9736 /* Enable/Disable EEE after link up */ 9736 /* Enable/Disable EEE after link up */
9737 if (sc->sc_phytype > WMPHY_82579) 9737 if (sc->sc_phytype > WMPHY_82579)
9738 wm_set_eee_pchlan(sc); 9738 wm_set_eee_pchlan(sc);
9739} 9739}
9740 9740
9741/* 9741/*
9742 * wm_linkintr_tbi: 9742 * wm_linkintr_tbi:
9743 * 9743 *
9744 * Helper; handle link interrupts for TBI mode. 9744 * Helper; handle link interrupts for TBI mode.
9745 */ 9745 */
static void
wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t status;

	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
	    __func__));

	/* Snapshot link-up/duplex state before acting on the cause bits. */
	status = CSR_READ(sc, WMREG_STATUS);
	if (icr & ICR_LSC) {
		/* Link Status Change: re-run link detection. */
		wm_check_for_link(sc);
		if (status & STATUS_LU) {
			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
			    device_xname(sc->sc_dev),
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			/*
			 * NOTE: CTRL will update TFCE and RFCE automatically,
			 * so we should update sc->sc_ctrl
			 */

			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
			/*
			 * Recompute the duplex-dependent bits: collision
			 * distance (TCTL_COLD) and the XON flow-control
			 * frame enable (FCRTL_XONE), which tracks CTRL_TFCE.
			 */
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			sc->sc_fcrtl &= ~FCRTL_XONE;
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			if (sc->sc_ctrl & CTRL_TFCE)
				sc->sc_fcrtl |= FCRTL_XONE;
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			/* Pre-82543 chips keep FCRTL at a different offset. */
			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
			sc->sc_tbi_linkup = 1;
			if_link_state_change(ifp, LINK_STATE_UP);
		} else {
			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			    device_xname(sc->sc_dev)));
			sc->sc_tbi_linkup = 0;
			if_link_state_change(ifp, LINK_STATE_DOWN);
		}
		/* Update LED */
		wm_tbi_serdes_set_linkled(sc);
	} else if (icr & ICR_RXSEQ)
		/* Receive sequence errors are only logged, not handled. */
		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
		    device_xname(sc->sc_dev)));
}
9795 9795
/*
 * wm_linkintr_serdes:
 *
 *	Helper; handle link interrupts for SERDES mode.
 */
static void
wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data *mii = &sc->sc_mii;
	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
	uint32_t pcs_adv, pcs_lpab, reg;

	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
	    __func__));

	if (icr & ICR_LSC) {
		/* Check PCS */
		reg = CSR_READ(sc, WMREG_PCS_LSTS);
		if ((reg & PCS_LSTS_LINKOK) != 0) {
			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
			    device_xname(sc->sc_dev)));
			mii->mii_media_status |= IFM_ACTIVE;
			sc->sc_tbi_linkup = 1;
			if_link_state_change(ifp, LINK_STATE_UP);
		} else {
			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			    device_xname(sc->sc_dev)));
			/*
			 * NOTE(review): IFM_NONE is OR'd into
			 * mii_media_status here; IFM_NONE is a media
			 * subtype normally stored in mii_media_active --
			 * confirm this is intended.
			 */
			mii->mii_media_status |= IFM_NONE;
			sc->sc_tbi_linkup = 0;
			if_link_state_change(ifp, LINK_STATE_DOWN);
			wm_tbi_serdes_set_linkled(sc);
			return;
		}
		/* Link is up: report media; assumes 1000BASE-SX here. */
		mii->mii_media_active |= IFM_1000_SX;
		if ((reg & PCS_LSTS_FDX) != 0)
			mii->mii_media_active |= IFM_FDX;
		else
			mii->mii_media_active |= IFM_HDX;
		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
			/*
			 * Autoneg: derive flow-control from the advertised
			 * (PCS_ANADV) and link-partner (PCS_LPAB) pause bits,
			 * per the standard sym/asym pause resolution table.
			 */
			reg = CSR_READ(sc, WMREG_PCS_LSTS);
			if ((reg & PCS_LSTS_AN_COMP) == 0) {
				DPRINTF(sc, WM_DEBUG_LINK,
				    ("XXX LINKOK but not ACOMP\n"));
				return;
			}
			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
			DPRINTF(sc, WM_DEBUG_LINK,
			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
			if ((pcs_adv & TXCW_SYM_PAUSE)
			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
				/* Both sides symmetric: pause both ways. */
				mii->mii_media_active |= IFM_FLOW
				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
			    && (pcs_adv & TXCW_ASYM_PAUSE)
			    && (pcs_lpab & TXCW_SYM_PAUSE)
			    && (pcs_lpab & TXCW_ASYM_PAUSE))
				/* We only send pause frames. */
				mii->mii_media_active |= IFM_FLOW
				    | IFM_ETH_TXPAUSE;
			else if ((pcs_adv & TXCW_SYM_PAUSE)
			    && (pcs_adv & TXCW_ASYM_PAUSE)
			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
			    && (pcs_lpab & TXCW_ASYM_PAUSE))
				/* We only honor received pause frames. */
				mii->mii_media_active |= IFM_FLOW
				    | IFM_ETH_RXPAUSE;
		}
		/* Update LED */
		wm_tbi_serdes_set_linkled(sc);
	} else
		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
		    device_xname(sc->sc_dev)));
}
9870 9870
9871/* 9871/*
9872 * wm_linkintr: 9872 * wm_linkintr:
9873 * 9873 *
9874 * Helper; handle link interrupts. 9874 * Helper; handle link interrupts.
9875 */ 9875 */
9876static void 9876static void
9877wm_linkintr(struct wm_softc *sc, uint32_t icr) 9877wm_linkintr(struct wm_softc *sc, uint32_t icr)
9878{ 9878{
9879 9879
9880 KASSERT(WM_CORE_LOCKED(sc)); 9880 KASSERT(WM_CORE_LOCKED(sc));
9881 9881
9882 if (sc->sc_flags & WM_F_HAS_MII) 9882 if (sc->sc_flags & WM_F_HAS_MII)
9883 wm_linkintr_gmii(sc, icr); 9883 wm_linkintr_gmii(sc, icr);
9884 else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES) 9884 else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
9885 && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))) 9885 && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
9886 wm_linkintr_serdes(sc, icr); 9886 wm_linkintr_serdes(sc, icr);
9887 else 9887 else
9888 wm_linkintr_tbi(sc, icr); 9888 wm_linkintr_tbi(sc, icr);
9889} 9889}
9890 9890
9891 9891
9892static inline void 9892static inline void
9893wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq) 9893wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
9894{ 9894{
9895 9895
9896 if (wmq->wmq_txrx_use_workqueue) 9896 if (wmq->wmq_txrx_use_workqueue)
9897 workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu()); 9897 workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
9898 else 9898 else
9899 softint_schedule(wmq->wmq_si); 9899 softint_schedule(wmq->wmq_si);
9900} 9900}
9901 9901
static inline void
wm_legacy_intr_disable(struct wm_softc *sc)
{

	/* Mask all interrupt causes (INTx/MSI mode). */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
}
9908 9908
static inline void
wm_legacy_intr_enable(struct wm_softc *sc)
{

	/* Unmask the causes this driver enabled at init (INTx/MSI mode). */
	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
}
9915 9915
9916/* 9916/*
9917 * wm_intr_legacy: 9917 * wm_intr_legacy:
9918 * 9918 *
9919 * Interrupt service routine for INTx and MSI. 9919 * Interrupt service routine for INTx and MSI.
9920 */ 9920 */
9921static int 9921static int
9922wm_intr_legacy(void *arg) 9922wm_intr_legacy(void *arg)
9923{ 9923{
9924 struct wm_softc *sc = arg; 9924 struct wm_softc *sc = arg;
9925 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 9925 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9926 struct wm_queue *wmq = &sc->sc_queue[0]; 9926 struct wm_queue *wmq = &sc->sc_queue[0];
9927 struct wm_txqueue *txq = &wmq->wmq_txq; 9927 struct wm_txqueue *txq = &wmq->wmq_txq;
9928 struct wm_rxqueue *rxq = &wmq->wmq_rxq; 9928 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
9929 u_int txlimit = sc->sc_tx_intr_process_limit; 9929 u_int txlimit = sc->sc_tx_intr_process_limit;
9930 u_int rxlimit = sc->sc_rx_intr_process_limit; 9930 u_int rxlimit = sc->sc_rx_intr_process_limit;
9931 uint32_t icr, rndval = 0; 9931 uint32_t icr, rndval = 0;
9932 bool more = false; 9932 bool more = false;
9933 9933
9934 icr = CSR_READ(sc, WMREG_ICR); 9934 icr = CSR_READ(sc, WMREG_ICR);
9935 if ((icr & sc->sc_icr) == 0) 9935 if ((icr & sc->sc_icr) == 0)
9936 return 0; 9936 return 0;
9937 9937
9938 DPRINTF(sc, WM_DEBUG_TX, 9938 DPRINTF(sc, WM_DEBUG_TX,
9939 ("%s: INTx: got intr\n",device_xname(sc->sc_dev))); 9939 ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
9940 if (rndval == 0) 9940 if (rndval == 0)
9941 rndval = icr; 9941 rndval = icr;
9942 9942
9943 mutex_enter(rxq->rxq_lock); 9943 mutex_enter(rxq->rxq_lock);
9944 9944
9945 if (rxq->rxq_stopping) { 9945 if (rxq->rxq_stopping) {
9946 mutex_exit(rxq->rxq_lock); 9946 mutex_exit(rxq->rxq_lock);
9947 return 1; 9947 return 1;
9948 } 9948 }
9949 9949
9950#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS) 9950#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
9951 if (icr & (ICR_RXDMT0 | ICR_RXT0)) { 9951 if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
9952 DPRINTF(sc, WM_DEBUG_RX, 9952 DPRINTF(sc, WM_DEBUG_RX,
9953 ("%s: RX: got Rx intr 0x%08x\n", 9953 ("%s: RX: got Rx intr 0x%08x\n",
9954 device_xname(sc->sc_dev), 9954 device_xname(sc->sc_dev),
9955 icr & (uint32_t)(ICR_RXDMT0 | ICR_RXT0))); 9955 icr & (uint32_t)(ICR_RXDMT0 | ICR_RXT0)));
9956 WM_Q_EVCNT_INCR(rxq, intr); 9956 WM_Q_EVCNT_INCR(rxq, intr);
9957 } 9957 }
9958#endif 9958#endif
9959 /* 9959 /*
9960 * wm_rxeof() does *not* call upper layer functions directly, 9960 * wm_rxeof() does *not* call upper layer functions directly,
9961 * as if_percpuq_enqueue() just call softint_schedule(). 9961 * as if_percpuq_enqueue() just call softint_schedule().
9962 * So, we can call wm_rxeof() in interrupt context. 9962 * So, we can call wm_rxeof() in interrupt context.
9963 */ 9963 */
9964 more = wm_rxeof(rxq, rxlimit); 9964 more = wm_rxeof(rxq, rxlimit);
9965 9965
9966 mutex_exit(rxq->rxq_lock); 9966 mutex_exit(rxq->rxq_lock);
9967 mutex_enter(txq->txq_lock); 9967 mutex_enter(txq->txq_lock);
9968 9968
9969 if (txq->txq_stopping) { 9969 if (txq->txq_stopping) {
9970 mutex_exit(txq->txq_lock); 9970 mutex_exit(txq->txq_lock);
9971 return 1; 9971 return 1;
9972 } 9972 }
9973 9973
9974#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS) 9974#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
9975 if (icr & ICR_TXDW) { 9975 if (icr & ICR_TXDW) {
9976 DPRINTF(sc, WM_DEBUG_TX, 9976 DPRINTF(sc, WM_DEBUG_TX,
9977 ("%s: TX: got TXDW interrupt\n", 9977 ("%s: TX: got TXDW interrupt\n",
9978 device_xname(sc->sc_dev))); 9978 device_xname(sc->sc_dev)));
9979 WM_Q_EVCNT_INCR(txq, txdw); 9979 WM_Q_EVCNT_INCR(txq, txdw);
9980 } 9980 }
9981#endif 9981#endif
9982 more |= wm_txeof(txq, txlimit); 9982 more |= wm_txeof(txq, txlimit);
9983 if (!IF_IS_EMPTY(&ifp->if_snd)) 9983 if (!IF_IS_EMPTY(&ifp->if_snd))
9984 more = true; 9984 more = true;
9985 9985
9986 mutex_exit(txq->txq_lock); 9986 mutex_exit(txq->txq_lock);
9987 WM_CORE_LOCK(sc); 9987 WM_CORE_LOCK(sc);
9988 9988
9989 if (sc->sc_core_stopping) { 9989 if (sc->sc_core_stopping) {
9990 WM_CORE_UNLOCK(sc); 9990 WM_CORE_UNLOCK(sc);
9991 return 1; 9991 return 1;
9992 } 9992 }
9993 9993
9994 if (icr & (ICR_LSC | ICR_RXSEQ)) { 9994 if (icr & (ICR_LSC | ICR_RXSEQ)) {
9995 WM_EVCNT_INCR(&sc->sc_ev_linkintr); 9995 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
9996 wm_linkintr(sc, icr); 9996 wm_linkintr(sc, icr);
9997 } 9997 }
9998 if ((icr & ICR_GPI(0)) != 0) 9998 if ((icr & ICR_GPI(0)) != 0)
9999 device_printf(sc->sc_dev, "got module interrupt\n"); 9999 device_printf(sc->sc_dev, "got module interrupt\n");
10000 10000
10001 WM_CORE_UNLOCK(sc); 10001 WM_CORE_UNLOCK(sc);
10002 10002
10003 if (icr & ICR_RXO) { 10003 if (icr & ICR_RXO) {
10004#if defined(WM_DEBUG) 10004#if defined(WM_DEBUG)
10005 log(LOG_WARNING, "%s: Receive overrun\n", 10005 log(LOG_WARNING, "%s: Receive overrun\n",
10006 device_xname(sc->sc_dev)); 10006 device_xname(sc->sc_dev));
10007#endif /* defined(WM_DEBUG) */ 10007#endif /* defined(WM_DEBUG) */
10008 } 10008 }
10009 10009
10010 rnd_add_uint32(&sc->rnd_source, rndval); 10010 rnd_add_uint32(&sc->rnd_source, rndval);
10011 10011
10012 if (more) { 10012 if (more) {
10013 /* Try to get more packets going. */ 10013 /* Try to get more packets going. */
10014 wm_legacy_intr_disable(sc); 10014 wm_legacy_intr_disable(sc);
10015 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue; 10015 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
10016 wm_sched_handle_queue(sc, wmq); 10016 wm_sched_handle_queue(sc, wmq);
10017 } 10017 }
10018 10018
10019 return 1; 10019 return 1;
10020} 10020}
10021 10021
10022static inline void 10022static inline void
10023wm_txrxintr_disable(struct wm_queue *wmq) 10023wm_txrxintr_disable(struct wm_queue *wmq)
10024{ 10024{
10025 struct wm_softc *sc = wmq->wmq_txq.txq_sc; 10025 struct wm_softc *sc = wmq->wmq_txq.txq_sc;
10026 10026
10027 if (__predict_false(!wm_is_using_msix(sc))) { 10027 if (__predict_false(!wm_is_using_msix(sc))) {
10028 return wm_legacy_intr_disable(sc); 10028 return wm_legacy_intr_disable(sc);
10029 } 10029 }
10030 10030
10031 if (sc->sc_type == WM_T_82574) 10031 if (sc->sc_type == WM_T_82574)
10032 CSR_WRITE(sc, WMREG_IMC, 10032 CSR_WRITE(sc, WMREG_IMC,
10033 ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id)); 10033 ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
10034 else if (sc->sc_type == WM_T_82575) 10034 else if (sc->sc_type == WM_T_82575)
10035 CSR_WRITE(sc, WMREG_EIMC, 10035 CSR_WRITE(sc, WMREG_EIMC,
10036 EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id)); 10036 EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
10037 else 10037 else
10038 CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx); 10038 CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
10039} 10039}
10040 10040
10041static inline void 10041static inline void
10042wm_txrxintr_enable(struct wm_queue *wmq) 10042wm_txrxintr_enable(struct wm_queue *wmq)
10043{ 10043{
10044 struct wm_softc *sc = wmq->wmq_txq.txq_sc; 10044 struct wm_softc *sc = wmq->wmq_txq.txq_sc;
10045 10045
10046 wm_itrs_calculate(sc, wmq); 10046 wm_itrs_calculate(sc, wmq);
10047 10047
10048 if (__predict_false(!wm_is_using_msix(sc))) { 10048 if (__predict_false(!wm_is_using_msix(sc))) {
10049 return wm_legacy_intr_enable(sc); 10049 return wm_legacy_intr_enable(sc);
10050 } 10050 }
10051 10051
10052 /* 10052 /*
10053 * ICR_OTHER which is disabled in wm_linkintr_msix() is enabled here. 10053 * ICR_OTHER which is disabled in wm_linkintr_msix() is enabled here.
10054 * There is no need to care about which of RXQ(0) and RXQ(1) enable 10054 * There is no need to care about which of RXQ(0) and RXQ(1) enable
10055 * ICR_OTHER in first, because each RXQ/TXQ interrupt is disabled 10055 * ICR_OTHER in first, because each RXQ/TXQ interrupt is disabled
10056 * while each wm_handle_queue(wmq) is runnig. 10056 * while each wm_handle_queue(wmq) is runnig.
10057 */ 10057 */
10058 if (sc->sc_type == WM_T_82574) 10058 if (sc->sc_type == WM_T_82574)
10059 CSR_WRITE(sc, WMREG_IMS, 10059 CSR_WRITE(sc, WMREG_IMS,
10060 ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER); 10060 ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
10061 else if (sc->sc_type == WM_T_82575) 10061 else if (sc->sc_type == WM_T_82575)
10062 CSR_WRITE(sc, WMREG_EIMS, 10062 CSR_WRITE(sc, WMREG_EIMS,
10063 EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id)); 10063 EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
10064 else 10064 else
10065 CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx); 10065 CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
10066} 10066}
10067 10067
/* Per-queue MSI-X interrupt handler: polling-style Tx/Rx servicing. */
static int
wm_txrxintr_msix(void *arg)
{
	struct wm_queue *wmq = arg;
	struct wm_txqueue *txq = &wmq->wmq_txq;
	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
	struct wm_softc *sc = txq->txq_sc;
	u_int txlimit = sc->sc_tx_intr_process_limit;
	u_int rxlimit = sc->sc_rx_intr_process_limit;
	bool txmore;
	bool rxmore;

	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);

	DPRINTF(sc, WM_DEBUG_TX,
	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));

	/* Mask this queue's vector; re-enabled below or in wm_handle_queue(). */
	wm_txrxintr_disable(wmq);

	mutex_enter(txq->txq_lock);

	if (txq->txq_stopping) {
		mutex_exit(txq->txq_lock);
		return 1;
	}

	/* Reap completed transmit descriptors, up to txlimit of them. */
	WM_Q_EVCNT_INCR(txq, txdw);
	txmore = wm_txeof(txq, txlimit);
	/* wm_deferred start() is done in wm_handle_queue(). */
	mutex_exit(txq->txq_lock);

	DPRINTF(sc, WM_DEBUG_RX,
	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
	mutex_enter(rxq->rxq_lock);

	if (rxq->rxq_stopping) {
		mutex_exit(rxq->rxq_lock);
		return 1;
	}

	/* Process received packets, up to rxlimit of them. */
	WM_Q_EVCNT_INCR(rxq, intr);
	rxmore = wm_rxeof(rxq, rxlimit);
	mutex_exit(rxq->rxq_lock);

	/* Update the interrupt throttling register for this queue. */
	wm_itrs_writereg(sc, wmq);

	/*
	 * If either limit was hit with work left over, stay masked and
	 * defer to softint/workqueue; otherwise unmask the vector.
	 */
	if (txmore || rxmore) {
		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
		wm_sched_handle_queue(sc, wmq);
	} else
		wm_txrxintr_enable(wmq);

	return 1;
}
10122 10122
10123static void 10123static void
10124wm_handle_queue(void *arg) 10124wm_handle_queue(void *arg)
10125{ 10125{
10126 struct wm_queue *wmq = arg; 10126 struct wm_queue *wmq = arg;
10127 struct wm_txqueue *txq = &wmq->wmq_txq; 10127 struct wm_txqueue *txq = &wmq->wmq_txq;
10128 struct wm_rxqueue *rxq = &wmq->wmq_rxq; 10128 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
10129 struct wm_softc *sc = txq->txq_sc; 10129 struct wm_softc *sc = txq->txq_sc;
10130 u_int txlimit = sc->sc_tx_process_limit; 10130 u_int txlimit = sc->sc_tx_process_limit;
10131 u_int rxlimit = sc->sc_rx_process_limit; 10131 u_int rxlimit = sc->sc_rx_process_limit;
10132 bool txmore; 10132 bool txmore;
10133 bool rxmore; 10133 bool rxmore;
10134 10134
10135 mutex_enter(txq->txq_lock); 10135 mutex_enter(txq->txq_lock);
10136 if (txq->txq_stopping) { 10136 if (txq->txq_stopping) {
10137 mutex_exit(txq->txq_lock); 10137 mutex_exit(txq->txq_lock);
10138 return; 10138 return;
10139 } 10139 }
10140 txmore = wm_txeof(txq, txlimit); 10140 txmore = wm_txeof(txq, txlimit);
10141 wm_deferred_start_locked(txq); 10141 wm_deferred_start_locked(txq);
10142 mutex_exit(txq->txq_lock); 10142 mutex_exit(txq->txq_lock);
10143 10143
10144 mutex_enter(rxq->rxq_lock); 10144 mutex_enter(rxq->rxq_lock);
10145 if (rxq->rxq_stopping) { 10145 if (rxq->rxq_stopping) {
10146 mutex_exit(rxq->rxq_lock); 10146 mutex_exit(rxq->rxq_lock);
10147 return; 10147 return;
10148 } 10148 }
10149 WM_Q_EVCNT_INCR(rxq, defer); 10149 WM_Q_EVCNT_INCR(rxq, defer);
10150 rxmore = wm_rxeof(rxq, rxlimit); 10150 rxmore = wm_rxeof(rxq, rxlimit);
10151 mutex_exit(rxq->rxq_lock); 10151 mutex_exit(rxq->rxq_lock);
10152 10152
10153 if (txmore || rxmore) { 10153 if (txmore || rxmore) {
10154 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue; 10154 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
10155 wm_sched_handle_queue(sc, wmq); 10155 wm_sched_handle_queue(sc, wmq);
10156 } else 10156 } else
10157 wm_txrxintr_enable(wmq); 10157 wm_txrxintr_enable(wmq);
10158} 10158}
10159 10159
10160static void 10160static void
10161wm_handle_queue_work(struct work *wk, void *context) 10161wm_handle_queue_work(struct work *wk, void *context)
10162{ 10162{
10163 struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie); 10163 struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
10164 10164
10165 /* 10165 /*
10166 * "enqueued flag" is not required here. 10166 * "enqueued flag" is not required here.
10167 */ 10167 */
10168 wm_handle_queue(wmq); 10168 wm_handle_queue(wmq);
10169} 10169}
10170 10170
10171/* 10171/*
10172 * wm_linkintr_msix: 10172 * wm_linkintr_msix:
10173 * 10173 *
10174 * Interrupt service routine for link status change for MSI-X. 10174 * Interrupt service routine for link status change for MSI-X.
10175 */ 10175 */
10176static int 10176static int
10177wm_linkintr_msix(void *arg) 10177wm_linkintr_msix(void *arg)
10178{ 10178{
10179 struct wm_softc *sc = arg; 10179 struct wm_softc *sc = arg;
10180 uint32_t reg; 10180 uint32_t reg;
10181 bool has_rxo; 10181 bool has_rxo;
10182 10182
10183 reg = CSR_READ(sc, WMREG_ICR); 10183 reg = CSR_READ(sc, WMREG_ICR);
10184 WM_CORE_LOCK(sc); 10184 WM_CORE_LOCK(sc);
10185 DPRINTF(sc, WM_DEBUG_LINK, 10185 DPRINTF(sc, WM_DEBUG_LINK,
10186 ("%s: LINK: got link intr. ICR = %08x\n", 10186 ("%s: LINK: got link intr. ICR = %08x\n",
10187 device_xname(sc->sc_dev), reg)); 10187 device_xname(sc->sc_dev), reg));
10188 10188
10189 if (sc->sc_core_stopping) 10189 if (sc->sc_core_stopping)
10190 goto out; 10190 goto out;
10191 10191
10192 if ((reg & ICR_LSC) != 0) { 10192 if ((reg & ICR_LSC) != 0) {
10193 WM_EVCNT_INCR(&sc->sc_ev_linkintr); 10193 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
10194 wm_linkintr(sc, ICR_LSC); 10194 wm_linkintr(sc, ICR_LSC);
10195 } 10195 }
10196 if ((reg & ICR_GPI(0)) != 0) 10196 if ((reg & ICR_GPI(0)) != 0)
10197 device_printf(sc->sc_dev, "got module interrupt\n"); 10197 device_printf(sc->sc_dev, "got module interrupt\n");
10198 10198
10199 /* 10199 /*
10200 * XXX 82574 MSI-X mode workaround 10200 * XXX 82574 MSI-X mode workaround
10201 * 10201 *
10202 * 82574 MSI-X mode causes receive overrun(RXO) interrupt as ICR_OTHER 10202 * 82574 MSI-X mode causes receive overrun(RXO) interrupt as ICR_OTHER
10203 * MSI-X vector, furthermore it does not cause neigher ICR_RXQ(0) nor 10203 * MSI-X vector, furthermore it does not cause neigher ICR_RXQ(0) nor
10204 * ICR_RXQ(1) vector. So, we generate ICR_RXQ(0) and ICR_RXQ(1) 10204 * ICR_RXQ(1) vector. So, we generate ICR_RXQ(0) and ICR_RXQ(1)
10205 * interrupts by writing WMREG_ICS to process receive packets. 10205 * interrupts by writing WMREG_ICS to process receive packets.
10206 */ 10206 */
10207 if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) { 10207 if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
10208#if defined(WM_DEBUG) 10208#if defined(WM_DEBUG)
10209 log(LOG_WARNING, "%s: Receive overrun\n", 10209 log(LOG_WARNING, "%s: Receive overrun\n",
10210 device_xname(sc->sc_dev)); 10210 device_xname(sc->sc_dev));
10211#endif /* defined(WM_DEBUG) */ 10211#endif /* defined(WM_DEBUG) */
10212 10212
10213 has_rxo = true; 10213 has_rxo = true;
10214 /* 10214 /*
10215 * The RXO interrupt is very high rate when receive traffic is 10215 * The RXO interrupt is very high rate when receive traffic is
10216 * high rate. We use polling mode for ICR_OTHER like Tx/Rx 10216 * high rate. We use polling mode for ICR_OTHER like Tx/Rx
10217 * interrupts. ICR_OTHER will be enabled at the end of 10217 * interrupts. ICR_OTHER will be enabled at the end of
10218 * wm_txrxintr_msix() which is kicked by both ICR_RXQ(0) and 10218 * wm_txrxintr_msix() which is kicked by both ICR_RXQ(0) and
10219 * ICR_RXQ(1) interrupts. 10219 * ICR_RXQ(1) interrupts.
10220 */ 10220 */
10221 CSR_WRITE(sc, WMREG_IMC, ICR_OTHER); 10221 CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
10222 10222
10223 CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1)); 10223 CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
10224 } 10224 }
10225 10225
10226 10226
10227 10227
10228out: 10228out:
10229 WM_CORE_UNLOCK(sc); 10229 WM_CORE_UNLOCK(sc);
10230 10230
10231 if (sc->sc_type == WM_T_82574) { 10231 if (sc->sc_type == WM_T_82574) {
10232 if (!has_rxo) 10232 if (!has_rxo)
10233 CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC); 10233 CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
10234 else 10234 else
10235 CSR_WRITE(sc, WMREG_IMS, ICR_LSC); 10235 CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
10236 } else if (sc->sc_type == WM_T_82575) 10236 } else if (sc->sc_type == WM_T_82575)
10237 CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER); 10237 CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
10238 else 10238 else
10239 CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx); 10239 CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
10240 10240
10241 return 1; 10241 return 1;
10242} 10242}
10243 10243
10244/* 10244/*
10245 * Media related. 10245 * Media related.
10246 * GMII, SGMII, TBI (and SERDES) 10246 * GMII, SGMII, TBI (and SERDES)
10247 */ 10247 */
10248 10248
10249/* Common */ 10249/* Common */
10250 10250
10251/* 10251/*
10252 * wm_tbi_serdes_set_linkled: 10252 * wm_tbi_serdes_set_linkled:
10253 * 10253 *
10254 * Update the link LED on TBI and SERDES devices. 10254 * Update the link LED on TBI and SERDES devices.
10255 */ 10255 */
10256static void 10256static void
10257wm_tbi_serdes_set_linkled(struct wm_softc *sc) 10257wm_tbi_serdes_set_linkled(struct wm_softc *sc)
10258{ 10258{
10259 10259
10260 if (sc->sc_tbi_linkup) 10260 if (sc->sc_tbi_linkup)
10261 sc->sc_ctrl |= CTRL_SWDPIN(0); 10261 sc->sc_ctrl |= CTRL_SWDPIN(0);
10262 else 10262 else
10263 sc->sc_ctrl &= ~CTRL_SWDPIN(0); 10263 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
10264 10264
10265 /* 82540 or newer devices are active low */ 10265 /* 82540 or newer devices are active low */
10266 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0; 10266 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
10267 10267
10268 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 10268 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10269} 10269}
10270 10270
10271/* GMII related */ 10271/* GMII related */
10272 10272
10273/* 10273/*
10274 * wm_gmii_reset: 10274 * wm_gmii_reset:
10275 * 10275 *
10276 * Reset the PHY. 10276 * Reset the PHY.
10277 */ 10277 */
10278static void 10278static void
10279wm_gmii_reset(struct wm_softc *sc) 10279wm_gmii_reset(struct wm_softc *sc)
10280{ 10280{
10281 uint32_t reg; 10281 uint32_t reg;
10282 int rv; 10282 int rv;
10283 10283
10284 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 10284 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
10285 device_xname(sc->sc_dev), __func__)); 10285 device_xname(sc->sc_dev), __func__));
10286 10286
10287 rv = sc->phy.acquire(sc); 10287 rv = sc->phy.acquire(sc);
10288 if (rv != 0) { 10288 if (rv != 0) {
10289 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 10289 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10290 __func__); 10290 __func__);
10291 return; 10291 return;
10292 } 10292 }
10293 10293
10294 switch (sc->sc_type) { 10294 switch (sc->sc_type) {
10295 case WM_T_82542_2_0: 10295 case WM_T_82542_2_0:
10296 case WM_T_82542_2_1: 10296 case WM_T_82542_2_1:
10297 /* null */ 10297 /* null */
10298 break; 10298 break;
10299 case WM_T_82543: 10299 case WM_T_82543:
10300 /* 10300 /*
10301 * With 82543, we need to force speed and duplex on the MAC 10301 * With 82543, we need to force speed and duplex on the MAC
10302 * equal to what the PHY speed and duplex configuration is. 10302 * equal to what the PHY speed and duplex configuration is.
10303 * In addition, we need to perform a hardware reset on the PHY 10303 * In addition, we need to perform a hardware reset on the PHY
10304 * to take it out of reset. 10304 * to take it out of reset.
10305 */ 10305 */
10306 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX; 10306 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
10307 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 10307 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10308 10308
10309 /* The PHY reset pin is active-low. */ 10309 /* The PHY reset pin is active-low. */
10310 reg = CSR_READ(sc, WMREG_CTRL_EXT); 10310 reg = CSR_READ(sc, WMREG_CTRL_EXT);
10311 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) | 10311 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
10312 CTRL_EXT_SWDPIN(4)); 10312 CTRL_EXT_SWDPIN(4));
10313 reg |= CTRL_EXT_SWDPIO(4); 10313 reg |= CTRL_EXT_SWDPIO(4);
10314 10314
10315 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 10315 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
10316 CSR_WRITE_FLUSH(sc); 10316 CSR_WRITE_FLUSH(sc);
10317 delay(10*1000); 10317 delay(10*1000);
10318 10318
10319 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4)); 10319 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
10320 CSR_WRITE_FLUSH(sc); 10320 CSR_WRITE_FLUSH(sc);
10321 delay(150); 10321 delay(150);
10322#if 0 10322#if 0
10323 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4); 10323 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
10324#endif 10324#endif
10325 delay(20*1000); /* XXX extra delay to get PHY ID? */ 10325 delay(20*1000); /* XXX extra delay to get PHY ID? */
10326 break; 10326 break;
10327 case WM_T_82544: /* Reset 10000us */ 10327 case WM_T_82544: /* Reset 10000us */
10328 case WM_T_82540: 10328 case WM_T_82540:
10329 case WM_T_82545: 10329 case WM_T_82545:
10330 case WM_T_82545_3: 10330 case WM_T_82545_3:
10331 case WM_T_82546: 10331 case WM_T_82546:
10332 case WM_T_82546_3: 10332 case WM_T_82546_3:
10333 case WM_T_82541: 10333 case WM_T_82541:
10334 case WM_T_82541_2: 10334 case WM_T_82541_2:
10335 case WM_T_82547: 10335 case WM_T_82547:
10336 case WM_T_82547_2: 10336 case WM_T_82547_2:
10337 case WM_T_82571: /* Reset 100us */ 10337 case WM_T_82571: /* Reset 100us */
10338 case WM_T_82572: 10338 case WM_T_82572:
10339 case WM_T_82573: 10339 case WM_T_82573:
10340 case WM_T_82574: 10340 case WM_T_82574:
10341 case WM_T_82575: 10341 case WM_T_82575:
10342 case WM_T_82576: 10342 case WM_T_82576:
10343 case WM_T_82580: 10343 case WM_T_82580:
10344 case WM_T_I350: 10344 case WM_T_I350:
10345 case WM_T_I354: 10345 case WM_T_I354:
10346 case WM_T_I210: 10346 case WM_T_I210:
10347 case WM_T_I211: 10347 case WM_T_I211:
10348 case WM_T_82583: 10348 case WM_T_82583:
10349 case WM_T_80003: 10349 case WM_T_80003:
10350 /* Generic reset */ 10350 /* Generic reset */
10351 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET); 10351 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
10352 CSR_WRITE_FLUSH(sc); 10352 CSR_WRITE_FLUSH(sc);
10353 delay(20000); 10353 delay(20000);
10354 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 10354 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10355 CSR_WRITE_FLUSH(sc); 10355 CSR_WRITE_FLUSH(sc);
10356 delay(20000); 10356 delay(20000);
10357 10357
10358 if ((sc->sc_type == WM_T_82541) 10358 if ((sc->sc_type == WM_T_82541)
10359 || (sc->sc_type == WM_T_82541_2) 10359 || (sc->sc_type == WM_T_82541_2)
10360 || (sc->sc_type == WM_T_82547) 10360 || (sc->sc_type == WM_T_82547)
10361 || (sc->sc_type == WM_T_82547_2)) { 10361 || (sc->sc_type == WM_T_82547_2)) {
10362 /* Workaround for igp are done in igp_reset() */ 10362 /* Workaround for igp are done in igp_reset() */
10363 /* XXX add code to set LED after phy reset */ 10363 /* XXX add code to set LED after phy reset */
10364 } 10364 }
10365 break; 10365 break;
10366 case WM_T_ICH8: 10366 case WM_T_ICH8:
10367 case WM_T_ICH9: 10367 case WM_T_ICH9:
10368 case WM_T_ICH10: 10368 case WM_T_ICH10:
10369 case WM_T_PCH: 10369 case WM_T_PCH:
10370 case WM_T_PCH2: 10370 case WM_T_PCH2:
10371 case WM_T_PCH_LPT: 10371 case WM_T_PCH_LPT:
10372 case WM_T_PCH_SPT: 10372 case WM_T_PCH_SPT:
10373 case WM_T_PCH_CNP: 10373 case WM_T_PCH_CNP:
10374 /* Generic reset */ 10374 /* Generic reset */
10375 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET); 10375 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
10376 CSR_WRITE_FLUSH(sc); 10376 CSR_WRITE_FLUSH(sc);
10377 delay(100); 10377 delay(100);
10378 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 10378 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10379 CSR_WRITE_FLUSH(sc); 10379 CSR_WRITE_FLUSH(sc);
10380 delay(150); 10380 delay(150);
10381 break; 10381 break;
10382 default: 10382 default:
10383 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev), 10383 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
10384 __func__); 10384 __func__);
10385 break; 10385 break;
10386 } 10386 }
10387 10387
10388 sc->phy.release(sc); 10388 sc->phy.release(sc);
10389 10389
10390 /* get_cfg_done */ 10390 /* get_cfg_done */
10391 wm_get_cfg_done(sc); 10391 wm_get_cfg_done(sc);
10392 10392
10393 /* Extra setup */ 10393 /* Extra setup */
10394 switch (sc->sc_type) { 10394 switch (sc->sc_type) {
10395 case WM_T_82542_2_0: 10395 case WM_T_82542_2_0:
10396 case WM_T_82542_2_1: 10396 case WM_T_82542_2_1:
10397 case WM_T_82543: 10397 case WM_T_82543:
10398 case WM_T_82544: 10398 case WM_T_82544:
10399 case WM_T_82540: 10399 case WM_T_82540:
10400 case WM_T_82545: 10400 case WM_T_82545:
10401 case WM_T_82545_3: 10401 case WM_T_82545_3:
10402 case WM_T_82546: 10402 case WM_T_82546:
10403 case WM_T_82546_3: 10403 case WM_T_82546_3:
10404 case WM_T_82541_2: 10404 case WM_T_82541_2:
10405 case WM_T_82547_2: 10405 case WM_T_82547_2:
10406 case WM_T_82571: 10406 case WM_T_82571:
10407 case WM_T_82572: 10407 case WM_T_82572:
10408 case WM_T_82573: 10408 case WM_T_82573:
10409 case WM_T_82574: 10409 case WM_T_82574:
10410 case WM_T_82583: 10410 case WM_T_82583:
10411 case WM_T_82575: 10411 case WM_T_82575:
10412 case WM_T_82576: 10412 case WM_T_82576:
10413 case WM_T_82580: 10413 case WM_T_82580:
10414 case WM_T_I350: 10414 case WM_T_I350:
10415 case WM_T_I354: 10415 case WM_T_I354:
10416 case WM_T_I210: 10416 case WM_T_I210:
10417 case WM_T_I211: 10417 case WM_T_I211:
10418 case WM_T_80003: 10418 case WM_T_80003:
10419 /* Null */ 10419 /* Null */
10420 break; 10420 break;
10421 case WM_T_82541: 10421 case WM_T_82541:
10422 case WM_T_82547: 10422 case WM_T_82547:
10423 /* XXX Configure actively LED after PHY reset */ 10423 /* XXX Configure actively LED after PHY reset */
10424 break; 10424 break;
10425 case WM_T_ICH8: 10425 case WM_T_ICH8:
10426 case WM_T_ICH9: 10426 case WM_T_ICH9:
10427 case WM_T_ICH10: 10427 case WM_T_ICH10:
10428 case WM_T_PCH: 10428 case WM_T_PCH:
10429 case WM_T_PCH2: 10429 case WM_T_PCH2:
10430 case WM_T_PCH_LPT: 10430 case WM_T_PCH_LPT:
10431 case WM_T_PCH_SPT: 10431 case WM_T_PCH_SPT:
10432 case WM_T_PCH_CNP: 10432 case WM_T_PCH_CNP:
10433 wm_phy_post_reset(sc); 10433 wm_phy_post_reset(sc);
10434 break; 10434 break;
10435 default: 10435 default:
10436 panic("%s: unknown type\n", __func__); 10436 panic("%s: unknown type\n", __func__);
10437 break; 10437 break;
10438 } 10438 }
10439} 10439}
10440 10440
10441/* 10441/*
10442 * Setup sc_phytype and mii_{read|write}reg. 10442 * Setup sc_phytype and mii_{read|write}reg.
10443 * 10443 *
10444 * To identify PHY type, correct read/write function should be selected. 10444 * To identify PHY type, correct read/write function should be selected.
10445 * To select correct read/write function, PCI ID or MAC type are required 10445 * To select correct read/write function, PCI ID or MAC type are required
10446 * without accessing PHY registers. 10446 * without accessing PHY registers.
10447 * 10447 *
10448 * On the first call of this function, PHY ID is not known yet. Check 10448 * On the first call of this function, PHY ID is not known yet. Check
10449 * PCI ID or MAC type. The list of the PCI ID may not be perfect, so the 10449 * PCI ID or MAC type. The list of the PCI ID may not be perfect, so the
10450 * result might be incorrect. 10450 * result might be incorrect.
10451 * 10451 *
10452 * In the second call, PHY OUI and model is used to identify PHY type. 10452 * In the second call, PHY OUI and model is used to identify PHY type.
10453 * It might not be perfect because of the lack of compared entry, but it 10453 * It might not be perfect because of the lack of compared entry, but it
10454 * would be better than the first call. 10454 * would be better than the first call.
10455 * 10455 *
10456 * If the detected new result and previous assumption is different, 10456 * If the detected new result and previous assumption is different,
10457 * diagnous message will be printed. 10457 * diagnous message will be printed.
10458 */ 10458 */
10459static void 10459static void
10460wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui, 10460wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
10461 uint16_t phy_model) 10461 uint16_t phy_model)
10462{ 10462{
10463 device_t dev = sc->sc_dev; 10463 device_t dev = sc->sc_dev;
10464 struct mii_data *mii = &sc->sc_mii; 10464 struct mii_data *mii = &sc->sc_mii;
10465 uint16_t new_phytype = WMPHY_UNKNOWN; 10465 uint16_t new_phytype = WMPHY_UNKNOWN;
10466 uint16_t doubt_phytype = WMPHY_UNKNOWN; 10466 uint16_t doubt_phytype = WMPHY_UNKNOWN;
10467 mii_readreg_t new_readreg; 10467 mii_readreg_t new_readreg;
10468 mii_writereg_t new_writereg; 10468 mii_writereg_t new_writereg;
10469 bool dodiag = true; 10469 bool dodiag = true;
10470 10470
10471 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 10471 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
10472 device_xname(sc->sc_dev), __func__)); 10472 device_xname(sc->sc_dev), __func__));
10473 10473
10474 /* 10474 /*
10475 * 1000BASE-T SFP uses SGMII and the first asumed PHY type is always 10475 * 1000BASE-T SFP uses SGMII and the first asumed PHY type is always
10476 * incorrect. So don't print diag output when it's 2nd call. 10476 * incorrect. So don't print diag output when it's 2nd call.
10477 */ 10477 */
10478 if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0)) 10478 if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
10479 dodiag = false; 10479 dodiag = false;
10480 10480
10481 if (mii->mii_readreg == NULL) { 10481 if (mii->mii_readreg == NULL) {
10482 /* 10482 /*
10483 * This is the first call of this function. For ICH and PCH 10483 * This is the first call of this function. For ICH and PCH
10484 * variants, it's difficult to determine the PHY access method 10484 * variants, it's difficult to determine the PHY access method
10485 * by sc_type, so use the PCI product ID for some devices. 10485 * by sc_type, so use the PCI product ID for some devices.
10486 */ 10486 */
10487 10487
10488 switch (sc->sc_pcidevid) { 10488 switch (sc->sc_pcidevid) {
10489 case PCI_PRODUCT_INTEL_PCH_M_LM: 10489 case PCI_PRODUCT_INTEL_PCH_M_LM:
10490 case PCI_PRODUCT_INTEL_PCH_M_LC: 10490 case PCI_PRODUCT_INTEL_PCH_M_LC:
10491 /* 82577 */ 10491 /* 82577 */
10492 new_phytype = WMPHY_82577; 10492 new_phytype = WMPHY_82577;
10493 break; 10493 break;
10494 case PCI_PRODUCT_INTEL_PCH_D_DM: 10494 case PCI_PRODUCT_INTEL_PCH_D_DM:
10495 case PCI_PRODUCT_INTEL_PCH_D_DC: 10495 case PCI_PRODUCT_INTEL_PCH_D_DC:
10496 /* 82578 */ 10496 /* 82578 */
10497 new_phytype = WMPHY_82578; 10497 new_phytype = WMPHY_82578;
10498 break; 10498 break;
10499 case PCI_PRODUCT_INTEL_PCH2_LV_LM: 10499 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
10500 case PCI_PRODUCT_INTEL_PCH2_LV_V: 10500 case PCI_PRODUCT_INTEL_PCH2_LV_V:
10501 /* 82579 */ 10501 /* 82579 */
10502 new_phytype = WMPHY_82579; 10502 new_phytype = WMPHY_82579;
10503 break; 10503 break;
10504 case PCI_PRODUCT_INTEL_82801H_82567V_3: 10504 case PCI_PRODUCT_INTEL_82801H_82567V_3:
10505 case PCI_PRODUCT_INTEL_82801I_BM: 10505 case PCI_PRODUCT_INTEL_82801I_BM:
10506 case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */ 10506 case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
10507 case PCI_PRODUCT_INTEL_82801J_R_BM_LM: 10507 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
10508 case PCI_PRODUCT_INTEL_82801J_R_BM_LF: 10508 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
10509 case PCI_PRODUCT_INTEL_82801J_D_BM_LM: 10509 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
10510 case PCI_PRODUCT_INTEL_82801J_D_BM_LF: 10510 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
10511 case PCI_PRODUCT_INTEL_82801J_R_BM_V: 10511 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
10512 /* ICH8, 9, 10 with 82567 */ 10512 /* ICH8, 9, 10 with 82567 */
10513 new_phytype = WMPHY_BM; 10513 new_phytype = WMPHY_BM;
10514 break; 10514 break;
10515 default: 10515 default:
10516 break; 10516 break;
10517 } 10517 }
10518 } else { 10518 } else {
10519 /* It's not the first call. Use PHY OUI and model */ 10519 /* It's not the first call. Use PHY OUI and model */
10520 switch (phy_oui) { 10520 switch (phy_oui) {
10521 case MII_OUI_ATTANSIC: /* atphy(4) */ 10521 case MII_OUI_ATTANSIC: /* atphy(4) */
10522 switch (phy_model) { 10522 switch (phy_model) {
10523 case MII_MODEL_ATTANSIC_AR8021: 10523 case MII_MODEL_ATTANSIC_AR8021:
10524 new_phytype = WMPHY_82578; 10524 new_phytype = WMPHY_82578;
10525 break; 10525 break;
10526 default: 10526 default:
10527 break; 10527 break;
10528 } 10528 }
10529 break; 10529 break;
10530 case MII_OUI_xxMARVELL: 10530 case MII_OUI_xxMARVELL:
10531 switch (phy_model) { 10531 switch (phy_model) {
10532 case MII_MODEL_xxMARVELL_I210: 10532 case MII_MODEL_xxMARVELL_I210:
10533 new_phytype = WMPHY_I210; 10533 new_phytype = WMPHY_I210;
10534 break; 10534 break;
10535 case MII_MODEL_xxMARVELL_E1011: 10535 case MII_MODEL_xxMARVELL_E1011:
10536 case MII_MODEL_xxMARVELL_E1000_3: 10536 case MII_MODEL_xxMARVELL_E1000_3:
10537 case MII_MODEL_xxMARVELL_E1000_5: 10537 case MII_MODEL_xxMARVELL_E1000_5:
10538 case MII_MODEL_xxMARVELL_E1112: 10538 case MII_MODEL_xxMARVELL_E1112:
10539 new_phytype = WMPHY_M88; 10539 new_phytype = WMPHY_M88;
10540 break; 10540 break;
10541 case MII_MODEL_xxMARVELL_E1149: 10541 case MII_MODEL_xxMARVELL_E1149:
10542 new_phytype = WMPHY_BM; 10542 new_phytype = WMPHY_BM;
10543 break; 10543 break;
10544 case MII_MODEL_xxMARVELL_E1111: 10544 case MII_MODEL_xxMARVELL_E1111:
10545 case MII_MODEL_xxMARVELL_I347: 10545 case MII_MODEL_xxMARVELL_I347:
10546 case MII_MODEL_xxMARVELL_E1512: 10546 case MII_MODEL_xxMARVELL_E1512:
10547 case MII_MODEL_xxMARVELL_E1340M: 10547 case MII_MODEL_xxMARVELL_E1340M:
10548 case MII_MODEL_xxMARVELL_E1543: 10548 case MII_MODEL_xxMARVELL_E1543:
10549 new_phytype = WMPHY_M88; 10549 new_phytype = WMPHY_M88;
10550 break; 10550 break;
10551 case MII_MODEL_xxMARVELL_I82563: 10551 case MII_MODEL_xxMARVELL_I82563:
10552 new_phytype = WMPHY_GG82563; 10552 new_phytype = WMPHY_GG82563;
10553 break; 10553 break;
10554 default: 10554 default:
10555 break; 10555 break;
10556 } 10556 }
10557 break; 10557 break;
10558 case MII_OUI_INTEL: 10558 case MII_OUI_INTEL:
10559 switch (phy_model) { 10559 switch (phy_model) {
10560 case MII_MODEL_INTEL_I82577: 10560 case MII_MODEL_INTEL_I82577:
10561 new_phytype = WMPHY_82577; 10561 new_phytype = WMPHY_82577;
10562 break; 10562 break;
10563 case MII_MODEL_INTEL_I82579: 10563 case MII_MODEL_INTEL_I82579:
10564 new_phytype = WMPHY_82579; 10564 new_phytype = WMPHY_82579;
10565 break; 10565 break;
10566 case MII_MODEL_INTEL_I217: 10566 case MII_MODEL_INTEL_I217:
10567 new_phytype = WMPHY_I217; 10567 new_phytype = WMPHY_I217;
10568 break; 10568 break;
10569 case MII_MODEL_INTEL_I82580: 10569 case MII_MODEL_INTEL_I82580:
10570 new_phytype = WMPHY_82580; 10570 new_phytype = WMPHY_82580;
10571 break; 10571 break;
10572 case MII_MODEL_INTEL_I350: 10572 case MII_MODEL_INTEL_I350:
10573 new_phytype = WMPHY_I350; 10573 new_phytype = WMPHY_I350;
10574 break; 10574 break;
10575 break; 
10576 default: 10575 default:
10577 break; 10576 break;
10578 } 10577 }
10579 break; 10578 break;
10580 case MII_OUI_yyINTEL: 10579 case MII_OUI_yyINTEL:
10581 switch (phy_model) { 10580 switch (phy_model) {
10582 case MII_MODEL_yyINTEL_I82562G: 10581 case MII_MODEL_yyINTEL_I82562G:
10583 case MII_MODEL_yyINTEL_I82562EM: 10582 case MII_MODEL_yyINTEL_I82562EM:
10584 case MII_MODEL_yyINTEL_I82562ET: 10583 case MII_MODEL_yyINTEL_I82562ET:
10585 new_phytype = WMPHY_IFE; 10584 new_phytype = WMPHY_IFE;
10586 break; 10585 break;
10587 case MII_MODEL_yyINTEL_IGP01E1000: 10586 case MII_MODEL_yyINTEL_IGP01E1000:
10588 new_phytype = WMPHY_IGP; 10587 new_phytype = WMPHY_IGP;
10589 break; 10588 break;
10590 case MII_MODEL_yyINTEL_I82566: 10589 case MII_MODEL_yyINTEL_I82566:
10591 new_phytype = WMPHY_IGP_3; 10590 new_phytype = WMPHY_IGP_3;
10592 break; 10591 break;
10593 default: 10592 default:
10594 break; 10593 break;
10595 } 10594 }
10596 break; 10595 break;
10597 default: 10596 default:
10598 break; 10597 break;
10599 } 10598 }
10600 10599
10601 if (dodiag) { 10600 if (dodiag) {
10602 if (new_phytype == WMPHY_UNKNOWN) 10601 if (new_phytype == WMPHY_UNKNOWN)
10603 aprint_verbose_dev(dev, 10602 aprint_verbose_dev(dev,
10604 "%s: Unknown PHY model. OUI=%06x, " 10603 "%s: Unknown PHY model. OUI=%06x, "
10605 "model=%04x\n", __func__, phy_oui, 10604 "model=%04x\n", __func__, phy_oui,
10606 phy_model); 10605 phy_model);
10607 10606
10608 if ((sc->sc_phytype != WMPHY_UNKNOWN) 10607 if ((sc->sc_phytype != WMPHY_UNKNOWN)
10609 && (sc->sc_phytype != new_phytype)) { 10608 && (sc->sc_phytype != new_phytype)) {
10610 aprint_error_dev(dev, "Previously assumed PHY " 10609 aprint_error_dev(dev, "Previously assumed PHY "
10611 "type(%u) was incorrect. PHY type from PHY" 10610 "type(%u) was incorrect. PHY type from PHY"
10612 "ID = %u\n", sc->sc_phytype, new_phytype); 10611 "ID = %u\n", sc->sc_phytype, new_phytype);
10613 } 10612 }
10614 } 10613 }
10615 } 10614 }
10616 10615
10617 /* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */ 10616 /* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
10618 if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) { 10617 if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
10619 /* SGMII */ 10618 /* SGMII */
10620 new_readreg = wm_sgmii_readreg; 10619 new_readreg = wm_sgmii_readreg;
10621 new_writereg = wm_sgmii_writereg; 10620 new_writereg = wm_sgmii_writereg;
10622 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){ 10621 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
10623 /* BM2 (phyaddr == 1) */ 10622 /* BM2 (phyaddr == 1) */
10624 if ((sc->sc_phytype != WMPHY_UNKNOWN) 10623 if ((sc->sc_phytype != WMPHY_UNKNOWN)
10625 && (new_phytype != WMPHY_BM) 10624 && (new_phytype != WMPHY_BM)
10626 && (new_phytype != WMPHY_UNKNOWN)) 10625 && (new_phytype != WMPHY_UNKNOWN))
10627 doubt_phytype = new_phytype; 10626 doubt_phytype = new_phytype;
10628 new_phytype = WMPHY_BM; 10627 new_phytype = WMPHY_BM;
10629 new_readreg = wm_gmii_bm_readreg; 10628 new_readreg = wm_gmii_bm_readreg;
10630 new_writereg = wm_gmii_bm_writereg; 10629 new_writereg = wm_gmii_bm_writereg;
10631 } else if (sc->sc_type >= WM_T_PCH) { 10630 } else if (sc->sc_type >= WM_T_PCH) {
10632 /* All PCH* use _hv_ */ 10631 /* All PCH* use _hv_ */
10633 new_readreg = wm_gmii_hv_readreg; 10632 new_readreg = wm_gmii_hv_readreg;
10634 new_writereg = wm_gmii_hv_writereg; 10633 new_writereg = wm_gmii_hv_writereg;
10635 } else if (sc->sc_type >= WM_T_ICH8) { 10634 } else if (sc->sc_type >= WM_T_ICH8) {
10636 /* non-82567 ICH8, 9 and 10 */ 10635 /* non-82567 ICH8, 9 and 10 */
10637 new_readreg = wm_gmii_i82544_readreg; 10636 new_readreg = wm_gmii_i82544_readreg;
10638 new_writereg = wm_gmii_i82544_writereg; 10637 new_writereg = wm_gmii_i82544_writereg;
10639 } else if (sc->sc_type >= WM_T_80003) { 10638 } else if (sc->sc_type >= WM_T_80003) {
10640 /* 80003 */ 10639 /* 80003 */
10641 if ((sc->sc_phytype != WMPHY_UNKNOWN) 10640 if ((sc->sc_phytype != WMPHY_UNKNOWN)
10642 && (new_phytype != WMPHY_GG82563) 10641 && (new_phytype != WMPHY_GG82563)
10643 && (new_phytype != WMPHY_UNKNOWN)) 10642 && (new_phytype != WMPHY_UNKNOWN))
10644 doubt_phytype = new_phytype; 10643 doubt_phytype = new_phytype;
10645 new_phytype = WMPHY_GG82563; 10644 new_phytype = WMPHY_GG82563;
10646 new_readreg = wm_gmii_i80003_readreg; 10645 new_readreg = wm_gmii_i80003_readreg;
10647 new_writereg = wm_gmii_i80003_writereg; 10646 new_writereg = wm_gmii_i80003_writereg;
10648 } else if (sc->sc_type >= WM_T_I210) { 10647 } else if (sc->sc_type >= WM_T_I210) {
10649 /* I210 and I211 */ 10648 /* I210 and I211 */
10650 if ((sc->sc_phytype != WMPHY_UNKNOWN) 10649 if ((sc->sc_phytype != WMPHY_UNKNOWN)
10651 && (new_phytype != WMPHY_I210) 10650 && (new_phytype != WMPHY_I210)
10652 && (new_phytype != WMPHY_UNKNOWN)) 10651 && (new_phytype != WMPHY_UNKNOWN))
10653 doubt_phytype = new_phytype; 10652 doubt_phytype = new_phytype;
10654 new_phytype = WMPHY_I210; 10653 new_phytype = WMPHY_I210;
10655 new_readreg = wm_gmii_gs40g_readreg; 10654 new_readreg = wm_gmii_gs40g_readreg;
10656 new_writereg = wm_gmii_gs40g_writereg; 10655 new_writereg = wm_gmii_gs40g_writereg;
10657 } else if (sc->sc_type >= WM_T_82580) { 10656 } else if (sc->sc_type >= WM_T_82580) {
10658 /* 82580, I350 and I354 */ 10657 /* 82580, I350 and I354 */
10659 new_readreg = wm_gmii_82580_readreg; 10658 new_readreg = wm_gmii_82580_readreg;
10660 new_writereg = wm_gmii_82580_writereg; 10659 new_writereg = wm_gmii_82580_writereg;
10661 } else if (sc->sc_type >= WM_T_82544) { 10660 } else if (sc->sc_type >= WM_T_82544) {
10662 /* 82544, 0, [56], [17], 8257[1234] and 82583 */ 10661 /* 82544, 0, [56], [17], 8257[1234] and 82583 */
10663 new_readreg = wm_gmii_i82544_readreg; 10662 new_readreg = wm_gmii_i82544_readreg;
10664 new_writereg = wm_gmii_i82544_writereg; 10663 new_writereg = wm_gmii_i82544_writereg;
10665 } else { 10664 } else {
10666 new_readreg = wm_gmii_i82543_readreg; 10665 new_readreg = wm_gmii_i82543_readreg;
10667 new_writereg = wm_gmii_i82543_writereg; 10666 new_writereg = wm_gmii_i82543_writereg;
10668 } 10667 }
10669 10668
10670 if (new_phytype == WMPHY_BM) { 10669 if (new_phytype == WMPHY_BM) {
10671 /* All BM use _bm_ */ 10670 /* All BM use _bm_ */
10672 new_readreg = wm_gmii_bm_readreg; 10671 new_readreg = wm_gmii_bm_readreg;
10673 new_writereg = wm_gmii_bm_writereg; 10672 new_writereg = wm_gmii_bm_writereg;
10674 } 10673 }
10675 if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) { 10674 if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
10676 /* All PCH* use _hv_ */ 10675 /* All PCH* use _hv_ */
10677 new_readreg = wm_gmii_hv_readreg; 10676 new_readreg = wm_gmii_hv_readreg;
10678 new_writereg = wm_gmii_hv_writereg; 10677 new_writereg = wm_gmii_hv_writereg;
10679 } 10678 }
10680 10679
10681 /* Diag output */ 10680 /* Diag output */
10682 if (dodiag) { 10681 if (dodiag) {
10683 if (doubt_phytype != WMPHY_UNKNOWN) 10682 if (doubt_phytype != WMPHY_UNKNOWN)
10684 aprint_error_dev(dev, "Assumed new PHY type was " 10683 aprint_error_dev(dev, "Assumed new PHY type was "
10685 "incorrect. old = %u, new = %u\n", sc->sc_phytype, 10684 "incorrect. old = %u, new = %u\n", sc->sc_phytype,
10686 new_phytype); 10685 new_phytype);
10687 else if ((sc->sc_phytype != WMPHY_UNKNOWN) 10686 else if ((sc->sc_phytype != WMPHY_UNKNOWN)
10688 && (sc->sc_phytype != new_phytype)) 10687 && (sc->sc_phytype != new_phytype))
10689 aprint_error_dev(dev, "Previously assumed PHY type(%u)" 10688 aprint_error_dev(dev, "Previously assumed PHY type(%u)"
10690 "was incorrect. New PHY type = %u\n", 10689 "was incorrect. New PHY type = %u\n",
10691 sc->sc_phytype, new_phytype); 10690 sc->sc_phytype, new_phytype);
10692 10691
10693 if ((mii->mii_readreg != NULL) && 10692 if ((mii->mii_readreg != NULL) &&
10694 (new_phytype == WMPHY_UNKNOWN)) 10693 (new_phytype == WMPHY_UNKNOWN))
10695 aprint_error_dev(dev, "PHY type is still unknown.\n"); 10694 aprint_error_dev(dev, "PHY type is still unknown.\n");
10696 10695
10697 if ((mii->mii_readreg != NULL) && 10696 if ((mii->mii_readreg != NULL) &&
10698 (mii->mii_readreg != new_readreg)) 10697 (mii->mii_readreg != new_readreg))
10699 aprint_error_dev(dev, "Previously assumed PHY " 10698 aprint_error_dev(dev, "Previously assumed PHY "
10700 "read/write function was incorrect.\n"); 10699 "read/write function was incorrect.\n");
10701 } 10700 }
10702 10701
10703 /* Update now */ 10702 /* Update now */
10704 sc->sc_phytype = new_phytype; 10703 sc->sc_phytype = new_phytype;
10705 mii->mii_readreg = new_readreg; 10704 mii->mii_readreg = new_readreg;
10706 mii->mii_writereg = new_writereg; 10705 mii->mii_writereg = new_writereg;
10707 if (new_readreg == wm_gmii_hv_readreg) { 10706 if (new_readreg == wm_gmii_hv_readreg) {
10708 sc->phy.readreg_locked = wm_gmii_hv_readreg_locked; 10707 sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
10709 sc->phy.writereg_locked = wm_gmii_hv_writereg_locked; 10708 sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
10710 } else if (new_readreg == wm_sgmii_readreg) { 10709 } else if (new_readreg == wm_sgmii_readreg) {
10711 sc->phy.readreg_locked = wm_sgmii_readreg_locked; 10710 sc->phy.readreg_locked = wm_sgmii_readreg_locked;
10712 sc->phy.writereg_locked = wm_sgmii_writereg_locked; 10711 sc->phy.writereg_locked = wm_sgmii_writereg_locked;
10713 } else if (new_readreg == wm_gmii_i82544_readreg) { 10712 } else if (new_readreg == wm_gmii_i82544_readreg) {
10714 sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked; 10713 sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
10715 sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked; 10714 sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
10716 } 10715 }
10717} 10716}
10718 10717
10719/* 10718/*
10720 * wm_get_phy_id_82575: 10719 * wm_get_phy_id_82575:
10721 * 10720 *
10722 * Return PHY ID. Return -1 if it failed. 10721 * Return PHY ID. Return -1 if it failed.
10723 */ 10722 */
10724static int 10723static int
10725wm_get_phy_id_82575(struct wm_softc *sc) 10724wm_get_phy_id_82575(struct wm_softc *sc)
10726{ 10725{
10727 uint32_t reg; 10726 uint32_t reg;
10728 int phyid = -1; 10727 int phyid = -1;
10729 10728
10730 /* XXX */ 10729 /* XXX */
10731 if ((sc->sc_flags & WM_F_SGMII) == 0) 10730 if ((sc->sc_flags & WM_F_SGMII) == 0)
10732 return -1; 10731 return -1;
10733 10732
10734 if (wm_sgmii_uses_mdio(sc)) { 10733 if (wm_sgmii_uses_mdio(sc)) {
10735 switch (sc->sc_type) { 10734 switch (sc->sc_type) {
10736 case WM_T_82575: 10735 case WM_T_82575:
10737 case WM_T_82576: 10736 case WM_T_82576:
10738 reg = CSR_READ(sc, WMREG_MDIC); 10737 reg = CSR_READ(sc, WMREG_MDIC);
10739 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT; 10738 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
10740 break; 10739 break;
10741 case WM_T_82580: 10740 case WM_T_82580:
10742 case WM_T_I350: 10741 case WM_T_I350:
10743 case WM_T_I354: 10742 case WM_T_I354:
10744 case WM_T_I210: 10743 case WM_T_I210:
10745 case WM_T_I211: 10744 case WM_T_I211:
10746 reg = CSR_READ(sc, WMREG_MDICNFG); 10745 reg = CSR_READ(sc, WMREG_MDICNFG);
10747 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT; 10746 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
10748 break; 10747 break;
10749 default: 10748 default:
10750 return -1; 10749 return -1;
10751 } 10750 }
10752 } 10751 }
10753 10752
10754 return phyid; 10753 return phyid;
10755} 10754}
10756 10755
10757/* 10756/*
10758 * wm_gmii_mediainit: 10757 * wm_gmii_mediainit:
10759 * 10758 *
10760 * Initialize media for use on 1000BASE-T devices. 10759 * Initialize media for use on 1000BASE-T devices.
10761 */ 10760 */
10762static void 10761static void
10763wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid) 10762wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
10764{ 10763{
10765 device_t dev = sc->sc_dev; 10764 device_t dev = sc->sc_dev;
10766 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 10765 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10767 struct mii_data *mii = &sc->sc_mii; 10766 struct mii_data *mii = &sc->sc_mii;
10768 10767
10769 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n", 10768 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
10770 device_xname(sc->sc_dev), __func__)); 10769 device_xname(sc->sc_dev), __func__));
10771 10770
10772 /* We have GMII. */ 10771 /* We have GMII. */
10773 sc->sc_flags |= WM_F_HAS_MII; 10772 sc->sc_flags |= WM_F_HAS_MII;
10774 10773
10775 if (sc->sc_type == WM_T_80003) 10774 if (sc->sc_type == WM_T_80003)
10776 sc->sc_tipg = TIPG_1000T_80003_DFLT; 10775 sc->sc_tipg = TIPG_1000T_80003_DFLT;
10777 else 10776 else
10778 sc->sc_tipg = TIPG_1000T_DFLT; 10777 sc->sc_tipg = TIPG_1000T_DFLT;
10779 10778
10780 /* 10779 /*
10781 * Let the chip set speed/duplex on its own based on 10780 * Let the chip set speed/duplex on its own based on
10782 * signals from the PHY. 10781 * signals from the PHY.
10783 * XXXbouyer - I'm not sure this is right for the 80003, 10782 * XXXbouyer - I'm not sure this is right for the 80003,
10784 * the em driver only sets CTRL_SLU here - but it seems to work. 10783 * the em driver only sets CTRL_SLU here - but it seems to work.
10785 */ 10784 */
10786 sc->sc_ctrl |= CTRL_SLU; 10785 sc->sc_ctrl |= CTRL_SLU;
10787 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 10786 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10788 10787
10789 /* Initialize our media structures and probe the GMII. */ 10788 /* Initialize our media structures and probe the GMII. */
10790 mii->mii_ifp = ifp; 10789 mii->mii_ifp = ifp;
10791 10790
10792 mii->mii_statchg = wm_gmii_statchg; 10791 mii->mii_statchg = wm_gmii_statchg;
10793 10792
10794 /* get PHY control from SMBus to PCIe */ 10793 /* get PHY control from SMBus to PCIe */
10795 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2) 10794 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
10796 || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT) 10795 || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
10797 || (sc->sc_type == WM_T_PCH_CNP)) 10796 || (sc->sc_type == WM_T_PCH_CNP))
10798 wm_init_phy_workarounds_pchlan(sc); 10797 wm_init_phy_workarounds_pchlan(sc);
10799 10798
10800 wm_gmii_reset(sc); 10799 wm_gmii_reset(sc);
10801 10800
10802 sc->sc_ethercom.ec_mii = &sc->sc_mii; 10801 sc->sc_ethercom.ec_mii = &sc->sc_mii;
10803 ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange, 10802 ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
10804 wm_gmii_mediastatus, sc->sc_core_lock); 10803 wm_gmii_mediastatus, sc->sc_core_lock);
10805 10804
10806 /* Setup internal SGMII PHY for SFP */ 10805 /* Setup internal SGMII PHY for SFP */
10807 wm_sgmii_sfp_preconfig(sc); 10806 wm_sgmii_sfp_preconfig(sc);
10808 10807
10809 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576) 10808 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
10810 || (sc->sc_type == WM_T_82580) 10809 || (sc->sc_type == WM_T_82580)
10811 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354) 10810 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
10812 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) { 10811 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
10813 if ((sc->sc_flags & WM_F_SGMII) == 0) { 10812 if ((sc->sc_flags & WM_F_SGMII) == 0) {
10814 /* Attach only one port */ 10813 /* Attach only one port */
10815 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1, 10814 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
10816 MII_OFFSET_ANY, MIIF_DOPAUSE); 10815 MII_OFFSET_ANY, MIIF_DOPAUSE);
10817 } else { 10816 } else {
10818 int i, id; 10817 int i, id;
10819 uint32_t ctrl_ext; 10818 uint32_t ctrl_ext;
10820 10819
10821 id = wm_get_phy_id_82575(sc); 10820 id = wm_get_phy_id_82575(sc);
10822 if (id != -1) { 10821 if (id != -1) {
10823 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 10822 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
10824 id, MII_OFFSET_ANY, MIIF_DOPAUSE); 10823 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
10825 } 10824 }
10826 if ((id == -1) 10825 if ((id == -1)
10827 || (LIST_FIRST(&mii->mii_phys) == NULL)) { 10826 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
10828 /* Power on sgmii phy if it is disabled */ 10827 /* Power on sgmii phy if it is disabled */
10829 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT); 10828 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
10830 CSR_WRITE(sc, WMREG_CTRL_EXT, 10829 CSR_WRITE(sc, WMREG_CTRL_EXT,
10831 ctrl_ext &~ CTRL_EXT_SWDPIN(3)); 10830 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
10832 CSR_WRITE_FLUSH(sc); 10831 CSR_WRITE_FLUSH(sc);
10833 delay(300*1000); /* XXX too long */ 10832 delay(300*1000); /* XXX too long */
10834 10833
10835 /* 10834 /*
10836 * From 1 to 8. 10835 * From 1 to 8.
10837 * 10836 *
10838 * I2C access fails with I2C register's ERROR 10837 * I2C access fails with I2C register's ERROR
10839 * bit set, so prevent error message while 10838 * bit set, so prevent error message while
10840 * scanning. 10839 * scanning.
10841 */ 10840 */
10842 sc->phy.no_errprint = true; 10841 sc->phy.no_errprint = true;
10843 for (i = 1; i < 8; i++) 10842 for (i = 1; i < 8; i++)
10844 mii_attach(sc->sc_dev, &sc->sc_mii, 10843 mii_attach(sc->sc_dev, &sc->sc_mii,
10845 0xffffffff, i, MII_OFFSET_ANY, 10844 0xffffffff, i, MII_OFFSET_ANY,
10846 MIIF_DOPAUSE); 10845 MIIF_DOPAUSE);
10847 sc->phy.no_errprint = false; 10846 sc->phy.no_errprint = false;
10848 10847
10849 /* Restore previous sfp cage power state */ 10848 /* Restore previous sfp cage power state */
10850 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext); 10849 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
10851 } 10850 }
10852 } 10851 }
10853 } else 10852 } else
10854 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 10853 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
10855 MII_OFFSET_ANY, MIIF_DOPAUSE); 10854 MII_OFFSET_ANY, MIIF_DOPAUSE);
10856 10855
10857 /* 10856 /*
10858 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call 10857 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
10859 * wm_set_mdio_slow_mode_hv() for a workaround and retry. 10858 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
10860 */ 10859 */
10861 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) 10860 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
10862 || (sc->sc_type == WM_T_PCH_SPT) 10861 || (sc->sc_type == WM_T_PCH_SPT)
10863 || (sc->sc_type == WM_T_PCH_CNP)) 10862 || (sc->sc_type == WM_T_PCH_CNP))
10864 && (LIST_FIRST(&mii->mii_phys) == NULL)) { 10863 && (LIST_FIRST(&mii->mii_phys) == NULL)) {
10865 wm_set_mdio_slow_mode_hv(sc); 10864 wm_set_mdio_slow_mode_hv(sc);
10866 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 10865 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
10867 MII_OFFSET_ANY, MIIF_DOPAUSE); 10866 MII_OFFSET_ANY, MIIF_DOPAUSE);
10868 } 10867 }
10869 10868
10870 /* 10869 /*
10871 * (For ICH8 variants) 10870 * (For ICH8 variants)
10872 * If PHY detection failed, use BM's r/w function and retry. 10871 * If PHY detection failed, use BM's r/w function and retry.
10873 */ 10872 */
10874 if (LIST_FIRST(&mii->mii_phys) == NULL) { 10873 if (LIST_FIRST(&mii->mii_phys) == NULL) {
10875 /* if failed, retry with *_bm_* */ 10874 /* if failed, retry with *_bm_* */
10876 aprint_verbose_dev(dev, "Assumed PHY access function " 10875 aprint_verbose_dev(dev, "Assumed PHY access function "
10877 "(type = %d) might be incorrect. Use BM and retry.\n", 10876 "(type = %d) might be incorrect. Use BM and retry.\n",
10878 sc->sc_phytype); 10877 sc->sc_phytype);
10879 sc->sc_phytype = WMPHY_BM; 10878 sc->sc_phytype = WMPHY_BM;
10880 mii->mii_readreg = wm_gmii_bm_readreg; 10879 mii->mii_readreg = wm_gmii_bm_readreg;
10881 mii->mii_writereg = wm_gmii_bm_writereg; 10880 mii->mii_writereg = wm_gmii_bm_writereg;
10882 10881
10883 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 10882 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
10884 MII_OFFSET_ANY, MIIF_DOPAUSE); 10883 MII_OFFSET_ANY, MIIF_DOPAUSE);
10885 } 10884 }
10886 10885
10887 if (LIST_FIRST(&mii->mii_phys) == NULL) { 10886 if (LIST_FIRST(&mii->mii_phys) == NULL) {
10888 /* Any PHY wasn't find */ 10887 /* Any PHY wasn't find */
10889 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL); 10888 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
10890 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE); 10889 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
10891 sc->sc_phytype = WMPHY_NONE; 10890 sc->sc_phytype = WMPHY_NONE;
10892 } else { 10891 } else {
10893 struct mii_softc *child = LIST_FIRST(&mii->mii_phys); 10892 struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
10894 10893
10895 /* 10894 /*
10896 * PHY Found! Check PHY type again by the second call of 10895 * PHY Found! Check PHY type again by the second call of
10897 * wm_gmii_setup_phytype. 10896 * wm_gmii_setup_phytype.
10898 */ 10897 */
10899 wm_gmii_setup_phytype(sc, child->mii_mpd_oui, 10898 wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
10900 child->mii_mpd_model); 10899 child->mii_mpd_model);
10901 10900
10902 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO); 10901 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
10903 } 10902 }
10904} 10903}
10905 10904
/*
 * wm_gmii_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media on a 1000BASE-T device.
 *	Returns 0 when the interface is down or when the MII layer
 *	reports ENXIO (no PHY attached); otherwise the mii_mediachg()
 *	result.
 */
static int
wm_gmii_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t reg;
	int rc;

	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	/* Nothing to program while the interface is down. */
	if ((ifp->if_flags & IFF_UP) == 0)
		return 0;

	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
	if ((sc->sc_type == WM_T_82580)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
	    || (sc->sc_type == WM_T_I211)) {
		/*
		 * NOTE(review): clearing PHPM_GO_LINK_D presumably allows
		 * the PHY to (re)establish link before the media change --
		 * confirm against the i210/i350 datasheet.
		 */
		reg = CSR_READ(sc, WMREG_PHPM);
		reg &= ~PHPM_GO_LINK_D;
		CSR_WRITE(sc, WMREG_PHPM, reg);
	}

	/* Disable D0 LPLU. */
	wm_lplu_d0_disable(sc);

	/* Clear any previously forced speed/duplex and set link up. */
	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
	sc->sc_ctrl |= CTRL_SLU;
	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
	    || (sc->sc_type > WM_T_82543)) {
		/* Let autonegotiation/the PHY determine speed and duplex. */
		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
	} else {
		/* Old (<= 82543) chips: force speed/duplex from the media word. */
		sc->sc_ctrl &= ~CTRL_ASDE;
		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
		if (ife->ifm_media & IFM_FDX)
			sc->sc_ctrl |= CTRL_FD;
		switch (IFM_SUBTYPE(ife->ifm_media)) {
		case IFM_10_T:
			sc->sc_ctrl |= CTRL_SPEED_10;
			break;
		case IFM_100_TX:
			sc->sc_ctrl |= CTRL_SPEED_100;
			break;
		case IFM_1000_T:
			sc->sc_ctrl |= CTRL_SPEED_1000;
			break;
		case IFM_NONE:
			/* There is no specific setting for IFM_NONE */
			break;
		default:
			panic("wm_gmii_mediachange: bad media 0x%x",
			    ife->ifm_media);
		}
	}
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	CSR_WRITE_FLUSH(sc);

	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
		wm_serdes_mediachange(ifp);

	if (sc->sc_type <= WM_T_82543)
		wm_gmii_reset(sc);
	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
		/* allow time for SFP cage time to power up phy */
		delay(300 * 1000);
		wm_gmii_reset(sc);
	}

	/* mii_mediachg() returns ENXIO when no PHY is attached; that's OK. */
	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
		return 0;
	return rc;
}
10983 10982
10984/* 10983/*
10985 * wm_gmii_mediastatus: [ifmedia interface function] 10984 * wm_gmii_mediastatus: [ifmedia interface function]
10986 * 10985 *
10987 * Get the current interface media status on a 1000BASE-T device. 10986 * Get the current interface media status on a 1000BASE-T device.
10988 */ 10987 */
10989static void 10988static void
10990wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 10989wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
10991{ 10990{
10992 struct wm_softc *sc = ifp->if_softc; 10991 struct wm_softc *sc = ifp->if_softc;
10993 10992
10994 ether_mediastatus(ifp, ifmr); 10993 ether_mediastatus(ifp, ifmr);
10995 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) 10994 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
10996 | sc->sc_flowflags; 10995 | sc->sc_flowflags;
10997} 10996}
10998 10997
10999#define MDI_IO CTRL_SWDPIN(2) 10998#define MDI_IO CTRL_SWDPIN(2)
11000#define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */ 10999#define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
11001#define MDI_CLK CTRL_SWDPIN(3) 11000#define MDI_CLK CTRL_SWDPIN(3)
11002 11001
/*
 * wm_i82543_mii_sendbits:
 *
 *	Bit-bang the most significant 'nbits' bits of 'data' out the
 *	82543's software-controlled MDIO pin, MSB first, toggling the
 *	MDIO clock pin once per bit.
 */
static void
wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
{
	uint32_t i, v;

	/* Configure MDIO and MDC as outputs, all other SWDPIO pins clear. */
	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= MDI_DIR | CTRL_SWDPIO(3);

	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
		/* Present the data bit on MDIO... */
		if (data & i)
			v |= MDI_IO;
		else
			v &= ~MDI_IO;
		CSR_WRITE(sc, WMREG_CTRL, v);
		CSR_WRITE_FLUSH(sc);
		delay(10);
		/* ...then clock it out: raise and lower MDC. */
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		CSR_WRITE_FLUSH(sc);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v);
		CSR_WRITE_FLUSH(sc);
		delay(10);
	}
}
11028 11027
/*
 * wm_i82543_mii_recvbits:
 *
 *	Bit-bang 16 bits in from the 82543's software-controlled MDIO
 *	pin, MSB first.  The extra clock cycles before and after the
 *	data bits apparently handle MDIO turnaround/idle framing --
 *	NOTE(review): confirm against the 82543 documentation.
 */
static uint16_t
wm_i82543_mii_recvbits(struct wm_softc *sc)
{
	uint32_t v, i;
	uint16_t data = 0;

	/* MDC is an output; MDIO is left as an input (the PHY drives it). */
	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= CTRL_SWDPIO(3);

	CSR_WRITE(sc, WMREG_CTRL, v);
	CSR_WRITE_FLUSH(sc);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	CSR_WRITE_FLUSH(sc);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	CSR_WRITE_FLUSH(sc);
	delay(10);

	/* Sample one bit per clock-high period, MSB first. */
	for (i = 0; i < 16; i++) {
		data <<= 1;
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		CSR_WRITE_FLUSH(sc);
		delay(10);
		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
			data |= 1;
		CSR_WRITE(sc, WMREG_CTRL, v);
		CSR_WRITE_FLUSH(sc);
		delay(10);
	}

	/* Trailing clock cycle. */
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	CSR_WRITE_FLUSH(sc);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	CSR_WRITE_FLUSH(sc);
	delay(10);

	return data;
}
11070 11069
11071#undef MDI_IO 11070#undef MDI_IO
11072#undef MDI_DIR 11071#undef MDI_DIR
11073#undef MDI_CLK 11072#undef MDI_CLK
11074 11073
/*
 * wm_gmii_i82543_readreg:	[mii interface function]
 *
 *	Read a PHY register on the GMII (i82543 version) by bit-banging
 *	an MDIO read frame: a 32-bit preamble, then start/opcode/phy/reg,
 *	then 16 data bits clocked in from the PHY.  Always returns 0.
 */
static int
wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
{
	struct wm_softc *sc = device_private(dev);

	/* 32 one-bits of preamble. */
	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
	/* Start, read opcode, PHY address, register address: 14 bits. */
	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
	*val = wm_i82543_mii_recvbits(sc) & 0xffff;

	DPRINTF(sc, WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
	    device_xname(dev), phy, reg, *val));

	return 0;
}
11095 11094
/*
 * wm_gmii_i82543_writereg:	[mii interface function]
 *
 *	Write a PHY register on the GMII (i82543 version) by bit-banging
 *	a full 32-bit MDIO write frame after a 32-bit preamble.
 *	Always returns 0.
 */
static int
wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
{
	struct wm_softc *sc = device_private(dev);

	/* 32 one-bits of preamble. */
	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
	/* Start, write opcode, PHY/register address, turnaround, data. */
	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
	    (MII_COMMAND_START << 30), 32);

	return 0;
}
11113 11112
11114/* 11113/*
11115 * wm_gmii_mdic_readreg: [mii interface function] 11114 * wm_gmii_mdic_readreg: [mii interface function]
11116 * 11115 *
11117 * Read a PHY register on the GMII. 11116 * Read a PHY register on the GMII.
11118 */ 11117 */
11119static int 11118static int
11120wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val) 11119wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
11121{ 11120{
11122 struct wm_softc *sc = device_private(dev); 11121 struct wm_softc *sc = device_private(dev);
11123 uint32_t mdic = 0; 11122 uint32_t mdic = 0;
11124 int i; 11123 int i;
11125 11124
11126 if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217) 11125 if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
11127 && (reg > MII_ADDRMASK)) { 11126 && (reg > MII_ADDRMASK)) {
11128 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n", 11127 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
11129 __func__, sc->sc_phytype, reg); 11128 __func__, sc->sc_phytype, reg);
11130 reg &= MII_ADDRMASK; 11129 reg &= MII_ADDRMASK;
11131 } 11130 }
11132 11131
11133 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) | 11132 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
11134 MDIC_REGADD(reg)); 11133 MDIC_REGADD(reg));
11135 11134
11136 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) { 11135 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
11137 delay(50); 11136 delay(50);
11138 mdic = CSR_READ(sc, WMREG_MDIC); 11137 mdic = CSR_READ(sc, WMREG_MDIC);
11139 if (mdic & MDIC_READY) 11138 if (mdic & MDIC_READY)
11140 break; 11139 break;
11141 } 11140 }
11142 11141
11143 if ((mdic & MDIC_READY) == 0) { 11142 if ((mdic & MDIC_READY) == 0) {
11144 DPRINTF(sc, WM_DEBUG_GMII, 11143 DPRINTF(sc, WM_DEBUG_GMII,
11145 ("%s: MDIC read timed out: phy %d reg %d\n", 11144 ("%s: MDIC read timed out: phy %d reg %d\n",
11146 device_xname(dev), phy, reg)); 11145 device_xname(dev), phy, reg));
11147 return ETIMEDOUT; 11146 return ETIMEDOUT;
11148 } else if (mdic & MDIC_E) { 11147 } else if (mdic & MDIC_E) {
11149 /* This is normal if no PHY is present. */ 11148 /* This is normal if no PHY is present. */
11150 DPRINTF(sc, WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n", 11149 DPRINTF(sc, WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
11151 device_xname(sc->sc_dev), phy, reg)); 11150 device_xname(sc->sc_dev), phy, reg));
11152 return -1; 11151 return -1;
11153 } else 11152 } else
11154 *val = MDIC_DATA(mdic); 11153 *val = MDIC_DATA(mdic);
11155 11154
11156 /* 11155 /*
11157 * Allow some time after each MDIC transaction to avoid 11156 * Allow some time after each MDIC transaction to avoid
11158 * reading duplicate data in the next MDIC transaction. 11157 * reading duplicate data in the next MDIC transaction.
11159 */ 11158 */
11160 if (sc->sc_type == WM_T_PCH2) 11159 if (sc->sc_type == WM_T_PCH2)
11161 delay(100); 11160 delay(100);
11162 11161
11163 return 0; 11162 return 0;
11164} 11163}
11165 11164
/*
 * wm_gmii_mdic_writereg:	[mii interface function]
 *
 *	Write a PHY register on the GMII via the MDIC register.
 *
 *	Returns 0 on success, ETIMEDOUT if the ready bit never asserted,
 *	or -1 if the controller flagged an MDI error.
 */
static int
wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
{
	struct wm_softc *sc = device_private(dev);
	uint32_t mdic = 0;
	int i;

	/*
	 * 82579/I217 access paths may legitimately pass register numbers
	 * with page bits above the 5-bit MDIC address field; for any
	 * other PHY type that indicates a bug, so warn and mask.
	 */
	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
	    && (reg > MII_ADDRMASK)) {
		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
		    __func__, sc->sc_phytype, reg);
		reg &= MII_ADDRMASK;
	}

	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg) | MDIC_DATA(val));

	/* Poll for completion. */
	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
		delay(50);
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
	}

	if ((mdic & MDIC_READY) == 0) {
		DPRINTF(sc, WM_DEBUG_GMII,
		    ("%s: MDIC write timed out: phy %d reg %d\n",
			device_xname(dev), phy, reg));
		return ETIMEDOUT;
	} else if (mdic & MDIC_E) {
		DPRINTF(sc, WM_DEBUG_GMII,
		    ("%s: MDIC write error: phy %d reg %d\n",
			device_xname(dev), phy, reg));
		return -1;
	}

	/*
	 * Allow some time after each MDIC transaction to avoid
	 * reading duplicate data in the next MDIC transaction.
	 */
	if (sc->sc_type == WM_T_PCH2)
		delay(100);

	return 0;
}
11216 11215
11217/* 11216/*
11218 * wm_gmii_i82544_readreg: [mii interface function] 11217 * wm_gmii_i82544_readreg: [mii interface function]
11219 * 11218 *
11220 * Read a PHY register on the GMII. 11219 * Read a PHY register on the GMII.
11221 */ 11220 */
11222static int 11221static int
11223wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val) 11222wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
11224{ 11223{
11225 struct wm_softc *sc = device_private(dev); 11224 struct wm_softc *sc = device_private(dev);
11226 int rv; 11225 int rv;
11227 11226
11228 if (sc->phy.acquire(sc)) { 11227 if (sc->phy.acquire(sc)) {
11229 device_printf(dev, "%s: failed to get semaphore\n", __func__); 11228 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11230 return -1; 11229 return -1;
11231 } 11230 }
11232 11231
11233 rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val); 11232 rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
11234 11233
11235 sc->phy.release(sc); 11234 sc->phy.release(sc);
11236 11235
11237 return rv; 11236 return rv;
11238} 11237}
11239 11238
/*
 * wm_gmii_i82544_readreg_locked:
 *
 *	Read a PHY register with the PHY semaphore already held.  For
 *	IGP-family PHYs, register numbers above BME1000_MAX_MULTI_PAGE_REG
 *	carry page bits, and the full number is written to the page-select
 *	register before the access.
 */
static int
wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
{
	struct wm_softc *sc = device_private(dev);
	int rv;

	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
		switch (sc->sc_phytype) {
		case WMPHY_IGP:
		case WMPHY_IGP_2:
		case WMPHY_IGP_3:
			/* Select the page first. */
			rv = wm_gmii_mdic_writereg(dev, phy,
			    IGPHY_PAGE_SELECT, reg);
			if (rv != 0)
				return rv;
			break;
		default:
#ifdef WM_DEBUG
			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
			    __func__, sc->sc_phytype, reg);
#endif
			break;
		}
	}

	/* The low 5 bits select the register within the current page. */
	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
}
11267 11266
11268/* 11267/*
11269 * wm_gmii_i82544_writereg: [mii interface function] 11268 * wm_gmii_i82544_writereg: [mii interface function]
11270 * 11269 *
11271 * Write a PHY register on the GMII. 11270 * Write a PHY register on the GMII.
11272 */ 11271 */
11273static int 11272static int
11274wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val) 11273wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
11275{ 11274{
11276 struct wm_softc *sc = device_private(dev); 11275 struct wm_softc *sc = device_private(dev);
11277 int rv; 11276 int rv;
11278 11277
11279 if (sc->phy.acquire(sc)) { 11278 if (sc->phy.acquire(sc)) {
11280 device_printf(dev, "%s: failed to get semaphore\n", __func__); 11279 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11281 return -1; 11280 return -1;
11282 } 11281 }
11283 11282
11284 rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val); 11283 rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
11285 sc->phy.release(sc); 11284 sc->phy.release(sc);
11286 11285
11287 return rv; 11286 return rv;
11288} 11287}
11289 11288
/*
 * wm_gmii_i82544_writereg_locked:
 *
 *	Write a PHY register with the PHY semaphore already held.  For
 *	IGP-family PHYs, register numbers above BME1000_MAX_MULTI_PAGE_REG
 *	carry page bits, and the full number is written to the page-select
 *	register before the access.
 */
static int
wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
{
	struct wm_softc *sc = device_private(dev);
	int rv;

	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
		switch (sc->sc_phytype) {
		case WMPHY_IGP:
		case WMPHY_IGP_2:
		case WMPHY_IGP_3:
			/* Select the page first. */
			rv = wm_gmii_mdic_writereg(dev, phy,
			    IGPHY_PAGE_SELECT, reg);
			if (rv != 0)
				return rv;
			break;
		default:
#ifdef WM_DEBUG
			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
			    __func__, sc->sc_phytype, reg);
#endif
			break;
		}
	}

	/* The low 5 bits select the register within the current page. */
	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
}
11317 11316
/*
 * wm_gmii_i80003_readreg:	[mii interface function]
 *
 *	Read a PHY register on the kumeran.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
 *
 *	Only PHY address 1 exists on the kumeran bus.  Returns 0 on
 *	success; -1 or an error code from the MDIC helpers on failure.
 */
static int
wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
{
	struct wm_softc *sc = device_private(dev);
	int page_select;
	uint16_t temp, temp2;
	int rv = 0;

	if (phy != 1) /* Only one PHY on kumeran bus */
		return -1;

	if (sc->phy.acquire(sc)) {
		device_printf(dev, "%s: failed to get semaphore\n", __func__);
		return -1;
	}

	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
		page_select = GG82563_PHY_PAGE_SELECT;
	else {
		/*
		 * Use Alternative Page Select register to access registers
		 * 30 and 31.
		 */
		page_select = GG82563_PHY_PAGE_SELECT_ALT;
	}
	/* Write the page number, then verify and read under the workaround. */
	temp = reg >> GG82563_PAGE_SHIFT;
	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
		goto out;

	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
		/*
		 * Wait more 200us for a bug of the ready bit in the MDIC
		 * register.
		 */
		delay(200);
		/* Read back the page select to confirm it actually took. */
		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
		if ((rv != 0) || (temp2 != temp)) {
			device_printf(dev, "%s failed\n", __func__);
			rv = -1;
			goto out;
		}
		delay(200);
		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
		delay(200);
	} else
		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);

out:
	sc->phy.release(sc);
	return rv;
}
11376 11375
11377/* 11376/*
11378 * wm_gmii_i80003_writereg: [mii interface function] 11377 * wm_gmii_i80003_writereg: [mii interface function]
11379 * 11378 *
11380 * Write a PHY register on the kumeran. 11379 * Write a PHY register on the kumeran.
11381 * This could be handled by the PHY layer if we didn't have to lock the 11380 * This could be handled by the PHY layer if we didn't have to lock the
11382 * resource ... 11381 * resource ...
11383 */ 11382 */
11384static int 11383static int
11385wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val) 11384wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
11386{ 11385{
11387 struct wm_softc *sc = device_private(dev); 11386 struct wm_softc *sc = device_private(dev);
11388 int page_select, rv; 11387 int page_select, rv;
11389 uint16_t temp, temp2; 11388 uint16_t temp, temp2;
11390 11389
11391 if (phy != 1) /* Only one PHY on kumeran bus */ 11390 if (phy != 1) /* Only one PHY on kumeran bus */
11392 return -1; 11391 return -1;
11393 11392
11394 if (sc->phy.acquire(sc)) { 11393 if (sc->phy.acquire(sc)) {
11395 device_printf(dev, "%s: failed to get semaphore\n", __func__); 11394 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11396 return -1; 11395 return -1;
11397 } 11396 }
11398 11397
11399 if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) 11398 if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
11400 page_select = GG82563_PHY_PAGE_SELECT; 11399 page_select = GG82563_PHY_PAGE_SELECT;
11401 else { 11400 else {
11402 /* 11401 /*
11403 * Use Alternative Page Select register to access registers 11402 * Use Alternative Page Select register to access registers
11404 * 30 and 31. 11403 * 30 and 31.
11405 */ 11404 */
11406 page_select = GG82563_PHY_PAGE_SELECT_ALT; 11405 page_select = GG82563_PHY_PAGE_SELECT_ALT;
11407 } 11406 }
11408 temp = (uint16_t)reg >> GG82563_PAGE_SHIFT; 11407 temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
11409 if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0) 11408 if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
11410 goto out; 11409 goto out;
11411 11410
11412 if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) { 11411 if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
11413 /* 11412 /*
11414 * Wait more 200us for a bug of the ready bit in the MDIC 11413 * Wait more 200us for a bug of the ready bit in the MDIC
11415 * register. 11414 * register.
11416 */ 11415 */
11417 delay(200); 11416 delay(200);
11418 rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2); 11417 rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
11419 if ((rv != 0) || (temp2 != temp)) { 11418 if ((rv != 0) || (temp2 != temp)) {
11420 device_printf(dev, "%s failed\n", __func__); 11419 device_printf(dev, "%s failed\n", __func__);
11421 rv = -1; 11420 rv = -1;
11422 goto out; 11421 goto out;
11423 } 11422 }
11424 delay(200); 11423 delay(200);
11425 rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val); 11424 rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11426 delay(200); 11425 delay(200);
11427 } else 11426 } else
11428 rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val); 11427 rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11429 11428
11430out: 11429out:
11431 sc->phy.release(sc); 11430 sc->phy.release(sc);
11432 return rv; 11431 return rv;
11433} 11432}
11434 11433
11435/* 11434/*
11436 * wm_gmii_bm_readreg: [mii interface function] 11435 * wm_gmii_bm_readreg: [mii interface function]
11437 * 11436 *
11438 * Read a PHY register on the kumeran 11437 * Read a PHY register on the kumeran
11439 * This could be handled by the PHY layer if we didn't have to lock the 11438 * This could be handled by the PHY layer if we didn't have to lock the
11440 * resource ... 11439 * resource ...
11441 */ 11440 */
11442static int 11441static int
11443wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val) 11442wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
11444{ 11443{
11445 struct wm_softc *sc = device_private(dev); 11444 struct wm_softc *sc = device_private(dev);
11446 uint16_t page = reg >> BME1000_PAGE_SHIFT; 11445 uint16_t page = reg >> BME1000_PAGE_SHIFT;
11447 int rv; 11446 int rv;
11448 11447
11449 if (sc->phy.acquire(sc)) { 11448 if (sc->phy.acquire(sc)) {
11450 device_printf(dev, "%s: failed to get semaphore\n", __func__); 11449 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11451 return -1; 11450 return -1;
11452 } 11451 }
11453 11452
11454 if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583)) 11453 if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
11455 phy = ((page >= 768) || ((page == 0) && (reg == 25)) 11454 phy = ((page >= 768) || ((page == 0) && (reg == 25))
11456 || (reg == 31)) ? 1 : phy; 11455 || (reg == 31)) ? 1 : phy;
11457 /* Page 800 works differently than the rest so it has its own func */ 11456 /* Page 800 works differently than the rest so it has its own func */
11458 if (page == BM_WUC_PAGE) { 11457 if (page == BM_WUC_PAGE) {
11459 rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false); 11458 rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
11460 goto release; 11459 goto release;
11461 } 11460 }
11462 11461
11463 if (reg > BME1000_MAX_MULTI_PAGE_REG) { 11462 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11464 if ((phy == 1) && (sc->sc_type != WM_T_82574) 11463 if ((phy == 1) && (sc->sc_type != WM_T_82574)
11465 && (sc->sc_type != WM_T_82583)) 11464 && (sc->sc_type != WM_T_82583))
11466 rv = wm_gmii_mdic_writereg(dev, phy, 11465 rv = wm_gmii_mdic_writereg(dev, phy,
11467 IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT); 11466 IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
11468 else 11467 else
11469 rv = wm_gmii_mdic_writereg(dev, phy, 11468 rv = wm_gmii_mdic_writereg(dev, phy,
11470 BME1000_PHY_PAGE_SELECT, page); 11469 BME1000_PHY_PAGE_SELECT, page);
11471 if (rv != 0) 11470 if (rv != 0)
11472 goto release; 11471 goto release;
11473 } 11472 }
11474 11473
11475 rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val); 11474 rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11476 11475
11477release: 11476release:
11478 sc->phy.release(sc); 11477 sc->phy.release(sc);
11479 return rv; 11478 return rv;
11480} 11479}
11481 11480
11482/* 11481/*
11483 * wm_gmii_bm_writereg: [mii interface function] 11482 * wm_gmii_bm_writereg: [mii interface function]
11484 * 11483 *
11485 * Write a PHY register on the kumeran. 11484 * Write a PHY register on the kumeran.
11486 * This could be handled by the PHY layer if we didn't have to lock the 11485 * This could be handled by the PHY layer if we didn't have to lock the
11487 * resource ... 11486 * resource ...
11488 */ 11487 */
11489static int 11488static int
11490wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val) 11489wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
11491{ 11490{
11492 struct wm_softc *sc = device_private(dev); 11491 struct wm_softc *sc = device_private(dev);
11493 uint16_t page = reg >> BME1000_PAGE_SHIFT; 11492 uint16_t page = reg >> BME1000_PAGE_SHIFT;
11494 int rv; 11493 int rv;
11495 11494
11496 if (sc->phy.acquire(sc)) { 11495 if (sc->phy.acquire(sc)) {
11497 device_printf(dev, "%s: failed to get semaphore\n", __func__); 11496 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11498 return -1; 11497 return -1;
11499 } 11498 }
11500 11499
11501 if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583)) 11500 if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
11502 phy = ((page >= 768) || ((page == 0) && (reg == 25)) 11501 phy = ((page >= 768) || ((page == 0) && (reg == 25))
11503 || (reg == 31)) ? 1 : phy; 11502 || (reg == 31)) ? 1 : phy;
11504 /* Page 800 works differently than the rest so it has its own func */ 11503 /* Page 800 works differently than the rest so it has its own func */
11505 if (page == BM_WUC_PAGE) { 11504 if (page == BM_WUC_PAGE) {
11506 rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false); 11505 rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
11507 goto release; 11506 goto release;
11508 } 11507 }
11509 11508
11510 if (reg > BME1000_MAX_MULTI_PAGE_REG) { 11509 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11511 if ((phy == 1) && (sc->sc_type != WM_T_82574) 11510 if ((phy == 1) && (sc->sc_type != WM_T_82574)
11512 && (sc->sc_type != WM_T_82583)) 11511 && (sc->sc_type != WM_T_82583))
11513 rv = wm_gmii_mdic_writereg(dev, phy, 11512 rv = wm_gmii_mdic_writereg(dev, phy,
11514 IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT); 11513 IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
11515 else 11514 else
11516 rv = wm_gmii_mdic_writereg(dev, phy, 11515 rv = wm_gmii_mdic_writereg(dev, phy,
11517 BME1000_PHY_PAGE_SELECT, page); 11516 BME1000_PHY_PAGE_SELECT, page);
11518 if (rv != 0) 11517 if (rv != 0)
11519 goto release; 11518 goto release;
11520 } 11519 }
11521 11520
11522 rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val); 11521 rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11523 11522
11524release: 11523release:
11525 sc->phy.release(sc); 11524 sc->phy.release(sc);
11526 return rv; 11525 return rv;
11527} 11526}
11528 11527
11529/* 11528/*
11530 * wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers 11529 * wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
11531 * @dev: pointer to the HW structure 11530 * @dev: pointer to the HW structure
11532 * @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG 11531 * @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
11533 * 11532 *
11534 * Assumes semaphore already acquired and phy_reg points to a valid memory 11533 * Assumes semaphore already acquired and phy_reg points to a valid memory
11535 * address to store contents of the BM_WUC_ENABLE_REG register. 11534 * address to store contents of the BM_WUC_ENABLE_REG register.
11536 */ 11535 */
11537static int 11536static int
11538wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp) 11537wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
11539{ 11538{
11540#ifdef WM_DEBUG 11539#ifdef WM_DEBUG
11541 struct wm_softc *sc = device_private(dev); 11540 struct wm_softc *sc = device_private(dev);
11542#endif 11541#endif
11543 uint16_t temp; 11542 uint16_t temp;
11544 int rv; 11543 int rv;
11545 11544
11546 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 11545 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
11547 device_xname(dev), __func__)); 11546 device_xname(dev), __func__));
11548 11547
11549 if (!phy_regp) 11548 if (!phy_regp)
11550 return -1; 11549 return -1;
11551 11550
11552 /* All page select, port ctrl and wakeup registers use phy address 1 */ 11551 /* All page select, port ctrl and wakeup registers use phy address 1 */
11553 11552
11554 /* Select Port Control Registers page */ 11553 /* Select Port Control Registers page */
11555 rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 11554 rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
11556 BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT); 11555 BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
11557 if (rv != 0) 11556 if (rv != 0)
11558 return rv; 11557 return rv;
11559 11558
11560 /* Read WUCE and save it */ 11559 /* Read WUCE and save it */
11561 rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp); 11560 rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
11562 if (rv != 0) 11561 if (rv != 0)
11563 return rv; 11562 return rv;
11564 11563
11565 /* Enable both PHY wakeup mode and Wakeup register page writes. 11564 /* Enable both PHY wakeup mode and Wakeup register page writes.
11566 * Prevent a power state change by disabling ME and Host PHY wakeup. 11565 * Prevent a power state change by disabling ME and Host PHY wakeup.
11567 */ 11566 */
11568 temp = *phy_regp; 11567 temp = *phy_regp;
11569 temp |= BM_WUC_ENABLE_BIT; 11568 temp |= BM_WUC_ENABLE_BIT;
11570 temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT); 11569 temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
11571 11570
11572 if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0) 11571 if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
11573 return rv; 11572 return rv;
11574 11573