Wed Mar 8 08:00:09 2017 UTC
PR kern/52039: use same safeguard as for the 82578


(kardel)
diff -r1.496 -r1.497 src/sys/dev/pci/if_wm.c

cvs diff -r1.496 -r1.497 src/sys/dev/pci/if_wm.c

--- src/sys/dev/pci/if_wm.c 2017/03/03 16:48:55 1.496
+++ src/sys/dev/pci/if_wm.c 2017/03/08 08:00:09 1.497
@@ -1,1086 +1,1086 @@
-/* $NetBSD: if_wm.c,v 1.496 2017/03/03 16:48:55 knakahara Exp $ */
+/* $NetBSD: if_wm.c,v 1.497 2017/03/08 08:00:09 kardel Exp $ */
 
 /*
  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
  * All rights reserved.
  *
  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. All advertising materials mentioning features or use of this software
  *    must display the following acknowledgement:
  *    This product includes software developed for the NetBSD Project by
  *    Wasabi Systems, Inc.
  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
  *    or promote products derived from this software without specific prior
  *    written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 /*******************************************************************************
 
   Copyright (c) 2001-2005, Intel Corporation
   All rights reserved.
 
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:
 
   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.
 
   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
 
   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.
 
   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   POSSIBILITY OF SUCH DAMAGE.
 
 *******************************************************************************/
 /*
  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
  *
  * TODO (in order of importance):
  *
  *	- Check XXX'ed comments
  *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
  *	- TX Multi queue improvement (refine queue selection logic)
  *	- Split header buffer for newer descriptors
  *	- EEE (Energy Efficiency Ethernet)
  *	- Virtual Function
  *	- Set LED correctly (based on contents in EEPROM)
  *	- Rework how parameters are loaded from the EEPROM.
  *	- Image Unique ID
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.496 2017/03/03 16:48:55 knakahara Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.497 2017/03/08 08:00:09 kardel Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_net_mpsafe.h"
 #include "opt_if_wm.h"
 #endif
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/callout.h>
 #include <sys/mbuf.h>
 #include <sys/malloc.h>
 #include <sys/kmem.h>
 #include <sys/kernel.h>
 #include <sys/socket.h>
 #include <sys/ioctl.h>
 #include <sys/errno.h>
 #include <sys/device.h>
 #include <sys/queue.h>
 #include <sys/syslog.h>
 #include <sys/interrupt.h>
 #include <sys/cpu.h>
 #include <sys/pcq.h>
 
 #include <sys/rndsource.h>
 
 #include <net/if.h>
 #include <net/if_dl.h>
 #include <net/if_media.h>
 #include <net/if_ether.h>
 
 #include <net/bpf.h>
 
 #include <netinet/in.h> /* XXX for struct ip */
 #include <netinet/in_systm.h> /* XXX for struct ip */
 #include <netinet/ip.h> /* XXX for struct ip */
 #include <netinet/ip6.h> /* XXX for struct ip6_hdr */
 #include <netinet/tcp.h> /* XXX for struct tcphdr */
 
 #include <sys/bus.h>
 #include <sys/intr.h>
 #include <machine/endian.h>
 
 #include <dev/mii/mii.h>
 #include <dev/mii/miivar.h>
 #include <dev/mii/miidevs.h>
 #include <dev/mii/mii_bitbang.h>
 #include <dev/mii/ikphyreg.h>
 #include <dev/mii/igphyreg.h>
 #include <dev/mii/igphyvar.h>
 #include <dev/mii/inbmphyreg.h>
 
 #include <dev/pci/pcireg.h>
 #include <dev/pci/pcivar.h>
 #include <dev/pci/pcidevs.h>
 
 #include <dev/pci/if_wmreg.h>
 #include <dev/pci/if_wmvar.h>
 
 #ifdef WM_DEBUG
 #define WM_DEBUG_LINK __BIT(0)
 #define WM_DEBUG_TX __BIT(1)
 #define WM_DEBUG_RX __BIT(2)
 #define WM_DEBUG_GMII __BIT(3)
 #define WM_DEBUG_MANAGE __BIT(4)
 #define WM_DEBUG_NVM __BIT(5)
 #define WM_DEBUG_INIT __BIT(6)
 #define WM_DEBUG_LOCK __BIT(7)
 int wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
 	| WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
 
 #define DPRINTF(x, y) if (wm_debug & (x)) printf y
 #else
 #define DPRINTF(x, y) /* nothing */
 #endif /* WM_DEBUG */
 
 #ifdef NET_MPSAFE
 #define WM_MPSAFE 1
 #define CALLOUT_FLAGS CALLOUT_MPSAFE
 #else
 #define CALLOUT_FLAGS 0
 #endif
 
 /*
  * This device driver's max interrupt numbers.
  */
 #define WM_MAX_NQUEUEINTR 16
 #define WM_MAX_NINTR (WM_MAX_NQUEUEINTR + 1)
 
 /*
  * Transmit descriptor list size. Due to errata, we can only have
  * 256 hardware descriptors in the ring on < 82544, but we use 4096
  * on >= 82544. We tell the upper layers that they can queue a lot
  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
  * of them at a time.
  *
  * We allow up to 256 (!) DMA segments per packet. Pathological packet
  * chains containing many small mbufs have been observed in zero-copy
  * situations with jumbo frames.
  */
 #define WM_NTXSEGS 256
 #define WM_IFQUEUELEN 256
 #define WM_TXQUEUELEN_MAX 64
 #define WM_TXQUEUELEN_MAX_82547 16
 #define WM_TXQUEUELEN(txq) ((txq)->txq_num)
 #define WM_TXQUEUELEN_MASK(txq) (WM_TXQUEUELEN(txq) - 1)
 #define WM_TXQUEUE_GC(txq) (WM_TXQUEUELEN(txq) / 8)
 #define WM_NTXDESC_82542 256
 #define WM_NTXDESC_82544 4096
 #define WM_NTXDESC(txq) ((txq)->txq_ndesc)
 #define WM_NTXDESC_MASK(txq) (WM_NTXDESC(txq) - 1)
 #define WM_TXDESCS_SIZE(txq) (WM_NTXDESC(txq) * (txq)->txq_descsize)
 #define WM_NEXTTX(txq, x) (((x) + 1) & WM_NTXDESC_MASK(txq))
 #define WM_NEXTTXS(txq, x) (((x) + 1) & WM_TXQUEUELEN_MASK(txq))
 
 #define WM_MAXTXDMA (2 * round_page(IP_MAXPACKET)) /* for TSO */
 
 #define WM_TXINTERQSIZE 256
 
 /*
  * Receive descriptor list size. We have one Rx buffer for normal
  * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized
  * packet. We allocate 256 receive descriptors, each with a 2k
  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
  */
 #define WM_NRXDESC 256
 #define WM_NRXDESC_MASK (WM_NRXDESC - 1)
 #define WM_NEXTRX(x) (((x) + 1) & WM_NRXDESC_MASK)
 #define WM_PREVRX(x) (((x) - 1) & WM_NRXDESC_MASK)
 
 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
 #define WM_RX_PROCESS_LIMIT_DEFAULT 100U
 #endif
 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
 #define WM_RX_INTR_PROCESS_LIMIT_DEFAULT 0U
 #endif
 
 typedef union txdescs {
 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
 	nq_txdesc_t sctxu_nq_txdescs[WM_NTXDESC_82544];
 } txdescs_t;
 
 typedef union rxdescs {
 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
 	ext_rxdesc_t sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
 	nq_rxdesc_t sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
 } rxdescs_t;
 
 #define WM_CDTXOFF(txq, x) ((txq)->txq_descsize * (x))
 #define WM_CDRXOFF(rxq, x) ((rxq)->rxq_descsize * (x))
 
 /*
  * Software state for transmit jobs.
  */
 struct wm_txsoft {
 	struct mbuf *txs_mbuf; /* head of our mbuf chain */
 	bus_dmamap_t txs_dmamap; /* our DMA map */
 	int txs_firstdesc; /* first descriptor in packet */
 	int txs_lastdesc; /* last descriptor in packet */
 	int txs_ndesc; /* # of descriptors used */
 };
 
 /*
  * Software state for receive buffers. Each descriptor gets a
  * 2k (MCLBYTES) buffer and a DMA map. For packets which fill
  * more than one buffer, we chain them together.
  */
 struct wm_rxsoft {
 	struct mbuf *rxs_mbuf; /* head of our mbuf chain */
 	bus_dmamap_t rxs_dmamap; /* our DMA map */
 };
 
 #define WM_LINKUP_TIMEOUT 50
 
 static uint16_t swfwphysem[] = {
 	SWFW_PHY0_SM,
 	SWFW_PHY1_SM,
 	SWFW_PHY2_SM,
 	SWFW_PHY3_SM
 };
 
 static const uint32_t wm_82580_rxpbs_table[] = {
 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
 };
 
 struct wm_softc;
 
 #ifdef WM_EVENT_COUNTERS
 #define WM_Q_EVCNT_DEFINE(qname, evname) \
 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
 	struct evcnt qname##_ev_##evname;
 
 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype) \
 	do{ \
 		snprintf((q)->qname##_##evname##_evcnt_name, \
 		    sizeof((q)->qname##_##evname##_evcnt_name), \
 		    "%s%02d%s", #qname, (qnum), #evname); \
 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname, \
 		    (evtype), NULL, (xname), \
 		    (q)->qname##_##evname##_evcnt_name); \
 	}while(0)
 
 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname) \
 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
 
 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname) \
 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
 
 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum) \
 	evcnt_detach(&(q)->qname##_ev_##evname);
 #endif /* WM_EVENT_COUNTERS */
 
 struct wm_txqueue {
 	kmutex_t *txq_lock; /* lock for tx operations */
 
 	struct wm_softc *txq_sc; /* shortcut (skip struct wm_queue) */
 
 	/* Software state for the transmit descriptors. */
 	int txq_num; /* must be a power of two */
 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
 
 	/* TX control data structures. */
 	int txq_ndesc; /* must be a power of two */
 	size_t txq_descsize; /* a tx descriptor size */
 	txdescs_t *txq_descs_u;
 	bus_dmamap_t txq_desc_dmamap; /* control data DMA map */
 	bus_dma_segment_t txq_desc_seg; /* control data segment */
 	int txq_desc_rseg; /* real number of control segment */
 #define txq_desc_dma txq_desc_dmamap->dm_segs[0].ds_addr
 #define txq_descs txq_descs_u->sctxu_txdescs
 #define txq_nq_descs txq_descs_u->sctxu_nq_txdescs
 
 	bus_addr_t txq_tdt_reg; /* offset of TDT register */
 
 	int txq_free; /* number of free Tx descriptors */
 	int txq_next; /* next ready Tx descriptor */
 
 	int txq_sfree; /* number of free Tx jobs */
 	int txq_snext; /* next free Tx job */
 	int txq_sdirty; /* dirty Tx jobs */
 
 	/* These 4 variables are used only on the 82547. */
 	int txq_fifo_size; /* Tx FIFO size */
 	int txq_fifo_head; /* current head of FIFO */
 	uint32_t txq_fifo_addr; /* internal address of start of FIFO */
 	int txq_fifo_stall; /* Tx FIFO is stalled */
 
 	/*
 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
 	 * CPUs. This queue intermediate them without block.
 	 */
 	pcq_t *txq_interq;
 
 	/*
 	 * NEWQUEUE devices must use not ifp->if_flags but txq->txq_flags
 	 * to manage Tx H/W queue's busy flag.
 	 */
 	int txq_flags; /* flags for H/W queue, see below */
 #define WM_TXQ_NO_SPACE 0x1
 
 	bool txq_stopping;
 
 	uint32_t txq_packets; /* for AIM */
 	uint32_t txq_bytes; /* for AIM */
 #ifdef WM_EVENT_COUNTERS
 	WM_Q_EVCNT_DEFINE(txq, txsstall) /* Tx stalled due to no txs */
 	WM_Q_EVCNT_DEFINE(txq, txdstall) /* Tx stalled due to no txd */
 	WM_Q_EVCNT_DEFINE(txq, txfifo_stall) /* Tx FIFO stalls (82547) */
 	WM_Q_EVCNT_DEFINE(txq, txdw) /* Tx descriptor interrupts */
 	WM_Q_EVCNT_DEFINE(txq, txqe) /* Tx queue empty interrupts */
 	/* XXX not used? */
 
 	WM_Q_EVCNT_DEFINE(txq, txipsum) /* IP checksums comp. out-bound */
 	WM_Q_EVCNT_DEFINE(txq,txtusum) /* TCP/UDP cksums comp. out-bound */
 	WM_Q_EVCNT_DEFINE(txq, txtusum6) /* TCP/UDP v6 cksums comp. out-bound */
 	WM_Q_EVCNT_DEFINE(txq, txtso) /* TCP seg offload out-bound (IPv4) */
 	WM_Q_EVCNT_DEFINE(txq, txtso6) /* TCP seg offload out-bound (IPv6) */
 	WM_Q_EVCNT_DEFINE(txq, txtsopain) /* painful header manip. for TSO */
 
 	WM_Q_EVCNT_DEFINE(txq, txdrop) /* Tx packets dropped(too many segs) */
 
 	WM_Q_EVCNT_DEFINE(txq, tu) /* Tx underrun */
 
 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
 #endif /* WM_EVENT_COUNTERS */
 };
 
 struct wm_rxqueue {
 	kmutex_t *rxq_lock; /* lock for rx operations */
 
 	struct wm_softc *rxq_sc; /* shortcut (skip struct wm_queue) */
 
 	/* Software state for the receive descriptors. */
 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
 
 	/* RX control data structures. */
 	int rxq_ndesc; /* must be a power of two */
 	size_t rxq_descsize; /* a rx descriptor size */
 	rxdescs_t *rxq_descs_u;
 	bus_dmamap_t rxq_desc_dmamap; /* control data DMA map */
 	bus_dma_segment_t rxq_desc_seg; /* control data segment */
 	int rxq_desc_rseg; /* real number of control segment */
 #define rxq_desc_dma rxq_desc_dmamap->dm_segs[0].ds_addr
 #define rxq_descs rxq_descs_u->sctxu_rxdescs
 #define rxq_ext_descs rxq_descs_u->sctxu_ext_rxdescs
 #define rxq_nq_descs rxq_descs_u->sctxu_nq_rxdescs
 
 	bus_addr_t rxq_rdt_reg; /* offset of RDT register */
 
 	int rxq_ptr; /* next ready Rx desc/queue ent */
 	int rxq_discard;
 	int rxq_len;
 	struct mbuf *rxq_head;
 	struct mbuf *rxq_tail;
 	struct mbuf **rxq_tailp;
 
 	bool rxq_stopping;
 
 	uint32_t rxq_packets; /* for AIM */
 	uint32_t rxq_bytes; /* for AIM */
 #ifdef WM_EVENT_COUNTERS
 	WM_Q_EVCNT_DEFINE(rxq, rxintr); /* Rx interrupts */
 
 	WM_Q_EVCNT_DEFINE(rxq, rxipsum); /* IP checksums checked in-bound */
 	WM_Q_EVCNT_DEFINE(rxq, rxtusum); /* TCP/UDP cksums checked in-bound */
 #endif
 };
 
 struct wm_queue {
 	int wmq_id; /* index of transmit and receive queues */
 	int wmq_intr_idx; /* index of MSI-X tables */
 
 	uint32_t wmq_itr; /* interrupt interval per queue. */
 	bool wmq_set_itr;
 
 	struct wm_txqueue wmq_txq;
 	struct wm_rxqueue wmq_rxq;
 
 	void *wmq_si;
 };
 
 struct wm_phyop {
 	int (*acquire)(struct wm_softc *);
 	void (*release)(struct wm_softc *);
 	int reset_delay_us;
 };
 
 /*
  * Software state per device.
  */
 struct wm_softc {
 	device_t sc_dev; /* generic device information */
 	bus_space_tag_t sc_st; /* bus space tag */
 	bus_space_handle_t sc_sh; /* bus space handle */
 	bus_size_t sc_ss; /* bus space size */
 	bus_space_tag_t sc_iot; /* I/O space tag */
 	bus_space_handle_t sc_ioh; /* I/O space handle */
 	bus_size_t sc_ios; /* I/O space size */
 	bus_space_tag_t sc_flasht; /* flash registers space tag */
 	bus_space_handle_t sc_flashh; /* flash registers space handle */
 	bus_size_t sc_flashs; /* flash registers space size */
 	off_t sc_flashreg_offset; /*
 				   * offset to flash registers from
 				   * start of BAR
 				   */
 	bus_dma_tag_t sc_dmat; /* bus DMA tag */
 
 	struct ethercom sc_ethercom; /* ethernet common data */
 	struct mii_data sc_mii; /* MII/media information */
 
 	pci_chipset_tag_t sc_pc;
 	pcitag_t sc_pcitag;
 	int sc_bus_speed; /* PCI/PCIX bus speed */
 	int sc_pcixe_capoff; /* PCI[Xe] capability reg offset */
 
 	uint16_t sc_pcidevid; /* PCI device ID */
 	wm_chip_type sc_type; /* MAC type */
 	int sc_rev; /* MAC revision */
 	wm_phy_type sc_phytype; /* PHY type */
 	uint32_t sc_mediatype; /* Media type (Copper, Fiber, SERDES)*/
 #define WM_MEDIATYPE_UNKNOWN 0x00
 #define WM_MEDIATYPE_FIBER 0x01
 #define WM_MEDIATYPE_COPPER 0x02
 #define WM_MEDIATYPE_SERDES 0x03 /* Internal SERDES */
 	int sc_funcid; /* unit number of the chip (0 to 3) */
 	int sc_flags; /* flags; see below */
 	int sc_if_flags; /* last if_flags */
 	int sc_flowflags; /* 802.3x flow control flags */
 	int sc_align_tweak;
 
 	void *sc_ihs[WM_MAX_NINTR]; /*
 				     * interrupt cookie.
 				     * legacy and msi use sc_ihs[0].
 				     */
 	pci_intr_handle_t *sc_intrs; /* legacy and msi use sc_intrs[0] */
 	int sc_nintrs; /* number of interrupts */
 
 	int sc_link_intr_idx; /* index of MSI-X tables */
 
 	callout_t sc_tick_ch; /* tick callout */
 	bool sc_core_stopping;
 
 	int sc_nvm_ver_major;
 	int sc_nvm_ver_minor;
 	int sc_nvm_ver_build;
 	int sc_nvm_addrbits; /* NVM address bits */
 	unsigned int sc_nvm_wordsize; /* NVM word size */
 	int sc_ich8_flash_base;
 	int sc_ich8_flash_bank_size;
 	int sc_nvm_k1_enabled;
 
 	int sc_nqueues;
 	struct wm_queue *sc_queue;
 	u_int sc_rx_process_limit; /* Rx processing repeat limit in softint */
 	u_int sc_rx_intr_process_limit; /* Rx processing repeat limit in H/W intr */
 
 	int sc_affinity_offset;
 
 #ifdef WM_EVENT_COUNTERS
 	/* Event counters. */
 	struct evcnt sc_ev_linkintr; /* Link interrupts */
 
 	/* WM_T_82542_2_1 only */
 	struct evcnt sc_ev_tx_xoff; /* Tx PAUSE(!0) frames */
 	struct evcnt sc_ev_tx_xon; /* Tx PAUSE(0) frames */
 	struct evcnt sc_ev_rx_xoff; /* Rx PAUSE(!0) frames */
 	struct evcnt sc_ev_rx_xon; /* Rx PAUSE(0) frames */
 	struct evcnt sc_ev_rx_macctl; /* Rx Unsupported */
 #endif /* WM_EVENT_COUNTERS */
 
 	/* This variable are used only on the 82547. */
 	callout_t sc_txfifo_ch; /* Tx FIFO stall work-around timer */
 
 	uint32_t sc_ctrl; /* prototype CTRL register */
 #if 0
 	uint32_t sc_ctrl_ext; /* prototype CTRL_EXT register */
 #endif
 	uint32_t sc_icr; /* prototype interrupt bits */
 	uint32_t sc_itr_init; /* prototype intr throttling reg */
 	uint32_t sc_tctl; /* prototype TCTL register */
 	uint32_t sc_rctl; /* prototype RCTL register */
 	uint32_t sc_txcw; /* prototype TXCW register */
 	uint32_t sc_tipg; /* prototype TIPG register */
 	uint32_t sc_fcrtl; /* prototype FCRTL register */
 	uint32_t sc_pba; /* prototype PBA register */
 
 	int sc_tbi_linkup; /* TBI link status */
 	int sc_tbi_serdes_anegticks; /* autonegotiation ticks */
 	int sc_tbi_serdes_ticks; /* tbi ticks */
 
 	int sc_mchash_type; /* multicast filter offset */
 
 	krndsource_t rnd_source; /* random source */
 
 	struct if_percpuq *sc_ipq; /* softint-based input queues */
 
 	kmutex_t *sc_core_lock; /* lock for softc operations */
 	kmutex_t *sc_ich_phymtx; /*
 				  * 82574/82583/ICH/PCH specific PHY
 				  * mutex. For 82574/82583, the mutex
 				  * is used for both PHY and NVM.
 				  */
 	kmutex_t *sc_ich_nvmmtx; /* ICH/PCH specific NVM mutex */
 
 	struct wm_phyop phy;
 };
 
 #define WM_CORE_LOCK(_sc) if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
 #define WM_CORE_UNLOCK(_sc) if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
 #define WM_CORE_LOCKED(_sc) (!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
 
 #define WM_RXCHAIN_RESET(rxq) \
 do { \
 	(rxq)->rxq_tailp = &(rxq)->rxq_head; \
 	*(rxq)->rxq_tailp = NULL; \
 	(rxq)->rxq_len = 0; \
 } while (/*CONSTCOND*/0)
 
 #define WM_RXCHAIN_LINK(rxq, m) \
 do { \
 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m); \
 	(rxq)->rxq_tailp = &(m)->m_next; \
 } while (/*CONSTCOND*/0)
 
 #ifdef WM_EVENT_COUNTERS
 #define WM_EVCNT_INCR(ev) (ev)->ev_count++
 #define WM_EVCNT_ADD(ev, val) (ev)->ev_count += (val)
 
 #define WM_Q_EVCNT_INCR(qname, evname) \
 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
 #define WM_Q_EVCNT_ADD(qname, evname, val) \
 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
 #else /* !WM_EVENT_COUNTERS */
 #define WM_EVCNT_INCR(ev) /* nothing */
 #define WM_EVCNT_ADD(ev, val) /* nothing */
 
 #define WM_Q_EVCNT_INCR(qname, evname) /* nothing */
 #define WM_Q_EVCNT_ADD(qname, evname, val) /* nothing */
 #endif /* !WM_EVENT_COUNTERS */
 
 #define CSR_READ(sc, reg) \
 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
 #define CSR_WRITE(sc, reg, val) \
 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
 #define CSR_WRITE_FLUSH(sc) \
 	(void) CSR_READ((sc), WMREG_STATUS)
 
 #define ICH8_FLASH_READ32(sc, reg) \
 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, \
 	    (reg) + sc->sc_flashreg_offset)
 #define ICH8_FLASH_WRITE32(sc, reg, data) \
 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, \
 	    (reg) + sc->sc_flashreg_offset, (data))
 
 #define ICH8_FLASH_READ16(sc, reg) \
 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, \
 	    (reg) + sc->sc_flashreg_offset)
 #define ICH8_FLASH_WRITE16(sc, reg, data) \
 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, \
 	    (reg) + sc->sc_flashreg_offset, (data))
 
 #define WM_CDTXADDR(txq, x) ((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
 #define WM_CDRXADDR(rxq, x) ((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
 
 #define WM_CDTXADDR_LO(txq, x) (WM_CDTXADDR((txq), (x)) & 0xffffffffU)
 #define WM_CDTXADDR_HI(txq, x) \
 	(sizeof(bus_addr_t) == 8 ? \
 	    (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
 
 #define WM_CDRXADDR_LO(rxq, x) (WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
 #define WM_CDRXADDR_HI(rxq, x) \
 	(sizeof(bus_addr_t) == 8 ? \
 	    (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
 
 /*
  * Register read/write functions.
  * Other than CSR_{READ|WRITE}().
  */
 #if 0
 static inline uint32_t wm_io_read(struct wm_softc *, int);
 #endif
 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
 	uint32_t, uint32_t);
 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
 
 /*
  * Descriptor sync/init functions.
  */
 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
 
 /*
  * Device driver interface functions and commonly used functions.
  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
  */
 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
 static int wm_match(device_t, cfdata_t, void *);
 static void wm_attach(device_t, device_t, void *);
 static int wm_detach(device_t, int);
 static bool wm_suspend(device_t, const pmf_qual_t *);
 static bool wm_resume(device_t, const pmf_qual_t *);
 static void wm_watchdog(struct ifnet *);
 static void wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
 static void wm_tick(void *);
 static int wm_ifflags_cb(struct ethercom *);
 static int wm_ioctl(struct ifnet *, u_long, void *);
 /* MAC address related */
 static uint16_t wm_check_alt_mac_addr(struct wm_softc *);
 static int wm_read_mac_addr(struct wm_softc *, uint8_t *);
 static void wm_set_ral(struct wm_softc *, const uint8_t *, int);
 static uint32_t wm_mchash(struct wm_softc *, const uint8_t *);
 static void wm_set_filter(struct wm_softc *);
 /* Reset and init related */
 static void wm_set_vlan(struct wm_softc *);
 static void wm_set_pcie_completion_timeout(struct wm_softc *);
 static void wm_get_auto_rd_done(struct wm_softc *);
 static void wm_lan_init_done(struct wm_softc *);
 static void wm_get_cfg_done(struct wm_softc *);
 static void wm_initialize_hardware_bits(struct wm_softc *);
 static uint32_t wm_rxpbs_adjust_82580(uint32_t);
 static void wm_reset_phy(struct wm_softc *);
 static void wm_flush_desc_rings(struct wm_softc *);
 static void wm_reset(struct wm_softc *);
 static int wm_add_rxbuf(struct wm_rxqueue *, int);
 static void wm_rxdrain(struct wm_rxqueue *);
 static void wm_rss_getkey(uint8_t *);
 static void wm_init_rss(struct wm_softc *);
 static void wm_adjust_qnum(struct wm_softc *, int);
 static int wm_setup_legacy(struct wm_softc *);
 static int wm_setup_msix(struct wm_softc *);
 static int wm_init(struct ifnet *);
 static int wm_init_locked(struct ifnet *);
 static void wm_turnon(struct wm_softc *);
 static void wm_turnoff(struct wm_softc *);
 static void wm_stop(struct ifnet *, int);
 static void wm_stop_locked(struct ifnet *, int);
 static void wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
 static void wm_82547_txfifo_stall(void *);
 static int wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
 static void wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
 /* DMA related */
 static int wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
 static void wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
 static void wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
 static void wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
 	struct wm_txqueue *);
 static int wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
 static void wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
 static void wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
 	struct wm_rxqueue *);
 static int wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
 static void wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
 static void wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
 static int wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
 static void wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
 static int wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
 static void wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
 	struct wm_txqueue *);
 static int wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
 	struct wm_rxqueue *);
 static int wm_alloc_txrx_queues(struct wm_softc *);
 static void wm_free_txrx_queues(struct wm_softc *);
 static int wm_init_txrx_queues(struct wm_softc *);
 /* Start */
 static int wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
 	uint32_t *, uint8_t *);
 static inline int wm_select_txqueue(struct ifnet *, struct mbuf *);
 static void wm_start(struct ifnet *);
 static void wm_start_locked(struct ifnet *);
 static int wm_transmit(struct ifnet *, struct mbuf *);
 static void wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
 static void wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
 static int wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
 	struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
 static void wm_nq_start(struct ifnet *);
 static void wm_nq_start_locked(struct ifnet *);
 static int wm_nq_transmit(struct ifnet *, struct mbuf *);
 static void wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
 static void wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
 static void wm_deferred_start_locked(struct wm_txqueue *);
 static void wm_handle_queue(void *);
 /* Interrupt */
 static int wm_txeof(struct wm_softc *, struct wm_txqueue *);
 static void wm_rxeof(struct wm_rxqueue *, u_int);
 static void wm_linkintr_gmii(struct wm_softc *, uint32_t);
 static void wm_linkintr_tbi(struct wm_softc *, uint32_t);
 static void wm_linkintr_serdes(struct wm_softc *, uint32_t);
 static void wm_linkintr(struct wm_softc *, uint32_t);
 static int wm_intr_legacy(void *);
 static inline void wm_txrxintr_disable(struct wm_queue *);
 static inline void wm_txrxintr_enable(struct wm_queue *);
 static void wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
 static int wm_txrxintr_msix(void *);
 static int wm_linkintr_msix(void *);
 
 /*
  * Media related.
  * GMII, SGMII, TBI, SERDES and SFP.
  */
 /* Common */
 static void wm_tbi_serdes_set_linkled(struct wm_softc *);
 /* GMII related */
 static void wm_gmii_reset(struct wm_softc *);
 static void wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
 static int wm_get_phy_id_82575(struct wm_softc *);
 static void wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
 static int wm_gmii_mediachange(struct ifnet *);
 static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
 static void wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
 static uint32_t wm_i82543_mii_recvbits(struct wm_softc *);
 static int wm_gmii_i82543_readreg(device_t, int, int);
 static void wm_gmii_i82543_writereg(device_t, int, int, int);
 static int wm_gmii_mdic_readreg(device_t, int, int);
 static void wm_gmii_mdic_writereg(device_t, int, int, int);
764static int wm_gmii_i82544_readreg(device_t, int, int); 764static int wm_gmii_i82544_readreg(device_t, int, int);
765static void wm_gmii_i82544_writereg(device_t, int, int, int); 765static void wm_gmii_i82544_writereg(device_t, int, int, int);
766static int wm_gmii_i80003_readreg(device_t, int, int); 766static int wm_gmii_i80003_readreg(device_t, int, int);
767static void wm_gmii_i80003_writereg(device_t, int, int, int); 767static void wm_gmii_i80003_writereg(device_t, int, int, int);
768static int wm_gmii_bm_readreg(device_t, int, int); 768static int wm_gmii_bm_readreg(device_t, int, int);
769static void wm_gmii_bm_writereg(device_t, int, int, int); 769static void wm_gmii_bm_writereg(device_t, int, int, int);
770static void wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int); 770static void wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
771static int wm_gmii_hv_readreg(device_t, int, int); 771static int wm_gmii_hv_readreg(device_t, int, int);
772static int wm_gmii_hv_readreg_locked(device_t, int, int); 772static int wm_gmii_hv_readreg_locked(device_t, int, int);
773static void wm_gmii_hv_writereg(device_t, int, int, int); 773static void wm_gmii_hv_writereg(device_t, int, int, int);
774static void wm_gmii_hv_writereg_locked(device_t, int, int, int); 774static void wm_gmii_hv_writereg_locked(device_t, int, int, int);
775static int wm_gmii_82580_readreg(device_t, int, int); 775static int wm_gmii_82580_readreg(device_t, int, int);
776static void wm_gmii_82580_writereg(device_t, int, int, int); 776static void wm_gmii_82580_writereg(device_t, int, int, int);
777static int wm_gmii_gs40g_readreg(device_t, int, int); 777static int wm_gmii_gs40g_readreg(device_t, int, int);
778static void wm_gmii_gs40g_writereg(device_t, int, int, int); 778static void wm_gmii_gs40g_writereg(device_t, int, int, int);
779static void wm_gmii_statchg(struct ifnet *); 779static void wm_gmii_statchg(struct ifnet *);
780/* 780/*
781 * kumeran related (80003, ICH* and PCH*). 781 * kumeran related (80003, ICH* and PCH*).
782 * These functions are not for accessing MII registers but for accessing 782 * These functions are not for accessing MII registers but for accessing
783 * kumeran specific registers. 783 * kumeran specific registers.
784 */ 784 */
785static int wm_kmrn_readreg(struct wm_softc *, int); 785static int wm_kmrn_readreg(struct wm_softc *, int);
786static int wm_kmrn_readreg_locked(struct wm_softc *, int); 786static int wm_kmrn_readreg_locked(struct wm_softc *, int);
787static void wm_kmrn_writereg(struct wm_softc *, int, int); 787static void wm_kmrn_writereg(struct wm_softc *, int, int);
788static void wm_kmrn_writereg_locked(struct wm_softc *, int, int); 788static void wm_kmrn_writereg_locked(struct wm_softc *, int, int);
789/* SGMII */ 789/* SGMII */
790static bool wm_sgmii_uses_mdio(struct wm_softc *); 790static bool wm_sgmii_uses_mdio(struct wm_softc *);
791static int wm_sgmii_readreg(device_t, int, int); 791static int wm_sgmii_readreg(device_t, int, int);
792static void wm_sgmii_writereg(device_t, int, int, int); 792static void wm_sgmii_writereg(device_t, int, int, int);
793/* TBI related */ 793/* TBI related */
794static void wm_tbi_mediainit(struct wm_softc *); 794static void wm_tbi_mediainit(struct wm_softc *);
795static int wm_tbi_mediachange(struct ifnet *); 795static int wm_tbi_mediachange(struct ifnet *);
796static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *); 796static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
797static int wm_check_for_link(struct wm_softc *); 797static int wm_check_for_link(struct wm_softc *);
798static void wm_tbi_tick(struct wm_softc *); 798static void wm_tbi_tick(struct wm_softc *);
799/* SERDES related */ 799/* SERDES related */
800static void wm_serdes_power_up_link_82575(struct wm_softc *); 800static void wm_serdes_power_up_link_82575(struct wm_softc *);
801static int wm_serdes_mediachange(struct ifnet *); 801static int wm_serdes_mediachange(struct ifnet *);
802static void wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *); 802static void wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
803static void wm_serdes_tick(struct wm_softc *); 803static void wm_serdes_tick(struct wm_softc *);
804/* SFP related */ 804/* SFP related */
805static int wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *); 805static int wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
806static uint32_t wm_sfp_get_media_type(struct wm_softc *); 806static uint32_t wm_sfp_get_media_type(struct wm_softc *);
807 807
808/* 808/*
809 * NVM related. 809 * NVM related.
810 * Microwire, SPI (w/wo EERD) and Flash. 810 * Microwire, SPI (w/wo EERD) and Flash.
811 */ 811 */
812/* Misc functions */ 812/* Misc functions */
813static void wm_eeprom_sendbits(struct wm_softc *, uint32_t, int); 813static void wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
814static void wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int); 814static void wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
815static int wm_nvm_set_addrbits_size_eecd(struct wm_softc *); 815static int wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
816/* Microwire */ 816/* Microwire */
817static int wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *); 817static int wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
818/* SPI */ 818/* SPI */
819static int wm_nvm_ready_spi(struct wm_softc *); 819static int wm_nvm_ready_spi(struct wm_softc *);
820static int wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *); 820static int wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
821/* Reading with EERD */ 821/* Reading with EERD */
822static int wm_poll_eerd_eewr_done(struct wm_softc *, int); 822static int wm_poll_eerd_eewr_done(struct wm_softc *, int);
823static int wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *); 823static int wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
824/* Flash */ 824/* Flash */
825static int wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *, 825static int wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
826 unsigned int *); 826 unsigned int *);
827static int32_t wm_ich8_cycle_init(struct wm_softc *); 827static int32_t wm_ich8_cycle_init(struct wm_softc *);
828static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t); 828static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
829static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t, 829static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
830 uint32_t *); 830 uint32_t *);
831static int32_t wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *); 831static int32_t wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
832static int32_t wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *); 832static int32_t wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
833static int32_t wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *); 833static int32_t wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
834static int wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *); 834static int wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
835static int wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *); 835static int wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
836/* iNVM */ 836/* iNVM */
837static int wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *); 837static int wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
838static int wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *); 838static int wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
839/* Lock, detect NVM type, validate checksum and read */ 839/* Lock, detect NVM type, validate checksum and read */
840static int wm_nvm_acquire(struct wm_softc *); 840static int wm_nvm_acquire(struct wm_softc *);
841static void wm_nvm_release(struct wm_softc *); 841static void wm_nvm_release(struct wm_softc *);
842static int wm_nvm_is_onboard_eeprom(struct wm_softc *); 842static int wm_nvm_is_onboard_eeprom(struct wm_softc *);
843static int wm_nvm_get_flash_presence_i210(struct wm_softc *); 843static int wm_nvm_get_flash_presence_i210(struct wm_softc *);
844static int wm_nvm_validate_checksum(struct wm_softc *); 844static int wm_nvm_validate_checksum(struct wm_softc *);
845static void wm_nvm_version_invm(struct wm_softc *); 845static void wm_nvm_version_invm(struct wm_softc *);
846static void wm_nvm_version(struct wm_softc *); 846static void wm_nvm_version(struct wm_softc *);
847static int wm_nvm_read(struct wm_softc *, int, int, uint16_t *); 847static int wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
848 848
849/* 849/*
850 * Hardware semaphores. 850 * Hardware semaphores.
851 * Very complex... 851 * Very complex...
852 */ 852 */
853static int wm_get_null(struct wm_softc *); 853static int wm_get_null(struct wm_softc *);
854static void wm_put_null(struct wm_softc *); 854static void wm_put_null(struct wm_softc *);
855static int wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */ 855static int wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
856static void wm_put_swsm_semaphore(struct wm_softc *); 856static void wm_put_swsm_semaphore(struct wm_softc *);
857static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t); 857static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
858static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t); 858static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
859static int wm_get_phy_82575(struct wm_softc *); 859static int wm_get_phy_82575(struct wm_softc *);
860static void wm_put_phy_82575(struct wm_softc *); 860static void wm_put_phy_82575(struct wm_softc *);
861static int wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */ 861static int wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
862static void wm_put_swfwhw_semaphore(struct wm_softc *); 862static void wm_put_swfwhw_semaphore(struct wm_softc *);
863static int wm_get_swflag_ich8lan(struct wm_softc *); /* For PHY */ 863static int wm_get_swflag_ich8lan(struct wm_softc *); /* For PHY */
864static void wm_put_swflag_ich8lan(struct wm_softc *); 864static void wm_put_swflag_ich8lan(struct wm_softc *);
865static int wm_get_nvm_ich8lan(struct wm_softc *); /* For NVM */ 865static int wm_get_nvm_ich8lan(struct wm_softc *); /* For NVM */
866static void wm_put_nvm_ich8lan(struct wm_softc *); 866static void wm_put_nvm_ich8lan(struct wm_softc *);
867static int wm_get_hw_semaphore_82573(struct wm_softc *); 867static int wm_get_hw_semaphore_82573(struct wm_softc *);
868static void wm_put_hw_semaphore_82573(struct wm_softc *); 868static void wm_put_hw_semaphore_82573(struct wm_softc *);
869 869
870/* 870/*
871 * Management mode and power management related subroutines. 871 * Management mode and power management related subroutines.
872 * BMC, AMT, suspend/resume and EEE. 872 * BMC, AMT, suspend/resume and EEE.
873 */ 873 */
874#if 0 874#if 0
875static int wm_check_mng_mode(struct wm_softc *); 875static int wm_check_mng_mode(struct wm_softc *);
876static int wm_check_mng_mode_ich8lan(struct wm_softc *); 876static int wm_check_mng_mode_ich8lan(struct wm_softc *);
877static int wm_check_mng_mode_82574(struct wm_softc *); 877static int wm_check_mng_mode_82574(struct wm_softc *);
878static int wm_check_mng_mode_generic(struct wm_softc *); 878static int wm_check_mng_mode_generic(struct wm_softc *);
879#endif 879#endif
880static int wm_enable_mng_pass_thru(struct wm_softc *); 880static int wm_enable_mng_pass_thru(struct wm_softc *);
881static bool wm_phy_resetisblocked(struct wm_softc *); 881static bool wm_phy_resetisblocked(struct wm_softc *);
882static void wm_get_hw_control(struct wm_softc *); 882static void wm_get_hw_control(struct wm_softc *);
883static void wm_release_hw_control(struct wm_softc *); 883static void wm_release_hw_control(struct wm_softc *);
884static void wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool); 884static void wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
885static void wm_smbustopci(struct wm_softc *); 885static void wm_smbustopci(struct wm_softc *);
886static void wm_init_manageability(struct wm_softc *); 886static void wm_init_manageability(struct wm_softc *);
887static void wm_release_manageability(struct wm_softc *); 887static void wm_release_manageability(struct wm_softc *);
888static void wm_get_wakeup(struct wm_softc *); 888static void wm_get_wakeup(struct wm_softc *);
889static void wm_ulp_disable(struct wm_softc *); 889static void wm_ulp_disable(struct wm_softc *);
890static void wm_enable_phy_wakeup(struct wm_softc *); 890static void wm_enable_phy_wakeup(struct wm_softc *);
891static void wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *); 891static void wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
892static void wm_enable_wakeup(struct wm_softc *); 892static void wm_enable_wakeup(struct wm_softc *);
893/* LPLU (Low Power Link Up) */ 893/* LPLU (Low Power Link Up) */
894static void wm_lplu_d0_disable(struct wm_softc *); 894static void wm_lplu_d0_disable(struct wm_softc *);
895static void wm_lplu_d0_disable_pch(struct wm_softc *); 895static void wm_lplu_d0_disable_pch(struct wm_softc *);
896/* EEE */ 896/* EEE */
897static void wm_set_eee_i350(struct wm_softc *); 897static void wm_set_eee_i350(struct wm_softc *);
898 898
899/* 899/*
900 * Workarounds (mainly PHY related). 900 * Workarounds (mainly PHY related).
901 * Basically, PHY workarounds are in the PHY drivers. 901 * Basically, PHY workarounds are in the PHY drivers.
902 */ 902 */
903static void wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *); 903static void wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
904static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *); 904static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
905static void wm_hv_phy_workaround_ich8lan(struct wm_softc *); 905static void wm_hv_phy_workaround_ich8lan(struct wm_softc *);
906static void wm_lv_phy_workaround_ich8lan(struct wm_softc *); 906static void wm_lv_phy_workaround_ich8lan(struct wm_softc *);
907static int wm_k1_gig_workaround_hv(struct wm_softc *, int); 907static int wm_k1_gig_workaround_hv(struct wm_softc *, int);
908static void wm_set_mdio_slow_mode_hv(struct wm_softc *); 908static void wm_set_mdio_slow_mode_hv(struct wm_softc *);
909static void wm_configure_k1_ich8lan(struct wm_softc *, int); 909static void wm_configure_k1_ich8lan(struct wm_softc *, int);
910static void wm_reset_init_script_82575(struct wm_softc *); 910static void wm_reset_init_script_82575(struct wm_softc *);
911static void wm_reset_mdicnfg_82580(struct wm_softc *); 911static void wm_reset_mdicnfg_82580(struct wm_softc *);
912static bool wm_phy_is_accessible_pchlan(struct wm_softc *); 912static bool wm_phy_is_accessible_pchlan(struct wm_softc *);
913static void wm_toggle_lanphypc_pch_lpt(struct wm_softc *); 913static void wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
914static int wm_platform_pm_pch_lpt(struct wm_softc *, bool); 914static int wm_platform_pm_pch_lpt(struct wm_softc *, bool);
915static void wm_pll_workaround_i210(struct wm_softc *); 915static void wm_pll_workaround_i210(struct wm_softc *);
916 916
917CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc), 917CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
918 wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN); 918 wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
919 919
920/* 920/*
921 * Devices supported by this driver. 921 * Devices supported by this driver.
922 */ 922 */
923static const struct wm_product { 923static const struct wm_product {
924 pci_vendor_id_t wmp_vendor; 924 pci_vendor_id_t wmp_vendor;
925 pci_product_id_t wmp_product; 925 pci_product_id_t wmp_product;
926 const char *wmp_name; 926 const char *wmp_name;
927 wm_chip_type wmp_type; 927 wm_chip_type wmp_type;
928 uint32_t wmp_flags; 928 uint32_t wmp_flags;
929#define WMP_F_UNKNOWN WM_MEDIATYPE_UNKNOWN 929#define WMP_F_UNKNOWN WM_MEDIATYPE_UNKNOWN
930#define WMP_F_FIBER WM_MEDIATYPE_FIBER 930#define WMP_F_FIBER WM_MEDIATYPE_FIBER
931#define WMP_F_COPPER WM_MEDIATYPE_COPPER 931#define WMP_F_COPPER WM_MEDIATYPE_COPPER
932#define WMP_F_SERDES WM_MEDIATYPE_SERDES 932#define WMP_F_SERDES WM_MEDIATYPE_SERDES
933#define WMP_MEDIATYPE(x) ((x) & 0x03) 933#define WMP_MEDIATYPE(x) ((x) & 0x03)
934} wm_products[] = { 934} wm_products[] = {
935 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542, 935 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
936 "Intel i82542 1000BASE-X Ethernet", 936 "Intel i82542 1000BASE-X Ethernet",
937 WM_T_82542_2_1, WMP_F_FIBER }, 937 WM_T_82542_2_1, WMP_F_FIBER },
938 938
939 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER, 939 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER,
940 "Intel i82543GC 1000BASE-X Ethernet", 940 "Intel i82543GC 1000BASE-X Ethernet",
941 WM_T_82543, WMP_F_FIBER }, 941 WM_T_82543, WMP_F_FIBER },
942 942
943 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER, 943 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER,
944 "Intel i82543GC 1000BASE-T Ethernet", 944 "Intel i82543GC 1000BASE-T Ethernet",
945 WM_T_82543, WMP_F_COPPER }, 945 WM_T_82543, WMP_F_COPPER },
946 946
947 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER, 947 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER,
948 "Intel i82544EI 1000BASE-T Ethernet", 948 "Intel i82544EI 1000BASE-T Ethernet",
949 WM_T_82544, WMP_F_COPPER }, 949 WM_T_82544, WMP_F_COPPER },
950 950
951 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER, 951 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER,
952 "Intel i82544EI 1000BASE-X Ethernet", 952 "Intel i82544EI 1000BASE-X Ethernet",
953 WM_T_82544, WMP_F_FIBER }, 953 WM_T_82544, WMP_F_FIBER },
954 954
955 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER, 955 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER,
956 "Intel i82544GC 1000BASE-T Ethernet", 956 "Intel i82544GC 1000BASE-T Ethernet",
957 WM_T_82544, WMP_F_COPPER }, 957 WM_T_82544, WMP_F_COPPER },
958 958
959 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM, 959 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM,
960 "Intel i82544GC (LOM) 1000BASE-T Ethernet", 960 "Intel i82544GC (LOM) 1000BASE-T Ethernet",
961 WM_T_82544, WMP_F_COPPER }, 961 WM_T_82544, WMP_F_COPPER },
962 962
963 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM, 963 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM,
964 "Intel i82540EM 1000BASE-T Ethernet", 964 "Intel i82540EM 1000BASE-T Ethernet",
965 WM_T_82540, WMP_F_COPPER }, 965 WM_T_82540, WMP_F_COPPER },
966 966
967 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM, 967 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM,
968 "Intel i82540EM (LOM) 1000BASE-T Ethernet", 968 "Intel i82540EM (LOM) 1000BASE-T Ethernet",
969 WM_T_82540, WMP_F_COPPER }, 969 WM_T_82540, WMP_F_COPPER },
970 970
971 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM, 971 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM,
972 "Intel i82540EP 1000BASE-T Ethernet", 972 "Intel i82540EP 1000BASE-T Ethernet",
973 WM_T_82540, WMP_F_COPPER }, 973 WM_T_82540, WMP_F_COPPER },
974 974
975 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP, 975 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP,
976 "Intel i82540EP 1000BASE-T Ethernet", 976 "Intel i82540EP 1000BASE-T Ethernet",
977 WM_T_82540, WMP_F_COPPER }, 977 WM_T_82540, WMP_F_COPPER },
978 978
979 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP, 979 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP,
980 "Intel i82540EP 1000BASE-T Ethernet", 980 "Intel i82540EP 1000BASE-T Ethernet",
981 WM_T_82540, WMP_F_COPPER }, 981 WM_T_82540, WMP_F_COPPER },
982 982
983 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER, 983 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER,
984 "Intel i82545EM 1000BASE-T Ethernet", 984 "Intel i82545EM 1000BASE-T Ethernet",
985 WM_T_82545, WMP_F_COPPER }, 985 WM_T_82545, WMP_F_COPPER },
986 986
987 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER, 987 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER,
988 "Intel i82545GM 1000BASE-T Ethernet", 988 "Intel i82545GM 1000BASE-T Ethernet",
989 WM_T_82545_3, WMP_F_COPPER }, 989 WM_T_82545_3, WMP_F_COPPER },
990 990
991 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER, 991 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER,
992 "Intel i82545GM 1000BASE-X Ethernet", 992 "Intel i82545GM 1000BASE-X Ethernet",
993 WM_T_82545_3, WMP_F_FIBER }, 993 WM_T_82545_3, WMP_F_FIBER },
994 994
995 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES, 995 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES,
996 "Intel i82545GM Gigabit Ethernet (SERDES)", 996 "Intel i82545GM Gigabit Ethernet (SERDES)",
997 WM_T_82545_3, WMP_F_SERDES }, 997 WM_T_82545_3, WMP_F_SERDES },
998 998
999 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER, 999 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER,
1000 "Intel i82546EB 1000BASE-T Ethernet", 1000 "Intel i82546EB 1000BASE-T Ethernet",
1001 WM_T_82546, WMP_F_COPPER }, 1001 WM_T_82546, WMP_F_COPPER },
1002 1002
1003 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD, 1003 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD,
1004 "Intel i82546EB 1000BASE-T Ethernet", 1004 "Intel i82546EB 1000BASE-T Ethernet",
1005 WM_T_82546, WMP_F_COPPER }, 1005 WM_T_82546, WMP_F_COPPER },
1006 1006
1007 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER, 1007 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER,
1008 "Intel i82545EM 1000BASE-X Ethernet", 1008 "Intel i82545EM 1000BASE-X Ethernet",
1009 WM_T_82545, WMP_F_FIBER }, 1009 WM_T_82545, WMP_F_FIBER },
1010 1010
1011 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER, 1011 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER,
1012 "Intel i82546EB 1000BASE-X Ethernet", 1012 "Intel i82546EB 1000BASE-X Ethernet",
1013 WM_T_82546, WMP_F_FIBER }, 1013 WM_T_82546, WMP_F_FIBER },
1014 1014
1015 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER, 1015 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER,
1016 "Intel i82546GB 1000BASE-T Ethernet", 1016 "Intel i82546GB 1000BASE-T Ethernet",
1017 WM_T_82546_3, WMP_F_COPPER }, 1017 WM_T_82546_3, WMP_F_COPPER },
1018 1018
1019 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER, 1019 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER,
1020 "Intel i82546GB 1000BASE-X Ethernet", 1020 "Intel i82546GB 1000BASE-X Ethernet",
1021 WM_T_82546_3, WMP_F_FIBER }, 1021 WM_T_82546_3, WMP_F_FIBER },
1022 1022
1023 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES, 1023 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES,
1024 "Intel i82546GB Gigabit Ethernet (SERDES)", 1024 "Intel i82546GB Gigabit Ethernet (SERDES)",
1025 WM_T_82546_3, WMP_F_SERDES }, 1025 WM_T_82546_3, WMP_F_SERDES },
1026 1026
1027 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER, 1027 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
1028 "i82546GB quad-port Gigabit Ethernet", 1028 "i82546GB quad-port Gigabit Ethernet",
1029 WM_T_82546_3, WMP_F_COPPER }, 1029 WM_T_82546_3, WMP_F_COPPER },
1030 1030
1031 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3, 1031 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
1032 "i82546GB quad-port Gigabit Ethernet (KSP3)", 1032 "i82546GB quad-port Gigabit Ethernet (KSP3)",
1033 WM_T_82546_3, WMP_F_COPPER }, 1033 WM_T_82546_3, WMP_F_COPPER },
1034 1034
1035 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE, 1035 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE,
1036 "Intel PRO/1000MT (82546GB)", 1036 "Intel PRO/1000MT (82546GB)",
1037 WM_T_82546_3, WMP_F_COPPER }, 1037 WM_T_82546_3, WMP_F_COPPER },
1038 1038
1039 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI, 1039 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI,
1040 "Intel i82541EI 1000BASE-T Ethernet", 1040 "Intel i82541EI 1000BASE-T Ethernet",
1041 WM_T_82541, WMP_F_COPPER }, 1041 WM_T_82541, WMP_F_COPPER },
1042 1042
1043 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM, 1043 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM,
1044 "Intel i82541ER (LOM) 1000BASE-T Ethernet", 1044 "Intel i82541ER (LOM) 1000BASE-T Ethernet",
1045 WM_T_82541, WMP_F_COPPER }, 1045 WM_T_82541, WMP_F_COPPER },
1046 1046
1047 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE, 1047 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE,
1048 "Intel i82541EI Mobile 1000BASE-T Ethernet", 1048 "Intel i82541EI Mobile 1000BASE-T Ethernet",
1049 WM_T_82541, WMP_F_COPPER }, 1049 WM_T_82541, WMP_F_COPPER },
1050 1050
1051 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER, 1051 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER,
1052 "Intel i82541ER 1000BASE-T Ethernet", 1052 "Intel i82541ER 1000BASE-T Ethernet",
1053 WM_T_82541_2, WMP_F_COPPER }, 1053 WM_T_82541_2, WMP_F_COPPER },
1054 1054
1055 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI, 1055 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI,
1056 "Intel i82541GI 1000BASE-T Ethernet", 1056 "Intel i82541GI 1000BASE-T Ethernet",
1057 WM_T_82541_2, WMP_F_COPPER }, 1057 WM_T_82541_2, WMP_F_COPPER },
1058 1058
1059 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE, 1059 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE,
1060 "Intel i82541GI Mobile 1000BASE-T Ethernet", 1060 "Intel i82541GI Mobile 1000BASE-T Ethernet",
1061 WM_T_82541_2, WMP_F_COPPER }, 1061 WM_T_82541_2, WMP_F_COPPER },
1062 1062
1063 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI, 1063 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI,
1064 "Intel i82541PI 1000BASE-T Ethernet", 1064 "Intel i82541PI 1000BASE-T Ethernet",
1065 WM_T_82541_2, WMP_F_COPPER }, 1065 WM_T_82541_2, WMP_F_COPPER },
1066 1066
1067 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI, 1067 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI,
1068 "Intel i82547EI 1000BASE-T Ethernet", 1068 "Intel i82547EI 1000BASE-T Ethernet",
1069 WM_T_82547, WMP_F_COPPER }, 1069 WM_T_82547, WMP_F_COPPER },
1070 1070
1071 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE, 1071 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE,
1072 "Intel i82547EI Mobile 1000BASE-T Ethernet", 1072 "Intel i82547EI Mobile 1000BASE-T Ethernet",
1073 WM_T_82547, WMP_F_COPPER }, 1073 WM_T_82547, WMP_F_COPPER },
1074 1074
1075 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI, 1075 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI,
1076 "Intel i82547GI 1000BASE-T Ethernet", 1076 "Intel i82547GI 1000BASE-T Ethernet",
1077 WM_T_82547_2, WMP_F_COPPER }, 1077 WM_T_82547_2, WMP_F_COPPER },
1078 1078
1079 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER, 1079 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER,
1080 "Intel PRO/1000 PT (82571EB)", 1080 "Intel PRO/1000 PT (82571EB)",
1081 WM_T_82571, WMP_F_COPPER }, 1081 WM_T_82571, WMP_F_COPPER },
1082 1082
1083 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER, 1083 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER,
1084 "Intel PRO/1000 PF (82571EB)", 1084 "Intel PRO/1000 PF (82571EB)",
1085 WM_T_82571, WMP_F_FIBER }, 1085 WM_T_82571, WMP_F_FIBER },
1086 1086
@@ -12182,1610 +12182,1610 @@ printver: @@ -12182,1610 +12182,1610 @@ printver:
12182 12182
12183 wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0); 12183 wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
12184 aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0); 12184 aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
12185} 12185}
12186 12186
12187/* 12187/*
12188 * wm_nvm_read: 12188 * wm_nvm_read:
12189 * 12189 *
12190 * Read data from the serial EEPROM. 12190 * Read data from the serial EEPROM.
12191 */ 12191 */
12192static int 12192static int
12193wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data) 12193wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
12194{ 12194{
12195 int rv; 12195 int rv;
12196 12196
12197 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n", 12197 DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
12198 device_xname(sc->sc_dev), __func__)); 12198 device_xname(sc->sc_dev), __func__));
12199 12199
12200 if (sc->sc_flags & WM_F_EEPROM_INVALID) 12200 if (sc->sc_flags & WM_F_EEPROM_INVALID)
12201 return 1; 12201 return 1;
12202 12202
12203 if (wm_nvm_acquire(sc)) 12203 if (wm_nvm_acquire(sc))
12204 return 1; 12204 return 1;
12205 12205
12206 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) 12206 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
12207 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) 12207 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
12208 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) 12208 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
12209 rv = wm_nvm_read_ich8(sc, word, wordcnt, data); 12209 rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
12210 else if (sc->sc_type == WM_T_PCH_SPT) 12210 else if (sc->sc_type == WM_T_PCH_SPT)
12211 rv = wm_nvm_read_spt(sc, word, wordcnt, data); 12211 rv = wm_nvm_read_spt(sc, word, wordcnt, data);
12212 else if (sc->sc_flags & WM_F_EEPROM_INVM) 12212 else if (sc->sc_flags & WM_F_EEPROM_INVM)
12213 rv = wm_nvm_read_invm(sc, word, wordcnt, data); 12213 rv = wm_nvm_read_invm(sc, word, wordcnt, data);
12214 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR) 12214 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
12215 rv = wm_nvm_read_eerd(sc, word, wordcnt, data); 12215 rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
12216 else if (sc->sc_flags & WM_F_EEPROM_SPI) 12216 else if (sc->sc_flags & WM_F_EEPROM_SPI)
12217 rv = wm_nvm_read_spi(sc, word, wordcnt, data); 12217 rv = wm_nvm_read_spi(sc, word, wordcnt, data);
12218 else 12218 else
12219 rv = wm_nvm_read_uwire(sc, word, wordcnt, data); 12219 rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
12220 12220
12221 wm_nvm_release(sc); 12221 wm_nvm_release(sc);
12222 return rv; 12222 return rv;
12223} 12223}
12224 12224
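Illustrative sketch, not part of the diff: wm_nvm_read() above hides the chip-specific access method behind a single dispatch, so a caller passes a word offset, a word count and a destination buffer and treats any non-zero return as failure. A minimal hypothetical caller (reusing the NVM_OFF_CFG2 offset and aprint_error_dev() seen elsewhere in this file) might look like:

	uint16_t cfg2;

	/* Read one 16-bit word from the NVM; non-zero means the read failed. */
	if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2) != 0) {
		aprint_error_dev(sc->sc_dev, "NVM read failed\n");
		return;
	}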
12225/* 12225/*
12226 * Hardware semaphores. 12226 * Hardware semaphores.
12227 * Very complex... 12227 * Very complex...
12228 */ 12228 */
12229 12229
12230static int 12230static int
12231wm_get_null(struct wm_softc *sc) 12231wm_get_null(struct wm_softc *sc)
12232{ 12232{
12233 12233
12234 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n", 12234 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12235 device_xname(sc->sc_dev), __func__)); 12235 device_xname(sc->sc_dev), __func__));
12236 return 0; 12236 return 0;
12237} 12237}
12238 12238
12239static void 12239static void
12240wm_put_null(struct wm_softc *sc) 12240wm_put_null(struct wm_softc *sc)
12241{ 12241{
12242 12242
12243 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n", 12243 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12244 device_xname(sc->sc_dev), __func__)); 12244 device_xname(sc->sc_dev), __func__));
12245 return; 12245 return;
12246} 12246}
12247 12247
12248/* 12248/*
12249 * Get hardware semaphore. 12249 * Get hardware semaphore.
12250 * Same as e1000_get_hw_semaphore_generic() 12250 * Same as e1000_get_hw_semaphore_generic()
12251 */ 12251 */
12252static int 12252static int
12253wm_get_swsm_semaphore(struct wm_softc *sc) 12253wm_get_swsm_semaphore(struct wm_softc *sc)
12254{ 12254{
12255 int32_t timeout; 12255 int32_t timeout;
12256 uint32_t swsm; 12256 uint32_t swsm;
12257 12257
12258 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n", 12258 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12259 device_xname(sc->sc_dev), __func__)); 12259 device_xname(sc->sc_dev), __func__));
12260 KASSERT(sc->sc_nvm_wordsize > 0); 12260 KASSERT(sc->sc_nvm_wordsize > 0);
12261 12261
12262 /* Get the SW semaphore. */ 12262 /* Get the SW semaphore. */
12263 timeout = sc->sc_nvm_wordsize + 1; 12263 timeout = sc->sc_nvm_wordsize + 1;
12264 while (timeout) { 12264 while (timeout) {
12265 swsm = CSR_READ(sc, WMREG_SWSM); 12265 swsm = CSR_READ(sc, WMREG_SWSM);
12266 12266
12267 if ((swsm & SWSM_SMBI) == 0) 12267 if ((swsm & SWSM_SMBI) == 0)
12268 break; 12268 break;
12269 12269
12270 delay(50); 12270 delay(50);
12271 timeout--; 12271 timeout--;
12272 } 12272 }
12273 12273
12274 if (timeout == 0) { 12274 if (timeout == 0) {
12275 aprint_error_dev(sc->sc_dev, 12275 aprint_error_dev(sc->sc_dev,
12276 "could not acquire SWSM SMBI\n"); 12276 "could not acquire SWSM SMBI\n");
12277 return 1; 12277 return 1;
12278 } 12278 }
12279 12279
12280 /* Get the FW semaphore. */ 12280 /* Get the FW semaphore. */
12281 timeout = sc->sc_nvm_wordsize + 1; 12281 timeout = sc->sc_nvm_wordsize + 1;
12282 while (timeout) { 12282 while (timeout) {
12283 swsm = CSR_READ(sc, WMREG_SWSM); 12283 swsm = CSR_READ(sc, WMREG_SWSM);
12284 swsm |= SWSM_SWESMBI; 12284 swsm |= SWSM_SWESMBI;
12285 CSR_WRITE(sc, WMREG_SWSM, swsm); 12285 CSR_WRITE(sc, WMREG_SWSM, swsm);
12286 /* If we managed to set the bit we got the semaphore. */ 12286 /* If we managed to set the bit we got the semaphore. */
12287 swsm = CSR_READ(sc, WMREG_SWSM); 12287 swsm = CSR_READ(sc, WMREG_SWSM);
12288 if (swsm & SWSM_SWESMBI) 12288 if (swsm & SWSM_SWESMBI)
12289 break; 12289 break;
12290 12290
12291 delay(50); 12291 delay(50);
12292 timeout--; 12292 timeout--;
12293 } 12293 }
12294 12294
12295 if (timeout == 0) { 12295 if (timeout == 0) {
12296 aprint_error_dev(sc->sc_dev, 12296 aprint_error_dev(sc->sc_dev,
12297 "could not acquire SWSM SWESMBI\n"); 12297 "could not acquire SWSM SWESMBI\n");
12298 /* Release semaphores */ 12298 /* Release semaphores */
12299 wm_put_swsm_semaphore(sc); 12299 wm_put_swsm_semaphore(sc);
12300 return 1; 12300 return 1;
12301 } 12301 }
12302 return 0; 12302 return 0;
12303} 12303}
12304 12304
12305/* 12305/*
12306 * Put hardware semaphore. 12306 * Put hardware semaphore.
12307 * Same as e1000_put_hw_semaphore_generic() 12307 * Same as e1000_put_hw_semaphore_generic()
12308 */ 12308 */
12309static void 12309static void
12310wm_put_swsm_semaphore(struct wm_softc *sc) 12310wm_put_swsm_semaphore(struct wm_softc *sc)
12311{ 12311{
12312 uint32_t swsm; 12312 uint32_t swsm;
12313 12313
12314 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n", 12314 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12315 device_xname(sc->sc_dev), __func__)); 12315 device_xname(sc->sc_dev), __func__));
12316 12316
12317 swsm = CSR_READ(sc, WMREG_SWSM); 12317 swsm = CSR_READ(sc, WMREG_SWSM);
12318 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI); 12318 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
12319 CSR_WRITE(sc, WMREG_SWSM, swsm); 12319 CSR_WRITE(sc, WMREG_SWSM, swsm);
12320} 12320}
12321 12321
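Illustrative sketch, not part of the diff: wm_get_swsm_semaphore() and wm_put_swsm_semaphore() are intended to bracket a short access to the shared resource, with the caller backing off when acquisition fails and always releasing what it acquired. A hypothetical caller:

	if (wm_get_swsm_semaphore(sc) != 0)
		return 1;	/* semaphore not acquired; let the caller retry */

	/* ... access the SWSM-protected resource here ... */

	wm_put_swsm_semaphore(sc);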
12322/* 12322/*
12323 * Get SW/FW semaphore. 12323 * Get SW/FW semaphore.
12324 * Same as e1000_acquire_swfw_sync_82575(). 12324 * Same as e1000_acquire_swfw_sync_82575().
12325 */ 12325 */
12326static int 12326static int
12327wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask) 12327wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
12328{ 12328{
12329 uint32_t swfw_sync; 12329 uint32_t swfw_sync;
12330 uint32_t swmask = mask << SWFW_SOFT_SHIFT; 12330 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
12331 uint32_t fwmask = mask << SWFW_FIRM_SHIFT; 12331 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
12332 int timeout = 200; 12332 int timeout = 200;
12333 12333
12334 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n", 12334 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12335 device_xname(sc->sc_dev), __func__)); 12335 device_xname(sc->sc_dev), __func__));
12336 KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0); 12336 KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
12337 12337
12338 for (timeout = 0; timeout < 200; timeout++) { 12338 for (timeout = 0; timeout < 200; timeout++) {
12339 if (sc->sc_flags & WM_F_LOCK_SWSM) { 12339 if (sc->sc_flags & WM_F_LOCK_SWSM) {
12340 if (wm_get_swsm_semaphore(sc)) { 12340 if (wm_get_swsm_semaphore(sc)) {
12341 aprint_error_dev(sc->sc_dev, 12341 aprint_error_dev(sc->sc_dev,
12342 "%s: failed to get semaphore\n", 12342 "%s: failed to get semaphore\n",
12343 __func__); 12343 __func__);
12344 return 1; 12344 return 1;
12345 } 12345 }
12346 } 12346 }
12347 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC); 12347 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
12348 if ((swfw_sync & (swmask | fwmask)) == 0) { 12348 if ((swfw_sync & (swmask | fwmask)) == 0) {
12349 swfw_sync |= swmask; 12349 swfw_sync |= swmask;
12350 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync); 12350 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
12351 if (sc->sc_flags & WM_F_LOCK_SWSM) 12351 if (sc->sc_flags & WM_F_LOCK_SWSM)
12352 wm_put_swsm_semaphore(sc); 12352 wm_put_swsm_semaphore(sc);
12353 return 0; 12353 return 0;
12354 } 12354 }
12355 if (sc->sc_flags & WM_F_LOCK_SWSM) 12355 if (sc->sc_flags & WM_F_LOCK_SWSM)
12356 wm_put_swsm_semaphore(sc); 12356 wm_put_swsm_semaphore(sc);
12357 delay(5000); 12357 delay(5000);
12358 } 12358 }
12359 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n", 12359 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
12360 device_xname(sc->sc_dev), mask, swfw_sync); 12360 device_xname(sc->sc_dev), mask, swfw_sync);
12361 return 1; 12361 return 1;
12362} 12362}
12363 12363
12364static void 12364static void
12365wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask) 12365wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
12366{ 12366{
12367 uint32_t swfw_sync; 12367 uint32_t swfw_sync;
12368 12368
12369 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n", 12369 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12370 device_xname(sc->sc_dev), __func__)); 12370 device_xname(sc->sc_dev), __func__));
12371 KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0); 12371 KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
12372 12372
12373 if (sc->sc_flags & WM_F_LOCK_SWSM) { 12373 if (sc->sc_flags & WM_F_LOCK_SWSM) {
12374 while (wm_get_swsm_semaphore(sc) != 0) 12374 while (wm_get_swsm_semaphore(sc) != 0)
12375 continue; 12375 continue;
12376 } 12376 }
12377 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC); 12377 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
12378 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT); 12378 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
12379 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync); 12379 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
12380 if (sc->sc_flags & WM_F_LOCK_SWSM) 12380 if (sc->sc_flags & WM_F_LOCK_SWSM)
12381 wm_put_swsm_semaphore(sc); 12381 wm_put_swsm_semaphore(sc);
12382} 12382}
12383 12383
12384static int 12384static int
12385wm_get_phy_82575(struct wm_softc *sc) 12385wm_get_phy_82575(struct wm_softc *sc)
12386{ 12386{
12387 12387
12388 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n", 12388 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12389 device_xname(sc->sc_dev), __func__)); 12389 device_xname(sc->sc_dev), __func__));
12390 return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]); 12390 return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
12391} 12391}
12392 12392
12393static void 12393static void
12394wm_put_phy_82575(struct wm_softc *sc) 12394wm_put_phy_82575(struct wm_softc *sc)
12395{ 12395{
12396 12396
12397 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n", 12397 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12398 device_xname(sc->sc_dev), __func__)); 12398 device_xname(sc->sc_dev), __func__));
12399 return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]); 12399 return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
12400} 12400}
12401 12401
12402static int 12402static int
12403wm_get_swfwhw_semaphore(struct wm_softc *sc) 12403wm_get_swfwhw_semaphore(struct wm_softc *sc)
12404{ 12404{
12405 uint32_t ext_ctrl; 12405 uint32_t ext_ctrl;
12406 int timeout = 200; 12406 int timeout = 200;
12407 12407
12408 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n", 12408 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12409 device_xname(sc->sc_dev), __func__)); 12409 device_xname(sc->sc_dev), __func__));
12410 12410
12411 mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */ 12411 mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
12412 for (timeout = 0; timeout < 200; timeout++) { 12412 for (timeout = 0; timeout < 200; timeout++) {
12413 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR); 12413 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
12414 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP; 12414 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
12415 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl); 12415 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
12416 12416
12417 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR); 12417 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
12418 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) 12418 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
12419 return 0; 12419 return 0;
12420 delay(5000); 12420 delay(5000);
12421 } 12421 }
12422 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n", 12422 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
12423 device_xname(sc->sc_dev), ext_ctrl); 12423 device_xname(sc->sc_dev), ext_ctrl);
12424 mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */ 12424 mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
12425 return 1; 12425 return 1;
12426} 12426}
12427 12427
12428static void 12428static void
12429wm_put_swfwhw_semaphore(struct wm_softc *sc) 12429wm_put_swfwhw_semaphore(struct wm_softc *sc)
12430{ 12430{
12431 uint32_t ext_ctrl; 12431 uint32_t ext_ctrl;
12432 12432
12433 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n", 12433 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12434 device_xname(sc->sc_dev), __func__)); 12434 device_xname(sc->sc_dev), __func__));
12435 12435
12436 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR); 12436 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
12437 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP; 12437 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
12438 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl); 12438 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
12439 12439
12440 mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */ 12440 mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
12441} 12441}
12442 12442
12443static int 12443static int
12444wm_get_swflag_ich8lan(struct wm_softc *sc) 12444wm_get_swflag_ich8lan(struct wm_softc *sc)
12445{ 12445{
12446 uint32_t ext_ctrl; 12446 uint32_t ext_ctrl;
12447 int timeout; 12447 int timeout;
12448 12448
12449 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n", 12449 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12450 device_xname(sc->sc_dev), __func__)); 12450 device_xname(sc->sc_dev), __func__));
12451 mutex_enter(sc->sc_ich_phymtx); 12451 mutex_enter(sc->sc_ich_phymtx);
12452 for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) { 12452 for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
12453 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR); 12453 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
12454 if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0) 12454 if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
12455 break; 12455 break;
12456 delay(1000); 12456 delay(1000);
12457 } 12457 }
12458 if (timeout >= WM_PHY_CFG_TIMEOUT) { 12458 if (timeout >= WM_PHY_CFG_TIMEOUT) {
12459 printf("%s: SW has already locked the resource\n",  12459 printf("%s: SW has already locked the resource\n",
12460 device_xname(sc->sc_dev)); 12460 device_xname(sc->sc_dev));
12461 goto out; 12461 goto out;
12462 } 12462 }
12463 12463
12464 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP; 12464 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
12465 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl); 12465 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
12466 for (timeout = 0; timeout < 1000; timeout++) { 12466 for (timeout = 0; timeout < 1000; timeout++) {
12467 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR); 12467 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
12468 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) 12468 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
12469 break; 12469 break;
12470 delay(1000); 12470 delay(1000);
12471 } 12471 }
12472 if (timeout >= 1000) { 12472 if (timeout >= 1000) {
12473 printf("%s: failed to acquire semaphore\n", 12473 printf("%s: failed to acquire semaphore\n",
12474 device_xname(sc->sc_dev)); 12474 device_xname(sc->sc_dev));
12475 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP; 12475 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
12476 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl); 12476 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
12477 goto out; 12477 goto out;
12478 } 12478 }
12479 return 0; 12479 return 0;
12480 12480
12481out: 12481out:
12482 mutex_exit(sc->sc_ich_phymtx); 12482 mutex_exit(sc->sc_ich_phymtx);
12483 return 1; 12483 return 1;
12484} 12484}
12485 12485
12486static void 12486static void
12487wm_put_swflag_ich8lan(struct wm_softc *sc) 12487wm_put_swflag_ich8lan(struct wm_softc *sc)
12488{ 12488{
12489 uint32_t ext_ctrl; 12489 uint32_t ext_ctrl;
12490 12490
12491 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n", 12491 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12492 device_xname(sc->sc_dev), __func__)); 12492 device_xname(sc->sc_dev), __func__));
12493 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR); 12493 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
12494 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) { 12494 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
12495 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP; 12495 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
12496 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl); 12496 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
12497 } else { 12497 } else {
12498 printf("%s: Semaphore unexpectedly released\n", 12498 printf("%s: Semaphore unexpectedly released\n",
12499 device_xname(sc->sc_dev)); 12499 device_xname(sc->sc_dev));
12500 } 12500 }
12501 12501
12502 mutex_exit(sc->sc_ich_phymtx); 12502 mutex_exit(sc->sc_ich_phymtx);
12503} 12503}
12504 12504
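Illustrative sketch, not part of the diff: on ICH/PCH parts the swflag pair above guards MDIO/PHY access, so a hypothetical caller brackets its register accesses and skips them entirely when ownership cannot be obtained:

	if (wm_get_swflag_ich8lan(sc) != 0)
		return;		/* ownership not obtained; try again later */

	/* ... PHY register reads and writes go here ... */

	wm_put_swflag_ich8lan(sc);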
12505static int 12505static int
12506wm_get_nvm_ich8lan(struct wm_softc *sc) 12506wm_get_nvm_ich8lan(struct wm_softc *sc)
12507{ 12507{
12508 12508
12509 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n", 12509 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12510 device_xname(sc->sc_dev), __func__)); 12510 device_xname(sc->sc_dev), __func__));
12511 mutex_enter(sc->sc_ich_nvmmtx); 12511 mutex_enter(sc->sc_ich_nvmmtx);
12512 12512
12513 return 0; 12513 return 0;
12514} 12514}
12515 12515
12516static void 12516static void
12517wm_put_nvm_ich8lan(struct wm_softc *sc) 12517wm_put_nvm_ich8lan(struct wm_softc *sc)
12518{ 12518{
12519 12519
12520 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n", 12520 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12521 device_xname(sc->sc_dev), __func__)); 12521 device_xname(sc->sc_dev), __func__));
12522 mutex_exit(sc->sc_ich_nvmmtx); 12522 mutex_exit(sc->sc_ich_nvmmtx);
12523} 12523}
12524 12524
12525static int 12525static int
12526wm_get_hw_semaphore_82573(struct wm_softc *sc) 12526wm_get_hw_semaphore_82573(struct wm_softc *sc)
12527{ 12527{
12528 int i = 0; 12528 int i = 0;
12529 uint32_t reg; 12529 uint32_t reg;
12530 12530
12531 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n", 12531 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12532 device_xname(sc->sc_dev), __func__)); 12532 device_xname(sc->sc_dev), __func__));
12533 12533
12534 reg = CSR_READ(sc, WMREG_EXTCNFCTR); 12534 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
12535 do { 12535 do {
12536 CSR_WRITE(sc, WMREG_EXTCNFCTR, 12536 CSR_WRITE(sc, WMREG_EXTCNFCTR,
12537 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP); 12537 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
12538 reg = CSR_READ(sc, WMREG_EXTCNFCTR); 12538 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
12539 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0) 12539 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
12540 break; 12540 break;
12541 delay(2*1000); 12541 delay(2*1000);
12542 i++; 12542 i++;
12543 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT); 12543 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
12544 12544
12545 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) { 12545 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
12546 wm_put_hw_semaphore_82573(sc); 12546 wm_put_hw_semaphore_82573(sc);
12547 log(LOG_ERR, "%s: Driver can't access the PHY\n", 12547 log(LOG_ERR, "%s: Driver can't access the PHY\n",
12548 device_xname(sc->sc_dev)); 12548 device_xname(sc->sc_dev));
12549 return -1; 12549 return -1;
12550 } 12550 }
12551 12551
12552 return 0; 12552 return 0;
12553} 12553}
12554 12554
12555static void 12555static void
12556wm_put_hw_semaphore_82573(struct wm_softc *sc) 12556wm_put_hw_semaphore_82573(struct wm_softc *sc)
12557{ 12557{
12558 uint32_t reg; 12558 uint32_t reg;
12559 12559
12560 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n", 12560 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12561 device_xname(sc->sc_dev), __func__)); 12561 device_xname(sc->sc_dev), __func__));
12562 12562
12563 reg = CSR_READ(sc, WMREG_EXTCNFCTR); 12563 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
12564 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP; 12564 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
12565 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg); 12565 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
12566} 12566}
12567 12567
12568/* 12568/*
12569 * Management mode and power management related subroutines. 12569 * Management mode and power management related subroutines.
12570 * BMC, AMT, suspend/resume and EEE. 12570 * BMC, AMT, suspend/resume and EEE.
12571 */ 12571 */
12572 12572
12573#ifdef WM_WOL 12573#ifdef WM_WOL
12574static int 12574static int
12575wm_check_mng_mode(struct wm_softc *sc) 12575wm_check_mng_mode(struct wm_softc *sc)
12576{ 12576{
12577 int rv; 12577 int rv;
12578 12578
12579 switch (sc->sc_type) { 12579 switch (sc->sc_type) {
12580 case WM_T_ICH8: 12580 case WM_T_ICH8:
12581 case WM_T_ICH9: 12581 case WM_T_ICH9:
12582 case WM_T_ICH10: 12582 case WM_T_ICH10:
12583 case WM_T_PCH: 12583 case WM_T_PCH:
12584 case WM_T_PCH2: 12584 case WM_T_PCH2:
12585 case WM_T_PCH_LPT: 12585 case WM_T_PCH_LPT:
12586 case WM_T_PCH_SPT: 12586 case WM_T_PCH_SPT:
12587 rv = wm_check_mng_mode_ich8lan(sc); 12587 rv = wm_check_mng_mode_ich8lan(sc);
12588 break; 12588 break;
12589 case WM_T_82574: 12589 case WM_T_82574:
12590 case WM_T_82583: 12590 case WM_T_82583:
12591 rv = wm_check_mng_mode_82574(sc); 12591 rv = wm_check_mng_mode_82574(sc);
12592 break; 12592 break;
12593 case WM_T_82571: 12593 case WM_T_82571:
12594 case WM_T_82572: 12594 case WM_T_82572:
12595 case WM_T_82573: 12595 case WM_T_82573:
12596 case WM_T_80003: 12596 case WM_T_80003:
12597 rv = wm_check_mng_mode_generic(sc); 12597 rv = wm_check_mng_mode_generic(sc);
12598 break; 12598 break;
12599 default: 12599 default:
12600 /* nothing to do */ 12600 /* nothing to do */
12601 rv = 0; 12601 rv = 0;
12602 break; 12602 break;
12603 } 12603 }
12604 12604
12605 return rv; 12605 return rv;
12606} 12606}
12607 12607
12608static int 12608static int
12609wm_check_mng_mode_ich8lan(struct wm_softc *sc) 12609wm_check_mng_mode_ich8lan(struct wm_softc *sc)
12610{ 12610{
12611 uint32_t fwsm; 12611 uint32_t fwsm;
12612 12612
12613 fwsm = CSR_READ(sc, WMREG_FWSM); 12613 fwsm = CSR_READ(sc, WMREG_FWSM);
12614 12614
12615 if (((fwsm & FWSM_FW_VALID) != 0) 12615 if (((fwsm & FWSM_FW_VALID) != 0)
12616 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE)) 12616 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
12617 return 1; 12617 return 1;
12618 12618
12619 return 0; 12619 return 0;
12620} 12620}
12621 12621
12622static int 12622static int
12623wm_check_mng_mode_82574(struct wm_softc *sc) 12623wm_check_mng_mode_82574(struct wm_softc *sc)
12624{ 12624{
12625 uint16_t data; 12625 uint16_t data;
12626 12626
12627 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data); 12627 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
12628 12628
12629 if ((data & NVM_CFG2_MNGM_MASK) != 0) 12629 if ((data & NVM_CFG2_MNGM_MASK) != 0)
12630 return 1; 12630 return 1;
12631 12631
12632 return 0; 12632 return 0;
12633} 12633}
12634 12634
12635static int 12635static int
12636wm_check_mng_mode_generic(struct wm_softc *sc) 12636wm_check_mng_mode_generic(struct wm_softc *sc)
12637{ 12637{
12638 uint32_t fwsm; 12638 uint32_t fwsm;
12639 12639
12640 fwsm = CSR_READ(sc, WMREG_FWSM); 12640 fwsm = CSR_READ(sc, WMREG_FWSM);
12641 12641
12642 if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE) 12642 if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
12643 return 1; 12643 return 1;
12644 12644
12645 return 0; 12645 return 0;
12646} 12646}
12647#endif /* WM_WOL */ 12647#endif /* WM_WOL */
12648 12648
12649static int 12649static int
12650wm_enable_mng_pass_thru(struct wm_softc *sc) 12650wm_enable_mng_pass_thru(struct wm_softc *sc)
12651{ 12651{
12652 uint32_t manc, fwsm, factps; 12652 uint32_t manc, fwsm, factps;
12653 12653
12654 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0) 12654 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
12655 return 0; 12655 return 0;
12656 12656
12657 manc = CSR_READ(sc, WMREG_MANC); 12657 manc = CSR_READ(sc, WMREG_MANC);
12658 12658
12659 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n", 12659 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
12660 device_xname(sc->sc_dev), manc)); 12660 device_xname(sc->sc_dev), manc));
12661 if ((manc & MANC_RECV_TCO_EN) == 0) 12661 if ((manc & MANC_RECV_TCO_EN) == 0)
12662 return 0; 12662 return 0;
12663 12663
12664 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) { 12664 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
12665 fwsm = CSR_READ(sc, WMREG_FWSM); 12665 fwsm = CSR_READ(sc, WMREG_FWSM);
12666 factps = CSR_READ(sc, WMREG_FACTPS); 12666 factps = CSR_READ(sc, WMREG_FACTPS);
12667 if (((factps & FACTPS_MNGCG) == 0) 12667 if (((factps & FACTPS_MNGCG) == 0)
12668 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE)) 12668 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
12669 return 1; 12669 return 1;
12670 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){ 12670 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
12671 uint16_t data; 12671 uint16_t data;
12672 12672
12673 factps = CSR_READ(sc, WMREG_FACTPS); 12673 factps = CSR_READ(sc, WMREG_FACTPS);
12674 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data); 12674 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
12675 DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n", 12675 DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
12676 device_xname(sc->sc_dev), factps, data)); 12676 device_xname(sc->sc_dev), factps, data));
12677 if (((factps & FACTPS_MNGCG) == 0) 12677 if (((factps & FACTPS_MNGCG) == 0)
12678 && ((data & NVM_CFG2_MNGM_MASK) 12678 && ((data & NVM_CFG2_MNGM_MASK)
12679 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT))) 12679 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
12680 return 1; 12680 return 1;
12681 } else if (((manc & MANC_SMBUS_EN) != 0) 12681 } else if (((manc & MANC_SMBUS_EN) != 0)
12682 && ((manc & MANC_ASF_EN) == 0)) 12682 && ((manc & MANC_ASF_EN) == 0))
12683 return 1; 12683 return 1;
12684 12684
12685 return 0; 12685 return 0;
12686} 12686}
12687 12687
12688static bool 12688static bool
12689wm_phy_resetisblocked(struct wm_softc *sc) 12689wm_phy_resetisblocked(struct wm_softc *sc)
12690{ 12690{
12691 bool blocked = false; 12691 bool blocked = false;
12692 uint32_t reg; 12692 uint32_t reg;
12693 int i = 0; 12693 int i = 0;
12694 12694
12695 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", 12695 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12696 device_xname(sc->sc_dev), __func__)); 12696 device_xname(sc->sc_dev), __func__));
12697 12697
12698 switch (sc->sc_type) { 12698 switch (sc->sc_type) {
12699 case WM_T_ICH8: 12699 case WM_T_ICH8:
12700 case WM_T_ICH9: 12700 case WM_T_ICH9:
12701 case WM_T_ICH10: 12701 case WM_T_ICH10:
12702 case WM_T_PCH: 12702 case WM_T_PCH:
12703 case WM_T_PCH2: 12703 case WM_T_PCH2:
12704 case WM_T_PCH_LPT: 12704 case WM_T_PCH_LPT:
12705 case WM_T_PCH_SPT: 12705 case WM_T_PCH_SPT:
12706 do { 12706 do {
12707 reg = CSR_READ(sc, WMREG_FWSM); 12707 reg = CSR_READ(sc, WMREG_FWSM);
12708 if ((reg & FWSM_RSPCIPHY) == 0) { 12708 if ((reg & FWSM_RSPCIPHY) == 0) {
12709 blocked = true; 12709 blocked = true;
12710 delay(10*1000); 12710 delay(10*1000);
12711 continue; 12711 continue;
12712 } 12712 }
12713 blocked = false; 12713 blocked = false;
12714 } while (blocked && (i++ < 30)); 12714 } while (blocked && (i++ < 30));
12715 return blocked; 12715 return blocked;
12716 break; 12716 break;
12717 case WM_T_82571: 12717 case WM_T_82571:
12718 case WM_T_82572: 12718 case WM_T_82572:
12719 case WM_T_82573: 12719 case WM_T_82573:
12720 case WM_T_82574: 12720 case WM_T_82574:
12721 case WM_T_82583: 12721 case WM_T_82583:
12722 case WM_T_80003: 12722 case WM_T_80003:
12723 reg = CSR_READ(sc, WMREG_MANC); 12723 reg = CSR_READ(sc, WMREG_MANC);
12724 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0) 12724 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
12725 return true; 12725 return true;
12726 else 12726 else
12727 return false; 12727 return false;
12728 break; 12728 break;
12729 default: 12729 default:
12730 /* no problem */ 12730 /* no problem */
12731 break; 12731 break;
12732 } 12732 }
12733 12733
12734 return false; 12734 return false;
12735} 12735}
12736 12736
12737static void 12737static void
12738wm_get_hw_control(struct wm_softc *sc) 12738wm_get_hw_control(struct wm_softc *sc)
12739{ 12739{
12740 uint32_t reg; 12740 uint32_t reg;
12741 12741
12742 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n", 12742 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12743 device_xname(sc->sc_dev), __func__)); 12743 device_xname(sc->sc_dev), __func__));
12744 12744
12745 if (sc->sc_type == WM_T_82573) { 12745 if (sc->sc_type == WM_T_82573) {
12746 reg = CSR_READ(sc, WMREG_SWSM); 12746 reg = CSR_READ(sc, WMREG_SWSM);
12747 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD); 12747 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
12748 } else if (sc->sc_type >= WM_T_82571) { 12748 } else if (sc->sc_type >= WM_T_82571) {
12749 reg = CSR_READ(sc, WMREG_CTRL_EXT); 12749 reg = CSR_READ(sc, WMREG_CTRL_EXT);
12750 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD); 12750 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
12751 } 12751 }
12752} 12752}
12753 12753
12754static void 12754static void
12755wm_release_hw_control(struct wm_softc *sc) 12755wm_release_hw_control(struct wm_softc *sc)
12756{ 12756{
12757 uint32_t reg; 12757 uint32_t reg;
12758 12758
12759 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n", 12759 DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12760 device_xname(sc->sc_dev), __func__)); 12760 device_xname(sc->sc_dev), __func__));
12761 12761
12762 if (sc->sc_type == WM_T_82573) { 12762 if (sc->sc_type == WM_T_82573) {
12763 reg = CSR_READ(sc, WMREG_SWSM); 12763 reg = CSR_READ(sc, WMREG_SWSM);
12764 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD); 12764 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
12765 } else if (sc->sc_type >= WM_T_82571) { 12765 } else if (sc->sc_type >= WM_T_82571) {
12766 reg = CSR_READ(sc, WMREG_CTRL_EXT); 12766 reg = CSR_READ(sc, WMREG_CTRL_EXT);
12767 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD); 12767 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
12768 } 12768 }
12769} 12769}
12770 12770
12771static void 12771static void
12772wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate) 12772wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
12773{ 12773{
12774 uint32_t reg; 12774 uint32_t reg;
12775 12775
12776 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", 12776 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12777 device_xname(sc->sc_dev), __func__)); 12777 device_xname(sc->sc_dev), __func__));
12778 12778
12779 if (sc->sc_type < WM_T_PCH2) 12779 if (sc->sc_type < WM_T_PCH2)
12780 return; 12780 return;
12781 12781
12782 reg = CSR_READ(sc, WMREG_EXTCNFCTR); 12782 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
12783 12783
12784 if (gate) 12784 if (gate)
12785 reg |= EXTCNFCTR_GATE_PHY_CFG; 12785 reg |= EXTCNFCTR_GATE_PHY_CFG;
12786 else 12786 else
12787 reg &= ~EXTCNFCTR_GATE_PHY_CFG; 12787 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
12788 12788
12789 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg); 12789 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
12790} 12790}
12791 12791
12792static void 12792static void
12793wm_smbustopci(struct wm_softc *sc) 12793wm_smbustopci(struct wm_softc *sc)
12794{ 12794{
12795 uint32_t fwsm, reg; 12795 uint32_t fwsm, reg;
12796 int rv = 0; 12796 int rv = 0;
12797 12797
12798 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", 12798 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12799 device_xname(sc->sc_dev), __func__)); 12799 device_xname(sc->sc_dev), __func__));
12800 12800
12801 /* Gate automatic PHY configuration by hardware on non-managed 82579 */ 12801 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
12802 wm_gate_hw_phy_config_ich8lan(sc, true); 12802 wm_gate_hw_phy_config_ich8lan(sc, true);
12803 12803
12804 /* Disable ULP */ 12804 /* Disable ULP */
12805 wm_ulp_disable(sc); 12805 wm_ulp_disable(sc);
12806 12806
12807 /* Acquire PHY semaphore */ 12807 /* Acquire PHY semaphore */
12808 sc->phy.acquire(sc); 12808 sc->phy.acquire(sc);
12809 12809
12810 fwsm = CSR_READ(sc, WMREG_FWSM); 12810 fwsm = CSR_READ(sc, WMREG_FWSM);
12811 switch (sc->sc_type) { 12811 switch (sc->sc_type) {
12812 case WM_T_PCH_LPT: 12812 case WM_T_PCH_LPT:
12813 case WM_T_PCH_SPT: 12813 case WM_T_PCH_SPT:
12814 if (wm_phy_is_accessible_pchlan(sc)) 12814 if (wm_phy_is_accessible_pchlan(sc))
12815 break; 12815 break;
12816 12816
12817 reg = CSR_READ(sc, WMREG_CTRL_EXT); 12817 reg = CSR_READ(sc, WMREG_CTRL_EXT);
12818 reg |= CTRL_EXT_FORCE_SMBUS; 12818 reg |= CTRL_EXT_FORCE_SMBUS;
12819 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 12819 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12820#if 0 12820#if 0
12821 /* XXX Isn't this required??? */ 12821 /* XXX Isn't this required??? */
12822 CSR_WRITE_FLUSH(sc); 12822 CSR_WRITE_FLUSH(sc);
12823#endif 12823#endif
12824 delay(50 * 1000); 12824 delay(50 * 1000);
12825 /* FALLTHROUGH */ 12825 /* FALLTHROUGH */
12826 case WM_T_PCH2: 12826 case WM_T_PCH2:
12827 if (wm_phy_is_accessible_pchlan(sc) == true) 12827 if (wm_phy_is_accessible_pchlan(sc) == true)
12828 break; 12828 break;
12829 /* FALLTHROUGH */ 12829 /* FALLTHROUGH */
12830 case WM_T_PCH: 12830 case WM_T_PCH:
12831 if (sc->sc_type == WM_T_PCH) 12831 if (sc->sc_type == WM_T_PCH)
12832 if ((fwsm & FWSM_FW_VALID) != 0) 12832 if ((fwsm & FWSM_FW_VALID) != 0)
12833 break; 12833 break;
12834 12834
12835 if (wm_phy_resetisblocked(sc) == true) { 12835 if (wm_phy_resetisblocked(sc) == true) {
12836 printf("XXX reset is blocked(3)\n"); 12836 printf("XXX reset is blocked(3)\n");
12837 break; 12837 break;
12838 } 12838 }
12839 12839
12840 wm_toggle_lanphypc_pch_lpt(sc); 12840 wm_toggle_lanphypc_pch_lpt(sc);
12841 12841
12842 if (sc->sc_type >= WM_T_PCH_LPT) { 12842 if (sc->sc_type >= WM_T_PCH_LPT) {
12843 if (wm_phy_is_accessible_pchlan(sc) == true) 12843 if (wm_phy_is_accessible_pchlan(sc) == true)
12844 break; 12844 break;
12845 12845
12846 reg = CSR_READ(sc, WMREG_CTRL_EXT); 12846 reg = CSR_READ(sc, WMREG_CTRL_EXT);
12847 reg &= ~CTRL_EXT_FORCE_SMBUS; 12847 reg &= ~CTRL_EXT_FORCE_SMBUS;
12848 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 12848 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12849 12849
12850 if (wm_phy_is_accessible_pchlan(sc) == true) 12850 if (wm_phy_is_accessible_pchlan(sc) == true)
12851 break; 12851 break;
12852 rv = -1; 12852 rv = -1;
12853 } 12853 }
12854 break; 12854 break;
12855 default: 12855 default:
12856 break; 12856 break;
12857 } 12857 }
12858 12858
12859 /* Release semaphore */ 12859 /* Release semaphore */
12860 sc->phy.release(sc); 12860 sc->phy.release(sc);
12861 12861
12862 if (rv == 0) { 12862 if (rv == 0) {
12863 if (wm_phy_resetisblocked(sc)) { 12863 if (wm_phy_resetisblocked(sc)) {
12864 printf("XXX reset is blocked(4)\n"); 12864 printf("XXX reset is blocked(4)\n");
12865 goto out; 12865 goto out;
12866 } 12866 }
12867 wm_reset_phy(sc); 12867 wm_reset_phy(sc);
12868 if (wm_phy_resetisblocked(sc)) 12868 if (wm_phy_resetisblocked(sc))
12869 printf("XXX reset is blocked(4)\n"); 12869 printf("XXX reset is blocked(4)\n");
12870 } 12870 }
12871 12871
12872out: 12872out:
12873 /* 12873 /*
12874 * Ungate automatic PHY configuration by hardware on non-managed 82579 12874 * Ungate automatic PHY configuration by hardware on non-managed 82579
12875 */ 12875 */
12876 if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) { 12876 if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
12877 delay(10*1000); 12877 delay(10*1000);
12878 wm_gate_hw_phy_config_ich8lan(sc, false); 12878 wm_gate_hw_phy_config_ich8lan(sc, false);
12879 } 12879 }
12880} 12880}
12881 12881
12882static void 12882static void
12883wm_init_manageability(struct wm_softc *sc) 12883wm_init_manageability(struct wm_softc *sc)
12884{ 12884{
12885 12885
12886 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", 12886 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12887 device_xname(sc->sc_dev), __func__)); 12887 device_xname(sc->sc_dev), __func__));
12888 if (sc->sc_flags & WM_F_HAS_MANAGE) { 12888 if (sc->sc_flags & WM_F_HAS_MANAGE) {
12889 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H); 12889 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
12890 uint32_t manc = CSR_READ(sc, WMREG_MANC); 12890 uint32_t manc = CSR_READ(sc, WMREG_MANC);
12891 12891
12892 /* Disable hardware interception of ARP */ 12892 /* Disable hardware interception of ARP */
12893 manc &= ~MANC_ARP_EN; 12893 manc &= ~MANC_ARP_EN;
12894 12894
12895 /* Enable receiving management packets to the host */ 12895 /* Enable receiving management packets to the host */
12896 if (sc->sc_type >= WM_T_82571) { 12896 if (sc->sc_type >= WM_T_82571) {
12897 manc |= MANC_EN_MNG2HOST; 12897 manc |= MANC_EN_MNG2HOST;
12898 manc2h |= MANC2H_PORT_623| MANC2H_PORT_624; 12898 manc2h |= MANC2H_PORT_623| MANC2H_PORT_624;
12899 CSR_WRITE(sc, WMREG_MANC2H, manc2h); 12899 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
12900 } 12900 }
12901 12901
12902 CSR_WRITE(sc, WMREG_MANC, manc); 12902 CSR_WRITE(sc, WMREG_MANC, manc);
12903 } 12903 }
12904} 12904}
12905 12905
12906static void 12906static void
12907wm_release_manageability(struct wm_softc *sc) 12907wm_release_manageability(struct wm_softc *sc)
12908{ 12908{
12909 12909
12910 if (sc->sc_flags & WM_F_HAS_MANAGE) { 12910 if (sc->sc_flags & WM_F_HAS_MANAGE) {
12911 uint32_t manc = CSR_READ(sc, WMREG_MANC); 12911 uint32_t manc = CSR_READ(sc, WMREG_MANC);
12912 12912
12913 manc |= MANC_ARP_EN; 12913 manc |= MANC_ARP_EN;
12914 if (sc->sc_type >= WM_T_82571) 12914 if (sc->sc_type >= WM_T_82571)
12915 manc &= ~MANC_EN_MNG2HOST; 12915 manc &= ~MANC_EN_MNG2HOST;
12916 12916
12917 CSR_WRITE(sc, WMREG_MANC, manc); 12917 CSR_WRITE(sc, WMREG_MANC, manc);
12918 } 12918 }
12919} 12919}
12920 12920
12921static void 12921static void
12922wm_get_wakeup(struct wm_softc *sc) 12922wm_get_wakeup(struct wm_softc *sc)
12923{ 12923{
12924 12924
12925 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */ 12925 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
12926 switch (sc->sc_type) { 12926 switch (sc->sc_type) {
12927 case WM_T_82573: 12927 case WM_T_82573:
12928 case WM_T_82583: 12928 case WM_T_82583:
12929 sc->sc_flags |= WM_F_HAS_AMT; 12929 sc->sc_flags |= WM_F_HAS_AMT;
12930 /* FALLTHROUGH */ 12930 /* FALLTHROUGH */
12931 case WM_T_80003: 12931 case WM_T_80003:
12932 case WM_T_82575: 12932 case WM_T_82575:
12933 case WM_T_82576: 12933 case WM_T_82576:
12934 case WM_T_82580: 12934 case WM_T_82580:
12935 case WM_T_I350: 12935 case WM_T_I350:
12936 case WM_T_I354: 12936 case WM_T_I354:
12937 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0) 12937 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
12938 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID; 12938 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
12939 /* FALLTHROUGH */ 12939 /* FALLTHROUGH */
12940 case WM_T_82541: 12940 case WM_T_82541:
12941 case WM_T_82541_2: 12941 case WM_T_82541_2:
12942 case WM_T_82547: 12942 case WM_T_82547:
12943 case WM_T_82547_2: 12943 case WM_T_82547_2:
12944 case WM_T_82571: 12944 case WM_T_82571:
12945 case WM_T_82572: 12945 case WM_T_82572:
12946 case WM_T_82574: 12946 case WM_T_82574:
12947 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES; 12947 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
12948 break; 12948 break;
12949 case WM_T_ICH8: 12949 case WM_T_ICH8:
12950 case WM_T_ICH9: 12950 case WM_T_ICH9:
12951 case WM_T_ICH10: 12951 case WM_T_ICH10:
12952 case WM_T_PCH: 12952 case WM_T_PCH:
12953 case WM_T_PCH2: 12953 case WM_T_PCH2:
12954 case WM_T_PCH_LPT: 12954 case WM_T_PCH_LPT:
12955 case WM_T_PCH_SPT: 12955 case WM_T_PCH_SPT:
12956 sc->sc_flags |= WM_F_HAS_AMT; 12956 sc->sc_flags |= WM_F_HAS_AMT;
12957 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES; 12957 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
12958 break; 12958 break;
12959 default: 12959 default:
12960 break; 12960 break;
12961 } 12961 }
12962 12962
12963 /* 1: HAS_MANAGE */ 12963 /* 1: HAS_MANAGE */
12964 if (wm_enable_mng_pass_thru(sc) != 0) 12964 if (wm_enable_mng_pass_thru(sc) != 0)
12965 sc->sc_flags |= WM_F_HAS_MANAGE; 12965 sc->sc_flags |= WM_F_HAS_MANAGE;
12966 12966
12967#ifdef WM_DEBUG 12967#ifdef WM_DEBUG
12968 printf("\n"); 12968 printf("\n");
12969 if ((sc->sc_flags & WM_F_HAS_AMT) != 0) 12969 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
12970 printf("HAS_AMT,"); 12970 printf("HAS_AMT,");
12971 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) 12971 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
12972 printf("ARC_SUBSYS_VALID,"); 12972 printf("ARC_SUBSYS_VALID,");
12973 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0) 12973 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
12974 printf("ASF_FIRMWARE_PRES,"); 12974 printf("ASF_FIRMWARE_PRES,");
12975 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0) 12975 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
12976 printf("HAS_MANAGE,"); 12976 printf("HAS_MANAGE,");
12977 printf("\n"); 12977 printf("\n");
12978#endif 12978#endif
12979 /* 12979 /*
12980 * Note that the WOL flags are set after the resetting of the eeprom 12980 * Note that the WOL flags are set after the resetting of the eeprom
12981 * stuff 12981 * stuff
12982 */ 12982 */
12983} 12983}
12984 12984
12985/* 12985/*
12986 * Unconfigure Ultra Low Power mode. 12986 * Unconfigure Ultra Low Power mode.
12987 * Only for I217 and newer (see below). 12987 * Only for I217 and newer (see below).
12988 */ 12988 */
12989static void 12989static void
12990wm_ulp_disable(struct wm_softc *sc) 12990wm_ulp_disable(struct wm_softc *sc)
12991{ 12991{
12992 uint32_t reg; 12992 uint32_t reg;
12993 int i = 0; 12993 int i = 0;
12994 12994
12995 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", 12995 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12996 device_xname(sc->sc_dev), __func__)); 12996 device_xname(sc->sc_dev), __func__));
12997 /* Exclude old devices */ 12997 /* Exclude old devices */
12998 if ((sc->sc_type < WM_T_PCH_LPT) 12998 if ((sc->sc_type < WM_T_PCH_LPT)
12999 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM) 12999 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
13000 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V) 13000 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
13001 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2) 13001 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
13002 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2)) 13002 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
13003 return; 13003 return;
13004 13004
13005 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) { 13005 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
13006 /* Request ME un-configure ULP mode in the PHY */ 13006 /* Request ME un-configure ULP mode in the PHY */
13007 reg = CSR_READ(sc, WMREG_H2ME); 13007 reg = CSR_READ(sc, WMREG_H2ME);
13008 reg &= ~H2ME_ULP; 13008 reg &= ~H2ME_ULP;
13009 reg |= H2ME_ENFORCE_SETTINGS; 13009 reg |= H2ME_ENFORCE_SETTINGS;
13010 CSR_WRITE(sc, WMREG_H2ME, reg); 13010 CSR_WRITE(sc, WMREG_H2ME, reg);
13011 13011
13012 /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */ 13012 /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
13013 while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) { 13013 while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
13014 if (i++ == 30) { 13014 if (i++ == 30) {
13015 printf("%s timed out\n", __func__); 13015 printf("%s timed out\n", __func__);
13016 return; 13016 return;
13017 } 13017 }
13018 delay(10 * 1000); 13018 delay(10 * 1000);
13019 } 13019 }
13020 reg = CSR_READ(sc, WMREG_H2ME); 13020 reg = CSR_READ(sc, WMREG_H2ME);
13021 reg &= ~H2ME_ENFORCE_SETTINGS; 13021 reg &= ~H2ME_ENFORCE_SETTINGS;
13022 CSR_WRITE(sc, WMREG_H2ME, reg); 13022 CSR_WRITE(sc, WMREG_H2ME, reg);
13023 13023
13024 return; 13024 return;
13025 } 13025 }
13026 13026
13027 /* Acquire semaphore */ 13027 /* Acquire semaphore */
13028 sc->phy.acquire(sc); 13028 sc->phy.acquire(sc);
13029 13029
13030 /* Toggle LANPHYPC */ 13030 /* Toggle LANPHYPC */
13031 wm_toggle_lanphypc_pch_lpt(sc); 13031 wm_toggle_lanphypc_pch_lpt(sc);
13032 13032
13033 /* Unforce SMBus mode in PHY */ 13033 /* Unforce SMBus mode in PHY */
13034 reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL); 13034 reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
13035 if (reg == 0x0000 || reg == 0xffff) { 13035 if (reg == 0x0000 || reg == 0xffff) {
13036 uint32_t reg2; 13036 uint32_t reg2;
13037 13037
13038 printf("%s: Force SMBus first.\n", __func__); 13038 printf("%s: Force SMBus first.\n", __func__);
13039 reg2 = CSR_READ(sc, WMREG_CTRL_EXT); 13039 reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
13040 reg2 |= CTRL_EXT_FORCE_SMBUS; 13040 reg2 |= CTRL_EXT_FORCE_SMBUS;
13041 CSR_WRITE(sc, WMREG_CTRL_EXT, reg2); 13041 CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
13042 delay(50 * 1000); 13042 delay(50 * 1000);
13043 13043
13044 reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL); 13044 reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
13045 } 13045 }
13046 reg &= ~CV_SMB_CTRL_FORCE_SMBUS; 13046 reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
13047 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg); 13047 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
13048 13048
13049 /* Unforce SMBus mode in MAC */ 13049 /* Unforce SMBus mode in MAC */
13050 reg = CSR_READ(sc, WMREG_CTRL_EXT); 13050 reg = CSR_READ(sc, WMREG_CTRL_EXT);
13051 reg &= ~CTRL_EXT_FORCE_SMBUS; 13051 reg &= ~CTRL_EXT_FORCE_SMBUS;
13052 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 13052 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
13053 13053
13054 reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL); 13054 reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
13055 reg |= HV_PM_CTRL_K1_ENA; 13055 reg |= HV_PM_CTRL_K1_ENA;
13056 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg); 13056 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
13057 13057
13058 reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1); 13058 reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
13059 reg &= ~(I218_ULP_CONFIG1_IND 13059 reg &= ~(I218_ULP_CONFIG1_IND
13060 | I218_ULP_CONFIG1_STICKY_ULP 13060 | I218_ULP_CONFIG1_STICKY_ULP
13061 | I218_ULP_CONFIG1_RESET_TO_SMBUS 13061 | I218_ULP_CONFIG1_RESET_TO_SMBUS
13062 | I218_ULP_CONFIG1_WOL_HOST 13062 | I218_ULP_CONFIG1_WOL_HOST
13063 | I218_ULP_CONFIG1_INBAND_EXIT 13063 | I218_ULP_CONFIG1_INBAND_EXIT
13064 | I218_ULP_CONFIG1_EN_ULP_LANPHYPC 13064 | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
13065 | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST 13065 | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
13066 | I218_ULP_CONFIG1_DIS_SMB_PERST); 13066 | I218_ULP_CONFIG1_DIS_SMB_PERST);
13067 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg); 13067 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
13068 reg |= I218_ULP_CONFIG1_START; 13068 reg |= I218_ULP_CONFIG1_START;
13069 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg); 13069 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
13070 13070
13071 reg = CSR_READ(sc, WMREG_FEXTNVM7); 13071 reg = CSR_READ(sc, WMREG_FEXTNVM7);
13072 reg &= ~FEXTNVM7_DIS_SMB_PERST; 13072 reg &= ~FEXTNVM7_DIS_SMB_PERST;
13073 CSR_WRITE(sc, WMREG_FEXTNVM7, reg); 13073 CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
13074 13074
13075 /* Release semaphore */ 13075 /* Release semaphore */
13076 sc->phy.release(sc); 13076 sc->phy.release(sc);
13077 wm_gmii_reset(sc); 13077 wm_gmii_reset(sc);
13078 delay(50 * 1000); 13078 delay(50 * 1000);
13079} 13079}
13080 13080
13081/* WOL in the newer chipset interfaces (pchlan) */ 13081/* WOL in the newer chipset interfaces (pchlan) */
13082static void 13082static void
13083wm_enable_phy_wakeup(struct wm_softc *sc) 13083wm_enable_phy_wakeup(struct wm_softc *sc)
13084{ 13084{
13085#if 0 13085#if 0
13086 uint16_t preg; 13086 uint16_t preg;
13087 13087
13088 /* Copy MAC RARs to PHY RARs */ 13088 /* Copy MAC RARs to PHY RARs */
13089 13089
13090 /* Copy MAC MTA to PHY MTA */ 13090 /* Copy MAC MTA to PHY MTA */
13091 13091
13092 /* Configure PHY Rx Control register */ 13092 /* Configure PHY Rx Control register */
13093 13093
13094 /* Enable PHY wakeup in MAC register */ 13094 /* Enable PHY wakeup in MAC register */
13095 13095
13096 /* Configure and enable PHY wakeup in PHY registers */ 13096 /* Configure and enable PHY wakeup in PHY registers */
13097 13097
13098 /* Activate PHY wakeup */ 13098 /* Activate PHY wakeup */
13099 13099
13100 /* XXX */ 13100 /* XXX */
13101#endif 13101#endif
13102} 13102}
13103 13103
13104/* Power down workaround on D3 */ 13104/* Power down workaround on D3 */
13105static void 13105static void
13106wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc) 13106wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
13107{ 13107{
13108 uint32_t reg; 13108 uint32_t reg;
13109 int i; 13109 int i;
13110 13110
13111 for (i = 0; i < 2; i++) { 13111 for (i = 0; i < 2; i++) {
13112 /* Disable link */ 13112 /* Disable link */
13113 reg = CSR_READ(sc, WMREG_PHY_CTRL); 13113 reg = CSR_READ(sc, WMREG_PHY_CTRL);
13114 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS; 13114 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
13115 CSR_WRITE(sc, WMREG_PHY_CTRL, reg); 13115 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
13116 13116
13117 /* 13117 /*
13118 * Call gig speed drop workaround on Gig disable before 13118 * Call gig speed drop workaround on Gig disable before
13119 * accessing any PHY registers 13119 * accessing any PHY registers
13120 */ 13120 */
13121 if (sc->sc_type == WM_T_ICH8) 13121 if (sc->sc_type == WM_T_ICH8)
13122 wm_gig_downshift_workaround_ich8lan(sc); 13122 wm_gig_downshift_workaround_ich8lan(sc);
13123 13123
13124 /* Write VR power-down enable */ 13124 /* Write VR power-down enable */
13125 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL); 13125 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
13126 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK; 13126 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
13127 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN; 13127 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
13128 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg); 13128 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
13129 13129
13130 /* Read it back and test */ 13130 /* Read it back and test */
13131 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL); 13131 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
13132 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK; 13132 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
13133 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0)) 13133 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
13134 break; 13134 break;
13135 13135
13136 /* Issue PHY reset and repeat at most one more time */ 13136 /* Issue PHY reset and repeat at most one more time */
13137 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET); 13137 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
13138 } 13138 }
13139} 13139}
13140 13140
13141static void 13141static void
13142wm_enable_wakeup(struct wm_softc *sc) 13142wm_enable_wakeup(struct wm_softc *sc)
13143{ 13143{
13144 uint32_t reg, pmreg; 13144 uint32_t reg, pmreg;
13145 pcireg_t pmode; 13145 pcireg_t pmode;
13146 13146
13147 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", 13147 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13148 device_xname(sc->sc_dev), __func__)); 13148 device_xname(sc->sc_dev), __func__));
13149 13149
13150 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT, 13150 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
13151 &pmreg, NULL) == 0) 13151 &pmreg, NULL) == 0)
13152 return; 13152 return;
13153 13153
13154 /* Advertise the wakeup capability */ 13154 /* Advertise the wakeup capability */
13155 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2) 13155 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
13156 | CTRL_SWDPIN(3)); 13156 | CTRL_SWDPIN(3));
13157 CSR_WRITE(sc, WMREG_WUC, WUC_APME); 13157 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
13158 13158
13159 /* ICH workaround */ 13159 /* ICH workaround */
13160 switch (sc->sc_type) { 13160 switch (sc->sc_type) {
13161 case WM_T_ICH8: 13161 case WM_T_ICH8:
13162 case WM_T_ICH9: 13162 case WM_T_ICH9:
13163 case WM_T_ICH10: 13163 case WM_T_ICH10:
13164 case WM_T_PCH: 13164 case WM_T_PCH:
13165 case WM_T_PCH2: 13165 case WM_T_PCH2:
13166 case WM_T_PCH_LPT: 13166 case WM_T_PCH_LPT:
13167 case WM_T_PCH_SPT: 13167 case WM_T_PCH_SPT:
13168 /* Disable gig during WOL */ 13168 /* Disable gig during WOL */
13169 reg = CSR_READ(sc, WMREG_PHY_CTRL); 13169 reg = CSR_READ(sc, WMREG_PHY_CTRL);
13170 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS; 13170 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
13171 CSR_WRITE(sc, WMREG_PHY_CTRL, reg); 13171 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
13172 if (sc->sc_type == WM_T_PCH) 13172 if (sc->sc_type == WM_T_PCH)
13173 wm_gmii_reset(sc); 13173 wm_gmii_reset(sc);
13174 13174
13175 /* Power down workaround */ 13175 /* Power down workaround */
13176 if (sc->sc_phytype == WMPHY_82577) { 13176 if (sc->sc_phytype == WMPHY_82577) {
13177 struct mii_softc *child; 13177 struct mii_softc *child;
13178 13178
13179 /* Assume that the PHY is copper */ 13179 /* Assume that the PHY is copper */
13180 child = LIST_FIRST(&sc->sc_mii.mii_phys); 13180 child = LIST_FIRST(&sc->sc_mii.mii_phys);
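			/*
			 * LIST_FIRST() returns NULL when no PHY has
			 * attached; check it before dereferencing, as the
			 * 82578 workaround further below does.
			 */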
13181 if (child->mii_mpd_rev <= 2) 13181 if ((child != NULL) && (child->mii_mpd_rev <= 2))
13182 sc->sc_mii.mii_writereg(sc->sc_dev, 1, 13182 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
13183 (768 << 5) | 25, 0x0444); /* magic num */ 13183 (768 << 5) | 25, 0x0444); /* magic num */
13184 } 13184 }
13185 break; 13185 break;
13186 default: 13186 default:
13187 break; 13187 break;
13188 } 13188 }
13189 13189
13190 /* Keep the laser running on fiber adapters */ 13190 /* Keep the laser running on fiber adapters */
13191 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER) 13191 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
13192 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) { 13192 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
13193 reg = CSR_READ(sc, WMREG_CTRL_EXT); 13193 reg = CSR_READ(sc, WMREG_CTRL_EXT);
13194 reg |= CTRL_EXT_SWDPIN(3); 13194 reg |= CTRL_EXT_SWDPIN(3);
13195 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 13195 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
13196 } 13196 }
13197 13197
13198 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG; 13198 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
13199#if 0 /* for the multicast packet */ 13199#if 0 /* for the multicast packet */
13200 reg |= WUFC_MC; 13200 reg |= WUFC_MC;
13201 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE); 13201 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
13202#endif 13202#endif
13203 13203
13204 if (sc->sc_type >= WM_T_PCH) 13204 if (sc->sc_type >= WM_T_PCH)
13205 wm_enable_phy_wakeup(sc); 13205 wm_enable_phy_wakeup(sc);
13206 else { 13206 else {
13207 CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN); 13207 CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
13208 CSR_WRITE(sc, WMREG_WUFC, reg); 13208 CSR_WRITE(sc, WMREG_WUFC, reg);
13209 } 13209 }
13210 13210
13211 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) 13211 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
13212 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) 13212 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
13213 || (sc->sc_type == WM_T_PCH2)) 13213 || (sc->sc_type == WM_T_PCH2))
13214 && (sc->sc_phytype == WMPHY_IGP_3)) 13214 && (sc->sc_phytype == WMPHY_IGP_3))
13215 wm_igp3_phy_powerdown_workaround_ich8lan(sc); 13215 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
13216 13216
13217 /* Request PME */ 13217 /* Request PME */
13218 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR); 13218 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
13219#if 0 13219#if 0
13220 /* Disable WOL */ 13220 /* Disable WOL */
13221 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN); 13221 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
13222#else 13222#else
13223 /* For WOL */ 13223 /* For WOL */
13224 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN; 13224 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
13225#endif 13225#endif
13226 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode); 13226 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
13227} 13227}
13228 13228
13229/* LPLU */ 13229/* LPLU */
13230 13230
13231static void 13231static void
13232wm_lplu_d0_disable(struct wm_softc *sc) 13232wm_lplu_d0_disable(struct wm_softc *sc)
13233{ 13233{
13234 uint32_t reg; 13234 uint32_t reg;
13235 13235
13236 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", 13236 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13237 device_xname(sc->sc_dev), __func__)); 13237 device_xname(sc->sc_dev), __func__));
13238 13238
13239 reg = CSR_READ(sc, WMREG_PHY_CTRL); 13239 reg = CSR_READ(sc, WMREG_PHY_CTRL);
13240 reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU); 13240 reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
13241 CSR_WRITE(sc, WMREG_PHY_CTRL, reg); 13241 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
13242} 13242}
13243 13243
13244static void 13244static void
13245wm_lplu_d0_disable_pch(struct wm_softc *sc) 13245wm_lplu_d0_disable_pch(struct wm_softc *sc)
13246{ 13246{
13247 uint32_t reg; 13247 uint32_t reg;
13248 13248
13249 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", 13249 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13250 device_xname(sc->sc_dev), __func__)); 13250 device_xname(sc->sc_dev), __func__));
13251 13251
13252 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS); 13252 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
13253 reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU); 13253 reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
13254 reg |= HV_OEM_BITS_ANEGNOW; 13254 reg |= HV_OEM_BITS_ANEGNOW;
13255 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg); 13255 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
13256} 13256}
13257 13257
13258/* EEE */ 13258/* EEE */
13259 13259
13260static void 13260static void
13261wm_set_eee_i350(struct wm_softc *sc) 13261wm_set_eee_i350(struct wm_softc *sc)
13262{ 13262{
13263 uint32_t ipcnfg, eeer; 13263 uint32_t ipcnfg, eeer;
13264 13264
13265 ipcnfg = CSR_READ(sc, WMREG_IPCNFG); 13265 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
13266 eeer = CSR_READ(sc, WMREG_EEER); 13266 eeer = CSR_READ(sc, WMREG_EEER);
13267 13267
13268 if ((sc->sc_flags & WM_F_EEE) != 0) { 13268 if ((sc->sc_flags & WM_F_EEE) != 0) {
13269 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN); 13269 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
13270 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN 13270 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
13271 | EEER_LPI_FC); 13271 | EEER_LPI_FC);
13272 } else { 13272 } else {
13273 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN); 13273 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
13274 ipcnfg &= ~IPCNFG_10BASE_TE; 13274 ipcnfg &= ~IPCNFG_10BASE_TE;
13275 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN 13275 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
13276 | EEER_LPI_FC); 13276 | EEER_LPI_FC);
13277 } 13277 }
13278 13278
13279 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg); 13279 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
13280 CSR_WRITE(sc, WMREG_EEER, eeer); 13280 CSR_WRITE(sc, WMREG_EEER, eeer);
13281 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */ 13281 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
13282 CSR_READ(sc, WMREG_EEER); /* XXX flush? */ 13282 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
13283} 13283}
13284 13284
13285/* 13285/*
13286 * Workarounds (mainly PHY related). 13286 * Workarounds (mainly PHY related).
13287 * Basically, PHY's workarounds are in the PHY drivers. 13287 * Basically, PHY's workarounds are in the PHY drivers.
13288 */ 13288 */
13289 13289
13290/* Work-around for 82566 Kumeran PCS lock loss */ 13290/* Work-around for 82566 Kumeran PCS lock loss */
13291static void 13291static void
13292wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc) 13292wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
13293{ 13293{
13294#if 0 13294#if 0
13295 int miistatus, active, i; 13295 int miistatus, active, i;
13296 int reg; 13296 int reg;
13297 13297
13298 miistatus = sc->sc_mii.mii_media_status; 13298 miistatus = sc->sc_mii.mii_media_status;
13299 13299
13300 /* If the link is not up, do nothing */ 13300 /* If the link is not up, do nothing */
13301 if ((miistatus & IFM_ACTIVE) == 0) 13301 if ((miistatus & IFM_ACTIVE) == 0)
13302 return; 13302 return;
13303 13303
13304 active = sc->sc_mii.mii_media_active; 13304 active = sc->sc_mii.mii_media_active;
13305 13305
13306 /* Nothing to do if the link is other than 1Gbps */ 13306 /* Nothing to do if the link is other than 1Gbps */
13307 if (IFM_SUBTYPE(active) != IFM_1000_T) 13307 if (IFM_SUBTYPE(active) != IFM_1000_T)
13308 return; 13308 return;
13309 13309
13310 for (i = 0; i < 10; i++) { 13310 for (i = 0; i < 10; i++) {
13311 /* read twice */ 13311 /* read twice */
13312 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG); 13312 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
13313 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG); 13313 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
13314 if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0) 13314 if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
13315 goto out; /* GOOD! */ 13315 goto out; /* GOOD! */
13316 13316
13317 /* Reset the PHY */ 13317 /* Reset the PHY */
13318 wm_gmii_reset(sc); 13318 wm_gmii_reset(sc);
13319 delay(5*1000); 13319 delay(5*1000);
13320 } 13320 }
13321 13321
13322 /* Disable GigE link negotiation */ 13322 /* Disable GigE link negotiation */
13323 reg = CSR_READ(sc, WMREG_PHY_CTRL); 13323 reg = CSR_READ(sc, WMREG_PHY_CTRL);
13324 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS; 13324 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
13325 CSR_WRITE(sc, WMREG_PHY_CTRL, reg); 13325 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
13326 13326
13327 /* 13327 /*
13328 * Call gig speed drop workaround on Gig disable before accessing 13328 * Call gig speed drop workaround on Gig disable before accessing
13329 * any PHY registers. 13329 * any PHY registers.
13330 */ 13330 */
13331 wm_gig_downshift_workaround_ich8lan(sc); 13331 wm_gig_downshift_workaround_ich8lan(sc);
13332 13332
13333out: 13333out:
13334 return; 13334 return;
13335#endif 13335#endif
13336} 13336}
13337 13337
13338/* WOL from S5 stops working */ 13338/* WOL from S5 stops working */
13339static void 13339static void
13340wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc) 13340wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
13341{ 13341{
13342 uint16_t kmrn_reg; 13342 uint16_t kmrn_reg;
13343 13343
13344 /* Only for igp3 */ 13344 /* Only for igp3 */
13345 if (sc->sc_phytype == WMPHY_IGP_3) { 13345 if (sc->sc_phytype == WMPHY_IGP_3) {
13346 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG); 13346 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
13347 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK; 13347 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
13348 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg); 13348 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
13349 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK; 13349 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
13350 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg); 13350 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
13351 } 13351 }
13352} 13352}
13353 13353
13354/* 13354/*
13355 * Workaround for pch's PHYs 13355 * Workaround for pch's PHYs
13356 * XXX should be moved to new PHY driver? 13356 * XXX should be moved to new PHY driver?
13357 */ 13357 */
13358static void 13358static void
13359wm_hv_phy_workaround_ich8lan(struct wm_softc *sc) 13359wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
13360{ 13360{
13361 13361
13362 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", 13362 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13363 device_xname(sc->sc_dev), __func__)); 13363 device_xname(sc->sc_dev), __func__));
13364 KASSERT(sc->sc_type == WM_T_PCH); 13364 KASSERT(sc->sc_type == WM_T_PCH);
13365 13365
13366 if (sc->sc_phytype == WMPHY_82577) 13366 if (sc->sc_phytype == WMPHY_82577)
13367 wm_set_mdio_slow_mode_hv(sc); 13367 wm_set_mdio_slow_mode_hv(sc);
13368 13368
13369 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */ 13369 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
13370 13370
13371 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/ 13371 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
13372 13372
13373 /* 82578 */ 13373 /* 82578 */
13374 if (sc->sc_phytype == WMPHY_82578) { 13374 if (sc->sc_phytype == WMPHY_82578) {
13375 struct mii_softc *child; 13375 struct mii_softc *child;
13376 13376
13377 /* 13377 /*
13378 * Return registers to default by doing a soft reset then 13378 * Return registers to default by doing a soft reset then
13379 * writing 0x3140 to the control register 13379 * writing 0x3140 to the control register
13380 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1 13380 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
13381 */ 13381 */
13382 child = LIST_FIRST(&sc->sc_mii.mii_phys); 13382 child = LIST_FIRST(&sc->sc_mii.mii_phys);
13383 if ((child != NULL) && (child->mii_mpd_rev < 2)) { 13383 if ((child != NULL) && (child->mii_mpd_rev < 2)) {
13384 PHY_RESET(child); 13384 PHY_RESET(child);
13385 sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR, 13385 sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
13386 0x3140); 13386 0x3140);
13387 } 13387 }
13388 } 13388 }
13389 13389
13390 /* Select page 0 */ 13390 /* Select page 0 */
13391 sc->phy.acquire(sc); 13391 sc->phy.acquire(sc);
13392 wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0); 13392 wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
13393 sc->phy.release(sc); 13393 sc->phy.release(sc);
13394 13394
13395 /* 13395 /*
13396 * Configure the K1 Si workaround during phy reset assuming there is 13396 * Configure the K1 Si workaround during phy reset assuming there is
13397 * link so that it disables K1 if link is in 1Gbps. 13397 * link so that it disables K1 if link is in 1Gbps.
13398 */ 13398 */
13399 wm_k1_gig_workaround_hv(sc, 1); 13399 wm_k1_gig_workaround_hv(sc, 1);
13400} 13400}
13401 13401
13402static void 13402static void
13403wm_lv_phy_workaround_ich8lan(struct wm_softc *sc) 13403wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
13404{ 13404{
13405 13405
13406 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", 13406 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13407 device_xname(sc->sc_dev), __func__)); 13407 device_xname(sc->sc_dev), __func__));
13408 KASSERT(sc->sc_type == WM_T_PCH2); 13408 KASSERT(sc->sc_type == WM_T_PCH2);
13409 13409
13410 wm_set_mdio_slow_mode_hv(sc); 13410 wm_set_mdio_slow_mode_hv(sc);
13411} 13411}
13412 13412
13413static int 13413static int
13414wm_k1_gig_workaround_hv(struct wm_softc *sc, int link) 13414wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
13415{ 13415{
13416 int k1_enable = sc->sc_nvm_k1_enabled; 13416 int k1_enable = sc->sc_nvm_k1_enabled;
13417 13417
13418 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", 13418 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13419 device_xname(sc->sc_dev), __func__)); 13419 device_xname(sc->sc_dev), __func__));
13420 13420
13421 if (sc->phy.acquire(sc) != 0) 13421 if (sc->phy.acquire(sc) != 0)
13422 return -1; 13422 return -1;
13423 13423
13424 if (link) { 13424 if (link) {
13425 k1_enable = 0; 13425 k1_enable = 0;
13426 13426
13427 /* Link stall fix for link up */ 13427 /* Link stall fix for link up */
13428 wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100); 13428 wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
13429 } else { 13429 } else {
13430 /* Link stall fix for link down */ 13430 /* Link stall fix for link down */
13431 wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100); 13431 wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
13432 } 13432 }
13433 13433
13434 wm_configure_k1_ich8lan(sc, k1_enable); 13434 wm_configure_k1_ich8lan(sc, k1_enable);
13435 sc->phy.release(sc); 13435 sc->phy.release(sc);
13436 13436
13437 return 0; 13437 return 0;
13438} 13438}
13439 13439
13440static void 13440static void
13441wm_set_mdio_slow_mode_hv(struct wm_softc *sc) 13441wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
13442{ 13442{
13443 uint32_t reg; 13443 uint32_t reg;
13444 13444
13445 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL); 13445 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
13446 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, 13446 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
13447 reg | HV_KMRN_MDIO_SLOW); 13447 reg | HV_KMRN_MDIO_SLOW);
13448} 13448}
13449 13449
13450static void 13450static void
13451wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable) 13451wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
13452{ 13452{
13453 uint32_t ctrl, ctrl_ext, tmp; 13453 uint32_t ctrl, ctrl_ext, tmp;
13454 uint16_t kmrn_reg; 13454 uint16_t kmrn_reg;
13455 13455
13456 kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG); 13456 kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
13457 13457
13458 if (k1_enable) 13458 if (k1_enable)
13459 kmrn_reg |= KUMCTRLSTA_K1_ENABLE; 13459 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
13460 else 13460 else
13461 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE; 13461 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
13462 13462
13463 wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg); 13463 wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
13464 13464
13465 delay(20); 13465 delay(20);
13466 13466
13467 ctrl = CSR_READ(sc, WMREG_CTRL); 13467 ctrl = CSR_READ(sc, WMREG_CTRL);
13468 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT); 13468 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
13469 13469
13470 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100); 13470 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
13471 tmp |= CTRL_FRCSPD; 13471 tmp |= CTRL_FRCSPD;
13472 13472
13473 CSR_WRITE(sc, WMREG_CTRL, tmp); 13473 CSR_WRITE(sc, WMREG_CTRL, tmp);
13474 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS); 13474 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
13475 CSR_WRITE_FLUSH(sc); 13475 CSR_WRITE_FLUSH(sc);
13476 delay(20); 13476 delay(20);
13477 13477
13478 CSR_WRITE(sc, WMREG_CTRL, ctrl); 13478 CSR_WRITE(sc, WMREG_CTRL, ctrl);
13479 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext); 13479 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
13480 CSR_WRITE_FLUSH(sc); 13480 CSR_WRITE_FLUSH(sc);
13481 delay(20); 13481 delay(20);
13482} 13482}
13483 13483
13484/* special case - for 82575 - need to do manual init ... */ 13484/* special case - for 82575 - need to do manual init ... */
13485static void 13485static void
13486wm_reset_init_script_82575(struct wm_softc *sc) 13486wm_reset_init_script_82575(struct wm_softc *sc)
13487{ 13487{
13488 /* 13488 /*
13489 * remark: this is untested code - we have no board without EEPROM 13489 * remark: this is untested code - we have no board without EEPROM
13490 * same setup as mentioned in the FreeBSD driver for the i82575 13490 * same setup as mentioned in the FreeBSD driver for the i82575
13491 */ 13491 */
13492 13492
13493 /* SerDes configuration via SERDESCTRL */ 13493 /* SerDes configuration via SERDESCTRL */
13494 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c); 13494 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
13495 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78); 13495 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
13496 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23); 13496 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
13497 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15); 13497 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
13498 13498
13499 /* CCM configuration via CCMCTL register */ 13499 /* CCM configuration via CCMCTL register */
13500 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00); 13500 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
13501 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00); 13501 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
13502 13502
13503 /* PCIe lanes configuration */ 13503 /* PCIe lanes configuration */
13504 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec); 13504 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
13505 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf); 13505 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
13506 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05); 13506 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
13507 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81); 13507 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
13508 13508
13509 /* PCIe PLL Configuration */ 13509 /* PCIe PLL Configuration */
13510 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47); 13510 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
13511 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00); 13511 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
13512 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00); 13512 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
13513} 13513}
13514 13514
13515static void 13515static void
13516wm_reset_mdicnfg_82580(struct wm_softc *sc) 13516wm_reset_mdicnfg_82580(struct wm_softc *sc)
13517{ 13517{
13518 uint32_t reg; 13518 uint32_t reg;
13519 uint16_t nvmword; 13519 uint16_t nvmword;
13520 int rv; 13520 int rv;
13521 13521
13522 if ((sc->sc_flags & WM_F_SGMII) == 0) 13522 if ((sc->sc_flags & WM_F_SGMII) == 0)
13523 return; 13523 return;
13524 13524
13525 rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) 13525 rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
13526 + NVM_OFF_CFG3_PORTA, 1, &nvmword); 13526 + NVM_OFF_CFG3_PORTA, 1, &nvmword);
13527 if (rv != 0) { 13527 if (rv != 0) {
13528 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n", 13528 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
13529 __func__); 13529 __func__);
13530 return; 13530 return;
13531 } 13531 }
13532 13532
13533 reg = CSR_READ(sc, WMREG_MDICNFG); 13533 reg = CSR_READ(sc, WMREG_MDICNFG);
13534 if (nvmword & NVM_CFG3_PORTA_EXT_MDIO) 13534 if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
13535 reg |= MDICNFG_DEST; 13535 reg |= MDICNFG_DEST;
13536 if (nvmword & NVM_CFG3_PORTA_COM_MDIO) 13536 if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
13537 reg |= MDICNFG_COM_MDIO; 13537 reg |= MDICNFG_COM_MDIO;
13538 CSR_WRITE(sc, WMREG_MDICNFG, reg); 13538 CSR_WRITE(sc, WMREG_MDICNFG, reg);
13539} 13539}
13540 13540
13541#define MII_INVALIDID(x) (((x) == 0x0000) || ((x) == 0xffff)) 13541#define MII_INVALIDID(x) (((x) == 0x0000) || ((x) == 0xffff))
13542 13542
13543static bool 13543static bool
13544wm_phy_is_accessible_pchlan(struct wm_softc *sc) 13544wm_phy_is_accessible_pchlan(struct wm_softc *sc)
13545{ 13545{
13546 int i; 13546 int i;
13547 uint32_t reg; 13547 uint32_t reg;
13548 uint16_t id1, id2; 13548 uint16_t id1, id2;
13549 13549
13550 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", 13550 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13551 device_xname(sc->sc_dev), __func__)); 13551 device_xname(sc->sc_dev), __func__));
13552 id1 = id2 = 0xffff; 13552 id1 = id2 = 0xffff;
13553 for (i = 0; i < 2; i++) { 13553 for (i = 0; i < 2; i++) {
13554 id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1); 13554 id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
13555 if (MII_INVALIDID(id1)) 13555 if (MII_INVALIDID(id1))
13556 continue; 13556 continue;
13557 id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2); 13557 id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
13558 if (MII_INVALIDID(id2)) 13558 if (MII_INVALIDID(id2))
13559 continue; 13559 continue;
13560 break; 13560 break;
13561 } 13561 }
13562 if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2)) { 13562 if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2)) {
13563 goto out; 13563 goto out;
13564 } 13564 }
13565 13565
13566 if (sc->sc_type < WM_T_PCH_LPT) { 13566 if (sc->sc_type < WM_T_PCH_LPT) {
13567 sc->phy.release(sc); 13567 sc->phy.release(sc);
13568 wm_set_mdio_slow_mode_hv(sc); 13568 wm_set_mdio_slow_mode_hv(sc);
13569 id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1); 13569 id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
13570 id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2); 13570 id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
13571 sc->phy.acquire(sc); 13571 sc->phy.acquire(sc);
13572 } 13572 }
13573 if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) { 13573 if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
13574 printf("XXX return with false\n"); 13574 printf("XXX return with false\n");
13575 return false; 13575 return false;
13576 } 13576 }
13577out: 13577out:
13578 if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) { 13578 if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
13579 /* Only unforce SMBus if ME is not active */ 13579 /* Only unforce SMBus if ME is not active */
13580 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) { 13580 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
13581 /* Unforce SMBus mode in PHY */ 13581 /* Unforce SMBus mode in PHY */
13582 reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, 13582 reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
13583 CV_SMB_CTRL); 13583 CV_SMB_CTRL);
13584 reg &= ~CV_SMB_CTRL_FORCE_SMBUS; 13584 reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
13585 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, 13585 wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
13586 CV_SMB_CTRL, reg); 13586 CV_SMB_CTRL, reg);
13587 13587
13588 /* Unforce SMBus mode in MAC */ 13588 /* Unforce SMBus mode in MAC */
13589 reg = CSR_READ(sc, WMREG_CTRL_EXT); 13589 reg = CSR_READ(sc, WMREG_CTRL_EXT);
13590 reg &= ~CTRL_EXT_FORCE_SMBUS; 13590 reg &= ~CTRL_EXT_FORCE_SMBUS;
13591 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 13591 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
13592 } 13592 }
13593 } 13593 }
13594 return true; 13594 return true;
13595} 13595}
13596 13596
13597static void 13597static void
13598wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc) 13598wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
13599{ 13599{
13600 uint32_t reg; 13600 uint32_t reg;
13601 int i; 13601 int i;
13602 13602
13603 /* Set PHY Config Counter to 50msec */ 13603 /* Set PHY Config Counter to 50msec */
13604 reg = CSR_READ(sc, WMREG_FEXTNVM3); 13604 reg = CSR_READ(sc, WMREG_FEXTNVM3);
13605 reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK; 13605 reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
13606 reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS; 13606 reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
13607 CSR_WRITE(sc, WMREG_FEXTNVM3, reg); 13607 CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
13608 13608
13609 /* Toggle LANPHYPC */ 13609 /* Toggle LANPHYPC */
13610 reg = CSR_READ(sc, WMREG_CTRL); 13610 reg = CSR_READ(sc, WMREG_CTRL);
13611 reg |= CTRL_LANPHYPC_OVERRIDE; 13611 reg |= CTRL_LANPHYPC_OVERRIDE;
13612 reg &= ~CTRL_LANPHYPC_VALUE; 13612 reg &= ~CTRL_LANPHYPC_VALUE;
13613 CSR_WRITE(sc, WMREG_CTRL, reg); 13613 CSR_WRITE(sc, WMREG_CTRL, reg);
13614 CSR_WRITE_FLUSH(sc); 13614 CSR_WRITE_FLUSH(sc);
13615 delay(1000); 13615 delay(1000);
13616 reg &= ~CTRL_LANPHYPC_OVERRIDE; 13616 reg &= ~CTRL_LANPHYPC_OVERRIDE;
13617 CSR_WRITE(sc, WMREG_CTRL, reg); 13617 CSR_WRITE(sc, WMREG_CTRL, reg);
13618 CSR_WRITE_FLUSH(sc); 13618 CSR_WRITE_FLUSH(sc);
13619 13619
13620 if (sc->sc_type < WM_T_PCH_LPT) 13620 if (sc->sc_type < WM_T_PCH_LPT)
13621 delay(50 * 1000); 13621 delay(50 * 1000);
13622 else { 13622 else {
13623 i = 20; 13623 i = 20;
13624 13624
13625 do { 13625 do {
13626 delay(5 * 1000); 13626 delay(5 * 1000);
13627 } while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0) 13627 } while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
13628 && i--); 13628 && i--);
13629 13629
13630 delay(30 * 1000); 13630 delay(30 * 1000);
13631 } 13631 }
13632} 13632}
13633 13633
13634static int 13634static int
13635wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link) 13635wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
13636{ 13636{
13637 uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ) 13637 uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
13638 | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND; 13638 | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
13639 uint32_t rxa; 13639 uint32_t rxa;
13640 uint16_t scale = 0, lat_enc = 0; 13640 uint16_t scale = 0, lat_enc = 0;
13641 int64_t lat_ns, value; 13641 int64_t lat_ns, value;
13642  13642
13643 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", 13643 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13644 device_xname(sc->sc_dev), __func__)); 13644 device_xname(sc->sc_dev), __func__));
13645 13645
13646 if (link) { 13646 if (link) {
13647 pcireg_t preg; 13647 pcireg_t preg;
13648 uint16_t max_snoop, max_nosnoop, max_ltr_enc; 13648 uint16_t max_snoop, max_nosnoop, max_ltr_enc;
13649 13649
13650 rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK; 13650 rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
13651 13651
13652 /* 13652 /*
13653 * Determine the maximum latency tolerated by the device. 13653 * Determine the maximum latency tolerated by the device.
13654 * 13654 *
13655 * Per the PCIe spec, the tolerated latencies are encoded as 13655 * Per the PCIe spec, the tolerated latencies are encoded as
13656 * a 3-bit encoded scale (only 0-5 are valid) multiplied by 13656 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
13657 * a 10-bit value (0-1023) to provide a range from 1 ns to 13657 * a 10-bit value (0-1023) to provide a range from 1 ns to
13658 * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns, 13658 * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns,
13659 * 1=2^5ns, 2=2^10ns,...5=2^25ns. 13659 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
13660 */ 13660 */
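		/*
		 * For example, an illustrative latency of 5000 ns exceeds
		 * the 10-bit value range, so the loop below divides it once
		 * by 2^5 (rounding up): value = 157, scale = 1, and the
		 * encoded latency is 157 * 2^5 = 5024 ns.
		 */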
13661 lat_ns = ((int64_t)rxa * 1024 - 13661 lat_ns = ((int64_t)rxa * 1024 -
13662 (2 * (int64_t)sc->sc_ethercom.ec_if.if_mtu)) * 8 * 1000; 13662 (2 * (int64_t)sc->sc_ethercom.ec_if.if_mtu)) * 8 * 1000;
13663 if (lat_ns < 0) 13663 if (lat_ns < 0)
13664 lat_ns = 0; 13664 lat_ns = 0;
13665 else { 13665 else {
13666 uint32_t status; 13666 uint32_t status;
13667 uint16_t speed; 13667 uint16_t speed;
13668 13668
13669 status = CSR_READ(sc, WMREG_STATUS); 13669 status = CSR_READ(sc, WMREG_STATUS);
13670 switch (__SHIFTOUT(status, STATUS_SPEED)) { 13670 switch (__SHIFTOUT(status, STATUS_SPEED)) {
13671 case STATUS_SPEED_10: 13671 case STATUS_SPEED_10:
13672 speed = 10; 13672 speed = 10;
13673 break; 13673 break;
13674 case STATUS_SPEED_100: 13674 case STATUS_SPEED_100:
13675 speed = 100; 13675 speed = 100;
13676 break; 13676 break;
13677 case STATUS_SPEED_1000: 13677 case STATUS_SPEED_1000:
13678 speed = 1000; 13678 speed = 1000;
13679 break; 13679 break;
13680 default: 13680 default:
13681 printf("%s: Unknown speed (status = %08x)\n", 13681 printf("%s: Unknown speed (status = %08x)\n",
13682 device_xname(sc->sc_dev), status); 13682 device_xname(sc->sc_dev), status);
13683 return -1; 13683 return -1;
13684 } 13684 }
13685 lat_ns /= speed; 13685 lat_ns /= speed;
13686 } 13686 }
13687 value = lat_ns; 13687 value = lat_ns;
13688 13688
13689 while (value > LTRV_VALUE) { 13689 while (value > LTRV_VALUE) {
13690 scale ++; 13690 scale ++;
13691 value = howmany(value, __BIT(5)); 13691 value = howmany(value, __BIT(5));
13692 } 13692 }
13693 if (scale > LTRV_SCALE_MAX) { 13693 if (scale > LTRV_SCALE_MAX) {
13694 printf("%s: Invalid LTR latency scale %d\n", 13694 printf("%s: Invalid LTR latency scale %d\n",
13695 device_xname(sc->sc_dev), scale); 13695 device_xname(sc->sc_dev), scale);
13696 return -1; 13696 return -1;
13697 } 13697 }
13698 lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value); 13698 lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
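		/*
		 * Worked example (illustrative only, not from the source):
		 * with a 26 KB receive allocation, a 1500 byte MTU and a
		 * 1000 Mb/s link,
		 *	lat_ns = (26 * 1024 - 2 * 1500) * 8 * 1000 / 1000
		 *	       = 188992 ns.
		 * The loop above then yields value = 185 with scale = 2
		 * (2^10 ns units), i.e. roughly 189 us, assuming LTRV_VALUE
		 * is the 10-bit value mask and LTRV_SCALE the 3-bit scale
		 * field directly above it.
		 */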
13699 
13700 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
13701 		    WM_PCI_LTR_CAP_LPT);
13702 		max_snoop = preg & 0xffff;
13703 		max_nosnoop = preg >> 16;
13704 
13705 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
13706 
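		/*
		 * Editorial note: clamp the encoded latency to the larger of
		 * the platform's maximum snoop and no-snoop LTR values read
		 * from the LTR capability above (snoop in the low 16 bits,
		 * no-snoop in the high 16 bits).
		 */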
13707 		if (lat_enc > max_ltr_enc) {
13708 			lat_enc = max_ltr_enc;
13709 		}
13710 	}
13711 	/* Snoop and No-Snoop latencies the same */
13712 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
13713 	CSR_WRITE(sc, WMREG_LTRV, reg);
13714 
13715 	return 0;
13716 }
13717 
13718 /*
13719  * I210 Errata 25 and I211 Errata 10
13720  * Slow System Clock.
13721  */
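/*
 * Editorial summary of the recovery sequence implemented below: if the PHY
 * PLL frequency register still reads back as unconfigured, reset the
 * internal PHY, set PHYPDEN and SDLPE, clear WUC, write the autoload word
 * with the PLL workaround bit (INVM_PLL_WO_VAL) into EEARBC, bounce the
 * device through D3hot and back to D0, then restore the original autoload
 * word and WUC.  This is retried up to WM_MAX_PLL_TRIES times.
 */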
13722 static void
13723 wm_pll_workaround_i210(struct wm_softc *sc)
13724 {
13725 	uint32_t mdicnfg, wuc;
13726 	uint32_t reg;
13727 	pcireg_t pcireg;
13728 	uint32_t pmreg;
13729 	uint16_t nvmword, tmp_nvmword;
13730 	int phyval;
13731 	bool wa_done = false;
13732 	int i;
13733 
13734 	/* Save WUC and MDICNFG registers */
13735 	wuc = CSR_READ(sc, WMREG_WUC);
13736 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
13737 
13738 	reg = mdicnfg & ~MDICNFG_DEST;
13739 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
13740 
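	/*
	 * Editorial note: fetch the autoload word (falling back to
	 * INVM_DEFAULT_AL if it cannot be read) and prepare a copy with the
	 * PLL workaround bit set; the modified word is written to EEARBC
	 * during recovery and the original is restored afterwards.
	 */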
13741 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
13742 		nvmword = INVM_DEFAULT_AL;
13743 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
13744 
13745 	/* Get Power Management cap offset */
13746 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
13747 	    &pmreg, NULL) == 0)
13748 		return;
13749 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
13750 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
13751 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
13752 
13753 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
13754 			break; /* OK */
13755 		}
13756 
13757 		wa_done = true;
13758 		/* Directly reset the internal PHY */
13759 		reg = CSR_READ(sc, WMREG_CTRL);
13760 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
13761 
13762 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
13763 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
13764 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
13765 
13766 		CSR_WRITE(sc, WMREG_WUC, 0);
13767 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
13768 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
13769 
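		/*
		 * Editorial note: cycle the function through D3hot (about
		 * 1 ms) and back to D0 via the PMCSR, presumably so the
		 * device re-loads with the modified EEARBC value written
		 * above.
		 */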
13770 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
13771 		    pmreg + PCI_PMCSR);
13772 		pcireg |= PCI_PMCSR_STATE_D3;
13773 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
13774 		    pmreg + PCI_PMCSR, pcireg);
13775 		delay(1000);
13776 		pcireg &= ~PCI_PMCSR_STATE_D3;
13777 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
13778 		    pmreg + PCI_PMCSR, pcireg);
13779 
13780 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
13781 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
13782 
13783 		/* Restore WUC register */
13784 		CSR_WRITE(sc, WMREG_WUC, wuc);
13785 	}
13786 
13787 	/* Restore MDICNFG setting */
13788 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
13789 	if (wa_done)
13790 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
13791 }