| @@ -1,1085 +1,1085 @@ | | | @@ -1,1085 +1,1085 @@ |
1 | /* $NetBSD: if_wm.c,v 1.368 2015/10/13 10:21:21 knakahara Exp $ */ | | 1 | /* $NetBSD: if_wm.c,v 1.369 2015/10/13 10:26:21 knakahara Exp $ */ |
2 | | | 2 | |
3 | /* | | 3 | /* |
4 | * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc. | | 4 | * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * Written by Jason R. Thorpe for Wasabi Systems, Inc. | | 7 | * Written by Jason R. Thorpe for Wasabi Systems, Inc. |
8 | * | | 8 | * |
9 | * Redistribution and use in source and binary forms, with or without | | 9 | * Redistribution and use in source and binary forms, with or without |
10 | * modification, are permitted provided that the following conditions | | 10 | * modification, are permitted provided that the following conditions |
11 | * are met: | | 11 | * are met: |
12 | * 1. Redistributions of source code must retain the above copyright | | 12 | * 1. Redistributions of source code must retain the above copyright |
13 | * notice, this list of conditions and the following disclaimer. | | 13 | * notice, this list of conditions and the following disclaimer. |
14 | * 2. Redistributions in binary form must reproduce the above copyright | | 14 | * 2. Redistributions in binary form must reproduce the above copyright |
15 | * notice, this list of conditions and the following disclaimer in the | | 15 | * notice, this list of conditions and the following disclaimer in the |
16 | * documentation and/or other materials provided with the distribution. | | 16 | * documentation and/or other materials provided with the distribution. |
17 | * 3. All advertising materials mentioning features or use of this software | | 17 | * 3. All advertising materials mentioning features or use of this software |
18 | * must display the following acknowledgement: | | 18 | * must display the following acknowledgement: |
19 | * This product includes software developed for the NetBSD Project by | | 19 | * This product includes software developed for the NetBSD Project by |
20 | * Wasabi Systems, Inc. | | 20 | * Wasabi Systems, Inc. |
21 | * 4. The name of Wasabi Systems, Inc. may not be used to endorse | | 21 | * 4. The name of Wasabi Systems, Inc. may not be used to endorse |
22 | * or promote products derived from this software without specific prior | | 22 | * or promote products derived from this software without specific prior |
23 | * written permission. | | 23 | * written permission. |
24 | * | | 24 | * |
25 | * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND | | 25 | * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND |
26 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 26 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
27 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 27 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
28 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC | | 28 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC |
29 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 29 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
30 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 30 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
31 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 31 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
32 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 32 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
33 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 33 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
34 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 34 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
35 | * POSSIBILITY OF SUCH DAMAGE. | | 35 | * POSSIBILITY OF SUCH DAMAGE. |
36 | */ | | 36 | */ |
37 | | | 37 | |
38 | /******************************************************************************* | | 38 | /******************************************************************************* |
39 | | | 39 | |
40 | Copyright (c) 2001-2005, Intel Corporation | | 40 | Copyright (c) 2001-2005, Intel Corporation |
41 | All rights reserved. | | 41 | All rights reserved. |
42 | | | 42 | |
43 | Redistribution and use in source and binary forms, with or without | | 43 | Redistribution and use in source and binary forms, with or without |
44 | modification, are permitted provided that the following conditions are met: | | 44 | modification, are permitted provided that the following conditions are met: |
45 | | | 45 | |
46 | 1. Redistributions of source code must retain the above copyright notice, | | 46 | 1. Redistributions of source code must retain the above copyright notice, |
47 | this list of conditions and the following disclaimer. | | 47 | this list of conditions and the following disclaimer. |
48 | | | 48 | |
49 | 2. Redistributions in binary form must reproduce the above copyright | | 49 | 2. Redistributions in binary form must reproduce the above copyright |
50 | notice, this list of conditions and the following disclaimer in the | | 50 | notice, this list of conditions and the following disclaimer in the |
51 | documentation and/or other materials provided with the distribution. | | 51 | documentation and/or other materials provided with the distribution. |
52 | | | 52 | |
53 | 3. Neither the name of the Intel Corporation nor the names of its | | 53 | 3. Neither the name of the Intel Corporation nor the names of its |
54 | contributors may be used to endorse or promote products derived from | | 54 | contributors may be used to endorse or promote products derived from |
55 | this software without specific prior written permission. | | 55 | this software without specific prior written permission. |
56 | | | 56 | |
57 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | | 57 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
58 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | | 58 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
59 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | | 59 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
60 | ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | | 60 | ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE |
61 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 61 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
62 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 62 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
63 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 63 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
64 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 64 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
65 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 65 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
66 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 66 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
67 | POSSIBILITY OF SUCH DAMAGE. | | 67 | POSSIBILITY OF SUCH DAMAGE. |
68 | | | 68 | |
69 | *******************************************************************************/ | | 69 | *******************************************************************************/ |
70 | /* | | 70 | /* |
71 | * Device driver for the Intel i8254x family of Gigabit Ethernet chips. | | 71 | * Device driver for the Intel i8254x family of Gigabit Ethernet chips. |
72 | * | | 72 | * |
73 | * TODO (in order of importance): | | 73 | * TODO (in order of importance): |
74 | * | | 74 | * |
75 | * - Check XXX'ed comments | | 75 | * - Check XXX'ed comments |
76 | * - EEE (Energy Efficiency Ethernet) | | 76 | * - EEE (Energy Efficiency Ethernet) |
77 | * - Multi queue | | 77 | * - Multi queue |
78 | * - Image Unique ID | | 78 | * - Image Unique ID |
79 | * - LPLU other than PCH* | | 79 | * - LPLU other than PCH* |
80 | * - Virtual Function | | 80 | * - Virtual Function |
81 | * - Set LED correctly (based on contents in EEPROM) | | 81 | * - Set LED correctly (based on contents in EEPROM) |
82 | * - Rework how parameters are loaded from the EEPROM. | | 82 | * - Rework how parameters are loaded from the EEPROM. |
83 | */ | | 83 | */ |
84 | | | 84 | |
85 | #include <sys/cdefs.h> | | 85 | #include <sys/cdefs.h> |
86 | __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.368 2015/10/13 10:21:21 knakahara Exp $"); | | 86 | __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.369 2015/10/13 10:26:21 knakahara Exp $"); |
87 | | | 87 | |
88 | #ifdef _KERNEL_OPT | | 88 | #ifdef _KERNEL_OPT |
89 | #include "opt_net_mpsafe.h" | | 89 | #include "opt_net_mpsafe.h" |
90 | #endif | | 90 | #endif |
91 | | | 91 | |
92 | #include <sys/param.h> | | 92 | #include <sys/param.h> |
93 | #include <sys/systm.h> | | 93 | #include <sys/systm.h> |
94 | #include <sys/callout.h> | | 94 | #include <sys/callout.h> |
95 | #include <sys/mbuf.h> | | 95 | #include <sys/mbuf.h> |
96 | #include <sys/malloc.h> | | 96 | #include <sys/malloc.h> |
97 | #include <sys/kmem.h> | | 97 | #include <sys/kmem.h> |
98 | #include <sys/kernel.h> | | 98 | #include <sys/kernel.h> |
99 | #include <sys/socket.h> | | 99 | #include <sys/socket.h> |
100 | #include <sys/ioctl.h> | | 100 | #include <sys/ioctl.h> |
101 | #include <sys/errno.h> | | 101 | #include <sys/errno.h> |
102 | #include <sys/device.h> | | 102 | #include <sys/device.h> |
103 | #include <sys/queue.h> | | 103 | #include <sys/queue.h> |
104 | #include <sys/syslog.h> | | 104 | #include <sys/syslog.h> |
105 | #include <sys/interrupt.h> | | 105 | #include <sys/interrupt.h> |
106 | | | 106 | |
107 | #include <sys/rndsource.h> | | 107 | #include <sys/rndsource.h> |
108 | | | 108 | |
109 | #include <net/if.h> | | 109 | #include <net/if.h> |
110 | #include <net/if_dl.h> | | 110 | #include <net/if_dl.h> |
111 | #include <net/if_media.h> | | 111 | #include <net/if_media.h> |
112 | #include <net/if_ether.h> | | 112 | #include <net/if_ether.h> |
113 | | | 113 | |
114 | #include <net/bpf.h> | | 114 | #include <net/bpf.h> |
115 | | | 115 | |
116 | #include <netinet/in.h> /* XXX for struct ip */ | | 116 | #include <netinet/in.h> /* XXX for struct ip */ |
117 | #include <netinet/in_systm.h> /* XXX for struct ip */ | | 117 | #include <netinet/in_systm.h> /* XXX for struct ip */ |
118 | #include <netinet/ip.h> /* XXX for struct ip */ | | 118 | #include <netinet/ip.h> /* XXX for struct ip */ |
119 | #include <netinet/ip6.h> /* XXX for struct ip6_hdr */ | | 119 | #include <netinet/ip6.h> /* XXX for struct ip6_hdr */ |
120 | #include <netinet/tcp.h> /* XXX for struct tcphdr */ | | 120 | #include <netinet/tcp.h> /* XXX for struct tcphdr */ |
121 | | | 121 | |
122 | #include <sys/bus.h> | | 122 | #include <sys/bus.h> |
123 | #include <sys/intr.h> | | 123 | #include <sys/intr.h> |
124 | #include <machine/endian.h> | | 124 | #include <machine/endian.h> |
125 | | | 125 | |
126 | #include <dev/mii/mii.h> | | 126 | #include <dev/mii/mii.h> |
127 | #include <dev/mii/miivar.h> | | 127 | #include <dev/mii/miivar.h> |
128 | #include <dev/mii/miidevs.h> | | 128 | #include <dev/mii/miidevs.h> |
129 | #include <dev/mii/mii_bitbang.h> | | 129 | #include <dev/mii/mii_bitbang.h> |
130 | #include <dev/mii/ikphyreg.h> | | 130 | #include <dev/mii/ikphyreg.h> |
131 | #include <dev/mii/igphyreg.h> | | 131 | #include <dev/mii/igphyreg.h> |
132 | #include <dev/mii/igphyvar.h> | | 132 | #include <dev/mii/igphyvar.h> |
133 | #include <dev/mii/inbmphyreg.h> | | 133 | #include <dev/mii/inbmphyreg.h> |
134 | | | 134 | |
135 | #include <dev/pci/pcireg.h> | | 135 | #include <dev/pci/pcireg.h> |
136 | #include <dev/pci/pcivar.h> | | 136 | #include <dev/pci/pcivar.h> |
137 | #include <dev/pci/pcidevs.h> | | 137 | #include <dev/pci/pcidevs.h> |
138 | | | 138 | |
139 | #include <dev/pci/if_wmreg.h> | | 139 | #include <dev/pci/if_wmreg.h> |
140 | #include <dev/pci/if_wmvar.h> | | 140 | #include <dev/pci/if_wmvar.h> |
141 | | | 141 | |
142 | #ifdef WM_DEBUG | | 142 | #ifdef WM_DEBUG |
143 | #define WM_DEBUG_LINK 0x01 | | 143 | #define WM_DEBUG_LINK 0x01 |
144 | #define WM_DEBUG_TX 0x02 | | 144 | #define WM_DEBUG_TX 0x02 |
145 | #define WM_DEBUG_RX 0x04 | | 145 | #define WM_DEBUG_RX 0x04 |
146 | #define WM_DEBUG_GMII 0x08 | | 146 | #define WM_DEBUG_GMII 0x08 |
147 | #define WM_DEBUG_MANAGE 0x10 | | 147 | #define WM_DEBUG_MANAGE 0x10 |
148 | #define WM_DEBUG_NVM 0x20 | | 148 | #define WM_DEBUG_NVM 0x20 |
149 | int wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII | | 149 | int wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII |
150 | | WM_DEBUG_MANAGE | WM_DEBUG_NVM; | | 150 | | WM_DEBUG_MANAGE | WM_DEBUG_NVM; |
151 | | | 151 | |
152 | #define DPRINTF(x, y) if (wm_debug & (x)) printf y | | 152 | #define DPRINTF(x, y) if (wm_debug & (x)) printf y |
153 | #else | | 153 | #else |
154 | #define DPRINTF(x, y) /* nothing */ | | 154 | #define DPRINTF(x, y) /* nothing */ |
155 | #endif /* WM_DEBUG */ | | 155 | #endif /* WM_DEBUG */ |
156 | | | 156 | |
157 | #ifdef NET_MPSAFE | | 157 | #ifdef NET_MPSAFE |
158 | #define WM_MPSAFE 1 | | 158 | #define WM_MPSAFE 1 |
159 | #endif | | 159 | #endif |
160 | | | 160 | |
161 | #ifdef __HAVE_PCI_MSI_MSIX | | 161 | #ifdef __HAVE_PCI_MSI_MSIX |
162 | #define WM_MSI_MSIX 1 /* Enable by default */ | | 162 | #define WM_MSI_MSIX 1 /* Enable by default */ |
163 | #endif | | 163 | #endif |
164 | | | 164 | |
165 | /* | | 165 | /* |
166 | * This device driver's max interrupt numbers. | | 166 | * This device driver's max interrupt numbers. |
167 | */ | | 167 | */ |
168 | #define WM_MAX_NTXINTR 16 | | 168 | #define WM_MAX_NTXINTR 16 |
169 | #define WM_MAX_NRXINTR 16 | | 169 | #define WM_MAX_NRXINTR 16 |
170 | #define WM_MAX_NINTR (WM_MAX_NTXINTR + WM_MAX_NRXINTR + 1) | | 170 | #define WM_MAX_NINTR (WM_MAX_NTXINTR + WM_MAX_NRXINTR + 1) |
171 | | | 171 | |
172 | /* | | 172 | /* |
173 | * Transmit descriptor list size. Due to errata, we can only have | | 173 | * Transmit descriptor list size. Due to errata, we can only have |
174 | * 256 hardware descriptors in the ring on < 82544, but we use 4096 | | 174 | * 256 hardware descriptors in the ring on < 82544, but we use 4096 |
175 | * on >= 82544. We tell the upper layers that they can queue a lot | | 175 | * on >= 82544. We tell the upper layers that they can queue a lot |
176 | * of packets, and we go ahead and manage up to 64 (16 for the i82547) | | 176 | * of packets, and we go ahead and manage up to 64 (16 for the i82547) |
177 | * of them at a time. | | 177 | * of them at a time. |
178 | * | | 178 | * |
179 | * We allow up to 256 (!) DMA segments per packet. Pathological packet | | 179 | * We allow up to 256 (!) DMA segments per packet. Pathological packet |
180 | * chains containing many small mbufs have been observed in zero-copy | | 180 | * chains containing many small mbufs have been observed in zero-copy |
181 | * situations with jumbo frames. | | 181 | * situations with jumbo frames. |
182 | */ | | 182 | */ |
183 | #define WM_NTXSEGS 256 | | 183 | #define WM_NTXSEGS 256 |
184 | #define WM_IFQUEUELEN 256 | | 184 | #define WM_IFQUEUELEN 256 |
185 | #define WM_TXQUEUELEN_MAX 64 | | 185 | #define WM_TXQUEUELEN_MAX 64 |
186 | #define WM_TXQUEUELEN_MAX_82547 16 | | 186 | #define WM_TXQUEUELEN_MAX_82547 16 |
187 | #define WM_TXQUEUELEN(txq) ((txq)->txq_num) | | 187 | #define WM_TXQUEUELEN(txq) ((txq)->txq_num) |
188 | #define WM_TXQUEUELEN_MASK(txq) (WM_TXQUEUELEN(txq) - 1) | | 188 | #define WM_TXQUEUELEN_MASK(txq) (WM_TXQUEUELEN(txq) - 1) |
189 | #define WM_TXQUEUE_GC(txq) (WM_TXQUEUELEN(txq) / 8) | | 189 | #define WM_TXQUEUE_GC(txq) (WM_TXQUEUELEN(txq) / 8) |
190 | #define WM_NTXDESC_82542 256 | | 190 | #define WM_NTXDESC_82542 256 |
191 | #define WM_NTXDESC_82544 4096 | | 191 | #define WM_NTXDESC_82544 4096 |
192 | #define WM_NTXDESC(txq) ((txq)->txq_ndesc) | | 192 | #define WM_NTXDESC(txq) ((txq)->txq_ndesc) |
193 | #define WM_NTXDESC_MASK(txq) (WM_NTXDESC(txq) - 1) | | 193 | #define WM_NTXDESC_MASK(txq) (WM_NTXDESC(txq) - 1) |
194 | #define WM_TXDESCSIZE(txq) (WM_NTXDESC(txq) * sizeof(wiseman_txdesc_t)) | | 194 | #define WM_TXDESCSIZE(txq) (WM_NTXDESC(txq) * sizeof(wiseman_txdesc_t)) |
195 | #define WM_NEXTTX(txq, x) (((x) + 1) & WM_NTXDESC_MASK(txq)) | | 195 | #define WM_NEXTTX(txq, x) (((x) + 1) & WM_NTXDESC_MASK(txq)) |
196 | #define WM_NEXTTXS(txq, x) (((x) + 1) & WM_TXQUEUELEN_MASK(txq)) | | 196 | #define WM_NEXTTXS(txq, x) (((x) + 1) & WM_TXQUEUELEN_MASK(txq)) |
197 | | | 197 | |
198 | #define WM_MAXTXDMA (2 * round_page(IP_MAXPACKET)) /* for TSO */ | | 198 | #define WM_MAXTXDMA (2 * round_page(IP_MAXPACKET)) /* for TSO */ |
199 | | | 199 | |
200 | /* | | 200 | /* |
201 | * Receive descriptor list size. We have one Rx buffer for normal | | 201 | * Receive descriptor list size. We have one Rx buffer for normal |
202 | * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized | | 202 | * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized |
203 | * packet. We allocate 256 receive descriptors, each with a 2k | | 203 | * packet. We allocate 256 receive descriptors, each with a 2k |
204 | * buffer (MCLBYTES), which gives us room for 50 jumbo packets. | | 204 | * buffer (MCLBYTES), which gives us room for 50 jumbo packets. |
205 | */ | | 205 | */ |
206 | #define WM_NRXDESC 256 | | 206 | #define WM_NRXDESC 256 |
207 | #define WM_NRXDESC_MASK (WM_NRXDESC - 1) | | 207 | #define WM_NRXDESC_MASK (WM_NRXDESC - 1) |
208 | #define WM_NEXTRX(x) (((x) + 1) & WM_NRXDESC_MASK) | | 208 | #define WM_NEXTRX(x) (((x) + 1) & WM_NRXDESC_MASK) |
209 | #define WM_PREVRX(x) (((x) - 1) & WM_NRXDESC_MASK) | | 209 | #define WM_PREVRX(x) (((x) - 1) & WM_NRXDESC_MASK) |
210 | | | 210 | |
211 | typedef union txdescs { | | 211 | typedef union txdescs { |
212 | wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544]; | | 212 | wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544]; |
213 | nq_txdesc_t sctxu_nq_txdescs[WM_NTXDESC_82544]; | | 213 | nq_txdesc_t sctxu_nq_txdescs[WM_NTXDESC_82544]; |
214 | } txdescs_t; | | 214 | } txdescs_t; |
215 | | | 215 | |
216 | #define WM_CDTXOFF(x) (sizeof(wiseman_txdesc_t) * x) | | 216 | #define WM_CDTXOFF(x) (sizeof(wiseman_txdesc_t) * x) |
217 | #define WM_CDRXOFF(x) (sizeof(wiseman_rxdesc_t) * x) | | 217 | #define WM_CDRXOFF(x) (sizeof(wiseman_rxdesc_t) * x) |
218 | | | 218 | |
219 | /* | | 219 | /* |
220 | * Software state for transmit jobs. | | 220 | * Software state for transmit jobs. |
221 | */ | | 221 | */ |
222 | struct wm_txsoft { | | 222 | struct wm_txsoft { |
223 | struct mbuf *txs_mbuf; /* head of our mbuf chain */ | | 223 | struct mbuf *txs_mbuf; /* head of our mbuf chain */ |
224 | bus_dmamap_t txs_dmamap; /* our DMA map */ | | 224 | bus_dmamap_t txs_dmamap; /* our DMA map */ |
225 | int txs_firstdesc; /* first descriptor in packet */ | | 225 | int txs_firstdesc; /* first descriptor in packet */ |
226 | int txs_lastdesc; /* last descriptor in packet */ | | 226 | int txs_lastdesc; /* last descriptor in packet */ |
227 | int txs_ndesc; /* # of descriptors used */ | | 227 | int txs_ndesc; /* # of descriptors used */ |
228 | }; | | 228 | }; |
229 | | | 229 | |
230 | /* | | 230 | /* |
231 | * Software state for receive buffers. Each descriptor gets a | | 231 | * Software state for receive buffers. Each descriptor gets a |
232 | * 2k (MCLBYTES) buffer and a DMA map. For packets which fill | | 232 | * 2k (MCLBYTES) buffer and a DMA map. For packets which fill |
233 | * more than one buffer, we chain them together. | | 233 | * more than one buffer, we chain them together. |
234 | */ | | 234 | */ |
235 | struct wm_rxsoft { | | 235 | struct wm_rxsoft { |
236 | struct mbuf *rxs_mbuf; /* head of our mbuf chain */ | | 236 | struct mbuf *rxs_mbuf; /* head of our mbuf chain */ |
237 | bus_dmamap_t rxs_dmamap; /* our DMA map */ | | 237 | bus_dmamap_t rxs_dmamap; /* our DMA map */ |
238 | }; | | 238 | }; |
239 | | | 239 | |
240 | #define WM_LINKUP_TIMEOUT 50 | | 240 | #define WM_LINKUP_TIMEOUT 50 |
241 | | | 241 | |
242 | static uint16_t swfwphysem[] = { | | 242 | static uint16_t swfwphysem[] = { |
243 | SWFW_PHY0_SM, | | 243 | SWFW_PHY0_SM, |
244 | SWFW_PHY1_SM, | | 244 | SWFW_PHY1_SM, |
245 | SWFW_PHY2_SM, | | 245 | SWFW_PHY2_SM, |
246 | SWFW_PHY3_SM | | 246 | SWFW_PHY3_SM |
247 | }; | | 247 | }; |
248 | | | 248 | |
249 | static const uint32_t wm_82580_rxpbs_table[] = { | | 249 | static const uint32_t wm_82580_rxpbs_table[] = { |
250 | 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 | | 250 | 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 |
251 | }; | | 251 | }; |
252 | | | 252 | |
253 | struct wm_softc; | | 253 | struct wm_softc; |
254 | | | 254 | |
255 | struct wm_txqueue { | | 255 | struct wm_txqueue { |
256 | kmutex_t *txq_lock; /* lock for tx operations */ | | 256 | kmutex_t *txq_lock; /* lock for tx operations */ |
257 | | | 257 | |
258 | struct wm_softc *txq_sc; | | 258 | struct wm_softc *txq_sc; |
259 | | | 259 | |
260 | int txq_id; /* index of transmit queues */ | | 260 | int txq_id; /* index of transmit queues */ |
261 | int txq_intr_idx; /* index of MSI-X tables */ | | 261 | int txq_intr_idx; /* index of MSI-X tables */ |
262 | | | 262 | |
263 | /* Software state for the transmit descriptors. */ | | 263 | /* Software state for the transmit descriptors. */ |
264 | int txq_num; /* must be a power of two */ | | 264 | int txq_num; /* must be a power of two */ |
265 | struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX]; | | 265 | struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX]; |
266 | | | 266 | |
267 | /* TX control data structures. */ | | 267 | /* TX control data structures. */ |
268 | int txq_ndesc; /* must be a power of two */ | | 268 | int txq_ndesc; /* must be a power of two */ |
269 | txdescs_t *txq_descs_u; | | 269 | txdescs_t *txq_descs_u; |
270 | bus_dmamap_t txq_desc_dmamap; /* control data DMA map */ | | 270 | bus_dmamap_t txq_desc_dmamap; /* control data DMA map */ |
271 | bus_dma_segment_t txq_desc_seg; /* control data segment */ | | 271 | bus_dma_segment_t txq_desc_seg; /* control data segment */ |
272 | int txq_desc_rseg; /* real number of control segment */ | | 272 | int txq_desc_rseg; /* real number of control segment */ |
273 | size_t txq_desc_size; /* control data size */ | | 273 | size_t txq_desc_size; /* control data size */ |
274 | #define txq_desc_dma txq_desc_dmamap->dm_segs[0].ds_addr | | 274 | #define txq_desc_dma txq_desc_dmamap->dm_segs[0].ds_addr |
275 | #define txq_descs txq_descs_u->sctxu_txdescs | | 275 | #define txq_descs txq_descs_u->sctxu_txdescs |
276 | #define txq_nq_descs txq_descs_u->sctxu_nq_txdescs | | 276 | #define txq_nq_descs txq_descs_u->sctxu_nq_txdescs |
277 | | | 277 | |
278 | bus_addr_t txq_tdt_reg; /* offset of TDT register */ | | 278 | bus_addr_t txq_tdt_reg; /* offset of TDT register */ |
279 | | | 279 | |
280 | int txq_free; /* number of free Tx descriptors */ | | 280 | int txq_free; /* number of free Tx descriptors */ |
281 | int txq_next; /* next ready Tx descriptor */ | | 281 | int txq_next; /* next ready Tx descriptor */ |
282 | | | 282 | |
283 | int txq_sfree; /* number of free Tx jobs */ | | 283 | int txq_sfree; /* number of free Tx jobs */ |
284 | int txq_snext; /* next free Tx job */ | | 284 | int txq_snext; /* next free Tx job */ |
285 | int txq_sdirty; /* dirty Tx jobs */ | | 285 | int txq_sdirty; /* dirty Tx jobs */ |
286 | | | 286 | |
287 | /* These 4 variables are used only on the 82547. */ | | 287 | /* These 4 variables are used only on the 82547. */ |
288 | int txq_fifo_size; /* Tx FIFO size */ | | 288 | int txq_fifo_size; /* Tx FIFO size */ |
289 | int txq_fifo_head; /* current head of FIFO */ | | 289 | int txq_fifo_head; /* current head of FIFO */ |
290 | uint32_t txq_fifo_addr; /* internal address of start of FIFO */ | | 290 | uint32_t txq_fifo_addr; /* internal address of start of FIFO */ |
291 | int txq_fifo_stall; /* Tx FIFO is stalled */ | | 291 | int txq_fifo_stall; /* Tx FIFO is stalled */ |
292 | | | 292 | |
293 | /* XXX which event counter is required? */ | | 293 | /* XXX which event counter is required? */ |
294 | }; | | 294 | }; |
295 | | | 295 | |
296 | struct wm_rxqueue { | | 296 | struct wm_rxqueue { |
297 | kmutex_t *rxq_lock; /* lock for rx operations */ | | 297 | kmutex_t *rxq_lock; /* lock for rx operations */ |
298 | | | 298 | |
299 | struct wm_softc *rxq_sc; | | 299 | struct wm_softc *rxq_sc; |
300 | | | 300 | |
301 | int rxq_id; /* index of receive queues */ | | 301 | int rxq_id; /* index of receive queues */ |
302 | int rxq_intr_idx; /* index of MSI-X tables */ | | 302 | int rxq_intr_idx; /* index of MSI-X tables */ |
303 | | | 303 | |
304 | /* Software state for the receive descriptors. */ | | 304 | /* Software state for the receive descriptors. */ |
305 | wiseman_rxdesc_t *rxq_descs; | | 305 | wiseman_rxdesc_t *rxq_descs; |
306 | | | 306 | |
307 | /* RX control data structures. */ | | 307 | /* RX control data structures. */ |
308 | struct wm_rxsoft rxq_soft[WM_NRXDESC]; | | 308 | struct wm_rxsoft rxq_soft[WM_NRXDESC]; |
309 | bus_dmamap_t rxq_desc_dmamap; /* control data DMA map */ | | 309 | bus_dmamap_t rxq_desc_dmamap; /* control data DMA map */ |
310 | bus_dma_segment_t rxq_desc_seg; /* control data segment */ | | 310 | bus_dma_segment_t rxq_desc_seg; /* control data segment */ |
311 | int rxq_desc_rseg; /* real number of control segment */ | | 311 | int rxq_desc_rseg; /* real number of control segment */ |
312 | size_t rxq_desc_size; /* control data size */ | | 312 | size_t rxq_desc_size; /* control data size */ |
313 | #define rxq_desc_dma rxq_desc_dmamap->dm_segs[0].ds_addr | | 313 | #define rxq_desc_dma rxq_desc_dmamap->dm_segs[0].ds_addr |
314 | | | 314 | |
315 | bus_addr_t rxq_rdt_reg; /* offset of RDT register */ | | 315 | bus_addr_t rxq_rdt_reg; /* offset of RDT register */ |
316 | | | 316 | |
317 | int rxq_ptr; /* next ready Rx descriptor/queue ent */ | | 317 | int rxq_ptr; /* next ready Rx descriptor/queue ent */ |
318 | int rxq_discard; | | 318 | int rxq_discard; |
319 | int rxq_len; | | 319 | int rxq_len; |
320 | struct mbuf *rxq_head; | | 320 | struct mbuf *rxq_head; |
321 | struct mbuf *rxq_tail; | | 321 | struct mbuf *rxq_tail; |
322 | struct mbuf **rxq_tailp; | | 322 | struct mbuf **rxq_tailp; |
323 | | | 323 | |
324 | /* XXX which event counter is required? */ | | 324 | /* XXX which event counter is required? */ |
325 | }; | | 325 | }; |
326 | | | 326 | |
327 | /* | | 327 | /* |
328 | * Software state per device. | | 328 | * Software state per device. |
329 | */ | | 329 | */ |
330 | struct wm_softc { | | 330 | struct wm_softc { |
331 | device_t sc_dev; /* generic device information */ | | 331 | device_t sc_dev; /* generic device information */ |
332 | bus_space_tag_t sc_st; /* bus space tag */ | | 332 | bus_space_tag_t sc_st; /* bus space tag */ |
333 | bus_space_handle_t sc_sh; /* bus space handle */ | | 333 | bus_space_handle_t sc_sh; /* bus space handle */ |
334 | bus_size_t sc_ss; /* bus space size */ | | 334 | bus_size_t sc_ss; /* bus space size */ |
335 | bus_space_tag_t sc_iot; /* I/O space tag */ | | 335 | bus_space_tag_t sc_iot; /* I/O space tag */ |
336 | bus_space_handle_t sc_ioh; /* I/O space handle */ | | 336 | bus_space_handle_t sc_ioh; /* I/O space handle */ |
337 | bus_size_t sc_ios; /* I/O space size */ | | 337 | bus_size_t sc_ios; /* I/O space size */ |
338 | bus_space_tag_t sc_flasht; /* flash registers space tag */ | | 338 | bus_space_tag_t sc_flasht; /* flash registers space tag */ |
339 | bus_space_handle_t sc_flashh; /* flash registers space handle */ | | 339 | bus_space_handle_t sc_flashh; /* flash registers space handle */ |
340 | bus_size_t sc_flashs; /* flash registers space size */ | | 340 | bus_size_t sc_flashs; /* flash registers space size */ |
341 | bus_dma_tag_t sc_dmat; /* bus DMA tag */ | | 341 | bus_dma_tag_t sc_dmat; /* bus DMA tag */ |
342 | | | 342 | |
343 | struct ethercom sc_ethercom; /* ethernet common data */ | | 343 | struct ethercom sc_ethercom; /* ethernet common data */ |
344 | struct mii_data sc_mii; /* MII/media information */ | | 344 | struct mii_data sc_mii; /* MII/media information */ |
345 | | | 345 | |
346 | pci_chipset_tag_t sc_pc; | | 346 | pci_chipset_tag_t sc_pc; |
347 | pcitag_t sc_pcitag; | | 347 | pcitag_t sc_pcitag; |
348 | int sc_bus_speed; /* PCI/PCIX bus speed */ | | 348 | int sc_bus_speed; /* PCI/PCIX bus speed */ |
349 | int sc_pcixe_capoff; /* PCI[Xe] capability reg offset */ | | 349 | int sc_pcixe_capoff; /* PCI[Xe] capability reg offset */ |
350 | | | 350 | |
351 | uint16_t sc_pcidevid; /* PCI device ID */ | | 351 | uint16_t sc_pcidevid; /* PCI device ID */ |
352 | wm_chip_type sc_type; /* MAC type */ | | 352 | wm_chip_type sc_type; /* MAC type */ |
353 | int sc_rev; /* MAC revision */ | | 353 | int sc_rev; /* MAC revision */ |
354 | wm_phy_type sc_phytype; /* PHY type */ | | 354 | wm_phy_type sc_phytype; /* PHY type */ |
355 | uint32_t sc_mediatype; /* Media type (Copper, Fiber, SERDES)*/ | | 355 | uint32_t sc_mediatype; /* Media type (Copper, Fiber, SERDES)*/ |
356 | #define WM_MEDIATYPE_UNKNOWN 0x00 | | 356 | #define WM_MEDIATYPE_UNKNOWN 0x00 |
357 | #define WM_MEDIATYPE_FIBER 0x01 | | 357 | #define WM_MEDIATYPE_FIBER 0x01 |
358 | #define WM_MEDIATYPE_COPPER 0x02 | | 358 | #define WM_MEDIATYPE_COPPER 0x02 |
359 | #define WM_MEDIATYPE_SERDES 0x03 /* Internal SERDES */ | | 359 | #define WM_MEDIATYPE_SERDES 0x03 /* Internal SERDES */ |
360 | int sc_funcid; /* unit number of the chip (0 to 3) */ | | 360 | int sc_funcid; /* unit number of the chip (0 to 3) */ |
361 | int sc_flags; /* flags; see below */ | | 361 | int sc_flags; /* flags; see below */ |
362 | int sc_if_flags; /* last if_flags */ | | 362 | int sc_if_flags; /* last if_flags */ |
363 | int sc_flowflags; /* 802.3x flow control flags */ | | 363 | int sc_flowflags; /* 802.3x flow control flags */ |
364 | int sc_align_tweak; | | 364 | int sc_align_tweak; |
365 | | | 365 | |
366 | void *sc_ihs[WM_MAX_NINTR]; /* | | 366 | void *sc_ihs[WM_MAX_NINTR]; /* |
367 | * interrupt cookie. | | 367 | * interrupt cookie. |
368 | * legacy and msi use sc_ihs[0]. | | 368 | * legacy and msi use sc_ihs[0]. |
369 | */ | | 369 | */ |
370 | pci_intr_handle_t *sc_intrs; /* legacy and msi use sc_intrs[0] */ | | 370 | pci_intr_handle_t *sc_intrs; /* legacy and msi use sc_intrs[0] */ |
371 | int sc_nintrs; /* number of interrupts */ | | 371 | int sc_nintrs; /* number of interrupts */ |
372 | | | 372 | |
373 | int sc_link_intr_idx; /* index of MSI-X tables */ | | 373 | int sc_link_intr_idx; /* index of MSI-X tables */ |
374 | | | 374 | |
375 | callout_t sc_tick_ch; /* tick callout */ | | 375 | callout_t sc_tick_ch; /* tick callout */ |
376 | bool sc_stopping; | | 376 | bool sc_stopping; |
377 | | | 377 | |
378 | int sc_nvm_ver_major; | | 378 | int sc_nvm_ver_major; |
379 | int sc_nvm_ver_minor; | | 379 | int sc_nvm_ver_minor; |
380 | int sc_nvm_ver_build; | | 380 | int sc_nvm_ver_build; |
381 | int sc_nvm_addrbits; /* NVM address bits */ | | 381 | int sc_nvm_addrbits; /* NVM address bits */ |
382 | unsigned int sc_nvm_wordsize; /* NVM word size */ | | 382 | unsigned int sc_nvm_wordsize; /* NVM word size */ |
383 | int sc_ich8_flash_base; | | 383 | int sc_ich8_flash_base; |
384 | int sc_ich8_flash_bank_size; | | 384 | int sc_ich8_flash_bank_size; |
385 | int sc_nvm_k1_enabled; | | 385 | int sc_nvm_k1_enabled; |
386 | | | 386 | |
387 | int sc_ntxqueues; | | 387 | int sc_ntxqueues; |
388 | struct wm_txqueue *sc_txq; | | 388 | struct wm_txqueue *sc_txq; |
389 | | | 389 | |
390 | int sc_nrxqueues; | | 390 | int sc_nrxqueues; |
391 | struct wm_rxqueue *sc_rxq; | | 391 | struct wm_rxqueue *sc_rxq; |
392 | | | 392 | |
393 | #ifdef WM_EVENT_COUNTERS | | 393 | #ifdef WM_EVENT_COUNTERS |
394 | /* Event counters. */ | | 394 | /* Event counters. */ |
395 | struct evcnt sc_ev_txsstall; /* Tx stalled due to no txs */ | | 395 | struct evcnt sc_ev_txsstall; /* Tx stalled due to no txs */ |
396 | struct evcnt sc_ev_txdstall; /* Tx stalled due to no txd */ | | 396 | struct evcnt sc_ev_txdstall; /* Tx stalled due to no txd */ |
397 | struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */ | | 397 | struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */ |
398 | struct evcnt sc_ev_txdw; /* Tx descriptor interrupts */ | | 398 | struct evcnt sc_ev_txdw; /* Tx descriptor interrupts */ |
399 | struct evcnt sc_ev_txqe; /* Tx queue empty interrupts */ | | 399 | struct evcnt sc_ev_txqe; /* Tx queue empty interrupts */ |
400 | struct evcnt sc_ev_rxintr; /* Rx interrupts */ | | 400 | struct evcnt sc_ev_rxintr; /* Rx interrupts */ |
401 | struct evcnt sc_ev_linkintr; /* Link interrupts */ | | 401 | struct evcnt sc_ev_linkintr; /* Link interrupts */ |
402 | | | 402 | |
403 | struct evcnt sc_ev_rxipsum; /* IP checksums checked in-bound */ | | 403 | struct evcnt sc_ev_rxipsum; /* IP checksums checked in-bound */ |
404 | struct evcnt sc_ev_rxtusum; /* TCP/UDP cksums checked in-bound */ | | 404 | struct evcnt sc_ev_rxtusum; /* TCP/UDP cksums checked in-bound */ |
405 | struct evcnt sc_ev_txipsum; /* IP checksums comp. out-bound */ | | 405 | struct evcnt sc_ev_txipsum; /* IP checksums comp. out-bound */ |
406 | struct evcnt sc_ev_txtusum; /* TCP/UDP cksums comp. out-bound */ | | 406 | struct evcnt sc_ev_txtusum; /* TCP/UDP cksums comp. out-bound */ |
407 | struct evcnt sc_ev_txtusum6; /* TCP/UDP v6 cksums comp. out-bound */ | | 407 | struct evcnt sc_ev_txtusum6; /* TCP/UDP v6 cksums comp. out-bound */ |
408 | struct evcnt sc_ev_txtso; /* TCP seg offload out-bound (IPv4) */ | | 408 | struct evcnt sc_ev_txtso; /* TCP seg offload out-bound (IPv4) */ |
409 | struct evcnt sc_ev_txtso6; /* TCP seg offload out-bound (IPv6) */ | | 409 | struct evcnt sc_ev_txtso6; /* TCP seg offload out-bound (IPv6) */ |
410 | struct evcnt sc_ev_txtsopain; /* painful header manip. for TSO */ | | 410 | struct evcnt sc_ev_txtsopain; /* painful header manip. for TSO */ |
411 | | | 411 | |
412 | struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */ | | 412 | struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */ |
413 | struct evcnt sc_ev_txdrop; /* Tx packets dropped (too many segs) */ | | 413 | struct evcnt sc_ev_txdrop; /* Tx packets dropped (too many segs) */ |
414 | | | 414 | |
415 | struct evcnt sc_ev_tu; /* Tx underrun */ | | 415 | struct evcnt sc_ev_tu; /* Tx underrun */ |
416 | | | 416 | |
417 | struct evcnt sc_ev_tx_xoff; /* Tx PAUSE(!0) frames */ | | 417 | struct evcnt sc_ev_tx_xoff; /* Tx PAUSE(!0) frames */ |
418 | struct evcnt sc_ev_tx_xon; /* Tx PAUSE(0) frames */ | | 418 | struct evcnt sc_ev_tx_xon; /* Tx PAUSE(0) frames */ |
419 | struct evcnt sc_ev_rx_xoff; /* Rx PAUSE(!0) frames */ | | 419 | struct evcnt sc_ev_rx_xoff; /* Rx PAUSE(!0) frames */ |
420 | struct evcnt sc_ev_rx_xon; /* Rx PAUSE(0) frames */ | | 420 | struct evcnt sc_ev_rx_xon; /* Rx PAUSE(0) frames */ |
421 | struct evcnt sc_ev_rx_macctl; /* Rx Unsupported */ | | 421 | struct evcnt sc_ev_rx_macctl; /* Rx Unsupported */ |
422 | #endif /* WM_EVENT_COUNTERS */ | | 422 | #endif /* WM_EVENT_COUNTERS */ |
423 | | | 423 | |
	/* This variable is used only on the 82547. */
425 | callout_t sc_txfifo_ch; /* Tx FIFO stall work-around timer */ | | 425 | callout_t sc_txfifo_ch; /* Tx FIFO stall work-around timer */ |
426 | | | 426 | |
427 | uint32_t sc_ctrl; /* prototype CTRL register */ | | 427 | uint32_t sc_ctrl; /* prototype CTRL register */ |
428 | #if 0 | | 428 | #if 0 |
429 | uint32_t sc_ctrl_ext; /* prototype CTRL_EXT register */ | | 429 | uint32_t sc_ctrl_ext; /* prototype CTRL_EXT register */ |
430 | #endif | | 430 | #endif |
431 | uint32_t sc_icr; /* prototype interrupt bits */ | | 431 | uint32_t sc_icr; /* prototype interrupt bits */ |
432 | uint32_t sc_itr; /* prototype intr throttling reg */ | | 432 | uint32_t sc_itr; /* prototype intr throttling reg */ |
433 | uint32_t sc_tctl; /* prototype TCTL register */ | | 433 | uint32_t sc_tctl; /* prototype TCTL register */ |
434 | uint32_t sc_rctl; /* prototype RCTL register */ | | 434 | uint32_t sc_rctl; /* prototype RCTL register */ |
435 | uint32_t sc_txcw; /* prototype TXCW register */ | | 435 | uint32_t sc_txcw; /* prototype TXCW register */ |
436 | uint32_t sc_tipg; /* prototype TIPG register */ | | 436 | uint32_t sc_tipg; /* prototype TIPG register */ |
437 | uint32_t sc_fcrtl; /* prototype FCRTL register */ | | 437 | uint32_t sc_fcrtl; /* prototype FCRTL register */ |
438 | uint32_t sc_pba; /* prototype PBA register */ | | 438 | uint32_t sc_pba; /* prototype PBA register */ |
439 | | | 439 | |
440 | int sc_tbi_linkup; /* TBI link status */ | | 440 | int sc_tbi_linkup; /* TBI link status */ |
441 | int sc_tbi_serdes_anegticks; /* autonegotiation ticks */ | | 441 | int sc_tbi_serdes_anegticks; /* autonegotiation ticks */ |
442 | int sc_tbi_serdes_ticks; /* tbi ticks */ | | 442 | int sc_tbi_serdes_ticks; /* tbi ticks */ |
443 | | | 443 | |
444 | int sc_mchash_type; /* multicast filter offset */ | | 444 | int sc_mchash_type; /* multicast filter offset */ |
445 | | | 445 | |
446 | krndsource_t rnd_source; /* random source */ | | 446 | krndsource_t rnd_source; /* random source */ |
447 | | | 447 | |
448 | kmutex_t *sc_core_lock; /* lock for softc operations */ | | 448 | kmutex_t *sc_core_lock; /* lock for softc operations */ |
449 | }; | | 449 | }; |
450 | | | 450 | |
/*
 * Tx/Rx/core lock helpers.  Each lock pointer may be NULL (the macros
 * then degenerate to no-ops), so every operation is guarded by a test.
 *
 * The enter/exit macros are wrapped in do { } while (0): a bare
 * "if (x) mutex_enter(x)" expansion would swallow an "else" that
 * follows the macro at a use site (dangling-else hazard).  The
 * *_LOCKED() macros are expressions, for use in KASSERT()s.
 */
#define	WM_TX_LOCK(_txq)	do {					\
		if ((_txq)->txq_lock)					\
			mutex_enter((_txq)->txq_lock);			\
	} while (/*CONSTCOND*/0)
#define	WM_TX_UNLOCK(_txq)	do {					\
		if ((_txq)->txq_lock)					\
			mutex_exit((_txq)->txq_lock);			\
	} while (/*CONSTCOND*/0)
#define	WM_TX_LOCKED(_txq)	(!(_txq)->txq_lock || mutex_owned((_txq)->txq_lock))
#define	WM_RX_LOCK(_rxq)	do {					\
		if ((_rxq)->rxq_lock)					\
			mutex_enter((_rxq)->rxq_lock);			\
	} while (/*CONSTCOND*/0)
#define	WM_RX_UNLOCK(_rxq)	do {					\
		if ((_rxq)->rxq_lock)					\
			mutex_exit((_rxq)->rxq_lock);			\
	} while (/*CONSTCOND*/0)
#define	WM_RX_LOCKED(_rxq)	(!(_rxq)->rxq_lock || mutex_owned((_rxq)->rxq_lock))
#define	WM_CORE_LOCK(_sc)	do {					\
		if ((_sc)->sc_core_lock)				\
			mutex_enter((_sc)->sc_core_lock);		\
	} while (/*CONSTCOND*/0)
#define	WM_CORE_UNLOCK(_sc)	do {					\
		if ((_sc)->sc_core_lock)				\
			mutex_exit((_sc)->sc_core_lock);		\
	} while (/*CONSTCOND*/0)
#define	WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
460 | | | 460 | |
/*
 * Flags passed to callout_init(): request an MP-safe callout only when
 * the driver itself is built MP-safe.
 */
#ifdef WM_MPSAFE
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif
466 | | | 466 | |
/*
 * Reset the receive-queue mbuf chain to empty: point the tail pointer
 * back at the head, terminate the chain, and zero the accumulated
 * length of the packet being assembled.
 */
#define WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

/*
 * Append mbuf (m) to the tail of the receive chain and advance the
 * tail pointer to m's m_next link.
 */
#define WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
479 | | | 479 | |
/*
 * Event-counter helpers; compile away entirely when the driver is
 * built without WM_EVENT_COUNTERS.
 */
#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif
487 | | | 487 | |
/* 32-bit access to the device's memory-mapped CSR space. */
#define CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
/* Read back STATUS (result discarded) to flush preceding writes. */
#define CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
494 | | | 494 | |
/* 32-bit and 16-bit access to the ICH8-family flash register space. */
#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
504 | | | 504 | |
/* Bus (DMA) address of Tx/Rx descriptor (x) in the control-data area. */
#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))

/*
 * Low/high 32-bit halves of a descriptor bus address, for programming
 * the paired base-address registers.  The high half is 0 when
 * bus_addr_t is narrower than 64 bits.
 */
#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
517 | | | 517 | |
518 | /* | | 518 | /* |
519 | * Register read/write functions. | | 519 | * Register read/write functions. |
520 | * Other than CSR_{READ|WRITE}(). | | 520 | * Other than CSR_{READ|WRITE}(). |
521 | */ | | 521 | */ |
522 | #if 0 | | 522 | #if 0 |
523 | static inline uint32_t wm_io_read(struct wm_softc *, int); | | 523 | static inline uint32_t wm_io_read(struct wm_softc *, int); |
524 | #endif | | 524 | #endif |
525 | static inline void wm_io_write(struct wm_softc *, int, uint32_t); | | 525 | static inline void wm_io_write(struct wm_softc *, int, uint32_t); |
526 | static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t, | | 526 | static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t, |
527 | uint32_t, uint32_t); | | 527 | uint32_t, uint32_t); |
528 | static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t); | | 528 | static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t); |
529 | | | 529 | |
530 | /* | | 530 | /* |
531 | * Descriptor sync/init functions. | | 531 | * Descriptor sync/init functions. |
532 | */ | | 532 | */ |
533 | static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int); | | 533 | static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int); |
534 | static inline void wm_cdrxsync(struct wm_rxqueue *, int, int); | | 534 | static inline void wm_cdrxsync(struct wm_rxqueue *, int, int); |
535 | static inline void wm_init_rxdesc(struct wm_rxqueue *, int); | | 535 | static inline void wm_init_rxdesc(struct wm_rxqueue *, int); |
536 | | | 536 | |
537 | /* | | 537 | /* |
538 | * Device driver interface functions and commonly used functions. | | 538 | * Device driver interface functions and commonly used functions. |
539 | * match, attach, detach, init, start, stop, ioctl, watchdog and so on. | | 539 | * match, attach, detach, init, start, stop, ioctl, watchdog and so on. |
540 | */ | | 540 | */ |
541 | static const struct wm_product *wm_lookup(const struct pci_attach_args *); | | 541 | static const struct wm_product *wm_lookup(const struct pci_attach_args *); |
542 | static int wm_match(device_t, cfdata_t, void *); | | 542 | static int wm_match(device_t, cfdata_t, void *); |
543 | static void wm_attach(device_t, device_t, void *); | | 543 | static void wm_attach(device_t, device_t, void *); |
544 | static int wm_detach(device_t, int); | | 544 | static int wm_detach(device_t, int); |
545 | static bool wm_suspend(device_t, const pmf_qual_t *); | | 545 | static bool wm_suspend(device_t, const pmf_qual_t *); |
546 | static bool wm_resume(device_t, const pmf_qual_t *); | | 546 | static bool wm_resume(device_t, const pmf_qual_t *); |
547 | static void wm_watchdog(struct ifnet *); | | 547 | static void wm_watchdog(struct ifnet *); |
548 | static void wm_tick(void *); | | 548 | static void wm_tick(void *); |
549 | static int wm_ifflags_cb(struct ethercom *); | | 549 | static int wm_ifflags_cb(struct ethercom *); |
550 | static int wm_ioctl(struct ifnet *, u_long, void *); | | 550 | static int wm_ioctl(struct ifnet *, u_long, void *); |
551 | /* MAC address related */ | | 551 | /* MAC address related */ |
552 | static uint16_t wm_check_alt_mac_addr(struct wm_softc *); | | 552 | static uint16_t wm_check_alt_mac_addr(struct wm_softc *); |
553 | static int wm_read_mac_addr(struct wm_softc *, uint8_t *); | | 553 | static int wm_read_mac_addr(struct wm_softc *, uint8_t *); |
554 | static void wm_set_ral(struct wm_softc *, const uint8_t *, int); | | 554 | static void wm_set_ral(struct wm_softc *, const uint8_t *, int); |
555 | static uint32_t wm_mchash(struct wm_softc *, const uint8_t *); | | 555 | static uint32_t wm_mchash(struct wm_softc *, const uint8_t *); |
556 | static void wm_set_filter(struct wm_softc *); | | 556 | static void wm_set_filter(struct wm_softc *); |
557 | /* Reset and init related */ | | 557 | /* Reset and init related */ |
558 | static void wm_set_vlan(struct wm_softc *); | | 558 | static void wm_set_vlan(struct wm_softc *); |
559 | static void wm_set_pcie_completion_timeout(struct wm_softc *); | | 559 | static void wm_set_pcie_completion_timeout(struct wm_softc *); |
560 | static void wm_get_auto_rd_done(struct wm_softc *); | | 560 | static void wm_get_auto_rd_done(struct wm_softc *); |
561 | static void wm_lan_init_done(struct wm_softc *); | | 561 | static void wm_lan_init_done(struct wm_softc *); |
562 | static void wm_get_cfg_done(struct wm_softc *); | | 562 | static void wm_get_cfg_done(struct wm_softc *); |
563 | static void wm_initialize_hardware_bits(struct wm_softc *); | | 563 | static void wm_initialize_hardware_bits(struct wm_softc *); |
564 | static uint32_t wm_rxpbs_adjust_82580(uint32_t); | | 564 | static uint32_t wm_rxpbs_adjust_82580(uint32_t); |
565 | static void wm_reset(struct wm_softc *); | | 565 | static void wm_reset(struct wm_softc *); |
566 | static int wm_add_rxbuf(struct wm_rxqueue *, int); | | 566 | static int wm_add_rxbuf(struct wm_rxqueue *, int); |
567 | static void wm_rxdrain(struct wm_rxqueue *); | | 567 | static void wm_rxdrain(struct wm_rxqueue *); |
568 | static void wm_init_rss(struct wm_softc *); | | 568 | static void wm_init_rss(struct wm_softc *); |
569 | static int wm_init(struct ifnet *); | | 569 | static int wm_init(struct ifnet *); |
570 | static int wm_init_locked(struct ifnet *); | | 570 | static int wm_init_locked(struct ifnet *); |
571 | static void wm_stop(struct ifnet *, int); | | 571 | static void wm_stop(struct ifnet *, int); |
572 | static void wm_stop_locked(struct ifnet *, int); | | 572 | static void wm_stop_locked(struct ifnet *, int); |
573 | static int wm_tx_offload(struct wm_softc *, struct wm_txsoft *, | | 573 | static int wm_tx_offload(struct wm_softc *, struct wm_txsoft *, |
574 | uint32_t *, uint8_t *); | | 574 | uint32_t *, uint8_t *); |
575 | static void wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *); | | 575 | static void wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *); |
576 | static void wm_82547_txfifo_stall(void *); | | 576 | static void wm_82547_txfifo_stall(void *); |
577 | static int wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *); | | 577 | static int wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *); |
578 | /* DMA related */ | | 578 | /* DMA related */ |
579 | static int wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *); | | 579 | static int wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *); |
580 | static void wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *); | | 580 | static void wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *); |
581 | static void wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *); | | 581 | static void wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *); |
582 | static void wm_init_tx_regs(struct wm_softc *, struct wm_txqueue *); | | 582 | static void wm_init_tx_regs(struct wm_softc *, struct wm_txqueue *); |
583 | static int wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *); | | 583 | static int wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *); |
584 | static void wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *); | | 584 | static void wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *); |
585 | static void wm_init_rx_regs(struct wm_softc *, struct wm_rxqueue *); | | 585 | static void wm_init_rx_regs(struct wm_softc *, struct wm_rxqueue *); |
586 | static int wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *); | | 586 | static int wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *); |
587 | static void wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *); | | 587 | static void wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *); |
588 | static void wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *); | | 588 | static void wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *); |
589 | static int wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *); | | 589 | static int wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *); |
590 | static void wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *); | | 590 | static void wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *); |
591 | static int wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *); | | 591 | static int wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *); |
592 | static void wm_init_tx_queue(struct wm_softc *, struct wm_txqueue *); | | 592 | static void wm_init_tx_queue(struct wm_softc *, struct wm_txqueue *); |
593 | static int wm_init_rx_queue(struct wm_softc *, struct wm_rxqueue *); | | 593 | static int wm_init_rx_queue(struct wm_softc *, struct wm_rxqueue *); |
594 | static int wm_alloc_txrx_queues(struct wm_softc *); | | 594 | static int wm_alloc_txrx_queues(struct wm_softc *); |
595 | static void wm_free_txrx_queues(struct wm_softc *); | | 595 | static void wm_free_txrx_queues(struct wm_softc *); |
596 | static int wm_init_txrx_queues(struct wm_softc *); | | 596 | static int wm_init_txrx_queues(struct wm_softc *); |
597 | /* Start */ | | 597 | /* Start */ |
598 | static void wm_start(struct ifnet *); | | 598 | static void wm_start(struct ifnet *); |
599 | static void wm_start_locked(struct ifnet *); | | 599 | static void wm_start_locked(struct ifnet *); |
600 | static int wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *, | | 600 | static int wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *, |
601 | uint32_t *, uint32_t *, bool *); | | 601 | uint32_t *, uint32_t *, bool *); |
602 | static void wm_nq_start(struct ifnet *); | | 602 | static void wm_nq_start(struct ifnet *); |
603 | static void wm_nq_start_locked(struct ifnet *); | | 603 | static void wm_nq_start_locked(struct ifnet *); |
604 | /* Interrupt */ | | 604 | /* Interrupt */ |
605 | static int wm_txeof(struct wm_softc *); | | 605 | static int wm_txeof(struct wm_softc *); |
606 | static void wm_rxeof(struct wm_rxqueue *); | | 606 | static void wm_rxeof(struct wm_rxqueue *); |
607 | static void wm_linkintr_gmii(struct wm_softc *, uint32_t); | | 607 | static void wm_linkintr_gmii(struct wm_softc *, uint32_t); |
608 | static void wm_linkintr_tbi(struct wm_softc *, uint32_t); | | 608 | static void wm_linkintr_tbi(struct wm_softc *, uint32_t); |
609 | static void wm_linkintr_serdes(struct wm_softc *, uint32_t); | | 609 | static void wm_linkintr_serdes(struct wm_softc *, uint32_t); |
610 | static void wm_linkintr(struct wm_softc *, uint32_t); | | 610 | static void wm_linkintr(struct wm_softc *, uint32_t); |
611 | static int wm_intr_legacy(void *); | | 611 | static int wm_intr_legacy(void *); |
612 | #ifdef WM_MSI_MSIX | | 612 | #ifdef WM_MSI_MSIX |
613 | static void wm_adjust_qnum(struct wm_softc *, int); | | 613 | static void wm_adjust_qnum(struct wm_softc *, int); |
614 | static int wm_setup_legacy(struct wm_softc *); | | 614 | static int wm_setup_legacy(struct wm_softc *); |
615 | static int wm_setup_msix(struct wm_softc *); | | 615 | static int wm_setup_msix(struct wm_softc *); |
616 | static int wm_txintr_msix(void *); | | 616 | static int wm_txintr_msix(void *); |
617 | static int wm_rxintr_msix(void *); | | 617 | static int wm_rxintr_msix(void *); |
618 | static int wm_linkintr_msix(void *); | | 618 | static int wm_linkintr_msix(void *); |
619 | #endif | | 619 | #endif |
620 | | | 620 | |
621 | /* | | 621 | /* |
622 | * Media related. | | 622 | * Media related. |
623 | * GMII, SGMII, TBI, SERDES and SFP. | | 623 | * GMII, SGMII, TBI, SERDES and SFP. |
624 | */ | | 624 | */ |
625 | /* Common */ | | 625 | /* Common */ |
626 | static void wm_tbi_serdes_set_linkled(struct wm_softc *); | | 626 | static void wm_tbi_serdes_set_linkled(struct wm_softc *); |
627 | /* GMII related */ | | 627 | /* GMII related */ |
628 | static void wm_gmii_reset(struct wm_softc *); | | 628 | static void wm_gmii_reset(struct wm_softc *); |
629 | static int wm_get_phy_id_82575(struct wm_softc *); | | 629 | static int wm_get_phy_id_82575(struct wm_softc *); |
630 | static void wm_gmii_mediainit(struct wm_softc *, pci_product_id_t); | | 630 | static void wm_gmii_mediainit(struct wm_softc *, pci_product_id_t); |
631 | static int wm_gmii_mediachange(struct ifnet *); | | 631 | static int wm_gmii_mediachange(struct ifnet *); |
632 | static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *); | | 632 | static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *); |
633 | static void wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int); | | 633 | static void wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int); |
634 | static uint32_t wm_i82543_mii_recvbits(struct wm_softc *); | | 634 | static uint32_t wm_i82543_mii_recvbits(struct wm_softc *); |
635 | static int wm_gmii_i82543_readreg(device_t, int, int); | | 635 | static int wm_gmii_i82543_readreg(device_t, int, int); |
636 | static void wm_gmii_i82543_writereg(device_t, int, int, int); | | 636 | static void wm_gmii_i82543_writereg(device_t, int, int, int); |
637 | static int wm_gmii_i82544_readreg(device_t, int, int); | | 637 | static int wm_gmii_i82544_readreg(device_t, int, int); |
638 | static void wm_gmii_i82544_writereg(device_t, int, int, int); | | 638 | static void wm_gmii_i82544_writereg(device_t, int, int, int); |
639 | static int wm_gmii_i80003_readreg(device_t, int, int); | | 639 | static int wm_gmii_i80003_readreg(device_t, int, int); |
640 | static void wm_gmii_i80003_writereg(device_t, int, int, int); | | 640 | static void wm_gmii_i80003_writereg(device_t, int, int, int); |
641 | static int wm_gmii_bm_readreg(device_t, int, int); | | 641 | static int wm_gmii_bm_readreg(device_t, int, int); |
642 | static void wm_gmii_bm_writereg(device_t, int, int, int); | | 642 | static void wm_gmii_bm_writereg(device_t, int, int, int); |
643 | static void wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int); | | 643 | static void wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int); |
644 | static int wm_gmii_hv_readreg(device_t, int, int); | | 644 | static int wm_gmii_hv_readreg(device_t, int, int); |
645 | static void wm_gmii_hv_writereg(device_t, int, int, int); | | 645 | static void wm_gmii_hv_writereg(device_t, int, int, int); |
646 | static int wm_gmii_82580_readreg(device_t, int, int); | | 646 | static int wm_gmii_82580_readreg(device_t, int, int); |
647 | static void wm_gmii_82580_writereg(device_t, int, int, int); | | 647 | static void wm_gmii_82580_writereg(device_t, int, int, int); |
648 | static int wm_gmii_gs40g_readreg(device_t, int, int); | | 648 | static int wm_gmii_gs40g_readreg(device_t, int, int); |
649 | static void wm_gmii_gs40g_writereg(device_t, int, int, int); | | 649 | static void wm_gmii_gs40g_writereg(device_t, int, int, int); |
650 | static void wm_gmii_statchg(struct ifnet *); | | 650 | static void wm_gmii_statchg(struct ifnet *); |
651 | static int wm_kmrn_readreg(struct wm_softc *, int); | | 651 | static int wm_kmrn_readreg(struct wm_softc *, int); |
652 | static void wm_kmrn_writereg(struct wm_softc *, int, int); | | 652 | static void wm_kmrn_writereg(struct wm_softc *, int, int); |
653 | /* SGMII */ | | 653 | /* SGMII */ |
654 | static bool wm_sgmii_uses_mdio(struct wm_softc *); | | 654 | static bool wm_sgmii_uses_mdio(struct wm_softc *); |
655 | static int wm_sgmii_readreg(device_t, int, int); | | 655 | static int wm_sgmii_readreg(device_t, int, int); |
656 | static void wm_sgmii_writereg(device_t, int, int, int); | | 656 | static void wm_sgmii_writereg(device_t, int, int, int); |
657 | /* TBI related */ | | 657 | /* TBI related */ |
658 | static void wm_tbi_mediainit(struct wm_softc *); | | 658 | static void wm_tbi_mediainit(struct wm_softc *); |
659 | static int wm_tbi_mediachange(struct ifnet *); | | 659 | static int wm_tbi_mediachange(struct ifnet *); |
660 | static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *); | | 660 | static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *); |
661 | static int wm_check_for_link(struct wm_softc *); | | 661 | static int wm_check_for_link(struct wm_softc *); |
662 | static void wm_tbi_tick(struct wm_softc *); | | 662 | static void wm_tbi_tick(struct wm_softc *); |
663 | /* SERDES related */ | | 663 | /* SERDES related */ |
664 | static void wm_serdes_power_up_link_82575(struct wm_softc *); | | 664 | static void wm_serdes_power_up_link_82575(struct wm_softc *); |
665 | static int wm_serdes_mediachange(struct ifnet *); | | 665 | static int wm_serdes_mediachange(struct ifnet *); |
666 | static void wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *); | | 666 | static void wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *); |
667 | static void wm_serdes_tick(struct wm_softc *); | | 667 | static void wm_serdes_tick(struct wm_softc *); |
668 | /* SFP related */ | | 668 | /* SFP related */ |
669 | static int wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *); | | 669 | static int wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *); |
670 | static uint32_t wm_sfp_get_media_type(struct wm_softc *); | | 670 | static uint32_t wm_sfp_get_media_type(struct wm_softc *); |
671 | | | 671 | |
672 | /* | | 672 | /* |
673 | * NVM related. | | 673 | * NVM related. |
674 | * Microwire, SPI (w/wo EERD) and Flash. | | 674 | * Microwire, SPI (w/wo EERD) and Flash. |
675 | */ | | 675 | */ |
676 | /* Misc functions */ | | 676 | /* Misc functions */ |
677 | static void wm_eeprom_sendbits(struct wm_softc *, uint32_t, int); | | 677 | static void wm_eeprom_sendbits(struct wm_softc *, uint32_t, int); |
678 | static void wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int); | | 678 | static void wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int); |
679 | static int wm_nvm_set_addrbits_size_eecd(struct wm_softc *); | | 679 | static int wm_nvm_set_addrbits_size_eecd(struct wm_softc *); |
680 | /* Microwire */ | | 680 | /* Microwire */ |
681 | static int wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *); | | 681 | static int wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *); |
682 | /* SPI */ | | 682 | /* SPI */ |
683 | static int wm_nvm_ready_spi(struct wm_softc *); | | 683 | static int wm_nvm_ready_spi(struct wm_softc *); |
684 | static int wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *); | | 684 | static int wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *); |
685 | /* Using with EERD */ | | 685 | /* Using with EERD */ |
686 | static int wm_poll_eerd_eewr_done(struct wm_softc *, int); | | 686 | static int wm_poll_eerd_eewr_done(struct wm_softc *, int); |
687 | static int wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *); | | 687 | static int wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *); |
688 | /* Flash */ | | 688 | /* Flash */ |
689 | static int wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *, | | 689 | static int wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *, |
690 | unsigned int *); | | 690 | unsigned int *); |
691 | static int32_t wm_ich8_cycle_init(struct wm_softc *); | | 691 | static int32_t wm_ich8_cycle_init(struct wm_softc *); |
692 | static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t); | | 692 | static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t); |
693 | static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t, | | 693 | static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t, |
694 | uint16_t *); | | 694 | uint16_t *); |
695 | static int32_t wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *); | | 695 | static int32_t wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *); |
696 | static int32_t wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *); | | 696 | static int32_t wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *); |
697 | static int wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *); | | 697 | static int wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *); |
698 | /* iNVM */ | | 698 | /* iNVM */ |
699 | static int wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *); | | 699 | static int wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *); |
700 | static int wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *); | | 700 | static int wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *); |
/* Locking, NVM type detection, checksum validation and reading */
702 | static int wm_nvm_acquire(struct wm_softc *); | | 702 | static int wm_nvm_acquire(struct wm_softc *); |
703 | static void wm_nvm_release(struct wm_softc *); | | 703 | static void wm_nvm_release(struct wm_softc *); |
704 | static int wm_nvm_is_onboard_eeprom(struct wm_softc *); | | 704 | static int wm_nvm_is_onboard_eeprom(struct wm_softc *); |
705 | static int wm_nvm_get_flash_presence_i210(struct wm_softc *); | | 705 | static int wm_nvm_get_flash_presence_i210(struct wm_softc *); |
706 | static int wm_nvm_validate_checksum(struct wm_softc *); | | 706 | static int wm_nvm_validate_checksum(struct wm_softc *); |
707 | static void wm_nvm_version_invm(struct wm_softc *); | | 707 | static void wm_nvm_version_invm(struct wm_softc *); |
708 | static void wm_nvm_version(struct wm_softc *); | | 708 | static void wm_nvm_version(struct wm_softc *); |
709 | static int wm_nvm_read(struct wm_softc *, int, int, uint16_t *); | | 709 | static int wm_nvm_read(struct wm_softc *, int, int, uint16_t *); |
710 | | | 710 | |
711 | /* | | 711 | /* |
712 | * Hardware semaphores. | | 712 | * Hardware semaphores. |
713 | * Very complexed... | | 713 | * Very complexed... |
714 | */ | | 714 | */ |
715 | static int wm_get_swsm_semaphore(struct wm_softc *); | | 715 | static int wm_get_swsm_semaphore(struct wm_softc *); |
716 | static void wm_put_swsm_semaphore(struct wm_softc *); | | 716 | static void wm_put_swsm_semaphore(struct wm_softc *); |
717 | static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t); | | 717 | static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t); |
718 | static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t); | | 718 | static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t); |
719 | static int wm_get_swfwhw_semaphore(struct wm_softc *); | | 719 | static int wm_get_swfwhw_semaphore(struct wm_softc *); |
720 | static void wm_put_swfwhw_semaphore(struct wm_softc *); | | 720 | static void wm_put_swfwhw_semaphore(struct wm_softc *); |
721 | static int wm_get_hw_semaphore_82573(struct wm_softc *); | | 721 | static int wm_get_hw_semaphore_82573(struct wm_softc *); |
722 | static void wm_put_hw_semaphore_82573(struct wm_softc *); | | 722 | static void wm_put_hw_semaphore_82573(struct wm_softc *); |
723 | | | 723 | |
724 | /* | | 724 | /* |
725 | * Management mode and power management related subroutines. | | 725 | * Management mode and power management related subroutines. |
726 | * BMC, AMT, suspend/resume and EEE. | | 726 | * BMC, AMT, suspend/resume and EEE. |
727 | */ | | 727 | */ |
728 | static int wm_check_mng_mode(struct wm_softc *); | | 728 | static int wm_check_mng_mode(struct wm_softc *); |
729 | static int wm_check_mng_mode_ich8lan(struct wm_softc *); | | 729 | static int wm_check_mng_mode_ich8lan(struct wm_softc *); |
730 | static int wm_check_mng_mode_82574(struct wm_softc *); | | 730 | static int wm_check_mng_mode_82574(struct wm_softc *); |
731 | static int wm_check_mng_mode_generic(struct wm_softc *); | | 731 | static int wm_check_mng_mode_generic(struct wm_softc *); |
732 | static int wm_enable_mng_pass_thru(struct wm_softc *); | | 732 | static int wm_enable_mng_pass_thru(struct wm_softc *); |
733 | static int wm_check_reset_block(struct wm_softc *); | | 733 | static int wm_check_reset_block(struct wm_softc *); |
734 | static void wm_get_hw_control(struct wm_softc *); | | 734 | static void wm_get_hw_control(struct wm_softc *); |
735 | static void wm_release_hw_control(struct wm_softc *); | | 735 | static void wm_release_hw_control(struct wm_softc *); |
736 | static void wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int); | | 736 | static void wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int); |
737 | static void wm_smbustopci(struct wm_softc *); | | 737 | static void wm_smbustopci(struct wm_softc *); |
738 | static void wm_init_manageability(struct wm_softc *); | | 738 | static void wm_init_manageability(struct wm_softc *); |
739 | static void wm_release_manageability(struct wm_softc *); | | 739 | static void wm_release_manageability(struct wm_softc *); |
740 | static void wm_get_wakeup(struct wm_softc *); | | 740 | static void wm_get_wakeup(struct wm_softc *); |
741 | #ifdef WM_WOL | | 741 | #ifdef WM_WOL |
742 | static void wm_enable_phy_wakeup(struct wm_softc *); | | 742 | static void wm_enable_phy_wakeup(struct wm_softc *); |
743 | static void wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *); | | 743 | static void wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *); |
744 | static void wm_enable_wakeup(struct wm_softc *); | | 744 | static void wm_enable_wakeup(struct wm_softc *); |
745 | #endif | | 745 | #endif |
746 | /* EEE */ | | 746 | /* EEE */ |
747 | static void wm_set_eee_i350(struct wm_softc *); | | 747 | static void wm_set_eee_i350(struct wm_softc *); |
748 | | | 748 | |
749 | /* | | 749 | /* |
750 | * Workarounds (mainly PHY related). | | 750 | * Workarounds (mainly PHY related). |
751 | * Basically, PHY's workarounds are in the PHY drivers. | | 751 | * Basically, PHY's workarounds are in the PHY drivers. |
752 | */ | | 752 | */ |
753 | static void wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *); | | 753 | static void wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *); |
754 | static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *); | | 754 | static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *); |
755 | static void wm_hv_phy_workaround_ich8lan(struct wm_softc *); | | 755 | static void wm_hv_phy_workaround_ich8lan(struct wm_softc *); |
756 | static void wm_lv_phy_workaround_ich8lan(struct wm_softc *); | | 756 | static void wm_lv_phy_workaround_ich8lan(struct wm_softc *); |
757 | static void wm_k1_gig_workaround_hv(struct wm_softc *, int); | | 757 | static void wm_k1_gig_workaround_hv(struct wm_softc *, int); |
758 | static void wm_set_mdio_slow_mode_hv(struct wm_softc *); | | 758 | static void wm_set_mdio_slow_mode_hv(struct wm_softc *); |
759 | static void wm_configure_k1_ich8lan(struct wm_softc *, int); | | 759 | static void wm_configure_k1_ich8lan(struct wm_softc *, int); |
760 | static void wm_reset_init_script_82575(struct wm_softc *); | | 760 | static void wm_reset_init_script_82575(struct wm_softc *); |
761 | static void wm_reset_mdicnfg_82580(struct wm_softc *); | | 761 | static void wm_reset_mdicnfg_82580(struct wm_softc *); |
762 | static void wm_pll_workaround_i210(struct wm_softc *); | | 762 | static void wm_pll_workaround_i210(struct wm_softc *); |
763 | | | 763 | |
/*
 * Autoconfiguration glue: register the "wm" driver with the kernel
 * autoconf framework, providing the match/attach/detach entry points
 * declared above.  The three NULL slots (activate, rescan,
 * childdetached) are unused by this driver.  DVF_DETACH_SHUTDOWN
 * marks the driver as safe to detach during system shutdown.
 */
CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
766 | | | 766 | |
767 | /* | | 767 | /* |
768 | * Devices supported by this driver. | | 768 | * Devices supported by this driver. |
769 | */ | | 769 | */ |
770 | static const struct wm_product { | | 770 | static const struct wm_product { |
771 | pci_vendor_id_t wmp_vendor; | | 771 | pci_vendor_id_t wmp_vendor; |
772 | pci_product_id_t wmp_product; | | 772 | pci_product_id_t wmp_product; |
773 | const char *wmp_name; | | 773 | const char *wmp_name; |
774 | wm_chip_type wmp_type; | | 774 | wm_chip_type wmp_type; |
775 | uint32_t wmp_flags; | | 775 | uint32_t wmp_flags; |
776 | #define WMP_F_UNKNOWN WM_MEDIATYPE_UNKNOWN | | 776 | #define WMP_F_UNKNOWN WM_MEDIATYPE_UNKNOWN |
777 | #define WMP_F_FIBER WM_MEDIATYPE_FIBER | | 777 | #define WMP_F_FIBER WM_MEDIATYPE_FIBER |
778 | #define WMP_F_COPPER WM_MEDIATYPE_COPPER | | 778 | #define WMP_F_COPPER WM_MEDIATYPE_COPPER |
779 | #define WMP_F_SERDES WM_MEDIATYPE_SERDES | | 779 | #define WMP_F_SERDES WM_MEDIATYPE_SERDES |
780 | #define WMP_MEDIATYPE(x) ((x) & 0x03) | | 780 | #define WMP_MEDIATYPE(x) ((x) & 0x03) |
781 | } wm_products[] = { | | 781 | } wm_products[] = { |
782 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542, | | 782 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542, |
783 | "Intel i82542 1000BASE-X Ethernet", | | 783 | "Intel i82542 1000BASE-X Ethernet", |
784 | WM_T_82542_2_1, WMP_F_FIBER }, | | 784 | WM_T_82542_2_1, WMP_F_FIBER }, |
785 | | | 785 | |
786 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER, | | 786 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER, |
787 | "Intel i82543GC 1000BASE-X Ethernet", | | 787 | "Intel i82543GC 1000BASE-X Ethernet", |
788 | WM_T_82543, WMP_F_FIBER }, | | 788 | WM_T_82543, WMP_F_FIBER }, |
789 | | | 789 | |
790 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER, | | 790 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER, |
791 | "Intel i82543GC 1000BASE-T Ethernet", | | 791 | "Intel i82543GC 1000BASE-T Ethernet", |
792 | WM_T_82543, WMP_F_COPPER }, | | 792 | WM_T_82543, WMP_F_COPPER }, |
793 | | | 793 | |
794 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER, | | 794 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER, |
795 | "Intel i82544EI 1000BASE-T Ethernet", | | 795 | "Intel i82544EI 1000BASE-T Ethernet", |
796 | WM_T_82544, WMP_F_COPPER }, | | 796 | WM_T_82544, WMP_F_COPPER }, |
797 | | | 797 | |
798 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER, | | 798 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER, |
799 | "Intel i82544EI 1000BASE-X Ethernet", | | 799 | "Intel i82544EI 1000BASE-X Ethernet", |
800 | WM_T_82544, WMP_F_FIBER }, | | 800 | WM_T_82544, WMP_F_FIBER }, |
801 | | | 801 | |
802 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER, | | 802 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER, |
803 | "Intel i82544GC 1000BASE-T Ethernet", | | 803 | "Intel i82544GC 1000BASE-T Ethernet", |
804 | WM_T_82544, WMP_F_COPPER }, | | 804 | WM_T_82544, WMP_F_COPPER }, |
805 | | | 805 | |
806 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM, | | 806 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM, |
807 | "Intel i82544GC (LOM) 1000BASE-T Ethernet", | | 807 | "Intel i82544GC (LOM) 1000BASE-T Ethernet", |
808 | WM_T_82544, WMP_F_COPPER }, | | 808 | WM_T_82544, WMP_F_COPPER }, |
809 | | | 809 | |
810 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM, | | 810 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM, |
811 | "Intel i82540EM 1000BASE-T Ethernet", | | 811 | "Intel i82540EM 1000BASE-T Ethernet", |
812 | WM_T_82540, WMP_F_COPPER }, | | 812 | WM_T_82540, WMP_F_COPPER }, |
813 | | | 813 | |
814 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM, | | 814 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM, |
815 | "Intel i82540EM (LOM) 1000BASE-T Ethernet", | | 815 | "Intel i82540EM (LOM) 1000BASE-T Ethernet", |
816 | WM_T_82540, WMP_F_COPPER }, | | 816 | WM_T_82540, WMP_F_COPPER }, |
817 | | | 817 | |
818 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM, | | 818 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM, |
819 | "Intel i82540EP 1000BASE-T Ethernet", | | 819 | "Intel i82540EP 1000BASE-T Ethernet", |
820 | WM_T_82540, WMP_F_COPPER }, | | 820 | WM_T_82540, WMP_F_COPPER }, |
821 | | | 821 | |
822 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP, | | 822 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP, |
823 | "Intel i82540EP 1000BASE-T Ethernet", | | 823 | "Intel i82540EP 1000BASE-T Ethernet", |
824 | WM_T_82540, WMP_F_COPPER }, | | 824 | WM_T_82540, WMP_F_COPPER }, |
825 | | | 825 | |
826 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP, | | 826 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP, |
827 | "Intel i82540EP 1000BASE-T Ethernet", | | 827 | "Intel i82540EP 1000BASE-T Ethernet", |
828 | WM_T_82540, WMP_F_COPPER }, | | 828 | WM_T_82540, WMP_F_COPPER }, |
829 | | | 829 | |
830 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER, | | 830 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER, |
831 | "Intel i82545EM 1000BASE-T Ethernet", | | 831 | "Intel i82545EM 1000BASE-T Ethernet", |
832 | WM_T_82545, WMP_F_COPPER }, | | 832 | WM_T_82545, WMP_F_COPPER }, |
833 | | | 833 | |
834 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER, | | 834 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER, |
835 | "Intel i82545GM 1000BASE-T Ethernet", | | 835 | "Intel i82545GM 1000BASE-T Ethernet", |
836 | WM_T_82545_3, WMP_F_COPPER }, | | 836 | WM_T_82545_3, WMP_F_COPPER }, |
837 | | | 837 | |
838 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER, | | 838 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER, |
839 | "Intel i82545GM 1000BASE-X Ethernet", | | 839 | "Intel i82545GM 1000BASE-X Ethernet", |
840 | WM_T_82545_3, WMP_F_FIBER }, | | 840 | WM_T_82545_3, WMP_F_FIBER }, |
841 | | | 841 | |
842 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES, | | 842 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES, |
843 | "Intel i82545GM Gigabit Ethernet (SERDES)", | | 843 | "Intel i82545GM Gigabit Ethernet (SERDES)", |
844 | WM_T_82545_3, WMP_F_SERDES }, | | 844 | WM_T_82545_3, WMP_F_SERDES }, |
845 | | | 845 | |
846 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER, | | 846 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER, |
847 | "Intel i82546EB 1000BASE-T Ethernet", | | 847 | "Intel i82546EB 1000BASE-T Ethernet", |
848 | WM_T_82546, WMP_F_COPPER }, | | 848 | WM_T_82546, WMP_F_COPPER }, |
849 | | | 849 | |
850 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD, | | 850 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD, |
851 | "Intel i82546EB 1000BASE-T Ethernet", | | 851 | "Intel i82546EB 1000BASE-T Ethernet", |
852 | WM_T_82546, WMP_F_COPPER }, | | 852 | WM_T_82546, WMP_F_COPPER }, |
853 | | | 853 | |
854 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER, | | 854 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER, |
855 | "Intel i82545EM 1000BASE-X Ethernet", | | 855 | "Intel i82545EM 1000BASE-X Ethernet", |
856 | WM_T_82545, WMP_F_FIBER }, | | 856 | WM_T_82545, WMP_F_FIBER }, |
857 | | | 857 | |
858 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER, | | 858 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER, |
859 | "Intel i82546EB 1000BASE-X Ethernet", | | 859 | "Intel i82546EB 1000BASE-X Ethernet", |
860 | WM_T_82546, WMP_F_FIBER }, | | 860 | WM_T_82546, WMP_F_FIBER }, |
861 | | | 861 | |
862 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER, | | 862 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER, |
863 | "Intel i82546GB 1000BASE-T Ethernet", | | 863 | "Intel i82546GB 1000BASE-T Ethernet", |
864 | WM_T_82546_3, WMP_F_COPPER }, | | 864 | WM_T_82546_3, WMP_F_COPPER }, |
865 | | | 865 | |
866 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER, | | 866 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER, |
867 | "Intel i82546GB 1000BASE-X Ethernet", | | 867 | "Intel i82546GB 1000BASE-X Ethernet", |
868 | WM_T_82546_3, WMP_F_FIBER }, | | 868 | WM_T_82546_3, WMP_F_FIBER }, |
869 | | | 869 | |
870 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES, | | 870 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES, |
871 | "Intel i82546GB Gigabit Ethernet (SERDES)", | | 871 | "Intel i82546GB Gigabit Ethernet (SERDES)", |
872 | WM_T_82546_3, WMP_F_SERDES }, | | 872 | WM_T_82546_3, WMP_F_SERDES }, |
873 | | | 873 | |
874 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER, | | 874 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER, |
875 | "i82546GB quad-port Gigabit Ethernet", | | 875 | "i82546GB quad-port Gigabit Ethernet", |
876 | WM_T_82546_3, WMP_F_COPPER }, | | 876 | WM_T_82546_3, WMP_F_COPPER }, |
877 | | | 877 | |
878 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3, | | 878 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3, |
879 | "i82546GB quad-port Gigabit Ethernet (KSP3)", | | 879 | "i82546GB quad-port Gigabit Ethernet (KSP3)", |
880 | WM_T_82546_3, WMP_F_COPPER }, | | 880 | WM_T_82546_3, WMP_F_COPPER }, |
881 | | | 881 | |
882 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE, | | 882 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE, |
883 | "Intel PRO/1000MT (82546GB)", | | 883 | "Intel PRO/1000MT (82546GB)", |
884 | WM_T_82546_3, WMP_F_COPPER }, | | 884 | WM_T_82546_3, WMP_F_COPPER }, |
885 | | | 885 | |
886 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI, | | 886 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI, |
887 | "Intel i82541EI 1000BASE-T Ethernet", | | 887 | "Intel i82541EI 1000BASE-T Ethernet", |
888 | WM_T_82541, WMP_F_COPPER }, | | 888 | WM_T_82541, WMP_F_COPPER }, |
889 | | | 889 | |
890 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM, | | 890 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM, |
891 | "Intel i82541ER (LOM) 1000BASE-T Ethernet", | | 891 | "Intel i82541ER (LOM) 1000BASE-T Ethernet", |
892 | WM_T_82541, WMP_F_COPPER }, | | 892 | WM_T_82541, WMP_F_COPPER }, |
893 | | | 893 | |
894 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE, | | 894 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE, |
895 | "Intel i82541EI Mobile 1000BASE-T Ethernet", | | 895 | "Intel i82541EI Mobile 1000BASE-T Ethernet", |
896 | WM_T_82541, WMP_F_COPPER }, | | 896 | WM_T_82541, WMP_F_COPPER }, |
897 | | | 897 | |
898 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER, | | 898 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER, |
899 | "Intel i82541ER 1000BASE-T Ethernet", | | 899 | "Intel i82541ER 1000BASE-T Ethernet", |
900 | WM_T_82541_2, WMP_F_COPPER }, | | 900 | WM_T_82541_2, WMP_F_COPPER }, |
901 | | | 901 | |
902 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI, | | 902 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI, |
903 | "Intel i82541GI 1000BASE-T Ethernet", | | 903 | "Intel i82541GI 1000BASE-T Ethernet", |
904 | WM_T_82541_2, WMP_F_COPPER }, | | 904 | WM_T_82541_2, WMP_F_COPPER }, |
905 | | | 905 | |
906 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE, | | 906 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE, |
907 | "Intel i82541GI Mobile 1000BASE-T Ethernet", | | 907 | "Intel i82541GI Mobile 1000BASE-T Ethernet", |
908 | WM_T_82541_2, WMP_F_COPPER }, | | 908 | WM_T_82541_2, WMP_F_COPPER }, |
909 | | | 909 | |
910 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI, | | 910 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI, |
911 | "Intel i82541PI 1000BASE-T Ethernet", | | 911 | "Intel i82541PI 1000BASE-T Ethernet", |
912 | WM_T_82541_2, WMP_F_COPPER }, | | 912 | WM_T_82541_2, WMP_F_COPPER }, |
913 | | | 913 | |
914 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI, | | 914 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI, |
915 | "Intel i82547EI 1000BASE-T Ethernet", | | 915 | "Intel i82547EI 1000BASE-T Ethernet", |
916 | WM_T_82547, WMP_F_COPPER }, | | 916 | WM_T_82547, WMP_F_COPPER }, |
917 | | | 917 | |
918 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE, | | 918 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE, |
919 | "Intel i82547EI Mobile 1000BASE-T Ethernet", | | 919 | "Intel i82547EI Mobile 1000BASE-T Ethernet", |
920 | WM_T_82547, WMP_F_COPPER }, | | 920 | WM_T_82547, WMP_F_COPPER }, |
921 | | | 921 | |
922 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI, | | 922 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI, |
923 | "Intel i82547GI 1000BASE-T Ethernet", | | 923 | "Intel i82547GI 1000BASE-T Ethernet", |
924 | WM_T_82547_2, WMP_F_COPPER }, | | 924 | WM_T_82547_2, WMP_F_COPPER }, |
925 | | | 925 | |
926 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER, | | 926 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER, |
927 | "Intel PRO/1000 PT (82571EB)", | | 927 | "Intel PRO/1000 PT (82571EB)", |
928 | WM_T_82571, WMP_F_COPPER }, | | 928 | WM_T_82571, WMP_F_COPPER }, |
929 | | | 929 | |
930 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER, | | 930 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER, |
931 | "Intel PRO/1000 PF (82571EB)", | | 931 | "Intel PRO/1000 PF (82571EB)", |
932 | WM_T_82571, WMP_F_FIBER }, | | 932 | WM_T_82571, WMP_F_FIBER }, |
933 | | | 933 | |
934 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES, | | 934 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES, |
935 | "Intel PRO/1000 PB (82571EB)", | | 935 | "Intel PRO/1000 PB (82571EB)", |
936 | WM_T_82571, WMP_F_SERDES }, | | 936 | WM_T_82571, WMP_F_SERDES }, |
937 | | | 937 | |
938 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER, | | 938 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER, |
939 | "Intel PRO/1000 QT (82571EB)", | | 939 | "Intel PRO/1000 QT (82571EB)", |
940 | WM_T_82571, WMP_F_COPPER }, | | 940 | WM_T_82571, WMP_F_COPPER }, |
941 | | | 941 | |
942 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER, | | 942 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER, |
943 | "Intel PRO/1000 PT Quad Port Server Adapter", | | 943 | "Intel PRO/1000 PT Quad Port Server Adapter", |
944 | WM_T_82571, WMP_F_COPPER, }, | | 944 | WM_T_82571, WMP_F_COPPER, }, |
945 | | | 945 | |
946 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER, | | 946 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER, |
947 | "Intel Gigabit PT Quad Port Server ExpressModule", | | 947 | "Intel Gigabit PT Quad Port Server ExpressModule", |
948 | WM_T_82571, WMP_F_COPPER, }, | | 948 | WM_T_82571, WMP_F_COPPER, }, |
949 | | | 949 | |
950 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES, | | 950 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES, |
951 | "Intel 82571EB Dual Gigabit Ethernet (SERDES)", | | 951 | "Intel 82571EB Dual Gigabit Ethernet (SERDES)", |
952 | WM_T_82571, WMP_F_SERDES, }, | | 952 | WM_T_82571, WMP_F_SERDES, }, |
953 | | | 953 | |
954 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES, | | 954 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES, |
955 | "Intel 82571EB Quad Gigabit Ethernet (SERDES)", | | 955 | "Intel 82571EB Quad Gigabit Ethernet (SERDES)", |
956 | WM_T_82571, WMP_F_SERDES, }, | | 956 | WM_T_82571, WMP_F_SERDES, }, |
957 | | | 957 | |
958 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER, | | 958 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER, |
959 | "Intel 82571EB Quad 1000baseX Ethernet", | | 959 | "Intel 82571EB Quad 1000baseX Ethernet", |
960 | WM_T_82571, WMP_F_FIBER, }, | | 960 | WM_T_82571, WMP_F_FIBER, }, |
961 | | | 961 | |
962 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER, | | 962 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER, |
963 | "Intel i82572EI 1000baseT Ethernet", | | 963 | "Intel i82572EI 1000baseT Ethernet", |
964 | WM_T_82572, WMP_F_COPPER }, | | 964 | WM_T_82572, WMP_F_COPPER }, |
965 | | | 965 | |
966 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER, | | 966 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER, |
967 | "Intel i82572EI 1000baseX Ethernet", | | 967 | "Intel i82572EI 1000baseX Ethernet", |
968 | WM_T_82572, WMP_F_FIBER }, | | 968 | WM_T_82572, WMP_F_FIBER }, |
969 | | | 969 | |
970 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES, | | 970 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES, |
971 | "Intel i82572EI Gigabit Ethernet (SERDES)", | | 971 | "Intel i82572EI Gigabit Ethernet (SERDES)", |
972 | WM_T_82572, WMP_F_SERDES }, | | 972 | WM_T_82572, WMP_F_SERDES }, |
973 | | | 973 | |
974 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI, | | 974 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI, |
975 | "Intel i82572EI 1000baseT Ethernet", | | 975 | "Intel i82572EI 1000baseT Ethernet", |
976 | WM_T_82572, WMP_F_COPPER }, | | 976 | WM_T_82572, WMP_F_COPPER }, |
977 | | | 977 | |
978 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E, | | 978 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E, |
979 | "Intel i82573E", | | 979 | "Intel i82573E", |
980 | WM_T_82573, WMP_F_COPPER }, | | 980 | WM_T_82573, WMP_F_COPPER }, |
981 | | | 981 | |
982 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT, | | 982 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT, |
983 | "Intel i82573E IAMT", | | 983 | "Intel i82573E IAMT", |
984 | WM_T_82573, WMP_F_COPPER }, | | 984 | WM_T_82573, WMP_F_COPPER }, |
985 | | | 985 | |
986 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L, | | 986 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L, |
987 | "Intel i82573L Gigabit Ethernet", | | 987 | "Intel i82573L Gigabit Ethernet", |
988 | WM_T_82573, WMP_F_COPPER }, | | 988 | WM_T_82573, WMP_F_COPPER }, |
989 | | | 989 | |
990 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L, | | 990 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L, |
991 | "Intel i82574L", | | 991 | "Intel i82574L", |
992 | WM_T_82574, WMP_F_COPPER }, | | 992 | WM_T_82574, WMP_F_COPPER }, |
993 | | | 993 | |
994 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574LA, | | 994 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574LA, |
995 | "Intel i82574L", | | 995 | "Intel i82574L", |
996 | WM_T_82574, WMP_F_COPPER }, | | 996 | WM_T_82574, WMP_F_COPPER }, |
997 | | | 997 | |
998 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V, | | 998 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V, |
999 | "Intel i82583V", | | 999 | "Intel i82583V", |
1000 | WM_T_82583, WMP_F_COPPER }, | | 1000 | WM_T_82583, WMP_F_COPPER }, |
1001 | | | 1001 | |
1002 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT, | | 1002 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT, |
1003 | "i80003 dual 1000baseT Ethernet", | | 1003 | "i80003 dual 1000baseT Ethernet", |
1004 | WM_T_80003, WMP_F_COPPER }, | | 1004 | WM_T_80003, WMP_F_COPPER }, |
1005 | | | 1005 | |
1006 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT, | | 1006 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT, |
1007 | "i80003 dual 1000baseX Ethernet", | | 1007 | "i80003 dual 1000baseX Ethernet", |
1008 | WM_T_80003, WMP_F_COPPER }, | | 1008 | WM_T_80003, WMP_F_COPPER }, |
1009 | | | 1009 | |
1010 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT, | | 1010 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT, |
1011 | "Intel i80003ES2 dual Gigabit Ethernet (SERDES)", | | 1011 | "Intel i80003ES2 dual Gigabit Ethernet (SERDES)", |
1012 | WM_T_80003, WMP_F_SERDES }, | | 1012 | WM_T_80003, WMP_F_SERDES }, |
1013 | | | 1013 | |
1014 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT, | | 1014 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT, |
1015 | "Intel i80003 1000baseT Ethernet", | | 1015 | "Intel i80003 1000baseT Ethernet", |
1016 | WM_T_80003, WMP_F_COPPER }, | | 1016 | WM_T_80003, WMP_F_COPPER }, |
1017 | | | 1017 | |
1018 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT, | | 1018 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT, |
1019 | "Intel i80003 Gigabit Ethernet (SERDES)", | | 1019 | "Intel i80003 Gigabit Ethernet (SERDES)", |
1020 | WM_T_80003, WMP_F_SERDES }, | | 1020 | WM_T_80003, WMP_F_SERDES }, |
1021 | | | 1021 | |
1022 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_AMT, | | 1022 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_AMT, |
1023 | "Intel i82801H (M_AMT) LAN Controller", | | 1023 | "Intel i82801H (M_AMT) LAN Controller", |
1024 | WM_T_ICH8, WMP_F_COPPER }, | | 1024 | WM_T_ICH8, WMP_F_COPPER }, |
1025 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_AMT, | | 1025 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_AMT, |
1026 | "Intel i82801H (AMT) LAN Controller", | | 1026 | "Intel i82801H (AMT) LAN Controller", |
1027 | WM_T_ICH8, WMP_F_COPPER }, | | 1027 | WM_T_ICH8, WMP_F_COPPER }, |
1028 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_LAN, | | 1028 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_LAN, |
1029 | "Intel i82801H LAN Controller", | | 1029 | "Intel i82801H LAN Controller", |
1030 | WM_T_ICH8, WMP_F_COPPER }, | | 1030 | WM_T_ICH8, WMP_F_COPPER }, |
1031 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_LAN, | | 1031 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_LAN, |
1032 | "Intel i82801H (IFE) LAN Controller", | | 1032 | "Intel i82801H (IFE) LAN Controller", |
1033 | WM_T_ICH8, WMP_F_COPPER }, | | 1033 | WM_T_ICH8, WMP_F_COPPER }, |
1034 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_LAN, | | 1034 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_LAN, |
1035 | "Intel i82801H (M) LAN Controller", | | 1035 | "Intel i82801H (M) LAN Controller", |
1036 | WM_T_ICH8, WMP_F_COPPER }, | | 1036 | WM_T_ICH8, WMP_F_COPPER }, |
1037 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_GT, | | 1037 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_GT, |
1038 | "Intel i82801H IFE (GT) LAN Controller", | | 1038 | "Intel i82801H IFE (GT) LAN Controller", |
1039 | WM_T_ICH8, WMP_F_COPPER }, | | 1039 | WM_T_ICH8, WMP_F_COPPER }, |
1040 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_G, | | 1040 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_G, |
1041 | "Intel i82801H IFE (G) LAN Controller", | | 1041 | "Intel i82801H IFE (G) LAN Controller", |
1042 | WM_T_ICH8, WMP_F_COPPER }, | | 1042 | WM_T_ICH8, WMP_F_COPPER }, |
1043 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_AMT, | | 1043 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_AMT, |
1044 | "82801I (AMT) LAN Controller", | | 1044 | "82801I (AMT) LAN Controller", |
1045 | WM_T_ICH9, WMP_F_COPPER }, | | 1045 | WM_T_ICH9, WMP_F_COPPER }, |
1046 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE, | | 1046 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE, |
1047 | "82801I LAN Controller", | | 1047 | "82801I LAN Controller", |
1048 | WM_T_ICH9, WMP_F_COPPER }, | | 1048 | WM_T_ICH9, WMP_F_COPPER }, |
1049 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_G, | | 1049 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_G, |
1050 | "82801I (G) LAN Controller", | | 1050 | "82801I (G) LAN Controller", |
1051 | WM_T_ICH9, WMP_F_COPPER }, | | 1051 | WM_T_ICH9, WMP_F_COPPER }, |
1052 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_GT, | | 1052 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_GT, |
1053 | "82801I (GT) LAN Controller", | | 1053 | "82801I (GT) LAN Controller", |
1054 | WM_T_ICH9, WMP_F_COPPER }, | | 1054 | WM_T_ICH9, WMP_F_COPPER }, |
1055 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_C, | | 1055 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_C, |
1056 | "82801I (C) LAN Controller", | | 1056 | "82801I (C) LAN Controller", |
1057 | WM_T_ICH9, WMP_F_COPPER }, | | 1057 | WM_T_ICH9, WMP_F_COPPER }, |
1058 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M, | | 1058 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M, |
1059 | "82801I mobile LAN Controller", | | 1059 | "82801I mobile LAN Controller", |
1060 | WM_T_ICH9, WMP_F_COPPER }, | | 1060 | WM_T_ICH9, WMP_F_COPPER }, |
1061 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IGP_M_V, | | 1061 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IGP_M_V, |
1062 | "82801I mobile (V) LAN Controller", | | 1062 | "82801I mobile (V) LAN Controller", |
1063 | WM_T_ICH9, WMP_F_COPPER }, | | 1063 | WM_T_ICH9, WMP_F_COPPER }, |
1064 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_AMT, | | 1064 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_AMT, |
1065 | "82801I mobile (AMT) LAN Controller", | | 1065 | "82801I mobile (AMT) LAN Controller", |
1066 | WM_T_ICH9, WMP_F_COPPER }, | | 1066 | WM_T_ICH9, WMP_F_COPPER }, |
1067 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_BM, | | 1067 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_BM, |
1068 | "82567LM-4 LAN Controller", | | 1068 | "82567LM-4 LAN Controller", |
1069 | WM_T_ICH9, WMP_F_COPPER }, | | 1069 | WM_T_ICH9, WMP_F_COPPER }, |
1070 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_82567V_3, | | 1070 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_82567V_3, |
1071 | "82567V-3 LAN Controller", | | 1071 | "82567V-3 LAN Controller", |
1072 | WM_T_ICH9, WMP_F_COPPER }, | | 1072 | WM_T_ICH9, WMP_F_COPPER }, |
1073 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LM, | | 1073 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LM, |
1074 | "82567LM-2 LAN Controller", | | 1074 | "82567LM-2 LAN Controller", |
1075 | WM_T_ICH10, WMP_F_COPPER }, | | 1075 | WM_T_ICH10, WMP_F_COPPER }, |
1076 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LF, | | 1076 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LF, |
1077 | "82567LF-2 LAN Controller", | | 1077 | "82567LF-2 LAN Controller", |
1078 | WM_T_ICH10, WMP_F_COPPER }, | | 1078 | WM_T_ICH10, WMP_F_COPPER }, |
1079 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LM, | | 1079 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LM, |
1080 | "82567LM-3 LAN Controller", | | 1080 | "82567LM-3 LAN Controller", |
1081 | WM_T_ICH10, WMP_F_COPPER }, | | 1081 | WM_T_ICH10, WMP_F_COPPER }, |
1082 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LF, | | 1082 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LF, |
1083 | "82567LF-3 LAN Controller", | | 1083 | "82567LF-3 LAN Controller", |
1084 | WM_T_ICH10, WMP_F_COPPER }, | | 1084 | WM_T_ICH10, WMP_F_COPPER }, |
1085 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_V, | | 1085 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_V, |
| @@ -6520,1999 +6520,1999 @@ wm_nq_start_locked(struct ifnet *ifp) | | | @@ -6520,1999 +6520,1999 @@ wm_nq_start_locked(struct ifnet *ifp) |
6520 | | | 6520 | |
6521 | KASSERT(WM_TX_LOCKED(txq)); | | 6521 | KASSERT(WM_TX_LOCKED(txq)); |
6522 | | | 6522 | |
6523 | if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) | | 6523 | if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) |
6524 | return; | | 6524 | return; |
6525 | | | 6525 | |
6526 | sent = false; | | 6526 | sent = false; |
6527 | | | 6527 | |
6528 | /* | | 6528 | /* |
6529 | * Loop through the send queue, setting up transmit descriptors | | 6529 | * Loop through the send queue, setting up transmit descriptors |
6530 | * until we drain the queue, or use up all available transmit | | 6530 | * until we drain the queue, or use up all available transmit |
6531 | * descriptors. | | 6531 | * descriptors. |
6532 | */ | | 6532 | */ |
6533 | for (;;) { | | 6533 | for (;;) { |
6534 | m0 = NULL; | | 6534 | m0 = NULL; |
6535 | | | 6535 | |
6536 | /* Get a work queue entry. */ | | 6536 | /* Get a work queue entry. */ |
6537 | if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) { | | 6537 | if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) { |
6538 | wm_txeof(sc); | | 6538 | wm_txeof(sc); |
6539 | if (txq->txq_sfree == 0) { | | 6539 | if (txq->txq_sfree == 0) { |
6540 | DPRINTF(WM_DEBUG_TX, | | 6540 | DPRINTF(WM_DEBUG_TX, |
6541 | ("%s: TX: no free job descriptors\n", | | 6541 | ("%s: TX: no free job descriptors\n", |
6542 | device_xname(sc->sc_dev))); | | 6542 | device_xname(sc->sc_dev))); |
6543 | WM_EVCNT_INCR(&sc->sc_ev_txsstall); | | 6543 | WM_EVCNT_INCR(&sc->sc_ev_txsstall); |
6544 | break; | | 6544 | break; |
6545 | } | | 6545 | } |
6546 | } | | 6546 | } |
6547 | | | 6547 | |
6548 | /* Grab a packet off the queue. */ | | 6548 | /* Grab a packet off the queue. */ |
6549 | IFQ_DEQUEUE(&ifp->if_snd, m0); | | 6549 | IFQ_DEQUEUE(&ifp->if_snd, m0); |
6550 | if (m0 == NULL) | | 6550 | if (m0 == NULL) |
6551 | break; | | 6551 | break; |
6552 | | | 6552 | |
6553 | DPRINTF(WM_DEBUG_TX, | | 6553 | DPRINTF(WM_DEBUG_TX, |
6554 | ("%s: TX: have packet to transmit: %p\n", | | 6554 | ("%s: TX: have packet to transmit: %p\n", |
6555 | device_xname(sc->sc_dev), m0)); | | 6555 | device_xname(sc->sc_dev), m0)); |
6556 | | | 6556 | |
6557 | txs = &txq->txq_soft[txq->txq_snext]; | | 6557 | txs = &txq->txq_soft[txq->txq_snext]; |
6558 | dmamap = txs->txs_dmamap; | | 6558 | dmamap = txs->txs_dmamap; |
6559 | | | 6559 | |
6560 | /* | | 6560 | /* |
6561 | * Load the DMA map. If this fails, the packet either | | 6561 | * Load the DMA map. If this fails, the packet either |
6562 | * didn't fit in the allotted number of segments, or we | | 6562 | * didn't fit in the allotted number of segments, or we |
6563 | * were short on resources. For the too-many-segments | | 6563 | * were short on resources. For the too-many-segments |
6564 | * case, we simply report an error and drop the packet, | | 6564 | * case, we simply report an error and drop the packet, |
6565 | * since we can't sanely copy a jumbo packet to a single | | 6565 | * since we can't sanely copy a jumbo packet to a single |
6566 | * buffer. | | 6566 | * buffer. |
6567 | */ | | 6567 | */ |
6568 | error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, | | 6568 | error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, |
6569 | BUS_DMA_WRITE|BUS_DMA_NOWAIT); | | 6569 | BUS_DMA_WRITE|BUS_DMA_NOWAIT); |
6570 | if (error) { | | 6570 | if (error) { |
6571 | if (error == EFBIG) { | | 6571 | if (error == EFBIG) { |
6572 | WM_EVCNT_INCR(&sc->sc_ev_txdrop); | | 6572 | WM_EVCNT_INCR(&sc->sc_ev_txdrop); |
6573 | log(LOG_ERR, "%s: Tx packet consumes too many " | | 6573 | log(LOG_ERR, "%s: Tx packet consumes too many " |
6574 | "DMA segments, dropping...\n", | | 6574 | "DMA segments, dropping...\n", |
6575 | device_xname(sc->sc_dev)); | | 6575 | device_xname(sc->sc_dev)); |
6576 | wm_dump_mbuf_chain(sc, m0); | | 6576 | wm_dump_mbuf_chain(sc, m0); |
6577 | m_freem(m0); | | 6577 | m_freem(m0); |
6578 | continue; | | 6578 | continue; |
6579 | } | | 6579 | } |
6580 | /* Short on resources, just stop for now. */ | | 6580 | /* Short on resources, just stop for now. */ |
6581 | DPRINTF(WM_DEBUG_TX, | | 6581 | DPRINTF(WM_DEBUG_TX, |
6582 | ("%s: TX: dmamap load failed: %d\n", | | 6582 | ("%s: TX: dmamap load failed: %d\n", |
6583 | device_xname(sc->sc_dev), error)); | | 6583 | device_xname(sc->sc_dev), error)); |
6584 | break; | | 6584 | break; |
6585 | } | | 6585 | } |
6586 | | | 6586 | |
6587 | segs_needed = dmamap->dm_nsegs; | | 6587 | segs_needed = dmamap->dm_nsegs; |
6588 | | | 6588 | |
6589 | /* | | 6589 | /* |
6590 | * Ensure we have enough descriptors free to describe | | 6590 | * Ensure we have enough descriptors free to describe |
6591 | * the packet. Note, we always reserve one descriptor | | 6591 | * the packet. Note, we always reserve one descriptor |
6592 | * at the end of the ring due to the semantics of the | | 6592 | * at the end of the ring due to the semantics of the |
6593 | * TDT register, plus one more in the event we need | | 6593 | * TDT register, plus one more in the event we need |
6594 | * to load offload context. | | 6594 | * to load offload context. |
6595 | */ | | 6595 | */ |
6596 | if (segs_needed > txq->txq_free - 2) { | | 6596 | if (segs_needed > txq->txq_free - 2) { |
6597 | /* | | 6597 | /* |
6598 | * Not enough free descriptors to transmit this | | 6598 | * Not enough free descriptors to transmit this |
6599 | * packet. We haven't committed anything yet, | | 6599 | * packet. We haven't committed anything yet, |
6600 | * so just unload the DMA map, put the packet | | 6600 | * so just unload the DMA map, put the packet |
6601 | * back on the queue, and punt. Notify the upper | | 6601 | * back on the queue, and punt. Notify the upper |
6602 | * layer that there are no more slots left. | | 6602 | * layer that there are no more slots left. |
6603 | */ | | 6603 | */ |
6604 | DPRINTF(WM_DEBUG_TX, | | 6604 | DPRINTF(WM_DEBUG_TX, |
6605 | ("%s: TX: need %d (%d) descriptors, have %d\n", | | 6605 | ("%s: TX: need %d (%d) descriptors, have %d\n", |
6606 | device_xname(sc->sc_dev), dmamap->dm_nsegs, | | 6606 | device_xname(sc->sc_dev), dmamap->dm_nsegs, |
6607 | segs_needed, txq->txq_free - 1)); | | 6607 | segs_needed, txq->txq_free - 1)); |
6608 | ifp->if_flags |= IFF_OACTIVE; | | 6608 | ifp->if_flags |= IFF_OACTIVE; |
6609 | bus_dmamap_unload(sc->sc_dmat, dmamap); | | 6609 | bus_dmamap_unload(sc->sc_dmat, dmamap); |
6610 | WM_EVCNT_INCR(&sc->sc_ev_txdstall); | | 6610 | WM_EVCNT_INCR(&sc->sc_ev_txdstall); |
6611 | break; | | 6611 | break; |
6612 | } | | 6612 | } |
6613 | | | 6613 | |
6614 | /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */ | | 6614 | /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */ |
6615 | | | 6615 | |
6616 | DPRINTF(WM_DEBUG_TX, | | 6616 | DPRINTF(WM_DEBUG_TX, |
6617 | ("%s: TX: packet has %d (%d) DMA segments\n", | | 6617 | ("%s: TX: packet has %d (%d) DMA segments\n", |
6618 | device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed)); | | 6618 | device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed)); |
6619 | | | 6619 | |
6620 | WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]); | | 6620 | WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]); |
6621 | | | 6621 | |
6622 | /* | | 6622 | /* |
6623 | * Store a pointer to the packet so that we can free it | | 6623 | * Store a pointer to the packet so that we can free it |
6624 | * later. | | 6624 | * later. |
6625 | * | | 6625 | * |
6626 | * Initially, we consider the number of descriptors the | | 6626 | * Initially, we consider the number of descriptors the |
6627 | * packet uses the number of DMA segments. This may be | | 6627 | * packet uses the number of DMA segments. This may be |
6628 | * incremented by 1 if we do checksum offload (a descriptor | | 6628 | * incremented by 1 if we do checksum offload (a descriptor |
6629 | * is used to set the checksum context). | | 6629 | * is used to set the checksum context). |
6630 | */ | | 6630 | */ |
6631 | txs->txs_mbuf = m0; | | 6631 | txs->txs_mbuf = m0; |
6632 | txs->txs_firstdesc = txq->txq_next; | | 6632 | txs->txs_firstdesc = txq->txq_next; |
6633 | txs->txs_ndesc = segs_needed; | | 6633 | txs->txs_ndesc = segs_needed; |
6634 | | | 6634 | |
6635 | /* Set up offload parameters for this packet. */ | | 6635 | /* Set up offload parameters for this packet. */ |
6636 | uint32_t cmdlen, fields, dcmdlen; | | 6636 | uint32_t cmdlen, fields, dcmdlen; |
6637 | if (m0->m_pkthdr.csum_flags & | | 6637 | if (m0->m_pkthdr.csum_flags & |
6638 | (M_CSUM_TSOv4|M_CSUM_TSOv6| | | 6638 | (M_CSUM_TSOv4|M_CSUM_TSOv6| |
6639 | M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4| | | 6639 | M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4| |
6640 | M_CSUM_TCPv6|M_CSUM_UDPv6)) { | | 6640 | M_CSUM_TCPv6|M_CSUM_UDPv6)) { |
6641 | if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields, | | 6641 | if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields, |
6642 | &do_csum) != 0) { | | 6642 | &do_csum) != 0) { |
6643 | /* Error message already displayed. */ | | 6643 | /* Error message already displayed. */ |
6644 | bus_dmamap_unload(sc->sc_dmat, dmamap); | | 6644 | bus_dmamap_unload(sc->sc_dmat, dmamap); |
6645 | continue; | | 6645 | continue; |
6646 | } | | 6646 | } |
6647 | } else { | | 6647 | } else { |
6648 | do_csum = false; | | 6648 | do_csum = false; |
6649 | cmdlen = 0; | | 6649 | cmdlen = 0; |
6650 | fields = 0; | | 6650 | fields = 0; |
6651 | } | | 6651 | } |
6652 | | | 6652 | |
6653 | /* Sync the DMA map. */ | | 6653 | /* Sync the DMA map. */ |
6654 | bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, | | 6654 | bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, |
6655 | BUS_DMASYNC_PREWRITE); | | 6655 | BUS_DMASYNC_PREWRITE); |
6656 | | | 6656 | |
6657 | /* Initialize the first transmit descriptor. */ | | 6657 | /* Initialize the first transmit descriptor. */ |
6658 | nexttx = txq->txq_next; | | 6658 | nexttx = txq->txq_next; |
6659 | if (!do_csum) { | | 6659 | if (!do_csum) { |
6660 | /* setup a legacy descriptor */ | | 6660 | /* setup a legacy descriptor */ |
6661 | wm_set_dma_addr( | | 6661 | wm_set_dma_addr( |
6662 | &txq->txq_descs[nexttx].wtx_addr, | | 6662 | &txq->txq_descs[nexttx].wtx_addr, |
6663 | dmamap->dm_segs[0].ds_addr); | | 6663 | dmamap->dm_segs[0].ds_addr); |
6664 | txq->txq_descs[nexttx].wtx_cmdlen = | | 6664 | txq->txq_descs[nexttx].wtx_cmdlen = |
6665 | htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len); | | 6665 | htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len); |
6666 | txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0; | | 6666 | txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0; |
6667 | txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0; | | 6667 | txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0; |
6668 | if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != | | 6668 | if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != |
6669 | NULL) { | | 6669 | NULL) { |
6670 | txq->txq_descs[nexttx].wtx_cmdlen |= | | 6670 | txq->txq_descs[nexttx].wtx_cmdlen |= |
6671 | htole32(WTX_CMD_VLE); | | 6671 | htole32(WTX_CMD_VLE); |
6672 | txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = | | 6672 | txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = |
6673 | htole16(VLAN_TAG_VALUE(mtag) & 0xffff); | | 6673 | htole16(VLAN_TAG_VALUE(mtag) & 0xffff); |
6674 | } else { | | 6674 | } else { |
6675 | txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0; | | 6675 | txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0; |
6676 | } | | 6676 | } |
6677 | dcmdlen = 0; | | 6677 | dcmdlen = 0; |
6678 | } else { | | 6678 | } else { |
6679 | /* setup an advanced data descriptor */ | | 6679 | /* setup an advanced data descriptor */ |
6680 | txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr = | | 6680 | txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr = |
6681 | htole64(dmamap->dm_segs[0].ds_addr); | | 6681 | htole64(dmamap->dm_segs[0].ds_addr); |
6682 | KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0); | | 6682 | KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0); |
6683 | txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen = | | 6683 | txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen = |
6684 | htole32(dmamap->dm_segs[0].ds_len | cmdlen ); | | 6684 | htole32(dmamap->dm_segs[0].ds_len | cmdlen ); |
6685 | txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = | | 6685 | txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = |
6686 | htole32(fields); | | 6686 | htole32(fields); |
6687 | DPRINTF(WM_DEBUG_TX, | | 6687 | DPRINTF(WM_DEBUG_TX, |
6688 | ("%s: TX: adv data desc %d 0x%" PRIx64 "\n", | | 6688 | ("%s: TX: adv data desc %d 0x%" PRIx64 "\n", |
6689 | device_xname(sc->sc_dev), nexttx, | | 6689 | device_xname(sc->sc_dev), nexttx, |
6690 | (uint64_t)dmamap->dm_segs[0].ds_addr)); | | 6690 | (uint64_t)dmamap->dm_segs[0].ds_addr)); |
6691 | DPRINTF(WM_DEBUG_TX, | | 6691 | DPRINTF(WM_DEBUG_TX, |
6692 | ("\t 0x%08x%08x\n", fields, | | 6692 | ("\t 0x%08x%08x\n", fields, |
6693 | (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen)); | | 6693 | (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen)); |
6694 | dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT; | | 6694 | dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT; |
6695 | } | | 6695 | } |
6696 | | | 6696 | |
6697 | lasttx = nexttx; | | 6697 | lasttx = nexttx; |
6698 | nexttx = WM_NEXTTX(txq, nexttx); | | 6698 | nexttx = WM_NEXTTX(txq, nexttx); |
6699 | /* | | 6699 | /* |
6700 | * fill in the next descriptors. legacy or advanced format | | 6700 | * fill in the next descriptors. legacy or advanced format |
6701 | * is the same here | | 6701 | * is the same here |
6702 | */ | | 6702 | */ |
6703 | for (seg = 1; seg < dmamap->dm_nsegs; | | 6703 | for (seg = 1; seg < dmamap->dm_nsegs; |
6704 | seg++, nexttx = WM_NEXTTX(txq, nexttx)) { | | 6704 | seg++, nexttx = WM_NEXTTX(txq, nexttx)) { |
6705 | txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr = | | 6705 | txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr = |
6706 | htole64(dmamap->dm_segs[seg].ds_addr); | | 6706 | htole64(dmamap->dm_segs[seg].ds_addr); |
6707 | txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen = | | 6707 | txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen = |
6708 | htole32(dcmdlen | dmamap->dm_segs[seg].ds_len); | | 6708 | htole32(dcmdlen | dmamap->dm_segs[seg].ds_len); |
6709 | KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0); | | 6709 | KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0); |
6710 | txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0; | | 6710 | txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0; |
6711 | lasttx = nexttx; | | 6711 | lasttx = nexttx; |
6712 | | | 6712 | |
6713 | DPRINTF(WM_DEBUG_TX, | | 6713 | DPRINTF(WM_DEBUG_TX, |
6714 | ("%s: TX: desc %d: %#" PRIx64 ", " | | 6714 | ("%s: TX: desc %d: %#" PRIx64 ", " |
6715 | "len %#04zx\n", | | 6715 | "len %#04zx\n", |
6716 | device_xname(sc->sc_dev), nexttx, | | 6716 | device_xname(sc->sc_dev), nexttx, |
6717 | (uint64_t)dmamap->dm_segs[seg].ds_addr, | | 6717 | (uint64_t)dmamap->dm_segs[seg].ds_addr, |
6718 | dmamap->dm_segs[seg].ds_len)); | | 6718 | dmamap->dm_segs[seg].ds_len)); |
6719 | } | | 6719 | } |
6720 | | | 6720 | |
6721 | KASSERT(lasttx != -1); | | 6721 | KASSERT(lasttx != -1); |
6722 | | | 6722 | |
6723 | /* | | 6723 | /* |
6724 | * Set up the command byte on the last descriptor of | | 6724 | * Set up the command byte on the last descriptor of |
6725 | * the packet. If we're in the interrupt delay window, | | 6725 | * the packet. If we're in the interrupt delay window, |
6726 | * delay the interrupt. | | 6726 | * delay the interrupt. |
6727 | */ | | 6727 | */ |
6728 | KASSERT((WTX_CMD_EOP | WTX_CMD_RS) == | | 6728 | KASSERT((WTX_CMD_EOP | WTX_CMD_RS) == |
6729 | (NQTX_CMD_EOP | NQTX_CMD_RS)); | | 6729 | (NQTX_CMD_EOP | NQTX_CMD_RS)); |
6730 | txq->txq_descs[lasttx].wtx_cmdlen |= | | 6730 | txq->txq_descs[lasttx].wtx_cmdlen |= |
6731 | htole32(WTX_CMD_EOP | WTX_CMD_RS); | | 6731 | htole32(WTX_CMD_EOP | WTX_CMD_RS); |
6732 | | | 6732 | |
6733 | txs->txs_lastdesc = lasttx; | | 6733 | txs->txs_lastdesc = lasttx; |
6734 | | | 6734 | |
6735 | DPRINTF(WM_DEBUG_TX, | | 6735 | DPRINTF(WM_DEBUG_TX, |
6736 | ("%s: TX: desc %d: cmdlen 0x%08x\n", | | 6736 | ("%s: TX: desc %d: cmdlen 0x%08x\n", |
6737 | device_xname(sc->sc_dev), | | 6737 | device_xname(sc->sc_dev), |
6738 | lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen))); | | 6738 | lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen))); |
6739 | | | 6739 | |
6740 | /* Sync the descriptors we're using. */ | | 6740 | /* Sync the descriptors we're using. */ |
6741 | wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc, | | 6741 | wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc, |
6742 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); | | 6742 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
6743 | | | 6743 | |
6744 | /* Give the packet to the chip. */ | | 6744 | /* Give the packet to the chip. */ |
6745 | CSR_WRITE(sc, txq->txq_tdt_reg, nexttx); | | 6745 | CSR_WRITE(sc, txq->txq_tdt_reg, nexttx); |
6746 | sent = true; | | 6746 | sent = true; |
6747 | | | 6747 | |
6748 | DPRINTF(WM_DEBUG_TX, | | 6748 | DPRINTF(WM_DEBUG_TX, |
6749 | ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx)); | | 6749 | ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx)); |
6750 | | | 6750 | |
6751 | DPRINTF(WM_DEBUG_TX, | | 6751 | DPRINTF(WM_DEBUG_TX, |
6752 | ("%s: TX: finished transmitting packet, job %d\n", | | 6752 | ("%s: TX: finished transmitting packet, job %d\n", |
6753 | device_xname(sc->sc_dev), txq->txq_snext)); | | 6753 | device_xname(sc->sc_dev), txq->txq_snext)); |
6754 | | | 6754 | |
6755 | /* Advance the tx pointer. */ | | 6755 | /* Advance the tx pointer. */ |
6756 | txq->txq_free -= txs->txs_ndesc; | | 6756 | txq->txq_free -= txs->txs_ndesc; |
6757 | txq->txq_next = nexttx; | | 6757 | txq->txq_next = nexttx; |
6758 | | | 6758 | |
6759 | txq->txq_sfree--; | | 6759 | txq->txq_sfree--; |
6760 | txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext); | | 6760 | txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext); |
6761 | | | 6761 | |
6762 | /* Pass the packet to any BPF listeners. */ | | 6762 | /* Pass the packet to any BPF listeners. */ |
6763 | bpf_mtap(ifp, m0); | | 6763 | bpf_mtap(ifp, m0); |
6764 | } | | 6764 | } |
6765 | | | 6765 | |
6766 | if (m0 != NULL) { | | 6766 | if (m0 != NULL) { |
6767 | ifp->if_flags |= IFF_OACTIVE; | | 6767 | ifp->if_flags |= IFF_OACTIVE; |
6768 | WM_EVCNT_INCR(&sc->sc_ev_txdrop); | | 6768 | WM_EVCNT_INCR(&sc->sc_ev_txdrop); |
6769 | DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__)); | | 6769 | DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__)); |
6770 | m_freem(m0); | | 6770 | m_freem(m0); |
6771 | } | | 6771 | } |
6772 | | | 6772 | |
6773 | if (txq->txq_sfree == 0 || txq->txq_free <= 2) { | | 6773 | if (txq->txq_sfree == 0 || txq->txq_free <= 2) { |
6774 | /* No more slots; notify upper layer. */ | | 6774 | /* No more slots; notify upper layer. */ |
6775 | ifp->if_flags |= IFF_OACTIVE; | | 6775 | ifp->if_flags |= IFF_OACTIVE; |
6776 | } | | 6776 | } |
6777 | | | 6777 | |
6778 | if (sent) { | | 6778 | if (sent) { |
6779 | /* Set a watchdog timer in case the chip flakes out. */ | | 6779 | /* Set a watchdog timer in case the chip flakes out. */ |
6780 | ifp->if_timer = 5; | | 6780 | ifp->if_timer = 5; |
6781 | } | | 6781 | } |
6782 | } | | 6782 | } |
6783 | | | 6783 | |
6784 | /* Interrupt */ | | 6784 | /* Interrupt */ |
6785 | | | 6785 | |
/*
 * wm_txeof:
 *
 *	Helper; handle transmit interrupts.
 *
 *	Walks the software Tx job ring from txq_sdirty, reclaiming every
 *	job whose last descriptor has the DD (descriptor done) bit set:
 *	the DMA map is synced/unloaded, the mbuf chain is freed, interface
 *	counters are updated and the descriptors are returned to the free
 *	pool.  Stops at the first job that is not yet done.
 *
 *	Returns nonzero if at least one job was reclaimed, 0 otherwise
 *	(callers can use this to decide whether further work is pending).
 *
 *	NOTE(review): only Tx queue 0 is serviced here — confirm this is
 *	intentional for the multiqueue (sc_txq[]) layout.
 */
static int
wm_txeof(struct wm_softc *sc)
{
	struct wm_txqueue *txq = &sc->sc_txq[0];
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_txsoft *txs;
	bool processed = false;
	int count = 0;	/* number of jobs reclaimed; fed to rnd(9) below */
	int i;
	uint8_t status;

	/* Nothing to reclaim (or report) once the driver is stopping. */
	if (sc->sc_stopping)
		return 0;

	/*
	 * We are about to free Tx slots, so the "no more slots" condition
	 * set by the start routines no longer holds.
	 */
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
		txs = &txq->txq_soft[i];

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));

		/* Pull the device's writeback of this job's descriptors. */
		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status =
		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
		if ((status & WTX_ST_DD) == 0) {
			/*
			 * Not done yet: re-arm the read sync on the last
			 * descriptor and stop scanning; later jobs cannot
			 * be done either.
			 */
			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
			    BUS_DMASYNC_PREREAD);
			break;
		}

		processed = true;
		count++;
		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: job %d done: descs %d..%d\n",
		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
		    txs->txs_lastdesc));

		/*
		 * XXX We should probably be using the statistics
		 * XXX registers, but I don't know if they exist
		 * XXX on chips before the i82544.
		 */

#ifdef WM_EVENT_COUNTERS
		if (status & WTX_ST_TU)
			WM_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* WM_EVENT_COUNTERS */

		/* Account errors vs. successful packets from the status. */
		if (status & (WTX_ST_EC|WTX_ST_LC)) {
			ifp->if_oerrors++;
			if (status & WTX_ST_LC)
				log(LOG_WARNING, "%s: late collision\n",
				    device_xname(sc->sc_dev));
			else if (status & WTX_ST_EC) {
				/* Excessive collisions == 16 attempts. */
				ifp->if_collisions += 16;
				log(LOG_WARNING, "%s: excessive collisions\n",
				    device_xname(sc->sc_dev));
			}
		} else
			ifp->if_opackets++;

		/* Return the descriptors and release the DMA resources. */
		txq->txq_free += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	txq->txq_sdirty = i;
	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));

	/* Stir the entropy pool with the amount of work done. */
	if (count != 0)
		rnd_add_uint32(&sc->rnd_source, count);

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
		ifp->if_timer = 0;

	return processed;
}
6885 | | | 6885 | |
6886 | /* | | 6886 | /* |
6887 | * wm_rxeof: | | 6887 | * wm_rxeof: |
6888 | * | | 6888 | * |
6889 | * Helper; handle receive interrupts. | | 6889 | * Helper; handle receive interrupts. |
6890 | */ | | 6890 | */ |
6891 | static void | | 6891 | static void |
6892 | wm_rxeof(struct wm_rxqueue *rxq) | | 6892 | wm_rxeof(struct wm_rxqueue *rxq) |
6893 | { | | 6893 | { |
6894 | struct wm_softc *sc = rxq->rxq_sc; | | 6894 | struct wm_softc *sc = rxq->rxq_sc; |
6895 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; | | 6895 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
6896 | struct wm_rxsoft *rxs; | | 6896 | struct wm_rxsoft *rxs; |
6897 | struct mbuf *m; | | 6897 | struct mbuf *m; |
6898 | int i, len; | | 6898 | int i, len; |
6899 | int count = 0; | | 6899 | int count = 0; |
6900 | uint8_t status, errors; | | 6900 | uint8_t status, errors; |
6901 | uint16_t vlantag; | | 6901 | uint16_t vlantag; |
6902 | | | 6902 | |
6903 | for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) { | | 6903 | for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) { |
6904 | rxs = &rxq->rxq_soft[i]; | | 6904 | rxs = &rxq->rxq_soft[i]; |
6905 | | | 6905 | |
6906 | DPRINTF(WM_DEBUG_RX, | | 6906 | DPRINTF(WM_DEBUG_RX, |
6907 | ("%s: RX: checking descriptor %d\n", | | 6907 | ("%s: RX: checking descriptor %d\n", |
6908 | device_xname(sc->sc_dev), i)); | | 6908 | device_xname(sc->sc_dev), i)); |
6909 | | | 6909 | |
6910 | wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); | | 6910 | wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); |
6911 | | | 6911 | |
6912 | status = rxq->rxq_descs[i].wrx_status; | | 6912 | status = rxq->rxq_descs[i].wrx_status; |
6913 | errors = rxq->rxq_descs[i].wrx_errors; | | 6913 | errors = rxq->rxq_descs[i].wrx_errors; |
6914 | len = le16toh(rxq->rxq_descs[i].wrx_len); | | 6914 | len = le16toh(rxq->rxq_descs[i].wrx_len); |
6915 | vlantag = rxq->rxq_descs[i].wrx_special; | | 6915 | vlantag = rxq->rxq_descs[i].wrx_special; |
6916 | | | 6916 | |
6917 | if ((status & WRX_ST_DD) == 0) { | | 6917 | if ((status & WRX_ST_DD) == 0) { |
6918 | /* We have processed all of the receive descriptors. */ | | 6918 | /* We have processed all of the receive descriptors. */ |
6919 | wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD); | | 6919 | wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD); |
6920 | break; | | 6920 | break; |
6921 | } | | 6921 | } |
6922 | | | 6922 | |
6923 | count++; | | 6923 | count++; |
6924 | if (__predict_false(rxq->rxq_discard)) { | | 6924 | if (__predict_false(rxq->rxq_discard)) { |
6925 | DPRINTF(WM_DEBUG_RX, | | 6925 | DPRINTF(WM_DEBUG_RX, |
6926 | ("%s: RX: discarding contents of descriptor %d\n", | | 6926 | ("%s: RX: discarding contents of descriptor %d\n", |
6927 | device_xname(sc->sc_dev), i)); | | 6927 | device_xname(sc->sc_dev), i)); |
6928 | wm_init_rxdesc(rxq, i); | | 6928 | wm_init_rxdesc(rxq, i); |
6929 | if (status & WRX_ST_EOP) { | | 6929 | if (status & WRX_ST_EOP) { |
6930 | /* Reset our state. */ | | 6930 | /* Reset our state. */ |
6931 | DPRINTF(WM_DEBUG_RX, | | 6931 | DPRINTF(WM_DEBUG_RX, |
6932 | ("%s: RX: resetting rxdiscard -> 0\n", | | 6932 | ("%s: RX: resetting rxdiscard -> 0\n", |
6933 | device_xname(sc->sc_dev))); | | 6933 | device_xname(sc->sc_dev))); |
6934 | rxq->rxq_discard = 0; | | 6934 | rxq->rxq_discard = 0; |
6935 | } | | 6935 | } |
6936 | continue; | | 6936 | continue; |
6937 | } | | 6937 | } |
6938 | | | 6938 | |
6939 | bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, | | 6939 | bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, |
6940 | rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); | | 6940 | rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); |
6941 | | | 6941 | |
6942 | m = rxs->rxs_mbuf; | | 6942 | m = rxs->rxs_mbuf; |
6943 | | | 6943 | |
6944 | /* | | 6944 | /* |
6945 | * Add a new receive buffer to the ring, unless of | | 6945 | * Add a new receive buffer to the ring, unless of |
6946 | * course the length is zero. Treat the latter as a | | 6946 | * course the length is zero. Treat the latter as a |
6947 | * failed mapping. | | 6947 | * failed mapping. |
6948 | */ | | 6948 | */ |
6949 | if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) { | | 6949 | if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) { |
6950 | /* | | 6950 | /* |
6951 | * Failed, throw away what we've done so | | 6951 | * Failed, throw away what we've done so |
6952 | * far, and discard the rest of the packet. | | 6952 | * far, and discard the rest of the packet. |
6953 | */ | | 6953 | */ |
6954 | ifp->if_ierrors++; | | 6954 | ifp->if_ierrors++; |
6955 | bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, | | 6955 | bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, |
6956 | rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); | | 6956 | rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); |
6957 | wm_init_rxdesc(rxq, i); | | 6957 | wm_init_rxdesc(rxq, i); |
6958 | if ((status & WRX_ST_EOP) == 0) | | 6958 | if ((status & WRX_ST_EOP) == 0) |
6959 | rxq->rxq_discard = 1; | | 6959 | rxq->rxq_discard = 1; |
6960 | if (rxq->rxq_head != NULL) | | 6960 | if (rxq->rxq_head != NULL) |
6961 | m_freem(rxq->rxq_head); | | 6961 | m_freem(rxq->rxq_head); |
6962 | WM_RXCHAIN_RESET(rxq); | | 6962 | WM_RXCHAIN_RESET(rxq); |
6963 | DPRINTF(WM_DEBUG_RX, | | 6963 | DPRINTF(WM_DEBUG_RX, |
6964 | ("%s: RX: Rx buffer allocation failed, " | | 6964 | ("%s: RX: Rx buffer allocation failed, " |
6965 | "dropping packet%s\n", device_xname(sc->sc_dev), | | 6965 | "dropping packet%s\n", device_xname(sc->sc_dev), |
6966 | rxq->rxq_discard ? " (discard)" : "")); | | 6966 | rxq->rxq_discard ? " (discard)" : "")); |
6967 | continue; | | 6967 | continue; |
6968 | } | | 6968 | } |
6969 | | | 6969 | |
6970 | m->m_len = len; | | 6970 | m->m_len = len; |
6971 | rxq->rxq_len += len; | | 6971 | rxq->rxq_len += len; |
6972 | DPRINTF(WM_DEBUG_RX, | | 6972 | DPRINTF(WM_DEBUG_RX, |
6973 | ("%s: RX: buffer at %p len %d\n", | | 6973 | ("%s: RX: buffer at %p len %d\n", |
6974 | device_xname(sc->sc_dev), m->m_data, len)); | | 6974 | device_xname(sc->sc_dev), m->m_data, len)); |
6975 | | | 6975 | |
6976 | /* If this is not the end of the packet, keep looking. */ | | 6976 | /* If this is not the end of the packet, keep looking. */ |
6977 | if ((status & WRX_ST_EOP) == 0) { | | 6977 | if ((status & WRX_ST_EOP) == 0) { |
6978 | WM_RXCHAIN_LINK(rxq, m); | | 6978 | WM_RXCHAIN_LINK(rxq, m); |
6979 | DPRINTF(WM_DEBUG_RX, | | 6979 | DPRINTF(WM_DEBUG_RX, |
6980 | ("%s: RX: not yet EOP, rxlen -> %d\n", | | 6980 | ("%s: RX: not yet EOP, rxlen -> %d\n", |
6981 | device_xname(sc->sc_dev), rxq->rxq_len)); | | 6981 | device_xname(sc->sc_dev), rxq->rxq_len)); |
6982 | continue; | | 6982 | continue; |
6983 | } | | 6983 | } |
6984 | | | 6984 | |
6985 | /* | | 6985 | /* |
6986 | * Okay, we have the entire packet now. The chip is | | 6986 | * Okay, we have the entire packet now. The chip is |
6987 | * configured to include the FCS except I350 and I21[01] | | 6987 | * configured to include the FCS except I350 and I21[01] |
6988 | * (not all chips can be configured to strip it), | | 6988 | * (not all chips can be configured to strip it), |
6989 | * so we need to trim it. | | 6989 | * so we need to trim it. |
6990 | * May need to adjust length of previous mbuf in the | | 6990 | * May need to adjust length of previous mbuf in the |
6991 | * chain if the current mbuf is too short. | | 6991 | * chain if the current mbuf is too short. |
6992 | * For an eratta, the RCTL_SECRC bit in RCTL register | | 6992 | * For an eratta, the RCTL_SECRC bit in RCTL register |
6993 | * is always set in I350, so we don't trim it. | | 6993 | * is always set in I350, so we don't trim it. |
6994 | */ | | 6994 | */ |
6995 | if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354) | | 6995 | if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354) |
6996 | && (sc->sc_type != WM_T_I210) | | 6996 | && (sc->sc_type != WM_T_I210) |
6997 | && (sc->sc_type != WM_T_I211)) { | | 6997 | && (sc->sc_type != WM_T_I211)) { |
6998 | if (m->m_len < ETHER_CRC_LEN) { | | 6998 | if (m->m_len < ETHER_CRC_LEN) { |
6999 | rxq->rxq_tail->m_len | | 6999 | rxq->rxq_tail->m_len |
7000 | -= (ETHER_CRC_LEN - m->m_len); | | 7000 | -= (ETHER_CRC_LEN - m->m_len); |
7001 | m->m_len = 0; | | 7001 | m->m_len = 0; |
7002 | } else | | 7002 | } else |
7003 | m->m_len -= ETHER_CRC_LEN; | | 7003 | m->m_len -= ETHER_CRC_LEN; |
7004 | len = rxq->rxq_len - ETHER_CRC_LEN; | | 7004 | len = rxq->rxq_len - ETHER_CRC_LEN; |
7005 | } else | | 7005 | } else |
7006 | len = rxq->rxq_len; | | 7006 | len = rxq->rxq_len; |
7007 | | | 7007 | |
7008 | WM_RXCHAIN_LINK(rxq, m); | | 7008 | WM_RXCHAIN_LINK(rxq, m); |
7009 | | | 7009 | |
7010 | *rxq->rxq_tailp = NULL; | | 7010 | *rxq->rxq_tailp = NULL; |
7011 | m = rxq->rxq_head; | | 7011 | m = rxq->rxq_head; |
7012 | | | 7012 | |
7013 | WM_RXCHAIN_RESET(rxq); | | 7013 | WM_RXCHAIN_RESET(rxq); |
7014 | | | 7014 | |
7015 | DPRINTF(WM_DEBUG_RX, | | 7015 | DPRINTF(WM_DEBUG_RX, |
7016 | ("%s: RX: have entire packet, len -> %d\n", | | 7016 | ("%s: RX: have entire packet, len -> %d\n", |
7017 | device_xname(sc->sc_dev), len)); | | 7017 | device_xname(sc->sc_dev), len)); |
7018 | | | 7018 | |
7019 | /* If an error occurred, update stats and drop the packet. */ | | 7019 | /* If an error occurred, update stats and drop the packet. */ |
7020 | if (errors & | | 7020 | if (errors & |
7021 | (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) { | | 7021 | (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) { |
7022 | if (errors & WRX_ER_SE) | | 7022 | if (errors & WRX_ER_SE) |
7023 | log(LOG_WARNING, "%s: symbol error\n", | | 7023 | log(LOG_WARNING, "%s: symbol error\n", |
7024 | device_xname(sc->sc_dev)); | | 7024 | device_xname(sc->sc_dev)); |
7025 | else if (errors & WRX_ER_SEQ) | | 7025 | else if (errors & WRX_ER_SEQ) |
7026 | log(LOG_WARNING, "%s: receive sequence error\n", | | 7026 | log(LOG_WARNING, "%s: receive sequence error\n", |
7027 | device_xname(sc->sc_dev)); | | 7027 | device_xname(sc->sc_dev)); |
7028 | else if (errors & WRX_ER_CE) | | 7028 | else if (errors & WRX_ER_CE) |
7029 | log(LOG_WARNING, "%s: CRC error\n", | | 7029 | log(LOG_WARNING, "%s: CRC error\n", |
7030 | device_xname(sc->sc_dev)); | | 7030 | device_xname(sc->sc_dev)); |
7031 | m_freem(m); | | 7031 | m_freem(m); |
7032 | continue; | | 7032 | continue; |
7033 | } | | 7033 | } |
7034 | | | 7034 | |
7035 | /* No errors. Receive the packet. */ | | 7035 | /* No errors. Receive the packet. */ |
7036 | m->m_pkthdr.rcvif = ifp; | | 7036 | m->m_pkthdr.rcvif = ifp; |
7037 | m->m_pkthdr.len = len; | | 7037 | m->m_pkthdr.len = len; |
7038 | | | 7038 | |
7039 | /* | | 7039 | /* |
7040 | * If VLANs are enabled, VLAN packets have been unwrapped | | 7040 | * If VLANs are enabled, VLAN packets have been unwrapped |
7041 | * for us. Associate the tag with the packet. | | 7041 | * for us. Associate the tag with the packet. |
7042 | */ | | 7042 | */ |
7043 | /* XXXX should check for i350 and i354 */ | | 7043 | /* XXXX should check for i350 and i354 */ |
7044 | if ((status & WRX_ST_VP) != 0) { | | 7044 | if ((status & WRX_ST_VP) != 0) { |
7045 | VLAN_INPUT_TAG(ifp, m, | | 7045 | VLAN_INPUT_TAG(ifp, m, |
7046 | le16toh(vlantag), | | 7046 | le16toh(vlantag), |
7047 | continue); | | 7047 | continue); |
7048 | } | | 7048 | } |
7049 | | | 7049 | |
7050 | /* Set up checksum info for this packet. */ | | 7050 | /* Set up checksum info for this packet. */ |
7051 | if ((status & WRX_ST_IXSM) == 0) { | | 7051 | if ((status & WRX_ST_IXSM) == 0) { |
7052 | if (status & WRX_ST_IPCS) { | | 7052 | if (status & WRX_ST_IPCS) { |
7053 | WM_EVCNT_INCR(&sc->sc_ev_rxipsum); | | 7053 | WM_EVCNT_INCR(&sc->sc_ev_rxipsum); |
7054 | m->m_pkthdr.csum_flags |= M_CSUM_IPv4; | | 7054 | m->m_pkthdr.csum_flags |= M_CSUM_IPv4; |
7055 | if (errors & WRX_ER_IPE) | | 7055 | if (errors & WRX_ER_IPE) |
7056 | m->m_pkthdr.csum_flags |= | | 7056 | m->m_pkthdr.csum_flags |= |
7057 | M_CSUM_IPv4_BAD; | | 7057 | M_CSUM_IPv4_BAD; |
7058 | } | | 7058 | } |
7059 | if (status & WRX_ST_TCPCS) { | | 7059 | if (status & WRX_ST_TCPCS) { |
7060 | /* | | 7060 | /* |
7061 | * Note: we don't know if this was TCP or UDP, | | 7061 | * Note: we don't know if this was TCP or UDP, |
7062 | * so we just set both bits, and expect the | | 7062 | * so we just set both bits, and expect the |
7063 | * upper layers to deal. | | 7063 | * upper layers to deal. |
7064 | */ | | 7064 | */ |
7065 | WM_EVCNT_INCR(&sc->sc_ev_rxtusum); | | 7065 | WM_EVCNT_INCR(&sc->sc_ev_rxtusum); |
7066 | m->m_pkthdr.csum_flags |= | | 7066 | m->m_pkthdr.csum_flags |= |
7067 | M_CSUM_TCPv4 | M_CSUM_UDPv4 | | | 7067 | M_CSUM_TCPv4 | M_CSUM_UDPv4 | |
7068 | M_CSUM_TCPv6 | M_CSUM_UDPv6; | | 7068 | M_CSUM_TCPv6 | M_CSUM_UDPv6; |
7069 | if (errors & WRX_ER_TCPE) | | 7069 | if (errors & WRX_ER_TCPE) |
7070 | m->m_pkthdr.csum_flags |= | | 7070 | m->m_pkthdr.csum_flags |= |
7071 | M_CSUM_TCP_UDP_BAD; | | 7071 | M_CSUM_TCP_UDP_BAD; |
7072 | } | | 7072 | } |
7073 | } | | 7073 | } |
7074 | | | 7074 | |
7075 | ifp->if_ipackets++; | | 7075 | ifp->if_ipackets++; |
7076 | | | 7076 | |
7077 | WM_RX_UNLOCK(rxq); | | 7077 | WM_RX_UNLOCK(rxq); |
7078 | | | 7078 | |
7079 | /* Pass this up to any BPF listeners. */ | | 7079 | /* Pass this up to any BPF listeners. */ |
7080 | bpf_mtap(ifp, m); | | 7080 | bpf_mtap(ifp, m); |
7081 | | | 7081 | |
7082 | /* Pass it on. */ | | 7082 | /* Pass it on. */ |
7083 | (*ifp->if_input)(ifp, m); | | 7083 | (*ifp->if_input)(ifp, m); |
7084 | | | 7084 | |
7085 | WM_RX_LOCK(rxq); | | 7085 | WM_RX_LOCK(rxq); |
7086 | | | 7086 | |
7087 | if (sc->sc_stopping) | | 7087 | if (sc->sc_stopping) |
7088 | break; | | 7088 | break; |
7089 | } | | 7089 | } |
7090 | | | 7090 | |
7091 | /* Update the receive pointer. */ | | 7091 | /* Update the receive pointer. */ |
7092 | rxq->rxq_ptr = i; | | 7092 | rxq->rxq_ptr = i; |
7093 | if (count != 0) | | 7093 | if (count != 0) |
7094 | rnd_add_uint32(&sc->rnd_source, count); | | 7094 | rnd_add_uint32(&sc->rnd_source, count); |
7095 | | | 7095 | |
7096 | DPRINTF(WM_DEBUG_RX, | | 7096 | DPRINTF(WM_DEBUG_RX, |
7097 | ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i)); | | 7097 | ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i)); |
7098 | } | | 7098 | } |
7099 | | | 7099 | |
7100 | /* | | 7100 | /* |
7101 | * wm_linkintr_gmii: | | 7101 | * wm_linkintr_gmii: |
7102 | * | | 7102 | * |
7103 | * Helper; handle link interrupts for GMII. | | 7103 | * Helper; handle link interrupts for GMII. |
7104 | */ | | 7104 | */ |
7105 | static void | | 7105 | static void |
7106 | wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr) | | 7106 | wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr) |
7107 | { | | 7107 | { |
7108 | | | 7108 | |
7109 | KASSERT(WM_CORE_LOCKED(sc)); | | 7109 | KASSERT(WM_CORE_LOCKED(sc)); |
7110 | | | 7110 | |
7111 | DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev), | | 7111 | DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev), |
7112 | __func__)); | | 7112 | __func__)); |
7113 | | | 7113 | |
7114 | if (icr & ICR_LSC) { | | 7114 | if (icr & ICR_LSC) { |
7115 | DPRINTF(WM_DEBUG_LINK, | | 7115 | DPRINTF(WM_DEBUG_LINK, |
7116 | ("%s: LINK: LSC -> mii_pollstat\n", | | 7116 | ("%s: LINK: LSC -> mii_pollstat\n", |
7117 | device_xname(sc->sc_dev))); | | 7117 | device_xname(sc->sc_dev))); |
7118 | mii_pollstat(&sc->sc_mii); | | 7118 | mii_pollstat(&sc->sc_mii); |
7119 | if (sc->sc_type == WM_T_82543) { | | 7119 | if (sc->sc_type == WM_T_82543) { |
7120 | int miistatus, active; | | 7120 | int miistatus, active; |
7121 | | | 7121 | |
7122 | /* | | 7122 | /* |
7123 | * With 82543, we need to force speed and | | 7123 | * With 82543, we need to force speed and |
7124 | * duplex on the MAC equal to what the PHY | | 7124 | * duplex on the MAC equal to what the PHY |
7125 | * speed and duplex configuration is. | | 7125 | * speed and duplex configuration is. |
7126 | */ | | 7126 | */ |
7127 | miistatus = sc->sc_mii.mii_media_status; | | 7127 | miistatus = sc->sc_mii.mii_media_status; |
7128 | | | 7128 | |
7129 | if (miistatus & IFM_ACTIVE) { | | 7129 | if (miistatus & IFM_ACTIVE) { |
7130 | active = sc->sc_mii.mii_media_active; | | 7130 | active = sc->sc_mii.mii_media_active; |
7131 | sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD); | | 7131 | sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD); |
7132 | switch (IFM_SUBTYPE(active)) { | | 7132 | switch (IFM_SUBTYPE(active)) { |
7133 | case IFM_10_T: | | 7133 | case IFM_10_T: |
7134 | sc->sc_ctrl |= CTRL_SPEED_10; | | 7134 | sc->sc_ctrl |= CTRL_SPEED_10; |
7135 | break; | | 7135 | break; |
7136 | case IFM_100_TX: | | 7136 | case IFM_100_TX: |
7137 | sc->sc_ctrl |= CTRL_SPEED_100; | | 7137 | sc->sc_ctrl |= CTRL_SPEED_100; |
7138 | break; | | 7138 | break; |
7139 | case IFM_1000_T: | | 7139 | case IFM_1000_T: |
7140 | sc->sc_ctrl |= CTRL_SPEED_1000; | | 7140 | sc->sc_ctrl |= CTRL_SPEED_1000; |
7141 | break; | | 7141 | break; |
7142 | default: | | 7142 | default: |
7143 | /* | | 7143 | /* |
7144 | * fiber? | | 7144 | * fiber? |
7145 | * Shoud not enter here. | | 7145 | * Shoud not enter here. |
7146 | */ | | 7146 | */ |
7147 | printf("unknown media (%x)\n", | | 7147 | printf("unknown media (%x)\n", |
7148 | active); | | 7148 | active); |
7149 | break; | | 7149 | break; |
7150 | } | | 7150 | } |
7151 | if (active & IFM_FDX) | | 7151 | if (active & IFM_FDX) |
7152 | sc->sc_ctrl |= CTRL_FD; | | 7152 | sc->sc_ctrl |= CTRL_FD; |
7153 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); | | 7153 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); |
7154 | } | | 7154 | } |
7155 | } else if ((sc->sc_type == WM_T_ICH8) | | 7155 | } else if ((sc->sc_type == WM_T_ICH8) |
7156 | && (sc->sc_phytype == WMPHY_IGP_3)) { | | 7156 | && (sc->sc_phytype == WMPHY_IGP_3)) { |
7157 | wm_kmrn_lock_loss_workaround_ich8lan(sc); | | 7157 | wm_kmrn_lock_loss_workaround_ich8lan(sc); |
7158 | } else if (sc->sc_type == WM_T_PCH) { | | 7158 | } else if (sc->sc_type == WM_T_PCH) { |
7159 | wm_k1_gig_workaround_hv(sc, | | 7159 | wm_k1_gig_workaround_hv(sc, |
7160 | ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0)); | | 7160 | ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0)); |
7161 | } | | 7161 | } |
7162 | | | 7162 | |
7163 | if ((sc->sc_phytype == WMPHY_82578) | | 7163 | if ((sc->sc_phytype == WMPHY_82578) |
7164 | && (IFM_SUBTYPE(sc->sc_mii.mii_media_active) | | 7164 | && (IFM_SUBTYPE(sc->sc_mii.mii_media_active) |
7165 | == IFM_1000_T)) { | | 7165 | == IFM_1000_T)) { |
7166 | | | 7166 | |
7167 | if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) { | | 7167 | if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) { |
7168 | delay(200*1000); /* XXX too big */ | | 7168 | delay(200*1000); /* XXX too big */ |
7169 | | | 7169 | |
7170 | /* Link stall fix for link up */ | | 7170 | /* Link stall fix for link up */ |
7171 | wm_gmii_hv_writereg(sc->sc_dev, 1, | | 7171 | wm_gmii_hv_writereg(sc->sc_dev, 1, |
7172 | HV_MUX_DATA_CTRL, | | 7172 | HV_MUX_DATA_CTRL, |
7173 | HV_MUX_DATA_CTRL_GEN_TO_MAC | | 7173 | HV_MUX_DATA_CTRL_GEN_TO_MAC |
7174 | | HV_MUX_DATA_CTRL_FORCE_SPEED); | | 7174 | | HV_MUX_DATA_CTRL_FORCE_SPEED); |
7175 | wm_gmii_hv_writereg(sc->sc_dev, 1, | | 7175 | wm_gmii_hv_writereg(sc->sc_dev, 1, |
7176 | HV_MUX_DATA_CTRL, | | 7176 | HV_MUX_DATA_CTRL, |
7177 | HV_MUX_DATA_CTRL_GEN_TO_MAC); | | 7177 | HV_MUX_DATA_CTRL_GEN_TO_MAC); |
7178 | } | | 7178 | } |
7179 | } | | 7179 | } |
7180 | } else if (icr & ICR_RXSEQ) { | | 7180 | } else if (icr & ICR_RXSEQ) { |
7181 | DPRINTF(WM_DEBUG_LINK, | | 7181 | DPRINTF(WM_DEBUG_LINK, |
7182 | ("%s: LINK Receive sequence error\n", | | 7182 | ("%s: LINK Receive sequence error\n", |
7183 | device_xname(sc->sc_dev))); | | 7183 | device_xname(sc->sc_dev))); |
7184 | } | | 7184 | } |
7185 | } | | 7185 | } |
7186 | | | 7186 | |
7187 | /* | | 7187 | /* |
7188 | * wm_linkintr_tbi: | | 7188 | * wm_linkintr_tbi: |
7189 | * | | 7189 | * |
7190 | * Helper; handle link interrupts for TBI mode. | | 7190 | * Helper; handle link interrupts for TBI mode. |
7191 | */ | | 7191 | */ |
7192 | static void | | 7192 | static void |
7193 | wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr) | | 7193 | wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr) |
7194 | { | | 7194 | { |
7195 | uint32_t status; | | 7195 | uint32_t status; |
7196 | | | 7196 | |
7197 | DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev), | | 7197 | DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev), |
7198 | __func__)); | | 7198 | __func__)); |
7199 | | | 7199 | |
7200 | status = CSR_READ(sc, WMREG_STATUS); | | 7200 | status = CSR_READ(sc, WMREG_STATUS); |
7201 | if (icr & ICR_LSC) { | | 7201 | if (icr & ICR_LSC) { |
7202 | if (status & STATUS_LU) { | | 7202 | if (status & STATUS_LU) { |
7203 | DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n", | | 7203 | DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n", |
7204 | device_xname(sc->sc_dev), | | 7204 | device_xname(sc->sc_dev), |
7205 | (status & STATUS_FD) ? "FDX" : "HDX")); | | 7205 | (status & STATUS_FD) ? "FDX" : "HDX")); |
7206 | /* | | 7206 | /* |
7207 | * NOTE: CTRL will update TFCE and RFCE automatically, | | 7207 | * NOTE: CTRL will update TFCE and RFCE automatically, |
7208 | * so we should update sc->sc_ctrl | | 7208 | * so we should update sc->sc_ctrl |
7209 | */ | | 7209 | */ |
7210 | | | 7210 | |
7211 | sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL); | | 7211 | sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL); |
7212 | sc->sc_tctl &= ~TCTL_COLD(0x3ff); | | 7212 | sc->sc_tctl &= ~TCTL_COLD(0x3ff); |
7213 | sc->sc_fcrtl &= ~FCRTL_XONE; | | 7213 | sc->sc_fcrtl &= ~FCRTL_XONE; |
7214 | if (status & STATUS_FD) | | 7214 | if (status & STATUS_FD) |
7215 | sc->sc_tctl |= | | 7215 | sc->sc_tctl |= |
7216 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX); | | 7216 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX); |
7217 | else | | 7217 | else |
7218 | sc->sc_tctl |= | | 7218 | sc->sc_tctl |= |
7219 | TCTL_COLD(TX_COLLISION_DISTANCE_HDX); | | 7219 | TCTL_COLD(TX_COLLISION_DISTANCE_HDX); |
7220 | if (sc->sc_ctrl & CTRL_TFCE) | | 7220 | if (sc->sc_ctrl & CTRL_TFCE) |
7221 | sc->sc_fcrtl |= FCRTL_XONE; | | 7221 | sc->sc_fcrtl |= FCRTL_XONE; |
7222 | CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); | | 7222 | CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); |
7223 | CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? | | 7223 | CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? |
7224 | WMREG_OLD_FCRTL : WMREG_FCRTL, | | 7224 | WMREG_OLD_FCRTL : WMREG_FCRTL, |
7225 | sc->sc_fcrtl); | | 7225 | sc->sc_fcrtl); |
7226 | sc->sc_tbi_linkup = 1; | | 7226 | sc->sc_tbi_linkup = 1; |
7227 | } else { | | 7227 | } else { |
7228 | DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n", | | 7228 | DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n", |
7229 | device_xname(sc->sc_dev))); | | 7229 | device_xname(sc->sc_dev))); |
7230 | sc->sc_tbi_linkup = 0; | | 7230 | sc->sc_tbi_linkup = 0; |
7231 | } | | 7231 | } |
7232 | /* Update LED */ | | 7232 | /* Update LED */ |
7233 | wm_tbi_serdes_set_linkled(sc); | | 7233 | wm_tbi_serdes_set_linkled(sc); |
7234 | } else if (icr & ICR_RXSEQ) { | | 7234 | } else if (icr & ICR_RXSEQ) { |
7235 | DPRINTF(WM_DEBUG_LINK, | | 7235 | DPRINTF(WM_DEBUG_LINK, |
7236 | ("%s: LINK: Receive sequence error\n", | | 7236 | ("%s: LINK: Receive sequence error\n", |
7237 | device_xname(sc->sc_dev))); | | 7237 | device_xname(sc->sc_dev))); |
7238 | } | | 7238 | } |
7239 | } | | 7239 | } |
7240 | | | 7240 | |
/*
 * wm_linkintr_serdes:
 *
 *	Helper; handle link interrupts for TBI mode.
 *
 *	SERDES (82575+) variant: link state comes from the PCS link
 *	status register rather than the MAC STATUS register, and flow
 *	control is resolved from the PCS autonegotiation registers.
 */
static void
wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
{
	struct mii_data *mii = &sc->sc_mii;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t pcs_adv, pcs_lpab, reg;

	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
		__func__));

	if (icr & ICR_LSC) {
		/* Check PCS */
		reg = CSR_READ(sc, WMREG_PCS_LSTS);
		if ((reg & PCS_LSTS_LINKOK) != 0) {
			mii->mii_media_status |= IFM_ACTIVE;
			sc->sc_tbi_linkup = 1;
		} else {
			/*
			 * NOTE(review): IFM_NONE is OR'd into
			 * mii_media_status here; it looks like a media
			 * (mii_media_active) value rather than a status
			 * flag — confirm against ifmedia(9).
			 */
			mii->mii_media_status |= IFM_NONE;
			sc->sc_tbi_linkup = 0;
			/* Link is down: refresh the LED and stop here. */
			wm_tbi_serdes_set_linkled(sc);
			return;
		}
		mii->mii_media_active |= IFM_1000_SX;
		if ((reg & PCS_LSTS_FDX) != 0)
			mii->mii_media_active |= IFM_FDX;
		else
			mii->mii_media_active |= IFM_HDX;
		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
			/* Check flow */
			reg = CSR_READ(sc, WMREG_PCS_LSTS);
			if ((reg & PCS_LSTS_AN_COMP) == 0) {
				/* Autonegotiation not finished yet. */
				DPRINTF(WM_DEBUG_LINK,
				    ("XXX LINKOK but not ACOMP\n"));
				return;
			}
			/*
			 * Resolve pause capability from our advertisement
			 * and the link partner's ability word: symmetric
			 * pause on both sides enables both directions;
			 * otherwise the asymmetric-pause combinations
			 * select TX-only or RX-only pause.
			 */
			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
			DPRINTF(WM_DEBUG_LINK,
			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
			if ((pcs_adv & TXCW_SYM_PAUSE)
			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
				mii->mii_media_active |= IFM_FLOW
				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
			    && (pcs_adv & TXCW_ASYM_PAUSE)
			    && (pcs_lpab & TXCW_SYM_PAUSE)
			    && (pcs_lpab & TXCW_ASYM_PAUSE))
				mii->mii_media_active |= IFM_FLOW
				    | IFM_ETH_TXPAUSE;
			else if ((pcs_adv & TXCW_SYM_PAUSE)
			    && (pcs_adv & TXCW_ASYM_PAUSE)
			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
			    && (pcs_lpab & TXCW_ASYM_PAUSE))
				mii->mii_media_active |= IFM_FLOW
				    | IFM_ETH_RXPAUSE;
		}
		/* Update LED */
		wm_tbi_serdes_set_linkled(sc);
	} else {
		/*
		 * NOTE(review): unlike wm_linkintr_gmii/tbi, this branch
		 * does not test ICR_RXSEQ before printing — the message
		 * fires for any non-LSC cause.  Confirm intent.
		 */
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
			device_xname(sc->sc_dev)));
	}
}
7310 | | | 7310 | |
7311 | /* | | 7311 | /* |
7312 | * wm_linkintr: | | 7312 | * wm_linkintr: |
7313 | * | | 7313 | * |
7314 | * Helper; handle link interrupts. | | 7314 | * Helper; handle link interrupts. |
7315 | */ | | 7315 | */ |
7316 | static void | | 7316 | static void |
7317 | wm_linkintr(struct wm_softc *sc, uint32_t icr) | | 7317 | wm_linkintr(struct wm_softc *sc, uint32_t icr) |
7318 | { | | 7318 | { |
7319 | | | 7319 | |
7320 | KASSERT(WM_CORE_LOCKED(sc)); | | 7320 | KASSERT(WM_CORE_LOCKED(sc)); |
7321 | | | 7321 | |
7322 | if (sc->sc_flags & WM_F_HAS_MII) | | 7322 | if (sc->sc_flags & WM_F_HAS_MII) |
7323 | wm_linkintr_gmii(sc, icr); | | 7323 | wm_linkintr_gmii(sc, icr); |
7324 | else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES) | | 7324 | else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES) |
7325 | && (sc->sc_type >= WM_T_82575)) | | 7325 | && (sc->sc_type >= WM_T_82575)) |
7326 | wm_linkintr_serdes(sc, icr); | | 7326 | wm_linkintr_serdes(sc, icr); |
7327 | else | | 7327 | else |
7328 | wm_linkintr_tbi(sc, icr); | | 7328 | wm_linkintr_tbi(sc, icr); |
7329 | } | | 7329 | } |
7330 | | | 7330 | |
/*
 * wm_intr_legacy:
 *
 *	Interrupt service routine for INTx and MSI.
 *
 *	Loops re-reading ICR until no cause bits of interest remain
 *	(reading ICR acknowledges the causes).  Services RX, TX, and
 *	link events in that order, taking the RX, TX, and core locks
 *	strictly one at a time — never nested.  Returns nonzero if any
 *	work was done so the interrupt is reported as handled.
 */
static int
wm_intr_legacy(void *arg)
{
	struct wm_softc *sc = arg;
	struct wm_txqueue *txq = &sc->sc_txq[0];
	struct wm_rxqueue *rxq = &sc->sc_rxq[0];
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr, rndval = 0;
	int handled = 0;

	DPRINTF(WM_DEBUG_TX,
	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
	while (1 /* CONSTCOND */) {
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;
		/* Feed only the first nonzero ICR value to rnd(9). */
		if (rndval == 0)
			rndval = icr;

		WM_RX_LOCK(rxq);

		/* sc_stopping is checked under the RX lock. */
		if (sc->sc_stopping) {
			WM_RX_UNLOCK(rxq);
			break;
		}

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
				device_xname(sc->sc_dev),
				icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		/* Drain completed receive descriptors. */
		wm_rxeof(rxq);

		/* Drop the RX lock before taking the TX lock. */
		WM_RX_UNLOCK(rxq);
		WM_TX_LOCK(txq);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TXDW interrupt\n",
				device_xname(sc->sc_dev)));
			WM_EVCNT_INCR(&sc->sc_ev_txdw);
		}
#endif
		/* Reclaim completed transmit descriptors. */
		wm_txeof(sc);

		WM_TX_UNLOCK(txq);
		WM_CORE_LOCK(sc);

		/* Link-state changes are handled under the core lock. */
		if (icr & (ICR_LSC|ICR_RXSEQ)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		WM_CORE_UNLOCK(sc);

		/* Receive overrun: log only; wm_rxeof recovers. */
		if (icr & ICR_RXO) {
#if defined(WM_DEBUG)
			log(LOG_WARNING, "%s: Receive overrun\n",
			    device_xname(sc->sc_dev));
#endif /* defined(WM_DEBUG) */
		}
	}

	rnd_add_uint32(&sc->rnd_source, rndval);

	if (handled) {
		/* Try to get more packets going. */
		ifp->if_start(ifp);
	}

	return handled;
}
7415 | | | 7415 | |
7416 | #ifdef WM_MSI_MSIX | | 7416 | #ifdef WM_MSI_MSIX |
7417 | /* | | 7417 | /* |
7418 | * wm_txintr_msix: | | 7418 | * wm_txintr_msix: |
7419 | * | | 7419 | * |
7420 | * Interrupt service routine for TX complete interrupt for MSI-X. | | 7420 | * Interrupt service routine for TX complete interrupt for MSI-X. |
7421 | */ | | 7421 | */ |
7422 | static int | | 7422 | static int |
7423 | wm_txintr_msix(void *arg) | | 7423 | wm_txintr_msix(void *arg) |
7424 | { | | 7424 | { |
7425 | struct wm_txqueue *txq = arg; | | 7425 | struct wm_txqueue *txq = arg; |
7426 | struct wm_softc *sc = txq->txq_sc; | | 7426 | struct wm_softc *sc = txq->txq_sc; |
7427 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; | | 7427 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
7428 | int handled = 0; | | 7428 | int handled = 0; |
7429 | | | 7429 | |
7430 | DPRINTF(WM_DEBUG_TX, | | 7430 | DPRINTF(WM_DEBUG_TX, |
7431 | ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev))); | | 7431 | ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev))); |
7432 | | | 7432 | |
7433 | if (sc->sc_type == WM_T_82574) | | 7433 | if (sc->sc_type == WM_T_82574) |
7434 | CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(txq->txq_id)); /* 82574 only */ | | 7434 | CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(txq->txq_id)); /* 82574 only */ |
7435 | else if (sc->sc_type == WM_T_82575) | | 7435 | else if (sc->sc_type == WM_T_82575) |
7436 | CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(txq->txq_id)); | | 7436 | CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(txq->txq_id)); |
7437 | else | | 7437 | else |
7438 | CSR_WRITE(sc, WMREG_EIMC, 1 << txq->txq_intr_idx); | | 7438 | CSR_WRITE(sc, WMREG_EIMC, 1 << txq->txq_intr_idx); |
7439 | | | 7439 | |
7440 | WM_TX_LOCK(txq); | | 7440 | WM_TX_LOCK(txq); |
7441 | | | 7441 | |
7442 | if (sc->sc_stopping) | | 7442 | if (sc->sc_stopping) |
7443 | goto out; | | 7443 | goto out; |
7444 | | | 7444 | |
7445 | WM_EVCNT_INCR(&sc->sc_ev_txdw); | | 7445 | WM_EVCNT_INCR(&sc->sc_ev_txdw); |
7446 | handled = wm_txeof(sc); | | 7446 | handled = wm_txeof(sc); |
7447 | | | 7447 | |
7448 | out: | | 7448 | out: |
7449 | WM_TX_UNLOCK(txq); | | 7449 | WM_TX_UNLOCK(txq); |
7450 | | | 7450 | |
7451 | if (sc->sc_type == WM_T_82574) | | 7451 | if (sc->sc_type == WM_T_82574) |
7452 | CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(txq->txq_id)); /* 82574 only */ | | 7452 | CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(txq->txq_id)); /* 82574 only */ |
7453 | else if (sc->sc_type == WM_T_82575) | | 7453 | else if (sc->sc_type == WM_T_82575) |
7454 | CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(txq->txq_id)); | | 7454 | CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(txq->txq_id)); |
7455 | else | | 7455 | else |
7456 | CSR_WRITE(sc, WMREG_EIMS, 1 << txq->txq_intr_idx); | | 7456 | CSR_WRITE(sc, WMREG_EIMS, 1 << txq->txq_intr_idx); |
7457 | | | 7457 | |
7458 | if (handled) { | | 7458 | if (handled) { |
7459 | /* Try to get more packets going. */ | | 7459 | /* Try to get more packets going. */ |
7460 | ifp->if_start(ifp); | | 7460 | ifp->if_start(ifp); |
7461 | } | | 7461 | } |
7462 | | | 7462 | |
7463 | return handled; | | 7463 | return handled; |
7464 | } | | 7464 | } |
7465 | | | 7465 | |
7466 | /* | | 7466 | /* |
7467 | * wm_rxintr_msix: | | 7467 | * wm_rxintr_msix: |
7468 | * | | 7468 | * |
7469 | * Interrupt service routine for RX interrupt for MSI-X. | | 7469 | * Interrupt service routine for RX interrupt for MSI-X. |
7470 | */ | | 7470 | */ |
7471 | static int | | 7471 | static int |
7472 | wm_rxintr_msix(void *arg) | | 7472 | wm_rxintr_msix(void *arg) |
7473 | { | | 7473 | { |
7474 | struct wm_rxqueue *rxq = arg; | | 7474 | struct wm_rxqueue *rxq = arg; |
7475 | struct wm_softc *sc = rxq->rxq_sc; | | 7475 | struct wm_softc *sc = rxq->rxq_sc; |
7476 | | | 7476 | |
7477 | DPRINTF(WM_DEBUG_RX, | | 7477 | DPRINTF(WM_DEBUG_RX, |
7478 | ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev))); | | 7478 | ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev))); |
7479 | | | 7479 | |
7480 | if (sc->sc_type == WM_T_82574) | | 7480 | if (sc->sc_type == WM_T_82574) |
7481 | CSR_WRITE(sc, WMREG_IMC, ICR_RXQ(rxq->rxq_id)); /* 82574 only */ | | 7481 | CSR_WRITE(sc, WMREG_IMC, ICR_RXQ(rxq->rxq_id)); /* 82574 only */ |
7482 | else if (sc->sc_type == WM_T_82575) | | 7482 | else if (sc->sc_type == WM_T_82575) |
7483 | CSR_WRITE(sc, WMREG_EIMC, EITR_RX_QUEUE(rxq->rxq_id)); | | 7483 | CSR_WRITE(sc, WMREG_EIMC, EITR_RX_QUEUE(rxq->rxq_id)); |
7484 | else | | 7484 | else |
7485 | CSR_WRITE(sc, WMREG_EIMC, 1 << rxq->rxq_intr_idx); | | 7485 | CSR_WRITE(sc, WMREG_EIMC, 1 << rxq->rxq_intr_idx); |
7486 | | | 7486 | |
7487 | WM_RX_LOCK(rxq); | | 7487 | WM_RX_LOCK(rxq); |
7488 | | | 7488 | |
7489 | if (sc->sc_stopping) | | 7489 | if (sc->sc_stopping) |
7490 | goto out; | | 7490 | goto out; |
7491 | | | 7491 | |
7492 | WM_EVCNT_INCR(&sc->sc_ev_rxintr); | | 7492 | WM_EVCNT_INCR(&sc->sc_ev_rxintr); |
7493 | wm_rxeof(rxq); | | 7493 | wm_rxeof(rxq); |
7494 | | | 7494 | |
7495 | out: | | 7495 | out: |
7496 | WM_RX_UNLOCK(rxq); | | 7496 | WM_RX_UNLOCK(rxq); |
7497 | | | 7497 | |
7498 | if (sc->sc_type == WM_T_82574) | | 7498 | if (sc->sc_type == WM_T_82574) |
7499 | CSR_WRITE(sc, WMREG_IMS, ICR_RXQ(rxq->rxq_id)); | | 7499 | CSR_WRITE(sc, WMREG_IMS, ICR_RXQ(rxq->rxq_id)); |
7500 | else if (sc->sc_type == WM_T_82575) | | 7500 | else if (sc->sc_type == WM_T_82575) |
7501 | CSR_WRITE(sc, WMREG_EIMS, EITR_RX_QUEUE(rxq->rxq_id)); | | 7501 | CSR_WRITE(sc, WMREG_EIMS, EITR_RX_QUEUE(rxq->rxq_id)); |
7502 | else | | 7502 | else |
7503 | CSR_WRITE(sc, WMREG_EIMS, 1 << rxq->rxq_intr_idx); | | 7503 | CSR_WRITE(sc, WMREG_EIMS, 1 << rxq->rxq_intr_idx); |
7504 | | | 7504 | |
7505 | return 1; | | 7505 | return 1; |
7506 | } | | 7506 | } |
7507 | | | 7507 | |
7508 | /* | | 7508 | /* |
7509 | * wm_linkintr_msix: | | 7509 | * wm_linkintr_msix: |
7510 | * | | 7510 | * |
7511 | * Interrupt service routine for link status change for MSI-X. | | 7511 | * Interrupt service routine for link status change for MSI-X. |
7512 | */ | | 7512 | */ |
7513 | static int | | 7513 | static int |
7514 | wm_linkintr_msix(void *arg) | | 7514 | wm_linkintr_msix(void *arg) |
7515 | { | | 7515 | { |
7516 | struct wm_softc *sc = arg; | | 7516 | struct wm_softc *sc = arg; |
7517 | uint32_t reg; | | 7517 | uint32_t reg; |
7518 | | | 7518 | |
7519 | DPRINTF(WM_DEBUG_TX, | | 7519 | DPRINTF(WM_DEBUG_LINK, |
7520 | ("%s: LINK: got link intr\n", device_xname(sc->sc_dev))); | | 7520 | ("%s: LINK: got link intr\n", device_xname(sc->sc_dev))); |
7521 | | | 7521 | |
7522 | reg = CSR_READ(sc, WMREG_ICR); | | 7522 | reg = CSR_READ(sc, WMREG_ICR); |
7523 | WM_CORE_LOCK(sc); | | 7523 | WM_CORE_LOCK(sc); |
7524 | if ((sc->sc_stopping) || ((reg & ICR_LSC) == 0)) | | 7524 | if ((sc->sc_stopping) || ((reg & ICR_LSC) == 0)) |
7525 | goto out; | | 7525 | goto out; |
7526 | | | 7526 | |
7527 | WM_EVCNT_INCR(&sc->sc_ev_linkintr); | | 7527 | WM_EVCNT_INCR(&sc->sc_ev_linkintr); |
7528 | wm_linkintr(sc, ICR_LSC); | | 7528 | wm_linkintr(sc, ICR_LSC); |
7529 | | | 7529 | |
7530 | out: | | 7530 | out: |
7531 | WM_CORE_UNLOCK(sc); | | 7531 | WM_CORE_UNLOCK(sc); |
7532 | | | 7532 | |
7533 | if (sc->sc_type == WM_T_82574) | | 7533 | if (sc->sc_type == WM_T_82574) |
7534 | CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC); /* 82574 only */ | | 7534 | CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC); /* 82574 only */ |
7535 | else if (sc->sc_type == WM_T_82575) | | 7535 | else if (sc->sc_type == WM_T_82575) |
7536 | CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER); | | 7536 | CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER); |
7537 | else | | 7537 | else |
7538 | CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx); | | 7538 | CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx); |
7539 | | | 7539 | |
7540 | return 1; | | 7540 | return 1; |
7541 | } | | 7541 | } |
7542 | #endif /* WM_MSI_MSIX */ | | 7542 | #endif /* WM_MSI_MSIX */ |
7543 | | | 7543 | |
7544 | /* | | 7544 | /* |
7545 | * Media related. | | 7545 | * Media related. |
7546 | * GMII, SGMII, TBI (and SERDES) | | 7546 | * GMII, SGMII, TBI (and SERDES) |
7547 | */ | | 7547 | */ |
7548 | | | 7548 | |
7549 | /* Common */ | | 7549 | /* Common */ |
7550 | | | 7550 | |
7551 | /* | | 7551 | /* |
7552 | * wm_tbi_serdes_set_linkled: | | 7552 | * wm_tbi_serdes_set_linkled: |
7553 | * | | 7553 | * |
7554 | * Update the link LED on TBI and SERDES devices. | | 7554 | * Update the link LED on TBI and SERDES devices. |
7555 | */ | | 7555 | */ |
7556 | static void | | 7556 | static void |
7557 | wm_tbi_serdes_set_linkled(struct wm_softc *sc) | | 7557 | wm_tbi_serdes_set_linkled(struct wm_softc *sc) |
7558 | { | | 7558 | { |
7559 | | | 7559 | |
7560 | if (sc->sc_tbi_linkup) | | 7560 | if (sc->sc_tbi_linkup) |
7561 | sc->sc_ctrl |= CTRL_SWDPIN(0); | | 7561 | sc->sc_ctrl |= CTRL_SWDPIN(0); |
7562 | else | | 7562 | else |
7563 | sc->sc_ctrl &= ~CTRL_SWDPIN(0); | | 7563 | sc->sc_ctrl &= ~CTRL_SWDPIN(0); |
7564 | | | 7564 | |
7565 | /* 82540 or newer devices are active low */ | | 7565 | /* 82540 or newer devices are active low */ |
7566 | sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0; | | 7566 | sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0; |
7567 | | | 7567 | |
7568 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); | | 7568 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); |
7569 | } | | 7569 | } |
7570 | | | 7570 | |
7571 | /* GMII related */ | | 7571 | /* GMII related */ |
7572 | | | 7572 | |
7573 | /* | | 7573 | /* |
7574 | * wm_gmii_reset: | | 7574 | * wm_gmii_reset: |
7575 | * | | 7575 | * |
7576 | * Reset the PHY. | | 7576 | * Reset the PHY. |
7577 | */ | | 7577 | */ |
7578 | static void | | 7578 | static void |
7579 | wm_gmii_reset(struct wm_softc *sc) | | 7579 | wm_gmii_reset(struct wm_softc *sc) |
7580 | { | | 7580 | { |
7581 | uint32_t reg; | | 7581 | uint32_t reg; |
7582 | int rv; | | 7582 | int rv; |
7583 | | | 7583 | |
7584 | /* get phy semaphore */ | | 7584 | /* get phy semaphore */ |
7585 | switch (sc->sc_type) { | | 7585 | switch (sc->sc_type) { |
7586 | case WM_T_82571: | | 7586 | case WM_T_82571: |
7587 | case WM_T_82572: | | 7587 | case WM_T_82572: |
7588 | case WM_T_82573: | | 7588 | case WM_T_82573: |
7589 | case WM_T_82574: | | 7589 | case WM_T_82574: |
7590 | case WM_T_82583: | | 7590 | case WM_T_82583: |
7591 | /* XXX should get sw semaphore, too */ | | 7591 | /* XXX should get sw semaphore, too */ |
7592 | rv = wm_get_swsm_semaphore(sc); | | 7592 | rv = wm_get_swsm_semaphore(sc); |
7593 | break; | | 7593 | break; |
7594 | case WM_T_82575: | | 7594 | case WM_T_82575: |
7595 | case WM_T_82576: | | 7595 | case WM_T_82576: |
7596 | case WM_T_82580: | | 7596 | case WM_T_82580: |
7597 | case WM_T_I350: | | 7597 | case WM_T_I350: |
7598 | case WM_T_I354: | | 7598 | case WM_T_I354: |
7599 | case WM_T_I210: | | 7599 | case WM_T_I210: |
7600 | case WM_T_I211: | | 7600 | case WM_T_I211: |
7601 | case WM_T_80003: | | 7601 | case WM_T_80003: |
7602 | rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]); | | 7602 | rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]); |
7603 | break; | | 7603 | break; |
7604 | case WM_T_ICH8: | | 7604 | case WM_T_ICH8: |
7605 | case WM_T_ICH9: | | 7605 | case WM_T_ICH9: |
7606 | case WM_T_ICH10: | | 7606 | case WM_T_ICH10: |
7607 | case WM_T_PCH: | | 7607 | case WM_T_PCH: |
7608 | case WM_T_PCH2: | | 7608 | case WM_T_PCH2: |
7609 | case WM_T_PCH_LPT: | | 7609 | case WM_T_PCH_LPT: |
7610 | rv = wm_get_swfwhw_semaphore(sc); | | 7610 | rv = wm_get_swfwhw_semaphore(sc); |
7611 | break; | | 7611 | break; |
7612 | default: | | 7612 | default: |
7613 | /* nothing to do*/ | | 7613 | /* nothing to do*/ |
7614 | rv = 0; | | 7614 | rv = 0; |
7615 | break; | | 7615 | break; |
7616 | } | | 7616 | } |
7617 | if (rv != 0) { | | 7617 | if (rv != 0) { |
7618 | aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", | | 7618 | aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", |
7619 | __func__); | | 7619 | __func__); |
7620 | return; | | 7620 | return; |
7621 | } | | 7621 | } |
7622 | | | 7622 | |
7623 | switch (sc->sc_type) { | | 7623 | switch (sc->sc_type) { |
7624 | case WM_T_82542_2_0: | | 7624 | case WM_T_82542_2_0: |
7625 | case WM_T_82542_2_1: | | 7625 | case WM_T_82542_2_1: |
7626 | /* null */ | | 7626 | /* null */ |
7627 | break; | | 7627 | break; |
7628 | case WM_T_82543: | | 7628 | case WM_T_82543: |
7629 | /* | | 7629 | /* |
7630 | * With 82543, we need to force speed and duplex on the MAC | | 7630 | * With 82543, we need to force speed and duplex on the MAC |
7631 | * equal to what the PHY speed and duplex configuration is. | | 7631 | * equal to what the PHY speed and duplex configuration is. |
7632 | * In addition, we need to perform a hardware reset on the PHY | | 7632 | * In addition, we need to perform a hardware reset on the PHY |
7633 | * to take it out of reset. | | 7633 | * to take it out of reset. |
7634 | */ | | 7634 | */ |
7635 | sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX; | | 7635 | sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX; |
7636 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); | | 7636 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); |
7637 | | | 7637 | |
7638 | /* The PHY reset pin is active-low. */ | | 7638 | /* The PHY reset pin is active-low. */ |
7639 | reg = CSR_READ(sc, WMREG_CTRL_EXT); | | 7639 | reg = CSR_READ(sc, WMREG_CTRL_EXT); |
7640 | reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) | | | 7640 | reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) | |
7641 | CTRL_EXT_SWDPIN(4)); | | 7641 | CTRL_EXT_SWDPIN(4)); |
7642 | reg |= CTRL_EXT_SWDPIO(4); | | 7642 | reg |= CTRL_EXT_SWDPIO(4); |
7643 | | | 7643 | |
7644 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg); | | 7644 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg); |
7645 | CSR_WRITE_FLUSH(sc); | | 7645 | CSR_WRITE_FLUSH(sc); |
7646 | delay(10*1000); | | 7646 | delay(10*1000); |
7647 | | | 7647 | |
7648 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4)); | | 7648 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4)); |
7649 | CSR_WRITE_FLUSH(sc); | | 7649 | CSR_WRITE_FLUSH(sc); |
7650 | delay(150); | | 7650 | delay(150); |
7651 | #if 0 | | 7651 | #if 0 |
7652 | sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4); | | 7652 | sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4); |
7653 | #endif | | 7653 | #endif |
7654 | delay(20*1000); /* XXX extra delay to get PHY ID? */ | | 7654 | delay(20*1000); /* XXX extra delay to get PHY ID? */ |
7655 | break; | | 7655 | break; |
7656 | case WM_T_82544: /* reset 10000us */ | | 7656 | case WM_T_82544: /* reset 10000us */ |
7657 | case WM_T_82540: | | 7657 | case WM_T_82540: |
7658 | case WM_T_82545: | | 7658 | case WM_T_82545: |
7659 | case WM_T_82545_3: | | 7659 | case WM_T_82545_3: |
7660 | case WM_T_82546: | | 7660 | case WM_T_82546: |
7661 | case WM_T_82546_3: | | 7661 | case WM_T_82546_3: |
7662 | case WM_T_82541: | | 7662 | case WM_T_82541: |
7663 | case WM_T_82541_2: | | 7663 | case WM_T_82541_2: |
7664 | case WM_T_82547: | | 7664 | case WM_T_82547: |
7665 | case WM_T_82547_2: | | 7665 | case WM_T_82547_2: |
7666 | case WM_T_82571: /* reset 100us */ | | 7666 | case WM_T_82571: /* reset 100us */ |
7667 | case WM_T_82572: | | 7667 | case WM_T_82572: |
7668 | case WM_T_82573: | | 7668 | case WM_T_82573: |
7669 | case WM_T_82574: | | 7669 | case WM_T_82574: |
7670 | case WM_T_82575: | | 7670 | case WM_T_82575: |
7671 | case WM_T_82576: | | 7671 | case WM_T_82576: |
7672 | case WM_T_82580: | | 7672 | case WM_T_82580: |
7673 | case WM_T_I350: | | 7673 | case WM_T_I350: |
7674 | case WM_T_I354: | | 7674 | case WM_T_I354: |
7675 | case WM_T_I210: | | 7675 | case WM_T_I210: |
7676 | case WM_T_I211: | | 7676 | case WM_T_I211: |
7677 | case WM_T_82583: | | 7677 | case WM_T_82583: |
7678 | case WM_T_80003: | | 7678 | case WM_T_80003: |
7679 | /* generic reset */ | | 7679 | /* generic reset */ |
7680 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET); | | 7680 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET); |
7681 | CSR_WRITE_FLUSH(sc); | | 7681 | CSR_WRITE_FLUSH(sc); |
7682 | delay(20000); | | 7682 | delay(20000); |
7683 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); | | 7683 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); |
7684 | CSR_WRITE_FLUSH(sc); | | 7684 | CSR_WRITE_FLUSH(sc); |
7685 | delay(20000); | | 7685 | delay(20000); |
7686 | | | 7686 | |
7687 | if ((sc->sc_type == WM_T_82541) | | 7687 | if ((sc->sc_type == WM_T_82541) |
7688 | || (sc->sc_type == WM_T_82541_2) | | 7688 | || (sc->sc_type == WM_T_82541_2) |
7689 | || (sc->sc_type == WM_T_82547) | | 7689 | || (sc->sc_type == WM_T_82547) |
7690 | || (sc->sc_type == WM_T_82547_2)) { | | 7690 | || (sc->sc_type == WM_T_82547_2)) { |
7691 | /* workaround for igp are done in igp_reset() */ | | 7691 | /* workaround for igp are done in igp_reset() */ |
7692 | /* XXX add code to set LED after phy reset */ | | 7692 | /* XXX add code to set LED after phy reset */ |
7693 | } | | 7693 | } |
7694 | break; | | 7694 | break; |
7695 | case WM_T_ICH8: | | 7695 | case WM_T_ICH8: |
7696 | case WM_T_ICH9: | | 7696 | case WM_T_ICH9: |
7697 | case WM_T_ICH10: | | 7697 | case WM_T_ICH10: |
7698 | case WM_T_PCH: | | 7698 | case WM_T_PCH: |
7699 | case WM_T_PCH2: | | 7699 | case WM_T_PCH2: |
7700 | case WM_T_PCH_LPT: | | 7700 | case WM_T_PCH_LPT: |
7701 | /* generic reset */ | | 7701 | /* generic reset */ |
7702 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET); | | 7702 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET); |
7703 | CSR_WRITE_FLUSH(sc); | | 7703 | CSR_WRITE_FLUSH(sc); |
7704 | delay(100); | | 7704 | delay(100); |
7705 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); | | 7705 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); |
7706 | CSR_WRITE_FLUSH(sc); | | 7706 | CSR_WRITE_FLUSH(sc); |
7707 | delay(150); | | 7707 | delay(150); |
7708 | break; | | 7708 | break; |
7709 | default: | | 7709 | default: |
7710 | panic("%s: %s: unknown type\n", device_xname(sc->sc_dev), | | 7710 | panic("%s: %s: unknown type\n", device_xname(sc->sc_dev), |
7711 | __func__); | | 7711 | __func__); |
7712 | break; | | 7712 | break; |
7713 | } | | 7713 | } |
7714 | | | 7714 | |
7715 | /* release PHY semaphore */ | | 7715 | /* release PHY semaphore */ |
7716 | switch (sc->sc_type) { | | 7716 | switch (sc->sc_type) { |
7717 | case WM_T_82571: | | 7717 | case WM_T_82571: |
7718 | case WM_T_82572: | | 7718 | case WM_T_82572: |
7719 | case WM_T_82573: | | 7719 | case WM_T_82573: |
7720 | case WM_T_82574: | | 7720 | case WM_T_82574: |
7721 | case WM_T_82583: | | 7721 | case WM_T_82583: |
7722 | /* XXX should put sw semaphore, too */ | | 7722 | /* XXX should put sw semaphore, too */ |
7723 | wm_put_swsm_semaphore(sc); | | 7723 | wm_put_swsm_semaphore(sc); |
7724 | break; | | 7724 | break; |
7725 | case WM_T_82575: | | 7725 | case WM_T_82575: |
7726 | case WM_T_82576: | | 7726 | case WM_T_82576: |
7727 | case WM_T_82580: | | 7727 | case WM_T_82580: |
7728 | case WM_T_I350: | | 7728 | case WM_T_I350: |
7729 | case WM_T_I354: | | 7729 | case WM_T_I354: |
7730 | case WM_T_I210: | | 7730 | case WM_T_I210: |
7731 | case WM_T_I211: | | 7731 | case WM_T_I211: |
7732 | case WM_T_80003: | | 7732 | case WM_T_80003: |
7733 | wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]); | | 7733 | wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]); |
7734 | break; | | 7734 | break; |
7735 | case WM_T_ICH8: | | 7735 | case WM_T_ICH8: |
7736 | case WM_T_ICH9: | | 7736 | case WM_T_ICH9: |
7737 | case WM_T_ICH10: | | 7737 | case WM_T_ICH10: |
7738 | case WM_T_PCH: | | 7738 | case WM_T_PCH: |
7739 | case WM_T_PCH2: | | 7739 | case WM_T_PCH2: |
7740 | case WM_T_PCH_LPT: | | 7740 | case WM_T_PCH_LPT: |
7741 | wm_put_swfwhw_semaphore(sc); | | 7741 | wm_put_swfwhw_semaphore(sc); |
7742 | break; | | 7742 | break; |
7743 | default: | | 7743 | default: |
7744 | /* nothing to do*/ | | 7744 | /* nothing to do*/ |
7745 | rv = 0; | | 7745 | rv = 0; |
7746 | break; | | 7746 | break; |
7747 | } | | 7747 | } |
7748 | | | 7748 | |
7749 | /* get_cfg_done */ | | 7749 | /* get_cfg_done */ |
7750 | wm_get_cfg_done(sc); | | 7750 | wm_get_cfg_done(sc); |
7751 | | | 7751 | |
7752 | /* extra setup */ | | 7752 | /* extra setup */ |
7753 | switch (sc->sc_type) { | | 7753 | switch (sc->sc_type) { |
7754 | case WM_T_82542_2_0: | | 7754 | case WM_T_82542_2_0: |
7755 | case WM_T_82542_2_1: | | 7755 | case WM_T_82542_2_1: |
7756 | case WM_T_82543: | | 7756 | case WM_T_82543: |
7757 | case WM_T_82544: | | 7757 | case WM_T_82544: |
7758 | case WM_T_82540: | | 7758 | case WM_T_82540: |
7759 | case WM_T_82545: | | 7759 | case WM_T_82545: |
7760 | case WM_T_82545_3: | | 7760 | case WM_T_82545_3: |
7761 | case WM_T_82546: | | 7761 | case WM_T_82546: |
7762 | case WM_T_82546_3: | | 7762 | case WM_T_82546_3: |
7763 | case WM_T_82541_2: | | 7763 | case WM_T_82541_2: |
7764 | case WM_T_82547_2: | | 7764 | case WM_T_82547_2: |
7765 | case WM_T_82571: | | 7765 | case WM_T_82571: |
7766 | case WM_T_82572: | | 7766 | case WM_T_82572: |
7767 | case WM_T_82573: | | 7767 | case WM_T_82573: |
7768 | case WM_T_82574: | | 7768 | case WM_T_82574: |
7769 | case WM_T_82575: | | 7769 | case WM_T_82575: |
7770 | case WM_T_82576: | | 7770 | case WM_T_82576: |
7771 | case WM_T_82580: | | 7771 | case WM_T_82580: |
7772 | case WM_T_I350: | | 7772 | case WM_T_I350: |
7773 | case WM_T_I354: | | 7773 | case WM_T_I354: |
7774 | case WM_T_I210: | | 7774 | case WM_T_I210: |
7775 | case WM_T_I211: | | 7775 | case WM_T_I211: |
7776 | case WM_T_82583: | | 7776 | case WM_T_82583: |
7777 | case WM_T_80003: | | 7777 | case WM_T_80003: |
7778 | /* null */ | | 7778 | /* null */ |
7779 | break; | | 7779 | break; |
7780 | case WM_T_82541: | | 7780 | case WM_T_82541: |
7781 | case WM_T_82547: | | 7781 | case WM_T_82547: |
7782 | /* XXX Configure actively LED after PHY reset */ | | 7782 | /* XXX Configure actively LED after PHY reset */ |
7783 | break; | | 7783 | break; |
7784 | case WM_T_ICH8: | | 7784 | case WM_T_ICH8: |
7785 | case WM_T_ICH9: | | 7785 | case WM_T_ICH9: |
7786 | case WM_T_ICH10: | | 7786 | case WM_T_ICH10: |
7787 | case WM_T_PCH: | | 7787 | case WM_T_PCH: |
7788 | case WM_T_PCH2: | | 7788 | case WM_T_PCH2: |
7789 | case WM_T_PCH_LPT: | | 7789 | case WM_T_PCH_LPT: |
7790 | /* Allow time for h/w to get to a quiescent state afer reset */ | | 7790 | /* Allow time for h/w to get to a quiescent state afer reset */ |
7791 | delay(10*1000); | | 7791 | delay(10*1000); |
7792 | | | 7792 | |
7793 | if (sc->sc_type == WM_T_PCH) | | 7793 | if (sc->sc_type == WM_T_PCH) |
7794 | wm_hv_phy_workaround_ich8lan(sc); | | 7794 | wm_hv_phy_workaround_ich8lan(sc); |
7795 | | | 7795 | |
7796 | if (sc->sc_type == WM_T_PCH2) | | 7796 | if (sc->sc_type == WM_T_PCH2) |
7797 | wm_lv_phy_workaround_ich8lan(sc); | | 7797 | wm_lv_phy_workaround_ich8lan(sc); |
7798 | | | 7798 | |
7799 | if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) { | | 7799 | if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) { |
7800 | /* | | 7800 | /* |
7801 | * dummy read to clear the phy wakeup bit after lcd | | 7801 | * dummy read to clear the phy wakeup bit after lcd |
7802 | * reset | | 7802 | * reset |
7803 | */ | | 7803 | */ |
7804 | reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC); | | 7804 | reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC); |
7805 | } | | 7805 | } |
7806 | | | 7806 | |
7807 | /* | | 7807 | /* |
7808 | * XXX Configure the LCD with th extended configuration region | | 7808 | * XXX Configure the LCD with th extended configuration region |
7809 | * in NVM | | 7809 | * in NVM |
7810 | */ | | 7810 | */ |
7811 | | | 7811 | |
7812 | /* Configure the LCD with the OEM bits in NVM */ | | 7812 | /* Configure the LCD with the OEM bits in NVM */ |
7813 | if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2) | | 7813 | if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2) |
7814 | || (sc->sc_type == WM_T_PCH_LPT)) { | | 7814 | || (sc->sc_type == WM_T_PCH_LPT)) { |
7815 | /* | | 7815 | /* |
7816 | * Disable LPLU. | | 7816 | * Disable LPLU. |
7817 | * XXX It seems that 82567 has LPLU, too. | | 7817 | * XXX It seems that 82567 has LPLU, too. |
7818 | */ | | 7818 | */ |
7819 | reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS); | | 7819 | reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS); |
7820 | reg &= ~(HV_OEM_BITS_A1KDIS| HV_OEM_BITS_LPLU); | | 7820 | reg &= ~(HV_OEM_BITS_A1KDIS| HV_OEM_BITS_LPLU); |
7821 | reg |= HV_OEM_BITS_ANEGNOW; | | 7821 | reg |= HV_OEM_BITS_ANEGNOW; |
7822 | wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg); | | 7822 | wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg); |
7823 | } | | 7823 | } |
7824 | break; | | 7824 | break; |
7825 | default: | | 7825 | default: |
7826 | panic("%s: unknown type\n", __func__); | | 7826 | panic("%s: unknown type\n", __func__); |
7827 | break; | | 7827 | break; |
7828 | } | | 7828 | } |
7829 | } | | 7829 | } |
7830 | | | 7830 | |
7831 | /* | | 7831 | /* |
7832 | * wm_get_phy_id_82575: | | 7832 | * wm_get_phy_id_82575: |
7833 | * | | 7833 | * |
7834 | * Return PHY ID. Return -1 if it failed. | | 7834 | * Return PHY ID. Return -1 if it failed. |
7835 | */ | | 7835 | */ |
7836 | static int | | 7836 | static int |
7837 | wm_get_phy_id_82575(struct wm_softc *sc) | | 7837 | wm_get_phy_id_82575(struct wm_softc *sc) |
7838 | { | | 7838 | { |
7839 | uint32_t reg; | | 7839 | uint32_t reg; |
7840 | int phyid = -1; | | 7840 | int phyid = -1; |
7841 | | | 7841 | |
7842 | /* XXX */ | | 7842 | /* XXX */ |
7843 | if ((sc->sc_flags & WM_F_SGMII) == 0) | | 7843 | if ((sc->sc_flags & WM_F_SGMII) == 0) |
7844 | return -1; | | 7844 | return -1; |
7845 | | | 7845 | |
7846 | if (wm_sgmii_uses_mdio(sc)) { | | 7846 | if (wm_sgmii_uses_mdio(sc)) { |
7847 | switch (sc->sc_type) { | | 7847 | switch (sc->sc_type) { |
7848 | case WM_T_82575: | | 7848 | case WM_T_82575: |
7849 | case WM_T_82576: | | 7849 | case WM_T_82576: |
7850 | reg = CSR_READ(sc, WMREG_MDIC); | | 7850 | reg = CSR_READ(sc, WMREG_MDIC); |
7851 | phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT; | | 7851 | phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT; |
7852 | break; | | 7852 | break; |
7853 | case WM_T_82580: | | 7853 | case WM_T_82580: |
7854 | case WM_T_I350: | | 7854 | case WM_T_I350: |
7855 | case WM_T_I354: | | 7855 | case WM_T_I354: |
7856 | case WM_T_I210: | | 7856 | case WM_T_I210: |
7857 | case WM_T_I211: | | 7857 | case WM_T_I211: |
7858 | reg = CSR_READ(sc, WMREG_MDICNFG); | | 7858 | reg = CSR_READ(sc, WMREG_MDICNFG); |
7859 | phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT; | | 7859 | phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT; |
7860 | break; | | 7860 | break; |
7861 | default: | | 7861 | default: |
7862 | return -1; | | 7862 | return -1; |
7863 | } | | 7863 | } |
7864 | } | | 7864 | } |
7865 | | | 7865 | |
7866 | return phyid; | | 7866 | return phyid; |
7867 | } | | 7867 | } |
7868 | | | 7868 | |
7869 | | | 7869 | |
7870 | /* | | 7870 | /* |
7871 | * wm_gmii_mediainit: | | 7871 | * wm_gmii_mediainit: |
7872 | * | | 7872 | * |
7873 | * Initialize media for use on 1000BASE-T devices. | | 7873 | * Initialize media for use on 1000BASE-T devices. |
7874 | */ | | 7874 | */ |
7875 | static void | | 7875 | static void |
7876 | wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid) | | 7876 | wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid) |
7877 | { | | 7877 | { |
7878 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; | | 7878 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
7879 | struct mii_data *mii = &sc->sc_mii; | | 7879 | struct mii_data *mii = &sc->sc_mii; |
7880 | uint32_t reg; | | 7880 | uint32_t reg; |
7881 | | | 7881 | |
7882 | /* We have GMII. */ | | 7882 | /* We have GMII. */ |
7883 | sc->sc_flags |= WM_F_HAS_MII; | | 7883 | sc->sc_flags |= WM_F_HAS_MII; |
7884 | | | 7884 | |
7885 | if (sc->sc_type == WM_T_80003) | | 7885 | if (sc->sc_type == WM_T_80003) |
7886 | sc->sc_tipg = TIPG_1000T_80003_DFLT; | | 7886 | sc->sc_tipg = TIPG_1000T_80003_DFLT; |
7887 | else | | 7887 | else |
7888 | sc->sc_tipg = TIPG_1000T_DFLT; | | 7888 | sc->sc_tipg = TIPG_1000T_DFLT; |
7889 | | | 7889 | |
7890 | /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */ | | 7890 | /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */ |
7891 | if ((sc->sc_type == WM_T_82580) | | 7891 | if ((sc->sc_type == WM_T_82580) |
7892 | || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210) | | 7892 | || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210) |
7893 | || (sc->sc_type == WM_T_I211)) { | | 7893 | || (sc->sc_type == WM_T_I211)) { |
7894 | reg = CSR_READ(sc, WMREG_PHPM); | | 7894 | reg = CSR_READ(sc, WMREG_PHPM); |
7895 | reg &= ~PHPM_GO_LINK_D; | | 7895 | reg &= ~PHPM_GO_LINK_D; |
7896 | CSR_WRITE(sc, WMREG_PHPM, reg); | | 7896 | CSR_WRITE(sc, WMREG_PHPM, reg); |
7897 | } | | 7897 | } |
7898 | | | 7898 | |
7899 | /* | | 7899 | /* |
7900 | * Let the chip set speed/duplex on its own based on | | 7900 | * Let the chip set speed/duplex on its own based on |
7901 | * signals from the PHY. | | 7901 | * signals from the PHY. |
7902 | * XXXbouyer - I'm not sure this is right for the 80003, | | 7902 | * XXXbouyer - I'm not sure this is right for the 80003, |
7903 | * the em driver only sets CTRL_SLU here - but it seems to work. | | 7903 | * the em driver only sets CTRL_SLU here - but it seems to work. |
7904 | */ | | 7904 | */ |
7905 | sc->sc_ctrl |= CTRL_SLU; | | 7905 | sc->sc_ctrl |= CTRL_SLU; |
7906 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); | | 7906 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); |
7907 | | | 7907 | |
7908 | /* Initialize our media structures and probe the GMII. */ | | 7908 | /* Initialize our media structures and probe the GMII. */ |
7909 | mii->mii_ifp = ifp; | | 7909 | mii->mii_ifp = ifp; |
7910 | | | 7910 | |
7911 | /* | | 7911 | /* |
7912 | * Determine the PHY access method. | | 7912 | * Determine the PHY access method. |
7913 | * | | 7913 | * |
7914 | * For SGMII, use SGMII specific method. | | 7914 | * For SGMII, use SGMII specific method. |
7915 | * | | 7915 | * |
7916 | * For some devices, we can determine the PHY access method | | 7916 | * For some devices, we can determine the PHY access method |
7917 | * from sc_type. | | 7917 | * from sc_type. |
7918 | * | | 7918 | * |
7919 | * For ICH and PCH variants, it's difficult to determine the PHY | | 7919 | * For ICH and PCH variants, it's difficult to determine the PHY |
7920 | * access method by sc_type, so use the PCI product ID for some | | 7920 | * access method by sc_type, so use the PCI product ID for some |
7921 | * devices. | | 7921 | * devices. |
7922 | * For other ICH8 variants, try to use igp's method. If the PHY | | 7922 | * For other ICH8 variants, try to use igp's method. If the PHY |
7923 | * can't detect, then use bm's method. | | 7923 | * can't detect, then use bm's method. |
7924 | */ | | 7924 | */ |
7925 | switch (prodid) { | | 7925 | switch (prodid) { |
7926 | case PCI_PRODUCT_INTEL_PCH_M_LM: | | 7926 | case PCI_PRODUCT_INTEL_PCH_M_LM: |
7927 | case PCI_PRODUCT_INTEL_PCH_M_LC: | | 7927 | case PCI_PRODUCT_INTEL_PCH_M_LC: |
7928 | /* 82577 */ | | 7928 | /* 82577 */ |
7929 | sc->sc_phytype = WMPHY_82577; | | 7929 | sc->sc_phytype = WMPHY_82577; |
7930 | break; | | 7930 | break; |
7931 | case PCI_PRODUCT_INTEL_PCH_D_DM: | | 7931 | case PCI_PRODUCT_INTEL_PCH_D_DM: |
7932 | case PCI_PRODUCT_INTEL_PCH_D_DC: | | 7932 | case PCI_PRODUCT_INTEL_PCH_D_DC: |
7933 | /* 82578 */ | | 7933 | /* 82578 */ |
7934 | sc->sc_phytype = WMPHY_82578; | | 7934 | sc->sc_phytype = WMPHY_82578; |
7935 | break; | | 7935 | break; |
7936 | case PCI_PRODUCT_INTEL_PCH2_LV_LM: | | 7936 | case PCI_PRODUCT_INTEL_PCH2_LV_LM: |
7937 | case PCI_PRODUCT_INTEL_PCH2_LV_V: | | 7937 | case PCI_PRODUCT_INTEL_PCH2_LV_V: |
7938 | /* 82579 */ | | 7938 | /* 82579 */ |
7939 | sc->sc_phytype = WMPHY_82579; | | 7939 | sc->sc_phytype = WMPHY_82579; |
7940 | break; | | 7940 | break; |
7941 | case PCI_PRODUCT_INTEL_82801I_BM: | | 7941 | case PCI_PRODUCT_INTEL_82801I_BM: |
7942 | case PCI_PRODUCT_INTEL_82801J_R_BM_LM: | | 7942 | case PCI_PRODUCT_INTEL_82801J_R_BM_LM: |
7943 | case PCI_PRODUCT_INTEL_82801J_R_BM_LF: | | 7943 | case PCI_PRODUCT_INTEL_82801J_R_BM_LF: |
7944 | case PCI_PRODUCT_INTEL_82801J_D_BM_LM: | | 7944 | case PCI_PRODUCT_INTEL_82801J_D_BM_LM: |
7945 | case PCI_PRODUCT_INTEL_82801J_D_BM_LF: | | 7945 | case PCI_PRODUCT_INTEL_82801J_D_BM_LF: |
7946 | case PCI_PRODUCT_INTEL_82801J_R_BM_V: | | 7946 | case PCI_PRODUCT_INTEL_82801J_R_BM_V: |
7947 | /* 82567 */ | | 7947 | /* 82567 */ |
7948 | sc->sc_phytype = WMPHY_BM; | | 7948 | sc->sc_phytype = WMPHY_BM; |
7949 | mii->mii_readreg = wm_gmii_bm_readreg; | | 7949 | mii->mii_readreg = wm_gmii_bm_readreg; |
7950 | mii->mii_writereg = wm_gmii_bm_writereg; | | 7950 | mii->mii_writereg = wm_gmii_bm_writereg; |
7951 | break; | | 7951 | break; |
7952 | default: | | 7952 | default: |
7953 | if (((sc->sc_flags & WM_F_SGMII) != 0) | | 7953 | if (((sc->sc_flags & WM_F_SGMII) != 0) |
7954 | && !wm_sgmii_uses_mdio(sc)){ | | 7954 | && !wm_sgmii_uses_mdio(sc)){ |
7955 | /* SGMII */ | | 7955 | /* SGMII */ |
7956 | mii->mii_readreg = wm_sgmii_readreg; | | 7956 | mii->mii_readreg = wm_sgmii_readreg; |
7957 | mii->mii_writereg = wm_sgmii_writereg; | | 7957 | mii->mii_writereg = wm_sgmii_writereg; |
7958 | } else if (sc->sc_type >= WM_T_80003) { | | 7958 | } else if (sc->sc_type >= WM_T_80003) { |
7959 | /* 80003 */ | | 7959 | /* 80003 */ |
7960 | mii->mii_readreg = wm_gmii_i80003_readreg; | | 7960 | mii->mii_readreg = wm_gmii_i80003_readreg; |
7961 | mii->mii_writereg = wm_gmii_i80003_writereg; | | 7961 | mii->mii_writereg = wm_gmii_i80003_writereg; |
7962 | } else if (sc->sc_type >= WM_T_I210) { | | 7962 | } else if (sc->sc_type >= WM_T_I210) { |
7963 | /* I210 and I211 */ | | 7963 | /* I210 and I211 */ |
7964 | mii->mii_readreg = wm_gmii_gs40g_readreg; | | 7964 | mii->mii_readreg = wm_gmii_gs40g_readreg; |
7965 | mii->mii_writereg = wm_gmii_gs40g_writereg; | | 7965 | mii->mii_writereg = wm_gmii_gs40g_writereg; |
7966 | } else if (sc->sc_type >= WM_T_82580) { | | 7966 | } else if (sc->sc_type >= WM_T_82580) { |
7967 | /* 82580, I350 and I354 */ | | 7967 | /* 82580, I350 and I354 */ |
7968 | sc->sc_phytype = WMPHY_82580; | | 7968 | sc->sc_phytype = WMPHY_82580; |
7969 | mii->mii_readreg = wm_gmii_82580_readreg; | | 7969 | mii->mii_readreg = wm_gmii_82580_readreg; |
7970 | mii->mii_writereg = wm_gmii_82580_writereg; | | 7970 | mii->mii_writereg = wm_gmii_82580_writereg; |
7971 | } else if (sc->sc_type >= WM_T_82544) { | | 7971 | } else if (sc->sc_type >= WM_T_82544) { |
7972 | /* 82544, 0, [56], [17], 8257[1234] and 82583 */ | | 7972 | /* 82544, 0, [56], [17], 8257[1234] and 82583 */ |
7973 | mii->mii_readreg = wm_gmii_i82544_readreg; | | 7973 | mii->mii_readreg = wm_gmii_i82544_readreg; |
7974 | mii->mii_writereg = wm_gmii_i82544_writereg; | | 7974 | mii->mii_writereg = wm_gmii_i82544_writereg; |
7975 | } else { | | 7975 | } else { |
7976 | mii->mii_readreg = wm_gmii_i82543_readreg; | | 7976 | mii->mii_readreg = wm_gmii_i82543_readreg; |
7977 | mii->mii_writereg = wm_gmii_i82543_writereg; | | 7977 | mii->mii_writereg = wm_gmii_i82543_writereg; |
7978 | } | | 7978 | } |
7979 | break; | | 7979 | break; |
7980 | } | | 7980 | } |
7981 | if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_LPT)) { | | 7981 | if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_LPT)) { |
7982 | /* All PCH* use _hv_ */ | | 7982 | /* All PCH* use _hv_ */ |
7983 | mii->mii_readreg = wm_gmii_hv_readreg; | | 7983 | mii->mii_readreg = wm_gmii_hv_readreg; |
7984 | mii->mii_writereg = wm_gmii_hv_writereg; | | 7984 | mii->mii_writereg = wm_gmii_hv_writereg; |
7985 | } | | 7985 | } |
7986 | mii->mii_statchg = wm_gmii_statchg; | | 7986 | mii->mii_statchg = wm_gmii_statchg; |
7987 | | | 7987 | |
7988 | wm_gmii_reset(sc); | | 7988 | wm_gmii_reset(sc); |
7989 | | | 7989 | |
7990 | sc->sc_ethercom.ec_mii = &sc->sc_mii; | | 7990 | sc->sc_ethercom.ec_mii = &sc->sc_mii; |
7991 | ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange, | | 7991 | ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange, |
7992 | wm_gmii_mediastatus); | | 7992 | wm_gmii_mediastatus); |
7993 | | | 7993 | |
7994 | if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576) | | 7994 | if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576) |
7995 | || (sc->sc_type == WM_T_82580) | | 7995 | || (sc->sc_type == WM_T_82580) |
7996 | || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354) | | 7996 | || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354) |
7997 | || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) { | | 7997 | || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) { |
7998 | if ((sc->sc_flags & WM_F_SGMII) == 0) { | | 7998 | if ((sc->sc_flags & WM_F_SGMII) == 0) { |
7999 | /* Attach only one port */ | | 7999 | /* Attach only one port */ |
8000 | mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1, | | 8000 | mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1, |
8001 | MII_OFFSET_ANY, MIIF_DOPAUSE); | | 8001 | MII_OFFSET_ANY, MIIF_DOPAUSE); |
8002 | } else { | | 8002 | } else { |
8003 | int i, id; | | 8003 | int i, id; |
8004 | uint32_t ctrl_ext; | | 8004 | uint32_t ctrl_ext; |
8005 | | | 8005 | |
8006 | id = wm_get_phy_id_82575(sc); | | 8006 | id = wm_get_phy_id_82575(sc); |
8007 | if (id != -1) { | | 8007 | if (id != -1) { |
8008 | mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, | | 8008 | mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, |
8009 | id, MII_OFFSET_ANY, MIIF_DOPAUSE); | | 8009 | id, MII_OFFSET_ANY, MIIF_DOPAUSE); |
8010 | } | | 8010 | } |
8011 | if ((id == -1) | | 8011 | if ((id == -1) |
8012 | || (LIST_FIRST(&mii->mii_phys) == NULL)) { | | 8012 | || (LIST_FIRST(&mii->mii_phys) == NULL)) { |
8013 | /* Power on sgmii phy if it is disabled */ | | 8013 | /* Power on sgmii phy if it is disabled */ |
8014 | ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT); | | 8014 | ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT); |
8015 | CSR_WRITE(sc, WMREG_CTRL_EXT, | | 8015 | CSR_WRITE(sc, WMREG_CTRL_EXT, |
8016 | ctrl_ext &~ CTRL_EXT_SWDPIN(3)); | | 8016 | ctrl_ext &~ CTRL_EXT_SWDPIN(3)); |
8017 | CSR_WRITE_FLUSH(sc); | | 8017 | CSR_WRITE_FLUSH(sc); |
8018 | delay(300*1000); /* XXX too long */ | | 8018 | delay(300*1000); /* XXX too long */ |
8019 | | | 8019 | |
8020 | /* from 1 to 8 */ | | 8020 | /* from 1 to 8 */ |
8021 | for (i = 1; i < 8; i++) | | 8021 | for (i = 1; i < 8; i++) |
8022 | mii_attach(sc->sc_dev, &sc->sc_mii, | | 8022 | mii_attach(sc->sc_dev, &sc->sc_mii, |
8023 | 0xffffffff, i, MII_OFFSET_ANY, | | 8023 | 0xffffffff, i, MII_OFFSET_ANY, |
8024 | MIIF_DOPAUSE); | | 8024 | MIIF_DOPAUSE); |
8025 | | | 8025 | |
8026 | /* restore previous sfp cage power state */ | | 8026 | /* restore previous sfp cage power state */ |
8027 | CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext); | | 8027 | CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext); |
8028 | } | | 8028 | } |
8029 | } | | 8029 | } |
8030 | } else { | | 8030 | } else { |
8031 | mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, | | 8031 | mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, |
8032 | MII_OFFSET_ANY, MIIF_DOPAUSE); | | 8032 | MII_OFFSET_ANY, MIIF_DOPAUSE); |
8033 | } | | 8033 | } |
8034 | | | 8034 | |
8035 | /* | | 8035 | /* |
8036 | * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call | | 8036 | * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call |
8037 | * wm_set_mdio_slow_mode_hv() for a workaround and retry. | | 8037 | * wm_set_mdio_slow_mode_hv() for a workaround and retry. |
8038 | */ | | 8038 | */ |
8039 | if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) && | | 8039 | if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) && |
8040 | (LIST_FIRST(&mii->mii_phys) == NULL)) { | | 8040 | (LIST_FIRST(&mii->mii_phys) == NULL)) { |
8041 | wm_set_mdio_slow_mode_hv(sc); | | 8041 | wm_set_mdio_slow_mode_hv(sc); |
8042 | mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, | | 8042 | mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, |
8043 | MII_OFFSET_ANY, MIIF_DOPAUSE); | | 8043 | MII_OFFSET_ANY, MIIF_DOPAUSE); |
8044 | } | | 8044 | } |
8045 | | | 8045 | |
8046 | /* | | 8046 | /* |
8047 | * (For ICH8 variants) | | 8047 | * (For ICH8 variants) |
8048 | * If PHY detection failed, use BM's r/w function and retry. | | 8048 | * If PHY detection failed, use BM's r/w function and retry. |
8049 | */ | | 8049 | */ |
8050 | if (LIST_FIRST(&mii->mii_phys) == NULL) { | | 8050 | if (LIST_FIRST(&mii->mii_phys) == NULL) { |
8051 | /* if failed, retry with *_bm_* */ | | 8051 | /* if failed, retry with *_bm_* */ |
8052 | mii->mii_readreg = wm_gmii_bm_readreg; | | 8052 | mii->mii_readreg = wm_gmii_bm_readreg; |
8053 | mii->mii_writereg = wm_gmii_bm_writereg; | | 8053 | mii->mii_writereg = wm_gmii_bm_writereg; |
8054 | | | 8054 | |
8055 | mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, | | 8055 | mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, |
8056 | MII_OFFSET_ANY, MIIF_DOPAUSE); | | 8056 | MII_OFFSET_ANY, MIIF_DOPAUSE); |
8057 | } | | 8057 | } |
8058 | | | 8058 | |
8059 | if (LIST_FIRST(&mii->mii_phys) == NULL) { | | 8059 | if (LIST_FIRST(&mii->mii_phys) == NULL) { |
8060 | /* Any PHY wasn't find */ | | 8060 | /* Any PHY wasn't find */ |
8061 | ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL); | | 8061 | ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL); |
8062 | ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE); | | 8062 | ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE); |
8063 | sc->sc_phytype = WMPHY_NONE; | | 8063 | sc->sc_phytype = WMPHY_NONE; |
8064 | } else { | | 8064 | } else { |
8065 | /* | | 8065 | /* |
8066 | * PHY Found! | | 8066 | * PHY Found! |
8067 | * Check PHY type. | | 8067 | * Check PHY type. |
8068 | */ | | 8068 | */ |
8069 | uint32_t model; | | 8069 | uint32_t model; |
8070 | struct mii_softc *child; | | 8070 | struct mii_softc *child; |
8071 | | | 8071 | |
8072 | child = LIST_FIRST(&mii->mii_phys); | | 8072 | child = LIST_FIRST(&mii->mii_phys); |
8073 | if (device_is_a(child->mii_dev, "igphy")) { | | 8073 | if (device_is_a(child->mii_dev, "igphy")) { |
8074 | struct igphy_softc *isc = (struct igphy_softc *)child; | | 8074 | struct igphy_softc *isc = (struct igphy_softc *)child; |
8075 | | | 8075 | |
8076 | model = isc->sc_mii.mii_mpd_model; | | 8076 | model = isc->sc_mii.mii_mpd_model; |
8077 | if (model == MII_MODEL_yyINTEL_I82566) | | 8077 | if (model == MII_MODEL_yyINTEL_I82566) |
8078 | sc->sc_phytype = WMPHY_IGP_3; | | 8078 | sc->sc_phytype = WMPHY_IGP_3; |
8079 | } | | 8079 | } |
8080 | | | 8080 | |
8081 | ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO); | | 8081 | ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO); |
8082 | } | | 8082 | } |
8083 | } | | 8083 | } |
8084 | | | 8084 | |
8085 | /* | | 8085 | /* |
8086 | * wm_gmii_mediachange: [ifmedia interface function] | | 8086 | * wm_gmii_mediachange: [ifmedia interface function] |
8087 | * | | 8087 | * |
8088 | * Set hardware to newly-selected media on a 1000BASE-T device. | | 8088 | * Set hardware to newly-selected media on a 1000BASE-T device. |
8089 | */ | | 8089 | */ |
8090 | static int | | 8090 | static int |
8091 | wm_gmii_mediachange(struct ifnet *ifp) | | 8091 | wm_gmii_mediachange(struct ifnet *ifp) |
8092 | { | | 8092 | { |
8093 | struct wm_softc *sc = ifp->if_softc; | | 8093 | struct wm_softc *sc = ifp->if_softc; |
8094 | struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur; | | 8094 | struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur; |
8095 | int rc; | | 8095 | int rc; |
8096 | | | 8096 | |
8097 | if ((ifp->if_flags & IFF_UP) == 0) | | 8097 | if ((ifp->if_flags & IFF_UP) == 0) |
8098 | return 0; | | 8098 | return 0; |
8099 | | | 8099 | |
8100 | sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD); | | 8100 | sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD); |
8101 | sc->sc_ctrl |= CTRL_SLU; | | 8101 | sc->sc_ctrl |= CTRL_SLU; |
8102 | if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) | | 8102 | if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) |
8103 | || (sc->sc_type > WM_T_82543)) { | | 8103 | || (sc->sc_type > WM_T_82543)) { |
8104 | sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX); | | 8104 | sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX); |
8105 | } else { | | 8105 | } else { |
8106 | sc->sc_ctrl &= ~CTRL_ASDE; | | 8106 | sc->sc_ctrl &= ~CTRL_ASDE; |
8107 | sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX; | | 8107 | sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX; |
8108 | if (ife->ifm_media & IFM_FDX) | | 8108 | if (ife->ifm_media & IFM_FDX) |
8109 | sc->sc_ctrl |= CTRL_FD; | | 8109 | sc->sc_ctrl |= CTRL_FD; |
8110 | switch (IFM_SUBTYPE(ife->ifm_media)) { | | 8110 | switch (IFM_SUBTYPE(ife->ifm_media)) { |
8111 | case IFM_10_T: | | 8111 | case IFM_10_T: |
8112 | sc->sc_ctrl |= CTRL_SPEED_10; | | 8112 | sc->sc_ctrl |= CTRL_SPEED_10; |
8113 | break; | | 8113 | break; |
8114 | case IFM_100_TX: | | 8114 | case IFM_100_TX: |
8115 | sc->sc_ctrl |= CTRL_SPEED_100; | | 8115 | sc->sc_ctrl |= CTRL_SPEED_100; |
8116 | break; | | 8116 | break; |
8117 | case IFM_1000_T: | | 8117 | case IFM_1000_T: |
8118 | sc->sc_ctrl |= CTRL_SPEED_1000; | | 8118 | sc->sc_ctrl |= CTRL_SPEED_1000; |
8119 | break; | | 8119 | break; |
8120 | default: | | 8120 | default: |
8121 | panic("wm_gmii_mediachange: bad media 0x%x", | | 8121 | panic("wm_gmii_mediachange: bad media 0x%x", |
8122 | ife->ifm_media); | | 8122 | ife->ifm_media); |
8123 | } | | 8123 | } |
8124 | } | | 8124 | } |
8125 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); | | 8125 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); |
8126 | if (sc->sc_type <= WM_T_82543) | | 8126 | if (sc->sc_type <= WM_T_82543) |
8127 | wm_gmii_reset(sc); | | 8127 | wm_gmii_reset(sc); |
8128 | | | 8128 | |
8129 | if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO) | | 8129 | if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO) |
8130 | return 0; | | 8130 | return 0; |
8131 | return rc; | | 8131 | return rc; |
8132 | } | | 8132 | } |
8133 | | | 8133 | |
8134 | /* | | 8134 | /* |
8135 | * wm_gmii_mediastatus: [ifmedia interface function] | | 8135 | * wm_gmii_mediastatus: [ifmedia interface function] |
8136 | * | | 8136 | * |
8137 | * Get the current interface media status on a 1000BASE-T device. | | 8137 | * Get the current interface media status on a 1000BASE-T device. |
8138 | */ | | 8138 | */ |
8139 | static void | | 8139 | static void |
8140 | wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) | | 8140 | wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) |
8141 | { | | 8141 | { |
8142 | struct wm_softc *sc = ifp->if_softc; | | 8142 | struct wm_softc *sc = ifp->if_softc; |
8143 | | | 8143 | |
8144 | ether_mediastatus(ifp, ifmr); | | 8144 | ether_mediastatus(ifp, ifmr); |
8145 | ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) | | 8145 | ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) |
8146 | | sc->sc_flowflags; | | 8146 | | sc->sc_flowflags; |
8147 | } | | 8147 | } |
8148 | | | 8148 | |
/*
 * Software-driven MDIO lines on the i82543, implemented with the
 * CTRL register's software-definable pins (used by the bit-bang
 * helpers below).
 */
#define MDI_IO		CTRL_SWDPIN(2)	/* MDIO data line */
#define MDI_DIR		CTRL_SWDPIO(2)	/* data direction: host -> PHY */
#define MDI_CLK		CTRL_SWDPIN(3)	/* MDIO clock line */
8152 | | | 8152 | |
/*
 * wm_i82543_mii_sendbits:
 *
 *	Bit-bang the low `nbits' bits of `data' out on the i82543's
 *	software-driven MDIO pin, most-significant bit first, toggling
 *	the clock pin once per bit.  Timing is fixed by the explicit
 *	delay(10) calls; do not reorder the register writes.
 */
static void
wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
{
	uint32_t i, v;

	/* Configure MDIO as an output; the clock pin is always driven. */
	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= MDI_DIR | CTRL_SWDPIO(3);

	/* Shift out MSB first. */
	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
		if (data & i)
			v |= MDI_IO;
		else
			v &= ~MDI_IO;
		/* Present the data bit, then raise and lower the clock. */
		CSR_WRITE(sc, WMREG_CTRL, v);
		CSR_WRITE_FLUSH(sc);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		CSR_WRITE_FLUSH(sc);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v);
		CSR_WRITE_FLUSH(sc);
		delay(10);
	}
}
8178 | | | 8178 | |
/*
 * wm_i82543_mii_recvbits:
 *
 *	Bit-bang 16 bits in from the i82543's software-driven MDIO pin,
 *	most-significant bit first, and return them in the low 16 bits
 *	of the result.  Timing is fixed by the explicit delay(10) calls;
 *	do not reorder the register writes.
 */
static uint32_t
wm_i82543_mii_recvbits(struct wm_softc *sc)
{
	uint32_t v, i, data = 0;

	/* Release MDIO (input); keep the clock pin as an output. */
	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= CTRL_SWDPIO(3);

	/* One full clock cycle before sampling begins. */
	CSR_WRITE(sc, WMREG_CTRL, v);
	CSR_WRITE_FLUSH(sc);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	CSR_WRITE_FLUSH(sc);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	CSR_WRITE_FLUSH(sc);
	delay(10);

	/* Sample 16 data bits, one per clock high phase. */
	for (i = 0; i < 16; i++) {
		data <<= 1;
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		CSR_WRITE_FLUSH(sc);
		delay(10);
		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
			data |= 1;
		CSR_WRITE(sc, WMREG_CTRL, v);
		CSR_WRITE_FLUSH(sc);
		delay(10);
	}

	/* One trailing clock cycle to finish the frame. */
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	CSR_WRITE_FLUSH(sc);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	CSR_WRITE_FLUSH(sc);
	delay(10);

	return data;
}
8219 | | | 8219 | |
8220 | #undef MDI_IO | | 8220 | #undef MDI_IO |
8221 | #undef MDI_DIR | | 8221 | #undef MDI_DIR |
8222 | #undef MDI_CLK | | 8222 | #undef MDI_CLK |
8223 | | | 8223 | |
8224 | /* | | 8224 | /* |
8225 | * wm_gmii_i82543_readreg: [mii interface function] | | 8225 | * wm_gmii_i82543_readreg: [mii interface function] |
8226 | * | | 8226 | * |
8227 | * Read a PHY register on the GMII (i82543 version). | | 8227 | * Read a PHY register on the GMII (i82543 version). |
8228 | */ | | 8228 | */ |
8229 | static int | | 8229 | static int |
8230 | wm_gmii_i82543_readreg(device_t self, int phy, int reg) | | 8230 | wm_gmii_i82543_readreg(device_t self, int phy, int reg) |
8231 | { | | 8231 | { |
8232 | struct wm_softc *sc = device_private(self); | | 8232 | struct wm_softc *sc = device_private(self); |
8233 | int rv; | | 8233 | int rv; |
8234 | | | 8234 | |
8235 | wm_i82543_mii_sendbits(sc, 0xffffffffU, 32); | | 8235 | wm_i82543_mii_sendbits(sc, 0xffffffffU, 32); |
8236 | wm_i82543_mii_sendbits(sc, reg | (phy << 5) | | | 8236 | wm_i82543_mii_sendbits(sc, reg | (phy << 5) | |
8237 | (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14); | | 8237 | (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14); |
8238 | rv = wm_i82543_mii_recvbits(sc) & 0xffff; | | 8238 | rv = wm_i82543_mii_recvbits(sc) & 0xffff; |
8239 | | | 8239 | |
8240 | DPRINTF(WM_DEBUG_GMII, | | 8240 | DPRINTF(WM_DEBUG_GMII, |
8241 | ("%s: GMII: read phy %d reg %d -> 0x%04x\n", | | 8241 | ("%s: GMII: read phy %d reg %d -> 0x%04x\n", |
8242 | device_xname(sc->sc_dev), phy, reg, rv)); | | 8242 | device_xname(sc->sc_dev), phy, reg, rv)); |
8243 | | | 8243 | |
8244 | return rv; | | 8244 | return rv; |
8245 | } | | 8245 | } |
8246 | | | 8246 | |
8247 | /* | | 8247 | /* |
8248 | * wm_gmii_i82543_writereg: [mii interface function] | | 8248 | * wm_gmii_i82543_writereg: [mii interface function] |
8249 | * | | 8249 | * |
8250 | * Write a PHY register on the GMII (i82543 version). | | 8250 | * Write a PHY register on the GMII (i82543 version). |
8251 | */ | | 8251 | */ |
8252 | static void | | 8252 | static void |
8253 | wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val) | | 8253 | wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val) |
8254 | { | | 8254 | { |
8255 | struct wm_softc *sc = device_private(self); | | 8255 | struct wm_softc *sc = device_private(self); |
8256 | | | 8256 | |
8257 | wm_i82543_mii_sendbits(sc, 0xffffffffU, 32); | | 8257 | wm_i82543_mii_sendbits(sc, 0xffffffffU, 32); |
8258 | wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) | | | 8258 | wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) | |
8259 | (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) | | | 8259 | (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) | |
8260 | (MII_COMMAND_START << 30), 32); | | 8260 | (MII_COMMAND_START << 30), 32); |
8261 | } | | 8261 | } |
8262 | | | 8262 | |
8263 | /* | | 8263 | /* |
8264 | * wm_gmii_i82544_readreg: [mii interface function] | | 8264 | * wm_gmii_i82544_readreg: [mii interface function] |
8265 | * | | 8265 | * |
8266 | * Read a PHY register on the GMII. | | 8266 | * Read a PHY register on the GMII. |
8267 | */ | | 8267 | */ |
8268 | static int | | 8268 | static int |
8269 | wm_gmii_i82544_readreg(device_t self, int phy, int reg) | | 8269 | wm_gmii_i82544_readreg(device_t self, int phy, int reg) |
8270 | { | | 8270 | { |
8271 | struct wm_softc *sc = device_private(self); | | 8271 | struct wm_softc *sc = device_private(self); |
8272 | uint32_t mdic = 0; | | 8272 | uint32_t mdic = 0; |
8273 | int i, rv; | | 8273 | int i, rv; |
8274 | | | 8274 | |
8275 | CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) | | | 8275 | CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) | |
8276 | MDIC_REGADD(reg)); | | 8276 | MDIC_REGADD(reg)); |
8277 | | | 8277 | |
8278 | for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) { | | 8278 | for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) { |
8279 | mdic = CSR_READ(sc, WMREG_MDIC); | | 8279 | mdic = CSR_READ(sc, WMREG_MDIC); |
8280 | if (mdic & MDIC_READY) | | 8280 | if (mdic & MDIC_READY) |
8281 | break; | | 8281 | break; |
8282 | delay(50); | | 8282 | delay(50); |
8283 | } | | 8283 | } |
8284 | | | 8284 | |
8285 | if ((mdic & MDIC_READY) == 0) { | | 8285 | if ((mdic & MDIC_READY) == 0) { |
8286 | log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n", | | 8286 | log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n", |
8287 | device_xname(sc->sc_dev), phy, reg); | | 8287 | device_xname(sc->sc_dev), phy, reg); |
8288 | rv = 0; | | 8288 | rv = 0; |
8289 | } else if (mdic & MDIC_E) { | | 8289 | } else if (mdic & MDIC_E) { |
8290 | #if 0 /* This is normal if no PHY is present. */ | | 8290 | #if 0 /* This is normal if no PHY is present. */ |
8291 | log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n", | | 8291 | log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n", |
8292 | device_xname(sc->sc_dev), phy, reg); | | 8292 | device_xname(sc->sc_dev), phy, reg); |
8293 | #endif | | 8293 | #endif |
8294 | rv = 0; | | 8294 | rv = 0; |
8295 | } else { | | 8295 | } else { |
8296 | rv = MDIC_DATA(mdic); | | 8296 | rv = MDIC_DATA(mdic); |
8297 | if (rv == 0xffff) | | 8297 | if (rv == 0xffff) |
8298 | rv = 0; | | 8298 | rv = 0; |
8299 | } | | 8299 | } |
8300 | | | 8300 | |
8301 | return rv; | | 8301 | return rv; |
8302 | } | | 8302 | } |
8303 | | | 8303 | |
8304 | /* | | 8304 | /* |
8305 | * wm_gmii_i82544_writereg: [mii interface function] | | 8305 | * wm_gmii_i82544_writereg: [mii interface function] |
8306 | * | | 8306 | * |
8307 | * Write a PHY register on the GMII. | | 8307 | * Write a PHY register on the GMII. |
8308 | */ | | 8308 | */ |
8309 | static void | | 8309 | static void |
8310 | wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val) | | 8310 | wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val) |
8311 | { | | 8311 | { |
8312 | struct wm_softc *sc = device_private(self); | | 8312 | struct wm_softc *sc = device_private(self); |
8313 | uint32_t mdic = 0; | | 8313 | uint32_t mdic = 0; |
8314 | int i; | | 8314 | int i; |
8315 | | | 8315 | |
8316 | CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) | | | 8316 | CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) | |
8317 | MDIC_REGADD(reg) | MDIC_DATA(val)); | | 8317 | MDIC_REGADD(reg) | MDIC_DATA(val)); |
8318 | | | 8318 | |
8319 | for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) { | | 8319 | for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) { |
8320 | mdic = CSR_READ(sc, WMREG_MDIC); | | 8320 | mdic = CSR_READ(sc, WMREG_MDIC); |
8321 | if (mdic & MDIC_READY) | | 8321 | if (mdic & MDIC_READY) |
8322 | break; | | 8322 | break; |
8323 | delay(50); | | 8323 | delay(50); |
8324 | } | | 8324 | } |
8325 | | | 8325 | |
8326 | if ((mdic & MDIC_READY) == 0) | | 8326 | if ((mdic & MDIC_READY) == 0) |
8327 | log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n", | | 8327 | log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n", |
8328 | device_xname(sc->sc_dev), phy, reg); | | 8328 | device_xname(sc->sc_dev), phy, reg); |
8329 | else if (mdic & MDIC_E) | | 8329 | else if (mdic & MDIC_E) |
8330 | log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n", | | 8330 | log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n", |
8331 | device_xname(sc->sc_dev), phy, reg); | | 8331 | device_xname(sc->sc_dev), phy, reg); |
8332 | } | | 8332 | } |
8333 | | | 8333 | |
/*
 * wm_gmii_i80003_readreg: [mii interface function]
 *
 *	Read a PHY register on the 80003's kumeran bus (GG82563 PHY,
 *	judging by the GG82563_* register names).  The register space is
 *	paged: the page (reg >> GG82563_PAGE_SHIFT) is written to a
 *	page-select register first, then the in-page offset is read via
 *	the normal MDIC path.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
 */
static int
wm_gmii_i80003_readreg(device_t self, int phy, int reg)
{
	struct wm_softc *sc = device_private(self);
	int sem;
	int rv;

	if (phy != 1) /* only one PHY on kumeran bus */
		return 0;

	/* Serialize PHY access with firmware via the SW/FW semaphore. */
	sem = swfwphysem[sc->sc_funcid];
	if (wm_get_swfw_semaphore(sc, sem)) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return 0;
	}

	/*
	 * Offsets at or above GG82563_MIN_ALT_REG must use the
	 * alternate page-select register.
	 */
	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
		    reg >> GG82563_PAGE_SHIFT);
	} else {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
		    reg >> GG82563_PAGE_SHIFT);
	}
	/* Wait 200us more to work around a ready-bit bug in the MDIC register */
	delay(200);
	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
	delay(200);

	wm_put_swfw_semaphore(sc, sem);
	return rv;
}
8373 | | | 8373 | |
/*
 * wm_gmii_i80003_writereg: [mii interface function]
 *
 *	Write a PHY register on the 80003's kumeran bus (GG82563 PHY).
 *	Same paged-access scheme as wm_gmii_i80003_readreg(): select
 *	the page first, then write the in-page offset via MDIC.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
 */
static void
wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
{
	struct wm_softc *sc = device_private(self);
	int sem;

	if (phy != 1) /* only one PHY on kumeran bus */
		return;

	/* Serialize PHY access with firmware via the SW/FW semaphore. */
	sem = swfwphysem[sc->sc_funcid];
	if (wm_get_swfw_semaphore(sc, sem)) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return;
	}

	/*
	 * Offsets at or above GG82563_MIN_ALT_REG must use the
	 * alternate page-select register.
	 */
	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
		    reg >> GG82563_PAGE_SHIFT);
	} else {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
		    reg >> GG82563_PAGE_SHIFT);
	}
	/* Wait 200us more to work around a ready-bit bug in the MDIC register */
	delay(200);
	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
	delay(200);

	wm_put_swfw_semaphore(sc, sem);
}
8411 | | | 8411 | |
/*
 * wm_gmii_bm_readreg: [mii interface function]
 *
 *	Read a PHY register on a BM PHY.  (The old comment said
 *	"kumeran"; this accessor is actually installed as the fallback
 *	for ICH8 variants when normal PHY detection fails — see the
 *	media-init code above.)  Registers above
 *	BME1000_MAX_MULTI_PAGE_REG need an explicit page select first.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
 */
static int
wm_gmii_bm_readreg(device_t self, int phy, int reg)
{
	struct wm_softc *sc = device_private(self);
	int sem;
	int rv;

	/* Serialize PHY access with firmware via the SW/FW semaphore. */
	sem = swfwphysem[sc->sc_funcid];
	if (wm_get_swfw_semaphore(sc, sem)) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return 0;
	}

	/*
	 * Select the page: PHY 1 takes the full register number in the
	 * IGP-style page-select register; other addresses use the
	 * GG82563-style page-select with the shifted page number.
	 */
	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
		if (phy == 1)
			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
			    reg);
		else
			wm_gmii_i82544_writereg(self, phy,
			    GG82563_PHY_PAGE_SELECT,
			    reg >> GG82563_PAGE_SHIFT);
	}

	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
	wm_put_swfw_semaphore(sc, sem);
	return rv;
}
8447 | | | 8447 | |
/*
 * wm_gmii_bm_writereg: [mii interface function]
 *
 *	Write a PHY register on a BM PHY.  (The old comment said
 *	"kumeran"; this accessor is actually installed as the fallback
 *	for ICH8 variants when normal PHY detection fails — see the
 *	media-init code above.)  Registers above
 *	BME1000_MAX_MULTI_PAGE_REG need an explicit page select first.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
 */
static void
wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
{
	struct wm_softc *sc = device_private(self);
	int sem;

	/* Serialize PHY access with firmware via the SW/FW semaphore. */
	sem = swfwphysem[sc->sc_funcid];
	if (wm_get_swfw_semaphore(sc, sem)) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return;
	}

	/*
	 * Select the page: PHY 1 takes the full register number in the
	 * IGP-style page-select register; other addresses use the
	 * GG82563-style page-select with the shifted page number.
	 */
	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
		if (phy == 1)
			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
			    reg);
		else
			wm_gmii_i82544_writereg(self, phy,
			    GG82563_PHY_PAGE_SELECT,
			    reg >> GG82563_PAGE_SHIFT);
	}

	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
	wm_put_swfw_semaphore(sc, sem);
}
8481 | | | 8481 | |
8482 | static void | | 8482 | static void |
8483 | wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd) | | 8483 | wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd) |
8484 | { | | 8484 | { |
8485 | struct wm_softc *sc = device_private(self); | | 8485 | struct wm_softc *sc = device_private(self); |
8486 | uint16_t regnum = BM_PHY_REG_NUM(offset); | | 8486 | uint16_t regnum = BM_PHY_REG_NUM(offset); |
8487 | uint16_t wuce; | | 8487 | uint16_t wuce; |
8488 | | | 8488 | |
8489 | /* XXX Gig must be disabled for MDIO accesses to page 800 */ | | 8489 | /* XXX Gig must be disabled for MDIO accesses to page 800 */ |
8490 | if (sc->sc_type == WM_T_PCH) { | | 8490 | if (sc->sc_type == WM_T_PCH) { |
8491 | /* XXX e1000 driver do nothing... why? */ | | 8491 | /* XXX e1000 driver do nothing... why? */ |
8492 | } | | 8492 | } |
8493 | | | 8493 | |
8494 | /* Set page 769 */ | | 8494 | /* Set page 769 */ |
8495 | wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT, | | 8495 | wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT, |
8496 | BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT); | | 8496 | BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT); |
8497 | | | 8497 | |
8498 | wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG); | | 8498 | wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG); |
8499 | | | 8499 | |
8500 | wuce &= ~BM_WUC_HOST_WU_BIT; | | 8500 | wuce &= ~BM_WUC_HOST_WU_BIT; |
8501 | wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, | | 8501 | wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, |
8502 | wuce | BM_WUC_ENABLE_BIT); | | 8502 | wuce | BM_WUC_ENABLE_BIT); |
8503 | | | 8503 | |
8504 | /* Select page 800 */ | | 8504 | /* Select page 800 */ |
8505 | wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT, | | 8505 | wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT, |
8506 | BM_WUC_PAGE << BME1000_PAGE_SHIFT); | | 8506 | BM_WUC_PAGE << BME1000_PAGE_SHIFT); |
8507 | | | 8507 | |
8508 | /* Write page 800 */ | | 8508 | /* Write page 800 */ |
8509 | wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum); | | 8509 | wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum); |
8510 | | | 8510 | |
8511 | if (rd) | | 8511 | if (rd) |
8512 | *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE); | | 8512 | *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE); |
8513 | else | | 8513 | else |
8514 | wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val); | | 8514 | wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val); |
8515 | | | 8515 | |
8516 | /* Set page 769 */ | | 8516 | /* Set page 769 */ |
8517 | wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT, | | 8517 | wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT, |
8518 | BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT); | | 8518 | BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT); |