| @@ -1,1085 +1,1085 @@ | | | @@ -1,1085 +1,1085 @@ |
1 | /* $NetBSD: if_wm.c,v 1.366 2015/10/13 09:03:58 knakahara Exp $ */ | | 1 | /* $NetBSD: if_wm.c,v 1.367 2015/10/13 09:10:01 knakahara Exp $ */ |
2 | | | 2 | |
3 | /* | | 3 | /* |
4 | * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc. | | 4 | * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * Written by Jason R. Thorpe for Wasabi Systems, Inc. | | 7 | * Written by Jason R. Thorpe for Wasabi Systems, Inc. |
8 | * | | 8 | * |
9 | * Redistribution and use in source and binary forms, with or without | | 9 | * Redistribution and use in source and binary forms, with or without |
10 | * modification, are permitted provided that the following conditions | | 10 | * modification, are permitted provided that the following conditions |
11 | * are met: | | 11 | * are met: |
12 | * 1. Redistributions of source code must retain the above copyright | | 12 | * 1. Redistributions of source code must retain the above copyright |
13 | * notice, this list of conditions and the following disclaimer. | | 13 | * notice, this list of conditions and the following disclaimer. |
14 | * 2. Redistributions in binary form must reproduce the above copyright | | 14 | * 2. Redistributions in binary form must reproduce the above copyright |
15 | * notice, this list of conditions and the following disclaimer in the | | 15 | * notice, this list of conditions and the following disclaimer in the |
16 | * documentation and/or other materials provided with the distribution. | | 16 | * documentation and/or other materials provided with the distribution. |
17 | * 3. All advertising materials mentioning features or use of this software | | 17 | * 3. All advertising materials mentioning features or use of this software |
18 | * must display the following acknowledgement: | | 18 | * must display the following acknowledgement: |
19 | * This product includes software developed for the NetBSD Project by | | 19 | * This product includes software developed for the NetBSD Project by |
20 | * Wasabi Systems, Inc. | | 20 | * Wasabi Systems, Inc. |
21 | * 4. The name of Wasabi Systems, Inc. may not be used to endorse | | 21 | * 4. The name of Wasabi Systems, Inc. may not be used to endorse |
22 | * or promote products derived from this software without specific prior | | 22 | * or promote products derived from this software without specific prior |
23 | * written permission. | | 23 | * written permission. |
24 | * | | 24 | * |
25 | * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND | | 25 | * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND |
26 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 26 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
27 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 27 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
28 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC | | 28 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC |
29 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 29 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
30 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 30 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
31 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 31 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
32 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 32 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
33 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 33 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
34 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 34 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
35 | * POSSIBILITY OF SUCH DAMAGE. | | 35 | * POSSIBILITY OF SUCH DAMAGE. |
36 | */ | | 36 | */ |
37 | | | 37 | |
38 | /******************************************************************************* | | 38 | /******************************************************************************* |
39 | | | 39 | |
40 | Copyright (c) 2001-2005, Intel Corporation | | 40 | Copyright (c) 2001-2005, Intel Corporation |
41 | All rights reserved. | | 41 | All rights reserved. |
42 | | | 42 | |
43 | Redistribution and use in source and binary forms, with or without | | 43 | Redistribution and use in source and binary forms, with or without |
44 | modification, are permitted provided that the following conditions are met: | | 44 | modification, are permitted provided that the following conditions are met: |
45 | | | 45 | |
46 | 1. Redistributions of source code must retain the above copyright notice, | | 46 | 1. Redistributions of source code must retain the above copyright notice, |
47 | this list of conditions and the following disclaimer. | | 47 | this list of conditions and the following disclaimer. |
48 | | | 48 | |
49 | 2. Redistributions in binary form must reproduce the above copyright | | 49 | 2. Redistributions in binary form must reproduce the above copyright |
50 | notice, this list of conditions and the following disclaimer in the | | 50 | notice, this list of conditions and the following disclaimer in the |
51 | documentation and/or other materials provided with the distribution. | | 51 | documentation and/or other materials provided with the distribution. |
52 | | | 52 | |
53 | 3. Neither the name of the Intel Corporation nor the names of its | | 53 | 3. Neither the name of the Intel Corporation nor the names of its |
54 | contributors may be used to endorse or promote products derived from | | 54 | contributors may be used to endorse or promote products derived from |
55 | this software without specific prior written permission. | | 55 | this software without specific prior written permission. |
56 | | | 56 | |
57 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | | 57 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
58 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | | 58 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
59 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | | 59 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
60 | ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | | 60 | ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE |
61 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 61 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
62 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 62 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
63 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 63 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
64 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 64 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
65 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 65 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
66 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 66 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
67 | POSSIBILITY OF SUCH DAMAGE. | | 67 | POSSIBILITY OF SUCH DAMAGE. |
68 | | | 68 | |
69 | *******************************************************************************/ | | 69 | *******************************************************************************/ |
70 | /* | | 70 | /* |
71 | * Device driver for the Intel i8254x family of Gigabit Ethernet chips. | | 71 | * Device driver for the Intel i8254x family of Gigabit Ethernet chips. |
72 | * | | 72 | * |
73 | * TODO (in order of importance): | | 73 | * TODO (in order of importance): |
74 | * | | 74 | * |
75 | * - Check XXX'ed comments | | 75 | * - Check XXX'ed comments |
76 | * - EEE (Energy Efficiency Ethernet) | | 76 | * - EEE (Energy Efficiency Ethernet) |
77 | * - Multi queue | | 77 | * - Multi queue |
78 | * - Image Unique ID | | 78 | * - Image Unique ID |
79 | * - LPLU other than PCH* | | 79 | * - LPLU other than PCH* |
80 | * - Virtual Function | | 80 | * - Virtual Function |
81 | * - Set LED correctly (based on contents in EEPROM) | | 81 | * - Set LED correctly (based on contents in EEPROM) |
82 | * - Rework how parameters are loaded from the EEPROM. | | 82 | * - Rework how parameters are loaded from the EEPROM. |
83 | */ | | 83 | */ |
84 | | | 84 | |
85 | #include <sys/cdefs.h> | | 85 | #include <sys/cdefs.h> |
86 | __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.366 2015/10/13 09:03:58 knakahara Exp $"); | | 86 | __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.367 2015/10/13 09:10:01 knakahara Exp $"); |
87 | | | 87 | |
88 | #ifdef _KERNEL_OPT | | 88 | #ifdef _KERNEL_OPT |
89 | #include "opt_net_mpsafe.h" | | 89 | #include "opt_net_mpsafe.h" |
90 | #endif | | 90 | #endif |
91 | | | 91 | |
92 | #include <sys/param.h> | | 92 | #include <sys/param.h> |
93 | #include <sys/systm.h> | | 93 | #include <sys/systm.h> |
94 | #include <sys/callout.h> | | 94 | #include <sys/callout.h> |
95 | #include <sys/mbuf.h> | | 95 | #include <sys/mbuf.h> |
96 | #include <sys/malloc.h> | | 96 | #include <sys/malloc.h> |
97 | #include <sys/kmem.h> | | 97 | #include <sys/kmem.h> |
98 | #include <sys/kernel.h> | | 98 | #include <sys/kernel.h> |
99 | #include <sys/socket.h> | | 99 | #include <sys/socket.h> |
100 | #include <sys/ioctl.h> | | 100 | #include <sys/ioctl.h> |
101 | #include <sys/errno.h> | | 101 | #include <sys/errno.h> |
102 | #include <sys/device.h> | | 102 | #include <sys/device.h> |
103 | #include <sys/queue.h> | | 103 | #include <sys/queue.h> |
104 | #include <sys/syslog.h> | | 104 | #include <sys/syslog.h> |
105 | #include <sys/interrupt.h> | | 105 | #include <sys/interrupt.h> |
106 | | | 106 | |
107 | #include <sys/rndsource.h> | | 107 | #include <sys/rndsource.h> |
108 | | | 108 | |
109 | #include <net/if.h> | | 109 | #include <net/if.h> |
110 | #include <net/if_dl.h> | | 110 | #include <net/if_dl.h> |
111 | #include <net/if_media.h> | | 111 | #include <net/if_media.h> |
112 | #include <net/if_ether.h> | | 112 | #include <net/if_ether.h> |
113 | | | 113 | |
114 | #include <net/bpf.h> | | 114 | #include <net/bpf.h> |
115 | | | 115 | |
116 | #include <netinet/in.h> /* XXX for struct ip */ | | 116 | #include <netinet/in.h> /* XXX for struct ip */ |
117 | #include <netinet/in_systm.h> /* XXX for struct ip */ | | 117 | #include <netinet/in_systm.h> /* XXX for struct ip */ |
118 | #include <netinet/ip.h> /* XXX for struct ip */ | | 118 | #include <netinet/ip.h> /* XXX for struct ip */ |
119 | #include <netinet/ip6.h> /* XXX for struct ip6_hdr */ | | 119 | #include <netinet/ip6.h> /* XXX for struct ip6_hdr */ |
120 | #include <netinet/tcp.h> /* XXX for struct tcphdr */ | | 120 | #include <netinet/tcp.h> /* XXX for struct tcphdr */ |
121 | | | 121 | |
122 | #include <sys/bus.h> | | 122 | #include <sys/bus.h> |
123 | #include <sys/intr.h> | | 123 | #include <sys/intr.h> |
124 | #include <machine/endian.h> | | 124 | #include <machine/endian.h> |
125 | | | 125 | |
126 | #include <dev/mii/mii.h> | | 126 | #include <dev/mii/mii.h> |
127 | #include <dev/mii/miivar.h> | | 127 | #include <dev/mii/miivar.h> |
128 | #include <dev/mii/miidevs.h> | | 128 | #include <dev/mii/miidevs.h> |
129 | #include <dev/mii/mii_bitbang.h> | | 129 | #include <dev/mii/mii_bitbang.h> |
130 | #include <dev/mii/ikphyreg.h> | | 130 | #include <dev/mii/ikphyreg.h> |
131 | #include <dev/mii/igphyreg.h> | | 131 | #include <dev/mii/igphyreg.h> |
132 | #include <dev/mii/igphyvar.h> | | 132 | #include <dev/mii/igphyvar.h> |
133 | #include <dev/mii/inbmphyreg.h> | | 133 | #include <dev/mii/inbmphyreg.h> |
134 | | | 134 | |
135 | #include <dev/pci/pcireg.h> | | 135 | #include <dev/pci/pcireg.h> |
136 | #include <dev/pci/pcivar.h> | | 136 | #include <dev/pci/pcivar.h> |
137 | #include <dev/pci/pcidevs.h> | | 137 | #include <dev/pci/pcidevs.h> |
138 | | | 138 | |
139 | #include <dev/pci/if_wmreg.h> | | 139 | #include <dev/pci/if_wmreg.h> |
140 | #include <dev/pci/if_wmvar.h> | | 140 | #include <dev/pci/if_wmvar.h> |
141 | | | 141 | |
142 | #ifdef WM_DEBUG | | 142 | #ifdef WM_DEBUG |
143 | #define WM_DEBUG_LINK 0x01 | | 143 | #define WM_DEBUG_LINK 0x01 |
144 | #define WM_DEBUG_TX 0x02 | | 144 | #define WM_DEBUG_TX 0x02 |
145 | #define WM_DEBUG_RX 0x04 | | 145 | #define WM_DEBUG_RX 0x04 |
146 | #define WM_DEBUG_GMII 0x08 | | 146 | #define WM_DEBUG_GMII 0x08 |
147 | #define WM_DEBUG_MANAGE 0x10 | | 147 | #define WM_DEBUG_MANAGE 0x10 |
148 | #define WM_DEBUG_NVM 0x20 | | 148 | #define WM_DEBUG_NVM 0x20 |
149 | int wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII | | 149 | int wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII |
150 | | WM_DEBUG_MANAGE | WM_DEBUG_NVM; | | 150 | | WM_DEBUG_MANAGE | WM_DEBUG_NVM; |
151 | | | 151 | |
152 | #define DPRINTF(x, y) if (wm_debug & (x)) printf y | | 152 | #define DPRINTF(x, y) if (wm_debug & (x)) printf y |
153 | #else | | 153 | #else |
154 | #define DPRINTF(x, y) /* nothing */ | | 154 | #define DPRINTF(x, y) /* nothing */ |
155 | #endif /* WM_DEBUG */ | | 155 | #endif /* WM_DEBUG */ |
156 | | | 156 | |
157 | #ifdef NET_MPSAFE | | 157 | #ifdef NET_MPSAFE |
158 | #define WM_MPSAFE 1 | | 158 | #define WM_MPSAFE 1 |
159 | #endif | | 159 | #endif |
160 | | | 160 | |
161 | #ifdef __HAVE_PCI_MSI_MSIX | | 161 | #ifdef __HAVE_PCI_MSI_MSIX |
162 | #define WM_MSI_MSIX 1 /* Enable by default */ | | 162 | #define WM_MSI_MSIX 1 /* Enable by default */ |
163 | #endif | | 163 | #endif |
164 | | | 164 | |
165 | /* | | 165 | /* |
166 | * This device driver's max interrupt numbers. | | 166 | * This device driver's max interrupt numbers. |
167 | */ | | 167 | */ |
168 | #define WM_MAX_NTXINTR 16 | | 168 | #define WM_MAX_NTXINTR 16 |
169 | #define WM_MAX_NRXINTR 16 | | 169 | #define WM_MAX_NRXINTR 16 |
170 | #define WM_MAX_NINTR (WM_MAX_NTXINTR + WM_MAX_NRXINTR + 1) | | 170 | #define WM_MAX_NINTR (WM_MAX_NTXINTR + WM_MAX_NRXINTR + 1) |
171 | | | 171 | |
172 | /* | | 172 | /* |
173 | * Transmit descriptor list size. Due to errata, we can only have | | 173 | * Transmit descriptor list size. Due to errata, we can only have |
174 | * 256 hardware descriptors in the ring on < 82544, but we use 4096 | | 174 | * 256 hardware descriptors in the ring on < 82544, but we use 4096 |
175 | * on >= 82544. We tell the upper layers that they can queue a lot | | 175 | * on >= 82544. We tell the upper layers that they can queue a lot |
176 | * of packets, and we go ahead and manage up to 64 (16 for the i82547) | | 176 | * of packets, and we go ahead and manage up to 64 (16 for the i82547) |
177 | * of them at a time. | | 177 | * of them at a time. |
178 | * | | 178 | * |
179 | * We allow up to 256 (!) DMA segments per packet. Pathological packet | | 179 | * We allow up to 256 (!) DMA segments per packet. Pathological packet |
180 | * chains containing many small mbufs have been observed in zero-copy | | 180 | * chains containing many small mbufs have been observed in zero-copy |
181 | * situations with jumbo frames. | | 181 | * situations with jumbo frames. |
182 | */ | | 182 | */ |
183 | #define WM_NTXSEGS 256 | | 183 | #define WM_NTXSEGS 256 |
184 | #define WM_IFQUEUELEN 256 | | 184 | #define WM_IFQUEUELEN 256 |
185 | #define WM_TXQUEUELEN_MAX 64 | | 185 | #define WM_TXQUEUELEN_MAX 64 |
186 | #define WM_TXQUEUELEN_MAX_82547 16 | | 186 | #define WM_TXQUEUELEN_MAX_82547 16 |
187 | #define WM_TXQUEUELEN(txq) ((txq)->txq_num) | | 187 | #define WM_TXQUEUELEN(txq) ((txq)->txq_num) |
188 | #define WM_TXQUEUELEN_MASK(txq) (WM_TXQUEUELEN(txq) - 1) | | 188 | #define WM_TXQUEUELEN_MASK(txq) (WM_TXQUEUELEN(txq) - 1) |
189 | #define WM_TXQUEUE_GC(txq) (WM_TXQUEUELEN(txq) / 8) | | 189 | #define WM_TXQUEUE_GC(txq) (WM_TXQUEUELEN(txq) / 8) |
190 | #define WM_NTXDESC_82542 256 | | 190 | #define WM_NTXDESC_82542 256 |
191 | #define WM_NTXDESC_82544 4096 | | 191 | #define WM_NTXDESC_82544 4096 |
192 | #define WM_NTXDESC(txq) ((txq)->txq_ndesc) | | 192 | #define WM_NTXDESC(txq) ((txq)->txq_ndesc) |
193 | #define WM_NTXDESC_MASK(txq) (WM_NTXDESC(txq) - 1) | | 193 | #define WM_NTXDESC_MASK(txq) (WM_NTXDESC(txq) - 1) |
194 | #define WM_TXDESCSIZE(txq) (WM_NTXDESC(txq) * sizeof(wiseman_txdesc_t)) | | 194 | #define WM_TXDESCSIZE(txq) (WM_NTXDESC(txq) * sizeof(wiseman_txdesc_t)) |
195 | #define WM_NEXTTX(txq, x) (((x) + 1) & WM_NTXDESC_MASK(txq)) | | 195 | #define WM_NEXTTX(txq, x) (((x) + 1) & WM_NTXDESC_MASK(txq)) |
196 | #define WM_NEXTTXS(txq, x) (((x) + 1) & WM_TXQUEUELEN_MASK(txq)) | | 196 | #define WM_NEXTTXS(txq, x) (((x) + 1) & WM_TXQUEUELEN_MASK(txq)) |
197 | | | 197 | |
198 | #define WM_MAXTXDMA (2 * round_page(IP_MAXPACKET)) /* for TSO */ | | 198 | #define WM_MAXTXDMA (2 * round_page(IP_MAXPACKET)) /* for TSO */ |
199 | | | 199 | |
200 | /* | | 200 | /* |
201 | * Receive descriptor list size. We have one Rx buffer for normal | | 201 | * Receive descriptor list size. We have one Rx buffer for normal |
202 | * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized | | 202 | * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized |
203 | * packet. We allocate 256 receive descriptors, each with a 2k | | 203 | * packet. We allocate 256 receive descriptors, each with a 2k |
204 | * buffer (MCLBYTES), which gives us room for 50 jumbo packets. | | 204 | * buffer (MCLBYTES), which gives us room for 50 jumbo packets. |
205 | */ | | 205 | */ |
206 | #define WM_NRXDESC 256 | | 206 | #define WM_NRXDESC 256 |
207 | #define WM_NRXDESC_MASK (WM_NRXDESC - 1) | | 207 | #define WM_NRXDESC_MASK (WM_NRXDESC - 1) |
208 | #define WM_NEXTRX(x) (((x) + 1) & WM_NRXDESC_MASK) | | 208 | #define WM_NEXTRX(x) (((x) + 1) & WM_NRXDESC_MASK) |
209 | #define WM_PREVRX(x) (((x) - 1) & WM_NRXDESC_MASK) | | 209 | #define WM_PREVRX(x) (((x) - 1) & WM_NRXDESC_MASK) |
210 | | | 210 | |
211 | typedef union txdescs { | | 211 | typedef union txdescs { |
212 | wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544]; | | 212 | wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544]; |
213 | nq_txdesc_t sctxu_nq_txdescs[WM_NTXDESC_82544]; | | 213 | nq_txdesc_t sctxu_nq_txdescs[WM_NTXDESC_82544]; |
214 | } txdescs_t; | | 214 | } txdescs_t; |
215 | | | 215 | |
216 | #define WM_CDTXOFF(x) (sizeof(wiseman_txdesc_t) * x) | | 216 | #define WM_CDTXOFF(x) (sizeof(wiseman_txdesc_t) * x) |
217 | #define WM_CDRXOFF(x) (sizeof(wiseman_rxdesc_t) * x) | | 217 | #define WM_CDRXOFF(x) (sizeof(wiseman_rxdesc_t) * x) |
218 | | | 218 | |
219 | /* | | 219 | /* |
220 | * Software state for transmit jobs. | | 220 | * Software state for transmit jobs. |
221 | */ | | 221 | */ |
222 | struct wm_txsoft { | | 222 | struct wm_txsoft { |
223 | struct mbuf *txs_mbuf; /* head of our mbuf chain */ | | 223 | struct mbuf *txs_mbuf; /* head of our mbuf chain */ |
224 | bus_dmamap_t txs_dmamap; /* our DMA map */ | | 224 | bus_dmamap_t txs_dmamap; /* our DMA map */ |
225 | int txs_firstdesc; /* first descriptor in packet */ | | 225 | int txs_firstdesc; /* first descriptor in packet */ |
226 | int txs_lastdesc; /* last descriptor in packet */ | | 226 | int txs_lastdesc; /* last descriptor in packet */ |
227 | int txs_ndesc; /* # of descriptors used */ | | 227 | int txs_ndesc; /* # of descriptors used */ |
228 | }; | | 228 | }; |
229 | | | 229 | |
230 | /* | | 230 | /* |
231 | * Software state for receive buffers. Each descriptor gets a | | 231 | * Software state for receive buffers. Each descriptor gets a |
232 | * 2k (MCLBYTES) buffer and a DMA map. For packets which fill | | 232 | * 2k (MCLBYTES) buffer and a DMA map. For packets which fill |
233 | * more than one buffer, we chain them together. | | 233 | * more than one buffer, we chain them together. |
234 | */ | | 234 | */ |
235 | struct wm_rxsoft { | | 235 | struct wm_rxsoft { |
236 | struct mbuf *rxs_mbuf; /* head of our mbuf chain */ | | 236 | struct mbuf *rxs_mbuf; /* head of our mbuf chain */ |
237 | bus_dmamap_t rxs_dmamap; /* our DMA map */ | | 237 | bus_dmamap_t rxs_dmamap; /* our DMA map */ |
238 | }; | | 238 | }; |
239 | | | 239 | |
240 | #define WM_LINKUP_TIMEOUT 50 | | 240 | #define WM_LINKUP_TIMEOUT 50 |
241 | | | 241 | |
242 | static uint16_t swfwphysem[] = { | | 242 | static uint16_t swfwphysem[] = { |
243 | SWFW_PHY0_SM, | | 243 | SWFW_PHY0_SM, |
244 | SWFW_PHY1_SM, | | 244 | SWFW_PHY1_SM, |
245 | SWFW_PHY2_SM, | | 245 | SWFW_PHY2_SM, |
246 | SWFW_PHY3_SM | | 246 | SWFW_PHY3_SM |
247 | }; | | 247 | }; |
248 | | | 248 | |
249 | static const uint32_t wm_82580_rxpbs_table[] = { | | 249 | static const uint32_t wm_82580_rxpbs_table[] = { |
250 | 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 | | 250 | 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 |
251 | }; | | 251 | }; |
252 | | | 252 | |
253 | struct wm_softc; | | 253 | struct wm_softc; |
254 | | | 254 | |
255 | struct wm_txqueue { | | 255 | struct wm_txqueue { |
256 | kmutex_t *txq_lock; /* lock for tx operations */ | | 256 | kmutex_t *txq_lock; /* lock for tx operations */ |
257 | | | 257 | |
258 | struct wm_softc *txq_sc; | | 258 | struct wm_softc *txq_sc; |
259 | | | 259 | |
260 | int txq_id; /* index of transmit queues */ | | 260 | int txq_id; /* index of transmit queues */ |
261 | int txq_intr_idx; /* index of MSI-X tables */ | | 261 | int txq_intr_idx; /* index of MSI-X tables */ |
262 | | | 262 | |
263 | /* Software state for the transmit descriptors. */ | | 263 | /* Software state for the transmit descriptors. */ |
264 | int txq_num; /* must be a power of two */ | | 264 | int txq_num; /* must be a power of two */ |
265 | struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX]; | | 265 | struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX]; |
266 | | | 266 | |
267 | /* TX control data structures. */ | | 267 | /* TX control data structures. */ |
268 | int txq_ndesc; /* must be a power of two */ | | 268 | int txq_ndesc; /* must be a power of two */ |
269 | txdescs_t *txq_descs_u; | | 269 | txdescs_t *txq_descs_u; |
270 | bus_dmamap_t txq_desc_dmamap; /* control data DMA map */ | | 270 | bus_dmamap_t txq_desc_dmamap; /* control data DMA map */ |
271 | bus_dma_segment_t txq_desc_seg; /* control data segment */ | | 271 | bus_dma_segment_t txq_desc_seg; /* control data segment */ |
272 | int txq_desc_rseg; /* real number of control segment */ | | 272 | int txq_desc_rseg; /* real number of control segment */ |
273 | size_t txq_desc_size; /* control data size */ | | 273 | size_t txq_desc_size; /* control data size */ |
274 | #define txq_desc_dma txq_desc_dmamap->dm_segs[0].ds_addr | | 274 | #define txq_desc_dma txq_desc_dmamap->dm_segs[0].ds_addr |
275 | #define txq_descs txq_descs_u->sctxu_txdescs | | 275 | #define txq_descs txq_descs_u->sctxu_txdescs |
276 | #define txq_nq_descs txq_descs_u->sctxu_nq_txdescs | | 276 | #define txq_nq_descs txq_descs_u->sctxu_nq_txdescs |
277 | | | 277 | |
278 | bus_addr_t txq_tdt_reg; /* offset of TDT register */ | | 278 | bus_addr_t txq_tdt_reg; /* offset of TDT register */ |
279 | | | 279 | |
280 | int txq_free; /* number of free Tx descriptors */ | | 280 | int txq_free; /* number of free Tx descriptors */ |
281 | int txq_next; /* next ready Tx descriptor */ | | 281 | int txq_next; /* next ready Tx descriptor */ |
282 | | | 282 | |
283 | int txq_sfree; /* number of free Tx jobs */ | | 283 | int txq_sfree; /* number of free Tx jobs */ |
284 | int txq_snext; /* next free Tx job */ | | 284 | int txq_snext; /* next free Tx job */ |
285 | int txq_sdirty; /* dirty Tx jobs */ | | 285 | int txq_sdirty; /* dirty Tx jobs */ |
286 | | | 286 | |
287 | /* These 4 variables are used only on the 82547. */ | | 287 | /* These 4 variables are used only on the 82547. */ |
288 | int txq_fifo_size; /* Tx FIFO size */ | | 288 | int txq_fifo_size; /* Tx FIFO size */ |
289 | int txq_fifo_head; /* current head of FIFO */ | | 289 | int txq_fifo_head; /* current head of FIFO */ |
290 | uint32_t txq_fifo_addr; /* internal address of start of FIFO */ | | 290 | uint32_t txq_fifo_addr; /* internal address of start of FIFO */ |
291 | int txq_fifo_stall; /* Tx FIFO is stalled */ | | 291 | int txq_fifo_stall; /* Tx FIFO is stalled */ |
292 | | | 292 | |
293 | /* XXX which event counter is required? */ | | 293 | /* XXX which event counter is required? */ |
294 | }; | | 294 | }; |
295 | | | 295 | |
296 | struct wm_rxqueue { | | 296 | struct wm_rxqueue { |
297 | kmutex_t *rxq_lock; /* lock for rx operations */ | | 297 | kmutex_t *rxq_lock; /* lock for rx operations */ |
298 | | | 298 | |
299 | struct wm_softc *rxq_sc; | | 299 | struct wm_softc *rxq_sc; |
300 | | | 300 | |
301 | int rxq_id; /* index of receive queues */ | | 301 | int rxq_id; /* index of receive queues */ |
302 | int rxq_intr_idx; /* index of MSI-X tables */ | | 302 | int rxq_intr_idx; /* index of MSI-X tables */ |
303 | | | 303 | |
304 | /* Software state for the receive descriptors. */ | | 304 | /* Software state for the receive descriptors. */ |
305 | wiseman_rxdesc_t *rxq_descs; | | 305 | wiseman_rxdesc_t *rxq_descs; |
306 | | | 306 | |
307 | /* RX control data structures. */ | | 307 | /* RX control data structures. */ |
308 | struct wm_rxsoft rxq_soft[WM_NRXDESC]; | | 308 | struct wm_rxsoft rxq_soft[WM_NRXDESC]; |
309 | bus_dmamap_t rxq_desc_dmamap; /* control data DMA map */ | | 309 | bus_dmamap_t rxq_desc_dmamap; /* control data DMA map */ |
310 | bus_dma_segment_t rxq_desc_seg; /* control data segment */ | | 310 | bus_dma_segment_t rxq_desc_seg; /* control data segment */ |
311 | int rxq_desc_rseg; /* real number of control segment */ | | 311 | int rxq_desc_rseg; /* real number of control segment */ |
312 | size_t rxq_desc_size; /* control data size */ | | 312 | size_t rxq_desc_size; /* control data size */ |
313 | #define rxq_desc_dma rxq_desc_dmamap->dm_segs[0].ds_addr | | 313 | #define rxq_desc_dma rxq_desc_dmamap->dm_segs[0].ds_addr |
314 | | | 314 | |
315 | bus_addr_t rxq_rdt_reg; /* offset of RDT register */ | | 315 | bus_addr_t rxq_rdt_reg; /* offset of RDT register */ |
316 | | | 316 | |
317 | int rxq_ptr; /* next ready Rx descriptor/queue ent */ | | 317 | int rxq_ptr; /* next ready Rx descriptor/queue ent */ |
318 | int rxq_discard; | | 318 | int rxq_discard; |
319 | int rxq_len; | | 319 | int rxq_len; |
320 | struct mbuf *rxq_head; | | 320 | struct mbuf *rxq_head; |
321 | struct mbuf *rxq_tail; | | 321 | struct mbuf *rxq_tail; |
322 | struct mbuf **rxq_tailp; | | 322 | struct mbuf **rxq_tailp; |
323 | | | 323 | |
324 | /* XXX which event counter is required? */ | | 324 | /* XXX which event counter is required? */ |
325 | }; | | 325 | }; |
326 | | | 326 | |
327 | /* | | 327 | /* |
328 | * Software state per device. | | 328 | * Software state per device. |
329 | */ | | 329 | */ |
330 | struct wm_softc { | | 330 | struct wm_softc { |
331 | device_t sc_dev; /* generic device information */ | | 331 | device_t sc_dev; /* generic device information */ |
332 | bus_space_tag_t sc_st; /* bus space tag */ | | 332 | bus_space_tag_t sc_st; /* bus space tag */ |
333 | bus_space_handle_t sc_sh; /* bus space handle */ | | 333 | bus_space_handle_t sc_sh; /* bus space handle */ |
334 | bus_size_t sc_ss; /* bus space size */ | | 334 | bus_size_t sc_ss; /* bus space size */ |
335 | bus_space_tag_t sc_iot; /* I/O space tag */ | | 335 | bus_space_tag_t sc_iot; /* I/O space tag */ |
336 | bus_space_handle_t sc_ioh; /* I/O space handle */ | | 336 | bus_space_handle_t sc_ioh; /* I/O space handle */ |
337 | bus_size_t sc_ios; /* I/O space size */ | | 337 | bus_size_t sc_ios; /* I/O space size */ |
338 | bus_space_tag_t sc_flasht; /* flash registers space tag */ | | 338 | bus_space_tag_t sc_flasht; /* flash registers space tag */ |
339 | bus_space_handle_t sc_flashh; /* flash registers space handle */ | | 339 | bus_space_handle_t sc_flashh; /* flash registers space handle */ |
340 | bus_size_t sc_flashs; /* flash registers space size */ | | 340 | bus_size_t sc_flashs; /* flash registers space size */ |
341 | bus_dma_tag_t sc_dmat; /* bus DMA tag */ | | 341 | bus_dma_tag_t sc_dmat; /* bus DMA tag */ |
342 | | | 342 | |
343 | struct ethercom sc_ethercom; /* ethernet common data */ | | 343 | struct ethercom sc_ethercom; /* ethernet common data */ |
344 | struct mii_data sc_mii; /* MII/media information */ | | 344 | struct mii_data sc_mii; /* MII/media information */ |
345 | | | 345 | |
346 | pci_chipset_tag_t sc_pc; | | 346 | pci_chipset_tag_t sc_pc; |
347 | pcitag_t sc_pcitag; | | 347 | pcitag_t sc_pcitag; |
348 | int sc_bus_speed; /* PCI/PCIX bus speed */ | | 348 | int sc_bus_speed; /* PCI/PCIX bus speed */ |
349 | int sc_pcixe_capoff; /* PCI[Xe] capability reg offset */ | | 349 | int sc_pcixe_capoff; /* PCI[Xe] capability reg offset */ |
350 | | | 350 | |
351 | uint16_t sc_pcidevid; /* PCI device ID */ | | 351 | uint16_t sc_pcidevid; /* PCI device ID */ |
352 | wm_chip_type sc_type; /* MAC type */ | | 352 | wm_chip_type sc_type; /* MAC type */ |
353 | int sc_rev; /* MAC revision */ | | 353 | int sc_rev; /* MAC revision */ |
354 | wm_phy_type sc_phytype; /* PHY type */ | | 354 | wm_phy_type sc_phytype; /* PHY type */ |
355 | uint32_t sc_mediatype; /* Media type (Copper, Fiber, SERDES)*/ | | 355 | uint32_t sc_mediatype; /* Media type (Copper, Fiber, SERDES)*/ |
356 | #define WM_MEDIATYPE_UNKNOWN 0x00 | | 356 | #define WM_MEDIATYPE_UNKNOWN 0x00 |
357 | #define WM_MEDIATYPE_FIBER 0x01 | | 357 | #define WM_MEDIATYPE_FIBER 0x01 |
358 | #define WM_MEDIATYPE_COPPER 0x02 | | 358 | #define WM_MEDIATYPE_COPPER 0x02 |
359 | #define WM_MEDIATYPE_SERDES 0x03 /* Internal SERDES */ | | 359 | #define WM_MEDIATYPE_SERDES 0x03 /* Internal SERDES */ |
360 | int sc_funcid; /* unit number of the chip (0 to 3) */ | | 360 | int sc_funcid; /* unit number of the chip (0 to 3) */ |
361 | int sc_flags; /* flags; see below */ | | 361 | int sc_flags; /* flags; see below */ |
362 | int sc_if_flags; /* last if_flags */ | | 362 | int sc_if_flags; /* last if_flags */ |
363 | int sc_flowflags; /* 802.3x flow control flags */ | | 363 | int sc_flowflags; /* 802.3x flow control flags */ |
364 | int sc_align_tweak; | | 364 | int sc_align_tweak; |
365 | | | 365 | |
366 | void *sc_ihs[WM_MAX_NINTR]; /* | | 366 | void *sc_ihs[WM_MAX_NINTR]; /* |
367 | * interrupt cookie. | | 367 | * interrupt cookie. |
368 | * legacy and msi use sc_ihs[0]. | | 368 | * legacy and msi use sc_ihs[0]. |
369 | */ | | 369 | */ |
370 | pci_intr_handle_t *sc_intrs; /* legacy and msi use sc_intrs[0] */ | | 370 | pci_intr_handle_t *sc_intrs; /* legacy and msi use sc_intrs[0] */ |
371 | int sc_nintrs; /* number of interrupts */ | | 371 | int sc_nintrs; /* number of interrupts */ |
372 | | | 372 | |
373 | int sc_link_intr_idx; /* index of MSI-X tables */ | | 373 | int sc_link_intr_idx; /* index of MSI-X tables */ |
374 | | | 374 | |
375 | callout_t sc_tick_ch; /* tick callout */ | | 375 | callout_t sc_tick_ch; /* tick callout */ |
376 | bool sc_stopping; | | 376 | bool sc_stopping; |
377 | | | 377 | |
378 | int sc_nvm_ver_major; | | 378 | int sc_nvm_ver_major; |
379 | int sc_nvm_ver_minor; | | 379 | int sc_nvm_ver_minor; |
380 | int sc_nvm_ver_build; | | 380 | int sc_nvm_ver_build; |
381 | int sc_nvm_addrbits; /* NVM address bits */ | | 381 | int sc_nvm_addrbits; /* NVM address bits */ |
382 | unsigned int sc_nvm_wordsize; /* NVM word size */ | | 382 | unsigned int sc_nvm_wordsize; /* NVM word size */ |
383 | int sc_ich8_flash_base; | | 383 | int sc_ich8_flash_base; |
384 | int sc_ich8_flash_bank_size; | | 384 | int sc_ich8_flash_bank_size; |
385 | int sc_nvm_k1_enabled; | | 385 | int sc_nvm_k1_enabled; |
386 | | | 386 | |
387 | int sc_ntxqueues; | | 387 | int sc_ntxqueues; |
388 | struct wm_txqueue *sc_txq; | | 388 | struct wm_txqueue *sc_txq; |
389 | | | 389 | |
390 | int sc_nrxqueues; | | 390 | int sc_nrxqueues; |
391 | struct wm_rxqueue *sc_rxq; | | 391 | struct wm_rxqueue *sc_rxq; |
392 | | | 392 | |
393 | #ifdef WM_EVENT_COUNTERS | | 393 | #ifdef WM_EVENT_COUNTERS |
394 | /* Event counters. */ | | 394 | /* Event counters. */ |
395 | struct evcnt sc_ev_txsstall; /* Tx stalled due to no txs */ | | 395 | struct evcnt sc_ev_txsstall; /* Tx stalled due to no txs */ |
396 | struct evcnt sc_ev_txdstall; /* Tx stalled due to no txd */ | | 396 | struct evcnt sc_ev_txdstall; /* Tx stalled due to no txd */ |
397 | struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */ | | 397 | struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */ |
398 | struct evcnt sc_ev_txdw; /* Tx descriptor interrupts */ | | 398 | struct evcnt sc_ev_txdw; /* Tx descriptor interrupts */ |
399 | struct evcnt sc_ev_txqe; /* Tx queue empty interrupts */ | | 399 | struct evcnt sc_ev_txqe; /* Tx queue empty interrupts */ |
400 | struct evcnt sc_ev_rxintr; /* Rx interrupts */ | | 400 | struct evcnt sc_ev_rxintr; /* Rx interrupts */ |
401 | struct evcnt sc_ev_linkintr; /* Link interrupts */ | | 401 | struct evcnt sc_ev_linkintr; /* Link interrupts */ |
402 | | | 402 | |
403 | struct evcnt sc_ev_rxipsum; /* IP checksums checked in-bound */ | | 403 | struct evcnt sc_ev_rxipsum; /* IP checksums checked in-bound */ |
404 | struct evcnt sc_ev_rxtusum; /* TCP/UDP cksums checked in-bound */ | | 404 | struct evcnt sc_ev_rxtusum; /* TCP/UDP cksums checked in-bound */ |
405 | struct evcnt sc_ev_txipsum; /* IP checksums comp. out-bound */ | | 405 | struct evcnt sc_ev_txipsum; /* IP checksums comp. out-bound */ |
406 | struct evcnt sc_ev_txtusum; /* TCP/UDP cksums comp. out-bound */ | | 406 | struct evcnt sc_ev_txtusum; /* TCP/UDP cksums comp. out-bound */ |
407 | struct evcnt sc_ev_txtusum6; /* TCP/UDP v6 cksums comp. out-bound */ | | 407 | struct evcnt sc_ev_txtusum6; /* TCP/UDP v6 cksums comp. out-bound */ |
408 | struct evcnt sc_ev_txtso; /* TCP seg offload out-bound (IPv4) */ | | 408 | struct evcnt sc_ev_txtso; /* TCP seg offload out-bound (IPv4) */ |
409 | struct evcnt sc_ev_txtso6; /* TCP seg offload out-bound (IPv6) */ | | 409 | struct evcnt sc_ev_txtso6; /* TCP seg offload out-bound (IPv6) */ |
410 | struct evcnt sc_ev_txtsopain; /* painful header manip. for TSO */ | | 410 | struct evcnt sc_ev_txtsopain; /* painful header manip. for TSO */ |
411 | | | 411 | |
412 | struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */ | | 412 | struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */ |
413 | struct evcnt sc_ev_txdrop; /* Tx packets dropped (too many segs) */ | | 413 | struct evcnt sc_ev_txdrop; /* Tx packets dropped (too many segs) */ |
414 | | | 414 | |
415 | struct evcnt sc_ev_tu; /* Tx underrun */ | | 415 | struct evcnt sc_ev_tu; /* Tx underrun */ |
416 | | | 416 | |
417 | struct evcnt sc_ev_tx_xoff; /* Tx PAUSE(!0) frames */ | | 417 | struct evcnt sc_ev_tx_xoff; /* Tx PAUSE(!0) frames */ |
418 | struct evcnt sc_ev_tx_xon; /* Tx PAUSE(0) frames */ | | 418 | struct evcnt sc_ev_tx_xon; /* Tx PAUSE(0) frames */ |
419 | struct evcnt sc_ev_rx_xoff; /* Rx PAUSE(!0) frames */ | | 419 | struct evcnt sc_ev_rx_xoff; /* Rx PAUSE(!0) frames */ |
420 | struct evcnt sc_ev_rx_xon; /* Rx PAUSE(0) frames */ | | 420 | struct evcnt sc_ev_rx_xon; /* Rx PAUSE(0) frames */ |
421 | struct evcnt sc_ev_rx_macctl; /* Rx Unsupported */ | | 421 | struct evcnt sc_ev_rx_macctl; /* Rx Unsupported */ |
422 | #endif /* WM_EVENT_COUNTERS */ | | 422 | #endif /* WM_EVENT_COUNTERS */ |
423 | | | 423 | |
424 | 	/* This variable is used only on the 82547. */ | | 424 | 	/* This variable is used only on the 82547. */ |
425 | callout_t sc_txfifo_ch; /* Tx FIFO stall work-around timer */ | | 425 | callout_t sc_txfifo_ch; /* Tx FIFO stall work-around timer */ |
426 | | | 426 | |
427 | uint32_t sc_ctrl; /* prototype CTRL register */ | | 427 | uint32_t sc_ctrl; /* prototype CTRL register */ |
428 | #if 0 | | 428 | #if 0 |
429 | uint32_t sc_ctrl_ext; /* prototype CTRL_EXT register */ | | 429 | uint32_t sc_ctrl_ext; /* prototype CTRL_EXT register */ |
430 | #endif | | 430 | #endif |
431 | uint32_t sc_icr; /* prototype interrupt bits */ | | 431 | uint32_t sc_icr; /* prototype interrupt bits */ |
432 | uint32_t sc_itr; /* prototype intr throttling reg */ | | 432 | uint32_t sc_itr; /* prototype intr throttling reg */ |
433 | uint32_t sc_tctl; /* prototype TCTL register */ | | 433 | uint32_t sc_tctl; /* prototype TCTL register */ |
434 | uint32_t sc_rctl; /* prototype RCTL register */ | | 434 | uint32_t sc_rctl; /* prototype RCTL register */ |
435 | uint32_t sc_txcw; /* prototype TXCW register */ | | 435 | uint32_t sc_txcw; /* prototype TXCW register */ |
436 | uint32_t sc_tipg; /* prototype TIPG register */ | | 436 | uint32_t sc_tipg; /* prototype TIPG register */ |
437 | uint32_t sc_fcrtl; /* prototype FCRTL register */ | | 437 | uint32_t sc_fcrtl; /* prototype FCRTL register */ |
438 | uint32_t sc_pba; /* prototype PBA register */ | | 438 | uint32_t sc_pba; /* prototype PBA register */ |
439 | | | 439 | |
440 | int sc_tbi_linkup; /* TBI link status */ | | 440 | int sc_tbi_linkup; /* TBI link status */ |
441 | int sc_tbi_serdes_anegticks; /* autonegotiation ticks */ | | 441 | int sc_tbi_serdes_anegticks; /* autonegotiation ticks */ |
442 | int sc_tbi_serdes_ticks; /* tbi ticks */ | | 442 | int sc_tbi_serdes_ticks; /* tbi ticks */ |
443 | | | 443 | |
444 | int sc_mchash_type; /* multicast filter offset */ | | 444 | int sc_mchash_type; /* multicast filter offset */ |
445 | | | 445 | |
446 | krndsource_t rnd_source; /* random source */ | | 446 | krndsource_t rnd_source; /* random source */ |
447 | | | 447 | |
448 | kmutex_t *sc_core_lock; /* lock for softc operations */ | | 448 | kmutex_t *sc_core_lock; /* lock for softc operations */ |
449 | }; | | 449 | }; |
450 | | | 450 | |
/*
 * Per-queue and per-softc lock helpers.  The lock pointers may be NULL
 * (non-MPSAFE configuration), in which case the lock/unlock operations
 * are no-ops and the *_LOCKED predicates are always true.
 *
 * The statement macros are wrapped in do { } while (0) so that the
 * hidden "if" cannot capture an "else" that follows a macro invocation
 * (dangling-else hazard in the previous bare-if expansion).
 */
#define WM_TX_LOCK(_txq) \
	do { \
		if ((_txq)->txq_lock) \
			mutex_enter((_txq)->txq_lock); \
	} while (/*CONSTCOND*/0)
#define WM_TX_UNLOCK(_txq) \
	do { \
		if ((_txq)->txq_lock) \
			mutex_exit((_txq)->txq_lock); \
	} while (/*CONSTCOND*/0)
#define WM_TX_LOCKED(_txq) (!(_txq)->txq_lock || mutex_owned((_txq)->txq_lock))
#define WM_RX_LOCK(_rxq) \
	do { \
		if ((_rxq)->rxq_lock) \
			mutex_enter((_rxq)->rxq_lock); \
	} while (/*CONSTCOND*/0)
#define WM_RX_UNLOCK(_rxq) \
	do { \
		if ((_rxq)->rxq_lock) \
			mutex_exit((_rxq)->rxq_lock); \
	} while (/*CONSTCOND*/0)
#define WM_RX_LOCKED(_rxq) (!(_rxq)->rxq_lock || mutex_owned((_rxq)->rxq_lock))
#define WM_CORE_LOCK(_sc) \
	do { \
		if ((_sc)->sc_core_lock) \
			mutex_enter((_sc)->sc_core_lock); \
	} while (/*CONSTCOND*/0)
#define WM_CORE_UNLOCK(_sc) \
	do { \
		if ((_sc)->sc_core_lock) \
			mutex_exit((_sc)->sc_core_lock); \
	} while (/*CONSTCOND*/0)
#define WM_CORE_LOCKED(_sc) (!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
460 | | | 460 | |
/*
 * Flags passed when initializing this driver's callouts: mark them
 * MP-safe (run without the kernel lock) when the driver itself is
 * built with WM_MPSAFE, otherwise use the default (0).
 */
#ifdef WM_MPSAFE
#define CALLOUT_FLAGS CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS 0
#endif
466 | | | 466 | |
/*
 * Reset the receive mbuf chain being accumulated on (rxq): head emptied,
 * tail pointer aimed back at the head link, accumulated length zeroed.
 */
#define WM_RXCHAIN_RESET(rxq) \
do { \
	(rxq)->rxq_tailp = &(rxq)->rxq_head; \
	*(rxq)->rxq_tailp = NULL; \
	(rxq)->rxq_len = 0; \
} while (/*CONSTCOND*/0)

/*
 * Append mbuf (m) at the tail of the receive chain on (rxq) and advance
 * the tail pointer to (m)'s m_next link for the next append.
 */
#define WM_RXCHAIN_LINK(rxq, m) \
do { \
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m); \
	(rxq)->rxq_tailp = &(m)->m_next; \
} while (/*CONSTCOND*/0)
479 | | | 479 | |
/*
 * Event-counter helpers.  These compile away to nothing when the driver
 * is built without WM_EVENT_COUNTERS, so call sites need no #ifdefs.
 */
#ifdef WM_EVENT_COUNTERS
#define WM_EVCNT_INCR(ev) (ev)->ev_count++
#define WM_EVCNT_ADD(ev, val) (ev)->ev_count += (val)
#else
#define WM_EVCNT_INCR(ev) /* nothing */
#define WM_EVCNT_ADD(ev, val) /* nothing */
#endif
487 | | | 487 | |
/*
 * Device register (CSR) access: 32-bit reads/writes through the
 * memory-mapped register window (sc_st/sc_sh).
 */
#define CSR_READ(sc, reg) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define CSR_WRITE(sc, reg, val) \
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
/* Flush posted writes by reading back the STATUS register. */
#define CSR_WRITE_FLUSH(sc) \
	(void) CSR_READ((sc), WMREG_STATUS)

/* ICH8 flash window access (sc_flasht/sc_flashh), 32-bit and 16-bit. */
#define ICH8_FLASH_READ32(sc, reg) \
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE32(sc, reg, data) \
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define ICH8_FLASH_READ16(sc, reg) \
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE16(sc, reg, data) \
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
504 | | | 504 | |
/*
 * Bus address of Tx/Rx descriptor (x) within the queue's control-data
 * DMA area, plus helpers splitting it into low/high 32-bit halves.
 * The _HI half is 0 when bus_addr_t is narrower than 64 bits.
 */
#define WM_CDTXADDR(txq, x) ((txq)->txq_desc_dma + WM_CDTXOFF((x)))
#define WM_CDRXADDR(rxq, x) ((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))

#define WM_CDTXADDR_LO(txq, x) (WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define WM_CDTXADDR_HI(txq, x) \
	(sizeof(bus_addr_t) == 8 ? \
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define WM_CDRXADDR_LO(rxq, x) (WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define WM_CDRXADDR_HI(rxq, x) \
	(sizeof(bus_addr_t) == 8 ? \
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
517 | | | 517 | |
518 | /* | | 518 | /* |
519 | * Register read/write functions. | | 519 | * Register read/write functions. |
520 | * Other than CSR_{READ|WRITE}(). | | 520 | * Other than CSR_{READ|WRITE}(). |
521 | */ | | 521 | */ |
522 | #if 0 | | 522 | #if 0 |
523 | static inline uint32_t wm_io_read(struct wm_softc *, int); | | 523 | static inline uint32_t wm_io_read(struct wm_softc *, int); |
524 | #endif | | 524 | #endif |
525 | static inline void wm_io_write(struct wm_softc *, int, uint32_t); | | 525 | static inline void wm_io_write(struct wm_softc *, int, uint32_t); |
526 | static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t, | | 526 | static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t, |
527 | uint32_t, uint32_t); | | 527 | uint32_t, uint32_t); |
528 | static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t); | | 528 | static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t); |
529 | | | 529 | |
530 | /* | | 530 | /* |
531 | * Descriptor sync/init functions. | | 531 | * Descriptor sync/init functions. |
532 | */ | | 532 | */ |
533 | static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int); | | 533 | static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int); |
534 | static inline void wm_cdrxsync(struct wm_rxqueue *, int, int); | | 534 | static inline void wm_cdrxsync(struct wm_rxqueue *, int, int); |
535 | static inline void wm_init_rxdesc(struct wm_rxqueue *, int); | | 535 | static inline void wm_init_rxdesc(struct wm_rxqueue *, int); |
536 | | | 536 | |
537 | /* | | 537 | /* |
538 | * Device driver interface functions and commonly used functions. | | 538 | * Device driver interface functions and commonly used functions. |
539 | * match, attach, detach, init, start, stop, ioctl, watchdog and so on. | | 539 | * match, attach, detach, init, start, stop, ioctl, watchdog and so on. |
540 | */ | | 540 | */ |
541 | static const struct wm_product *wm_lookup(const struct pci_attach_args *); | | 541 | static const struct wm_product *wm_lookup(const struct pci_attach_args *); |
542 | static int wm_match(device_t, cfdata_t, void *); | | 542 | static int wm_match(device_t, cfdata_t, void *); |
543 | static void wm_attach(device_t, device_t, void *); | | 543 | static void wm_attach(device_t, device_t, void *); |
544 | static int wm_detach(device_t, int); | | 544 | static int wm_detach(device_t, int); |
545 | static bool wm_suspend(device_t, const pmf_qual_t *); | | 545 | static bool wm_suspend(device_t, const pmf_qual_t *); |
546 | static bool wm_resume(device_t, const pmf_qual_t *); | | 546 | static bool wm_resume(device_t, const pmf_qual_t *); |
547 | static void wm_watchdog(struct ifnet *); | | 547 | static void wm_watchdog(struct ifnet *); |
548 | static void wm_tick(void *); | | 548 | static void wm_tick(void *); |
549 | static int wm_ifflags_cb(struct ethercom *); | | 549 | static int wm_ifflags_cb(struct ethercom *); |
550 | static int wm_ioctl(struct ifnet *, u_long, void *); | | 550 | static int wm_ioctl(struct ifnet *, u_long, void *); |
551 | /* MAC address related */ | | 551 | /* MAC address related */ |
552 | static uint16_t wm_check_alt_mac_addr(struct wm_softc *); | | 552 | static uint16_t wm_check_alt_mac_addr(struct wm_softc *); |
553 | static int wm_read_mac_addr(struct wm_softc *, uint8_t *); | | 553 | static int wm_read_mac_addr(struct wm_softc *, uint8_t *); |
554 | static void wm_set_ral(struct wm_softc *, const uint8_t *, int); | | 554 | static void wm_set_ral(struct wm_softc *, const uint8_t *, int); |
555 | static uint32_t wm_mchash(struct wm_softc *, const uint8_t *); | | 555 | static uint32_t wm_mchash(struct wm_softc *, const uint8_t *); |
556 | static void wm_set_filter(struct wm_softc *); | | 556 | static void wm_set_filter(struct wm_softc *); |
557 | /* Reset and init related */ | | 557 | /* Reset and init related */ |
558 | static void wm_set_vlan(struct wm_softc *); | | 558 | static void wm_set_vlan(struct wm_softc *); |
559 | static void wm_set_pcie_completion_timeout(struct wm_softc *); | | 559 | static void wm_set_pcie_completion_timeout(struct wm_softc *); |
560 | static void wm_get_auto_rd_done(struct wm_softc *); | | 560 | static void wm_get_auto_rd_done(struct wm_softc *); |
561 | static void wm_lan_init_done(struct wm_softc *); | | 561 | static void wm_lan_init_done(struct wm_softc *); |
562 | static void wm_get_cfg_done(struct wm_softc *); | | 562 | static void wm_get_cfg_done(struct wm_softc *); |
563 | static void wm_initialize_hardware_bits(struct wm_softc *); | | 563 | static void wm_initialize_hardware_bits(struct wm_softc *); |
564 | static uint32_t wm_rxpbs_adjust_82580(uint32_t); | | 564 | static uint32_t wm_rxpbs_adjust_82580(uint32_t); |
565 | static void wm_reset(struct wm_softc *); | | 565 | static void wm_reset(struct wm_softc *); |
566 | static int wm_add_rxbuf(struct wm_rxqueue *, int); | | 566 | static int wm_add_rxbuf(struct wm_rxqueue *, int); |
567 | static void wm_rxdrain(struct wm_rxqueue *); | | 567 | static void wm_rxdrain(struct wm_rxqueue *); |
568 | static void wm_init_rss(struct wm_softc *); | | 568 | static void wm_init_rss(struct wm_softc *); |
569 | static int wm_init(struct ifnet *); | | 569 | static int wm_init(struct ifnet *); |
570 | static int wm_init_locked(struct ifnet *); | | 570 | static int wm_init_locked(struct ifnet *); |
571 | static void wm_stop(struct ifnet *, int); | | 571 | static void wm_stop(struct ifnet *, int); |
572 | static void wm_stop_locked(struct ifnet *, int); | | 572 | static void wm_stop_locked(struct ifnet *, int); |
573 | static int wm_tx_offload(struct wm_softc *, struct wm_txsoft *, | | 573 | static int wm_tx_offload(struct wm_softc *, struct wm_txsoft *, |
574 | uint32_t *, uint8_t *); | | 574 | uint32_t *, uint8_t *); |
575 | static void wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *); | | 575 | static void wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *); |
576 | static void wm_82547_txfifo_stall(void *); | | 576 | static void wm_82547_txfifo_stall(void *); |
577 | static int wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *); | | 577 | static int wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *); |
578 | /* DMA related */ | | 578 | /* DMA related */ |
579 | static int wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *); | | 579 | static int wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *); |
580 | static void wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *); | | 580 | static void wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *); |
581 | static void wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *); | | 581 | static void wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *); |
582 | static void wm_init_tx_regs(struct wm_softc *, struct wm_txqueue *); | | 582 | static void wm_init_tx_regs(struct wm_softc *, struct wm_txqueue *); |
583 | static int wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *); | | 583 | static int wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *); |
584 | static void wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *); | | 584 | static void wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *); |
585 | static void wm_init_rx_regs(struct wm_softc *, struct wm_rxqueue *); | | 585 | static void wm_init_rx_regs(struct wm_softc *, struct wm_rxqueue *); |
586 | static int wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *); | | 586 | static int wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *); |
587 | static void wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *); | | 587 | static void wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *); |
588 | static void wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *); | | 588 | static void wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *); |
589 | static int wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *); | | 589 | static int wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *); |
590 | static void wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *); | | 590 | static void wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *); |
591 | static int wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *); | | 591 | static int wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *); |
592 | static void wm_init_tx_queue(struct wm_softc *, struct wm_txqueue *); | | 592 | static void wm_init_tx_queue(struct wm_softc *, struct wm_txqueue *); |
593 | static int wm_init_rx_queue(struct wm_softc *, struct wm_rxqueue *); | | 593 | static int wm_init_rx_queue(struct wm_softc *, struct wm_rxqueue *); |
594 | static int wm_alloc_txrx_queues(struct wm_softc *); | | 594 | static int wm_alloc_txrx_queues(struct wm_softc *); |
595 | static void wm_free_txrx_queues(struct wm_softc *); | | 595 | static void wm_free_txrx_queues(struct wm_softc *); |
596 | static int wm_init_txrx_queues(struct wm_softc *); | | 596 | static int wm_init_txrx_queues(struct wm_softc *); |
597 | /* Start */ | | 597 | /* Start */ |
598 | static void wm_start(struct ifnet *); | | 598 | static void wm_start(struct ifnet *); |
599 | static void wm_start_locked(struct ifnet *); | | 599 | static void wm_start_locked(struct ifnet *); |
600 | static int wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *, | | 600 | static int wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *, |
601 | uint32_t *, uint32_t *, bool *); | | 601 | uint32_t *, uint32_t *, bool *); |
602 | static void wm_nq_start(struct ifnet *); | | 602 | static void wm_nq_start(struct ifnet *); |
603 | static void wm_nq_start_locked(struct ifnet *); | | 603 | static void wm_nq_start_locked(struct ifnet *); |
604 | /* Interrupt */ | | 604 | /* Interrupt */ |
605 | static int wm_txeof(struct wm_softc *); | | 605 | static int wm_txeof(struct wm_softc *); |
606 | static void wm_rxeof(struct wm_rxqueue *); | | 606 | static void wm_rxeof(struct wm_rxqueue *); |
607 | static void wm_linkintr_gmii(struct wm_softc *, uint32_t); | | 607 | static void wm_linkintr_gmii(struct wm_softc *, uint32_t); |
608 | static void wm_linkintr_tbi(struct wm_softc *, uint32_t); | | 608 | static void wm_linkintr_tbi(struct wm_softc *, uint32_t); |
609 | static void wm_linkintr_serdes(struct wm_softc *, uint32_t); | | 609 | static void wm_linkintr_serdes(struct wm_softc *, uint32_t); |
610 | static void wm_linkintr(struct wm_softc *, uint32_t); | | 610 | static void wm_linkintr(struct wm_softc *, uint32_t); |
611 | static int wm_intr_legacy(void *); | | 611 | static int wm_intr_legacy(void *); |
612 | #ifdef WM_MSI_MSIX | | 612 | #ifdef WM_MSI_MSIX |
613 | static void wm_adjust_qnum(struct wm_softc *, int); | | 613 | static void wm_adjust_qnum(struct wm_softc *, int); |
614 | static int wm_setup_legacy(struct wm_softc *); | | 614 | static int wm_setup_legacy(struct wm_softc *); |
615 | static int wm_setup_msix(struct wm_softc *); | | 615 | static int wm_setup_msix(struct wm_softc *); |
616 | static int wm_txintr_msix(void *); | | 616 | static int wm_txintr_msix(void *); |
617 | static int wm_rxintr_msix(void *); | | 617 | static int wm_rxintr_msix(void *); |
618 | static int wm_linkintr_msix(void *); | | 618 | static int wm_linkintr_msix(void *); |
619 | #endif | | 619 | #endif |
620 | | | 620 | |
621 | /* | | 621 | /* |
622 | * Media related. | | 622 | * Media related. |
623 | * GMII, SGMII, TBI, SERDES and SFP. | | 623 | * GMII, SGMII, TBI, SERDES and SFP. |
624 | */ | | 624 | */ |
625 | /* Common */ | | 625 | /* Common */ |
626 | static void wm_tbi_serdes_set_linkled(struct wm_softc *); | | 626 | static void wm_tbi_serdes_set_linkled(struct wm_softc *); |
627 | /* GMII related */ | | 627 | /* GMII related */ |
628 | static void wm_gmii_reset(struct wm_softc *); | | 628 | static void wm_gmii_reset(struct wm_softc *); |
629 | static int wm_get_phy_id_82575(struct wm_softc *); | | 629 | static int wm_get_phy_id_82575(struct wm_softc *); |
630 | static void wm_gmii_mediainit(struct wm_softc *, pci_product_id_t); | | 630 | static void wm_gmii_mediainit(struct wm_softc *, pci_product_id_t); |
631 | static int wm_gmii_mediachange(struct ifnet *); | | 631 | static int wm_gmii_mediachange(struct ifnet *); |
632 | static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *); | | 632 | static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *); |
633 | static void wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int); | | 633 | static void wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int); |
634 | static uint32_t wm_i82543_mii_recvbits(struct wm_softc *); | | 634 | static uint32_t wm_i82543_mii_recvbits(struct wm_softc *); |
635 | static int wm_gmii_i82543_readreg(device_t, int, int); | | 635 | static int wm_gmii_i82543_readreg(device_t, int, int); |
636 | static void wm_gmii_i82543_writereg(device_t, int, int, int); | | 636 | static void wm_gmii_i82543_writereg(device_t, int, int, int); |
637 | static int wm_gmii_i82544_readreg(device_t, int, int); | | 637 | static int wm_gmii_i82544_readreg(device_t, int, int); |
638 | static void wm_gmii_i82544_writereg(device_t, int, int, int); | | 638 | static void wm_gmii_i82544_writereg(device_t, int, int, int); |
639 | static int wm_gmii_i80003_readreg(device_t, int, int); | | 639 | static int wm_gmii_i80003_readreg(device_t, int, int); |
640 | static void wm_gmii_i80003_writereg(device_t, int, int, int); | | 640 | static void wm_gmii_i80003_writereg(device_t, int, int, int); |
641 | static int wm_gmii_bm_readreg(device_t, int, int); | | 641 | static int wm_gmii_bm_readreg(device_t, int, int); |
642 | static void wm_gmii_bm_writereg(device_t, int, int, int); | | 642 | static void wm_gmii_bm_writereg(device_t, int, int, int); |
643 | static void wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int); | | 643 | static void wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int); |
644 | static int wm_gmii_hv_readreg(device_t, int, int); | | 644 | static int wm_gmii_hv_readreg(device_t, int, int); |
645 | static void wm_gmii_hv_writereg(device_t, int, int, int); | | 645 | static void wm_gmii_hv_writereg(device_t, int, int, int); |
646 | static int wm_gmii_82580_readreg(device_t, int, int); | | 646 | static int wm_gmii_82580_readreg(device_t, int, int); |
647 | static void wm_gmii_82580_writereg(device_t, int, int, int); | | 647 | static void wm_gmii_82580_writereg(device_t, int, int, int); |
648 | static int wm_gmii_gs40g_readreg(device_t, int, int); | | 648 | static int wm_gmii_gs40g_readreg(device_t, int, int); |
649 | static void wm_gmii_gs40g_writereg(device_t, int, int, int); | | 649 | static void wm_gmii_gs40g_writereg(device_t, int, int, int); |
650 | static void wm_gmii_statchg(struct ifnet *); | | 650 | static void wm_gmii_statchg(struct ifnet *); |
651 | static int wm_kmrn_readreg(struct wm_softc *, int); | | 651 | static int wm_kmrn_readreg(struct wm_softc *, int); |
652 | static void wm_kmrn_writereg(struct wm_softc *, int, int); | | 652 | static void wm_kmrn_writereg(struct wm_softc *, int, int); |
653 | /* SGMII */ | | 653 | /* SGMII */ |
654 | static bool wm_sgmii_uses_mdio(struct wm_softc *); | | 654 | static bool wm_sgmii_uses_mdio(struct wm_softc *); |
655 | static int wm_sgmii_readreg(device_t, int, int); | | 655 | static int wm_sgmii_readreg(device_t, int, int); |
656 | static void wm_sgmii_writereg(device_t, int, int, int); | | 656 | static void wm_sgmii_writereg(device_t, int, int, int); |
657 | /* TBI related */ | | 657 | /* TBI related */ |
658 | static void wm_tbi_mediainit(struct wm_softc *); | | 658 | static void wm_tbi_mediainit(struct wm_softc *); |
659 | static int wm_tbi_mediachange(struct ifnet *); | | 659 | static int wm_tbi_mediachange(struct ifnet *); |
660 | static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *); | | 660 | static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *); |
661 | static int wm_check_for_link(struct wm_softc *); | | 661 | static int wm_check_for_link(struct wm_softc *); |
662 | static void wm_tbi_tick(struct wm_softc *); | | 662 | static void wm_tbi_tick(struct wm_softc *); |
663 | /* SERDES related */ | | 663 | /* SERDES related */ |
664 | static void wm_serdes_power_up_link_82575(struct wm_softc *); | | 664 | static void wm_serdes_power_up_link_82575(struct wm_softc *); |
665 | static int wm_serdes_mediachange(struct ifnet *); | | 665 | static int wm_serdes_mediachange(struct ifnet *); |
666 | static void wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *); | | 666 | static void wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *); |
667 | static void wm_serdes_tick(struct wm_softc *); | | 667 | static void wm_serdes_tick(struct wm_softc *); |
668 | /* SFP related */ | | 668 | /* SFP related */ |
669 | static int wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *); | | 669 | static int wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *); |
670 | static uint32_t wm_sfp_get_media_type(struct wm_softc *); | | 670 | static uint32_t wm_sfp_get_media_type(struct wm_softc *); |
671 | | | 671 | |
672 | /* | | 672 | /* |
673 | * NVM related. | | 673 | * NVM related. |
674 | * Microwire, SPI (w/wo EERD) and Flash. | | 674 | * Microwire, SPI (w/wo EERD) and Flash. |
675 | */ | | 675 | */ |
676 | /* Misc functions */ | | 676 | /* Misc functions */ |
677 | static void wm_eeprom_sendbits(struct wm_softc *, uint32_t, int); | | 677 | static void wm_eeprom_sendbits(struct wm_softc *, uint32_t, int); |
678 | static void wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int); | | 678 | static void wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int); |
679 | static int wm_nvm_set_addrbits_size_eecd(struct wm_softc *); | | 679 | static int wm_nvm_set_addrbits_size_eecd(struct wm_softc *); |
680 | /* Microwire */ | | 680 | /* Microwire */ |
681 | static int wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *); | | 681 | static int wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *); |
682 | /* SPI */ | | 682 | /* SPI */ |
683 | static int wm_nvm_ready_spi(struct wm_softc *); | | 683 | static int wm_nvm_ready_spi(struct wm_softc *); |
684 | static int wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *); | | 684 | static int wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *); |
685 | /* Used with EERD */ | | 685 | /* Used with EERD */ |
686 | static int wm_poll_eerd_eewr_done(struct wm_softc *, int); | | 686 | static int wm_poll_eerd_eewr_done(struct wm_softc *, int); |
687 | static int wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *); | | 687 | static int wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *); |
688 | /* Flash */ | | 688 | /* Flash */ |
689 | static int wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *, | | 689 | static int wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *, |
690 | unsigned int *); | | 690 | unsigned int *); |
691 | static int32_t wm_ich8_cycle_init(struct wm_softc *); | | 691 | static int32_t wm_ich8_cycle_init(struct wm_softc *); |
692 | static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t); | | 692 | static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t); |
693 | static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t, | | 693 | static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t, |
694 | uint16_t *); | | 694 | uint16_t *); |
695 | static int32_t wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *); | | 695 | static int32_t wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *); |
696 | static int32_t wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *); | | 696 | static int32_t wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *); |
697 | static int wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *); | | 697 | static int wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *); |
698 | /* iNVM */ | | 698 | /* iNVM */ |
699 | static int wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *); | | 699 | static int wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *); |
700 | static int wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *); | | 700 | static int wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *); |
701 | /* Lock, detecting NVM type, validate checksum and read */ | | 701 | /* Lock, detecting NVM type, validate checksum and read */ |
702 | static int wm_nvm_acquire(struct wm_softc *); | | 702 | static int wm_nvm_acquire(struct wm_softc *); |
703 | static void wm_nvm_release(struct wm_softc *); | | 703 | static void wm_nvm_release(struct wm_softc *); |
704 | static int wm_nvm_is_onboard_eeprom(struct wm_softc *); | | 704 | static int wm_nvm_is_onboard_eeprom(struct wm_softc *); |
705 | static int wm_nvm_get_flash_presence_i210(struct wm_softc *); | | 705 | static int wm_nvm_get_flash_presence_i210(struct wm_softc *); |
706 | static int wm_nvm_validate_checksum(struct wm_softc *); | | 706 | static int wm_nvm_validate_checksum(struct wm_softc *); |
707 | static void wm_nvm_version_invm(struct wm_softc *); | | 707 | static void wm_nvm_version_invm(struct wm_softc *); |
708 | static void wm_nvm_version(struct wm_softc *); | | 708 | static void wm_nvm_version(struct wm_softc *); |
709 | static int wm_nvm_read(struct wm_softc *, int, int, uint16_t *); | | 709 | static int wm_nvm_read(struct wm_softc *, int, int, uint16_t *); |
710 | | | 710 | |
711 | /* | | 711 | /* |
712 | * Hardware semaphores. | | 712 | * Hardware semaphores. |
713 | * Very complexed... | | 713 | * Very complexed... |
714 | */ | | 714 | */ |
715 | static int wm_get_swsm_semaphore(struct wm_softc *); | | 715 | static int wm_get_swsm_semaphore(struct wm_softc *); |
716 | static void wm_put_swsm_semaphore(struct wm_softc *); | | 716 | static void wm_put_swsm_semaphore(struct wm_softc *); |
717 | static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t); | | 717 | static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t); |
718 | static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t); | | 718 | static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t); |
719 | static int wm_get_swfwhw_semaphore(struct wm_softc *); | | 719 | static int wm_get_swfwhw_semaphore(struct wm_softc *); |
720 | static void wm_put_swfwhw_semaphore(struct wm_softc *); | | 720 | static void wm_put_swfwhw_semaphore(struct wm_softc *); |
721 | static int wm_get_hw_semaphore_82573(struct wm_softc *); | | 721 | static int wm_get_hw_semaphore_82573(struct wm_softc *); |
722 | static void wm_put_hw_semaphore_82573(struct wm_softc *); | | 722 | static void wm_put_hw_semaphore_82573(struct wm_softc *); |
723 | | | 723 | |
724 | /* | | 724 | /* |
725 | * Management mode and power management related subroutines. | | 725 | * Management mode and power management related subroutines. |
726 | * BMC, AMT, suspend/resume and EEE. | | 726 | * BMC, AMT, suspend/resume and EEE. |
727 | */ | | 727 | */ |
728 | static int wm_check_mng_mode(struct wm_softc *); | | 728 | static int wm_check_mng_mode(struct wm_softc *); |
729 | static int wm_check_mng_mode_ich8lan(struct wm_softc *); | | 729 | static int wm_check_mng_mode_ich8lan(struct wm_softc *); |
730 | static int wm_check_mng_mode_82574(struct wm_softc *); | | 730 | static int wm_check_mng_mode_82574(struct wm_softc *); |
731 | static int wm_check_mng_mode_generic(struct wm_softc *); | | 731 | static int wm_check_mng_mode_generic(struct wm_softc *); |
732 | static int wm_enable_mng_pass_thru(struct wm_softc *); | | 732 | static int wm_enable_mng_pass_thru(struct wm_softc *); |
733 | static int wm_check_reset_block(struct wm_softc *); | | 733 | static int wm_check_reset_block(struct wm_softc *); |
734 | static void wm_get_hw_control(struct wm_softc *); | | 734 | static void wm_get_hw_control(struct wm_softc *); |
735 | static void wm_release_hw_control(struct wm_softc *); | | 735 | static void wm_release_hw_control(struct wm_softc *); |
736 | static void wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int); | | 736 | static void wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int); |
737 | static void wm_smbustopci(struct wm_softc *); | | 737 | static void wm_smbustopci(struct wm_softc *); |
738 | static void wm_init_manageability(struct wm_softc *); | | 738 | static void wm_init_manageability(struct wm_softc *); |
739 | static void wm_release_manageability(struct wm_softc *); | | 739 | static void wm_release_manageability(struct wm_softc *); |
740 | static void wm_get_wakeup(struct wm_softc *); | | 740 | static void wm_get_wakeup(struct wm_softc *); |
741 | #ifdef WM_WOL | | 741 | #ifdef WM_WOL |
742 | static void wm_enable_phy_wakeup(struct wm_softc *); | | 742 | static void wm_enable_phy_wakeup(struct wm_softc *); |
743 | static void wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *); | | 743 | static void wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *); |
744 | static void wm_enable_wakeup(struct wm_softc *); | | 744 | static void wm_enable_wakeup(struct wm_softc *); |
745 | #endif | | 745 | #endif |
746 | /* EEE */ | | 746 | /* EEE */ |
747 | static void wm_set_eee_i350(struct wm_softc *); | | 747 | static void wm_set_eee_i350(struct wm_softc *); |
748 | | | 748 | |
749 | /* | | 749 | /* |
750 | * Workarounds (mainly PHY related). | | 750 | * Workarounds (mainly PHY related). |
751 | * Basically, PHY's workarounds are in the PHY drivers. | | 751 | * Basically, PHY's workarounds are in the PHY drivers. |
752 | */ | | 752 | */ |
753 | static void wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *); | | 753 | static void wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *); |
754 | static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *); | | 754 | static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *); |
755 | static void wm_hv_phy_workaround_ich8lan(struct wm_softc *); | | 755 | static void wm_hv_phy_workaround_ich8lan(struct wm_softc *); |
756 | static void wm_lv_phy_workaround_ich8lan(struct wm_softc *); | | 756 | static void wm_lv_phy_workaround_ich8lan(struct wm_softc *); |
757 | static void wm_k1_gig_workaround_hv(struct wm_softc *, int); | | 757 | static void wm_k1_gig_workaround_hv(struct wm_softc *, int); |
758 | static void wm_set_mdio_slow_mode_hv(struct wm_softc *); | | 758 | static void wm_set_mdio_slow_mode_hv(struct wm_softc *); |
759 | static void wm_configure_k1_ich8lan(struct wm_softc *, int); | | 759 | static void wm_configure_k1_ich8lan(struct wm_softc *, int); |
760 | static void wm_reset_init_script_82575(struct wm_softc *); | | 760 | static void wm_reset_init_script_82575(struct wm_softc *); |
761 | static void wm_reset_mdicnfg_82580(struct wm_softc *); | | 761 | static void wm_reset_mdicnfg_82580(struct wm_softc *); |
762 | static void wm_pll_workaround_i210(struct wm_softc *); | | 762 | static void wm_pll_workaround_i210(struct wm_softc *); |
763 | | | 763 | |
/*
 * Autoconfiguration glue: register the wm(4) driver with the kernel's
 * autoconf framework (match/attach/detach entry points; allow detach
 * at shutdown).
 */
CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
766 | | | 766 | |
767 | /* | | 767 | /* |
768 | * Devices supported by this driver. | | 768 | * Devices supported by this driver. |
769 | */ | | 769 | */ |
770 | static const struct wm_product { | | 770 | static const struct wm_product { |
771 | pci_vendor_id_t wmp_vendor; | | 771 | pci_vendor_id_t wmp_vendor; |
772 | pci_product_id_t wmp_product; | | 772 | pci_product_id_t wmp_product; |
773 | const char *wmp_name; | | 773 | const char *wmp_name; |
774 | wm_chip_type wmp_type; | | 774 | wm_chip_type wmp_type; |
775 | uint32_t wmp_flags; | | 775 | uint32_t wmp_flags; |
776 | #define WMP_F_UNKNOWN WM_MEDIATYPE_UNKNOWN | | 776 | #define WMP_F_UNKNOWN WM_MEDIATYPE_UNKNOWN |
777 | #define WMP_F_FIBER WM_MEDIATYPE_FIBER | | 777 | #define WMP_F_FIBER WM_MEDIATYPE_FIBER |
778 | #define WMP_F_COPPER WM_MEDIATYPE_COPPER | | 778 | #define WMP_F_COPPER WM_MEDIATYPE_COPPER |
779 | #define WMP_F_SERDES WM_MEDIATYPE_SERDES | | 779 | #define WMP_F_SERDES WM_MEDIATYPE_SERDES |
780 | #define WMP_MEDIATYPE(x) ((x) & 0x03) | | 780 | #define WMP_MEDIATYPE(x) ((x) & 0x03) |
781 | } wm_products[] = { | | 781 | } wm_products[] = { |
782 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542, | | 782 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542, |
783 | "Intel i82542 1000BASE-X Ethernet", | | 783 | "Intel i82542 1000BASE-X Ethernet", |
784 | WM_T_82542_2_1, WMP_F_FIBER }, | | 784 | WM_T_82542_2_1, WMP_F_FIBER }, |
785 | | | 785 | |
786 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER, | | 786 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER, |
787 | "Intel i82543GC 1000BASE-X Ethernet", | | 787 | "Intel i82543GC 1000BASE-X Ethernet", |
788 | WM_T_82543, WMP_F_FIBER }, | | 788 | WM_T_82543, WMP_F_FIBER }, |
789 | | | 789 | |
790 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER, | | 790 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER, |
791 | "Intel i82543GC 1000BASE-T Ethernet", | | 791 | "Intel i82543GC 1000BASE-T Ethernet", |
792 | WM_T_82543, WMP_F_COPPER }, | | 792 | WM_T_82543, WMP_F_COPPER }, |
793 | | | 793 | |
794 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER, | | 794 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER, |
795 | "Intel i82544EI 1000BASE-T Ethernet", | | 795 | "Intel i82544EI 1000BASE-T Ethernet", |
796 | WM_T_82544, WMP_F_COPPER }, | | 796 | WM_T_82544, WMP_F_COPPER }, |
797 | | | 797 | |
798 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER, | | 798 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER, |
799 | "Intel i82544EI 1000BASE-X Ethernet", | | 799 | "Intel i82544EI 1000BASE-X Ethernet", |
800 | WM_T_82544, WMP_F_FIBER }, | | 800 | WM_T_82544, WMP_F_FIBER }, |
801 | | | 801 | |
802 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER, | | 802 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER, |
803 | "Intel i82544GC 1000BASE-T Ethernet", | | 803 | "Intel i82544GC 1000BASE-T Ethernet", |
804 | WM_T_82544, WMP_F_COPPER }, | | 804 | WM_T_82544, WMP_F_COPPER }, |
805 | | | 805 | |
806 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM, | | 806 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM, |
807 | "Intel i82544GC (LOM) 1000BASE-T Ethernet", | | 807 | "Intel i82544GC (LOM) 1000BASE-T Ethernet", |
808 | WM_T_82544, WMP_F_COPPER }, | | 808 | WM_T_82544, WMP_F_COPPER }, |
809 | | | 809 | |
810 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM, | | 810 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM, |
811 | "Intel i82540EM 1000BASE-T Ethernet", | | 811 | "Intel i82540EM 1000BASE-T Ethernet", |
812 | WM_T_82540, WMP_F_COPPER }, | | 812 | WM_T_82540, WMP_F_COPPER }, |
813 | | | 813 | |
814 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM, | | 814 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM, |
815 | "Intel i82540EM (LOM) 1000BASE-T Ethernet", | | 815 | "Intel i82540EM (LOM) 1000BASE-T Ethernet", |
816 | WM_T_82540, WMP_F_COPPER }, | | 816 | WM_T_82540, WMP_F_COPPER }, |
817 | | | 817 | |
818 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM, | | 818 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM, |
819 | "Intel i82540EP 1000BASE-T Ethernet", | | 819 | "Intel i82540EP 1000BASE-T Ethernet", |
820 | WM_T_82540, WMP_F_COPPER }, | | 820 | WM_T_82540, WMP_F_COPPER }, |
821 | | | 821 | |
822 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP, | | 822 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP, |
823 | "Intel i82540EP 1000BASE-T Ethernet", | | 823 | "Intel i82540EP 1000BASE-T Ethernet", |
824 | WM_T_82540, WMP_F_COPPER }, | | 824 | WM_T_82540, WMP_F_COPPER }, |
825 | | | 825 | |
826 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP, | | 826 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP, |
827 | "Intel i82540EP 1000BASE-T Ethernet", | | 827 | "Intel i82540EP 1000BASE-T Ethernet", |
828 | WM_T_82540, WMP_F_COPPER }, | | 828 | WM_T_82540, WMP_F_COPPER }, |
829 | | | 829 | |
830 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER, | | 830 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER, |
831 | "Intel i82545EM 1000BASE-T Ethernet", | | 831 | "Intel i82545EM 1000BASE-T Ethernet", |
832 | WM_T_82545, WMP_F_COPPER }, | | 832 | WM_T_82545, WMP_F_COPPER }, |
833 | | | 833 | |
834 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER, | | 834 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER, |
835 | "Intel i82545GM 1000BASE-T Ethernet", | | 835 | "Intel i82545GM 1000BASE-T Ethernet", |
836 | WM_T_82545_3, WMP_F_COPPER }, | | 836 | WM_T_82545_3, WMP_F_COPPER }, |
837 | | | 837 | |
838 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER, | | 838 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER, |
839 | "Intel i82545GM 1000BASE-X Ethernet", | | 839 | "Intel i82545GM 1000BASE-X Ethernet", |
840 | WM_T_82545_3, WMP_F_FIBER }, | | 840 | WM_T_82545_3, WMP_F_FIBER }, |
841 | | | 841 | |
842 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES, | | 842 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES, |
843 | "Intel i82545GM Gigabit Ethernet (SERDES)", | | 843 | "Intel i82545GM Gigabit Ethernet (SERDES)", |
844 | WM_T_82545_3, WMP_F_SERDES }, | | 844 | WM_T_82545_3, WMP_F_SERDES }, |
845 | | | 845 | |
846 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER, | | 846 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER, |
847 | "Intel i82546EB 1000BASE-T Ethernet", | | 847 | "Intel i82546EB 1000BASE-T Ethernet", |
848 | WM_T_82546, WMP_F_COPPER }, | | 848 | WM_T_82546, WMP_F_COPPER }, |
849 | | | 849 | |
850 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD, | | 850 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD, |
851 | "Intel i82546EB 1000BASE-T Ethernet", | | 851 | "Intel i82546EB 1000BASE-T Ethernet", |
852 | WM_T_82546, WMP_F_COPPER }, | | 852 | WM_T_82546, WMP_F_COPPER }, |
853 | | | 853 | |
854 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER, | | 854 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER, |
855 | "Intel i82545EM 1000BASE-X Ethernet", | | 855 | "Intel i82545EM 1000BASE-X Ethernet", |
856 | WM_T_82545, WMP_F_FIBER }, | | 856 | WM_T_82545, WMP_F_FIBER }, |
857 | | | 857 | |
858 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER, | | 858 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER, |
859 | "Intel i82546EB 1000BASE-X Ethernet", | | 859 | "Intel i82546EB 1000BASE-X Ethernet", |
860 | WM_T_82546, WMP_F_FIBER }, | | 860 | WM_T_82546, WMP_F_FIBER }, |
861 | | | 861 | |
862 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER, | | 862 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER, |
863 | "Intel i82546GB 1000BASE-T Ethernet", | | 863 | "Intel i82546GB 1000BASE-T Ethernet", |
864 | WM_T_82546_3, WMP_F_COPPER }, | | 864 | WM_T_82546_3, WMP_F_COPPER }, |
865 | | | 865 | |
866 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER, | | 866 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER, |
867 | "Intel i82546GB 1000BASE-X Ethernet", | | 867 | "Intel i82546GB 1000BASE-X Ethernet", |
868 | WM_T_82546_3, WMP_F_FIBER }, | | 868 | WM_T_82546_3, WMP_F_FIBER }, |
869 | | | 869 | |
870 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES, | | 870 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES, |
871 | "Intel i82546GB Gigabit Ethernet (SERDES)", | | 871 | "Intel i82546GB Gigabit Ethernet (SERDES)", |
872 | WM_T_82546_3, WMP_F_SERDES }, | | 872 | WM_T_82546_3, WMP_F_SERDES }, |
873 | | | 873 | |
874 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER, | | 874 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER, |
875 | "i82546GB quad-port Gigabit Ethernet", | | 875 | "i82546GB quad-port Gigabit Ethernet", |
876 | WM_T_82546_3, WMP_F_COPPER }, | | 876 | WM_T_82546_3, WMP_F_COPPER }, |
877 | | | 877 | |
878 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3, | | 878 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3, |
879 | "i82546GB quad-port Gigabit Ethernet (KSP3)", | | 879 | "i82546GB quad-port Gigabit Ethernet (KSP3)", |
880 | WM_T_82546_3, WMP_F_COPPER }, | | 880 | WM_T_82546_3, WMP_F_COPPER }, |
881 | | | 881 | |
882 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE, | | 882 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE, |
883 | "Intel PRO/1000MT (82546GB)", | | 883 | "Intel PRO/1000MT (82546GB)", |
884 | WM_T_82546_3, WMP_F_COPPER }, | | 884 | WM_T_82546_3, WMP_F_COPPER }, |
885 | | | 885 | |
886 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI, | | 886 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI, |
887 | "Intel i82541EI 1000BASE-T Ethernet", | | 887 | "Intel i82541EI 1000BASE-T Ethernet", |
888 | WM_T_82541, WMP_F_COPPER }, | | 888 | WM_T_82541, WMP_F_COPPER }, |
889 | | | 889 | |
890 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM, | | 890 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM, |
891 | "Intel i82541ER (LOM) 1000BASE-T Ethernet", | | 891 | "Intel i82541ER (LOM) 1000BASE-T Ethernet", |
892 | WM_T_82541, WMP_F_COPPER }, | | 892 | WM_T_82541, WMP_F_COPPER }, |
893 | | | 893 | |
894 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE, | | 894 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE, |
895 | "Intel i82541EI Mobile 1000BASE-T Ethernet", | | 895 | "Intel i82541EI Mobile 1000BASE-T Ethernet", |
896 | WM_T_82541, WMP_F_COPPER }, | | 896 | WM_T_82541, WMP_F_COPPER }, |
897 | | | 897 | |
898 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER, | | 898 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER, |
899 | "Intel i82541ER 1000BASE-T Ethernet", | | 899 | "Intel i82541ER 1000BASE-T Ethernet", |
900 | WM_T_82541_2, WMP_F_COPPER }, | | 900 | WM_T_82541_2, WMP_F_COPPER }, |
901 | | | 901 | |
902 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI, | | 902 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI, |
903 | "Intel i82541GI 1000BASE-T Ethernet", | | 903 | "Intel i82541GI 1000BASE-T Ethernet", |
904 | WM_T_82541_2, WMP_F_COPPER }, | | 904 | WM_T_82541_2, WMP_F_COPPER }, |
905 | | | 905 | |
906 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE, | | 906 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE, |
907 | "Intel i82541GI Mobile 1000BASE-T Ethernet", | | 907 | "Intel i82541GI Mobile 1000BASE-T Ethernet", |
908 | WM_T_82541_2, WMP_F_COPPER }, | | 908 | WM_T_82541_2, WMP_F_COPPER }, |
909 | | | 909 | |
910 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI, | | 910 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI, |
911 | "Intel i82541PI 1000BASE-T Ethernet", | | 911 | "Intel i82541PI 1000BASE-T Ethernet", |
912 | WM_T_82541_2, WMP_F_COPPER }, | | 912 | WM_T_82541_2, WMP_F_COPPER }, |
913 | | | 913 | |
914 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI, | | 914 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI, |
915 | "Intel i82547EI 1000BASE-T Ethernet", | | 915 | "Intel i82547EI 1000BASE-T Ethernet", |
916 | WM_T_82547, WMP_F_COPPER }, | | 916 | WM_T_82547, WMP_F_COPPER }, |
917 | | | 917 | |
918 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE, | | 918 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE, |
919 | "Intel i82547EI Mobile 1000BASE-T Ethernet", | | 919 | "Intel i82547EI Mobile 1000BASE-T Ethernet", |
920 | WM_T_82547, WMP_F_COPPER }, | | 920 | WM_T_82547, WMP_F_COPPER }, |
921 | | | 921 | |
922 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI, | | 922 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI, |
923 | "Intel i82547GI 1000BASE-T Ethernet", | | 923 | "Intel i82547GI 1000BASE-T Ethernet", |
924 | WM_T_82547_2, WMP_F_COPPER }, | | 924 | WM_T_82547_2, WMP_F_COPPER }, |
925 | | | 925 | |
926 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER, | | 926 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER, |
927 | "Intel PRO/1000 PT (82571EB)", | | 927 | "Intel PRO/1000 PT (82571EB)", |
928 | WM_T_82571, WMP_F_COPPER }, | | 928 | WM_T_82571, WMP_F_COPPER }, |
929 | | | 929 | |
930 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER, | | 930 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER, |
931 | "Intel PRO/1000 PF (82571EB)", | | 931 | "Intel PRO/1000 PF (82571EB)", |
932 | WM_T_82571, WMP_F_FIBER }, | | 932 | WM_T_82571, WMP_F_FIBER }, |
933 | | | 933 | |
934 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES, | | 934 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES, |
935 | "Intel PRO/1000 PB (82571EB)", | | 935 | "Intel PRO/1000 PB (82571EB)", |
936 | WM_T_82571, WMP_F_SERDES }, | | 936 | WM_T_82571, WMP_F_SERDES }, |
937 | | | 937 | |
938 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER, | | 938 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER, |
939 | "Intel PRO/1000 QT (82571EB)", | | 939 | "Intel PRO/1000 QT (82571EB)", |
940 | WM_T_82571, WMP_F_COPPER }, | | 940 | WM_T_82571, WMP_F_COPPER }, |
941 | | | 941 | |
942 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER, | | 942 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER, |
943 | "Intel PRO/1000 PT Quad Port Server Adapter", | | 943 | "Intel PRO/1000 PT Quad Port Server Adapter", |
944 | WM_T_82571, WMP_F_COPPER, }, | | 944 | WM_T_82571, WMP_F_COPPER, }, |
945 | | | 945 | |
946 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER, | | 946 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER, |
947 | "Intel Gigabit PT Quad Port Server ExpressModule", | | 947 | "Intel Gigabit PT Quad Port Server ExpressModule", |
948 | WM_T_82571, WMP_F_COPPER, }, | | 948 | WM_T_82571, WMP_F_COPPER, }, |
949 | | | 949 | |
950 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES, | | 950 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES, |
951 | "Intel 82571EB Dual Gigabit Ethernet (SERDES)", | | 951 | "Intel 82571EB Dual Gigabit Ethernet (SERDES)", |
952 | WM_T_82571, WMP_F_SERDES, }, | | 952 | WM_T_82571, WMP_F_SERDES, }, |
953 | | | 953 | |
954 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES, | | 954 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES, |
955 | "Intel 82571EB Quad Gigabit Ethernet (SERDES)", | | 955 | "Intel 82571EB Quad Gigabit Ethernet (SERDES)", |
956 | WM_T_82571, WMP_F_SERDES, }, | | 956 | WM_T_82571, WMP_F_SERDES, }, |
957 | | | 957 | |
958 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER, | | 958 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER, |
959 | "Intel 82571EB Quad 1000baseX Ethernet", | | 959 | "Intel 82571EB Quad 1000baseX Ethernet", |
960 | WM_T_82571, WMP_F_FIBER, }, | | 960 | WM_T_82571, WMP_F_FIBER, }, |
961 | | | 961 | |
962 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER, | | 962 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER, |
963 | "Intel i82572EI 1000baseT Ethernet", | | 963 | "Intel i82572EI 1000baseT Ethernet", |
964 | WM_T_82572, WMP_F_COPPER }, | | 964 | WM_T_82572, WMP_F_COPPER }, |
965 | | | 965 | |
966 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER, | | 966 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER, |
967 | "Intel i82572EI 1000baseX Ethernet", | | 967 | "Intel i82572EI 1000baseX Ethernet", |
968 | WM_T_82572, WMP_F_FIBER }, | | 968 | WM_T_82572, WMP_F_FIBER }, |
969 | | | 969 | |
970 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES, | | 970 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES, |
971 | "Intel i82572EI Gigabit Ethernet (SERDES)", | | 971 | "Intel i82572EI Gigabit Ethernet (SERDES)", |
972 | WM_T_82572, WMP_F_SERDES }, | | 972 | WM_T_82572, WMP_F_SERDES }, |
973 | | | 973 | |
974 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI, | | 974 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI, |
975 | "Intel i82572EI 1000baseT Ethernet", | | 975 | "Intel i82572EI 1000baseT Ethernet", |
976 | WM_T_82572, WMP_F_COPPER }, | | 976 | WM_T_82572, WMP_F_COPPER }, |
977 | | | 977 | |
978 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E, | | 978 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E, |
979 | "Intel i82573E", | | 979 | "Intel i82573E", |
980 | WM_T_82573, WMP_F_COPPER }, | | 980 | WM_T_82573, WMP_F_COPPER }, |
981 | | | 981 | |
982 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT, | | 982 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT, |
983 | "Intel i82573E IAMT", | | 983 | "Intel i82573E IAMT", |
984 | WM_T_82573, WMP_F_COPPER }, | | 984 | WM_T_82573, WMP_F_COPPER }, |
985 | | | 985 | |
986 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L, | | 986 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L, |
987 | "Intel i82573L Gigabit Ethernet", | | 987 | "Intel i82573L Gigabit Ethernet", |
988 | WM_T_82573, WMP_F_COPPER }, | | 988 | WM_T_82573, WMP_F_COPPER }, |
989 | | | 989 | |
990 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L, | | 990 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L, |
991 | "Intel i82574L", | | 991 | "Intel i82574L", |
992 | WM_T_82574, WMP_F_COPPER }, | | 992 | WM_T_82574, WMP_F_COPPER }, |
993 | | | 993 | |
994 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574LA, | | 994 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574LA, |
995 | "Intel i82574L", | | 995 | "Intel i82574L", |
996 | WM_T_82574, WMP_F_COPPER }, | | 996 | WM_T_82574, WMP_F_COPPER }, |
997 | | | 997 | |
998 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V, | | 998 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V, |
999 | "Intel i82583V", | | 999 | "Intel i82583V", |
1000 | WM_T_82583, WMP_F_COPPER }, | | 1000 | WM_T_82583, WMP_F_COPPER }, |
1001 | | | 1001 | |
1002 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT, | | 1002 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT, |
1003 | "i80003 dual 1000baseT Ethernet", | | 1003 | "i80003 dual 1000baseT Ethernet", |
1004 | WM_T_80003, WMP_F_COPPER }, | | 1004 | WM_T_80003, WMP_F_COPPER }, |
1005 | | | 1005 | |
1006 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT, | | 1006 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT, |
1007 | "i80003 dual 1000baseX Ethernet", | | 1007 | "i80003 dual 1000baseX Ethernet", |
1008 | WM_T_80003, WMP_F_COPPER }, | | 1008 | WM_T_80003, WMP_F_COPPER }, |
1009 | | | 1009 | |
1010 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT, | | 1010 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT, |
1011 | "Intel i80003ES2 dual Gigabit Ethernet (SERDES)", | | 1011 | "Intel i80003ES2 dual Gigabit Ethernet (SERDES)", |
1012 | WM_T_80003, WMP_F_SERDES }, | | 1012 | WM_T_80003, WMP_F_SERDES }, |
1013 | | | 1013 | |
1014 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT, | | 1014 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT, |
1015 | "Intel i80003 1000baseT Ethernet", | | 1015 | "Intel i80003 1000baseT Ethernet", |
1016 | WM_T_80003, WMP_F_COPPER }, | | 1016 | WM_T_80003, WMP_F_COPPER }, |
1017 | | | 1017 | |
1018 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT, | | 1018 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT, |
1019 | "Intel i80003 Gigabit Ethernet (SERDES)", | | 1019 | "Intel i80003 Gigabit Ethernet (SERDES)", |
1020 | WM_T_80003, WMP_F_SERDES }, | | 1020 | WM_T_80003, WMP_F_SERDES }, |
1021 | | | 1021 | |
1022 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_AMT, | | 1022 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_AMT, |
1023 | "Intel i82801H (M_AMT) LAN Controller", | | 1023 | "Intel i82801H (M_AMT) LAN Controller", |
1024 | WM_T_ICH8, WMP_F_COPPER }, | | 1024 | WM_T_ICH8, WMP_F_COPPER }, |
1025 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_AMT, | | 1025 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_AMT, |
1026 | "Intel i82801H (AMT) LAN Controller", | | 1026 | "Intel i82801H (AMT) LAN Controller", |
1027 | WM_T_ICH8, WMP_F_COPPER }, | | 1027 | WM_T_ICH8, WMP_F_COPPER }, |
1028 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_LAN, | | 1028 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_LAN, |
1029 | "Intel i82801H LAN Controller", | | 1029 | "Intel i82801H LAN Controller", |
1030 | WM_T_ICH8, WMP_F_COPPER }, | | 1030 | WM_T_ICH8, WMP_F_COPPER }, |
1031 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_LAN, | | 1031 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_LAN, |
1032 | "Intel i82801H (IFE) LAN Controller", | | 1032 | "Intel i82801H (IFE) LAN Controller", |
1033 | WM_T_ICH8, WMP_F_COPPER }, | | 1033 | WM_T_ICH8, WMP_F_COPPER }, |
1034 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_LAN, | | 1034 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_LAN, |
1035 | "Intel i82801H (M) LAN Controller", | | 1035 | "Intel i82801H (M) LAN Controller", |
1036 | WM_T_ICH8, WMP_F_COPPER }, | | 1036 | WM_T_ICH8, WMP_F_COPPER }, |
1037 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_GT, | | 1037 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_GT, |
1038 | "Intel i82801H IFE (GT) LAN Controller", | | 1038 | "Intel i82801H IFE (GT) LAN Controller", |
1039 | WM_T_ICH8, WMP_F_COPPER }, | | 1039 | WM_T_ICH8, WMP_F_COPPER }, |
1040 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_G, | | 1040 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_G, |
1041 | "Intel i82801H IFE (G) LAN Controller", | | 1041 | "Intel i82801H IFE (G) LAN Controller", |
1042 | WM_T_ICH8, WMP_F_COPPER }, | | 1042 | WM_T_ICH8, WMP_F_COPPER }, |
1043 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_AMT, | | 1043 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_AMT, |
1044 | "82801I (AMT) LAN Controller", | | 1044 | "82801I (AMT) LAN Controller", |
1045 | WM_T_ICH9, WMP_F_COPPER }, | | 1045 | WM_T_ICH9, WMP_F_COPPER }, |
1046 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE, | | 1046 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE, |
1047 | "82801I LAN Controller", | | 1047 | "82801I LAN Controller", |
1048 | WM_T_ICH9, WMP_F_COPPER }, | | 1048 | WM_T_ICH9, WMP_F_COPPER }, |
1049 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_G, | | 1049 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_G, |
1050 | "82801I (G) LAN Controller", | | 1050 | "82801I (G) LAN Controller", |
1051 | WM_T_ICH9, WMP_F_COPPER }, | | 1051 | WM_T_ICH9, WMP_F_COPPER }, |
1052 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_GT, | | 1052 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_GT, |
1053 | "82801I (GT) LAN Controller", | | 1053 | "82801I (GT) LAN Controller", |
1054 | WM_T_ICH9, WMP_F_COPPER }, | | 1054 | WM_T_ICH9, WMP_F_COPPER }, |
1055 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_C, | | 1055 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_C, |
1056 | "82801I (C) LAN Controller", | | 1056 | "82801I (C) LAN Controller", |
1057 | WM_T_ICH9, WMP_F_COPPER }, | | 1057 | WM_T_ICH9, WMP_F_COPPER }, |
1058 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M, | | 1058 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M, |
1059 | "82801I mobile LAN Controller", | | 1059 | "82801I mobile LAN Controller", |
1060 | WM_T_ICH9, WMP_F_COPPER }, | | 1060 | WM_T_ICH9, WMP_F_COPPER }, |
1061 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IGP_M_V, | | 1061 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IGP_M_V, |
1062 | "82801I mobile (V) LAN Controller", | | 1062 | "82801I mobile (V) LAN Controller", |
1063 | WM_T_ICH9, WMP_F_COPPER }, | | 1063 | WM_T_ICH9, WMP_F_COPPER }, |
1064 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_AMT, | | 1064 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_AMT, |
1065 | "82801I mobile (AMT) LAN Controller", | | 1065 | "82801I mobile (AMT) LAN Controller", |
1066 | WM_T_ICH9, WMP_F_COPPER }, | | 1066 | WM_T_ICH9, WMP_F_COPPER }, |
1067 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_BM, | | 1067 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_BM, |
1068 | "82567LM-4 LAN Controller", | | 1068 | "82567LM-4 LAN Controller", |
1069 | WM_T_ICH9, WMP_F_COPPER }, | | 1069 | WM_T_ICH9, WMP_F_COPPER }, |
1070 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_82567V_3, | | 1070 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_82567V_3, |
1071 | "82567V-3 LAN Controller", | | 1071 | "82567V-3 LAN Controller", |
1072 | WM_T_ICH9, WMP_F_COPPER }, | | 1072 | WM_T_ICH9, WMP_F_COPPER }, |
1073 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LM, | | 1073 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LM, |
1074 | "82567LM-2 LAN Controller", | | 1074 | "82567LM-2 LAN Controller", |
1075 | WM_T_ICH10, WMP_F_COPPER }, | | 1075 | WM_T_ICH10, WMP_F_COPPER }, |
1076 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LF, | | 1076 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LF, |
1077 | "82567LF-2 LAN Controller", | | 1077 | "82567LF-2 LAN Controller", |
1078 | WM_T_ICH10, WMP_F_COPPER }, | | 1078 | WM_T_ICH10, WMP_F_COPPER }, |
1079 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LM, | | 1079 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LM, |
1080 | "82567LM-3 LAN Controller", | | 1080 | "82567LM-3 LAN Controller", |
1081 | WM_T_ICH10, WMP_F_COPPER }, | | 1081 | WM_T_ICH10, WMP_F_COPPER }, |
1082 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LF, | | 1082 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LF, |
1083 | "82567LF-3 LAN Controller", | | 1083 | "82567LF-3 LAN Controller", |
1084 | WM_T_ICH10, WMP_F_COPPER }, | | 1084 | WM_T_ICH10, WMP_F_COPPER }, |
1085 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_V, | | 1085 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_V, |
| @@ -3050,2153 +3050,2154 @@ wm_mchash(struct wm_softc *sc, const uin | | | @@ -3050,2153 +3050,2154 @@ wm_mchash(struct wm_softc *sc, const uin |
3050 | hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) | | | 3050 | hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) | |
3051 | (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]); | | 3051 | (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]); |
3052 | return (hash & 0x3ff); | | 3052 | return (hash & 0x3ff); |
3053 | } | | 3053 | } |
3054 | hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) | | | 3054 | hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) | |
3055 | (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]); | | 3055 | (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]); |
3056 | | | 3056 | |
3057 | return (hash & 0xfff); | | 3057 | return (hash & 0xfff); |
3058 | } | | 3058 | } |
3059 | | | 3059 | |
3060 | /* | | 3060 | /* |
3061 | * wm_set_filter: | | 3061 | * wm_set_filter: |
3062 | * | | 3062 | * |
3063 | * Set up the receive filter. | | 3063 | * Set up the receive filter. |
3064 | */ | | 3064 | */ |
3065 | static void | | 3065 | static void |
3066 | wm_set_filter(struct wm_softc *sc) | | 3066 | wm_set_filter(struct wm_softc *sc) |
3067 | { | | 3067 | { |
3068 | struct ethercom *ec = &sc->sc_ethercom; | | 3068 | struct ethercom *ec = &sc->sc_ethercom; |
3069 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; | | 3069 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
3070 | struct ether_multi *enm; | | 3070 | struct ether_multi *enm; |
3071 | struct ether_multistep step; | | 3071 | struct ether_multistep step; |
3072 | bus_addr_t mta_reg; | | 3072 | bus_addr_t mta_reg; |
3073 | uint32_t hash, reg, bit; | | 3073 | uint32_t hash, reg, bit; |
3074 | int i, size; | | 3074 | int i, size; |
3075 | | | 3075 | |
3076 | if (sc->sc_type >= WM_T_82544) | | 3076 | if (sc->sc_type >= WM_T_82544) |
3077 | mta_reg = WMREG_CORDOVA_MTA; | | 3077 | mta_reg = WMREG_CORDOVA_MTA; |
3078 | else | | 3078 | else |
3079 | mta_reg = WMREG_MTA; | | 3079 | mta_reg = WMREG_MTA; |
3080 | | | 3080 | |
3081 | sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE); | | 3081 | sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE); |
3082 | | | 3082 | |
3083 | if (ifp->if_flags & IFF_BROADCAST) | | 3083 | if (ifp->if_flags & IFF_BROADCAST) |
3084 | sc->sc_rctl |= RCTL_BAM; | | 3084 | sc->sc_rctl |= RCTL_BAM; |
3085 | if (ifp->if_flags & IFF_PROMISC) { | | 3085 | if (ifp->if_flags & IFF_PROMISC) { |
3086 | sc->sc_rctl |= RCTL_UPE; | | 3086 | sc->sc_rctl |= RCTL_UPE; |
3087 | goto allmulti; | | 3087 | goto allmulti; |
3088 | } | | 3088 | } |
3089 | | | 3089 | |
3090 | /* | | 3090 | /* |
3091 | * Set the station address in the first RAL slot, and | | 3091 | * Set the station address in the first RAL slot, and |
3092 | * clear the remaining slots. | | 3092 | * clear the remaining slots. |
3093 | */ | | 3093 | */ |
3094 | if (sc->sc_type == WM_T_ICH8) | | 3094 | if (sc->sc_type == WM_T_ICH8) |
3095 | size = WM_RAL_TABSIZE_ICH8 -1; | | 3095 | size = WM_RAL_TABSIZE_ICH8 -1; |
3096 | else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10) | | 3096 | else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10) |
3097 | || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2) | | 3097 | || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2) |
3098 | || (sc->sc_type == WM_T_PCH_LPT)) | | 3098 | || (sc->sc_type == WM_T_PCH_LPT)) |
3099 | size = WM_RAL_TABSIZE_ICH8; | | 3099 | size = WM_RAL_TABSIZE_ICH8; |
3100 | else if (sc->sc_type == WM_T_82575) | | 3100 | else if (sc->sc_type == WM_T_82575) |
3101 | size = WM_RAL_TABSIZE_82575; | | 3101 | size = WM_RAL_TABSIZE_82575; |
3102 | else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580)) | | 3102 | else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580)) |
3103 | size = WM_RAL_TABSIZE_82576; | | 3103 | size = WM_RAL_TABSIZE_82576; |
3104 | else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) | | 3104 | else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) |
3105 | size = WM_RAL_TABSIZE_I350; | | 3105 | size = WM_RAL_TABSIZE_I350; |
3106 | else | | 3106 | else |
3107 | size = WM_RAL_TABSIZE; | | 3107 | size = WM_RAL_TABSIZE; |
3108 | wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0); | | 3108 | wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0); |
3109 | for (i = 1; i < size; i++) | | 3109 | for (i = 1; i < size; i++) |
3110 | wm_set_ral(sc, NULL, i); | | 3110 | wm_set_ral(sc, NULL, i); |
3111 | | | 3111 | |
3112 | if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) | | 3112 | if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) |
3113 | || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) | | 3113 | || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) |
3114 | || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) | | 3114 | || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) |
3115 | size = WM_ICH8_MC_TABSIZE; | | 3115 | size = WM_ICH8_MC_TABSIZE; |
3116 | else | | 3116 | else |
3117 | size = WM_MC_TABSIZE; | | 3117 | size = WM_MC_TABSIZE; |
3118 | /* Clear out the multicast table. */ | | 3118 | /* Clear out the multicast table. */ |
3119 | for (i = 0; i < size; i++) | | 3119 | for (i = 0; i < size; i++) |
3120 | CSR_WRITE(sc, mta_reg + (i << 2), 0); | | 3120 | CSR_WRITE(sc, mta_reg + (i << 2), 0); |
3121 | | | 3121 | |
3122 | ETHER_FIRST_MULTI(step, ec, enm); | | 3122 | ETHER_FIRST_MULTI(step, ec, enm); |
3123 | while (enm != NULL) { | | 3123 | while (enm != NULL) { |
3124 | if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { | | 3124 | if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { |
3125 | /* | | 3125 | /* |
3126 | * We must listen to a range of multicast addresses. | | 3126 | * We must listen to a range of multicast addresses. |
3127 | * For now, just accept all multicasts, rather than | | 3127 | * For now, just accept all multicasts, rather than |
3128 | * trying to set only those filter bits needed to match | | 3128 | * trying to set only those filter bits needed to match |
3129 | * the range. (At this time, the only use of address | | 3129 | * the range. (At this time, the only use of address |
3130 | * ranges is for IP multicast routing, for which the | | 3130 | * ranges is for IP multicast routing, for which the |
3131 | * range is big enough to require all bits set.) | | 3131 | * range is big enough to require all bits set.) |
3132 | */ | | 3132 | */ |
3133 | goto allmulti; | | 3133 | goto allmulti; |
3134 | } | | 3134 | } |
3135 | | | 3135 | |
3136 | hash = wm_mchash(sc, enm->enm_addrlo); | | 3136 | hash = wm_mchash(sc, enm->enm_addrlo); |
3137 | | | 3137 | |
3138 | reg = (hash >> 5); | | 3138 | reg = (hash >> 5); |
3139 | if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) | | 3139 | if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) |
3140 | || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) | | 3140 | || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) |
3141 | || (sc->sc_type == WM_T_PCH2) | | 3141 | || (sc->sc_type == WM_T_PCH2) |
3142 | || (sc->sc_type == WM_T_PCH_LPT)) | | 3142 | || (sc->sc_type == WM_T_PCH_LPT)) |
3143 | reg &= 0x1f; | | 3143 | reg &= 0x1f; |
3144 | else | | 3144 | else |
3145 | reg &= 0x7f; | | 3145 | reg &= 0x7f; |
3146 | bit = hash & 0x1f; | | 3146 | bit = hash & 0x1f; |
3147 | | | 3147 | |
3148 | hash = CSR_READ(sc, mta_reg + (reg << 2)); | | 3148 | hash = CSR_READ(sc, mta_reg + (reg << 2)); |
3149 | hash |= 1U << bit; | | 3149 | hash |= 1U << bit; |
3150 | | | 3150 | |
3151 | /* XXX Hardware bug?? */ | | 3151 | /* XXX Hardware bug?? */ |
3152 | if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) { | | 3152 | if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) { |
3153 | bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2)); | | 3153 | bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2)); |
3154 | CSR_WRITE(sc, mta_reg + (reg << 2), hash); | | 3154 | CSR_WRITE(sc, mta_reg + (reg << 2), hash); |
3155 | CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit); | | 3155 | CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit); |
3156 | } else | | 3156 | } else |
3157 | CSR_WRITE(sc, mta_reg + (reg << 2), hash); | | 3157 | CSR_WRITE(sc, mta_reg + (reg << 2), hash); |
3158 | | | 3158 | |
3159 | ETHER_NEXT_MULTI(step, enm); | | 3159 | ETHER_NEXT_MULTI(step, enm); |
3160 | } | | 3160 | } |
3161 | | | 3161 | |
3162 | ifp->if_flags &= ~IFF_ALLMULTI; | | 3162 | ifp->if_flags &= ~IFF_ALLMULTI; |
3163 | goto setit; | | 3163 | goto setit; |
3164 | | | 3164 | |
3165 | allmulti: | | 3165 | allmulti: |
3166 | ifp->if_flags |= IFF_ALLMULTI; | | 3166 | ifp->if_flags |= IFF_ALLMULTI; |
3167 | sc->sc_rctl |= RCTL_MPE; | | 3167 | sc->sc_rctl |= RCTL_MPE; |
3168 | | | 3168 | |
3169 | setit: | | 3169 | setit: |
3170 | CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl); | | 3170 | CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl); |
3171 | } | | 3171 | } |
3172 | | | 3172 | |
3173 | /* Reset and init related */ | | 3173 | /* Reset and init related */ |
3174 | | | 3174 | |
3175 | static void | | 3175 | static void |
3176 | wm_set_vlan(struct wm_softc *sc) | | 3176 | wm_set_vlan(struct wm_softc *sc) |
3177 | { | | 3177 | { |
3178 | /* Deal with VLAN enables. */ | | 3178 | /* Deal with VLAN enables. */ |
3179 | if (VLAN_ATTACHED(&sc->sc_ethercom)) | | 3179 | if (VLAN_ATTACHED(&sc->sc_ethercom)) |
3180 | sc->sc_ctrl |= CTRL_VME; | | 3180 | sc->sc_ctrl |= CTRL_VME; |
3181 | else | | 3181 | else |
3182 | sc->sc_ctrl &= ~CTRL_VME; | | 3182 | sc->sc_ctrl &= ~CTRL_VME; |
3183 | | | 3183 | |
3184 | /* Write the control registers. */ | | 3184 | /* Write the control registers. */ |
3185 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); | | 3185 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); |
3186 | } | | 3186 | } |
3187 | | | 3187 | |
3188 | static void | | 3188 | static void |
3189 | wm_set_pcie_completion_timeout(struct wm_softc *sc) | | 3189 | wm_set_pcie_completion_timeout(struct wm_softc *sc) |
3190 | { | | 3190 | { |
3191 | uint32_t gcr; | | 3191 | uint32_t gcr; |
3192 | pcireg_t ctrl2; | | 3192 | pcireg_t ctrl2; |
3193 | | | 3193 | |
3194 | gcr = CSR_READ(sc, WMREG_GCR); | | 3194 | gcr = CSR_READ(sc, WMREG_GCR); |
3195 | | | 3195 | |
3196 | /* Only take action if timeout value is defaulted to 0 */ | | 3196 | /* Only take action if timeout value is defaulted to 0 */ |
3197 | if ((gcr & GCR_CMPL_TMOUT_MASK) != 0) | | 3197 | if ((gcr & GCR_CMPL_TMOUT_MASK) != 0) |
3198 | goto out; | | 3198 | goto out; |
3199 | | | 3199 | |
3200 | if ((gcr & GCR_CAP_VER2) == 0) { | | 3200 | if ((gcr & GCR_CAP_VER2) == 0) { |
3201 | gcr |= GCR_CMPL_TMOUT_10MS; | | 3201 | gcr |= GCR_CMPL_TMOUT_10MS; |
3202 | goto out; | | 3202 | goto out; |
3203 | } | | 3203 | } |
3204 | | | 3204 | |
3205 | ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag, | | 3205 | ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag, |
3206 | sc->sc_pcixe_capoff + PCIE_DCSR2); | | 3206 | sc->sc_pcixe_capoff + PCIE_DCSR2); |
3207 | ctrl2 |= WM_PCIE_DCSR2_16MS; | | 3207 | ctrl2 |= WM_PCIE_DCSR2_16MS; |
3208 | pci_conf_write(sc->sc_pc, sc->sc_pcitag, | | 3208 | pci_conf_write(sc->sc_pc, sc->sc_pcitag, |
3209 | sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2); | | 3209 | sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2); |
3210 | | | 3210 | |
3211 | out: | | 3211 | out: |
3212 | /* Disable completion timeout resend */ | | 3212 | /* Disable completion timeout resend */ |
3213 | gcr &= ~GCR_CMPL_TMOUT_RESEND; | | 3213 | gcr &= ~GCR_CMPL_TMOUT_RESEND; |
3214 | | | 3214 | |
3215 | CSR_WRITE(sc, WMREG_GCR, gcr); | | 3215 | CSR_WRITE(sc, WMREG_GCR, gcr); |
3216 | } | | 3216 | } |
3217 | | | 3217 | |
3218 | void | | 3218 | void |
3219 | wm_get_auto_rd_done(struct wm_softc *sc) | | 3219 | wm_get_auto_rd_done(struct wm_softc *sc) |
3220 | { | | 3220 | { |
3221 | int i; | | 3221 | int i; |
3222 | | | 3222 | |
3223 | /* wait for eeprom to reload */ | | 3223 | /* wait for eeprom to reload */ |
3224 | switch (sc->sc_type) { | | 3224 | switch (sc->sc_type) { |
3225 | case WM_T_82571: | | 3225 | case WM_T_82571: |
3226 | case WM_T_82572: | | 3226 | case WM_T_82572: |
3227 | case WM_T_82573: | | 3227 | case WM_T_82573: |
3228 | case WM_T_82574: | | 3228 | case WM_T_82574: |
3229 | case WM_T_82583: | | 3229 | case WM_T_82583: |
3230 | case WM_T_82575: | | 3230 | case WM_T_82575: |
3231 | case WM_T_82576: | | 3231 | case WM_T_82576: |
3232 | case WM_T_82580: | | 3232 | case WM_T_82580: |
3233 | case WM_T_I350: | | 3233 | case WM_T_I350: |
3234 | case WM_T_I354: | | 3234 | case WM_T_I354: |
3235 | case WM_T_I210: | | 3235 | case WM_T_I210: |
3236 | case WM_T_I211: | | 3236 | case WM_T_I211: |
3237 | case WM_T_80003: | | 3237 | case WM_T_80003: |
3238 | case WM_T_ICH8: | | 3238 | case WM_T_ICH8: |
3239 | case WM_T_ICH9: | | 3239 | case WM_T_ICH9: |
3240 | for (i = 0; i < 10; i++) { | | 3240 | for (i = 0; i < 10; i++) { |
3241 | if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD) | | 3241 | if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD) |
3242 | break; | | 3242 | break; |
3243 | delay(1000); | | 3243 | delay(1000); |
3244 | } | | 3244 | } |
3245 | if (i == 10) { | | 3245 | if (i == 10) { |
3246 | log(LOG_ERR, "%s: auto read from eeprom failed to " | | 3246 | log(LOG_ERR, "%s: auto read from eeprom failed to " |
3247 | "complete\n", device_xname(sc->sc_dev)); | | 3247 | "complete\n", device_xname(sc->sc_dev)); |
3248 | } | | 3248 | } |
3249 | break; | | 3249 | break; |
3250 | default: | | 3250 | default: |
3251 | break; | | 3251 | break; |
3252 | } | | 3252 | } |
3253 | } | | 3253 | } |
3254 | | | 3254 | |
3255 | void | | 3255 | void |
3256 | wm_lan_init_done(struct wm_softc *sc) | | 3256 | wm_lan_init_done(struct wm_softc *sc) |
3257 | { | | 3257 | { |
3258 | uint32_t reg = 0; | | 3258 | uint32_t reg = 0; |
3259 | int i; | | 3259 | int i; |
3260 | | | 3260 | |
3261 | /* wait for eeprom to reload */ | | 3261 | /* wait for eeprom to reload */ |
3262 | switch (sc->sc_type) { | | 3262 | switch (sc->sc_type) { |
3263 | case WM_T_ICH10: | | 3263 | case WM_T_ICH10: |
3264 | case WM_T_PCH: | | 3264 | case WM_T_PCH: |
3265 | case WM_T_PCH2: | | 3265 | case WM_T_PCH2: |
3266 | case WM_T_PCH_LPT: | | 3266 | case WM_T_PCH_LPT: |
3267 | for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) { | | 3267 | for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) { |
3268 | reg = CSR_READ(sc, WMREG_STATUS); | | 3268 | reg = CSR_READ(sc, WMREG_STATUS); |
3269 | if ((reg & STATUS_LAN_INIT_DONE) != 0) | | 3269 | if ((reg & STATUS_LAN_INIT_DONE) != 0) |
3270 | break; | | 3270 | break; |
3271 | delay(100); | | 3271 | delay(100); |
3272 | } | | 3272 | } |
3273 | if (i >= WM_ICH8_LAN_INIT_TIMEOUT) { | | 3273 | if (i >= WM_ICH8_LAN_INIT_TIMEOUT) { |
3274 | log(LOG_ERR, "%s: %s: lan_init_done failed to " | | 3274 | log(LOG_ERR, "%s: %s: lan_init_done failed to " |
3275 | "complete\n", device_xname(sc->sc_dev), __func__); | | 3275 | "complete\n", device_xname(sc->sc_dev), __func__); |
3276 | } | | 3276 | } |
3277 | break; | | 3277 | break; |
3278 | default: | | 3278 | default: |
3279 | panic("%s: %s: unknown type\n", device_xname(sc->sc_dev), | | 3279 | panic("%s: %s: unknown type\n", device_xname(sc->sc_dev), |
3280 | __func__); | | 3280 | __func__); |
3281 | break; | | 3281 | break; |
3282 | } | | 3282 | } |
3283 | | | 3283 | |
3284 | reg &= ~STATUS_LAN_INIT_DONE; | | 3284 | reg &= ~STATUS_LAN_INIT_DONE; |
3285 | CSR_WRITE(sc, WMREG_STATUS, reg); | | 3285 | CSR_WRITE(sc, WMREG_STATUS, reg); |
3286 | } | | 3286 | } |
3287 | | | 3287 | |
/*
 * wm_get_cfg_done:
 *
 *	Wait for the post-reset configuration cycle (EEPROM auto-load
 *	and PHY configuration) to complete.  The wait strategy is
 *	chip-dependent: nothing, a fixed delay, or polling a register.
 */
void
wm_get_cfg_done(struct wm_softc *sc)
{
	int mask;
	uint32_t reg;
	int i;

	/* wait for eeprom to reload */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
		/* null */
		break;
	case WM_T_82543:
	case WM_T_82544:
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		/* generic: these parts get a fixed 10ms settle delay */
		delay(10*1000);
		break;
	case WM_T_80003:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_I350:
	case WM_T_I354:
	case WM_T_I210:
	case WM_T_I211:
		/*
		 * These parts expose a per-function CFGDONE bit in
		 * EEMNGCTL; poll it (1ms per try) until set or timeout.
		 */
		if (sc->sc_type == WM_T_82571) {
			/* Only 82571 shares port 0 */
			mask = EEMNGCTL_CFGDONE_0;
		} else
			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
				break;
			delay(1000);
		}
		if (i >= WM_PHY_CFG_TIMEOUT) {
			/* Timed out; only worth a debug message. */
			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
			    device_xname(sc->sc_dev), __func__));
		}
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		delay(10*1000);
		/* ICH10 and newer signal via STATUS_LAN_INIT_DONE. */
		if (sc->sc_type >= WM_T_ICH10)
			wm_lan_init_done(sc);
		else
			wm_get_auto_rd_done(sc);

		/* Clear the PHYRA (PHY reset asserted) bit if it is set. */
		reg = CSR_READ(sc, WMREG_STATUS);
		if ((reg & STATUS_PHYRA) != 0)
			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
		break;
	default:
		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
		    __func__);
		break;
	}
}
3365 | | | 3365 | |
3366 | /* Init hardware bits */ | | 3366 | /* Init hardware bits */ |
/*
 * wm_initialize_hardware_bits:
 *
 *	Apply chip-specific register initialization and errata
 *	workarounds (TXDCTL, TARC0/TARC1, CTRL_EXT, GCR, RFCTL)
 *	for the 82571 family, 80003 and the ICH/PCH parts.
 *	Older chips are left untouched.
 */
void
wm_initialize_hardware_bits(struct wm_softc *sc)
{
	uint32_t tarc0, tarc1, reg;

	/* For 82571 variant, 80003 and ICHs */
	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
	    || (sc->sc_type >= WM_T_80003)) {

		/* Transmit Descriptor Control 0 */
		reg = CSR_READ(sc, WMREG_TXDCTL(0));
		reg |= TXDCTL_COUNT_DESC;
		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);

		/* Transmit Descriptor Control 1 */
		reg = CSR_READ(sc, WMREG_TXDCTL(1));
		reg |= TXDCTL_COUNT_DESC;
		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);

		/* TARC0 (modified below per chip; written back at the end) */
		tarc0 = CSR_READ(sc, WMREG_TARC0);
		switch (sc->sc_type) {
		case WM_T_82571:
		case WM_T_82572:
		case WM_T_82573:
		case WM_T_82574:
		case WM_T_82583:
		case WM_T_80003:
			/* Clear bits 30..27 */
			tarc0 &= ~__BITS(30, 27);
			break;
		default:
			break;
		}

		switch (sc->sc_type) {
		case WM_T_82571:
		case WM_T_82572:
			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */

			tarc1 = CSR_READ(sc, WMREG_TARC1);
			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
			/* 8257[12] Errata No.7 */
			tarc1 |= __BIT(22); /* TARC1 bit 22 */

			/* TARC1 bit 28: depends on multiple-request support */
			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
				tarc1 &= ~__BIT(28);
			else
				tarc1 |= __BIT(28);
			CSR_WRITE(sc, WMREG_TARC1, tarc1);

			/*
			 * 8257[12] Errata No.13
			 * Disable Dynamic Clock Gating.
			 */
			reg = CSR_READ(sc, WMREG_CTRL_EXT);
			reg &= ~CTRL_EXT_DMA_DYN_CLK;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
			break;
		case WM_T_82573:
		case WM_T_82574:
		case WM_T_82583:
			if ((sc->sc_type == WM_T_82574)
			    || (sc->sc_type == WM_T_82583))
				tarc0 |= __BIT(26); /* TARC0 bit 26 */

			/* Extended Device Control */
			reg = CSR_READ(sc, WMREG_CTRL_EXT);
			reg &= ~__BIT(23);	/* Clear bit 23 */
			reg |= __BIT(22);	/* Set bit 22 */
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

			/* Device Control */
			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

			/* PCIe Control Register */
			/*
			 * 82573 Errata (unknown).
			 *
			 * 82574 Errata 25 and 82583 Errata 12
			 * "Dropped Rx Packets":
			 *   NVM Image Version 2.1.4 and newer does not
			 *   have this bug.
			 */
			reg = CSR_READ(sc, WMREG_GCR);
			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
			CSR_WRITE(sc, WMREG_GCR, reg);

			if ((sc->sc_type == WM_T_82574)
			    || (sc->sc_type == WM_T_82583)) {
				/*
				 * Document says this bit must be set for
				 * proper operation.
				 */
				reg = CSR_READ(sc, WMREG_GCR);
				reg |= __BIT(22);
				CSR_WRITE(sc, WMREG_GCR, reg);

				/*
				 * Apply workaround for hardware errata
				 * documented in errata docs.  Fixes issue
				 * where some error prone or unreliable PCIe
				 * completions are occurring, particularly
				 * with ASPM enabled.  Without fix, issue can
				 * cause Tx timeouts.
				 */
				reg = CSR_READ(sc, WMREG_GCR2);
				reg |= __BIT(0);
				CSR_WRITE(sc, WMREG_GCR2, reg);
			}
			break;
		case WM_T_80003:
			/* TARC0 */
			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
				tarc0 &= ~__BIT(20); /* Clear bit 20 */

			/* TARC1 bit 28: depends on multiple-request support */
			tarc1 = CSR_READ(sc, WMREG_TARC1);
			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
				tarc1 &= ~__BIT(28);
			else
				tarc1 |= __BIT(28);
			CSR_WRITE(sc, WMREG_TARC1, tarc1);
			break;
		case WM_T_ICH8:
		case WM_T_ICH9:
		case WM_T_ICH10:
		case WM_T_PCH:
		case WM_T_PCH2:
		case WM_T_PCH_LPT:
			/* TARC 0 */
			if (sc->sc_type == WM_T_ICH8) {
				/* Set TARC0 bits 29 and 28 */
				tarc0 |= __BITS(29, 28);
			}
			/* Set TARC0 bits 23,24,26,27 */
			tarc0 |= __BITS(27, 26) | __BITS(24, 23);

			/* CTRL_EXT */
			reg = CSR_READ(sc, WMREG_CTRL_EXT);
			reg |= __BIT(22);	/* Set bit 22 */
			/*
			 * Enable PHY low-power state when MAC is at D3
			 * w/o WoL
			 */
			if (sc->sc_type >= WM_T_PCH)
				reg |= CTRL_EXT_PHYPDEN;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

			/* TARC1 */
			tarc1 = CSR_READ(sc, WMREG_TARC1);
			/* bit 28: depends on multiple-request support */
			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
				tarc1 &= ~__BIT(28);
			else
				tarc1 |= __BIT(28);
			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
			CSR_WRITE(sc, WMREG_TARC1, tarc1);

			/* Device Status */
			if (sc->sc_type == WM_T_ICH8) {
				reg = CSR_READ(sc, WMREG_STATUS);
				reg &= ~__BIT(31);
				CSR_WRITE(sc, WMREG_STATUS, reg);

			}

			/*
			 * Work-around descriptor data corruption issue during
			 * NFS v2 UDP traffic, just disable the NFS filtering
			 * capability.
			 */
			reg = CSR_READ(sc, WMREG_RFCTL);
			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
			CSR_WRITE(sc, WMREG_RFCTL, reg);
			break;
		default:
			break;
		}
		/* Write back the (possibly modified) TARC0 value. */
		CSR_WRITE(sc, WMREG_TARC0, tarc0);

		/*
		 * 8257[12] Errata No.52 and some others.
		 * Avoid RSS Hash Value bug.
		 */
		switch (sc->sc_type) {
		case WM_T_82571:
		case WM_T_82572:
		case WM_T_82573:
		case WM_T_80003:
		case WM_T_ICH8:
			reg = CSR_READ(sc, WMREG_RFCTL);
			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
			CSR_WRITE(sc, WMREG_RFCTL, reg);
			break;
		default:
			break;
		}
	}
}
3570 | | | 3570 | |
3571 | static uint32_t | | 3571 | static uint32_t |
3572 | wm_rxpbs_adjust_82580(uint32_t val) | | 3572 | wm_rxpbs_adjust_82580(uint32_t val) |
3573 | { | | 3573 | { |
3574 | uint32_t rv = 0; | | 3574 | uint32_t rv = 0; |
3575 | | | 3575 | |
3576 | if (val < __arraycount(wm_82580_rxpbs_table)) | | 3576 | if (val < __arraycount(wm_82580_rxpbs_table)) |
3577 | rv = wm_82580_rxpbs_table[val]; | | 3577 | rv = wm_82580_rxpbs_table[val]; |
3578 | | | 3578 | |
3579 | return rv; | | 3579 | return rv; |
3580 | } | | 3580 | } |
3581 | | | 3581 | |
3582 | /* | | 3582 | /* |
3583 | * wm_reset: | | 3583 | * wm_reset: |
3584 | * | | 3584 | * |
3585 | * Reset the i82542 chip. | | 3585 | * Reset the i82542 chip. |
3586 | */ | | 3586 | */ |
3587 | static void | | 3587 | static void |
3588 | wm_reset(struct wm_softc *sc) | | 3588 | wm_reset(struct wm_softc *sc) |
3589 | { | | 3589 | { |
3590 | int phy_reset = 0; | | 3590 | int phy_reset = 0; |
3591 | int i, error = 0; | | 3591 | int i, error = 0; |
3592 | uint32_t reg, mask; | | 3592 | uint32_t reg, mask; |
3593 | | | 3593 | |
3594 | /* | | 3594 | /* |
3595 | * Allocate on-chip memory according to the MTU size. | | 3595 | * Allocate on-chip memory according to the MTU size. |
3596 | * The Packet Buffer Allocation register must be written | | 3596 | * The Packet Buffer Allocation register must be written |
3597 | * before the chip is reset. | | 3597 | * before the chip is reset. |
3598 | */ | | 3598 | */ |
3599 | switch (sc->sc_type) { | | 3599 | switch (sc->sc_type) { |
3600 | case WM_T_82547: | | 3600 | case WM_T_82547: |
3601 | case WM_T_82547_2: | | 3601 | case WM_T_82547_2: |
3602 | sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ? | | 3602 | sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ? |
3603 | PBA_22K : PBA_30K; | | 3603 | PBA_22K : PBA_30K; |
3604 | for (i = 0; i < sc->sc_ntxqueues; i++) { | | 3604 | for (i = 0; i < sc->sc_ntxqueues; i++) { |
3605 | struct wm_txqueue *txq = &sc->sc_txq[i]; | | 3605 | struct wm_txqueue *txq = &sc->sc_txq[i]; |
3606 | txq->txq_fifo_head = 0; | | 3606 | txq->txq_fifo_head = 0; |
3607 | txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT; | | 3607 | txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT; |
3608 | txq->txq_fifo_size = | | 3608 | txq->txq_fifo_size = |
3609 | (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT; | | 3609 | (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT; |
3610 | txq->txq_fifo_stall = 0; | | 3610 | txq->txq_fifo_stall = 0; |
3611 | } | | 3611 | } |
3612 | break; | | 3612 | break; |
3613 | case WM_T_82571: | | 3613 | case WM_T_82571: |
3614 | case WM_T_82572: | | 3614 | case WM_T_82572: |
3615 | case WM_T_82575: /* XXX need special handing for jumbo frames */ | | 3615 | case WM_T_82575: /* XXX need special handing for jumbo frames */ |
3616 | case WM_T_80003: | | 3616 | case WM_T_80003: |
3617 | sc->sc_pba = PBA_32K; | | 3617 | sc->sc_pba = PBA_32K; |
3618 | break; | | 3618 | break; |
3619 | case WM_T_82573: | | 3619 | case WM_T_82573: |
3620 | sc->sc_pba = PBA_12K; | | 3620 | sc->sc_pba = PBA_12K; |
3621 | break; | | 3621 | break; |
3622 | case WM_T_82574: | | 3622 | case WM_T_82574: |
3623 | case WM_T_82583: | | 3623 | case WM_T_82583: |
3624 | sc->sc_pba = PBA_20K; | | 3624 | sc->sc_pba = PBA_20K; |
3625 | break; | | 3625 | break; |
3626 | case WM_T_82576: | | 3626 | case WM_T_82576: |
3627 | sc->sc_pba = CSR_READ(sc, WMREG_RXPBS); | | 3627 | sc->sc_pba = CSR_READ(sc, WMREG_RXPBS); |
3628 | sc->sc_pba &= RXPBS_SIZE_MASK_82576; | | 3628 | sc->sc_pba &= RXPBS_SIZE_MASK_82576; |
3629 | break; | | 3629 | break; |
3630 | case WM_T_82580: | | 3630 | case WM_T_82580: |
3631 | case WM_T_I350: | | 3631 | case WM_T_I350: |
3632 | case WM_T_I354: | | 3632 | case WM_T_I354: |
3633 | sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS)); | | 3633 | sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS)); |
3634 | break; | | 3634 | break; |
3635 | case WM_T_I210: | | 3635 | case WM_T_I210: |
3636 | case WM_T_I211: | | 3636 | case WM_T_I211: |
3637 | sc->sc_pba = PBA_34K; | | 3637 | sc->sc_pba = PBA_34K; |
3638 | break; | | 3638 | break; |
3639 | case WM_T_ICH8: | | 3639 | case WM_T_ICH8: |
3640 | /* Workaround for a bit corruption issue in FIFO memory */ | | 3640 | /* Workaround for a bit corruption issue in FIFO memory */ |
3641 | sc->sc_pba = PBA_8K; | | 3641 | sc->sc_pba = PBA_8K; |
3642 | CSR_WRITE(sc, WMREG_PBS, PBA_16K); | | 3642 | CSR_WRITE(sc, WMREG_PBS, PBA_16K); |
3643 | break; | | 3643 | break; |
3644 | case WM_T_ICH9: | | 3644 | case WM_T_ICH9: |
3645 | case WM_T_ICH10: | | 3645 | case WM_T_ICH10: |
3646 | sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ? | | 3646 | sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ? |
3647 | PBA_14K : PBA_10K; | | 3647 | PBA_14K : PBA_10K; |
3648 | break; | | 3648 | break; |
3649 | case WM_T_PCH: | | 3649 | case WM_T_PCH: |
3650 | case WM_T_PCH2: | | 3650 | case WM_T_PCH2: |
3651 | case WM_T_PCH_LPT: | | 3651 | case WM_T_PCH_LPT: |
3652 | sc->sc_pba = PBA_26K; | | 3652 | sc->sc_pba = PBA_26K; |
3653 | break; | | 3653 | break; |
3654 | default: | | 3654 | default: |
3655 | sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ? | | 3655 | sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ? |
3656 | PBA_40K : PBA_48K; | | 3656 | PBA_40K : PBA_48K; |
3657 | break; | | 3657 | break; |
3658 | } | | 3658 | } |
3659 | /* | | 3659 | /* |
3660 | * Only old or non-multiqueue devices have the PBA register | | 3660 | * Only old or non-multiqueue devices have the PBA register |
3661 | * XXX Need special handling for 82575. | | 3661 | * XXX Need special handling for 82575. |
3662 | */ | | 3662 | */ |
3663 | if (((sc->sc_flags & WM_F_NEWQUEUE) == 0) | | 3663 | if (((sc->sc_flags & WM_F_NEWQUEUE) == 0) |
3664 | || (sc->sc_type == WM_T_82575)) | | 3664 | || (sc->sc_type == WM_T_82575)) |
3665 | CSR_WRITE(sc, WMREG_PBA, sc->sc_pba); | | 3665 | CSR_WRITE(sc, WMREG_PBA, sc->sc_pba); |
3666 | | | 3666 | |
3667 | /* Prevent the PCI-E bus from sticking */ | | 3667 | /* Prevent the PCI-E bus from sticking */ |
3668 | if (sc->sc_flags & WM_F_PCIE) { | | 3668 | if (sc->sc_flags & WM_F_PCIE) { |
3669 | int timeout = 800; | | 3669 | int timeout = 800; |
3670 | | | 3670 | |
3671 | sc->sc_ctrl |= CTRL_GIO_M_DIS; | | 3671 | sc->sc_ctrl |= CTRL_GIO_M_DIS; |
3672 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); | | 3672 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); |
3673 | | | 3673 | |
3674 | while (timeout--) { | | 3674 | while (timeout--) { |
3675 | if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) | | 3675 | if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) |
3676 | == 0) | | 3676 | == 0) |
3677 | break; | | 3677 | break; |
3678 | delay(100); | | 3678 | delay(100); |
3679 | } | | 3679 | } |
3680 | } | | 3680 | } |
3681 | | | 3681 | |
3682 | /* Set the completion timeout for interface */ | | 3682 | /* Set the completion timeout for interface */ |
3683 | if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576) | | 3683 | if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576) |
3684 | || (sc->sc_type == WM_T_82580) | | 3684 | || (sc->sc_type == WM_T_82580) |
3685 | || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354) | | 3685 | || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354) |
3686 | || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) | | 3686 | || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) |
3687 | wm_set_pcie_completion_timeout(sc); | | 3687 | wm_set_pcie_completion_timeout(sc); |
3688 | | | 3688 | |
3689 | /* Clear interrupt */ | | 3689 | /* Clear interrupt */ |
3690 | CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); | | 3690 | CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); |
3691 | if (sc->sc_nintrs > 1) { | | 3691 | if (sc->sc_nintrs > 1) { |
3692 | if (sc->sc_type != WM_T_82574) { | | 3692 | if (sc->sc_type != WM_T_82574) { |
3693 | CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU); | | 3693 | CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU); |
3694 | CSR_WRITE(sc, WMREG_EIAC, 0); | | 3694 | CSR_WRITE(sc, WMREG_EIAC, 0); |
3695 | } else { | | 3695 | } else { |
3696 | CSR_WRITE(sc, WMREG_EIAC_82574, 0); | | 3696 | CSR_WRITE(sc, WMREG_EIAC_82574, 0); |
3697 | } | | 3697 | } |
3698 | } | | 3698 | } |
3699 | | | 3699 | |
3700 | /* Stop the transmit and receive processes. */ | | 3700 | /* Stop the transmit and receive processes. */ |
3701 | CSR_WRITE(sc, WMREG_RCTL, 0); | | 3701 | CSR_WRITE(sc, WMREG_RCTL, 0); |
3702 | sc->sc_rctl &= ~RCTL_EN; | | 3702 | sc->sc_rctl &= ~RCTL_EN; |
3703 | CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP); | | 3703 | CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP); |
3704 | CSR_WRITE_FLUSH(sc); | | 3704 | CSR_WRITE_FLUSH(sc); |
3705 | | | 3705 | |
3706 | /* XXX set_tbi_sbp_82543() */ | | 3706 | /* XXX set_tbi_sbp_82543() */ |
3707 | | | 3707 | |
3708 | delay(10*1000); | | 3708 | delay(10*1000); |
3709 | | | 3709 | |
3710 | /* Must acquire the MDIO ownership before MAC reset */ | | 3710 | /* Must acquire the MDIO ownership before MAC reset */ |
3711 | switch (sc->sc_type) { | | 3711 | switch (sc->sc_type) { |
3712 | case WM_T_82573: | | 3712 | case WM_T_82573: |
3713 | case WM_T_82574: | | 3713 | case WM_T_82574: |
3714 | case WM_T_82583: | | 3714 | case WM_T_82583: |
3715 | error = wm_get_hw_semaphore_82573(sc); | | 3715 | error = wm_get_hw_semaphore_82573(sc); |
3716 | break; | | 3716 | break; |
3717 | default: | | 3717 | default: |
3718 | break; | | 3718 | break; |
3719 | } | | 3719 | } |
3720 | | | 3720 | |
3721 | /* | | 3721 | /* |
3722 | * 82541 Errata 29? & 82547 Errata 28? | | 3722 | * 82541 Errata 29? & 82547 Errata 28? |
3723 | * See also the description about PHY_RST bit in CTRL register | | 3723 | * See also the description about PHY_RST bit in CTRL register |
3724 | * in 8254x_GBe_SDM.pdf. | | 3724 | * in 8254x_GBe_SDM.pdf. |
3725 | */ | | 3725 | */ |
3726 | if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) { | | 3726 | if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) { |
3727 | CSR_WRITE(sc, WMREG_CTRL, | | 3727 | CSR_WRITE(sc, WMREG_CTRL, |
3728 | CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET); | | 3728 | CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET); |
3729 | CSR_WRITE_FLUSH(sc); | | 3729 | CSR_WRITE_FLUSH(sc); |
3730 | delay(5000); | | 3730 | delay(5000); |
3731 | } | | 3731 | } |
3732 | | | 3732 | |
3733 | switch (sc->sc_type) { | | 3733 | switch (sc->sc_type) { |
3734 | case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */ | | 3734 | case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */ |
3735 | case WM_T_82541: | | 3735 | case WM_T_82541: |
3736 | case WM_T_82541_2: | | 3736 | case WM_T_82541_2: |
3737 | case WM_T_82547: | | 3737 | case WM_T_82547: |
3738 | case WM_T_82547_2: | | 3738 | case WM_T_82547_2: |
3739 | /* | | 3739 | /* |
3740 | * On some chipsets, a reset through a memory-mapped write | | 3740 | * On some chipsets, a reset through a memory-mapped write |
3741 | * cycle can cause the chip to reset before completing the | | 3741 | * cycle can cause the chip to reset before completing the |
3742 | * write cycle. This causes major headache that can be | | 3742 | * write cycle. This causes major headache that can be |
3743 | * avoided by issuing the reset via indirect register writes | | 3743 | * avoided by issuing the reset via indirect register writes |
3744 | * through I/O space. | | 3744 | * through I/O space. |
3745 | * | | 3745 | * |
3746 | * So, if we successfully mapped the I/O BAR at attach time, | | 3746 | * So, if we successfully mapped the I/O BAR at attach time, |
3747 | * use that. Otherwise, try our luck with a memory-mapped | | 3747 | * use that. Otherwise, try our luck with a memory-mapped |
3748 | * reset. | | 3748 | * reset. |
3749 | */ | | 3749 | */ |
3750 | if (sc->sc_flags & WM_F_IOH_VALID) | | 3750 | if (sc->sc_flags & WM_F_IOH_VALID) |
3751 | wm_io_write(sc, WMREG_CTRL, CTRL_RST); | | 3751 | wm_io_write(sc, WMREG_CTRL, CTRL_RST); |
3752 | else | | 3752 | else |
3753 | CSR_WRITE(sc, WMREG_CTRL, CTRL_RST); | | 3753 | CSR_WRITE(sc, WMREG_CTRL, CTRL_RST); |
3754 | break; | | 3754 | break; |
3755 | case WM_T_82545_3: | | 3755 | case WM_T_82545_3: |
3756 | case WM_T_82546_3: | | 3756 | case WM_T_82546_3: |
3757 | /* Use the shadow control register on these chips. */ | | 3757 | /* Use the shadow control register on these chips. */ |
3758 | CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST); | | 3758 | CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST); |
3759 | break; | | 3759 | break; |
3760 | case WM_T_80003: | | 3760 | case WM_T_80003: |
3761 | mask = swfwphysem[sc->sc_funcid]; | | 3761 | mask = swfwphysem[sc->sc_funcid]; |
3762 | reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST; | | 3762 | reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST; |
3763 | wm_get_swfw_semaphore(sc, mask); | | 3763 | wm_get_swfw_semaphore(sc, mask); |
3764 | CSR_WRITE(sc, WMREG_CTRL, reg); | | 3764 | CSR_WRITE(sc, WMREG_CTRL, reg); |
3765 | wm_put_swfw_semaphore(sc, mask); | | 3765 | wm_put_swfw_semaphore(sc, mask); |
3766 | break; | | 3766 | break; |
3767 | case WM_T_ICH8: | | 3767 | case WM_T_ICH8: |
3768 | case WM_T_ICH9: | | 3768 | case WM_T_ICH9: |
3769 | case WM_T_ICH10: | | 3769 | case WM_T_ICH10: |
3770 | case WM_T_PCH: | | 3770 | case WM_T_PCH: |
3771 | case WM_T_PCH2: | | 3771 | case WM_T_PCH2: |
3772 | case WM_T_PCH_LPT: | | 3772 | case WM_T_PCH_LPT: |
3773 | reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST; | | 3773 | reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST; |
3774 | if (wm_check_reset_block(sc) == 0) { | | 3774 | if (wm_check_reset_block(sc) == 0) { |
3775 | /* | | 3775 | /* |
3776 | * Gate automatic PHY configuration by hardware on | | 3776 | * Gate automatic PHY configuration by hardware on |
3777 | * non-managed 82579 | | 3777 | * non-managed 82579 |
3778 | */ | | 3778 | */ |
3779 | if ((sc->sc_type == WM_T_PCH2) | | 3779 | if ((sc->sc_type == WM_T_PCH2) |
3780 | && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) | | 3780 | && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) |
3781 | != 0)) | | 3781 | != 0)) |
3782 | wm_gate_hw_phy_config_ich8lan(sc, 1); | | 3782 | wm_gate_hw_phy_config_ich8lan(sc, 1); |
3783 | | | 3783 | |
3784 | | | 3784 | |
3785 | reg |= CTRL_PHY_RESET; | | 3785 | reg |= CTRL_PHY_RESET; |
3786 | phy_reset = 1; | | 3786 | phy_reset = 1; |
3787 | } | | 3787 | } |
3788 | wm_get_swfwhw_semaphore(sc); | | 3788 | wm_get_swfwhw_semaphore(sc); |
3789 | CSR_WRITE(sc, WMREG_CTRL, reg); | | 3789 | CSR_WRITE(sc, WMREG_CTRL, reg); |
3790 | /* Don't insert a completion barrier when reset */ | | 3790 | /* Don't insert a completion barrier when reset */ |
3791 | delay(20*1000); | | 3791 | delay(20*1000); |
3792 | wm_put_swfwhw_semaphore(sc); | | 3792 | wm_put_swfwhw_semaphore(sc); |
3793 | break; | | 3793 | break; |
3794 | case WM_T_82580: | | 3794 | case WM_T_82580: |
3795 | case WM_T_I350: | | 3795 | case WM_T_I350: |
3796 | case WM_T_I354: | | 3796 | case WM_T_I354: |
3797 | case WM_T_I210: | | 3797 | case WM_T_I210: |
3798 | case WM_T_I211: | | 3798 | case WM_T_I211: |
3799 | CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST); | | 3799 | CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST); |
3800 | if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII) | | 3800 | if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII) |
3801 | CSR_WRITE_FLUSH(sc); | | 3801 | CSR_WRITE_FLUSH(sc); |
3802 | delay(5000); | | 3802 | delay(5000); |
3803 | break; | | 3803 | break; |
3804 | case WM_T_82542_2_0: | | 3804 | case WM_T_82542_2_0: |
3805 | case WM_T_82542_2_1: | | 3805 | case WM_T_82542_2_1: |
3806 | case WM_T_82543: | | 3806 | case WM_T_82543: |
3807 | case WM_T_82540: | | 3807 | case WM_T_82540: |
3808 | case WM_T_82545: | | 3808 | case WM_T_82545: |
3809 | case WM_T_82546: | | 3809 | case WM_T_82546: |
3810 | case WM_T_82571: | | 3810 | case WM_T_82571: |
3811 | case WM_T_82572: | | 3811 | case WM_T_82572: |
3812 | case WM_T_82573: | | 3812 | case WM_T_82573: |
3813 | case WM_T_82574: | | 3813 | case WM_T_82574: |
3814 | case WM_T_82575: | | 3814 | case WM_T_82575: |
3815 | case WM_T_82576: | | 3815 | case WM_T_82576: |
3816 | case WM_T_82583: | | 3816 | case WM_T_82583: |
3817 | default: | | 3817 | default: |
3818 | /* Everything else can safely use the documented method. */ | | 3818 | /* Everything else can safely use the documented method. */ |
3819 | CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST); | | 3819 | CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST); |
3820 | break; | | 3820 | break; |
3821 | } | | 3821 | } |
3822 | | | 3822 | |
3823 | /* Must release the MDIO ownership after MAC reset */ | | 3823 | /* Must release the MDIO ownership after MAC reset */ |
3824 | switch (sc->sc_type) { | | 3824 | switch (sc->sc_type) { |
3825 | case WM_T_82573: | | 3825 | case WM_T_82573: |
3826 | case WM_T_82574: | | 3826 | case WM_T_82574: |
3827 | case WM_T_82583: | | 3827 | case WM_T_82583: |
3828 | if (error == 0) | | 3828 | if (error == 0) |
3829 | wm_put_hw_semaphore_82573(sc); | | 3829 | wm_put_hw_semaphore_82573(sc); |
3830 | break; | | 3830 | break; |
3831 | default: | | 3831 | default: |
3832 | break; | | 3832 | break; |
3833 | } | | 3833 | } |
3834 | | | 3834 | |
3835 | if (phy_reset != 0) | | 3835 | if (phy_reset != 0) |
3836 | wm_get_cfg_done(sc); | | 3836 | wm_get_cfg_done(sc); |
3837 | | | 3837 | |
3838 | /* reload EEPROM */ | | 3838 | /* reload EEPROM */ |
3839 | switch (sc->sc_type) { | | 3839 | switch (sc->sc_type) { |
3840 | case WM_T_82542_2_0: | | 3840 | case WM_T_82542_2_0: |
3841 | case WM_T_82542_2_1: | | 3841 | case WM_T_82542_2_1: |
3842 | case WM_T_82543: | | 3842 | case WM_T_82543: |
3843 | case WM_T_82544: | | 3843 | case WM_T_82544: |
3844 | delay(10); | | 3844 | delay(10); |
3845 | reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST; | | 3845 | reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST; |
3846 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg); | | 3846 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg); |
3847 | CSR_WRITE_FLUSH(sc); | | 3847 | CSR_WRITE_FLUSH(sc); |
3848 | delay(2000); | | 3848 | delay(2000); |
3849 | break; | | 3849 | break; |
3850 | case WM_T_82540: | | 3850 | case WM_T_82540: |
3851 | case WM_T_82545: | | 3851 | case WM_T_82545: |
3852 | case WM_T_82545_3: | | 3852 | case WM_T_82545_3: |
3853 | case WM_T_82546: | | 3853 | case WM_T_82546: |
3854 | case WM_T_82546_3: | | 3854 | case WM_T_82546_3: |
3855 | delay(5*1000); | | 3855 | delay(5*1000); |
3856 | /* XXX Disable HW ARPs on ASF enabled adapters */ | | 3856 | /* XXX Disable HW ARPs on ASF enabled adapters */ |
3857 | break; | | 3857 | break; |
3858 | case WM_T_82541: | | 3858 | case WM_T_82541: |
3859 | case WM_T_82541_2: | | 3859 | case WM_T_82541_2: |
3860 | case WM_T_82547: | | 3860 | case WM_T_82547: |
3861 | case WM_T_82547_2: | | 3861 | case WM_T_82547_2: |
3862 | delay(20000); | | 3862 | delay(20000); |
3863 | /* XXX Disable HW ARPs on ASF enabled adapters */ | | 3863 | /* XXX Disable HW ARPs on ASF enabled adapters */ |
3864 | break; | | 3864 | break; |
3865 | case WM_T_82571: | | 3865 | case WM_T_82571: |
3866 | case WM_T_82572: | | 3866 | case WM_T_82572: |
3867 | case WM_T_82573: | | 3867 | case WM_T_82573: |
3868 | case WM_T_82574: | | 3868 | case WM_T_82574: |
3869 | case WM_T_82583: | | 3869 | case WM_T_82583: |
3870 | if (sc->sc_flags & WM_F_EEPROM_FLASH) { | | 3870 | if (sc->sc_flags & WM_F_EEPROM_FLASH) { |
3871 | delay(10); | | 3871 | delay(10); |
3872 | reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST; | | 3872 | reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST; |
3873 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg); | | 3873 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg); |
3874 | CSR_WRITE_FLUSH(sc); | | 3874 | CSR_WRITE_FLUSH(sc); |
3875 | } | | 3875 | } |
3876 | /* check EECD_EE_AUTORD */ | | 3876 | /* check EECD_EE_AUTORD */ |
3877 | wm_get_auto_rd_done(sc); | | 3877 | wm_get_auto_rd_done(sc); |
3878 | /* | | 3878 | /* |
3879 | * Phy configuration from NVM just starts after EECD_AUTO_RD | | 3879 | * Phy configuration from NVM just starts after EECD_AUTO_RD |
3880 | * is set. | | 3880 | * is set. |
3881 | */ | | 3881 | */ |
3882 | if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574) | | 3882 | if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574) |
3883 | || (sc->sc_type == WM_T_82583)) | | 3883 | || (sc->sc_type == WM_T_82583)) |
3884 | delay(25*1000); | | 3884 | delay(25*1000); |
3885 | break; | | 3885 | break; |
3886 | case WM_T_82575: | | 3886 | case WM_T_82575: |
3887 | case WM_T_82576: | | 3887 | case WM_T_82576: |
3888 | case WM_T_82580: | | 3888 | case WM_T_82580: |
3889 | case WM_T_I350: | | 3889 | case WM_T_I350: |
3890 | case WM_T_I354: | | 3890 | case WM_T_I354: |
3891 | case WM_T_I210: | | 3891 | case WM_T_I210: |
3892 | case WM_T_I211: | | 3892 | case WM_T_I211: |
3893 | case WM_T_80003: | | 3893 | case WM_T_80003: |
3894 | /* check EECD_EE_AUTORD */ | | 3894 | /* check EECD_EE_AUTORD */ |
3895 | wm_get_auto_rd_done(sc); | | 3895 | wm_get_auto_rd_done(sc); |
3896 | break; | | 3896 | break; |
3897 | case WM_T_ICH8: | | 3897 | case WM_T_ICH8: |
3898 | case WM_T_ICH9: | | 3898 | case WM_T_ICH9: |
3899 | case WM_T_ICH10: | | 3899 | case WM_T_ICH10: |
3900 | case WM_T_PCH: | | 3900 | case WM_T_PCH: |
3901 | case WM_T_PCH2: | | 3901 | case WM_T_PCH2: |
3902 | case WM_T_PCH_LPT: | | 3902 | case WM_T_PCH_LPT: |
3903 | break; | | 3903 | break; |
3904 | default: | | 3904 | default: |
3905 | panic("%s: unknown type\n", __func__); | | 3905 | panic("%s: unknown type\n", __func__); |
3906 | } | | 3906 | } |
3907 | | | 3907 | |
3908 | /* Check whether EEPROM is present or not */ | | 3908 | /* Check whether EEPROM is present or not */ |
3909 | switch (sc->sc_type) { | | 3909 | switch (sc->sc_type) { |
3910 | case WM_T_82575: | | 3910 | case WM_T_82575: |
3911 | case WM_T_82576: | | 3911 | case WM_T_82576: |
3912 | case WM_T_82580: | | 3912 | case WM_T_82580: |
3913 | case WM_T_I350: | | 3913 | case WM_T_I350: |
3914 | case WM_T_I354: | | 3914 | case WM_T_I354: |
3915 | case WM_T_ICH8: | | 3915 | case WM_T_ICH8: |
3916 | case WM_T_ICH9: | | 3916 | case WM_T_ICH9: |
3917 | if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) { | | 3917 | if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) { |
3918 | /* Not found */ | | 3918 | /* Not found */ |
3919 | sc->sc_flags |= WM_F_EEPROM_INVALID; | | 3919 | sc->sc_flags |= WM_F_EEPROM_INVALID; |
3920 | if (sc->sc_type == WM_T_82575) | | 3920 | if (sc->sc_type == WM_T_82575) |
3921 | wm_reset_init_script_82575(sc); | | 3921 | wm_reset_init_script_82575(sc); |
3922 | } | | 3922 | } |
3923 | break; | | 3923 | break; |
3924 | default: | | 3924 | default: |
3925 | break; | | 3925 | break; |
3926 | } | | 3926 | } |
3927 | | | 3927 | |
3928 | if ((sc->sc_type == WM_T_82580) | | 3928 | if ((sc->sc_type == WM_T_82580) |
3929 | || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) { | | 3929 | || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) { |
3930 | /* clear global device reset status bit */ | | 3930 | /* clear global device reset status bit */ |
3931 | CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET); | | 3931 | CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET); |
3932 | } | | 3932 | } |
3933 | | | 3933 | |
3934 | /* Clear any pending interrupt events. */ | | 3934 | /* Clear any pending interrupt events. */ |
3935 | CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); | | 3935 | CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); |
3936 | reg = CSR_READ(sc, WMREG_ICR); | | 3936 | reg = CSR_READ(sc, WMREG_ICR); |
3937 | if (sc->sc_nintrs > 1) { | | 3937 | if (sc->sc_nintrs > 1) { |
3938 | if (sc->sc_type != WM_T_82574) { | | 3938 | if (sc->sc_type != WM_T_82574) { |
3939 | CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU); | | 3939 | CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU); |
3940 | CSR_WRITE(sc, WMREG_EIAC, 0); | | 3940 | CSR_WRITE(sc, WMREG_EIAC, 0); |
3941 | } else | | 3941 | } else |
3942 | CSR_WRITE(sc, WMREG_EIAC_82574, 0); | | 3942 | CSR_WRITE(sc, WMREG_EIAC_82574, 0); |
3943 | } | | 3943 | } |
3944 | | | 3944 | |
3945 | /* reload sc_ctrl */ | | 3945 | /* reload sc_ctrl */ |
3946 | sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL); | | 3946 | sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL); |
3947 | | | 3947 | |
3948 | if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)) | | 3948 | if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)) |
3949 | wm_set_eee_i350(sc); | | 3949 | wm_set_eee_i350(sc); |
3950 | | | 3950 | |
3951 | /* dummy read from WUC */ | | 3951 | /* dummy read from WUC */ |
3952 | if (sc->sc_type == WM_T_PCH) | | 3952 | if (sc->sc_type == WM_T_PCH) |
3953 | reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC); | | 3953 | reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC); |
3954 | /* | | 3954 | /* |
3955 | * For PCH, this write will make sure that any noise will be detected | | 3955 | * For PCH, this write will make sure that any noise will be detected |
3956 | * as a CRC error and be dropped rather than show up as a bad packet | | 3956 | * as a CRC error and be dropped rather than show up as a bad packet |
3957 | * to the DMA engine | | 3957 | * to the DMA engine |
3958 | */ | | 3958 | */ |
3959 | if (sc->sc_type == WM_T_PCH) | | 3959 | if (sc->sc_type == WM_T_PCH) |
3960 | CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565); | | 3960 | CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565); |
3961 | | | 3961 | |
3962 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) | | 3962 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) |
3963 | CSR_WRITE(sc, WMREG_WUC, 0); | | 3963 | CSR_WRITE(sc, WMREG_WUC, 0); |
3964 | | | 3964 | |
3965 | wm_reset_mdicnfg_82580(sc); | | 3965 | wm_reset_mdicnfg_82580(sc); |
3966 | | | 3966 | |
3967 | if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0) | | 3967 | if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0) |
3968 | wm_pll_workaround_i210(sc); | | 3968 | wm_pll_workaround_i210(sc); |
3969 | } | | 3969 | } |
3970 | | | 3970 | |
3971 | /* | | 3971 | /* |
3972 | * wm_add_rxbuf: | | 3972 | * wm_add_rxbuf: |
3973 | * | | 3973 | * |
3974 | * Add a receive buffer to the indiciated descriptor. | | 3974 | * Add a receive buffer to the indiciated descriptor. |
3975 | */ | | 3975 | */ |
3976 | static int | | 3976 | static int |
3977 | wm_add_rxbuf(struct wm_rxqueue *rxq, int idx) | | 3977 | wm_add_rxbuf(struct wm_rxqueue *rxq, int idx) |
3978 | { | | 3978 | { |
3979 | struct wm_softc *sc = rxq->rxq_sc; | | 3979 | struct wm_softc *sc = rxq->rxq_sc; |
3980 | struct wm_rxsoft *rxs = &rxq->rxq_soft[idx]; | | 3980 | struct wm_rxsoft *rxs = &rxq->rxq_soft[idx]; |
3981 | struct mbuf *m; | | 3981 | struct mbuf *m; |
3982 | int error; | | 3982 | int error; |
3983 | | | 3983 | |
3984 | KASSERT(WM_RX_LOCKED(rxq)); | | 3984 | KASSERT(WM_RX_LOCKED(rxq)); |
3985 | | | 3985 | |
3986 | MGETHDR(m, M_DONTWAIT, MT_DATA); | | 3986 | MGETHDR(m, M_DONTWAIT, MT_DATA); |
3987 | if (m == NULL) | | 3987 | if (m == NULL) |
3988 | return ENOBUFS; | | 3988 | return ENOBUFS; |
3989 | | | 3989 | |
3990 | MCLGET(m, M_DONTWAIT); | | 3990 | MCLGET(m, M_DONTWAIT); |
3991 | if ((m->m_flags & M_EXT) == 0) { | | 3991 | if ((m->m_flags & M_EXT) == 0) { |
3992 | m_freem(m); | | 3992 | m_freem(m); |
3993 | return ENOBUFS; | | 3993 | return ENOBUFS; |
3994 | } | | 3994 | } |
3995 | | | 3995 | |
3996 | if (rxs->rxs_mbuf != NULL) | | 3996 | if (rxs->rxs_mbuf != NULL) |
3997 | bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); | | 3997 | bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); |
3998 | | | 3998 | |
3999 | rxs->rxs_mbuf = m; | | 3999 | rxs->rxs_mbuf = m; |
4000 | | | 4000 | |
4001 | m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; | | 4001 | m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; |
4002 | error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m, | | 4002 | error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m, |
4003 | BUS_DMA_READ|BUS_DMA_NOWAIT); | | 4003 | BUS_DMA_READ|BUS_DMA_NOWAIT); |
4004 | if (error) { | | 4004 | if (error) { |
4005 | /* XXX XXX XXX */ | | 4005 | /* XXX XXX XXX */ |
4006 | aprint_error_dev(sc->sc_dev, | | 4006 | aprint_error_dev(sc->sc_dev, |
4007 | "unable to load rx DMA map %d, error = %d\n", | | 4007 | "unable to load rx DMA map %d, error = %d\n", |
4008 | idx, error); | | 4008 | idx, error); |
4009 | panic("wm_add_rxbuf"); | | 4009 | panic("wm_add_rxbuf"); |
4010 | } | | 4010 | } |
4011 | | | 4011 | |
4012 | bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, | | 4012 | bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, |
4013 | rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); | | 4013 | rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); |
4014 | | | 4014 | |
4015 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { | | 4015 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { |
4016 | if ((sc->sc_rctl & RCTL_EN) != 0) | | 4016 | if ((sc->sc_rctl & RCTL_EN) != 0) |
4017 | wm_init_rxdesc(rxq, idx); | | 4017 | wm_init_rxdesc(rxq, idx); |
4018 | } else | | 4018 | } else |
4019 | wm_init_rxdesc(rxq, idx); | | 4019 | wm_init_rxdesc(rxq, idx); |
4020 | | | 4020 | |
4021 | return 0; | | 4021 | return 0; |
4022 | } | | 4022 | } |
4023 | | | 4023 | |
4024 | /* | | 4024 | /* |
4025 | * wm_rxdrain: | | 4025 | * wm_rxdrain: |
4026 | * | | 4026 | * |
4027 | * Drain the receive queue. | | 4027 | * Drain the receive queue. |
4028 | */ | | 4028 | */ |
4029 | static void | | 4029 | static void |
4030 | wm_rxdrain(struct wm_rxqueue *rxq) | | 4030 | wm_rxdrain(struct wm_rxqueue *rxq) |
4031 | { | | 4031 | { |
4032 | struct wm_softc *sc = rxq->rxq_sc; | | 4032 | struct wm_softc *sc = rxq->rxq_sc; |
4033 | struct wm_rxsoft *rxs; | | 4033 | struct wm_rxsoft *rxs; |
4034 | int i; | | 4034 | int i; |
4035 | | | 4035 | |
4036 | KASSERT(WM_RX_LOCKED(rxq)); | | 4036 | KASSERT(WM_RX_LOCKED(rxq)); |
4037 | | | 4037 | |
4038 | for (i = 0; i < WM_NRXDESC; i++) { | | 4038 | for (i = 0; i < WM_NRXDESC; i++) { |
4039 | rxs = &rxq->rxq_soft[i]; | | 4039 | rxs = &rxq->rxq_soft[i]; |
4040 | if (rxs->rxs_mbuf != NULL) { | | 4040 | if (rxs->rxs_mbuf != NULL) { |
4041 | bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); | | 4041 | bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); |
4042 | m_freem(rxs->rxs_mbuf); | | 4042 | m_freem(rxs->rxs_mbuf); |
4043 | rxs->rxs_mbuf = NULL; | | 4043 | rxs->rxs_mbuf = NULL; |
4044 | } | | 4044 | } |
4045 | } | | 4045 | } |
4046 | } | | 4046 | } |
4047 | | | 4047 | |
4048 | /* | | 4048 | /* |
| | | 4049 | * Setup registers for RSS. |
| | | 4050 | * |
| | | 4051 | * XXX not yet VMDq support |
| | | 4052 | */ |
| | | 4053 | static void |
| | | 4054 | wm_init_rss(struct wm_softc *sc) |
| | | 4055 | { |
| | | 4056 | uint32_t mrqc, reta_reg; |
| | | 4057 | int i; |
| | | 4058 | |
| | | 4059 | for (i = 0; i < RETA_NUM_ENTRIES; i++) { |
| | | 4060 | int qid, reta_ent; |
| | | 4061 | |
| | | 4062 | qid = i % sc->sc_nrxqueues; |
| | | 4063 | switch(sc->sc_type) { |
| | | 4064 | case WM_T_82574: |
| | | 4065 | reta_ent = __SHIFTIN(qid, |
| | | 4066 | RETA_ENT_QINDEX_MASK_82574); |
| | | 4067 | break; |
| | | 4068 | case WM_T_82575: |
| | | 4069 | reta_ent = __SHIFTIN(qid, |
| | | 4070 | RETA_ENT_QINDEX1_MASK_82575); |
| | | 4071 | break; |
| | | 4072 | default: |
| | | 4073 | reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK); |
| | | 4074 | break; |
| | | 4075 | } |
| | | 4076 | |
| | | 4077 | reta_reg = CSR_READ(sc, WMREG_RETA_Q(i)); |
| | | 4078 | reta_reg &= ~RETA_ENTRY_MASK_Q(i); |
| | | 4079 | reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i)); |
| | | 4080 | CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg); |
| | | 4081 | } |
| | | 4082 | |
| | | 4083 | for (i = 0; i < RSSRK_NUM_REGS; i++) |
| | | 4084 | CSR_WRITE(sc, WMREG_RSSRK(i), (uint32_t)random()); |
| | | 4085 | |
| | | 4086 | if (sc->sc_type == WM_T_82574) |
| | | 4087 | mrqc = MRQC_ENABLE_RSS_MQ_82574; |
| | | 4088 | else |
| | | 4089 | mrqc = MRQC_ENABLE_RSS_MQ; |
| | | 4090 | |
| | | 4091 | /* XXXX |
| | | 4092 | * The same as FreeBSD igb. |
| | | 4093 | * Why doesn't it use MRQC_RSS_FIELD_IPV6_EX? |
| | | 4094 | */ |
| | | 4095 | mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP); |
| | | 4096 | mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP); |
| | | 4097 | mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP); |
| | | 4098 | mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX); |
| | | 4099 | |
| | | 4100 | CSR_WRITE(sc, WMREG_MRQC, mrqc); |
| | | 4101 | } |
| | | 4102 | |
| | | 4103 | #ifdef WM_MSI_MSIX |
| | | 4104 | |
| | | 4105 | /* |
4049 | * Adjust TX and RX queue numbers which the system actually uses. | | 4106 | * Adjust TX and RX queue numbers which the system actually uses. |
4050 | * | | 4107 | * |
4051 | * The numbers are affected by below parameters. | | 4108 | * The numbers are affected by below parameters. |
4052 | * - The number of hardware queues | | 4109 | * - The number of hardware queues |
4053 | * - The number of MSI-X vectors (= "nvectors" argument) | | 4110 | * - The number of MSI-X vectors (= "nvectors" argument) |
4054 | * - ncpu | | 4111 | * - ncpu |
4055 | */ | | 4112 | */ |
4056 | static void | | 4113 | static void |
4057 | wm_adjust_qnum(struct wm_softc *sc, int nvectors) | | 4114 | wm_adjust_qnum(struct wm_softc *sc, int nvectors) |
4058 | { | | 4115 | { |
4059 | int hw_ntxqueues, hw_nrxqueues; | | 4116 | int hw_ntxqueues, hw_nrxqueues; |
4060 | | | 4117 | |
4061 | if (nvectors < 3) { | | 4118 | if (nvectors < 3) { |
4062 | sc->sc_ntxqueues = 1; | | 4119 | sc->sc_ntxqueues = 1; |
4063 | sc->sc_nrxqueues = 1; | | 4120 | sc->sc_nrxqueues = 1; |
4064 | return; | | 4121 | return; |
4065 | } | | 4122 | } |
4066 | | | 4123 | |
4067 | switch(sc->sc_type) { | | 4124 | switch(sc->sc_type) { |
4068 | case WM_T_82572: | | 4125 | case WM_T_82572: |
4069 | hw_ntxqueues = 2; | | 4126 | hw_ntxqueues = 2; |
4070 | hw_nrxqueues = 2; | | 4127 | hw_nrxqueues = 2; |
4071 | break; | | 4128 | break; |
4072 | case WM_T_82574: | | 4129 | case WM_T_82574: |
4073 | hw_ntxqueues = 2; | | 4130 | hw_ntxqueues = 2; |
4074 | hw_nrxqueues = 2; | | 4131 | hw_nrxqueues = 2; |
4075 | break; | | 4132 | break; |
4076 | case WM_T_82575: | | 4133 | case WM_T_82575: |
4077 | hw_ntxqueues = 4; | | 4134 | hw_ntxqueues = 4; |
4078 | hw_nrxqueues = 4; | | 4135 | hw_nrxqueues = 4; |
4079 | break; | | 4136 | break; |
4080 | case WM_T_82576: | | 4137 | case WM_T_82576: |
4081 | hw_ntxqueues = 16; | | 4138 | hw_ntxqueues = 16; |
4082 | hw_nrxqueues = 16; | | 4139 | hw_nrxqueues = 16; |
4083 | break; | | 4140 | break; |
4084 | case WM_T_82580: | | 4141 | case WM_T_82580: |
4085 | case WM_T_I350: | | 4142 | case WM_T_I350: |
4086 | case WM_T_I354: | | 4143 | case WM_T_I354: |
4087 | hw_ntxqueues = 8; | | 4144 | hw_ntxqueues = 8; |
4088 | hw_nrxqueues = 8; | | 4145 | hw_nrxqueues = 8; |
4089 | break; | | 4146 | break; |
4090 | case WM_T_I210: | | 4147 | case WM_T_I210: |
4091 | hw_ntxqueues = 4; | | 4148 | hw_ntxqueues = 4; |
4092 | hw_nrxqueues = 4; | | 4149 | hw_nrxqueues = 4; |
4093 | break; | | 4150 | break; |
4094 | case WM_T_I211: | | 4151 | case WM_T_I211: |
4095 | hw_ntxqueues = 2; | | 4152 | hw_ntxqueues = 2; |
4096 | hw_nrxqueues = 2; | | 4153 | hw_nrxqueues = 2; |
4097 | break; | | 4154 | break; |
4098 | /* | | 4155 | /* |
4099 | * As the ethernet controllers below do not support MSI-X, | | 4156 | * As the ethernet controllers below do not support MSI-X, |
4100 | * this driver does not use multiqueue for them. | | 4157 | * this driver does not use multiqueue for them. |
4101 | * - WM_T_80003 | | 4158 | * - WM_T_80003 |
4102 | * - WM_T_ICH8 | | 4159 | * - WM_T_ICH8 |
4103 | * - WM_T_ICH9 | | 4160 | * - WM_T_ICH9 |
4104 | * - WM_T_ICH10 | | 4161 | * - WM_T_ICH10 |
4105 | * - WM_T_PCH | | 4162 | * - WM_T_PCH |
4106 | * - WM_T_PCH2 | | 4163 | * - WM_T_PCH2 |
4107 | * - WM_T_PCH_LPT | | 4164 | * - WM_T_PCH_LPT |
4108 | */ | | 4165 | */ |
4109 | default: | | 4166 | default: |
4110 | hw_ntxqueues = 1; | | 4167 | hw_ntxqueues = 1; |
4111 | hw_nrxqueues = 1; | | 4168 | hw_nrxqueues = 1; |
4112 | break; | | 4169 | break; |
4113 | } | | 4170 | } |
4114 | | | 4171 | |
4115 | /* | | 4172 | /* |
4116 | * As queues more than MSI-X vectors cannot improve scaling, we limit | | 4173 | * As queues more than MSI-X vectors cannot improve scaling, we limit |
4117 | * the number of queues used actually. | | 4174 | * the number of queues used actually. |
4118 | * | | 4175 | * |
4119 | * XXX | | 4176 | * XXX |
4120 | * Currently, we separate TX queue interrupts and RX queue interrupts. | | 4177 | * Currently, we separate TX queue interrupts and RX queue interrupts. |
4121 | * However, the number of MSI-X vectors of recent controllers (such as | | 4178 | * However, the number of MSI-X vectors of recent controllers (such as |
4122 | * I354) expects that drivers bundle a TX queue interrupt and a RX | | 4179 | * I354) expects that drivers bundle a TX queue interrupt and a RX |
4123 | * interrupt to one interrupt. e.g. FreeBSD's igb deals interrupts in | | 4180 | * interrupt to one interrupt. e.g. FreeBSD's igb deals interrupts in |
4124 | * such a way. | | 4181 | * such a way. |
4125 | */ | | 4182 | */ |
4126 | if (nvectors < hw_ntxqueues + hw_nrxqueues + 1) { | | 4183 | if (nvectors < hw_ntxqueues + hw_nrxqueues + 1) { |
4127 | sc->sc_ntxqueues = (nvectors - 1) / 2; | | 4184 | sc->sc_ntxqueues = (nvectors - 1) / 2; |
4128 | sc->sc_nrxqueues = (nvectors - 1) / 2; | | 4185 | sc->sc_nrxqueues = (nvectors - 1) / 2; |
4129 | } else { | | 4186 | } else { |
4130 | sc->sc_ntxqueues = hw_ntxqueues; | | 4187 | sc->sc_ntxqueues = hw_ntxqueues; |
4131 | sc->sc_nrxqueues = hw_nrxqueues; | | 4188 | sc->sc_nrxqueues = hw_nrxqueues; |
4132 | } | | 4189 | } |
4133 | | | 4190 | |
4134 | /* | | 4191 | /* |
4135 | * As queues more than cpus cannot improve scaling, we limit | | 4192 | * As queues more than cpus cannot improve scaling, we limit |
4136 | * the number of queues used actually. | | 4193 | * the number of queues used actually. |
4137 | */ | | 4194 | */ |
4138 | if (ncpu < sc->sc_ntxqueues) | | 4195 | if (ncpu < sc->sc_ntxqueues) |
4139 | sc->sc_ntxqueues = ncpu; | | 4196 | sc->sc_ntxqueues = ncpu; |
4140 | if (ncpu < sc->sc_nrxqueues) | | 4197 | if (ncpu < sc->sc_nrxqueues) |
4141 | sc->sc_nrxqueues = ncpu; | | 4198 | sc->sc_nrxqueues = ncpu; |
4142 | | | 4199 | |
4143 | /* XXX Currently, this driver supports RX multiqueue only. */ | | 4200 | /* XXX Currently, this driver supports RX multiqueue only. */ |
4144 | sc->sc_ntxqueues = 1; | | 4201 | sc->sc_ntxqueues = 1; |
4145 | } | | 4202 | } |
4146 | | | 4203 | |
4147 | /* | | 4204 | /* |
4148 | * Setup registers for RSS. | | | |
4149 | * | | | |
4150 | * XXX not yet VMDq support | | | |
4151 | */ | | | |
4152 | static void | | | |
4153 | wm_init_rss(struct wm_softc *sc) | | | |
4154 | { | | | |
4155 | uint32_t mrqc, reta_reg; | | | |
4156 | int i; | | | |
4157 | | | | |
4158 | for (i = 0; i < RETA_NUM_ENTRIES; i++) { | | | |
4159 | int qid, reta_ent; | | | |
4160 | | | | |
4161 | qid = i % sc->sc_nrxqueues; | | | |
4162 | switch(sc->sc_type) { | | | |
4163 | case WM_T_82574: | | | |
4164 | reta_ent = __SHIFTIN(qid, | | | |
4165 | RETA_ENT_QINDEX_MASK_82574); | | | |
4166 | break; | | | |
4167 | case WM_T_82575: | | | |
4168 | reta_ent = __SHIFTIN(qid, | | | |
4169 | RETA_ENT_QINDEX1_MASK_82575); | | | |
4170 | break; | | | |
4171 | default: | | | |
4172 | reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK); | | | |
4173 | break; | | | |
4174 | } | | | |
4175 | | | | |
4176 | reta_reg = CSR_READ(sc, WMREG_RETA_Q(i)); | | | |
4177 | reta_reg &= ~RETA_ENTRY_MASK_Q(i); | | | |
4178 | reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i)); | | | |
4179 | CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg); | | | |
4180 | } | | | |
4181 | | | | |
4182 | for (i = 0; i < RSSRK_NUM_REGS; i++) | | | |
4183 | CSR_WRITE(sc, WMREG_RSSRK(i), (uint32_t)random()); | | | |
4184 | | | | |
4185 | if (sc->sc_type == WM_T_82574) | | | |
4186 | mrqc = MRQC_ENABLE_RSS_MQ_82574; | | | |
4187 | else | | | |
4188 | mrqc = MRQC_ENABLE_RSS_MQ; | | | |
4189 | | | | |
4190 | /* XXXX | | | |
4191 | * The same as FreeBSD igb. | | | |
4192 | * Why doesn't it use MRQC_RSS_FIELD_IPV6_EX? | | |
4193 | */ | | | |
4194 | mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP); | | | |
4195 | mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP); | | | |
4196 | mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP); | | | |
4197 | mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX); | | | |
4198 | | | | |
4199 | CSR_WRITE(sc, WMREG_MRQC, mrqc); | | | |
4200 | } | | | |
4201 | | | | |
4202 | #ifdef WM_MSI_MSIX | | | |
4203 | /* | | | |
4204 | * Both single interrupt MSI and INTx can use this function. | | 4205 | * Both single interrupt MSI and INTx can use this function. |
4205 | */ | | 4206 | */ |
4206 | static int | | 4207 | static int |
4207 | wm_setup_legacy(struct wm_softc *sc) | | 4208 | wm_setup_legacy(struct wm_softc *sc) |
4208 | { | | 4209 | { |
4209 | pci_chipset_tag_t pc = sc->sc_pc; | | 4210 | pci_chipset_tag_t pc = sc->sc_pc; |
4210 | const char *intrstr = NULL; | | 4211 | const char *intrstr = NULL; |
4211 | char intrbuf[PCI_INTRSTR_LEN]; | | 4212 | char intrbuf[PCI_INTRSTR_LEN]; |
4212 | | | 4213 | |
4213 | intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf, | | 4214 | intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf, |
4214 | sizeof(intrbuf)); | | 4215 | sizeof(intrbuf)); |
4215 | #ifdef WM_MPSAFE | | 4216 | #ifdef WM_MPSAFE |
4216 | pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true); | | 4217 | pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true); |
4217 | #endif | | 4218 | #endif |
4218 | sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0], | | 4219 | sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0], |
4219 | IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev)); | | 4220 | IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev)); |
4220 | if (sc->sc_ihs[0] == NULL) { | | 4221 | if (sc->sc_ihs[0] == NULL) { |
4221 | aprint_error_dev(sc->sc_dev,"unable to establish %s\n", | | 4222 | aprint_error_dev(sc->sc_dev,"unable to establish %s\n", |
4222 | (pci_intr_type(sc->sc_intrs[0]) | | 4223 | (pci_intr_type(sc->sc_intrs[0]) |
4223 | == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx"); | | 4224 | == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx"); |
4224 | return ENOMEM; | | 4225 | return ENOMEM; |
4225 | } | | 4226 | } |
4226 | | | 4227 | |
4227 | aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr); | | 4228 | aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr); |
4228 | sc->sc_nintrs = 1; | | 4229 | sc->sc_nintrs = 1; |
4229 | return 0; | | 4230 | return 0; |
4230 | } | | 4231 | } |
4231 | | | 4232 | |
4232 | static int | | 4233 | static int |
4233 | wm_setup_msix(struct wm_softc *sc) | | 4234 | wm_setup_msix(struct wm_softc *sc) |
4234 | { | | 4235 | { |
4235 | void *vih; | | 4236 | void *vih; |
4236 | kcpuset_t *affinity; | | 4237 | kcpuset_t *affinity; |
4237 | int qidx, error, intr_idx, tx_established, rx_established; | | 4238 | int qidx, error, intr_idx, tx_established, rx_established; |
4238 | pci_chipset_tag_t pc = sc->sc_pc; | | 4239 | pci_chipset_tag_t pc = sc->sc_pc; |
4239 | const char *intrstr = NULL; | | 4240 | const char *intrstr = NULL; |
4240 | char intrbuf[PCI_INTRSTR_LEN]; | | 4241 | char intrbuf[PCI_INTRSTR_LEN]; |
4241 | char intr_xname[INTRDEVNAMEBUF]; | | 4242 | char intr_xname[INTRDEVNAMEBUF]; |
4242 | | | 4243 | |
4243 | kcpuset_create(&affinity, false); | | 4244 | kcpuset_create(&affinity, false); |
4244 | intr_idx = 0; | | 4245 | intr_idx = 0; |
4245 | | | 4246 | |
4246 | /* | | 4247 | /* |
4247 | * TX | | 4248 | * TX |
4248 | */ | | 4249 | */ |
4249 | tx_established = 0; | | 4250 | tx_established = 0; |
4250 | for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) { | | 4251 | for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) { |
4251 | struct wm_txqueue *txq = &sc->sc_txq[qidx]; | | 4252 | struct wm_txqueue *txq = &sc->sc_txq[qidx]; |
4252 | | | 4253 | |
4253 | intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf, | | 4254 | intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf, |
4254 | sizeof(intrbuf)); | | 4255 | sizeof(intrbuf)); |
4255 | #ifdef WM_MPSAFE | | 4256 | #ifdef WM_MPSAFE |
4256 | pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], | | 4257 | pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], |
4257 | PCI_INTR_MPSAFE, true); | | 4258 | PCI_INTR_MPSAFE, true); |
4258 | #endif | | 4259 | #endif |
4259 | memset(intr_xname, 0, sizeof(intr_xname)); | | 4260 | memset(intr_xname, 0, sizeof(intr_xname)); |
4260 | snprintf(intr_xname, sizeof(intr_xname), "%sTX%d", | | 4261 | snprintf(intr_xname, sizeof(intr_xname), "%sTX%d", |
4261 | device_xname(sc->sc_dev), qidx); | | 4262 | device_xname(sc->sc_dev), qidx); |
4262 | vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx], | | 4263 | vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx], |
4263 | IPL_NET, wm_txintr_msix, txq, intr_xname); | | 4264 | IPL_NET, wm_txintr_msix, txq, intr_xname); |
4264 | if (vih == NULL) { | | 4265 | if (vih == NULL) { |
4265 | aprint_error_dev(sc->sc_dev, | | 4266 | aprint_error_dev(sc->sc_dev, |
4266 | "unable to establish MSI-X(for TX)%s%s\n", | | 4267 | "unable to establish MSI-X(for TX)%s%s\n", |
4267 | intrstr ? " at " : "", | | 4268 | intrstr ? " at " : "", |
4268 | intrstr ? intrstr : ""); | | 4269 | intrstr ? intrstr : ""); |
4269 | | | 4270 | |
4270 | goto fail_0; | | 4271 | goto fail_0; |
4271 | } | | 4272 | } |
4272 | kcpuset_zero(affinity); | | 4273 | kcpuset_zero(affinity); |
4273 | /* Round-robin affinity */ | | 4274 | /* Round-robin affinity */ |
4274 | kcpuset_set(affinity, intr_idx % ncpu); | | 4275 | kcpuset_set(affinity, intr_idx % ncpu); |
4275 | error = interrupt_distribute(vih, affinity, NULL); | | 4276 | error = interrupt_distribute(vih, affinity, NULL); |
4276 | if (error == 0) { | | 4277 | if (error == 0) { |
4277 | aprint_normal_dev(sc->sc_dev, | | 4278 | aprint_normal_dev(sc->sc_dev, |
4278 | "for TX interrupting at %s affinity to %u\n", | | 4279 | "for TX interrupting at %s affinity to %u\n", |
4279 | intrstr, intr_idx % ncpu); | | 4280 | intrstr, intr_idx % ncpu); |
4280 | } else { | | 4281 | } else { |
4281 | aprint_normal_dev(sc->sc_dev, | | 4282 | aprint_normal_dev(sc->sc_dev, |
4282 | "for TX interrupting at %s\n", intrstr); | | 4283 | "for TX interrupting at %s\n", intrstr); |
4283 | } | | 4284 | } |
4284 | sc->sc_ihs[intr_idx] = vih; | | 4285 | sc->sc_ihs[intr_idx] = vih; |
4285 | txq->txq_id = qidx; | | 4286 | txq->txq_id = qidx; |
4286 | txq->txq_intr_idx = intr_idx; | | 4287 | txq->txq_intr_idx = intr_idx; |
4287 | | | 4288 | |
4288 | tx_established++; | | 4289 | tx_established++; |
4289 | intr_idx++; | | 4290 | intr_idx++; |
4290 | } | | 4291 | } |
4291 | | | 4292 | |
4292 | /* | | 4293 | /* |
4293 | * RX | | 4294 | * RX |
4294 | */ | | 4295 | */ |
4295 | rx_established = 0; | | 4296 | rx_established = 0; |
4296 | for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) { | | 4297 | for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) { |
4297 | struct wm_rxqueue *rxq = &sc->sc_rxq[qidx]; | | 4298 | struct wm_rxqueue *rxq = &sc->sc_rxq[qidx]; |
4298 | | | 4299 | |
4299 | intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf, | | 4300 | intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf, |
4300 | sizeof(intrbuf)); | | 4301 | sizeof(intrbuf)); |
4301 | #ifdef WM_MPSAFE | | 4302 | #ifdef WM_MPSAFE |
4302 | pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], | | 4303 | pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], |
4303 | PCI_INTR_MPSAFE, true); | | 4304 | PCI_INTR_MPSAFE, true); |
4304 | #endif | | 4305 | #endif |
4305 | memset(intr_xname, 0, sizeof(intr_xname)); | | 4306 | memset(intr_xname, 0, sizeof(intr_xname)); |
4306 | snprintf(intr_xname, sizeof(intr_xname), "%sRX%d", | | 4307 | snprintf(intr_xname, sizeof(intr_xname), "%sRX%d", |
4307 | device_xname(sc->sc_dev), qidx); | | 4308 | device_xname(sc->sc_dev), qidx); |
4308 | vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx], | | 4309 | vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx], |
4309 | IPL_NET, wm_rxintr_msix, rxq, intr_xname); | | 4310 | IPL_NET, wm_rxintr_msix, rxq, intr_xname); |
4310 | if (vih == NULL) { | | 4311 | if (vih == NULL) { |
4311 | aprint_error_dev(sc->sc_dev, | | 4312 | aprint_error_dev(sc->sc_dev, |
4312 | "unable to establish MSI-X(for RX)%s%s\n", | | 4313 | "unable to establish MSI-X(for RX)%s%s\n", |
4313 | intrstr ? " at " : "", | | 4314 | intrstr ? " at " : "", |
4314 | intrstr ? intrstr : ""); | | 4315 | intrstr ? intrstr : ""); |
4315 | | | 4316 | |
4316 | goto fail_1; | | 4317 | goto fail_1; |
4317 | } | | 4318 | } |
4318 | kcpuset_zero(affinity); | | 4319 | kcpuset_zero(affinity); |
4319 | /* Round-robin affinity */ | | 4320 | /* Round-robin affinity */ |
4320 | kcpuset_set(affinity, intr_idx % ncpu); | | 4321 | kcpuset_set(affinity, intr_idx % ncpu); |
4321 | error = interrupt_distribute(vih, affinity, NULL); | | 4322 | error = interrupt_distribute(vih, affinity, NULL); |
4322 | if (error == 0) { | | 4323 | if (error == 0) { |
4323 | aprint_normal_dev(sc->sc_dev, | | 4324 | aprint_normal_dev(sc->sc_dev, |
4324 | "for RX interrupting at %s affinity to %u\n", | | 4325 | "for RX interrupting at %s affinity to %u\n", |
4325 | intrstr, intr_idx % ncpu); | | 4326 | intrstr, intr_idx % ncpu); |
4326 | } else { | | 4327 | } else { |
4327 | aprint_normal_dev(sc->sc_dev, | | 4328 | aprint_normal_dev(sc->sc_dev, |
4328 | "for RX interrupting at %s\n", intrstr); | | 4329 | "for RX interrupting at %s\n", intrstr); |
4329 | } | | 4330 | } |
4330 | sc->sc_ihs[intr_idx] = vih; | | 4331 | sc->sc_ihs[intr_idx] = vih; |
4331 | rxq->rxq_id = qidx; | | 4332 | rxq->rxq_id = qidx; |
4332 | rxq->rxq_intr_idx = intr_idx; | | 4333 | rxq->rxq_intr_idx = intr_idx; |
4333 | | | 4334 | |
4334 | rx_established++; | | 4335 | rx_established++; |
4335 | intr_idx++; | | 4336 | intr_idx++; |
4336 | } | | 4337 | } |
4337 | | | 4338 | |
4338 | /* | | 4339 | /* |
4339 | * LINK | | 4340 | * LINK |
4340 | */ | | 4341 | */ |
4341 | intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf, | | 4342 | intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf, |
4342 | sizeof(intrbuf)); | | 4343 | sizeof(intrbuf)); |
4343 | #ifdef WM_MPSAFE | | 4344 | #ifdef WM_MPSAFE |
4344 | pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], | | 4345 | pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], |
4345 | PCI_INTR_MPSAFE, true); | | 4346 | PCI_INTR_MPSAFE, true); |
4346 | #endif | | 4347 | #endif |
4347 | memset(intr_xname, 0, sizeof(intr_xname)); | | 4348 | memset(intr_xname, 0, sizeof(intr_xname)); |
4348 | snprintf(intr_xname, sizeof(intr_xname), "%sLINK", | | 4349 | snprintf(intr_xname, sizeof(intr_xname), "%sLINK", |
4349 | device_xname(sc->sc_dev)); | | 4350 | device_xname(sc->sc_dev)); |
4350 | vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx], | | 4351 | vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx], |
4351 | IPL_NET, wm_linkintr_msix, sc, intr_xname); | | 4352 | IPL_NET, wm_linkintr_msix, sc, intr_xname); |
4352 | if (vih == NULL) { | | 4353 | if (vih == NULL) { |
4353 | aprint_error_dev(sc->sc_dev, | | 4354 | aprint_error_dev(sc->sc_dev, |
4354 | "unable to establish MSI-X(for LINK)%s%s\n", | | 4355 | "unable to establish MSI-X(for LINK)%s%s\n", |
4355 | intrstr ? " at " : "", | | 4356 | intrstr ? " at " : "", |
4356 | intrstr ? intrstr : ""); | | 4357 | intrstr ? intrstr : ""); |
4357 | | | 4358 | |
4358 | goto fail_1; | | 4359 | goto fail_1; |
4359 | } | | 4360 | } |
4360 | /* keep default affinity to LINK interrupt */ | | 4361 | /* keep default affinity to LINK interrupt */ |
4361 | aprint_normal_dev(sc->sc_dev, | | 4362 | aprint_normal_dev(sc->sc_dev, |
4362 | "for LINK interrupting at %s\n", intrstr); | | 4363 | "for LINK interrupting at %s\n", intrstr); |
4363 | sc->sc_ihs[intr_idx] = vih; | | 4364 | sc->sc_ihs[intr_idx] = vih; |
4364 | sc->sc_link_intr_idx = intr_idx; | | 4365 | sc->sc_link_intr_idx = intr_idx; |
4365 | | | 4366 | |
4366 | sc->sc_nintrs = sc->sc_ntxqueues + sc->sc_nrxqueues + 1; | | 4367 | sc->sc_nintrs = sc->sc_ntxqueues + sc->sc_nrxqueues + 1; |
4367 | kcpuset_destroy(affinity); | | 4368 | kcpuset_destroy(affinity); |
4368 | return 0; | | 4369 | return 0; |
4369 | | | 4370 | |
4370 | fail_1: | | 4371 | fail_1: |
4371 | for (qidx = 0; qidx < rx_established; qidx++) { | | 4372 | for (qidx = 0; qidx < rx_established; qidx++) { |
4372 | struct wm_rxqueue *rxq = &sc->sc_rxq[qidx]; | | 4373 | struct wm_rxqueue *rxq = &sc->sc_rxq[qidx]; |
4373 | pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[rxq->rxq_intr_idx]); | | 4374 | pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[rxq->rxq_intr_idx]); |
4374 | sc->sc_ihs[rxq->rxq_intr_idx] = NULL; | | 4375 | sc->sc_ihs[rxq->rxq_intr_idx] = NULL; |
4375 | } | | 4376 | } |
4376 | fail_0: | | 4377 | fail_0: |
4377 | for (qidx = 0; qidx < tx_established; qidx++) { | | 4378 | for (qidx = 0; qidx < tx_established; qidx++) { |
4378 | struct wm_txqueue *txq = &sc->sc_txq[qidx]; | | 4379 | struct wm_txqueue *txq = &sc->sc_txq[qidx]; |
4379 | pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[txq->txq_intr_idx]); | | 4380 | pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[txq->txq_intr_idx]); |
4380 | sc->sc_ihs[txq->txq_intr_idx] = NULL; | | 4381 | sc->sc_ihs[txq->txq_intr_idx] = NULL; |
4381 | } | | 4382 | } |
4382 | | | 4383 | |
4383 | kcpuset_destroy(affinity); | | 4384 | kcpuset_destroy(affinity); |
4384 | return ENOMEM; | | 4385 | return ENOMEM; |
4385 | } | | 4386 | } |
4386 | #endif | | 4387 | #endif |
4387 | | | 4388 | |
4388 | /* | | 4389 | /* |
4389 | * wm_init: [ifnet interface function] | | 4390 | * wm_init: [ifnet interface function] |
4390 | * | | 4391 | * |
4391 | * Initialize the interface. | | 4392 | * Initialize the interface. |
4392 | */ | | 4393 | */ |
4393 | static int | | 4394 | static int |
4394 | wm_init(struct ifnet *ifp) | | 4395 | wm_init(struct ifnet *ifp) |
4395 | { | | 4396 | { |
4396 | struct wm_softc *sc = ifp->if_softc; | | 4397 | struct wm_softc *sc = ifp->if_softc; |
4397 | int ret; | | 4398 | int ret; |
4398 | | | 4399 | |
4399 | WM_CORE_LOCK(sc); | | 4400 | WM_CORE_LOCK(sc); |
4400 | ret = wm_init_locked(ifp); | | 4401 | ret = wm_init_locked(ifp); |
4401 | WM_CORE_UNLOCK(sc); | | 4402 | WM_CORE_UNLOCK(sc); |
4402 | | | 4403 | |
4403 | return ret; | | 4404 | return ret; |
4404 | } | | 4405 | } |
4405 | | | 4406 | |
4406 | static int | | 4407 | static int |
4407 | wm_init_locked(struct ifnet *ifp) | | 4408 | wm_init_locked(struct ifnet *ifp) |
4408 | { | | 4409 | { |
4409 | struct wm_softc *sc = ifp->if_softc; | | 4410 | struct wm_softc *sc = ifp->if_softc; |
4410 | int i, j, trynum, error = 0; | | 4411 | int i, j, trynum, error = 0; |
4411 | uint32_t reg; | | 4412 | uint32_t reg; |
4412 | | | 4413 | |
4413 | KASSERT(WM_CORE_LOCKED(sc)); | | 4414 | KASSERT(WM_CORE_LOCKED(sc)); |
4414 | /* | | 4415 | /* |
4415 | * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set. | | 4416 | * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set. |
4416 | * There is a small but measurable benefit to avoiding the adjustment | | 4417 | * There is a small but measurable benefit to avoiding the adjustment |
4417 | * of the descriptor so that the headers are aligned, for normal mtu, | | 4418 | * of the descriptor so that the headers are aligned, for normal mtu, |
4418 | * on such platforms. One possibility is that the DMA itself is | | 4419 | * on such platforms. One possibility is that the DMA itself is |
4419 | * slightly more efficient if the front of the entire packet (instead | | 4420 | * slightly more efficient if the front of the entire packet (instead |
4420 | * of the front of the headers) is aligned. | | 4421 | * of the front of the headers) is aligned. |
4421 | * | | 4422 | * |
4422 | * Note we must always set align_tweak to 0 if we are using | | 4423 | * Note we must always set align_tweak to 0 if we are using |
4423 | * jumbo frames. | | 4424 | * jumbo frames. |
4424 | */ | | 4425 | */ |
4425 | #ifdef __NO_STRICT_ALIGNMENT | | 4426 | #ifdef __NO_STRICT_ALIGNMENT |
4426 | sc->sc_align_tweak = 0; | | 4427 | sc->sc_align_tweak = 0; |
4427 | #else | | 4428 | #else |
4428 | if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2)) | | 4429 | if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2)) |
4429 | sc->sc_align_tweak = 0; | | 4430 | sc->sc_align_tweak = 0; |
4430 | else | | 4431 | else |
4431 | sc->sc_align_tweak = 2; | | 4432 | sc->sc_align_tweak = 2; |
4432 | #endif /* __NO_STRICT_ALIGNMENT */ | | 4433 | #endif /* __NO_STRICT_ALIGNMENT */ |
4433 | | | 4434 | |
4434 | /* Cancel any pending I/O. */ | | 4435 | /* Cancel any pending I/O. */ |
4435 | wm_stop_locked(ifp, 0); | | 4436 | wm_stop_locked(ifp, 0); |
4436 | | | 4437 | |
4437 | /* update statistics before reset */ | | 4438 | /* update statistics before reset */ |
4438 | ifp->if_collisions += CSR_READ(sc, WMREG_COLC); | | 4439 | ifp->if_collisions += CSR_READ(sc, WMREG_COLC); |
4439 | ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC); | | 4440 | ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC); |
4440 | | | 4441 | |
4441 | /* Reset the chip to a known state. */ | | 4442 | /* Reset the chip to a known state. */ |
4442 | wm_reset(sc); | | 4443 | wm_reset(sc); |
4443 | | | 4444 | |
4444 | switch (sc->sc_type) { | | 4445 | switch (sc->sc_type) { |
4445 | case WM_T_82571: | | 4446 | case WM_T_82571: |
4446 | case WM_T_82572: | | 4447 | case WM_T_82572: |
4447 | case WM_T_82573: | | 4448 | case WM_T_82573: |
4448 | case WM_T_82574: | | 4449 | case WM_T_82574: |
4449 | case WM_T_82583: | | 4450 | case WM_T_82583: |
4450 | case WM_T_80003: | | 4451 | case WM_T_80003: |
4451 | case WM_T_ICH8: | | 4452 | case WM_T_ICH8: |
4452 | case WM_T_ICH9: | | 4453 | case WM_T_ICH9: |
4453 | case WM_T_ICH10: | | 4454 | case WM_T_ICH10: |
4454 | case WM_T_PCH: | | 4455 | case WM_T_PCH: |
4455 | case WM_T_PCH2: | | 4456 | case WM_T_PCH2: |
4456 | case WM_T_PCH_LPT: | | 4457 | case WM_T_PCH_LPT: |
4457 | if (wm_check_mng_mode(sc) != 0) | | 4458 | if (wm_check_mng_mode(sc) != 0) |
4458 | wm_get_hw_control(sc); | | 4459 | wm_get_hw_control(sc); |
4459 | break; | | 4460 | break; |
4460 | default: | | 4461 | default: |
4461 | break; | | 4462 | break; |
4462 | } | | 4463 | } |
4463 | | | 4464 | |
4464 | /* Init hardware bits */ | | 4465 | /* Init hardware bits */ |
4465 | wm_initialize_hardware_bits(sc); | | 4466 | wm_initialize_hardware_bits(sc); |
4466 | | | 4467 | |
4467 | /* Reset the PHY. */ | | 4468 | /* Reset the PHY. */ |
4468 | if (sc->sc_flags & WM_F_HAS_MII) | | 4469 | if (sc->sc_flags & WM_F_HAS_MII) |
4469 | wm_gmii_reset(sc); | | 4470 | wm_gmii_reset(sc); |
4470 | | | 4471 | |
4471 | /* Calculate (E)ITR value */ | | 4472 | /* Calculate (E)ITR value */ |
4472 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { | | 4473 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { |
4473 | sc->sc_itr = 450; /* For EITR */ | | 4474 | sc->sc_itr = 450; /* For EITR */ |
4474 | } else if (sc->sc_type >= WM_T_82543) { | | 4475 | } else if (sc->sc_type >= WM_T_82543) { |
4475 | /* | | 4476 | /* |
4476 | * Set up the interrupt throttling register (units of 256ns) | | 4477 | * Set up the interrupt throttling register (units of 256ns) |
4477 | * Note that a footnote in Intel's documentation says this | | 4478 | * Note that a footnote in Intel's documentation says this |
4478 | * ticker runs at 1/4 the rate when the chip is in 100Mbit | | 4479 | * ticker runs at 1/4 the rate when the chip is in 100Mbit |
4479 | * or 10Mbit mode. Empirically, it appears to be the case | | 4480 | * or 10Mbit mode. Empirically, it appears to be the case |
4480 | * that that is also true for the 1024ns units of the other | | 4481 | * that that is also true for the 1024ns units of the other |
4481 | * interrupt-related timer registers -- so, really, we ought | | 4482 | * interrupt-related timer registers -- so, really, we ought |
4482 | * to divide this value by 4 when the link speed is low. | | 4483 | * to divide this value by 4 when the link speed is low. |
4483 | * | | 4484 | * |
4484 | * XXX implement this division at link speed change! | | 4485 | * XXX implement this division at link speed change! |
4485 | */ | | 4486 | */ |
4486 | | | 4487 | |
4487 | /* | | 4488 | /* |
4488 | * For N interrupts/sec, set this value to: | | 4489 | * For N interrupts/sec, set this value to: |
4489 | * 1000000000 / (N * 256). Note that we set the | | 4490 | * 1000000000 / (N * 256). Note that we set the |
4490 | * absolute and packet timer values to this value | | 4491 | * absolute and packet timer values to this value |
4491 | * divided by 4 to get "simple timer" behavior. | | 4492 | * divided by 4 to get "simple timer" behavior. |
4492 | */ | | 4493 | */ |
4493 | | | 4494 | |
4494 | sc->sc_itr = 1500; /* 2604 ints/sec */ | | 4495 | sc->sc_itr = 1500; /* 2604 ints/sec */ |
4495 | } | | 4496 | } |
4496 | | | 4497 | |
4497 | error = wm_init_txrx_queues(sc); | | 4498 | error = wm_init_txrx_queues(sc); |
4498 | if (error) | | 4499 | if (error) |
4499 | goto out; | | 4500 | goto out; |
4500 | | | 4501 | |
4501 | /* | | 4502 | /* |
4502 | * Clear out the VLAN table -- we don't use it (yet). | | 4503 | * Clear out the VLAN table -- we don't use it (yet). |
4503 | */ | | 4504 | */ |
4504 | CSR_WRITE(sc, WMREG_VET, 0); | | 4505 | CSR_WRITE(sc, WMREG_VET, 0); |
4505 | if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) | | 4506 | if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) |
4506 | trynum = 10; /* Due to hw errata */ | | 4507 | trynum = 10; /* Due to hw errata */ |
4507 | else | | 4508 | else |
4508 | trynum = 1; | | 4509 | trynum = 1; |
4509 | for (i = 0; i < WM_VLAN_TABSIZE; i++) | | 4510 | for (i = 0; i < WM_VLAN_TABSIZE; i++) |
4510 | for (j = 0; j < trynum; j++) | | 4511 | for (j = 0; j < trynum; j++) |
4511 | CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0); | | 4512 | CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0); |
4512 | | | 4513 | |
4513 | /* | | 4514 | /* |
4514 | * Set up flow-control parameters. | | 4515 | * Set up flow-control parameters. |
4515 | * | | 4516 | * |
4516 | * XXX Values could probably stand some tuning. | | 4517 | * XXX Values could probably stand some tuning. |
4517 | */ | | 4518 | */ |
4518 | if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9) | | 4519 | if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9) |
4519 | && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH) | | 4520 | && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH) |
4520 | && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) { | | 4521 | && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) { |
4521 | CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST); | | 4522 | CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST); |
4522 | CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST); | | 4523 | CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST); |
4523 | CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL); | | 4524 | CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL); |
4524 | } | | 4525 | } |
4525 | | | 4526 | |
4526 | sc->sc_fcrtl = FCRTL_DFLT; | | 4527 | sc->sc_fcrtl = FCRTL_DFLT; |
4527 | if (sc->sc_type < WM_T_82543) { | | 4528 | if (sc->sc_type < WM_T_82543) { |
4528 | CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT); | | 4529 | CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT); |
4529 | CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl); | | 4530 | CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl); |
4530 | } else { | | 4531 | } else { |
4531 | CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT); | | 4532 | CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT); |
4532 | CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl); | | 4533 | CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl); |
4533 | } | | 4534 | } |
4534 | | | 4535 | |
4535 | if (sc->sc_type == WM_T_80003) | | 4536 | if (sc->sc_type == WM_T_80003) |
4536 | CSR_WRITE(sc, WMREG_FCTTV, 0xffff); | | 4537 | CSR_WRITE(sc, WMREG_FCTTV, 0xffff); |
4537 | else | | 4538 | else |
4538 | CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT); | | 4539 | CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT); |
4539 | | | 4540 | |
4540 | /* Writes the control register. */ | | 4541 | /* Writes the control register. */ |
4541 | wm_set_vlan(sc); | | 4542 | wm_set_vlan(sc); |
4542 | | | 4543 | |
4543 | if (sc->sc_flags & WM_F_HAS_MII) { | | 4544 | if (sc->sc_flags & WM_F_HAS_MII) { |
4544 | int val; | | 4545 | int val; |
4545 | | | 4546 | |
4546 | switch (sc->sc_type) { | | 4547 | switch (sc->sc_type) { |
4547 | case WM_T_80003: | | 4548 | case WM_T_80003: |
4548 | case WM_T_ICH8: | | 4549 | case WM_T_ICH8: |
4549 | case WM_T_ICH9: | | 4550 | case WM_T_ICH9: |
4550 | case WM_T_ICH10: | | 4551 | case WM_T_ICH10: |
4551 | case WM_T_PCH: | | 4552 | case WM_T_PCH: |
4552 | case WM_T_PCH2: | | 4553 | case WM_T_PCH2: |
4553 | case WM_T_PCH_LPT: | | 4554 | case WM_T_PCH_LPT: |
4554 | /* | | 4555 | /* |
4555 | * Set the mac to wait the maximum time between each | | 4556 | * Set the mac to wait the maximum time between each |
4556 | * iteration and increase the max iterations when | | 4557 | * iteration and increase the max iterations when |
4557 | * polling the phy; this fixes erroneous timeouts at | | 4558 | * polling the phy; this fixes erroneous timeouts at |
4558 | * 10Mbps. | | 4559 | * 10Mbps. |
4559 | */ | | 4560 | */ |
4560 | wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS, | | 4561 | wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS, |
4561 | 0xFFFF); | | 4562 | 0xFFFF); |
4562 | val = wm_kmrn_readreg(sc, | | 4563 | val = wm_kmrn_readreg(sc, |
4563 | KUMCTRLSTA_OFFSET_INB_PARAM); | | 4564 | KUMCTRLSTA_OFFSET_INB_PARAM); |
4564 | val |= 0x3F; | | 4565 | val |= 0x3F; |
4565 | wm_kmrn_writereg(sc, | | 4566 | wm_kmrn_writereg(sc, |
4566 | KUMCTRLSTA_OFFSET_INB_PARAM, val); | | 4567 | KUMCTRLSTA_OFFSET_INB_PARAM, val); |
4567 | break; | | 4568 | break; |
4568 | default: | | 4569 | default: |
4569 | break; | | 4570 | break; |
4570 | } | | 4571 | } |
4571 | | | 4572 | |
4572 | if (sc->sc_type == WM_T_80003) { | | 4573 | if (sc->sc_type == WM_T_80003) { |
4573 | val = CSR_READ(sc, WMREG_CTRL_EXT); | | 4574 | val = CSR_READ(sc, WMREG_CTRL_EXT); |
4574 | val &= ~CTRL_EXT_LINK_MODE_MASK; | | 4575 | val &= ~CTRL_EXT_LINK_MODE_MASK; |
4575 | CSR_WRITE(sc, WMREG_CTRL_EXT, val); | | 4576 | CSR_WRITE(sc, WMREG_CTRL_EXT, val); |
4576 | | | 4577 | |
4577 | /* Bypass RX and TX FIFO's */ | | 4578 | /* Bypass RX and TX FIFO's */ |
4578 | wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL, | | 4579 | wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL, |
4579 | KUMCTRLSTA_FIFO_CTRL_RX_BYPASS | | 4580 | KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
4580 | | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS); | | 4581 | | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS); |
4581 | wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL, | | 4582 | wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL, |
4582 | KUMCTRLSTA_INB_CTRL_DIS_PADDING | | | 4583 | KUMCTRLSTA_INB_CTRL_DIS_PADDING | |
4583 | KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT); | | 4584 | KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT); |
4584 | } | | 4585 | } |
4585 | } | | 4586 | } |
4586 | #if 0 | | 4587 | #if 0 |
4587 | CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext); | | 4588 | CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext); |
4588 | #endif | | 4589 | #endif |
4589 | | | 4590 | |
4590 | /* Set up checksum offload parameters. */ | | 4591 | /* Set up checksum offload parameters. */ |
4591 | reg = CSR_READ(sc, WMREG_RXCSUM); | | 4592 | reg = CSR_READ(sc, WMREG_RXCSUM); |
4592 | reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL); | | 4593 | reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL); |
4593 | if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) | | 4594 | if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) |
4594 | reg |= RXCSUM_IPOFL; | | 4595 | reg |= RXCSUM_IPOFL; |
4595 | if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx)) | | 4596 | if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx)) |
4596 | reg |= RXCSUM_IPOFL | RXCSUM_TUOFL; | | 4597 | reg |= RXCSUM_IPOFL | RXCSUM_TUOFL; |
4597 | if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx)) | | 4598 | if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx)) |
4598 | reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL; | | 4599 | reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL; |
4599 | CSR_WRITE(sc, WMREG_RXCSUM, reg); | | 4600 | CSR_WRITE(sc, WMREG_RXCSUM, reg); |
4600 | | | 4601 | |
4601 | /* Set up MSI-X */ | | 4602 | /* Set up MSI-X */ |
4602 | if (sc->sc_nintrs > 1) { | | 4603 | if (sc->sc_nintrs > 1) { |
4603 | uint32_t ivar; | | 4604 | uint32_t ivar; |
4604 | | | 4605 | |
4605 | if (sc->sc_type == WM_T_82575) { | | 4606 | if (sc->sc_type == WM_T_82575) { |
4606 | /* Interrupt control */ | | 4607 | /* Interrupt control */ |
4607 | reg = CSR_READ(sc, WMREG_CTRL_EXT); | | 4608 | reg = CSR_READ(sc, WMREG_CTRL_EXT); |
4608 | reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR; | | 4609 | reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR; |
4609 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg); | | 4610 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg); |
4610 | | | 4611 | |
4611 | /* TX */ | | 4612 | /* TX */ |
4612 | for (i = 0; i < sc->sc_ntxqueues; i++) { | | 4613 | for (i = 0; i < sc->sc_ntxqueues; i++) { |
4613 | struct wm_txqueue *txq = &sc->sc_txq[i]; | | 4614 | struct wm_txqueue *txq = &sc->sc_txq[i]; |
4614 | CSR_WRITE(sc, WMREG_MSIXBM(txq->txq_intr_idx), | | 4615 | CSR_WRITE(sc, WMREG_MSIXBM(txq->txq_intr_idx), |
4615 | EITR_TX_QUEUE(txq->txq_id)); | | 4616 | EITR_TX_QUEUE(txq->txq_id)); |
4616 | } | | 4617 | } |
4617 | /* RX */ | | 4618 | /* RX */ |
4618 | for (i = 0; i < sc->sc_nrxqueues; i++) { | | 4619 | for (i = 0; i < sc->sc_nrxqueues; i++) { |
4619 | struct wm_rxqueue *rxq = &sc->sc_rxq[i]; | | 4620 | struct wm_rxqueue *rxq = &sc->sc_rxq[i]; |
4620 | CSR_WRITE(sc, WMREG_MSIXBM(rxq->rxq_intr_idx), | | 4621 | CSR_WRITE(sc, WMREG_MSIXBM(rxq->rxq_intr_idx), |
4621 | EITR_RX_QUEUE(rxq->rxq_id)); | | 4622 | EITR_RX_QUEUE(rxq->rxq_id)); |
4622 | } | | 4623 | } |
4623 | /* Link status */ | | 4624 | /* Link status */ |
4624 | CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx), | | 4625 | CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx), |
4625 | EITR_OTHER); | | 4626 | EITR_OTHER); |
4626 | } else if (sc->sc_type == WM_T_82574) { | | 4627 | } else if (sc->sc_type == WM_T_82574) { |
4627 | /* Interrupt control */ | | 4628 | /* Interrupt control */ |
4628 | reg = CSR_READ(sc, WMREG_CTRL_EXT); | | 4629 | reg = CSR_READ(sc, WMREG_CTRL_EXT); |
4629 | reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME; | | 4630 | reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME; |
4630 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg); | | 4631 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg); |
4631 | | | 4632 | |
4632 | ivar = 0; | | 4633 | ivar = 0; |
4633 | /* TX */ | | 4634 | /* TX */ |
4634 | for (i = 0; i < sc->sc_ntxqueues; i++) { | | 4635 | for (i = 0; i < sc->sc_ntxqueues; i++) { |
4635 | struct wm_txqueue *txq = &sc->sc_txq[i]; | | 4636 | struct wm_txqueue *txq = &sc->sc_txq[i]; |
4636 | ivar |= __SHIFTIN((IVAR_VALID_82574|txq->txq_intr_idx), | | 4637 | ivar |= __SHIFTIN((IVAR_VALID_82574|txq->txq_intr_idx), |
4637 | IVAR_TX_MASK_Q_82574(txq->txq_id)); | | 4638 | IVAR_TX_MASK_Q_82574(txq->txq_id)); |
4638 | } | | 4639 | } |
4639 | /* RX */ | | 4640 | /* RX */ |
4640 | for (i = 0; i < sc->sc_nrxqueues; i++) { | | 4641 | for (i = 0; i < sc->sc_nrxqueues; i++) { |
4641 | struct wm_rxqueue *rxq = &sc->sc_rxq[i]; | | 4642 | struct wm_rxqueue *rxq = &sc->sc_rxq[i]; |
4642 | ivar |= __SHIFTIN((IVAR_VALID_82574|rxq->rxq_intr_idx), | | 4643 | ivar |= __SHIFTIN((IVAR_VALID_82574|rxq->rxq_intr_idx), |
4643 | IVAR_RX_MASK_Q_82574(rxq->rxq_id)); | | 4644 | IVAR_RX_MASK_Q_82574(rxq->rxq_id)); |
4644 | } | | 4645 | } |
4645 | /* Link status */ | | 4646 | /* Link status */ |
4646 | ivar |= __SHIFTIN((IVAR_VALID_82574|sc->sc_link_intr_idx), | | 4647 | ivar |= __SHIFTIN((IVAR_VALID_82574|sc->sc_link_intr_idx), |
4647 | IVAR_OTHER_MASK); | | 4648 | IVAR_OTHER_MASK); |
4648 | CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB); | | 4649 | CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB); |
4649 | } else { | | 4650 | } else { |
4650 | /* Interrupt control */ | | 4651 | /* Interrupt control */ |
4651 | CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | | 4652 | CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR |
4652 | | GPIE_MULTI_MSIX | GPIE_EIAME | | 4653 | | GPIE_MULTI_MSIX | GPIE_EIAME |
4653 | | GPIE_PBA); | | 4654 | | GPIE_PBA); |
4654 | | | 4655 | |
4655 | switch (sc->sc_type) { | | 4656 | switch (sc->sc_type) { |
4656 | case WM_T_82580: | | 4657 | case WM_T_82580: |
4657 | case WM_T_I350: | | 4658 | case WM_T_I350: |
4658 | case WM_T_I354: | | 4659 | case WM_T_I354: |
4659 | case WM_T_I210: | | 4660 | case WM_T_I210: |
4660 | case WM_T_I211: | | 4661 | case WM_T_I211: |
4661 | /* TX */ | | 4662 | /* TX */ |
4662 | for (i = 0; i < sc->sc_ntxqueues; i++) { | | 4663 | for (i = 0; i < sc->sc_ntxqueues; i++) { |
4663 | struct wm_txqueue *txq = &sc->sc_txq[i]; | | 4664 | struct wm_txqueue *txq = &sc->sc_txq[i]; |
4664 | int qid = txq->txq_id; | | 4665 | int qid = txq->txq_id; |
4665 | ivar = CSR_READ(sc, WMREG_IVAR_Q(qid)); | | 4666 | ivar = CSR_READ(sc, WMREG_IVAR_Q(qid)); |
4666 | ivar &= ~IVAR_TX_MASK_Q(qid); | | 4667 | ivar &= ~IVAR_TX_MASK_Q(qid); |
4667 | ivar |= __SHIFTIN( | | 4668 | ivar |= __SHIFTIN( |
4668 | (txq->txq_intr_idx | IVAR_VALID), | | 4669 | (txq->txq_intr_idx | IVAR_VALID), |
4669 | IVAR_TX_MASK_Q(qid)); | | 4670 | IVAR_TX_MASK_Q(qid)); |
4670 | CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar); | | 4671 | CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar); |
4671 | } | | 4672 | } |
4672 | | | 4673 | |
4673 | /* RX */ | | 4674 | /* RX */ |
4674 | for (i = 0; i < sc->sc_nrxqueues; i++) { | | 4675 | for (i = 0; i < sc->sc_nrxqueues; i++) { |
4675 | struct wm_rxqueue *rxq = &sc->sc_rxq[i]; | | 4676 | struct wm_rxqueue *rxq = &sc->sc_rxq[i]; |
4676 | int qid = rxq->rxq_id; | | 4677 | int qid = rxq->rxq_id; |
4677 | ivar = CSR_READ(sc, WMREG_IVAR_Q(qid)); | | 4678 | ivar = CSR_READ(sc, WMREG_IVAR_Q(qid)); |
4678 | ivar &= ~IVAR_RX_MASK_Q(qid); | | 4679 | ivar &= ~IVAR_RX_MASK_Q(qid); |
4679 | ivar |= __SHIFTIN( | | 4680 | ivar |= __SHIFTIN( |
4680 | (rxq->rxq_intr_idx | IVAR_VALID), | | 4681 | (rxq->rxq_intr_idx | IVAR_VALID), |
4681 | IVAR_RX_MASK_Q(qid)); | | 4682 | IVAR_RX_MASK_Q(qid)); |
4682 | CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar); | | 4683 | CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar); |
4683 | } | | 4684 | } |
4684 | break; | | 4685 | break; |
4685 | case WM_T_82576: | | 4686 | case WM_T_82576: |
4686 | /* TX */ | | 4687 | /* TX */ |
4687 | for (i = 0; i < sc->sc_ntxqueues; i++) { | | 4688 | for (i = 0; i < sc->sc_ntxqueues; i++) { |
4688 | struct wm_txqueue *txq = &sc->sc_txq[i]; | | 4689 | struct wm_txqueue *txq = &sc->sc_txq[i]; |
4689 | int qid = txq->txq_id; | | 4690 | int qid = txq->txq_id; |
4690 | ivar = CSR_READ(sc, WMREG_IVAR_Q_82576(qid)); | | 4691 | ivar = CSR_READ(sc, WMREG_IVAR_Q_82576(qid)); |
4691 | ivar &= ~IVAR_TX_MASK_Q_82576(qid); | | 4692 | ivar &= ~IVAR_TX_MASK_Q_82576(qid); |
4692 | ivar |= __SHIFTIN( | | 4693 | ivar |= __SHIFTIN( |
4693 | (txq->txq_intr_idx | IVAR_VALID), | | 4694 | (txq->txq_intr_idx | IVAR_VALID), |
4694 | IVAR_TX_MASK_Q_82576(qid)); | | 4695 | IVAR_TX_MASK_Q_82576(qid)); |
4695 | CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid), ivar); | | 4696 | CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid), ivar); |
4696 | } | | 4697 | } |
4697 | | | 4698 | |
4698 | /* RX */ | | 4699 | /* RX */ |
4699 | for (i = 0; i < sc->sc_nrxqueues; i++) { | | 4700 | for (i = 0; i < sc->sc_nrxqueues; i++) { |
4700 | struct wm_rxqueue *rxq = &sc->sc_rxq[i]; | | 4701 | struct wm_rxqueue *rxq = &sc->sc_rxq[i]; |
4701 | int qid = rxq->rxq_id; | | 4702 | int qid = rxq->rxq_id; |
4702 | ivar = CSR_READ(sc, WMREG_IVAR_Q_82576(qid)); | | 4703 | ivar = CSR_READ(sc, WMREG_IVAR_Q_82576(qid)); |
4703 | ivar &= ~IVAR_RX_MASK_Q_82576(qid); | | 4704 | ivar &= ~IVAR_RX_MASK_Q_82576(qid); |
4704 | ivar |= __SHIFTIN( | | 4705 | ivar |= __SHIFTIN( |
4705 | (rxq->rxq_intr_idx | IVAR_VALID), | | 4706 | (rxq->rxq_intr_idx | IVAR_VALID), |
4706 | IVAR_RX_MASK_Q_82576(qid)); | | 4707 | IVAR_RX_MASK_Q_82576(qid)); |
4707 | CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid), ivar); | | 4708 | CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid), ivar); |
4708 | } | | 4709 | } |
4709 | break; | | 4710 | break; |
4710 | default: | | 4711 | default: |
4711 | break; | | 4712 | break; |
4712 | } | | 4713 | } |
4713 | | | 4714 | |
4714 | /* Link status */ | | 4715 | /* Link status */ |
4715 | ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID), | | 4716 | ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID), |
4716 | IVAR_MISC_OTHER); | | 4717 | IVAR_MISC_OTHER); |
4717 | CSR_WRITE(sc, WMREG_IVAR_MISC, ivar); | | 4718 | CSR_WRITE(sc, WMREG_IVAR_MISC, ivar); |
4718 | } | | 4719 | } |
4719 | | | 4720 | |
4720 | if (sc->sc_nrxqueues > 1) { | | 4721 | if (sc->sc_nrxqueues > 1) { |
4721 | wm_init_rss(sc); | | 4722 | wm_init_rss(sc); |
4722 | | | 4723 | |
4723 | /* | | 4724 | /* |
4724 | ** NOTE: Receive Full-Packet Checksum Offload | | 4725 | ** NOTE: Receive Full-Packet Checksum Offload |
4725 | ** is mutually exclusive with Multiqueue. However | | 4726 | ** is mutually exclusive with Multiqueue. However |
4726 | ** this is not the same as TCP/IP checksums which | | 4727 | ** this is not the same as TCP/IP checksums which |
4727 | ** still work. | | 4728 | ** still work. |
4728 | */ | | 4729 | */ |
4729 | reg = CSR_READ(sc, WMREG_RXCSUM); | | 4730 | reg = CSR_READ(sc, WMREG_RXCSUM); |
4730 | reg |= RXCSUM_PCSD; | | 4731 | reg |= RXCSUM_PCSD; |
4731 | CSR_WRITE(sc, WMREG_RXCSUM, reg); | | 4732 | CSR_WRITE(sc, WMREG_RXCSUM, reg); |
4732 | } | | 4733 | } |
4733 | } | | 4734 | } |
4734 | | | 4735 | |
4735 | /* Set up the interrupt registers. */ | | 4736 | /* Set up the interrupt registers. */ |
4736 | CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); | | 4737 | CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); |
4737 | sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 | | | 4738 | sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 | |
4738 | ICR_RXO | ICR_RXT0; | | 4739 | ICR_RXO | ICR_RXT0; |
4739 | if (sc->sc_nintrs > 1) { | | 4740 | if (sc->sc_nintrs > 1) { |
4740 | uint32_t mask; | | 4741 | uint32_t mask; |
4741 | switch (sc->sc_type) { | | 4742 | switch (sc->sc_type) { |
4742 | case WM_T_82574: | | 4743 | case WM_T_82574: |
4743 | CSR_WRITE(sc, WMREG_EIAC_82574, | | 4744 | CSR_WRITE(sc, WMREG_EIAC_82574, |
4744 | WMREG_EIAC_82574_MSIX_MASK); | | 4745 | WMREG_EIAC_82574_MSIX_MASK); |
4745 | sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK; | | 4746 | sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK; |
4746 | CSR_WRITE(sc, WMREG_IMS, sc->sc_icr); | | 4747 | CSR_WRITE(sc, WMREG_IMS, sc->sc_icr); |
4747 | break; | | 4748 | break; |
4748 | default: | | 4749 | default: |
4749 | if (sc->sc_type == WM_T_82575) { | | 4750 | if (sc->sc_type == WM_T_82575) { |
4750 | mask = 0; | | 4751 | mask = 0; |
4751 | for (i = 0; i < sc->sc_ntxqueues; i++) { | | 4752 | for (i = 0; i < sc->sc_ntxqueues; i++) { |
4752 | struct wm_txqueue *txq = &sc->sc_txq[i]; | | 4753 | struct wm_txqueue *txq = &sc->sc_txq[i]; |
4753 | mask |= EITR_TX_QUEUE(txq->txq_id); | | 4754 | mask |= EITR_TX_QUEUE(txq->txq_id); |
4754 | } | | 4755 | } |
4755 | for (i = 0; i < sc->sc_nrxqueues; i++) { | | 4756 | for (i = 0; i < sc->sc_nrxqueues; i++) { |
4756 | struct wm_rxqueue *rxq = &sc->sc_rxq[i]; | | 4757 | struct wm_rxqueue *rxq = &sc->sc_rxq[i]; |
4757 | mask |= EITR_RX_QUEUE(rxq->rxq_id); | | 4758 | mask |= EITR_RX_QUEUE(rxq->rxq_id); |
4758 | } | | 4759 | } |
4759 | mask |= EITR_OTHER; | | 4760 | mask |= EITR_OTHER; |
4760 | } else { | | 4761 | } else { |
4761 | mask = 0; | | 4762 | mask = 0; |
4762 | for (i = 0; i < sc->sc_ntxqueues; i++) { | | 4763 | for (i = 0; i < sc->sc_ntxqueues; i++) { |
4763 | struct wm_txqueue *txq = &sc->sc_txq[i]; | | 4764 | struct wm_txqueue *txq = &sc->sc_txq[i]; |
4764 | mask |= 1 << txq->txq_intr_idx; | | 4765 | mask |= 1 << txq->txq_intr_idx; |
4765 | } | | 4766 | } |
4766 | for (i = 0; i < sc->sc_nrxqueues; i++) { | | 4767 | for (i = 0; i < sc->sc_nrxqueues; i++) { |
4767 | struct wm_rxqueue *rxq = &sc->sc_rxq[i]; | | 4768 | struct wm_rxqueue *rxq = &sc->sc_rxq[i]; |
4768 | mask |= 1 << rxq->rxq_intr_idx; | | 4769 | mask |= 1 << rxq->rxq_intr_idx; |
4769 | } | | 4770 | } |
4770 | mask |= 1 << sc->sc_link_intr_idx; | | 4771 | mask |= 1 << sc->sc_link_intr_idx; |
4771 | } | | 4772 | } |
4772 | CSR_WRITE(sc, WMREG_EIAC, mask); | | 4773 | CSR_WRITE(sc, WMREG_EIAC, mask); |
4773 | CSR_WRITE(sc, WMREG_EIAM, mask); | | 4774 | CSR_WRITE(sc, WMREG_EIAM, mask); |
4774 | CSR_WRITE(sc, WMREG_EIMS, mask); | | 4775 | CSR_WRITE(sc, WMREG_EIMS, mask); |
4775 | CSR_WRITE(sc, WMREG_IMS, ICR_LSC); | | 4776 | CSR_WRITE(sc, WMREG_IMS, ICR_LSC); |
4776 | break; | | 4777 | break; |
4777 | } | | 4778 | } |
4778 | } else | | 4779 | } else |
4779 | CSR_WRITE(sc, WMREG_IMS, sc->sc_icr); | | 4780 | CSR_WRITE(sc, WMREG_IMS, sc->sc_icr); |
4780 | | | 4781 | |
4781 | if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) | | 4782 | if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) |
4782 | || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) | | 4783 | || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) |
4783 | || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) { | | 4784 | || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) { |
4784 | reg = CSR_READ(sc, WMREG_KABGTXD); | | 4785 | reg = CSR_READ(sc, WMREG_KABGTXD); |
4785 | reg |= KABGTXD_BGSQLBIAS; | | 4786 | reg |= KABGTXD_BGSQLBIAS; |
4786 | CSR_WRITE(sc, WMREG_KABGTXD, reg); | | 4787 | CSR_WRITE(sc, WMREG_KABGTXD, reg); |
4787 | } | | 4788 | } |
4788 | | | 4789 | |
4789 | /* Set up the inter-packet gap. */ | | 4790 | /* Set up the inter-packet gap. */ |
4790 | CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg); | | 4791 | CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg); |
4791 | | | 4792 | |
4792 | if (sc->sc_type >= WM_T_82543) { | | 4793 | if (sc->sc_type >= WM_T_82543) { |
4793 | /* | | 4794 | /* |
4794 | * XXX 82574 has both ITR and EITR. SET EITR when we use | | 4795 | * XXX 82574 has both ITR and EITR. SET EITR when we use |
4795 | * the multi queue function with MSI-X. | | 4796 | * the multi queue function with MSI-X. |
4796 | */ | | 4797 | */ |
4797 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { | | 4798 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { |
4798 | int qidx; | | 4799 | int qidx; |
4799 | for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) { | | 4800 | for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) { |
4800 | struct wm_txqueue *txq = &sc->sc_txq[qidx]; | | 4801 | struct wm_txqueue *txq = &sc->sc_txq[qidx]; |
4801 | CSR_WRITE(sc, WMREG_EITR(txq->txq_intr_idx), | | 4802 | CSR_WRITE(sc, WMREG_EITR(txq->txq_intr_idx), |
4802 | sc->sc_itr); | | 4803 | sc->sc_itr); |
4803 | } | | 4804 | } |
4804 | for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) { | | 4805 | for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) { |
4805 | struct wm_rxqueue *rxq = &sc->sc_rxq[qidx]; | | 4806 | struct wm_rxqueue *rxq = &sc->sc_rxq[qidx]; |
4806 | CSR_WRITE(sc, WMREG_EITR(rxq->rxq_intr_idx), | | 4807 | CSR_WRITE(sc, WMREG_EITR(rxq->rxq_intr_idx), |
4807 | sc->sc_itr); | | 4808 | sc->sc_itr); |
4808 | } | | 4809 | } |
4809 | /* | | 4810 | /* |
4810 | * Link interrupts occur much less than TX | | 4811 | * Link interrupts occur much less than TX |
4811 | * interrupts and RX interrupts. So, we don't | | 4812 | * interrupts and RX interrupts. So, we don't |
4812 | * tune EINTR(WM_MSIX_LINKINTR_IDX) value like | | 4813 | * tune EINTR(WM_MSIX_LINKINTR_IDX) value like |
4813 | * FreeBSD's if_igb. | | 4814 | * FreeBSD's if_igb. |
4814 | */ | | 4815 | */ |
4815 | } else | | 4816 | } else |
4816 | CSR_WRITE(sc, WMREG_ITR, sc->sc_itr); | | 4817 | CSR_WRITE(sc, WMREG_ITR, sc->sc_itr); |
4817 | } | | 4818 | } |
4818 | | | 4819 | |
4819 | /* Set the VLAN ethernetype. */ | | 4820 | /* Set the VLAN ethernetype. */ |
4820 | CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN); | | 4821 | CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN); |
4821 | | | 4822 | |
4822 | /* | | 4823 | /* |
4823 | * Set up the transmit control register; we start out with | | 4824 | * Set up the transmit control register; we start out with |
4824 | * a collision distance suitable for FDX, but update it whe | | 4825 | * a collision distance suitable for FDX, but update it whe |
4825 | * we resolve the media type. | | 4826 | * we resolve the media type. |
4826 | */ | | 4827 | */ |
4827 | sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC | | 4828 | sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC |
4828 | | TCTL_CT(TX_COLLISION_THRESHOLD) | | 4829 | | TCTL_CT(TX_COLLISION_THRESHOLD) |
4829 | | TCTL_COLD(TX_COLLISION_DISTANCE_FDX); | | 4830 | | TCTL_COLD(TX_COLLISION_DISTANCE_FDX); |
4830 | if (sc->sc_type >= WM_T_82571) | | 4831 | if (sc->sc_type >= WM_T_82571) |
4831 | sc->sc_tctl |= TCTL_MULR; | | 4832 | sc->sc_tctl |= TCTL_MULR; |
4832 | CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); | | 4833 | CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); |
4833 | | | 4834 | |
4834 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { | | 4835 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { |
4835 | /* Write TDT after TCTL.EN is set. See the document. */ | | 4836 | /* Write TDT after TCTL.EN is set. See the document. */ |
4836 | CSR_WRITE(sc, WMREG_TDT(0), 0); | | 4837 | CSR_WRITE(sc, WMREG_TDT(0), 0); |
4837 | } | | 4838 | } |
4838 | | | 4839 | |
4839 | if (sc->sc_type == WM_T_80003) { | | 4840 | if (sc->sc_type == WM_T_80003) { |
4840 | reg = CSR_READ(sc, WMREG_TCTL_EXT); | | 4841 | reg = CSR_READ(sc, WMREG_TCTL_EXT); |
4841 | reg &= ~TCTL_EXT_GCEX_MASK; | | 4842 | reg &= ~TCTL_EXT_GCEX_MASK; |
4842 | reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX; | | 4843 | reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX; |
4843 | CSR_WRITE(sc, WMREG_TCTL_EXT, reg); | | 4844 | CSR_WRITE(sc, WMREG_TCTL_EXT, reg); |
4844 | } | | 4845 | } |
4845 | | | 4846 | |
4846 | /* Set the media. */ | | 4847 | /* Set the media. */ |
4847 | if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0) | | 4848 | if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0) |
4848 | goto out; | | 4849 | goto out; |
4849 | | | 4850 | |
4850 | /* Configure for OS presence */ | | 4851 | /* Configure for OS presence */ |
4851 | wm_init_manageability(sc); | | 4852 | wm_init_manageability(sc); |
4852 | | | 4853 | |
4853 | /* | | 4854 | /* |
4854 | * Set up the receive control register; we actually program | | 4855 | * Set up the receive control register; we actually program |
4855 | * the register when we set the receive filter. Use multicast | | 4856 | * the register when we set the receive filter. Use multicast |
4856 | * address offset type 0. | | 4857 | * address offset type 0. |
4857 | * | | 4858 | * |
4858 | * Only the i82544 has the ability to strip the incoming | | 4859 | * Only the i82544 has the ability to strip the incoming |
4859 | * CRC, so we don't enable that feature. | | 4860 | * CRC, so we don't enable that feature. |
4860 | */ | | 4861 | */ |
4861 | sc->sc_mchash_type = 0; | | 4862 | sc->sc_mchash_type = 0; |
4862 | sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF | | 4863 | sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF |
4863 | | RCTL_MO(sc->sc_mchash_type); | | 4864 | | RCTL_MO(sc->sc_mchash_type); |
4864 | | | 4865 | |
4865 | /* | | 4866 | /* |
4866 | * The I350 has a bug where it always strips the CRC whether | | 4867 | * The I350 has a bug where it always strips the CRC whether |
4867 | * asked to or not. So ask for stripped CRC here and cope in rxeof | | 4868 | * asked to or not. So ask for stripped CRC here and cope in rxeof |
4868 | */ | | 4869 | */ |
4869 | if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354) | | 4870 | if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354) |
4870 | || (sc->sc_type == WM_T_I210)) | | 4871 | || (sc->sc_type == WM_T_I210)) |
4871 | sc->sc_rctl |= RCTL_SECRC; | | 4872 | sc->sc_rctl |= RCTL_SECRC; |
4872 | | | 4873 | |
4873 | if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0) | | 4874 | if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0) |
4874 | && (ifp->if_mtu > ETHERMTU)) { | | 4875 | && (ifp->if_mtu > ETHERMTU)) { |
4875 | sc->sc_rctl |= RCTL_LPE; | | 4876 | sc->sc_rctl |= RCTL_LPE; |
4876 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) | | 4877 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) |
4877 | CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO); | | 4878 | CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO); |
4878 | } | | 4879 | } |
4879 | | | 4880 | |
4880 | if (MCLBYTES == 2048) { | | 4881 | if (MCLBYTES == 2048) { |
4881 | sc->sc_rctl |= RCTL_2k; | | 4882 | sc->sc_rctl |= RCTL_2k; |
4882 | } else { | | 4883 | } else { |
4883 | if (sc->sc_type >= WM_T_82543) { | | 4884 | if (sc->sc_type >= WM_T_82543) { |
4884 | switch (MCLBYTES) { | | 4885 | switch (MCLBYTES) { |
4885 | case 4096: | | 4886 | case 4096: |
4886 | sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k; | | 4887 | sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k; |
4887 | break; | | 4888 | break; |
4888 | case 8192: | | 4889 | case 8192: |
4889 | sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k; | | 4890 | sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k; |
4890 | break; | | 4891 | break; |
4891 | case 16384: | | 4892 | case 16384: |
4892 | sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k; | | 4893 | sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k; |
4893 | break; | | 4894 | break; |
4894 | default: | | 4895 | default: |
4895 | panic("wm_init: MCLBYTES %d unsupported", | | 4896 | panic("wm_init: MCLBYTES %d unsupported", |
4896 | MCLBYTES); | | 4897 | MCLBYTES); |
4897 | break; | | 4898 | break; |
4898 | } | | 4899 | } |
4899 | } else panic("wm_init: i82542 requires MCLBYTES = 2048"); | | 4900 | } else panic("wm_init: i82542 requires MCLBYTES = 2048"); |
4900 | } | | 4901 | } |
4901 | | | 4902 | |
4902 | /* Set the receive filter. */ | | 4903 | /* Set the receive filter. */ |
4903 | wm_set_filter(sc); | | 4904 | wm_set_filter(sc); |
4904 | | | 4905 | |
4905 | /* Enable ECC */ | | 4906 | /* Enable ECC */ |
4906 | switch (sc->sc_type) { | | 4907 | switch (sc->sc_type) { |
4907 | case WM_T_82571: | | 4908 | case WM_T_82571: |
4908 | reg = CSR_READ(sc, WMREG_PBA_ECC); | | 4909 | reg = CSR_READ(sc, WMREG_PBA_ECC); |
4909 | reg |= PBA_ECC_CORR_EN; | | 4910 | reg |= PBA_ECC_CORR_EN; |
4910 | CSR_WRITE(sc, WMREG_PBA_ECC, reg); | | 4911 | CSR_WRITE(sc, WMREG_PBA_ECC, reg); |
4911 | break; | | 4912 | break; |
4912 | case WM_T_PCH_LPT: | | 4913 | case WM_T_PCH_LPT: |
4913 | reg = CSR_READ(sc, WMREG_PBECCSTS); | | 4914 | reg = CSR_READ(sc, WMREG_PBECCSTS); |
4914 | reg |= PBECCSTS_UNCORR_ECC_ENABLE; | | 4915 | reg |= PBECCSTS_UNCORR_ECC_ENABLE; |
4915 | CSR_WRITE(sc, WMREG_PBECCSTS, reg); | | 4916 | CSR_WRITE(sc, WMREG_PBECCSTS, reg); |
4916 | | | 4917 | |
4917 | reg = CSR_READ(sc, WMREG_CTRL); | | 4918 | reg = CSR_READ(sc, WMREG_CTRL); |
4918 | reg |= CTRL_MEHE; | | 4919 | reg |= CTRL_MEHE; |
4919 | CSR_WRITE(sc, WMREG_CTRL, reg); | | 4920 | CSR_WRITE(sc, WMREG_CTRL, reg); |
4920 | break; | | 4921 | break; |
4921 | default: | | 4922 | default: |
4922 | break; | | 4923 | break; |
4923 | } | | 4924 | } |
4924 | | | 4925 | |
4925 | /* On 575 and later set RDT only if RX enabled */ | | 4926 | /* On 575 and later set RDT only if RX enabled */ |
4926 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { | | 4927 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { |
4927 | int qidx; | | 4928 | int qidx; |
4928 | for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) { | | 4929 | for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) { |
4929 | struct wm_rxqueue *rxq = &sc->sc_rxq[qidx]; | | 4930 | struct wm_rxqueue *rxq = &sc->sc_rxq[qidx]; |
4930 | for (i = 0; i < WM_NRXDESC; i++) { | | 4931 | for (i = 0; i < WM_NRXDESC; i++) { |
4931 | WM_RX_LOCK(rxq); | | 4932 | WM_RX_LOCK(rxq); |
4932 | wm_init_rxdesc(rxq, i); | | 4933 | wm_init_rxdesc(rxq, i); |
4933 | WM_RX_UNLOCK(rxq); | | 4934 | WM_RX_UNLOCK(rxq); |
4934 | | | 4935 | |
4935 | } | | 4936 | } |
4936 | } | | 4937 | } |
4937 | } | | 4938 | } |
4938 | | | 4939 | |
4939 | sc->sc_stopping = false; | | 4940 | sc->sc_stopping = false; |
4940 | | | 4941 | |
4941 | /* Start the one second link check clock. */ | | 4942 | /* Start the one second link check clock. */ |
4942 | callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc); | | 4943 | callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc); |
4943 | | | 4944 | |
4944 | /* ...all done! */ | | 4945 | /* ...all done! */ |
4945 | ifp->if_flags |= IFF_RUNNING; | | 4946 | ifp->if_flags |= IFF_RUNNING; |
4946 | ifp->if_flags &= ~IFF_OACTIVE; | | 4947 | ifp->if_flags &= ~IFF_OACTIVE; |
4947 | | | 4948 | |
4948 | out: | | 4949 | out: |
4949 | sc->sc_if_flags = ifp->if_flags; | | 4950 | sc->sc_if_flags = ifp->if_flags; |
4950 | if (error) | | 4951 | if (error) |
4951 | log(LOG_ERR, "%s: interface not running\n", | | 4952 | log(LOG_ERR, "%s: interface not running\n", |
4952 | device_xname(sc->sc_dev)); | | 4953 | device_xname(sc->sc_dev)); |
4953 | return error; | | 4954 | return error; |
4954 | } | | 4955 | } |
4955 | | | 4956 | |
4956 | /* | | 4957 | /* |
4957 | * wm_stop: [ifnet interface function] | | 4958 | * wm_stop: [ifnet interface function] |
4958 | * | | 4959 | * |
4959 | * Stop transmission on the interface. | | 4960 | * Stop transmission on the interface. |
4960 | */ | | 4961 | */ |
4961 | static void | | 4962 | static void |
4962 | wm_stop(struct ifnet *ifp, int disable) | | 4963 | wm_stop(struct ifnet *ifp, int disable) |
4963 | { | | 4964 | { |
4964 | struct wm_softc *sc = ifp->if_softc; | | 4965 | struct wm_softc *sc = ifp->if_softc; |
4965 | | | 4966 | |
4966 | WM_CORE_LOCK(sc); | | 4967 | WM_CORE_LOCK(sc); |
4967 | wm_stop_locked(ifp, disable); | | 4968 | wm_stop_locked(ifp, disable); |
4968 | WM_CORE_UNLOCK(sc); | | 4969 | WM_CORE_UNLOCK(sc); |
4969 | } | | 4970 | } |
4970 | | | 4971 | |
/*
 * wm_stop_locked:
 *
 *	Stop transmission on the interface.  Core-locked internals of
 *	wm_stop(); the caller must hold the core lock.  If 'disable' is
 *	non-zero, also drain and release the receive buffers.
 */
static void
wm_stop_locked(struct ifnet *ifp, int disable)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_txsoft *txs;
	int i, qidx;

	KASSERT(WM_CORE_LOCKED(sc));

	/*
	 * Tell other code paths (tick callout, interrupt handlers) that
	 * the interface is going down so they bail out early.
	 */
	sc->sc_stopping = true;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_tick_ch);

	/* Stop the 82547 Tx FIFO stall check timer. */
	if (sc->sc_type == WM_T_82547)
		callout_stop(&sc->sc_txfifo_ch);

	if (sc->sc_flags & WM_F_HAS_MII) {
		/* Down the MII. */
		mii_down(&sc->sc_mii);
	} else {
#if 0
		/* Should we clear PHY's status properly? */
		wm_reset(sc);
#endif
	}

	/* Stop the transmit and receive processes. */
	CSR_WRITE(sc, WMREG_TCTL, 0);
	CSR_WRITE(sc, WMREG_RCTL, 0);
	/* Keep the shadow copy in sync so a later restore won't re-enable Rx. */
	sc->sc_rctl &= ~RCTL_EN;

	/*
	 * Clear the interrupt mask to ensure the device cannot assert its
	 * interrupt line.
	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
	 * service any currently pending or shared interrupt.
	 */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	sc->sc_icr = 0;
	if (sc->sc_nintrs > 1) {
		/*
		 * MSI-X: also mask the extended interrupt cause registers.
		 * The 82574 uses a different auto-clear register layout than
		 * the later (82575+) family.
		 */
		if (sc->sc_type != WM_T_82574) {
			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
			CSR_WRITE(sc, WMREG_EIAC, 0);
		} else
			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
	}

	/* Release any queued transmit buffers. */
	for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
		struct wm_txqueue *txq = &sc->sc_txq[qidx];
		WM_TX_LOCK(txq);
		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
			txs = &txq->txq_soft[i];
			if (txs->txs_mbuf != NULL) {
				/* Unload the DMA map before freeing the mbuf chain. */
				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
				m_freem(txs->txs_mbuf);
				txs->txs_mbuf = NULL;
			}
		}
		WM_TX_UNLOCK(txq);
	}

	/* Mark the interface as down and cancel the watchdog timer. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	if (disable) {
		/* Fully stopping: also free the posted receive buffers. */
		for (i = 0; i < sc->sc_nrxqueues; i++) {
			struct wm_rxqueue *rxq = &sc->sc_rxq[i];
			WM_RX_LOCK(rxq);
			wm_rxdrain(rxq);
			WM_RX_UNLOCK(rxq);
		}
	}

#if 0 /* notyet */
	if (sc->sc_type >= WM_T_82544)
		CSR_WRITE(sc, WMREG_WUC, 0);
#endif
}
5053 | | | 5054 | |
5054 | /* | | 5055 | /* |
5055 | * wm_tx_offload: | | 5056 | * wm_tx_offload: |
5056 | * | | 5057 | * |
5057 | * Set up TCP/IP checksumming parameters for the | | 5058 | * Set up TCP/IP checksumming parameters for the |
5058 | * specified packet. | | 5059 | * specified packet. |
5059 | */ | | 5060 | */ |
5060 | static int | | 5061 | static int |
5061 | wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp, | | 5062 | wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp, |
5062 | uint8_t *fieldsp) | | 5063 | uint8_t *fieldsp) |
5063 | { | | 5064 | { |
5064 | struct wm_txqueue *txq = &sc->sc_txq[0]; | | 5065 | struct wm_txqueue *txq = &sc->sc_txq[0]; |
5065 | struct mbuf *m0 = txs->txs_mbuf; | | 5066 | struct mbuf *m0 = txs->txs_mbuf; |
5066 | struct livengood_tcpip_ctxdesc *t; | | 5067 | struct livengood_tcpip_ctxdesc *t; |
5067 | uint32_t ipcs, tucs, cmd, cmdlen, seg; | | 5068 | uint32_t ipcs, tucs, cmd, cmdlen, seg; |
5068 | uint32_t ipcse; | | 5069 | uint32_t ipcse; |
5069 | struct ether_header *eh; | | 5070 | struct ether_header *eh; |
5070 | int offset, iphl; | | 5071 | int offset, iphl; |
5071 | uint8_t fields; | | 5072 | uint8_t fields; |
5072 | | | 5073 | |
5073 | /* | | 5074 | /* |
5074 | * XXX It would be nice if the mbuf pkthdr had offset | | 5075 | * XXX It would be nice if the mbuf pkthdr had offset |
5075 | * fields for the protocol headers. | | 5076 | * fields for the protocol headers. |
5076 | */ | | 5077 | */ |
5077 | | | 5078 | |
5078 | eh = mtod(m0, struct ether_header *); | | 5079 | eh = mtod(m0, struct ether_header *); |
5079 | switch (htons(eh->ether_type)) { | | 5080 | switch (htons(eh->ether_type)) { |
5080 | case ETHERTYPE_IP: | | 5081 | case ETHERTYPE_IP: |
5081 | case ETHERTYPE_IPV6: | | 5082 | case ETHERTYPE_IPV6: |
5082 | offset = ETHER_HDR_LEN; | | 5083 | offset = ETHER_HDR_LEN; |
5083 | break; | | 5084 | break; |
5084 | | | 5085 | |
5085 | case ETHERTYPE_VLAN: | | 5086 | case ETHERTYPE_VLAN: |
5086 | offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; | | 5087 | offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; |
5087 | break; | | 5088 | break; |
5088 | | | 5089 | |
5089 | default: | | 5090 | default: |
5090 | /* | | 5091 | /* |
5091 | * Don't support this protocol or encapsulation. | | 5092 | * Don't support this protocol or encapsulation. |
5092 | */ | | 5093 | */ |
5093 | *fieldsp = 0; | | 5094 | *fieldsp = 0; |
5094 | *cmdp = 0; | | 5095 | *cmdp = 0; |
5095 | return 0; | | 5096 | return 0; |
5096 | } | | 5097 | } |
5097 | | | 5098 | |
5098 | if ((m0->m_pkthdr.csum_flags & | | 5099 | if ((m0->m_pkthdr.csum_flags & |
5099 | (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) { | | 5100 | (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) { |
5100 | iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data); | | 5101 | iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data); |
5101 | } else { | | 5102 | } else { |
5102 | iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data); | | 5103 | iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data); |
5103 | } | | 5104 | } |
5104 | ipcse = offset + iphl - 1; | | 5105 | ipcse = offset + iphl - 1; |
5105 | | | 5106 | |
5106 | cmd = WTX_CMD_DEXT | WTX_DTYP_D; | | 5107 | cmd = WTX_CMD_DEXT | WTX_DTYP_D; |
5107 | cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE; | | 5108 | cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE; |
5108 | seg = 0; | | 5109 | seg = 0; |
5109 | fields = 0; | | 5110 | fields = 0; |
5110 | | | 5111 | |
5111 | if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) { | | 5112 | if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) { |
5112 | int hlen = offset + iphl; | | 5113 | int hlen = offset + iphl; |
5113 | bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0; | | 5114 | bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0; |
5114 | | | 5115 | |
5115 | if (__predict_false(m0->m_len < | | 5116 | if (__predict_false(m0->m_len < |
5116 | (hlen + sizeof(struct tcphdr)))) { | | 5117 | (hlen + sizeof(struct tcphdr)))) { |
5117 | /* | | 5118 | /* |
5118 | * TCP/IP headers are not in the first mbuf; we need | | 5119 | * TCP/IP headers are not in the first mbuf; we need |
5119 | * to do this the slow and painful way. Let's just | | 5120 | * to do this the slow and painful way. Let's just |
5120 | * hope this doesn't happen very often. | | 5121 | * hope this doesn't happen very often. |
5121 | */ | | 5122 | */ |
5122 | struct tcphdr th; | | 5123 | struct tcphdr th; |
5123 | | | 5124 | |
5124 | WM_EVCNT_INCR(&sc->sc_ev_txtsopain); | | 5125 | WM_EVCNT_INCR(&sc->sc_ev_txtsopain); |
5125 | | | 5126 | |
5126 | m_copydata(m0, hlen, sizeof(th), &th); | | 5127 | m_copydata(m0, hlen, sizeof(th), &th); |
5127 | if (v4) { | | 5128 | if (v4) { |
5128 | struct ip ip; | | 5129 | struct ip ip; |
5129 | | | 5130 | |
5130 | m_copydata(m0, offset, sizeof(ip), &ip); | | 5131 | m_copydata(m0, offset, sizeof(ip), &ip); |
5131 | ip.ip_len = 0; | | 5132 | ip.ip_len = 0; |
5132 | m_copyback(m0, | | 5133 | m_copyback(m0, |
5133 | offset + offsetof(struct ip, ip_len), | | 5134 | offset + offsetof(struct ip, ip_len), |
5134 | sizeof(ip.ip_len), &ip.ip_len); | | 5135 | sizeof(ip.ip_len), &ip.ip_len); |
5135 | th.th_sum = in_cksum_phdr(ip.ip_src.s_addr, | | 5136 | th.th_sum = in_cksum_phdr(ip.ip_src.s_addr, |
5136 | ip.ip_dst.s_addr, htons(IPPROTO_TCP)); | | 5137 | ip.ip_dst.s_addr, htons(IPPROTO_TCP)); |
5137 | } else { | | 5138 | } else { |
5138 | struct ip6_hdr ip6; | | 5139 | struct ip6_hdr ip6; |
5139 | | | 5140 | |
5140 | m_copydata(m0, offset, sizeof(ip6), &ip6); | | 5141 | m_copydata(m0, offset, sizeof(ip6), &ip6); |
5141 | ip6.ip6_plen = 0; | | 5142 | ip6.ip6_plen = 0; |
5142 | m_copyback(m0, | | 5143 | m_copyback(m0, |
5143 | offset + offsetof(struct ip6_hdr, ip6_plen), | | 5144 | offset + offsetof(struct ip6_hdr, ip6_plen), |
5144 | sizeof(ip6.ip6_plen), &ip6.ip6_plen); | | 5145 | sizeof(ip6.ip6_plen), &ip6.ip6_plen); |
5145 | th.th_sum = in6_cksum_phdr(&ip6.ip6_src, | | 5146 | th.th_sum = in6_cksum_phdr(&ip6.ip6_src, |
5146 | &ip6.ip6_dst, 0, htonl(IPPROTO_TCP)); | | 5147 | &ip6.ip6_dst, 0, htonl(IPPROTO_TCP)); |
5147 | } | | 5148 | } |
5148 | m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum), | | 5149 | m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum), |
5149 | sizeof(th.th_sum), &th.th_sum); | | 5150 | sizeof(th.th_sum), &th.th_sum); |
5150 | | | 5151 | |
5151 | hlen += th.th_off << 2; | | 5152 | hlen += th.th_off << 2; |
5152 | } else { | | 5153 | } else { |
5153 | /* | | 5154 | /* |
5154 | * TCP/IP headers are in the first mbuf; we can do | | 5155 | * TCP/IP headers are in the first mbuf; we can do |
5155 | * this the easy way. | | 5156 | * this the easy way. |
5156 | */ | | 5157 | */ |
5157 | struct tcphdr *th; | | 5158 | struct tcphdr *th; |
5158 | | | 5159 | |
5159 | if (v4) { | | 5160 | if (v4) { |
5160 | struct ip *ip = | | 5161 | struct ip *ip = |
5161 | (void *)(mtod(m0, char *) + offset); | | 5162 | (void *)(mtod(m0, char *) + offset); |
5162 | th = (void *)(mtod(m0, char *) + hlen); | | 5163 | th = (void *)(mtod(m0, char *) + hlen); |
5163 | | | 5164 | |
5164 | ip->ip_len = 0; | | 5165 | ip->ip_len = 0; |
5165 | th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, | | 5166 | th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, |
5166 | ip->ip_dst.s_addr, htons(IPPROTO_TCP)); | | 5167 | ip->ip_dst.s_addr, htons(IPPROTO_TCP)); |
5167 | } else { | | 5168 | } else { |
5168 | struct ip6_hdr *ip6 = | | 5169 | struct ip6_hdr *ip6 = |
5169 | (void *)(mtod(m0, char *) + offset); | | 5170 | (void *)(mtod(m0, char *) + offset); |
5170 | th = (void *)(mtod(m0, char *) + hlen); | | 5171 | th = (void *)(mtod(m0, char *) + hlen); |
5171 | | | 5172 | |
5172 | ip6->ip6_plen = 0; | | 5173 | ip6->ip6_plen = 0; |
5173 | th->th_sum = in6_cksum_phdr(&ip6->ip6_src, | | 5174 | th->th_sum = in6_cksum_phdr(&ip6->ip6_src, |
5174 | &ip6->ip6_dst, 0, htonl(IPPROTO_TCP)); | | 5175 | &ip6->ip6_dst, 0, htonl(IPPROTO_TCP)); |
5175 | } | | 5176 | } |
5176 | hlen += th->th_off << 2; | | 5177 | hlen += th->th_off << 2; |
5177 | } | | 5178 | } |
5178 | | | 5179 | |
5179 | if (v4) { | | 5180 | if (v4) { |
5180 | WM_EVCNT_INCR(&sc->sc_ev_txtso); | | 5181 | WM_EVCNT_INCR(&sc->sc_ev_txtso); |
5181 | cmdlen |= WTX_TCPIP_CMD_IP; | | 5182 | cmdlen |= WTX_TCPIP_CMD_IP; |
5182 | } else { | | 5183 | } else { |
5183 | WM_EVCNT_INCR(&sc->sc_ev_txtso6); | | 5184 | WM_EVCNT_INCR(&sc->sc_ev_txtso6); |
5184 | ipcse = 0; | | 5185 | ipcse = 0; |
5185 | } | | 5186 | } |
5186 | cmd |= WTX_TCPIP_CMD_TSE; | | 5187 | cmd |= WTX_TCPIP_CMD_TSE; |
5187 | cmdlen |= WTX_TCPIP_CMD_TSE | | | 5188 | cmdlen |= WTX_TCPIP_CMD_TSE | |
5188 | WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen); | | 5189 | WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen); |
5189 | seg = WTX_TCPIP_SEG_HDRLEN(hlen) | | | 5190 | seg = WTX_TCPIP_SEG_HDRLEN(hlen) | |
5190 | WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz); | | 5191 | WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz); |
5191 | } | | 5192 | } |
5192 | | | 5193 | |
5193 | /* | | 5194 | /* |
5194 | * NOTE: Even if we're not using the IP or TCP/UDP checksum | | 5195 | * NOTE: Even if we're not using the IP or TCP/UDP checksum |
5195 | * offload feature, if we load the context descriptor, we | | 5196 | * offload feature, if we load the context descriptor, we |
5196 | * MUST provide valid values for IPCSS and TUCSS fields. | | 5197 | * MUST provide valid values for IPCSS and TUCSS fields. |
5197 | */ | | 5198 | */ |
5198 | | | 5199 | |
5199 | ipcs = WTX_TCPIP_IPCSS(offset) | | | 5200 | ipcs = WTX_TCPIP_IPCSS(offset) | |
5200 | WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) | | | 5201 | WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) | |
5201 | WTX_TCPIP_IPCSE(ipcse); | | 5202 | WTX_TCPIP_IPCSE(ipcse); |
5202 | if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) { | | 5203 | if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) { |