| @@ -1,1078 +1,1078 @@ | | | @@ -1,1078 +1,1078 @@ |
1 | /* $NetBSD: if_wm.c,v 1.253 2013/06/04 16:55:07 msaitoh Exp $ */ | | 1 | /* $NetBSD: if_wm.c,v 1.254 2013/06/11 10:07:09 msaitoh Exp $ */ |
2 | | | 2 | |
3 | /* | | 3 | /* |
4 | * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc. | | 4 | * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * Written by Jason R. Thorpe for Wasabi Systems, Inc. | | 7 | * Written by Jason R. Thorpe for Wasabi Systems, Inc. |
8 | * | | 8 | * |
9 | * Redistribution and use in source and binary forms, with or without | | 9 | * Redistribution and use in source and binary forms, with or without |
10 | * modification, are permitted provided that the following conditions | | 10 | * modification, are permitted provided that the following conditions |
11 | * are met: | | 11 | * are met: |
12 | * 1. Redistributions of source code must retain the above copyright | | 12 | * 1. Redistributions of source code must retain the above copyright |
13 | * notice, this list of conditions and the following disclaimer. | | 13 | * notice, this list of conditions and the following disclaimer. |
14 | * 2. Redistributions in binary form must reproduce the above copyright | | 14 | * 2. Redistributions in binary form must reproduce the above copyright |
15 | * notice, this list of conditions and the following disclaimer in the | | 15 | * notice, this list of conditions and the following disclaimer in the |
16 | * documentation and/or other materials provided with the distribution. | | 16 | * documentation and/or other materials provided with the distribution. |
17 | * 3. All advertising materials mentioning features or use of this software | | 17 | * 3. All advertising materials mentioning features or use of this software |
18 | * must display the following acknowledgement: | | 18 | * must display the following acknowledgement: |
19 | * This product includes software developed for the NetBSD Project by | | 19 | * This product includes software developed for the NetBSD Project by |
20 | * Wasabi Systems, Inc. | | 20 | * Wasabi Systems, Inc. |
21 | * 4. The name of Wasabi Systems, Inc. may not be used to endorse | | 21 | * 4. The name of Wasabi Systems, Inc. may not be used to endorse |
22 | * or promote products derived from this software without specific prior | | 22 | * or promote products derived from this software without specific prior |
23 | * written permission. | | 23 | * written permission. |
24 | * | | 24 | * |
25 | * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND | | 25 | * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND |
26 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 26 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
27 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 27 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
28 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC | | 28 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC |
29 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 29 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
30 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 30 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
31 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 31 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
32 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 32 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
33 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 33 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
34 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 34 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
35 | * POSSIBILITY OF SUCH DAMAGE. | | 35 | * POSSIBILITY OF SUCH DAMAGE. |
36 | */ | | 36 | */ |
37 | | | 37 | |
38 | /******************************************************************************* | | 38 | /******************************************************************************* |
39 | | | 39 | |
40 | Copyright (c) 2001-2005, Intel Corporation | | 40 | Copyright (c) 2001-2005, Intel Corporation |
41 | All rights reserved. | | 41 | All rights reserved. |
42 | | | 42 | |
43 | Redistribution and use in source and binary forms, with or without | | 43 | Redistribution and use in source and binary forms, with or without |
44 | modification, are permitted provided that the following conditions are met: | | 44 | modification, are permitted provided that the following conditions are met: |
45 | | | 45 | |
46 | 1. Redistributions of source code must retain the above copyright notice, | | 46 | 1. Redistributions of source code must retain the above copyright notice, |
47 | this list of conditions and the following disclaimer. | | 47 | this list of conditions and the following disclaimer. |
48 | | | 48 | |
49 | 2. Redistributions in binary form must reproduce the above copyright | | 49 | 2. Redistributions in binary form must reproduce the above copyright |
50 | notice, this list of conditions and the following disclaimer in the | | 50 | notice, this list of conditions and the following disclaimer in the |
51 | documentation and/or other materials provided with the distribution. | | 51 | documentation and/or other materials provided with the distribution. |
52 | | | 52 | |
53 | 3. Neither the name of the Intel Corporation nor the names of its | | 53 | 3. Neither the name of the Intel Corporation nor the names of its |
54 | contributors may be used to endorse or promote products derived from | | 54 | contributors may be used to endorse or promote products derived from |
55 | this software without specific prior written permission. | | 55 | this software without specific prior written permission. |
56 | | | 56 | |
57 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | | 57 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
58 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | | 58 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
59 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | | 59 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
60 | ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | | 60 | ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE |
61 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 61 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
62 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 62 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
63 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 63 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
64 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 64 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
65 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 65 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
66 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 66 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
67 | POSSIBILITY OF SUCH DAMAGE. | | 67 | POSSIBILITY OF SUCH DAMAGE. |
68 | | | 68 | |
69 | *******************************************************************************/ | | 69 | *******************************************************************************/ |
70 | /* | | 70 | /* |
71 | * Device driver for the Intel i8254x family of Gigabit Ethernet chips. | | 71 | * Device driver for the Intel i8254x family of Gigabit Ethernet chips. |
72 | * | | 72 | * |
73 | * TODO (in order of importance): | | 73 | * TODO (in order of importance): |
74 | * | | 74 | * |
75 | * - Rework how parameters are loaded from the EEPROM. | | 75 | * - Rework how parameters are loaded from the EEPROM. |
76 | */ | | 76 | */ |
77 | | | 77 | |
78 | #include <sys/cdefs.h> | | 78 | #include <sys/cdefs.h> |
79 | __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.253 2013/06/04 16:55:07 msaitoh Exp $"); | | 79 | __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.254 2013/06/11 10:07:09 msaitoh Exp $"); |
80 | | | 80 | |
81 | #include <sys/param.h> | | 81 | #include <sys/param.h> |
82 | #include <sys/systm.h> | | 82 | #include <sys/systm.h> |
83 | #include <sys/callout.h> | | 83 | #include <sys/callout.h> |
84 | #include <sys/mbuf.h> | | 84 | #include <sys/mbuf.h> |
85 | #include <sys/malloc.h> | | 85 | #include <sys/malloc.h> |
86 | #include <sys/kernel.h> | | 86 | #include <sys/kernel.h> |
87 | #include <sys/socket.h> | | 87 | #include <sys/socket.h> |
88 | #include <sys/ioctl.h> | | 88 | #include <sys/ioctl.h> |
89 | #include <sys/errno.h> | | 89 | #include <sys/errno.h> |
90 | #include <sys/device.h> | | 90 | #include <sys/device.h> |
91 | #include <sys/queue.h> | | 91 | #include <sys/queue.h> |
92 | #include <sys/syslog.h> | | 92 | #include <sys/syslog.h> |
93 | | | 93 | |
94 | #include <sys/rnd.h> | | 94 | #include <sys/rnd.h> |
95 | | | 95 | |
96 | #include <net/if.h> | | 96 | #include <net/if.h> |
97 | #include <net/if_dl.h> | | 97 | #include <net/if_dl.h> |
98 | #include <net/if_media.h> | | 98 | #include <net/if_media.h> |
99 | #include <net/if_ether.h> | | 99 | #include <net/if_ether.h> |
100 | | | 100 | |
101 | #include <net/bpf.h> | | 101 | #include <net/bpf.h> |
102 | | | 102 | |
103 | #include <netinet/in.h> /* XXX for struct ip */ | | 103 | #include <netinet/in.h> /* XXX for struct ip */ |
104 | #include <netinet/in_systm.h> /* XXX for struct ip */ | | 104 | #include <netinet/in_systm.h> /* XXX for struct ip */ |
105 | #include <netinet/ip.h> /* XXX for struct ip */ | | 105 | #include <netinet/ip.h> /* XXX for struct ip */ |
106 | #include <netinet/ip6.h> /* XXX for struct ip6_hdr */ | | 106 | #include <netinet/ip6.h> /* XXX for struct ip6_hdr */ |
107 | #include <netinet/tcp.h> /* XXX for struct tcphdr */ | | 107 | #include <netinet/tcp.h> /* XXX for struct tcphdr */ |
108 | | | 108 | |
109 | #include <sys/bus.h> | | 109 | #include <sys/bus.h> |
110 | #include <sys/intr.h> | | 110 | #include <sys/intr.h> |
111 | #include <machine/endian.h> | | 111 | #include <machine/endian.h> |
112 | | | 112 | |
113 | #include <dev/mii/mii.h> | | 113 | #include <dev/mii/mii.h> |
114 | #include <dev/mii/miivar.h> | | 114 | #include <dev/mii/miivar.h> |
115 | #include <dev/mii/miidevs.h> | | 115 | #include <dev/mii/miidevs.h> |
116 | #include <dev/mii/mii_bitbang.h> | | 116 | #include <dev/mii/mii_bitbang.h> |
117 | #include <dev/mii/ikphyreg.h> | | 117 | #include <dev/mii/ikphyreg.h> |
118 | #include <dev/mii/igphyreg.h> | | 118 | #include <dev/mii/igphyreg.h> |
119 | #include <dev/mii/igphyvar.h> | | 119 | #include <dev/mii/igphyvar.h> |
120 | #include <dev/mii/inbmphyreg.h> | | 120 | #include <dev/mii/inbmphyreg.h> |
121 | | | 121 | |
122 | #include <dev/pci/pcireg.h> | | 122 | #include <dev/pci/pcireg.h> |
123 | #include <dev/pci/pcivar.h> | | 123 | #include <dev/pci/pcivar.h> |
124 | #include <dev/pci/pcidevs.h> | | 124 | #include <dev/pci/pcidevs.h> |
125 | | | 125 | |
126 | #include <dev/pci/if_wmreg.h> | | 126 | #include <dev/pci/if_wmreg.h> |
127 | #include <dev/pci/if_wmvar.h> | | 127 | #include <dev/pci/if_wmvar.h> |
128 | | | 128 | |
#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

/*
 * Debug printf: emit "y" (a parenthesized printf argument list) when any
 * of the category bits "x" is enabled in wm_debug.  Both variants are
 * wrapped in do { } while (0) so that DPRINTF() expands to a single
 * statement and is safe in unbraced if/else bodies (the bare
 * "if (...) printf y" form is a dangling-else hazard).
 */
#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	do { } while (0)	/* nothing */
#endif /* WM_DEBUG */
143 | | | 143 | |
/*
 * Transmit descriptor list size.  An errata limits pre-82544 chips to
 * 256 hardware descriptors in the ring; 82544 and newer use 4096.  The
 * upper layers are told they may queue a lot of packets, but we only
 * manage up to 64 of them at a time (16 on the i82547).
 *
 * Up to 256 (!) DMA segments are permitted per packet, because
 * pathological chains of many small mbufs have been observed in
 * zero-copy situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

/* Largest Tx DMA we program, rounded to a page for TSO. */
#define	WM_MAXTXDMA		round_page(IP_MAXPACKET)
171 | | | 171 | |
/*
 * Receive descriptor list size.  A normal-sized packet fits in one Rx
 * buffer; a full-sized jumbo packet consumes 5 of them.  With 256
 * descriptors of 2k (MCLBYTES) each there is room for roughly 50 jumbo
 * packets.  The count is a power of two so ring wrap is a mask.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
182 | | | 182 | |
/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 *
 * NOTE: the member order and sizes define the layout the hardware DMA
 * engine sees; do not reorder or repack these structs.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

/* Smaller variant for the 82542's 256-descriptor Tx ring limit. */
struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

/* Byte offsets of descriptors within the control-data clump. */
#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
212 | | | 212 | |
/*
 * Software state for transmit jobs.  One of these tracks a single
 * outbound packet from enqueue until its descriptors are reclaimed.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};
223 | | | 223 | |
/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};
233 | | | 233 | |
234 | #define WM_LINKUP_TIMEOUT 50 | | 234 | #define WM_LINKUP_TIMEOUT 50 |
235 | | | 235 | |
236 | static uint16_t swfwphysem[] = { | | 236 | static uint16_t swfwphysem[] = { |
237 | SWFW_PHY0_SM, | | 237 | SWFW_PHY0_SM, |
238 | SWFW_PHY1_SM, | | 238 | SWFW_PHY1_SM, |
239 | SWFW_PHY2_SM, | | 239 | SWFW_PHY2_SM, |
240 | SWFW_PHY3_SM | | 240 | SWFW_PHY3_SM |
241 | }; | | 241 | }; |
242 | | | 242 | |
/*
 * Software state per device.  One instance exists for each attached
 * wm(4) Ethernet controller; it bundles bus handles, PHY/media state,
 * the Tx/Rx descriptor ring bookkeeping, and (optionally) event
 * counters.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;	/* PCI chipset tag */
	pcitag_t sc_pcitag;		/* PCI tag of this device */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability register offset */

	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;		/* Rx buffer alignment adjustment —
					 * NOTE(review): exact semantics not
					 * visible in this chunk; confirm */

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */

	int sc_ee_addrbits;		/* EEPROM address bits */
	int sc_ich8_flash_base;		/* ICH8 NVM flash region base */
	int sc_ich8_flash_bank_size;	/* ICH8 NVM flash bank size */
	int sc_nvm_k1_enabled;		/* presumably the K1 workaround state
					 * for newer PHYs — verify */

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
	/* Convenience aliases into the control-data clump. */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	/* Rx chain-reassembly state (see the WM_RXCHAIN_* macros). */
	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;		/* nonzero: discarding current packet */
	int sc_rxlen;			/* running length of current Rx chain */
	struct mbuf *sc_rxhead;		/* first mbuf of current Rx chain */
	struct mbuf *sc_rxtail;		/* last mbuf of current Rx chain */
	struct mbuf **sc_rxtailp;	/* points at chain's next-link slot */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */
};
384 | | | 384 | |
385 | #define WM_RXCHAIN_RESET(sc) \ | | 385 | #define WM_RXCHAIN_RESET(sc) \ |
386 | do { \ | | 386 | do { \ |
387 | (sc)->sc_rxtailp = &(sc)->sc_rxhead; \ | | 387 | (sc)->sc_rxtailp = &(sc)->sc_rxhead; \ |
388 | *(sc)->sc_rxtailp = NULL; \ | | 388 | *(sc)->sc_rxtailp = NULL; \ |
389 | (sc)->sc_rxlen = 0; \ | | 389 | (sc)->sc_rxlen = 0; \ |
390 | } while (/*CONSTCOND*/0) | | 390 | } while (/*CONSTCOND*/0) |
391 | | | 391 | |
392 | #define WM_RXCHAIN_LINK(sc, m) \ | | 392 | #define WM_RXCHAIN_LINK(sc, m) \ |
393 | do { \ | | 393 | do { \ |
394 | *(sc)->sc_rxtailp = (sc)->sc_rxtail = (m); \ | | 394 | *(sc)->sc_rxtailp = (sc)->sc_rxtail = (m); \ |
395 | (sc)->sc_rxtailp = &(m)->m_next; \ | | 395 | (sc)->sc_rxtailp = &(m)->m_next; \ |
396 | } while (/*CONSTCOND*/0) | | 396 | } while (/*CONSTCOND*/0) |
397 | | | 397 | |
398 | #ifdef WM_EVENT_COUNTERS | | 398 | #ifdef WM_EVENT_COUNTERS |
399 | #define WM_EVCNT_INCR(ev) (ev)->ev_count++ | | 399 | #define WM_EVCNT_INCR(ev) (ev)->ev_count++ |
400 | #define WM_EVCNT_ADD(ev, val) (ev)->ev_count += (val) | | 400 | #define WM_EVCNT_ADD(ev, val) (ev)->ev_count += (val) |
401 | #else | | 401 | #else |
402 | #define WM_EVCNT_INCR(ev) /* nothing */ | | 402 | #define WM_EVCNT_INCR(ev) /* nothing */ |
403 | #define WM_EVCNT_ADD(ev, val) /* nothing */ | | 403 | #define WM_EVCNT_ADD(ev, val) /* nothing */ |
404 | #endif | | 404 | #endif |
405 | | | 405 | |
406 | #define CSR_READ(sc, reg) \ | | 406 | #define CSR_READ(sc, reg) \ |
407 | bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg)) | | 407 | bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg)) |
408 | #define CSR_WRITE(sc, reg, val) \ | | 408 | #define CSR_WRITE(sc, reg, val) \ |
409 | bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val)) | | 409 | bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val)) |
410 | #define CSR_WRITE_FLUSH(sc) \ | | 410 | #define CSR_WRITE_FLUSH(sc) \ |
411 | (void) CSR_READ((sc), WMREG_STATUS) | | 411 | (void) CSR_READ((sc), WMREG_STATUS) |
412 | | | 412 | |
413 | #define ICH8_FLASH_READ32(sc, reg) \ | | 413 | #define ICH8_FLASH_READ32(sc, reg) \ |
414 | bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg)) | | 414 | bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg)) |
415 | #define ICH8_FLASH_WRITE32(sc, reg, data) \ | | 415 | #define ICH8_FLASH_WRITE32(sc, reg, data) \ |
416 | bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data)) | | 416 | bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data)) |
417 | | | 417 | |
418 | #define ICH8_FLASH_READ16(sc, reg) \ | | 418 | #define ICH8_FLASH_READ16(sc, reg) \ |
419 | bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg)) | | 419 | bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg)) |
420 | #define ICH8_FLASH_WRITE16(sc, reg, data) \ | | 420 | #define ICH8_FLASH_WRITE16(sc, reg, data) \ |
421 | bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data)) | | 421 | bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data)) |
422 | | | 422 | |
423 | #define WM_CDTXADDR(sc, x) ((sc)->sc_cddma + WM_CDTXOFF((x))) | | 423 | #define WM_CDTXADDR(sc, x) ((sc)->sc_cddma + WM_CDTXOFF((x))) |
424 | #define WM_CDRXADDR(sc, x) ((sc)->sc_cddma + WM_CDRXOFF((x))) | | 424 | #define WM_CDRXADDR(sc, x) ((sc)->sc_cddma + WM_CDRXOFF((x))) |
425 | | | 425 | |
426 | #define WM_CDTXADDR_LO(sc, x) (WM_CDTXADDR((sc), (x)) & 0xffffffffU) | | 426 | #define WM_CDTXADDR_LO(sc, x) (WM_CDTXADDR((sc), (x)) & 0xffffffffU) |
427 | #define WM_CDTXADDR_HI(sc, x) \ | | 427 | #define WM_CDTXADDR_HI(sc, x) \ |
428 | (sizeof(bus_addr_t) == 8 ? \ | | 428 | (sizeof(bus_addr_t) == 8 ? \ |
429 | (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0) | | 429 | (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0) |
430 | | | 430 | |
431 | #define WM_CDRXADDR_LO(sc, x) (WM_CDRXADDR((sc), (x)) & 0xffffffffU) | | 431 | #define WM_CDRXADDR_LO(sc, x) (WM_CDRXADDR((sc), (x)) & 0xffffffffU) |
432 | #define WM_CDRXADDR_HI(sc, x) \ | | 432 | #define WM_CDRXADDR_HI(sc, x) \ |
433 | (sizeof(bus_addr_t) == 8 ? \ | | 433 | (sizeof(bus_addr_t) == 8 ? \ |
434 | (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0) | | 434 | (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0) |
435 | | | 435 | |
436 | #define WM_CDTXSYNC(sc, x, n, ops) \ | | 436 | #define WM_CDTXSYNC(sc, x, n, ops) \ |
437 | do { \ | | 437 | do { \ |
438 | int __x, __n; \ | | 438 | int __x, __n; \ |
439 | \ | | 439 | \ |
440 | __x = (x); \ | | 440 | __x = (x); \ |
441 | __n = (n); \ | | 441 | __n = (n); \ |
442 | \ | | 442 | \ |
443 | /* If it will wrap around, sync to the end of the ring. */ \ | | 443 | /* If it will wrap around, sync to the end of the ring. */ \ |
444 | if ((__x + __n) > WM_NTXDESC(sc)) { \ | | 444 | if ((__x + __n) > WM_NTXDESC(sc)) { \ |
445 | bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ | | 445 | bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ |
446 | WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * \ | | 446 | WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * \ |
447 | (WM_NTXDESC(sc) - __x), (ops)); \ | | 447 | (WM_NTXDESC(sc) - __x), (ops)); \ |
448 | __n -= (WM_NTXDESC(sc) - __x); \ | | 448 | __n -= (WM_NTXDESC(sc) - __x); \ |
449 | __x = 0; \ | | 449 | __x = 0; \ |
450 | } \ | | 450 | } \ |
451 | \ | | 451 | \ |
452 | /* Now sync whatever is left. */ \ | | 452 | /* Now sync whatever is left. */ \ |
453 | bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ | | 453 | bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ |
454 | WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops)); \ | | 454 | WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops)); \ |
455 | } while (/*CONSTCOND*/0) | | 455 | } while (/*CONSTCOND*/0) |
456 | | | 456 | |
457 | #define WM_CDRXSYNC(sc, x, ops) \ | | 457 | #define WM_CDRXSYNC(sc, x, ops) \ |
458 | do { \ | | 458 | do { \ |
459 | bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ | | 459 | bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ |
460 | WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops)); \ | | 460 | WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops)); \ |
461 | } while (/*CONSTCOND*/0) | | 461 | } while (/*CONSTCOND*/0) |
462 | | | 462 | |
463 | #define WM_INIT_RXDESC(sc, x) \ | | 463 | #define WM_INIT_RXDESC(sc, x) \ |
464 | do { \ | | 464 | do { \ |
465 | struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \ | | 465 | struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \ |
466 | wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)]; \ | | 466 | wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)]; \ |
467 | struct mbuf *__m = __rxs->rxs_mbuf; \ | | 467 | struct mbuf *__m = __rxs->rxs_mbuf; \ |
468 | \ | | 468 | \ |
469 | /* \ | | 469 | /* \ |
470 | * Note: We scoot the packet forward 2 bytes in the buffer \ | | 470 | * Note: We scoot the packet forward 2 bytes in the buffer \ |
471 | * so that the payload after the Ethernet header is aligned \ | | 471 | * so that the payload after the Ethernet header is aligned \ |
472 | * to a 4-byte boundary. \ | | 472 | * to a 4-byte boundary. \ |
473 | * \ | | 473 | * \ |
474 | * XXX BRAINDAMAGE ALERT! \ | | 474 | * XXX BRAINDAMAGE ALERT! \ |
475 | * The stupid chip uses the same size for every buffer, which \ | | 475 | * The stupid chip uses the same size for every buffer, which \ |
476 | * is set in the Receive Control register. We are using the 2K \ | | 476 | * is set in the Receive Control register. We are using the 2K \ |
477 | * size option, but what we REALLY want is (2K - 2)! For this \ | | 477 | * size option, but what we REALLY want is (2K - 2)! For this \ |
478 | * reason, we can't "scoot" packets longer than the standard \ | | 478 | * reason, we can't "scoot" packets longer than the standard \ |
479 | * Ethernet MTU. On strict-alignment platforms, if the total \ | | 479 | * Ethernet MTU. On strict-alignment platforms, if the total \ |
480 | * size exceeds (2K - 2) we set align_tweak to 0 and let \ | | 480 | * size exceeds (2K - 2) we set align_tweak to 0 and let \ |
481 | * the upper layer copy the headers. \ | | 481 | * the upper layer copy the headers. \ |
482 | */ \ | | 482 | */ \ |
483 | __m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak; \ | | 483 | __m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak; \ |
484 | \ | | 484 | \ |
485 | wm_set_dma_addr(&__rxd->wrx_addr, \ | | 485 | wm_set_dma_addr(&__rxd->wrx_addr, \ |
486 | __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \ | | 486 | __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \ |
487 | __rxd->wrx_len = 0; \ | | 487 | __rxd->wrx_len = 0; \ |
488 | __rxd->wrx_cksum = 0; \ | | 488 | __rxd->wrx_cksum = 0; \ |
489 | __rxd->wrx_status = 0; \ | | 489 | __rxd->wrx_status = 0; \ |
490 | __rxd->wrx_errors = 0; \ | | 490 | __rxd->wrx_errors = 0; \ |
491 | __rxd->wrx_special = 0; \ | | 491 | __rxd->wrx_special = 0; \ |
492 | WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \ | | 492 | WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \ |
493 | \ | | 493 | \ |
494 | CSR_WRITE((sc), (sc)->sc_rdt_reg, (x)); \ | | 494 | CSR_WRITE((sc), (sc)->sc_rdt_reg, (x)); \ |
495 | } while (/*CONSTCOND*/0) | | 495 | } while (/*CONSTCOND*/0) |
496 | | | 496 | |
497 | static void wm_start(struct ifnet *); | | 497 | static void wm_start(struct ifnet *); |
498 | static void wm_nq_start(struct ifnet *); | | 498 | static void wm_nq_start(struct ifnet *); |
499 | static void wm_watchdog(struct ifnet *); | | 499 | static void wm_watchdog(struct ifnet *); |
500 | static int wm_ifflags_cb(struct ethercom *); | | 500 | static int wm_ifflags_cb(struct ethercom *); |
501 | static int wm_ioctl(struct ifnet *, u_long, void *); | | 501 | static int wm_ioctl(struct ifnet *, u_long, void *); |
502 | static int wm_init(struct ifnet *); | | 502 | static int wm_init(struct ifnet *); |
503 | static void wm_stop(struct ifnet *, int); | | 503 | static void wm_stop(struct ifnet *, int); |
504 | static bool wm_suspend(device_t, const pmf_qual_t *); | | 504 | static bool wm_suspend(device_t, const pmf_qual_t *); |
505 | static bool wm_resume(device_t, const pmf_qual_t *); | | 505 | static bool wm_resume(device_t, const pmf_qual_t *); |
506 | | | 506 | |
507 | static void wm_reset(struct wm_softc *); | | 507 | static void wm_reset(struct wm_softc *); |
508 | static void wm_rxdrain(struct wm_softc *); | | 508 | static void wm_rxdrain(struct wm_softc *); |
509 | static int wm_add_rxbuf(struct wm_softc *, int); | | 509 | static int wm_add_rxbuf(struct wm_softc *, int); |
510 | static int wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *); | | 510 | static int wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *); |
511 | static int wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *); | | 511 | static int wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *); |
512 | static int wm_validate_eeprom_checksum(struct wm_softc *); | | 512 | static int wm_validate_eeprom_checksum(struct wm_softc *); |
513 | static int wm_check_alt_mac_addr(struct wm_softc *); | | 513 | static int wm_check_alt_mac_addr(struct wm_softc *); |
514 | static int wm_read_mac_addr(struct wm_softc *, uint8_t *); | | 514 | static int wm_read_mac_addr(struct wm_softc *, uint8_t *); |
515 | static void wm_tick(void *); | | 515 | static void wm_tick(void *); |
516 | | | 516 | |
517 | static void wm_set_filter(struct wm_softc *); | | 517 | static void wm_set_filter(struct wm_softc *); |
518 | static void wm_set_vlan(struct wm_softc *); | | 518 | static void wm_set_vlan(struct wm_softc *); |
519 | | | 519 | |
520 | static int wm_intr(void *); | | 520 | static int wm_intr(void *); |
521 | static void wm_txintr(struct wm_softc *); | | 521 | static void wm_txintr(struct wm_softc *); |
522 | static void wm_rxintr(struct wm_softc *); | | 522 | static void wm_rxintr(struct wm_softc *); |
523 | static void wm_linkintr(struct wm_softc *, uint32_t); | | 523 | static void wm_linkintr(struct wm_softc *, uint32_t); |
524 | | | 524 | |
525 | static void wm_tbi_mediainit(struct wm_softc *); | | 525 | static void wm_tbi_mediainit(struct wm_softc *); |
526 | static int wm_tbi_mediachange(struct ifnet *); | | 526 | static int wm_tbi_mediachange(struct ifnet *); |
527 | static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *); | | 527 | static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *); |
528 | | | 528 | |
529 | static void wm_tbi_set_linkled(struct wm_softc *); | | 529 | static void wm_tbi_set_linkled(struct wm_softc *); |
530 | static void wm_tbi_check_link(struct wm_softc *); | | 530 | static void wm_tbi_check_link(struct wm_softc *); |
531 | | | 531 | |
532 | static void wm_gmii_reset(struct wm_softc *); | | 532 | static void wm_gmii_reset(struct wm_softc *); |
533 | | | 533 | |
534 | static int wm_gmii_i82543_readreg(device_t, int, int); | | 534 | static int wm_gmii_i82543_readreg(device_t, int, int); |
535 | static void wm_gmii_i82543_writereg(device_t, int, int, int); | | 535 | static void wm_gmii_i82543_writereg(device_t, int, int, int); |
536 | static int wm_gmii_i82544_readreg(device_t, int, int); | | 536 | static int wm_gmii_i82544_readreg(device_t, int, int); |
537 | static void wm_gmii_i82544_writereg(device_t, int, int, int); | | 537 | static void wm_gmii_i82544_writereg(device_t, int, int, int); |
538 | static int wm_gmii_i80003_readreg(device_t, int, int); | | 538 | static int wm_gmii_i80003_readreg(device_t, int, int); |
539 | static void wm_gmii_i80003_writereg(device_t, int, int, int); | | 539 | static void wm_gmii_i80003_writereg(device_t, int, int, int); |
540 | static int wm_gmii_bm_readreg(device_t, int, int); | | 540 | static int wm_gmii_bm_readreg(device_t, int, int); |
541 | static void wm_gmii_bm_writereg(device_t, int, int, int); | | 541 | static void wm_gmii_bm_writereg(device_t, int, int, int); |
542 | static int wm_gmii_hv_readreg(device_t, int, int); | | 542 | static int wm_gmii_hv_readreg(device_t, int, int); |
543 | static void wm_gmii_hv_writereg(device_t, int, int, int); | | 543 | static void wm_gmii_hv_writereg(device_t, int, int, int); |
544 | static int wm_gmii_82580_readreg(device_t, int, int); | | 544 | static int wm_gmii_82580_readreg(device_t, int, int); |
545 | static void wm_gmii_82580_writereg(device_t, int, int, int); | | 545 | static void wm_gmii_82580_writereg(device_t, int, int, int); |
546 | static int wm_sgmii_readreg(device_t, int, int); | | 546 | static int wm_sgmii_readreg(device_t, int, int); |
547 | static void wm_sgmii_writereg(device_t, int, int, int); | | 547 | static void wm_sgmii_writereg(device_t, int, int, int); |
548 | | | 548 | |
549 | static void wm_gmii_statchg(struct ifnet *); | | 549 | static void wm_gmii_statchg(struct ifnet *); |
550 | | | 550 | |
551 | static void wm_gmii_mediainit(struct wm_softc *, pci_product_id_t); | | 551 | static void wm_gmii_mediainit(struct wm_softc *, pci_product_id_t); |
552 | static int wm_gmii_mediachange(struct ifnet *); | | 552 | static int wm_gmii_mediachange(struct ifnet *); |
553 | static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *); | | 553 | static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *); |
554 | | | 554 | |
555 | static int wm_kmrn_readreg(struct wm_softc *, int); | | 555 | static int wm_kmrn_readreg(struct wm_softc *, int); |
556 | static void wm_kmrn_writereg(struct wm_softc *, int, int); | | 556 | static void wm_kmrn_writereg(struct wm_softc *, int, int); |
557 | | | 557 | |
558 | static void wm_set_spiaddrbits(struct wm_softc *); | | 558 | static void wm_set_spiaddrbits(struct wm_softc *); |
559 | static int wm_match(device_t, cfdata_t, void *); | | 559 | static int wm_match(device_t, cfdata_t, void *); |
560 | static void wm_attach(device_t, device_t, void *); | | 560 | static void wm_attach(device_t, device_t, void *); |
561 | static int wm_detach(device_t, int); | | 561 | static int wm_detach(device_t, int); |
562 | static int wm_is_onboard_nvm_eeprom(struct wm_softc *); | | 562 | static int wm_is_onboard_nvm_eeprom(struct wm_softc *); |
563 | static void wm_get_auto_rd_done(struct wm_softc *); | | 563 | static void wm_get_auto_rd_done(struct wm_softc *); |
564 | static void wm_lan_init_done(struct wm_softc *); | | 564 | static void wm_lan_init_done(struct wm_softc *); |
565 | static void wm_get_cfg_done(struct wm_softc *); | | 565 | static void wm_get_cfg_done(struct wm_softc *); |
566 | static int wm_get_swsm_semaphore(struct wm_softc *); | | 566 | static int wm_get_swsm_semaphore(struct wm_softc *); |
567 | static void wm_put_swsm_semaphore(struct wm_softc *); | | 567 | static void wm_put_swsm_semaphore(struct wm_softc *); |
568 | static int wm_poll_eerd_eewr_done(struct wm_softc *, int); | | 568 | static int wm_poll_eerd_eewr_done(struct wm_softc *, int); |
569 | static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t); | | 569 | static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t); |
570 | static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t); | | 570 | static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t); |
571 | static int wm_get_swfwhw_semaphore(struct wm_softc *); | | 571 | static int wm_get_swfwhw_semaphore(struct wm_softc *); |
572 | static void wm_put_swfwhw_semaphore(struct wm_softc *); | | 572 | static void wm_put_swfwhw_semaphore(struct wm_softc *); |
573 | | | 573 | |
574 | static int wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *); | | 574 | static int wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *); |
575 | static int32_t wm_ich8_cycle_init(struct wm_softc *); | | 575 | static int32_t wm_ich8_cycle_init(struct wm_softc *); |
576 | static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t); | | 576 | static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t); |
577 | static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t, | | 577 | static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t, |
578 | uint32_t, uint16_t *); | | 578 | uint32_t, uint16_t *); |
579 | static int32_t wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *); | | 579 | static int32_t wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *); |
580 | static int32_t wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *); | | 580 | static int32_t wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *); |
581 | static void wm_82547_txfifo_stall(void *); | | 581 | static void wm_82547_txfifo_stall(void *); |
582 | static void wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int); | | 582 | static void wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int); |
583 | static int wm_check_mng_mode(struct wm_softc *); | | 583 | static int wm_check_mng_mode(struct wm_softc *); |
584 | static int wm_check_mng_mode_ich8lan(struct wm_softc *); | | 584 | static int wm_check_mng_mode_ich8lan(struct wm_softc *); |
585 | static int wm_check_mng_mode_82574(struct wm_softc *); | | 585 | static int wm_check_mng_mode_82574(struct wm_softc *); |
586 | static int wm_check_mng_mode_generic(struct wm_softc *); | | 586 | static int wm_check_mng_mode_generic(struct wm_softc *); |
587 | static int wm_enable_mng_pass_thru(struct wm_softc *); | | 587 | static int wm_enable_mng_pass_thru(struct wm_softc *); |
588 | static int wm_check_reset_block(struct wm_softc *); | | 588 | static int wm_check_reset_block(struct wm_softc *); |
589 | static void wm_get_hw_control(struct wm_softc *); | | 589 | static void wm_get_hw_control(struct wm_softc *); |
590 | static int wm_check_for_link(struct wm_softc *); | | 590 | static int wm_check_for_link(struct wm_softc *); |
591 | static void wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *); | | 591 | static void wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *); |
592 | static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *); | | 592 | static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *); |
593 | #ifdef WM_WOL | | 593 | #ifdef WM_WOL |
594 | static void wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *); | | 594 | static void wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *); |
595 | #endif | | 595 | #endif |
596 | static void wm_hv_phy_workaround_ich8lan(struct wm_softc *); | | 596 | static void wm_hv_phy_workaround_ich8lan(struct wm_softc *); |
597 | static void wm_lv_phy_workaround_ich8lan(struct wm_softc *); | | 597 | static void wm_lv_phy_workaround_ich8lan(struct wm_softc *); |
598 | static void wm_k1_gig_workaround_hv(struct wm_softc *, int); | | 598 | static void wm_k1_gig_workaround_hv(struct wm_softc *, int); |
599 | static void wm_set_mdio_slow_mode_hv(struct wm_softc *); | | 599 | static void wm_set_mdio_slow_mode_hv(struct wm_softc *); |
600 | static void wm_configure_k1_ich8lan(struct wm_softc *, int); | | 600 | static void wm_configure_k1_ich8lan(struct wm_softc *, int); |
601 | static void wm_smbustopci(struct wm_softc *); | | 601 | static void wm_smbustopci(struct wm_softc *); |
602 | static void wm_set_pcie_completion_timeout(struct wm_softc *); | | 602 | static void wm_set_pcie_completion_timeout(struct wm_softc *); |
603 | static void wm_reset_init_script_82575(struct wm_softc *); | | 603 | static void wm_reset_init_script_82575(struct wm_softc *); |
604 | static void wm_release_manageability(struct wm_softc *); | | 604 | static void wm_release_manageability(struct wm_softc *); |
605 | static void wm_release_hw_control(struct wm_softc *); | | 605 | static void wm_release_hw_control(struct wm_softc *); |
606 | static void wm_get_wakeup(struct wm_softc *); | | 606 | static void wm_get_wakeup(struct wm_softc *); |
607 | #ifdef WM_WOL | | 607 | #ifdef WM_WOL |
608 | static void wm_enable_phy_wakeup(struct wm_softc *); | | 608 | static void wm_enable_phy_wakeup(struct wm_softc *); |
609 | static void wm_enable_wakeup(struct wm_softc *); | | 609 | static void wm_enable_wakeup(struct wm_softc *); |
610 | #endif | | 610 | #endif |
611 | static void wm_init_manageability(struct wm_softc *); | | 611 | static void wm_init_manageability(struct wm_softc *); |
612 | static void wm_set_eee_i350(struct wm_softc *); | | 612 | static void wm_set_eee_i350(struct wm_softc *); |
613 | | | 613 | |
614 | CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc), | | 614 | CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc), |
615 | wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN); | | 615 | wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN); |
616 | | | 616 | |
617 | /* | | 617 | /* |
618 | * Devices supported by this driver. | | 618 | * Devices supported by this driver. |
619 | */ | | 619 | */ |
620 | static const struct wm_product { | | 620 | static const struct wm_product { |
621 | pci_vendor_id_t wmp_vendor; | | 621 | pci_vendor_id_t wmp_vendor; |
622 | pci_product_id_t wmp_product; | | 622 | pci_product_id_t wmp_product; |
623 | const char *wmp_name; | | 623 | const char *wmp_name; |
624 | wm_chip_type wmp_type; | | 624 | wm_chip_type wmp_type; |
625 | int wmp_flags; | | 625 | int wmp_flags; |
626 | #define WMP_F_1000X 0x01 | | 626 | #define WMP_F_1000X 0x01 |
627 | #define WMP_F_1000T 0x02 | | 627 | #define WMP_F_1000T 0x02 |
628 | #define WMP_F_SERDES 0x04 | | 628 | #define WMP_F_SERDES 0x04 |
629 | } wm_products[] = { | | 629 | } wm_products[] = { |
630 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542, | | 630 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542, |
631 | "Intel i82542 1000BASE-X Ethernet", | | 631 | "Intel i82542 1000BASE-X Ethernet", |
632 | WM_T_82542_2_1, WMP_F_1000X }, | | 632 | WM_T_82542_2_1, WMP_F_1000X }, |
633 | | | 633 | |
634 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER, | | 634 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER, |
635 | "Intel i82543GC 1000BASE-X Ethernet", | | 635 | "Intel i82543GC 1000BASE-X Ethernet", |
636 | WM_T_82543, WMP_F_1000X }, | | 636 | WM_T_82543, WMP_F_1000X }, |
637 | | | 637 | |
638 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER, | | 638 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER, |
639 | "Intel i82543GC 1000BASE-T Ethernet", | | 639 | "Intel i82543GC 1000BASE-T Ethernet", |
640 | WM_T_82543, WMP_F_1000T }, | | 640 | WM_T_82543, WMP_F_1000T }, |
641 | | | 641 | |
642 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER, | | 642 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER, |
643 | "Intel i82544EI 1000BASE-T Ethernet", | | 643 | "Intel i82544EI 1000BASE-T Ethernet", |
644 | WM_T_82544, WMP_F_1000T }, | | 644 | WM_T_82544, WMP_F_1000T }, |
645 | | | 645 | |
646 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER, | | 646 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER, |
647 | "Intel i82544EI 1000BASE-X Ethernet", | | 647 | "Intel i82544EI 1000BASE-X Ethernet", |
648 | WM_T_82544, WMP_F_1000X }, | | 648 | WM_T_82544, WMP_F_1000X }, |
649 | | | 649 | |
650 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER, | | 650 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER, |
651 | "Intel i82544GC 1000BASE-T Ethernet", | | 651 | "Intel i82544GC 1000BASE-T Ethernet", |
652 | WM_T_82544, WMP_F_1000T }, | | 652 | WM_T_82544, WMP_F_1000T }, |
653 | | | 653 | |
654 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM, | | 654 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM, |
655 | "Intel i82544GC (LOM) 1000BASE-T Ethernet", | | 655 | "Intel i82544GC (LOM) 1000BASE-T Ethernet", |
656 | WM_T_82544, WMP_F_1000T }, | | 656 | WM_T_82544, WMP_F_1000T }, |
657 | | | 657 | |
658 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM, | | 658 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM, |
659 | "Intel i82540EM 1000BASE-T Ethernet", | | 659 | "Intel i82540EM 1000BASE-T Ethernet", |
660 | WM_T_82540, WMP_F_1000T }, | | 660 | WM_T_82540, WMP_F_1000T }, |
661 | | | 661 | |
662 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM, | | 662 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM, |
663 | "Intel i82540EM (LOM) 1000BASE-T Ethernet", | | 663 | "Intel i82540EM (LOM) 1000BASE-T Ethernet", |
664 | WM_T_82540, WMP_F_1000T }, | | 664 | WM_T_82540, WMP_F_1000T }, |
665 | | | 665 | |
666 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM, | | 666 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM, |
667 | "Intel i82540EP 1000BASE-T Ethernet", | | 667 | "Intel i82540EP 1000BASE-T Ethernet", |
668 | WM_T_82540, WMP_F_1000T }, | | 668 | WM_T_82540, WMP_F_1000T }, |
669 | | | 669 | |
670 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP, | | 670 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP, |
671 | "Intel i82540EP 1000BASE-T Ethernet", | | 671 | "Intel i82540EP 1000BASE-T Ethernet", |
672 | WM_T_82540, WMP_F_1000T }, | | 672 | WM_T_82540, WMP_F_1000T }, |
673 | | | 673 | |
674 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP, | | 674 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP, |
675 | "Intel i82540EP 1000BASE-T Ethernet", | | 675 | "Intel i82540EP 1000BASE-T Ethernet", |
676 | WM_T_82540, WMP_F_1000T }, | | 676 | WM_T_82540, WMP_F_1000T }, |
677 | | | 677 | |
678 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER, | | 678 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER, |
679 | "Intel i82545EM 1000BASE-T Ethernet", | | 679 | "Intel i82545EM 1000BASE-T Ethernet", |
680 | WM_T_82545, WMP_F_1000T }, | | 680 | WM_T_82545, WMP_F_1000T }, |
681 | | | 681 | |
682 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER, | | 682 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER, |
683 | "Intel i82545GM 1000BASE-T Ethernet", | | 683 | "Intel i82545GM 1000BASE-T Ethernet", |
684 | WM_T_82545_3, WMP_F_1000T }, | | 684 | WM_T_82545_3, WMP_F_1000T }, |
685 | | | 685 | |
686 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER, | | 686 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER, |
687 | "Intel i82545GM 1000BASE-X Ethernet", | | 687 | "Intel i82545GM 1000BASE-X Ethernet", |
688 | WM_T_82545_3, WMP_F_1000X }, | | 688 | WM_T_82545_3, WMP_F_1000X }, |
689 | #if 0 | | 689 | #if 0 |
690 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES, | | 690 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES, |
691 | "Intel i82545GM Gigabit Ethernet (SERDES)", | | 691 | "Intel i82545GM Gigabit Ethernet (SERDES)", |
692 | WM_T_82545_3, WMP_F_SERDES }, | | 692 | WM_T_82545_3, WMP_F_SERDES }, |
693 | #endif | | 693 | #endif |
694 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER, | | 694 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER, |
695 | "Intel i82546EB 1000BASE-T Ethernet", | | 695 | "Intel i82546EB 1000BASE-T Ethernet", |
696 | WM_T_82546, WMP_F_1000T }, | | 696 | WM_T_82546, WMP_F_1000T }, |
697 | | | 697 | |
698 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD, | | 698 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD, |
699 | "Intel i82546EB 1000BASE-T Ethernet", | | 699 | "Intel i82546EB 1000BASE-T Ethernet", |
700 | WM_T_82546, WMP_F_1000T }, | | 700 | WM_T_82546, WMP_F_1000T }, |
701 | | | 701 | |
702 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER, | | 702 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER, |
703 | "Intel i82545EM 1000BASE-X Ethernet", | | 703 | "Intel i82545EM 1000BASE-X Ethernet", |
704 | WM_T_82545, WMP_F_1000X }, | | 704 | WM_T_82545, WMP_F_1000X }, |
705 | | | 705 | |
706 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER, | | 706 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER, |
707 | "Intel i82546EB 1000BASE-X Ethernet", | | 707 | "Intel i82546EB 1000BASE-X Ethernet", |
708 | WM_T_82546, WMP_F_1000X }, | | 708 | WM_T_82546, WMP_F_1000X }, |
709 | | | 709 | |
710 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER, | | 710 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER, |
711 | "Intel i82546GB 1000BASE-T Ethernet", | | 711 | "Intel i82546GB 1000BASE-T Ethernet", |
712 | WM_T_82546_3, WMP_F_1000T }, | | 712 | WM_T_82546_3, WMP_F_1000T }, |
713 | | | 713 | |
714 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER, | | 714 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER, |
715 | "Intel i82546GB 1000BASE-X Ethernet", | | 715 | "Intel i82546GB 1000BASE-X Ethernet", |
716 | WM_T_82546_3, WMP_F_1000X }, | | 716 | WM_T_82546_3, WMP_F_1000X }, |
717 | #if 0 | | 717 | #if 0 |
718 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES, | | 718 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES, |
719 | "Intel i82546GB Gigabit Ethernet (SERDES)", | | 719 | "Intel i82546GB Gigabit Ethernet (SERDES)", |
720 | WM_T_82546_3, WMP_F_SERDES }, | | 720 | WM_T_82546_3, WMP_F_SERDES }, |
721 | #endif | | 721 | #endif |
722 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER, | | 722 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER, |
723 | "i82546GB quad-port Gigabit Ethernet", | | 723 | "i82546GB quad-port Gigabit Ethernet", |
724 | WM_T_82546_3, WMP_F_1000T }, | | 724 | WM_T_82546_3, WMP_F_1000T }, |
725 | | | 725 | |
726 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3, | | 726 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3, |
727 | "i82546GB quad-port Gigabit Ethernet (KSP3)", | | 727 | "i82546GB quad-port Gigabit Ethernet (KSP3)", |
728 | WM_T_82546_3, WMP_F_1000T }, | | 728 | WM_T_82546_3, WMP_F_1000T }, |
729 | | | 729 | |
730 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE, | | 730 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE, |
731 | "Intel PRO/1000MT (82546GB)", | | 731 | "Intel PRO/1000MT (82546GB)", |
732 | WM_T_82546_3, WMP_F_1000T }, | | 732 | WM_T_82546_3, WMP_F_1000T }, |
733 | | | 733 | |
734 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI, | | 734 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI, |
735 | "Intel i82541EI 1000BASE-T Ethernet", | | 735 | "Intel i82541EI 1000BASE-T Ethernet", |
736 | WM_T_82541, WMP_F_1000T }, | | 736 | WM_T_82541, WMP_F_1000T }, |
737 | | | 737 | |
738 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM, | | 738 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM, |
739 | "Intel i82541ER (LOM) 1000BASE-T Ethernet", | | 739 | "Intel i82541ER (LOM) 1000BASE-T Ethernet", |
740 | WM_T_82541, WMP_F_1000T }, | | 740 | WM_T_82541, WMP_F_1000T }, |
741 | | | 741 | |
742 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE, | | 742 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE, |
743 | "Intel i82541EI Mobile 1000BASE-T Ethernet", | | 743 | "Intel i82541EI Mobile 1000BASE-T Ethernet", |
744 | WM_T_82541, WMP_F_1000T }, | | 744 | WM_T_82541, WMP_F_1000T }, |
745 | | | 745 | |
746 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER, | | 746 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER, |
747 | "Intel i82541ER 1000BASE-T Ethernet", | | 747 | "Intel i82541ER 1000BASE-T Ethernet", |
748 | WM_T_82541_2, WMP_F_1000T }, | | 748 | WM_T_82541_2, WMP_F_1000T }, |
749 | | | 749 | |
750 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI, | | 750 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI, |
751 | "Intel i82541GI 1000BASE-T Ethernet", | | 751 | "Intel i82541GI 1000BASE-T Ethernet", |
752 | WM_T_82541_2, WMP_F_1000T }, | | 752 | WM_T_82541_2, WMP_F_1000T }, |
753 | | | 753 | |
754 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE, | | 754 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE, |
755 | "Intel i82541GI Mobile 1000BASE-T Ethernet", | | 755 | "Intel i82541GI Mobile 1000BASE-T Ethernet", |
756 | WM_T_82541_2, WMP_F_1000T }, | | 756 | WM_T_82541_2, WMP_F_1000T }, |
757 | | | 757 | |
758 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI, | | 758 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI, |
759 | "Intel i82541PI 1000BASE-T Ethernet", | | 759 | "Intel i82541PI 1000BASE-T Ethernet", |
760 | WM_T_82541_2, WMP_F_1000T }, | | 760 | WM_T_82541_2, WMP_F_1000T }, |
761 | | | 761 | |
762 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI, | | 762 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI, |
763 | "Intel i82547EI 1000BASE-T Ethernet", | | 763 | "Intel i82547EI 1000BASE-T Ethernet", |
764 | WM_T_82547, WMP_F_1000T }, | | 764 | WM_T_82547, WMP_F_1000T }, |
765 | | | 765 | |
766 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE, | | 766 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE, |
767 | "Intel i82547EI Mobile 1000BASE-T Ethernet", | | 767 | "Intel i82547EI Mobile 1000BASE-T Ethernet", |
768 | WM_T_82547, WMP_F_1000T }, | | 768 | WM_T_82547, WMP_F_1000T }, |
769 | | | 769 | |
770 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI, | | 770 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI, |
771 | "Intel i82547GI 1000BASE-T Ethernet", | | 771 | "Intel i82547GI 1000BASE-T Ethernet", |
772 | WM_T_82547_2, WMP_F_1000T }, | | 772 | WM_T_82547_2, WMP_F_1000T }, |
773 | | | 773 | |
774 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER, | | 774 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER, |
775 | "Intel PRO/1000 PT (82571EB)", | | 775 | "Intel PRO/1000 PT (82571EB)", |
776 | WM_T_82571, WMP_F_1000T }, | | 776 | WM_T_82571, WMP_F_1000T }, |
777 | | | 777 | |
778 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER, | | 778 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER, |
779 | "Intel PRO/1000 PF (82571EB)", | | 779 | "Intel PRO/1000 PF (82571EB)", |
780 | WM_T_82571, WMP_F_1000X }, | | 780 | WM_T_82571, WMP_F_1000X }, |
781 | #if 0 | | 781 | #if 0 |
782 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES, | | 782 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES, |
783 | "Intel PRO/1000 PB (82571EB)", | | 783 | "Intel PRO/1000 PB (82571EB)", |
784 | WM_T_82571, WMP_F_SERDES }, | | 784 | WM_T_82571, WMP_F_SERDES }, |
785 | #endif | | 785 | #endif |
786 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER, | | 786 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER, |
787 | "Intel PRO/1000 QT (82571EB)", | | 787 | "Intel PRO/1000 QT (82571EB)", |
788 | WM_T_82571, WMP_F_1000T }, | | 788 | WM_T_82571, WMP_F_1000T }, |
789 | | | 789 | |
790 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER, | | 790 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER, |
791 | "Intel i82572EI 1000baseT Ethernet", | | 791 | "Intel i82572EI 1000baseT Ethernet", |
792 | WM_T_82572, WMP_F_1000T }, | | 792 | WM_T_82572, WMP_F_1000T }, |
793 | | | 793 | |
794 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER, | | 794 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER, |
795 | "Intel PRO/1000 PT Quad Port Server Adapter", | | 795 | "Intel PRO/1000 PT Quad Port Server Adapter", |
796 | WM_T_82571, WMP_F_1000T, }, | | 796 | WM_T_82571, WMP_F_1000T, }, |
797 | | | 797 | |
798 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER, | | 798 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER, |
799 | "Intel i82572EI 1000baseX Ethernet", | | 799 | "Intel i82572EI 1000baseX Ethernet", |
800 | WM_T_82572, WMP_F_1000X }, | | 800 | WM_T_82572, WMP_F_1000X }, |
801 | #if 0 | | 801 | #if 0 |
802 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES, | | 802 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES, |
803 | "Intel i82572EI Gigabit Ethernet (SERDES)", | | 803 | "Intel i82572EI Gigabit Ethernet (SERDES)", |
804 | WM_T_82572, WMP_F_SERDES }, | | 804 | WM_T_82572, WMP_F_SERDES }, |
805 | #endif | | 805 | #endif |
806 | | | 806 | |
807 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI, | | 807 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI, |
808 | "Intel i82572EI 1000baseT Ethernet", | | 808 | "Intel i82572EI 1000baseT Ethernet", |
809 | WM_T_82572, WMP_F_1000T }, | | 809 | WM_T_82572, WMP_F_1000T }, |
810 | | | 810 | |
811 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E, | | 811 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E, |
812 | "Intel i82573E", | | 812 | "Intel i82573E", |
813 | WM_T_82573, WMP_F_1000T }, | | 813 | WM_T_82573, WMP_F_1000T }, |
814 | | | 814 | |
815 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT, | | 815 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT, |
816 | "Intel i82573E IAMT", | | 816 | "Intel i82573E IAMT", |
817 | WM_T_82573, WMP_F_1000T }, | | 817 | WM_T_82573, WMP_F_1000T }, |
818 | | | 818 | |
819 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L, | | 819 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L, |
820 | "Intel i82573L Gigabit Ethernet", | | 820 | "Intel i82573L Gigabit Ethernet", |
821 | WM_T_82573, WMP_F_1000T }, | | 821 | WM_T_82573, WMP_F_1000T }, |
822 | | | 822 | |
823 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L, | | 823 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L, |
824 | "Intel i82574L", | | 824 | "Intel i82574L", |
825 | WM_T_82574, WMP_F_1000T }, | | 825 | WM_T_82574, WMP_F_1000T }, |
826 | | | 826 | |
827 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V, | | 827 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V, |
828 | "Intel i82583V", | | 828 | "Intel i82583V", |
829 | WM_T_82583, WMP_F_1000T }, | | 829 | WM_T_82583, WMP_F_1000T }, |
830 | | | 830 | |
831 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT, | | 831 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT, |
832 | "i80003 dual 1000baseT Ethernet", | | 832 | "i80003 dual 1000baseT Ethernet", |
833 | WM_T_80003, WMP_F_1000T }, | | 833 | WM_T_80003, WMP_F_1000T }, |
834 | | | 834 | |
835 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT, | | 835 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT, |
836 | "i80003 dual 1000baseX Ethernet", | | 836 | "i80003 dual 1000baseX Ethernet", |
837 | WM_T_80003, WMP_F_1000T }, | | 837 | WM_T_80003, WMP_F_1000T }, |
838 | #if 0 | | 838 | #if 0 |
839 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT, | | 839 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT, |
840 | "Intel i80003ES2 dual Gigabit Ethernet (SERDES)", | | 840 | "Intel i80003ES2 dual Gigabit Ethernet (SERDES)", |
841 | WM_T_80003, WMP_F_SERDES }, | | 841 | WM_T_80003, WMP_F_SERDES }, |
842 | #endif | | 842 | #endif |
843 | | | 843 | |
844 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT, | | 844 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT, |
845 | "Intel i80003 1000baseT Ethernet", | | 845 | "Intel i80003 1000baseT Ethernet", |
846 | WM_T_80003, WMP_F_1000T }, | | 846 | WM_T_80003, WMP_F_1000T }, |
847 | #if 0 | | 847 | #if 0 |
848 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT, | | 848 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT, |
849 | "Intel i80003 Gigabit Ethernet (SERDES)", | | 849 | "Intel i80003 Gigabit Ethernet (SERDES)", |
850 | WM_T_80003, WMP_F_SERDES }, | | 850 | WM_T_80003, WMP_F_SERDES }, |
851 | #endif | | 851 | #endif |
852 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_AMT, | | 852 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_AMT, |
853 | "Intel i82801H (M_AMT) LAN Controller", | | 853 | "Intel i82801H (M_AMT) LAN Controller", |
854 | WM_T_ICH8, WMP_F_1000T }, | | 854 | WM_T_ICH8, WMP_F_1000T }, |
855 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_AMT, | | 855 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_AMT, |
856 | "Intel i82801H (AMT) LAN Controller", | | 856 | "Intel i82801H (AMT) LAN Controller", |
857 | WM_T_ICH8, WMP_F_1000T }, | | 857 | WM_T_ICH8, WMP_F_1000T }, |
858 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_LAN, | | 858 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_LAN, |
859 | "Intel i82801H LAN Controller", | | 859 | "Intel i82801H LAN Controller", |
860 | WM_T_ICH8, WMP_F_1000T }, | | 860 | WM_T_ICH8, WMP_F_1000T }, |
861 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_LAN, | | 861 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_LAN, |
862 | "Intel i82801H (IFE) LAN Controller", | | 862 | "Intel i82801H (IFE) LAN Controller", |
863 | WM_T_ICH8, WMP_F_1000T }, | | 863 | WM_T_ICH8, WMP_F_1000T }, |
864 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_LAN, | | 864 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_LAN, |
865 | "Intel i82801H (M) LAN Controller", | | 865 | "Intel i82801H (M) LAN Controller", |
866 | WM_T_ICH8, WMP_F_1000T }, | | 866 | WM_T_ICH8, WMP_F_1000T }, |
867 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_GT, | | 867 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_GT, |
868 | "Intel i82801H IFE (GT) LAN Controller", | | 868 | "Intel i82801H IFE (GT) LAN Controller", |
869 | WM_T_ICH8, WMP_F_1000T }, | | 869 | WM_T_ICH8, WMP_F_1000T }, |
870 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_G, | | 870 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_G, |
871 | "Intel i82801H IFE (G) LAN Controller", | | 871 | "Intel i82801H IFE (G) LAN Controller", |
872 | WM_T_ICH8, WMP_F_1000T }, | | 872 | WM_T_ICH8, WMP_F_1000T }, |
873 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_AMT, | | 873 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_AMT, |
874 | "82801I (AMT) LAN Controller", | | 874 | "82801I (AMT) LAN Controller", |
875 | WM_T_ICH9, WMP_F_1000T }, | | 875 | WM_T_ICH9, WMP_F_1000T }, |
876 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE, | | 876 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE, |
877 | "82801I LAN Controller", | | 877 | "82801I LAN Controller", |
878 | WM_T_ICH9, WMP_F_1000T }, | | 878 | WM_T_ICH9, WMP_F_1000T }, |
879 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_G, | | 879 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_G, |
880 | "82801I (G) LAN Controller", | | 880 | "82801I (G) LAN Controller", |
881 | WM_T_ICH9, WMP_F_1000T }, | | 881 | WM_T_ICH9, WMP_F_1000T }, |
882 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_GT, | | 882 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_GT, |
883 | "82801I (GT) LAN Controller", | | 883 | "82801I (GT) LAN Controller", |
884 | WM_T_ICH9, WMP_F_1000T }, | | 884 | WM_T_ICH9, WMP_F_1000T }, |
885 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_C, | | 885 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_C, |
886 | "82801I (C) LAN Controller", | | 886 | "82801I (C) LAN Controller", |
887 | WM_T_ICH9, WMP_F_1000T }, | | 887 | WM_T_ICH9, WMP_F_1000T }, |
888 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M, | | 888 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M, |
889 | "82801I mobile LAN Controller", | | 889 | "82801I mobile LAN Controller", |
890 | WM_T_ICH9, WMP_F_1000T }, | | 890 | WM_T_ICH9, WMP_F_1000T }, |
891 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IGP_M_V, | | 891 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IGP_M_V, |
892 | "82801I mobile (V) LAN Controller", | | 892 | "82801I mobile (V) LAN Controller", |
893 | WM_T_ICH9, WMP_F_1000T }, | | 893 | WM_T_ICH9, WMP_F_1000T }, |
894 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_AMT, | | 894 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_AMT, |
895 | "82801I mobile (AMT) LAN Controller", | | 895 | "82801I mobile (AMT) LAN Controller", |
896 | WM_T_ICH9, WMP_F_1000T }, | | 896 | WM_T_ICH9, WMP_F_1000T }, |
897 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_BM, | | 897 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_BM, |
898 | "82567LM-4 LAN Controller", | | 898 | "82567LM-4 LAN Controller", |
899 | WM_T_ICH9, WMP_F_1000T }, | | 899 | WM_T_ICH9, WMP_F_1000T }, |
900 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_82567V_3, | | 900 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_82567V_3, |
901 | "82567V-3 LAN Controller", | | 901 | "82567V-3 LAN Controller", |
902 | WM_T_ICH9, WMP_F_1000T }, | | 902 | WM_T_ICH9, WMP_F_1000T }, |
903 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LM, | | 903 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LM, |
904 | "82567LM-2 LAN Controller", | | 904 | "82567LM-2 LAN Controller", |
905 | WM_T_ICH10, WMP_F_1000T }, | | 905 | WM_T_ICH10, WMP_F_1000T }, |
906 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LF, | | 906 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LF, |
907 | "82567LF-2 LAN Controller", | | 907 | "82567LF-2 LAN Controller", |
908 | WM_T_ICH10, WMP_F_1000T }, | | 908 | WM_T_ICH10, WMP_F_1000T }, |
909 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LM, | | 909 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LM, |
910 | "82567LM-3 LAN Controller", | | 910 | "82567LM-3 LAN Controller", |
911 | WM_T_ICH10, WMP_F_1000T }, | | 911 | WM_T_ICH10, WMP_F_1000T }, |
912 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LF, | | 912 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LF, |
913 | "82567LF-3 LAN Controller", | | 913 | "82567LF-3 LAN Controller", |
914 | WM_T_ICH10, WMP_F_1000T }, | | 914 | WM_T_ICH10, WMP_F_1000T }, |
915 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_V, | | 915 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_V, |
916 | "82567V-2 LAN Controller", | | 916 | "82567V-2 LAN Controller", |
917 | WM_T_ICH10, WMP_F_1000T }, | | 917 | WM_T_ICH10, WMP_F_1000T }, |
918 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_V, | | 918 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_V, |
919 | "82567V-3? LAN Controller", | | 919 | "82567V-3? LAN Controller", |
920 | WM_T_ICH10, WMP_F_1000T }, | | 920 | WM_T_ICH10, WMP_F_1000T }, |
921 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_HANKSVILLE, | | 921 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_HANKSVILLE, |
922 | "HANKSVILLE LAN Controller", | | 922 | "HANKSVILLE LAN Controller", |
923 | WM_T_ICH10, WMP_F_1000T }, | | 923 | WM_T_ICH10, WMP_F_1000T }, |
924 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LM, | | 924 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LM, |
925 | "PCH LAN (82577LM) Controller", | | 925 | "PCH LAN (82577LM) Controller", |
926 | WM_T_PCH, WMP_F_1000T }, | | 926 | WM_T_PCH, WMP_F_1000T }, |
927 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LC, | | 927 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LC, |
928 | "PCH LAN (82577LC) Controller", | | 928 | "PCH LAN (82577LC) Controller", |
929 | WM_T_PCH, WMP_F_1000T }, | | 929 | WM_T_PCH, WMP_F_1000T }, |
930 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DM, | | 930 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DM, |
931 | "PCH LAN (82578DM) Controller", | | 931 | "PCH LAN (82578DM) Controller", |
932 | WM_T_PCH, WMP_F_1000T }, | | 932 | WM_T_PCH, WMP_F_1000T }, |
933 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DC, | | 933 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DC, |
934 | "PCH LAN (82578DC) Controller", | | 934 | "PCH LAN (82578DC) Controller", |
935 | WM_T_PCH, WMP_F_1000T }, | | 935 | WM_T_PCH, WMP_F_1000T }, |
936 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_LM, | | 936 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_LM, |
937 | "PCH2 LAN (82579LM) Controller", | | 937 | "PCH2 LAN (82579LM) Controller", |
938 | WM_T_PCH2, WMP_F_1000T }, | | 938 | WM_T_PCH2, WMP_F_1000T }, |
939 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_V, | | 939 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_V, |
940 | "PCH2 LAN (82579V) Controller", | | 940 | "PCH2 LAN (82579V) Controller", |
941 | WM_T_PCH2, WMP_F_1000T }, | | 941 | WM_T_PCH2, WMP_F_1000T }, |
942 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER, | | 942 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER, |
943 | "82575EB dual-1000baseT Ethernet", | | 943 | "82575EB dual-1000baseT Ethernet", |
944 | WM_T_82575, WMP_F_1000T }, | | 944 | WM_T_82575, WMP_F_1000T }, |
945 | #if 0 | | 945 | #if 0 |
946 | /* | | 946 | /* |
947 | * not sure if WMP_F_1000X or WMP_F_SERDES - we do not have it - so | | 947 | * not sure if WMP_F_1000X or WMP_F_SERDES - we do not have it - so |
948 | * disabled for now ... | | 948 | * disabled for now ... |
949 | */ | | 949 | */ |
950 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES, | | 950 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES, |
951 | "82575EB dual-1000baseX Ethernet (SERDES)", | | 951 | "82575EB dual-1000baseX Ethernet (SERDES)", |
952 | WM_T_82575, WMP_F_SERDES }, | | 952 | WM_T_82575, WMP_F_SERDES }, |
953 | #endif | | 953 | #endif |
954 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER, | | 954 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER, |
955 | "82575GB quad-1000baseT Ethernet", | | 955 | "82575GB quad-1000baseT Ethernet", |
956 | WM_T_82575, WMP_F_1000T }, | | 956 | WM_T_82575, WMP_F_1000T }, |
957 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM, | | 957 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM, |
958 | "82575GB quad-1000baseT Ethernet (PM)", | | 958 | "82575GB quad-1000baseT Ethernet (PM)", |
959 | WM_T_82575, WMP_F_1000T }, | | 959 | WM_T_82575, WMP_F_1000T }, |
960 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_COPPER, | | 960 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_COPPER, |
961 | "82576 1000BaseT Ethernet", | | 961 | "82576 1000BaseT Ethernet", |
962 | WM_T_82576, WMP_F_1000T }, | | 962 | WM_T_82576, WMP_F_1000T }, |
963 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER, | | 963 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER, |
964 | "82576 1000BaseX Ethernet", | | 964 | "82576 1000BaseX Ethernet", |
965 | WM_T_82576, WMP_F_1000X }, | | 965 | WM_T_82576, WMP_F_1000X }, |
966 | #if 0 | | 966 | #if 0 |
967 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES, | | 967 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES, |
968 | "82576 gigabit Ethernet (SERDES)", | | 968 | "82576 gigabit Ethernet (SERDES)", |
969 | WM_T_82576, WMP_F_SERDES }, | | 969 | WM_T_82576, WMP_F_SERDES }, |
970 | #endif | | 970 | #endif |
971 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER, | | 971 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER, |
972 | "82576 quad-1000BaseT Ethernet", | | 972 | "82576 quad-1000BaseT Ethernet", |
973 | WM_T_82576, WMP_F_1000T }, | | 973 | WM_T_82576, WMP_F_1000T }, |
974 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS, | | 974 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS, |
975 | "82576 gigabit Ethernet", | | 975 | "82576 gigabit Ethernet", |
976 | WM_T_82576, WMP_F_1000T }, | | 976 | WM_T_82576, WMP_F_1000T }, |
977 | #if 0 | | 977 | #if 0 |
978 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES, | | 978 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES, |
979 | "82576 gigabit Ethernet (SERDES)", | | 979 | "82576 gigabit Ethernet (SERDES)", |
980 | WM_T_82576, WMP_F_SERDES }, | | 980 | WM_T_82576, WMP_F_SERDES }, |
981 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD, | | 981 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD, |
982 | "82576 quad-gigabit Ethernet (SERDES)", | | 982 | "82576 quad-gigabit Ethernet (SERDES)", |
983 | WM_T_82576, WMP_F_SERDES }, | | 983 | WM_T_82576, WMP_F_SERDES }, |
984 | #endif | | 984 | #endif |
985 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER, | | 985 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER, |
986 | "82580 1000BaseT Ethernet", | | 986 | "82580 1000BaseT Ethernet", |
987 | WM_T_82580, WMP_F_1000T }, | | 987 | WM_T_82580, WMP_F_1000T }, |
988 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER, | | 988 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER, |
989 | "82580 1000BaseX Ethernet", | | 989 | "82580 1000BaseX Ethernet", |
990 | WM_T_82580, WMP_F_1000X }, | | 990 | WM_T_82580, WMP_F_1000X }, |
991 | #if 0 | | 991 | #if 0 |
992 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES, | | 992 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES, |
993 | "82580 1000BaseT Ethernet (SERDES)", | | 993 | "82580 1000BaseT Ethernet (SERDES)", |
994 | WM_T_82580, WMP_F_SERDES }, | | 994 | WM_T_82580, WMP_F_SERDES }, |
995 | #endif | | 995 | #endif |
996 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII, | | 996 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII, |
997 | "82580 gigabit Ethernet (SGMII)", | | 997 | "82580 gigabit Ethernet (SGMII)", |
998 | WM_T_82580, WMP_F_1000T }, | | 998 | WM_T_82580, WMP_F_1000T }, |
999 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL, | | 999 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL, |
1000 | "82580 dual-1000BaseT Ethernet", | | 1000 | "82580 dual-1000BaseT Ethernet", |
1001 | WM_T_82580, WMP_F_1000T }, | | 1001 | WM_T_82580, WMP_F_1000T }, |
1002 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_ER, | | 1002 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_ER, |
1003 | "82580 1000BaseT Ethernet", | | 1003 | "82580 1000BaseT Ethernet", |
1004 | WM_T_82580ER, WMP_F_1000T }, | | 1004 | WM_T_82580ER, WMP_F_1000T }, |
1005 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_ER_DUAL, | | 1005 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_ER_DUAL, |
1006 | "82580 dual-1000BaseT Ethernet", | | 1006 | "82580 dual-1000BaseT Ethernet", |
1007 | WM_T_82580ER, WMP_F_1000T }, | | 1007 | WM_T_82580ER, WMP_F_1000T }, |
1008 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_QUAD_FIBER, | | 1008 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_QUAD_FIBER, |
1009 | "82580 quad-1000BaseX Ethernet", | | 1009 | "82580 quad-1000BaseX Ethernet", |
1010 | WM_T_82580, WMP_F_1000X }, | | 1010 | WM_T_82580, WMP_F_1000X }, |
1011 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_COPPER, | | 1011 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_COPPER, |
1012 | "I350 Gigabit Network Connection", | | 1012 | "I350 Gigabit Network Connection", |
1013 | WM_T_I350, WMP_F_1000T }, | | 1013 | WM_T_I350, WMP_F_1000T }, |
1014 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER, | | 1014 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER, |
1015 | "I350 Gigabit Fiber Network Connection", | | 1015 | "I350 Gigabit Fiber Network Connection", |
1016 | WM_T_I350, WMP_F_1000X }, | | 1016 | WM_T_I350, WMP_F_1000X }, |
1017 | #if 0 | | 1017 | #if 0 |
1018 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES, | | 1018 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES, |
1019 | "I350 Gigabit Backplane Connection", | | 1019 | "I350 Gigabit Backplane Connection", |
1020 | WM_T_I350, WMP_F_SERDES }, | | 1020 | WM_T_I350, WMP_F_SERDES }, |
1021 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII, | | 1021 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII, |
1022 | "I350 Gigabit Connection", | | 1022 | "I350 Gigabit Connection", |
1023 | WM_T_I350, WMP_F_1000T }, | | 1023 | WM_T_I350, WMP_F_1000T }, |
1024 | #endif | | 1024 | #endif |
1025 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_T1, | | 1025 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_T1, |
1026 | "I210-T1 Ethernet Server Adapter", | | 1026 | "I210-T1 Ethernet Server Adapter", |
1027 | WM_T_I210, WMP_F_1000T }, | | 1027 | WM_T_I210, WMP_F_1000T }, |
1028 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_OEM1, | | 1028 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_OEM1, |
1029 | "I210 Ethernet (Copper OEM)", | | 1029 | "I210 Ethernet (Copper OEM)", |
1030 | WM_T_I210, WMP_F_1000T }, | | 1030 | WM_T_I210, WMP_F_1000T }, |
1031 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_IT, | | 1031 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_IT, |
1032 | "I210 Ethernet (Copper IT)", | | 1032 | "I210 Ethernet (Copper IT)", |
1033 | WM_T_I210, WMP_F_1000T }, | | 1033 | WM_T_I210, WMP_F_1000T }, |
1034 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_FIBER, | | 1034 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_FIBER, |
1035 | "I210 Gigabit Ethernet (Fiber)", | | 1035 | "I210 Gigabit Ethernet (Fiber)", |
1036 | WM_T_I210, WMP_F_1000X }, | | 1036 | WM_T_I210, WMP_F_1000X }, |
1037 | #if 0 | | 1037 | #if 0 |
1038 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES, | | 1038 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES, |
1039 | "I210 Gigabit Ethernet (SERDES)", | | 1039 | "I210 Gigabit Ethernet (SERDES)", |
1040 | WM_T_I210, WMP_F_SERDES }, | | 1040 | WM_T_I210, WMP_F_SERDES }, |
1041 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII, | | 1041 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII, |
1042 | "I210 Gigabit Ethernet (SGMII)", | | 1042 | "I210 Gigabit Ethernet (SGMII)", |
1043 | WM_T_I210, WMP_F_SERDES }, | | 1043 | WM_T_I210, WMP_F_SERDES }, |
1044 | #endif | | 1044 | #endif |
1045 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I211_COPPER, | | 1045 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I211_COPPER, |
1046 | "I211 Ethernet (COPPER)", | | 1046 | "I211 Ethernet (COPPER)", |
1047 | WM_T_I211, WMP_F_1000T }, | | 1047 | WM_T_I211, WMP_F_1000T }, |
1048 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_V, | | 1048 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_V, |
1049 | "I217 V Ethernet Connection", | | 1049 | "I217 V Ethernet Connection", |
1050 | WM_T_PCH_LPT, WMP_F_1000T }, | | 1050 | WM_T_PCH_LPT, WMP_F_1000T }, |
1051 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_LM, | | 1051 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_LM, |
1052 | "I217 LM Ethernet Connection", | | 1052 | "I217 LM Ethernet Connection", |
1053 | WM_T_PCH_LPT, WMP_F_1000T }, | | 1053 | WM_T_PCH_LPT, WMP_F_1000T }, |
1054 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V, | | 1054 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V, |
1055 | "I218 V Ethernet Connection", | | 1055 | "I218 V Ethernet Connection", |
1056 | WM_T_PCH_LPT, WMP_F_1000T }, | | 1056 | WM_T_PCH_LPT, WMP_F_1000T }, |
1057 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM, | | 1057 | { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM, |
1058 | "I218 LM Ethernet Connection", | | 1058 | "I218 LM Ethernet Connection", |
1059 | WM_T_PCH_LPT, WMP_F_1000T }, | | 1059 | WM_T_PCH_LPT, WMP_F_1000T }, |
1060 | { 0, 0, | | 1060 | { 0, 0, |
1061 | NULL, | | 1061 | NULL, |
1062 | 0, 0 }, | | 1062 | 0, 0 }, |
1063 | }; | | 1063 | }; |
1064 | | | 1064 | |
1065 | #ifdef WM_EVENT_COUNTERS | | 1065 | #ifdef WM_EVENT_COUNTERS |
1066 | static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")]; /* per-TX-segment evcnt name buffers; sized for "txseg" + up to 3 digits (filled in elsewhere — presumably at attach; code not in this view) */ | | 1066 | static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")]; /* per-TX-segment evcnt name buffers; sized for "txseg" + up to 3 digits (filled in elsewhere — presumably at attach; code not in this view) */ |
1067 | #endif /* WM_EVENT_COUNTERS */ | | 1067 | #endif /* WM_EVENT_COUNTERS */ |
1068 | | | 1068 | |
1069 | #if 0 /* Not currently used */ | | 1069 | #if 0 /* Not currently used */ |
1070 | static inline uint32_t | | 1070 | static inline uint32_t |
1071 | wm_io_read(struct wm_softc *sc, int reg) | | 1071 | wm_io_read(struct wm_softc *sc, int reg) |
1072 | { | | 1072 | { |
1073 | 	/* Indirect register read through the I/O-space window: write the register offset at BAR offset 0, then read its value back at offset 4. NOTE(review): offsets look like the i8254x IOADDR/IODATA pair — confirm against the hardware manual. */ | | 1073 | 	/* Indirect register read through the I/O-space window: write the register offset at BAR offset 0, then read its value back at offset 4. NOTE(review): offsets look like the i8254x IOADDR/IODATA pair — confirm against the hardware manual. */ |
1074 | bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg); | | 1074 | bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg); |
1075 | return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4)); | | 1075 | return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4)); |
1076 | } | | 1076 | } |
1077 | #endif | | 1077 | #endif |
1078 | | | 1078 | |
| @@ -2824,2001 +2824,2001 @@ wm_start(struct ifnet *ifp) | | | @@ -2824,2001 +2824,2001 @@ wm_start(struct ifnet *ifp) |
2824 | } | | 2824 | } |
2825 | } | | 2825 | } |
2826 | | | 2826 | |
2827 | /* | | 2827 | /* |
2828 | * wm_nq_tx_offload: | | 2828 | * wm_nq_tx_offload: |
2829 | * | | 2829 | * |
2830 | * Set up TCP/IP checksumming parameters for the | | 2830 | * Set up TCP/IP checksumming parameters for the |
2831 | * specified packet, for NEWQUEUE devices | | 2831 | * specified packet, for NEWQUEUE devices |
2832 | */ | | 2832 | */ |
2833 | static int | | 2833 | static int |
wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
    uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct m_tag *mtag;
	uint32_t vl_len, mssidx, cmdc;	/* context descriptor words */
	struct ether_header *eh;
	int offset, iphl;		/* MAC header len, IP header len */

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */
	*cmdlenp = 0;
	*fieldsp = 0;

	/*
	 * Determine where the IP header starts from the ethertype.
	 * htons() here acts as its own inverse: on both byte orders
	 * it yields the host-order ethertype for comparison.
	 */
	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
	case ETHERTYPE_IPV6:
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*do_csum = false;
		return 0;
	}
	*do_csum = true;
	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;

	/* MAC header length goes into the context descriptor VL/LEN word. */
	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);

	/* IP header length, extracted from the checksum metadata. */
	if ((m0->m_pkthdr.csum_flags &
	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
	} else {
		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
	}
	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);

	/* If the packet carries a VLAN tag, have the chip insert it. */
	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
		    << NQTXC_VLLEN_VLAN_SHIFT);
		*cmdlenp |= NQTX_CMD_VLE;
	}

	mssidx = 0;

	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
		int hlen = offset + iphl;
		int tcp_hlen;
		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;

		if (__predict_false(m0->m_len <
		    (hlen + sizeof(struct tcphdr)))) {
			/*
			 * TCP/IP headers are not in the first mbuf; we need
			 * to do this the slow and painful way. Let's just
			 * hope this doesn't happen very often.
			 */
			struct tcphdr th;

			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);

			m_copydata(m0, hlen, sizeof(th), &th);
			if (v4) {
				struct ip ip;

				/*
				 * Zero ip_len and pre-seed the TCP checksum
				 * with the pseudo-header sum; the chip fills
				 * in the real lengths for each TSO segment.
				 */
				m_copydata(m0, offset, sizeof(ip), &ip);
				ip.ip_len = 0;
				m_copyback(m0,
				    offset + offsetof(struct ip, ip_len),
				    sizeof(ip.ip_len), &ip.ip_len);
				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
			} else {
				struct ip6_hdr ip6;

				/* Same pseudo-header seeding for IPv6. */
				m_copydata(m0, offset, sizeof(ip6), &ip6);
				ip6.ip6_plen = 0;
				m_copyback(m0,
				    offset + offsetof(struct ip6_hdr, ip6_plen),
				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
			}
			/* Write the seeded checksum back into the chain. */
			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
			    sizeof(th.th_sum), &th.th_sum);

			tcp_hlen = th.th_off << 2;
		} else {
			/*
			 * TCP/IP headers are in the first mbuf; we can do
			 * this the easy way.
			 */
			struct tcphdr *th;

			if (v4) {
				struct ip *ip =
				    (void *)(mtod(m0, char *) + offset);
				th = (void *)(mtod(m0, char *) + hlen);

				/* Same seeding as above, done in place. */
				ip->ip_len = 0;
				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
			} else {
				struct ip6_hdr *ip6 =
				    (void *)(mtod(m0, char *) + offset);
				th = (void *)(mtod(m0, char *) + hlen);

				ip6->ip6_plen = 0;
				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
			}
			tcp_hlen = th->th_off << 2;
		}
		hlen += tcp_hlen;
		*cmdlenp |= NQTX_CMD_TSE;

		if (v4) {
			WM_EVCNT_INCR(&sc->sc_ev_txtso);
			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
		} else {
			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
			*fieldsp |= NQTXD_FIELDS_TUXSM;
		}
		/* For TSO, the payload length excludes all headers. */
		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
	} else {
		/* Non-TSO: payload length is the whole packet. */
		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
	}

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		*fieldsp |= NQTXD_FIELDS_IXSM;
		cmdc |= NQTXC_CMD_IP4;
	}

	if (m0->m_pkthdr.csum_flags &
	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
			cmdc |= NQTXC_CMD_TCP;
		} else {
			cmdc |= NQTXC_CMD_UDP;
		}
		cmdc |= NQTXC_CMD_IP4;
		*fieldsp |= NQTXD_FIELDS_TUXSM;
	}
	if (m0->m_pkthdr.csum_flags &
	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
			cmdc |= NQTXC_CMD_TCP;
		} else {
			cmdc |= NQTXC_CMD_UDP;
		}
		cmdc |= NQTXC_CMD_IP6;
		*fieldsp |= NQTXD_FIELDS_TUXSM;
	}

	/* Fill in the context descriptor. */
	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
	    htole32(vl_len);
	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
	    htole32(cmdc);
	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
	    htole32(mssidx);
	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
	    sc->sc_txnext, 0, vl_len));
	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
	/* The context descriptor consumes one Tx ring slot. */
	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
	txs->txs_ndesc++;
	return 0;
}
3026 | | | 3026 | |
3027 | /* | | 3027 | /* |
3028 | * wm_nq_start: [ifnet interface function] | | 3028 | * wm_nq_start: [ifnet interface function] |
3029 | * | | 3029 | * |
3030 | * Start packet transmission on the interface for NEWQUEUE devices | | 3030 | * Start packet transmission on the interface for NEWQUEUE devices |
3031 | */ | | 3031 | */ |
static void
wm_nq_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	struct m_tag *mtag;
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx = -1, seg, segs_needed;
	bool do_csum, sent;

	/* Nothing to do unless running and not already flow-blocked. */
	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	sent = false;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    device_xname(sc->sc_dev), m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
			/* Try to reclaim completed jobs before giving up. */
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
					device_xname(sc->sc_dev)));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				log(LOG_ERR, "%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    device_xname(sc->sc_dev));
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				wm_dump_mbuf_chain(sc, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    device_xname(sc->sc_dev), error));
			break;
		}

		segs_needed = dmamap->dm_nsegs;

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to load offload context.
		 */
		if (segs_needed > sc->sc_txfree - 2) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d (%d) descriptors, have %d\n",
			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
			    segs_needed, sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d (%d) DMA segments\n",
		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses the number of DMA segments.  This may be
		 * incremented by 1 if we do checksum offload (a descriptor
		 * is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = segs_needed;

		/* Set up offload parameters for this packet. */
		uint32_t cmdlen, fields, dcmdlen;
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
			if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
			    &do_csum) != 0) {
				/* Error message already displayed. */
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				continue;
			}
		} else {
			do_csum = false;
			cmdlen = 0;
			fields = 0;
		}

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the first transmit descriptor.
		 */
		nexttx = sc->sc_txnext;
		if (!do_csum) {
			/* setup a legacy descriptor */
			wm_set_dma_addr(
			    &sc->sc_txdescs[nexttx].wtx_addr,
			    dmamap->dm_segs[0].ds_addr);
			sc->sc_txdescs[nexttx].wtx_cmdlen =
			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
			    NULL) {
				sc->sc_txdescs[nexttx].wtx_cmdlen |=
				    htole32(WTX_CMD_VLE);
				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
			} else {
				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
			}
			dcmdlen = 0;
		} else {
			/* setup an advanced data descriptor */
			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
			    htole64(dmamap->dm_segs[0].ds_addr);
			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
			    htole32(dmamap->dm_segs[0].ds_len | cmdlen );
			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
			    htole32(fields);
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
			    device_xname(sc->sc_dev), nexttx,
			    (uint64_t)dmamap->dm_segs[0].ds_addr));
			DPRINTF(WM_DEBUG_TX,
			    ("\t 0x%08x%08x\n", fields,
			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
		}

		lasttx = nexttx;
		nexttx = WM_NEXTTX(sc, nexttx);
		/*
		 * Fill in the next descriptors.  Legacy or advanced format
		 * is the same here.
		 */
		for (seg = 1; seg < dmamap->dm_nsegs;
		    seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
			    htole64(dmamap->dm_segs[seg].ds_addr);
			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
			lasttx = nexttx;

			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: desc %d: %#" PRIx64 ", "
			    "len %#04zx\n",
			    device_xname(sc->sc_dev), nexttx,
			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
			    dmamap->dm_segs[seg].ds_len));
		}

		KASSERT(lasttx != -1);

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
		    (NQTX_CMD_EOP | NQTX_CMD_RS));
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_RS);

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
		    device_xname(sc->sc_dev),
		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
		sent = true;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    device_xname(sc->sc_dev), sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);

		/* Pass the packet to any BPF listeners. */
		bpf_mtap(ifp, m0);
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sent) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
3303 | | | 3303 | |
3304 | /* | | 3304 | /* |
3305 | * wm_watchdog: [ifnet interface function] | | 3305 | * wm_watchdog: [ifnet interface function] |
3306 | * | | 3306 | * |
3307 | * Watchdog timer handler. | | 3307 | * Watchdog timer handler. |
3308 | */ | | 3308 | */ |
3309 | static void | | 3309 | static void |
3310 | wm_watchdog(struct ifnet *ifp) | | 3310 | wm_watchdog(struct ifnet *ifp) |
3311 | { | | 3311 | { |
3312 | struct wm_softc *sc = ifp->if_softc; | | 3312 | struct wm_softc *sc = ifp->if_softc; |
3313 | | | 3313 | |
3314 | /* | | 3314 | /* |
3315 | * Since we're using delayed interrupts, sweep up | | 3315 | * Since we're using delayed interrupts, sweep up |
3316 | * before we report an error. | | 3316 | * before we report an error. |
3317 | */ | | 3317 | */ |
3318 | wm_txintr(sc); | | 3318 | wm_txintr(sc); |
3319 | | | 3319 | |
3320 | if (sc->sc_txfree != WM_NTXDESC(sc)) { | | 3320 | if (sc->sc_txfree != WM_NTXDESC(sc)) { |
3321 | #ifdef WM_DEBUG | | 3321 | #ifdef WM_DEBUG |
3322 | int i, j; | | 3322 | int i, j; |
3323 | struct wm_txsoft *txs; | | 3323 | struct wm_txsoft *txs; |
3324 | #endif | | 3324 | #endif |
3325 | log(LOG_ERR, | | 3325 | log(LOG_ERR, |
3326 | "%s: device timeout (txfree %d txsfree %d txnext %d)\n", | | 3326 | "%s: device timeout (txfree %d txsfree %d txnext %d)\n", |
3327 | device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree, | | 3327 | device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree, |
3328 | sc->sc_txnext); | | 3328 | sc->sc_txnext); |
3329 | ifp->if_oerrors++; | | 3329 | ifp->if_oerrors++; |
3330 | #ifdef WM_DEBUG | | 3330 | #ifdef WM_DEBUG |
3331 | for (i = sc->sc_txsdirty; i != sc->sc_txsnext ; | | 3331 | for (i = sc->sc_txsdirty; i != sc->sc_txsnext ; |
3332 | i = WM_NEXTTXS(sc, i)) { | | 3332 | i = WM_NEXTTXS(sc, i)) { |
3333 | txs = &sc->sc_txsoft[i]; | | 3333 | txs = &sc->sc_txsoft[i]; |
3334 | printf("txs %d tx %d -> %d\n", | | 3334 | printf("txs %d tx %d -> %d\n", |
3335 | i, txs->txs_firstdesc, txs->txs_lastdesc); | | 3335 | i, txs->txs_firstdesc, txs->txs_lastdesc); |
3336 | for (j = txs->txs_firstdesc; ; | | 3336 | for (j = txs->txs_firstdesc; ; |
3337 | j = WM_NEXTTX(sc, j)) { | | 3337 | j = WM_NEXTTX(sc, j)) { |
3338 | printf("\tdesc %d: 0x%" PRIx64 "\n", j, | | 3338 | printf("\tdesc %d: 0x%" PRIx64 "\n", j, |
3339 | sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr); | | 3339 | sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr); |
3340 | printf("\t %#08x%08x\n", | | 3340 | printf("\t %#08x%08x\n", |
3341 | sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields, | | 3341 | sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields, |
3342 | sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen); | | 3342 | sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen); |
3343 | if (j == txs->txs_lastdesc) | | 3343 | if (j == txs->txs_lastdesc) |
3344 | break; | | 3344 | break; |
3345 | } | | 3345 | } |
3346 | } | | 3346 | } |
3347 | #endif | | 3347 | #endif |
3348 | /* Reset the interface. */ | | 3348 | /* Reset the interface. */ |
3349 | (void) wm_init(ifp); | | 3349 | (void) wm_init(ifp); |
3350 | } | | 3350 | } |
3351 | | | 3351 | |
3352 | /* Try to get more packets going. */ | | 3352 | /* Try to get more packets going. */ |
3353 | ifp->if_start(ifp); | | 3353 | ifp->if_start(ifp); |
3354 | } | | 3354 | } |
3355 | | | 3355 | |
3356 | static int | | 3356 | static int |
3357 | wm_ifflags_cb(struct ethercom *ec) | | 3357 | wm_ifflags_cb(struct ethercom *ec) |
3358 | { | | 3358 | { |
3359 | struct ifnet *ifp = &ec->ec_if; | | 3359 | struct ifnet *ifp = &ec->ec_if; |
3360 | struct wm_softc *sc = ifp->if_softc; | | 3360 | struct wm_softc *sc = ifp->if_softc; |
3361 | int change = ifp->if_flags ^ sc->sc_if_flags; | | 3361 | int change = ifp->if_flags ^ sc->sc_if_flags; |
3362 | | | 3362 | |
3363 | if (change != 0) | | 3363 | if (change != 0) |
3364 | sc->sc_if_flags = ifp->if_flags; | | 3364 | sc->sc_if_flags = ifp->if_flags; |
3365 | | | 3365 | |
3366 | if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) | | 3366 | if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) |
3367 | return ENETRESET; | | 3367 | return ENETRESET; |
3368 | | | 3368 | |
3369 | if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0) | | 3369 | if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0) |
3370 | wm_set_filter(sc); | | 3370 | wm_set_filter(sc); |
3371 | | | 3371 | |
3372 | wm_set_vlan(sc); | | 3372 | wm_set_vlan(sc); |
3373 | | | 3373 | |
3374 | return 0; | | 3374 | return 0; |
3375 | } | | 3375 | } |
3376 | | | 3376 | |
3377 | /* | | 3377 | /* |
3378 | * wm_ioctl: [ifnet interface function] | | 3378 | * wm_ioctl: [ifnet interface function] |
3379 | * | | 3379 | * |
3380 | * Handle control requests from the operator. | | 3380 | * Handle control requests from the operator. |
3381 | */ | | 3381 | */ |
3382 | static int | | 3382 | static int |
3383 | wm_ioctl(struct ifnet *ifp, u_long cmd, void *data) | | 3383 | wm_ioctl(struct ifnet *ifp, u_long cmd, void *data) |
3384 | { | | 3384 | { |
3385 | struct wm_softc *sc = ifp->if_softc; | | 3385 | struct wm_softc *sc = ifp->if_softc; |
3386 | struct ifreq *ifr = (struct ifreq *) data; | | 3386 | struct ifreq *ifr = (struct ifreq *) data; |
3387 | struct ifaddr *ifa = (struct ifaddr *)data; | | 3387 | struct ifaddr *ifa = (struct ifaddr *)data; |
3388 | struct sockaddr_dl *sdl; | | 3388 | struct sockaddr_dl *sdl; |
3389 | int s, error; | | 3389 | int s, error; |
3390 | | | 3390 | |
3391 | s = splnet(); | | 3391 | s = splnet(); |
3392 | | | 3392 | |
3393 | switch (cmd) { | | 3393 | switch (cmd) { |
3394 | case SIOCSIFMEDIA: | | 3394 | case SIOCSIFMEDIA: |
3395 | case SIOCGIFMEDIA: | | 3395 | case SIOCGIFMEDIA: |
3396 | /* Flow control requires full-duplex mode. */ | | 3396 | /* Flow control requires full-duplex mode. */ |
3397 | if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || | | 3397 | if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || |
3398 | (ifr->ifr_media & IFM_FDX) == 0) | | 3398 | (ifr->ifr_media & IFM_FDX) == 0) |
3399 | ifr->ifr_media &= ~IFM_ETH_FMASK; | | 3399 | ifr->ifr_media &= ~IFM_ETH_FMASK; |
3400 | if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { | | 3400 | if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { |
3401 | if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { | | 3401 | if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { |
3402 | /* We can do both TXPAUSE and RXPAUSE. */ | | 3402 | /* We can do both TXPAUSE and RXPAUSE. */ |
3403 | ifr->ifr_media |= | | 3403 | ifr->ifr_media |= |
3404 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; | | 3404 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; |
3405 | } | | 3405 | } |
3406 | sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK; | | 3406 | sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK; |
3407 | } | | 3407 | } |
3408 | error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd); | | 3408 | error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd); |
3409 | break; | | 3409 | break; |
3410 | case SIOCINITIFADDR: | | 3410 | case SIOCINITIFADDR: |
3411 | if (ifa->ifa_addr->sa_family == AF_LINK) { | | 3411 | if (ifa->ifa_addr->sa_family == AF_LINK) { |
3412 | sdl = satosdl(ifp->if_dl->ifa_addr); | | 3412 | sdl = satosdl(ifp->if_dl->ifa_addr); |
3413 | (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len, | | 3413 | (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len, |
3414 | LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen); | | 3414 | LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen); |
3415 | /* unicast address is first multicast entry */ | | 3415 | /* unicast address is first multicast entry */ |
3416 | wm_set_filter(sc); | | 3416 | wm_set_filter(sc); |
3417 | error = 0; | | 3417 | error = 0; |
3418 | break; | | 3418 | break; |
3419 | } | | 3419 | } |
3420 | /*FALLTHROUGH*/ | | 3420 | /*FALLTHROUGH*/ |
3421 | default: | | 3421 | default: |
3422 | if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET) | | 3422 | if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET) |
3423 | break; | | 3423 | break; |
3424 | | | 3424 | |
3425 | error = 0; | | 3425 | error = 0; |
3426 | | | 3426 | |
3427 | if (cmd == SIOCSIFCAP) | | 3427 | if (cmd == SIOCSIFCAP) |
3428 | error = (*ifp->if_init)(ifp); | | 3428 | error = (*ifp->if_init)(ifp); |
3429 | else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI) | | 3429 | else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI) |
3430 | ; | | 3430 | ; |
3431 | else if (ifp->if_flags & IFF_RUNNING) { | | 3431 | else if (ifp->if_flags & IFF_RUNNING) { |
3432 | /* | | 3432 | /* |
3433 | * Multicast list has changed; set the hardware filter | | 3433 | * Multicast list has changed; set the hardware filter |
3434 | * accordingly. | | 3434 | * accordingly. |
3435 | */ | | 3435 | */ |
3436 | wm_set_filter(sc); | | 3436 | wm_set_filter(sc); |
3437 | } | | 3437 | } |
3438 | break; | | 3438 | break; |
3439 | } | | 3439 | } |
3440 | | | 3440 | |
3441 | /* Try to get more packets going. */ | | 3441 | /* Try to get more packets going. */ |
3442 | ifp->if_start(ifp); | | 3442 | ifp->if_start(ifp); |
3443 | | | 3443 | |
3444 | splx(s); | | 3444 | splx(s); |
3445 | return error; | | 3445 | return error; |
3446 | } | | 3446 | } |
3447 | | | 3447 | |
/*
 * wm_intr:
 *
 *	Interrupt service routine.
 *
 *	Repeatedly reads the interrupt cause register and, while any
 *	cause this driver enabled (sc_icr) is pending, dispatches to the
 *	Rx, Tx and link helpers.  Returns non-zero iff at least one
 *	interrupt was ours, so shared-interrupt dispatch can stop here.
 */
static int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int handled = 0;	/* non-zero once we claim the interrupt */

	while (1 /* CONSTCOND */) {
		/*
		 * NOTE(review): ICR is presumably read-to-clear, so this
		 * read both fetches and acknowledges the pending causes;
		 * confirm against the chip documentation.
		 */
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;	/* nothing left that we enabled */
		/* Stir the cause bits into the entropy pool. */
		rnd_add_uint32(&sc->rnd_source, icr);

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    device_xname(sc->sc_dev),
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		/* Rx/Tx rings are swept on every pass, whatever the cause. */
		wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TXDW interrupt\n",
			    device_xname(sc->sc_dev)));
			WM_EVCNT_INCR(&sc->sc_ev_txdw);
		}
#endif
		wm_txintr(sc);

		/* Link status change (or related) causes. */
		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		/* Receive overrun: logged only, no recovery action here. */
		if (icr & ICR_RXO) {
#if defined(WM_DEBUG)
			log(LOG_WARNING, "%s: Receive overrun\n",
			    device_xname(sc->sc_dev));
#endif /* defined(WM_DEBUG) */
		}
	}

	if (handled) {
		/* Try to get more packets going. */
		ifp->if_start(ifp);
	}

	return handled;
}
3510 | | | 3510 | |
/*
 * wm_txintr:
 *
 *	Helper; handle transmit interrupts.
 *
 *	Reclaims completed transmit jobs: walks the software Tx queue
 *	from sc_txsdirty and, for each job whose last descriptor has the
 *	"descriptor done" bit set, updates statistics, returns its
 *	descriptors to the free pool, unloads the DMA map and frees the
 *	mbuf chain.  Stops at the first unfinished job.
 */
static void
wm_txintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_txsoft *txs;
	uint8_t status;
	int i;

	/* We are (probably) about to free descriptors; allow new work. */
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
	    i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));

		/* Make the device's status writeback visible to the CPU. */
		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status =
		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
		if ((status & WTX_ST_DD) == 0) {
			/* Not done yet; hand the descriptor back and stop. */
			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
			    BUS_DMASYNC_PREREAD);
			break;
		}

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: job %d done: descs %d..%d\n",
		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
		    txs->txs_lastdesc));

		/*
		 * XXX We should probably be using the statistics
		 * XXX registers, but I don't know if they exist
		 * XXX on chips before the i82544.
		 */

#ifdef WM_EVENT_COUNTERS
		if (status & WTX_ST_TU)
			WM_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* WM_EVENT_COUNTERS */

		/* Late/excessive collisions count as output errors. */
		if (status & (WTX_ST_EC|WTX_ST_LC)) {
			ifp->if_oerrors++;
			if (status & WTX_ST_LC)
				log(LOG_WARNING, "%s: late collision\n",
				    device_xname(sc->sc_dev));
			else if (status & WTX_ST_EC) {
				/* Excessive collision means 16 attempts. */
				ifp->if_collisions += 16;
				log(LOG_WARNING, "%s: excessive collisions\n",
				    device_xname(sc->sc_dev));
			}
		} else
			ifp->if_opackets++;

		/* Release the descriptors and the mbuf for this job. */
		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;
	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
		ifp->if_timer = 0;
}
3597 | | | 3597 | |
/*
 * wm_rxintr:
 *
 *	Helper; handle receive interrupts.
 *
 *	Walks the receive ring from sc_rxptr, assembling multi-descriptor
 *	frames into an mbuf chain (sc_rxhead/sc_rxtailp), trimming the
 *	FCS on chips that leave it in place, attaching VLAN tags and
 *	hardware checksum results, and passing completed packets up the
 *	stack.  sc_rxdiscard tracks a frame being dropped mid-chain.
 */
static void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;
	uint16_t vlantag;

	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    device_xname(sc->sc_dev), i));

		/* Make the device's descriptor writeback visible. */
		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = sc->sc_rxdescs[i].wrx_status;
		errors = sc->sc_rxdescs[i].wrx_errors;
		len = le16toh(sc->sc_rxdescs[i].wrx_len);
		vlantag = sc->sc_rxdescs[i].wrx_special;

		if ((status & WRX_ST_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		/* An earlier buffer of this frame failed; drop the rest. */
		if (__predict_false(sc->sc_rxdiscard)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    device_xname(sc->sc_dev), i));
			WM_INIT_RXDESC(sc, i);
			if (status & WRX_ST_EOP) {
				/* Reset our state. */
				DPRINTF(WM_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    device_xname(sc->sc_dev)));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring, unless of
		 * course the length is zero. Treat the latter as a
		 * failed mapping.
		 */
		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			/* Mid-frame failure: discard until EOP is seen. */
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", device_xname(sc->sc_dev),
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		m->m_len = len;
		sc->sc_rxlen += len;
		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    device_xname(sc->sc_dev), m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			WM_RXCHAIN_LINK(sc, m);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    device_xname(sc->sc_dev), sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now. The chip is
		 * configured to include the FCS except I350 and I21[01]
		 * (not all chips can be configured to strip it),
		 * so we need to trim it.
		 * May need to adjust length of previous mbuf in the
		 * chain if the current mbuf is too short.
		 * For an errata, the RCTL_SECRC bit in RCTL register
		 * is always set in I350, so we don't trim it.
		 */
		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I210)
		    && (sc->sc_type != WM_T_I211)) {
			if (m->m_len < ETHER_CRC_LEN) {
				/* FCS straddles mbufs: shave the tail too. */
				sc->sc_rxtail->m_len
				    -= (ETHER_CRC_LEN - m->m_len);
				m->m_len = 0;
			} else
				m->m_len -= ETHER_CRC_LEN;
			len = sc->sc_rxlen - ETHER_CRC_LEN;
		} else
			len = sc->sc_rxlen;

		WM_RXCHAIN_LINK(sc, m);

		/* Terminate the chain and take ownership of the head. */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    device_xname(sc->sc_dev), len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		    (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
			if (errors & WRX_ER_SE)
				log(LOG_WARNING, "%s: symbol error\n",
				    device_xname(sc->sc_dev));
			else if (errors & WRX_ER_SEQ)
				log(LOG_WARNING, "%s: receive sequence error\n",
				    device_xname(sc->sc_dev));
			else if (errors & WRX_ER_CE)
				log(LOG_WARNING, "%s: CRC error\n",
				    device_xname(sc->sc_dev));
			m_freem(m);
			continue;
		}

		/*
		 * No errors. Receive the packet.
		 */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

		/*
		 * If VLANs are enabled, VLAN packets have been unwrapped
		 * for us. Associate the tag with the packet.
		 */
		if ((status & WRX_ST_VP) != 0) {
			VLAN_INPUT_TAG(ifp, m,
			    le16toh(vlantag),
			    continue);
		}

		/*
		 * Set up checksum info for this packet.
		 */
		if ((status & WRX_ST_IXSM) == 0) {
			if (status & WRX_ST_IPCS) {
				/* Hardware verified the IPv4 header sum. */
				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
				if (errors & WRX_ER_IPE)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_IPv4_BAD;
			}
			if (status & WRX_ST_TCPCS) {
				/*
				 * Note: we don't know if this was TCP or UDP,
				 * so we just set both bits, and expect the
				 * upper layers to deal.
				 */
				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
				m->m_pkthdr.csum_flags |=
				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
				if (errors & WRX_ER_TCPE)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_TCP_UDP_BAD;
			}
		}

		ifp->if_ipackets++;

		/* Pass this up to any BPF listeners. */
		bpf_mtap(ifp, m);

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(WM_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
}
3808 | | | 3808 | |
3809 | /* | | 3809 | /* |
3810 | * wm_linkintr_gmii: | | 3810 | * wm_linkintr_gmii: |
3811 | * | | 3811 | * |
3812 | * Helper; handle link interrupts for GMII. | | 3812 | * Helper; handle link interrupts for GMII. |
3813 | */ | | 3813 | */ |
3814 | static void | | 3814 | static void |
3815 | wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr) | | 3815 | wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr) |
3816 | { | | 3816 | { |
3817 | | | 3817 | |
3818 | DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev), | | 3818 | DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev), |
3819 | __func__)); | | 3819 | __func__)); |
3820 | | | 3820 | |
3821 | if (icr & ICR_LSC) { | | 3821 | if (icr & ICR_LSC) { |
3822 | DPRINTF(WM_DEBUG_LINK, | | 3822 | DPRINTF(WM_DEBUG_LINK, |
3823 | ("%s: LINK: LSC -> mii_tick\n", | | 3823 | ("%s: LINK: LSC -> mii_pollstat\n", |
3824 | device_xname(sc->sc_dev))); | | 3824 | device_xname(sc->sc_dev))); |
3825 | mii_tick(&sc->sc_mii); | | 3825 | mii_pollstat(&sc->sc_mii); |
3826 | if (sc->sc_type == WM_T_82543) { | | 3826 | if (sc->sc_type == WM_T_82543) { |
3827 | int miistatus, active; | | 3827 | int miistatus, active; |
3828 | | | 3828 | |
3829 | /* | | 3829 | /* |
3830 | * With 82543, we need to force speed and | | 3830 | * With 82543, we need to force speed and |
3831 | * duplex on the MAC equal to what the PHY | | 3831 | * duplex on the MAC equal to what the PHY |
3832 | * speed and duplex configuration is. | | 3832 | * speed and duplex configuration is. |
3833 | */ | | 3833 | */ |
3834 | miistatus = sc->sc_mii.mii_media_status; | | 3834 | miistatus = sc->sc_mii.mii_media_status; |
3835 | | | 3835 | |
3836 | if (miistatus & IFM_ACTIVE) { | | 3836 | if (miistatus & IFM_ACTIVE) { |
3837 | active = sc->sc_mii.mii_media_active; | | 3837 | active = sc->sc_mii.mii_media_active; |
3838 | sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD); | | 3838 | sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD); |
3839 | switch (IFM_SUBTYPE(active)) { | | 3839 | switch (IFM_SUBTYPE(active)) { |
3840 | case IFM_10_T: | | 3840 | case IFM_10_T: |
3841 | sc->sc_ctrl |= CTRL_SPEED_10; | | 3841 | sc->sc_ctrl |= CTRL_SPEED_10; |
3842 | break; | | 3842 | break; |
3843 | case IFM_100_TX: | | 3843 | case IFM_100_TX: |
3844 | sc->sc_ctrl |= CTRL_SPEED_100; | | 3844 | sc->sc_ctrl |= CTRL_SPEED_100; |
3845 | break; | | 3845 | break; |
3846 | case IFM_1000_T: | | 3846 | case IFM_1000_T: |
3847 | sc->sc_ctrl |= CTRL_SPEED_1000; | | 3847 | sc->sc_ctrl |= CTRL_SPEED_1000; |
3848 | break; | | 3848 | break; |
3849 | default: | | 3849 | default: |
3850 | /* | | 3850 | /* |
3851 | * fiber? | | 3851 | * fiber? |
3852 | * Should not enter here. | | 3852 | * Should not enter here. |
3853 | */ | | 3853 | */ |
3854 | printf("unknown media (%x)\n", | | 3854 | printf("unknown media (%x)\n", |
3855 | active); | | 3855 | active); |
3856 | break; | | 3856 | break; |
3857 | } | | 3857 | } |
3858 | if (active & IFM_FDX) | | 3858 | if (active & IFM_FDX) |
3859 | sc->sc_ctrl |= CTRL_FD; | | 3859 | sc->sc_ctrl |= CTRL_FD; |
3860 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); | | 3860 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); |
3861 | } | | 3861 | } |
3862 | } else if ((sc->sc_type == WM_T_ICH8) | | 3862 | } else if ((sc->sc_type == WM_T_ICH8) |
3863 | && (sc->sc_phytype == WMPHY_IGP_3)) { | | 3863 | && (sc->sc_phytype == WMPHY_IGP_3)) { |
3864 | wm_kmrn_lock_loss_workaround_ich8lan(sc); | | 3864 | wm_kmrn_lock_loss_workaround_ich8lan(sc); |
3865 | } else if (sc->sc_type == WM_T_PCH) { | | 3865 | } else if (sc->sc_type == WM_T_PCH) { |
3866 | wm_k1_gig_workaround_hv(sc, | | 3866 | wm_k1_gig_workaround_hv(sc, |
3867 | ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0)); | | 3867 | ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0)); |
3868 | } | | 3868 | } |
3869 | | | 3869 | |
3870 | if ((sc->sc_phytype == WMPHY_82578) | | 3870 | if ((sc->sc_phytype == WMPHY_82578) |
3871 | && (IFM_SUBTYPE(sc->sc_mii.mii_media_active) | | 3871 | && (IFM_SUBTYPE(sc->sc_mii.mii_media_active) |
3872 | == IFM_1000_T)) { | | 3872 | == IFM_1000_T)) { |
3873 | | | 3873 | |
3874 | if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) { | | 3874 | if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) { |
3875 | delay(200*1000); /* XXX too big */ | | 3875 | delay(200*1000); /* XXX too big */ |
3876 | | | 3876 | |
3877 | /* Link stall fix for link up */ | | 3877 | /* Link stall fix for link up */ |
3878 | wm_gmii_hv_writereg(sc->sc_dev, 1, | | 3878 | wm_gmii_hv_writereg(sc->sc_dev, 1, |
3879 | HV_MUX_DATA_CTRL, | | 3879 | HV_MUX_DATA_CTRL, |
3880 | HV_MUX_DATA_CTRL_GEN_TO_MAC | | 3880 | HV_MUX_DATA_CTRL_GEN_TO_MAC |
3881 | | HV_MUX_DATA_CTRL_FORCE_SPEED); | | 3881 | | HV_MUX_DATA_CTRL_FORCE_SPEED); |
3882 | wm_gmii_hv_writereg(sc->sc_dev, 1, | | 3882 | wm_gmii_hv_writereg(sc->sc_dev, 1, |
3883 | HV_MUX_DATA_CTRL, | | 3883 | HV_MUX_DATA_CTRL, |
3884 | HV_MUX_DATA_CTRL_GEN_TO_MAC); | | 3884 | HV_MUX_DATA_CTRL_GEN_TO_MAC); |
3885 | } | | 3885 | } |
3886 | } | | 3886 | } |
3887 | } else if (icr & ICR_RXSEQ) { | | 3887 | } else if (icr & ICR_RXSEQ) { |
3888 | DPRINTF(WM_DEBUG_LINK, | | 3888 | DPRINTF(WM_DEBUG_LINK, |
3889 | ("%s: LINK Receive sequence error\n", | | 3889 | ("%s: LINK Receive sequence error\n", |
3890 | device_xname(sc->sc_dev))); | | 3890 | device_xname(sc->sc_dev))); |
3891 | } | | 3891 | } |
3892 | } | | 3892 | } |
3893 | | | 3893 | |
3894 | /* | | 3894 | /* |
3895 | * wm_linkintr_tbi: | | 3895 | * wm_linkintr_tbi: |
3896 | * | | 3896 | * |
3897 | * Helper; handle link interrupts for TBI mode. | | 3897 | * Helper; handle link interrupts for TBI mode. |
3898 | */ | | 3898 | */ |
3899 | static void | | 3899 | static void |
3900 | wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr) | | 3900 | wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr) |
3901 | { | | 3901 | { |
3902 | uint32_t status; | | 3902 | uint32_t status; |
3903 | | | 3903 | |
3904 | DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev), | | 3904 | DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev), |
3905 | __func__)); | | 3905 | __func__)); |
3906 | | | 3906 | |
3907 | status = CSR_READ(sc, WMREG_STATUS); | | 3907 | status = CSR_READ(sc, WMREG_STATUS); |
3908 | if (icr & ICR_LSC) { | | 3908 | if (icr & ICR_LSC) { |
3909 | if (status & STATUS_LU) { | | 3909 | if (status & STATUS_LU) { |
3910 | DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n", | | 3910 | DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n", |
3911 | device_xname(sc->sc_dev), | | 3911 | device_xname(sc->sc_dev), |
3912 | (status & STATUS_FD) ? "FDX" : "HDX")); | | 3912 | (status & STATUS_FD) ? "FDX" : "HDX")); |
3913 | /* | | 3913 | /* |
3914 | * NOTE: CTRL will update TFCE and RFCE automatically, | | 3914 | * NOTE: CTRL will update TFCE and RFCE automatically, |
3915 | * so we should update sc->sc_ctrl | | 3915 | * so we should update sc->sc_ctrl |
3916 | */ | | 3916 | */ |
3917 | | | 3917 | |
3918 | sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL); | | 3918 | sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL); |
3919 | sc->sc_tctl &= ~TCTL_COLD(0x3ff); | | 3919 | sc->sc_tctl &= ~TCTL_COLD(0x3ff); |
3920 | sc->sc_fcrtl &= ~FCRTL_XONE; | | 3920 | sc->sc_fcrtl &= ~FCRTL_XONE; |
3921 | if (status & STATUS_FD) | | 3921 | if (status & STATUS_FD) |
3922 | sc->sc_tctl |= | | 3922 | sc->sc_tctl |= |
3923 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX); | | 3923 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX); |
3924 | else | | 3924 | else |
3925 | sc->sc_tctl |= | | 3925 | sc->sc_tctl |= |
3926 | TCTL_COLD(TX_COLLISION_DISTANCE_HDX); | | 3926 | TCTL_COLD(TX_COLLISION_DISTANCE_HDX); |
3927 | if (sc->sc_ctrl & CTRL_TFCE) | | 3927 | if (sc->sc_ctrl & CTRL_TFCE) |
3928 | sc->sc_fcrtl |= FCRTL_XONE; | | 3928 | sc->sc_fcrtl |= FCRTL_XONE; |
3929 | CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); | | 3929 | CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); |
3930 | CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? | | 3930 | CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? |
3931 | WMREG_OLD_FCRTL : WMREG_FCRTL, | | 3931 | WMREG_OLD_FCRTL : WMREG_FCRTL, |
3932 | sc->sc_fcrtl); | | 3932 | sc->sc_fcrtl); |
3933 | sc->sc_tbi_linkup = 1; | | 3933 | sc->sc_tbi_linkup = 1; |
3934 | } else { | | 3934 | } else { |
3935 | DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n", | | 3935 | DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n", |
3936 | device_xname(sc->sc_dev))); | | 3936 | device_xname(sc->sc_dev))); |
3937 | sc->sc_tbi_linkup = 0; | | 3937 | sc->sc_tbi_linkup = 0; |
3938 | } | | 3938 | } |
3939 | wm_tbi_set_linkled(sc); | | 3939 | wm_tbi_set_linkled(sc); |
3940 | } else if (icr & ICR_RXCFG) { | | 3940 | } else if (icr & ICR_RXCFG) { |
3941 | DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n", | | 3941 | DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n", |
3942 | device_xname(sc->sc_dev))); | | 3942 | device_xname(sc->sc_dev))); |
3943 | sc->sc_tbi_nrxcfg++; | | 3943 | sc->sc_tbi_nrxcfg++; |
3944 | wm_check_for_link(sc); | | 3944 | wm_check_for_link(sc); |
3945 | } else if (icr & ICR_RXSEQ) { | | 3945 | } else if (icr & ICR_RXSEQ) { |
3946 | DPRINTF(WM_DEBUG_LINK, | | 3946 | DPRINTF(WM_DEBUG_LINK, |
3947 | ("%s: LINK: Receive sequence error\n", | | 3947 | ("%s: LINK: Receive sequence error\n", |
3948 | device_xname(sc->sc_dev))); | | 3948 | device_xname(sc->sc_dev))); |
3949 | } | | 3949 | } |
3950 | } | | 3950 | } |
3951 | | | 3951 | |
3952 | /* | | 3952 | /* |
3953 | * wm_linkintr: | | 3953 | * wm_linkintr: |
3954 | * | | 3954 | * |
3955 | * Helper; handle link interrupts. | | 3955 | * Helper; handle link interrupts. |
3956 | */ | | 3956 | */ |
3957 | static void | | 3957 | static void |
3958 | wm_linkintr(struct wm_softc *sc, uint32_t icr) | | 3958 | wm_linkintr(struct wm_softc *sc, uint32_t icr) |
3959 | { | | 3959 | { |
3960 | | | 3960 | |
3961 | if (sc->sc_flags & WM_F_HAS_MII) | | 3961 | if (sc->sc_flags & WM_F_HAS_MII) |
3962 | wm_linkintr_gmii(sc, icr); | | 3962 | wm_linkintr_gmii(sc, icr); |
3963 | else | | 3963 | else |
3964 | wm_linkintr_tbi(sc, icr); | | 3964 | wm_linkintr_tbi(sc, icr); |
3965 | } | | 3965 | } |
3966 | | | 3966 | |
3967 | /* | | 3967 | /* |
3968 | * wm_tick: | | 3968 | * wm_tick: |
3969 | * | | 3969 | * |
3970 | * One second timer, used to check link status, sweep up | | 3970 | * One second timer, used to check link status, sweep up |
3971 | * completed transmit jobs, etc. | | 3971 | * completed transmit jobs, etc. |
3972 | */ | | 3972 | */ |
3973 | static void | | 3973 | static void |
3974 | wm_tick(void *arg) | | 3974 | wm_tick(void *arg) |
3975 | { | | 3975 | { |
3976 | struct wm_softc *sc = arg; | | 3976 | struct wm_softc *sc = arg; |
3977 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; | | 3977 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
3978 | int s; | | 3978 | int s; |
3979 | | | 3979 | |
3980 | s = splnet(); | | 3980 | s = splnet(); |
3981 | | | 3981 | |
3982 | if (sc->sc_type >= WM_T_82542_2_1) { | | 3982 | if (sc->sc_type >= WM_T_82542_2_1) { |
3983 | WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC)); | | 3983 | WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC)); |
3984 | WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC)); | | 3984 | WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC)); |
3985 | WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC)); | | 3985 | WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC)); |
3986 | WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC)); | | 3986 | WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC)); |
3987 | WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC)); | | 3987 | WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC)); |
3988 | } | | 3988 | } |
3989 | | | 3989 | |
3990 | ifp->if_collisions += CSR_READ(sc, WMREG_COLC); | | 3990 | ifp->if_collisions += CSR_READ(sc, WMREG_COLC); |
3991 | ifp->if_ierrors += 0ULL + /* ensure quad_t */ | | 3991 | ifp->if_ierrors += 0ULL + /* ensure quad_t */ |
3992 | + CSR_READ(sc, WMREG_CRCERRS) | | 3992 | + CSR_READ(sc, WMREG_CRCERRS) |
3993 | + CSR_READ(sc, WMREG_ALGNERRC) | | 3993 | + CSR_READ(sc, WMREG_ALGNERRC) |
3994 | + CSR_READ(sc, WMREG_SYMERRC) | | 3994 | + CSR_READ(sc, WMREG_SYMERRC) |
3995 | + CSR_READ(sc, WMREG_RXERRC) | | 3995 | + CSR_READ(sc, WMREG_RXERRC) |
3996 | + CSR_READ(sc, WMREG_SEC) | | 3996 | + CSR_READ(sc, WMREG_SEC) |
3997 | + CSR_READ(sc, WMREG_CEXTERR) | | 3997 | + CSR_READ(sc, WMREG_CEXTERR) |
3998 | + CSR_READ(sc, WMREG_RLEC); | | 3998 | + CSR_READ(sc, WMREG_RLEC); |
3999 | ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC); | | 3999 | ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC); |
4000 | | | 4000 | |
4001 | if (sc->sc_flags & WM_F_HAS_MII) | | 4001 | if (sc->sc_flags & WM_F_HAS_MII) |
4002 | mii_tick(&sc->sc_mii); | | 4002 | mii_tick(&sc->sc_mii); |
4003 | else | | 4003 | else |
4004 | wm_tbi_check_link(sc); | | 4004 | wm_tbi_check_link(sc); |
4005 | | | 4005 | |
4006 | splx(s); | | 4006 | splx(s); |
4007 | | | 4007 | |
4008 | callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc); | | 4008 | callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc); |
4009 | } | | 4009 | } |
4010 | | | 4010 | |
4011 | /* | | 4011 | /* |
4012 | * wm_reset: | | 4012 | * wm_reset: |
4013 | * | | 4013 | * |
4014 | * Reset the i82542 chip. | | 4014 | * Reset the i82542 chip. |
4015 | */ | | 4015 | */ |
4016 | static void | | 4016 | static void |
4017 | wm_reset(struct wm_softc *sc) | | 4017 | wm_reset(struct wm_softc *sc) |
4018 | { | | 4018 | { |
4019 | int phy_reset = 0; | | 4019 | int phy_reset = 0; |
4020 | uint32_t reg, mask; | | 4020 | uint32_t reg, mask; |
4021 | int i; | | 4021 | int i; |
4022 | | | 4022 | |
4023 | /* | | 4023 | /* |
4024 | * Allocate on-chip memory according to the MTU size. | | 4024 | * Allocate on-chip memory according to the MTU size. |
4025 | * The Packet Buffer Allocation register must be written | | 4025 | * The Packet Buffer Allocation register must be written |
4026 | * before the chip is reset. | | 4026 | * before the chip is reset. |
4027 | */ | | 4027 | */ |
4028 | switch (sc->sc_type) { | | 4028 | switch (sc->sc_type) { |
4029 | case WM_T_82547: | | 4029 | case WM_T_82547: |
4030 | case WM_T_82547_2: | | 4030 | case WM_T_82547_2: |
4031 | sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ? | | 4031 | sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ? |
4032 | PBA_22K : PBA_30K; | | 4032 | PBA_22K : PBA_30K; |
4033 | sc->sc_txfifo_head = 0; | | 4033 | sc->sc_txfifo_head = 0; |
4034 | sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT; | | 4034 | sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT; |
4035 | sc->sc_txfifo_size = | | 4035 | sc->sc_txfifo_size = |
4036 | (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT; | | 4036 | (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT; |
4037 | sc->sc_txfifo_stall = 0; | | 4037 | sc->sc_txfifo_stall = 0; |
4038 | break; | | 4038 | break; |
4039 | case WM_T_82571: | | 4039 | case WM_T_82571: |
4040 | case WM_T_82572: | | 4040 | case WM_T_82572: |
4041 | case WM_T_82575: /* XXX need special handling for jumbo frames */ | | 4041 | case WM_T_82575: /* XXX need special handling for jumbo frames */ |
4042 | case WM_T_I350: | | 4042 | case WM_T_I350: |
4043 | case WM_T_80003: | | 4043 | case WM_T_80003: |
4044 | sc->sc_pba = PBA_32K; | | 4044 | sc->sc_pba = PBA_32K; |
4045 | break; | | 4045 | break; |
4046 | case WM_T_82580: | | 4046 | case WM_T_82580: |
4047 | case WM_T_82580ER: | | 4047 | case WM_T_82580ER: |
4048 | sc->sc_pba = PBA_35K; | | 4048 | sc->sc_pba = PBA_35K; |
4049 | break; | | 4049 | break; |
4050 | case WM_T_I210: | | 4050 | case WM_T_I210: |
4051 | case WM_T_I211: | | 4051 | case WM_T_I211: |
4052 | sc->sc_pba = PBA_34K; | | 4052 | sc->sc_pba = PBA_34K; |
4053 | break; | | 4053 | break; |
4054 | case WM_T_82576: | | 4054 | case WM_T_82576: |
4055 | sc->sc_pba = PBA_64K; | | 4055 | sc->sc_pba = PBA_64K; |
4056 | break; | | 4056 | break; |
4057 | case WM_T_82573: | | 4057 | case WM_T_82573: |
4058 | sc->sc_pba = PBA_12K; | | 4058 | sc->sc_pba = PBA_12K; |
4059 | break; | | 4059 | break; |
4060 | case WM_T_82574: | | 4060 | case WM_T_82574: |
4061 | case WM_T_82583: | | 4061 | case WM_T_82583: |
4062 | sc->sc_pba = PBA_20K; | | 4062 | sc->sc_pba = PBA_20K; |
4063 | break; | | 4063 | break; |
4064 | case WM_T_ICH8: | | 4064 | case WM_T_ICH8: |
4065 | sc->sc_pba = PBA_8K; | | 4065 | sc->sc_pba = PBA_8K; |
4066 | CSR_WRITE(sc, WMREG_PBS, PBA_16K); | | 4066 | CSR_WRITE(sc, WMREG_PBS, PBA_16K); |
4067 | break; | | 4067 | break; |
4068 | case WM_T_ICH9: | | 4068 | case WM_T_ICH9: |
4069 | case WM_T_ICH10: | | 4069 | case WM_T_ICH10: |
4070 | sc->sc_pba = PBA_10K; | | 4070 | sc->sc_pba = PBA_10K; |
4071 | break; | | 4071 | break; |
4072 | case WM_T_PCH: | | 4072 | case WM_T_PCH: |
4073 | case WM_T_PCH2: | | 4073 | case WM_T_PCH2: |
4074 | case WM_T_PCH_LPT: | | 4074 | case WM_T_PCH_LPT: |
4075 | sc->sc_pba = PBA_26K; | | 4075 | sc->sc_pba = PBA_26K; |
4076 | break; | | 4076 | break; |
4077 | default: | | 4077 | default: |
4078 | sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ? | | 4078 | sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ? |
4079 | PBA_40K : PBA_48K; | | 4079 | PBA_40K : PBA_48K; |
4080 | break; | | 4080 | break; |
4081 | } | | 4081 | } |
4082 | CSR_WRITE(sc, WMREG_PBA, sc->sc_pba); | | 4082 | CSR_WRITE(sc, WMREG_PBA, sc->sc_pba); |
4083 | | | 4083 | |
4084 | /* Prevent the PCI-E bus from sticking */ | | 4084 | /* Prevent the PCI-E bus from sticking */ |
4085 | if (sc->sc_flags & WM_F_PCIE) { | | 4085 | if (sc->sc_flags & WM_F_PCIE) { |
4086 | int timeout = 800; | | 4086 | int timeout = 800; |
4087 | | | 4087 | |
4088 | sc->sc_ctrl |= CTRL_GIO_M_DIS; | | 4088 | sc->sc_ctrl |= CTRL_GIO_M_DIS; |
4089 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); | | 4089 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); |
4090 | | | 4090 | |
4091 | while (timeout--) { | | 4091 | while (timeout--) { |
4092 | if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) | | 4092 | if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) |
4093 | == 0) | | 4093 | == 0) |
4094 | break; | | 4094 | break; |
4095 | delay(100); | | 4095 | delay(100); |
4096 | } | | 4096 | } |
4097 | } | | 4097 | } |
4098 | | | 4098 | |
4099 | /* Set the completion timeout for interface */ | | 4099 | /* Set the completion timeout for interface */ |
4100 | if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576) | | 4100 | if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576) |
4101 | || (sc->sc_type == WM_T_I350)) | | 4101 | || (sc->sc_type == WM_T_I350)) |
4102 | wm_set_pcie_completion_timeout(sc); | | 4102 | wm_set_pcie_completion_timeout(sc); |
4103 | | | 4103 | |
4104 | /* Clear interrupt */ | | 4104 | /* Clear interrupt */ |
4105 | CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); | | 4105 | CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); |
4106 | | | 4106 | |
4107 | /* Stop the transmit and receive processes. */ | | 4107 | /* Stop the transmit and receive processes. */ |
4108 | CSR_WRITE(sc, WMREG_RCTL, 0); | | 4108 | CSR_WRITE(sc, WMREG_RCTL, 0); |
4109 | CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP); | | 4109 | CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP); |
4110 | sc->sc_rctl &= ~RCTL_EN; | | 4110 | sc->sc_rctl &= ~RCTL_EN; |
4111 | | | 4111 | |
4112 | /* XXX set_tbi_sbp_82543() */ | | 4112 | /* XXX set_tbi_sbp_82543() */ |
4113 | | | 4113 | |
4114 | delay(10*1000); | | 4114 | delay(10*1000); |
4115 | | | 4115 | |
4116 | /* Must acquire the MDIO ownership before MAC reset */ | | 4116 | /* Must acquire the MDIO ownership before MAC reset */ |
4117 | switch (sc->sc_type) { | | 4117 | switch (sc->sc_type) { |
4118 | case WM_T_82573: | | 4118 | case WM_T_82573: |
4119 | case WM_T_82574: | | 4119 | case WM_T_82574: |
4120 | case WM_T_82583: | | 4120 | case WM_T_82583: |
4121 | i = 0; | | 4121 | i = 0; |
4122 | reg = CSR_READ(sc, WMREG_EXTCNFCTR) | | 4122 | reg = CSR_READ(sc, WMREG_EXTCNFCTR) |
4123 | | EXTCNFCTR_MDIO_SW_OWNERSHIP; | | 4123 | | EXTCNFCTR_MDIO_SW_OWNERSHIP; |
4124 | do { | | 4124 | do { |
4125 | CSR_WRITE(sc, WMREG_EXTCNFCTR, | | 4125 | CSR_WRITE(sc, WMREG_EXTCNFCTR, |
4126 | reg | EXTCNFCTR_MDIO_SW_OWNERSHIP); | | 4126 | reg | EXTCNFCTR_MDIO_SW_OWNERSHIP); |
4127 | reg = CSR_READ(sc, WMREG_EXTCNFCTR); | | 4127 | reg = CSR_READ(sc, WMREG_EXTCNFCTR); |
4128 | if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0) | | 4128 | if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0) |
4129 | break; | | 4129 | break; |
4130 | reg |= EXTCNFCTR_MDIO_SW_OWNERSHIP; | | 4130 | reg |= EXTCNFCTR_MDIO_SW_OWNERSHIP; |
4131 | delay(2*1000); | | 4131 | delay(2*1000); |
4132 | i++; | | 4132 | i++; |
4133 | } while (i < WM_MDIO_OWNERSHIP_TIMEOUT); | | 4133 | } while (i < WM_MDIO_OWNERSHIP_TIMEOUT); |
4134 | break; | | 4134 | break; |
4135 | default: | | 4135 | default: |
4136 | break; | | 4136 | break; |
4137 | } | | 4137 | } |
4138 | | | 4138 | |
4139 | /* | | 4139 | /* |
4140 | * 82541 Errata 29? & 82547 Errata 28? | | 4140 | * 82541 Errata 29? & 82547 Errata 28? |
4141 | * See also the description about PHY_RST bit in CTRL register | | 4141 | * See also the description about PHY_RST bit in CTRL register |
4142 | * in 8254x_GBe_SDM.pdf. | | 4142 | * in 8254x_GBe_SDM.pdf. |
4143 | */ | | 4143 | */ |
4144 | if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) { | | 4144 | if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) { |
4145 | CSR_WRITE(sc, WMREG_CTRL, | | 4145 | CSR_WRITE(sc, WMREG_CTRL, |
4146 | CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET); | | 4146 | CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET); |
4147 | delay(5000); | | 4147 | delay(5000); |
4148 | } | | 4148 | } |
4149 | | | 4149 | |
4150 | switch (sc->sc_type) { | | 4150 | switch (sc->sc_type) { |
4151 | case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */ | | 4151 | case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */ |
4152 | case WM_T_82541: | | 4152 | case WM_T_82541: |
4153 | case WM_T_82541_2: | | 4153 | case WM_T_82541_2: |
4154 | case WM_T_82547: | | 4154 | case WM_T_82547: |
4155 | case WM_T_82547_2: | | 4155 | case WM_T_82547_2: |
4156 | /* | | 4156 | /* |
4157 | * On some chipsets, a reset through a memory-mapped write | | 4157 | * On some chipsets, a reset through a memory-mapped write |
4158 | * cycle can cause the chip to reset before completing the | | 4158 | * cycle can cause the chip to reset before completing the |
4159 | * write cycle. This causes major headache that can be | | 4159 | * write cycle. This causes major headache that can be |
4160 | * avoided by issuing the reset via indirect register writes | | 4160 | * avoided by issuing the reset via indirect register writes |
4161 | * through I/O space. | | 4161 | * through I/O space. |
4162 | * | | 4162 | * |
4163 | * So, if we successfully mapped the I/O BAR at attach time, | | 4163 | * So, if we successfully mapped the I/O BAR at attach time, |
4164 | * use that. Otherwise, try our luck with a memory-mapped | | 4164 | * use that. Otherwise, try our luck with a memory-mapped |
4165 | * reset. | | 4165 | * reset. |
4166 | */ | | 4166 | */ |
4167 | if (sc->sc_flags & WM_F_IOH_VALID) | | 4167 | if (sc->sc_flags & WM_F_IOH_VALID) |
4168 | wm_io_write(sc, WMREG_CTRL, CTRL_RST); | | 4168 | wm_io_write(sc, WMREG_CTRL, CTRL_RST); |
4169 | else | | 4169 | else |
4170 | CSR_WRITE(sc, WMREG_CTRL, CTRL_RST); | | 4170 | CSR_WRITE(sc, WMREG_CTRL, CTRL_RST); |
4171 | break; | | 4171 | break; |
4172 | case WM_T_82545_3: | | 4172 | case WM_T_82545_3: |
4173 | case WM_T_82546_3: | | 4173 | case WM_T_82546_3: |
4174 | /* Use the shadow control register on these chips. */ | | 4174 | /* Use the shadow control register on these chips. */ |
4175 | CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST); | | 4175 | CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST); |
4176 | break; | | 4176 | break; |
4177 | case WM_T_80003: | | 4177 | case WM_T_80003: |
4178 | mask = swfwphysem[sc->sc_funcid]; | | 4178 | mask = swfwphysem[sc->sc_funcid]; |
4179 | reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST; | | 4179 | reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST; |
4180 | wm_get_swfw_semaphore(sc, mask); | | 4180 | wm_get_swfw_semaphore(sc, mask); |
4181 | CSR_WRITE(sc, WMREG_CTRL, reg); | | 4181 | CSR_WRITE(sc, WMREG_CTRL, reg); |
4182 | wm_put_swfw_semaphore(sc, mask); | | 4182 | wm_put_swfw_semaphore(sc, mask); |
4183 | break; | | 4183 | break; |
4184 | case WM_T_ICH8: | | 4184 | case WM_T_ICH8: |
4185 | case WM_T_ICH9: | | 4185 | case WM_T_ICH9: |
4186 | case WM_T_ICH10: | | 4186 | case WM_T_ICH10: |
4187 | case WM_T_PCH: | | 4187 | case WM_T_PCH: |
4188 | case WM_T_PCH2: | | 4188 | case WM_T_PCH2: |
4189 | case WM_T_PCH_LPT: | | 4189 | case WM_T_PCH_LPT: |
4190 | reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST; | | 4190 | reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST; |
4191 | if (wm_check_reset_block(sc) == 0) { | | 4191 | if (wm_check_reset_block(sc) == 0) { |
4192 | /* | | 4192 | /* |
4193 | * Gate automatic PHY configuration by hardware on | | 4193 | * Gate automatic PHY configuration by hardware on |
4194 | * non-managed 82579 | | 4194 | * non-managed 82579 |
4195 | */ | | 4195 | */ |
4196 | if ((sc->sc_type == WM_T_PCH2) | | 4196 | if ((sc->sc_type == WM_T_PCH2) |
4197 | && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) | | 4197 | && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) |
4198 | != 0)) | | 4198 | != 0)) |
4199 | wm_gate_hw_phy_config_ich8lan(sc, 1); | | 4199 | wm_gate_hw_phy_config_ich8lan(sc, 1); |
4200 | | | 4200 | |
4201 | | | 4201 | |
4202 | reg |= CTRL_PHY_RESET; | | 4202 | reg |= CTRL_PHY_RESET; |
4203 | phy_reset = 1; | | 4203 | phy_reset = 1; |
4204 | } | | 4204 | } |
4205 | wm_get_swfwhw_semaphore(sc); | | 4205 | wm_get_swfwhw_semaphore(sc); |
4206 | CSR_WRITE(sc, WMREG_CTRL, reg); | | 4206 | CSR_WRITE(sc, WMREG_CTRL, reg); |
4207 | delay(20*1000); | | 4207 | delay(20*1000); |
4208 | wm_put_swfwhw_semaphore(sc); | | 4208 | wm_put_swfwhw_semaphore(sc); |
4209 | break; | | 4209 | break; |
4210 | case WM_T_82542_2_0: | | 4210 | case WM_T_82542_2_0: |
4211 | case WM_T_82542_2_1: | | 4211 | case WM_T_82542_2_1: |
4212 | case WM_T_82543: | | 4212 | case WM_T_82543: |
4213 | case WM_T_82540: | | 4213 | case WM_T_82540: |
4214 | case WM_T_82545: | | 4214 | case WM_T_82545: |
4215 | case WM_T_82546: | | 4215 | case WM_T_82546: |
4216 | case WM_T_82571: | | 4216 | case WM_T_82571: |
4217 | case WM_T_82572: | | 4217 | case WM_T_82572: |
4218 | case WM_T_82573: | | 4218 | case WM_T_82573: |
4219 | case WM_T_82574: | | 4219 | case WM_T_82574: |
4220 | case WM_T_82575: | | 4220 | case WM_T_82575: |
4221 | case WM_T_82576: | | 4221 | case WM_T_82576: |
4222 | case WM_T_82580: | | 4222 | case WM_T_82580: |
4223 | case WM_T_82580ER: | | 4223 | case WM_T_82580ER: |
4224 | case WM_T_82583: | | 4224 | case WM_T_82583: |
4225 | case WM_T_I350: | | 4225 | case WM_T_I350: |
4226 | case WM_T_I210: | | 4226 | case WM_T_I210: |
4227 | case WM_T_I211: | | 4227 | case WM_T_I211: |
4228 | default: | | 4228 | default: |
4229 | /* Everything else can safely use the documented method. */ | | 4229 | /* Everything else can safely use the documented method. */ |
4230 | CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST); | | 4230 | CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST); |
4231 | break; | | 4231 | break; |
4232 | } | | 4232 | } |
4233 | | | 4233 | |
4234 | if (phy_reset != 0) | | 4234 | if (phy_reset != 0) |
4235 | wm_get_cfg_done(sc); | | 4235 | wm_get_cfg_done(sc); |
4236 | | | 4236 | |
4237 | /* reload EEPROM */ | | 4237 | /* reload EEPROM */ |
4238 | switch (sc->sc_type) { | | 4238 | switch (sc->sc_type) { |
4239 | case WM_T_82542_2_0: | | 4239 | case WM_T_82542_2_0: |
4240 | case WM_T_82542_2_1: | | 4240 | case WM_T_82542_2_1: |
4241 | case WM_T_82543: | | 4241 | case WM_T_82543: |
4242 | case WM_T_82544: | | 4242 | case WM_T_82544: |
4243 | delay(10); | | 4243 | delay(10); |
4244 | reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST; | | 4244 | reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST; |
4245 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg); | | 4245 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg); |
4246 | delay(2000); | | 4246 | delay(2000); |
4247 | break; | | 4247 | break; |
4248 | case WM_T_82540: | | 4248 | case WM_T_82540: |
4249 | case WM_T_82545: | | 4249 | case WM_T_82545: |
4250 | case WM_T_82545_3: | | 4250 | case WM_T_82545_3: |
4251 | case WM_T_82546: | | 4251 | case WM_T_82546: |
4252 | case WM_T_82546_3: | | 4252 | case WM_T_82546_3: |
4253 | delay(5*1000); | | 4253 | delay(5*1000); |
4254 | /* XXX Disable HW ARPs on ASF enabled adapters */ | | 4254 | /* XXX Disable HW ARPs on ASF enabled adapters */ |
4255 | break; | | 4255 | break; |
4256 | case WM_T_82541: | | 4256 | case WM_T_82541: |
4257 | case WM_T_82541_2: | | 4257 | case WM_T_82541_2: |
4258 | case WM_T_82547: | | 4258 | case WM_T_82547: |
4259 | case WM_T_82547_2: | | 4259 | case WM_T_82547_2: |
4260 | delay(20000); | | 4260 | delay(20000); |
4261 | /* XXX Disable HW ARPs on ASF enabled adapters */ | | 4261 | /* XXX Disable HW ARPs on ASF enabled adapters */ |
4262 | break; | | 4262 | break; |
4263 | case WM_T_82571: | | 4263 | case WM_T_82571: |
4264 | case WM_T_82572: | | 4264 | case WM_T_82572: |
4265 | case WM_T_82573: | | 4265 | case WM_T_82573: |
4266 | case WM_T_82574: | | 4266 | case WM_T_82574: |
4267 | case WM_T_82583: | | 4267 | case WM_T_82583: |
4268 | if (sc->sc_flags & WM_F_EEPROM_FLASH) { | | 4268 | if (sc->sc_flags & WM_F_EEPROM_FLASH) { |
4269 | delay(10); | | 4269 | delay(10); |
4270 | reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST; | | 4270 | reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST; |
4271 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg); | | 4271 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg); |
4272 | } | | 4272 | } |
4273 | /* check EECD_EE_AUTORD */ | | 4273 | /* check EECD_EE_AUTORD */ |
4274 | wm_get_auto_rd_done(sc); | | 4274 | wm_get_auto_rd_done(sc); |
4275 | /* | | 4275 | /* |
4276 | * Phy configuration from NVM just starts after EECD_AUTO_RD | | 4276 | * Phy configuration from NVM just starts after EECD_AUTO_RD |
4277 | * is set. | | 4277 | * is set. |
4278 | */ | | 4278 | */ |
4279 | if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574) | | 4279 | if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574) |
4280 | || (sc->sc_type == WM_T_82583)) | | 4280 | || (sc->sc_type == WM_T_82583)) |
4281 | delay(25*1000); | | 4281 | delay(25*1000); |
4282 | break; | | 4282 | break; |
4283 | case WM_T_82575: | | 4283 | case WM_T_82575: |
4284 | case WM_T_82576: | | 4284 | case WM_T_82576: |
4285 | case WM_T_82580: | | 4285 | case WM_T_82580: |
4286 | case WM_T_82580ER: | | 4286 | case WM_T_82580ER: |
4287 | case WM_T_I350: | | 4287 | case WM_T_I350: |
4288 | case WM_T_I210: | | 4288 | case WM_T_I210: |
4289 | case WM_T_I211: | | 4289 | case WM_T_I211: |
4290 | case WM_T_80003: | | 4290 | case WM_T_80003: |
4291 | /* check EECD_EE_AUTORD */ | | 4291 | /* check EECD_EE_AUTORD */ |
4292 | wm_get_auto_rd_done(sc); | | 4292 | wm_get_auto_rd_done(sc); |
4293 | break; | | 4293 | break; |
4294 | case WM_T_ICH8: | | 4294 | case WM_T_ICH8: |
4295 | case WM_T_ICH9: | | 4295 | case WM_T_ICH9: |
4296 | case WM_T_ICH10: | | 4296 | case WM_T_ICH10: |
4297 | case WM_T_PCH: | | 4297 | case WM_T_PCH: |
4298 | case WM_T_PCH2: | | 4298 | case WM_T_PCH2: |
4299 | case WM_T_PCH_LPT: | | 4299 | case WM_T_PCH_LPT: |
4300 | break; | | 4300 | break; |
4301 | default: | | 4301 | default: |
4302 | panic("%s: unknown type\n", __func__); | | 4302 | panic("%s: unknown type\n", __func__); |
4303 | } | | 4303 | } |
4304 | | | 4304 | |
4305 | /* Check whether EEPROM is present or not */ | | 4305 | /* Check whether EEPROM is present or not */ |
4306 | switch (sc->sc_type) { | | 4306 | switch (sc->sc_type) { |
4307 | case WM_T_82575: | | 4307 | case WM_T_82575: |
4308 | case WM_T_82576: | | 4308 | case WM_T_82576: |
4309 | #if 0 /* XXX */ | | 4309 | #if 0 /* XXX */ |
4310 | case WM_T_82580: | | 4310 | case WM_T_82580: |
4311 | case WM_T_82580ER: | | 4311 | case WM_T_82580ER: |
4312 | #endif | | 4312 | #endif |
4313 | case WM_T_I350: | | 4313 | case WM_T_I350: |
4314 | case WM_T_ICH8: | | 4314 | case WM_T_ICH8: |
4315 | case WM_T_ICH9: | | 4315 | case WM_T_ICH9: |
4316 | if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) { | | 4316 | if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) { |
4317 | /* Not found */ | | 4317 | /* Not found */ |
4318 | sc->sc_flags |= WM_F_EEPROM_INVALID; | | 4318 | sc->sc_flags |= WM_F_EEPROM_INVALID; |
4319 | if ((sc->sc_type == WM_T_82575) | | 4319 | if ((sc->sc_type == WM_T_82575) |
4320 | || (sc->sc_type == WM_T_82576) | | 4320 | || (sc->sc_type == WM_T_82576) |
4321 | || (sc->sc_type == WM_T_82580) | | 4321 | || (sc->sc_type == WM_T_82580) |
4322 | || (sc->sc_type == WM_T_82580ER) | | 4322 | || (sc->sc_type == WM_T_82580ER) |
4323 | || (sc->sc_type == WM_T_I350)) | | 4323 | || (sc->sc_type == WM_T_I350)) |
4324 | wm_reset_init_script_82575(sc); | | 4324 | wm_reset_init_script_82575(sc); |
4325 | } | | 4325 | } |
4326 | break; | | 4326 | break; |
4327 | default: | | 4327 | default: |
4328 | break; | | 4328 | break; |
4329 | } | | 4329 | } |
4330 | | | 4330 | |
4331 | if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER) | | 4331 | if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER) |
4332 | || (sc->sc_type == WM_T_I350)) { | | 4332 | || (sc->sc_type == WM_T_I350)) { |
4333 | /* clear global device reset status bit */ | | 4333 | /* clear global device reset status bit */ |
4334 | CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET); | | 4334 | CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET); |
4335 | } | | 4335 | } |
4336 | | | 4336 | |
4337 | /* Clear any pending interrupt events. */ | | 4337 | /* Clear any pending interrupt events. */ |
4338 | CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); | | 4338 | CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); |
4339 | reg = CSR_READ(sc, WMREG_ICR); | | 4339 | reg = CSR_READ(sc, WMREG_ICR); |
4340 | | | 4340 | |
4341 | /* reload sc_ctrl */ | | 4341 | /* reload sc_ctrl */ |
4342 | sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL); | | 4342 | sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL); |
4343 | | | 4343 | |
4344 | if (sc->sc_type == WM_T_I350) | | 4344 | if (sc->sc_type == WM_T_I350) |
4345 | wm_set_eee_i350(sc); | | 4345 | wm_set_eee_i350(sc); |
4346 | | | 4346 | |
4347 | /* dummy read from WUC */ | | 4347 | /* dummy read from WUC */ |
4348 | if (sc->sc_type == WM_T_PCH) | | 4348 | if (sc->sc_type == WM_T_PCH) |
4349 | reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC); | | 4349 | reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC); |
4350 | /* | | 4350 | /* |
4351 | * For PCH, this write will make sure that any noise will be detected | | 4351 | * For PCH, this write will make sure that any noise will be detected |
4352 | * as a CRC error and be dropped rather than show up as a bad packet | | 4352 | * as a CRC error and be dropped rather than show up as a bad packet |
4353 | * to the DMA engine | | 4353 | * to the DMA engine |
4354 | */ | | 4354 | */ |
4355 | if (sc->sc_type == WM_T_PCH) | | 4355 | if (sc->sc_type == WM_T_PCH) |
4356 | CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565); | | 4356 | CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565); |
4357 | | | 4357 | |
4358 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) | | 4358 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) |
4359 | CSR_WRITE(sc, WMREG_WUC, 0); | | 4359 | CSR_WRITE(sc, WMREG_WUC, 0); |
4360 | | | 4360 | |
4361 | /* XXX need special handling for 82580 */ | | 4361 | /* XXX need special handling for 82580 */ |
4362 | } | | 4362 | } |
4363 | | | 4363 | |
4364 | static void | | 4364 | static void |
4365 | wm_set_vlan(struct wm_softc *sc) | | 4365 | wm_set_vlan(struct wm_softc *sc) |
4366 | { | | 4366 | { |
4367 | /* Deal with VLAN enables. */ | | 4367 | /* Deal with VLAN enables. */ |
4368 | if (VLAN_ATTACHED(&sc->sc_ethercom)) | | 4368 | if (VLAN_ATTACHED(&sc->sc_ethercom)) |
4369 | sc->sc_ctrl |= CTRL_VME; | | 4369 | sc->sc_ctrl |= CTRL_VME; |
4370 | else | | 4370 | else |
4371 | sc->sc_ctrl &= ~CTRL_VME; | | 4371 | sc->sc_ctrl &= ~CTRL_VME; |
4372 | | | 4372 | |
4373 | /* Write the control registers. */ | | 4373 | /* Write the control registers. */ |
4374 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); | | 4374 | CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); |
4375 | } | | 4375 | } |
4376 | | | 4376 | |
4377 | /* | | 4377 | /* |
4378 | * wm_init: [ifnet interface function] | | 4378 | * wm_init: [ifnet interface function] |
4379 | * | | 4379 | * |
4380 | * Initialize the interface. Must be called at splnet(). | | 4380 | * Initialize the interface. Must be called at splnet(). |
4381 | */ | | 4381 | */ |
4382 | static int | | 4382 | static int |
4383 | wm_init(struct ifnet *ifp) | | 4383 | wm_init(struct ifnet *ifp) |
4384 | { | | 4384 | { |
4385 | struct wm_softc *sc = ifp->if_softc; | | 4385 | struct wm_softc *sc = ifp->if_softc; |
4386 | struct wm_rxsoft *rxs; | | 4386 | struct wm_rxsoft *rxs; |
4387 | int i, j, trynum, error = 0; | | 4387 | int i, j, trynum, error = 0; |
4388 | uint32_t reg; | | 4388 | uint32_t reg; |
4389 | | | 4389 | |
4390 | /* | | 4390 | /* |
4391 | * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGMENT is set. | | 4391 | * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGMENT is set. |
4392 | * There is a small but measurable benefit to avoiding the adjusment | | 4392 | * There is a small but measurable benefit to avoiding the adjusment |
4393 | * of the descriptor so that the headers are aligned, for normal mtu, | | 4393 | * of the descriptor so that the headers are aligned, for normal mtu, |
4394 | * on such platforms. One possibility is that the DMA itself is | | 4394 | * on such platforms. One possibility is that the DMA itself is |
4395 | * slightly more efficient if the front of the entire packet (instead | | 4395 | * slightly more efficient if the front of the entire packet (instead |
4396 | * of the front of the headers) is aligned. | | 4396 | * of the front of the headers) is aligned. |
4397 | * | | 4397 | * |
4398 | * Note we must always set align_tweak to 0 if we are using | | 4398 | * Note we must always set align_tweak to 0 if we are using |
4399 | * jumbo frames. | | 4399 | * jumbo frames. |
4400 | */ | | 4400 | */ |
4401 | #ifdef __NO_STRICT_ALIGNMENT | | 4401 | #ifdef __NO_STRICT_ALIGNMENT |
4402 | sc->sc_align_tweak = 0; | | 4402 | sc->sc_align_tweak = 0; |
4403 | #else | | 4403 | #else |
4404 | if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2)) | | 4404 | if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2)) |
4405 | sc->sc_align_tweak = 0; | | 4405 | sc->sc_align_tweak = 0; |
4406 | else | | 4406 | else |
4407 | sc->sc_align_tweak = 2; | | 4407 | sc->sc_align_tweak = 2; |
4408 | #endif /* __NO_STRICT_ALIGNMENT */ | | 4408 | #endif /* __NO_STRICT_ALIGNMENT */ |
4409 | | | 4409 | |
4410 | /* Cancel any pending I/O. */ | | 4410 | /* Cancel any pending I/O. */ |
4411 | wm_stop(ifp, 0); | | 4411 | wm_stop(ifp, 0); |
4412 | | | 4412 | |
4413 | /* update statistics before reset */ | | 4413 | /* update statistics before reset */ |
4414 | ifp->if_collisions += CSR_READ(sc, WMREG_COLC); | | 4414 | ifp->if_collisions += CSR_READ(sc, WMREG_COLC); |
4415 | ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC); | | 4415 | ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC); |
4416 | | | 4416 | |
4417 | /* Reset the chip to a known state. */ | | 4417 | /* Reset the chip to a known state. */ |
4418 | wm_reset(sc); | | 4418 | wm_reset(sc); |
4419 | | | 4419 | |
4420 | switch (sc->sc_type) { | | 4420 | switch (sc->sc_type) { |
4421 | case WM_T_82571: | | 4421 | case WM_T_82571: |
4422 | case WM_T_82572: | | 4422 | case WM_T_82572: |
4423 | case WM_T_82573: | | 4423 | case WM_T_82573: |
4424 | case WM_T_82574: | | 4424 | case WM_T_82574: |
4425 | case WM_T_82583: | | 4425 | case WM_T_82583: |
4426 | case WM_T_80003: | | 4426 | case WM_T_80003: |
4427 | case WM_T_ICH8: | | 4427 | case WM_T_ICH8: |
4428 | case WM_T_ICH9: | | 4428 | case WM_T_ICH9: |
4429 | case WM_T_ICH10: | | 4429 | case WM_T_ICH10: |
4430 | case WM_T_PCH: | | 4430 | case WM_T_PCH: |
4431 | case WM_T_PCH2: | | 4431 | case WM_T_PCH2: |
4432 | case WM_T_PCH_LPT: | | 4432 | case WM_T_PCH_LPT: |
4433 | if (wm_check_mng_mode(sc) != 0) | | 4433 | if (wm_check_mng_mode(sc) != 0) |
4434 | wm_get_hw_control(sc); | | 4434 | wm_get_hw_control(sc); |
4435 | break; | | 4435 | break; |
4436 | default: | | 4436 | default: |
4437 | break; | | 4437 | break; |
4438 | } | | 4438 | } |
4439 | | | 4439 | |
4440 | /* Reset the PHY. */ | | 4440 | /* Reset the PHY. */ |
4441 | if (sc->sc_flags & WM_F_HAS_MII) | | 4441 | if (sc->sc_flags & WM_F_HAS_MII) |
4442 | wm_gmii_reset(sc); | | 4442 | wm_gmii_reset(sc); |
4443 | | | 4443 | |
4444 | reg = CSR_READ(sc, WMREG_CTRL_EXT); | | 4444 | reg = CSR_READ(sc, WMREG_CTRL_EXT); |
4445 | /* Enable PHY low-power state when MAC is at D3 w/o WoL */ | | 4445 | /* Enable PHY low-power state when MAC is at D3 w/o WoL */ |
4446 | if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) | | 4446 | if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) |
4447 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN); | | 4447 | CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN); |
4448 | | | 4448 | |
4449 | /* Initialize the transmit descriptor ring. */ | | 4449 | /* Initialize the transmit descriptor ring. */ |
4450 | memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc)); | | 4450 | memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc)); |
4451 | WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc), | | 4451 | WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc), |
4452 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); | | 4452 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
4453 | sc->sc_txfree = WM_NTXDESC(sc); | | 4453 | sc->sc_txfree = WM_NTXDESC(sc); |
4454 | sc->sc_txnext = 0; | | 4454 | sc->sc_txnext = 0; |
4455 | | | 4455 | |
4456 | if (sc->sc_type < WM_T_82543) { | | 4456 | if (sc->sc_type < WM_T_82543) { |
4457 | CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0)); | | 4457 | CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0)); |
4458 | CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0)); | | 4458 | CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0)); |
4459 | CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc)); | | 4459 | CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc)); |
4460 | CSR_WRITE(sc, WMREG_OLD_TDH, 0); | | 4460 | CSR_WRITE(sc, WMREG_OLD_TDH, 0); |
4461 | CSR_WRITE(sc, WMREG_OLD_TDT, 0); | | 4461 | CSR_WRITE(sc, WMREG_OLD_TDT, 0); |
4462 | CSR_WRITE(sc, WMREG_OLD_TIDV, 128); | | 4462 | CSR_WRITE(sc, WMREG_OLD_TIDV, 128); |
4463 | } else { | | 4463 | } else { |
4464 | CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0)); | | 4464 | CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0)); |
4465 | CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0)); | | 4465 | CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0)); |
4466 | CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc)); | | 4466 | CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc)); |
4467 | CSR_WRITE(sc, WMREG_TDH, 0); | | 4467 | CSR_WRITE(sc, WMREG_TDH, 0); |
4468 | CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */ | | 4468 | CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */ |
4469 | CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */ | | 4469 | CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */ |
4470 | | | 4470 | |
4471 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) | | 4471 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) |
4472 | /* | | 4472 | /* |
4473 | * Don't write TDT before TCTL.EN is set. | | 4473 | * Don't write TDT before TCTL.EN is set. |
4474 | * See the document. | | 4474 | * See the document. |
4475 | */ | | 4475 | */ |
4476 | CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE | | 4476 | CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE |
4477 | | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0) | | 4477 | | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0) |
4478 | | TXDCTL_WTHRESH(0)); | | 4478 | | TXDCTL_WTHRESH(0)); |
4479 | else { | | 4479 | else { |
4480 | CSR_WRITE(sc, WMREG_TDT, 0); | | 4480 | CSR_WRITE(sc, WMREG_TDT, 0); |
4481 | CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) | | | 4481 | CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) | |
4482 | TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0)); | | 4482 | TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0)); |
4483 | CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) | | | 4483 | CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) | |
4484 | RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1)); | | 4484 | RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1)); |
4485 | } | | 4485 | } |
4486 | } | | 4486 | } |
4487 | CSR_WRITE(sc, WMREG_TQSA_LO, 0); | | 4487 | CSR_WRITE(sc, WMREG_TQSA_LO, 0); |
4488 | CSR_WRITE(sc, WMREG_TQSA_HI, 0); | | 4488 | CSR_WRITE(sc, WMREG_TQSA_HI, 0); |
4489 | | | 4489 | |
4490 | /* Initialize the transmit job descriptors. */ | | 4490 | /* Initialize the transmit job descriptors. */ |
4491 | for (i = 0; i < WM_TXQUEUELEN(sc); i++) | | 4491 | for (i = 0; i < WM_TXQUEUELEN(sc); i++) |
4492 | sc->sc_txsoft[i].txs_mbuf = NULL; | | 4492 | sc->sc_txsoft[i].txs_mbuf = NULL; |
4493 | sc->sc_txsfree = WM_TXQUEUELEN(sc); | | 4493 | sc->sc_txsfree = WM_TXQUEUELEN(sc); |
4494 | sc->sc_txsnext = 0; | | 4494 | sc->sc_txsnext = 0; |
4495 | sc->sc_txsdirty = 0; | | 4495 | sc->sc_txsdirty = 0; |
4496 | | | 4496 | |
4497 | /* | | 4497 | /* |
4498 | * Initialize the receive descriptor and receive job | | 4498 | * Initialize the receive descriptor and receive job |
4499 | * descriptor rings. | | 4499 | * descriptor rings. |
4500 | */ | | 4500 | */ |
4501 | if (sc->sc_type < WM_T_82543) { | | 4501 | if (sc->sc_type < WM_T_82543) { |
4502 | CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0)); | | 4502 | CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0)); |
4503 | CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0)); | | 4503 | CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0)); |
4504 | CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs)); | | 4504 | CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs)); |
4505 | CSR_WRITE(sc, WMREG_OLD_RDH0, 0); | | 4505 | CSR_WRITE(sc, WMREG_OLD_RDH0, 0); |
4506 | CSR_WRITE(sc, WMREG_OLD_RDT0, 0); | | 4506 | CSR_WRITE(sc, WMREG_OLD_RDT0, 0); |
4507 | CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD); | | 4507 | CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD); |
4508 | | | 4508 | |
4509 | CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0); | | 4509 | CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0); |
4510 | CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0); | | 4510 | CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0); |
4511 | CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0); | | 4511 | CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0); |
4512 | CSR_WRITE(sc, WMREG_OLD_RDH1, 0); | | 4512 | CSR_WRITE(sc, WMREG_OLD_RDH1, 0); |
4513 | CSR_WRITE(sc, WMREG_OLD_RDT1, 0); | | 4513 | CSR_WRITE(sc, WMREG_OLD_RDT1, 0); |
4514 | CSR_WRITE(sc, WMREG_OLD_RDTR1, 0); | | 4514 | CSR_WRITE(sc, WMREG_OLD_RDTR1, 0); |
4515 | } else { | | 4515 | } else { |
4516 | CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0)); | | 4516 | CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0)); |
4517 | CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0)); | | 4517 | CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0)); |
4518 | CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs)); | | 4518 | CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs)); |
4519 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { | | 4519 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { |
4520 | CSR_WRITE(sc, WMREG_EITR(0), 450); | | 4520 | CSR_WRITE(sc, WMREG_EITR(0), 450); |
4521 | if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1)) | | 4521 | if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1)) |
4522 | panic("%s: MCLBYTES %d unsupported for i2575 or higher\n", __func__, MCLBYTES); | | 4522 | panic("%s: MCLBYTES %d unsupported for i2575 or higher\n", __func__, MCLBYTES); |
4523 | CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY | | 4523 | CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY |
4524 | | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT)); | | 4524 | | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT)); |
4525 | CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE | | 4525 | CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE |
4526 | | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8) | | 4526 | | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8) |
4527 | | RXDCTL_WTHRESH(1)); | | 4527 | | RXDCTL_WTHRESH(1)); |
4528 | } else { | | 4528 | } else { |
4529 | CSR_WRITE(sc, WMREG_RDH, 0); | | 4529 | CSR_WRITE(sc, WMREG_RDH, 0); |
4530 | CSR_WRITE(sc, WMREG_RDT, 0); | | 4530 | CSR_WRITE(sc, WMREG_RDT, 0); |
4531 | CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */ | | 4531 | CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */ |
4532 | CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */ | | 4532 | CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */ |
4533 | } | | 4533 | } |
4534 | } | | 4534 | } |
4535 | for (i = 0; i < WM_NRXDESC; i++) { | | 4535 | for (i = 0; i < WM_NRXDESC; i++) { |
4536 | rxs = &sc->sc_rxsoft[i]; | | 4536 | rxs = &sc->sc_rxsoft[i]; |
4537 | if (rxs->rxs_mbuf == NULL) { | | 4537 | if (rxs->rxs_mbuf == NULL) { |
4538 | if ((error = wm_add_rxbuf(sc, i)) != 0) { | | 4538 | if ((error = wm_add_rxbuf(sc, i)) != 0) { |
4539 | log(LOG_ERR, "%s: unable to allocate or map " | | 4539 | log(LOG_ERR, "%s: unable to allocate or map " |
4540 | "rx buffer %d, error = %d\n", | | 4540 | "rx buffer %d, error = %d\n", |
4541 | device_xname(sc->sc_dev), i, error); | | 4541 | device_xname(sc->sc_dev), i, error); |
4542 | /* | | 4542 | /* |
4543 | * XXX Should attempt to run with fewer receive | | 4543 | * XXX Should attempt to run with fewer receive |
4544 | * XXX buffers instead of just failing. | | 4544 | * XXX buffers instead of just failing. |
4545 | */ | | 4545 | */ |
4546 | wm_rxdrain(sc); | | 4546 | wm_rxdrain(sc); |
4547 | goto out; | | 4547 | goto out; |
4548 | } | | 4548 | } |
4549 | } else { | | 4549 | } else { |
4550 | if ((sc->sc_flags & WM_F_NEWQUEUE) == 0) | | 4550 | if ((sc->sc_flags & WM_F_NEWQUEUE) == 0) |
4551 | WM_INIT_RXDESC(sc, i); | | 4551 | WM_INIT_RXDESC(sc, i); |
4552 | /* | | 4552 | /* |
4553 | * For 82575 and newer device, the RX descriptors | | 4553 | * For 82575 and newer device, the RX descriptors |
4554 | * must be initialized after the setting of RCTL.EN in | | 4554 | * must be initialized after the setting of RCTL.EN in |
4555 | * wm_set_filter() | | 4555 | * wm_set_filter() |
4556 | */ | | 4556 | */ |
4557 | } | | 4557 | } |
4558 | } | | 4558 | } |
4559 | sc->sc_rxptr = 0; | | 4559 | sc->sc_rxptr = 0; |
4560 | sc->sc_rxdiscard = 0; | | 4560 | sc->sc_rxdiscard = 0; |
4561 | WM_RXCHAIN_RESET(sc); | | 4561 | WM_RXCHAIN_RESET(sc); |
4562 | | | 4562 | |
4563 | /* | | 4563 | /* |
4564 | * Clear out the VLAN table -- we don't use it (yet). | | 4564 | * Clear out the VLAN table -- we don't use it (yet). |
4565 | */ | | 4565 | */ |
4566 | CSR_WRITE(sc, WMREG_VET, 0); | | 4566 | CSR_WRITE(sc, WMREG_VET, 0); |
4567 | if (sc->sc_type == WM_T_I350) | | 4567 | if (sc->sc_type == WM_T_I350) |
4568 | trynum = 10; /* Due to hw errata */ | | 4568 | trynum = 10; /* Due to hw errata */ |
4569 | else | | 4569 | else |
4570 | trynum = 1; | | 4570 | trynum = 1; |
4571 | for (i = 0; i < WM_VLAN_TABSIZE; i++) | | 4571 | for (i = 0; i < WM_VLAN_TABSIZE; i++) |
4572 | for (j = 0; j < trynum; j++) | | 4572 | for (j = 0; j < trynum; j++) |
4573 | CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0); | | 4573 | CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0); |
4574 | | | 4574 | |
4575 | /* | | 4575 | /* |
4576 | * Set up flow-control parameters. | | 4576 | * Set up flow-control parameters. |
4577 | * | | 4577 | * |
4578 | * XXX Values could probably stand some tuning. | | 4578 | * XXX Values could probably stand some tuning. |
4579 | */ | | 4579 | */ |
4580 | if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9) | | 4580 | if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9) |
4581 | && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH) | | 4581 | && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH) |
4582 | && (sc->sc_type != WM_T_PCH2)) { | | 4582 | && (sc->sc_type != WM_T_PCH2)) { |
4583 | CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST); | | 4583 | CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST); |
4584 | CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST); | | 4584 | CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST); |
4585 | CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL); | | 4585 | CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL); |
4586 | } | | 4586 | } |
4587 | | | 4587 | |
4588 | sc->sc_fcrtl = FCRTL_DFLT; | | 4588 | sc->sc_fcrtl = FCRTL_DFLT; |
4589 | if (sc->sc_type < WM_T_82543) { | | 4589 | if (sc->sc_type < WM_T_82543) { |
4590 | CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT); | | 4590 | CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT); |
4591 | CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl); | | 4591 | CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl); |
4592 | } else { | | 4592 | } else { |
4593 | CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT); | | 4593 | CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT); |
4594 | CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl); | | 4594 | CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl); |
4595 | } | | 4595 | } |
4596 | | | 4596 | |
4597 | if (sc->sc_type == WM_T_80003) | | 4597 | if (sc->sc_type == WM_T_80003) |
4598 | CSR_WRITE(sc, WMREG_FCTTV, 0xffff); | | 4598 | CSR_WRITE(sc, WMREG_FCTTV, 0xffff); |
4599 | else | | 4599 | else |
4600 | CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT); | | 4600 | CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT); |
4601 | | | 4601 | |
4602 | /* Writes the control register. */ | | 4602 | /* Writes the control register. */ |
4603 | wm_set_vlan(sc); | | 4603 | wm_set_vlan(sc); |
4604 | | | 4604 | |
4605 | if (sc->sc_flags & WM_F_HAS_MII) { | | 4605 | if (sc->sc_flags & WM_F_HAS_MII) { |
4606 | int val; | | 4606 | int val; |
4607 | | | 4607 | |
4608 | switch (sc->sc_type) { | | 4608 | switch (sc->sc_type) { |
4609 | case WM_T_80003: | | 4609 | case WM_T_80003: |
4610 | case WM_T_ICH8: | | 4610 | case WM_T_ICH8: |
4611 | case WM_T_ICH9: | | 4611 | case WM_T_ICH9: |
4612 | case WM_T_ICH10: | | 4612 | case WM_T_ICH10: |
4613 | case WM_T_PCH: | | 4613 | case WM_T_PCH: |
4614 | case WM_T_PCH2: | | 4614 | case WM_T_PCH2: |
4615 | case WM_T_PCH_LPT: | | 4615 | case WM_T_PCH_LPT: |
4616 | /* | | 4616 | /* |
4617 | * Set the mac to wait the maximum time between each | | 4617 | * Set the mac to wait the maximum time between each |
4618 | * iteration and increase the max iterations when | | 4618 | * iteration and increase the max iterations when |
4619 | * polling the phy; this fixes erroneous timeouts at | | 4619 | * polling the phy; this fixes erroneous timeouts at |
4620 | * 10Mbps. | | 4620 | * 10Mbps. |
4621 | */ | | 4621 | */ |
4622 | wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS, | | 4622 | wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS, |
4623 | 0xFFFF); | | 4623 | 0xFFFF); |
4624 | val = wm_kmrn_readreg(sc, | | 4624 | val = wm_kmrn_readreg(sc, |
4625 | KUMCTRLSTA_OFFSET_INB_PARAM); | | 4625 | KUMCTRLSTA_OFFSET_INB_PARAM); |
4626 | val |= 0x3F; | | 4626 | val |= 0x3F; |
4627 | wm_kmrn_writereg(sc, | | 4627 | wm_kmrn_writereg(sc, |
4628 | KUMCTRLSTA_OFFSET_INB_PARAM, val); | | 4628 | KUMCTRLSTA_OFFSET_INB_PARAM, val); |
4629 | break; | | 4629 | break; |
4630 | default: | | 4630 | default: |
4631 | break; | | 4631 | break; |
4632 | } | | 4632 | } |
4633 | | | 4633 | |
4634 | if (sc->sc_type == WM_T_80003) { | | 4634 | if (sc->sc_type == WM_T_80003) { |
4635 | val = CSR_READ(sc, WMREG_CTRL_EXT); | | 4635 | val = CSR_READ(sc, WMREG_CTRL_EXT); |
4636 | val &= ~CTRL_EXT_LINK_MODE_MASK; | | 4636 | val &= ~CTRL_EXT_LINK_MODE_MASK; |
4637 | CSR_WRITE(sc, WMREG_CTRL_EXT, val); | | 4637 | CSR_WRITE(sc, WMREG_CTRL_EXT, val); |
4638 | | | 4638 | |
4639 | /* Bypass RX and TX FIFO's */ | | 4639 | /* Bypass RX and TX FIFO's */ |
4640 | wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL, | | 4640 | wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL, |
4641 | KUMCTRLSTA_FIFO_CTRL_RX_BYPASS | | 4641 | KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
4642 | | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS); | | 4642 | | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS); |
4643 | wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL, | | 4643 | wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL, |
4644 | KUMCTRLSTA_INB_CTRL_DIS_PADDING | | | 4644 | KUMCTRLSTA_INB_CTRL_DIS_PADDING | |
4645 | KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT); | | 4645 | KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT); |
4646 | } | | 4646 | } |
4647 | } | | 4647 | } |
4648 | #if 0 | | 4648 | #if 0 |
4649 | CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext); | | 4649 | CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext); |
4650 | #endif | | 4650 | #endif |
4651 | | | 4651 | |
4652 | /* | | 4652 | /* |
4653 | * Set up checksum offload parameters. | | 4653 | * Set up checksum offload parameters. |
4654 | */ | | 4654 | */ |
4655 | reg = CSR_READ(sc, WMREG_RXCSUM); | | 4655 | reg = CSR_READ(sc, WMREG_RXCSUM); |
4656 | reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL); | | 4656 | reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL); |
4657 | if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) | | 4657 | if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) |
4658 | reg |= RXCSUM_IPOFL; | | 4658 | reg |= RXCSUM_IPOFL; |
4659 | if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx)) | | 4659 | if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx)) |
4660 | reg |= RXCSUM_IPOFL | RXCSUM_TUOFL; | | 4660 | reg |= RXCSUM_IPOFL | RXCSUM_TUOFL; |
4661 | if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx)) | | 4661 | if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx)) |
4662 | reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL; | | 4662 | reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL; |
4663 | CSR_WRITE(sc, WMREG_RXCSUM, reg); | | 4663 | CSR_WRITE(sc, WMREG_RXCSUM, reg); |
4664 | | | 4664 | |
4665 | /* Reset TBI's RXCFG count */ | | 4665 | /* Reset TBI's RXCFG count */ |
4666 | sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0; | | 4666 | sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0; |
4667 | | | 4667 | |
4668 | /* | | 4668 | /* |
4669 | * Set up the interrupt registers. | | 4669 | * Set up the interrupt registers. |
4670 | */ | | 4670 | */ |
4671 | CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); | | 4671 | CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); |
4672 | sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 | | | 4672 | sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 | |
4673 | ICR_RXO | ICR_RXT0; | | 4673 | ICR_RXO | ICR_RXT0; |
4674 | if ((sc->sc_flags & WM_F_HAS_MII) == 0) | | 4674 | if ((sc->sc_flags & WM_F_HAS_MII) == 0) |
4675 | sc->sc_icr |= ICR_RXCFG; | | 4675 | sc->sc_icr |= ICR_RXCFG; |
4676 | CSR_WRITE(sc, WMREG_IMS, sc->sc_icr); | | 4676 | CSR_WRITE(sc, WMREG_IMS, sc->sc_icr); |
4677 | | | 4677 | |
4678 | if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) | | 4678 | if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) |
4679 | || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) | | 4679 | || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) |
4680 | || (sc->sc_type == WM_T_PCH2)) { | | 4680 | || (sc->sc_type == WM_T_PCH2)) { |
4681 | reg = CSR_READ(sc, WMREG_KABGTXD); | | 4681 | reg = CSR_READ(sc, WMREG_KABGTXD); |
4682 | reg |= KABGTXD_BGSQLBIAS; | | 4682 | reg |= KABGTXD_BGSQLBIAS; |
4683 | CSR_WRITE(sc, WMREG_KABGTXD, reg); | | 4683 | CSR_WRITE(sc, WMREG_KABGTXD, reg); |
4684 | } | | 4684 | } |
4685 | | | 4685 | |
4686 | /* Set up the inter-packet gap. */ | | 4686 | /* Set up the inter-packet gap. */ |
4687 | CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg); | | 4687 | CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg); |
4688 | | | 4688 | |
4689 | if (sc->sc_type >= WM_T_82543) { | | 4689 | if (sc->sc_type >= WM_T_82543) { |
4690 | /* | | 4690 | /* |
4691 | * Set up the interrupt throttling register (units of 256ns) | | 4691 | * Set up the interrupt throttling register (units of 256ns) |
4692 | * Note that a footnote in Intel's documentation says this | | 4692 | * Note that a footnote in Intel's documentation says this |
4693 | * ticker runs at 1/4 the rate when the chip is in 100Mbit | | 4693 | * ticker runs at 1/4 the rate when the chip is in 100Mbit |
4694 | * or 10Mbit mode. Empirically, it appears to be the case | | 4694 | * or 10Mbit mode. Empirically, it appears to be the case |
4695 | * that that is also true for the 1024ns units of the other | | 4695 | * that that is also true for the 1024ns units of the other |
4696 | * interrupt-related timer registers -- so, really, we ought | | 4696 | * interrupt-related timer registers -- so, really, we ought |
4697 | * to divide this value by 4 when the link speed is low. | | 4697 | * to divide this value by 4 when the link speed is low. |
4698 | * | | 4698 | * |
4699 | * XXX implement this division at link speed change! | | 4699 | * XXX implement this division at link speed change! |
4700 | */ | | 4700 | */ |
4701 | | | 4701 | |
4702 | /* | | 4702 | /* |
4703 | * For N interrupts/sec, set this value to: | | 4703 | * For N interrupts/sec, set this value to: |
4704 | * 1000000000 / (N * 256). Note that we set the | | 4704 | * 1000000000 / (N * 256). Note that we set the |
4705 | * absolute and packet timer values to this value | | 4705 | * absolute and packet timer values to this value |
4706 | * divided by 4 to get "simple timer" behavior. | | 4706 | * divided by 4 to get "simple timer" behavior. |
4707 | */ | | 4707 | */ |
4708 | | | 4708 | |
4709 | sc->sc_itr = 1500; /* 2604 ints/sec */ | | 4709 | sc->sc_itr = 1500; /* 2604 ints/sec */ |
4710 | CSR_WRITE(sc, WMREG_ITR, sc->sc_itr); | | 4710 | CSR_WRITE(sc, WMREG_ITR, sc->sc_itr); |
4711 | } | | 4711 | } |
4712 | | | 4712 | |
4713 | /* Set the VLAN ethernetype. */ | | 4713 | /* Set the VLAN ethernetype. */ |
4714 | CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN); | | 4714 | CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN); |
4715 | | | 4715 | |
4716 | /* | | 4716 | /* |
4717 | * Set up the transmit control register; we start out with | | 4717 | * Set up the transmit control register; we start out with |
4718 | * a collision distance suitable for FDX, but update it whe | | 4718 | * a collision distance suitable for FDX, but update it whe |
4719 | * we resolve the media type. | | 4719 | * we resolve the media type. |
4720 | */ | | 4720 | */ |
4721 | sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC | | 4721 | sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC |
4722 | | TCTL_CT(TX_COLLISION_THRESHOLD) | | 4722 | | TCTL_CT(TX_COLLISION_THRESHOLD) |
4723 | | TCTL_COLD(TX_COLLISION_DISTANCE_FDX); | | 4723 | | TCTL_COLD(TX_COLLISION_DISTANCE_FDX); |
4724 | if (sc->sc_type >= WM_T_82571) | | 4724 | if (sc->sc_type >= WM_T_82571) |
4725 | sc->sc_tctl |= TCTL_MULR; | | 4725 | sc->sc_tctl |= TCTL_MULR; |
4726 | CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); | | 4726 | CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); |
4727 | | | 4727 | |
4728 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { | | 4728 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { |
4729 | /* | | 4729 | /* |
4730 | * Write TDT after TCTL.EN is set. | | 4730 | * Write TDT after TCTL.EN is set. |
4731 | * See the document. | | 4731 | * See the document. |
4732 | */ | | 4732 | */ |
4733 | CSR_WRITE(sc, WMREG_TDT, 0); | | 4733 | CSR_WRITE(sc, WMREG_TDT, 0); |
4734 | } | | 4734 | } |
4735 | | | 4735 | |
4736 | if (sc->sc_type == WM_T_80003) { | | 4736 | if (sc->sc_type == WM_T_80003) { |
4737 | reg = CSR_READ(sc, WMREG_TCTL_EXT); | | 4737 | reg = CSR_READ(sc, WMREG_TCTL_EXT); |
4738 | reg &= ~TCTL_EXT_GCEX_MASK; | | 4738 | reg &= ~TCTL_EXT_GCEX_MASK; |
4739 | reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX; | | 4739 | reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX; |
4740 | CSR_WRITE(sc, WMREG_TCTL_EXT, reg); | | 4740 | CSR_WRITE(sc, WMREG_TCTL_EXT, reg); |
4741 | } | | 4741 | } |
4742 | | | 4742 | |
4743 | /* Set the media. */ | | 4743 | /* Set the media. */ |
4744 | if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0) | | 4744 | if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0) |
4745 | goto out; | | 4745 | goto out; |
4746 | | | 4746 | |
4747 | /* Configure for OS presence */ | | 4747 | /* Configure for OS presence */ |
4748 | wm_init_manageability(sc); | | 4748 | wm_init_manageability(sc); |
4749 | | | 4749 | |
4750 | /* | | 4750 | /* |
4751 | * Set up the receive control register; we actually program | | 4751 | * Set up the receive control register; we actually program |
4752 | * the register when we set the receive filter. Use multicast | | 4752 | * the register when we set the receive filter. Use multicast |
4753 | * address offset type 0. | | 4753 | * address offset type 0. |
4754 | * | | 4754 | * |
4755 | * Only the i82544 has the ability to strip the incoming | | 4755 | * Only the i82544 has the ability to strip the incoming |
4756 | * CRC, so we don't enable that feature. | | 4756 | * CRC, so we don't enable that feature. |
4757 | */ | | 4757 | */ |
4758 | sc->sc_mchash_type = 0; | | 4758 | sc->sc_mchash_type = 0; |
4759 | sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF | | 4759 | sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF |
4760 | | RCTL_MO(sc->sc_mchash_type); | | 4760 | | RCTL_MO(sc->sc_mchash_type); |
4761 | | | 4761 | |
4762 | /* | | 4762 | /* |
4763 | * The I350 has a bug where it always strips the CRC whether | | 4763 | * The I350 has a bug where it always strips the CRC whether |
4764 | * asked to or not. So ask for stripped CRC here and cope in rxeof | | 4764 | * asked to or not. So ask for stripped CRC here and cope in rxeof |
4765 | */ | | 4765 | */ |
4766 | if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)) | | 4766 | if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)) |
4767 | sc->sc_rctl |= RCTL_SECRC; | | 4767 | sc->sc_rctl |= RCTL_SECRC; |
4768 | | | 4768 | |
4769 | if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0) | | 4769 | if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0) |
4770 | && (ifp->if_mtu > ETHERMTU)) { | | 4770 | && (ifp->if_mtu > ETHERMTU)) { |
4771 | sc->sc_rctl |= RCTL_LPE; | | 4771 | sc->sc_rctl |= RCTL_LPE; |
4772 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) | | 4772 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) |
4773 | CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO); | | 4773 | CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO); |
4774 | } | | 4774 | } |
4775 | | | 4775 | |
4776 | if (MCLBYTES == 2048) { | | 4776 | if (MCLBYTES == 2048) { |
4777 | sc->sc_rctl |= RCTL_2k; | | 4777 | sc->sc_rctl |= RCTL_2k; |
4778 | } else { | | 4778 | } else { |
4779 | if (sc->sc_type >= WM_T_82543) { | | 4779 | if (sc->sc_type >= WM_T_82543) { |
4780 | switch (MCLBYTES) { | | 4780 | switch (MCLBYTES) { |
4781 | case 4096: | | 4781 | case 4096: |
4782 | sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k; | | 4782 | sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k; |
4783 | break; | | 4783 | break; |
4784 | case 8192: | | 4784 | case 8192: |
4785 | sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k; | | 4785 | sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k; |
4786 | break; | | 4786 | break; |
4787 | case 16384: | | 4787 | case 16384: |
4788 | sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k; | | 4788 | sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k; |
4789 | break; | | 4789 | break; |
4790 | default: | | 4790 | default: |
4791 | panic("wm_init: MCLBYTES %d unsupported", | | 4791 | panic("wm_init: MCLBYTES %d unsupported", |
4792 | MCLBYTES); | | 4792 | MCLBYTES); |
4793 | break; | | 4793 | break; |
4794 | } | | 4794 | } |
4795 | } else panic("wm_init: i82542 requires MCLBYTES = 2048"); | | 4795 | } else panic("wm_init: i82542 requires MCLBYTES = 2048"); |
4796 | } | | 4796 | } |
4797 | | | 4797 | |
4798 | /* Set the receive filter. */ | | 4798 | /* Set the receive filter. */ |
4799 | wm_set_filter(sc); | | 4799 | wm_set_filter(sc); |
4800 | | | 4800 | |
4801 | /* On 575 and later set RDT only if RX enabled */ | | 4801 | /* On 575 and later set RDT only if RX enabled */ |
4802 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) | | 4802 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) |
4803 | for (i = 0; i < WM_NRXDESC; i++) | | 4803 | for (i = 0; i < WM_NRXDESC; i++) |
4804 | WM_INIT_RXDESC(sc, i); | | 4804 | WM_INIT_RXDESC(sc, i); |
4805 | | | 4805 | |
4806 | /* Start the one second link check clock. */ | | 4806 | /* Start the one second link check clock. */ |
4807 | callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc); | | 4807 | callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc); |
4808 | | | 4808 | |
4809 | /* ...all done! */ | | 4809 | /* ...all done! */ |
4810 | ifp->if_flags |= IFF_RUNNING; | | 4810 | ifp->if_flags |= IFF_RUNNING; |
4811 | ifp->if_flags &= ~IFF_OACTIVE; | | 4811 | ifp->if_flags &= ~IFF_OACTIVE; |
4812 | | | 4812 | |
4813 | out: | | 4813 | out: |
4814 | sc->sc_if_flags = ifp->if_flags; | | 4814 | sc->sc_if_flags = ifp->if_flags; |
4815 | if (error) | | 4815 | if (error) |
4816 | log(LOG_ERR, "%s: interface not running\n", | | 4816 | log(LOG_ERR, "%s: interface not running\n", |
4817 | device_xname(sc->sc_dev)); | | 4817 | device_xname(sc->sc_dev)); |
4818 | return error; | | 4818 | return error; |
4819 | } | | 4819 | } |
4820 | | | 4820 | |
4821 | /* | | 4821 | /* |
4822 | * wm_rxdrain: | | 4822 | * wm_rxdrain: |
4823 | * | | 4823 | * |
4824 | * Drain the receive queue. | | 4824 | * Drain the receive queue. |