| @@ -1,2332 +1,2332 @@ | | | @@ -1,2332 +1,2332 @@ |
1 | /* $NetBSD: i82557.c,v 1.128 2009/03/15 14:48:11 tsutsui Exp $ */ | | 1 | /* $NetBSD: i82557.c,v 1.129 2009/03/16 12:13:04 tsutsui Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 1997, 1998, 1999, 2001, 2002 The NetBSD Foundation, Inc. | | 4 | * Copyright (c) 1997, 1998, 1999, 2001, 2002 The NetBSD Foundation, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation | | 7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, | | 8 | * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, |
9 | * NASA Ames Research Center. | | 9 | * NASA Ames Research Center. |
10 | * | | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | | 11 | * Redistribution and use in source and binary forms, with or without |
12 | * modification, are permitted provided that the following conditions | | 12 | * modification, are permitted provided that the following conditions |
13 | * are met: | | 13 | * are met: |
14 | * 1. Redistributions of source code must retain the above copyright | | 14 | * 1. Redistributions of source code must retain the above copyright |
15 | * notice, this list of conditions and the following disclaimer. | | 15 | * notice, this list of conditions and the following disclaimer. |
16 | * 2. Redistributions in binary form must reproduce the above copyright | | 16 | * 2. Redistributions in binary form must reproduce the above copyright |
17 | * notice, this list of conditions and the following disclaimer in the | | 17 | * notice, this list of conditions and the following disclaimer in the |
18 | * documentation and/or other materials provided with the distribution. | | 18 | * documentation and/or other materials provided with the distribution. |
19 | * | | 19 | * |
20 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 20 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
21 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 21 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
22 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 22 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
23 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 23 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
24 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 24 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
25 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 25 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
26 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 26 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
27 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 27 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
28 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 28 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
29 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 29 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
30 | * POSSIBILITY OF SUCH DAMAGE. | | 30 | * POSSIBILITY OF SUCH DAMAGE. |
31 | */ | | 31 | */ |
32 | | | 32 | |
33 | /* | | 33 | /* |
34 | * Copyright (c) 1995, David Greenman | | 34 | * Copyright (c) 1995, David Greenman |
35 | * Copyright (c) 2001 Jonathan Lemon <jlemon@freebsd.org> | | 35 | * Copyright (c) 2001 Jonathan Lemon <jlemon@freebsd.org> |
36 | * All rights reserved. | | 36 | * All rights reserved. |
37 | * | | 37 | * |
38 | * Redistribution and use in source and binary forms, with or without | | 38 | * Redistribution and use in source and binary forms, with or without |
39 | * modification, are permitted provided that the following conditions | | 39 | * modification, are permitted provided that the following conditions |
40 | * are met: | | 40 | * are met: |
41 | * 1. Redistributions of source code must retain the above copyright | | 41 | * 1. Redistributions of source code must retain the above copyright |
42 | * notice unmodified, this list of conditions, and the following | | 42 | * notice unmodified, this list of conditions, and the following |
43 | * disclaimer. | | 43 | * disclaimer. |
44 | * 2. Redistributions in binary form must reproduce the above copyright | | 44 | * 2. Redistributions in binary form must reproduce the above copyright |
45 | * notice, this list of conditions and the following disclaimer in the | | 45 | * notice, this list of conditions and the following disclaimer in the |
46 | * documentation and/or other materials provided with the distribution. | | 46 | * documentation and/or other materials provided with the distribution. |
47 | * | | 47 | * |
48 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND | | 48 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
49 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | | 49 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
50 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | | 50 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
51 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE | | 51 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE |
52 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | | 52 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
53 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | | 53 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
54 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | | 54 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
55 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | | 55 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
56 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | | 56 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
57 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | | 57 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
58 | * SUCH DAMAGE. | | 58 | * SUCH DAMAGE. |
59 | * | | 59 | * |
60 | * Id: if_fxp.c,v 1.113 2001/05/17 23:50:24 jlemon | | 60 | * Id: if_fxp.c,v 1.113 2001/05/17 23:50:24 jlemon |
61 | */ | | 61 | */ |
62 | | | 62 | |
63 | /* | | 63 | /* |
64 | * Device driver for the Intel i82557 fast Ethernet controller, | | 64 | * Device driver for the Intel i82557 fast Ethernet controller, |
65 | * and its successors, the i82558 and i82559. | | 65 | * and its successors, the i82558 and i82559. |
66 | */ | | 66 | */ |
67 | | | 67 | |
68 | #include <sys/cdefs.h> | | 68 | #include <sys/cdefs.h> |
69 | __KERNEL_RCSID(0, "$NetBSD: i82557.c,v 1.128 2009/03/15 14:48:11 tsutsui Exp $"); | | 69 | __KERNEL_RCSID(0, "$NetBSD: i82557.c,v 1.129 2009/03/16 12:13:04 tsutsui Exp $"); |
70 | | | 70 | |
71 | #include "bpfilter.h" | | 71 | #include "bpfilter.h" |
72 | #include "rnd.h" | | 72 | #include "rnd.h" |
73 | | | 73 | |
74 | #include <sys/param.h> | | 74 | #include <sys/param.h> |
75 | #include <sys/systm.h> | | 75 | #include <sys/systm.h> |
76 | #include <sys/callout.h> | | 76 | #include <sys/callout.h> |
77 | #include <sys/mbuf.h> | | 77 | #include <sys/mbuf.h> |
78 | #include <sys/malloc.h> | | 78 | #include <sys/malloc.h> |
79 | #include <sys/kernel.h> | | 79 | #include <sys/kernel.h> |
80 | #include <sys/socket.h> | | 80 | #include <sys/socket.h> |
81 | #include <sys/ioctl.h> | | 81 | #include <sys/ioctl.h> |
82 | #include <sys/errno.h> | | 82 | #include <sys/errno.h> |
83 | #include <sys/device.h> | | 83 | #include <sys/device.h> |
84 | #include <sys/syslog.h> | | 84 | #include <sys/syslog.h> |
85 | | | 85 | |
86 | #include <machine/endian.h> | | 86 | #include <machine/endian.h> |
87 | | | 87 | |
88 | #include <uvm/uvm_extern.h> | | 88 | #include <uvm/uvm_extern.h> |
89 | | | 89 | |
90 | #if NRND > 0 | | 90 | #if NRND > 0 |
91 | #include <sys/rnd.h> | | 91 | #include <sys/rnd.h> |
92 | #endif | | 92 | #endif |
93 | | | 93 | |
94 | #include <net/if.h> | | 94 | #include <net/if.h> |
95 | #include <net/if_dl.h> | | 95 | #include <net/if_dl.h> |
96 | #include <net/if_media.h> | | 96 | #include <net/if_media.h> |
97 | #include <net/if_ether.h> | | 97 | #include <net/if_ether.h> |
98 | | | 98 | |
99 | #include <netinet/in.h> | | 99 | #include <netinet/in.h> |
100 | #include <netinet/in_systm.h> | | 100 | #include <netinet/in_systm.h> |
101 | #include <netinet/ip.h> | | 101 | #include <netinet/ip.h> |
102 | #include <netinet/tcp.h> | | 102 | #include <netinet/tcp.h> |
103 | #include <netinet/udp.h> | | 103 | #include <netinet/udp.h> |
104 | | | 104 | |
105 | #if NBPFILTER > 0 | | 105 | #if NBPFILTER > 0 |
106 | #include <net/bpf.h> | | 106 | #include <net/bpf.h> |
107 | #endif | | 107 | #endif |
108 | | | 108 | |
109 | #include <sys/bus.h> | | 109 | #include <sys/bus.h> |
110 | #include <sys/intr.h> | | 110 | #include <sys/intr.h> |
111 | | | 111 | |
112 | #include <dev/mii/miivar.h> | | 112 | #include <dev/mii/miivar.h> |
113 | | | 113 | |
114 | #include <dev/ic/i82557reg.h> | | 114 | #include <dev/ic/i82557reg.h> |
115 | #include <dev/ic/i82557var.h> | | 115 | #include <dev/ic/i82557var.h> |
116 | | | 116 | |
117 | #include <dev/microcode/i8255x/rcvbundl.h> | | 117 | #include <dev/microcode/i8255x/rcvbundl.h> |
118 | | | 118 | |
119 | /* | | 119 | /* |
120 | * NOTE! On the Alpha, we have an alignment constraint. The | | 120 | * NOTE! On the Alpha, we have an alignment constraint. The |
121 | * card DMAs the packet immediately following the RFA. However, | | 121 | * card DMAs the packet immediately following the RFA. However, |
122 | * the first thing in the packet is a 14-byte Ethernet header. | | 122 | * the first thing in the packet is a 14-byte Ethernet header. |
123 | * This means that the packet is misaligned. To compensate, | | 123 | * This means that the packet is misaligned. To compensate, |
124 | * we actually offset the RFA 2 bytes into the cluster. This | | 124 | * we actually offset the RFA 2 bytes into the cluster. This |
125 | * alignes the packet after the Ethernet header at a 32-bit | | 125 | * alignes the packet after the Ethernet header at a 32-bit |
126 | * boundary. HOWEVER! This means that the RFA is misaligned! | | 126 | * boundary. HOWEVER! This means that the RFA is misaligned! |
127 | */ | | 127 | */ |
128 | #define RFA_ALIGNMENT_FUDGE 2 | | 128 | #define RFA_ALIGNMENT_FUDGE 2 |
129 | | | 129 | |
/*
 * The configuration byte map has several undefined fields which
 * must be one or must be zero.  Set up a template for these bits
 * only (assuming an i82557 chip), leaving the actual configuration
 * for fxp_init().
 *
 * See the definition of struct fxp_cb_config for the bit definitions.
 * Bytes 0..31 below are the i82557 configuration bytes proper; the
 * leading cb_status/cb_command/link_addr fields are the generic
 * command-block header and are filled in at run time.
 */
const uint8_t fxp_cb_config_template[] = {
	0x0, 0x0,		/* cb_status */
	0x0, 0x0,		/* cb_command */
	0x0, 0x0, 0x0, 0x0,	/* link_addr */
	0x0,	/* 0 */
	0x0,	/* 1 */
	0x0,	/* 2 */
	0x0,	/* 3 */
	0x0,	/* 4 */
	0x0,	/* 5 */
	0x32,	/* 6 */
	0x0,	/* 7 */
	0x0,	/* 8 */
	0x0,	/* 9 */
	0x6,	/* 10 */
	0x0,	/* 11 */
	0x0,	/* 12 */
	0x0,	/* 13 */
	0xf2,	/* 14 */
	0x48,	/* 15 */
	0x0,	/* 16 */
	0x40,	/* 17 */
	0xf0,	/* 18 */
	0x0,	/* 19 */
	0x3f,	/* 20 */
	0x5,	/* 21 */
	0x0,	/* 22 */
	0x0,	/* 23 */
	0x0,	/* 24 */
	0x0,	/* 25 */
	0x0,	/* 26 */
	0x0,	/* 27 */
	0x0,	/* 28 */
	0x0,	/* 29 */
	0x0,	/* 30 */
	0x0,	/* 31 */
};
175 | | | 175 | |
176 | void fxp_mii_initmedia(struct fxp_softc *); | | 176 | void fxp_mii_initmedia(struct fxp_softc *); |
177 | void fxp_mii_mediastatus(struct ifnet *, struct ifmediareq *); | | 177 | void fxp_mii_mediastatus(struct ifnet *, struct ifmediareq *); |
178 | | | 178 | |
179 | void fxp_80c24_initmedia(struct fxp_softc *); | | 179 | void fxp_80c24_initmedia(struct fxp_softc *); |
180 | int fxp_80c24_mediachange(struct ifnet *); | | 180 | int fxp_80c24_mediachange(struct ifnet *); |
181 | void fxp_80c24_mediastatus(struct ifnet *, struct ifmediareq *); | | 181 | void fxp_80c24_mediastatus(struct ifnet *, struct ifmediareq *); |
182 | | | 182 | |
183 | void fxp_start(struct ifnet *); | | 183 | void fxp_start(struct ifnet *); |
184 | int fxp_ioctl(struct ifnet *, u_long, void *); | | 184 | int fxp_ioctl(struct ifnet *, u_long, void *); |
185 | void fxp_watchdog(struct ifnet *); | | 185 | void fxp_watchdog(struct ifnet *); |
186 | int fxp_init(struct ifnet *); | | 186 | int fxp_init(struct ifnet *); |
187 | void fxp_stop(struct ifnet *, int); | | 187 | void fxp_stop(struct ifnet *, int); |
188 | | | 188 | |
189 | void fxp_txintr(struct fxp_softc *); | | 189 | void fxp_txintr(struct fxp_softc *); |
190 | int fxp_rxintr(struct fxp_softc *); | | 190 | int fxp_rxintr(struct fxp_softc *); |
191 | | | 191 | |
192 | void fxp_rx_hwcksum(struct fxp_softc *,struct mbuf *, | | 192 | void fxp_rx_hwcksum(struct fxp_softc *,struct mbuf *, |
193 | const struct fxp_rfa *, u_int); | | 193 | const struct fxp_rfa *, u_int); |
194 | | | 194 | |
195 | void fxp_rxdrain(struct fxp_softc *); | | 195 | void fxp_rxdrain(struct fxp_softc *); |
196 | int fxp_add_rfabuf(struct fxp_softc *, bus_dmamap_t, int); | | 196 | int fxp_add_rfabuf(struct fxp_softc *, bus_dmamap_t, int); |
197 | int fxp_mdi_read(device_t, int, int); | | 197 | int fxp_mdi_read(device_t, int, int); |
198 | void fxp_statchg(device_t); | | 198 | void fxp_statchg(device_t); |
199 | void fxp_mdi_write(device_t, int, int, int); | | 199 | void fxp_mdi_write(device_t, int, int, int); |
200 | void fxp_autosize_eeprom(struct fxp_softc*); | | 200 | void fxp_autosize_eeprom(struct fxp_softc*); |
201 | void fxp_read_eeprom(struct fxp_softc *, uint16_t *, int, int); | | 201 | void fxp_read_eeprom(struct fxp_softc *, uint16_t *, int, int); |
202 | void fxp_write_eeprom(struct fxp_softc *, uint16_t *, int, int); | | 202 | void fxp_write_eeprom(struct fxp_softc *, uint16_t *, int, int); |
203 | void fxp_eeprom_update_cksum(struct fxp_softc *); | | 203 | void fxp_eeprom_update_cksum(struct fxp_softc *); |
204 | void fxp_get_info(struct fxp_softc *, uint8_t *); | | 204 | void fxp_get_info(struct fxp_softc *, uint8_t *); |
205 | void fxp_tick(void *); | | 205 | void fxp_tick(void *); |
206 | void fxp_mc_setup(struct fxp_softc *); | | 206 | void fxp_mc_setup(struct fxp_softc *); |
207 | void fxp_load_ucode(struct fxp_softc *); | | 207 | void fxp_load_ucode(struct fxp_softc *); |
208 | | | 208 | |
209 | int fxp_copy_small = 0; | | 209 | int fxp_copy_small = 0; |
210 | | | 210 | |
211 | /* | | 211 | /* |
212 | * Variables for interrupt mitigating microcode. | | 212 | * Variables for interrupt mitigating microcode. |
213 | */ | | 213 | */ |
214 | int fxp_int_delay = 1000; /* usec */ | | 214 | int fxp_int_delay = 1000; /* usec */ |
215 | int fxp_bundle_max = 6; /* packets */ | | 215 | int fxp_bundle_max = 6; /* packets */ |
216 | | | 216 | |
217 | struct fxp_phytype { | | 217 | struct fxp_phytype { |
218 | int fp_phy; /* type of PHY, -1 for MII at the end. */ | | 218 | int fp_phy; /* type of PHY, -1 for MII at the end. */ |
219 | void (*fp_init)(struct fxp_softc *); | | 219 | void (*fp_init)(struct fxp_softc *); |
220 | } fxp_phytype_table[] = { | | 220 | } fxp_phytype_table[] = { |
221 | { FXP_PHY_80C24, fxp_80c24_initmedia }, | | 221 | { FXP_PHY_80C24, fxp_80c24_initmedia }, |
222 | { -1, fxp_mii_initmedia }, | | 222 | { -1, fxp_mii_initmedia }, |
223 | }; | | 223 | }; |
224 | | | 224 | |
/*
 * Set initial transmit threshold at 64 (512 bytes).  This is
 * increased by 64 (512 bytes) at a time, to maximum of 192
 * (1536 bytes), if an underrun occurs.  The unit is 8 bytes,
 * i.e. the chip begins transmitting once threshold*8 bytes of
 * the frame have been DMA'd into its FIFO.
 */
static int tx_threshold = 64;
231 | | | 231 | |
232 | /* | | 232 | /* |
233 | * Wait for the previous command to be accepted (but not necessarily | | 233 | * Wait for the previous command to be accepted (but not necessarily |
234 | * completed). | | 234 | * completed). |
235 | */ | | 235 | */ |
236 | static inline void | | 236 | static inline void |
237 | fxp_scb_wait(struct fxp_softc *sc) | | 237 | fxp_scb_wait(struct fxp_softc *sc) |
238 | { | | 238 | { |
239 | int i = 10000; | | 239 | int i = 10000; |
240 | | | 240 | |
241 | while (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) && --i) | | 241 | while (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) && --i) |
242 | delay(2); | | 242 | delay(2); |
243 | if (i == 0) | | 243 | if (i == 0) |
244 | log(LOG_WARNING, | | 244 | log(LOG_WARNING, |
245 | "%s: WARNING: SCB timed out!\n", device_xname(sc->sc_dev)); | | 245 | "%s: WARNING: SCB timed out!\n", device_xname(sc->sc_dev)); |
246 | } | | 246 | } |
247 | | | 247 | |
248 | /* | | 248 | /* |
249 | * Submit a command to the i82557. | | 249 | * Submit a command to the i82557. |
250 | */ | | 250 | */ |
251 | static inline void | | 251 | static inline void |
252 | fxp_scb_cmd(struct fxp_softc *sc, uint8_t cmd) | | 252 | fxp_scb_cmd(struct fxp_softc *sc, uint8_t cmd) |
253 | { | | 253 | { |
254 | | | 254 | |
255 | CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, cmd); | | 255 | CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, cmd); |
256 | } | | 256 | } |
257 | | | 257 | |
258 | /* | | 258 | /* |
259 | * Finish attaching an i82557 interface. Called by bus-specific front-end. | | 259 | * Finish attaching an i82557 interface. Called by bus-specific front-end. |
260 | */ | | 260 | */ |
261 | void | | 261 | void |
262 | fxp_attach(struct fxp_softc *sc) | | 262 | fxp_attach(struct fxp_softc *sc) |
263 | { | | 263 | { |
264 | uint8_t enaddr[ETHER_ADDR_LEN]; | | 264 | uint8_t enaddr[ETHER_ADDR_LEN]; |
265 | struct ifnet *ifp; | | 265 | struct ifnet *ifp; |
266 | bus_dma_segment_t seg; | | 266 | bus_dma_segment_t seg; |
267 | int rseg, i, error; | | 267 | int rseg, i, error; |
268 | struct fxp_phytype *fp; | | 268 | struct fxp_phytype *fp; |
269 | | | 269 | |
270 | callout_init(&sc->sc_callout, 0); | | 270 | callout_init(&sc->sc_callout, 0); |
271 | | | 271 | |
272 | /* | | 272 | /* |
273 | * Enable use of extended RFDs and IPCBs for 82550 and later chips. | | 273 | * Enable use of extended RFDs and IPCBs for 82550 and later chips. |
274 | * Note: to use IPCB we need extended TXCB support too, and | | 274 | * Note: to use IPCB we need extended TXCB support too, and |
275 | * these feature flags should be set in each bus attachment. | | 275 | * these feature flags should be set in each bus attachment. |
276 | */ | | 276 | */ |
277 | if (sc->sc_flags & FXPF_EXT_RFA) { | | 277 | if (sc->sc_flags & FXPF_EXT_RFA) { |
278 | sc->sc_txcmd = htole16(FXP_CB_COMMAND_IPCBXMIT); | | 278 | sc->sc_txcmd = htole16(FXP_CB_COMMAND_IPCBXMIT); |
279 | sc->sc_rfa_size = RFA_EXT_SIZE; | | 279 | sc->sc_rfa_size = RFA_EXT_SIZE; |
280 | } else { | | 280 | } else { |
281 | sc->sc_txcmd = htole16(FXP_CB_COMMAND_XMIT); | | 281 | sc->sc_txcmd = htole16(FXP_CB_COMMAND_XMIT); |
282 | sc->sc_rfa_size = RFA_SIZE; | | 282 | sc->sc_rfa_size = RFA_SIZE; |
283 | } | | 283 | } |
284 | | | 284 | |
285 | /* | | 285 | /* |
286 | * Allocate the control data structures, and create and load the | | 286 | * Allocate the control data structures, and create and load the |
287 | * DMA map for it. | | 287 | * DMA map for it. |
288 | */ | | 288 | */ |
289 | if ((error = bus_dmamem_alloc(sc->sc_dmat, | | 289 | if ((error = bus_dmamem_alloc(sc->sc_dmat, |
290 | sizeof(struct fxp_control_data), PAGE_SIZE, 0, &seg, 1, &rseg, | | 290 | sizeof(struct fxp_control_data), PAGE_SIZE, 0, &seg, 1, &rseg, |
291 | 0)) != 0) { | | 291 | 0)) != 0) { |
292 | aprint_error_dev(sc->sc_dev, | | 292 | aprint_error_dev(sc->sc_dev, |
293 | "unable to allocate control data, error = %d\n", | | 293 | "unable to allocate control data, error = %d\n", |
294 | error); | | 294 | error); |
295 | goto fail_0; | | 295 | goto fail_0; |
296 | } | | 296 | } |
297 | | | 297 | |
298 | if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, | | 298 | if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, |
299 | sizeof(struct fxp_control_data), (void **)&sc->sc_control_data, | | 299 | sizeof(struct fxp_control_data), (void **)&sc->sc_control_data, |
300 | BUS_DMA_COHERENT)) != 0) { | | 300 | BUS_DMA_COHERENT)) != 0) { |
301 | aprint_error_dev(sc->sc_dev, | | 301 | aprint_error_dev(sc->sc_dev, |
302 | "unable to map control data, error = %d\n", error); | | 302 | "unable to map control data, error = %d\n", error); |
303 | goto fail_1; | | 303 | goto fail_1; |
304 | } | | 304 | } |
305 | sc->sc_cdseg = seg; | | 305 | sc->sc_cdseg = seg; |
306 | sc->sc_cdnseg = rseg; | | 306 | sc->sc_cdnseg = rseg; |
307 | | | 307 | |
308 | memset(sc->sc_control_data, 0, sizeof(struct fxp_control_data)); | | 308 | memset(sc->sc_control_data, 0, sizeof(struct fxp_control_data)); |
309 | | | 309 | |
310 | if ((error = bus_dmamap_create(sc->sc_dmat, | | 310 | if ((error = bus_dmamap_create(sc->sc_dmat, |
311 | sizeof(struct fxp_control_data), 1, | | 311 | sizeof(struct fxp_control_data), 1, |
312 | sizeof(struct fxp_control_data), 0, 0, &sc->sc_dmamap)) != 0) { | | 312 | sizeof(struct fxp_control_data), 0, 0, &sc->sc_dmamap)) != 0) { |
313 | aprint_error_dev(sc->sc_dev, | | 313 | aprint_error_dev(sc->sc_dev, |
314 | "unable to create control data DMA map, error = %d\n", | | 314 | "unable to create control data DMA map, error = %d\n", |
315 | error); | | 315 | error); |
316 | goto fail_2; | | 316 | goto fail_2; |
317 | } | | 317 | } |
318 | | | 318 | |
319 | if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, | | 319 | if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, |
320 | sc->sc_control_data, sizeof(struct fxp_control_data), NULL, | | 320 | sc->sc_control_data, sizeof(struct fxp_control_data), NULL, |
321 | 0)) != 0) { | | 321 | 0)) != 0) { |
322 | aprint_error_dev(sc->sc_dev, | | 322 | aprint_error_dev(sc->sc_dev, |
323 | "can't load control data DMA map, error = %d\n", | | 323 | "can't load control data DMA map, error = %d\n", |
324 | error); | | 324 | error); |
325 | goto fail_3; | | 325 | goto fail_3; |
326 | } | | 326 | } |
327 | | | 327 | |
328 | /* | | 328 | /* |
329 | * Create the transmit buffer DMA maps. | | 329 | * Create the transmit buffer DMA maps. |
330 | */ | | 330 | */ |
331 | for (i = 0; i < FXP_NTXCB; i++) { | | 331 | for (i = 0; i < FXP_NTXCB; i++) { |
332 | if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, | | 332 | if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, |
333 | (sc->sc_flags & FXPF_EXT_RFA) ? | | 333 | (sc->sc_flags & FXPF_EXT_RFA) ? |
334 | FXP_IPCB_NTXSEG : FXP_NTXSEG, | | 334 | FXP_IPCB_NTXSEG : FXP_NTXSEG, |
335 | MCLBYTES, 0, 0, &FXP_DSTX(sc, i)->txs_dmamap)) != 0) { | | 335 | MCLBYTES, 0, 0, &FXP_DSTX(sc, i)->txs_dmamap)) != 0) { |
336 | aprint_error_dev(sc->sc_dev, | | 336 | aprint_error_dev(sc->sc_dev, |
337 | "unable to create tx DMA map %d, error = %d\n", | | 337 | "unable to create tx DMA map %d, error = %d\n", |
338 | i, error); | | 338 | i, error); |
339 | goto fail_4; | | 339 | goto fail_4; |
340 | } | | 340 | } |
341 | } | | 341 | } |
342 | | | 342 | |
343 | /* | | 343 | /* |
344 | * Create the receive buffer DMA maps. | | 344 | * Create the receive buffer DMA maps. |
345 | */ | | 345 | */ |
346 | for (i = 0; i < FXP_NRFABUFS; i++) { | | 346 | for (i = 0; i < FXP_NRFABUFS; i++) { |
347 | if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, | | 347 | if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, |
348 | MCLBYTES, 0, 0, &sc->sc_rxmaps[i])) != 0) { | | 348 | MCLBYTES, 0, 0, &sc->sc_rxmaps[i])) != 0) { |
349 | aprint_error_dev(sc->sc_dev, | | 349 | aprint_error_dev(sc->sc_dev, |
350 | "unable to create rx DMA map %d, error = %d\n", | | 350 | "unable to create rx DMA map %d, error = %d\n", |
351 | i, error); | | 351 | i, error); |
352 | goto fail_5; | | 352 | goto fail_5; |
353 | } | | 353 | } |
354 | } | | 354 | } |
355 | | | 355 | |
356 | /* Initialize MAC address and media structures. */ | | 356 | /* Initialize MAC address and media structures. */ |
357 | fxp_get_info(sc, enaddr); | | 357 | fxp_get_info(sc, enaddr); |
358 | | | 358 | |
359 | aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n", | | 359 | aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n", |
360 | ether_sprintf(enaddr)); | | 360 | ether_sprintf(enaddr)); |
361 | | | 361 | |
362 | ifp = &sc->sc_ethercom.ec_if; | | 362 | ifp = &sc->sc_ethercom.ec_if; |
363 | | | 363 | |
364 | /* | | 364 | /* |
365 | * Get info about our media interface, and initialize it. Note | | 365 | * Get info about our media interface, and initialize it. Note |
366 | * the table terminates itself with a phy of -1, indicating | | 366 | * the table terminates itself with a phy of -1, indicating |
367 | * that we're using MII. | | 367 | * that we're using MII. |
368 | */ | | 368 | */ |
369 | for (fp = fxp_phytype_table; fp->fp_phy != -1; fp++) | | 369 | for (fp = fxp_phytype_table; fp->fp_phy != -1; fp++) |
370 | if (fp->fp_phy == sc->phy_primary_device) | | 370 | if (fp->fp_phy == sc->phy_primary_device) |
371 | break; | | 371 | break; |
372 | (*fp->fp_init)(sc); | | 372 | (*fp->fp_init)(sc); |
373 | | | 373 | |
374 | strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); | | 374 | strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); |
375 | ifp->if_softc = sc; | | 375 | ifp->if_softc = sc; |
376 | ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; | | 376 | ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; |
377 | ifp->if_ioctl = fxp_ioctl; | | 377 | ifp->if_ioctl = fxp_ioctl; |
378 | ifp->if_start = fxp_start; | | 378 | ifp->if_start = fxp_start; |
379 | ifp->if_watchdog = fxp_watchdog; | | 379 | ifp->if_watchdog = fxp_watchdog; |
380 | ifp->if_init = fxp_init; | | 380 | ifp->if_init = fxp_init; |
381 | ifp->if_stop = fxp_stop; | | 381 | ifp->if_stop = fxp_stop; |
382 | IFQ_SET_READY(&ifp->if_snd); | | 382 | IFQ_SET_READY(&ifp->if_snd); |
383 | | | 383 | |
384 | if (sc->sc_flags & FXPF_EXT_RFA) { | | 384 | if (sc->sc_flags & FXPF_EXT_RFA) { |
385 | /* | | 385 | /* |
386 | * Enable hardware cksum support by EXT_RFA and IPCB. | | 386 | * Enable hardware cksum support by EXT_RFA and IPCB. |
387 | * | | 387 | * |
388 | * IFCAP_CSUM_IPv4_Tx seems to have a problem, | | 388 | * IFCAP_CSUM_IPv4_Tx seems to have a problem, |
389 | * at least, on i82550 rev.12. | | 389 | * at least, on i82550 rev.12. |
390 | * specifically, it doesn't set ipv4 checksum properly | | 390 | * specifically, it doesn't set ipv4 checksum properly |
391 | * when sending UDP (and probably TCP) packets with | | 391 | * when sending UDP (and probably TCP) packets with |
392 | * 20 byte ipv4 header + 1 or 2 byte data, | | 392 | * 20 byte ipv4 header + 1 or 2 byte data, |
393 | * though ICMP packets seem working. | | 393 | * though ICMP packets seem working. |
394 | * FreeBSD driver has related comments. | | 394 | * FreeBSD driver has related comments. |
395 | * We've added a workaround to handle the bug by padding | | 395 | * We've added a workaround to handle the bug by padding |
396 | * such packets manually. | | 396 | * such packets manually. |
397 | */ | | 397 | */ |
398 | ifp->if_capabilities = | | 398 | ifp->if_capabilities = |
399 | IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | | | 399 | IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | |
400 | IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | | | 400 | IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | |
401 | IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; | | 401 | IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; |
402 | sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING; | | 402 | sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING; |
403 | } else if (sc->sc_flags & FXPF_82559_RXCSUM) { | | 403 | } else if (sc->sc_flags & FXPF_82559_RXCSUM) { |
404 | ifp->if_capabilities = | | 404 | ifp->if_capabilities = |
405 | IFCAP_CSUM_TCPv4_Rx | | | 405 | IFCAP_CSUM_TCPv4_Rx | |
406 | IFCAP_CSUM_UDPv4_Rx; | | 406 | IFCAP_CSUM_UDPv4_Rx; |
407 | } | | 407 | } |
408 | | | 408 | |
409 | /* | | 409 | /* |
410 | * We can support 802.1Q VLAN-sized frames. | | 410 | * We can support 802.1Q VLAN-sized frames. |
411 | */ | | 411 | */ |
412 | sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; | | 412 | sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; |
413 | | | 413 | |
414 | /* | | 414 | /* |
415 | * Attach the interface. | | 415 | * Attach the interface. |
416 | */ | | 416 | */ |
417 | if_attach(ifp); | | 417 | if_attach(ifp); |
418 | ether_ifattach(ifp, enaddr); | | 418 | ether_ifattach(ifp, enaddr); |
419 | #if NRND > 0 | | 419 | #if NRND > 0 |
420 | rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev), | | 420 | rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev), |
421 | RND_TYPE_NET, 0); | | 421 | RND_TYPE_NET, 0); |
422 | #endif | | 422 | #endif |
423 | | | 423 | |
424 | #ifdef FXP_EVENT_COUNTERS | | 424 | #ifdef FXP_EVENT_COUNTERS |
425 | evcnt_attach_dynamic(&sc->sc_ev_txstall, EVCNT_TYPE_MISC, | | 425 | evcnt_attach_dynamic(&sc->sc_ev_txstall, EVCNT_TYPE_MISC, |
426 | NULL, device_xname(sc->sc_dev), "txstall"); | | 426 | NULL, device_xname(sc->sc_dev), "txstall"); |
427 | evcnt_attach_dynamic(&sc->sc_ev_txintr, EVCNT_TYPE_INTR, | | 427 | evcnt_attach_dynamic(&sc->sc_ev_txintr, EVCNT_TYPE_INTR, |
428 | NULL, device_xname(sc->sc_dev), "txintr"); | | 428 | NULL, device_xname(sc->sc_dev), "txintr"); |
429 | evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR, | | 429 | evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR, |
430 | NULL, device_xname(sc->sc_dev), "rxintr"); | | 430 | NULL, device_xname(sc->sc_dev), "rxintr"); |
431 | if (sc->sc_flags & FXPF_FC) { | | 431 | if (sc->sc_flags & FXPF_FC) { |
432 | evcnt_attach_dynamic(&sc->sc_ev_txpause, EVCNT_TYPE_MISC, | | 432 | evcnt_attach_dynamic(&sc->sc_ev_txpause, EVCNT_TYPE_MISC, |
433 | NULL, device_xname(sc->sc_dev), "txpause"); | | 433 | NULL, device_xname(sc->sc_dev), "txpause"); |
434 | evcnt_attach_dynamic(&sc->sc_ev_rxpause, EVCNT_TYPE_MISC, | | 434 | evcnt_attach_dynamic(&sc->sc_ev_rxpause, EVCNT_TYPE_MISC, |
435 | NULL, device_xname(sc->sc_dev), "rxpause"); | | 435 | NULL, device_xname(sc->sc_dev), "rxpause"); |
436 | } | | 436 | } |
437 | #endif /* FXP_EVENT_COUNTERS */ | | 437 | #endif /* FXP_EVENT_COUNTERS */ |
438 | | | 438 | |
439 | /* The attach is successful. */ | | 439 | /* The attach is successful. */ |
440 | sc->sc_flags |= FXPF_ATTACHED; | | 440 | sc->sc_flags |= FXPF_ATTACHED; |
441 | | | 441 | |
442 | return; | | 442 | return; |
443 | | | 443 | |
444 | /* | | 444 | /* |
445 | * Free any resources we've allocated during the failed attach | | 445 | * Free any resources we've allocated during the failed attach |
446 | * attempt. Do this in reverse order and fall though. | | 446 | * attempt. Do this in reverse order and fall though. |
447 | */ | | 447 | */ |
448 | fail_5: | | 448 | fail_5: |
449 | for (i = 0; i < FXP_NRFABUFS; i++) { | | 449 | for (i = 0; i < FXP_NRFABUFS; i++) { |
450 | if (sc->sc_rxmaps[i] != NULL) | | 450 | if (sc->sc_rxmaps[i] != NULL) |
451 | bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmaps[i]); | | 451 | bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmaps[i]); |
452 | } | | 452 | } |
453 | fail_4: | | 453 | fail_4: |
454 | for (i = 0; i < FXP_NTXCB; i++) { | | 454 | for (i = 0; i < FXP_NTXCB; i++) { |
455 | if (FXP_DSTX(sc, i)->txs_dmamap != NULL) | | 455 | if (FXP_DSTX(sc, i)->txs_dmamap != NULL) |
456 | bus_dmamap_destroy(sc->sc_dmat, | | 456 | bus_dmamap_destroy(sc->sc_dmat, |
457 | FXP_DSTX(sc, i)->txs_dmamap); | | 457 | FXP_DSTX(sc, i)->txs_dmamap); |
458 | } | | 458 | } |
459 | bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap); | | 459 | bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap); |
460 | fail_3: | | 460 | fail_3: |
461 | bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap); | | 461 | bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap); |
462 | fail_2: | | 462 | fail_2: |
463 | bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data, | | 463 | bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data, |
464 | sizeof(struct fxp_control_data)); | | 464 | sizeof(struct fxp_control_data)); |
465 | fail_1: | | 465 | fail_1: |
466 | bus_dmamem_free(sc->sc_dmat, &seg, rseg); | | 466 | bus_dmamem_free(sc->sc_dmat, &seg, rseg); |
467 | fail_0: | | 467 | fail_0: |
468 | return; | | 468 | return; |
469 | } | | 469 | } |
470 | | | 470 | |
471 | void | | 471 | void |
472 | fxp_mii_initmedia(struct fxp_softc *sc) | | 472 | fxp_mii_initmedia(struct fxp_softc *sc) |
473 | { | | 473 | { |
474 | int flags; | | 474 | int flags; |
475 | | | 475 | |
476 | sc->sc_flags |= FXPF_MII; | | 476 | sc->sc_flags |= FXPF_MII; |
477 | | | 477 | |
478 | sc->sc_mii.mii_ifp = &sc->sc_ethercom.ec_if; | | 478 | sc->sc_mii.mii_ifp = &sc->sc_ethercom.ec_if; |
479 | sc->sc_mii.mii_readreg = fxp_mdi_read; | | 479 | sc->sc_mii.mii_readreg = fxp_mdi_read; |
480 | sc->sc_mii.mii_writereg = fxp_mdi_write; | | 480 | sc->sc_mii.mii_writereg = fxp_mdi_write; |
481 | sc->sc_mii.mii_statchg = fxp_statchg; | | 481 | sc->sc_mii.mii_statchg = fxp_statchg; |
482 | | | 482 | |
483 | sc->sc_ethercom.ec_mii = &sc->sc_mii; | | 483 | sc->sc_ethercom.ec_mii = &sc->sc_mii; |
484 | ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, ether_mediachange, | | 484 | ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, ether_mediachange, |
485 | fxp_mii_mediastatus); | | 485 | fxp_mii_mediastatus); |
486 | | | 486 | |
487 | flags = MIIF_NOISOLATE; | | 487 | flags = MIIF_NOISOLATE; |
488 | if (sc->sc_flags & FXPF_FC) | | 488 | if (sc->sc_flags & FXPF_FC) |
489 | flags |= MIIF_FORCEANEG|MIIF_DOPAUSE; | | 489 | flags |= MIIF_FORCEANEG|MIIF_DOPAUSE; |
490 | /* | | 490 | /* |
491 | * The i82557 wedges if all of its PHYs are isolated! | | 491 | * The i82557 wedges if all of its PHYs are isolated! |
492 | */ | | 492 | */ |
493 | mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, | | 493 | mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, |
494 | MII_OFFSET_ANY, flags); | | 494 | MII_OFFSET_ANY, flags); |
495 | if (LIST_EMPTY(&sc->sc_mii.mii_phys)) { | | 495 | if (LIST_EMPTY(&sc->sc_mii.mii_phys)) { |
496 | ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL); | | 496 | ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL); |
497 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE); | | 497 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE); |
498 | } else | | 498 | } else |
499 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); | | 499 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); |
500 | } | | 500 | } |
501 | | | 501 | |
502 | void | | 502 | void |
503 | fxp_80c24_initmedia(struct fxp_softc *sc) | | 503 | fxp_80c24_initmedia(struct fxp_softc *sc) |
504 | { | | 504 | { |
505 | | | 505 | |
506 | /* | | 506 | /* |
507 | * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter | | 507 | * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter |
508 | * doesn't have a programming interface of any sort. The | | 508 | * doesn't have a programming interface of any sort. The |
509 | * media is sensed automatically based on how the link partner | | 509 | * media is sensed automatically based on how the link partner |
510 | * is configured. This is, in essence, manual configuration. | | 510 | * is configured. This is, in essence, manual configuration. |
511 | */ | | 511 | */ |
512 | aprint_normal_dev(sc->sc_dev, | | 512 | aprint_normal_dev(sc->sc_dev, |
513 | "Seeq 80c24 AutoDUPLEX media interface present\n"); | | 513 | "Seeq 80c24 AutoDUPLEX media interface present\n"); |
514 | ifmedia_init(&sc->sc_mii.mii_media, 0, fxp_80c24_mediachange, | | 514 | ifmedia_init(&sc->sc_mii.mii_media, 0, fxp_80c24_mediachange, |
515 | fxp_80c24_mediastatus); | | 515 | fxp_80c24_mediastatus); |
516 | ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL); | | 516 | ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL); |
517 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL); | | 517 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL); |
518 | } | | 518 | } |
519 | | | 519 | |
520 | /* | | 520 | /* |
521 | * Initialize the interface media. | | 521 | * Initialize the interface media. |
522 | */ | | 522 | */ |
523 | void | | 523 | void |
524 | fxp_get_info(struct fxp_softc *sc, uint8_t *enaddr) | | 524 | fxp_get_info(struct fxp_softc *sc, uint8_t *enaddr) |
525 | { | | 525 | { |
526 | uint16_t data, myea[ETHER_ADDR_LEN / 2]; | | 526 | uint16_t data, myea[ETHER_ADDR_LEN / 2]; |
527 | | | 527 | |
528 | /* | | 528 | /* |
529 | * Reset to a stable state. | | 529 | * Reset to a stable state. |
530 | */ | | 530 | */ |
531 | CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET); | | 531 | CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET); |
532 | DELAY(100); | | 532 | DELAY(100); |
533 | | | 533 | |
534 | sc->sc_eeprom_size = 0; | | 534 | sc->sc_eeprom_size = 0; |
535 | fxp_autosize_eeprom(sc); | | 535 | fxp_autosize_eeprom(sc); |
536 | if (sc->sc_eeprom_size == 0) { | | 536 | if (sc->sc_eeprom_size == 0) { |
537 | aprint_error_dev(sc->sc_dev, "failed to detect EEPROM size\n"); | | 537 | aprint_error_dev(sc->sc_dev, "failed to detect EEPROM size\n"); |
538 | sc->sc_eeprom_size = 6; /* XXX panic here? */ | | 538 | sc->sc_eeprom_size = 6; /* XXX panic here? */ |
539 | } | | 539 | } |
540 | #ifdef DEBUG | | 540 | #ifdef DEBUG |
541 | aprint_debug_dev(sc->sc_dev, "detected %d word EEPROM\n", | | 541 | aprint_debug_dev(sc->sc_dev, "detected %d word EEPROM\n", |
542 | 1 << sc->sc_eeprom_size); | | 542 | 1 << sc->sc_eeprom_size); |
543 | #endif | | 543 | #endif |
544 | | | 544 | |
545 | /* | | 545 | /* |
546 | * Get info about the primary PHY | | 546 | * Get info about the primary PHY |
547 | */ | | 547 | */ |
548 | fxp_read_eeprom(sc, &data, 6, 1); | | 548 | fxp_read_eeprom(sc, &data, 6, 1); |
549 | sc->phy_primary_device = | | 549 | sc->phy_primary_device = |
550 | (data & FXP_PHY_DEVICE_MASK) >> FXP_PHY_DEVICE_SHIFT; | | 550 | (data & FXP_PHY_DEVICE_MASK) >> FXP_PHY_DEVICE_SHIFT; |
551 | | | 551 | |
552 | /* | | 552 | /* |
553 | * Read MAC address. | | 553 | * Read MAC address. |
554 | */ | | 554 | */ |
555 | fxp_read_eeprom(sc, myea, 0, 3); | | 555 | fxp_read_eeprom(sc, myea, 0, 3); |
556 | enaddr[0] = myea[0] & 0xff; | | 556 | enaddr[0] = myea[0] & 0xff; |
557 | enaddr[1] = myea[0] >> 8; | | 557 | enaddr[1] = myea[0] >> 8; |
558 | enaddr[2] = myea[1] & 0xff; | | 558 | enaddr[2] = myea[1] & 0xff; |
559 | enaddr[3] = myea[1] >> 8; | | 559 | enaddr[3] = myea[1] >> 8; |
560 | enaddr[4] = myea[2] & 0xff; | | 560 | enaddr[4] = myea[2] & 0xff; |
561 | enaddr[5] = myea[2] >> 8; | | 561 | enaddr[5] = myea[2] >> 8; |
562 | | | 562 | |
563 | /* | | 563 | /* |
564 | * Systems based on the ICH2/ICH2-M chip from Intel, as well | | 564 | * Systems based on the ICH2/ICH2-M chip from Intel, as well |
565 | * as some i82559 designs, have a defect where the chip can | | 565 | * as some i82559 designs, have a defect where the chip can |
566 | * cause a PCI protocol violation if it receives a CU_RESUME | | 566 | * cause a PCI protocol violation if it receives a CU_RESUME |
567 | * command when it is entering the IDLE state. | | 567 | * command when it is entering the IDLE state. |
568 | * | | 568 | * |
569 | * The work-around is to disable Dynamic Standby Mode, so that | | 569 | * The work-around is to disable Dynamic Standby Mode, so that |
570 | * the chip never deasserts #CLKRUN, and always remains in the | | 570 | * the chip never deasserts #CLKRUN, and always remains in the |
571 | * active state. | | 571 | * active state. |
572 | * | | 572 | * |
573 | * Unfortunately, the only way to disable Dynamic Standby is | | 573 | * Unfortunately, the only way to disable Dynamic Standby is |
574 | * to frob an EEPROM setting and reboot (the EEPROM setting | | 574 | * to frob an EEPROM setting and reboot (the EEPROM setting |
575 | * is only consulted when the PCI bus comes out of reset). | | 575 | * is only consulted when the PCI bus comes out of reset). |
576 | * | | 576 | * |
577 | * See Intel 82801BA/82801BAM Specification Update, Errata #30. | | 577 | * See Intel 82801BA/82801BAM Specification Update, Errata #30. |
578 | */ | | 578 | */ |
579 | if (sc->sc_flags & FXPF_HAS_RESUME_BUG) { | | 579 | if (sc->sc_flags & FXPF_HAS_RESUME_BUG) { |
580 | fxp_read_eeprom(sc, &data, 10, 1); | | 580 | fxp_read_eeprom(sc, &data, 10, 1); |
581 | if (data & 0x02) { /* STB enable */ | | 581 | if (data & 0x02) { /* STB enable */ |
582 | aprint_error_dev(sc->sc_dev, "WARNING: " | | 582 | aprint_error_dev(sc->sc_dev, "WARNING: " |
583 | "Disabling dynamic standby mode in EEPROM " | | 583 | "Disabling dynamic standby mode in EEPROM " |
584 | "to work around a\n"); | | 584 | "to work around a\n"); |
585 | aprint_normal_dev(sc->sc_dev, | | 585 | aprint_normal_dev(sc->sc_dev, |
586 | "WARNING: hardware bug. You must reset " | | 586 | "WARNING: hardware bug. You must reset " |
587 | "the system before using this\n"); | | 587 | "the system before using this\n"); |
588 | aprint_normal_dev(sc->sc_dev, "WARNING: interface.\n"); | | 588 | aprint_normal_dev(sc->sc_dev, "WARNING: interface.\n"); |
589 | data &= ~0x02; | | 589 | data &= ~0x02; |
590 | fxp_write_eeprom(sc, &data, 10, 1); | | 590 | fxp_write_eeprom(sc, &data, 10, 1); |
591 | aprint_normal_dev(sc->sc_dev, "new EEPROM ID: 0x%04x\n", | | 591 | aprint_normal_dev(sc->sc_dev, "new EEPROM ID: 0x%04x\n", |
592 | data); | | 592 | data); |
593 | fxp_eeprom_update_cksum(sc); | | 593 | fxp_eeprom_update_cksum(sc); |
594 | } | | 594 | } |
595 | } | | 595 | } |
596 | | | 596 | |
597 | /* Receiver lock-up workaround detection. (FXPF_RECV_WORKAROUND) */ | | 597 | /* Receiver lock-up workaround detection. (FXPF_RECV_WORKAROUND) */ |
598 | /* Due to false positives we make it conditional on setting link1 */ | | 598 | /* Due to false positives we make it conditional on setting link1 */ |
599 | fxp_read_eeprom(sc, &data, 3, 1); | | 599 | fxp_read_eeprom(sc, &data, 3, 1); |
600 | if ((data & 0x03) != 0x03) { | | 600 | if ((data & 0x03) != 0x03) { |
601 | aprint_verbose_dev(sc->sc_dev, | | 601 | aprint_verbose_dev(sc->sc_dev, |
602 | "May need receiver lock-up workaround\n"); | | 602 | "May need receiver lock-up workaround\n"); |
603 | } | | 603 | } |
604 | } | | 604 | } |
605 | | | 605 | |
/*
 * Bit-bang the low `len' bits of `data' into the serial EEPROM,
 * most-significant bit first: for each bit, present it on EEDI with
 * chip select (EECS) asserted, then pulse the clock (EESK).  The
 * DELAY(40) calls satisfy the EEPROM's timing requirements.
 */
static void
fxp_eeprom_shiftin(struct fxp_softc *sc, int data, int len)
{
	uint16_t reg;
	int x;

	/* Walk a single-bit mask from the field's MSB down to bit 0. */
	for (x = 1 << (len - 1); x != 0; x >>= 1) {
		DELAY(40);
		if (data & x)
			reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
		else
			reg = FXP_EEPROM_EECS;
		/* Set up the data bit, then clock it in with EESK. */
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(40);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
		    reg | FXP_EEPROM_EESK);
		DELAY(40);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
	}
	DELAY(40);
}
627 | | | 627 | |
628 | /* | | 628 | /* |
629 | * Figure out EEPROM size. | | 629 | * Figure out EEPROM size. |
630 | * | | 630 | * |
631 | * 559's can have either 64-word or 256-word EEPROMs, the 558 | | 631 | * 559's can have either 64-word or 256-word EEPROMs, the 558 |
632 | * datasheet only talks about 64-word EEPROMs, and the 557 datasheet | | 632 | * datasheet only talks about 64-word EEPROMs, and the 557 datasheet |
633 | * talks about the existence of 16 to 256 word EEPROMs. | | 633 | * talks about the existence of 16 to 256 word EEPROMs. |
634 | * | | 634 | * |
635 | * The only known sizes are 64 and 256, where the 256 version is used | | 635 | * The only known sizes are 64 and 256, where the 256 version is used |
636 | * by CardBus cards to store CIS information. | | 636 | * by CardBus cards to store CIS information. |
637 | * | | 637 | * |
638 | * The address is shifted in msb-to-lsb, and after the last | | 638 | * The address is shifted in msb-to-lsb, and after the last |
639 | * address-bit the EEPROM is supposed to output a `dummy zero' bit, | | 639 | * address-bit the EEPROM is supposed to output a `dummy zero' bit, |
640 | * after which follows the actual data. We try to detect this zero, by | | 640 | * after which follows the actual data. We try to detect this zero, by |
641 | * probing the data-out bit in the EEPROM control register just after | | 641 | * probing the data-out bit in the EEPROM control register just after |
642 | * having shifted in a bit. If the bit is zero, we assume we've | | 642 | * having shifted in a bit. If the bit is zero, we assume we've |
643 | * shifted enough address bits. The data-out should be tri-state, | | 643 | * shifted enough address bits. The data-out should be tri-state, |
644 | * before this, which should translate to a logical one. | | 644 | * before this, which should translate to a logical one. |
645 | * | | 645 | * |
646 | * Other ways to do this would be to try to read a register with known | | 646 | * Other ways to do this would be to try to read a register with known |
647 | * contents with a varying number of address bits, but no such | | 647 | * contents with a varying number of address bits, but no such |
648 | * register seem to be available. The high bits of register 10 are 01 | | 648 | * register seem to be available. The high bits of register 10 are 01 |
649 | * on the 558 and 559, but apparently not on the 557. | | 649 | * on the 558 and 559, but apparently not on the 557. |
650 | * | | 650 | * |
651 | * The Linux driver computes a checksum on the EEPROM data, but the | | 651 | * The Linux driver computes a checksum on the EEPROM data, but the |
652 | * value of this checksum is not very well documented. | | 652 | * value of this checksum is not very well documented. |
653 | */ | | 653 | */ |
654 | | | 654 | |
655 | void | | 655 | void |
656 | fxp_autosize_eeprom(struct fxp_softc *sc) | | 656 | fxp_autosize_eeprom(struct fxp_softc *sc) |
657 | { | | 657 | { |
658 | int x; | | 658 | int x; |
659 | | | 659 | |
660 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); | | 660 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); |
661 | DELAY(40); | | 661 | DELAY(40); |
662 | | | 662 | |
663 | /* Shift in read opcode. */ | | 663 | /* Shift in read opcode. */ |
664 | fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_READ, 3); | | 664 | fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_READ, 3); |
665 | | | 665 | |
666 | /* | | 666 | /* |
667 | * Shift in address, wait for the dummy zero following a correct | | 667 | * Shift in address, wait for the dummy zero following a correct |
668 | * address shift. | | 668 | * address shift. |
669 | */ | | 669 | */ |
670 | for (x = 1; x <= 8; x++) { | | 670 | for (x = 1; x <= 8; x++) { |
671 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); | | 671 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); |
672 | DELAY(40); | | 672 | DELAY(40); |
673 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, | | 673 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, |
674 | FXP_EEPROM_EECS | FXP_EEPROM_EESK); | | 674 | FXP_EEPROM_EECS | FXP_EEPROM_EESK); |
675 | DELAY(40); | | 675 | DELAY(40); |
676 | if ((CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & | | 676 | if ((CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & |
677 | FXP_EEPROM_EEDO) == 0) | | 677 | FXP_EEPROM_EEDO) == 0) |
678 | break; | | 678 | break; |
679 | DELAY(40); | | 679 | DELAY(40); |
680 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); | | 680 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); |
681 | DELAY(40); | | 681 | DELAY(40); |
682 | } | | 682 | } |
683 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); | | 683 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); |
684 | DELAY(40); | | 684 | DELAY(40); |
685 | if (x != 6 && x != 8) { | | 685 | if (x != 6 && x != 8) { |
686 | #ifdef DEBUG | | 686 | #ifdef DEBUG |
687 | printf("%s: strange EEPROM size (%d)\n", | | 687 | printf("%s: strange EEPROM size (%d)\n", |
688 | device_xname(sc->sc_dev), 1 << x); | | 688 | device_xname(sc->sc_dev), 1 << x); |
689 | #endif | | 689 | #endif |
690 | } else | | 690 | } else |
691 | sc->sc_eeprom_size = x; | | 691 | sc->sc_eeprom_size = x; |
692 | } | | 692 | } |
693 | | | 693 | |
694 | /* | | 694 | /* |
695 | * Read from the serial EEPROM. Basically, you manually shift in | | 695 | * Read from the serial EEPROM. Basically, you manually shift in |
696 | * the read opcode (one bit at a time) and then shift in the address, | | 696 | * the read opcode (one bit at a time) and then shift in the address, |
697 | * and then you shift out the data (all of this one bit at a time). | | 697 | * and then you shift out the data (all of this one bit at a time). |
698 | * The word size is 16 bits, so you have to provide the address for | | 698 | * The word size is 16 bits, so you have to provide the address for |
699 | * every 16 bits of data. | | 699 | * every 16 bits of data. |
700 | */ | | 700 | */ |
701 | void | | 701 | void |
702 | fxp_read_eeprom(struct fxp_softc *sc, uint16_t *data, int offset, int words) | | 702 | fxp_read_eeprom(struct fxp_softc *sc, uint16_t *data, int offset, int words) |
703 | { | | 703 | { |
704 | uint16_t reg; | | 704 | uint16_t reg; |
705 | int i, x; | | 705 | int i, x; |
706 | | | 706 | |
707 | for (i = 0; i < words; i++) { | | 707 | for (i = 0; i < words; i++) { |
708 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); | | 708 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); |
709 | | | 709 | |
710 | /* Shift in read opcode. */ | | 710 | /* Shift in read opcode. */ |
711 | fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_READ, 3); | | 711 | fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_READ, 3); |
712 | | | 712 | |
713 | /* Shift in address. */ | | 713 | /* Shift in address. */ |
714 | fxp_eeprom_shiftin(sc, i + offset, sc->sc_eeprom_size); | | 714 | fxp_eeprom_shiftin(sc, i + offset, sc->sc_eeprom_size); |
715 | | | 715 | |
716 | reg = FXP_EEPROM_EECS; | | 716 | reg = FXP_EEPROM_EECS; |
717 | data[i] = 0; | | 717 | data[i] = 0; |
718 | | | 718 | |
719 | /* Shift out data. */ | | 719 | /* Shift out data. */ |
720 | for (x = 16; x > 0; x--) { | | 720 | for (x = 16; x > 0; x--) { |
721 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, | | 721 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, |
722 | reg | FXP_EEPROM_EESK); | | 722 | reg | FXP_EEPROM_EESK); |
723 | DELAY(40); | | 723 | DELAY(40); |
724 | if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & | | 724 | if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & |
725 | FXP_EEPROM_EEDO) | | 725 | FXP_EEPROM_EEDO) |
726 | data[i] |= (1 << (x - 1)); | | 726 | data[i] |= (1 << (x - 1)); |
727 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); | | 727 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); |
728 | DELAY(40); | | 728 | DELAY(40); |
729 | } | | 729 | } |
730 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); | | 730 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); |
731 | DELAY(40); | | 731 | DELAY(40); |
732 | } | | 732 | } |
733 | } | | 733 | } |
734 | | | 734 | |
735 | /* | | 735 | /* |
736 | * Write data to the serial EEPROM. | | 736 | * Write data to the serial EEPROM. |
737 | */ | | 737 | */ |
738 | void | | 738 | void |
739 | fxp_write_eeprom(struct fxp_softc *sc, uint16_t *data, int offset, int words) | | 739 | fxp_write_eeprom(struct fxp_softc *sc, uint16_t *data, int offset, int words) |
740 | { | | 740 | { |
741 | int i, j; | | 741 | int i, j; |
742 | | | 742 | |
743 | for (i = 0; i < words; i++) { | | 743 | for (i = 0; i < words; i++) { |
744 | /* Erase/write enable. */ | | 744 | /* Erase/write enable. */ |
745 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); | | 745 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); |
746 | fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_ERASE, 3); | | 746 | fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_ERASE, 3); |
747 | fxp_eeprom_shiftin(sc, 0x3 << (sc->sc_eeprom_size - 2), | | 747 | fxp_eeprom_shiftin(sc, 0x3 << (sc->sc_eeprom_size - 2), |
748 | sc->sc_eeprom_size); | | 748 | sc->sc_eeprom_size); |
749 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); | | 749 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); |
750 | DELAY(4); | | 750 | DELAY(4); |
751 | | | 751 | |
752 | /* Shift in write opcode, address, data. */ | | 752 | /* Shift in write opcode, address, data. */ |
753 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); | | 753 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); |
754 | fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_WRITE, 3); | | 754 | fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_WRITE, 3); |
755 | fxp_eeprom_shiftin(sc, i + offset, sc->sc_eeprom_size); | | 755 | fxp_eeprom_shiftin(sc, i + offset, sc->sc_eeprom_size); |
756 | fxp_eeprom_shiftin(sc, data[i], 16); | | 756 | fxp_eeprom_shiftin(sc, data[i], 16); |
757 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); | | 757 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); |
758 | DELAY(4); | | 758 | DELAY(4); |
759 | | | 759 | |
760 | /* Wait for the EEPROM to finish up. */ | | 760 | /* Wait for the EEPROM to finish up. */ |
761 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); | | 761 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); |
762 | DELAY(4); | | 762 | DELAY(4); |
763 | for (j = 0; j < 1000; j++) { | | 763 | for (j = 0; j < 1000; j++) { |
764 | if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & | | 764 | if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & |
765 | FXP_EEPROM_EEDO) | | 765 | FXP_EEPROM_EEDO) |
766 | break; | | 766 | break; |
767 | DELAY(50); | | 767 | DELAY(50); |
768 | } | | 768 | } |
769 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); | | 769 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); |
770 | DELAY(4); | | 770 | DELAY(4); |
771 | | | 771 | |
772 | /* Erase/write disable. */ | | 772 | /* Erase/write disable. */ |
773 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); | | 773 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); |
774 | fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_ERASE, 3); | | 774 | fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_ERASE, 3); |
775 | fxp_eeprom_shiftin(sc, 0, sc->sc_eeprom_size); | | 775 | fxp_eeprom_shiftin(sc, 0, sc->sc_eeprom_size); |
776 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); | | 776 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); |
777 | DELAY(4); | | 777 | DELAY(4); |
778 | } | | 778 | } |
779 | } | | 779 | } |
780 | | | 780 | |
781 | /* | | 781 | /* |
782 | * Update the checksum of the EEPROM. | | 782 | * Update the checksum of the EEPROM. |
783 | */ | | 783 | */ |
784 | void | | 784 | void |
785 | fxp_eeprom_update_cksum(struct fxp_softc *sc) | | 785 | fxp_eeprom_update_cksum(struct fxp_softc *sc) |
786 | { | | 786 | { |
787 | int i; | | 787 | int i; |
788 | uint16_t data, cksum; | | 788 | uint16_t data, cksum; |
789 | | | 789 | |
790 | cksum = 0; | | 790 | cksum = 0; |
791 | for (i = 0; i < (1 << sc->sc_eeprom_size) - 1; i++) { | | 791 | for (i = 0; i < (1 << sc->sc_eeprom_size) - 1; i++) { |
792 | fxp_read_eeprom(sc, &data, i, 1); | | 792 | fxp_read_eeprom(sc, &data, i, 1); |
793 | cksum += data; | | 793 | cksum += data; |
794 | } | | 794 | } |
795 | i = (1 << sc->sc_eeprom_size) - 1; | | 795 | i = (1 << sc->sc_eeprom_size) - 1; |
796 | cksum = 0xbaba - cksum; | | 796 | cksum = 0xbaba - cksum; |
797 | fxp_read_eeprom(sc, &data, i, 1); | | 797 | fxp_read_eeprom(sc, &data, i, 1); |
798 | fxp_write_eeprom(sc, &cksum, i, 1); | | 798 | fxp_write_eeprom(sc, &cksum, i, 1); |
799 | log(LOG_INFO, "%s: EEPROM checksum @ 0x%x: 0x%04x -> 0x%04x\n", | | 799 | log(LOG_INFO, "%s: EEPROM checksum @ 0x%x: 0x%04x -> 0x%04x\n", |
800 | device_xname(sc->sc_dev), i, data, cksum); | | 800 | device_xname(sc->sc_dev), i, data, cksum); |
801 | } | | 801 | } |
802 | | | 802 | |
803 | /* | | 803 | /* |
804 | * Start packet transmission on the interface. | | 804 | * Start packet transmission on the interface. |
805 | */ | | 805 | */ |
806 | void | | 806 | void |
807 | fxp_start(struct ifnet *ifp) | | 807 | fxp_start(struct ifnet *ifp) |
808 | { | | 808 | { |
809 | struct fxp_softc *sc = ifp->if_softc; | | 809 | struct fxp_softc *sc = ifp->if_softc; |
810 | struct mbuf *m0, *m; | | 810 | struct mbuf *m0, *m; |
811 | struct fxp_txdesc *txd; | | 811 | struct fxp_txdesc *txd; |
812 | struct fxp_txsoft *txs; | | 812 | struct fxp_txsoft *txs; |
813 | bus_dmamap_t dmamap; | | 813 | bus_dmamap_t dmamap; |
814 | int error, lasttx, nexttx, opending, seg, nsegs, len; | | 814 | int error, lasttx, nexttx, opending, seg, nsegs, len; |
815 | | | 815 | |
816 | /* | | 816 | /* |
817 | * If we want a re-init, bail out now. | | 817 | * If we want a re-init, bail out now. |
818 | */ | | 818 | */ |
819 | if (sc->sc_flags & FXPF_WANTINIT) { | | 819 | if (sc->sc_flags & FXPF_WANTINIT) { |
820 | ifp->if_flags |= IFF_OACTIVE; | | 820 | ifp->if_flags |= IFF_OACTIVE; |
821 | return; | | 821 | return; |
822 | } | | 822 | } |
823 | | | 823 | |
824 | if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) | | 824 | if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) |
825 | return; | | 825 | return; |
826 | | | 826 | |
827 | /* | | 827 | /* |
828 | * Remember the previous txpending and the current lasttx. | | 828 | * Remember the previous txpending and the current lasttx. |
829 | */ | | 829 | */ |
830 | opending = sc->sc_txpending; | | 830 | opending = sc->sc_txpending; |
831 | lasttx = sc->sc_txlast; | | 831 | lasttx = sc->sc_txlast; |
832 | | | 832 | |
833 | /* | | 833 | /* |
834 | * Loop through the send queue, setting up transmit descriptors | | 834 | * Loop through the send queue, setting up transmit descriptors |
835 | * until we drain the queue, or use up all available transmit | | 835 | * until we drain the queue, or use up all available transmit |
836 | * descriptors. | | 836 | * descriptors. |
837 | */ | | 837 | */ |
838 | for (;;) { | | 838 | for (;;) { |
839 | struct fxp_tbd *tbdp; | | 839 | struct fxp_tbd *tbdp; |
840 | int csum_flags; | | 840 | int csum_flags; |
841 | | | 841 | |
842 | /* | | 842 | /* |
843 | * Grab a packet off the queue. | | 843 | * Grab a packet off the queue. |
844 | */ | | 844 | */ |
845 | IFQ_POLL(&ifp->if_snd, m0); | | 845 | IFQ_POLL(&ifp->if_snd, m0); |
846 | if (m0 == NULL) | | 846 | if (m0 == NULL) |
847 | break; | | 847 | break; |
848 | m = NULL; | | 848 | m = NULL; |
849 | | | 849 | |
850 | if (sc->sc_txpending == FXP_NTXCB - 1) { | | 850 | if (sc->sc_txpending == FXP_NTXCB - 1) { |
851 | FXP_EVCNT_INCR(&sc->sc_ev_txstall); | | 851 | FXP_EVCNT_INCR(&sc->sc_ev_txstall); |
852 | break; | | 852 | break; |
853 | } | | 853 | } |
854 | | | 854 | |
855 | /* | | 855 | /* |
856 | * Get the next available transmit descriptor. | | 856 | * Get the next available transmit descriptor. |
857 | */ | | 857 | */ |
858 | nexttx = FXP_NEXTTX(sc->sc_txlast); | | 858 | nexttx = FXP_NEXTTX(sc->sc_txlast); |
859 | txd = FXP_CDTX(sc, nexttx); | | 859 | txd = FXP_CDTX(sc, nexttx); |
860 | txs = FXP_DSTX(sc, nexttx); | | 860 | txs = FXP_DSTX(sc, nexttx); |
861 | dmamap = txs->txs_dmamap; | | 861 | dmamap = txs->txs_dmamap; |
862 | | | 862 | |
863 | /* | | 863 | /* |
864 | * Load the DMA map. If this fails, the packet either | | 864 | * Load the DMA map. If this fails, the packet either |
865 | * didn't fit in the allotted number of frags, or we were | | 865 | * didn't fit in the allotted number of frags, or we were |
866 | * short on resources. In this case, we'll copy and try | | 866 | * short on resources. In this case, we'll copy and try |
867 | * again. | | 867 | * again. |
868 | */ | | 868 | */ |
869 | if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, | | 869 | if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, |
870 | BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) { | | 870 | BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) { |
871 | MGETHDR(m, M_DONTWAIT, MT_DATA); | | 871 | MGETHDR(m, M_DONTWAIT, MT_DATA); |
872 | if (m == NULL) { | | 872 | if (m == NULL) { |
873 | log(LOG_ERR, "%s: unable to allocate Tx mbuf\n", | | 873 | log(LOG_ERR, "%s: unable to allocate Tx mbuf\n", |
874 | device_xname(sc->sc_dev)); | | 874 | device_xname(sc->sc_dev)); |
875 | break; | | 875 | break; |
876 | } | | 876 | } |
877 | MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner); | | 877 | MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner); |
878 | if (m0->m_pkthdr.len > MHLEN) { | | 878 | if (m0->m_pkthdr.len > MHLEN) { |
879 | MCLGET(m, M_DONTWAIT); | | 879 | MCLGET(m, M_DONTWAIT); |
880 | if ((m->m_flags & M_EXT) == 0) { | | 880 | if ((m->m_flags & M_EXT) == 0) { |
881 | log(LOG_ERR, "%s: unable to allocate " | | 881 | log(LOG_ERR, "%s: unable to allocate " |
882 | "Tx cluster\n", | | 882 | "Tx cluster\n", |
883 | device_xname(sc->sc_dev)); | | 883 | device_xname(sc->sc_dev)); |
884 | m_freem(m); | | 884 | m_freem(m); |
885 | break; | | 885 | break; |
886 | } | | 886 | } |
887 | } | | 887 | } |
888 | m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *)); | | 888 | m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *)); |
889 | m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len; | | 889 | m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len; |
890 | error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, | | 890 | error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, |
891 | m, BUS_DMA_WRITE|BUS_DMA_NOWAIT); | | 891 | m, BUS_DMA_WRITE|BUS_DMA_NOWAIT); |
892 | if (error) { | | 892 | if (error) { |
893 | log(LOG_ERR, "%s: unable to load Tx buffer, " | | 893 | log(LOG_ERR, "%s: unable to load Tx buffer, " |
894 | "error = %d\n", | | 894 | "error = %d\n", |
895 | device_xname(sc->sc_dev), error); | | 895 | device_xname(sc->sc_dev), error); |
896 | break; | | 896 | break; |
897 | } | | 897 | } |
898 | } | | 898 | } |
899 | | | 899 | |
900 | IFQ_DEQUEUE(&ifp->if_snd, m0); | | 900 | IFQ_DEQUEUE(&ifp->if_snd, m0); |
901 | csum_flags = m0->m_pkthdr.csum_flags; | | 901 | csum_flags = m0->m_pkthdr.csum_flags; |
902 | if (m != NULL) { | | 902 | if (m != NULL) { |
903 | m_freem(m0); | | 903 | m_freem(m0); |
904 | m0 = m; | | 904 | m0 = m; |
905 | } | | 905 | } |
906 | | | 906 | |
907 | /* Initialize the fraglist. */ | | 907 | /* Initialize the fraglist. */ |
908 | tbdp = txd->txd_tbd; | | 908 | tbdp = txd->txd_tbd; |
909 | len = m0->m_pkthdr.len; | | 909 | len = m0->m_pkthdr.len; |
910 | nsegs = dmamap->dm_nsegs; | | 910 | nsegs = dmamap->dm_nsegs; |
911 | if (sc->sc_flags & FXPF_EXT_RFA) | | 911 | if (sc->sc_flags & FXPF_EXT_RFA) |
912 | tbdp++; | | 912 | tbdp++; |
913 | for (seg = 0; seg < nsegs; seg++) { | | 913 | for (seg = 0; seg < nsegs; seg++) { |
914 | tbdp[seg].tb_addr = | | 914 | tbdp[seg].tb_addr = |
915 | htole32(dmamap->dm_segs[seg].ds_addr); | | 915 | htole32(dmamap->dm_segs[seg].ds_addr); |
916 | tbdp[seg].tb_size = | | 916 | tbdp[seg].tb_size = |
917 | htole32(dmamap->dm_segs[seg].ds_len); | | 917 | htole32(dmamap->dm_segs[seg].ds_len); |
918 | } | | 918 | } |
919 | if (__predict_false(len <= FXP_IP4CSUMTX_PADLEN && | | 919 | if (__predict_false(len <= FXP_IP4CSUMTX_PADLEN && |
920 | (csum_flags & M_CSUM_IPv4) != 0)) { | | 920 | (csum_flags & M_CSUM_IPv4) != 0)) { |
921 | /* | | 921 | /* |
922 | * Pad short packets to avoid ip4csum-tx bug. | | 922 | * Pad short packets to avoid ip4csum-tx bug. |
923 | * | | 923 | * |
924 | * XXX Should we still consider if such short | | 924 | * XXX Should we still consider if such short |
925 | * (36 bytes or less) packets might already | | 925 | * (36 bytes or less) packets might already |
926 | * occupy FXP_IPCB_NTXSEG (15) fragments here? | | 926 | * occupy FXP_IPCB_NTXSEG (15) fragments here? |
927 | */ | | 927 | */ |
928 | KASSERT(nsegs < FXP_IPCB_NTXSEG); | | 928 | KASSERT(nsegs < FXP_IPCB_NTXSEG); |
929 | nsegs++; | | 929 | nsegs++; |
930 | tbdp[seg].tb_addr = htole32(FXP_CDTXPADADDR(sc)); | | 930 | tbdp[seg].tb_addr = htole32(FXP_CDTXPADADDR(sc)); |
931 | tbdp[seg].tb_size = | | 931 | tbdp[seg].tb_size = |
932 | htole32(FXP_IP4CSUMTX_PADLEN + 1 - len); | | 932 | htole32(FXP_IP4CSUMTX_PADLEN + 1 - len); |
933 | } | | 933 | } |
934 | | | 934 | |
935 | /* Sync the DMA map. */ | | 935 | /* Sync the DMA map. */ |
936 | bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, | | 936 | bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, |
937 | BUS_DMASYNC_PREWRITE); | | 937 | BUS_DMASYNC_PREWRITE); |
938 | | | 938 | |
939 | /* | | 939 | /* |
940 | * Store a pointer to the packet so we can free it later. | | 940 | * Store a pointer to the packet so we can free it later. |
941 | */ | | 941 | */ |
942 | txs->txs_mbuf = m0; | | 942 | txs->txs_mbuf = m0; |
943 | | | 943 | |
944 | /* | | 944 | /* |
945 | * Initialize the transmit descriptor. | | 945 | * Initialize the transmit descriptor. |
946 | */ | | 946 | */ |
947 | /* BIG_ENDIAN: no need to swap to store 0 */ | | 947 | /* BIG_ENDIAN: no need to swap to store 0 */ |
948 | txd->txd_txcb.cb_status = 0; | | 948 | txd->txd_txcb.cb_status = 0; |
949 | txd->txd_txcb.cb_command = | | 949 | txd->txd_txcb.cb_command = |
950 | sc->sc_txcmd | htole16(FXP_CB_COMMAND_SF); | | 950 | sc->sc_txcmd | htole16(FXP_CB_COMMAND_SF); |
951 | txd->txd_txcb.tx_threshold = tx_threshold; | | 951 | txd->txd_txcb.tx_threshold = tx_threshold; |
952 | txd->txd_txcb.tbd_number = nsegs; | | 952 | txd->txd_txcb.tbd_number = nsegs; |
953 | | | 953 | |
954 | KASSERT((csum_flags & (M_CSUM_TCPv6 | M_CSUM_UDPv6)) == 0); | | 954 | KASSERT((csum_flags & (M_CSUM_TCPv6 | M_CSUM_UDPv6)) == 0); |
955 | if (sc->sc_flags & FXPF_EXT_RFA) { | | 955 | if (sc->sc_flags & FXPF_EXT_RFA) { |
956 | struct m_tag *vtag; | | 956 | struct m_tag *vtag; |
957 | struct fxp_ipcb *ipcb; | | 957 | struct fxp_ipcb *ipcb; |
958 | /* | | 958 | /* |
959 | * Deal with TCP/IP checksum offload. Note that | | 959 | * Deal with TCP/IP checksum offload. Note that |
960 | * in order for TCP checksum offload to work, | | 960 | * in order for TCP checksum offload to work, |
961 | * the pseudo header checksum must have already | | 961 | * the pseudo header checksum must have already |
962 | * been computed and stored in the checksum field | | 962 | * been computed and stored in the checksum field |
963 | * in the TCP header. The stack should have | | 963 | * in the TCP header. The stack should have |
964 | * already done this for us. | | 964 | * already done this for us. |
965 | */ | | 965 | */ |
966 | ipcb = &txd->txd_u.txdu_ipcb; | | 966 | ipcb = &txd->txd_u.txdu_ipcb; |
967 | memset(ipcb, 0, sizeof(*ipcb)); | | 967 | memset(ipcb, 0, sizeof(*ipcb)); |
968 | /* | | 968 | /* |
969 | * always do hardware parsing. | | 969 | * always do hardware parsing. |
970 | */ | | 970 | */ |
971 | ipcb->ipcb_ip_activation_high = | | 971 | ipcb->ipcb_ip_activation_high = |
972 | FXP_IPCB_HARDWAREPARSING_ENABLE; | | 972 | FXP_IPCB_HARDWAREPARSING_ENABLE; |
973 | /* | | 973 | /* |
974 | * ip checksum offloading. | | 974 | * ip checksum offloading. |
975 | */ | | 975 | */ |
976 | if (csum_flags & M_CSUM_IPv4) { | | 976 | if (csum_flags & M_CSUM_IPv4) { |
977 | ipcb->ipcb_ip_schedule |= | | 977 | ipcb->ipcb_ip_schedule |= |
978 | FXP_IPCB_IP_CHECKSUM_ENABLE; | | 978 | FXP_IPCB_IP_CHECKSUM_ENABLE; |
979 | } | | 979 | } |
980 | /* | | 980 | /* |
981 | * TCP/UDP checksum offloading. | | 981 | * TCP/UDP checksum offloading. |
982 | */ | | 982 | */ |
983 | if (csum_flags & (M_CSUM_TCPv4 | M_CSUM_UDPv4)) { | | 983 | if (csum_flags & (M_CSUM_TCPv4 | M_CSUM_UDPv4)) { |
984 | ipcb->ipcb_ip_schedule |= | | 984 | ipcb->ipcb_ip_schedule |= |
985 | FXP_IPCB_TCPUDP_CHECKSUM_ENABLE; | | 985 | FXP_IPCB_TCPUDP_CHECKSUM_ENABLE; |
986 | } | | 986 | } |
987 | | | 987 | |
988 | /* | | 988 | /* |
989 | * request VLAN tag insertion if needed. | | 989 | * request VLAN tag insertion if needed. |
990 | */ | | 990 | */ |
991 | vtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0); | | 991 | vtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0); |
992 | if (vtag) { | | 992 | if (vtag) { |
993 | ipcb->ipcb_vlan_id = | | 993 | ipcb->ipcb_vlan_id = |
994 | htobe16(*(u_int *)(vtag + 1)); | | 994 | htobe16(*(u_int *)(vtag + 1)); |
995 | ipcb->ipcb_ip_activation_high |= | | 995 | ipcb->ipcb_ip_activation_high |= |
996 | FXP_IPCB_INSERTVLAN_ENABLE; | | 996 | FXP_IPCB_INSERTVLAN_ENABLE; |
997 | } | | 997 | } |
998 | } else { | | 998 | } else { |
999 | KASSERT((csum_flags & | | 999 | KASSERT((csum_flags & |
1000 | (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) == 0); | | 1000 | (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) == 0); |
1001 | } | | 1001 | } |
1002 | | | 1002 | |
1003 | FXP_CDTXSYNC(sc, nexttx, | | 1003 | FXP_CDTXSYNC(sc, nexttx, |
1004 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); | | 1004 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
1005 | | | 1005 | |
1006 | /* Advance the tx pointer. */ | | 1006 | /* Advance the tx pointer. */ |
1007 | sc->sc_txpending++; | | 1007 | sc->sc_txpending++; |
1008 | sc->sc_txlast = nexttx; | | 1008 | sc->sc_txlast = nexttx; |
1009 | | | 1009 | |
1010 | #if NBPFILTER > 0 | | 1010 | #if NBPFILTER > 0 |
1011 | /* | | 1011 | /* |
1012 | * Pass packet to bpf if there is a listener. | | 1012 | * Pass packet to bpf if there is a listener. |
1013 | */ | | 1013 | */ |
1014 | if (ifp->if_bpf) | | 1014 | if (ifp->if_bpf) |
1015 | bpf_mtap(ifp->if_bpf, m0); | | 1015 | bpf_mtap(ifp->if_bpf, m0); |
1016 | #endif | | 1016 | #endif |
1017 | } | | 1017 | } |
1018 | | | 1018 | |
1019 | if (sc->sc_txpending == FXP_NTXCB - 1) { | | 1019 | if (sc->sc_txpending == FXP_NTXCB - 1) { |
1020 | /* No more slots; notify upper layer. */ | | 1020 | /* No more slots; notify upper layer. */ |
1021 | ifp->if_flags |= IFF_OACTIVE; | | 1021 | ifp->if_flags |= IFF_OACTIVE; |
1022 | } | | 1022 | } |
1023 | | | 1023 | |
1024 | if (sc->sc_txpending != opending) { | | 1024 | if (sc->sc_txpending != opending) { |
1025 | /* | | 1025 | /* |
1026 | * We enqueued packets. If the transmitter was idle, | | 1026 | * We enqueued packets. If the transmitter was idle, |
1027 | * reset the txdirty pointer. | | 1027 | * reset the txdirty pointer. |
1028 | */ | | 1028 | */ |
1029 | if (opending == 0) | | 1029 | if (opending == 0) |
1030 | sc->sc_txdirty = FXP_NEXTTX(lasttx); | | 1030 | sc->sc_txdirty = FXP_NEXTTX(lasttx); |
1031 | | | 1031 | |
1032 | /* | | 1032 | /* |
1033 | * Cause the chip to interrupt and suspend command | | 1033 | * Cause the chip to interrupt and suspend command |
1034 | * processing once the last packet we've enqueued | | 1034 | * processing once the last packet we've enqueued |
1035 | * has been transmitted. | | 1035 | * has been transmitted. |
1036 | * | | 1036 | * |
1037 | * To avoid a race between updating status bits | | 1037 | * To avoid a race between updating status bits |
1038 | * by the fxp chip and clearing command bits | | 1038 | * by the fxp chip and clearing command bits |
1039 | * by this function on machines which don't have | | 1039 | * by this function on machines which don't have |
1040 | * atomic methods to clear/set bits in memory | | 1040 | * atomic methods to clear/set bits in memory |
1041 | * smaller than 32bits (both cb_status and cb_command | | 1041 | * smaller than 32bits (both cb_status and cb_command |
1042 | * members are uint16_t and in the same 32bit word), | | 1042 | * members are uint16_t and in the same 32bit word), |
1043 | * we have to prepare a dummy TX descriptor which has | | 1043 | * we have to prepare a dummy TX descriptor which has |
1044 | * NOP command and just causes a TX completion interrupt. | | 1044 | * NOP command and just causes a TX completion interrupt. |
1045 | */ | | 1045 | */ |
1046 | sc->sc_txpending++; | | 1046 | sc->sc_txpending++; |
1047 | sc->sc_txlast = FXP_NEXTTX(sc->sc_txlast); | | 1047 | sc->sc_txlast = FXP_NEXTTX(sc->sc_txlast); |
1048 | txd = FXP_CDTX(sc, sc->sc_txlast); | | 1048 | txd = FXP_CDTX(sc, sc->sc_txlast); |
1049 | /* BIG_ENDIAN: no need to swap to store 0 */ | | 1049 | /* BIG_ENDIAN: no need to swap to store 0 */ |
1050 | txd->txd_txcb.cb_status = 0; | | 1050 | txd->txd_txcb.cb_status = 0; |
1051 | txd->txd_txcb.cb_command = htole16(FXP_CB_COMMAND_NOP | | | 1051 | txd->txd_txcb.cb_command = htole16(FXP_CB_COMMAND_NOP | |
1052 | FXP_CB_COMMAND_I | FXP_CB_COMMAND_S); | | 1052 | FXP_CB_COMMAND_I | FXP_CB_COMMAND_S); |
1053 | FXP_CDTXSYNC(sc, sc->sc_txlast, | | 1053 | FXP_CDTXSYNC(sc, sc->sc_txlast, |
1054 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); | | 1054 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
1055 | | | 1055 | |
1056 | /* | | 1056 | /* |
1057 | * The entire packet chain is set up. Clear the suspend bit | | 1057 | * The entire packet chain is set up. Clear the suspend bit |
1058 | * on the command prior to the first packet we set up. | | 1058 | * on the command prior to the first packet we set up. |
1059 | */ | | 1059 | */ |
1060 | FXP_CDTXSYNC(sc, lasttx, | | 1060 | FXP_CDTXSYNC(sc, lasttx, |
1061 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); | | 1061 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); |
1062 | FXP_CDTX(sc, lasttx)->txd_txcb.cb_command &= | | 1062 | FXP_CDTX(sc, lasttx)->txd_txcb.cb_command &= |
1063 | htole16(~FXP_CB_COMMAND_S); | | 1063 | htole16(~FXP_CB_COMMAND_S); |
1064 | FXP_CDTXSYNC(sc, lasttx, | | 1064 | FXP_CDTXSYNC(sc, lasttx, |
1065 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); | | 1065 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
1066 | | | 1066 | |
1067 | /* | | 1067 | /* |
1068 | * Issue a Resume command in case the chip was suspended. | | 1068 | * Issue a Resume command in case the chip was suspended. |
1069 | */ | | 1069 | */ |
1070 | fxp_scb_wait(sc); | | 1070 | fxp_scb_wait(sc); |
1071 | fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME); | | 1071 | fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME); |
1072 | | | 1072 | |
1073 | /* Set a watchdog timer in case the chip flakes out. */ | | 1073 | /* Set a watchdog timer in case the chip flakes out. */ |
1074 | ifp->if_timer = 5; | | 1074 | ifp->if_timer = 5; |
1075 | } | | 1075 | } |
1076 | } | | 1076 | } |
1077 | | | 1077 | |
/*
 * fxp_intr:
 *
 *	Interrupt service routine.  Acknowledges and services all
 *	pending SCB status conditions (receive, transmit completion,
 *	receiver no-resource) until the status/ack register reads zero.
 *
 *	arg is the softc (struct fxp_softc *) registered at interrupt
 *	establish time.  Returns non-zero iff the interrupt was claimed
 *	by this device (shared-interrupt convention).
 */
int
fxp_intr(void *arg)
{
	struct fxp_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_dmamap_t rxmap;
	int claimed = 0, rnr;
	uint8_t statack;

	/* Device detached/powered down or not enabled: not ours. */
	if (!device_is_active(sc->sc_dev) || sc->sc_enabled == 0)
		return (0);
	/*
	 * If the interface isn't running, don't try to
	 * service the interrupt.. just ack it and bail.
	 */
	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK);
		if (statack) {
			claimed = 1;
			CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack);
		}
		return (claimed);
	}

	/* Loop until no status bits remain asserted. */
	while ((statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK)) != 0) {
		claimed = 1;

		/*
		 * First ACK all the interrupts in this pass.
		 */
		CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack);

		/*
		 * Process receiver interrupts. If a no-resource (RNR)
		 * condition exists, get whatever packets we can and
		 * re-start the receiver.
		 */
		rnr = (statack & (FXP_SCB_STATACK_RNR | FXP_SCB_STATACK_SWI)) ?
		    1 : 0;
		if (statack & (FXP_SCB_STATACK_FR | FXP_SCB_STATACK_RNR |
		    FXP_SCB_STATACK_SWI)) {
			FXP_EVCNT_INCR(&sc->sc_ev_rxintr);
			/* fxp_rxintr() may itself detect an RNR condition. */
			rnr |= fxp_rxintr(sc);
		}

		/*
		 * Free any finished transmit mbuf chains.
		 */
		if (statack & (FXP_SCB_STATACK_CXTNO|FXP_SCB_STATACK_CNA)) {
			FXP_EVCNT_INCR(&sc->sc_ev_txintr);
			fxp_txintr(sc);

			/*
			 * Try to get more packets going.
			 */
			fxp_start(ifp);

			if (sc->sc_txpending == 0) {
				/*
				 * Tell them that they can re-init now.
				 * (A waiter set FXPF_WANTINIT and sleeps
				 * on the softc until the TX ring drains.)
				 */
				if (sc->sc_flags & FXPF_WANTINIT)
					wakeup(sc);
			}
		}

		if (rnr) {
			/*
			 * Receiver ran out of resources: abort the RU,
			 * then point it at the first RFA in our software
			 * receive queue and start it again.  Each SCB
			 * command must wait for the previous one to be
			 * accepted (fxp_scb_wait).
			 */
			fxp_scb_wait(sc);
			fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_ABORT);
			rxmap = M_GETCTX(sc->sc_rxq.ifq_head, bus_dmamap_t);
			fxp_scb_wait(sc);
			CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
			    rxmap->dm_segs[0].ds_addr +
			    RFA_ALIGNMENT_FUDGE);
			fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);
		}
	}

#if NRND > 0
	/*
	 * NOTE(review): statack is always 0 here (the while loop only
	 * exits on a zero read), so only the event timing contributes
	 * entropy, not the status bits.
	 */
	if (claimed)
		rnd_add_uint32(&sc->rnd_source, statack);
#endif
	return (claimed);
}
1165 | | | 1165 | |
1166 | /* | | 1166 | /* |
1167 | * Handle transmit completion interrupts. | | 1167 | * Handle transmit completion interrupts. |
1168 | */ | | 1168 | */ |
1169 | void | | 1169 | void |
1170 | fxp_txintr(struct fxp_softc *sc) | | 1170 | fxp_txintr(struct fxp_softc *sc) |
1171 | { | | 1171 | { |
1172 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; | | 1172 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
1173 | struct fxp_txdesc *txd; | | 1173 | struct fxp_txdesc *txd; |
1174 | struct fxp_txsoft *txs; | | 1174 | struct fxp_txsoft *txs; |
1175 | int i; | | 1175 | int i; |
1176 | uint16_t txstat; | | 1176 | uint16_t txstat; |
1177 | | | 1177 | |
1178 | ifp->if_flags &= ~IFF_OACTIVE; | | 1178 | ifp->if_flags &= ~IFF_OACTIVE; |
1179 | for (i = sc->sc_txdirty; sc->sc_txpending != 0; | | 1179 | for (i = sc->sc_txdirty; sc->sc_txpending != 0; |
1180 | i = FXP_NEXTTX(i), sc->sc_txpending--) { | | 1180 | i = FXP_NEXTTX(i), sc->sc_txpending--) { |
1181 | txd = FXP_CDTX(sc, i); | | 1181 | txd = FXP_CDTX(sc, i); |
1182 | txs = FXP_DSTX(sc, i); | | 1182 | txs = FXP_DSTX(sc, i); |
1183 | | | 1183 | |
1184 | FXP_CDTXSYNC(sc, i, | | 1184 | FXP_CDTXSYNC(sc, i, |
1185 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); | | 1185 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); |
1186 | | | 1186 | |
1187 | /* skip dummy NOP TX descriptor */ | | 1187 | /* skip dummy NOP TX descriptor */ |
1188 | if ((le16toh(txd->txd_txcb.cb_command) & FXP_CB_COMMAND_CMD) | | 1188 | if ((le16toh(txd->txd_txcb.cb_command) & FXP_CB_COMMAND_CMD) |
1189 | == FXP_CB_COMMAND_NOP) | | 1189 | == FXP_CB_COMMAND_NOP) |
1190 | continue; | | 1190 | continue; |
1191 | | | 1191 | |
1192 | txstat = le16toh(txd->txd_txcb.cb_status); | | 1192 | txstat = le16toh(txd->txd_txcb.cb_status); |
1193 | | | 1193 | |
1194 | if ((txstat & FXP_CB_STATUS_C) == 0) | | 1194 | if ((txstat & FXP_CB_STATUS_C) == 0) |
1195 | break; | | 1195 | break; |
1196 | | | 1196 | |
1197 | bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap, | | 1197 | bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap, |
1198 | 0, txs->txs_dmamap->dm_mapsize, | | 1198 | 0, txs->txs_dmamap->dm_mapsize, |
1199 | BUS_DMASYNC_POSTWRITE); | | 1199 | BUS_DMASYNC_POSTWRITE); |
1200 | bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); | | 1200 | bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); |
1201 | m_freem(txs->txs_mbuf); | | 1201 | m_freem(txs->txs_mbuf); |
1202 | txs->txs_mbuf = NULL; | | 1202 | txs->txs_mbuf = NULL; |
1203 | } | | 1203 | } |
1204 | | | 1204 | |
1205 | /* Update the dirty transmit buffer pointer. */ | | 1205 | /* Update the dirty transmit buffer pointer. */ |
1206 | sc->sc_txdirty = i; | | 1206 | sc->sc_txdirty = i; |
1207 | | | 1207 | |
1208 | /* | | 1208 | /* |
1209 | * Cancel the watchdog timer if there are no pending | | 1209 | * Cancel the watchdog timer if there are no pending |
1210 | * transmissions. | | 1210 | * transmissions. |
1211 | */ | | 1211 | */ |
1212 | if (sc->sc_txpending == 0) | | 1212 | if (sc->sc_txpending == 0) |
1213 | ifp->if_timer = 0; | | 1213 | ifp->if_timer = 0; |
1214 | } | | 1214 | } |
1215 | | | 1215 | |
/*
 * fxp_rx_hwcksum: check status of H/W offloading for received packets.
 *
 *	Translate the hardware's receive checksum status into mbuf
 *	M_CSUM_* flags/data.  Two hardware flavors are handled:
 *
 *	  - FXPF_EXT_RFA: the chip parses the packet and reports
 *	    per-protocol valid bits in the extended RFA (rfa).
 *	  - FXPF_82559_RXCSUM: the chip appends a raw 16-bit checksum
 *	    after the packet data; we parse the headers ourselves and
 *	    publish it as M_CSUM_DATA for the stack to finish.
 *
 *	m is the received packet (headers assumed contiguous in the
 *	first mbuf -- TODO confirm against the RX buffer setup), rfa
 *	the receive frame descriptor, len the frame length in bytes
 *	(already excluding the appended checksum in the 82559 case,
 *	per the adjustment in fxp_rxintr()).
 */

void
fxp_rx_hwcksum(struct fxp_softc *sc, struct mbuf *m, const struct fxp_rfa *rfa,
    u_int len)
{
	uint32_t csum_data;
	int csum_flags;

	/*
	 * check H/W Checksumming.
	 */

	csum_flags = 0;
	csum_data = 0;

	if ((sc->sc_flags & FXPF_EXT_RFA) != 0) {
		uint8_t rxparsestat;
		uint8_t csum_stat;

		csum_stat = rfa->cksum_stat;
		/* XXX rxparsestat is read but currently unused. */
		rxparsestat = rfa->rx_parse_stat;
		/* Chip did not parse this frame: report nothing. */
		if ((rfa->rfa_status & htole16(FXP_RFA_STATUS_PARSE)) == 0)
			goto out;

		/* IPv4 header checksum verdict. */
		if (csum_stat & FXP_RFDX_CS_IP_CSUM_BIT_VALID) {
			csum_flags = M_CSUM_IPv4;
			if ((csum_stat & FXP_RFDX_CS_IP_CSUM_VALID) == 0)
				csum_flags |= M_CSUM_IPv4_BAD;
		}

		/*
		 * TCP/UDP verdict; hardware does not say which protocol,
		 * so both flags are set.
		 */
		if (csum_stat & FXP_RFDX_CS_TCPUDP_CSUM_BIT_VALID) {
			csum_flags |= (M_CSUM_TCPv4|M_CSUM_UDPv4); /* XXX */
			if ((csum_stat & FXP_RFDX_CS_TCPUDP_CSUM_VALID) == 0)
				csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

	} else if ((sc->sc_flags & FXPF_82559_RXCSUM) != 0) {
		struct ifnet *ifp = &sc->sc_ethercom.ec_if;
		struct ether_header *eh;
		struct ip *ip;
		struct udphdr *uh;
		u_int hlen, pktlen;

		/* Must at least hold an Ethernet + IPv4 header. */
		if (len < ETHER_HDR_LEN + sizeof(struct ip))
			goto out;
		pktlen = len - ETHER_HDR_LEN;
		eh = mtod(m, struct ether_header *);
		/* Only plain IPv4 frames (no VLAN tag handling here). */
		if (ntohs(eh->ether_type) != ETHERTYPE_IP)
			goto out;
		ip = (struct ip *)((uint8_t *)eh + ETHER_HDR_LEN);
		if (ip->ip_v != IPVERSION)
			goto out;

		hlen = ip->ip_hl << 2;
		if (hlen < sizeof(struct ip))
			goto out;

		/*
		 * Bail if too short, has random trailing garbage, truncated,
		 * fragment, or has ethernet pad.
		 */
		if (ntohs(ip->ip_len) < hlen ||
		    ntohs(ip->ip_len) != pktlen ||
		    (ntohs(ip->ip_off) & (IP_MF | IP_OFFMASK)) != 0)
			goto out;

		switch (ip->ip_p) {
		case IPPROTO_TCP:
			/* Only if the interface has TCPv4 rx-csum enabled. */
			if ((ifp->if_csum_flags_rx & M_CSUM_TCPv4) == 0 ||
			    pktlen < (hlen + sizeof(struct tcphdr)))
				goto out;
			csum_flags =
			    M_CSUM_TCPv4 | M_CSUM_DATA | M_CSUM_NO_PSEUDOHDR;
			break;
		case IPPROTO_UDP:
			if ((ifp->if_csum_flags_rx & M_CSUM_UDPv4) == 0 ||
			    pktlen < (hlen + sizeof(struct udphdr)))
				goto out;
			uh = (struct udphdr *)((uint8_t *)ip + hlen);
			if (uh->uh_sum == 0)
				goto out;	/* no checksum */
			csum_flags =
			    M_CSUM_UDPv4 | M_CSUM_DATA | M_CSUM_NO_PSEUDOHDR;
			break;
		default:
			goto out;
		}

		/* Extract computed checksum (appended after the packet). */
		csum_data = be16dec(mtod(m, uint8_t *) + len);

		/*
		 * The computed checksum includes IP headers,
		 * so we have to deduct them.
		 */
#if 0
		/*
		 * But in TCP/UDP layer we can assume the IP header is valid,
		 * i.e. a sum of the whole IP header should be 0xffff,
		 * so we don't have to bother to deduct it.
		 * (Deliberately disabled; kept for reference.)
		 */
		if (hlen > 0) {
			uint32_t hsum;
			const uint16_t *iphdr;
			hsum = 0;
			iphdr = (uint16_t *)ip;

			/* One's-complement sum of the IP header words. */
			while (hlen > 1) {
				hsum += ntohs(*iphdr++);
				hlen -= sizeof(uint16_t);
			}
			while (hsum >> 16)
				hsum = (hsum >> 16) + (hsum & 0xffff);

			/* Subtract header sum in one's-complement form. */
			csum_data += (uint16_t)~hsum;

			while (csum_data >> 16)
				csum_data =
				    (csum_data >> 16) + (csum_data & 0xffff);
		}
#endif
	}
 out:
	m->m_pkthdr.csum_flags = csum_flags;
	m->m_pkthdr.csum_data = csum_data;
}
1345 | | | 1345 | |
1346 | /* | | 1346 | /* |
1347 | * Handle receive interrupts. | | 1347 | * Handle receive interrupts. |
1348 | */ | | 1348 | */ |
1349 | int | | 1349 | int |
1350 | fxp_rxintr(struct fxp_softc *sc) | | 1350 | fxp_rxintr(struct fxp_softc *sc) |
1351 | { | | 1351 | { |
1352 | struct ethercom *ec = &sc->sc_ethercom; | | 1352 | struct ethercom *ec = &sc->sc_ethercom; |
1353 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; | | 1353 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
1354 | struct mbuf *m, *m0; | | 1354 | struct mbuf *m, *m0; |
1355 | bus_dmamap_t rxmap; | | 1355 | bus_dmamap_t rxmap; |
1356 | struct fxp_rfa *rfa; | | 1356 | struct fxp_rfa *rfa; |
1357 | int rnr; | | 1357 | int rnr; |
1358 | uint16_t len, rxstat; | | 1358 | uint16_t len, rxstat; |
1359 | | | 1359 | |
1360 | rnr = 0; | | 1360 | rnr = 0; |
1361 | | | 1361 | |
1362 | for (;;) { | | 1362 | for (;;) { |
1363 | m = sc->sc_rxq.ifq_head; | | 1363 | m = sc->sc_rxq.ifq_head; |
1364 | rfa = FXP_MTORFA(m); | | 1364 | rfa = FXP_MTORFA(m); |
1365 | rxmap = M_GETCTX(m, bus_dmamap_t); | | 1365 | rxmap = M_GETCTX(m, bus_dmamap_t); |
1366 | | | 1366 | |
1367 | FXP_RFASYNC(sc, m, | | 1367 | FXP_RFASYNC(sc, m, |
1368 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); | | 1368 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); |
1369 | | | 1369 | |
1370 | rxstat = le16toh(rfa->rfa_status); | | 1370 | rxstat = le16toh(rfa->rfa_status); |
1371 | | | 1371 | |
1372 | if ((rxstat & FXP_RFA_STATUS_RNR) != 0) | | 1372 | if ((rxstat & FXP_RFA_STATUS_RNR) != 0) |
1373 | rnr = 1; | | 1373 | rnr = 1; |
1374 | | | 1374 | |
1375 | if ((rxstat & FXP_RFA_STATUS_C) == 0) { | | 1375 | if ((rxstat & FXP_RFA_STATUS_C) == 0) { |
1376 | /* | | 1376 | /* |
1377 | * We have processed all of the | | 1377 | * We have processed all of the |
1378 | * receive buffers. | | 1378 | * receive buffers. |
1379 | */ | | 1379 | */ |
1380 | FXP_RFASYNC(sc, m, BUS_DMASYNC_PREREAD); | | 1380 | FXP_RFASYNC(sc, m, BUS_DMASYNC_PREREAD); |
1381 | return rnr; | | 1381 | return rnr; |
1382 | } | | 1382 | } |
1383 | | | 1383 | |
1384 | IF_DEQUEUE(&sc->sc_rxq, m); | | 1384 | IF_DEQUEUE(&sc->sc_rxq, m); |
1385 | | | 1385 | |
1386 | FXP_RXBUFSYNC(sc, m, BUS_DMASYNC_POSTREAD); | | 1386 | FXP_RXBUFSYNC(sc, m, BUS_DMASYNC_POSTREAD); |
1387 | | | 1387 | |
1388 | len = le16toh(rfa->actual_size) & | | 1388 | len = le16toh(rfa->actual_size) & |
1389 | (m->m_ext.ext_size - 1); | | 1389 | (m->m_ext.ext_size - 1); |
1390 | if ((sc->sc_flags & FXPF_82559_RXCSUM) != 0) { | | 1390 | if ((sc->sc_flags & FXPF_82559_RXCSUM) != 0) { |
1391 | /* Adjust for appended checksum bytes. */ | | 1391 | /* Adjust for appended checksum bytes. */ |
1392 | len -= sizeof(uint16_t); | | 1392 | len -= sizeof(uint16_t); |
1393 | } | | 1393 | } |
1394 | | | 1394 | |
1395 | if (len < sizeof(struct ether_header)) { | | 1395 | if (len < sizeof(struct ether_header)) { |
1396 | /* | | 1396 | /* |
1397 | * Runt packet; drop it now. | | 1397 | * Runt packet; drop it now. |
1398 | */ | | 1398 | */ |
1399 | FXP_INIT_RFABUF(sc, m); | | 1399 | FXP_INIT_RFABUF(sc, m); |
1400 | continue; | | 1400 | continue; |
1401 | } | | 1401 | } |
1402 | | | 1402 | |
1403 | /* | | 1403 | /* |
1404 | * If support for 802.1Q VLAN sized frames is | | 1404 | * If support for 802.1Q VLAN sized frames is |
1405 | * enabled, we need to do some additional error | | 1405 | * enabled, we need to do some additional error |
1406 | * checking (as we are saving bad frames, in | | 1406 | * checking (as we are saving bad frames, in |
1407 | * order to receive the larger ones). | | 1407 | * order to receive the larger ones). |
1408 | */ | | 1408 | */ |
1409 | if ((ec->ec_capenable & ETHERCAP_VLAN_MTU) != 0 && | | 1409 | if ((ec->ec_capenable & ETHERCAP_VLAN_MTU) != 0 && |
1410 | (rxstat & (FXP_RFA_STATUS_OVERRUN| | | 1410 | (rxstat & (FXP_RFA_STATUS_OVERRUN| |
1411 | FXP_RFA_STATUS_RNR| | | 1411 | FXP_RFA_STATUS_RNR| |
1412 | FXP_RFA_STATUS_ALIGN| | | 1412 | FXP_RFA_STATUS_ALIGN| |
1413 | FXP_RFA_STATUS_CRC)) != 0) { | | 1413 | FXP_RFA_STATUS_CRC)) != 0) { |
1414 | FXP_INIT_RFABUF(sc, m); | | 1414 | FXP_INIT_RFABUF(sc, m); |
1415 | continue; | | 1415 | continue; |
1416 | } | | 1416 | } |
1417 | | | 1417 | |
1418 | /* | | 1418 | /* |
1419 | * check VLAN tag stripping. | | 1419 | * check VLAN tag stripping. |
1420 | */ | | 1420 | */ |
1421 | if ((sc->sc_flags & FXPF_EXT_RFA) != 0 && | | 1421 | if ((sc->sc_flags & FXPF_EXT_RFA) != 0 && |
1422 | (rfa->rfa_status & htole16(FXP_RFA_STATUS_VLAN)) != 0) { | | 1422 | (rfa->rfa_status & htole16(FXP_RFA_STATUS_VLAN)) != 0) { |
1423 | struct m_tag *vtag; | | 1423 | struct m_tag *vtag; |
1424 | | | 1424 | |
1425 | vtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int), | | 1425 | vtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int), |
1426 | M_NOWAIT); | | 1426 | M_NOWAIT); |
1427 | if (vtag == NULL) | | 1427 | if (vtag == NULL) |
1428 | goto dropit; | | 1428 | goto dropit; |
1429 | *(u_int *)(vtag + 1) = be16toh(rfa->vlan_id); | | 1429 | *(u_int *)(vtag + 1) = be16toh(rfa->vlan_id); |
1430 | m_tag_prepend(m, vtag); | | 1430 | m_tag_prepend(m, vtag); |
1431 | } | | 1431 | } |
1432 | | | 1432 | |
1433 | /* Do checksum checking. */ | | 1433 | /* Do checksum checking. */ |
1434 | if ((ifp->if_csum_flags_rx & (M_CSUM_TCPv4|M_CSUM_UDPv4)) != 0) | | 1434 | if ((ifp->if_csum_flags_rx & (M_CSUM_TCPv4|M_CSUM_UDPv4)) != 0) |
1435 | fxp_rx_hwcksum(sc, m, rfa, len); | | 1435 | fxp_rx_hwcksum(sc, m, rfa, len); |
1436 | | | 1436 | |
1437 | /* | | 1437 | /* |
1438 | * If the packet is small enough to fit in a | | 1438 | * If the packet is small enough to fit in a |
1439 | * single header mbuf, allocate one and copy | | 1439 | * single header mbuf, allocate one and copy |
1440 | * the data into it. This greatly reduces | | 1440 | * the data into it. This greatly reduces |
1441 | * memory consumption when we receive lots | | 1441 | * memory consumption when we receive lots |
1442 | * of small packets. | | 1442 | * of small packets. |
1443 | * | | 1443 | * |
1444 | * Otherwise, we add a new buffer to the receive | | 1444 | * Otherwise, we add a new buffer to the receive |
1445 | * chain. If this fails, we drop the packet and | | 1445 | * chain. If this fails, we drop the packet and |
1446 | * recycle the old buffer. | | 1446 | * recycle the old buffer. |
1447 | */ | | 1447 | */ |
1448 | if (fxp_copy_small != 0 && len <= MHLEN) { | | 1448 | if (fxp_copy_small != 0 && len <= MHLEN) { |
1449 | MGETHDR(m0, M_DONTWAIT, MT_DATA); | | 1449 | MGETHDR(m0, M_DONTWAIT, MT_DATA); |
1450 | if (m0 == NULL) | | 1450 | if (m0 == NULL) |
1451 | goto dropit; | | 1451 | goto dropit; |
1452 | MCLAIM(m0, &sc->sc_ethercom.ec_rx_mowner); | | 1452 | MCLAIM(m0, &sc->sc_ethercom.ec_rx_mowner); |
1453 | memcpy(mtod(m0, void *), | | 1453 | memcpy(mtod(m0, void *), |
1454 | mtod(m, void *), len); | | 1454 | mtod(m, void *), len); |
1455 | m0->m_pkthdr.csum_flags = m->m_pkthdr.csum_flags; | | 1455 | m0->m_pkthdr.csum_flags = m->m_pkthdr.csum_flags; |
1456 | m0->m_pkthdr.csum_data = m->m_pkthdr.csum_data; | | 1456 | m0->m_pkthdr.csum_data = m->m_pkthdr.csum_data; |
1457 | FXP_INIT_RFABUF(sc, m); | | 1457 | FXP_INIT_RFABUF(sc, m); |
1458 | m = m0; | | 1458 | m = m0; |
1459 | } else { | | 1459 | } else { |
1460 | if (fxp_add_rfabuf(sc, rxmap, 1) != 0) { | | 1460 | if (fxp_add_rfabuf(sc, rxmap, 1) != 0) { |
1461 | dropit: | | 1461 | dropit: |
1462 | ifp->if_ierrors++; | | 1462 | ifp->if_ierrors++; |
1463 | FXP_INIT_RFABUF(sc, m); | | 1463 | FXP_INIT_RFABUF(sc, m); |
1464 | continue; | | 1464 | continue; |
1465 | } | | 1465 | } |
1466 | } | | 1466 | } |
1467 | | | 1467 | |
1468 | m->m_pkthdr.rcvif = ifp; | | 1468 | m->m_pkthdr.rcvif = ifp; |
1469 | m->m_pkthdr.len = m->m_len = len; | | 1469 | m->m_pkthdr.len = m->m_len = len; |
1470 | | | 1470 | |
1471 | #if NBPFILTER > 0 | | 1471 | #if NBPFILTER > 0 |
1472 | /* | | 1472 | /* |
1473 | * Pass this up to any BPF listeners, but only | | 1473 | * Pass this up to any BPF listeners, but only |
1474 | * pass it up the stack if it's for us. | | 1474 | * pass it up the stack if it's for us. |
1475 | */ | | 1475 | */ |
1476 | if (ifp->if_bpf) | | 1476 | if (ifp->if_bpf) |
1477 | bpf_mtap(ifp->if_bpf, m); | | 1477 | bpf_mtap(ifp->if_bpf, m); |
1478 | #endif | | 1478 | #endif |
1479 | | | 1479 | |
1480 | /* Pass it on. */ | | 1480 | /* Pass it on. */ |
1481 | (*ifp->if_input)(ifp, m); | | 1481 | (*ifp->if_input)(ifp, m); |
1482 | } | | 1482 | } |
1483 | } | | 1483 | } |
1484 | | | 1484 | |
1485 | /* | | 1485 | /* |
1486 | * Update packet in/out/collision statistics. The i82557 doesn't | | 1486 | * Update packet in/out/collision statistics. The i82557 doesn't |
1487 | * allow you to access these counters without doing a fairly | | 1487 | * allow you to access these counters without doing a fairly |
1488 | * expensive DMA to get _all_ of the statistics it maintains, so | | 1488 | * expensive DMA to get _all_ of the statistics it maintains, so |
1489 | * we do this operation here only once per second. The statistics | | 1489 | * we do this operation here only once per second. The statistics |
1490 | * counters in the kernel are updated from the previous dump-stats | | 1490 | * counters in the kernel are updated from the previous dump-stats |
1491 | * DMA and then a new dump-stats DMA is started. The on-chip | | 1491 | * DMA and then a new dump-stats DMA is started. The on-chip |
1492 | * counters are zeroed when the DMA completes. If we can't start | | 1492 | * counters are zeroed when the DMA completes. If we can't start |
1493 | * the DMA immediately, we don't wait - we just prepare to read | | 1493 | * the DMA immediately, we don't wait - we just prepare to read |
1494 | * them again next time. | | 1494 | * them again next time. |
1495 | */ | | 1495 | */ |
/*
 * Once-per-second timer: harvest the previous dump-stats DMA,
 * kick off the next one, detect a wedged receiver, and tick the MII.
 */
void
fxp_tick(void *arg)
{
	struct fxp_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct fxp_stats *sp = &sc->sc_control_data->fcd_stats;
	int s;

	/* Device may be in the middle of detach/suspend; do nothing. */
	if (!device_is_active(sc->sc_dev))
		return;

	s = splnet();

	/* Pull in the statistics the chip DMA'd since the last tick. */
	FXP_CDSTATSSYNC(sc, BUS_DMASYNC_POSTREAD);

	ifp->if_opackets += le32toh(sp->tx_good);
	ifp->if_collisions += le32toh(sp->tx_total_collisions);
	if (sp->rx_good) {
		ifp->if_ipackets += le32toh(sp->rx_good);
		sc->sc_rxidle = 0;
	} else if (sc->sc_flags & FXPF_RECV_WORKAROUND) {
		/* No packets this second; count toward the lockup test. */
		sc->sc_rxidle++;
	}
	ifp->if_ierrors +=
	    le32toh(sp->rx_crc_errors) +
	    le32toh(sp->rx_alignment_errors) +
	    le32toh(sp->rx_rnr_errors) +
	    le32toh(sp->rx_overrun_errors);
	/*
	 * If any transmit underruns occurred, bump up the transmit
	 * threshold by another 512 bytes (64 * 8).
	 */
	if (sp->tx_underruns) {
		ifp->if_oerrors += le32toh(sp->tx_underruns);
		if (tx_threshold < 192)
			tx_threshold += 64;
	}
#ifdef FXP_EVENT_COUNTERS
	if (sc->sc_flags & FXPF_FC) {
		sc->sc_ev_txpause.ev_count += sp->tx_pauseframes;
		sc->sc_ev_rxpause.ev_count += sp->rx_pauseframes;
	}
#endif

	/*
	 * If we haven't received any packets in FXP_MAX_RX_IDLE seconds,
	 * then assume the receiver has locked up and attempt to clear
	 * the condition by reprogramming the multicast filter (actually,
	 * resetting the interface). This is a work-around for a bug in
	 * the 82557 where the receiver locks up if it gets certain types
	 * of garbage in the synchronization bits prior to the packet header.
	 * This bug is supposed to only occur in 10Mbps mode, but has been
	 * seen to occur in 100Mbps mode as well (perhaps due to a 10/100
	 * speed transition).
	 */
	if (sc->sc_rxidle > FXP_MAX_RX_IDLE) {
		(void) fxp_init(ifp);
		splx(s);
		return;
	}
	/*
	 * If there is no pending command, start another stats
	 * dump. Otherwise punt for now.
	 */
	if (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) == 0) {
		/*
		 * Start another stats dump.  DUMPRESET zeroes the
		 * on-chip counters once the DMA completes.
		 */
		FXP_CDSTATSSYNC(sc, BUS_DMASYNC_PREREAD);
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMPRESET);
	} else {
		/*
		 * A previous command is still waiting to be accepted.
		 * Just zero our copy of the stats and wait for the
		 * next timer event to update them.
		 */
		/* BIG_ENDIAN: no swap required to store 0 */
		sp->tx_good = 0;
		sp->tx_underruns = 0;
		sp->tx_total_collisions = 0;

		sp->rx_good = 0;
		sp->rx_crc_errors = 0;
		sp->rx_alignment_errors = 0;
		sp->rx_rnr_errors = 0;
		sp->rx_overrun_errors = 0;
		if (sc->sc_flags & FXPF_FC) {
			sp->tx_pauseframes = 0;
			sp->rx_pauseframes = 0;
		}
	}

	if (sc->sc_flags & FXPF_MII) {
		/* Tick the MII clock. */
		mii_tick(&sc->sc_mii);
	}

	splx(s);

	/*
	 * Schedule another timeout one second from now.
	 */
	callout_reset(&sc->sc_callout, hz, fxp_tick, sc);
}
1600 | | | 1600 | |
1601 | /* | | 1601 | /* |
1602 | * Drain the receive queue. | | 1602 | * Drain the receive queue. |
1603 | */ | | 1603 | */ |
1604 | void | | 1604 | void |
1605 | fxp_rxdrain(struct fxp_softc *sc) | | 1605 | fxp_rxdrain(struct fxp_softc *sc) |
1606 | { | | 1606 | { |
1607 | bus_dmamap_t rxmap; | | 1607 | bus_dmamap_t rxmap; |
1608 | struct mbuf *m; | | 1608 | struct mbuf *m; |
1609 | | | 1609 | |
1610 | for (;;) { | | 1610 | for (;;) { |
1611 | IF_DEQUEUE(&sc->sc_rxq, m); | | 1611 | IF_DEQUEUE(&sc->sc_rxq, m); |
1612 | if (m == NULL) | | 1612 | if (m == NULL) |
1613 | break; | | 1613 | break; |
1614 | rxmap = M_GETCTX(m, bus_dmamap_t); | | 1614 | rxmap = M_GETCTX(m, bus_dmamap_t); |
1615 | bus_dmamap_unload(sc->sc_dmat, rxmap); | | 1615 | bus_dmamap_unload(sc->sc_dmat, rxmap); |
1616 | FXP_RXMAP_PUT(sc, rxmap); | | 1616 | FXP_RXMAP_PUT(sc, rxmap); |
1617 | m_freem(m); | | 1617 | m_freem(m); |
1618 | } | | 1618 | } |
1619 | } | | 1619 | } |
1620 | | | 1620 | |
1621 | /* | | 1621 | /* |
1622 | * Stop the interface. Cancels the statistics updater and resets | | 1622 | * Stop the interface. Cancels the statistics updater and resets |
1623 | * the interface. | | 1623 | * the interface. |
1624 | */ | | 1624 | */ |
/*
 * Stop the interface: mark it down, cancel the stats timer, issue a
 * software reset, and release all pending transmit buffers.  If
 * 'disable' is set, also drain the receive ring and power down.
 */
void
fxp_stop(struct ifnet *ifp, int disable)
{
	struct fxp_softc *sc = ifp->if_softc;
	struct fxp_txsoft *txs;
	int i;

	/*
	 * Turn down interface (done early to avoid bad interactions
	 * between panics, shutdown hooks, and the watchdog timer)
	 */
	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/*
	 * Cancel stats updater.
	 */
	callout_stop(&sc->sc_callout);
	if (sc->sc_flags & FXPF_MII) {
		/* Down the MII. */
		mii_down(&sc->sc_mii);
	}

	/*
	 * Issue software reset. This unloads any microcode that
	 * might already be loaded.
	 */
	sc->sc_flags &= ~FXPF_UCODE_LOADED;
	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SOFTWARE_RESET);
	DELAY(50);

	/*
	 * Release any xmit buffers.  Safe only after the reset above has
	 * quiesced the chip's DMA engine.
	 */
	for (i = 0; i < FXP_NTXCB; i++) {
		txs = FXP_DSTX(sc, i);
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
	}
	sc->sc_txpending = 0;

	if (disable) {
		fxp_rxdrain(sc);
		fxp_disable(sc);
	}

}
1675 | | | 1675 | |
1676 | /* | | 1676 | /* |
1677 | * Watchdog/transmission transmit timeout handler. Called when a | | 1677 | * Watchdog/transmission transmit timeout handler. Called when a |
1678 | * transmission is started on the interface, but no interrupt is | | 1678 | * transmission is started on the interface, but no interrupt is |
1679 | * received before the timeout. This usually indicates that the | | 1679 | * received before the timeout. This usually indicates that the |
1680 | * card has wedged for some reason. | | 1680 | * card has wedged for some reason. |
1681 | */ | | 1681 | */ |
1682 | void | | 1682 | void |
1683 | fxp_watchdog(struct ifnet *ifp) | | 1683 | fxp_watchdog(struct ifnet *ifp) |
1684 | { | | 1684 | { |
1685 | struct fxp_softc *sc = ifp->if_softc; | | 1685 | struct fxp_softc *sc = ifp->if_softc; |
1686 | | | 1686 | |
1687 | log(LOG_ERR, "%s: device timeout\n", device_xname(sc->sc_dev)); | | 1687 | log(LOG_ERR, "%s: device timeout\n", device_xname(sc->sc_dev)); |
1688 | ifp->if_oerrors++; | | 1688 | ifp->if_oerrors++; |
1689 | | | 1689 | |
1690 | (void) fxp_init(ifp); | | 1690 | (void) fxp_init(ifp); |
1691 | } | | 1691 | } |
1692 | | | 1692 | |
1693 | /* | | 1693 | /* |
1694 | * Initialize the interface. Must be called at splnet(). | | 1694 | * Initialize the interface. Must be called at splnet(). |
1695 | */ | | 1695 | */ |
1696 | int | | 1696 | int |
1697 | fxp_init(struct ifnet *ifp) | | 1697 | fxp_init(struct ifnet *ifp) |
1698 | { | | 1698 | { |
1699 | struct fxp_softc *sc = ifp->if_softc; | | 1699 | struct fxp_softc *sc = ifp->if_softc; |
1700 | struct fxp_cb_config *cbp; | | 1700 | struct fxp_cb_config *cbp; |
1701 | struct fxp_cb_ias *cb_ias; | | 1701 | struct fxp_cb_ias *cb_ias; |
1702 | struct fxp_txdesc *txd; | | 1702 | struct fxp_txdesc *txd; |
1703 | bus_dmamap_t rxmap; | | 1703 | bus_dmamap_t rxmap; |
1704 | int i, prm, save_bf, lrxen, vlan_drop, allm, error = 0; | | 1704 | int i, prm, save_bf, lrxen, vlan_drop, allm, error = 0; |
1705 | uint16_t status; | | 1705 | uint16_t status; |
1706 | | | 1706 | |
1707 | if ((error = fxp_enable(sc)) != 0) | | 1707 | if ((error = fxp_enable(sc)) != 0) |
1708 | goto out; | | 1708 | goto out; |
1709 | | | 1709 | |
1710 | /* | | 1710 | /* |
1711 | * Cancel any pending I/O | | 1711 | * Cancel any pending I/O |
1712 | */ | | 1712 | */ |
1713 | fxp_stop(ifp, 0); | | 1713 | fxp_stop(ifp, 0); |
1714 | | | 1714 | |
1715 | /* | | 1715 | /* |
1716 | * XXX just setting sc_flags to 0 here clears any FXPF_MII | | 1716 | * XXX just setting sc_flags to 0 here clears any FXPF_MII |
1717 | * flag, and this prevents the MII from detaching resulting in | | 1717 | * flag, and this prevents the MII from detaching resulting in |
1718 | * a panic. The flags field should perhaps be split in runtime | | 1718 | * a panic. The flags field should perhaps be split in runtime |
1719 | * flags and more static information. For now, just clear the | | 1719 | * flags and more static information. For now, just clear the |
1720 | * only other flag set. | | 1720 | * only other flag set. |
1721 | */ | | 1721 | */ |
1722 | | | 1722 | |
1723 | sc->sc_flags &= ~FXPF_WANTINIT; | | 1723 | sc->sc_flags &= ~FXPF_WANTINIT; |
1724 | | | 1724 | |
1725 | /* | | 1725 | /* |
1726 | * Initialize base of CBL and RFA memory. Loading with zero | | 1726 | * Initialize base of CBL and RFA memory. Loading with zero |
1727 | * sets it up for regular linear addressing. | | 1727 | * sets it up for regular linear addressing. |
1728 | */ | | 1728 | */ |
1729 | fxp_scb_wait(sc); | | 1729 | fxp_scb_wait(sc); |
1730 | CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0); | | 1730 | CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0); |
1731 | fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_BASE); | | 1731 | fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_BASE); |
1732 | | | 1732 | |
1733 | fxp_scb_wait(sc); | | 1733 | fxp_scb_wait(sc); |
1734 | fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_BASE); | | 1734 | fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_BASE); |
1735 | | | 1735 | |
1736 | /* | | 1736 | /* |
1737 | * Initialize the multicast filter. Do this now, since we might | | 1737 | * Initialize the multicast filter. Do this now, since we might |
1738 | * have to setup the config block differently. | | 1738 | * have to setup the config block differently. |
1739 | */ | | 1739 | */ |
1740 | fxp_mc_setup(sc); | | 1740 | fxp_mc_setup(sc); |
1741 | | | 1741 | |
1742 | prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0; | | 1742 | prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0; |
1743 | allm = (ifp->if_flags & IFF_ALLMULTI) ? 1 : 0; | | 1743 | allm = (ifp->if_flags & IFF_ALLMULTI) ? 1 : 0; |
1744 | | | 1744 | |
1745 | /* | | 1745 | /* |
1746 | * In order to support receiving 802.1Q VLAN frames, we have to | | 1746 | * In order to support receiving 802.1Q VLAN frames, we have to |
1747 | * enable "save bad frames", since they are 4 bytes larger than | | 1747 | * enable "save bad frames", since they are 4 bytes larger than |
1748 | * the normal Ethernet maximum frame length. On i82558 and later, | | 1748 | * the normal Ethernet maximum frame length. On i82558 and later, |
1749 | * we have a better mechanism for this. | | 1749 | * we have a better mechanism for this. |
1750 | */ | | 1750 | */ |
1751 | save_bf = 0; | | 1751 | save_bf = 0; |
1752 | lrxen = 0; | | 1752 | lrxen = 0; |
1753 | vlan_drop = 0; | | 1753 | vlan_drop = 0; |
1754 | if (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) { | | 1754 | if (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) { |
1755 | if (sc->sc_rev < FXP_REV_82558_A4) | | 1755 | if (sc->sc_rev < FXP_REV_82558_A4) |
1756 | save_bf = 1; | | 1756 | save_bf = 1; |
1757 | else | | 1757 | else |
1758 | lrxen = 1; | | 1758 | lrxen = 1; |
1759 | if (sc->sc_rev >= FXP_REV_82550) | | 1759 | if (sc->sc_rev >= FXP_REV_82550) |
1760 | vlan_drop = 1; | | 1760 | vlan_drop = 1; |
1761 | } | | 1761 | } |
1762 | | | 1762 | |
1763 | /* | | 1763 | /* |
1764 | * Initialize base of dump-stats buffer. | | 1764 | * Initialize base of dump-stats buffer. |
1765 | */ | | 1765 | */ |
1766 | fxp_scb_wait(sc); | | 1766 | fxp_scb_wait(sc); |
1767 | CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, | | 1767 | CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, |
1768 | sc->sc_cddma + FXP_CDSTATSOFF); | | 1768 | sc->sc_cddma + FXP_CDSTATSOFF); |
1769 | FXP_CDSTATSSYNC(sc, BUS_DMASYNC_PREREAD); | | 1769 | FXP_CDSTATSSYNC(sc, BUS_DMASYNC_PREREAD); |
1770 | fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMP_ADR); | | 1770 | fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMP_ADR); |
1771 | | | 1771 | |
1772 | cbp = &sc->sc_control_data->fcd_configcb; | | 1772 | cbp = &sc->sc_control_data->fcd_configcb; |
1773 | memset(cbp, 0, sizeof(struct fxp_cb_config)); | | 1773 | memset(cbp, 0, sizeof(struct fxp_cb_config)); |
1774 | | | 1774 | |
1775 | /* | | 1775 | /* |
1776 | * Load microcode for this controller. | | 1776 | * Load microcode for this controller. |
1777 | */ | | 1777 | */ |
1778 | fxp_load_ucode(sc); | | 1778 | fxp_load_ucode(sc); |
1779 | | | 1779 | |
1780 | if ((sc->sc_ethercom.ec_if.if_flags & IFF_LINK1)) | | 1780 | if ((sc->sc_ethercom.ec_if.if_flags & IFF_LINK1)) |
1781 | sc->sc_flags |= FXPF_RECV_WORKAROUND; | | 1781 | sc->sc_flags |= FXPF_RECV_WORKAROUND; |
1782 | else | | 1782 | else |
1783 | sc->sc_flags &= ~FXPF_RECV_WORKAROUND; | | 1783 | sc->sc_flags &= ~FXPF_RECV_WORKAROUND; |
1784 | | | 1784 | |
1785 | /* | | 1785 | /* |
1786 | * This copy is kind of disgusting, but there are a bunch of must be | | 1786 | * This copy is kind of disgusting, but there are a bunch of must be |
1787 | * zero and must be one bits in this structure and this is the easiest | | 1787 | * zero and must be one bits in this structure and this is the easiest |
1788 | * way to initialize them all to proper values. | | 1788 | * way to initialize them all to proper values. |
1789 | */ | | 1789 | */ |
1790 | memcpy(cbp, fxp_cb_config_template, sizeof(fxp_cb_config_template)); | | 1790 | memcpy(cbp, fxp_cb_config_template, sizeof(fxp_cb_config_template)); |
1791 | | | 1791 | |
1792 | /* BIG_ENDIAN: no need to swap to store 0 */ | | 1792 | /* BIG_ENDIAN: no need to swap to store 0 */ |
1793 | cbp->cb_status = 0; | | 1793 | cbp->cb_status = 0; |
1794 | cbp->cb_command = htole16(FXP_CB_COMMAND_CONFIG | | | 1794 | cbp->cb_command = htole16(FXP_CB_COMMAND_CONFIG | |
1795 | FXP_CB_COMMAND_EL); | | 1795 | FXP_CB_COMMAND_EL); |
1796 | /* BIG_ENDIAN: no need to swap to store 0xffffffff */ | | 1796 | /* BIG_ENDIAN: no need to swap to store 0xffffffff */ |
1797 | cbp->link_addr = 0xffffffff; /* (no) next command */ | | 1797 | cbp->link_addr = 0xffffffff; /* (no) next command */ |
1798 | /* bytes in config block */ | | 1798 | /* bytes in config block */ |
1799 | cbp->byte_count = (sc->sc_flags & FXPF_EXT_RFA) ? | | 1799 | cbp->byte_count = (sc->sc_flags & FXPF_EXT_RFA) ? |
1800 | FXP_EXT_CONFIG_LEN : FXP_CONFIG_LEN; | | 1800 | FXP_EXT_CONFIG_LEN : FXP_CONFIG_LEN; |
1801 | cbp->rx_fifo_limit = 8; /* rx fifo threshold (32 bytes) */ | | 1801 | cbp->rx_fifo_limit = 8; /* rx fifo threshold (32 bytes) */ |
1802 | cbp->tx_fifo_limit = 0; /* tx fifo threshold (0 bytes) */ | | 1802 | cbp->tx_fifo_limit = 0; /* tx fifo threshold (0 bytes) */ |
1803 | cbp->adaptive_ifs = 0; /* (no) adaptive interframe spacing */ | | 1803 | cbp->adaptive_ifs = 0; /* (no) adaptive interframe spacing */ |
1804 | cbp->mwi_enable = (sc->sc_flags & FXPF_MWI) ? 1 : 0; | | 1804 | cbp->mwi_enable = (sc->sc_flags & FXPF_MWI) ? 1 : 0; |
1805 | cbp->type_enable = 0; /* actually reserved */ | | 1805 | cbp->type_enable = 0; /* actually reserved */ |
1806 | cbp->read_align_en = (sc->sc_flags & FXPF_READ_ALIGN) ? 1 : 0; | | 1806 | cbp->read_align_en = (sc->sc_flags & FXPF_READ_ALIGN) ? 1 : 0; |
1807 | cbp->end_wr_on_cl = (sc->sc_flags & FXPF_WRITE_ALIGN) ? 1 : 0; | | 1807 | cbp->end_wr_on_cl = (sc->sc_flags & FXPF_WRITE_ALIGN) ? 1 : 0; |
1808 | cbp->rx_dma_bytecount = 0; /* (no) rx DMA max */ | | 1808 | cbp->rx_dma_bytecount = 0; /* (no) rx DMA max */ |
1809 | cbp->tx_dma_bytecount = 0; /* (no) tx DMA max */ | | 1809 | cbp->tx_dma_bytecount = 0; /* (no) tx DMA max */ |
1810 | cbp->dma_mbce = 0; /* (disable) dma max counters */ | | 1810 | cbp->dma_mbce = 0; /* (disable) dma max counters */ |
1811 | cbp->late_scb = 0; /* (don't) defer SCB update */ | | 1811 | cbp->late_scb = 0; /* (don't) defer SCB update */ |
1812 | cbp->tno_int_or_tco_en =0; /* (disable) tx not okay interrupt */ | | 1812 | cbp->tno_int_or_tco_en =0; /* (disable) tx not okay interrupt */ |
1813 | cbp->ci_int = 1; /* interrupt on CU idle */ | | 1813 | cbp->ci_int = 1; /* interrupt on CU idle */ |
1814 | cbp->ext_txcb_dis = (sc->sc_flags & FXPF_EXT_TXCB) ? 0 : 1; | | 1814 | cbp->ext_txcb_dis = (sc->sc_flags & FXPF_EXT_TXCB) ? 0 : 1; |
1815 | cbp->ext_stats_dis = 1; /* disable extended counters */ | | 1815 | cbp->ext_stats_dis = 1; /* disable extended counters */ |
1816 | cbp->keep_overrun_rx = 0; /* don't pass overrun frames to host */ | | 1816 | cbp->keep_overrun_rx = 0; /* don't pass overrun frames to host */ |
1817 | cbp->save_bf = save_bf;/* save bad frames */ | | 1817 | cbp->save_bf = save_bf;/* save bad frames */ |
1818 | cbp->disc_short_rx = !prm; /* discard short packets */ | | 1818 | cbp->disc_short_rx = !prm; /* discard short packets */ |
1819 | cbp->underrun_retry = 1; /* retry mode (1) on DMA underrun */ | | 1819 | cbp->underrun_retry = 1; /* retry mode (1) on DMA underrun */ |
1820 | cbp->ext_rfa = (sc->sc_flags & FXPF_EXT_RFA) ? 1 : 0; | | 1820 | cbp->ext_rfa = (sc->sc_flags & FXPF_EXT_RFA) ? 1 : 0; |
1821 | cbp->two_frames = 0; /* do not limit FIFO to 2 frames */ | | 1821 | cbp->two_frames = 0; /* do not limit FIFO to 2 frames */ |
1822 | cbp->dyn_tbd = 0; /* (no) dynamic TBD mode */ | | 1822 | cbp->dyn_tbd = 0; /* (no) dynamic TBD mode */ |
1823 | /* interface mode */ | | 1823 | /* interface mode */ |
1824 | cbp->mediatype = (sc->sc_flags & FXPF_MII) ? 1 : 0; | | 1824 | cbp->mediatype = (sc->sc_flags & FXPF_MII) ? 1 : 0; |
1825 | cbp->csma_dis = 0; /* (don't) disable link */ | | 1825 | cbp->csma_dis = 0; /* (don't) disable link */ |
1826 | cbp->tcp_udp_cksum = (sc->sc_flags & FXPF_82559_RXCSUM) ? 1 : 0; | | 1826 | cbp->tcp_udp_cksum = (sc->sc_flags & FXPF_82559_RXCSUM) ? 1 : 0; |
1827 | /* (don't) enable RX checksum */ | | 1827 | /* (don't) enable RX checksum */ |
1828 | cbp->vlan_tco = 0; /* (don't) enable vlan wakeup */ | | 1828 | cbp->vlan_tco = 0; /* (don't) enable vlan wakeup */ |
1829 | cbp->link_wake_en = 0; /* (don't) assert PME# on link change */ | | 1829 | cbp->link_wake_en = 0; /* (don't) assert PME# on link change */ |
1830 | cbp->arp_wake_en = 0; /* (don't) assert PME# on arp */ | | 1830 | cbp->arp_wake_en = 0; /* (don't) assert PME# on arp */ |
1831 | cbp->mc_wake_en = 0; /* (don't) assert PME# on mcmatch */ | | 1831 | cbp->mc_wake_en = 0; /* (don't) assert PME# on mcmatch */ |
1832 | cbp->nsai = 1; /* (don't) disable source addr insert */ | | 1832 | cbp->nsai = 1; /* (don't) disable source addr insert */ |
1833 | cbp->preamble_length = 2; /* (7 byte) preamble */ | | 1833 | cbp->preamble_length = 2; /* (7 byte) preamble */ |
1834 | cbp->loopback = 0; /* (don't) loopback */ | | 1834 | cbp->loopback = 0; /* (don't) loopback */ |
1835 | cbp->linear_priority = 0; /* (normal CSMA/CD operation) */ | | 1835 | cbp->linear_priority = 0; /* (normal CSMA/CD operation) */ |
1836 | cbp->linear_pri_mode = 0; /* (wait after xmit only) */ | | 1836 | cbp->linear_pri_mode = 0; /* (wait after xmit only) */ |
1837 | cbp->interfrm_spacing = 6; /* (96 bits of) interframe spacing */ | | 1837 | cbp->interfrm_spacing = 6; /* (96 bits of) interframe spacing */ |
1838 | cbp->promiscuous = prm; /* promiscuous mode */ | | 1838 | cbp->promiscuous = prm; /* promiscuous mode */ |
1839 | cbp->bcast_disable = 0; /* (don't) disable broadcasts */ | | 1839 | cbp->bcast_disable = 0; /* (don't) disable broadcasts */ |
1840 | cbp->wait_after_win = 0; /* (don't) enable modified backoff alg*/ | | 1840 | cbp->wait_after_win = 0; /* (don't) enable modified backoff alg*/ |
1841 | cbp->ignore_ul = 0; /* consider U/L bit in IA matching */ | | 1841 | cbp->ignore_ul = 0; /* consider U/L bit in IA matching */ |
1842 | cbp->crc16_en = 0; /* (don't) enable crc-16 algorithm */ | | 1842 | cbp->crc16_en = 0; /* (don't) enable crc-16 algorithm */ |
1843 | cbp->crscdt = (sc->sc_flags & FXPF_MII) ? 0 : 1; | | 1843 | cbp->crscdt = (sc->sc_flags & FXPF_MII) ? 0 : 1; |
1844 | cbp->stripping = !prm; /* truncate rx packet to byte count */ | | 1844 | cbp->stripping = !prm; /* truncate rx packet to byte count */ |
1845 | cbp->padding = 1; /* (do) pad short tx packets */ | | 1845 | cbp->padding = 1; /* (do) pad short tx packets */ |
1846 | cbp->rcv_crc_xfer = 0; /* (don't) xfer CRC to host */ | | 1846 | cbp->rcv_crc_xfer = 0; /* (don't) xfer CRC to host */ |
1847 | cbp->long_rx_en = lrxen; /* long packet receive enable */ | | 1847 | cbp->long_rx_en = lrxen; /* long packet receive enable */ |
1848 | cbp->ia_wake_en = 0; /* (don't) wake up on address match */ | | 1848 | cbp->ia_wake_en = 0; /* (don't) wake up on address match */ |
1849 | cbp->magic_pkt_dis = 0; /* (don't) disable magic packet */ | | 1849 | cbp->magic_pkt_dis = 0; /* (don't) disable magic packet */ |
1850 | /* must set wake_en in PMCSR also */ | | 1850 | /* must set wake_en in PMCSR also */ |
1851 | cbp->force_fdx = 0; /* (don't) force full duplex */ | | 1851 | cbp->force_fdx = 0; /* (don't) force full duplex */ |
1852 | cbp->fdx_pin_en = 1; /* (enable) FDX# pin */ | | 1852 | cbp->fdx_pin_en = 1; /* (enable) FDX# pin */ |
1853 | cbp->multi_ia = 0; /* (don't) accept multiple IAs */ | | 1853 | cbp->multi_ia = 0; /* (don't) accept multiple IAs */ |
1854 | cbp->mc_all = allm; /* accept all multicasts */ | | 1854 | cbp->mc_all = allm; /* accept all multicasts */ |
1855 | cbp->ext_rx_mode = (sc->sc_flags & FXPF_EXT_RFA) ? 1 : 0; | | 1855 | cbp->ext_rx_mode = (sc->sc_flags & FXPF_EXT_RFA) ? 1 : 0; |
1856 | cbp->vlan_drop_en = vlan_drop; | | 1856 | cbp->vlan_drop_en = vlan_drop; |
1857 | | | 1857 | |
1858 | if (!(sc->sc_flags & FXPF_FC)) { | | 1858 | if (!(sc->sc_flags & FXPF_FC)) { |
1859 | /* | | 1859 | /* |
1860 | * The i82557 has no hardware flow control, the values | | 1860 | * The i82557 has no hardware flow control, the values |
1861 | * here are the defaults for the chip. | | 1861 | * here are the defaults for the chip. |
1862 | */ | | 1862 | */ |
1863 | cbp->fc_delay_lsb = 0; | | 1863 | cbp->fc_delay_lsb = 0; |
1864 | cbp->fc_delay_msb = 0x40; | | 1864 | cbp->fc_delay_msb = 0x40; |
1865 | cbp->pri_fc_thresh = 3; | | 1865 | cbp->pri_fc_thresh = 3; |
1866 | cbp->tx_fc_dis = 0; | | 1866 | cbp->tx_fc_dis = 0; |
1867 | cbp->rx_fc_restop = 0; | | 1867 | cbp->rx_fc_restop = 0; |
1868 | cbp->rx_fc_restart = 0; | | 1868 | cbp->rx_fc_restart = 0; |
1869 | cbp->fc_filter = 0; | | 1869 | cbp->fc_filter = 0; |
1870 | cbp->pri_fc_loc = 1; | | 1870 | cbp->pri_fc_loc = 1; |
1871 | } else { | | 1871 | } else { |
1872 | cbp->fc_delay_lsb = 0x1f; | | 1872 | cbp->fc_delay_lsb = 0x1f; |
1873 | cbp->fc_delay_msb = 0x01; | | 1873 | cbp->fc_delay_msb = 0x01; |
1874 | cbp->pri_fc_thresh = 3; | | 1874 | cbp->pri_fc_thresh = 3; |
1875 | cbp->tx_fc_dis = 0; /* enable transmit FC */ | | 1875 | cbp->tx_fc_dis = 0; /* enable transmit FC */ |
1876 | cbp->rx_fc_restop = 1; /* enable FC restop frames */ | | 1876 | cbp->rx_fc_restop = 1; /* enable FC restop frames */ |
1877 | cbp->rx_fc_restart = 1; /* enable FC restart frames */ | | 1877 | cbp->rx_fc_restart = 1; /* enable FC restart frames */ |
1878 | cbp->fc_filter = !prm; /* drop FC frames to host */ | | 1878 | cbp->fc_filter = !prm; /* drop FC frames to host */ |
1879 | cbp->pri_fc_loc = 1; /* FC pri location (byte31) */ | | 1879 | cbp->pri_fc_loc = 1; /* FC pri location (byte31) */ |
1880 | cbp->ext_stats_dis = 0; /* enable extended stats */ | | 1880 | cbp->ext_stats_dis = 0; /* enable extended stats */ |
1881 | } | | 1881 | } |
1882 | | | 1882 | |
1883 | FXP_CDCONFIGSYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); | | 1883 | FXP_CDCONFIGSYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
1884 | | | 1884 | |
1885 | /* | | 1885 | /* |
1886 | * Start the config command/DMA. | | 1886 | * Start the config command/DMA. |
1887 | */ | | 1887 | */ |
1888 | fxp_scb_wait(sc); | | 1888 | fxp_scb_wait(sc); |
1889 | CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->sc_cddma + FXP_CDCONFIGOFF); | | 1889 | CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->sc_cddma + FXP_CDCONFIGOFF); |
1890 | fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); | | 1890 | fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); |
1891 | /* ...and wait for it to complete. */ | | 1891 | /* ...and wait for it to complete. */ |
1892 | for (i = 1000; i > 0; i--) { | | 1892 | for (i = 1000; i > 0; i--) { |
1893 | FXP_CDCONFIGSYNC(sc, | | 1893 | FXP_CDCONFIGSYNC(sc, |
1894 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); | | 1894 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); |
1895 | status = le16toh(cbp->cb_status); | | 1895 | status = le16toh(cbp->cb_status); |
1896 | FXP_CDCONFIGSYNC(sc, BUS_DMASYNC_PREREAD); | | 1896 | FXP_CDCONFIGSYNC(sc, BUS_DMASYNC_PREREAD); |
1897 | if ((status & FXP_CB_STATUS_C) != 0) | | 1897 | if ((status & FXP_CB_STATUS_C) != 0) |
1898 | break; | | 1898 | break; |
1899 | DELAY(1); | | 1899 | DELAY(1); |
1900 | } | | 1900 | } |
1901 | if (i == 0) { | | 1901 | if (i == 0) { |
1902 | log(LOG_WARNING, "%s: line %d: dmasync timeout\n", | | 1902 | log(LOG_WARNING, "%s: line %d: dmasync timeout\n", |
1903 | device_xname(sc->sc_dev), __LINE__); | | 1903 | device_xname(sc->sc_dev), __LINE__); |
1904 | return (ETIMEDOUT); | | 1904 | return (ETIMEDOUT); |
1905 | } | | 1905 | } |
1906 | | | 1906 | |
1907 | /* | | 1907 | /* |
1908 | * Initialize the station address. | | 1908 | * Initialize the station address. |
1909 | */ | | 1909 | */ |
1910 | cb_ias = &sc->sc_control_data->fcd_iascb; | | 1910 | cb_ias = &sc->sc_control_data->fcd_iascb; |
1911 | /* BIG_ENDIAN: no need to swap to store 0 */ | | 1911 | /* BIG_ENDIAN: no need to swap to store 0 */ |
1912 | cb_ias->cb_status = 0; | | 1912 | cb_ias->cb_status = 0; |
1913 | cb_ias->cb_command = htole16(FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL); | | 1913 | cb_ias->cb_command = htole16(FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL); |
1914 | /* BIG_ENDIAN: no need to swap to store 0xffffffff */ | | 1914 | /* BIG_ENDIAN: no need to swap to store 0xffffffff */ |
1915 | cb_ias->link_addr = 0xffffffff; | | 1915 | cb_ias->link_addr = 0xffffffff; |
1916 | memcpy(cb_ias->macaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN); | | 1916 | memcpy(cb_ias->macaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN); |
1917 | | | 1917 | |
1918 | FXP_CDIASSYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); | | 1918 | FXP_CDIASSYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
1919 | | | 1919 | |
1920 | /* | | 1920 | /* |
1921 | * Start the IAS (Individual Address Setup) command/DMA. | | 1921 | * Start the IAS (Individual Address Setup) command/DMA. |
1922 | */ | | 1922 | */ |
1923 | fxp_scb_wait(sc); | | 1923 | fxp_scb_wait(sc); |
1924 | CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->sc_cddma + FXP_CDIASOFF); | | 1924 | CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->sc_cddma + FXP_CDIASOFF); |
1925 | fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); | | 1925 | fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); |
1926 | /* ...and wait for it to complete. */ | | 1926 | /* ...and wait for it to complete. */ |
1927 | for (i = 1000; i > 0; i++) { | | 1927 | for (i = 1000; i > 0; i++) { |
1928 | FXP_CDIASSYNC(sc, | | 1928 | FXP_CDIASSYNC(sc, |
1929 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); | | 1929 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); |
1930 | status = le16toh(cb_ias->cb_status); | | 1930 | status = le16toh(cb_ias->cb_status); |
1931 | FXP_CDIASSYNC(sc, BUS_DMASYNC_PREREAD); | | 1931 | FXP_CDIASSYNC(sc, BUS_DMASYNC_PREREAD); |
1932 | if ((status & FXP_CB_STATUS_C) != 0) | | 1932 | if ((status & FXP_CB_STATUS_C) != 0) |
1933 | break; | | 1933 | break; |
1934 | DELAY(1); | | 1934 | DELAY(1); |
1935 | } | | 1935 | } |
1936 | if (i == 0) { | | 1936 | if (i == 0) { |
1937 | log(LOG_WARNING, "%s: line %d: dmasync timeout\n", | | 1937 | log(LOG_WARNING, "%s: line %d: dmasync timeout\n", |
1938 | device_xname(sc->sc_dev), __LINE__); | | 1938 | device_xname(sc->sc_dev), __LINE__); |
1939 | return (ETIMEDOUT); | | 1939 | return (ETIMEDOUT); |
1940 | } | | 1940 | } |
1941 | | | 1941 | |
1942 | /* | | 1942 | /* |
1943 | * Initialize the transmit descriptor ring. txlast is initialized | | 1943 | * Initialize the transmit descriptor ring. txlast is initialized |
1944 | * to the end of the list so that it will wrap around to the first | | 1944 | * to the end of the list so that it will wrap around to the first |
1945 | * descriptor when the first packet is transmitted. | | 1945 | * descriptor when the first packet is transmitted. |
1946 | */ | | 1946 | */ |
1947 | for (i = 0; i < FXP_NTXCB; i++) { | | 1947 | for (i = 0; i < FXP_NTXCB; i++) { |
1948 | txd = FXP_CDTX(sc, i); | | 1948 | txd = FXP_CDTX(sc, i); |
1949 | memset(txd, 0, sizeof(*txd)); | | 1949 | memset(txd, 0, sizeof(*txd)); |
1950 | txd->txd_txcb.cb_command = | | 1950 | txd->txd_txcb.cb_command = |
1951 | htole16(FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S); | | 1951 | htole16(FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S); |
1952 | txd->txd_txcb.link_addr = | | 1952 | txd->txd_txcb.link_addr = |
1953 | htole32(FXP_CDTXADDR(sc, FXP_NEXTTX(i))); | | 1953 | htole32(FXP_CDTXADDR(sc, FXP_NEXTTX(i))); |
1954 | if (sc->sc_flags & FXPF_EXT_TXCB) | | 1954 | if (sc->sc_flags & FXPF_EXT_TXCB) |
1955 | txd->txd_txcb.tbd_array_addr = | | 1955 | txd->txd_txcb.tbd_array_addr = |
1956 | htole32(FXP_CDTBDADDR(sc, i) + | | 1956 | htole32(FXP_CDTBDADDR(sc, i) + |
1957 | (2 * sizeof(struct fxp_tbd))); | | 1957 | (2 * sizeof(struct fxp_tbd))); |
1958 | else | | 1958 | else |
1959 | txd->txd_txcb.tbd_array_addr = | | 1959 | txd->txd_txcb.tbd_array_addr = |
1960 | htole32(FXP_CDTBDADDR(sc, i)); | | 1960 | htole32(FXP_CDTBDADDR(sc, i)); |
1961 | FXP_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); | | 1961 | FXP_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
1962 | } | | 1962 | } |
1963 | sc->sc_txpending = 0; | | 1963 | sc->sc_txpending = 0; |
1964 | sc->sc_txdirty = 0; | | 1964 | sc->sc_txdirty = 0; |
1965 | sc->sc_txlast = FXP_NTXCB - 1; | | 1965 | sc->sc_txlast = FXP_NTXCB - 1; |
1966 | | | 1966 | |
1967 | /* | | 1967 | /* |
1968 | * Initialize the receive buffer list. | | 1968 | * Initialize the receive buffer list. |
1969 | */ | | 1969 | */ |
1970 | sc->sc_rxq.ifq_maxlen = FXP_NRFABUFS; | | 1970 | sc->sc_rxq.ifq_maxlen = FXP_NRFABUFS; |
1971 | while (sc->sc_rxq.ifq_len < FXP_NRFABUFS) { | | 1971 | while (sc->sc_rxq.ifq_len < FXP_NRFABUFS) { |
1972 | rxmap = FXP_RXMAP_GET(sc); | | 1972 | rxmap = FXP_RXMAP_GET(sc); |
1973 | if ((error = fxp_add_rfabuf(sc, rxmap, 0)) != 0) { | | 1973 | if ((error = fxp_add_rfabuf(sc, rxmap, 0)) != 0) { |
1974 | log(LOG_ERR, "%s: unable to allocate or map rx " | | 1974 | log(LOG_ERR, "%s: unable to allocate or map rx " |
1975 | "buffer %d, error = %d\n", | | 1975 | "buffer %d, error = %d\n", |
1976 | device_xname(sc->sc_dev), | | 1976 | device_xname(sc->sc_dev), |
1977 | sc->sc_rxq.ifq_len, error); | | 1977 | sc->sc_rxq.ifq_len, error); |
1978 | /* | | 1978 | /* |
1979 | * XXX Should attempt to run with fewer receive | | 1979 | * XXX Should attempt to run with fewer receive |
1980 | * XXX buffers instead of just failing. | | 1980 | * XXX buffers instead of just failing. |
1981 | */ | | 1981 | */ |
1982 | FXP_RXMAP_PUT(sc, rxmap); | | 1982 | FXP_RXMAP_PUT(sc, rxmap); |
1983 | fxp_rxdrain(sc); | | 1983 | fxp_rxdrain(sc); |
1984 | goto out; | | 1984 | goto out; |
1985 | } | | 1985 | } |
1986 | } | | 1986 | } |
1987 | sc->sc_rxidle = 0; | | 1987 | sc->sc_rxidle = 0; |
1988 | | | 1988 | |
1989 | /* | | 1989 | /* |
1990 | * Give the transmit ring to the chip. We do this by pointing | | 1990 | * Give the transmit ring to the chip. We do this by pointing |
1991 | * the chip at the last descriptor (which is a NOP|SUSPEND), and | | 1991 | * the chip at the last descriptor (which is a NOP|SUSPEND), and |
1992 | * issuing a start command. It will execute the NOP and then | | 1992 | * issuing a start command. It will execute the NOP and then |
1993 | * suspend, pointing at the first descriptor. | | 1993 | * suspend, pointing at the first descriptor. |
1994 | */ | | 1994 | */ |
1995 | fxp_scb_wait(sc); | | 1995 | fxp_scb_wait(sc); |
1996 | CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, FXP_CDTXADDR(sc, sc->sc_txlast)); | | 1996 | CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, FXP_CDTXADDR(sc, sc->sc_txlast)); |
1997 | fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); | | 1997 | fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); |
1998 | | | 1998 | |
1999 | /* | | 1999 | /* |
2000 | * Initialize receiver buffer area - RFA. | | 2000 | * Initialize receiver buffer area - RFA. |
2001 | */ | | 2001 | */ |
2002 | #if 0 /* initialization will be done by FXP_SCB_INTRCNTL_REQUEST_SWI later */ | | 2002 | #if 0 /* initialization will be done by FXP_SCB_INTRCNTL_REQUEST_SWI later */ |
2003 | rxmap = M_GETCTX(sc->sc_rxq.ifq_head, bus_dmamap_t); | | 2003 | rxmap = M_GETCTX(sc->sc_rxq.ifq_head, bus_dmamap_t); |
2004 | fxp_scb_wait(sc); | | 2004 | fxp_scb_wait(sc); |
2005 | CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, | | 2005 | CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, |
2006 | rxmap->dm_segs[0].ds_addr + RFA_ALIGNMENT_FUDGE); | | 2006 | rxmap->dm_segs[0].ds_addr + RFA_ALIGNMENT_FUDGE); |
2007 | fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START); | | 2007 | fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START); |
2008 | #endif | | 2008 | #endif |
2009 | | | 2009 | |
2010 | if (sc->sc_flags & FXPF_MII) { | | 2010 | if (sc->sc_flags & FXPF_MII) { |
2011 | /* | | 2011 | /* |
2012 | * Set current media. | | 2012 | * Set current media. |
2013 | */ | | 2013 | */ |
2014 | if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0) | | 2014 | if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0) |
2015 | goto out; | | 2015 | goto out; |
2016 | } | | 2016 | } |
2017 | | | 2017 | |
2018 | /* | | 2018 | /* |
2019 | * ...all done! | | 2019 | * ...all done! |
2020 | */ | | 2020 | */ |
2021 | ifp->if_flags |= IFF_RUNNING; | | 2021 | ifp->if_flags |= IFF_RUNNING; |
2022 | ifp->if_flags &= ~IFF_OACTIVE; | | 2022 | ifp->if_flags &= ~IFF_OACTIVE; |
2023 | | | 2023 | |
2024 | /* | | 2024 | /* |
2025 | * Request a software generated interrupt that will be used to | | 2025 | * Request a software generated interrupt that will be used to |
2026 | * (re)start the RU processing. If we direct the chip to start | | 2026 | * (re)start the RU processing. If we direct the chip to start |
2027 | * receiving from the start of queue now, instead of letting the | | 2027 | * receiving from the start of queue now, instead of letting the |
2028 | * interrupt handler first process all received packets, we run | | 2028 | * interrupt handler first process all received packets, we run |
2029 | * the risk of having it overwrite mbuf clusters while they are | | 2029 | * the risk of having it overwrite mbuf clusters while they are |
2030 | * being processed or after they have been returned to the pool. | | 2030 | * being processed or after they have been returned to the pool. |
2031 | */ | | 2031 | */ |
2032 | CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTRCNTL_REQUEST_SWI); | | 2032 | CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTRCNTL_REQUEST_SWI); |
2033 | | | 2033 | |
2034 | /* | | 2034 | /* |
2035 | * Start the one second timer. | | 2035 | * Start the one second timer. |
2036 | */ | | 2036 | */ |
2037 | callout_reset(&sc->sc_callout, hz, fxp_tick, sc); | | 2037 | callout_reset(&sc->sc_callout, hz, fxp_tick, sc); |
2038 | | | 2038 | |
2039 | /* | | 2039 | /* |
2040 | * Attempt to start output on the interface. | | 2040 | * Attempt to start output on the interface. |
2041 | */ | | 2041 | */ |
2042 | fxp_start(ifp); | | 2042 | fxp_start(ifp); |
2043 | | | 2043 | |
2044 | out: | | 2044 | out: |
2045 | if (error) { | | 2045 | if (error) { |
2046 | ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); | | 2046 | ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); |
2047 | ifp->if_timer = 0; | | 2047 | ifp->if_timer = 0; |
2048 | log(LOG_ERR, "%s: interface not running\n", | | 2048 | log(LOG_ERR, "%s: interface not running\n", |
2049 | device_xname(sc->sc_dev)); | | 2049 | device_xname(sc->sc_dev)); |
2050 | } | | 2050 | } |
2051 | return (error); | | 2051 | return (error); |
2052 | } | | 2052 | } |
2053 | | | 2053 | |
2054 | /* | | 2054 | /* |
2055 | * Notify the world which media we're using. | | 2055 | * Notify the world which media we're using. |
2056 | */ | | 2056 | */ |
2057 | void | | 2057 | void |
2058 | fxp_mii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) | | 2058 | fxp_mii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) |
2059 | { | | 2059 | { |
2060 | struct fxp_softc *sc = ifp->if_softc; | | 2060 | struct fxp_softc *sc = ifp->if_softc; |
2061 | | | 2061 | |
2062 | if (sc->sc_enabled == 0) { | | 2062 | if (sc->sc_enabled == 0) { |
2063 | ifmr->ifm_active = IFM_ETHER | IFM_NONE; | | 2063 | ifmr->ifm_active = IFM_ETHER | IFM_NONE; |
2064 | ifmr->ifm_status = 0; | | 2064 | ifmr->ifm_status = 0; |
2065 | return; | | 2065 | return; |
2066 | } | | 2066 | } |
2067 | | | 2067 | |
2068 | ether_mediastatus(ifp, ifmr); | | 2068 | ether_mediastatus(ifp, ifmr); |
2069 | } | | 2069 | } |
2070 | | | 2070 | |
/*
 * Media change hook for the Seeq 80c24 interface: there is no
 * hardware to reprogram, so simply report success.
 */
int
fxp_80c24_mediachange(struct ifnet *ifp)
{

	/* Nothing to do here. */
	return 0;
}
2078 | | | 2078 | |
2079 | void | | 2079 | void |
2080 | fxp_80c24_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) | | 2080 | fxp_80c24_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) |
2081 | { | | 2081 | { |
2082 | struct fxp_softc *sc = ifp->if_softc; | | 2082 | struct fxp_softc *sc = ifp->if_softc; |
2083 | | | 2083 | |
2084 | /* | | 2084 | /* |
2085 | * Media is currently-selected media. We cannot determine | | 2085 | * Media is currently-selected media. We cannot determine |
2086 | * the link status. | | 2086 | * the link status. |
2087 | */ | | 2087 | */ |
2088 | ifmr->ifm_status = 0; | | 2088 | ifmr->ifm_status = 0; |
2089 | ifmr->ifm_active = sc->sc_mii.mii_media.ifm_cur->ifm_media; | | 2089 | ifmr->ifm_active = sc->sc_mii.mii_media.ifm_cur->ifm_media; |
2090 | } | | 2090 | } |
2091 | | | 2091 | |
2092 | /* | | 2092 | /* |
2093 | * Add a buffer to the end of the RFA buffer list. | | 2093 | * Add a buffer to the end of the RFA buffer list. |
2094 | * Return 0 if successful, error code on failure. | | 2094 | * Return 0 if successful, error code on failure. |
2095 | * | | 2095 | * |
2096 | * The RFA struct is stuck at the beginning of mbuf cluster and the | | 2096 | * The RFA struct is stuck at the beginning of mbuf cluster and the |
2097 | * data pointer is fixed up to point just past it. | | 2097 | * data pointer is fixed up to point just past it. |
2098 | */ | | 2098 | */ |
int
fxp_add_rfabuf(struct fxp_softc *sc, bus_dmamap_t rxmap, int unload)
{
	struct mbuf *m;
	int error;

	/* Allocate an mbuf header; fail softly so the caller can retry. */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	/* Attach a cluster; the RFA lives at its head, data follows. */
	MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/* If recycling a map from a previous mbuf, unload it first. */
	if (unload)
		bus_dmamap_unload(sc->sc_dmat, rxmap);

	/* Record the dmamap in the mbuf so it can be fetched later
	 * with M_GETCTX (as done when (re)starting the RU). */
	M_SETCTX(m, rxmap);

	/* Map the full cluster for device reads (incoming packets). */
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		/* XXX XXX XXX */
		aprint_error_dev(sc->sc_dev,
		    "can't load rx DMA map %d, error = %d\n",
		    sc->sc_rxq.ifq_len, error);
		panic("fxp_add_rfabuf");
	}

	/* Initialize the RFA header and link the buffer onto the list. */
	FXP_INIT_RFABUF(sc, m);

	return (0);
}
2136 | | | 2136 | |
2137 | int | | 2137 | int |
2138 | fxp_mdi_read(device_t self, int phy, int reg) | | 2138 | fxp_mdi_read(device_t self, int phy, int reg) |
2139 | { | | 2139 | { |
2140 | struct fxp_softc *sc = device_private(self); | | 2140 | struct fxp_softc *sc = device_private(self); |
2141 | int count = 10000; | | 2141 | int count = 10000; |
2142 | int value; | | 2142 | int value; |
2143 | | | 2143 | |
2144 | CSR_WRITE_4(sc, FXP_CSR_MDICONTROL, | | 2144 | CSR_WRITE_4(sc, FXP_CSR_MDICONTROL, |
2145 | (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21)); | | 2145 | (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21)); |
2146 | | | 2146 | |
2147 | while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & | | 2147 | while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & |
2148 | 0x10000000) == 0 && count--) | | 2148 | 0x10000000) == 0 && count--) |
2149 | DELAY(10); | | 2149 | DELAY(10); |
2150 | | | 2150 | |
2151 | if (count <= 0) | | 2151 | if (count <= 0) |
2152 | log(LOG_WARNING, | | 2152 | log(LOG_WARNING, |
2153 | "%s: fxp_mdi_read: timed out\n", device_xname(self)); | | 2153 | "%s: fxp_mdi_read: timed out\n", device_xname(self)); |
2154 | | | 2154 | |
2155 | return (value & 0xffff); | | 2155 | return (value & 0xffff); |
2156 | } | | 2156 | } |
2157 | | | 2157 | |
/*
 * Media status change hook (presumably registered as the MII statchg
 * callback — TODO confirm against the attach code).  No per-chip
 * action is required here.
 */
void
fxp_statchg(device_t self)
{

	/* Nothing to do. */
}
2164 | | | 2164 | |
2165 | void | | 2165 | void |
2166 | fxp_mdi_write(device_t self, int phy, int reg, int value) | | 2166 | fxp_mdi_write(device_t self, int phy, int reg, int value) |
2167 | { | | 2167 | { |
2168 | struct fxp_softc *sc = device_private(self); | | 2168 | struct fxp_softc *sc = device_private(self); |
2169 | int count = 10000; | | 2169 | int count = 10000; |
2170 | | | 2170 | |
2171 | CSR_WRITE_4(sc, FXP_CSR_MDICONTROL, | | 2171 | CSR_WRITE_4(sc, FXP_CSR_MDICONTROL, |
2172 | (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) | | | 2172 | (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) | |
2173 | (value & 0xffff)); | | 2173 | (value & 0xffff)); |
2174 | | | 2174 | |
2175 | while ((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 && | | 2175 | while ((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 && |
2176 | count--) | | 2176 | count--) |
2177 | DELAY(10); | | 2177 | DELAY(10); |
2178 | | | 2178 | |
2179 | if (count <= 0) | | 2179 | if (count <= 0) |
2180 | log(LOG_WARNING, | | 2180 | log(LOG_WARNING, |
2181 | "%s: fxp_mdi_write: timed out\n", device_xname(self)); | | 2181 | "%s: fxp_mdi_write: timed out\n", device_xname(self)); |
2182 | } | | 2182 | } |
2183 | | | 2183 | |
2184 | int | | 2184 | int |
2185 | fxp_ioctl(struct ifnet *ifp, u_long cmd, void *data) | | 2185 | fxp_ioctl(struct ifnet *ifp, u_long cmd, void *data) |
2186 | { | | 2186 | { |
2187 | struct fxp_softc *sc = ifp->if_softc; | | 2187 | struct fxp_softc *sc = ifp->if_softc; |
2188 | struct ifreq *ifr = (struct ifreq *)data; | | 2188 | struct ifreq *ifr = (struct ifreq *)data; |
2189 | int s, error; | | 2189 | int s, error; |
2190 | | | 2190 | |
2191 | s = splnet(); | | 2191 | s = splnet(); |
2192 | | | 2192 | |
2193 | switch (cmd) { | | 2193 | switch (cmd) { |
2194 | case SIOCSIFMEDIA: | | 2194 | case SIOCSIFMEDIA: |
2195 | case SIOCGIFMEDIA: | | 2195 | case SIOCGIFMEDIA: |
2196 | error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd); | | 2196 | error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd); |
2197 | break; | | 2197 | break; |
2198 | | | 2198 | |
2199 | default: | | 2199 | default: |
2200 | if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET) | | 2200 | if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET) |
2201 | break; | | 2201 | break; |
2202 | | | 2202 | |
2203 | error = 0; | | 2203 | error = 0; |
2204 | | | 2204 | |
2205 | if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI) | | 2205 | if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI) |
2206 | ; | | 2206 | ; |
2207 | else if (ifp->if_flags & IFF_RUNNING) { | | 2207 | else if (ifp->if_flags & IFF_RUNNING) { |
2208 | /* | | 2208 | /* |
2209 | * Multicast list has changed; set the | | 2209 | * Multicast list has changed; set the |
2210 | * hardware filter accordingly. | | 2210 | * hardware filter accordingly. |
2211 | */ | | 2211 | */ |
2212 | while (sc->sc_txpending) { | | 2212 | while (sc->sc_txpending) { |
2213 | sc->sc_flags |= FXPF_WANTINIT; | | 2213 | sc->sc_flags |= FXPF_WANTINIT; |
2214 | tsleep(sc, PSOCK, "fxp_init", 0); | | 2214 | tsleep(sc, PSOCK, "fxp_init", 0); |
2215 | } | | 2215 | } |
2216 | error = fxp_init(ifp); | | 2216 | error = fxp_init(ifp); |
2217 | } | | 2217 | } |
2218 | break; | | 2218 | break; |
2219 | } | | 2219 | } |
2220 | | | 2220 | |
2221 | /* Try to get more packets going. */ | | 2221 | /* Try to get more packets going. */ |
2222 | if (sc->sc_enabled) | | 2222 | if (sc->sc_enabled) |
2223 | fxp_start(ifp); | | 2223 | fxp_start(ifp); |
2224 | | | 2224 | |
2225 | splx(s); | | 2225 | splx(s); |
2226 | return (error); | | 2226 | return (error); |
2227 | } | | 2227 | } |
2228 | | | 2228 | |
2229 | /* | | 2229 | /* |
2230 | * Program the multicast filter. | | 2230 | * Program the multicast filter. |
2231 | * | | 2231 | * |
2232 | * This function must be called at splnet(). | | 2232 | * This function must be called at splnet(). |
2233 | */ | | 2233 | */ |
void
fxp_mc_setup(struct fxp_softc *sc)
{
	struct fxp_cb_mcs *mcsp = &sc->sc_control_data->fcd_mcscb;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ethercom *ec = &sc->sc_ethercom;
	struct ether_multi *enm;
	struct ether_multistep step;
	int count, nmcasts;
	uint16_t status;

#ifdef DIAGNOSTIC
	/* The MCAS command links into the TxCB ring; it must be idle. */
	if (sc->sc_txpending)
		panic("fxp_mc_setup: pending transmissions");
#endif

	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Initialize multicast setup descriptor: copy each address
	 * from the interface's multicast list into the command block.
	 */
	nmcasts = 0;
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		/*
		 * Check for too many multicast addresses or if we're
		 * listening to a range.  Either way, we simply have
		 * to accept all multicasts.
		 */
		if (nmcasts >= MAXMCADDR ||
		    memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			/*
			 * Callers of this function must do the
			 * right thing with this.  If we're called
			 * from outside fxp_init(), the caller must
			 * detect if the state of IFF_ALLMULTI changes.
			 * If it does, the caller must then call
			 * fxp_init(), since allmulti is handled by
			 * the config block.
			 */
			ifp->if_flags |= IFF_ALLMULTI;
			return;
		}
		memcpy(&mcsp->mc_addr[nmcasts][0], enm->enm_addrlo,
		    ETHER_ADDR_LEN);
		nmcasts++;
		ETHER_NEXT_MULTI(step, enm);
	}

	/* BIG_ENDIAN: no need to swap to store 0 */
	mcsp->cb_status = 0;
	mcsp->cb_command = htole16(FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_EL);
	mcsp->link_addr = htole32(FXP_CDTXADDR(sc, FXP_NEXTTX(sc->sc_txlast)));
	mcsp->mc_cnt = htole16(nmcasts * ETHER_ADDR_LEN);

	FXP_CDMCSSYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/*
	 * Wait until the command unit is not active.  This should never
	 * happen since nothing is queued, but make sure anyway.
	 */
	count = 100;
	while ((CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS) >> 6) ==
	    FXP_SCB_CUS_ACTIVE && --count)
		DELAY(1);
	if (count == 0) {
		log(LOG_WARNING, "%s: line %d: command queue timeout\n",
		    device_xname(sc->sc_dev), __LINE__);
		return;
	}

	/*
	 * Start the multicast setup command/DMA.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->sc_cddma + FXP_CDMCSOFF);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);

	/* ...and wait for it to complete (bounded poll, ~1000us). */
	for (count = 1000; count > 0; count--) {
		FXP_CDMCSSYNC(sc,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		status = le16toh(mcsp->cb_status);
		FXP_CDMCSSYNC(sc, BUS_DMASYNC_PREREAD);
		if ((status & FXP_CB_STATUS_C) != 0)
			break;
		DELAY(1);
	}
	if (count == 0) {
		log(LOG_WARNING, "%s: line %d: dmasync timeout\n",
		    device_xname(sc->sc_dev), __LINE__);
		return;
	}
}
2329 | | | 2329 | |
/*
 * Loadable microcode images for the various i8255x controller revisions.
 * Each D10x_*_RCVBUNDLE_UCODE macro (defined in a microcode header
 * elsewhere in the driver — presumably rcvbundl.h; confirm) expands to a
 * brace-enclosed initializer list of 32-bit words that can be downloaded
 * to the chip.  NOTE(review): the "RCVBUNDLE" names suggest these
 * implement receive-interrupt bundling/mitigation — verify against the
 * header's documentation before relying on that.
 */
static const uint32_t fxp_ucode_d101a[] = D101_A_RCVBUNDLE_UCODE;
static const uint32_t fxp_ucode_d101b0[] = D101_B0_RCVBUNDLE_UCODE;
static const uint32_t fxp_ucode_d101ma[] = D101M_B_RCVBUNDLE_UCODE;