| @@ -1,2158 +1,2189 @@ | | | @@ -1,2158 +1,2189 @@ |
1 | /* $NetBSD: if_vge.c,v 1.74 2019/09/13 07:55:07 msaitoh Exp $ */ | | 1 | /* $NetBSD: if_vge.c,v 1.75 2019/10/08 14:26:27 msaitoh Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 2004 | | 4 | * Copyright (c) 2004 |
5 | * Bill Paul <wpaul@windriver.com>. All rights reserved. | | 5 | * Bill Paul <wpaul@windriver.com>. All rights reserved. |
6 | * | | 6 | * |
7 | * Redistribution and use in source and binary forms, with or without | | 7 | * Redistribution and use in source and binary forms, with or without |
8 | * modification, are permitted provided that the following conditions | | 8 | * modification, are permitted provided that the following conditions |
9 | * are met: | | 9 | * are met: |
10 | * 1. Redistributions of source code must retain the above copyright | | 10 | * 1. Redistributions of source code must retain the above copyright |
11 | * notice, this list of conditions and the following disclaimer. | | 11 | * notice, this list of conditions and the following disclaimer. |
12 | * 2. Redistributions in binary form must reproduce the above copyright | | 12 | * 2. Redistributions in binary form must reproduce the above copyright |
13 | * notice, this list of conditions and the following disclaimer in the | | 13 | * notice, this list of conditions and the following disclaimer in the |
14 | * documentation and/or other materials provided with the distribution. | | 14 | * documentation and/or other materials provided with the distribution. |
15 | * 3. All advertising materials mentioning features or use of this software | | 15 | * 3. All advertising materials mentioning features or use of this software |
16 | * must display the following acknowledgement: | | 16 | * must display the following acknowledgement: |
17 | * This product includes software developed by Bill Paul. | | 17 | * This product includes software developed by Bill Paul. |
18 | * 4. Neither the name of the author nor the names of any co-contributors | | 18 | * 4. Neither the name of the author nor the names of any co-contributors |
19 | * may be used to endorse or promote products derived from this software | | 19 | * may be used to endorse or promote products derived from this software |
20 | * without specific prior written permission. | | 20 | * without specific prior written permission. |
21 | * | | 21 | * |
22 | * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND | | 22 | * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND |
23 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | | 23 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
24 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | | 24 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
25 | * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD | | 25 | * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD |
26 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 26 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
29 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 29 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
30 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 30 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
31 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF | | 31 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF |
32 | * THE POSSIBILITY OF SUCH DAMAGE. | | 32 | * THE POSSIBILITY OF SUCH DAMAGE. |
33 | * | | 33 | * |
34 | * FreeBSD: src/sys/dev/vge/if_vge.c,v 1.5 2005/02/07 19:39:29 glebius Exp | | 34 | * FreeBSD: src/sys/dev/vge/if_vge.c,v 1.5 2005/02/07 19:39:29 glebius Exp |
35 | */ | | 35 | */ |
36 | | | 36 | |
37 | #include <sys/cdefs.h> | | 37 | #include <sys/cdefs.h> |
38 | __KERNEL_RCSID(0, "$NetBSD: if_vge.c,v 1.74 2019/09/13 07:55:07 msaitoh Exp $"); | | 38 | __KERNEL_RCSID(0, "$NetBSD: if_vge.c,v 1.75 2019/10/08 14:26:27 msaitoh Exp $"); |
39 | | | 39 | |
40 | /* | | 40 | /* |
41 | * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver. | | 41 | * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver. |
42 | * | | 42 | * |
43 | * Written by Bill Paul <wpaul@windriver.com> | | 43 | * Written by Bill Paul <wpaul@windriver.com> |
44 | * Senior Networking Software Engineer | | 44 | * Senior Networking Software Engineer |
45 | * Wind River Systems | | 45 | * Wind River Systems |
46 | */ | | 46 | */ |
47 | | | 47 | |
48 | /* | | 48 | /* |
49 | * The VIA Networking VT6122 is a 32bit, 33/66 MHz PCI device that | | 49 | * The VIA Networking VT6122 is a 32bit, 33/66 MHz PCI device that |
50 | * combines a tri-speed ethernet MAC and PHY, with the following | | 50 | * combines a tri-speed ethernet MAC and PHY, with the following |
51 | * features: | | 51 | * features: |
52 | * | | 52 | * |
53 | * o Jumbo frame support up to 16K | | 53 | * o Jumbo frame support up to 16K |
54 | * o Transmit and receive flow control | | 54 | * o Transmit and receive flow control |
55 | * o IPv4 checksum offload | | 55 | * o IPv4 checksum offload |
56 | * o VLAN tag insertion and stripping | | 56 | * o VLAN tag insertion and stripping |
57 | * o TCP large send | | 57 | * o TCP large send |
58 | * o 64-bit multicast hash table filter | | 58 | * o 64-bit multicast hash table filter |
59 | * o 64 entry CAM filter | | 59 | * o 64 entry CAM filter |
60 | * o 16K RX FIFO and 48K TX FIFO memory | | 60 | * o 16K RX FIFO and 48K TX FIFO memory |
61 | * o Interrupt moderation | | 61 | * o Interrupt moderation |
62 | * | | 62 | * |
63 | * The VT6122 supports up to four transmit DMA queues. The descriptors | | 63 | * The VT6122 supports up to four transmit DMA queues. The descriptors |
64 | * in the transmit ring can address up to 7 data fragments; frames which | | 64 | * in the transmit ring can address up to 7 data fragments; frames which |
65 | * span more than 7 data buffers must be coalesced, but in general the | | 65 | * span more than 7 data buffers must be coalesced, but in general the |
66 | * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments | | 66 | * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments |
67 | * long. The receive descriptors address only a single buffer. | | 67 | * long. The receive descriptors address only a single buffer. |
68 | * | | 68 | * |
69 | * There are two peculiar design issues with the VT6122. One is that | | 69 | * There are two peculiar design issues with the VT6122. One is that |
70 | * receive data buffers must be aligned on a 32-bit boundary. This is | | 70 | * receive data buffers must be aligned on a 32-bit boundary. This is |
71 | * not a problem where the VT6122 is used as a LOM device in x86-based | | 71 | * not a problem where the VT6122 is used as a LOM device in x86-based |
72 | * systems, but on architectures that generate unaligned access traps, we | | 72 | * systems, but on architectures that generate unaligned access traps, we |
73 | * have to do some copying. | | 73 | * have to do some copying. |
74 | * | | 74 | * |
75 | * The other issue has to do with the way 64-bit addresses are handled. | | 75 | * The other issue has to do with the way 64-bit addresses are handled. |
76 | * The DMA descriptors only allow you to specify 48 bits of addressing | | 76 | * The DMA descriptors only allow you to specify 48 bits of addressing |
77 | * information. The remaining 16 bits are specified using one of the | | 77 | * information. The remaining 16 bits are specified using one of the |
78 | * I/O registers. If you only have a 32-bit system, then this isn't | | 78 | * I/O registers. If you only have a 32-bit system, then this isn't |
79 | * an issue, but if you have a 64-bit system and more than 4GB of | | 79 | * an issue, but if you have a 64-bit system and more than 4GB of |
80 | * memory, you must make sure your network data buffers reside | | 80 | * memory, you must make sure your network data buffers reside |
81 | * in the same 48-bit 'segment.' | | 81 | * in the same 48-bit 'segment.' |
82 | * | | 82 | * |
83 | * Special thanks to Ryan Fu at VIA Networking for providing documentation | | 83 | * Special thanks to Ryan Fu at VIA Networking for providing documentation |
84 | * and sample NICs for testing. | | 84 | * and sample NICs for testing. |
85 | */ | | 85 | */ |
86 | | | 86 | |
87 | | | 87 | |
88 | #include <sys/param.h> | | 88 | #include <sys/param.h> |
89 | #include <sys/endian.h> | | 89 | #include <sys/endian.h> |
90 | #include <sys/systm.h> | | 90 | #include <sys/systm.h> |
91 | #include <sys/device.h> | | 91 | #include <sys/device.h> |
92 | #include <sys/sockio.h> | | 92 | #include <sys/sockio.h> |
93 | #include <sys/mbuf.h> | | 93 | #include <sys/mbuf.h> |
94 | #include <sys/malloc.h> | | 94 | #include <sys/malloc.h> |
95 | #include <sys/kernel.h> | | 95 | #include <sys/kernel.h> |
96 | #include <sys/socket.h> | | 96 | #include <sys/socket.h> |
97 | | | 97 | |
98 | #include <net/if.h> | | 98 | #include <net/if.h> |
99 | #include <net/if_arp.h> | | 99 | #include <net/if_arp.h> |
100 | #include <net/if_ether.h> | | 100 | #include <net/if_ether.h> |
101 | #include <net/if_dl.h> | | 101 | #include <net/if_dl.h> |
102 | #include <net/if_media.h> | | 102 | #include <net/if_media.h> |
103 | | | 103 | |
104 | #include <net/bpf.h> | | 104 | #include <net/bpf.h> |
105 | | | 105 | |
106 | #include <sys/bus.h> | | 106 | #include <sys/bus.h> |
107 | | | 107 | |
108 | #include <dev/mii/mii.h> | | 108 | #include <dev/mii/mii.h> |
109 | #include <dev/mii/miivar.h> | | 109 | #include <dev/mii/miivar.h> |
110 | | | 110 | |
111 | #include <dev/pci/pcireg.h> | | 111 | #include <dev/pci/pcireg.h> |
112 | #include <dev/pci/pcivar.h> | | 112 | #include <dev/pci/pcivar.h> |
113 | #include <dev/pci/pcidevs.h> | | 113 | #include <dev/pci/pcidevs.h> |
114 | | | 114 | |
115 | #include <dev/pci/if_vgereg.h> | | 115 | #include <dev/pci/if_vgereg.h> |
116 | | | 116 | |
117 | #define VGE_IFQ_MAXLEN 64 | | 117 | #define VGE_IFQ_MAXLEN 64 |
118 | | | 118 | |
119 | #define VGE_RING_ALIGN 256 | | 119 | #define VGE_RING_ALIGN 256 |
120 | | | 120 | |
121 | #define VGE_NTXDESC 256 | | 121 | #define VGE_NTXDESC 256 |
122 | #define VGE_NTXDESC_MASK (VGE_NTXDESC - 1) | | 122 | #define VGE_NTXDESC_MASK (VGE_NTXDESC - 1) |
123 | #define VGE_NEXT_TXDESC(x) ((x + 1) & VGE_NTXDESC_MASK) | | 123 | #define VGE_NEXT_TXDESC(x) ((x + 1) & VGE_NTXDESC_MASK) |
124 | #define VGE_PREV_TXDESC(x) ((x - 1) & VGE_NTXDESC_MASK) | | 124 | #define VGE_PREV_TXDESC(x) ((x - 1) & VGE_NTXDESC_MASK) |
125 | | | 125 | |
126 | #define VGE_NRXDESC 256 /* Must be a multiple of 4!! */ | | 126 | #define VGE_NRXDESC 256 /* Must be a multiple of 4!! */ |
127 | #define VGE_NRXDESC_MASK (VGE_NRXDESC - 1) | | 127 | #define VGE_NRXDESC_MASK (VGE_NRXDESC - 1) |
128 | #define VGE_NEXT_RXDESC(x) ((x + 1) & VGE_NRXDESC_MASK) | | 128 | #define VGE_NEXT_RXDESC(x) ((x + 1) & VGE_NRXDESC_MASK) |
129 | #define VGE_PREV_RXDESC(x) ((x - 1) & VGE_NRXDESC_MASK) | | 129 | #define VGE_PREV_RXDESC(x) ((x - 1) & VGE_NRXDESC_MASK) |
130 | | | 130 | |
131 | #define VGE_ADDR_LO(y) ((uint64_t)(y) & 0xFFFFFFFF) | | 131 | #define VGE_ADDR_LO(y) ((uint64_t)(y) & 0xFFFFFFFF) |
132 | #define VGE_ADDR_HI(y) ((uint64_t)(y) >> 32) | | 132 | #define VGE_ADDR_HI(y) ((uint64_t)(y) >> 32) |
133 | #define VGE_BUFLEN(y) ((y) & 0x7FFF) | | 133 | #define VGE_BUFLEN(y) ((y) & 0x7FFF) |
134 | #define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN) | | 134 | #define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN) |
135 | | | 135 | |
136 | #define VGE_POWER_MANAGEMENT 0 /* disabled for now */ | | 136 | #define VGE_POWER_MANAGEMENT 0 /* disabled for now */ |
137 | | | 137 | |
138 | /* | | 138 | /* |
139 | * Mbuf adjust factor to force 32-bit alignment of IP header. | | 139 | * Mbuf adjust factor to force 32-bit alignment of IP header. |
140 | * Drivers should pad ETHER_ALIGN bytes when setting up a | | 140 | * Drivers should pad ETHER_ALIGN bytes when setting up a |
141 | * RX mbuf so the upper layers get the IP header properly aligned | | 141 | * RX mbuf so the upper layers get the IP header properly aligned |
142 | * past the 14-byte Ethernet header. | | 142 | * past the 14-byte Ethernet header. |
143 | * | | 143 | * |
144 | * See also comment in vge_encap(). | | 144 | * See also comment in vge_encap(). |
145 | */ | | 145 | */ |
146 | | | 146 | |
147 | #ifdef __NO_STRICT_ALIGNMENT | | 147 | #ifdef __NO_STRICT_ALIGNMENT |
148 | #define VGE_RX_BUFSIZE MCLBYTES | | 148 | #define VGE_RX_BUFSIZE MCLBYTES |
149 | #else | | 149 | #else |
150 | #define VGE_RX_PAD sizeof(uint32_t) | | 150 | #define VGE_RX_PAD sizeof(uint32_t) |
151 | #define VGE_RX_BUFSIZE (MCLBYTES - VGE_RX_PAD) | | 151 | #define VGE_RX_BUFSIZE (MCLBYTES - VGE_RX_PAD) |
152 | #endif | | 152 | #endif |
153 | | | 153 | |
154 | /* | | 154 | /* |
155 | * Control structures are DMA'd to the vge chip. We allocate them in | | 155 | * Control structures are DMA'd to the vge chip. We allocate them in |
156 | * a single clump that maps to a single DMA segment to make several things | | 156 | * a single clump that maps to a single DMA segment to make several things |
157 | * easier. | | 157 | * easier. |
158 | */ | | 158 | */ |
159 | struct vge_control_data { | | 159 | struct vge_control_data { |
160 | /* TX descriptors */ | | 160 | /* TX descriptors */ |
161 | struct vge_txdesc vcd_txdescs[VGE_NTXDESC]; | | 161 | struct vge_txdesc vcd_txdescs[VGE_NTXDESC]; |
162 | /* RX descriptors */ | | 162 | /* RX descriptors */ |
163 | struct vge_rxdesc vcd_rxdescs[VGE_NRXDESC]; | | 163 | struct vge_rxdesc vcd_rxdescs[VGE_NRXDESC]; |
164 | /* dummy data for TX padding */ | | 164 | /* dummy data for TX padding */ |
165 | uint8_t vcd_pad[ETHER_PAD_LEN]; | | 165 | uint8_t vcd_pad[ETHER_PAD_LEN]; |
166 | }; | | 166 | }; |
167 | | | 167 | |
168 | #define VGE_CDOFF(x) offsetof(struct vge_control_data, x) | | 168 | #define VGE_CDOFF(x) offsetof(struct vge_control_data, x) |
169 | #define VGE_CDTXOFF(x) VGE_CDOFF(vcd_txdescs[(x)]) | | 169 | #define VGE_CDTXOFF(x) VGE_CDOFF(vcd_txdescs[(x)]) |
170 | #define VGE_CDRXOFF(x) VGE_CDOFF(vcd_rxdescs[(x)]) | | 170 | #define VGE_CDRXOFF(x) VGE_CDOFF(vcd_rxdescs[(x)]) |
171 | #define VGE_CDPADOFF() VGE_CDOFF(vcd_pad[0]) | | 171 | #define VGE_CDPADOFF() VGE_CDOFF(vcd_pad[0]) |
172 | | | 172 | |
173 | /* | | 173 | /* |
174 | * Software state for TX jobs. | | 174 | * Software state for TX jobs. |
175 | */ | | 175 | */ |
176 | struct vge_txsoft { | | 176 | struct vge_txsoft { |
177 | struct mbuf *txs_mbuf; /* head of our mbuf chain */ | | 177 | struct mbuf *txs_mbuf; /* head of our mbuf chain */ |
178 | bus_dmamap_t txs_dmamap; /* our DMA map */ | | 178 | bus_dmamap_t txs_dmamap; /* our DMA map */ |
179 | }; | | 179 | }; |
180 | | | 180 | |
181 | /* | | 181 | /* |
182 | * Software state for RX jobs. | | 182 | * Software state for RX jobs. |
183 | */ | | 183 | */ |
184 | struct vge_rxsoft { | | 184 | struct vge_rxsoft { |
185 | struct mbuf *rxs_mbuf; /* head of our mbuf chain */ | | 185 | struct mbuf *rxs_mbuf; /* head of our mbuf chain */ |
186 | bus_dmamap_t rxs_dmamap; /* our DMA map */ | | 186 | bus_dmamap_t rxs_dmamap; /* our DMA map */ |
187 | }; | | 187 | }; |
188 | | | 188 | |
189 | | | 189 | |
190 | struct vge_softc { | | 190 | struct vge_softc { |
191 | device_t sc_dev; | | 191 | device_t sc_dev; |
192 | | | 192 | |
193 | bus_space_tag_t sc_bst; /* bus space tag */ | | 193 | bus_space_tag_t sc_bst; /* bus space tag */ |
194 | bus_space_handle_t sc_bsh; /* bus space handle */ | | 194 | bus_space_handle_t sc_bsh; /* bus space handle */ |
195 | bus_dma_tag_t sc_dmat; | | 195 | bus_dma_tag_t sc_dmat; |
196 | | | 196 | |
197 | struct ethercom sc_ethercom; /* interface info */ | | 197 | struct ethercom sc_ethercom; /* interface info */ |
198 | uint8_t sc_eaddr[ETHER_ADDR_LEN]; | | 198 | uint8_t sc_eaddr[ETHER_ADDR_LEN]; |
199 | | | 199 | |
200 | void *sc_intrhand; | | 200 | void *sc_intrhand; |
201 | struct mii_data sc_mii; | | 201 | struct mii_data sc_mii; |
202 | uint8_t sc_type; | | 202 | uint8_t sc_type; |
203 | u_short sc_if_flags; | | 203 | u_short sc_if_flags; |
204 | int sc_link; | | 204 | int sc_link; |
205 | int sc_camidx; | | 205 | int sc_camidx; |
206 | callout_t sc_timeout; | | 206 | callout_t sc_timeout; |
207 | | | 207 | |
208 | bus_dmamap_t sc_cddmamap; | | 208 | bus_dmamap_t sc_cddmamap; |
209 | #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr | | 209 | #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr |
210 | | | 210 | |
211 | struct vge_txsoft sc_txsoft[VGE_NTXDESC]; | | 211 | struct vge_txsoft sc_txsoft[VGE_NTXDESC]; |
212 | struct vge_rxsoft sc_rxsoft[VGE_NRXDESC]; | | 212 | struct vge_rxsoft sc_rxsoft[VGE_NRXDESC]; |
213 | struct vge_control_data *sc_control_data; | | 213 | struct vge_control_data *sc_control_data; |
214 | #define sc_txdescs sc_control_data->vcd_txdescs | | 214 | #define sc_txdescs sc_control_data->vcd_txdescs |
215 | #define sc_rxdescs sc_control_data->vcd_rxdescs | | 215 | #define sc_rxdescs sc_control_data->vcd_rxdescs |
216 | | | 216 | |
217 | int sc_tx_prodidx; | | 217 | int sc_tx_prodidx; |
218 | int sc_tx_considx; | | 218 | int sc_tx_considx; |
219 | int sc_tx_free; | | 219 | int sc_tx_free; |
220 | | | 220 | |
221 | struct mbuf *sc_rx_mhead; | | 221 | struct mbuf *sc_rx_mhead; |
222 | struct mbuf *sc_rx_mtail; | | 222 | struct mbuf *sc_rx_mtail; |
223 | int sc_rx_prodidx; | | 223 | int sc_rx_prodidx; |
224 | int sc_rx_consumed; | | 224 | int sc_rx_consumed; |
225 | | | 225 | |
226 | int sc_suspended; /* 0 = normal 1 = suspended */ | | 226 | int sc_suspended; /* 0 = normal 1 = suspended */ |
227 | uint32_t sc_saved_maps[5]; /* pci data */ | | 227 | uint32_t sc_saved_maps[5]; /* pci data */ |
228 | uint32_t sc_saved_biosaddr; | | 228 | uint32_t sc_saved_biosaddr; |
229 | uint8_t sc_saved_intline; | | 229 | uint8_t sc_saved_intline; |
230 | uint8_t sc_saved_cachelnsz; | | 230 | uint8_t sc_saved_cachelnsz; |
231 | uint8_t sc_saved_lattimer; | | 231 | uint8_t sc_saved_lattimer; |
232 | }; | | 232 | }; |
233 | | | 233 | |
234 | #define VGE_CDTXADDR(sc, x) ((sc)->sc_cddma + VGE_CDTXOFF(x)) | | 234 | #define VGE_CDTXADDR(sc, x) ((sc)->sc_cddma + VGE_CDTXOFF(x)) |
235 | #define VGE_CDRXADDR(sc, x) ((sc)->sc_cddma + VGE_CDRXOFF(x)) | | 235 | #define VGE_CDRXADDR(sc, x) ((sc)->sc_cddma + VGE_CDRXOFF(x)) |
236 | #define VGE_CDPADADDR(sc) ((sc)->sc_cddma + VGE_CDPADOFF()) | | 236 | #define VGE_CDPADADDR(sc) ((sc)->sc_cddma + VGE_CDPADOFF()) |
237 | | | 237 | |
238 | #define VGE_TXDESCSYNC(sc, idx, ops) \ | | 238 | #define VGE_TXDESCSYNC(sc, idx, ops) \ |
239 | bus_dmamap_sync((sc)->sc_dmat,(sc)->sc_cddmamap, \ | | 239 | bus_dmamap_sync((sc)->sc_dmat,(sc)->sc_cddmamap, \ |
240 | VGE_CDTXOFF(idx), \ | | 240 | VGE_CDTXOFF(idx), \ |
241 | offsetof(struct vge_txdesc, td_frag[0]), \ | | 241 | offsetof(struct vge_txdesc, td_frag[0]), \ |
242 | (ops)) | | 242 | (ops)) |
243 | #define VGE_TXFRAGSYNC(sc, idx, nsegs, ops) \ | | 243 | #define VGE_TXFRAGSYNC(sc, idx, nsegs, ops) \ |
244 | bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ | | 244 | bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ |
245 | VGE_CDTXOFF(idx) + \ | | 245 | VGE_CDTXOFF(idx) + \ |
246 | offsetof(struct vge_txdesc, td_frag[0]), \ | | 246 | offsetof(struct vge_txdesc, td_frag[0]), \ |
247 | sizeof(struct vge_txfrag) * (nsegs), \ | | 247 | sizeof(struct vge_txfrag) * (nsegs), \ |
248 | (ops)) | | 248 | (ops)) |
249 | #define VGE_RXDESCSYNC(sc, idx, ops) \ | | 249 | #define VGE_RXDESCSYNC(sc, idx, ops) \ |
250 | bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ | | 250 | bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ |
251 | VGE_CDRXOFF(idx), \ | | 251 | VGE_CDRXOFF(idx), \ |
252 | sizeof(struct vge_rxdesc), \ | | 252 | sizeof(struct vge_rxdesc), \ |
253 | (ops)) | | 253 | (ops)) |
254 | | | 254 | |
255 | /* | | 255 | /* |
256 | * register space access macros | | 256 | * register space access macros |
257 | */ | | 257 | */ |
258 | #define CSR_WRITE_4(sc, reg, val) \ | | 258 | #define CSR_WRITE_4(sc, reg, val) \ |
259 | bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val)) | | 259 | bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val)) |
260 | #define CSR_WRITE_2(sc, reg, val) \ | | 260 | #define CSR_WRITE_2(sc, reg, val) \ |
261 | bus_space_write_2((sc)->sc_bst, (sc)->sc_bsh, (reg), (val)) | | 261 | bus_space_write_2((sc)->sc_bst, (sc)->sc_bsh, (reg), (val)) |
262 | #define CSR_WRITE_1(sc, reg, val) \ | | 262 | #define CSR_WRITE_1(sc, reg, val) \ |
263 | bus_space_write_1((sc)->sc_bst, (sc)->sc_bsh, (reg), (val)) | | 263 | bus_space_write_1((sc)->sc_bst, (sc)->sc_bsh, (reg), (val)) |
264 | | | 264 | |
265 | #define CSR_READ_4(sc, reg) \ | | 265 | #define CSR_READ_4(sc, reg) \ |
266 | bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg)) | | 266 | bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg)) |
267 | #define CSR_READ_2(sc, reg) \ | | 267 | #define CSR_READ_2(sc, reg) \ |
268 | bus_space_read_2((sc)->sc_bst, (sc)->sc_bsh, (reg)) | | 268 | bus_space_read_2((sc)->sc_bst, (sc)->sc_bsh, (reg)) |
269 | #define CSR_READ_1(sc, reg) \ | | 269 | #define CSR_READ_1(sc, reg) \ |
270 | bus_space_read_1((sc)->sc_bst, (sc)->sc_bsh, (reg)) | | 270 | bus_space_read_1((sc)->sc_bst, (sc)->sc_bsh, (reg)) |
271 | | | 271 | |
272 | #define CSR_SETBIT_1(sc, reg, x) \ | | 272 | #define CSR_SETBIT_1(sc, reg, x) \ |
273 | CSR_WRITE_1((sc), (reg), CSR_READ_1((sc), (reg)) | (x)) | | 273 | CSR_WRITE_1((sc), (reg), CSR_READ_1((sc), (reg)) | (x)) |
274 | #define CSR_SETBIT_2(sc, reg, x) \ | | 274 | #define CSR_SETBIT_2(sc, reg, x) \ |
275 | CSR_WRITE_2((sc), (reg), CSR_READ_2((sc), (reg)) | (x)) | | 275 | CSR_WRITE_2((sc), (reg), CSR_READ_2((sc), (reg)) | (x)) |
276 | #define CSR_SETBIT_4(sc, reg, x) \ | | 276 | #define CSR_SETBIT_4(sc, reg, x) \ |
277 | CSR_WRITE_4((sc), (reg), CSR_READ_4((sc), (reg)) | (x)) | | 277 | CSR_WRITE_4((sc), (reg), CSR_READ_4((sc), (reg)) | (x)) |
278 | | | 278 | |
279 | #define CSR_CLRBIT_1(sc, reg, x) \ | | 279 | #define CSR_CLRBIT_1(sc, reg, x) \ |
280 | CSR_WRITE_1((sc), (reg), CSR_READ_1((sc), (reg)) & ~(x)) | | 280 | CSR_WRITE_1((sc), (reg), CSR_READ_1((sc), (reg)) & ~(x)) |
281 | #define CSR_CLRBIT_2(sc, reg, x) \ | | 281 | #define CSR_CLRBIT_2(sc, reg, x) \ |
282 | CSR_WRITE_2((sc), (reg), CSR_READ_2((sc), (reg)) & ~(x)) | | 282 | CSR_WRITE_2((sc), (reg), CSR_READ_2((sc), (reg)) & ~(x)) |
283 | #define CSR_CLRBIT_4(sc, reg, x) \ | | 283 | #define CSR_CLRBIT_4(sc, reg, x) \ |
284 | CSR_WRITE_4((sc), (reg), CSR_READ_4((sc), (reg)) & ~(x)) | | 284 | CSR_WRITE_4((sc), (reg), CSR_READ_4((sc), (reg)) & ~(x)) |
285 | | | 285 | |
286 | #define VGE_TIMEOUT 10000 | | 286 | #define VGE_TIMEOUT 10000 |
287 | | | 287 | |
288 | #define VGE_PCI_LOIO 0x10 | | 288 | #define VGE_PCI_LOIO 0x10 |
289 | #define VGE_PCI_LOMEM 0x14 | | 289 | #define VGE_PCI_LOMEM 0x14 |
290 | | | 290 | |
291 | static inline void vge_set_txaddr(struct vge_txfrag *, bus_addr_t); | | 291 | static inline void vge_set_txaddr(struct vge_txfrag *, bus_addr_t); |
292 | static inline void vge_set_rxaddr(struct vge_rxdesc *, bus_addr_t); | | 292 | static inline void vge_set_rxaddr(struct vge_rxdesc *, bus_addr_t); |
293 | | | 293 | |
294 | static int vge_ifflags_cb(struct ethercom *); | | 294 | static int vge_ifflags_cb(struct ethercom *); |
295 | | | 295 | |
296 | static int vge_match(device_t, cfdata_t, void *); | | 296 | static int vge_match(device_t, cfdata_t, void *); |
297 | static void vge_attach(device_t, device_t, void *); | | 297 | static void vge_attach(device_t, device_t, void *); |
298 | | | 298 | |
299 | static int vge_encap(struct vge_softc *, struct mbuf *, int); | | 299 | static int vge_encap(struct vge_softc *, struct mbuf *, int); |
300 | | | 300 | |
301 | static int vge_allocmem(struct vge_softc *); | | 301 | static int vge_allocmem(struct vge_softc *); |
302 | static int vge_newbuf(struct vge_softc *, int, struct mbuf *); | | 302 | static int vge_newbuf(struct vge_softc *, int, struct mbuf *); |
303 | #ifndef __NO_STRICT_ALIGNMENT | | 303 | #ifndef __NO_STRICT_ALIGNMENT |
304 | static inline void vge_fixup_rx(struct mbuf *); | | 304 | static inline void vge_fixup_rx(struct mbuf *); |
305 | #endif | | 305 | #endif |
306 | static void vge_rxeof(struct vge_softc *); | | 306 | static void vge_rxeof(struct vge_softc *); |
307 | static void vge_txeof(struct vge_softc *); | | 307 | static void vge_txeof(struct vge_softc *); |
308 | static int vge_intr(void *); | | 308 | static int vge_intr(void *); |
309 | static void vge_tick(void *); | | 309 | static void vge_tick(void *); |
310 | static void vge_start(struct ifnet *); | | 310 | static void vge_start(struct ifnet *); |
311 | static int vge_ioctl(struct ifnet *, u_long, void *); | | 311 | static int vge_ioctl(struct ifnet *, u_long, void *); |
312 | static int vge_init(struct ifnet *); | | 312 | static int vge_init(struct ifnet *); |
313 | static void vge_stop(struct ifnet *, int); | | 313 | static void vge_stop(struct ifnet *, int); |
314 | static void vge_watchdog(struct ifnet *); | | 314 | static void vge_watchdog(struct ifnet *); |
315 | #if VGE_POWER_MANAGEMENT | | 315 | #if VGE_POWER_MANAGEMENT |
316 | static int vge_suspend(device_t); | | 316 | static int vge_suspend(device_t); |
317 | static int vge_resume(device_t); | | 317 | static int vge_resume(device_t); |
318 | #endif | | 318 | #endif |
319 | static bool vge_shutdown(device_t, int); | | 319 | static bool vge_shutdown(device_t, int); |
320 | | | 320 | |
321 | static uint16_t vge_read_eeprom(struct vge_softc *, int); | | 321 | static uint16_t vge_read_eeprom(struct vge_softc *, int); |
322 | | | 322 | |
323 | static void vge_miipoll_start(struct vge_softc *); | | 323 | static void vge_miipoll_start(struct vge_softc *); |
324 | static void vge_miipoll_stop(struct vge_softc *); | | 324 | static void vge_miipoll_stop(struct vge_softc *); |
325 | static int vge_miibus_readreg(device_t, int, int, uint16_t *); | | 325 | static int vge_miibus_readreg(device_t, int, int, uint16_t *); |
326 | static int vge_miibus_writereg(device_t, int, int, uint16_t); | | 326 | static int vge_miibus_writereg(device_t, int, int, uint16_t); |
327 | static void vge_miibus_statchg(struct ifnet *); | | 327 | static void vge_miibus_statchg(struct ifnet *); |
328 | | | 328 | |
329 | static void vge_cam_clear(struct vge_softc *); | | 329 | static void vge_cam_clear(struct vge_softc *); |
330 | static int vge_cam_set(struct vge_softc *, uint8_t *); | | 330 | static int vge_cam_set(struct vge_softc *, uint8_t *); |
| | | 331 | static void vge_clrwol(struct vge_softc *); |
331 | static void vge_setmulti(struct vge_softc *); | | 332 | static void vge_setmulti(struct vge_softc *); |
332 | static void vge_reset(struct vge_softc *); | | 333 | static void vge_reset(struct vge_softc *); |
333 | | | 334 | |
334 | CFATTACH_DECL_NEW(vge, sizeof(struct vge_softc), | | 335 | CFATTACH_DECL_NEW(vge, sizeof(struct vge_softc), |
335 | vge_match, vge_attach, NULL, NULL); | | 336 | vge_match, vge_attach, NULL, NULL); |
336 | | | 337 | |
337 | static inline void | | 338 | static inline void |
338 | vge_set_txaddr(struct vge_txfrag *f, bus_addr_t daddr) | | 339 | vge_set_txaddr(struct vge_txfrag *f, bus_addr_t daddr) |
339 | { | | 340 | { |
340 | | | 341 | |
341 | f->tf_addrlo = htole32((uint32_t)daddr); | | 342 | f->tf_addrlo = htole32((uint32_t)daddr); |
342 | if (sizeof(bus_addr_t) == sizeof(uint64_t)) | | 343 | if (sizeof(bus_addr_t) == sizeof(uint64_t)) |
343 | f->tf_addrhi = htole16(((uint64_t)daddr >> 32) & 0xFFFF); | | 344 | f->tf_addrhi = htole16(((uint64_t)daddr >> 32) & 0xFFFF); |
344 | else | | 345 | else |
345 | f->tf_addrhi = 0; | | 346 | f->tf_addrhi = 0; |
346 | } | | 347 | } |
347 | | | 348 | |
348 | static inline void | | 349 | static inline void |
349 | vge_set_rxaddr(struct vge_rxdesc *rxd, bus_addr_t daddr) | | 350 | vge_set_rxaddr(struct vge_rxdesc *rxd, bus_addr_t daddr) |
350 | { | | 351 | { |
351 | | | 352 | |
352 | rxd->rd_addrlo = htole32((uint32_t)daddr); | | 353 | rxd->rd_addrlo = htole32((uint32_t)daddr); |
353 | if (sizeof(bus_addr_t) == sizeof(uint64_t)) | | 354 | if (sizeof(bus_addr_t) == sizeof(uint64_t)) |
354 | rxd->rd_addrhi = htole16(((uint64_t)daddr >> 32) & 0xFFFF); | | 355 | rxd->rd_addrhi = htole16(((uint64_t)daddr >> 32) & 0xFFFF); |
355 | else | | 356 | else |
356 | rxd->rd_addrhi = 0; | | 357 | rxd->rd_addrhi = 0; |
357 | } | | 358 | } |
358 | | | 359 | |
359 | /* | | 360 | /* |
360 | * Read a word of data stored in the EEPROM at address 'addr.' | | 361 | * Read a word of data stored in the EEPROM at address 'addr.' |
361 | */ | | 362 | */ |
362 | static uint16_t | | 363 | static uint16_t |
363 | vge_read_eeprom(struct vge_softc *sc, int addr) | | 364 | vge_read_eeprom(struct vge_softc *sc, int addr) |
364 | { | | 365 | { |
365 | int i; | | 366 | int i; |
366 | uint16_t word = 0; | | 367 | uint16_t word = 0; |
367 | | | 368 | |
368 | /* | | 369 | /* |
369 | * Enter EEPROM embedded programming mode. In order to | | 370 | * Enter EEPROM embedded programming mode. In order to |
370 | * access the EEPROM at all, we first have to set the | | 371 | * access the EEPROM at all, we first have to set the |
371 | * EELOAD bit in the CHIPCFG2 register. | | 372 | * EELOAD bit in the CHIPCFG2 register. |
372 | */ | | 373 | */ |
373 | CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD); | | 374 | CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD); |
374 | CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*| VGE_EECSR_ECS*/); | | 375 | CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*| VGE_EECSR_ECS*/); |
375 | | | 376 | |
376 | /* Select the address of the word we want to read */ | | 377 | /* Select the address of the word we want to read */ |
377 | CSR_WRITE_1(sc, VGE_EEADDR, addr); | | 378 | CSR_WRITE_1(sc, VGE_EEADDR, addr); |
378 | | | 379 | |
379 | /* Issue read command */ | | 380 | /* Issue read command */ |
380 | CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD); | | 381 | CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD); |
381 | | | 382 | |
382 | /* Wait for the done bit to be set. */ | | 383 | /* Wait for the done bit to be set. */ |
383 | for (i = 0; i < VGE_TIMEOUT; i++) { | | 384 | for (i = 0; i < VGE_TIMEOUT; i++) { |
384 | if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE) | | 385 | if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE) |
385 | break; | | 386 | break; |
386 | } | | 387 | } |
387 | | | 388 | |
388 | if (i == VGE_TIMEOUT) { | | 389 | if (i == VGE_TIMEOUT) { |
389 | printf("%s: EEPROM read timed out\n", device_xname(sc->sc_dev)); | | 390 | printf("%s: EEPROM read timed out\n", device_xname(sc->sc_dev)); |
390 | return 0; | | 391 | return 0; |
391 | } | | 392 | } |
392 | | | 393 | |
393 | /* Read the result */ | | 394 | /* Read the result */ |
394 | word = CSR_READ_2(sc, VGE_EERDDAT); | | 395 | word = CSR_READ_2(sc, VGE_EERDDAT); |
395 | | | 396 | |
396 | /* Turn off EEPROM access mode. */ | | 397 | /* Turn off EEPROM access mode. */ |
397 | CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*| VGE_EECSR_ECS*/); | | 398 | CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*| VGE_EECSR_ECS*/); |
398 | CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD); | | 399 | CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD); |
399 | | | 400 | |
400 | return word; | | 401 | return word; |
401 | } | | 402 | } |
402 | | | 403 | |
403 | static void | | 404 | static void |
404 | vge_miipoll_stop(struct vge_softc *sc) | | 405 | vge_miipoll_stop(struct vge_softc *sc) |
405 | { | | 406 | { |
406 | int i; | | 407 | int i; |
407 | | | 408 | |
408 | CSR_WRITE_1(sc, VGE_MIICMD, 0); | | 409 | CSR_WRITE_1(sc, VGE_MIICMD, 0); |
409 | | | 410 | |
410 | for (i = 0; i < VGE_TIMEOUT; i++) { | | 411 | for (i = 0; i < VGE_TIMEOUT; i++) { |
411 | DELAY(1); | | 412 | DELAY(1); |
412 | if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) | | 413 | if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) |
413 | break; | | 414 | break; |
414 | } | | 415 | } |
415 | | | 416 | |
416 | if (i == VGE_TIMEOUT) { | | 417 | if (i == VGE_TIMEOUT) { |
417 | printf("%s: failed to idle MII autopoll\n", | | 418 | printf("%s: failed to idle MII autopoll\n", |
418 | device_xname(sc->sc_dev)); | | 419 | device_xname(sc->sc_dev)); |
419 | } | | 420 | } |
420 | } | | 421 | } |
421 | | | 422 | |
422 | static void | | 423 | static void |
423 | vge_miipoll_start(struct vge_softc *sc) | | 424 | vge_miipoll_start(struct vge_softc *sc) |
424 | { | | 425 | { |
425 | int i; | | 426 | int i; |
426 | | | 427 | |
427 | /* First, make sure we're idle. */ | | 428 | /* First, make sure we're idle. */ |
428 | | | 429 | |
429 | CSR_WRITE_1(sc, VGE_MIICMD, 0); | | 430 | CSR_WRITE_1(sc, VGE_MIICMD, 0); |
430 | CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL); | | 431 | CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL); |
431 | | | 432 | |
432 | for (i = 0; i < VGE_TIMEOUT; i++) { | | 433 | for (i = 0; i < VGE_TIMEOUT; i++) { |
433 | DELAY(1); | | 434 | DELAY(1); |
434 | if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) | | 435 | if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) |
435 | break; | | 436 | break; |
436 | } | | 437 | } |
437 | | | 438 | |
438 | if (i == VGE_TIMEOUT) { | | 439 | if (i == VGE_TIMEOUT) { |
439 | printf("%s: failed to idle MII autopoll\n", | | 440 | printf("%s: failed to idle MII autopoll\n", |
440 | device_xname(sc->sc_dev)); | | 441 | device_xname(sc->sc_dev)); |
441 | return; | | 442 | return; |
442 | } | | 443 | } |
443 | | | 444 | |
444 | /* Now enable auto poll mode. */ | | 445 | /* Now enable auto poll mode. */ |
445 | | | 446 | |
446 | CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO); | | 447 | CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO); |
447 | | | 448 | |
448 | /* And make sure it started. */ | | 449 | /* And make sure it started. */ |
449 | | | 450 | |
450 | for (i = 0; i < VGE_TIMEOUT; i++) { | | 451 | for (i = 0; i < VGE_TIMEOUT; i++) { |
451 | DELAY(1); | | 452 | DELAY(1); |
452 | if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0) | | 453 | if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0) |
453 | break; | | 454 | break; |
454 | } | | 455 | } |
455 | | | 456 | |
456 | if (i == VGE_TIMEOUT) { | | 457 | if (i == VGE_TIMEOUT) { |
457 | printf("%s: failed to start MII autopoll\n", | | 458 | printf("%s: failed to start MII autopoll\n", |
458 | device_xname(sc->sc_dev)); | | 459 | device_xname(sc->sc_dev)); |
459 | } | | 460 | } |
460 | } | | 461 | } |
461 | | | 462 | |
462 | static int | | 463 | static int |
463 | vge_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val) | | 464 | vge_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val) |
464 | { | | 465 | { |
465 | struct vge_softc *sc; | | 466 | struct vge_softc *sc; |
466 | int i, s; | | 467 | int i, s; |
467 | int rv = 0; | | 468 | int rv = 0; |
468 | | | 469 | |
469 | sc = device_private(dev); | | 470 | sc = device_private(dev); |
470 | if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F)) | | 471 | if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F)) |
471 | return -1; | | 472 | return -1; |
472 | | | 473 | |
473 | s = splnet(); | | 474 | s = splnet(); |
474 | vge_miipoll_stop(sc); | | 475 | vge_miipoll_stop(sc); |
475 | | | 476 | |
476 | /* Specify the register we want to read. */ | | 477 | /* Specify the register we want to read. */ |
477 | CSR_WRITE_1(sc, VGE_MIIADDR, reg); | | 478 | CSR_WRITE_1(sc, VGE_MIIADDR, reg); |
478 | | | 479 | |
479 | /* Issue read command. */ | | 480 | /* Issue read command. */ |
480 | CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD); | | 481 | CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD); |
481 | | | 482 | |
482 | /* Wait for the read command bit to self-clear. */ | | 483 | /* Wait for the read command bit to self-clear. */ |
483 | for (i = 0; i < VGE_TIMEOUT; i++) { | | 484 | for (i = 0; i < VGE_TIMEOUT; i++) { |
484 | DELAY(1); | | 485 | DELAY(1); |
485 | if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0) | | 486 | if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0) |
486 | break; | | 487 | break; |
487 | } | | 488 | } |
488 | | | 489 | |
489 | if (i == VGE_TIMEOUT) { | | 490 | if (i == VGE_TIMEOUT) { |
490 | printf("%s: MII read timed out\n", device_xname(sc->sc_dev)); | | 491 | printf("%s: MII read timed out\n", device_xname(sc->sc_dev)); |
491 | rv = ETIMEDOUT; | | 492 | rv = ETIMEDOUT; |
492 | } else | | 493 | } else |
493 | *val = CSR_READ_2(sc, VGE_MIIDATA); | | 494 | *val = CSR_READ_2(sc, VGE_MIIDATA); |
494 | | | 495 | |
495 | vge_miipoll_start(sc); | | 496 | vge_miipoll_start(sc); |
496 | splx(s); | | 497 | splx(s); |
497 | | | 498 | |
498 | return rv; | | 499 | return rv; |
499 | } | | 500 | } |
500 | | | 501 | |
501 | static int | | 502 | static int |
502 | vge_miibus_writereg(device_t dev, int phy, int reg, uint16_t val) | | 503 | vge_miibus_writereg(device_t dev, int phy, int reg, uint16_t val) |
503 | { | | 504 | { |
504 | struct vge_softc *sc; | | 505 | struct vge_softc *sc; |
505 | int i, s, rv = 0; | | 506 | int i, s, rv = 0; |
506 | | | 507 | |
507 | sc = device_private(dev); | | 508 | sc = device_private(dev); |
508 | if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F)) | | 509 | if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F)) |
509 | return -1; | | 510 | return -1; |
510 | | | 511 | |
511 | s = splnet(); | | 512 | s = splnet(); |
512 | vge_miipoll_stop(sc); | | 513 | vge_miipoll_stop(sc); |
513 | | | 514 | |
514 | /* Specify the register we want to write. */ | | 515 | /* Specify the register we want to write. */ |
515 | CSR_WRITE_1(sc, VGE_MIIADDR, reg); | | 516 | CSR_WRITE_1(sc, VGE_MIIADDR, reg); |
516 | | | 517 | |
517 | /* Specify the data we want to write. */ | | 518 | /* Specify the data we want to write. */ |
518 | CSR_WRITE_2(sc, VGE_MIIDATA, val); | | 519 | CSR_WRITE_2(sc, VGE_MIIDATA, val); |
519 | | | 520 | |
520 | /* Issue write command. */ | | 521 | /* Issue write command. */ |
521 | CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD); | | 522 | CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD); |
522 | | | 523 | |
523 | /* Wait for the write command bit to self-clear. */ | | 524 | /* Wait for the write command bit to self-clear. */ |
524 | for (i = 0; i < VGE_TIMEOUT; i++) { | | 525 | for (i = 0; i < VGE_TIMEOUT; i++) { |
525 | DELAY(1); | | 526 | DELAY(1); |
526 | if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0) | | 527 | if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0) |
527 | break; | | 528 | break; |
528 | } | | 529 | } |
529 | | | 530 | |
530 | if (i == VGE_TIMEOUT) { | | 531 | if (i == VGE_TIMEOUT) { |
531 | printf("%s: MII write timed out\n", device_xname(sc->sc_dev)); | | 532 | printf("%s: MII write timed out\n", device_xname(sc->sc_dev)); |
532 | rv = ETIMEDOUT; | | 533 | rv = ETIMEDOUT; |
533 | } | | 534 | } |
534 | | | 535 | |
535 | vge_miipoll_start(sc); | | 536 | vge_miipoll_start(sc); |
536 | splx(s); | | 537 | splx(s); |
537 | | | 538 | |
538 | return rv; | | 539 | return rv; |
539 | } | | 540 | } |
540 | | | 541 | |
541 | static void | | 542 | static void |
542 | vge_cam_clear(struct vge_softc *sc) | | 543 | vge_cam_clear(struct vge_softc *sc) |
543 | { | | 544 | { |
544 | int i; | | 545 | int i; |
545 | | | 546 | |
546 | /* | | 547 | /* |
547 | * Turn off all the mask bits. This tells the chip | | 548 | * Turn off all the mask bits. This tells the chip |
548 | * that none of the entries in the CAM filter are valid. | | 549 | * that none of the entries in the CAM filter are valid. |
549 | * desired entries will be enabled as we fill the filter in. | | 550 | * desired entries will be enabled as we fill the filter in. |
550 | */ | | 551 | */ |
551 | | | 552 | |
552 | CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); | | 553 | CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); |
553 | CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK); | | 554 | CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK); |
554 | CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE); | | 555 | CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE); |
555 | for (i = 0; i < 8; i++) | | 556 | for (i = 0; i < 8; i++) |
556 | CSR_WRITE_1(sc, VGE_CAM0 + i, 0); | | 557 | CSR_WRITE_1(sc, VGE_CAM0 + i, 0); |
557 | | | 558 | |
558 | /* Clear the VLAN filter too. */ | | 559 | /* Clear the VLAN filter too. */ |
559 | | | 560 | |
560 | CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE | VGE_CAMADDR_AVSEL); | | 561 | CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE | VGE_CAMADDR_AVSEL); |
561 | for (i = 0; i < 8; i++) | | 562 | for (i = 0; i < 8; i++) |
562 | CSR_WRITE_1(sc, VGE_CAM0 + i, 0); | | 563 | CSR_WRITE_1(sc, VGE_CAM0 + i, 0); |
563 | | | 564 | |
564 | CSR_WRITE_1(sc, VGE_CAMADDR, 0); | | 565 | CSR_WRITE_1(sc, VGE_CAMADDR, 0); |
565 | CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); | | 566 | CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); |
566 | CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); | | 567 | CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); |
567 | | | 568 | |
568 | sc->sc_camidx = 0; | | 569 | sc->sc_camidx = 0; |
569 | } | | 570 | } |
570 | | | 571 | |
571 | static int | | 572 | static int |
572 | vge_cam_set(struct vge_softc *sc, uint8_t *addr) | | 573 | vge_cam_set(struct vge_softc *sc, uint8_t *addr) |
573 | { | | 574 | { |
574 | int i, error; | | 575 | int i, error; |
575 | | | 576 | |
576 | error = 0; | | 577 | error = 0; |
577 | | | 578 | |
578 | if (sc->sc_camidx == VGE_CAM_MAXADDRS) | | 579 | if (sc->sc_camidx == VGE_CAM_MAXADDRS) |
579 | return ENOSPC; | | 580 | return ENOSPC; |
580 | | | 581 | |
581 | /* Select the CAM data page. */ | | 582 | /* Select the CAM data page. */ |
582 | CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); | | 583 | CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); |
583 | CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA); | | 584 | CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA); |
584 | | | 585 | |
585 | /* Set the filter entry we want to update and enable writing. */ | | 586 | /* Set the filter entry we want to update and enable writing. */ |
586 | CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE | sc->sc_camidx); | | 587 | CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE | sc->sc_camidx); |
587 | | | 588 | |
588 | /* Write the address to the CAM registers */ | | 589 | /* Write the address to the CAM registers */ |
589 | for (i = 0; i < ETHER_ADDR_LEN; i++) | | 590 | for (i = 0; i < ETHER_ADDR_LEN; i++) |
590 | CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]); | | 591 | CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]); |
591 | | | 592 | |
592 | /* Issue a write command. */ | | 593 | /* Issue a write command. */ |
593 | CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE); | | 594 | CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE); |
594 | | | 595 | |
595 | /* Wake for it to clear. */ | | 596 | /* Wake for it to clear. */ |
596 | for (i = 0; i < VGE_TIMEOUT; i++) { | | 597 | for (i = 0; i < VGE_TIMEOUT; i++) { |
597 | DELAY(1); | | 598 | DELAY(1); |
598 | if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0) | | 599 | if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0) |
599 | break; | | 600 | break; |
600 | } | | 601 | } |
601 | | | 602 | |
602 | if (i == VGE_TIMEOUT) { | | 603 | if (i == VGE_TIMEOUT) { |
603 | printf("%s: setting CAM filter failed\n", | | 604 | printf("%s: setting CAM filter failed\n", |
604 | device_xname(sc->sc_dev)); | | 605 | device_xname(sc->sc_dev)); |
605 | error = EIO; | | 606 | error = EIO; |
606 | goto fail; | | 607 | goto fail; |
607 | } | | 608 | } |
608 | | | 609 | |
609 | /* Select the CAM mask page. */ | | 610 | /* Select the CAM mask page. */ |
610 | CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); | | 611 | CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); |
611 | CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK); | | 612 | CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK); |
612 | | | 613 | |
613 | /* Set the mask bit that enables this filter. */ | | 614 | /* Set the mask bit that enables this filter. */ |
614 | CSR_SETBIT_1(sc, VGE_CAM0 + (sc->sc_camidx / 8), | | 615 | CSR_SETBIT_1(sc, VGE_CAM0 + (sc->sc_camidx / 8), |
615 | 1 << (sc->sc_camidx & 7)); | | 616 | 1 << (sc->sc_camidx & 7)); |
616 | | | 617 | |
617 | sc->sc_camidx++; | | 618 | sc->sc_camidx++; |
618 | | | 619 | |
619 | fail: | | 620 | fail: |
620 | /* Turn off access to CAM. */ | | 621 | /* Turn off access to CAM. */ |
621 | CSR_WRITE_1(sc, VGE_CAMADDR, 0); | | 622 | CSR_WRITE_1(sc, VGE_CAMADDR, 0); |
622 | CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); | | 623 | CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); |
623 | CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); | | 624 | CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); |
624 | | | 625 | |
625 | return error; | | 626 | return error; |
626 | } | | 627 | } |
627 | | | 628 | |
628 | /* | | 629 | /* |
629 | * Program the multicast filter. We use the 64-entry CAM filter | | 630 | * Program the multicast filter. We use the 64-entry CAM filter |
630 | * for perfect filtering. If there's more than 64 multicast addresses, | | 631 | * for perfect filtering. If there's more than 64 multicast addresses, |
631 | * we use the hash filter instead. | | 632 | * we use the hash filter instead. |
632 | */ | | 633 | */ |
633 | static void | | 634 | static void |
634 | vge_setmulti(struct vge_softc *sc) | | 635 | vge_setmulti(struct vge_softc *sc) |
635 | { | | 636 | { |
636 | struct ethercom *ec = &sc->sc_ethercom; | | 637 | struct ethercom *ec = &sc->sc_ethercom; |
637 | struct ifnet *ifp = &ec->ec_if; | | 638 | struct ifnet *ifp = &ec->ec_if; |
638 | int error; | | 639 | int error; |
639 | uint32_t h, hashes[2] = { 0, 0 }; | | 640 | uint32_t h, hashes[2] = { 0, 0 }; |
640 | struct ether_multi *enm; | | 641 | struct ether_multi *enm; |
641 | struct ether_multistep step; | | 642 | struct ether_multistep step; |
642 | | | 643 | |
643 | error = 0; | | 644 | error = 0; |
644 | | | 645 | |
645 | /* First, zot all the multicast entries. */ | | 646 | /* First, zot all the multicast entries. */ |
646 | vge_cam_clear(sc); | | 647 | vge_cam_clear(sc); |
647 | CSR_WRITE_4(sc, VGE_MAR0, 0); | | 648 | CSR_WRITE_4(sc, VGE_MAR0, 0); |
648 | CSR_WRITE_4(sc, VGE_MAR1, 0); | | 649 | CSR_WRITE_4(sc, VGE_MAR1, 0); |
649 | ifp->if_flags &= ~IFF_ALLMULTI; | | 650 | ifp->if_flags &= ~IFF_ALLMULTI; |
650 | | | 651 | |
651 | /* | | 652 | /* |
652 | * If the user wants allmulti or promisc mode, enable reception | | 653 | * If the user wants allmulti or promisc mode, enable reception |
653 | * of all multicast frames. | | 654 | * of all multicast frames. |
654 | */ | | 655 | */ |
655 | if (ifp->if_flags & IFF_PROMISC) { | | 656 | if (ifp->if_flags & IFF_PROMISC) { |
656 | allmulti: | | 657 | allmulti: |
657 | CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF); | | 658 | CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF); |
658 | CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF); | | 659 | CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF); |
659 | ifp->if_flags |= IFF_ALLMULTI; | | 660 | ifp->if_flags |= IFF_ALLMULTI; |
660 | return; | | 661 | return; |
661 | } | | 662 | } |
662 | | | 663 | |
663 | /* Now program new ones */ | | 664 | /* Now program new ones */ |
664 | ETHER_LOCK(ec); | | 665 | ETHER_LOCK(ec); |
665 | ETHER_FIRST_MULTI(step, ec, enm); | | 666 | ETHER_FIRST_MULTI(step, ec, enm); |
666 | while (enm != NULL) { | | 667 | while (enm != NULL) { |
667 | /* | | 668 | /* |
668 | * If multicast range, fall back to ALLMULTI. | | 669 | * If multicast range, fall back to ALLMULTI. |
669 | */ | | 670 | */ |
670 | if (memcmp(enm->enm_addrlo, enm->enm_addrhi, | | 671 | if (memcmp(enm->enm_addrlo, enm->enm_addrhi, |
671 | ETHER_ADDR_LEN) != 0) { | | 672 | ETHER_ADDR_LEN) != 0) { |
672 | ETHER_UNLOCK(ec); | | 673 | ETHER_UNLOCK(ec); |
673 | goto allmulti; | | 674 | goto allmulti; |
674 | } | | 675 | } |
675 | | | 676 | |
676 | error = vge_cam_set(sc, enm->enm_addrlo); | | 677 | error = vge_cam_set(sc, enm->enm_addrlo); |
677 | if (error) | | 678 | if (error) |
678 | break; | | 679 | break; |
679 | | | 680 | |
680 | ETHER_NEXT_MULTI(step, enm); | | 681 | ETHER_NEXT_MULTI(step, enm); |
681 | } | | 682 | } |
682 | ETHER_UNLOCK(ec); | | 683 | ETHER_UNLOCK(ec); |
683 | | | 684 | |
684 | /* If there were too many addresses, use the hash filter. */ | | 685 | /* If there were too many addresses, use the hash filter. */ |
685 | if (error) { | | 686 | if (error) { |
686 | vge_cam_clear(sc); | | 687 | vge_cam_clear(sc); |
687 | | | 688 | |
688 | ETHER_LOCK(ec); | | 689 | ETHER_LOCK(ec); |
689 | ETHER_FIRST_MULTI(step, ec, enm); | | 690 | ETHER_FIRST_MULTI(step, ec, enm); |
690 | while (enm != NULL) { | | 691 | while (enm != NULL) { |
691 | /* | | 692 | /* |
692 | * If multicast range, fall back to ALLMULTI. | | 693 | * If multicast range, fall back to ALLMULTI. |
693 | */ | | 694 | */ |
694 | if (memcmp(enm->enm_addrlo, enm->enm_addrhi, | | 695 | if (memcmp(enm->enm_addrlo, enm->enm_addrhi, |
695 | ETHER_ADDR_LEN) != 0) { | | 696 | ETHER_ADDR_LEN) != 0) { |
696 | ETHER_UNLOCK(ec); | | 697 | ETHER_UNLOCK(ec); |
697 | goto allmulti; | | 698 | goto allmulti; |
698 | } | | 699 | } |
699 | | | 700 | |
700 | h = ether_crc32_be(enm->enm_addrlo, | | 701 | h = ether_crc32_be(enm->enm_addrlo, |
701 | ETHER_ADDR_LEN) >> 26; | | 702 | ETHER_ADDR_LEN) >> 26; |
702 | hashes[h >> 5] |= 1 << (h & 0x1f); | | 703 | hashes[h >> 5] |= 1 << (h & 0x1f); |
703 | | | 704 | |
704 | ETHER_NEXT_MULTI(step, enm); | | 705 | ETHER_NEXT_MULTI(step, enm); |
705 | } | | 706 | } |
706 | ETHER_UNLOCK(ec); | | 707 | ETHER_UNLOCK(ec); |
707 | | | 708 | |
708 | CSR_WRITE_4(sc, VGE_MAR0, hashes[0]); | | 709 | CSR_WRITE_4(sc, VGE_MAR0, hashes[0]); |
709 | CSR_WRITE_4(sc, VGE_MAR1, hashes[1]); | | 710 | CSR_WRITE_4(sc, VGE_MAR1, hashes[1]); |
710 | } | | 711 | } |
711 | } | | 712 | } |
712 | | | 713 | |
713 | static void | | 714 | static void |
714 | vge_reset(struct vge_softc *sc) | | 715 | vge_reset(struct vge_softc *sc) |
715 | { | | 716 | { |
716 | int i; | | 717 | int i; |
717 | | | 718 | |
718 | CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET); | | 719 | CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET); |
719 | | | 720 | |
720 | for (i = 0; i < VGE_TIMEOUT; i++) { | | 721 | for (i = 0; i < VGE_TIMEOUT; i++) { |
721 | DELAY(5); | | 722 | DELAY(5); |
722 | if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0) | | 723 | if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0) |
723 | break; | | 724 | break; |
724 | } | | 725 | } |
725 | | | 726 | |
726 | if (i == VGE_TIMEOUT) { | | 727 | if (i == VGE_TIMEOUT) { |
727 | printf("%s: soft reset timed out", device_xname(sc->sc_dev)); | | 728 | printf("%s: soft reset timed out", device_xname(sc->sc_dev)); |
728 | CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE); | | 729 | CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE); |
729 | DELAY(2000); | | 730 | DELAY(2000); |
730 | } | | 731 | } |
731 | | | 732 | |
732 | DELAY(5000); | | 733 | DELAY(5000); |
733 | | | 734 | |
734 | CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD); | | 735 | CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD); |
735 | | | 736 | |
736 | for (i = 0; i < VGE_TIMEOUT; i++) { | | 737 | for (i = 0; i < VGE_TIMEOUT; i++) { |
737 | DELAY(5); | | 738 | DELAY(5); |
738 | if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0) | | 739 | if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0) |
739 | break; | | 740 | break; |
740 | } | | 741 | } |
741 | | | 742 | |
742 | if (i == VGE_TIMEOUT) { | | 743 | if (i == VGE_TIMEOUT) { |
743 | printf("%s: EEPROM reload timed out\n", | | 744 | printf("%s: EEPROM reload timed out\n", |
744 | device_xname(sc->sc_dev)); | | 745 | device_xname(sc->sc_dev)); |
745 | return; | | 746 | return; |
746 | } | | 747 | } |
747 | | | 748 | |
748 | /* | | 749 | /* |
749 | * On some machine, the first read data from EEPROM could be | | 750 | * On some machine, the first read data from EEPROM could be |
750 | * messed up, so read one dummy data here to avoid the mess. | | 751 | * messed up, so read one dummy data here to avoid the mess. |
751 | */ | | 752 | */ |
752 | (void)vge_read_eeprom(sc, 0); | | 753 | (void)vge_read_eeprom(sc, 0); |
753 | | | 754 | |
754 | CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI); | | 755 | CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI); |
755 | } | | 756 | } |
756 | | | 757 | |
757 | /* | | 758 | /* |
758 | * Probe for a VIA gigabit chip. Check the PCI vendor and device | | 759 | * Probe for a VIA gigabit chip. Check the PCI vendor and device |
759 | * IDs against our list and return a device name if we find a match. | | 760 | * IDs against our list and return a device name if we find a match. |
760 | */ | | 761 | */ |
761 | static int | | 762 | static int |
762 | vge_match(device_t parent, cfdata_t match, void *aux) | | 763 | vge_match(device_t parent, cfdata_t match, void *aux) |
763 | { | | 764 | { |
764 | struct pci_attach_args *pa = aux; | | 765 | struct pci_attach_args *pa = aux; |
765 | | | 766 | |
766 | if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_VIATECH | | 767 | if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_VIATECH |
767 | && PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VIATECH_VT612X) | | 768 | && PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VIATECH_VT612X) |
768 | return 1; | | 769 | return 1; |
769 | | | 770 | |
770 | return 0; | | 771 | return 0; |
771 | } | | 772 | } |
772 | | | 773 | |
/*
 * Allocate and map all DMA resources used by the driver: the shared
 * control-data block (descriptor rings) plus per-descriptor DMA maps
 * for the TX and RX buffers.  Returns 0 on success or ENOMEM, after
 * unwinding every resource acquired so far via the fail_* chain.
 */
static int
vge_allocmem(struct vge_softc *sc)
{
	int error;
	int nseg;
	int i;
	bus_dma_segment_t seg;

	/*
	 * Allocate memory for control data.
	 */

	error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct vge_control_data),
	    VGE_RING_ALIGN, 0, &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate control data dma memory\n");
		goto fail_1;
	}

	/* Map the memory to kernel VA space */

	error = bus_dmamem_map(sc->sc_dmat, &seg, nseg,
	    sizeof(struct vge_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "could not map control data dma memory\n");
		goto fail_2;
	}
	memset(sc->sc_control_data, 0, sizeof(struct vge_control_data));

	/*
	 * Create map for control data.
	 */
	error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct vge_control_data), 1,
	    sizeof(struct vge_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "could not create control data dmamap\n");
		goto fail_3;
	}

	/* Load the map for the control data. */
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct vge_control_data), NULL,
	    BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "could not load control data dma memory\n");
		goto fail_4;
	}

	/* Create DMA maps for TX buffers */

	for (i = 0; i < VGE_NTXDESC; i++) {
		error = bus_dmamap_create(sc->sc_dmat, VGE_TX_MAXLEN,
		    VGE_TX_FRAGS, VGE_TX_MAXLEN, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txsoft[i].txs_dmamap);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't create DMA map for TX descs\n");
			goto fail_5;
		}
	}

	/* Create DMA maps for RX buffers */

	for (i = 0; i < VGE_NRXDESC; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rxsoft[i].rxs_dmamap);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't create DMA map for RX descs\n");
			goto fail_6;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	return 0;

	/*
	 * Unwind in reverse order of acquisition.  The NULL checks in
	 * fail_6/fail_5 matter because the creation loops above may
	 * have filled only part of each array before failing.
	 */
 fail_6:
	for (i = 0; i < VGE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_5:
	for (i = 0; i < VGE_NTXDESC; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_4:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct vge_control_data));
 fail_2:
	bus_dmamem_free(sc->sc_dmat, &seg, nseg);
 fail_1:
	return ENOMEM;
}
880 | | | 881 | |
881 | /* | | 882 | /* |
882 | * Attach the interface. Allocate softc structures, do ifmedia | | 883 | * Attach the interface. Allocate softc structures, do ifmedia |
883 | * setup and ethernet/BPF attach. | | 884 | * setup and ethernet/BPF attach. |
884 | */ | | 885 | */ |
885 | static void | | 886 | static void |
886 | vge_attach(device_t parent, device_t self, void *aux) | | 887 | vge_attach(device_t parent, device_t self, void *aux) |
887 | { | | 888 | { |
888 | uint8_t *eaddr; | | 889 | uint8_t *eaddr; |
889 | struct vge_softc *sc = device_private(self); | | 890 | struct vge_softc *sc = device_private(self); |
890 | struct ifnet *ifp; | | 891 | struct ifnet *ifp; |
891 | struct mii_data * const mii = &sc->sc_mii; | | 892 | struct mii_data * const mii = &sc->sc_mii; |
892 | struct pci_attach_args *pa = aux; | | 893 | struct pci_attach_args *pa = aux; |
893 | pci_chipset_tag_t pc = pa->pa_pc; | | 894 | pci_chipset_tag_t pc = pa->pa_pc; |
894 | const char *intrstr; | | 895 | const char *intrstr; |
895 | pci_intr_handle_t ih; | | 896 | pci_intr_handle_t ih; |
896 | uint16_t val; | | 897 | uint16_t val; |
897 | char intrbuf[PCI_INTRSTR_LEN]; | | 898 | char intrbuf[PCI_INTRSTR_LEN]; |
898 | | | 899 | |
899 | sc->sc_dev = self; | | 900 | sc->sc_dev = self; |
900 | | | 901 | |
901 | pci_aprint_devinfo_fancy(pa, NULL, "VIA VT612X Gigabit Ethernet", 1); | | 902 | pci_aprint_devinfo_fancy(pa, NULL, "VIA VT612X Gigabit Ethernet", 1); |
902 | | | 903 | |
903 | /* Make sure bus-mastering is enabled */ | | 904 | /* Make sure bus-mastering is enabled */ |
904 | pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, | | 905 | pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, |
905 | pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) | | | 906 | pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) | |
906 | PCI_COMMAND_MASTER_ENABLE); | | 907 | PCI_COMMAND_MASTER_ENABLE); |
907 | | | 908 | |
908 | /* | | 909 | /* |
909 | * Map control/status registers. | | 910 | * Map control/status registers. |
910 | */ | | 911 | */ |
911 | if (pci_mapreg_map(pa, VGE_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0, | | 912 | if (pci_mapreg_map(pa, VGE_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0, |
912 | &sc->sc_bst, &sc->sc_bsh, NULL, NULL) != 0) { | | 913 | &sc->sc_bst, &sc->sc_bsh, NULL, NULL) != 0) { |
913 | aprint_error_dev(self, "couldn't map memory\n"); | | 914 | aprint_error_dev(self, "couldn't map memory\n"); |
914 | return; | | 915 | return; |
915 | } | | 916 | } |
916 | | | 917 | |
917 | /* | | 918 | /* |
918 | * Map and establish our interrupt. | | 919 | * Map and establish our interrupt. |
919 | */ | | 920 | */ |
920 | if (pci_intr_map(pa, &ih)) { | | 921 | if (pci_intr_map(pa, &ih)) { |
921 | aprint_error_dev(self, "unable to map interrupt\n"); | | 922 | aprint_error_dev(self, "unable to map interrupt\n"); |
922 | return; | | 923 | return; |
923 | } | | 924 | } |
924 | intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf)); | | 925 | intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf)); |
925 | sc->sc_intrhand = pci_intr_establish_xname(pc, ih, IPL_NET, vge_intr, | | 926 | sc->sc_intrhand = pci_intr_establish_xname(pc, ih, IPL_NET, vge_intr, |
926 | sc, device_xname(self)); | | 927 | sc, device_xname(self)); |
927 | if (sc->sc_intrhand == NULL) { | | 928 | if (sc->sc_intrhand == NULL) { |
928 | aprint_error_dev(self, "unable to establish interrupt"); | | 929 | aprint_error_dev(self, "unable to establish interrupt"); |
929 | if (intrstr != NULL) | | 930 | if (intrstr != NULL) |
930 | aprint_error(" at %s", intrstr); | | 931 | aprint_error(" at %s", intrstr); |
931 | aprint_error("\n"); | | 932 | aprint_error("\n"); |
932 | return; | | 933 | return; |
933 | } | | 934 | } |
934 | aprint_normal_dev(self, "interrupting at %s\n", intrstr); | | 935 | aprint_normal_dev(self, "interrupting at %s\n", intrstr); |
935 | | | 936 | |
936 | /* Reset the adapter. */ | | 937 | /* Reset the adapter. */ |
937 | vge_reset(sc); | | 938 | vge_reset(sc); |
938 | | | 939 | |
939 | /* | | 940 | /* |
940 | * Get station address from the EEPROM. | | 941 | * Get station address from the EEPROM. |
941 | */ | | 942 | */ |
942 | eaddr = sc->sc_eaddr; | | 943 | eaddr = sc->sc_eaddr; |
943 | val = vge_read_eeprom(sc, VGE_EE_EADDR + 0); | | 944 | val = vge_read_eeprom(sc, VGE_EE_EADDR + 0); |
944 | eaddr[0] = val & 0xff; | | 945 | eaddr[0] = val & 0xff; |
945 | eaddr[1] = val >> 8; | | 946 | eaddr[1] = val >> 8; |
946 | val = vge_read_eeprom(sc, VGE_EE_EADDR + 1); | | 947 | val = vge_read_eeprom(sc, VGE_EE_EADDR + 1); |
947 | eaddr[2] = val & 0xff; | | 948 | eaddr[2] = val & 0xff; |
948 | eaddr[3] = val >> 8; | | 949 | eaddr[3] = val >> 8; |
949 | val = vge_read_eeprom(sc, VGE_EE_EADDR + 2); | | 950 | val = vge_read_eeprom(sc, VGE_EE_EADDR + 2); |
950 | eaddr[4] = val & 0xff; | | 951 | eaddr[4] = val & 0xff; |
951 | eaddr[5] = val >> 8; | | 952 | eaddr[5] = val >> 8; |
952 | | | 953 | |
953 | aprint_normal_dev(self, "Ethernet address %s\n", | | 954 | aprint_normal_dev(self, "Ethernet address %s\n", |
954 | ether_sprintf(eaddr)); | | 955 | ether_sprintf(eaddr)); |
955 | | | 956 | |
| | | 957 | /* Clear WOL and take hardware from powerdown. */ |
| | | 958 | vge_clrwol(sc); |
| | | 959 | |
956 | /* | | 960 | /* |
957 | * Use the 32bit tag. Hardware supports 48bit physical addresses, | | 961 | * Use the 32bit tag. Hardware supports 48bit physical addresses, |
958 | * but we don't use that for now. | | 962 | * but we don't use that for now. |
959 | */ | | 963 | */ |
960 | sc->sc_dmat = pa->pa_dmat; | | 964 | sc->sc_dmat = pa->pa_dmat; |
961 | | | 965 | |
962 | if (vge_allocmem(sc) != 0) | | 966 | if (vge_allocmem(sc) != 0) |
963 | return; | | 967 | return; |
964 | | | 968 | |
965 | ifp = &sc->sc_ethercom.ec_if; | | 969 | ifp = &sc->sc_ethercom.ec_if; |
966 | ifp->if_softc = sc; | | 970 | ifp->if_softc = sc; |
967 | strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ); | | 971 | strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ); |
968 | ifp->if_mtu = ETHERMTU; | | 972 | ifp->if_mtu = ETHERMTU; |
969 | ifp->if_baudrate = IF_Gbps(1); | | 973 | ifp->if_baudrate = IF_Gbps(1); |
970 | ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; | | 974 | ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; |
971 | ifp->if_ioctl = vge_ioctl; | | 975 | ifp->if_ioctl = vge_ioctl; |
972 | ifp->if_start = vge_start; | | 976 | ifp->if_start = vge_start; |
973 | ifp->if_init = vge_init; | | 977 | ifp->if_init = vge_init; |
974 | ifp->if_stop = vge_stop; | | 978 | ifp->if_stop = vge_stop; |
975 | | | 979 | |
976 | /* | | 980 | /* |
977 | * We can support 802.1Q VLAN-sized frames and jumbo | | 981 | * We can support 802.1Q VLAN-sized frames and jumbo |
978 | * Ethernet frames. | | 982 | * Ethernet frames. |
979 | */ | | 983 | */ |
980 | sc->sc_ethercom.ec_capabilities |= | | 984 | sc->sc_ethercom.ec_capabilities |= |
981 | ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU | | | 985 | ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU | |
982 | ETHERCAP_VLAN_HWTAGGING; | | 986 | ETHERCAP_VLAN_HWTAGGING; |
983 | sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING; | | 987 | sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING; |
984 | | | 988 | |
985 | /* | | 989 | /* |
986 | * We can do IPv4/TCPv4/UDPv4 checksums in hardware. | | 990 | * We can do IPv4/TCPv4/UDPv4 checksums in hardware. |
987 | */ | | 991 | */ |
988 | ifp->if_capabilities |= | | 992 | ifp->if_capabilities |= |
989 | IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | | | 993 | IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | |
990 | IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | | | 994 | IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | |
991 | IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; | | 995 | IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; |
992 | | | 996 | |
993 | #ifdef DEVICE_POLLING | | 997 | #ifdef DEVICE_POLLING |
994 | #ifdef IFCAP_POLLING | | 998 | #ifdef IFCAP_POLLING |
995 | ifp->if_capabilities |= IFCAP_POLLING; | | 999 | ifp->if_capabilities |= IFCAP_POLLING; |
996 | #endif | | 1000 | #endif |
997 | #endif | | 1001 | #endif |
998 | ifp->if_watchdog = vge_watchdog; | | 1002 | ifp->if_watchdog = vge_watchdog; |
999 | IFQ_SET_MAXLEN(&ifp->if_snd, uimax(VGE_IFQ_MAXLEN, IFQ_MAXLEN)); | | 1003 | IFQ_SET_MAXLEN(&ifp->if_snd, uimax(VGE_IFQ_MAXLEN, IFQ_MAXLEN)); |
1000 | IFQ_SET_READY(&ifp->if_snd); | | 1004 | IFQ_SET_READY(&ifp->if_snd); |
1001 | | | 1005 | |
1002 | /* | | 1006 | /* |
1003 | * Initialize our media structures and probe the MII. | | 1007 | * Initialize our media structures and probe the MII. |
1004 | */ | | 1008 | */ |
1005 | mii->mii_ifp = ifp; | | 1009 | mii->mii_ifp = ifp; |
1006 | mii->mii_readreg = vge_miibus_readreg; | | 1010 | mii->mii_readreg = vge_miibus_readreg; |
1007 | mii->mii_writereg = vge_miibus_writereg; | | 1011 | mii->mii_writereg = vge_miibus_writereg; |
1008 | mii->mii_statchg = vge_miibus_statchg; | | 1012 | mii->mii_statchg = vge_miibus_statchg; |
1009 | | | 1013 | |
1010 | sc->sc_ethercom.ec_mii = mii; | | 1014 | sc->sc_ethercom.ec_mii = mii; |
1011 | ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus); | | 1015 | ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus); |
1012 | mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, | | 1016 | mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, |
1013 | MII_OFFSET_ANY, MIIF_DOPAUSE); | | 1017 | MII_OFFSET_ANY, MIIF_DOPAUSE); |
1014 | if (LIST_FIRST(&mii->mii_phys) == NULL) { | | 1018 | if (LIST_FIRST(&mii->mii_phys) == NULL) { |
1015 | ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL); | | 1019 | ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL); |
1016 | ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE); | | 1020 | ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE); |
1017 | } else | | 1021 | } else |
1018 | ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO); | | 1022 | ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO); |
1019 | | | 1023 | |
1020 | /* | | 1024 | /* |
1021 | * Attach the interface. | | 1025 | * Attach the interface. |
1022 | */ | | 1026 | */ |
1023 | if_attach(ifp); | | 1027 | if_attach(ifp); |
1024 | if_deferred_start_init(ifp, NULL); | | 1028 | if_deferred_start_init(ifp, NULL); |
1025 | ether_ifattach(ifp, eaddr); | | 1029 | ether_ifattach(ifp, eaddr); |
1026 | ether_set_ifflags_cb(&sc->sc_ethercom, vge_ifflags_cb); | | 1030 | ether_set_ifflags_cb(&sc->sc_ethercom, vge_ifflags_cb); |
1027 | | | 1031 | |
1028 | callout_init(&sc->sc_timeout, 0); | | 1032 | callout_init(&sc->sc_timeout, 0); |
1029 | callout_setfunc(&sc->sc_timeout, vge_tick, sc); | | 1033 | callout_setfunc(&sc->sc_timeout, vge_tick, sc); |
1030 | | | 1034 | |
1031 | /* | | 1035 | /* |
1032 | * Make sure the interface is shutdown during reboot. | | 1036 | * Make sure the interface is shutdown during reboot. |
1033 | */ | | 1037 | */ |
1034 | if (pmf_device_register1(self, NULL, NULL, vge_shutdown)) | | 1038 | if (pmf_device_register1(self, NULL, NULL, vge_shutdown)) |
1035 | pmf_class_network_register(self, ifp); | | 1039 | pmf_class_network_register(self, ifp); |
1036 | else | | 1040 | else |
1037 | aprint_error_dev(self, "couldn't establish power handler\n"); | | 1041 | aprint_error_dev(self, "couldn't establish power handler\n"); |
1038 | } | | 1042 | } |
1039 | | | 1043 | |
/*
 * Allocate (or recycle) a receive buffer and attach it to RX
 * descriptor 'idx'.
 *
 * If 'm' is NULL a fresh cluster mbuf is allocated; otherwise the
 * caller's mbuf is recycled by rewinding its data pointer to the
 * start of the cluster.  The buffer is DMA-mapped and the descriptor
 * initialized, but ownership is only handed back to the chip in
 * groups of VGE_RXCHUNK descriptors (see the note below).
 *
 * Returns 0 on success, ENOBUFS on mbuf/cluster allocation failure,
 * or ENOMEM if the DMA map could not be loaded.
 */
static int
vge_newbuf(struct vge_softc *sc, int idx, struct mbuf *m)
{
	struct mbuf *m_new;
	struct vge_rxdesc *rxd;
	struct vge_rxsoft *rxs;
	bus_dmamap_t map;
	int i;
#ifdef DIAGNOSTIC
	uint32_t rd_sts;
#endif

	m_new = NULL;
	if (m == NULL) {
		/* No mbuf supplied: allocate a new cluster mbuf. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return ENOBUFS;

		MCLGET(m_new, M_DONTWAIT);
		if ((m_new->m_flags & M_EXT) == 0) {
			m_freem(m_new);
			return ENOBUFS;
		}

		m = m_new;
	} else
		/* Recycle: rewind the data pointer to the cluster start. */
		m->m_data = m->m_ext.ext_buf;


	/*
	 * This is part of an evil trick to deal with non-x86 platforms.
	 * The VIA chip requires RX buffers to be aligned on 32-bit
	 * boundaries, but that will hose non-x86 machines. To get around
	 * this, we leave some empty space at the start of each buffer
	 * and for non-x86 hosts, we copy the buffer back two bytes
	 * to achieve word alignment. This is slightly more efficient
	 * than allocating a new buffer, copying the contents, and
	 * discarding the old buffer.
	 */
	m->m_len = m->m_pkthdr.len = VGE_RX_BUFSIZE;
#ifndef __NO_STRICT_ALIGNMENT
	m->m_data += VGE_RX_PAD;
#endif
	rxs = &sc->sc_rxsoft[idx];
	map = rxs->rxs_dmamap;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0)
		goto out;

	rxd = &sc->sc_rxdescs[idx];

#ifdef DIAGNOSTIC
	/* If this descriptor is still owned by the chip, bail. */
	VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	rd_sts = le32toh(rxd->rd_sts);
	VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
	if (rd_sts & VGE_RDSTS_OWN) {
		panic("%s: tried to map busy RX descriptor",
		    device_xname(sc->sc_dev));
	}
#endif

	rxs->rxs_mbuf = m;
	/* Make the buffer visible to the device before the descriptor is. */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	rxd->rd_buflen =
	    htole16(VGE_BUFLEN(map->dm_segs[0].ds_len) | VGE_RXDESC_I);
	vge_set_rxaddr(rxd, map->dm_segs[0].ds_addr);
	rxd->rd_sts = 0;
	rxd->rd_ctl = 0;
	VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * Note: the manual fails to document the fact that for
	 * proper operation, the driver needs to replenish the RX
	 * DMA ring 4 descriptors at a time (rather than one at a
	 * time, like most chips). We can allocate the new buffers
	 * but we should not set the OWN bits until we're ready
	 * to hand back 4 of them in one shot.
	 */

#define VGE_RXCHUNK 4
	sc->sc_rx_consumed++;
	if (sc->sc_rx_consumed == VGE_RXCHUNK) {
		/* Hand the last VGE_RXCHUNK descriptors back to the chip. */
		for (i = idx; i != idx - VGE_RXCHUNK; i--) {
			KASSERT(i >= 0);
			sc->sc_rxdescs[i].rd_sts |= htole32(VGE_RDSTS_OWN);
			VGE_RXDESCSYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		}
		sc->sc_rx_consumed = 0;
	}

	return 0;
 out:
	if (m_new != NULL)
		m_freem(m_new);
	return ENOMEM;
}
1140 | | | 1144 | |
1141 | #ifndef __NO_STRICT_ALIGNMENT | | 1145 | #ifndef __NO_STRICT_ALIGNMENT |
1142 | static inline void | | 1146 | static inline void |
1143 | vge_fixup_rx(struct mbuf *m) | | 1147 | vge_fixup_rx(struct mbuf *m) |
1144 | { | | 1148 | { |
1145 | int i; | | 1149 | int i; |
1146 | uint16_t *src, *dst; | | 1150 | uint16_t *src, *dst; |
1147 | | | 1151 | |
1148 | src = mtod(m, uint16_t *); | | 1152 | src = mtod(m, uint16_t *); |
1149 | dst = src - 1; | | 1153 | dst = src - 1; |
1150 | | | 1154 | |
1151 | for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) | | 1155 | for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) |
1152 | *dst++ = *src++; | | 1156 | *dst++ = *src++; |
1153 | | | 1157 | |
1154 | m->m_data -= ETHER_ALIGN; | | 1158 | m->m_data -= ETHER_ALIGN; |
1155 | } | | 1159 | } |
1156 | #endif | | 1160 | #endif |
1157 | | | 1161 | |
/*
 * RX handler. We support the reception of jumbo frames that have
 * been fragmented across multiple 2K mbuf cluster buffers.
 *
 * Walks the RX ring from sc_rx_prodidx until a descriptor still
 * owned by the chip is found (or a full ring's worth has been
 * processed), reassembling multi-fragment frames via the
 * sc_rx_mhead/sc_rx_mtail chain and handing completed packets to
 * the network stack.
 */
static void
vge_rxeof(struct vge_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	int idx, total_len, lim;
	struct vge_rxdesc *cur_rxd;
	struct vge_rxsoft *rxs;
	uint32_t rxstat, rxctl;

	ifp = &sc->sc_ethercom.ec_if;
	lim = 0;

	/* Invalidate the descriptor memory */

	for (idx = sc->sc_rx_prodidx;; idx = VGE_NEXT_RXDESC(idx)) {
		cur_rxd = &sc->sc_rxdescs[idx];

		VGE_RXDESCSYNC(sc, idx,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		rxstat = le32toh(cur_rxd->rd_sts);
		if ((rxstat & VGE_RDSTS_OWN) != 0) {
			/* Still owned by the chip: nothing more to reap. */
			VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
			break;
		}

		rxctl = le32toh(cur_rxd->rd_ctl);
		rxs = &sc->sc_rxsoft[idx];
		m = rxs->rxs_mbuf;
		/* Total frame length lives in the BUFSIZ field of rd_sts. */
		total_len = (rxstat & VGE_RDSTS_BUFSIZ) >> 16;

		/* Invalidate the RX mbuf and unload its map */

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap,
		    0, rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

		/*
		 * If the 'start of frame' bit is set, this indicates
		 * either the first fragment in a multi-fragment receive,
		 * or an intermediate fragment. Either way, we want to
		 * accumulate the buffers.
		 */
		if (rxstat & VGE_RXPKT_SOF) {
			m->m_len = VGE_RX_BUFSIZE;
			if (sc->sc_rx_mhead == NULL)
				sc->sc_rx_mhead = sc->sc_rx_mtail = m;
			else {
				m->m_flags &= ~M_PKTHDR;
				sc->sc_rx_mtail->m_next = m;
				sc->sc_rx_mtail = m;
			}
			vge_newbuf(sc, idx, NULL);
			continue;
		}

		/*
		 * Bad/error frames will have the RXOK bit cleared.
		 * However, there's one error case we want to allow:
		 * if a VLAN tagged frame arrives and the chip can't
		 * match it against the CAM filter, it considers this
		 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
		 * We don't want to drop the frame though: our VLAN
		 * filtering is done in software.
		 */
		if ((rxstat & VGE_RDSTS_RXOK) == 0 &&
		    (rxstat & VGE_RDSTS_VIDM) == 0 &&
		    (rxstat & VGE_RDSTS_CSUMERR) == 0) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->sc_rx_mhead != NULL) {
				m_freem(sc->sc_rx_mhead);
				sc->sc_rx_mhead = sc->sc_rx_mtail = NULL;
			}
			/* Recycle the current mbuf back into the ring. */
			vge_newbuf(sc, idx, m);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */

		if (vge_newbuf(sc, idx, NULL)) {
			ifp->if_ierrors++;
			if (sc->sc_rx_mhead != NULL) {
				m_freem(sc->sc_rx_mhead);
				sc->sc_rx_mhead = sc->sc_rx_mtail = NULL;
			}
			vge_newbuf(sc, idx, m);
			continue;
		}

		if (sc->sc_rx_mhead != NULL) {
			/* Last fragment of a multi-fragment frame. */
			m->m_len = total_len % VGE_RX_BUFSIZE;
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes is the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->sc_rx_mtail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->sc_rx_mtail->m_next = m;
			}
			m = sc->sc_rx_mhead;
			sc->sc_rx_mhead = sc->sc_rx_mtail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len = total_len - ETHER_CRC_LEN;

#ifndef __NO_STRICT_ALIGNMENT
		vge_fixup_rx(m);
#endif
		m_set_rcvif(m, ifp);

		/* Do RX checksumming if enabled */
		if (ifp->if_csum_flags_rx & M_CSUM_IPv4) {

			/* Check IP header checksum */
			if (rxctl & VGE_RDCTL_IPPKT)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if ((rxctl & VGE_RDCTL_IPCSUMOK) == 0)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}

		if (ifp->if_csum_flags_rx & M_CSUM_TCPv4) {
			/* Check TCP checksum */
			if (rxctl & VGE_RDCTL_TCPPKT)
				m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;

			if ((rxctl & VGE_RDCTL_PROTOCSUMOK) == 0)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		if (ifp->if_csum_flags_rx & M_CSUM_UDPv4) {
			/* Check UDP checksum */
			if (rxctl & VGE_RDCTL_UDPPKT)
				m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;

			if ((rxctl & VGE_RDCTL_PROTOCSUMOK) == 0)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		if (rxstat & VGE_RDSTS_VTAG) {
			/*
			 * We use bswap16() here because:
			 * On LE machines, tag is stored in BE as stream data.
			 * On BE machines, tag is stored in BE as stream data
			 * but it was already swapped by le32toh() above.
			 */
			vlan_set_tag(m, bswap16(rxctl & VGE_RDCTL_VLANID));
		}

		if_percpuq_enqueue(ifp->if_percpuq, m);

		/* Bound the walk to one full ring per call. */
		lim++;
		if (lim == VGE_NRXDESC)
			break;
	}

	sc->sc_rx_prodidx = idx;
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim);
}
1334 | | | 1338 | |
/*
 * TX completion handler: reap descriptors the chip has finished
 * with, starting at sc_tx_considx, freeing their mbufs and DMA
 * maps and updating interface statistics.  Stops at the first
 * descriptor still owned by the chip.
 */
static void
vge_txeof(struct vge_softc *sc)
{
	struct ifnet *ifp;
	struct vge_txsoft *txs;
	uint32_t txstat;
	int idx;

	ifp = &sc->sc_ethercom.ec_if;

	for (idx = sc->sc_tx_considx;
	    sc->sc_tx_free < VGE_NTXDESC;
	    idx = VGE_NEXT_TXDESC(idx), sc->sc_tx_free++) {
		VGE_TXDESCSYNC(sc, idx,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		txstat = le32toh(sc->sc_txdescs[idx].td_sts);
		VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
		if (txstat & VGE_TDSTS_OWN) {
			/* Chip still owns this one; stop reaping. */
			break;
		}

		/* Release the transmitted mbuf and its DMA map. */
		txs = &sc->sc_txsoft[idx];
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap, 0,
		    txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		if (txstat & (VGE_TDSTS_EXCESSCOLL | VGE_TDSTS_COLL))
			ifp->if_collisions++;
		if (txstat & VGE_TDSTS_TXERR)
			ifp->if_oerrors++;
		else
			ifp->if_opackets++;
	}

	sc->sc_tx_considx = idx;

	if (sc->sc_tx_free > 0) {
		ifp->if_flags &= ~IFF_OACTIVE;
	}

	/*
	 * If not all descriptors have been released reaped yet,
	 * reload the timer so that we will eventually get another
	 * interrupt that will cause us to re-enter this routine.
	 * This is done in case the transmitter has gone idle.
	 */
	if (sc->sc_tx_free < VGE_NTXDESC)
		CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
	else
		ifp->if_timer = 0;
}
1387 | | | 1391 | |
1388 | static void | | 1392 | static void |
1389 | vge_tick(void *arg) | | 1393 | vge_tick(void *arg) |
1390 | { | | 1394 | { |
1391 | struct vge_softc *sc; | | 1395 | struct vge_softc *sc; |
1392 | struct ifnet *ifp; | | 1396 | struct ifnet *ifp; |
1393 | struct mii_data *mii; | | 1397 | struct mii_data *mii; |
1394 | int s; | | 1398 | int s; |
1395 | | | 1399 | |
1396 | sc = arg; | | 1400 | sc = arg; |
1397 | ifp = &sc->sc_ethercom.ec_if; | | 1401 | ifp = &sc->sc_ethercom.ec_if; |
1398 | mii = &sc->sc_mii; | | 1402 | mii = &sc->sc_mii; |
1399 | | | 1403 | |
1400 | s = splnet(); | | 1404 | s = splnet(); |
1401 | | | 1405 | |
1402 | callout_schedule(&sc->sc_timeout, hz); | | 1406 | callout_schedule(&sc->sc_timeout, hz); |
1403 | | | 1407 | |
1404 | mii_tick(mii); | | 1408 | mii_tick(mii); |
1405 | if (sc->sc_link) { | | 1409 | if (sc->sc_link) { |
1406 | if ((mii->mii_media_status & IFM_ACTIVE) == 0) | | 1410 | if ((mii->mii_media_status & IFM_ACTIVE) == 0) |
1407 | sc->sc_link = 0; | | 1411 | sc->sc_link = 0; |
1408 | } else { | | 1412 | } else { |
1409 | if (mii->mii_media_status & IFM_ACTIVE && | | 1413 | if (mii->mii_media_status & IFM_ACTIVE && |
1410 | IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { | | 1414 | IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { |
1411 | sc->sc_link = 1; | | 1415 | sc->sc_link = 1; |
1412 | if (!IFQ_IS_EMPTY(&ifp->if_snd)) | | 1416 | if (!IFQ_IS_EMPTY(&ifp->if_snd)) |
1413 | vge_start(ifp); | | 1417 | vge_start(ifp); |
1414 | } | | 1418 | } |
1415 | } | | 1419 | } |
1416 | | | 1420 | |
1417 | splx(s); | | 1421 | splx(s); |
1418 | } | | 1422 | } |
1419 | | | 1423 | |
1420 | static int | | 1424 | static int |
1421 | vge_intr(void *arg) | | 1425 | vge_intr(void *arg) |
1422 | { | | 1426 | { |
1423 | struct vge_softc *sc; | | 1427 | struct vge_softc *sc; |
1424 | struct ifnet *ifp; | | 1428 | struct ifnet *ifp; |
1425 | uint32_t status; | | 1429 | uint32_t status; |
1426 | int claim; | | 1430 | int claim; |
1427 | | | 1431 | |
1428 | sc = arg; | | 1432 | sc = arg; |
1429 | claim = 0; | | 1433 | claim = 0; |
1430 | if (sc->sc_suspended) { | | 1434 | if (sc->sc_suspended) { |
1431 | return claim; | | 1435 | return claim; |
1432 | } | | 1436 | } |
1433 | | | 1437 | |
1434 | ifp = &sc->sc_ethercom.ec_if; | | 1438 | ifp = &sc->sc_ethercom.ec_if; |
1435 | | | 1439 | |
1436 | if ((ifp->if_flags & IFF_UP) == 0) { | | 1440 | if ((ifp->if_flags & IFF_UP) == 0) { |
1437 | return claim; | | 1441 | return claim; |
1438 | } | | 1442 | } |
1439 | | | 1443 | |
1440 | /* Disable interrupts */ | | 1444 | /* Disable interrupts */ |
1441 | CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); | | 1445 | CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); |
1442 | | | 1446 | |
1443 | for (;;) { | | 1447 | for (;;) { |
1444 | | | 1448 | |
1445 | status = CSR_READ_4(sc, VGE_ISR); | | 1449 | status = CSR_READ_4(sc, VGE_ISR); |
1446 | /* If the card has gone away the read returns 0xffffffff. */ | | 1450 | /* If the card has gone away the read returns 0xffffffff. */ |
1447 | if (status == 0xFFFFFFFF) | | 1451 | if (status == 0xFFFFFFFF) |
1448 | break; | | 1452 | break; |
1449 | | | 1453 | |
1450 | if (status) { | | 1454 | if (status) { |
1451 | claim = 1; | | 1455 | claim = 1; |
1452 | CSR_WRITE_4(sc, VGE_ISR, status); | | 1456 | CSR_WRITE_4(sc, VGE_ISR, status); |
1453 | } | | 1457 | } |
1454 | | | 1458 | |
1455 | if ((status & VGE_INTRS) == 0) | | 1459 | if ((status & VGE_INTRS) == 0) |
1456 | break; | | 1460 | break; |
1457 | | | 1461 | |
1458 | if (status & (VGE_ISR_RXOK | VGE_ISR_RXOK_HIPRIO)) | | 1462 | if (status & (VGE_ISR_RXOK | VGE_ISR_RXOK_HIPRIO)) |
1459 | vge_rxeof(sc); | | 1463 | vge_rxeof(sc); |
1460 | | | 1464 | |
1461 | if (status & (VGE_ISR_RXOFLOW | VGE_ISR_RXNODESC)) { | | 1465 | if (status & (VGE_ISR_RXOFLOW | VGE_ISR_RXNODESC)) { |
1462 | vge_rxeof(sc); | | 1466 | vge_rxeof(sc); |
1463 | CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); | | 1467 | CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); |
1464 | CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); | | 1468 | CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); |
1465 | } | | 1469 | } |
1466 | | | 1470 | |
1467 | if (status & (VGE_ISR_TXOK0 | VGE_ISR_TIMER0)) | | 1471 | if (status & (VGE_ISR_TXOK0 | VGE_ISR_TIMER0)) |
1468 | vge_txeof(sc); | | 1472 | vge_txeof(sc); |
1469 | | | 1473 | |
1470 | if (status & (VGE_ISR_TXDMA_STALL | VGE_ISR_RXDMA_STALL)) | | 1474 | if (status & (VGE_ISR_TXDMA_STALL | VGE_ISR_RXDMA_STALL)) |
1471 | vge_init(ifp); | | 1475 | vge_init(ifp); |
1472 | | | 1476 | |
1473 | if (status & VGE_ISR_LINKSTS) | | 1477 | if (status & VGE_ISR_LINKSTS) |
1474 | vge_tick(sc); | | 1478 | vge_tick(sc); |
1475 | } | | 1479 | } |
1476 | | | 1480 | |
1477 | /* Re-enable interrupts */ | | 1481 | /* Re-enable interrupts */ |
1478 | CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); | | 1482 | CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); |
1479 | | | 1483 | |
1480 | if (claim) | | 1484 | if (claim) |
1481 | if_schedule_deferred_start(ifp); | | 1485 | if_schedule_deferred_start(ifp); |
1482 | | | 1486 | |
1483 | return claim; | | 1487 | return claim; |
1484 | } | | 1488 | } |
1485 | | | 1489 | |
1486 | static int | | 1490 | static int |
1487 | vge_encap(struct vge_softc *sc, struct mbuf *m_head, int idx) | | 1491 | vge_encap(struct vge_softc *sc, struct mbuf *m_head, int idx) |
1488 | { | | 1492 | { |
1489 | struct vge_txsoft *txs; | | 1493 | struct vge_txsoft *txs; |
1490 | struct vge_txdesc *txd; | | 1494 | struct vge_txdesc *txd; |
1491 | struct vge_txfrag *f; | | 1495 | struct vge_txfrag *f; |
1492 | struct mbuf *m_new; | | 1496 | struct mbuf *m_new; |
1493 | bus_dmamap_t map; | | 1497 | bus_dmamap_t map; |
1494 | int m_csumflags, seg, error, flags; | | 1498 | int m_csumflags, seg, error, flags; |
1495 | size_t sz; | | 1499 | size_t sz; |
1496 | uint32_t td_sts, td_ctl; | | 1500 | uint32_t td_sts, td_ctl; |
1497 | | | 1501 | |
1498 | KASSERT(sc->sc_tx_free > 0); | | 1502 | KASSERT(sc->sc_tx_free > 0); |
1499 | | | 1503 | |
1500 | txd = &sc->sc_txdescs[idx]; | | 1504 | txd = &sc->sc_txdescs[idx]; |
1501 | | | 1505 | |
1502 | #ifdef DIAGNOSTIC | | 1506 | #ifdef DIAGNOSTIC |
1503 | /* If this descriptor is still owned by the chip, bail. */ | | 1507 | /* If this descriptor is still owned by the chip, bail. */ |
1504 | VGE_TXDESCSYNC(sc, idx, | | 1508 | VGE_TXDESCSYNC(sc, idx, |
1505 | BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); | | 1509 | BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); |
1506 | td_sts = le32toh(txd->td_sts); | | 1510 | td_sts = le32toh(txd->td_sts); |
1507 | VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD); | | 1511 | VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD); |
1508 | if (td_sts & VGE_TDSTS_OWN) { | | 1512 | if (td_sts & VGE_TDSTS_OWN) { |
1509 | return ENOBUFS; | | 1513 | return ENOBUFS; |
1510 | } | | 1514 | } |
1511 | #endif | | 1515 | #endif |
1512 | | | 1516 | |
1513 | /* | | 1517 | /* |
1514 | * Preserve m_pkthdr.csum_flags here since m_head might be | | 1518 | * Preserve m_pkthdr.csum_flags here since m_head might be |
1515 | * updated by m_defrag() | | 1519 | * updated by m_defrag() |
1516 | */ | | 1520 | */ |
1517 | m_csumflags = m_head->m_pkthdr.csum_flags; | | 1521 | m_csumflags = m_head->m_pkthdr.csum_flags; |
1518 | | | 1522 | |
1519 | txs = &sc->sc_txsoft[idx]; | | 1523 | txs = &sc->sc_txsoft[idx]; |
1520 | map = txs->txs_dmamap; | | 1524 | map = txs->txs_dmamap; |
1521 | error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m_head, BUS_DMA_NOWAIT); | | 1525 | error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m_head, BUS_DMA_NOWAIT); |
1522 | | | 1526 | |
1523 | /* If too many segments to map, coalesce */ | | 1527 | /* If too many segments to map, coalesce */ |
1524 | if (error == EFBIG || | | 1528 | if (error == EFBIG || |
1525 | (m_head->m_pkthdr.len < ETHER_PAD_LEN && | | 1529 | (m_head->m_pkthdr.len < ETHER_PAD_LEN && |
1526 | map->dm_nsegs == VGE_TX_FRAGS)) { | | 1530 | map->dm_nsegs == VGE_TX_FRAGS)) { |
1527 | m_new = m_defrag(m_head, M_DONTWAIT); | | 1531 | m_new = m_defrag(m_head, M_DONTWAIT); |
1528 | if (m_new == NULL) | | 1532 | if (m_new == NULL) |
1529 | return EFBIG; | | 1533 | return EFBIG; |
1530 | | | 1534 | |
1531 | error = bus_dmamap_load_mbuf(sc->sc_dmat, map, | | 1535 | error = bus_dmamap_load_mbuf(sc->sc_dmat, map, |
1532 | m_new, BUS_DMA_NOWAIT); | | 1536 | m_new, BUS_DMA_NOWAIT); |
1533 | if (error) { | | 1537 | if (error) { |
1534 | m_freem(m_new); | | 1538 | m_freem(m_new); |
1535 | return error; | | 1539 | return error; |
1536 | } | | 1540 | } |
1537 | | | 1541 | |
1538 | m_head = m_new; | | 1542 | m_head = m_new; |
1539 | } else if (error) | | 1543 | } else if (error) |
1540 | return error; | | 1544 | return error; |
1541 | | | 1545 | |
1542 | txs->txs_mbuf = m_head; | | 1546 | txs->txs_mbuf = m_head; |
1543 | | | 1547 | |
1544 | bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, | | 1548 | bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, |
1545 | BUS_DMASYNC_PREWRITE); | | 1549 | BUS_DMASYNC_PREWRITE); |
1546 | | | 1550 | |
1547 | for (seg = 0, f = &txd->td_frag[0]; seg < map->dm_nsegs; seg++, f++) { | | 1551 | for (seg = 0, f = &txd->td_frag[0]; seg < map->dm_nsegs; seg++, f++) { |
1548 | f->tf_buflen = htole16(VGE_BUFLEN(map->dm_segs[seg].ds_len)); | | 1552 | f->tf_buflen = htole16(VGE_BUFLEN(map->dm_segs[seg].ds_len)); |
1549 | vge_set_txaddr(f, map->dm_segs[seg].ds_addr); | | 1553 | vge_set_txaddr(f, map->dm_segs[seg].ds_addr); |
1550 | } | | 1554 | } |
1551 | | | 1555 | |
1552 | /* Argh. This chip does not autopad short frames */ | | 1556 | /* Argh. This chip does not autopad short frames */ |
1553 | sz = m_head->m_pkthdr.len; | | 1557 | sz = m_head->m_pkthdr.len; |
1554 | if (sz < ETHER_PAD_LEN) { | | 1558 | if (sz < ETHER_PAD_LEN) { |
1555 | f->tf_buflen = htole16(VGE_BUFLEN(ETHER_PAD_LEN - sz)); | | 1559 | f->tf_buflen = htole16(VGE_BUFLEN(ETHER_PAD_LEN - sz)); |
1556 | vge_set_txaddr(f, VGE_CDPADADDR(sc)); | | 1560 | vge_set_txaddr(f, VGE_CDPADADDR(sc)); |
1557 | sz = ETHER_PAD_LEN; | | 1561 | sz = ETHER_PAD_LEN; |
1558 | seg++; | | 1562 | seg++; |
1559 | } | | 1563 | } |
1560 | VGE_TXFRAGSYNC(sc, idx, seg, BUS_DMASYNC_PREWRITE); | | 1564 | VGE_TXFRAGSYNC(sc, idx, seg, BUS_DMASYNC_PREWRITE); |
1561 | | | 1565 | |
1562 | /* | | 1566 | /* |
1563 | * When telling the chip how many segments there are, we | | 1567 | * When telling the chip how many segments there are, we |
1564 | * must use nsegs + 1 instead of just nsegs. Darned if I | | 1568 | * must use nsegs + 1 instead of just nsegs. Darned if I |
1565 | * know why. | | 1569 | * know why. |
1566 | */ | | 1570 | */ |
1567 | seg++; | | 1571 | seg++; |
1568 | | | 1572 | |
1569 | flags = 0; | | 1573 | flags = 0; |
1570 | if (m_csumflags & M_CSUM_IPv4) | | 1574 | if (m_csumflags & M_CSUM_IPv4) |
1571 | flags |= VGE_TDCTL_IPCSUM; | | 1575 | flags |= VGE_TDCTL_IPCSUM; |
1572 | if (m_csumflags & M_CSUM_TCPv4) | | 1576 | if (m_csumflags & M_CSUM_TCPv4) |
1573 | flags |= VGE_TDCTL_TCPCSUM; | | 1577 | flags |= VGE_TDCTL_TCPCSUM; |
1574 | if (m_csumflags & M_CSUM_UDPv4) | | 1578 | if (m_csumflags & M_CSUM_UDPv4) |
1575 | flags |= VGE_TDCTL_UDPCSUM; | | 1579 | flags |= VGE_TDCTL_UDPCSUM; |
1576 | td_sts = sz << 16; | | 1580 | td_sts = sz << 16; |
1577 | td_ctl = flags | (seg << 28) | VGE_TD_LS_NORM; | | 1581 | td_ctl = flags | (seg << 28) | VGE_TD_LS_NORM; |
1578 | | | 1582 | |
1579 | if (sz > ETHERMTU + ETHER_HDR_LEN) | | 1583 | if (sz > ETHERMTU + ETHER_HDR_LEN) |
1580 | td_ctl |= VGE_TDCTL_JUMBO; | | 1584 | td_ctl |= VGE_TDCTL_JUMBO; |
1581 | | | 1585 | |
1582 | /* | | 1586 | /* |
1583 | * Set up hardware VLAN tagging. | | 1587 | * Set up hardware VLAN tagging. |
1584 | */ | | 1588 | */ |
1585 | if (vlan_has_tag(m_head)) { | | 1589 | if (vlan_has_tag(m_head)) { |
1586 | /* | | 1590 | /* |
1587 | * No need htons() here since vge(4) chip assumes | | 1591 | * No need htons() here since vge(4) chip assumes |
1588 | * that tags are written in little endian and | | 1592 | * that tags are written in little endian and |
1589 | * we already use htole32() here. | | 1593 | * we already use htole32() here. |
1590 | */ | | 1594 | */ |
1591 | td_ctl |= vlan_get_tag(m_head) | VGE_TDCTL_VTAG; | | 1595 | td_ctl |= vlan_get_tag(m_head) | VGE_TDCTL_VTAG; |
1592 | } | | 1596 | } |
1593 | txd->td_ctl = htole32(td_ctl); | | 1597 | txd->td_ctl = htole32(td_ctl); |
1594 | txd->td_sts = htole32(td_sts); | | 1598 | txd->td_sts = htole32(td_sts); |
1595 | VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); | | 1599 | VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
1596 | | | 1600 | |
1597 | txd->td_sts = htole32(VGE_TDSTS_OWN | td_sts); | | 1601 | txd->td_sts = htole32(VGE_TDSTS_OWN | td_sts); |
1598 | VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); | | 1602 | VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
1599 | | | 1603 | |
1600 | sc->sc_tx_free--; | | 1604 | sc->sc_tx_free--; |
1601 | | | 1605 | |
1602 | return 0; | | 1606 | return 0; |
1603 | } | | 1607 | } |
1604 | | | 1608 | |
1605 | /* | | 1609 | /* |
1606 | * Main transmit routine. | | 1610 | * Main transmit routine. |
1607 | */ | | 1611 | */ |
1608 | | | 1612 | |
1609 | static void | | 1613 | static void |
1610 | vge_start(struct ifnet *ifp) | | 1614 | vge_start(struct ifnet *ifp) |
1611 | { | | 1615 | { |
1612 | struct vge_softc *sc; | | 1616 | struct vge_softc *sc; |
1613 | struct vge_txsoft *txs; | | 1617 | struct vge_txsoft *txs; |
1614 | struct mbuf *m_head; | | 1618 | struct mbuf *m_head; |
1615 | int idx, pidx, ofree, error; | | 1619 | int idx, pidx, ofree, error; |
1616 | | | 1620 | |
1617 | sc = ifp->if_softc; | | 1621 | sc = ifp->if_softc; |
1618 | | | 1622 | |
1619 | if (!sc->sc_link || | | 1623 | if (!sc->sc_link || |
1620 | (ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) { | | 1624 | (ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) { |
1621 | return; | | 1625 | return; |
1622 | } | | 1626 | } |
1623 | | | 1627 | |
1624 | m_head = NULL; | | 1628 | m_head = NULL; |
1625 | idx = sc->sc_tx_prodidx; | | 1629 | idx = sc->sc_tx_prodidx; |
1626 | pidx = VGE_PREV_TXDESC(idx); | | 1630 | pidx = VGE_PREV_TXDESC(idx); |
1627 | ofree = sc->sc_tx_free; | | 1631 | ofree = sc->sc_tx_free; |
1628 | | | 1632 | |
1629 | /* | | 1633 | /* |
1630 | * Loop through the send queue, setting up transmit descriptors | | 1634 | * Loop through the send queue, setting up transmit descriptors |
1631 | * until we drain the queue, or use up all available transmit | | 1635 | * until we drain the queue, or use up all available transmit |
1632 | * descriptors. | | 1636 | * descriptors. |
1633 | */ | | 1637 | */ |
1634 | for (;;) { | | 1638 | for (;;) { |
1635 | /* Grab a packet off the queue. */ | | 1639 | /* Grab a packet off the queue. */ |
1636 | IFQ_POLL(&ifp->if_snd, m_head); | | 1640 | IFQ_POLL(&ifp->if_snd, m_head); |
1637 | if (m_head == NULL) | | 1641 | if (m_head == NULL) |
1638 | break; | | 1642 | break; |
1639 | | | 1643 | |
1640 | if (sc->sc_tx_free == 0) { | | 1644 | if (sc->sc_tx_free == 0) { |
1641 | /* | | 1645 | /* |
1642 | * All slots used, stop for now. | | 1646 | * All slots used, stop for now. |
1643 | */ | | 1647 | */ |
1644 | ifp->if_flags |= IFF_OACTIVE; | | 1648 | ifp->if_flags |= IFF_OACTIVE; |
1645 | break; | | 1649 | break; |
1646 | } | | 1650 | } |
1647 | | | 1651 | |
1648 | txs = &sc->sc_txsoft[idx]; | | 1652 | txs = &sc->sc_txsoft[idx]; |
1649 | KASSERT(txs->txs_mbuf == NULL); | | 1653 | KASSERT(txs->txs_mbuf == NULL); |
1650 | | | 1654 | |
1651 | if ((error = vge_encap(sc, m_head, idx))) { | | 1655 | if ((error = vge_encap(sc, m_head, idx))) { |
1652 | if (error == EFBIG) { | | 1656 | if (error == EFBIG) { |
1653 | printf("%s: Tx packet consumes too many " | | 1657 | printf("%s: Tx packet consumes too many " |
1654 | "DMA segments, dropping...\n", | | 1658 | "DMA segments, dropping...\n", |
1655 | device_xname(sc->sc_dev)); | | 1659 | device_xname(sc->sc_dev)); |
1656 | IFQ_DEQUEUE(&ifp->if_snd, m_head); | | 1660 | IFQ_DEQUEUE(&ifp->if_snd, m_head); |
1657 | m_freem(m_head); | | 1661 | m_freem(m_head); |
1658 | continue; | | 1662 | continue; |
1659 | } | | 1663 | } |
1660 | | | 1664 | |
1661 | /* | | 1665 | /* |
1662 | * Short on resources, just stop for now. | | 1666 | * Short on resources, just stop for now. |
1663 | */ | | 1667 | */ |
1664 | if (error == ENOBUFS) | | 1668 | if (error == ENOBUFS) |
1665 | ifp->if_flags |= IFF_OACTIVE; | | 1669 | ifp->if_flags |= IFF_OACTIVE; |
1666 | break; | | 1670 | break; |
1667 | } | | 1671 | } |
1668 | | | 1672 | |
1669 | IFQ_DEQUEUE(&ifp->if_snd, m_head); | | 1673 | IFQ_DEQUEUE(&ifp->if_snd, m_head); |
1670 | | | 1674 | |
1671 | /* | | 1675 | /* |
1672 | * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. | | 1676 | * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. |
1673 | */ | | 1677 | */ |
1674 | | | 1678 | |
1675 | sc->sc_txdescs[pidx].td_frag[0].tf_buflen |= | | 1679 | sc->sc_txdescs[pidx].td_frag[0].tf_buflen |= |
1676 | htole16(VGE_TXDESC_Q); | | 1680 | htole16(VGE_TXDESC_Q); |
1677 | VGE_TXFRAGSYNC(sc, pidx, 1, | | 1681 | VGE_TXFRAGSYNC(sc, pidx, 1, |
1678 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); | | 1682 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
1679 | | | 1683 | |
1680 | if (txs->txs_mbuf != m_head) { | | 1684 | if (txs->txs_mbuf != m_head) { |
1681 | m_freem(m_head); | | 1685 | m_freem(m_head); |
1682 | m_head = txs->txs_mbuf; | | 1686 | m_head = txs->txs_mbuf; |
1683 | } | | 1687 | } |
1684 | | | 1688 | |
1685 | pidx = idx; | | 1689 | pidx = idx; |
1686 | idx = VGE_NEXT_TXDESC(idx); | | 1690 | idx = VGE_NEXT_TXDESC(idx); |
1687 | | | 1691 | |
1688 | /* | | 1692 | /* |
1689 | * If there's a BPF listener, bounce a copy of this frame | | 1693 | * If there's a BPF listener, bounce a copy of this frame |
1690 | * to him. | | 1694 | * to him. |
1691 | */ | | 1695 | */ |
1692 | bpf_mtap(ifp, m_head, BPF_D_OUT); | | 1696 | bpf_mtap(ifp, m_head, BPF_D_OUT); |
1693 | } | | 1697 | } |
1694 | | | 1698 | |
1695 | if (sc->sc_tx_free < ofree) { | | 1699 | if (sc->sc_tx_free < ofree) { |
1696 | /* TX packet queued */ | | 1700 | /* TX packet queued */ |
1697 | | | 1701 | |
1698 | sc->sc_tx_prodidx = idx; | | 1702 | sc->sc_tx_prodidx = idx; |
1699 | | | 1703 | |
1700 | /* Issue a transmit command. */ | | 1704 | /* Issue a transmit command. */ |
1701 | CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0); | | 1705 | CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0); |
1702 | | | 1706 | |
1703 | /* | | 1707 | /* |
1704 | * Use the countdown timer for interrupt moderation. | | 1708 | * Use the countdown timer for interrupt moderation. |
1705 | * 'TX done' interrupts are disabled. Instead, we reset the | | 1709 | * 'TX done' interrupts are disabled. Instead, we reset the |
1706 | * countdown timer, which will begin counting until it hits | | 1710 | * countdown timer, which will begin counting until it hits |
1707 | * the value in the SSTIMER register, and then trigger an | | 1711 | * the value in the SSTIMER register, and then trigger an |
1708 | * interrupt. Each time we set the TIMER0_ENABLE bit, the | | 1712 | * interrupt. Each time we set the TIMER0_ENABLE bit, the |
1709 | * timer count is reloaded. Only when the transmitter | | 1713 | * timer count is reloaded. Only when the transmitter |
1710 | * is idle will the timer hit 0 and an interrupt fire. | | 1714 | * is idle will the timer hit 0 and an interrupt fire. |
1711 | */ | | 1715 | */ |
1712 | CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE); | | 1716 | CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE); |
1713 | | | 1717 | |
1714 | /* | | 1718 | /* |
1715 | * Set a timeout in case the chip goes out to lunch. | | 1719 | * Set a timeout in case the chip goes out to lunch. |
1716 | */ | | 1720 | */ |
1717 | ifp->if_timer = 5; | | 1721 | ifp->if_timer = 5; |
1718 | } | | 1722 | } |
1719 | } | | 1723 | } |
1720 | | | 1724 | |
1721 | static int | | 1725 | static int |
1722 | vge_init(struct ifnet *ifp) | | 1726 | vge_init(struct ifnet *ifp) |
1723 | { | | 1727 | { |
1724 | struct vge_softc *sc; | | 1728 | struct vge_softc *sc; |
1725 | int i, rc = 0; | | 1729 | int i, rc = 0; |
1726 | | | 1730 | |
1727 | sc = ifp->if_softc; | | 1731 | sc = ifp->if_softc; |
1728 | | | 1732 | |
1729 | /* | | 1733 | /* |
1730 | * Cancel pending I/O and free all RX/TX buffers. | | 1734 | * Cancel pending I/O and free all RX/TX buffers. |
1731 | */ | | 1735 | */ |
1732 | vge_stop(ifp, 0); | | 1736 | vge_stop(ifp, 0); |
1733 | vge_reset(sc); | | 1737 | vge_reset(sc); |
1734 | | | 1738 | |
1735 | /* Initialize the RX descriptors and mbufs. */ | | 1739 | /* Initialize the RX descriptors and mbufs. */ |
1736 | memset(sc->sc_rxdescs, 0, sizeof(sc->sc_rxdescs)); | | 1740 | memset(sc->sc_rxdescs, 0, sizeof(sc->sc_rxdescs)); |
1737 | sc->sc_rx_consumed = 0; | | 1741 | sc->sc_rx_consumed = 0; |
1738 | for (i = 0; i < VGE_NRXDESC; i++) { | | 1742 | for (i = 0; i < VGE_NRXDESC; i++) { |
1739 | if (vge_newbuf(sc, i, NULL) == ENOBUFS) { | | 1743 | if (vge_newbuf(sc, i, NULL) == ENOBUFS) { |
1740 | printf("%s: unable to allocate or map rx buffer\n", | | 1744 | printf("%s: unable to allocate or map rx buffer\n", |
1741 | device_xname(sc->sc_dev)); | | 1745 | device_xname(sc->sc_dev)); |
1742 | return 1; /* XXX */ | | 1746 | return 1; /* XXX */ |
1743 | } | | 1747 | } |
1744 | } | | 1748 | } |
1745 | sc->sc_rx_prodidx = 0; | | 1749 | sc->sc_rx_prodidx = 0; |
1746 | sc->sc_rx_mhead = sc->sc_rx_mtail = NULL; | | 1750 | sc->sc_rx_mhead = sc->sc_rx_mtail = NULL; |
1747 | | | 1751 | |
1748 | /* Initialize the TX descriptors and mbufs. */ | | 1752 | /* Initialize the TX descriptors and mbufs. */ |
1749 | memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs)); | | 1753 | memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs)); |
1750 | bus_dmamap_sync(sc->sc_dmat, sc->sc_cddmamap, | | 1754 | bus_dmamap_sync(sc->sc_dmat, sc->sc_cddmamap, |
1751 | VGE_CDTXOFF(0), sizeof(sc->sc_txdescs), | | 1755 | VGE_CDTXOFF(0), sizeof(sc->sc_txdescs), |
1752 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); | | 1756 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
1753 | for (i = 0; i < VGE_NTXDESC; i++) | | 1757 | for (i = 0; i < VGE_NTXDESC; i++) |
1754 | sc->sc_txsoft[i].txs_mbuf = NULL; | | 1758 | sc->sc_txsoft[i].txs_mbuf = NULL; |
1755 | | | 1759 | |
1756 | sc->sc_tx_prodidx = 0; | | 1760 | sc->sc_tx_prodidx = 0; |
1757 | sc->sc_tx_considx = 0; | | 1761 | sc->sc_tx_considx = 0; |
1758 | sc->sc_tx_free = VGE_NTXDESC; | | 1762 | sc->sc_tx_free = VGE_NTXDESC; |
1759 | | | 1763 | |
1760 | /* Set our station address */ | | 1764 | /* Set our station address */ |
1761 | for (i = 0; i < ETHER_ADDR_LEN; i++) | | 1765 | for (i = 0; i < ETHER_ADDR_LEN; i++) |
1762 | CSR_WRITE_1(sc, VGE_PAR0 + i, sc->sc_eaddr[i]); | | 1766 | CSR_WRITE_1(sc, VGE_PAR0 + i, sc->sc_eaddr[i]); |
1763 | | | 1767 | |
1764 | /* | | 1768 | /* |
1765 | * Set receive FIFO threshold. Also allow transmission and | | 1769 | * Set receive FIFO threshold. Also allow transmission and |
1766 | * reception of VLAN tagged frames. | | 1770 | * reception of VLAN tagged frames. |
1767 | */ | | 1771 | */ |
1768 | CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR | VGE_RXCFG_VTAGOPT); | | 1772 | CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR | VGE_RXCFG_VTAGOPT); |
1769 | CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES | VGE_VTAG_OPT2); | | 1773 | CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES | VGE_VTAG_OPT2); |
1770 | | | 1774 | |
1771 | /* Set DMA burst length */ | | 1775 | /* Set DMA burst length */ |
1772 | CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN); | | 1776 | CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN); |
1773 | CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128); | | 1777 | CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128); |
1774 | | | 1778 | |
1775 | CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO | VGE_TXCFG_NONBLK); | | 1779 | CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO | VGE_TXCFG_NONBLK); |
1776 | | | 1780 | |
1777 | /* Set collision backoff algorithm */ | | 1781 | /* Set collision backoff algorithm */ |
1778 | CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM | | | 1782 | CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM | |
1779 | VGE_CHIPCFG1_CAP | VGE_CHIPCFG1_MBA | VGE_CHIPCFG1_BAKOPT); | | 1783 | VGE_CHIPCFG1_CAP | VGE_CHIPCFG1_MBA | VGE_CHIPCFG1_BAKOPT); |
1780 | CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET); | | 1784 | CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET); |
1781 | | | 1785 | |
1782 | /* Disable LPSEL field in priority resolution */ | | 1786 | /* Disable LPSEL field in priority resolution */ |
1783 | CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS); | | 1787 | CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS); |
1784 | | | 1788 | |
1785 | /* | | 1789 | /* |
1786 | * Load the addresses of the DMA queues into the chip. | | 1790 | * Load the addresses of the DMA queues into the chip. |
1787 | * Note that we only use one transmit queue. | | 1791 | * Note that we only use one transmit queue. |
1788 | */ | | 1792 | */ |
1789 | | | 1793 | |
1790 | CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0, VGE_ADDR_LO(VGE_CDTXADDR(sc, 0))); | | 1794 | CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0, VGE_ADDR_LO(VGE_CDTXADDR(sc, 0))); |
1791 | CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_NTXDESC - 1); | | 1795 | CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_NTXDESC - 1); |
1792 | | | 1796 | |
1793 | CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, VGE_ADDR_LO(VGE_CDRXADDR(sc, 0))); | | 1797 | CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, VGE_ADDR_LO(VGE_CDRXADDR(sc, 0))); |
1794 | CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_NRXDESC - 1); | | 1798 | CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_NRXDESC - 1); |
1795 | CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_NRXDESC); | | 1799 | CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_NRXDESC); |
1796 | | | 1800 | |
1797 | /* Enable and wake up the RX descriptor queue */ | | 1801 | /* Enable and wake up the RX descriptor queue */ |
1798 | CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); | | 1802 | CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); |
1799 | CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); | | 1803 | CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); |
1800 | | | 1804 | |
1801 | /* Enable the TX descriptor queue */ | | 1805 | /* Enable the TX descriptor queue */ |
1802 | CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0); | | 1806 | CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0); |
1803 | | | 1807 | |
1804 | /* Set up the receive filter -- allow large frames for VLANs. */ | | 1808 | /* Set up the receive filter -- allow large frames for VLANs. */ |
1805 | CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST | VGE_RXCTL_RX_GIANT); | | 1809 | CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST | VGE_RXCTL_RX_GIANT); |
1806 | | | 1810 | |
1807 | /* If we want promiscuous mode, set the allframes bit. */ | | 1811 | /* If we want promiscuous mode, set the allframes bit. */ |
1808 | if (ifp->if_flags & IFF_PROMISC) { | | 1812 | if (ifp->if_flags & IFF_PROMISC) { |
1809 | CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC); | | 1813 | CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC); |
1810 | } | | 1814 | } |
1811 | | | 1815 | |
1812 | /* Set capture broadcast bit to capture broadcast frames. */ | | 1816 | /* Set capture broadcast bit to capture broadcast frames. */ |
1813 | if (ifp->if_flags & IFF_BROADCAST) { | | 1817 | if (ifp->if_flags & IFF_BROADCAST) { |
1814 | CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST); | | 1818 | CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST); |
1815 | } | | 1819 | } |
1816 | | | 1820 | |
1817 | /* Set multicast bit to capture multicast frames. */ | | 1821 | /* Set multicast bit to capture multicast frames. */ |
1818 | if (ifp->if_flags & IFF_MULTICAST) { | | 1822 | if (ifp->if_flags & IFF_MULTICAST) { |
1819 | CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST); | | 1823 | CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST); |
1820 | } | | 1824 | } |
1821 | | | 1825 | |
1822 | /* Init the cam filter. */ | | 1826 | /* Init the cam filter. */ |
1823 | vge_cam_clear(sc); | | 1827 | vge_cam_clear(sc); |
1824 | | | 1828 | |
1825 | /* Init the multicast filter. */ | | 1829 | /* Init the multicast filter. */ |
1826 | vge_setmulti(sc); | | 1830 | vge_setmulti(sc); |
1827 | | | 1831 | |
1828 | /* Enable flow control */ | | 1832 | /* Enable flow control */ |
1829 | | | 1833 | |
1830 | CSR_WRITE_1(sc, VGE_CRS2, 0x8B); | | 1834 | CSR_WRITE_1(sc, VGE_CRS2, 0x8B); |
1831 | | | 1835 | |
1832 | /* Enable jumbo frame reception (if desired) */ | | 1836 | /* Enable jumbo frame reception (if desired) */ |
1833 | | | 1837 | |
1834 | /* Start the MAC. */ | | 1838 | /* Start the MAC. */ |
1835 | CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP); | | 1839 | CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP); |
1836 | CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL); | | 1840 | CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL); |
1837 | CSR_WRITE_1(sc, VGE_CRS0, | | 1841 | CSR_WRITE_1(sc, VGE_CRS0, |
1838 | VGE_CR0_TX_ENABLE | VGE_CR0_RX_ENABLE | VGE_CR0_START); | | 1842 | VGE_CR0_TX_ENABLE | VGE_CR0_RX_ENABLE | VGE_CR0_START); |
1839 | | | 1843 | |
1840 | /* | | 1844 | /* |
1841 | * Configure one-shot timer for microsecond | | 1845 | * Configure one-shot timer for microsecond |
1842 | * resolution and load it for 400 usecs. | | 1846 | * resolution and load it for 400 usecs. |
1843 | */ | | 1847 | */ |
1844 | CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES); | | 1848 | CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES); |
1845 | CSR_WRITE_2(sc, VGE_SSTIMER, 400); | | 1849 | CSR_WRITE_2(sc, VGE_SSTIMER, 400); |
1846 | | | 1850 | |
1847 | /* | | 1851 | /* |
1848 | * Configure interrupt moderation for receive. Enable | | 1852 | * Configure interrupt moderation for receive. Enable |
1849 | * the holdoff counter and load it, and set the RX | | 1853 | * the holdoff counter and load it, and set the RX |
1850 | * suppression count to the number of descriptors we | | 1854 | * suppression count to the number of descriptors we |
1851 | * want to allow before triggering an interrupt. | | 1855 | * want to allow before triggering an interrupt. |
1852 | * The holdoff timer is in units of 20 usecs. | | 1856 | * The holdoff timer is in units of 20 usecs. |
1853 | */ | | 1857 | */ |
1854 | | | 1858 | |
1855 | #ifdef notyet | | 1859 | #ifdef notyet |
1856 | CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE); | | 1860 | CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE); |
1857 | /* Select the interrupt holdoff timer page. */ | | 1861 | /* Select the interrupt holdoff timer page. */ |
1858 | CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); | | 1862 | CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); |
1859 | CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF); | | 1863 | CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF); |
1860 | CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */ | | 1864 | CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */ |
1861 | | | 1865 | |
1862 | /* Enable use of the holdoff timer. */ | | 1866 | /* Enable use of the holdoff timer. */ |
1863 | CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF); | | 1867 | CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF); |
1864 | CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD); | | 1868 | CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD); |
1865 | | | 1869 | |
1866 | /* Select the RX suppression threshold page. */ | | 1870 | /* Select the RX suppression threshold page. */ |
1867 | CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); | | 1871 | CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); |
1868 | CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR); | | 1872 | CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR); |
1869 | CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */ | | 1873 | CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */ |
1870 | | | 1874 | |
1871 | /* Restore the page select bits. */ | | 1875 | /* Restore the page select bits. */ |
1872 | CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); | | 1876 | CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); |
1873 | CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); | | 1877 | CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); |
1874 | #endif | | 1878 | #endif |
1875 | | | 1879 | |
1876 | #ifdef DEVICE_POLLING | | 1880 | #ifdef DEVICE_POLLING |
1877 | /* | | 1881 | /* |
1878 | * Disable interrupts if we are polling. | | 1882 | * Disable interrupts if we are polling. |
1879 | */ | | 1883 | */ |
1880 | if (ifp->if_flags & IFF_POLLING) { | | 1884 | if (ifp->if_flags & IFF_POLLING) { |
1881 | CSR_WRITE_4(sc, VGE_IMR, 0); | | 1885 | CSR_WRITE_4(sc, VGE_IMR, 0); |
1882 | CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); | | 1886 | CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); |
1883 | } else /* otherwise ... */ | | 1887 | } else /* otherwise ... */ |
1884 | #endif /* DEVICE_POLLING */ | | 1888 | #endif /* DEVICE_POLLING */ |
1885 | { | | 1889 | { |
1886 | /* | | 1890 | /* |
1887 | * Enable interrupts. | | 1891 | * Enable interrupts. |
1888 | */ | | 1892 | */ |
1889 | CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS); | | 1893 | CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS); |
1890 | CSR_WRITE_4(sc, VGE_ISR, 0); | | 1894 | CSR_WRITE_4(sc, VGE_ISR, 0); |
1891 | CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); | | 1895 | CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); |
1892 | } | | 1896 | } |
1893 | | | 1897 | |
1894 | if ((rc = ether_mediachange(ifp)) != 0) | | 1898 | if ((rc = ether_mediachange(ifp)) != 0) |
1895 | goto out; | | 1899 | goto out; |
1896 | | | 1900 | |
1897 | ifp->if_flags |= IFF_RUNNING; | | 1901 | ifp->if_flags |= IFF_RUNNING; |
1898 | ifp->if_flags &= ~IFF_OACTIVE; | | 1902 | ifp->if_flags &= ~IFF_OACTIVE; |
1899 | | | 1903 | |
1900 | sc->sc_if_flags = 0; | | 1904 | sc->sc_if_flags = 0; |
1901 | sc->sc_link = 0; | | 1905 | sc->sc_link = 0; |
1902 | | | 1906 | |
1903 | callout_schedule(&sc->sc_timeout, hz); | | 1907 | callout_schedule(&sc->sc_timeout, hz); |
1904 | | | 1908 | |
1905 | out: | | 1909 | out: |
1906 | return rc; | | 1910 | return rc; |
1907 | } | | 1911 | } |
1908 | | | 1912 | |
1909 | static void | | 1913 | static void |
1910 | vge_miibus_statchg(struct ifnet *ifp) | | 1914 | vge_miibus_statchg(struct ifnet *ifp) |
1911 | { | | 1915 | { |
1912 | struct vge_softc *sc = ifp->if_softc; | | 1916 | struct vge_softc *sc = ifp->if_softc; |
1913 | struct mii_data *mii = &sc->sc_mii; | | 1917 | struct mii_data *mii = &sc->sc_mii; |
1914 | struct ifmedia_entry *ife = mii->mii_media.ifm_cur; | | 1918 | struct ifmedia_entry *ife = mii->mii_media.ifm_cur; |
1915 | | | 1919 | |
1916 | /* | | 1920 | /* |
1917 | * If the user manually selects a media mode, we need to turn | | 1921 | * If the user manually selects a media mode, we need to turn |
1918 | * on the forced MAC mode bit in the DIAGCTL register. If the | | 1922 | * on the forced MAC mode bit in the DIAGCTL register. If the |
1919 | * user happens to choose a full duplex mode, we also need to | | 1923 | * user happens to choose a full duplex mode, we also need to |
1920 | * set the 'force full duplex' bit. This applies only to | | 1924 | * set the 'force full duplex' bit. This applies only to |
1921 | * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC | | 1925 | * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC |
1922 | * mode is disabled, and in 1000baseT mode, full duplex is | | 1926 | * mode is disabled, and in 1000baseT mode, full duplex is |
1923 | * always implied, so we turn on the forced mode bit but leave | | 1927 | * always implied, so we turn on the forced mode bit but leave |
1924 | * the FDX bit cleared. | | 1928 | * the FDX bit cleared. |
1925 | */ | | 1929 | */ |
1926 | | | 1930 | |
1927 | switch (IFM_SUBTYPE(ife->ifm_media)) { | | 1931 | switch (IFM_SUBTYPE(ife->ifm_media)) { |
1928 | case IFM_AUTO: | | 1932 | case IFM_AUTO: |
1929 | CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); | | 1933 | CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); |
1930 | CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); | | 1934 | CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); |
1931 | break; | | 1935 | break; |
1932 | case IFM_1000_T: | | 1936 | case IFM_1000_T: |
1933 | CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); | | 1937 | CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); |
1934 | CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); | | 1938 | CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); |
1935 | break; | | 1939 | break; |
1936 | case IFM_100_TX: | | 1940 | case IFM_100_TX: |
1937 | case IFM_10_T: | | 1941 | case IFM_10_T: |
1938 | CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); | | 1942 | CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); |
1939 | if ((ife->ifm_media & IFM_FDX) != 0) { | | 1943 | if ((ife->ifm_media & IFM_FDX) != 0) { |
1940 | CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); | | 1944 | CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); |
1941 | } else { | | 1945 | } else { |
1942 | CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); | | 1946 | CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); |
1943 | } | | 1947 | } |
1944 | break; | | 1948 | break; |
1945 | default: | | 1949 | default: |
1946 | printf("%s: unknown media type: %x\n", | | 1950 | printf("%s: unknown media type: %x\n", |
1947 | device_xname(sc->sc_dev), | | 1951 | device_xname(sc->sc_dev), |
1948 | IFM_SUBTYPE(ife->ifm_media)); | | 1952 | IFM_SUBTYPE(ife->ifm_media)); |
1949 | break; | | 1953 | break; |
1950 | } | | 1954 | } |
1951 | } | | 1955 | } |
1952 | | | 1956 | |
1953 | static int | | 1957 | static int |
1954 | vge_ifflags_cb(struct ethercom *ec) | | 1958 | vge_ifflags_cb(struct ethercom *ec) |
1955 | { | | 1959 | { |
1956 | struct ifnet *ifp = &ec->ec_if; | | 1960 | struct ifnet *ifp = &ec->ec_if; |
1957 | struct vge_softc *sc = ifp->if_softc; | | 1961 | struct vge_softc *sc = ifp->if_softc; |
1958 | u_short change = ifp->if_flags ^ sc->sc_if_flags; | | 1962 | u_short change = ifp->if_flags ^ sc->sc_if_flags; |
1959 | | | 1963 | |
1960 | if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) | | 1964 | if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) |
1961 | return ENETRESET; | | 1965 | return ENETRESET; |
1962 | else if ((change & IFF_PROMISC) == 0) | | 1966 | else if ((change & IFF_PROMISC) == 0) |
1963 | return 0; | | 1967 | return 0; |
1964 | | | 1968 | |
1965 | if ((ifp->if_flags & IFF_PROMISC) == 0) | | 1969 | if ((ifp->if_flags & IFF_PROMISC) == 0) |
1966 | CSR_CLRBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC); | | 1970 | CSR_CLRBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC); |
1967 | else | | 1971 | else |
1968 | CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC); | | 1972 | CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC); |
1969 | vge_setmulti(sc); | | 1973 | vge_setmulti(sc); |
1970 | return 0; | | 1974 | return 0; |
1971 | } | | 1975 | } |
1972 | | | 1976 | |
1973 | static int | | 1977 | static int |
1974 | vge_ioctl(struct ifnet *ifp, u_long command, void *data) | | 1978 | vge_ioctl(struct ifnet *ifp, u_long command, void *data) |
1975 | { | | 1979 | { |
1976 | struct vge_softc *sc; | | 1980 | struct vge_softc *sc; |
1977 | int s, error; | | 1981 | int s, error; |
1978 | | | 1982 | |
1979 | sc = ifp->if_softc; | | 1983 | sc = ifp->if_softc; |
1980 | error = 0; | | 1984 | error = 0; |
1981 | | | 1985 | |
1982 | s = splnet(); | | 1986 | s = splnet(); |
1983 | | | 1987 | |
1984 | if ((error = ether_ioctl(ifp, command, data)) == ENETRESET) { | | 1988 | if ((error = ether_ioctl(ifp, command, data)) == ENETRESET) { |
1985 | error = 0; | | 1989 | error = 0; |
1986 | if (command != SIOCADDMULTI && command != SIOCDELMULTI) | | 1990 | if (command != SIOCADDMULTI && command != SIOCDELMULTI) |
1987 | ; | | 1991 | ; |
1988 | else if (ifp->if_flags & IFF_RUNNING) { | | 1992 | else if (ifp->if_flags & IFF_RUNNING) { |
1989 | /* | | 1993 | /* |
1990 | * Multicast list has changed; set the hardware filter | | 1994 | * Multicast list has changed; set the hardware filter |
1991 | * accordingly. | | 1995 | * accordingly. |
1992 | */ | | 1996 | */ |
1993 | vge_setmulti(sc); | | 1997 | vge_setmulti(sc); |
1994 | } | | 1998 | } |
1995 | } | | 1999 | } |
1996 | sc->sc_if_flags = ifp->if_flags; | | 2000 | sc->sc_if_flags = ifp->if_flags; |
1997 | | | 2001 | |
1998 | splx(s); | | 2002 | splx(s); |
1999 | return error; | | 2003 | return error; |
2000 | } | | 2004 | } |
2001 | | | 2005 | |
2002 | static void | | 2006 | static void |
2003 | vge_watchdog(struct ifnet *ifp) | | 2007 | vge_watchdog(struct ifnet *ifp) |
2004 | { | | 2008 | { |
2005 | struct vge_softc *sc; | | 2009 | struct vge_softc *sc; |
2006 | int s; | | 2010 | int s; |
2007 | | | 2011 | |
2008 | sc = ifp->if_softc; | | 2012 | sc = ifp->if_softc; |
2009 | s = splnet(); | | 2013 | s = splnet(); |
2010 | printf("%s: watchdog timeout\n", device_xname(sc->sc_dev)); | | 2014 | printf("%s: watchdog timeout\n", device_xname(sc->sc_dev)); |
2011 | ifp->if_oerrors++; | | 2015 | ifp->if_oerrors++; |
2012 | | | 2016 | |
2013 | vge_txeof(sc); | | 2017 | vge_txeof(sc); |
2014 | vge_rxeof(sc); | | 2018 | vge_rxeof(sc); |
2015 | | | 2019 | |
2016 | vge_init(ifp); | | 2020 | vge_init(ifp); |
2017 | | | 2021 | |
2018 | splx(s); | | 2022 | splx(s); |
2019 | } | | 2023 | } |
2020 | | | 2024 | |
2021 | /* | | 2025 | /* |
2022 | * Stop the adapter and free any mbufs allocated to the | | 2026 | * Stop the adapter and free any mbufs allocated to the |
2023 | * RX and TX lists. | | 2027 | * RX and TX lists. |
2024 | */ | | 2028 | */ |
2025 | static void | | 2029 | static void |
2026 | vge_stop(struct ifnet *ifp, int disable) | | 2030 | vge_stop(struct ifnet *ifp, int disable) |
2027 | { | | 2031 | { |
2028 | struct vge_softc *sc = ifp->if_softc; | | 2032 | struct vge_softc *sc = ifp->if_softc; |
2029 | struct vge_txsoft *txs; | | 2033 | struct vge_txsoft *txs; |
2030 | struct vge_rxsoft *rxs; | | 2034 | struct vge_rxsoft *rxs; |
2031 | int i, s; | | 2035 | int i, s; |
2032 | | | 2036 | |
2033 | s = splnet(); | | 2037 | s = splnet(); |
2034 | ifp->if_timer = 0; | | 2038 | ifp->if_timer = 0; |
2035 | | | 2039 | |
2036 | ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); | | 2040 | ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); |
2037 | #ifdef DEVICE_POLLING | | 2041 | #ifdef DEVICE_POLLING |
2038 | ether_poll_deregister(ifp); | | 2042 | ether_poll_deregister(ifp); |
2039 | #endif /* DEVICE_POLLING */ | | 2043 | #endif /* DEVICE_POLLING */ |
2040 | | | 2044 | |
2041 | CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); | | 2045 | CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); |
2042 | CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP); | | 2046 | CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP); |
2043 | CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); | | 2047 | CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); |
2044 | CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF); | | 2048 | CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF); |
2045 | CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF); | | 2049 | CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF); |
2046 | CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0); | | 2050 | CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0); |
2047 | | | 2051 | |
2048 | if (sc->sc_rx_mhead != NULL) { | | 2052 | if (sc->sc_rx_mhead != NULL) { |
2049 | m_freem(sc->sc_rx_mhead); | | 2053 | m_freem(sc->sc_rx_mhead); |
2050 | sc->sc_rx_mhead = sc->sc_rx_mtail = NULL; | | 2054 | sc->sc_rx_mhead = sc->sc_rx_mtail = NULL; |
2051 | } | | 2055 | } |
2052 | | | 2056 | |
2053 | /* Free the TX list buffers. */ | | 2057 | /* Free the TX list buffers. */ |
2054 | | | 2058 | |
2055 | for (i = 0; i < VGE_NTXDESC; i++) { | | 2059 | for (i = 0; i < VGE_NTXDESC; i++) { |
2056 | txs = &sc->sc_txsoft[i]; | | 2060 | txs = &sc->sc_txsoft[i]; |
2057 | if (txs->txs_mbuf != NULL) { | | 2061 | if (txs->txs_mbuf != NULL) { |
2058 | bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); | | 2062 | bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); |
2059 | m_freem(txs->txs_mbuf); | | 2063 | m_freem(txs->txs_mbuf); |
2060 | txs->txs_mbuf = NULL; | | 2064 | txs->txs_mbuf = NULL; |
2061 | } | | 2065 | } |
2062 | } | | 2066 | } |
2063 | | | 2067 | |
2064 | /* Free the RX list buffers. */ | | 2068 | /* Free the RX list buffers. */ |
2065 | | | 2069 | |
2066 | for (i = 0; i < VGE_NRXDESC; i++) { | | 2070 | for (i = 0; i < VGE_NRXDESC; i++) { |
2067 | rxs = &sc->sc_rxsoft[i]; | | 2071 | rxs = &sc->sc_rxsoft[i]; |
2068 | if (rxs->rxs_mbuf != NULL) { | | 2072 | if (rxs->rxs_mbuf != NULL) { |
2069 | bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); | | 2073 | bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); |
2070 | m_freem(rxs->rxs_mbuf); | | 2074 | m_freem(rxs->rxs_mbuf); |
2071 | rxs->rxs_mbuf = NULL; | | 2075 | rxs->rxs_mbuf = NULL; |
2072 | } | | 2076 | } |
2073 | } | | 2077 | } |
2074 | | | 2078 | |
2075 | splx(s); | | 2079 | splx(s); |
2076 | } | | 2080 | } |
2077 | | | 2081 | |
2078 | #if VGE_POWER_MANAGEMENT | | 2082 | #if VGE_POWER_MANAGEMENT |
2079 | /* | | 2083 | /* |
2080 | * Device suspend routine. Stop the interface and save some PCI | | 2084 | * Device suspend routine. Stop the interface and save some PCI |
2081 | * settings in case the BIOS doesn't restore them properly on | | 2085 | * settings in case the BIOS doesn't restore them properly on |
2082 | * resume. | | 2086 | * resume. |
2083 | */ | | 2087 | */ |
2084 | static int | | 2088 | static int |
2085 | vge_suspend(device_t dev) | | 2089 | vge_suspend(device_t dev) |
2086 | { | | 2090 | { |
2087 | struct vge_softc *sc; | | 2091 | struct vge_softc *sc; |
2088 | int i; | | 2092 | int i; |
2089 | | | 2093 | |
2090 | sc = device_get_softc(dev); | | 2094 | sc = device_get_softc(dev); |
2091 | | | 2095 | |
2092 | vge_stop(sc); | | 2096 | vge_stop(sc); |
2093 | | | 2097 | |
2094 | for (i = 0; i < 5; i++) | | 2098 | for (i = 0; i < 5; i++) |
2095 | sc->sc_saved_maps[i] = | | 2099 | sc->sc_saved_maps[i] = |
2096 | pci_read_config(dev, PCIR_MAPS + i * 4, 4); | | 2100 | pci_read_config(dev, PCIR_MAPS + i * 4, 4); |
2097 | sc->sc_saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4); | | 2101 | sc->sc_saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4); |
2098 | sc->sc_saved_intline = pci_read_config(dev, PCIR_INTLINE, 1); | | 2102 | sc->sc_saved_intline = pci_read_config(dev, PCIR_INTLINE, 1); |
2099 | sc->sc_saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1); | | 2103 | sc->sc_saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1); |
2100 | sc->sc_saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1); | | 2104 | sc->sc_saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1); |
2101 | | | 2105 | |
2102 | sc->suspended = 1; | | 2106 | sc->suspended = 1; |
2103 | | | 2107 | |
2104 | return 0; | | 2108 | return 0; |
2105 | } | | 2109 | } |
2106 | | | 2110 | |
2107 | /* | | 2111 | /* |
2108 | * Device resume routine. Restore some PCI settings in case the BIOS | | 2112 | * Device resume routine. Restore some PCI settings in case the BIOS |
2109 | * doesn't, re-enable busmastering, and restart the interface if | | 2113 | * doesn't, re-enable busmastering, and restart the interface if |
2110 | * appropriate. | | 2114 | * appropriate. |
2111 | */ | | 2115 | */ |
2112 | static int | | 2116 | static int |
2113 | vge_resume(device_t dev) | | 2117 | vge_resume(device_t dev) |
2114 | { | | 2118 | { |
2115 | struct vge_softc *sc; | | 2119 | struct vge_softc *sc; |
2116 | struct ifnet *ifp; | | 2120 | struct ifnet *ifp; |
2117 | int i; | | 2121 | int i; |
2118 | | | 2122 | |
2119 | sc = device_private(dev); | | 2123 | sc = device_private(dev); |
2120 | ifp = &sc->sc_ethercom.ec_if; | | 2124 | ifp = &sc->sc_ethercom.ec_if; |
2121 | | | 2125 | |
2122 | /* better way to do this? */ | | 2126 | /* better way to do this? */ |
2123 | for (i = 0; i < 5; i++) | | 2127 | for (i = 0; i < 5; i++) |
2124 | pci_write_config(dev, PCIR_MAPS + i * 4, | | 2128 | pci_write_config(dev, PCIR_MAPS + i * 4, |
2125 | sc->sc_saved_maps[i], 4); | | 2129 | sc->sc_saved_maps[i], 4); |
2126 | pci_write_config(dev, PCIR_BIOS, sc->sc_saved_biosaddr, 4); | | 2130 | pci_write_config(dev, PCIR_BIOS, sc->sc_saved_biosaddr, 4); |
2127 | pci_write_config(dev, PCIR_INTLINE, sc->sc_saved_intline, 1); | | 2131 | pci_write_config(dev, PCIR_INTLINE, sc->sc_saved_intline, 1); |
2128 | pci_write_config(dev, PCIR_CACHELNSZ, sc->sc_saved_cachelnsz, 1); | | 2132 | pci_write_config(dev, PCIR_CACHELNSZ, sc->sc_saved_cachelnsz, 1); |
2129 | pci_write_config(dev, PCIR_LATTIMER, sc->sc_saved_lattimer, 1); | | 2133 | pci_write_config(dev, PCIR_LATTIMER, sc->sc_saved_lattimer, 1); |
2130 | | | 2134 | |
2131 | /* reenable busmastering */ | | 2135 | /* reenable busmastering */ |
2132 | pci_enable_busmaster(dev); | | 2136 | pci_enable_busmaster(dev); |
2133 | pci_enable_io(dev, SYS_RES_MEMORY); | | 2137 | pci_enable_io(dev, SYS_RES_MEMORY); |
2134 | | | 2138 | |
2135 | /* reinitialize interface if necessary */ | | 2139 | /* reinitialize interface if necessary */ |
2136 | if (ifp->if_flags & IFF_UP) | | 2140 | if (ifp->if_flags & IFF_UP) |
2137 | vge_init(sc); | | 2141 | vge_init(sc); |
2138 | | | 2142 | |
2139 | sc->suspended = 0; | | 2143 | sc->suspended = 0; |
2140 | | | 2144 | |
2141 | return 0; | | 2145 | return 0; |
2142 | } | | 2146 | } |
2143 | #endif | | 2147 | #endif |
2144 | | | 2148 | |
2145 | /* | | 2149 | /* |
2146 | * Stop all chip I/O so that the kernel's probe routines don't | | 2150 | * Stop all chip I/O so that the kernel's probe routines don't |
2147 | * get confused by errant DMAs when rebooting. | | 2151 | * get confused by errant DMAs when rebooting. |
2148 | */ | | 2152 | */ |
2149 | static bool | | 2153 | static bool |
2150 | vge_shutdown(device_t self, int howto) | | 2154 | vge_shutdown(device_t self, int howto) |
2151 | { | | 2155 | { |
2152 | struct vge_softc *sc; | | 2156 | struct vge_softc *sc; |
2153 | | | 2157 | |
2154 | sc = device_private(self); | | 2158 | sc = device_private(self); |
2155 | vge_stop(&sc->sc_ethercom.ec_if, 1); | | 2159 | vge_stop(&sc->sc_ethercom.ec_if, 1); |
2156 | | | 2160 | |
2157 | return true; | | 2161 | return true; |
2158 | } | | 2162 | } |
| | | 2163 | |
| | | 2164 | static void |
| | | 2165 | vge_clrwol(struct vge_softc *sc) |
| | | 2166 | { |
| | | 2167 | uint8_t val; |
| | | 2168 | |
| | | 2169 | val = CSR_READ_1(sc, VGE_PWRSTAT); |
| | | 2170 | val &= ~VGE_STICKHW_SWPTAG; |
| | | 2171 | CSR_WRITE_1(sc, VGE_PWRSTAT, val); |
| | | 2172 | /* Disable WOL and clear power state indicator. */ |
| | | 2173 | val = CSR_READ_1(sc, VGE_PWRSTAT); |
| | | 2174 | val &= ~(VGE_STICKHW_DS0 | VGE_STICKHW_DS1); |
| | | 2175 | CSR_WRITE_1(sc, VGE_PWRSTAT, val); |
| | | 2176 | |
| | | 2177 | CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII); |
| | | 2178 | CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); |
| | | 2179 | |
| | | 2180 | /* Clear WOL on pattern match. */ |
| | | 2181 | CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL); |
| | | 2182 | /* Disable WOL on magic/unicast packet. */ |
| | | 2183 | CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F); |
| | | 2184 | CSR_WRITE_1(sc, VGE_WOLCFGC, VGE_WOLCFG_SAB | VGE_WOLCFG_SAM | |
| | | 2185 | VGE_WOLCFG_PMEOVR); |
| | | 2186 | /* Clear WOL status on pattern match. */ |
| | | 2187 | CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF); |
| | | 2188 | CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF); |
| | | 2189 | } |