Tue Oct 8 14:26:27 2019 UTC
Copy vge_clrwol() from FreeBSD and call it in vge_attach() to recover from
powerdown mode. Fixes PR kern/41525 reported by Aran Clauson.
(msaitoh)
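
The new routine itself lands further down if_vge.c, beyond the excerpt below.
For context, here is a sketch of vge_clrwol() as it reads in FreeBSD's
if_vge.c, the source it was copied from. Treat the VGE_PWRSTAT, VGE_STICKHW_*,
VGE_DIAGCTL_* and VGE_WOL* register/bit names as assumptions: they are meant
to match the definitions added by the companion if_vgereg.h diff (r1.4 ->
r1.5), not verbatim quotes of the committed text.

static void
vge_clrwol(struct vge_softc *sc)
{
	uint8_t val;

	/* Assumed names: VGE_PWRSTAT and VGE_STICKHW_* are expected to
	 * come from the if_vgereg.h half of this commit. */
	val = CSR_READ_1(sc, VGE_PWRSTAT);
	val &= ~VGE_STICKHW_SWPTAG;
	CSR_WRITE_1(sc, VGE_PWRSTAT, val);
	/* Disable WOL and clear power state indicator. */
	val = CSR_READ_1(sc, VGE_PWRSTAT);
	val &= ~(VGE_STICKHW_DS0 | VGE_STICKHW_DS1);
	CSR_WRITE_1(sc, VGE_PWRSTAT, val);

	CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII);
	CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);

	/* Clear WOL on pattern match. */
	CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL);
	/* Disable WOL on magic/unicast packet. */
	CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F);
	CSR_WRITE_1(sc, VGE_WOLCFGC, VGE_WOLCFG_SAB | VGE_WOLCFG_SAM |
	    VGE_WOLCFG_PMEOVR);
	/* Clear WOL status on pattern match. */
	CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF);
	CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF);
}

Calling this early in vge_attach() disarms any Wake On LAN state left armed
by the BIOS or a previously running OS; without it the chip can stay in
powerdown mode, so the interface attaches but never passes traffic, which is
the failure reported in PR kern/41525.
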
diff -r1.74 -r1.75 src/sys/dev/pci/if_vge.c
diff -r1.4 -r1.5 src/sys/dev/pci/if_vgereg.h

cvs diff -r1.74 -r1.75 src/sys/dev/pci/if_vge.c

--- src/sys/dev/pci/if_vge.c 2019/09/13 07:55:07 1.74
+++ src/sys/dev/pci/if_vge.c 2019/10/08 14:26:27 1.75
@@ -1,2158 +1,2189 @@
-/*	$NetBSD: if_vge.c,v 1.74 2019/09/13 07:55:07 msaitoh Exp $	*/
+/*	$NetBSD: if_vge.c,v 1.75 2019/10/08 14:26:27 msaitoh Exp $	*/
 
 /*-
  * Copyright (c) 2004
  *	Bill Paul <wpaul@windriver.com>. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. All advertising materials mentioning features or use of this software
  *    must display the following acknowledgement:
  *	This product includes software developed by Bill Paul.
  * 4. Neither the name of the author nor the names of any co-contributors
  *    may be used to endorse or promote products derived from this software
  *    without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
  * THE POSSIBILITY OF SUCH DAMAGE.
  *
  * FreeBSD: src/sys/dev/vge/if_vge.c,v 1.5 2005/02/07 19:39:29 glebius Exp
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_vge.c,v 1.74 2019/09/13 07:55:07 msaitoh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_vge.c,v 1.75 2019/10/08 14:26:27 msaitoh Exp $");
 
 /*
  * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
  *
  * Written by Bill Paul <wpaul@windriver.com>
  * Senior Networking Software Engineer
  * Wind River Systems
  */
 
 /*
  * The VIA Networking VT6122 is a 32bit, 33/66 MHz PCI device that
  * combines a tri-speed ethernet MAC and PHY, with the following
  * features:
  *
  *	o Jumbo frame support up to 16K
  *	o Transmit and receive flow control
  *	o IPv4 checksum offload
  *	o VLAN tag insertion and stripping
  *	o TCP large send
  *	o 64-bit multicast hash table filter
  *	o 64 entry CAM filter
  *	o 16K RX FIFO and 48K TX FIFO memory
  *	o Interrupt moderation
  *
  * The VT6122 supports up to four transmit DMA queues. The descriptors
  * in the transmit ring can address up to 7 data fragments; frames which
  * span more than 7 data buffers must be coalesced, but in general the
  * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
  * long. The receive descriptors address only a single buffer.
  *
  * There are two peculiar design issues with the VT6122. One is that
  * receive data buffers must be aligned on a 32-bit boundary. This is
  * not a problem where the VT6122 is used as a LOM device in x86-based
  * systems, but on architectures that generate unaligned access traps, we
  * have to do some copying.
  *
  * The other issue has to do with the way 64-bit addresses are handled.
  * The DMA descriptors only allow you to specify 48 bits of addressing
  * information. The remaining 16 bits are specified using one of the
  * I/O registers. If you only have a 32-bit system, then this isn't
  * an issue, but if you have a 64-bit system and more than 4GB of
  * memory, you must have to make sure your network data buffers reside
  * in the same 48-bit 'segment.'
  *
  * Special thanks to Ryan Fu at VIA Networking for providing documentation
  * and sample NICs for testing.
  */
 
 
 #include <sys/param.h>
 #include <sys/endian.h>
 #include <sys/systm.h>
 #include <sys/device.h>
 #include <sys/sockio.h>
 #include <sys/mbuf.h>
 #include <sys/malloc.h>
 #include <sys/kernel.h>
 #include <sys/socket.h>
 
 #include <net/if.h>
 #include <net/if_arp.h>
 #include <net/if_ether.h>
 #include <net/if_dl.h>
 #include <net/if_media.h>
 
 #include <net/bpf.h>
 
 #include <sys/bus.h>
 
 #include <dev/mii/mii.h>
 #include <dev/mii/miivar.h>
 
 #include <dev/pci/pcireg.h>
 #include <dev/pci/pcivar.h>
 #include <dev/pci/pcidevs.h>
 
 #include <dev/pci/if_vgereg.h>
 
 #define VGE_IFQ_MAXLEN 64
 
 #define VGE_RING_ALIGN 256
 
 #define VGE_NTXDESC 256
 #define VGE_NTXDESC_MASK (VGE_NTXDESC - 1)
 #define VGE_NEXT_TXDESC(x) ((x + 1) & VGE_NTXDESC_MASK)
 #define VGE_PREV_TXDESC(x) ((x - 1) & VGE_NTXDESC_MASK)
 
 #define VGE_NRXDESC 256 /* Must be a multiple of 4!! */
 #define VGE_NRXDESC_MASK (VGE_NRXDESC - 1)
 #define VGE_NEXT_RXDESC(x) ((x + 1) & VGE_NRXDESC_MASK)
 #define VGE_PREV_RXDESC(x) ((x - 1) & VGE_NRXDESC_MASK)
 
 #define VGE_ADDR_LO(y) ((uint64_t)(y) & 0xFFFFFFFF)
 #define VGE_ADDR_HI(y) ((uint64_t)(y) >> 32)
 #define VGE_BUFLEN(y) ((y) & 0x7FFF)
 #define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
 
 #define VGE_POWER_MANAGEMENT 0 /* disabled for now */
 
 /*
  * Mbuf adjust factor to force 32-bit alignment of IP header.
  * Drivers should pad ETHER_ALIGN bytes when setting up a
  * RX mbuf so the upper layers get the IP header properly aligned
  * past the 14-byte Ethernet header.
  *
  * See also comment in vge_encap().
  */
 
 #ifdef __NO_STRICT_ALIGNMENT
 #define VGE_RX_BUFSIZE MCLBYTES
 #else
 #define VGE_RX_PAD sizeof(uint32_t)
 #define VGE_RX_BUFSIZE (MCLBYTES - VGE_RX_PAD)
 #endif
 
 /*
  * Control structures are DMA'd to the vge chip. We allocate them in
  * a single clump that maps to a single DMA segment to make several things
  * easier.
  */
 struct vge_control_data {
 	/* TX descriptors */
 	struct vge_txdesc vcd_txdescs[VGE_NTXDESC];
 	/* RX descriptors */
 	struct vge_rxdesc vcd_rxdescs[VGE_NRXDESC];
 	/* dummy data for TX padding */
 	uint8_t vcd_pad[ETHER_PAD_LEN];
 };
 
 #define VGE_CDOFF(x) offsetof(struct vge_control_data, x)
 #define VGE_CDTXOFF(x) VGE_CDOFF(vcd_txdescs[(x)])
 #define VGE_CDRXOFF(x) VGE_CDOFF(vcd_rxdescs[(x)])
 #define VGE_CDPADOFF() VGE_CDOFF(vcd_pad[0])
 
 /*
  * Software state for TX jobs.
  */
 struct vge_txsoft {
 	struct mbuf *txs_mbuf; /* head of our mbuf chain */
 	bus_dmamap_t txs_dmamap; /* our DMA map */
 };
 
 /*
  * Software state for RX jobs.
  */
 struct vge_rxsoft {
 	struct mbuf *rxs_mbuf; /* head of our mbuf chain */
 	bus_dmamap_t rxs_dmamap; /* our DMA map */
 };
 
 
 struct vge_softc {
 	device_t sc_dev;
 
 	bus_space_tag_t sc_bst; /* bus space tag */
 	bus_space_handle_t sc_bsh; /* bus space handle */
 	bus_dma_tag_t sc_dmat;
 
 	struct ethercom sc_ethercom; /* interface info */
 	uint8_t sc_eaddr[ETHER_ADDR_LEN];
 
 	void *sc_intrhand;
 	struct mii_data sc_mii;
 	uint8_t sc_type;
 	u_short sc_if_flags;
 	int sc_link;
 	int sc_camidx;
 	callout_t sc_timeout;
 
 	bus_dmamap_t sc_cddmamap;
 #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr
 
 	struct vge_txsoft sc_txsoft[VGE_NTXDESC];
 	struct vge_rxsoft sc_rxsoft[VGE_NRXDESC];
 	struct vge_control_data *sc_control_data;
 #define sc_txdescs sc_control_data->vcd_txdescs
 #define sc_rxdescs sc_control_data->vcd_rxdescs
 
 	int sc_tx_prodidx;
 	int sc_tx_considx;
 	int sc_tx_free;
 
 	struct mbuf *sc_rx_mhead;
 	struct mbuf *sc_rx_mtail;
 	int sc_rx_prodidx;
 	int sc_rx_consumed;
 
 	int sc_suspended; /* 0 = normal 1 = suspended */
 	uint32_t sc_saved_maps[5]; /* pci data */
 	uint32_t sc_saved_biosaddr;
 	uint8_t sc_saved_intline;
 	uint8_t sc_saved_cachelnsz;
 	uint8_t sc_saved_lattimer;
 };
 
 #define VGE_CDTXADDR(sc, x) ((sc)->sc_cddma + VGE_CDTXOFF(x))
 #define VGE_CDRXADDR(sc, x) ((sc)->sc_cddma + VGE_CDRXOFF(x))
 #define VGE_CDPADADDR(sc) ((sc)->sc_cddma + VGE_CDPADOFF())
 
 #define VGE_TXDESCSYNC(sc, idx, ops) \
 	bus_dmamap_sync((sc)->sc_dmat,(sc)->sc_cddmamap, \
 	    VGE_CDTXOFF(idx), \
 	    offsetof(struct vge_txdesc, td_frag[0]), \
 	    (ops))
 #define VGE_TXFRAGSYNC(sc, idx, nsegs, ops) \
 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
 	    VGE_CDTXOFF(idx) + \
 	    offsetof(struct vge_txdesc, td_frag[0]), \
 	    sizeof(struct vge_txfrag) * (nsegs), \
 	    (ops))
 #define VGE_RXDESCSYNC(sc, idx, ops) \
 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
 	    VGE_CDRXOFF(idx), \
 	    sizeof(struct vge_rxdesc), \
 	    (ops))
 
 /*
  * register space access macros
  */
 #define CSR_WRITE_4(sc, reg, val) \
 	bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))
 #define CSR_WRITE_2(sc, reg, val) \
 	bus_space_write_2((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))
 #define CSR_WRITE_1(sc, reg, val) \
 	bus_space_write_1((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))
 
 #define CSR_READ_4(sc, reg) \
 	bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg))
 #define CSR_READ_2(sc, reg) \
 	bus_space_read_2((sc)->sc_bst, (sc)->sc_bsh, (reg))
 #define CSR_READ_1(sc, reg) \
 	bus_space_read_1((sc)->sc_bst, (sc)->sc_bsh, (reg))
 
 #define CSR_SETBIT_1(sc, reg, x) \
 	CSR_WRITE_1((sc), (reg), CSR_READ_1((sc), (reg)) | (x))
 #define CSR_SETBIT_2(sc, reg, x) \
 	CSR_WRITE_2((sc), (reg), CSR_READ_2((sc), (reg)) | (x))
 #define CSR_SETBIT_4(sc, reg, x) \
 	CSR_WRITE_4((sc), (reg), CSR_READ_4((sc), (reg)) | (x))
 
 #define CSR_CLRBIT_1(sc, reg, x) \
 	CSR_WRITE_1((sc), (reg), CSR_READ_1((sc), (reg)) & ~(x))
 #define CSR_CLRBIT_2(sc, reg, x) \
 	CSR_WRITE_2((sc), (reg), CSR_READ_2((sc), (reg)) & ~(x))
 #define CSR_CLRBIT_4(sc, reg, x) \
 	CSR_WRITE_4((sc), (reg), CSR_READ_4((sc), (reg)) & ~(x))
 
 #define VGE_TIMEOUT 10000
 
 #define VGE_PCI_LOIO 0x10
 #define VGE_PCI_LOMEM 0x14
 
 static inline void vge_set_txaddr(struct vge_txfrag *, bus_addr_t);
 static inline void vge_set_rxaddr(struct vge_rxdesc *, bus_addr_t);
 
 static int vge_ifflags_cb(struct ethercom *);
 
 static int vge_match(device_t, cfdata_t, void *);
 static void vge_attach(device_t, device_t, void *);
 
 static int vge_encap(struct vge_softc *, struct mbuf *, int);
 
 static int vge_allocmem(struct vge_softc *);
 static int vge_newbuf(struct vge_softc *, int, struct mbuf *);
 #ifndef __NO_STRICT_ALIGNMENT
 static inline void vge_fixup_rx(struct mbuf *);
 #endif
 static void vge_rxeof(struct vge_softc *);
 static void vge_txeof(struct vge_softc *);
 static int vge_intr(void *);
 static void vge_tick(void *);
 static void vge_start(struct ifnet *);
 static int vge_ioctl(struct ifnet *, u_long, void *);
 static int vge_init(struct ifnet *);
 static void vge_stop(struct ifnet *, int);
 static void vge_watchdog(struct ifnet *);
 #if VGE_POWER_MANAGEMENT
 static int vge_suspend(device_t);
 static int vge_resume(device_t);
 #endif
 static bool vge_shutdown(device_t, int);
 
 static uint16_t vge_read_eeprom(struct vge_softc *, int);
 
 static void vge_miipoll_start(struct vge_softc *);
 static void vge_miipoll_stop(struct vge_softc *);
 static int vge_miibus_readreg(device_t, int, int, uint16_t *);
 static int vge_miibus_writereg(device_t, int, int, uint16_t);
 static void vge_miibus_statchg(struct ifnet *);
 
 static void vge_cam_clear(struct vge_softc *);
 static int vge_cam_set(struct vge_softc *, uint8_t *);
+static void vge_clrwol(struct vge_softc *);
 static void vge_setmulti(struct vge_softc *);
 static void vge_reset(struct vge_softc *);
 
 CFATTACH_DECL_NEW(vge, sizeof(struct vge_softc),
     vge_match, vge_attach, NULL, NULL);
 
 static inline void
 vge_set_txaddr(struct vge_txfrag *f, bus_addr_t daddr)
 {
 
 	f->tf_addrlo = htole32((uint32_t)daddr);
 	if (sizeof(bus_addr_t) == sizeof(uint64_t))
 		f->tf_addrhi = htole16(((uint64_t)daddr >> 32) & 0xFFFF);
 	else
 		f->tf_addrhi = 0;
 }
 
 static inline void
 vge_set_rxaddr(struct vge_rxdesc *rxd, bus_addr_t daddr)
 {
 
 	rxd->rd_addrlo = htole32((uint32_t)daddr);
 	if (sizeof(bus_addr_t) == sizeof(uint64_t))
 		rxd->rd_addrhi = htole16(((uint64_t)daddr >> 32) & 0xFFFF);
 	else
 		rxd->rd_addrhi = 0;
 }
 
 /*
  * Read a word of data stored in the EEPROM at address 'addr.'
  */
 static uint16_t
 vge_read_eeprom(struct vge_softc *sc, int addr)
 {
 	int i;
 	uint16_t word = 0;
 
 	/*
 	 * Enter EEPROM embedded programming mode. In order to
 	 * access the EEPROM at all, we first have to set the
 	 * EELOAD bit in the CHIPCFG2 register.
 	 */
 	CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
 	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*| VGE_EECSR_ECS*/);
 
 	/* Select the address of the word we want to read */
 	CSR_WRITE_1(sc, VGE_EEADDR, addr);
 
 	/* Issue read command */
 	CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);
 
 	/* Wait for the done bit to be set. */
 	for (i = 0; i < VGE_TIMEOUT; i++) {
 		if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
 			break;
 	}
 
 	if (i == VGE_TIMEOUT) {
 		printf("%s: EEPROM read timed out\n", device_xname(sc->sc_dev));
 		return 0;
 	}
 
 	/* Read the result */
 	word = CSR_READ_2(sc, VGE_EERDDAT);
 
 	/* Turn off EEPROM access mode. */
 	CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*| VGE_EECSR_ECS*/);
 	CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
 
 	return word;
 }
 
 static void
 vge_miipoll_stop(struct vge_softc *sc)
 {
 	int i;
 
 	CSR_WRITE_1(sc, VGE_MIICMD, 0);
 
 	for (i = 0; i < VGE_TIMEOUT; i++) {
 		DELAY(1);
 		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
 			break;
 	}
 
 	if (i == VGE_TIMEOUT) {
 		printf("%s: failed to idle MII autopoll\n",
 		    device_xname(sc->sc_dev));
 	}
 }
 
 static void
 vge_miipoll_start(struct vge_softc *sc)
 {
 	int i;
 
 	/* First, make sure we're idle. */
 
 	CSR_WRITE_1(sc, VGE_MIICMD, 0);
 	CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);
 
 	for (i = 0; i < VGE_TIMEOUT; i++) {
 		DELAY(1);
 		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
 			break;
 	}
 
 	if (i == VGE_TIMEOUT) {
 		printf("%s: failed to idle MII autopoll\n",
 		    device_xname(sc->sc_dev));
 		return;
 	}
 
 	/* Now enable auto poll mode. */
 
 	CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);
 
 	/* And make sure it started. */
 
 	for (i = 0; i < VGE_TIMEOUT; i++) {
 		DELAY(1);
 		if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
 			break;
 	}
 
 	if (i == VGE_TIMEOUT) {
 		printf("%s: failed to start MII autopoll\n",
 		    device_xname(sc->sc_dev));
 	}
 }
 
 static int
 vge_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val)
 {
 	struct vge_softc *sc;
 	int i, s;
 	int rv = 0;
 
 	sc = device_private(dev);
 	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
 		return -1;
 
 	s = splnet();
 	vge_miipoll_stop(sc);
 
 	/* Specify the register we want to read. */
 	CSR_WRITE_1(sc, VGE_MIIADDR, reg);
 
 	/* Issue read command. */
 	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);
 
 	/* Wait for the read command bit to self-clear. */
 	for (i = 0; i < VGE_TIMEOUT; i++) {
 		DELAY(1);
 		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
 			break;
 	}
 
 	if (i == VGE_TIMEOUT) {
 		printf("%s: MII read timed out\n", device_xname(sc->sc_dev));
 		rv = ETIMEDOUT;
 	} else
 		*val = CSR_READ_2(sc, VGE_MIIDATA);
 
 	vge_miipoll_start(sc);
 	splx(s);
 
 	return rv;
 }
 
 static int
 vge_miibus_writereg(device_t dev, int phy, int reg, uint16_t val)
 {
 	struct vge_softc *sc;
 	int i, s, rv = 0;
 
 	sc = device_private(dev);
 	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
 		return -1;
 
 	s = splnet();
 	vge_miipoll_stop(sc);
 
 	/* Specify the register we want to write. */
 	CSR_WRITE_1(sc, VGE_MIIADDR, reg);
 
 	/* Specify the data we want to write. */
 	CSR_WRITE_2(sc, VGE_MIIDATA, val);
 
 	/* Issue write command. */
 	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);
 
 	/* Wait for the write command bit to self-clear. */
 	for (i = 0; i < VGE_TIMEOUT; i++) {
 		DELAY(1);
 		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
 			break;
 	}
 
 	if (i == VGE_TIMEOUT) {
 		printf("%s: MII write timed out\n", device_xname(sc->sc_dev));
 		rv = ETIMEDOUT;
 	}
 
 	vge_miipoll_start(sc);
 	splx(s);
 
 	return rv;
 }
 
 static void
 vge_cam_clear(struct vge_softc *sc)
 {
 	int i;
 
 	/*
 	 * Turn off all the mask bits. This tells the chip
 	 * that none of the entries in the CAM filter are valid.
 	 * desired entries will be enabled as we fill the filter in.
 	 */
 
 	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
 	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
 	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
 	for (i = 0; i < 8; i++)
 		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
 
 	/* Clear the VLAN filter too. */
 
 	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE | VGE_CAMADDR_AVSEL);
 	for (i = 0; i < 8; i++)
 		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
 
 	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
 	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
 	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
 
 	sc->sc_camidx = 0;
 }
 
 static int
 vge_cam_set(struct vge_softc *sc, uint8_t *addr)
 {
 	int i, error;
 
 	error = 0;
 
 	if (sc->sc_camidx == VGE_CAM_MAXADDRS)
 		return ENOSPC;
 
 	/* Select the CAM data page. */
 	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
 	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);
 
 	/* Set the filter entry we want to update and enable writing. */
 	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE | sc->sc_camidx);
 
 	/* Write the address to the CAM registers */
 	for (i = 0; i < ETHER_ADDR_LEN; i++)
 		CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);
 
 	/* Issue a write command. */
 	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);
 
 	/* Wake for it to clear. */
 	for (i = 0; i < VGE_TIMEOUT; i++) {
 		DELAY(1);
 		if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
 			break;
 	}
 
 	if (i == VGE_TIMEOUT) {
 		printf("%s: setting CAM filter failed\n",
 		    device_xname(sc->sc_dev));
 		error = EIO;
 		goto fail;
 	}
 
 	/* Select the CAM mask page. */
 	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
 	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
 
 	/* Set the mask bit that enables this filter. */
 	CSR_SETBIT_1(sc, VGE_CAM0 + (sc->sc_camidx / 8),
 	    1 << (sc->sc_camidx & 7));
 
 	sc->sc_camidx++;
 
  fail:
 	/* Turn off access to CAM. */
 	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
 	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
 	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
 
 	return error;
 }
 
 /*
  * Program the multicast filter. We use the 64-entry CAM filter
  * for perfect filtering. If there's more than 64 multicast addresses,
  * we use the hash filter instead.
  */
 static void
 vge_setmulti(struct vge_softc *sc)
 {
 	struct ethercom *ec = &sc->sc_ethercom;
 	struct ifnet *ifp = &ec->ec_if;
 	int error;
 	uint32_t h, hashes[2] = { 0, 0 };
 	struct ether_multi *enm;
 	struct ether_multistep step;
 
 	error = 0;
 
 	/* First, zot all the multicast entries. */
 	vge_cam_clear(sc);
 	CSR_WRITE_4(sc, VGE_MAR0, 0);
 	CSR_WRITE_4(sc, VGE_MAR1, 0);
 	ifp->if_flags &= ~IFF_ALLMULTI;
 
 	/*
 	 * If the user wants allmulti or promisc mode, enable reception
 	 * of all multicast frames.
 	 */
 	if (ifp->if_flags & IFF_PROMISC) {
  allmulti:
 		CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF);
 		CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF);
 		ifp->if_flags |= IFF_ALLMULTI;
 		return;
 	}
 
 	/* Now program new ones */
 	ETHER_LOCK(ec);
 	ETHER_FIRST_MULTI(step, ec, enm);
 	while (enm != NULL) {
 		/*
 		 * If multicast range, fall back to ALLMULTI.
 		 */
 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
 		    ETHER_ADDR_LEN) != 0) {
 			ETHER_UNLOCK(ec);
 			goto allmulti;
 		}
 
 		error = vge_cam_set(sc, enm->enm_addrlo);
 		if (error)
 			break;
 
 		ETHER_NEXT_MULTI(step, enm);
 	}
 	ETHER_UNLOCK(ec);
 
 	/* If there were too many addresses, use the hash filter. */
 	if (error) {
 		vge_cam_clear(sc);
 
 		ETHER_LOCK(ec);
 		ETHER_FIRST_MULTI(step, ec, enm);
 		while (enm != NULL) {
 			/*
 			 * If multicast range, fall back to ALLMULTI.
 			 */
 			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
 			    ETHER_ADDR_LEN) != 0) {
 				ETHER_UNLOCK(ec);
 				goto allmulti;
 			}
 
 			h = ether_crc32_be(enm->enm_addrlo,
 			    ETHER_ADDR_LEN) >> 26;
 			hashes[h >> 5] |= 1 << (h & 0x1f);
 
 			ETHER_NEXT_MULTI(step, enm);
 		}
 		ETHER_UNLOCK(ec);
 
 		CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
 		CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
 	}
 }
 
 static void
 vge_reset(struct vge_softc *sc)
 {
 	int i;
 
 	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);
 
 	for (i = 0; i < VGE_TIMEOUT; i++) {
 		DELAY(5);
 		if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
 			break;
 	}
 
 	if (i == VGE_TIMEOUT) {
 		printf("%s: soft reset timed out", device_xname(sc->sc_dev));
 		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
 		DELAY(2000);
 	}
 
 	DELAY(5000);
 
 	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);
 
 	for (i = 0; i < VGE_TIMEOUT; i++) {
 		DELAY(5);
 		if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
 			break;
 	}
 
 	if (i == VGE_TIMEOUT) {
 		printf("%s: EEPROM reload timed out\n",
 		    device_xname(sc->sc_dev));
 		return;
 	}
 
 	/*
 	 * On some machine, the first read data from EEPROM could be
 	 * messed up, so read one dummy data here to avoid the mess.
 	 */
 	(void)vge_read_eeprom(sc, 0);
 
 	CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);
 }
 
 /*
  * Probe for a VIA gigabit chip. Check the PCI vendor and device
  * IDs against our list and return a device name if we find a match.
  */
 static int
 vge_match(device_t parent, cfdata_t match, void *aux)
 {
 	struct pci_attach_args *pa = aux;
 
 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_VIATECH
 	    && PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VIATECH_VT612X)
 		return 1;
 
 	return 0;
 }
 
 static int
 vge_allocmem(struct vge_softc *sc)
 {
 	int error;
 	int nseg;
 	int i;
 	bus_dma_segment_t seg;
 
 	/*
 	 * Allocate memory for control data.
 	 */
 
 	error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct vge_control_data),
 	    VGE_RING_ALIGN, 0, &seg, 1, &nseg, BUS_DMA_NOWAIT);
 	if (error) {
 		aprint_error_dev(sc->sc_dev,
 		    "could not allocate control data dma memory\n");
 		goto fail_1;
 	}
 
 	/* Map the memory to kernel VA space */
 
 	error = bus_dmamem_map(sc->sc_dmat, &seg, nseg,
 	    sizeof(struct vge_control_data), (void **)&sc->sc_control_data,
 	    BUS_DMA_NOWAIT);
 	if (error) {
 		aprint_error_dev(sc->sc_dev,
 		    "could not map control data dma memory\n");
 		goto fail_2;
 	}
 	memset(sc->sc_control_data, 0, sizeof(struct vge_control_data));
 
 	/*
 	 * Create map for control data.
 	 */
 	error = bus_dmamap_create(sc->sc_dmat,
 	    sizeof(struct vge_control_data), 1,
 	    sizeof(struct vge_control_data), 0, BUS_DMA_NOWAIT,
 	    &sc->sc_cddmamap);
 	if (error) {
 		aprint_error_dev(sc->sc_dev,
 		    "could not create control data dmamap\n");
 		goto fail_3;
 	}
 
 	/* Load the map for the control data. */
 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
 	    sc->sc_control_data, sizeof(struct vge_control_data), NULL,
 	    BUS_DMA_NOWAIT);
 	if (error) {
 		aprint_error_dev(sc->sc_dev,
 		    "could not load control data dma memory\n");
 		goto fail_4;
 	}
 
 	/* Create DMA maps for TX buffers */
 
 	for (i = 0; i < VGE_NTXDESC; i++) {
 		error = bus_dmamap_create(sc->sc_dmat, VGE_TX_MAXLEN,
 		    VGE_TX_FRAGS, VGE_TX_MAXLEN, 0, BUS_DMA_NOWAIT,
 		    &sc->sc_txsoft[i].txs_dmamap);
 		if (error) {
 			aprint_error_dev(sc->sc_dev,
 			    "can't create DMA map for TX descs\n");
 			goto fail_5;
 		}
 	}
 
 	/* Create DMA maps for RX buffers */
 
 	for (i = 0; i < VGE_NRXDESC; i++) {
 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
 		    1, MCLBYTES, 0, BUS_DMA_NOWAIT,
 		    &sc->sc_rxsoft[i].rxs_dmamap);
 		if (error) {
 			aprint_error_dev(sc->sc_dev,
 			    "can't create DMA map for RX descs\n");
 			goto fail_6;
 		}
 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
 	}
 
 	return 0;
 
  fail_6:
 	for (i = 0; i < VGE_NRXDESC; i++) {
 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
 			bus_dmamap_destroy(sc->sc_dmat,
 			    sc->sc_rxsoft[i].rxs_dmamap);
 	}
  fail_5:
 	for (i = 0; i < VGE_NTXDESC; i++) {
 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
 			bus_dmamap_destroy(sc->sc_dmat,
 			    sc->sc_txsoft[i].txs_dmamap);
 	}
 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
  fail_4:
 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
  fail_3:
 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
 	    sizeof(struct vge_control_data));
  fail_2:
 	bus_dmamem_free(sc->sc_dmat, &seg, nseg);
  fail_1:
 	return ENOMEM;
 }
 
 /*
  * Attach the interface. Allocate softc structures, do ifmedia
  * setup and ethernet/BPF attach.
  */
 static void
 vge_attach(device_t parent, device_t self, void *aux)
 {
 	uint8_t *eaddr;
 	struct vge_softc *sc = device_private(self);
 	struct ifnet *ifp;
 	struct mii_data * const mii = &sc->sc_mii;
 	struct pci_attach_args *pa = aux;
 	pci_chipset_tag_t pc = pa->pa_pc;
 	const char *intrstr;
 	pci_intr_handle_t ih;
 	uint16_t val;
 	char intrbuf[PCI_INTRSTR_LEN];
 
 	sc->sc_dev = self;
 
 	pci_aprint_devinfo_fancy(pa, NULL, "VIA VT612X Gigabit Ethernet", 1);
 
 	/* Make sure bus-mastering is enabled */
 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
 	    pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
 	    PCI_COMMAND_MASTER_ENABLE);
 
 	/*
 	 * Map control/status registers.
 	 */
 	if (pci_mapreg_map(pa, VGE_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
 	    &sc->sc_bst, &sc->sc_bsh, NULL, NULL) != 0) {
 		aprint_error_dev(self, "couldn't map memory\n");
 		return;
 	}
 
 	/*
 	 * Map and establish our interrupt.
 	 */
 	if (pci_intr_map(pa, &ih)) {
 		aprint_error_dev(self, "unable to map interrupt\n");
 		return;
 	}
 	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
 	sc->sc_intrhand = pci_intr_establish_xname(pc, ih, IPL_NET, vge_intr,
 	    sc, device_xname(self));
 	if (sc->sc_intrhand == NULL) {
 		aprint_error_dev(self, "unable to establish interrupt");
 		if (intrstr != NULL)
 			aprint_error(" at %s", intrstr);
 		aprint_error("\n");
 		return;
 	}
 	aprint_normal_dev(self, "interrupting at %s\n", intrstr);
 
 	/* Reset the adapter. */
 	vge_reset(sc);
 
 	/*
 	 * Get station address from the EEPROM.
 	 */
 	eaddr = sc->sc_eaddr;
 	val = vge_read_eeprom(sc, VGE_EE_EADDR + 0);
 	eaddr[0] = val & 0xff;
 	eaddr[1] = val >> 8;
 	val = vge_read_eeprom(sc, VGE_EE_EADDR + 1);
 	eaddr[2] = val & 0xff;
 	eaddr[3] = val >> 8;
 	val = vge_read_eeprom(sc, VGE_EE_EADDR + 2);
 	eaddr[4] = val & 0xff;
 	eaddr[5] = val >> 8;
 
 	aprint_normal_dev(self, "Ethernet address %s\n",
 	    ether_sprintf(eaddr));
 
+	/* Clear WOL and take hardware from powerdown. */
+	vge_clrwol(sc);
+
 	/*
 	 * Use the 32bit tag. Hardware supports 48bit physical addresses,
 	 * but we don't use that for now.
 	 */
 	sc->sc_dmat = pa->pa_dmat;
 
 	if (vge_allocmem(sc) != 0)
 		return;
 
 	ifp = &sc->sc_ethercom.ec_if;
 	ifp->if_softc = sc;
 	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
 	ifp->if_mtu = ETHERMTU;
 	ifp->if_baudrate = IF_Gbps(1);
 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 	ifp->if_ioctl = vge_ioctl;
 	ifp->if_start = vge_start;
 	ifp->if_init = vge_init;
 	ifp->if_stop = vge_stop;
 
 	/*
 	 * We can support 802.1Q VLAN-sized frames and jumbo
 	 * Ethernet frames.
 	 */
 	sc->sc_ethercom.ec_capabilities |=
 	    ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU |
 	    ETHERCAP_VLAN_HWTAGGING;
 	sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
 
 	/*
 	 * We can do IPv4/TCPv4/UDPv4 checksums in hardware.
 	 */
 	ifp->if_capabilities |=
 	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
 	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
 	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
 
 #ifdef DEVICE_POLLING
 #ifdef IFCAP_POLLING
 	ifp->if_capabilities |= IFCAP_POLLING;
 #endif
 #endif
 	ifp->if_watchdog = vge_watchdog;
 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(VGE_IFQ_MAXLEN, IFQ_MAXLEN));
 	IFQ_SET_READY(&ifp->if_snd);
 
 	/*
 	 * Initialize our media structures and probe the MII.
 	 */
 	mii->mii_ifp = ifp;
 	mii->mii_readreg = vge_miibus_readreg;
 	mii->mii_writereg = vge_miibus_writereg;
 	mii->mii_statchg = vge_miibus_statchg;
 
 	sc->sc_ethercom.ec_mii = mii;
 	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
 	mii_attach(self, mii, 0xffffffff, MII_PHY_ANY,
 	    MII_OFFSET_ANY, MIIF_DOPAUSE);
 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
 	} else
 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
 
 	/*
 	 * Attach the interface.
 	 */
 	if_attach(ifp);
 	if_deferred_start_init(ifp, NULL);
1025 ether_ifattach(ifp, eaddr); 1029 ether_ifattach(ifp, eaddr);
1026 ether_set_ifflags_cb(&sc->sc_ethercom, vge_ifflags_cb); 1030 ether_set_ifflags_cb(&sc->sc_ethercom, vge_ifflags_cb);
1027 1031
1028 callout_init(&sc->sc_timeout, 0); 1032 callout_init(&sc->sc_timeout, 0);
1029 callout_setfunc(&sc->sc_timeout, vge_tick, sc); 1033 callout_setfunc(&sc->sc_timeout, vge_tick, sc);
1030 1034
1031 /* 1035 /*
1032 * Make sure the interface is shutdown during reboot. 1036 * Make sure the interface is shutdown during reboot.
1033 */ 1037 */
1034 if (pmf_device_register1(self, NULL, NULL, vge_shutdown)) 1038 if (pmf_device_register1(self, NULL, NULL, vge_shutdown))
1035 pmf_class_network_register(self, ifp); 1039 pmf_class_network_register(self, ifp);
1036 else 1040 else
1037 aprint_error_dev(self, "couldn't establish power handler\n"); 1041 aprint_error_dev(self, "couldn't establish power handler\n");
1038} 1042}
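For reference, a sketch of the vge_clrwol() routine called during attach above; the body (added elsewhere in this revision) closely follows the FreeBSD original, and the VGE_PWRSTAT, VGE_STICKHW_* and VGE_WOL* macro names below are assumptions carried over from FreeBSD's if_vgereg.h, not verified against the NetBSD header:

static void
vge_clrwol(struct vge_softc *sc)
{
	uint8_t val;

	/* Clear the sticky software power-state tag. */
	val = CSR_READ_1(sc, VGE_PWRSTAT);
	val &= ~VGE_STICKHW_SWPTAG;
	CSR_WRITE_1(sc, VGE_PWRSTAT, val);

	/* Clear the D0/D1 power-state bits to leave powerdown mode. */
	val = CSR_READ_1(sc, VGE_PWRSTAT);
	val &= ~(VGE_STICKHW_DS0 | VGE_STICKHW_DS1);
	CSR_WRITE_1(sc, VGE_PWRSTAT, val);

	CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII);
	CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);

	/* Disable wakeup on pattern match, magic and unicast packets. */
	CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL);
	CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F);
	CSR_WRITE_1(sc, VGE_WOLCFGC, VGE_WOLCFG_SAB | VGE_WOLCFG_SAM |
	    VGE_WOLCFG_PMEOVR);

	/* Acknowledge any latched WOL status. */
	CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF);
	CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF);
}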
1039 1043
1040static int 1044static int
1041vge_newbuf(struct vge_softc *sc, int idx, struct mbuf *m) 1045vge_newbuf(struct vge_softc *sc, int idx, struct mbuf *m)
1042{ 1046{
1043 struct mbuf *m_new; 1047 struct mbuf *m_new;
1044 struct vge_rxdesc *rxd; 1048 struct vge_rxdesc *rxd;
1045 struct vge_rxsoft *rxs; 1049 struct vge_rxsoft *rxs;
1046 bus_dmamap_t map; 1050 bus_dmamap_t map;
1047 int i; 1051 int i;
1048#ifdef DIAGNOSTIC 1052#ifdef DIAGNOSTIC
1049 uint32_t rd_sts; 1053 uint32_t rd_sts;
1050#endif 1054#endif
1051 1055
1052 m_new = NULL; 1056 m_new = NULL;
1053 if (m == NULL) { 1057 if (m == NULL) {
1054 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 1058 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1055 if (m_new == NULL) 1059 if (m_new == NULL)
1056 return ENOBUFS; 1060 return ENOBUFS;
1057 1061
1058 MCLGET(m_new, M_DONTWAIT); 1062 MCLGET(m_new, M_DONTWAIT);
1059 if ((m_new->m_flags & M_EXT) == 0) { 1063 if ((m_new->m_flags & M_EXT) == 0) {
1060 m_freem(m_new); 1064 m_freem(m_new);
1061 return ENOBUFS; 1065 return ENOBUFS;
1062 } 1066 }
1063 1067
1064 m = m_new; 1068 m = m_new;
1065 } else 1069 } else
1066 m->m_data = m->m_ext.ext_buf; 1070 m->m_data = m->m_ext.ext_buf;
1067 1071
1068 1072
1069 /* 1073 /*
1070 * This is part of an evil trick to deal with non-x86 platforms. 1074 * This is part of an evil trick to deal with non-x86 platforms.
1071 * The VIA chip requires RX buffers to be aligned on 32-bit 1075 * The VIA chip requires RX buffers to be aligned on 32-bit
1072 * boundaries, but that will hose non-x86 machines. To get around 1076 * boundaries, but that will hose non-x86 machines. To get around
1073 * this, we leave some empty space at the start of each buffer 1077 * this, we leave some empty space at the start of each buffer
1074 * and for non-x86 hosts, we copy the buffer back two bytes 1078 * and for non-x86 hosts, we copy the buffer back two bytes
1075 * to achieve word alignment. This is slightly more efficient 1079 * to achieve word alignment. This is slightly more efficient
1076 * than allocating a new buffer, copying the contents, and 1080 * than allocating a new buffer, copying the contents, and
1077 * discarding the old buffer. 1081 * discarding the old buffer.
1078 */ 1082 */
1079 m->m_len = m->m_pkthdr.len = VGE_RX_BUFSIZE; 1083 m->m_len = m->m_pkthdr.len = VGE_RX_BUFSIZE;
1080#ifndef __NO_STRICT_ALIGNMENT 1084#ifndef __NO_STRICT_ALIGNMENT
1081 m->m_data += VGE_RX_PAD; 1085 m->m_data += VGE_RX_PAD;
1082#endif 1086#endif
1083 rxs = &sc->sc_rxsoft[idx]; 1087 rxs = &sc->sc_rxsoft[idx];
1084 map = rxs->rxs_dmamap; 1088 map = rxs->rxs_dmamap;
1085 1089
1086 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) 1090 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0)
1087 goto out; 1091 goto out;
1088 1092
1089 rxd = &sc->sc_rxdescs[idx]; 1093 rxd = &sc->sc_rxdescs[idx];
1090 1094
1091#ifdef DIAGNOSTIC 1095#ifdef DIAGNOSTIC
1092 /* If this descriptor is still owned by the chip, bail. */ 1096 /* If this descriptor is still owned by the chip, bail. */
1093 VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1097 VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1094 rd_sts = le32toh(rxd->rd_sts); 1098 rd_sts = le32toh(rxd->rd_sts);
1095 VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD); 1099 VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
1096 if (rd_sts & VGE_RDSTS_OWN) { 1100 if (rd_sts & VGE_RDSTS_OWN) {
1097 panic("%s: tried to map busy RX descriptor", 1101 panic("%s: tried to map busy RX descriptor",
1098 device_xname(sc->sc_dev)); 1102 device_xname(sc->sc_dev));
1099 } 1103 }
1100#endif 1104#endif
1101 1105
1102 rxs->rxs_mbuf = m; 1106 rxs->rxs_mbuf = m;
1103 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1107 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1104 BUS_DMASYNC_PREREAD); 1108 BUS_DMASYNC_PREREAD);
1105 1109
1106 rxd->rd_buflen = 1110 rxd->rd_buflen =
1107 htole16(VGE_BUFLEN(map->dm_segs[0].ds_len) | VGE_RXDESC_I); 1111 htole16(VGE_BUFLEN(map->dm_segs[0].ds_len) | VGE_RXDESC_I);
1108 vge_set_rxaddr(rxd, map->dm_segs[0].ds_addr); 1112 vge_set_rxaddr(rxd, map->dm_segs[0].ds_addr);
1109 rxd->rd_sts = 0; 1113 rxd->rd_sts = 0;
1110 rxd->rd_ctl = 0; 1114 rxd->rd_ctl = 0;
1111 VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1115 VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1112 1116
1113 /* 1117 /*
1114 * Note: the manual fails to document the fact that for 1118 * Note: the manual fails to document the fact that for
1115 * proper operation, the driver needs to replenish the RX 1119 * proper operation, the driver needs to replenish the RX
1116 * DMA ring 4 descriptors at a time (rather than one at a 1120 * DMA ring 4 descriptors at a time (rather than one at a
1117 * time, like most chips). We can allocate the new buffers 1121 * time, like most chips). We can allocate the new buffers
1118 * but we should not set the OWN bits until we're ready 1122 * but we should not set the OWN bits until we're ready
1119 * to hand back 4 of them in one shot. 1123 * to hand back 4 of them in one shot.
1120 */ 1124 */
1121 1125
1122#define VGE_RXCHUNK 4 1126#define VGE_RXCHUNK 4
1123 sc->sc_rx_consumed++; 1127 sc->sc_rx_consumed++;
1124 if (sc->sc_rx_consumed == VGE_RXCHUNK) { 1128 if (sc->sc_rx_consumed == VGE_RXCHUNK) {
1125 for (i = idx; i != idx - VGE_RXCHUNK; i--) { 1129 for (i = idx; i != idx - VGE_RXCHUNK; i--) {
1126 KASSERT(i >= 0); 1130 KASSERT(i >= 0);
1127 sc->sc_rxdescs[i].rd_sts |= htole32(VGE_RDSTS_OWN); 1131 sc->sc_rxdescs[i].rd_sts |= htole32(VGE_RDSTS_OWN);
1128 VGE_RXDESCSYNC(sc, i, 1132 VGE_RXDESCSYNC(sc, i,
1129 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1133 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1130 } 1134 }
1131 sc->sc_rx_consumed = 0; 1135 sc->sc_rx_consumed = 0;
1132 } 1136 }
1133 1137
1134 return 0; 1138 return 0;
1135 out: 1139 out:
1136 if (m_new != NULL) 1140 if (m_new != NULL)
1137 m_freem(m_new); 1141 m_freem(m_new);
1138 return ENOMEM; 1142 return ENOMEM;
1139} 1143}
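A concrete pass through the chunked OWN handoff above (assuming refills start on a chunk boundary, which vge_init() arranges by refilling the ring from index 0):

	/*
	 * Once buffers for indices 4, 5, 6 and 7 have been refilled,
	 * sc_rx_consumed reaches VGE_RXCHUNK with idx == 7, and the
	 * loop sets OWN on descriptors 7, 6, 5 and 4, handing all
	 * four back to the chip in one shot.
	 */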
1140 1144
1141#ifndef __NO_STRICT_ALIGNMENT 1145#ifndef __NO_STRICT_ALIGNMENT
1142static inline void 1146static inline void
1143vge_fixup_rx(struct mbuf *m) 1147vge_fixup_rx(struct mbuf *m)
1144{ 1148{
1145 int i; 1149 int i;
1146 uint16_t *src, *dst; 1150 uint16_t *src, *dst;
1147 1151
1148 src = mtod(m, uint16_t *); 1152 src = mtod(m, uint16_t *);
1149 dst = src - 1; 1153 dst = src - 1;
1150 1154
1151 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) 1155 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1152 *dst++ = *src++; 1156 *dst++ = *src++;
1153 1157
1154 m->m_data -= ETHER_ALIGN; 1158 m->m_data -= ETHER_ALIGN;
1155} 1159}
1156#endif 1160#endif
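A minimal user-space model of the fix-up above; real frames live in mbufs, and the driver's forward 16-bit word loop is modelled here by memmove() (either is safe, since the destination precedes the source):

#include <stdint.h>
#include <string.h>

#define ETHER_ALIGN 2	/* slack reserved before the Ethernet header */

/*
 * The chip DMAs each frame to a 32-bit aligned address, which
 * leaves the IP header (14 bytes in) on a 2-byte boundary.
 * Sliding the frame back ETHER_ALIGN bytes re-aligns the IP header
 * for CPUs that fault on misaligned loads.
 */
static void
fixup_model(uint8_t *frame, size_t len)
{
	memmove(frame - ETHER_ALIGN, frame, len);
}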
1157 1161
1158/* 1162/*
1159 * RX handler. We support the reception of jumbo frames that have 1163 * RX handler. We support the reception of jumbo frames that have
1160 * been fragmented across multiple 2K mbuf cluster buffers. 1164 * been fragmented across multiple 2K mbuf cluster buffers.
1161 */ 1165 */
1162static void 1166static void
1163vge_rxeof(struct vge_softc *sc) 1167vge_rxeof(struct vge_softc *sc)
1164{ 1168{
1165 struct mbuf *m; 1169 struct mbuf *m;
1166 struct ifnet *ifp; 1170 struct ifnet *ifp;
1167 int idx, total_len, lim; 1171 int idx, total_len, lim;
1168 struct vge_rxdesc *cur_rxd; 1172 struct vge_rxdesc *cur_rxd;
1169 struct vge_rxsoft *rxs; 1173 struct vge_rxsoft *rxs;
1170 uint32_t rxstat, rxctl; 1174 uint32_t rxstat, rxctl;
1171 1175
1172 ifp = &sc->sc_ethercom.ec_if; 1176 ifp = &sc->sc_ethercom.ec_if;
1173 lim = 0; 1177 lim = 0;
1174 1178
1175 /* Invalidate the descriptor memory */ 1179 /* Invalidate the descriptor memory */
1176 1180
1177 for (idx = sc->sc_rx_prodidx;; idx = VGE_NEXT_RXDESC(idx)) { 1181 for (idx = sc->sc_rx_prodidx;; idx = VGE_NEXT_RXDESC(idx)) {
1178 cur_rxd = &sc->sc_rxdescs[idx]; 1182 cur_rxd = &sc->sc_rxdescs[idx];
1179 1183
1180 VGE_RXDESCSYNC(sc, idx, 1184 VGE_RXDESCSYNC(sc, idx,
1181 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1185 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1182 rxstat = le32toh(cur_rxd->rd_sts); 1186 rxstat = le32toh(cur_rxd->rd_sts);
1183 if ((rxstat & VGE_RDSTS_OWN) != 0) { 1187 if ((rxstat & VGE_RDSTS_OWN) != 0) {
1184 VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD); 1188 VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
1185 break; 1189 break;
1186 } 1190 }
1187 1191
1188 rxctl = le32toh(cur_rxd->rd_ctl); 1192 rxctl = le32toh(cur_rxd->rd_ctl);
1189 rxs = &sc->sc_rxsoft[idx]; 1193 rxs = &sc->sc_rxsoft[idx];
1190 m = rxs->rxs_mbuf; 1194 m = rxs->rxs_mbuf;
1191 total_len = (rxstat & VGE_RDSTS_BUFSIZ) >> 16; 1195 total_len = (rxstat & VGE_RDSTS_BUFSIZ) >> 16;
1192 1196
1193 /* Invalidate the RX mbuf and unload its map */ 1197 /* Invalidate the RX mbuf and unload its map */
1194 1198
1195 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 1199 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap,
1196 0, rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 1200 0, rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1197 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 1201 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1198 1202
1199 /* 1203 /*
1200 * If the 'start of frame' bit is set, this indicates 1204 * If the 'start of frame' bit is set, this indicates
1201 * either the first fragment in a multi-fragment receive, 1205 * either the first fragment in a multi-fragment receive,
1202 * or an intermediate fragment. Either way, we want to 1206 * or an intermediate fragment. Either way, we want to
1203 * accumulate the buffers. 1207 * accumulate the buffers.
1204 */ 1208 */
1205 if (rxstat & VGE_RXPKT_SOF) { 1209 if (rxstat & VGE_RXPKT_SOF) {
1206 m->m_len = VGE_RX_BUFSIZE; 1210 m->m_len = VGE_RX_BUFSIZE;
1207 if (sc->sc_rx_mhead == NULL) 1211 if (sc->sc_rx_mhead == NULL)
1208 sc->sc_rx_mhead = sc->sc_rx_mtail = m; 1212 sc->sc_rx_mhead = sc->sc_rx_mtail = m;
1209 else { 1213 else {
1210 m->m_flags &= ~M_PKTHDR; 1214 m->m_flags &= ~M_PKTHDR;
1211 sc->sc_rx_mtail->m_next = m; 1215 sc->sc_rx_mtail->m_next = m;
1212 sc->sc_rx_mtail = m; 1216 sc->sc_rx_mtail = m;
1213 } 1217 }
1214 vge_newbuf(sc, idx, NULL); 1218 vge_newbuf(sc, idx, NULL);
1215 continue; 1219 continue;
1216 } 1220 }
1217 1221
1218 /* 1222 /*
1219 * Bad/error frames will have the RXOK bit cleared. 1223 * Bad/error frames will have the RXOK bit cleared.
1220 * However, there's one error case we want to allow: 1224 * However, there's one error case we want to allow:
1221 * if a VLAN tagged frame arrives and the chip can't 1225 * if a VLAN tagged frame arrives and the chip can't
1222 * match it against the CAM filter, it considers this 1226 * match it against the CAM filter, it considers this
1223 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit. 1227 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
1224 * We don't want to drop the frame though: our VLAN 1228 * We don't want to drop the frame though: our VLAN
1225 * filtering is done in software. 1229 * filtering is done in software.
1226 */ 1230 */
1227 if ((rxstat & VGE_RDSTS_RXOK) == 0 && 1231 if ((rxstat & VGE_RDSTS_RXOK) == 0 &&
1228 (rxstat & VGE_RDSTS_VIDM) == 0 && 1232 (rxstat & VGE_RDSTS_VIDM) == 0 &&
1229 (rxstat & VGE_RDSTS_CSUMERR) == 0) { 1233 (rxstat & VGE_RDSTS_CSUMERR) == 0) {
1230 ifp->if_ierrors++; 1234 ifp->if_ierrors++;
1231 /* 1235 /*
1232 * If this is part of a multi-fragment packet, 1236 * If this is part of a multi-fragment packet,
1233 * discard all the pieces. 1237 * discard all the pieces.
1234 */ 1238 */
1235 if (sc->sc_rx_mhead != NULL) { 1239 if (sc->sc_rx_mhead != NULL) {
1236 m_freem(sc->sc_rx_mhead); 1240 m_freem(sc->sc_rx_mhead);
1237 sc->sc_rx_mhead = sc->sc_rx_mtail = NULL; 1241 sc->sc_rx_mhead = sc->sc_rx_mtail = NULL;
1238 } 1242 }
1239 vge_newbuf(sc, idx, m); 1243 vge_newbuf(sc, idx, m);
1240 continue; 1244 continue;
1241 } 1245 }
1242 1246
1243 /* 1247 /*
1244 * If allocating a replacement mbuf fails, 1248 * If allocating a replacement mbuf fails,
1245 * reload the current one. 1249 * reload the current one.
1246 */ 1250 */
1247 1251
1248 if (vge_newbuf(sc, idx, NULL)) { 1252 if (vge_newbuf(sc, idx, NULL)) {
1249 ifp->if_ierrors++; 1253 ifp->if_ierrors++;
1250 if (sc->sc_rx_mhead != NULL) { 1254 if (sc->sc_rx_mhead != NULL) {
1251 m_freem(sc->sc_rx_mhead); 1255 m_freem(sc->sc_rx_mhead);
1252 sc->sc_rx_mhead = sc->sc_rx_mtail = NULL; 1256 sc->sc_rx_mhead = sc->sc_rx_mtail = NULL;
1253 } 1257 }
1254 vge_newbuf(sc, idx, m); 1258 vge_newbuf(sc, idx, m);
1255 continue; 1259 continue;
1256 } 1260 }
1257 1261
1258 if (sc->sc_rx_mhead != NULL) { 1262 if (sc->sc_rx_mhead != NULL) {
1259 m->m_len = total_len % VGE_RX_BUFSIZE; 1263 m->m_len = total_len % VGE_RX_BUFSIZE;
1260 /* 1264 /*
1261 * Special case: if there are 4 bytes or fewer 1265 * Special case: if there are 4 bytes or fewer
1262 * in this buffer, the mbuf can be discarded: 1266 * in this buffer, the mbuf can be discarded:
1263 * the last 4 bytes are the CRC, which we don't 1267 * the last 4 bytes are the CRC, which we don't
1264 * care about anyway. 1268 * care about anyway.
1265 */ 1269 */
1266 if (m->m_len <= ETHER_CRC_LEN) { 1270 if (m->m_len <= ETHER_CRC_LEN) {
1267 sc->sc_rx_mtail->m_len -= 1271 sc->sc_rx_mtail->m_len -=
1268 (ETHER_CRC_LEN - m->m_len); 1272 (ETHER_CRC_LEN - m->m_len);
1269 m_freem(m); 1273 m_freem(m);
1270 } else { 1274 } else {
1271 m->m_len -= ETHER_CRC_LEN; 1275 m->m_len -= ETHER_CRC_LEN;
1272 m->m_flags &= ~M_PKTHDR; 1276 m->m_flags &= ~M_PKTHDR;
1273 sc->sc_rx_mtail->m_next = m; 1277 sc->sc_rx_mtail->m_next = m;
1274 } 1278 }
1275 m = sc->sc_rx_mhead; 1279 m = sc->sc_rx_mhead;
1276 sc->sc_rx_mhead = sc->sc_rx_mtail = NULL; 1280 sc->sc_rx_mhead = sc->sc_rx_mtail = NULL;
1277 m->m_pkthdr.len = total_len - ETHER_CRC_LEN; 1281 m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1278 } else 1282 } else
1279 m->m_pkthdr.len = m->m_len = total_len - ETHER_CRC_LEN; 1283 m->m_pkthdr.len = m->m_len = total_len - ETHER_CRC_LEN;
1280 1284
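	/*
	 * Worked example of the reassembly arithmetic, assuming (for
	 * illustration only) VGE_RX_BUFSIZE is 2048:
	 *   total_len = 4100 bytes on the wire, CRC included:
	 *     fragment 0: 2048 bytes (SOF set, chained above)
	 *     fragment 1: 2048 bytes (SOF set, chained above)
	 *     fragment 2: 4100 % 2048 = 4 bytes <= ETHER_CRC_LEN, so
	 *       its mbuf is freed and the tail mbuf is shortened by
	 *       ETHER_CRC_LEN - 4 = 0 bytes.
	 *   m_pkthdr.len = 4100 - ETHER_CRC_LEN = 4096 bytes delivered.
	 */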
1281#ifndef __NO_STRICT_ALIGNMENT 1285#ifndef __NO_STRICT_ALIGNMENT
1282 vge_fixup_rx(m); 1286 vge_fixup_rx(m);
1283#endif 1287#endif
1284 m_set_rcvif(m, ifp); 1288 m_set_rcvif(m, ifp);
1285 1289
1286 /* Do RX checksumming if enabled */ 1290 /* Do RX checksumming if enabled */
1287 if (ifp->if_csum_flags_rx & M_CSUM_IPv4) { 1291 if (ifp->if_csum_flags_rx & M_CSUM_IPv4) {
1288 1292
1289 /* Check IP header checksum */ 1293 /* Check IP header checksum */
1290 if (rxctl & VGE_RDCTL_IPPKT) 1294 if (rxctl & VGE_RDCTL_IPPKT)
1291 m->m_pkthdr.csum_flags |= M_CSUM_IPv4; 1295 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1292 if ((rxctl & VGE_RDCTL_IPCSUMOK) == 0) 1296 if ((rxctl & VGE_RDCTL_IPCSUMOK) == 0)
1293 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 1297 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1294 } 1298 }
1295 1299
1296 if (ifp->if_csum_flags_rx & M_CSUM_TCPv4) { 1300 if (ifp->if_csum_flags_rx & M_CSUM_TCPv4) {
1297 /* Check TCP checksum */ 1301 /* Check TCP checksum */
1298 if (rxctl & VGE_RDCTL_TCPPKT) 1302 if (rxctl & VGE_RDCTL_TCPPKT)
1299 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4; 1303 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
1300 1304
1301 if ((rxctl & VGE_RDCTL_PROTOCSUMOK) == 0) 1305 if ((rxctl & VGE_RDCTL_PROTOCSUMOK) == 0)
1302 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD; 1306 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
1303 } 1307 }
1304 1308
1305 if (ifp->if_csum_flags_rx & M_CSUM_UDPv4) { 1309 if (ifp->if_csum_flags_rx & M_CSUM_UDPv4) {
1306 /* Check UDP checksum */ 1310 /* Check UDP checksum */
1307 if (rxctl & VGE_RDCTL_UDPPKT) 1311 if (rxctl & VGE_RDCTL_UDPPKT)
1308 m->m_pkthdr.csum_flags |= M_CSUM_UDPv4; 1312 m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
1309 1313
1310 if ((rxctl & VGE_RDCTL_PROTOCSUMOK) == 0) 1314 if ((rxctl & VGE_RDCTL_PROTOCSUMOK) == 0)
1311 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD; 1315 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
1312 } 1316 }
1313 1317
1314 if (rxstat & VGE_RDSTS_VTAG) { 1318 if (rxstat & VGE_RDSTS_VTAG) {
1315 /* 1319 /*
1316 * We use bswap16() here because: 1320 * We use bswap16() here because:
1317 * On LE machines, tag is stored in BE as stream data. 1321 * On LE machines, tag is stored in BE as stream data.
1318 * On BE machines, tag is stored in BE as stream data 1322 * On BE machines, tag is stored in BE as stream data
1319 * but it was already swapped by le32toh() above. 1323 * but it was already swapped by le32toh() above.
1320 */ 1324 */
1321 vlan_set_tag(m, bswap16(rxctl & VGE_RDCTL_VLANID)); 1325 vlan_set_tag(m, bswap16(rxctl & VGE_RDCTL_VLANID));
1322 } 1326 }
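	/*
	 * Worked example: VLAN ID 0x123 travels as the byte stream
	 * 01 23 and is stored that way in the descriptor.  A
	 * little-endian host reads the field back as 0x2301 (le32toh()
	 * is a no-op); a big-endian host reads the raw bytes as 0x0123
	 * but le32toh() swaps them to 0x2301 as well.  Either way,
	 * bswap16() recovers 0x0123.
	 */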
1323 1327
1324 if_percpuq_enqueue(ifp->if_percpuq, m); 1328 if_percpuq_enqueue(ifp->if_percpuq, m);
1325 1329
1326 lim++; 1330 lim++;
1327 if (lim == VGE_NRXDESC) 1331 if (lim == VGE_NRXDESC)
1328 break; 1332 break;
1329 } 1333 }
1330 1334
1331 sc->sc_rx_prodidx = idx; 1335 sc->sc_rx_prodidx = idx;
1332 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim); 1336 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim);
1333} 1337}
1334 1338
1335static void 1339static void
1336vge_txeof(struct vge_softc *sc) 1340vge_txeof(struct vge_softc *sc)
1337{ 1341{
1338 struct ifnet *ifp; 1342 struct ifnet *ifp;
1339 struct vge_txsoft *txs; 1343 struct vge_txsoft *txs;
1340 uint32_t txstat; 1344 uint32_t txstat;
1341 int idx; 1345 int idx;
1342 1346
1343 ifp = &sc->sc_ethercom.ec_if; 1347 ifp = &sc->sc_ethercom.ec_if;
1344 1348
1345 for (idx = sc->sc_tx_considx; 1349 for (idx = sc->sc_tx_considx;
1346 sc->sc_tx_free < VGE_NTXDESC; 1350 sc->sc_tx_free < VGE_NTXDESC;
1347 idx = VGE_NEXT_TXDESC(idx), sc->sc_tx_free++) { 1351 idx = VGE_NEXT_TXDESC(idx), sc->sc_tx_free++) {
1348 VGE_TXDESCSYNC(sc, idx, 1352 VGE_TXDESCSYNC(sc, idx,
1349 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1353 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1350 txstat = le32toh(sc->sc_txdescs[idx].td_sts); 1354 txstat = le32toh(sc->sc_txdescs[idx].td_sts);
1351 VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD); 1355 VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
1352 if (txstat & VGE_TDSTS_OWN) { 1356 if (txstat & VGE_TDSTS_OWN) {
1353 break; 1357 break;
1354 } 1358 }
1355 1359
1356 txs = &sc->sc_txsoft[idx]; 1360 txs = &sc->sc_txsoft[idx];
1357 m_freem(txs->txs_mbuf); 1361 m_freem(txs->txs_mbuf);
1358 txs->txs_mbuf = NULL; 1362 txs->txs_mbuf = NULL;
1359 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap, 0, 1363 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap, 0,
1360 txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1364 txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1361 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 1365 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1362 if (txstat & (VGE_TDSTS_EXCESSCOLL | VGE_TDSTS_COLL)) 1366 if (txstat & (VGE_TDSTS_EXCESSCOLL | VGE_TDSTS_COLL))
1363 ifp->if_collisions++; 1367 ifp->if_collisions++;
1364 if (txstat & VGE_TDSTS_TXERR) 1368 if (txstat & VGE_TDSTS_TXERR)
1365 ifp->if_oerrors++; 1369 ifp->if_oerrors++;
1366 else 1370 else
1367 ifp->if_opackets++; 1371 ifp->if_opackets++;
1368 } 1372 }
1369 1373
1370 sc->sc_tx_considx = idx; 1374 sc->sc_tx_considx = idx;
1371 1375
1372 if (sc->sc_tx_free > 0) { 1376 if (sc->sc_tx_free > 0) {
1373 ifp->if_flags &= ~IFF_OACTIVE; 1377 ifp->if_flags &= ~IFF_OACTIVE;
1374 } 1378 }
1375 1379
1376 /* 1380 /*
1377 * If not all descriptors have been reaped yet, 1381 * If not all descriptors have been reaped yet,
1378 * reload the timer so that we will eventually get another 1382 * reload the timer so that we will eventually get another
1379 * interrupt that will cause us to re-enter this routine. 1383 * interrupt that will cause us to re-enter this routine.
1380 * This is done in case the transmitter has gone idle. 1384 * This is done in case the transmitter has gone idle.
1381 */ 1385 */
1382 if (sc->sc_tx_free < VGE_NTXDESC) 1386 if (sc->sc_tx_free < VGE_NTXDESC)
1383 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE); 1387 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
1384 else 1388 else
1385 ifp->if_timer = 0; 1389 ifp->if_timer = 0;
1386} 1390}
1387 1391
1388static void 1392static void
1389vge_tick(void *arg) 1393vge_tick(void *arg)
1390{ 1394{
1391 struct vge_softc *sc; 1395 struct vge_softc *sc;
1392 struct ifnet *ifp; 1396 struct ifnet *ifp;
1393 struct mii_data *mii; 1397 struct mii_data *mii;
1394 int s; 1398 int s;
1395 1399
1396 sc = arg; 1400 sc = arg;
1397 ifp = &sc->sc_ethercom.ec_if; 1401 ifp = &sc->sc_ethercom.ec_if;
1398 mii = &sc->sc_mii; 1402 mii = &sc->sc_mii;
1399 1403
1400 s = splnet(); 1404 s = splnet();
1401 1405
1402 callout_schedule(&sc->sc_timeout, hz); 1406 callout_schedule(&sc->sc_timeout, hz);
1403 1407
1404 mii_tick(mii); 1408 mii_tick(mii);
1405 if (sc->sc_link) { 1409 if (sc->sc_link) {
1406 if ((mii->mii_media_status & IFM_ACTIVE) == 0) 1410 if ((mii->mii_media_status & IFM_ACTIVE) == 0)
1407 sc->sc_link = 0; 1411 sc->sc_link = 0;
1408 } else { 1412 } else {
1409 if (mii->mii_media_status & IFM_ACTIVE && 1413 if (mii->mii_media_status & IFM_ACTIVE &&
1410 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 1414 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
1411 sc->sc_link = 1; 1415 sc->sc_link = 1;
1412 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 1416 if (!IFQ_IS_EMPTY(&ifp->if_snd))
1413 vge_start(ifp); 1417 vge_start(ifp);
1414 } 1418 }
1415 } 1419 }
1416 1420
1417 splx(s); 1421 splx(s);
1418} 1422}
1419 1423
1420static int 1424static int
1421vge_intr(void *arg) 1425vge_intr(void *arg)
1422{ 1426{
1423 struct vge_softc *sc; 1427 struct vge_softc *sc;
1424 struct ifnet *ifp; 1428 struct ifnet *ifp;
1425 uint32_t status; 1429 uint32_t status;
1426 int claim; 1430 int claim;
1427 1431
1428 sc = arg; 1432 sc = arg;
1429 claim = 0; 1433 claim = 0;
1430 if (sc->sc_suspended) { 1434 if (sc->sc_suspended) {
1431 return claim; 1435 return claim;
1432 } 1436 }
1433 1437
1434 ifp = &sc->sc_ethercom.ec_if; 1438 ifp = &sc->sc_ethercom.ec_if;
1435 1439
1436 if ((ifp->if_flags & IFF_UP) == 0) { 1440 if ((ifp->if_flags & IFF_UP) == 0) {
1437 return claim; 1441 return claim;
1438 } 1442 }
1439 1443
1440 /* Disable interrupts */ 1444 /* Disable interrupts */
1441 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 1445 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
1442 1446
1443 for (;;) { 1447 for (;;) {
1444 1448
1445 status = CSR_READ_4(sc, VGE_ISR); 1449 status = CSR_READ_4(sc, VGE_ISR);
1446 /* If the card has gone away, the read returns 0xffffffff. */ 1450 /* If the card has gone away, the read returns 0xffffffff. */
1447 if (status == 0xFFFFFFFF) 1451 if (status == 0xFFFFFFFF)
1448 break; 1452 break;
1449 1453
1450 if (status) { 1454 if (status) {
1451 claim = 1; 1455 claim = 1;
1452 CSR_WRITE_4(sc, VGE_ISR, status); 1456 CSR_WRITE_4(sc, VGE_ISR, status);
1453 } 1457 }
1454 1458
1455 if ((status & VGE_INTRS) == 0) 1459 if ((status & VGE_INTRS) == 0)
1456 break; 1460 break;
1457 1461
1458 if (status & (VGE_ISR_RXOK | VGE_ISR_RXOK_HIPRIO)) 1462 if (status & (VGE_ISR_RXOK | VGE_ISR_RXOK_HIPRIO))
1459 vge_rxeof(sc); 1463 vge_rxeof(sc);
1460 1464
1461 if (status & (VGE_ISR_RXOFLOW | VGE_ISR_RXNODESC)) { 1465 if (status & (VGE_ISR_RXOFLOW | VGE_ISR_RXNODESC)) {
1462 vge_rxeof(sc); 1466 vge_rxeof(sc);
1463 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); 1467 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1464 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); 1468 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1465 } 1469 }
1466 1470
1467 if (status & (VGE_ISR_TXOK0 | VGE_ISR_TIMER0)) 1471 if (status & (VGE_ISR_TXOK0 | VGE_ISR_TIMER0))
1468 vge_txeof(sc); 1472 vge_txeof(sc);
1469 1473
1470 if (status & (VGE_ISR_TXDMA_STALL | VGE_ISR_RXDMA_STALL)) 1474 if (status & (VGE_ISR_TXDMA_STALL | VGE_ISR_RXDMA_STALL))
1471 vge_init(ifp); 1475 vge_init(ifp);
1472 1476
1473 if (status & VGE_ISR_LINKSTS) 1477 if (status & VGE_ISR_LINKSTS)
1474 vge_tick(sc); 1478 vge_tick(sc);
1475 } 1479 }
1476 1480
1477 /* Re-enable interrupts */ 1481 /* Re-enable interrupts */
1478 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 1482 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
1479 1483
1480 if (claim) 1484 if (claim)
1481 if_schedule_deferred_start(ifp); 1485 if_schedule_deferred_start(ifp);
1482 1486
1483 return claim; 1487 return claim;
1484} 1488}
1485 1489
1486static int 1490static int
1487vge_encap(struct vge_softc *sc, struct mbuf *m_head, int idx) 1491vge_encap(struct vge_softc *sc, struct mbuf *m_head, int idx)
1488{ 1492{
1489 struct vge_txsoft *txs; 1493 struct vge_txsoft *txs;
1490 struct vge_txdesc *txd; 1494 struct vge_txdesc *txd;
1491 struct vge_txfrag *f; 1495 struct vge_txfrag *f;
1492 struct mbuf *m_new; 1496 struct mbuf *m_new;
1493 bus_dmamap_t map; 1497 bus_dmamap_t map;
1494 int m_csumflags, seg, error, flags; 1498 int m_csumflags, seg, error, flags;
1495 size_t sz; 1499 size_t sz;
1496 uint32_t td_sts, td_ctl; 1500 uint32_t td_sts, td_ctl;
1497 1501
1498 KASSERT(sc->sc_tx_free > 0); 1502 KASSERT(sc->sc_tx_free > 0);
1499 1503
1500 txd = &sc->sc_txdescs[idx]; 1504 txd = &sc->sc_txdescs[idx];
1501 1505
1502#ifdef DIAGNOSTIC 1506#ifdef DIAGNOSTIC
1503 /* If this descriptor is still owned by the chip, bail. */ 1507 /* If this descriptor is still owned by the chip, bail. */
1504 VGE_TXDESCSYNC(sc, idx, 1508 VGE_TXDESCSYNC(sc, idx,
1505 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1509 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1506 td_sts = le32toh(txd->td_sts); 1510 td_sts = le32toh(txd->td_sts);
1507 VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD); 1511 VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
1508 if (td_sts & VGE_TDSTS_OWN) { 1512 if (td_sts & VGE_TDSTS_OWN) {
1509 return ENOBUFS; 1513 return ENOBUFS;
1510 } 1514 }
1511#endif 1515#endif
1512 1516
1513 /* 1517 /*
1514 * Preserve m_pkthdr.csum_flags here since m_head might be 1518 * Preserve m_pkthdr.csum_flags here since m_head might be
1515 * updated by m_defrag() 1519 * updated by m_defrag()
1516 */ 1520 */
1517 m_csumflags = m_head->m_pkthdr.csum_flags; 1521 m_csumflags = m_head->m_pkthdr.csum_flags;
1518 1522
1519 txs = &sc->sc_txsoft[idx]; 1523 txs = &sc->sc_txsoft[idx];
1520 map = txs->txs_dmamap; 1524 map = txs->txs_dmamap;
1521 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m_head, BUS_DMA_NOWAIT); 1525 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m_head, BUS_DMA_NOWAIT);
1522 1526
1523 /* If too many segments to map, coalesce */ 1527 /* If too many segments to map, coalesce */
1524 if (error == EFBIG || 1528 if (error == EFBIG ||
1525 (m_head->m_pkthdr.len < ETHER_PAD_LEN && 1529 (m_head->m_pkthdr.len < ETHER_PAD_LEN &&
1526 map->dm_nsegs == VGE_TX_FRAGS)) { 1530 map->dm_nsegs == VGE_TX_FRAGS)) {
1527 m_new = m_defrag(m_head, M_DONTWAIT); 1531 m_new = m_defrag(m_head, M_DONTWAIT);
1528 if (m_new == NULL) 1532 if (m_new == NULL)
1529 return EFBIG; 1533 return EFBIG;
1530 1534
1531 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, 1535 error = bus_dmamap_load_mbuf(sc->sc_dmat, map,
1532 m_new, BUS_DMA_NOWAIT); 1536 m_new, BUS_DMA_NOWAIT);
1533 if (error) { 1537 if (error) {
1534 m_freem(m_new); 1538 m_freem(m_new);
1535 return error; 1539 return error;
1536 } 1540 }
1537 1541
1538 m_head = m_new; 1542 m_head = m_new;
1539 } else if (error) 1543 } else if (error)
1540 return error; 1544 return error;
1541 1545
1542 txs->txs_mbuf = m_head; 1546 txs->txs_mbuf = m_head;
1543 1547
1544 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1548 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1545 BUS_DMASYNC_PREWRITE); 1549 BUS_DMASYNC_PREWRITE);
1546 1550
1547 for (seg = 0, f = &txd->td_frag[0]; seg < map->dm_nsegs; seg++, f++) { 1551 for (seg = 0, f = &txd->td_frag[0]; seg < map->dm_nsegs; seg++, f++) {
1548 f->tf_buflen = htole16(VGE_BUFLEN(map->dm_segs[seg].ds_len)); 1552 f->tf_buflen = htole16(VGE_BUFLEN(map->dm_segs[seg].ds_len));
1549 vge_set_txaddr(f, map->dm_segs[seg].ds_addr); 1553 vge_set_txaddr(f, map->dm_segs[seg].ds_addr);
1550 } 1554 }
1551 1555
1552 /* Argh. This chip does not autopad short frames */ 1556 /* Argh. This chip does not autopad short frames */
1553 sz = m_head->m_pkthdr.len; 1557 sz = m_head->m_pkthdr.len;
1554 if (sz < ETHER_PAD_LEN) { 1558 if (sz < ETHER_PAD_LEN) {
1555 f->tf_buflen = htole16(VGE_BUFLEN(ETHER_PAD_LEN - sz)); 1559 f->tf_buflen = htole16(VGE_BUFLEN(ETHER_PAD_LEN - sz));
1556 vge_set_txaddr(f, VGE_CDPADADDR(sc)); 1560 vge_set_txaddr(f, VGE_CDPADADDR(sc));
1557 sz = ETHER_PAD_LEN; 1561 sz = ETHER_PAD_LEN;
1558 seg++; 1562 seg++;
1559 } 1563 }
1560 VGE_TXFRAGSYNC(sc, idx, seg, BUS_DMASYNC_PREWRITE); 1564 VGE_TXFRAGSYNC(sc, idx, seg, BUS_DMASYNC_PREWRITE);
1561 1565
1562 /* 1566 /*
1563 * When telling the chip how many segments there are, we 1567 * When telling the chip how many segments there are, we
1564 * must use nsegs + 1 instead of just nsegs. Darned if I 1568 * must use nsegs + 1 instead of just nsegs. Darned if I
1565 * know why. 1569 * know why.
1566 */ 1570 */
1567 seg++; 1571 seg++;
1568 1572
1569 flags = 0; 1573 flags = 0;
1570 if (m_csumflags & M_CSUM_IPv4) 1574 if (m_csumflags & M_CSUM_IPv4)
1571 flags |= VGE_TDCTL_IPCSUM; 1575 flags |= VGE_TDCTL_IPCSUM;
1572 if (m_csumflags & M_CSUM_TCPv4) 1576 if (m_csumflags & M_CSUM_TCPv4)
1573 flags |= VGE_TDCTL_TCPCSUM; 1577 flags |= VGE_TDCTL_TCPCSUM;
1574 if (m_csumflags & M_CSUM_UDPv4) 1578 if (m_csumflags & M_CSUM_UDPv4)
1575 flags |= VGE_TDCTL_UDPCSUM; 1579 flags |= VGE_TDCTL_UDPCSUM;
1576 td_sts = sz << 16; 1580 td_sts = sz << 16;
1577 td_ctl = flags | (seg << 28) | VGE_TD_LS_NORM; 1581 td_ctl = flags | (seg << 28) | VGE_TD_LS_NORM;
1578 1582
1579 if (sz > ETHERMTU + ETHER_HDR_LEN) 1583 if (sz > ETHERMTU + ETHER_HDR_LEN)
1580 td_ctl |= VGE_TDCTL_JUMBO; 1584 td_ctl |= VGE_TDCTL_JUMBO;
1581 1585
1582 /* 1586 /*
1583 * Set up hardware VLAN tagging. 1587 * Set up hardware VLAN tagging.
1584 */ 1588 */
1585 if (vlan_has_tag(m_head)) { 1589 if (vlan_has_tag(m_head)) {
1586 /* 1590 /*
1587 * No need for htons() here since the vge(4) chip assumes 1591 * No need for htons() here since the vge(4) chip assumes
1588 * that tags are written in little endian and 1592 * that tags are written in little endian and
1589 * we already use htole32() here. 1593 * we already use htole32() here.
1590 */ 1594 */
1591 td_ctl |= vlan_get_tag(m_head) | VGE_TDCTL_VTAG; 1595 td_ctl |= vlan_get_tag(m_head) | VGE_TDCTL_VTAG;
1592 } 1596 }
1593 txd->td_ctl = htole32(td_ctl); 1597 txd->td_ctl = htole32(td_ctl);
1594 txd->td_sts = htole32(td_sts); 1598 txd->td_sts = htole32(td_sts);
1595 VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1599 VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1596 1600
1597 txd->td_sts = htole32(VGE_TDSTS_OWN | td_sts); 1601 txd->td_sts = htole32(VGE_TDSTS_OWN | td_sts);
1598 VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1602 VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1599 1603
1600 sc->sc_tx_free--; 1604 sc->sc_tx_free--;
1601 1605
1602 return 0; 1606 return 0;
1603} 1607}
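A worked example of the short-frame padding and the fragment count quirk above:

	/*
	 * A 42-byte ARP request is below ETHER_PAD_LEN (60, i.e.
	 * ETHER_MIN_LEN minus the 4-byte CRC), so one extra fragment
	 * of 60 - 42 = 18 bytes pointing at the driver's pad buffer
	 * (VGE_CDPADADDR) is appended and sz is bumped to 60; the
	 * chip appends the CRC itself.  The undocumented "nsegs + 1"
	 * quirk then adds one more to the fragment count written
	 * into td_ctl.
	 */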
1604 1608
1605/* 1609/*
1606 * Main transmit routine. 1610 * Main transmit routine.
1607 */ 1611 */
1608 1612
1609static void 1613static void
1610vge_start(struct ifnet *ifp) 1614vge_start(struct ifnet *ifp)
1611{ 1615{
1612 struct vge_softc *sc; 1616 struct vge_softc *sc;
1613 struct vge_txsoft *txs; 1617 struct vge_txsoft *txs;
1614 struct mbuf *m_head; 1618 struct mbuf *m_head;
1615 int idx, pidx, ofree, error; 1619 int idx, pidx, ofree, error;
1616 1620
1617 sc = ifp->if_softc; 1621 sc = ifp->if_softc;
1618 1622
1619 if (!sc->sc_link || 1623 if (!sc->sc_link ||
1620 (ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) { 1624 (ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) {
1621 return; 1625 return;
1622 } 1626 }
1623 1627
1624 m_head = NULL; 1628 m_head = NULL;
1625 idx = sc->sc_tx_prodidx; 1629 idx = sc->sc_tx_prodidx;
1626 pidx = VGE_PREV_TXDESC(idx); 1630 pidx = VGE_PREV_TXDESC(idx);
1627 ofree = sc->sc_tx_free; 1631 ofree = sc->sc_tx_free;
1628 1632
1629 /* 1633 /*
1630 * Loop through the send queue, setting up transmit descriptors 1634 * Loop through the send queue, setting up transmit descriptors
1631 * until we drain the queue, or use up all available transmit 1635 * until we drain the queue, or use up all available transmit
1632 * descriptors. 1636 * descriptors.
1633 */ 1637 */
1634 for (;;) { 1638 for (;;) {
1635 /* Grab a packet off the queue. */ 1639 /* Grab a packet off the queue. */
1636 IFQ_POLL(&ifp->if_snd, m_head); 1640 IFQ_POLL(&ifp->if_snd, m_head);
1637 if (m_head == NULL) 1641 if (m_head == NULL)
1638 break; 1642 break;
1639 1643
1640 if (sc->sc_tx_free == 0) { 1644 if (sc->sc_tx_free == 0) {
1641 /* 1645 /*
1642 * All slots used, stop for now. 1646 * All slots used, stop for now.
1643 */ 1647 */
1644 ifp->if_flags |= IFF_OACTIVE; 1648 ifp->if_flags |= IFF_OACTIVE;
1645 break; 1649 break;
1646 } 1650 }
1647 1651
1648 txs = &sc->sc_txsoft[idx]; 1652 txs = &sc->sc_txsoft[idx];
1649 KASSERT(txs->txs_mbuf == NULL); 1653 KASSERT(txs->txs_mbuf == NULL);
1650 1654
1651 if ((error = vge_encap(sc, m_head, idx))) { 1655 if ((error = vge_encap(sc, m_head, idx))) {
1652 if (error == EFBIG) { 1656 if (error == EFBIG) {
1653 printf("%s: Tx packet consumes too many " 1657 printf("%s: Tx packet consumes too many "
1654 "DMA segments, dropping...\n", 1658 "DMA segments, dropping...\n",
1655 device_xname(sc->sc_dev)); 1659 device_xname(sc->sc_dev));
1656 IFQ_DEQUEUE(&ifp->if_snd, m_head); 1660 IFQ_DEQUEUE(&ifp->if_snd, m_head);
1657 m_freem(m_head); 1661 m_freem(m_head);
1658 continue; 1662 continue;
1659 } 1663 }
1660 1664
1661 /* 1665 /*
1662 * Short on resources, just stop for now. 1666 * Short on resources, just stop for now.
1663 */ 1667 */
1664 if (error == ENOBUFS) 1668 if (error == ENOBUFS)
1665 ifp->if_flags |= IFF_OACTIVE; 1669 ifp->if_flags |= IFF_OACTIVE;
1666 break; 1670 break;
1667 } 1671 }
1668 1672
1669 IFQ_DEQUEUE(&ifp->if_snd, m_head); 1673 IFQ_DEQUEUE(&ifp->if_snd, m_head);
1670 1674
1671 /* 1675 /*
1672 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. 1676 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
1673 */ 1677 */
1674 1678
1675 sc->sc_txdescs[pidx].td_frag[0].tf_buflen |= 1679 sc->sc_txdescs[pidx].td_frag[0].tf_buflen |=
1676 htole16(VGE_TXDESC_Q); 1680 htole16(VGE_TXDESC_Q);
1677 VGE_TXFRAGSYNC(sc, pidx, 1, 1681 VGE_TXFRAGSYNC(sc, pidx, 1,
1678 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1682 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1679 1683
1680 if (txs->txs_mbuf != m_head) { 1684 if (txs->txs_mbuf != m_head) {
1681 m_freem(m_head); 1685 m_freem(m_head);
1682 m_head = txs->txs_mbuf; 1686 m_head = txs->txs_mbuf;
1683 } 1687 }
1684 1688
1685 pidx = idx; 1689 pidx = idx;
1686 idx = VGE_NEXT_TXDESC(idx); 1690 idx = VGE_NEXT_TXDESC(idx);
1687 1691
1688 /* 1692 /*
1689 * If there's a BPF listener, bounce a copy of this frame 1693 * If there's a BPF listener, bounce a copy of this frame
1690 * to him. 1694 * to him.
1691 */ 1695 */
1692 bpf_mtap(ifp, m_head, BPF_D_OUT); 1696 bpf_mtap(ifp, m_head, BPF_D_OUT);
1693 } 1697 }
1694 1698
1695 if (sc->sc_tx_free < ofree) { 1699 if (sc->sc_tx_free < ofree) {
1696 /* TX packet queued */ 1700 /* TX packet queued */
1697 1701
1698 sc->sc_tx_prodidx = idx; 1702 sc->sc_tx_prodidx = idx;
1699 1703
1700 /* Issue a transmit command. */ 1704 /* Issue a transmit command. */
1701 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0); 1705 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);
1702 1706
1703 /* 1707 /*
1704 * Use the countdown timer for interrupt moderation. 1708 * Use the countdown timer for interrupt moderation.
1705 * 'TX done' interrupts are disabled. Instead, we reset the 1709 * 'TX done' interrupts are disabled. Instead, we reset the
1706 * countdown timer, which will begin counting until it hits 1710 * countdown timer, which will begin counting until it hits
1707 * the value in the SSTIMER register, and then trigger an 1711 * the value in the SSTIMER register, and then trigger an
1708 * interrupt. Each time we set the TIMER0_ENABLE bit, the 1712 * interrupt. Each time we set the TIMER0_ENABLE bit, the
1709 * the timer count is reloaded. Only when the transmitter 1713 * the timer count is reloaded. Only when the transmitter
1710 * is idle will the timer hit 0 and an interrupt fire. 1714 * is idle will the timer hit 0 and an interrupt fire.
1711 */ 1715 */
1712 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE); 1716 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
1713 1717
1714 /* 1718 /*
1715 * Set a timeout in case the chip goes out to lunch. 1719 * Set a timeout in case the chip goes out to lunch.
1716 */ 1720 */
1717 ifp->if_timer = 5; 1721 ifp->if_timer = 5;
1718 } 1722 }
1719} 1723}
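Taken together, the TX path's interrupt behavior looks roughly like this (a sketch; vge_init() loads SSTIMER with 400):

	/*
	 * t0:   vge_start() queues frames, kicks TXQCSR_WAK0 and arms
	 *       TIMER0.
	 * busy: each further vge_start() re-arms TIMER0, so no TX
	 *       interrupt is taken per packet.
	 * idle: TIMER0 expires and raises VGE_ISR_TIMER0; vge_intr()
	 *       services it via vge_txeof(), which re-arms TIMER0
	 *       until every descriptor has been reaped.
	 * ifp->if_timer = 5 remains as a last-resort watchdog.
	 */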
1720 1724
1721static int 1725static int
1722vge_init(struct ifnet *ifp) 1726vge_init(struct ifnet *ifp)
1723{ 1727{
1724 struct vge_softc *sc; 1728 struct vge_softc *sc;
1725 int i, rc = 0; 1729 int i, rc = 0;
1726 1730
1727 sc = ifp->if_softc; 1731 sc = ifp->if_softc;
1728 1732
1729 /* 1733 /*
1730 * Cancel pending I/O and free all RX/TX buffers. 1734 * Cancel pending I/O and free all RX/TX buffers.
1731 */ 1735 */
1732 vge_stop(ifp, 0); 1736 vge_stop(ifp, 0);
1733 vge_reset(sc); 1737 vge_reset(sc);
1734 1738
1735 /* Initialize the RX descriptors and mbufs. */ 1739 /* Initialize the RX descriptors and mbufs. */
1736 memset(sc->sc_rxdescs, 0, sizeof(sc->sc_rxdescs)); 1740 memset(sc->sc_rxdescs, 0, sizeof(sc->sc_rxdescs));
1737 sc->sc_rx_consumed = 0; 1741 sc->sc_rx_consumed = 0;
1738 for (i = 0; i < VGE_NRXDESC; i++) { 1742 for (i = 0; i < VGE_NRXDESC; i++) {
1739 if (vge_newbuf(sc, i, NULL) == ENOBUFS) { 1743 if (vge_newbuf(sc, i, NULL) == ENOBUFS) {
1740 printf("%s: unable to allocate or map rx buffer\n", 1744 printf("%s: unable to allocate or map rx buffer\n",
1741 device_xname(sc->sc_dev)); 1745 device_xname(sc->sc_dev));
1742 return 1; /* XXX */ 1746 return 1; /* XXX */
1743 } 1747 }
1744 } 1748 }
1745 sc->sc_rx_prodidx = 0; 1749 sc->sc_rx_prodidx = 0;
1746 sc->sc_rx_mhead = sc->sc_rx_mtail = NULL; 1750 sc->sc_rx_mhead = sc->sc_rx_mtail = NULL;
1747 1751
1748 /* Initialize the TX descriptors and mbufs. */ 1752 /* Initialize the TX descriptors and mbufs. */
1749 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs)); 1753 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
1750 bus_dmamap_sync(sc->sc_dmat, sc->sc_cddmamap, 1754 bus_dmamap_sync(sc->sc_dmat, sc->sc_cddmamap,
1751 VGE_CDTXOFF(0), sizeof(sc->sc_txdescs), 1755 VGE_CDTXOFF(0), sizeof(sc->sc_txdescs),
1752 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1756 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1753 for (i = 0; i < VGE_NTXDESC; i++) 1757 for (i = 0; i < VGE_NTXDESC; i++)
1754 sc->sc_txsoft[i].txs_mbuf = NULL; 1758 sc->sc_txsoft[i].txs_mbuf = NULL;
1755 1759
1756 sc->sc_tx_prodidx = 0; 1760 sc->sc_tx_prodidx = 0;
1757 sc->sc_tx_considx = 0; 1761 sc->sc_tx_considx = 0;
1758 sc->sc_tx_free = VGE_NTXDESC; 1762 sc->sc_tx_free = VGE_NTXDESC;
1759 1763
1760 /* Set our station address */ 1764 /* Set our station address */
1761 for (i = 0; i < ETHER_ADDR_LEN; i++) 1765 for (i = 0; i < ETHER_ADDR_LEN; i++)
1762 CSR_WRITE_1(sc, VGE_PAR0 + i, sc->sc_eaddr[i]); 1766 CSR_WRITE_1(sc, VGE_PAR0 + i, sc->sc_eaddr[i]);
1763 1767
1764 /* 1768 /*
1765 * Set receive FIFO threshold. Also allow transmission and 1769 * Set receive FIFO threshold. Also allow transmission and
1766 * reception of VLAN tagged frames. 1770 * reception of VLAN tagged frames.
1767 */ 1771 */
1768 CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR | VGE_RXCFG_VTAGOPT); 1772 CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR | VGE_RXCFG_VTAGOPT);
1769 CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES | VGE_VTAG_OPT2); 1773 CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES | VGE_VTAG_OPT2);
1770 1774
1771 /* Set DMA burst length */ 1775 /* Set DMA burst length */
1772 CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN); 1776 CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
1773 CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128); 1777 CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);
1774 1778
1775 CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO | VGE_TXCFG_NONBLK); 1779 CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO | VGE_TXCFG_NONBLK);
1776 1780
1777 /* Set collision backoff algorithm */ 1781 /* Set collision backoff algorithm */
1778 CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM | 1782 CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM |
1779 VGE_CHIPCFG1_CAP | VGE_CHIPCFG1_MBA | VGE_CHIPCFG1_BAKOPT); 1783 VGE_CHIPCFG1_CAP | VGE_CHIPCFG1_MBA | VGE_CHIPCFG1_BAKOPT);
1780 CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET); 1784 CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);
1781 1785
1782 /* Disable LPSEL field in priority resolution */ 1786 /* Disable LPSEL field in priority resolution */
1783 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS); 1787 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);
1784 1788
1785 /* 1789 /*
1786 * Load the addresses of the DMA queues into the chip. 1790 * Load the addresses of the DMA queues into the chip.
1787 * Note that we only use one transmit queue. 1791 * Note that we only use one transmit queue.
1788 */ 1792 */
1789 1793
1790 CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0, VGE_ADDR_LO(VGE_CDTXADDR(sc, 0))); 1794 CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0, VGE_ADDR_LO(VGE_CDTXADDR(sc, 0)));
1791 CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_NTXDESC - 1); 1795 CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_NTXDESC - 1);
1792 1796
1793 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, VGE_ADDR_LO(VGE_CDRXADDR(sc, 0))); 1797 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, VGE_ADDR_LO(VGE_CDRXADDR(sc, 0)));
1794 CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_NRXDESC - 1); 1798 CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_NRXDESC - 1);
1795 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_NRXDESC); 1799 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_NRXDESC);
1796 1800
1797 /* Enable and wake up the RX descriptor queue */ 1801 /* Enable and wake up the RX descriptor queue */
1798 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); 1802 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1799 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); 1803 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1800 1804
1801 /* Enable the TX descriptor queue */ 1805 /* Enable the TX descriptor queue */
1802 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0); 1806 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);
1803 1807
1804 /* Set up the receive filter -- allow large frames for VLANs. */ 1808 /* Set up the receive filter -- allow large frames for VLANs. */
1805 CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST | VGE_RXCTL_RX_GIANT); 1809 CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST | VGE_RXCTL_RX_GIANT);
1806 1810
1807 /* If we want promiscuous mode, set the allframes bit. */ 1811 /* If we want promiscuous mode, set the allframes bit. */
1808 if (ifp->if_flags & IFF_PROMISC) { 1812 if (ifp->if_flags & IFF_PROMISC) {
1809 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC); 1813 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC);
1810 } 1814 }
1811 1815
1812 /* Set capture broadcast bit to capture broadcast frames. */ 1816 /* Set capture broadcast bit to capture broadcast frames. */
1813 if (ifp->if_flags & IFF_BROADCAST) { 1817 if (ifp->if_flags & IFF_BROADCAST) {
1814 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST); 1818 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST);
1815 } 1819 }
1816 1820
1817 /* Set multicast bit to capture multicast frames. */ 1821 /* Set multicast bit to capture multicast frames. */
1818 if (ifp->if_flags & IFF_MULTICAST) { 1822 if (ifp->if_flags & IFF_MULTICAST) {
1819 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST); 1823 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST);
1820 } 1824 }
1821 1825
1822 /* Init the cam filter. */ 1826 /* Init the cam filter. */
1823 vge_cam_clear(sc); 1827 vge_cam_clear(sc);
1824 1828
1825 /* Init the multicast filter. */ 1829 /* Init the multicast filter. */
1826 vge_setmulti(sc); 1830 vge_setmulti(sc);
1827 1831
1828 /* Enable flow control */ 1832 /* Enable flow control */
1829 1833
1830 CSR_WRITE_1(sc, VGE_CRS2, 0x8B); 1834 CSR_WRITE_1(sc, VGE_CRS2, 0x8B);
1831 1835
1832 /* Enable jumbo frame reception (if desired) */ 1836 /* Enable jumbo frame reception (if desired) */
1833 1837
1834 /* Start the MAC. */ 1838 /* Start the MAC. */
1835 CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP); 1839 CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
1836 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL); 1840 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
1837 CSR_WRITE_1(sc, VGE_CRS0, 1841 CSR_WRITE_1(sc, VGE_CRS0,
1838 VGE_CR0_TX_ENABLE | VGE_CR0_RX_ENABLE | VGE_CR0_START); 1842 VGE_CR0_TX_ENABLE | VGE_CR0_RX_ENABLE | VGE_CR0_START);
1839 1843
1840 /* 1844 /*
1841 * Configure one-shot timer for microsecond 1845 * Configure one-shot timer for microsecond
1842 * resolution and load it for 500 usecs. 1846 * resolution and load it for 500 usecs.
1843 */ 1847 */
1844 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES); 1848 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES);
1845 CSR_WRITE_2(sc, VGE_SSTIMER, 400); 1849 CSR_WRITE_2(sc, VGE_SSTIMER, 400);
1846 1850
1847 /* 1851 /*
1848 * Configure interrupt moderation for receive. Enable 1852 * Configure interrupt moderation for receive. Enable
1849 * the holdoff counter and load it, and set the RX 1853 * the holdoff counter and load it, and set the RX
1850 * suppression count to the number of descriptors we 1854 * suppression count to the number of descriptors we
1851 * want to allow before triggering an interrupt. 1855 * want to allow before triggering an interrupt.
1852 * The holdoff timer is in units of 20 usecs. 1856 * The holdoff timer is in units of 20 usecs.
1853 */ 1857 */
1854 1858
1855#ifdef notyet 1859#ifdef notyet
1856 CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE); 1860 CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE);
1857 /* Select the interrupt holdoff timer page. */ 1861 /* Select the interrupt holdoff timer page. */
1858 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 1862 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
1859 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF); 1863 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
1860 CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */ 1864 CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */
1861 1865
1862 /* Enable use of the holdoff timer. */ 1866 /* Enable use of the holdoff timer. */
1863 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF); 1867 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
1864 CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD); 1868 CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD);
1865 1869
1866 /* Select the RX suppression threshold page. */ 1870 /* Select the RX suppression threshold page. */
1867 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 1871 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
1868 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR); 1872 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
1869 CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */ 1873 CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */
1870 1874
1871 /* Restore the page select bits. */ 1875 /* Restore the page select bits. */
1872 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 1876 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
1873 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); 1877 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
1874#endif 1878#endif
1875 1879
1876#ifdef DEVICE_POLLING 1880#ifdef DEVICE_POLLING
1877 /* 1881 /*
1878 * Disable interrupts if we are polling. 1882 * Disable interrupts if we are polling.
1879 */ 1883 */
1880 if (ifp->if_flags & IFF_POLLING) { 1884 if (ifp->if_flags & IFF_POLLING) {
1881 CSR_WRITE_4(sc, VGE_IMR, 0); 1885 CSR_WRITE_4(sc, VGE_IMR, 0);
1882 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 1886 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
1883 } else /* otherwise ... */ 1887 } else /* otherwise ... */
1884#endif /* DEVICE_POLLING */ 1888#endif /* DEVICE_POLLING */
1885 { 1889 {
1886 /* 1890 /*
1887 * Enable interrupts. 1891 * Enable interrupts.
1888 */ 1892 */
1889 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS); 1893 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
1890 CSR_WRITE_4(sc, VGE_ISR, 0); 1894 CSR_WRITE_4(sc, VGE_ISR, 0);
1891 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 1895 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
1892 } 1896 }
1893 1897
1894 if ((rc = ether_mediachange(ifp)) != 0) 1898 if ((rc = ether_mediachange(ifp)) != 0)
1895 goto out; 1899 goto out;
1896 1900
1897 ifp->if_flags |= IFF_RUNNING; 1901 ifp->if_flags |= IFF_RUNNING;
1898 ifp->if_flags &= ~IFF_OACTIVE; 1902 ifp->if_flags &= ~IFF_OACTIVE;
1899 1903
1900 sc->sc_if_flags = 0; 1904 sc->sc_if_flags = 0;
1901 sc->sc_link = 0; 1905 sc->sc_link = 0;
1902 1906
1903 callout_schedule(&sc->sc_timeout, hz); 1907 callout_schedule(&sc->sc_timeout, hz);
1904 1908
1905out: 1909out:
1906 return rc; 1910 return rc;
1907} 1911}
1908 1912
1909static void 1913static void
1910vge_miibus_statchg(struct ifnet *ifp) 1914vge_miibus_statchg(struct ifnet *ifp)
1911{ 1915{
1912 struct vge_softc *sc = ifp->if_softc; 1916 struct vge_softc *sc = ifp->if_softc;
1913 struct mii_data *mii = &sc->sc_mii; 1917 struct mii_data *mii = &sc->sc_mii;
1914 struct ifmedia_entry *ife = mii->mii_media.ifm_cur; 1918 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
1915 1919
1916 /* 1920 /*
1917 * If the user manually selects a media mode, we need to turn 1921 * If the user manually selects a media mode, we need to turn
1918 * on the forced MAC mode bit in the DIAGCTL register. If the 1922 * on the forced MAC mode bit in the DIAGCTL register. If the
1919 * user happens to choose a full duplex mode, we also need to 1923 * user happens to choose a full duplex mode, we also need to
1920 * set the 'force full duplex' bit. This applies only to 1924 * set the 'force full duplex' bit. This applies only to
1921 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC 1925 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
1922 * mode is disabled, and in 1000baseT mode, full duplex is 1926 * mode is disabled, and in 1000baseT mode, full duplex is
1923 * always implied, so we turn on the forced mode bit but leave 1927 * always implied, so we turn on the forced mode bit but leave
1924 * the FDX bit cleared. 1928 * the FDX bit cleared.
1925 */ 1929 */
1926 1930
1927 switch (IFM_SUBTYPE(ife->ifm_media)) { 1931 switch (IFM_SUBTYPE(ife->ifm_media)) {
1928 case IFM_AUTO: 1932 case IFM_AUTO:
1929 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 1933 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
1930 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 1934 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
1931 break; 1935 break;
1932 case IFM_1000_T: 1936 case IFM_1000_T:
1933 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 1937 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
1934 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 1938 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
1935 break; 1939 break;
1936 case IFM_100_TX: 1940 case IFM_100_TX:
1937 case IFM_10_T: 1941 case IFM_10_T:
1938 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 1942 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
1939 if ((ife->ifm_media & IFM_FDX) != 0) { 1943 if ((ife->ifm_media & IFM_FDX) != 0) {
1940 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 1944 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
1941 } else { 1945 } else {
1942 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 1946 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
1943 } 1947 }
1944 break; 1948 break;
1945 default: 1949 default:
1946 printf("%s: unknown media type: %x\n", 1950 printf("%s: unknown media type: %x\n",
1947 device_xname(sc->sc_dev), 1951 device_xname(sc->sc_dev),
1948 IFM_SUBTYPE(ife->ifm_media)); 1952 IFM_SUBTYPE(ife->ifm_media));
1949 break; 1953 break;
1950 } 1954 }
1951} 1955}
1952 1956
1953static int 1957static int
1954vge_ifflags_cb(struct ethercom *ec) 1958vge_ifflags_cb(struct ethercom *ec)
1955{ 1959{
1956 struct ifnet *ifp = &ec->ec_if; 1960 struct ifnet *ifp = &ec->ec_if;
1957 struct vge_softc *sc = ifp->if_softc; 1961 struct vge_softc *sc = ifp->if_softc;
1958 u_short change = ifp->if_flags ^ sc->sc_if_flags; 1962 u_short change = ifp->if_flags ^ sc->sc_if_flags;
1959 1963
1960 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) 1964 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
1961 return ENETRESET; 1965 return ENETRESET;
1962 else if ((change & IFF_PROMISC) == 0) 1966 else if ((change & IFF_PROMISC) == 0)
1963 return 0; 1967 return 0;
1964 1968
1965 if ((ifp->if_flags & IFF_PROMISC) == 0) 1969 if ((ifp->if_flags & IFF_PROMISC) == 0)
1966 CSR_CLRBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC); 1970 CSR_CLRBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC);
1967 else 1971 else
1968 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC); 1972 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC);
1969 vge_setmulti(sc); 1973 vge_setmulti(sc);
1970 return 0; 1974 return 0;
1971} 1975}
1972 1976
1973static int 1977static int
1974vge_ioctl(struct ifnet *ifp, u_long command, void *data) 1978vge_ioctl(struct ifnet *ifp, u_long command, void *data)
1975{ 1979{
1976 struct vge_softc *sc; 1980 struct vge_softc *sc;
1977 int s, error; 1981 int s, error;
1978 1982
1979 sc = ifp->if_softc; 1983 sc = ifp->if_softc;
1980 error = 0; 1984 error = 0;
1981 1985
1982 s = splnet(); 1986 s = splnet();
1983 1987
1984 if ((error = ether_ioctl(ifp, command, data)) == ENETRESET) { 1988 if ((error = ether_ioctl(ifp, command, data)) == ENETRESET) {
1985 error = 0; 1989 error = 0;
1986 if (command != SIOCADDMULTI && command != SIOCDELMULTI) 1990 if (command != SIOCADDMULTI && command != SIOCDELMULTI)
1987 ; 1991 ;
1988 else if (ifp->if_flags & IFF_RUNNING) { 1992 else if (ifp->if_flags & IFF_RUNNING) {
1989 /* 1993 /*
1990 * Multicast list has changed; set the hardware filter 1994 * Multicast list has changed; set the hardware filter
1991 * accordingly. 1995 * accordingly.
1992 */ 1996 */
1993 vge_setmulti(sc); 1997 vge_setmulti(sc);
1994 } 1998 }
1995 } 1999 }
1996 sc->sc_if_flags = ifp->if_flags; 2000 sc->sc_if_flags = ifp->if_flags;
1997 2001
1998 splx(s); 2002 splx(s);
1999 return error; 2003 return error;
2000} 2004}
2001 2005
2002static void 2006static void
2003vge_watchdog(struct ifnet *ifp) 2007vge_watchdog(struct ifnet *ifp)
2004{ 2008{
2005 struct vge_softc *sc; 2009 struct vge_softc *sc;
2006 int s; 2010 int s;
2007 2011
2008 sc = ifp->if_softc; 2012 sc = ifp->if_softc;
2009 s = splnet(); 2013 s = splnet();
2010 printf("%s: watchdog timeout\n", device_xname(sc->sc_dev)); 2014 printf("%s: watchdog timeout\n", device_xname(sc->sc_dev));
2011 ifp->if_oerrors++; 2015 ifp->if_oerrors++;
2012 2016
2013 vge_txeof(sc); 2017 vge_txeof(sc);
2014 vge_rxeof(sc); 2018 vge_rxeof(sc);
2015 2019
2016 vge_init(ifp); 2020 vge_init(ifp);
2017 2021
2018 splx(s); 2022 splx(s);
2019} 2023}
2020 2024
2021/* 2025/*
2022 * Stop the adapter and free any mbufs allocated to the 2026 * Stop the adapter and free any mbufs allocated to the
2023 * RX and TX lists. 2027 * RX and TX lists.
2024 */ 2028 */
2025static void 2029static void
2026vge_stop(struct ifnet *ifp, int disable) 2030vge_stop(struct ifnet *ifp, int disable)
2027{ 2031{
2028 struct vge_softc *sc = ifp->if_softc; 2032 struct vge_softc *sc = ifp->if_softc;
2029 struct vge_txsoft *txs; 2033 struct vge_txsoft *txs;
2030 struct vge_rxsoft *rxs; 2034 struct vge_rxsoft *rxs;
2031 int i, s; 2035 int i, s;
2032 2036
2033 s = splnet(); 2037 s = splnet();
2034 ifp->if_timer = 0; 2038 ifp->if_timer = 0;
2035 2039
2036 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2040 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2037#ifdef DEVICE_POLLING 2041#ifdef DEVICE_POLLING
2038 ether_poll_deregister(ifp); 2042 ether_poll_deregister(ifp);
2039#endif /* DEVICE_POLLING */ 2043#endif /* DEVICE_POLLING */
2040 2044
2041 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 2045 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
2042 CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP); 2046 CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
2043 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); 2047 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
2044 CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF); 2048 CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
2045 CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF); 2049 CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
2046 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0); 2050 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);
2047 2051
2048 if (sc->sc_rx_mhead != NULL) { 2052 if (sc->sc_rx_mhead != NULL) {
2049 m_freem(sc->sc_rx_mhead); 2053 m_freem(sc->sc_rx_mhead);
2050 sc->sc_rx_mhead = sc->sc_rx_mtail = NULL; 2054 sc->sc_rx_mhead = sc->sc_rx_mtail = NULL;
2051 } 2055 }
2052 2056
2053 /* Free the TX list buffers. */ 2057 /* Free the TX list buffers. */
2054 2058
2055 for (i = 0; i < VGE_NTXDESC; i++) { 2059 for (i = 0; i < VGE_NTXDESC; i++) {
2056 txs = &sc->sc_txsoft[i]; 2060 txs = &sc->sc_txsoft[i];
2057 if (txs->txs_mbuf != NULL) { 2061 if (txs->txs_mbuf != NULL) {
2058 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 2062 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2059 m_freem(txs->txs_mbuf); 2063 m_freem(txs->txs_mbuf);
2060 txs->txs_mbuf = NULL; 2064 txs->txs_mbuf = NULL;
2061 } 2065 }
2062 } 2066 }
2063 2067
2064 /* Free the RX list buffers. */ 2068 /* Free the RX list buffers. */
2065 2069
2066 for (i = 0; i < VGE_NRXDESC; i++) { 2070 for (i = 0; i < VGE_NRXDESC; i++) {
2067 rxs = &sc->sc_rxsoft[i]; 2071 rxs = &sc->sc_rxsoft[i];
2068 if (rxs->rxs_mbuf != NULL) { 2072 if (rxs->rxs_mbuf != NULL) {
2069 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 2073 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2070 m_freem(rxs->rxs_mbuf); 2074 m_freem(rxs->rxs_mbuf);
2071 rxs->rxs_mbuf = NULL; 2075 rxs->rxs_mbuf = NULL;
2072 } 2076 }
2073 } 2077 }
2074 2078
2075 splx(s); 2079 splx(s);
2076} 2080}
2077 2081
2078#if VGE_POWER_MANAGEMENT 2082#if VGE_POWER_MANAGEMENT
2079/* 2083/*
2080 * Device suspend routine. Stop the interface and save some PCI 2084 * Device suspend routine. Stop the interface and save some PCI
2081 * settings in case the BIOS doesn't restore them properly on 2085 * settings in case the BIOS doesn't restore them properly on
2082 * resume. 2086 * resume.
2083 */ 2087 */
2084static int 2088static int
2085vge_suspend(device_t dev) 2089vge_suspend(device_t dev)
2086{ 2090{
2087 struct vge_softc *sc; 2091 struct vge_softc *sc;
2088 int i; 2092 int i;
2089 2093
2090 sc = device_get_softc(dev); 2094 sc = device_get_softc(dev);
2091 2095
2092 vge_stop(sc); 2096 vge_stop(sc);
2093 2097
2094 for (i = 0; i < 5; i++) 2098 for (i = 0; i < 5; i++)
2095 sc->sc_saved_maps[i] = 2099 sc->sc_saved_maps[i] =
2096 pci_read_config(dev, PCIR_MAPS + i * 4, 4); 2100 pci_read_config(dev, PCIR_MAPS + i * 4, 4);
2097 sc->sc_saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4); 2101 sc->sc_saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
2098 sc->sc_saved_intline = pci_read_config(dev, PCIR_INTLINE, 1); 2102 sc->sc_saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
2099 sc->sc_saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1); 2103 sc->sc_saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
2100 sc->sc_saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1); 2104 sc->sc_saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
2101 2105
2102 sc->suspended = 1; 2106 sc->suspended = 1;
2103 2107
2104 return 0; 2108 return 0;
2105} 2109}
2106 2110
2107/* 2111/*
2108 * Device resume routine. Restore some PCI settings in case the BIOS 2112 * Device resume routine. Restore some PCI settings in case the BIOS
2109 * doesn't, re-enable busmastering, and restart the interface if 2113 * doesn't, re-enable busmastering, and restart the interface if
2110 * appropriate. 2114 * appropriate.
2111 */ 2115 */
2112static int 2116static int
2113vge_resume(device_t dev) 2117vge_resume(device_t dev)
2114{ 2118{
2115 struct vge_softc *sc; 2119 struct vge_softc *sc;
2116 struct ifnet *ifp; 2120 struct ifnet *ifp;
2117 int i; 2121 int i;
2118 2122
2119 sc = device_private(dev); 2123 sc = device_private(dev);
2120 ifp = &sc->sc_ethercom.ec_if; 2124 ifp = &sc->sc_ethercom.ec_if;
2121 2125
2122 /* better way to do this? */ 2126 /* better way to do this? */
2123 for (i = 0; i < 5; i++) 2127 for (i = 0; i < 5; i++)
2124 pci_write_config(dev, PCIR_MAPS + i * 4, 2128 pci_write_config(dev, PCIR_MAPS + i * 4,
2125 sc->sc_saved_maps[i], 4); 2129 sc->sc_saved_maps[i], 4);
2126 pci_write_config(dev, PCIR_BIOS, sc->sc_saved_biosaddr, 4); 2130 pci_write_config(dev, PCIR_BIOS, sc->sc_saved_biosaddr, 4);
2127 pci_write_config(dev, PCIR_INTLINE, sc->sc_saved_intline, 1); 2131 pci_write_config(dev, PCIR_INTLINE, sc->sc_saved_intline, 1);
2128 pci_write_config(dev, PCIR_CACHELNSZ, sc->sc_saved_cachelnsz, 1); 2132 pci_write_config(dev, PCIR_CACHELNSZ, sc->sc_saved_cachelnsz, 1);
2129 pci_write_config(dev, PCIR_LATTIMER, sc->sc_saved_lattimer, 1); 2133 pci_write_config(dev, PCIR_LATTIMER, sc->sc_saved_lattimer, 1);
2130 2134
2131 /* reenable busmastering */ 2135 /* reenable busmastering */
2132 pci_enable_busmaster(dev); 2136 pci_enable_busmaster(dev);
2133 pci_enable_io(dev, SYS_RES_MEMORY); 2137 pci_enable_io(dev, SYS_RES_MEMORY);
2134 2138
2135 /* reinitialize interface if necessary */ 2139 /* reinitialize interface if necessary */
2136 if (ifp->if_flags & IFF_UP) 2140 if (ifp->if_flags & IFF_UP)
2137 vge_init(sc); 2141 vge_init(sc);
2138 2142
2139 sc->suspended = 0; 2143 sc->suspended = 0;
2140 2144
2141 return 0; 2145 return 0;
2142} 2146}
2143#endif 2147#endif
2144 2148
2145/* 2149/*
2146 * Stop all chip I/O so that the kernel's probe routines don't 2150 * Stop all chip I/O so that the kernel's probe routines don't
2147 * get confused by errant DMAs when rebooting. 2151 * get confused by errant DMAs when rebooting.
2148 */ 2152 */
2149static bool 2153static bool
2150vge_shutdown(device_t self, int howto) 2154vge_shutdown(device_t self, int howto)
2151{ 2155{
2152 struct vge_softc *sc; 2156 struct vge_softc *sc;
2153 2157
2154 sc = device_private(self); 2158 sc = device_private(self);
2155 vge_stop(&sc->sc_ethercom.ec_if, 1); 2159 vge_stop(&sc->sc_ethercom.ec_if, 1);
2156 2160
2157 return true; 2161 return true;
2158} 2162}
 2163
 2164static void
 2165vge_clrwol(struct vge_softc *sc)
 2166{
 2167 uint8_t val;
 2168
 2169 val = CSR_READ_1(sc, VGE_PWRSTAT);
 2170 val &= ~VGE_STICKHW_SWPTAG;
 2171 CSR_WRITE_1(sc, VGE_PWRSTAT, val);
 2172 /* Disable WOL and clear power state indicator. */
 2173 val = CSR_READ_1(sc, VGE_PWRSTAT);
 2174 val &= ~(VGE_STICKHW_DS0 | VGE_STICKHW_DS1);
 2175 CSR_WRITE_1(sc, VGE_PWRSTAT, val);
 2176
 2177 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII);
 2178 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
 2179
 2180 /* Clear WOL on pattern match. */
 2181 CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL);
 2182 /* Disable WOL on magic/unicast packet. */
 2183 CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F);
 2184 CSR_WRITE_1(sc, VGE_WOLCFGC, VGE_WOLCFG_SAB | VGE_WOLCFG_SAM |
 2185 VGE_WOLCFG_PMEOVR);
 2186 /* Clear WOL status on pattern match. */
 2187 CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF);
 2188 CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF);
 2189}
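
vge_clrwol() leans on two register conventions spelled out in the if_vgereg.h diff below: the WOL control/config/status registers come in write-to-set/write-to-clear pairs (VGE_WOLCR0S at 0xA0 sets event bits that VGE_WOLCR0C at 0xA4 clears), while VGE_PWRSTAT is an ordinary read-modify-write target, which is why it is read back before each of the SWPTAG and DS0/DS1 updates. The CSR_* accessors themselves live elsewhere in the driver; a minimal sketch of what such helpers typically expand to on NetBSD, assuming bus_space tag/handle members named sc_bst/sc_bsh (the driver's real member names may differ):

	/* Sketch only: illustrative stand-ins for the driver's macros. */
	#define CSR_READ_1(sc, reg) \
		bus_space_read_1((sc)->sc_bst, (sc)->sc_bsh, (reg))
	#define CSR_WRITE_1(sc, reg, val) \
		bus_space_write_1((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))
	#define CSR_SETBIT_1(sc, reg, x) \
		CSR_WRITE_1((sc), (reg), CSR_READ_1((sc), (reg)) | (x))
	#define CSR_CLRBIT_1(sc, reg, x) \
		CSR_WRITE_1((sc), (reg), CSR_READ_1((sc), (reg)) & ~(x))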

cvs diff -r1.4 -r1.5 src/sys/dev/pci/if_vgereg.h

--- src/sys/dev/pci/if_vgereg.h 2019/07/11 03:49:51 1.4
+++ src/sys/dev/pci/if_vgereg.h 2019/10/08 14:26:27 1.5
@@ -1,695 +1,720 @@ @@ -1,695 +1,720 @@
1/*- 1/*-
2 * Copyright (c) 2004 2 * Copyright (c) 2004
3 * Bill Paul <wpaul@windriver.com>. All rights reserved. 3 * Bill Paul <wpaul@windriver.com>. All rights reserved.
4 * 4 *
5 * Redistribution and use in source and binary forms, with or without 5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions 6 * modification, are permitted provided that the following conditions
7 * are met: 7 * are met:
8 * 1. Redistributions of source code must retain the above copyright 8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright 10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the 11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution. 12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software 13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement: 14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul. 15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors 16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software 17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission. 18 * without specific prior written permission.
19 * 19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE. 30 * THE POSSIBILITY OF SUCH DAMAGE.
31 * 31 *
32 * $FreeBSD: src/sys/dev/vge/if_vgereg.h,v 1.2 2005/01/06 01:43:31 imp Exp $ 32 * $FreeBSD: src/sys/dev/vge/if_vgereg.h,v 1.2 2005/01/06 01:43:31 imp Exp $
33 */ 33 */
34 34
35/* 35/*
36 * Register definitions for the VIA VT6122 gigabit ethernet controller. 36 * Register definitions for the VIA VT6122 gigabit ethernet controller.
37 * Definitions for the built-in copper PHY can be found in vgphy.h. 37 * Definitions for the built-in copper PHY can be found in vgphy.h.
38 * 38 *
39 * The VT612x controllers have 256 bytes of register space. The 39 * The VT612x controllers have 256 bytes of register space. The
40 * manual seems to imply that the registers should all be accessed 40 * manual seems to imply that the registers should all be accessed
41 * using 32-bit I/O cycles, but some of them are less than 32 bits 41 * using 32-bit I/O cycles, but some of them are less than 32 bits
42 * wide. Go figure. 42 * wide. Go figure.
43 */ 43 */
44 44
45#ifndef _IF_VGEREG_H_ 45#ifndef _IF_VGEREG_H_
46#define _IF_VGEREG_H_ 46#define _IF_VGEREG_H_
47 47
48#define VGE_PAR0 0x00 /* physical address register */ 48#define VGE_PAR0 0x00 /* physical address register */
49#define VGE_PAR1 0x02 49#define VGE_PAR1 0x02
50#define VGE_PAR2 0x04 50#define VGE_PAR2 0x04
51#define VGE_RXCTL 0x06 /* RX control register */ 51#define VGE_RXCTL 0x06 /* RX control register */
52#define VGE_TXCTL 0x07 /* TX control register */ 52#define VGE_TXCTL 0x07 /* TX control register */
53#define VGE_CRS0 0x08 /* Global cmd register 0 (w to set) */ 53#define VGE_CRS0 0x08 /* Global cmd register 0 (w to set) */
54#define VGE_CRS1 0x09 /* Global cmd register 1 (w to set) */ 54#define VGE_CRS1 0x09 /* Global cmd register 1 (w to set) */
55#define VGE_CRS2 0x0A /* Global cmd register 2 (w to set) */ 55#define VGE_CRS2 0x0A /* Global cmd register 2 (w to set) */
56#define VGE_CRS3 0x0B /* Global cmd register 3 (w to set) */ 56#define VGE_CRS3 0x0B /* Global cmd register 3 (w to set) */
57#define VGE_CRC0 0x0C /* Global cmd register 0 (w to clr) */ 57#define VGE_CRC0 0x0C /* Global cmd register 0 (w to clr) */
58#define VGE_CRC1 0x0D /* Global cmd register 1 (w to clr) */ 58#define VGE_CRC1 0x0D /* Global cmd register 1 (w to clr) */
59#define VGE_CRC2 0x0E /* Global cmd register 2 (w to clr) */ 59#define VGE_CRC2 0x0E /* Global cmd register 2 (w to clr) */
60#define VGE_CRC3 0x0F /* Global cmd register 3 (w to clr) */ 60#define VGE_CRC3 0x0F /* Global cmd register 3 (w to clr) */
61#define VGE_MAR0 0x10 /* Mcast hash/CAM register 0 */ 61#define VGE_MAR0 0x10 /* Mcast hash/CAM register 0 */
62#define VGE_MAR1 0x14 /* Mcast hash/CAM register 1 */ 62#define VGE_MAR1 0x14 /* Mcast hash/CAM register 1 */
63#define VGE_CAM0 0x10 63#define VGE_CAM0 0x10
64#define VGE_CAM1 0x11 64#define VGE_CAM1 0x11
65#define VGE_CAM2 0x12 65#define VGE_CAM2 0x12
66#define VGE_CAM3 0x13 66#define VGE_CAM3 0x13
67#define VGE_CAM4 0x14 67#define VGE_CAM4 0x14
68#define VGE_CAM5 0x15 68#define VGE_CAM5 0x15
69#define VGE_CAM6 0x16 69#define VGE_CAM6 0x16
70#define VGE_CAM7 0x17 70#define VGE_CAM7 0x17
71#define VGE_TXDESC_HIADDR 0x18 /* Hi part of 64bit txdesc base addr */ 71#define VGE_TXDESC_HIADDR 0x18 /* Hi part of 64bit txdesc base addr */
72#define VGE_DATABUF_HIADDR 0x1C /* Hi part of 64bit data buffer addr */ 72#define VGE_DATABUF_HIADDR 0x1C /* Hi part of 64bit data buffer addr */
73#define VGE_INTCTL0 0x20 /* interrupt control register */ 73#define VGE_INTCTL0 0x20 /* interrupt control register */
74#define VGE_RXSUPPTHR 0x20 74#define VGE_RXSUPPTHR 0x20
75#define VGE_TXSUPPTHR 0x20 75#define VGE_TXSUPPTHR 0x20
76#define VGE_INTHOLDOFF 0x20 76#define VGE_INTHOLDOFF 0x20
77#define VGE_INTCTL1 0x21 /* interrupt control register */ 77#define VGE_INTCTL1 0x21 /* interrupt control register */
78#define VGE_TXHOSTERR 0x22 /* TX host error status */ 78#define VGE_TXHOSTERR 0x22 /* TX host error status */
79#define VGE_RXHOSTERR 0x23 /* RX host error status */ 79#define VGE_RXHOSTERR 0x23 /* RX host error status */
80#define VGE_ISR 0x24 /* Interrupt status register */ 80#define VGE_ISR 0x24 /* Interrupt status register */
81#define VGE_IMR 0x28 /* Interrupt mask register */ 81#define VGE_IMR 0x28 /* Interrupt mask register */
82#define VGE_TXSTS_PORT 0x2C /* Transmit status port (???) */ 82#define VGE_TXSTS_PORT 0x2C /* Transmit status port (???) */
83#define VGE_TXQCSRS 0x30 /* TX queue ctl/status set */ 83#define VGE_TXQCSRS 0x30 /* TX queue ctl/status set */
84#define VGE_RXQCSRS 0x32 /* RX queue ctl/status set */ 84#define VGE_RXQCSRS 0x32 /* RX queue ctl/status set */
85#define VGE_TXQCSRC 0x34 /* TX queue ctl/status clear */ 85#define VGE_TXQCSRC 0x34 /* TX queue ctl/status clear */
86#define VGE_RXQCSRC 0x36 /* RX queue ctl/status clear */ 86#define VGE_RXQCSRC 0x36 /* RX queue ctl/status clear */
87#define VGE_RXDESC_ADDR_LO 0x38 /* RX desc base addr (lo 32 bits) */ 87#define VGE_RXDESC_ADDR_LO 0x38 /* RX desc base addr (lo 32 bits) */
88#define VGE_RXDESC_CONSIDX 0x3C /* Current RX descriptor index */ 88#define VGE_RXDESC_CONSIDX 0x3C /* Current RX descriptor index */
89#define VGE_RXQTIMER 0x3E /* RX queue timer pend register */ 89#define VGE_RXQTIMER 0x3E /* RX queue timer pend register */
90#define VGE_TXQTIMER 0x3F /* TX queue timer pend register */ 90#define VGE_TXQTIMER 0x3F /* TX queue timer pend register */
91#define VGE_TXDESC_ADDR_LO0 0x40 /* TX desc0 base addr (lo 32 bits) */ 91#define VGE_TXDESC_ADDR_LO0 0x40 /* TX desc0 base addr (lo 32 bits) */
92#define VGE_TXDESC_ADDR_LO1 0x44 /* TX desc1 base addr (lo 32 bits) */ 92#define VGE_TXDESC_ADDR_LO1 0x44 /* TX desc1 base addr (lo 32 bits) */
93#define VGE_TXDESC_ADDR_LO2 0x48 /* TX desc2 base addr (lo 32 bits) */ 93#define VGE_TXDESC_ADDR_LO2 0x48 /* TX desc2 base addr (lo 32 bits) */
94#define VGE_TXDESC_ADDR_LO3 0x4C /* TX desc3 base addr (lo 32 bits) */ 94#define VGE_TXDESC_ADDR_LO3 0x4C /* TX desc3 base addr (lo 32 bits) */
95#define VGE_RXDESCNUM 0x50 /* Size of RX desc ring */ 95#define VGE_RXDESCNUM 0x50 /* Size of RX desc ring */
96#define VGE_TXDESCNUM 0x52 /* Size of TX desc ring */ 96#define VGE_TXDESCNUM 0x52 /* Size of TX desc ring */
97#define VGE_TXDESC_CONSIDX0 0x54 /* Current TX descriptor index */ 97#define VGE_TXDESC_CONSIDX0 0x54 /* Current TX descriptor index */
98#define VGE_TXDESC_CONSIDX1 0x56 /* Current TX descriptor index */ 98#define VGE_TXDESC_CONSIDX1 0x56 /* Current TX descriptor index */
99#define VGE_TXDESC_CONSIDX2 0x58 /* Current TX descriptor index */ 99#define VGE_TXDESC_CONSIDX2 0x58 /* Current TX descriptor index */
100#define VGE_TXDESC_CONSIDX3 0x5A /* Current TX descriptor index */ 100#define VGE_TXDESC_CONSIDX3 0x5A /* Current TX descriptor index */
101#define VGE_TX_PAUSE_TIMER 0x5C /* TX pause frame timer */ 101#define VGE_TX_PAUSE_TIMER 0x5C /* TX pause frame timer */
102#define VGE_RXDESC_RESIDUECNT 0x5E /* RX descriptor residue count */ 102#define VGE_RXDESC_RESIDUECNT 0x5E /* RX descriptor residue count */
103#define VGE_FIFOTEST0 0x60 /* FIFO test register */ 103#define VGE_FIFOTEST0 0x60 /* FIFO test register */
104#define VGE_FIFOTEST1 0x64 /* FIFO test register */ 104#define VGE_FIFOTEST1 0x64 /* FIFO test register */
105#define VGE_CAMADDR 0x68 /* CAM address register */ 105#define VGE_CAMADDR 0x68 /* CAM address register */
106#define VGE_CAMCTL 0x69 /* CAM control register */ 106#define VGE_CAMCTL 0x69 /* CAM control register */
107#define VGE_GFTEST 0x6A 107#define VGE_GFTEST 0x6A
108#define VGE_FTSCMD 0x6B 108#define VGE_FTSCMD 0x6B
109#define VGE_MIICFG 0x6C /* MII port config register */ 109#define VGE_MIICFG 0x6C /* MII port config register */
110#define VGE_MIISTS 0x6D /* MII port status register */ 110#define VGE_MIISTS 0x6D /* MII port status register */
111#define VGE_PHYSTS0 0x6E /* PHY status register */ 111#define VGE_PHYSTS0 0x6E /* PHY status register */
112#define VGE_PHYSTS1 0x6F /* PHY status register */ 112#define VGE_PHYSTS1 0x6F /* PHY status register */
113#define VGE_MIICMD 0x70 /* MII command register */ 113#define VGE_MIICMD 0x70 /* MII command register */
114#define VGE_MIIADDR 0x71 /* MII address register */ 114#define VGE_MIIADDR 0x71 /* MII address register */
115#define VGE_MIIDATA 0x72 /* MII data register */ 115#define VGE_MIIDATA 0x72 /* MII data register */
116#define VGE_SSTIMER 0x74 /* single-shot timer */ 116#define VGE_SSTIMER 0x74 /* single-shot timer */
117#define VGE_PTIMER 0x76 /* periodic timer */ 117#define VGE_PTIMER 0x76 /* periodic timer */
118#define VGE_CHIPCFG0 0x78 /* chip config A */ 118#define VGE_CHIPCFG0 0x78 /* chip config A */
119#define VGE_CHIPCFG1 0x79 /* chip config B */ 119#define VGE_CHIPCFG1 0x79 /* chip config B */
120#define VGE_CHIPCFG2 0x7A /* chip config C */ 120#define VGE_CHIPCFG2 0x7A /* chip config C */
121#define VGE_CHIPCFG3 0x7B /* chip config D */ 121#define VGE_CHIPCFG3 0x7B /* chip config D */
122#define VGE_DMACFG0 0x7C /* DMA config 0 */ 122#define VGE_DMACFG0 0x7C /* DMA config 0 */
123#define VGE_DMACFG1 0x7D /* DMA config 1 */ 123#define VGE_DMACFG1 0x7D /* DMA config 1 */
124#define VGE_RXCFG 0x7E /* MAC RX config */ 124#define VGE_RXCFG 0x7E /* MAC RX config */
125#define VGE_TXCFG 0x7F /* MAC TX config */ 125#define VGE_TXCFG 0x7F /* MAC TX config */
126#define VGE_PWRMGMT 0x82 /* power management shadow register */ 126#define VGE_PWRMGMT 0x82 /* power management shadow register */
127#define VGE_PWRSTAT 0x83 /* power state shadow register */ 127#define VGE_PWRSTAT 0x83 /* power state shadow register */
128#define VGE_MIBCSR 0x84 /* MIB control/status register */ 128#define VGE_MIBCSR 0x84 /* MIB control/status register */
129#define VGE_SWEEDATA 0x85 /* EEPROM software loaded data */ 129#define VGE_SWEEDATA 0x85 /* EEPROM software loaded data */
130#define VGE_MIBDATA 0x88 /* MIB data register */ 130#define VGE_MIBDATA 0x88 /* MIB data register */
131#define VGE_EEWRDAT 0x8C /* EEPROM embedded write */ 131#define VGE_EEWRDAT 0x8C /* EEPROM embedded write */
132#define VGE_EECSUM 0x92 /* EEPROM checksum */ 132#define VGE_EECSUM 0x92 /* EEPROM checksum */
133#define VGE_EECSR 0x93 /* EEPROM control/status */ 133#define VGE_EECSR 0x93 /* EEPROM control/status */
134#define VGE_EERDDAT 0x94 /* EEPROM embedded read */ 134#define VGE_EERDDAT 0x94 /* EEPROM embedded read */
135#define VGE_EEADDR 0x96 /* EEPROM address */ 135#define VGE_EEADDR 0x96 /* EEPROM address */
136#define VGE_EECMD 0x97 /* EEPROM embedded command */ 136#define VGE_EECMD 0x97 /* EEPROM embedded command */
137#define VGE_CHIPSTRAP 0x99 /* Chip jumper strapping status */ 137#define VGE_CHIPSTRAP 0x99 /* Chip jumper strapping status */
138#define VGE_MEDIASTRAP 0x9B /* Media jumper strapping */ 138#define VGE_MEDIASTRAP 0x9B /* Media jumper strapping */
139#define VGE_DIAGSTS 0x9C /* Chip diagnostic status */ 139#define VGE_DIAGSTS 0x9C /* Chip diagnostic status */
140#define VGE_DBGCTL 0x9E /* Chip debug control */ 140#define VGE_DBGCTL 0x9E /* Chip debug control */
141#define VGE_DIAGCTL 0x9F /* Chip diagnostic control */ 141#define VGE_DIAGCTL 0x9F /* Chip diagnostic control */
142#define VGE_WOLCR0S 0xA0 /* WOL0 event set */ 142#define VGE_WOLCR0S 0xA0 /* WOL0 event set */
143#define VGE_WOLCR1S 0xA1 /* WOL1 event set */ 143#define VGE_WOLCR1S 0xA1 /* WOL1 event set */
144#define VGE_PWRCFGS 0xA2 /* Power management config set */ 144#define VGE_PWRCFGS 0xA2 /* Power management config set */
145#define VGE_WOLCFGS 0xA3 /* WOL config set */ 145#define VGE_WOLCFGS 0xA3 /* WOL config set */
146#define VGE_WOLCR0C 0xA4 /* WOL0 event clear */ 146#define VGE_WOLCR0C 0xA4 /* WOL0 event clear */
147#define VGE_WOLCR1C 0xA5 /* WOL1 event clear */ 147#define VGE_WOLCR1C 0xA5 /* WOL1 event clear */
148#define VGE_PWRCFGC 0xA6 /* Power management config clear */ 148#define VGE_PWRCFGC 0xA6 /* Power management config clear */
149#define VGE_WOLCFGC 0xA7 /* WOL config clear */ 149#define VGE_WOLCFGC 0xA7 /* WOL config clear */
150#define VGE_WOLSR0S 0xA8 /* WOL status set */ 150#define VGE_WOLSR0S 0xA8 /* WOL status set */
151#define VGE_WOLSR1S 0xA9 /* WOL status set */ 151#define VGE_WOLSR1S 0xA9 /* WOL status set */
152#define VGE_WOLSR0C 0xAC /* WOL status clear */ 152#define VGE_WOLSR0C 0xAC /* WOL status clear */
153#define VGE_WOLSR1C 0xAD /* WOL status clear */ 153#define VGE_WOLSR1C 0xAD /* WOL status clear */
154#define VGE_WAKEPAT_CRC0 0xB0 154#define VGE_WAKEPAT_CRC0 0xB0
155#define VGE_WAKEPAT_CRC1 0xB2 155#define VGE_WAKEPAT_CRC1 0xB2
156#define VGE_WAKEPAT_CRC2 0xB4 156#define VGE_WAKEPAT_CRC2 0xB4
157#define VGE_WAKEPAT_CRC3 0xB6 157#define VGE_WAKEPAT_CRC3 0xB6
158#define VGE_WAKEPAT_CRC4 0xB8 158#define VGE_WAKEPAT_CRC4 0xB8
159#define VGE_WAKEPAT_CRC5 0xBA 159#define VGE_WAKEPAT_CRC5 0xBA
160#define VGE_WAKEPAT_CRC6 0xBC 160#define VGE_WAKEPAT_CRC6 0xBC
161#define VGE_WAKEPAT_CRC7 0xBE 161#define VGE_WAKEPAT_CRC7 0xBE
162#define VGE_WAKEPAT_MSK0_0 0xC0 162#define VGE_WAKEPAT_MSK0_0 0xC0
163#define VGE_WAKEPAT_MSK0_1 0xC4 163#define VGE_WAKEPAT_MSK0_1 0xC4
164#define VGE_WAKEPAT_MSK0_2 0xC8 164#define VGE_WAKEPAT_MSK0_2 0xC8
165#define VGE_WAKEPAT_MSK0_3 0xCC 165#define VGE_WAKEPAT_MSK0_3 0xCC
166#define VGE_WAKEPAT_MSK1_0 0xD0 166#define VGE_WAKEPAT_MSK1_0 0xD0
167#define VGE_WAKEPAT_MSK1_1 0xD4 167#define VGE_WAKEPAT_MSK1_1 0xD4
168#define VGE_WAKEPAT_MSK1_2 0xD8 168#define VGE_WAKEPAT_MSK1_2 0xD8
169#define VGE_WAKEPAT_MSK1_3 0xDC 169#define VGE_WAKEPAT_MSK1_3 0xDC
170#define VGE_WAKEPAT_MSK2_0 0xE0 170#define VGE_WAKEPAT_MSK2_0 0xE0
171#define VGE_WAKEPAT_MSK2_1 0xE4 171#define VGE_WAKEPAT_MSK2_1 0xE4
172#define VGE_WAKEPAT_MSK2_2 0xE8 172#define VGE_WAKEPAT_MSK2_2 0xE8
173#define VGE_WAKEPAT_MSK2_3 0xEC 173#define VGE_WAKEPAT_MSK2_3 0xEC
174#define VGE_WAKEPAT_MSK3_0 0xF0 174#define VGE_WAKEPAT_MSK3_0 0xF0
175#define VGE_WAKEPAT_MSK3_1 0xF4 175#define VGE_WAKEPAT_MSK3_1 0xF4
176#define VGE_WAKEPAT_MSK3_2 0xF8 176#define VGE_WAKEPAT_MSK3_2 0xF8
177#define VGE_WAKEPAT_MSK3_3 0xFC 177#define VGE_WAKEPAT_MSK3_3 0xFC
178 178
179/* Receive control register */ 179/* Receive control register */
180 180
181#define VGE_RXCTL_RX_BADFRAMES 0x01 /* accept CRC error frames */ 181#define VGE_RXCTL_RX_BADFRAMES 0x01 /* accept CRC error frames */
182#define VGE_RXCTL_RX_RUNT 0x02 /* accept runts */ 182#define VGE_RXCTL_RX_RUNT 0x02 /* accept runts */
183#define VGE_RXCTL_RX_MCAST 0x04 /* accept multicasts */ 183#define VGE_RXCTL_RX_MCAST 0x04 /* accept multicasts */
184#define VGE_RXCTL_RX_BCAST 0x08 /* accept broadcasts */ 184#define VGE_RXCTL_RX_BCAST 0x08 /* accept broadcasts */
185#define VGE_RXCTL_RX_PROMISC 0x10 /* promisc mode */ 185#define VGE_RXCTL_RX_PROMISC 0x10 /* promisc mode */
186#define VGE_RXCTL_RX_GIANT 0x20 /* accept VLAN tagged frames */ 186#define VGE_RXCTL_RX_GIANT 0x20 /* accept VLAN tagged frames */
187#define VGE_RXCTL_RX_UCAST 0x40 /* use perfect filtering */ 187#define VGE_RXCTL_RX_UCAST 0x40 /* use perfect filtering */
188#define VGE_RXCTL_RX_SYMERR 0x80 /* accept symbol err packet */ 188#define VGE_RXCTL_RX_SYMERR 0x80 /* accept symbol err packet */
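
Together these bits compose the chip's receive filter; a conservative baseline that accepts perfectly-filtered unicast plus broadcast and multicast, with the error/runt/promiscuous bits left clear, would be programmed roughly like this (a sketch, not the driver's actual init path):

	uint8_t rxctl;

	/* Perfect-filter unicast, plus broadcast and multicast. */
	rxctl = VGE_RXCTL_RX_UCAST | VGE_RXCTL_RX_BCAST | VGE_RXCTL_RX_MCAST;
	CSR_WRITE_1(sc, VGE_RXCTL, rxctl);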
189 189
190/* Transmit control register */ 190/* Transmit control register */
191 191
192#define VGE_TXCTL_LOOPCTL 0x03 /* loopback control */ 192#define VGE_TXCTL_LOOPCTL 0x03 /* loopback control */
193#define VGE_TXCTL_COLLCTL 0x0C /* collision retry control */ 193#define VGE_TXCTL_COLLCTL 0x0C /* collision retry control */
194 194
195#define VGE_TXLOOPCTL_OFF 0x00 195#define VGE_TXLOOPCTL_OFF 0x00
196#define VGE_TXLOOPCTL_MAC_INTERNAL 0x01 196#define VGE_TXLOOPCTL_MAC_INTERNAL 0x01
197#define VGE_TXLOOPCTL_EXTERNAL 0x02 197#define VGE_TXLOOPCTL_EXTERNAL 0x02
198 198
199#define VGE_TXCOLLS_NORMAL 0x00 /* one set of 16 retries */ 199#define VGE_TXCOLLS_NORMAL 0x00 /* one set of 16 retries */
200#define VGE_TXCOLLS_32 0x04 /* two sets of 16 retries */ 200#define VGE_TXCOLLS_32 0x04 /* two sets of 16 retries */
201#define VGE_TXCOLLS_48 0x08 /* three sets of 16 retries */ 201#define VGE_TXCOLLS_48 0x08 /* three sets of 16 retries */
202#define VGE_TXCOLLS_INFINITE 0x0C /* retry forever */ 202#define VGE_TXCOLLS_INFINITE 0x0C /* retry forever */
203 203
204/* Global command register 0 */ 204/* Global command register 0 */
205 205
206#define VGE_CR0_START 0x01 /* start NIC */ 206#define VGE_CR0_START 0x01 /* start NIC */
207#define VGE_CR0_STOP 0x02 /* stop NIC */ 207#define VGE_CR0_STOP 0x02 /* stop NIC */
208#define VGE_CR0_RX_ENABLE 0x04 /* turn on RX engine */ 208#define VGE_CR0_RX_ENABLE 0x04 /* turn on RX engine */
209#define VGE_CR0_TX_ENABLE 0x08 /* turn on TX engine */ 209#define VGE_CR0_TX_ENABLE 0x08 /* turn on TX engine */
210 210
211/* Global command register 1 */ 211/* Global command register 1 */
212 212
213#define VGE_CR1_NOUCAST 0x01 /* disable unicast reception */ 213#define VGE_CR1_NOUCAST 0x01 /* disable unicast reception */
214#define VGE_CR1_NOPOLL 0x08 /* disable RX/TX desc polling */ 214#define VGE_CR1_NOPOLL 0x08 /* disable RX/TX desc polling */
215#define VGE_CR1_TIMER0_ENABLE 0x20 /* enable single shot timer */ 215#define VGE_CR1_TIMER0_ENABLE 0x20 /* enable single shot timer */
216#define VGE_CR1_TIMER1_ENABLE 0x40 /* enable periodic timer */ 216#define VGE_CR1_TIMER1_ENABLE 0x40 /* enable periodic timer */
217#define VGE_CR1_SOFTRESET 0x80 /* software reset */ 217#define VGE_CR1_SOFTRESET 0x80 /* software reset */
218 218
219/* Global command register 2 */ 219/* Global command register 2 */
220 220
221#define VGE_CR2_TXPAUSE_THRESH_LO 0x03 /* TX pause frame lo threshold */ 221#define VGE_CR2_TXPAUSE_THRESH_LO 0x03 /* TX pause frame lo threshold */
222#define VGE_CR2_TXPAUSE_THRESH_HI 0x0C /* TX pause frame hi threshold */ 222#define VGE_CR2_TXPAUSE_THRESH_HI 0x0C /* TX pause frame hi threshold */
223#define VGE_CR2_HDX_FLOWCTL_ENABLE 0x10 /* half duplex flow control */ 223#define VGE_CR2_HDX_FLOWCTL_ENABLE 0x10 /* half duplex flow control */
224#define VGE_CR2_FDX_RXFLOWCTL_ENABLE 0x20 /* full duplex RX flow control */ 224#define VGE_CR2_FDX_RXFLOWCTL_ENABLE 0x20 /* full duplex RX flow control */
225#define VGE_CR2_FDX_TXFLOWCTL_ENABLE 0x40 /* full duplex TX flow control */ 225#define VGE_CR2_FDX_TXFLOWCTL_ENABLE 0x40 /* full duplex TX flow control */
226#define VGE_CR2_XON_ENABLE 0x80 /* 802.3x XON/XOFF flow control */ 226#define VGE_CR2_XON_ENABLE 0x80 /* 802.3x XON/XOFF flow control */
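
The CRS0-CRS3/CRC0-CRC3 pairs at 0x08-0x0F (see the register map above) are write-to-set/write-to-clear, so command bits like these can be flipped without a read-modify-write cycle; for instance, full-duplex flow control could be toggled roughly as follows (a sketch):

	/* Enable full-duplex flow control plus 802.3x XON/XOFF. */
	CSR_WRITE_1(sc, VGE_CRS2, VGE_CR2_FDX_RXFLOWCTL_ENABLE |
	    VGE_CR2_FDX_TXFLOWCTL_ENABLE | VGE_CR2_XON_ENABLE);
	/* ...and write the same bits to the clear side to disable it. */
	CSR_WRITE_1(sc, VGE_CRC2, VGE_CR2_FDX_RXFLOWCTL_ENABLE |
	    VGE_CR2_FDX_TXFLOWCTL_ENABLE | VGE_CR2_XON_ENABLE);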
227 227
228/* Global command register 3 */ 228/* Global command register 3 */
229 229
230#define VGE_CR3_INT_SWPEND 0x01 /* disable multi-level int bits */ 230#define VGE_CR3_INT_SWPEND 0x01 /* disable multi-level int bits */
231#define VGE_CR3_INT_GMSK 0x02 /* mask off all interrupts */ 231#define VGE_CR3_INT_GMSK 0x02 /* mask off all interrupts */
232#define VGE_CR3_INT_HOLDOFF 0x04 /* enable int hold off timer */ 232#define VGE_CR3_INT_HOLDOFF 0x04 /* enable int hold off timer */
233#define VGE_CR3_DIAG 0x10 /* diagnostic enabled */ 233#define VGE_CR3_DIAG 0x10 /* diagnostic enabled */
234#define VGE_CR3_PHYRST 0x20 /* assert PHYRSTZ */ 234#define VGE_CR3_PHYRST 0x20 /* assert PHYRSTZ */
235#define VGE_CR3_STOP_FORCE 0x40 /* force NIC to stopped state */ 235#define VGE_CR3_STOP_FORCE 0x40 /* force NIC to stopped state */
236 236
237/* Interrupt control register */ 237/* Interrupt control register */
238 238
239#define VGE_INTCTL_SC_RELOAD 0x01 /* reload hold timer */ 239#define VGE_INTCTL_SC_RELOAD 0x01 /* reload hold timer */
240#define VGE_INTCTL_HC_RELOAD 0x02 /* enable hold timer reload */ 240#define VGE_INTCTL_HC_RELOAD 0x02 /* enable hold timer reload */
241#define VGE_INTCTL_STATUS 0x04 /* interrupt pending status */ 241#define VGE_INTCTL_STATUS 0x04 /* interrupt pending status */
242#define VGE_INTCTL_MASK 0x18 /* multilayer int mask */ 242#define VGE_INTCTL_MASK 0x18 /* multilayer int mask */
243#define VGE_INTCTL_RXINTSUP_DISABLE 0x20 /* disable RX int suppression */ 243#define VGE_INTCTL_RXINTSUP_DISABLE 0x20 /* disable RX int suppression */
244#define VGE_INTCTL_TXINTSUP_DISABLE 0x40 /* disable TX int suppression */ 244#define VGE_INTCTL_TXINTSUP_DISABLE 0x40 /* disable TX int suppression */
245#define VGE_INTCTL_SOFTINT 0x80 /* request soft interrupt */ 245#define VGE_INTCTL_SOFTINT 0x80 /* request soft interrupt */
246 246
247#define VGE_INTMASK_LAYER0 0x00 247#define VGE_INTMASK_LAYER0 0x00
248#define VGE_INTMASK_LAYER1 0x08 248#define VGE_INTMASK_LAYER1 0x08
249#define VGE_INTMASK_ALL 0x10 249#define VGE_INTMASK_ALL 0x10
250#define VGE_INTMASK_ALL2 0x18 250#define VGE_INTMASK_ALL2 0x18
251 251
252/* Transmit host error status register */ 252/* Transmit host error status register */
253 253
254#define VGE_TXHOSTERR_TDSTRUCT 0x01 /* bad TX desc structure */ 254#define VGE_TXHOSTERR_TDSTRUCT 0x01 /* bad TX desc structure */
255#define VGE_TXHOSTERR_TDFETCH_BUSERR 0x02 /* bus error on desc fetch */ 255#define VGE_TXHOSTERR_TDFETCH_BUSERR 0x02 /* bus error on desc fetch */
256#define VGE_TXHOSTERR_TDWBACK_BUSERR 0x04 /* bus error on desc writeback */ 256#define VGE_TXHOSTERR_TDWBACK_BUSERR 0x04 /* bus error on desc writeback */
257#define VGE_TXHOSTERR_FIFOERR 0x08 /* TX FIFO DMA bus error */ 257#define VGE_TXHOSTERR_FIFOERR 0x08 /* TX FIFO DMA bus error */
258 258
259/* Receive host error status register */ 259/* Receive host error status register */
260 260
261#define VGE_RXHOSTERR_RDSTRUCT 0x01 /* bad RX desc structure */ 261#define VGE_RXHOSTERR_RDSTRUCT 0x01 /* bad RX desc structure */
262#define VGE_RXHOSTERR_RDFETCH_BUSERR 0x02 /* bus error on desc fetch */ 262#define VGE_RXHOSTERR_RDFETCH_BUSERR 0x02 /* bus error on desc fetch */
263#define VGE_RXHOSTERR_RDWBACK_BUSERR 0x04 /* bus error on desc writeback */ 263#define VGE_RXHOSTERR_RDWBACK_BUSERR 0x04 /* bus error on desc writeback */
264#define VGE_RXHOSTERR_FIFOERR 0x08 /* RX FIFO DMA bus error */ 264#define VGE_RXHOSTERR_FIFOERR 0x08 /* RX FIFO DMA bus error */
265 265
266/* Interrupt status register */ 266/* Interrupt status register */
267 267
268#define VGE_ISR_RXOK_HIPRIO 0x00000001 /* hi prio RX int */ 268#define VGE_ISR_RXOK_HIPRIO 0x00000001 /* hi prio RX int */
269#define VGE_ISR_TXOK_HIPRIO 0x00000002 /* hi prio TX int */ 269#define VGE_ISR_TXOK_HIPRIO 0x00000002 /* hi prio TX int */
270#define VGE_ISR_RXOK 0x00000004 /* normal RX done */ 270#define VGE_ISR_RXOK 0x00000004 /* normal RX done */
271#define VGE_ISR_TXOK 0x00000008 /* combo results for next 4 bits */ 271#define VGE_ISR_TXOK 0x00000008 /* combo results for next 4 bits */
272#define VGE_ISR_TXOK0 0x00000010 /* TX complete on queue 0 */ 272#define VGE_ISR_TXOK0 0x00000010 /* TX complete on queue 0 */
273#define VGE_ISR_TXOK1 0x00000020 /* TX complete on queue 1 */ 273#define VGE_ISR_TXOK1 0x00000020 /* TX complete on queue 1 */
274#define VGE_ISR_TXOK2 0x00000040 /* TX complete on queue 2 */ 274#define VGE_ISR_TXOK2 0x00000040 /* TX complete on queue 2 */
275#define VGE_ISR_TXOK3 0x00000080 /* TX complete on queue 3 */ 275#define VGE_ISR_TXOK3 0x00000080 /* TX complete on queue 3 */
276#define VGE_ISR_RXCNTOFLOW 0x00000400 /* RX packet count overflow */ 276#define VGE_ISR_RXCNTOFLOW 0x00000400 /* RX packet count overflow */
277#define VGE_ISR_RXPAUSE 0x00000800 /* pause frame RX'ed */ 277#define VGE_ISR_RXPAUSE 0x00000800 /* pause frame RX'ed */
278#define VGE_ISR_RXOFLOW 0x00001000 /* RX FIFO overflow */ 278#define VGE_ISR_RXOFLOW 0x00001000 /* RX FIFO overflow */
279#define VGE_ISR_RXNODESC 0x00002000 /* ran out of RX descriptors */ 279#define VGE_ISR_RXNODESC 0x00002000 /* ran out of RX descriptors */
280#define VGE_ISR_RXNODESC_WARN 0x00004000 /* running out of RX descs */ 280#define VGE_ISR_RXNODESC_WARN 0x00004000 /* running out of RX descs */
281#define VGE_ISR_LINKSTS 0x00008000 /* link status change */ 281#define VGE_ISR_LINKSTS 0x00008000 /* link status change */
282#define VGE_ISR_TIMER0 0x00010000 /* one shot timer expired */ 282#define VGE_ISR_TIMER0 0x00010000 /* one shot timer expired */
283#define VGE_ISR_TIMER1 0x00020000 /* periodic timer expired */ 283#define VGE_ISR_TIMER1 0x00020000 /* periodic timer expired */
284#define VGE_ISR_PWR 0x00040000 /* wake up power event */ 284#define VGE_ISR_PWR 0x00040000 /* wake up power event */
285#define VGE_ISR_PHYINT 0x00080000 /* PHY interrupt */ 285#define VGE_ISR_PHYINT 0x00080000 /* PHY interrupt */
286#define VGE_ISR_STOPPED 0x00100000 /* software shutdown complete */ 286#define VGE_ISR_STOPPED 0x00100000 /* software shutdown complete */
287#define VGE_ISR_MIBOFLOW 0x00200000 /* MIB counter overflow warning */ 287#define VGE_ISR_MIBOFLOW 0x00200000 /* MIB counter overflow warning */
288#define VGE_ISR_SOFTINT 0x00400000 /* software interrupt */ 288#define VGE_ISR_SOFTINT 0x00400000 /* software interrupt */
289#define VGE_ISR_HOLDOFF_RELOAD 0x00800000 /* reload hold timer */ 289#define VGE_ISR_HOLDOFF_RELOAD 0x00800000 /* reload hold timer */
290#define VGE_ISR_RXDMA_STALL 0x01000000 /* RX DMA stall */ 290#define VGE_ISR_RXDMA_STALL 0x01000000 /* RX DMA stall */
 291#define VGE_ISR_TXDMA_STALL 0x02000000 /* TX DMA stall */ 291#define VGE_ISR_TXDMA_STALL 0x02000000 /* TX DMA stall */
292#define VGE_ISR_ISRC0 0x10000000 /* interrupt source indication */ 292#define VGE_ISR_ISRC0 0x10000000 /* interrupt source indication */
293#define VGE_ISR_ISRC1 0x20000000 /* interrupt source indication */ 293#define VGE_ISR_ISRC1 0x20000000 /* interrupt source indication */
294#define VGE_ISR_ISRC2 0x40000000 /* interrupt source indication */ 294#define VGE_ISR_ISRC2 0x40000000 /* interrupt source indication */
295#define VGE_ISR_ISRC3 0x80000000 /* interrupt source indication */ 295#define VGE_ISR_ISRC3 0x80000000 /* interrupt source indication */
296 296
297#define VGE_INTRS (VGE_ISR_TXOK0|VGE_ISR_RXOK|VGE_ISR_STOPPED| \ 297#define VGE_INTRS (VGE_ISR_TXOK0|VGE_ISR_RXOK|VGE_ISR_STOPPED| \
298 VGE_ISR_RXOFLOW|VGE_ISR_PHYINT| \ 298 VGE_ISR_RXOFLOW|VGE_ISR_PHYINT| \
299 VGE_ISR_LINKSTS|VGE_ISR_RXNODESC| \ 299 VGE_ISR_LINKSTS|VGE_ISR_RXNODESC| \
300 VGE_ISR_RXDMA_STALL|VGE_ISR_TXDMA_STALL| \ 300 VGE_ISR_RXDMA_STALL|VGE_ISR_TXDMA_STALL| \
301 VGE_ISR_MIBOFLOW|VGE_ISR_TIMER0) 301 VGE_ISR_MIBOFLOW|VGE_ISR_TIMER0)
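
vge_stop() in the if_vge.c diff above writes all-ones to VGE_ISR, which is consistent with write-one-to-clear status bits. Under that assumption, and assuming a CSR_READ_4 counterpart to the CSR_WRITE_4 used there, an interrupt-service skeleton built on VGE_INTRS would look roughly like this (a sketch; the driver's real vge_intr() is not part of this diff):

	uint32_t isr;

	isr = CSR_READ_4(sc, VGE_ISR);
	if ((isr & VGE_INTRS) == 0)
		return 0;			/* not our interrupt */
	CSR_WRITE_4(sc, VGE_ISR, isr);		/* ack: assumed W1C */
	if (isr & (VGE_ISR_RXOK | VGE_ISR_RXOK_HIPRIO))
		vge_rxeof(sc);			/* drain completed RX */
	if (isr & VGE_ISR_TXOK0)
		vge_txeof(sc);			/* reap completed TX */
	return 1;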
302 302
303/* Interrupt mask register */ 303/* Interrupt mask register */
304 304
305#define VGE_IMR_RXOK_HIPRIO 0x00000001 /* hi prio RX int */ 305#define VGE_IMR_RXOK_HIPRIO 0x00000001 /* hi prio RX int */
306#define VGE_IMR_TXOK_HIPRIO 0x00000002 /* hi prio TX int */ 306#define VGE_IMR_TXOK_HIPRIO 0x00000002 /* hi prio TX int */
307#define VGE_IMR_RXOK 0x00000004 /* normal RX done */ 307#define VGE_IMR_RXOK 0x00000004 /* normal RX done */
308#define VGE_IMR_TXOK 0x00000008 /* combo results for next 4 bits */ 308#define VGE_IMR_TXOK 0x00000008 /* combo results for next 4 bits */
309#define VGE_IMR_TXOK0 0x00000010 /* TX complete on queue 0 */ 309#define VGE_IMR_TXOK0 0x00000010 /* TX complete on queue 0 */
310#define VGE_IMR_TXOK1 0x00000020 /* TX complete on queue 1 */ 310#define VGE_IMR_TXOK1 0x00000020 /* TX complete on queue 1 */
311#define VGE_IMR_TXOK2 0x00000040 /* TX complete on queue 2 */ 311#define VGE_IMR_TXOK2 0x00000040 /* TX complete on queue 2 */
312#define VGE_IMR_TXOK3 0x00000080 /* TX complete on queue 3 */ 312#define VGE_IMR_TXOK3 0x00000080 /* TX complete on queue 3 */
313#define VGE_IMR_RXCNTOFLOW 0x00000400 /* RX packet count overflow */ 313#define VGE_IMR_RXCNTOFLOW 0x00000400 /* RX packet count overflow */
314#define VGE_IMR_RXPAUSE 0x00000800 /* pause frame RX'ed */ 314#define VGE_IMR_RXPAUSE 0x00000800 /* pause frame RX'ed */
315#define VGE_IMR_RXOFLOW 0x00001000 /* RX FIFO overflow */ 315#define VGE_IMR_RXOFLOW 0x00001000 /* RX FIFO overflow */
316#define VGE_IMR_RXNODESC 0x00002000 /* ran out of RX descriptors */ 316#define VGE_IMR_RXNODESC 0x00002000 /* ran out of RX descriptors */
317#define VGE_IMR_RXNODESC_WARN 0x00004000 /* running out of RX descs */ 317#define VGE_IMR_RXNODESC_WARN 0x00004000 /* running out of RX descs */
318#define VGE_IMR_LINKSTS 0x00008000 /* link status change */ 318#define VGE_IMR_LINKSTS 0x00008000 /* link status change */
319#define VGE_IMR_TIMER0 0x00010000 /* one shot timer expired */ 319#define VGE_IMR_TIMER0 0x00010000 /* one shot timer expired */
320#define VGE_IMR_TIMER1 0x00020000 /* periodic timer expired */ 320#define VGE_IMR_TIMER1 0x00020000 /* periodic timer expired */
321#define VGE_IMR_PWR 0x00040000 /* wake up power event */ 321#define VGE_IMR_PWR 0x00040000 /* wake up power event */
322#define VGE_IMR_PHYINT 0x00080000 /* PHY interrupt */ 322#define VGE_IMR_PHYINT 0x00080000 /* PHY interrupt */
323#define VGE_IMR_STOPPED 0x00100000 /* software shutdown complete */ 323#define VGE_IMR_STOPPED 0x00100000 /* software shutdown complete */
324#define VGE_IMR_MIBOFLOW 0x00200000 /* MIB counter overflow warning */ 324#define VGE_IMR_MIBOFLOW 0x00200000 /* MIB counter overflow warning */
325#define VGE_IMR_SOFTINT 0x00400000 /* software interrupt */ 325#define VGE_IMR_SOFTINT 0x00400000 /* software interrupt */
326#define VGE_IMR_HOLDOFF_RELOAD 0x00800000 /* reload hold timer */ 326#define VGE_IMR_HOLDOFF_RELOAD 0x00800000 /* reload hold timer */
327#define VGE_IMR_RXDMA_STALL 0x01000000 /* RX DMA stall */ 327#define VGE_IMR_RXDMA_STALL 0x01000000 /* RX DMA stall */
 328#define VGE_IMR_TXDMA_STALL 0x02000000 /* TX DMA stall */ 328#define VGE_IMR_TXDMA_STALL 0x02000000 /* TX DMA stall */
329#define VGE_IMR_ISRC0 0x10000000 /* interrupt source indication */ 329#define VGE_IMR_ISRC0 0x10000000 /* interrupt source indication */
330#define VGE_IMR_ISRC1 0x20000000 /* interrupt source indication */ 330#define VGE_IMR_ISRC1 0x20000000 /* interrupt source indication */
331#define VGE_IMR_ISRC2 0x40000000 /* interrupt source indication */ 331#define VGE_IMR_ISRC2 0x40000000 /* interrupt source indication */
332#define VGE_IMR_ISRC3 0x80000000 /* interrupt source indication */ 332#define VGE_IMR_ISRC3 0x80000000 /* interrupt source indication */
333 333
334/* TX descriptor queue control/status register */ 334/* TX descriptor queue control/status register */
335 335
336#define VGE_TXQCSR_RUN0 0x0001 /* Enable TX queue 0 */ 336#define VGE_TXQCSR_RUN0 0x0001 /* Enable TX queue 0 */
337#define VGE_TXQCSR_ACT0 0x0002 /* queue 0 active indicator */ 337#define VGE_TXQCSR_ACT0 0x0002 /* queue 0 active indicator */
338#define VGE_TXQCSR_WAK0 0x0004 /* Wake up (poll) queue 0 */ 338#define VGE_TXQCSR_WAK0 0x0004 /* Wake up (poll) queue 0 */
339#define VGE_TXQCST_DEAD0 0x0008 /* queue 0 dead indicator */ 339#define VGE_TXQCST_DEAD0 0x0008 /* queue 0 dead indicator */
340#define VGE_TXQCSR_RUN1 0x0010 /* Enable TX queue 1 */ 340#define VGE_TXQCSR_RUN1 0x0010 /* Enable TX queue 1 */
341#define VGE_TXQCSR_ACT1 0x0020 /* queue 1 active indicator */ 341#define VGE_TXQCSR_ACT1 0x0020 /* queue 1 active indicator */
342#define VGE_TXQCSR_WAK1 0x0040 /* Wake up (poll) queue 1 */ 342#define VGE_TXQCSR_WAK1 0x0040 /* Wake up (poll) queue 1 */
343#define VGE_TXQCST_DEAD1 0x0080 /* queue 1 dead indicator */ 343#define VGE_TXQCST_DEAD1 0x0080 /* queue 1 dead indicator */
344#define VGE_TXQCSR_RUN2 0x0100 /* Enable TX queue 2 */ 344#define VGE_TXQCSR_RUN2 0x0100 /* Enable TX queue 2 */
345#define VGE_TXQCSR_ACT2 0x0200 /* queue 2 active indicator */ 345#define VGE_TXQCSR_ACT2 0x0200 /* queue 2 active indicator */
346#define VGE_TXQCSR_WAK2 0x0400 /* Wake up (poll) queue 2 */ 346#define VGE_TXQCSR_WAK2 0x0400 /* Wake up (poll) queue 2 */
347#define VGE_TXQCST_DEAD2 0x0800 /* queue 2 dead indicator */ 347#define VGE_TXQCST_DEAD2 0x0800 /* queue 2 dead indicator */
348#define VGE_TXQCSR_RUN3 0x1000 /* Enable TX queue 3 */ 348#define VGE_TXQCSR_RUN3 0x1000 /* Enable TX queue 3 */
349#define VGE_TXQCSR_ACT3 0x2000 /* queue 3 active indicator */ 349#define VGE_TXQCSR_ACT3 0x2000 /* queue 3 active indicator */
350#define VGE_TXQCSR_WAK3 0x4000 /* Wake up (poll) queue 3 */ 350#define VGE_TXQCSR_WAK3 0x4000 /* Wake up (poll) queue 3 */
351#define VGE_TXQCST_DEAD3 0x8000 /* queue 3 dead indicator */ 351#define VGE_TXQCST_DEAD3 0x8000 /* queue 3 dead indicator */
352 352
353/* RX descriptor queue control/status register */ 353/* RX descriptor queue control/status register */
354 354
355#define VGE_RXQCSR_RUN 0x0001 /* Enable RX queue */ 355#define VGE_RXQCSR_RUN 0x0001 /* Enable RX queue */
356#define VGE_RXQCSR_ACT 0x0002 /* queue active indicator */ 356#define VGE_RXQCSR_ACT 0x0002 /* queue active indicator */
357#define VGE_RXQCSR_WAK 0x0004 /* Wake up (poll) queue */ 357#define VGE_RXQCSR_WAK 0x0004 /* Wake up (poll) queue */
358#define VGE_RXQCSR_DEAD 0x0008 /* queue dead indicator */ 358#define VGE_RXQCSR_DEAD 0x0008 /* queue dead indicator */
359 359
360/* RX/TX queue empty interrupt delay timer register */ 360/* RX/TX queue empty interrupt delay timer register */
361 361
362#define VGE_QTIMER_PENDCNT 0x3F 362#define VGE_QTIMER_PENDCNT 0x3F
363#define VGE_QTIMER_RESOLUTION 0xC0 363#define VGE_QTIMER_RESOLUTION 0xC0
364 364
365#define VGE_QTIMER_RES_1US 0x00 365#define VGE_QTIMER_RES_1US 0x00
366#define VGE_QTIMER_RES_4US 0x40 366#define VGE_QTIMER_RES_4US 0x40
367#define VGE_QTIMER_RES_16US 0x80 367#define VGE_QTIMER_RES_16US 0x80
368#define VGE_QTIMER_RES_64US 0xC0 368#define VGE_QTIMER_RES_64US 0xC0
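
VGE_RXQTIMER/VGE_TXQTIMER (0x3E/0x3F in the map above) pack a 6-bit pending count next to this 2-bit resolution field, so the moderation delay is presumably count times resolution. Assuming that reading, roughly 100us of RX interrupt holdoff could be requested as follows (a sketch):

	/* 25 ticks at 4us/tick ~= 100us, assuming delay = count * resolution. */
	CSR_WRITE_1(sc, VGE_RXQTIMER,
	    VGE_QTIMER_RES_4US | (25 & VGE_QTIMER_PENDCNT));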
369 369
370/* CAM address register */ 370/* CAM address register */
371 371
372#define VGE_CAMADDR_ADDR 0x3F /* CAM address to program */ 372#define VGE_CAMADDR_ADDR 0x3F /* CAM address to program */
373#define VGE_CAMADDR_AVSEL 0x40 /* 0 = address cam, 1 = VLAN cam */ 373#define VGE_CAMADDR_AVSEL 0x40 /* 0 = address cam, 1 = VLAN cam */
374#define VGE_CAMADDR_ENABLE 0x80 /* enable CAM read/write */ 374#define VGE_CAMADDR_ENABLE 0x80 /* enable CAM read/write */
375 375
376#define VGE_CAM_MAXADDRS 64 376#define VGE_CAM_MAXADDRS 64
377 377
378/* 378/*
379 * CAM command register 379 * CAM command register
380 * Note that the page select bits in this register affect three 380 * Note that the page select bits in this register affect three
381 * different things: 381 * different things:
382 * - The behavior of the MAR0/MAR1 registers at offset 0x10 (the 382 * - The behavior of the MAR0/MAR1 registers at offset 0x10 (the
383 * page select bits control whether the MAR0/MAR1 registers affect 383 * page select bits control whether the MAR0/MAR1 registers affect
384 * the multicast hash filter or the CAM table) 384 * the multicast hash filter or the CAM table)
385 * - The behavior of the interrupt holdoff timer register at offset 385 * - The behavior of the interrupt holdoff timer register at offset
386 * 0x20 (the page select bits allow you to set the interrupt 386 * 0x20 (the page select bits allow you to set the interrupt
387 * holdoff timer, the TX interrupt suppression count or the 387 * holdoff timer, the TX interrupt suppression count or the
388 * RX interrupt suppression count) 388 * RX interrupt suppression count)
 389 * - The behavior of the WOL pattern programming registers at offset 389 * - The behavior of the WOL pattern programming registers at offset
390 * 0xC0 (controls which pattern is set) 390 * 0xC0 (controls which pattern is set)
391 */ 391 */
392 392
393 393
394#define VGE_CAMCTL_WRITE 0x04 /* CAM write command */ 394#define VGE_CAMCTL_WRITE 0x04 /* CAM write command */
395#define VGE_CAMCTL_READ 0x08 /* CAM read command */ 395#define VGE_CAMCTL_READ 0x08 /* CAM read command */
396#define VGE_CAMCTL_INTPKT_SIZ 0x10 /* select interesting pkt CAM size */ 396#define VGE_CAMCTL_INTPKT_SIZ 0x10 /* select interesting pkt CAM size */
397#define VGE_CAMCTL_INTPKT_ENB 0x20 /* enable interesting packet mode */ 397#define VGE_CAMCTL_INTPKT_ENB 0x20 /* enable interesting packet mode */
398#define VGE_CAMCTL_PAGESEL 0xC0 /* page select */ 398#define VGE_CAMCTL_PAGESEL 0xC0 /* page select */
399 399
400#define VGE_PAGESEL_MAR 0x00 400#define VGE_PAGESEL_MAR 0x00
401#define VGE_PAGESEL_CAMMASK 0x40 401#define VGE_PAGESEL_CAMMASK 0x40
402#define VGE_PAGESEL_CAMDATA 0x80 402#define VGE_PAGESEL_CAMDATA 0x80
403 403
404#define VGE_PAGESEL_INTHLDOFF 0x00 404#define VGE_PAGESEL_INTHLDOFF 0x00
405#define VGE_PAGESEL_TXSUPPTHR 0x40 405#define VGE_PAGESEL_TXSUPPTHR 0x40
406#define VGE_PAGESEL_RXSUPPTHR 0x80 406#define VGE_PAGESEL_RXSUPPTHR 0x80
407 407
408#define VGE_PAGESEL_WOLPAT0 0x00 408#define VGE_PAGESEL_WOLPAT0 0x00
409#define VGE_PAGESEL_WOLPAT1 0x40 409#define VGE_PAGESEL_WOLPAT1 0x40
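
Because the VGE_CAMCTL page-select bits gate what the MAR0/MAR1 window at 0x10 means, every CAM access has to select a page first and restore the default multicast-hash page afterwards. A hedged sketch of loading one address-CAM entry, following the usual vge driver sequence ('idx' and 'addr' are hypothetical parameters):

	int i;

	/* Select the CAM data page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);

	/* Point at entry 'idx' and unlock the CAM for access. */
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE | idx);

	/* Load the six address bytes, then latch them with a write command. */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);

	/* Return to the default multicast-hash (MAR) page. */
	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);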
410 410
411/* MII port config register */ 411/* MII port config register */
412 412
413#define VGE_MIICFG_PHYADDR 0x1F /* PHY address (internal PHY is 1) */ 413#define VGE_MIICFG_PHYADDR 0x1F /* PHY address (internal PHY is 1) */
414#define VGE_MIICFG_MDCSPEED 0x20 /* MDC accelerate x 4 */ 414#define VGE_MIICFG_MDCSPEED 0x20 /* MDC accelerate x 4 */
415#define VGE_MIICFG_POLLINT 0xC0 /* polling interval */ 415#define VGE_MIICFG_POLLINT 0xC0 /* polling interval */
416 416
417#define VGE_MIIPOLLINT_1024 0x00 417#define VGE_MIIPOLLINT_1024 0x00
418#define VGE_MIIPOLLINT_512 0x40 418#define VGE_MIIPOLLINT_512 0x40
419#define VGE_MIIPOLLINT_128 0x80 419#define VGE_MIIPOLLINT_128 0x80
420#define VGE_MIIPOLLINT_64 0xC0 420#define VGE_MIIPOLLINT_64 0xC0
421 421
422/* MII port status register */ 422/* MII port status register */
423 423
 424#define VGE_MIISTS_IIDL 0x80 /* not at software/timer poll cycle */ 424#define VGE_MIISTS_IIDL 0x80 /* not at software/timer poll cycle */
425 425
426/* PHY status register */ 426/* PHY status register */
427 427
428#define VGE_PHYSTS_TXFLOWCAP 0x01 /* resolved TX flow control cap */ 428#define VGE_PHYSTS_TXFLOWCAP 0x01 /* resolved TX flow control cap */
429#define VGE_PHYSTS_RXFLOWCAP 0x02 /* resolved RX flow control cap */ 429#define VGE_PHYSTS_RXFLOWCAP 0x02 /* resolved RX flow control cap */
430#define VGE_PHYSTS_SPEED10 0x04 /* PHY in 10Mbps mode */ 430#define VGE_PHYSTS_SPEED10 0x04 /* PHY in 10Mbps mode */
431#define VGE_PHYSTS_SPEED1000 0x08 /* PHY in giga mode */ 431#define VGE_PHYSTS_SPEED1000 0x08 /* PHY in giga mode */
432#define VGE_PHYSTS_FDX 0x10 /* PHY in full duplex mode */ 432#define VGE_PHYSTS_FDX 0x10 /* PHY in full duplex mode */
433#define VGE_PHYSTS_LINK 0x40 /* link status */ 433#define VGE_PHYSTS_LINK 0x40 /* link status */
434#define VGE_PHYSTS_RESETSTS 0x80 /* reset status */ 434#define VGE_PHYSTS_RESETSTS 0x80 /* reset status */
435 435
436/* MII management command register */ 436/* MII management command register */
437 437
438#define VGE_MIICMD_MDC 0x01 /* clock pin */ 438#define VGE_MIICMD_MDC 0x01 /* clock pin */
439#define VGE_MIICMD_MDI 0x02 /* data in pin */ 439#define VGE_MIICMD_MDI 0x02 /* data in pin */
440#define VGE_MIICMD_MDO 0x04 /* data out pin */ 440#define VGE_MIICMD_MDO 0x04 /* data out pin */
441#define VGE_MIICMD_MOUT 0x08 /* data out pin enable */ 441#define VGE_MIICMD_MOUT 0x08 /* data out pin enable */
442#define VGE_MIICMD_MDP 0x10 /* enable direct programming mode */ 442#define VGE_MIICMD_MDP 0x10 /* enable direct programming mode */
443#define VGE_MIICMD_WCMD 0x20 /* embedded mode write */ 443#define VGE_MIICMD_WCMD 0x20 /* embedded mode write */
 444#define VGE_MIICMD_RCMD 0x40 /* embedded mode read */ 444#define VGE_MIICMD_RCMD 0x40 /* embedded mode read */
445#define VGE_MIICMD_MAUTO 0x80 /* enable autopolling */ 445#define VGE_MIICMD_MAUTO 0x80 /* enable autopolling */
446 446
447/* MII address register */ 447/* MII address register */
448 448
449#define VGE_MIIADDR_SWMPL 0x80 /* initiate priority resolution */ 449#define VGE_MIIADDR_SWMPL 0x80 /* initiate priority resolution */
450 450
451/* Chip config register A */ 451/* Chip config register A */
452 452
453#define VGE_CHIPCFG0_PACPI 0x01 /* pre-ACPI wakeup function */ 453#define VGE_CHIPCFG0_PACPI 0x01 /* pre-ACPI wakeup function */
454#define VGE_CHIPCFG0_ABSHDN 0x02 /* abnormal shutdown function */ 454#define VGE_CHIPCFG0_ABSHDN 0x02 /* abnormal shutdown function */
455#define VGE_CHIPCFG0_GPIO1PD 0x04 /* GPIO pin enable */ 455#define VGE_CHIPCFG0_GPIO1PD 0x04 /* GPIO pin enable */
456#define VGE_CHIPCFG0_SKIPTAG 0x08 /* omit 802.1p tag from CRC calc */ 456#define VGE_CHIPCFG0_SKIPTAG 0x08 /* omit 802.1p tag from CRC calc */
457#define VGE_CHIPCFG0_PHLED 0x30 /* phy LED select */ 457#define VGE_CHIPCFG0_PHLED 0x30 /* phy LED select */
458 458
459/* Chip config register B */ 459/* Chip config register B */
460/* Note: some of these bits are not documented in the manual! */ 460/* Note: some of these bits are not documented in the manual! */
461 461
462#define VGE_CHIPCFG1_BAKOPT 0x01 462#define VGE_CHIPCFG1_BAKOPT 0x01
463#define VGE_CHIPCFG1_MBA 0x02 463#define VGE_CHIPCFG1_MBA 0x02
464#define VGE_CHIPCFG1_CAP 0x04 464#define VGE_CHIPCFG1_CAP 0x04
465#define VGE_CHIPCFG1_CRANDOM 0x08 465#define VGE_CHIPCFG1_CRANDOM 0x08
466#define VGE_CHIPCFG1_OFSET 0x10 466#define VGE_CHIPCFG1_OFSET 0x10
467#define VGE_CHIPCFG1_SLOTTIME 0x20 /* slot time 512/500 in giga mode */ 467#define VGE_CHIPCFG1_SLOTTIME 0x20 /* slot time 512/500 in giga mode */
468#define VGE_CHIPCFG1_MIIOPT 0x40 468#define VGE_CHIPCFG1_MIIOPT 0x40
469#define VGE_CHIPCFG1_GTCKOPT 0x80 469#define VGE_CHIPCFG1_GTCKOPT 0x80
470 470
471/* Chip config register C */ 471/* Chip config register C */
472 472
473#define VGE_CHIPCFG2_EELOAD 0x80 /* enable EEPROM programming */ 473#define VGE_CHIPCFG2_EELOAD 0x80 /* enable EEPROM programming */
474 474
475/* Chip config register D */ 475/* Chip config register D */
476 476
477#define VGE_CHIPCFG3_64BIT_DAC 0x20 /* enable 64bit via DAC */ 477#define VGE_CHIPCFG3_64BIT_DAC 0x20 /* enable 64bit via DAC */
478#define VGE_CHIPCFG3_IODISABLE 0x80 /* disable I/O access mode */ 478#define VGE_CHIPCFG3_IODISABLE 0x80 /* disable I/O access mode */
479 479
480/* DMA config register 0 */ 480/* DMA config register 0 */
481 481
482#define VGE_DMACFG0_BURSTLEN 0x07 /* RX/TX DMA burst (in dwords) */ 482#define VGE_DMACFG0_BURSTLEN 0x07 /* RX/TX DMA burst (in dwords) */
483 483
484#define VGE_DMABURST_8 0x00 484#define VGE_DMABURST_8 0x00
485#define VGE_DMABURST_16 0x01 485#define VGE_DMABURST_16 0x01
486#define VGE_DMABURST_32 0x02 486#define VGE_DMABURST_32 0x02
487#define VGE_DMABURST_64 0x03 487#define VGE_DMABURST_64 0x03
488#define VGE_DMABURST_128 0x04 488#define VGE_DMABURST_128 0x04
489#define VGE_DMABURST_256 0x05 489#define VGE_DMABURST_256 0x05
490#define VGE_DMABURST_STRFWD 0x07 490#define VGE_DMABURST_STRFWD 0x07
491 491
492/* DMA config register 1 */ 492/* DMA config register 1 */
493 493
494#define VGE_DMACFG1_LATENB 0x01 /* Latency timer enable */ 494#define VGE_DMACFG1_LATENB 0x01 /* Latency timer enable */
495#define VGE_DMACFG1_MWWAIT 0x02 /* insert wait on master write */ 495#define VGE_DMACFG1_MWWAIT 0x02 /* insert wait on master write */
496#define VGE_DMACFG1_MRWAIT 0x04 /* insert wait on master read */ 496#define VGE_DMACFG1_MRWAIT 0x04 /* insert wait on master read */
497#define VGE_DMACFG1_MRM 0x08 /* use memory read multiple */ 497#define VGE_DMACFG1_MRM 0x08 /* use memory read multiple */
498#define VGE_DMACFG1_PERR_DIS 0x10 /* disable parity error checking */ 498#define VGE_DMACFG1_PERR_DIS 0x10 /* disable parity error checking */
499#define VGE_DMACFG1_XMRL 0x20 /* disable memory read line support */ 499#define VGE_DMACFG1_XMRL 0x20 /* disable memory read line support */
500 500
501/* RX MAC config register */ 501/* RX MAC config register */
502 502
503#define VGE_RXCFG_VLANFILT 0x01 /* filter VLAN ID mismatches */ 503#define VGE_RXCFG_VLANFILT 0x01 /* filter VLAN ID mismatches */
504#define VGE_RXCFG_VTAGOPT 0x06 /* VLAN tag handling */ 504#define VGE_RXCFG_VTAGOPT 0x06 /* VLAN tag handling */
505#define VGE_RXCFG_FIFO_LOWAT 0x08 /* RX FIFO low watermark (7QW/15QW) */ 505#define VGE_RXCFG_FIFO_LOWAT 0x08 /* RX FIFO low watermark (7QW/15QW) */
506#define VGE_RXCFG_FIFO_THR 0x30 /* RX FIFO threshold */ 506#define VGE_RXCFG_FIFO_THR 0x30 /* RX FIFO threshold */
507#define VGE_RXCFG_ARB_PRIO 0x80 /* arbitration priority */ 507#define VGE_RXCFG_ARB_PRIO 0x80 /* arbitration priority */
508 508
509#define VGE_VTAG_OPT0 0x00 /* TX: no tag insertion 509#define VGE_VTAG_OPT0 0x00 /* TX: no tag insertion
510 RX: rx all, no tag extraction */ 510 RX: rx all, no tag extraction */
511 511
512#define VGE_VTAG_OPT1 0x02 /* TX: no tag insertion 512#define VGE_VTAG_OPT1 0x02 /* TX: no tag insertion
513 RX: rx only tagged pkts, no 513 RX: rx only tagged pkts, no
514 extraction */ 514 extraction */
515 515
516#define VGE_VTAG_OPT2 0x04 /* TX: perform tag insertion, 516#define VGE_VTAG_OPT2 0x04 /* TX: perform tag insertion,
517 RX: rx all, extract tags */ 517 RX: rx all, extract tags */
518 518
519#define VGE_VTAG_OPT3 0x06 /* TX: perform tag insertion, 519#define VGE_VTAG_OPT3 0x06 /* TX: perform tag insertion,
520 RX: rx only tagged pkts, 520 RX: rx only tagged pkts,
521 with extraction */ 521 with extraction */
522 522
523#define VGE_RXFIFOTHR_128BYTES 0x00 523#define VGE_RXFIFOTHR_128BYTES 0x00
524#define VGE_RXFIFOTHR_512BYTES 0x10 524#define VGE_RXFIFOTHR_512BYTES 0x10
525#define VGE_RXFIFOTHR_1024BYTES 0x20 525#define VGE_RXFIFOTHR_1024BYTES 0x20
526#define VGE_RXFIFOTHR_STRNFWD 0x30 526#define VGE_RXFIFOTHR_STRNFWD 0x30
527 527
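
The four VTAG options above pair TX tag insertion with RX tag filtering/extraction, selected by the two-bit VTAGOPT field. A minimal sketch of programming option 2 (insert tags on TX, receive everything and extract tags on RX) follows; it assumes the RX MAC config register offset is named VGE_RXCFG elsewhere in this header and uses the CSR_READ_1/CSR_WRITE_1 accessor macros from if_vge.c:

	/*
	 * Hedged sketch, not the driver's code: select VLAN tag
	 * option 2.  VGE_RXCFG is an assumed register-offset name.
	 */
	static void
	vge_set_vtag_opt2(struct vge_softc *sc)
	{
		uint8_t rxcfg;

		rxcfg = CSR_READ_1(sc, VGE_RXCFG);
		rxcfg &= ~VGE_RXCFG_VTAGOPT;	/* clear the 2-bit field */
		rxcfg |= VGE_VTAG_OPT2;		/* TX: insert, RX: rx all + extract */
		CSR_WRITE_1(sc, VGE_RXCFG, rxcfg);
	}
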
528/* TX MAC config register */ 528/* TX MAC config register */
529 529
530#define VGE_TXCFG_SNAPOPT 0x01 /* 1 == insert VLAN tag at 530#define VGE_TXCFG_SNAPOPT 0x01 /* 1 == insert VLAN tag at
531 13th byte 531 13th byte
 532 0 == insert VLAN tag after 532 0 == insert VLAN tag after
533 SNAP header (21st byte) */ 533 SNAP header (21st byte) */
534#define VGE_TXCFG_NONBLK 0x02 /* priority TX/non-blocking mode */ 534#define VGE_TXCFG_NONBLK 0x02 /* priority TX/non-blocking mode */
535#define VGE_TXCFG_NONBLK_THR 0x0C /* non-blocking threshold */ 535#define VGE_TXCFG_NONBLK_THR 0x0C /* non-blocking threshold */
536#define VGE_TXCFG_ARB_PRIO 0x80 /* arbitration priority */ 536#define VGE_TXCFG_ARB_PRIO 0x80 /* arbitration priority */
537 537
538#define VGE_TXBLOCK_64PKTS 0x00 538#define VGE_TXBLOCK_64PKTS 0x00
539#define VGE_TXBLOCK_32PKTS 0x04 539#define VGE_TXBLOCK_32PKTS 0x04
540#define VGE_TXBLOCK_128PKTS 0x08 540#define VGE_TXBLOCK_128PKTS 0x08
541#define VGE_TXBLOCK_8PKTS 0x0C 541#define VGE_TXBLOCK_8PKTS 0x0C
542 542
 543/* Sticky bit shadow register */
 544
 545#define VGE_STICKHW_DS0 0x01
 546#define VGE_STICKHW_DS1 0x02
 547#define VGE_STICKHW_WOL_ENB 0x04
 548#define VGE_STICKHW_WOL_STS 0x08
 549#define VGE_STICKHW_SWPTAG 0x10
 550
 551/* WOL pattern control */
 552#define VGE_WOLCR0_PATTERN0 0x01
 553#define VGE_WOLCR0_PATTERN1 0x02
 554#define VGE_WOLCR0_PATTERN2 0x04
 555#define VGE_WOLCR0_PATTERN3 0x08
 556#define VGE_WOLCR0_PATTERN4 0x10
 557#define VGE_WOLCR0_PATTERN5 0x20
 558#define VGE_WOLCR0_PATTERN6 0x40
 559#define VGE_WOLCR0_PATTERN7 0x80
 560#define VGE_WOLCR0_PATTERN_ALL 0xFF
 561
 562/* WOL config register */
 563#define VGE_WOLCFG_PHYINT_ENB 0x01
 564#define VGE_WOLCFG_SAB 0x10
 565#define VGE_WOLCFG_SAM 0x20
 566#define VGE_WOLCFG_PMEOVR 0x80
 567
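
These new sticky-bit and WOL definitions are exactly what a routine that clears wake-on-LAN state at attach time needs. A condensed sketch modeled on FreeBSD's vge_clrwol(); VGE_PWRSTAT and the write-to-clear offsets VGE_WOLCR0C, VGE_WOLCR1C, VGE_WOLCFGC, VGE_WOLSR0C and VGE_WOLSR1C are assumed to be defined elsewhere in this header:

	static void
	vge_clrwol(struct vge_softc *sc)
	{
		uint8_t val;

		/* Clear the sticky software pre-tag and power-state bits. */
		val = CSR_READ_1(sc, VGE_PWRSTAT);
		val &= ~VGE_STICKHW_SWPTAG;
		CSR_WRITE_1(sc, VGE_PWRSTAT, val);
		val = CSR_READ_1(sc, VGE_PWRSTAT);
		val &= ~(VGE_STICKHW_DS0 | VGE_STICKHW_DS1);
		CSR_WRITE_1(sc, VGE_PWRSTAT, val);

		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);

		/* Disable WOL on pattern match, magic packet and unicast. */
		CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL);
		CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F);
		CSR_WRITE_1(sc, VGE_WOLCFGC,
		    VGE_WOLCFG_SAB | VGE_WOLCFG_SAM | VGE_WOLCFG_PMEOVR);

		/* Clear any pending WOL status. */
		CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF);
		CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF);
	}
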
543/* EEPROM control/status register */ 568/* EEPROM control/status register */
544 569
545#define VGE_EECSR_EDO 0x01 /* data out pin */ 570#define VGE_EECSR_EDO 0x01 /* data out pin */
546#define VGE_EECSR_EDI 0x02 /* data in pin */ 571#define VGE_EECSR_EDI 0x02 /* data in pin */
547#define VGE_EECSR_ECK 0x04 /* clock pin */ 572#define VGE_EECSR_ECK 0x04 /* clock pin */
548#define VGE_EECSR_ECS 0x08 /* chip select pin */ 573#define VGE_EECSR_ECS 0x08 /* chip select pin */
549#define VGE_EECSR_DPM 0x10 /* direct program mode enable */ 574#define VGE_EECSR_DPM 0x10 /* direct program mode enable */
550#define VGE_EECSR_RELOAD 0x20 /* trigger reload from EEPROM */ 575#define VGE_EECSR_RELOAD 0x20 /* trigger reload from EEPROM */
551#define VGE_EECSR_EMBP 0x40 /* embedded program mode enable */ 576#define VGE_EECSR_EMBP 0x40 /* embedded program mode enable */
552 577
553/* EEPROM embedded command register */ 578/* EEPROM embedded command register */
554 579
555#define VGE_EECMD_ERD 0x01 /* EEPROM read command */ 580#define VGE_EECMD_ERD 0x01 /* EEPROM read command */
556#define VGE_EECMD_EWR 0x02 /* EEPROM write command */ 581#define VGE_EECMD_EWR 0x02 /* EEPROM write command */
557#define VGE_EECMD_EWEN 0x04 /* EEPROM write enable */ 582#define VGE_EECMD_EWEN 0x04 /* EEPROM write enable */
558#define VGE_EECMD_EWDIS 0x08 /* EEPROM write disable */ 583#define VGE_EECMD_EWDIS 0x08 /* EEPROM write disable */
559#define VGE_EECMD_EDONE 0x80 /* read/write done */ 584#define VGE_EECMD_EDONE 0x80 /* read/write done */
560 585
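
Taken together, EELOAD, EMBP and the embedded command bits implement a register-driven EEPROM word read. A hedged sketch of that sequence, modeled on the FreeBSD driver's vge_eeprom_getword(); the function name here is hypothetical, VGE_EECSR, VGE_EEADDR, VGE_EECMD, VGE_EERDDAT and VGE_TIMEOUT are assumed from elsewhere in this header/driver, and error handling is omitted:

	static uint16_t
	vge_ee_getword(struct vge_softc *sc, int addr)
	{
		uint16_t word;
		int i;

		/* Enable EEPROM programming, then embedded (register) mode. */
		CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
		CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP);

		/* Select the word address and issue the read command. */
		CSR_WRITE_1(sc, VGE_EEADDR, addr);
		CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);

		/* Poll the done bit, then fetch the data. */
		for (i = 0; i < VGE_TIMEOUT; i++)
			if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
				break;
		word = CSR_READ_2(sc, VGE_EERDDAT);

		/* Leave embedded programming mode. */
		CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP);
		CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);

		return word;
	}
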
561/* Chip operation and diagnostic control register */ 586/* Chip operation and diagnostic control register */
562 587
563#define VGE_DIAGCTL_PHYINT_ENB 0x01 /* Enable PHY interrupts */ 588#define VGE_DIAGCTL_PHYINT_ENB 0x01 /* Enable PHY interrupts */
564#define VGE_DIAGCTL_TIMER0_RES 0x02 /* timer0 uSec resolution */ 589#define VGE_DIAGCTL_TIMER0_RES 0x02 /* timer0 uSec resolution */
565#define VGE_DIAGCTL_TIMER1_RES 0x04 /* timer1 uSec resolution */ 590#define VGE_DIAGCTL_TIMER1_RES 0x04 /* timer1 uSec resolution */
566#define VGE_DIAGCTL_LPSEL_DIS 0x08 /* disable LPSEL field */ 591#define VGE_DIAGCTL_LPSEL_DIS 0x08 /* disable LPSEL field */
567#define VGE_DIAGCTL_MACFORCE 0x10 /* MAC side force mode */ 592#define VGE_DIAGCTL_MACFORCE 0x10 /* MAC side force mode */
568#define VGE_DIAGCTL_FCRSVD 0x20 /* reserved for future fiber use */ 593#define VGE_DIAGCTL_FCRSVD 0x20 /* reserved for future fiber use */
569#define VGE_DIAGCTL_FDXFORCE 0x40 /* force full duplex mode */ 594#define VGE_DIAGCTL_FDXFORCE 0x40 /* force full duplex mode */
570#define VGE_DIAGCTL_GMII 0x80 /* force GMII mode, otherwise MII */ 595#define VGE_DIAGCTL_GMII 0x80 /* force GMII mode, otherwise MII */
571 596
572/* Location of station address in EEPROM */ 597/* Location of station address in EEPROM */
573#define VGE_EE_EADDR 0 598#define VGE_EE_EADDR 0
574 599
575/* DMA descriptor structures */ 600/* DMA descriptor structures */
576 601
577/* 602/*
578 * Each TX DMA descriptor has a control and status word, and 7 603 * Each TX DMA descriptor has a control and status word, and 7
579 * fragment address/length words. If a transmitted packet spans 604 * fragment address/length words. If a transmitted packet spans
580 * more than 7 fragments, it has to be coalesced. 605 * more than 7 fragments, it has to be coalesced.
581 */ 606 */
582 607
583#define VGE_TX_FRAGS 7 608#define VGE_TX_FRAGS 7
584#define VGE_TX_MAXLEN (1 << 14) /* maximum TX packet size */ 609#define VGE_TX_MAXLEN (1 << 14) /* maximum TX packet size */
585 610
586struct vge_txfrag { 611struct vge_txfrag {
587 volatile uint32_t tf_addrlo; 612 volatile uint32_t tf_addrlo;
588 volatile uint16_t tf_addrhi; 613 volatile uint16_t tf_addrhi;
589 volatile uint16_t tf_buflen; 614 volatile uint16_t tf_buflen;
590}; 615};
591 616
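
Each fragment carries a 48-bit DMA address split across tf_addrlo/tf_addrhi plus a length (14 bits, following VGE_TX_MAXLEN above). A hedged sketch of filling one slot from a bus_dma(9) segment; 'tf' and 'seg' are hypothetical locals, and a chain spanning more than VGE_TX_FRAGS segments would first have to be coalesced into fewer buffers:

	/* Sketch only: split one bus_dma segment into the
	 * address/length layout of struct vge_txfrag. */
	tf->tf_addrlo = htole32((uint32_t)(seg.ds_addr & 0xFFFFFFFF));
	tf->tf_addrhi = htole16((uint16_t)((uint64_t)seg.ds_addr >> 32));
	tf->tf_buflen = htole16((uint16_t)(seg.ds_len & 0x3FFF));
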
592/* 617/*
593 * The high bit in the buflen field of fragment #0 has special meaning. 618 * The high bit in the buflen field of fragment #0 has special meaning.
594 * Normally, the chip requires the driver to issue a TX poll command 619 * Normally, the chip requires the driver to issue a TX poll command
595 * for every packet that gets put in the TX DMA queue. Sometimes though, 620 * for every packet that gets put in the TX DMA queue. Sometimes though,
596 * the driver might want to queue up several packets at once and just 621 * the driver might want to queue up several packets at once and just
597 * issue one transmit command to have all of them processed. In order 622 * issue one transmit command to have all of them processed. In order
598 * to obtain this behavior, the special 'queue' bit must be set. 623 * to obtain this behavior, the special 'queue' bit must be set.
599 */ 624 */
600 625
601#define VGE_TXDESC_Q 0x8000 626#define VGE_TXDESC_Q 0x8000
602 627
603struct vge_txdesc { 628struct vge_txdesc {
604 volatile uint32_t td_sts; 629 volatile uint32_t td_sts;
605 volatile uint32_t td_ctl; 630 volatile uint32_t td_ctl;
606 struct vge_txfrag td_frag[VGE_TX_FRAGS]; 631 struct vge_txfrag td_frag[VGE_TX_FRAGS];
607}; 632};
608 633
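
To batch transmissions as described above, the driver marks fragment 0 of each queued descriptor and issues a single poll afterwards; a one-line sketch ('txd' is a hypothetical struct vge_txdesc pointer):

	/* Sketch: mark this descriptor 'queued' so one later TX poll
	 * command processes it together with its neighbours. */
	txd->td_frag[0].tf_buflen |= htole16(VGE_TXDESC_Q);
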
609#define VGE_TDSTS_COLLCNT 0x0000000F /* TX collision count */ 634#define VGE_TDSTS_COLLCNT 0x0000000F /* TX collision count */
610#define VGE_TDSTS_COLL 0x00000010 /* collision seen */ 635#define VGE_TDSTS_COLL 0x00000010 /* collision seen */
611#define VGE_TDSTS_OWINCOLL 0x00000020 /* out of window collision */ 636#define VGE_TDSTS_OWINCOLL 0x00000020 /* out of window collision */
612#define VGE_TDSTS_OWT 0x00000040 /* jumbo frame tx abort */ 637#define VGE_TDSTS_OWT 0x00000040 /* jumbo frame tx abort */
613#define VGE_TDSTS_EXCESSCOLL 0x00000080 /* TX aborted, excess colls */ 638#define VGE_TDSTS_EXCESSCOLL 0x00000080 /* TX aborted, excess colls */
614#define VGE_TDSTS_HBEATFAIL 0x00000100 /* heartbeat detect failed */ 639#define VGE_TDSTS_HBEATFAIL 0x00000100 /* heartbeat detect failed */
615#define VGE_TDSTS_CARRLOSS 0x00000200 /* carrier sense lost */ 640#define VGE_TDSTS_CARRLOSS 0x00000200 /* carrier sense lost */
616#define VGE_TDSTS_SHUTDOWN 0x00000400 /* shutdown during TX */ 641#define VGE_TDSTS_SHUTDOWN 0x00000400 /* shutdown during TX */
617#define VGE_TDSTS_LINKFAIL 0x00001000 /* link fail during TX */ 642#define VGE_TDSTS_LINKFAIL 0x00001000 /* link fail during TX */
618#define VGE_TDSTS_GMII 0x00002000 /* GMII transmission */ 643#define VGE_TDSTS_GMII 0x00002000 /* GMII transmission */
619#define VGE_TDSTS_FDX 0x00004000 /* full duplex transmit */ 644#define VGE_TDSTS_FDX 0x00004000 /* full duplex transmit */
620#define VGE_TDSTS_TXERR 0x00008000 /* error occurred */ 645#define VGE_TDSTS_TXERR 0x00008000 /* error occurred */
621#define VGE_TDSTS_SEGSIZE 0x3FFF0000 /* TCP large send size */ 646#define VGE_TDSTS_SEGSIZE 0x3FFF0000 /* TCP large send size */
622#define VGE_TDSTS_OWN 0x80000000 /* own bit */ 647#define VGE_TDSTS_OWN 0x80000000 /* own bit */
623 648
624#define VGE_TDCTL_VLANID 0x00000FFF /* VLAN ID */ 649#define VGE_TDCTL_VLANID 0x00000FFF /* VLAN ID */
625#define VGE_TDCTL_CFI 0x00001000 /* VLAN CFI bit */ 650#define VGE_TDCTL_CFI 0x00001000 /* VLAN CFI bit */
626#define VGE_TDCTL_PRIO 0x0000E000 /* VLAN prio bits */ 651#define VGE_TDCTL_PRIO 0x0000E000 /* VLAN prio bits */
627#define VGE_TDCTL_NOCRC 0x00010000 /* disable CRC generation */ 652#define VGE_TDCTL_NOCRC 0x00010000 /* disable CRC generation */
628#define VGE_TDCTL_JUMBO 0x00020000 /* jumbo frame */ 653#define VGE_TDCTL_JUMBO 0x00020000 /* jumbo frame */
629#define VGE_TDCTL_TCPCSUM 0x00040000 /* do TCP hw checksum */ 654#define VGE_TDCTL_TCPCSUM 0x00040000 /* do TCP hw checksum */
630#define VGE_TDCTL_UDPCSUM 0x00080000 /* do UDP hw checksum */ 655#define VGE_TDCTL_UDPCSUM 0x00080000 /* do UDP hw checksum */
631#define VGE_TDCTL_IPCSUM 0x00100000 /* do IP hw checksum */ 656#define VGE_TDCTL_IPCSUM 0x00100000 /* do IP hw checksum */
632#define VGE_TDCTL_VTAG 0x00200000 /* insert VLAN tag */ 657#define VGE_TDCTL_VTAG 0x00200000 /* insert VLAN tag */
633#define VGE_TDCTL_PRIO_INT 0x00400000 /* priority int request */ 658#define VGE_TDCTL_PRIO_INT 0x00400000 /* priority int request */
634#define VGE_TDCTL_TIC 0x00800000 /* transfer int request */ 659#define VGE_TDCTL_TIC 0x00800000 /* transfer int request */
635#define VGE_TDCTL_TCPLSCTL 0x03000000 /* TCP large send ctl */ 660#define VGE_TDCTL_TCPLSCTL 0x03000000 /* TCP large send ctl */
636#define VGE_TDCTL_FRAGCNT 0xF0000000 /* number of frags used */ 661#define VGE_TDCTL_FRAGCNT 0xF0000000 /* number of frags used */
637 662
638#define VGE_TD_LS_MOF 0x00000000 /* middle of large send */ 663#define VGE_TD_LS_MOF 0x00000000 /* middle of large send */
639#define VGE_TD_LS_SOF 0x01000000 /* start of large send */ 664#define VGE_TD_LS_SOF 0x01000000 /* start of large send */
640#define VGE_TD_LS_EOF 0x02000000 /* end of large send */ 665#define VGE_TD_LS_EOF 0x02000000 /* end of large send */
641#define VGE_TD_LS_NORM 0x03000000 /* normal frame */ 666#define VGE_TD_LS_NORM 0x03000000 /* normal frame */
642 667
643/* Receive DMA descriptors have a single fragment pointer. */ 668/* Receive DMA descriptors have a single fragment pointer. */
644 669
645struct vge_rxdesc { 670struct vge_rxdesc {
646 volatile uint32_t rd_sts; 671 volatile uint32_t rd_sts;
647 volatile uint32_t rd_ctl; 672 volatile uint32_t rd_ctl;
648 volatile uint32_t rd_addrlo; 673 volatile uint32_t rd_addrlo;
649 volatile uint16_t rd_addrhi; 674 volatile uint16_t rd_addrhi;
650 volatile uint16_t rd_buflen; 675 volatile uint16_t rd_buflen;
651}; 676};
652 677
653/* 678/*
654 * Like the TX descriptor, the high bit in the buflen field in the 679 * Like the TX descriptor, the high bit in the buflen field in the
655 * RX descriptor has special meaning. This bit controls whether or 680 * RX descriptor has special meaning. This bit controls whether or
656 * not interrupts are generated for this descriptor. 681 * not interrupts are generated for this descriptor.
657 */ 682 */
658 683
659#define VGE_RXDESC_I 0x8000 684#define VGE_RXDESC_I 0x8000
660 685
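
A hedged sketch of per-descriptor interrupt control, assuming the bit requests an interrupt when set (the comment above leaves the polarity implicit; check the chip manual). 'rd', 'len' and 'idx' are hypothetical; interrupting on every 4th descriptor is shown purely as a mitigation example:

	/* Sketch: request an RX completion interrupt only on
	 * every 4th ring slot. */
	rd->rd_buflen = htole16(len & 0x3FFF);
	if ((idx % 4) == 0)
		rd->rd_buflen |= htole16(VGE_RXDESC_I);
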
661#define VGE_RDSTS_VIDM 0x00000001 /* VLAN tag filter miss */ 686#define VGE_RDSTS_VIDM 0x00000001 /* VLAN tag filter miss */
662#define VGE_RDSTS_CRCERR 0x00000002 /* bad CRC error */ 687#define VGE_RDSTS_CRCERR 0x00000002 /* bad CRC error */
663#define VGE_RDSTS_FAERR 0x00000004 /* frame alignment error */ 688#define VGE_RDSTS_FAERR 0x00000004 /* frame alignment error */
664#define VGE_RDSTS_CSUMERR 0x00000008 /* bad TCP/IP checksum */ 689#define VGE_RDSTS_CSUMERR 0x00000008 /* bad TCP/IP checksum */
665#define VGE_RDSTS_RLERR 0x00000010 /* RX length error */ 690#define VGE_RDSTS_RLERR 0x00000010 /* RX length error */
666#define VGE_RDSTS_SYMERR 0x00000020 /* PCS symbol error */ 691#define VGE_RDSTS_SYMERR 0x00000020 /* PCS symbol error */
667#define VGE_RDSTS_SNTAG 0x00000040 /* RX'ed tagged SNAP pkt */ 692#define VGE_RDSTS_SNTAG 0x00000040 /* RX'ed tagged SNAP pkt */
668#define VGE_RDSTS_DETAG 0x00000080 /* VLAN tag extracted */ 693#define VGE_RDSTS_DETAG 0x00000080 /* VLAN tag extracted */
669#define VGE_RDSTS_BOUNDARY 0x00000300 /* frame boundary bits */ 694#define VGE_RDSTS_BOUNDARY 0x00000300 /* frame boundary bits */
670#define VGE_RDSTS_VTAG 0x00000400 /* VLAN tag indicator */ 695#define VGE_RDSTS_VTAG 0x00000400 /* VLAN tag indicator */
671#define VGE_RDSTS_UCAST 0x00000800 /* unicast frame */ 696#define VGE_RDSTS_UCAST 0x00000800 /* unicast frame */
672#define VGE_RDSTS_BCAST 0x00001000 /* broadcast frame */ 697#define VGE_RDSTS_BCAST 0x00001000 /* broadcast frame */
673#define VGE_RDSTS_MCAST 0x00002000 /* multicast frame */ 698#define VGE_RDSTS_MCAST 0x00002000 /* multicast frame */
674#define VGE_RDSTS_PFT 0x00004000 /* perfect filter hit */ 699#define VGE_RDSTS_PFT 0x00004000 /* perfect filter hit */
675#define VGE_RDSTS_RXOK 0x00008000 /* frame is good. */ 700#define VGE_RDSTS_RXOK 0x00008000 /* frame is good. */
676#define VGE_RDSTS_BUFSIZ 0x3FFF0000 /* received frame len */ 701#define VGE_RDSTS_BUFSIZ 0x3FFF0000 /* received frame len */
677#define VGE_RDSTS_SHUTDOWN 0x40000000 /* shutdown during RX */ 702#define VGE_RDSTS_SHUTDOWN 0x40000000 /* shutdown during RX */
678#define VGE_RDSTS_OWN 0x80000000 /* own bit. */ 703#define VGE_RDSTS_OWN 0x80000000 /* own bit. */
679 704
680#define VGE_RXPKT_ONEFRAG 0x00000000 /* only one fragment */ 705#define VGE_RXPKT_ONEFRAG 0x00000000 /* only one fragment */
681#define VGE_RXPKT_EOF 0x00000100 /* last frag in frame */ 706#define VGE_RXPKT_EOF 0x00000100 /* last frag in frame */
682#define VGE_RXPKT_SOF 0x00000200 /* first frag in frame */ 707#define VGE_RXPKT_SOF 0x00000200 /* first frag in frame */
683#define VGE_RXPKT_MOF 0x00000300 /* intermediate frag */ 708#define VGE_RXPKT_MOF 0x00000300 /* intermediate frag */
684 709
685#define VGE_RDCTL_VLANID 0x0000FFFF /* VLAN ID info */ 710#define VGE_RDCTL_VLANID 0x0000FFFF /* VLAN ID info */
686#define VGE_RDCTL_UDPPKT 0x00010000 /* UDP packet received */ 711#define VGE_RDCTL_UDPPKT 0x00010000 /* UDP packet received */
687#define VGE_RDCTL_TCPPKT 0x00020000 /* TCP packet received */ 712#define VGE_RDCTL_TCPPKT 0x00020000 /* TCP packet received */
688#define VGE_RDCTL_IPPKT 0x00040000 /* IP packet received */ 713#define VGE_RDCTL_IPPKT 0x00040000 /* IP packet received */
689#define VGE_RDCTL_UDPZERO 0x00080000 /* pkt with UDP CSUM of 0 */ 714#define VGE_RDCTL_UDPZERO 0x00080000 /* pkt with UDP CSUM of 0 */
690#define VGE_RDCTL_FRAG 0x00100000 /* received IP frag */ 715#define VGE_RDCTL_FRAG 0x00100000 /* received IP frag */
691#define VGE_RDCTL_PROTOCSUMOK 0x00200000 /* TCP/UDP checksum ok */ 716#define VGE_RDCTL_PROTOCSUMOK 0x00200000 /* TCP/UDP checksum ok */
692#define VGE_RDCTL_IPCSUMOK 0x00400000 /* IP checksum ok */ 717#define VGE_RDCTL_IPCSUMOK 0x00400000 /* IP checksum ok */
693#define VGE_RDCTL_FILTIDX 0x3C000000 /* interesting filter idx */ 718#define VGE_RDCTL_FILTIDX 0x3C000000 /* interesting filter idx */
694 719
695#endif /* _IF_VGEREG_H_ */ 720#endif /* _IF_VGEREG_H_ */