Tue Jan 28 11:12:30 2020 UTC ()
Pull up following revision(s) (requested by msaitoh in ticket #667):

	sys/dev/ic/rtl81x9var.h: revision 1.57
	sys/dev/ic/rtl81x9.c: revision 1.107
	sys/dev/ic/rtl81x9reg.h: revision 1.51
	sys/dev/ic/rtl8169.c: revision 1.160
	sys/dev/ic/rtl81x9reg.h: revision 1.52
	sys/dev/ic/rtl8169.c: revision 1.161

 Use unsigned in rtk_setmulti() to avoid undefined behavior. Found by kUBSan.
The 8168H model didn't link up well. Some models seem to require enabling TX/RX after configuration.
Added the RTKQ_TXRXEN_LATER quirk flag. It may be possible to unify it with the RTKQ_RXDV_GATED flag.
 Sort RTK_HWREV_* by value.

Improve some chip revisions support:
 - Add 8168FP, 8411, 8168G, 8401E, 8105E, 8105E_SPIN1, 8106E and 8402 from
   {Free,Open}BSD.
 - Renumber RTK_HWREV_8103E from 0x24C00000 to 0x34c00000. 0x24C00000 is newly
   used as RTK_HWREV_8102EL_SPIN1. Same as {Free,Open}BSD.


(martin)
diff -r1.159 -r1.159.2.1 src/sys/dev/ic/rtl8169.c
diff -r1.106 -r1.106.2.1 src/sys/dev/ic/rtl81x9.c
diff -r1.50 -r1.50.4.1 src/sys/dev/ic/rtl81x9reg.h
diff -r1.56 -r1.56.18.1 src/sys/dev/ic/rtl81x9var.h

cvs diff -r1.159 -r1.159.2.1 src/sys/dev/ic/rtl8169.c (switch to unified diff)

--- src/sys/dev/ic/rtl8169.c 2019/05/30 02:32:18 1.159
+++ src/sys/dev/ic/rtl8169.c 2020/01/28 11:12:30 1.159.2.1
@@ -1,2132 +1,2163 @@ @@ -1,2132 +1,2163 @@
1/* $NetBSD: rtl8169.c,v 1.159 2019/05/30 02:32:18 msaitoh Exp $ */ 1/* $NetBSD: rtl8169.c,v 1.159.2.1 2020/01/28 11:12:30 martin Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1997, 1998-2003 4 * Copyright (c) 1997, 1998-2003
5 * Bill Paul <wpaul@windriver.com>. All rights reserved. 5 * Bill Paul <wpaul@windriver.com>. All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software 15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement: 16 * must display the following acknowledgement:
17 * This product includes software developed by Bill Paul. 17 * This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors 18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software 19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission. 20 * without specific prior written permission.
21 * 21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE. 32 * THE POSSIBILITY OF SUCH DAMAGE.
33 */ 33 */
34 34
35#include <sys/cdefs.h> 35#include <sys/cdefs.h>
36__KERNEL_RCSID(0, "$NetBSD: rtl8169.c,v 1.159 2019/05/30 02:32:18 msaitoh Exp $"); 36__KERNEL_RCSID(0, "$NetBSD: rtl8169.c,v 1.159.2.1 2020/01/28 11:12:30 martin Exp $");
37/* $FreeBSD: /repoman/r/ncvs/src/sys/dev/re/if_re.c,v 1.20 2004/04/11 20:34:08 ru Exp $ */ 37/* $FreeBSD: /repoman/r/ncvs/src/sys/dev/re/if_re.c,v 1.20 2004/04/11 20:34:08 ru Exp $ */
38 38
39/* 39/*
40 * RealTek 8139C+/8169/8169S/8168/8110S PCI NIC driver 40 * RealTek 8139C+/8169/8169S/8168/8110S PCI NIC driver
41 * 41 *
42 * Written by Bill Paul <wpaul@windriver.com> 42 * Written by Bill Paul <wpaul@windriver.com>
43 * Senior Networking Software Engineer 43 * Senior Networking Software Engineer
44 * Wind River Systems 44 * Wind River Systems
45 */ 45 */
46 46
47/* 47/*
48 * This driver is designed to support RealTek's next generation of 48 * This driver is designed to support RealTek's next generation of
49 * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently 49 * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently
50 * six devices in this family: the RTL8139C+, the RTL8169, the RTL8169S, 50 * six devices in this family: the RTL8139C+, the RTL8169, the RTL8169S,
51 * RTL8110S, the RTL8168 and the RTL8111. 51 * RTL8110S, the RTL8168 and the RTL8111.
52 * 52 *
53 * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible 53 * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible
54 * with the older 8139 family, however it also supports a special 54 * with the older 8139 family, however it also supports a special
55 * C+ mode of operation that provides several new performance enhancing 55 * C+ mode of operation that provides several new performance enhancing
56 * features. These include: 56 * features. These include:
57 * 57 *
58 * o Descriptor based DMA mechanism. Each descriptor represents 58 * o Descriptor based DMA mechanism. Each descriptor represents
59 * a single packet fragment. Data buffers may be aligned on 59 * a single packet fragment. Data buffers may be aligned on
60 * any byte boundary. 60 * any byte boundary.
61 * 61 *
62 * o 64-bit DMA 62 * o 64-bit DMA
63 * 63 *
64 * o TCP/IP checksum offload for both RX and TX 64 * o TCP/IP checksum offload for both RX and TX
65 * 65 *
66 * o High and normal priority transmit DMA rings 66 * o High and normal priority transmit DMA rings
67 * 67 *
68 * o VLAN tag insertion and extraction 68 * o VLAN tag insertion and extraction
69 * 69 *
70 * o TCP large send (segmentation offload) 70 * o TCP large send (segmentation offload)
71 * 71 *
72 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+ 72 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+
73 * programming API is fairly straightforward. The RX filtering, EEPROM 73 * programming API is fairly straightforward. The RX filtering, EEPROM
74 * access and PHY access is the same as it is on the older 8139 series 74 * access and PHY access is the same as it is on the older 8139 series
75 * chips. 75 * chips.
76 * 76 *
77 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the 77 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the
78 * same programming API and feature set as the 8139C+ with the following 78 * same programming API and feature set as the 8139C+ with the following
79 * differences and additions: 79 * differences and additions:
80 * 80 *
81 * o 1000Mbps mode 81 * o 1000Mbps mode
82 * 82 *
83 * o Jumbo frames 83 * o Jumbo frames
84 * 84 *
85 * o GMII and TBI ports/registers for interfacing with copper 85 * o GMII and TBI ports/registers for interfacing with copper
86 * or fiber PHYs 86 * or fiber PHYs
87 * 87 *
88 * o RX and TX DMA rings can have up to 1024 descriptors 88 * o RX and TX DMA rings can have up to 1024 descriptors
89 * (the 8139C+ allows a maximum of 64) 89 * (the 8139C+ allows a maximum of 64)
90 * 90 *
91 * o Slight differences in register layout from the 8139C+ 91 * o Slight differences in register layout from the 8139C+
92 * 92 *
93 * The TX start and timer interrupt registers are at different locations 93 * The TX start and timer interrupt registers are at different locations
94 * on the 8169 than they are on the 8139C+. Also, the status word in the 94 * on the 8169 than they are on the 8139C+. Also, the status word in the
95 * RX descriptor has a slightly different bit layout. The 8169 does not 95 * RX descriptor has a slightly different bit layout. The 8169 does not
96 * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska' 96 * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska'
97 * copper gigE PHY. 97 * copper gigE PHY.
98 * 98 *
99 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs 99 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
100 * (the 'S' stands for 'single-chip'). These devices have the same 100 * (the 'S' stands for 'single-chip'). These devices have the same
101 * programming API as the older 8169, but also have some vendor-specific 101 * programming API as the older 8169, but also have some vendor-specific
102 * registers for the on-board PHY. The 8110S is a LAN-on-motherboard 102 * registers for the on-board PHY. The 8110S is a LAN-on-motherboard
103 * part designed to be pin-compatible with the RealTek 8100 10/100 chip. 103 * part designed to be pin-compatible with the RealTek 8100 10/100 chip.
104 * 104 *
105 * This driver takes advantage of the RX and TX checksum offload and 105 * This driver takes advantage of the RX and TX checksum offload and
106 * VLAN tag insertion/extraction features. It also implements TX 106 * VLAN tag insertion/extraction features. It also implements TX
107 * interrupt moderation using the timer interrupt registers, which 107 * interrupt moderation using the timer interrupt registers, which
108 * significantly reduces TX interrupt load. There is also support 108 * significantly reduces TX interrupt load. There is also support
109 * for jumbo frames, however the 8169/8169S/8110S can not transmit 109 * for jumbo frames, however the 8169/8169S/8110S can not transmit
110 * jumbo frames larger than 7.5K, so the max MTU possible with this 110 * jumbo frames larger than 7.5K, so the max MTU possible with this
111 * driver is 7500 bytes. 111 * driver is 7500 bytes.
112 */ 112 */
113 113
114 114
115#include <sys/param.h> 115#include <sys/param.h>
116#include <sys/endian.h> 116#include <sys/endian.h>
117#include <sys/systm.h> 117#include <sys/systm.h>
118#include <sys/sockio.h> 118#include <sys/sockio.h>
119#include <sys/mbuf.h> 119#include <sys/mbuf.h>
120#include <sys/malloc.h> 120#include <sys/malloc.h>
121#include <sys/kernel.h> 121#include <sys/kernel.h>
122#include <sys/socket.h> 122#include <sys/socket.h>
123#include <sys/device.h> 123#include <sys/device.h>
124 124
125#include <net/if.h> 125#include <net/if.h>
126#include <net/if_arp.h> 126#include <net/if_arp.h>
127#include <net/if_dl.h> 127#include <net/if_dl.h>
128#include <net/if_ether.h> 128#include <net/if_ether.h>
129#include <net/if_media.h> 129#include <net/if_media.h>
130#include <net/if_vlanvar.h> 130#include <net/if_vlanvar.h>
131 131
132#include <netinet/in_systm.h> /* XXX for IP_MAXPACKET */ 132#include <netinet/in_systm.h> /* XXX for IP_MAXPACKET */
133#include <netinet/in.h> /* XXX for IP_MAXPACKET */ 133#include <netinet/in.h> /* XXX for IP_MAXPACKET */
134#include <netinet/ip.h> /* XXX for IP_MAXPACKET */ 134#include <netinet/ip.h> /* XXX for IP_MAXPACKET */
135 135
136#include <net/bpf.h> 136#include <net/bpf.h>
137#include <sys/rndsource.h> 137#include <sys/rndsource.h>
138 138
139#include <sys/bus.h> 139#include <sys/bus.h>
140 140
141#include <dev/mii/mii.h> 141#include <dev/mii/mii.h>
142#include <dev/mii/miivar.h> 142#include <dev/mii/miivar.h>
143 143
144#include <dev/ic/rtl81x9reg.h> 144#include <dev/ic/rtl81x9reg.h>
145#include <dev/ic/rtl81x9var.h> 145#include <dev/ic/rtl81x9var.h>
146 146
147#include <dev/ic/rtl8169var.h> 147#include <dev/ic/rtl8169var.h>
148 148
149static inline void re_set_bufaddr(struct re_desc *, bus_addr_t); 149static inline void re_set_bufaddr(struct re_desc *, bus_addr_t);
150 150
151static int re_newbuf(struct rtk_softc *, int, struct mbuf *); 151static int re_newbuf(struct rtk_softc *, int, struct mbuf *);
152static int re_rx_list_init(struct rtk_softc *); 152static int re_rx_list_init(struct rtk_softc *);
153static int re_tx_list_init(struct rtk_softc *); 153static int re_tx_list_init(struct rtk_softc *);
154static void re_rxeof(struct rtk_softc *); 154static void re_rxeof(struct rtk_softc *);
155static void re_txeof(struct rtk_softc *); 155static void re_txeof(struct rtk_softc *);
156static void re_tick(void *); 156static void re_tick(void *);
157static void re_start(struct ifnet *); 157static void re_start(struct ifnet *);
158static int re_ioctl(struct ifnet *, u_long, void *); 158static int re_ioctl(struct ifnet *, u_long, void *);
159static int re_init(struct ifnet *); 159static int re_init(struct ifnet *);
160static void re_stop(struct ifnet *, int); 160static void re_stop(struct ifnet *, int);
161static void re_watchdog(struct ifnet *); 161static void re_watchdog(struct ifnet *);
162 162
163static int re_enable(struct rtk_softc *); 163static int re_enable(struct rtk_softc *);
164static void re_disable(struct rtk_softc *); 164static void re_disable(struct rtk_softc *);
165 165
166static int re_gmii_readreg(device_t, int, int, uint16_t *); 166static int re_gmii_readreg(device_t, int, int, uint16_t *);
167static int re_gmii_writereg(device_t, int, int, uint16_t); 167static int re_gmii_writereg(device_t, int, int, uint16_t);
168 168
169static int re_miibus_readreg(device_t, int, int, uint16_t *); 169static int re_miibus_readreg(device_t, int, int, uint16_t *);
170static int re_miibus_writereg(device_t, int, int, uint16_t); 170static int re_miibus_writereg(device_t, int, int, uint16_t);
171static void re_miibus_statchg(struct ifnet *); 171static void re_miibus_statchg(struct ifnet *);
172 172
173static void re_reset(struct rtk_softc *); 173static void re_reset(struct rtk_softc *);
174 174
175static inline void 175static inline void
176re_set_bufaddr(struct re_desc *d, bus_addr_t addr) 176re_set_bufaddr(struct re_desc *d, bus_addr_t addr)
177{ 177{
178 178
179 d->re_bufaddr_lo = htole32((uint32_t)addr); 179 d->re_bufaddr_lo = htole32((uint32_t)addr);
180 if (sizeof(bus_addr_t) == sizeof(uint64_t)) 180 if (sizeof(bus_addr_t) == sizeof(uint64_t))
181 d->re_bufaddr_hi = htole32((uint64_t)addr >> 32); 181 d->re_bufaddr_hi = htole32((uint64_t)addr >> 32);
182 else 182 else
183 d->re_bufaddr_hi = 0; 183 d->re_bufaddr_hi = 0;
184} 184}
185 185
186static int 186static int
187re_gmii_readreg(device_t dev, int phy, int reg, uint16_t *val) 187re_gmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
188{ 188{
189 struct rtk_softc *sc = device_private(dev); 189 struct rtk_softc *sc = device_private(dev);
190 uint32_t data; 190 uint32_t data;
191 int i; 191 int i;
192 192
193 if (phy != 7) 193 if (phy != 7)
194 return -1; 194 return -1;
195 195
196 /* Let the rgephy driver read the GMEDIASTAT register */ 196 /* Let the rgephy driver read the GMEDIASTAT register */
197 197
198 if (reg == RTK_GMEDIASTAT) { 198 if (reg == RTK_GMEDIASTAT) {
199 *val = CSR_READ_1(sc, RTK_GMEDIASTAT); 199 *val = CSR_READ_1(sc, RTK_GMEDIASTAT);
200 return 0; 200 return 0;
201 } 201 }
202 202
203 CSR_WRITE_4(sc, RTK_PHYAR, reg << 16); 203 CSR_WRITE_4(sc, RTK_PHYAR, reg << 16);
204 DELAY(1000); 204 DELAY(1000);
205 205
206 for (i = 0; i < RTK_TIMEOUT; i++) { 206 for (i = 0; i < RTK_TIMEOUT; i++) {
207 data = CSR_READ_4(sc, RTK_PHYAR); 207 data = CSR_READ_4(sc, RTK_PHYAR);
208 if (data & RTK_PHYAR_BUSY) 208 if (data & RTK_PHYAR_BUSY)
209 break; 209 break;
210 DELAY(100); 210 DELAY(100);
211 } 211 }
212 212
213 if (i == RTK_TIMEOUT) { 213 if (i == RTK_TIMEOUT) {
214 printf("%s: PHY read failed\n", device_xname(sc->sc_dev)); 214 printf("%s: PHY read failed\n", device_xname(sc->sc_dev));
215 return ETIMEDOUT; 215 return ETIMEDOUT;
216 } 216 }
217 217
218 *val = data & RTK_PHYAR_PHYDATA; 218 *val = data & RTK_PHYAR_PHYDATA;
219 return 0; 219 return 0;
220} 220}
221 221
222static int 222static int
223re_gmii_writereg(device_t dev, int phy, int reg, uint16_t val) 223re_gmii_writereg(device_t dev, int phy, int reg, uint16_t val)
224{ 224{
225 struct rtk_softc *sc = device_private(dev); 225 struct rtk_softc *sc = device_private(dev);
226 uint32_t data; 226 uint32_t data;
227 int i; 227 int i;
228 228
229 CSR_WRITE_4(sc, RTK_PHYAR, (reg << 16) | 229 CSR_WRITE_4(sc, RTK_PHYAR, (reg << 16) |
230 (val & RTK_PHYAR_PHYDATA) | RTK_PHYAR_BUSY); 230 (val & RTK_PHYAR_PHYDATA) | RTK_PHYAR_BUSY);
231 DELAY(1000); 231 DELAY(1000);
232 232
233 for (i = 0; i < RTK_TIMEOUT; i++) { 233 for (i = 0; i < RTK_TIMEOUT; i++) {
234 data = CSR_READ_4(sc, RTK_PHYAR); 234 data = CSR_READ_4(sc, RTK_PHYAR);
235 if (!(data & RTK_PHYAR_BUSY)) 235 if (!(data & RTK_PHYAR_BUSY))
236 break; 236 break;
237 DELAY(100); 237 DELAY(100);
238 } 238 }
239 239
240 if (i == RTK_TIMEOUT) { 240 if (i == RTK_TIMEOUT) {
241 printf("%s: PHY write reg %x <- %hx failed\n", 241 printf("%s: PHY write reg %x <- %hx failed\n",
242 device_xname(sc->sc_dev), reg, val); 242 device_xname(sc->sc_dev), reg, val);
243 return ETIMEDOUT; 243 return ETIMEDOUT;
244 } 244 }
245 245
246 return 0; 246 return 0;
247} 247}
248 248
249static int 249static int
250re_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val) 250re_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val)
251{ 251{
252 struct rtk_softc *sc = device_private(dev); 252 struct rtk_softc *sc = device_private(dev);
253 uint16_t re8139_reg = 0; 253 uint16_t re8139_reg = 0;
254 int s, rv = 0; 254 int s, rv = 0;
255 255
256 s = splnet(); 256 s = splnet();
257 257
258 if ((sc->sc_quirk & RTKQ_8139CPLUS) == 0) { 258 if ((sc->sc_quirk & RTKQ_8139CPLUS) == 0) {
259 rv = re_gmii_readreg(dev, phy, reg, val); 259 rv = re_gmii_readreg(dev, phy, reg, val);
260 splx(s); 260 splx(s);
261 return rv; 261 return rv;
262 } 262 }
263 263
264 /* Pretend the internal PHY is only at address 0 */ 264 /* Pretend the internal PHY is only at address 0 */
265 if (phy) { 265 if (phy) {
266 splx(s); 266 splx(s);
267 return -1; 267 return -1;
268 } 268 }
269 switch (reg) { 269 switch (reg) {
270 case MII_BMCR: 270 case MII_BMCR:
271 re8139_reg = RTK_BMCR; 271 re8139_reg = RTK_BMCR;
272 break; 272 break;
273 case MII_BMSR: 273 case MII_BMSR:
274 re8139_reg = RTK_BMSR; 274 re8139_reg = RTK_BMSR;
275 break; 275 break;
276 case MII_ANAR: 276 case MII_ANAR:
277 re8139_reg = RTK_ANAR; 277 re8139_reg = RTK_ANAR;
278 break; 278 break;
279 case MII_ANER: 279 case MII_ANER:
280 re8139_reg = RTK_ANER; 280 re8139_reg = RTK_ANER;
281 break; 281 break;
282 case MII_ANLPAR: 282 case MII_ANLPAR:
283 re8139_reg = RTK_LPAR; 283 re8139_reg = RTK_LPAR;
284 break; 284 break;
285 case MII_PHYIDR1: 285 case MII_PHYIDR1:
286 case MII_PHYIDR2: 286 case MII_PHYIDR2:
287 *val = 0; 287 *val = 0;
288 splx(s); 288 splx(s);
289 return 0; 289 return 0;
290 /* 290 /*
291 * Allow the rlphy driver to read the media status 291 * Allow the rlphy driver to read the media status
292 * register. If we have a link partner which does not 292 * register. If we have a link partner which does not
293 * support NWAY, this is the register which will tell 293 * support NWAY, this is the register which will tell
294 * us the results of parallel detection. 294 * us the results of parallel detection.
295 */ 295 */
296 case RTK_MEDIASTAT: 296 case RTK_MEDIASTAT:
297 *val = CSR_READ_1(sc, RTK_MEDIASTAT); 297 *val = CSR_READ_1(sc, RTK_MEDIASTAT);
298 splx(s); 298 splx(s);
299 return 0; 299 return 0;
300 default: 300 default:
301 printf("%s: bad phy register\n", device_xname(sc->sc_dev)); 301 printf("%s: bad phy register\n", device_xname(sc->sc_dev));
302 splx(s); 302 splx(s);
303 return -1; 303 return -1;
304 } 304 }
305 *val = CSR_READ_2(sc, re8139_reg); 305 *val = CSR_READ_2(sc, re8139_reg);
306 if ((sc->sc_quirk & RTKQ_8139CPLUS) != 0 && re8139_reg == RTK_BMCR) { 306 if ((sc->sc_quirk & RTKQ_8139CPLUS) != 0 && re8139_reg == RTK_BMCR) {
307 /* 8139C+ has different bit layout. */ 307 /* 8139C+ has different bit layout. */
308 *val &= ~(BMCR_LOOP | BMCR_ISO); 308 *val &= ~(BMCR_LOOP | BMCR_ISO);
309 } 309 }
310 splx(s); 310 splx(s);
311 return 0; 311 return 0;
312} 312}
313 313
314static int 314static int
315re_miibus_writereg(device_t dev, int phy, int reg, uint16_t val) 315re_miibus_writereg(device_t dev, int phy, int reg, uint16_t val)
316{ 316{
317 struct rtk_softc *sc = device_private(dev); 317 struct rtk_softc *sc = device_private(dev);
318 uint16_t re8139_reg = 0; 318 uint16_t re8139_reg = 0;
319 int s, rv; 319 int s, rv;
320 320
321 s = splnet(); 321 s = splnet();
322 322
323 if ((sc->sc_quirk & RTKQ_8139CPLUS) == 0) { 323 if ((sc->sc_quirk & RTKQ_8139CPLUS) == 0) {
324 rv = re_gmii_writereg(dev, phy, reg, val); 324 rv = re_gmii_writereg(dev, phy, reg, val);
325 splx(s); 325 splx(s);
326 return rv; 326 return rv;
327 } 327 }
328 328
329 /* Pretend the internal PHY is only at address 0 */ 329 /* Pretend the internal PHY is only at address 0 */
330 if (phy) { 330 if (phy) {
331 splx(s); 331 splx(s);
332 return -1; 332 return -1;
333 } 333 }
334 switch (reg) { 334 switch (reg) {
335 case MII_BMCR: 335 case MII_BMCR:
336 re8139_reg = RTK_BMCR; 336 re8139_reg = RTK_BMCR;
337 if ((sc->sc_quirk & RTKQ_8139CPLUS) != 0) { 337 if ((sc->sc_quirk & RTKQ_8139CPLUS) != 0) {
338 /* 8139C+ has different bit layout. */ 338 /* 8139C+ has different bit layout. */
339 val &= ~(BMCR_LOOP | BMCR_ISO); 339 val &= ~(BMCR_LOOP | BMCR_ISO);
340 } 340 }
341 break; 341 break;
342 case MII_BMSR: 342 case MII_BMSR:
343 re8139_reg = RTK_BMSR; 343 re8139_reg = RTK_BMSR;
344 break; 344 break;
345 case MII_ANAR: 345 case MII_ANAR:
346 re8139_reg = RTK_ANAR; 346 re8139_reg = RTK_ANAR;
347 break; 347 break;
348 case MII_ANER: 348 case MII_ANER:
349 re8139_reg = RTK_ANER; 349 re8139_reg = RTK_ANER;
350 break; 350 break;
351 case MII_ANLPAR: 351 case MII_ANLPAR:
352 re8139_reg = RTK_LPAR; 352 re8139_reg = RTK_LPAR;
353 break; 353 break;
354 case MII_PHYIDR1: 354 case MII_PHYIDR1:
355 case MII_PHYIDR2: 355 case MII_PHYIDR2:
356 splx(s); 356 splx(s);
357 return 0; 357 return 0;
358 break; 358 break;
359 default: 359 default:
360 printf("%s: bad phy register\n", device_xname(sc->sc_dev)); 360 printf("%s: bad phy register\n", device_xname(sc->sc_dev));
361 splx(s); 361 splx(s);
362 return -1; 362 return -1;
363 } 363 }
364 CSR_WRITE_2(sc, re8139_reg, val); 364 CSR_WRITE_2(sc, re8139_reg, val);
365 splx(s); 365 splx(s);
366 return 0; 366 return 0;
367} 367}
368 368
369static void 369static void
370re_miibus_statchg(struct ifnet *ifp) 370re_miibus_statchg(struct ifnet *ifp)
371{ 371{
372 372
373 return; 373 return;
374} 374}
375 375
376static void 376static void
377re_reset(struct rtk_softc *sc) 377re_reset(struct rtk_softc *sc)
378{ 378{
379 int i; 379 int i;
380 380
381 CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_RESET); 381 CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_RESET);
382 382
383 for (i = 0; i < RTK_TIMEOUT; i++) { 383 for (i = 0; i < RTK_TIMEOUT; i++) {
384 DELAY(10); 384 DELAY(10);
385 if ((CSR_READ_1(sc, RTK_COMMAND) & RTK_CMD_RESET) == 0) 385 if ((CSR_READ_1(sc, RTK_COMMAND) & RTK_CMD_RESET) == 0)
386 break; 386 break;
387 } 387 }
388 if (i == RTK_TIMEOUT) 388 if (i == RTK_TIMEOUT)
389 printf("%s: reset never completed!\n", 389 printf("%s: reset never completed!\n",
390 device_xname(sc->sc_dev)); 390 device_xname(sc->sc_dev));
391 391
392 /* 392 /*
393 * NB: Realtek-supplied FreeBSD driver does this only for MACFG_3, 393 * NB: Realtek-supplied FreeBSD driver does this only for MACFG_3,
394 * but also says "Rtl8169s sigle chip detected". 394 * but also says "Rtl8169s sigle chip detected".
395 */ 395 */
396 if ((sc->sc_quirk & RTKQ_MACLDPS) != 0) 396 if ((sc->sc_quirk & RTKQ_MACLDPS) != 0)
397 CSR_WRITE_1(sc, RTK_LDPS, 1); 397 CSR_WRITE_1(sc, RTK_LDPS, 1);
398 398
399} 399}
400 400
401/* 401/*
402 * The following routine is designed to test for a defect on some 402 * The following routine is designed to test for a defect on some
403 * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64# 403 * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64#
404 * lines connected to the bus, however for a 32-bit only card, they 404 * lines connected to the bus, however for a 32-bit only card, they
405 * should be pulled high. The result of this defect is that the 405 * should be pulled high. The result of this defect is that the
406 * NIC will not work right if you plug it into a 64-bit slot: DMA 406 * NIC will not work right if you plug it into a 64-bit slot: DMA
407 * operations will be done with 64-bit transfers, which will fail 407 * operations will be done with 64-bit transfers, which will fail
408 * because the 64-bit data lines aren't connected. 408 * because the 64-bit data lines aren't connected.
409 * 409 *
410 * There's no way to work around this (short of talking a soldering 410 * There's no way to work around this (short of talking a soldering
411 * iron to the board), however we can detect it. The method we use 411 * iron to the board), however we can detect it. The method we use
412 * here is to put the NIC into digital loopback mode, set the receiver 412 * here is to put the NIC into digital loopback mode, set the receiver
413 * to promiscuous mode, and then try to send a frame. We then compare 413 * to promiscuous mode, and then try to send a frame. We then compare
414 * the frame data we sent to what was received. If the data matches, 414 * the frame data we sent to what was received. If the data matches,
415 * then the NIC is working correctly, otherwise we know the user has 415 * then the NIC is working correctly, otherwise we know the user has
416 * a defective NIC which has been mistakenly plugged into a 64-bit PCI 416 * a defective NIC which has been mistakenly plugged into a 64-bit PCI
417 * slot. In the latter case, there's no way the NIC can work correctly, 417 * slot. In the latter case, there's no way the NIC can work correctly,
418 * so we print out a message on the console and abort the device attach. 418 * so we print out a message on the console and abort the device attach.
419 */ 419 */
420 420
421int 421int
422re_diag(struct rtk_softc *sc) 422re_diag(struct rtk_softc *sc)
423{ 423{
424 struct ifnet *ifp = &sc->ethercom.ec_if; 424 struct ifnet *ifp = &sc->ethercom.ec_if;
425 struct mbuf *m0; 425 struct mbuf *m0;
426 struct ether_header *eh; 426 struct ether_header *eh;
427 struct re_rxsoft *rxs; 427 struct re_rxsoft *rxs;
428 struct re_desc *cur_rx; 428 struct re_desc *cur_rx;
429 bus_dmamap_t dmamap; 429 bus_dmamap_t dmamap;
430 uint16_t status; 430 uint16_t status;
431 uint32_t rxstat; 431 uint32_t rxstat;
432 int total_len, i, s, error = 0; 432 int total_len, i, s, error = 0;
433 static const uint8_t dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' }; 433 static const uint8_t dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' };
434 static const uint8_t src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' }; 434 static const uint8_t src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' };
435 435
436 /* Allocate a single mbuf */ 436 /* Allocate a single mbuf */
437 437
438 MGETHDR(m0, M_DONTWAIT, MT_DATA); 438 MGETHDR(m0, M_DONTWAIT, MT_DATA);
439 if (m0 == NULL) 439 if (m0 == NULL)
440 return ENOBUFS; 440 return ENOBUFS;
441 441
442 /* 442 /*
443 * Initialize the NIC in test mode. This sets the chip up 443 * Initialize the NIC in test mode. This sets the chip up
444 * so that it can send and receive frames, but performs the 444 * so that it can send and receive frames, but performs the
445 * following special functions: 445 * following special functions:
446 * - Puts receiver in promiscuous mode 446 * - Puts receiver in promiscuous mode
447 * - Enables digital loopback mode 447 * - Enables digital loopback mode
448 * - Leaves interrupts turned off 448 * - Leaves interrupts turned off
449 */ 449 */
450 450
451 ifp->if_flags |= IFF_PROMISC; 451 ifp->if_flags |= IFF_PROMISC;
452 sc->re_testmode = 1; 452 sc->re_testmode = 1;
453 re_init(ifp); 453 re_init(ifp);
454 re_stop(ifp, 0); 454 re_stop(ifp, 0);
455 DELAY(100000); 455 DELAY(100000);
456 re_init(ifp); 456 re_init(ifp);
457 457
458 /* Put some data in the mbuf */ 458 /* Put some data in the mbuf */
459 459
460 eh = mtod(m0, struct ether_header *); 460 eh = mtod(m0, struct ether_header *);
461 memcpy(eh->ether_dhost, &dst, ETHER_ADDR_LEN); 461 memcpy(eh->ether_dhost, &dst, ETHER_ADDR_LEN);
462 memcpy(eh->ether_shost, &src, ETHER_ADDR_LEN); 462 memcpy(eh->ether_shost, &src, ETHER_ADDR_LEN);
463 eh->ether_type = htons(ETHERTYPE_IP); 463 eh->ether_type = htons(ETHERTYPE_IP);
464 m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN; 464 m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN;
465 465
466 /* 466 /*
467 * Queue the packet, start transmission. 467 * Queue the packet, start transmission.
468 */ 468 */
469 469
470 CSR_WRITE_2(sc, RTK_ISR, 0xFFFF); 470 CSR_WRITE_2(sc, RTK_ISR, 0xFFFF);
471 s = splnet(); 471 s = splnet();
472 IF_ENQUEUE(&ifp->if_snd, m0); 472 IF_ENQUEUE(&ifp->if_snd, m0);
473 re_start(ifp); 473 re_start(ifp);
474 splx(s); 474 splx(s);
475 m0 = NULL; 475 m0 = NULL;
476 476
477 /* Wait for it to propagate through the chip */ 477 /* Wait for it to propagate through the chip */
478 478
479 DELAY(100000); 479 DELAY(100000);
480 for (i = 0; i < RTK_TIMEOUT; i++) { 480 for (i = 0; i < RTK_TIMEOUT; i++) {
481 status = CSR_READ_2(sc, RTK_ISR); 481 status = CSR_READ_2(sc, RTK_ISR);
482 if ((status & (RTK_ISR_TIMEOUT_EXPIRED | RTK_ISR_RX_OK)) == 482 if ((status & (RTK_ISR_TIMEOUT_EXPIRED | RTK_ISR_RX_OK)) ==
483 (RTK_ISR_TIMEOUT_EXPIRED | RTK_ISR_RX_OK)) 483 (RTK_ISR_TIMEOUT_EXPIRED | RTK_ISR_RX_OK))
484 break; 484 break;
485 DELAY(10); 485 DELAY(10);
486 } 486 }
487 if (i == RTK_TIMEOUT) { 487 if (i == RTK_TIMEOUT) {
488 aprint_error_dev(sc->sc_dev, 488 aprint_error_dev(sc->sc_dev,
489 "diagnostic failed, failed to receive packet " 489 "diagnostic failed, failed to receive packet "
490 "in loopback mode\n"); 490 "in loopback mode\n");
491 error = EIO; 491 error = EIO;
492 goto done; 492 goto done;
493 } 493 }
494 494
495 /* 495 /*
496 * The packet should have been dumped into the first 496 * The packet should have been dumped into the first
497 * entry in the RX DMA ring. Grab it from there. 497 * entry in the RX DMA ring. Grab it from there.
498 */ 498 */
499 499
500 rxs = &sc->re_ldata.re_rxsoft[0]; 500 rxs = &sc->re_ldata.re_rxsoft[0];
501 dmamap = rxs->rxs_dmamap; 501 dmamap = rxs->rxs_dmamap;
502 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, 502 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
503 BUS_DMASYNC_POSTREAD); 503 BUS_DMASYNC_POSTREAD);
504 bus_dmamap_unload(sc->sc_dmat, dmamap); 504 bus_dmamap_unload(sc->sc_dmat, dmamap);
505 505
506 m0 = rxs->rxs_mbuf; 506 m0 = rxs->rxs_mbuf;
507 rxs->rxs_mbuf = NULL; 507 rxs->rxs_mbuf = NULL;
508 eh = mtod(m0, struct ether_header *); 508 eh = mtod(m0, struct ether_header *);
509 509
510 RE_RXDESCSYNC(sc, 0, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 510 RE_RXDESCSYNC(sc, 0, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
511 cur_rx = &sc->re_ldata.re_rx_list[0]; 511 cur_rx = &sc->re_ldata.re_rx_list[0];
512 rxstat = le32toh(cur_rx->re_cmdstat); 512 rxstat = le32toh(cur_rx->re_cmdstat);
513 total_len = rxstat & sc->re_rxlenmask; 513 total_len = rxstat & sc->re_rxlenmask;
514 514
515 if (total_len != ETHER_MIN_LEN) { 515 if (total_len != ETHER_MIN_LEN) {
516 aprint_error_dev(sc->sc_dev, 516 aprint_error_dev(sc->sc_dev,
517 "diagnostic failed, received short packet\n"); 517 "diagnostic failed, received short packet\n");
518 error = EIO; 518 error = EIO;
519 goto done; 519 goto done;
520 } 520 }
521 521
522 /* Test that the received packet data matches what we sent. */ 522 /* Test that the received packet data matches what we sent. */
523 523
524 if (memcmp(&eh->ether_dhost, &dst, ETHER_ADDR_LEN) || 524 if (memcmp(&eh->ether_dhost, &dst, ETHER_ADDR_LEN) ||
525 memcmp(&eh->ether_shost, &src, ETHER_ADDR_LEN) || 525 memcmp(&eh->ether_shost, &src, ETHER_ADDR_LEN) ||
526 ntohs(eh->ether_type) != ETHERTYPE_IP) { 526 ntohs(eh->ether_type) != ETHERTYPE_IP) {
527 aprint_error_dev(sc->sc_dev, "WARNING, DMA FAILURE!\n" 527 aprint_error_dev(sc->sc_dev, "WARNING, DMA FAILURE!\n"
528 "expected TX data: %s/%s/0x%x\n" 528 "expected TX data: %s/%s/0x%x\n"
529 "received RX data: %s/%s/0x%x\n" 529 "received RX data: %s/%s/0x%x\n"
530 "You may have a defective 32-bit NIC plugged " 530 "You may have a defective 32-bit NIC plugged "
531 "into a 64-bit PCI slot.\n" 531 "into a 64-bit PCI slot.\n"
532 "Please re-install the NIC in a 32-bit slot " 532 "Please re-install the NIC in a 32-bit slot "
533 "for proper operation.\n" 533 "for proper operation.\n"
534 "Read the re(4) man page for more details.\n" , 534 "Read the re(4) man page for more details.\n" ,
535 ether_sprintf(dst), ether_sprintf(src), ETHERTYPE_IP, 535 ether_sprintf(dst), ether_sprintf(src), ETHERTYPE_IP,
536 ether_sprintf(eh->ether_dhost), 536 ether_sprintf(eh->ether_dhost),
537 ether_sprintf(eh->ether_shost), ntohs(eh->ether_type)); 537 ether_sprintf(eh->ether_shost), ntohs(eh->ether_type));
538 error = EIO; 538 error = EIO;
539 } 539 }
540 540
541 done: 541 done:
542 /* Turn interface off, release resources */ 542 /* Turn interface off, release resources */
543 543
544 sc->re_testmode = 0; 544 sc->re_testmode = 0;
545 ifp->if_flags &= ~IFF_PROMISC; 545 ifp->if_flags &= ~IFF_PROMISC;
546 re_stop(ifp, 0); 546 re_stop(ifp, 0);
547 if (m0 != NULL) 547 if (m0 != NULL)
548 m_freem(m0); 548 m_freem(m0);
549 549
550 return error; 550 return error;
551} 551}
552 552
553 553
554/* 554/*
555 * Attach the interface. Allocate softc structures, do ifmedia 555 * Attach the interface. Allocate softc structures, do ifmedia
556 * setup and ethernet/BPF attach. 556 * setup and ethernet/BPF attach.
557 */ 557 */
558void 558void
559re_attach(struct rtk_softc *sc) 559re_attach(struct rtk_softc *sc)
560{ 560{
561 uint8_t eaddr[ETHER_ADDR_LEN]; 561 uint8_t eaddr[ETHER_ADDR_LEN];
562 struct ifnet *ifp; 562 struct ifnet *ifp;
563 struct mii_data *mii = &sc->mii; 563 struct mii_data *mii = &sc->mii;
564 int error = 0, i; 564 int error = 0, i;
565 565
566 if ((sc->sc_quirk & RTKQ_8139CPLUS) == 0) { 566 if ((sc->sc_quirk & RTKQ_8139CPLUS) == 0) {
567 uint32_t hwrev; 567 uint32_t hwrev;
568 568
569 /* Revision of 8169/8169S/8110s in bits 30..26, 23 */ 569 /* Revision of 8169/8169S/8110s in bits 30..26, 23 */
570 hwrev = CSR_READ_4(sc, RTK_TXCFG) & RTK_TXCFG_HWREV; 570 hwrev = CSR_READ_4(sc, RTK_TXCFG) & RTK_TXCFG_HWREV;
571 switch (hwrev) { 571 switch (hwrev) {
572 case RTK_HWREV_8169: 572 case RTK_HWREV_8169:
573 sc->sc_quirk |= RTKQ_8169NONS; 573 sc->sc_quirk |= RTKQ_8169NONS;
574 break; 574 break;
575 case RTK_HWREV_8169S: 575 case RTK_HWREV_8169S:
576 case RTK_HWREV_8110S: 576 case RTK_HWREV_8110S:
577 case RTK_HWREV_8169_8110SB: 577 case RTK_HWREV_8169_8110SB:
578 case RTK_HWREV_8169_8110SBL: 578 case RTK_HWREV_8169_8110SBL:
579 case RTK_HWREV_8169_8110SC: 579 case RTK_HWREV_8169_8110SC:
580 sc->sc_quirk |= RTKQ_MACLDPS; 580 sc->sc_quirk |= RTKQ_MACLDPS;
581 break; 581 break;
582 case RTK_HWREV_8168_SPIN1: 582 case RTK_HWREV_8168_SPIN1:
583 case RTK_HWREV_8168_SPIN2: 583 case RTK_HWREV_8168_SPIN2:
584 case RTK_HWREV_8168_SPIN3: 584 case RTK_HWREV_8168_SPIN3:
585 sc->sc_quirk |= RTKQ_MACSTAT; 585 sc->sc_quirk |= RTKQ_MACSTAT;
586 break; 586 break;
587 case RTK_HWREV_8168C: 587 case RTK_HWREV_8168C:
588 case RTK_HWREV_8168C_SPIN2: 588 case RTK_HWREV_8168C_SPIN2:
589 case RTK_HWREV_8168CP: 589 case RTK_HWREV_8168CP:
590 case RTK_HWREV_8168D: 590 case RTK_HWREV_8168D:
591 case RTK_HWREV_8168DP: 591 case RTK_HWREV_8168DP:
592 sc->sc_quirk |= RTKQ_DESCV2 | RTKQ_NOEECMD | 592 sc->sc_quirk |= RTKQ_DESCV2 | RTKQ_NOEECMD |
593 RTKQ_MACSTAT | RTKQ_CMDSTOP; 593 RTKQ_MACSTAT | RTKQ_CMDSTOP;
594 /* 594 /*
595 * From FreeBSD driver: 595 * From FreeBSD driver:
596 * 596 *
597 * These (8168/8111) controllers support jumbo frame 597 * These (8168/8111) controllers support jumbo frame
598 * but it seems that enabling it requires touching 598 * but it seems that enabling it requires touching
599 * additional magic registers. Depending on MAC 599 * additional magic registers. Depending on MAC
600 * revisions some controllers need to disable 600 * revisions some controllers need to disable
601 * checksum offload. So disable jumbo frame until 601 * checksum offload. So disable jumbo frame until
602 * I have better idea what it really requires to 602 * I have better idea what it really requires to
603 * make it support. 603 * make it support.
604 * RTL8168C/CP : supports up to 6KB jumbo frame. 604 * RTL8168C/CP : supports up to 6KB jumbo frame.
605 * RTL8111C/CP : supports up to 9KB jumbo frame. 605 * RTL8111C/CP : supports up to 9KB jumbo frame.
606 */ 606 */
607 sc->sc_quirk |= RTKQ_NOJUMBO; 607 sc->sc_quirk |= RTKQ_NOJUMBO;
608 break; 608 break;
609 case RTK_HWREV_8168E: 609 case RTK_HWREV_8168E:
610 case RTK_HWREV_8168H: 
611 case RTK_HWREV_8168H_SPIN1: 610 case RTK_HWREV_8168H_SPIN1:
612 sc->sc_quirk |= RTKQ_DESCV2 | RTKQ_NOEECMD | 611 sc->sc_quirk |= RTKQ_DESCV2 | RTKQ_NOEECMD |
613 RTKQ_MACSTAT | RTKQ_CMDSTOP | RTKQ_PHYWAKE_PM | 612 RTKQ_MACSTAT | RTKQ_CMDSTOP | RTKQ_PHYWAKE_PM |
614 RTKQ_NOJUMBO; 613 RTKQ_NOJUMBO;
615 break; 614 break;
 615 case RTK_HWREV_8168H:
 616 case RTK_HWREV_8168FP:
 617 sc->sc_quirk |= RTKQ_DESCV2 | RTKQ_NOEECMD |
 618 RTKQ_MACSTAT | RTKQ_CMDSTOP | RTKQ_PHYWAKE_PM |
 619 RTKQ_NOJUMBO | RTKQ_RXDV_GATED | RTKQ_TXRXEN_LATER;
 620 break;
616 case RTK_HWREV_8168E_VL: 621 case RTK_HWREV_8168E_VL:
617 case RTK_HWREV_8168F: 622 case RTK_HWREV_8168F:
 623 case RTK_HWREV_8411:
618 sc->sc_quirk |= RTKQ_DESCV2 | RTKQ_NOEECMD | 624 sc->sc_quirk |= RTKQ_DESCV2 | RTKQ_NOEECMD |
619 RTKQ_MACSTAT | RTKQ_CMDSTOP | RTKQ_NOJUMBO; 625 RTKQ_MACSTAT | RTKQ_CMDSTOP | RTKQ_NOJUMBO;
620 break; 626 break;
 627 case RTK_HWREV_8168EP:
621 case RTK_HWREV_8168G: 628 case RTK_HWREV_8168G:
622 case RTK_HWREV_8168G_SPIN1: 629 case RTK_HWREV_8168G_SPIN1:
623 case RTK_HWREV_8168G_SPIN2: 630 case RTK_HWREV_8168G_SPIN2:
624 case RTK_HWREV_8168G_SPIN4: 631 case RTK_HWREV_8168G_SPIN4:
625 sc->sc_quirk |= RTKQ_DESCV2 | RTKQ_NOEECMD | 632 sc->sc_quirk |= RTKQ_DESCV2 | RTKQ_NOEECMD |
626 RTKQ_MACSTAT | RTKQ_CMDSTOP | RTKQ_NOJUMBO |  633 RTKQ_MACSTAT | RTKQ_CMDSTOP | RTKQ_NOJUMBO |
627 RTKQ_RXDV_GATED; 634 RTKQ_RXDV_GATED;
628 break; 635 break;
629 case RTK_HWREV_8100E: 636 case RTK_HWREV_8100E:
630 case RTK_HWREV_8100E_SPIN2: 637 case RTK_HWREV_8100E_SPIN2:
631 case RTK_HWREV_8101E: 638 case RTK_HWREV_8101E:
632 sc->sc_quirk |= RTKQ_NOJUMBO; 639 sc->sc_quirk |= RTKQ_NOJUMBO;
633 break; 640 break;
634 case RTK_HWREV_8102E: 641 case RTK_HWREV_8102E:
635 case RTK_HWREV_8102EL: 642 case RTK_HWREV_8102EL:
636 case RTK_HWREV_8103E: 643 case RTK_HWREV_8102EL_SPIN1:
637 sc->sc_quirk |= RTKQ_DESCV2 | RTKQ_NOEECMD | 644 sc->sc_quirk |= RTKQ_DESCV2 | RTKQ_NOEECMD |
638 RTKQ_MACSTAT | RTKQ_CMDSTOP | RTKQ_NOJUMBO; 645 RTKQ_MACSTAT | RTKQ_CMDSTOP | RTKQ_NOJUMBO;
639 break; 646 break;
 647 case RTK_HWREV_8103E:
 648 sc->sc_quirk |= RTKQ_DESCV2 | RTKQ_NOEECMD |
 649 RTKQ_MACSTAT | RTKQ_CMDSTOP;
 650 break;
 651 case RTK_HWREV_8401E:
 652 case RTK_HWREV_8105E:
 653 case RTK_HWREV_8105E_SPIN1:
 654 case RTK_HWREV_8106E:
 655 sc->sc_quirk |= RTKQ_PHYWAKE_PM |
 656 RTKQ_DESCV2 | RTKQ_NOEECMD | RTKQ_MACSTAT |
 657 RTKQ_CMDSTOP;
 658 break;
 659 case RTK_HWREV_8402:
 660 sc->sc_quirk |= RTKQ_PHYWAKE_PM |
 661 RTKQ_DESCV2 | RTKQ_NOEECMD | RTKQ_MACSTAT |
 662 RTKQ_CMDSTOP; /* CMDSTOP_WAIT_TXQ */
 663 break;
640 default: 664 default:
641 aprint_normal_dev(sc->sc_dev, 665 aprint_normal_dev(sc->sc_dev,
642 "Unknown revision (0x%08x)\n", hwrev); 666 "Unknown revision (0x%08x)\n", hwrev);
643 /* assume the latest features */ 667 /* assume the latest features */
644 sc->sc_quirk |= RTKQ_DESCV2 | RTKQ_NOEECMD; 668 sc->sc_quirk |= RTKQ_DESCV2 | RTKQ_NOEECMD;
645 sc->sc_quirk |= RTKQ_NOJUMBO; 669 sc->sc_quirk |= RTKQ_NOJUMBO;
646 } 670 }
647 671
648 /* Set RX length mask */ 672 /* Set RX length mask */
649 sc->re_rxlenmask = RE_RDESC_STAT_GFRAGLEN; 673 sc->re_rxlenmask = RE_RDESC_STAT_GFRAGLEN;
650 sc->re_ldata.re_tx_desc_cnt = RE_TX_DESC_CNT_8169; 674 sc->re_ldata.re_tx_desc_cnt = RE_TX_DESC_CNT_8169;
651 } else { 675 } else {
652 sc->sc_quirk |= RTKQ_NOJUMBO; 676 sc->sc_quirk |= RTKQ_NOJUMBO;
653 677
654 /* Set RX length mask */ 678 /* Set RX length mask */
655 sc->re_rxlenmask = RE_RDESC_STAT_FRAGLEN; 679 sc->re_rxlenmask = RE_RDESC_STAT_FRAGLEN;
656 sc->re_ldata.re_tx_desc_cnt = RE_TX_DESC_CNT_8139; 680 sc->re_ldata.re_tx_desc_cnt = RE_TX_DESC_CNT_8139;
657 } 681 }
658 682
659 /* Reset the adapter. */ 683 /* Reset the adapter. */
660 re_reset(sc); 684 re_reset(sc);
661 685
662 /* 686 /*
663 * RTL81x9 chips automatically read EEPROM to init MAC address, 687 * RTL81x9 chips automatically read EEPROM to init MAC address,
664 * and some NAS override its MAC address per own configuration, 688 * and some NAS override its MAC address per own configuration,
665 * so no need to explicitely read EEPROM and set ID registers. 689 * so no need to explicitely read EEPROM and set ID registers.
666 */ 690 */
667#ifdef RE_USE_EECMD 691#ifdef RE_USE_EECMD
668 if ((sc->sc_quirk & RTKQ_NOEECMD) != 0) { 692 if ((sc->sc_quirk & RTKQ_NOEECMD) != 0) {
669 /* 693 /*
670 * Get station address from ID registers. 694 * Get station address from ID registers.
671 */ 695 */
672 for (i = 0; i < ETHER_ADDR_LEN; i++) 696 for (i = 0; i < ETHER_ADDR_LEN; i++)
673 eaddr[i] = CSR_READ_1(sc, RTK_IDR0 + i); 697 eaddr[i] = CSR_READ_1(sc, RTK_IDR0 + i);
674 } else { 698 } else {
675 uint16_t val; 699 uint16_t val;
676 int addr_len; 700 int addr_len;
677 701
678 /* 702 /*
679 * Get station address from the EEPROM. 703 * Get station address from the EEPROM.
680 */ 704 */
681 if (rtk_read_eeprom(sc, RTK_EE_ID, RTK_EEADDR_LEN1) == 0x8129) 705 if (rtk_read_eeprom(sc, RTK_EE_ID, RTK_EEADDR_LEN1) == 0x8129)
682 addr_len = RTK_EEADDR_LEN1; 706 addr_len = RTK_EEADDR_LEN1;
683 else 707 else
684 addr_len = RTK_EEADDR_LEN0; 708 addr_len = RTK_EEADDR_LEN0;
685 709
686 /* 710 /*
687 * Get station address from the EEPROM. 711 * Get station address from the EEPROM.
688 */ 712 */
689 for (i = 0; i < ETHER_ADDR_LEN / 2; i++) { 713 for (i = 0; i < ETHER_ADDR_LEN / 2; i++) {
690 val = rtk_read_eeprom(sc, RTK_EE_EADDR0 + i, addr_len); 714 val = rtk_read_eeprom(sc, RTK_EE_EADDR0 + i, addr_len);
691 eaddr[(i * 2) + 0] = val & 0xff; 715 eaddr[(i * 2) + 0] = val & 0xff;
692 eaddr[(i * 2) + 1] = val >> 8; 716 eaddr[(i * 2) + 1] = val >> 8;
693 } 717 }
694 } 718 }
695#else 719#else
696 /* 720 /*
697 * Get station address from ID registers. 721 * Get station address from ID registers.
698 */ 722 */
699 for (i = 0; i < ETHER_ADDR_LEN; i++) 723 for (i = 0; i < ETHER_ADDR_LEN; i++)
700 eaddr[i] = CSR_READ_1(sc, RTK_IDR0 + i); 724 eaddr[i] = CSR_READ_1(sc, RTK_IDR0 + i);
701#endif 725#endif
702 726
703 /* Take PHY out of power down mode. */ 727 /* Take PHY out of power down mode. */
704 if ((sc->sc_quirk & RTKQ_PHYWAKE_PM) != 0) 728 if ((sc->sc_quirk & RTKQ_PHYWAKE_PM) != 0)
705 CSR_WRITE_1(sc, RTK_PMCH, CSR_READ_1(sc, RTK_PMCH) | 0x80); 729 CSR_WRITE_1(sc, RTK_PMCH, CSR_READ_1(sc, RTK_PMCH) | 0x80);
706 730
707 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n", 731 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
708 ether_sprintf(eaddr)); 732 ether_sprintf(eaddr));
709 733
710 if (sc->re_ldata.re_tx_desc_cnt > 734 if (sc->re_ldata.re_tx_desc_cnt >
711 PAGE_SIZE / sizeof(struct re_desc)) { 735 PAGE_SIZE / sizeof(struct re_desc)) {
712 sc->re_ldata.re_tx_desc_cnt = 736 sc->re_ldata.re_tx_desc_cnt =
713 PAGE_SIZE / sizeof(struct re_desc); 737 PAGE_SIZE / sizeof(struct re_desc);
714 } 738 }
715 739
716 aprint_verbose_dev(sc->sc_dev, "using %d tx descriptors\n", 740 aprint_verbose_dev(sc->sc_dev, "using %d tx descriptors\n",
717 sc->re_ldata.re_tx_desc_cnt); 741 sc->re_ldata.re_tx_desc_cnt);
718 KASSERT(RE_NEXT_TX_DESC(sc, RE_TX_DESC_CNT(sc) - 1) == 0); 742 KASSERT(RE_NEXT_TX_DESC(sc, RE_TX_DESC_CNT(sc) - 1) == 0);
719 743
720 /* Allocate DMA'able memory for the TX ring */ 744 /* Allocate DMA'able memory for the TX ring */
721 if ((error = bus_dmamem_alloc(sc->sc_dmat, RE_TX_LIST_SZ(sc), 745 if ((error = bus_dmamem_alloc(sc->sc_dmat, RE_TX_LIST_SZ(sc),
722 RE_RING_ALIGN, 0, &sc->re_ldata.re_tx_listseg, 1, 746 RE_RING_ALIGN, 0, &sc->re_ldata.re_tx_listseg, 1,
723 &sc->re_ldata.re_tx_listnseg, BUS_DMA_NOWAIT)) != 0) { 747 &sc->re_ldata.re_tx_listnseg, BUS_DMA_NOWAIT)) != 0) {
724 aprint_error_dev(sc->sc_dev, 748 aprint_error_dev(sc->sc_dev,
725 "can't allocate tx listseg, error = %d\n", error); 749 "can't allocate tx listseg, error = %d\n", error);
726 goto fail_0; 750 goto fail_0;
727 } 751 }
728 752
729 /* Load the map for the TX ring. */ 753 /* Load the map for the TX ring. */
730 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->re_ldata.re_tx_listseg, 754 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->re_ldata.re_tx_listseg,
731 sc->re_ldata.re_tx_listnseg, RE_TX_LIST_SZ(sc), 755 sc->re_ldata.re_tx_listnseg, RE_TX_LIST_SZ(sc),
732 (void **)&sc->re_ldata.re_tx_list, 756 (void **)&sc->re_ldata.re_tx_list,
733 BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) { 757 BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) {
734 aprint_error_dev(sc->sc_dev, 758 aprint_error_dev(sc->sc_dev,
735 "can't map tx list, error = %d\n", error); 759 "can't map tx list, error = %d\n", error);
736 goto fail_1; 760 goto fail_1;
737 } 761 }
738 memset(sc->re_ldata.re_tx_list, 0, RE_TX_LIST_SZ(sc)); 762 memset(sc->re_ldata.re_tx_list, 0, RE_TX_LIST_SZ(sc));
739 763
740 if ((error = bus_dmamap_create(sc->sc_dmat, RE_TX_LIST_SZ(sc), 1, 764 if ((error = bus_dmamap_create(sc->sc_dmat, RE_TX_LIST_SZ(sc), 1,
741 RE_TX_LIST_SZ(sc), 0, 0, 765 RE_TX_LIST_SZ(sc), 0, 0,
742 &sc->re_ldata.re_tx_list_map)) != 0) { 766 &sc->re_ldata.re_tx_list_map)) != 0) {
743 aprint_error_dev(sc->sc_dev, 767 aprint_error_dev(sc->sc_dev,
744 "can't create tx list map, error = %d\n", error); 768 "can't create tx list map, error = %d\n", error);
745 goto fail_2; 769 goto fail_2;
746 } 770 }
747 771
748 772
749 if ((error = bus_dmamap_load(sc->sc_dmat, 773 if ((error = bus_dmamap_load(sc->sc_dmat,
750 sc->re_ldata.re_tx_list_map, sc->re_ldata.re_tx_list, 774 sc->re_ldata.re_tx_list_map, sc->re_ldata.re_tx_list,
751 RE_TX_LIST_SZ(sc), NULL, BUS_DMA_NOWAIT)) != 0) { 775 RE_TX_LIST_SZ(sc), NULL, BUS_DMA_NOWAIT)) != 0) {
752 aprint_error_dev(sc->sc_dev, 776 aprint_error_dev(sc->sc_dev,
753 "can't load tx list, error = %d\n", error); 777 "can't load tx list, error = %d\n", error);
754 goto fail_3; 778 goto fail_3;
755 } 779 }
756 780
757 /* Create DMA maps for TX buffers */ 781 /* Create DMA maps for TX buffers */
758 for (i = 0; i < RE_TX_QLEN; i++) { 782 for (i = 0; i < RE_TX_QLEN; i++) {
759 error = bus_dmamap_create(sc->sc_dmat, 783 error = bus_dmamap_create(sc->sc_dmat,
760 round_page(IP_MAXPACKET), 784 round_page(IP_MAXPACKET),
761 RE_TX_DESC_CNT(sc), RE_TDESC_CMD_FRAGLEN, 785 RE_TX_DESC_CNT(sc), RE_TDESC_CMD_FRAGLEN,
762 0, 0, &sc->re_ldata.re_txq[i].txq_dmamap); 786 0, 0, &sc->re_ldata.re_txq[i].txq_dmamap);
763 if (error) { 787 if (error) {
764 aprint_error_dev(sc->sc_dev, 788 aprint_error_dev(sc->sc_dev,
765 "can't create DMA map for TX\n"); 789 "can't create DMA map for TX\n");
766 goto fail_4; 790 goto fail_4;
767 } 791 }
768 } 792 }
769 793
770 /* Allocate DMA'able memory for the RX ring */ 794 /* Allocate DMA'able memory for the RX ring */
771 /* XXX see also a comment about RE_RX_DMAMEM_SZ in rtl81x9var.h */ 795 /* XXX see also a comment about RE_RX_DMAMEM_SZ in rtl81x9var.h */
772 if ((error = bus_dmamem_alloc(sc->sc_dmat, 796 if ((error = bus_dmamem_alloc(sc->sc_dmat,
773 RE_RX_DMAMEM_SZ, RE_RING_ALIGN, 0, &sc->re_ldata.re_rx_listseg, 1, 797 RE_RX_DMAMEM_SZ, RE_RING_ALIGN, 0, &sc->re_ldata.re_rx_listseg, 1,
774 &sc->re_ldata.re_rx_listnseg, BUS_DMA_NOWAIT)) != 0) { 798 &sc->re_ldata.re_rx_listnseg, BUS_DMA_NOWAIT)) != 0) {
775 aprint_error_dev(sc->sc_dev, 799 aprint_error_dev(sc->sc_dev,
776 "can't allocate rx listseg, error = %d\n", error); 800 "can't allocate rx listseg, error = %d\n", error);
777 goto fail_4; 801 goto fail_4;
778 } 802 }
779 803
780 /* Load the map for the RX ring. */ 804 /* Load the map for the RX ring. */
781 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->re_ldata.re_rx_listseg, 805 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->re_ldata.re_rx_listseg,
782 sc->re_ldata.re_rx_listnseg, RE_RX_DMAMEM_SZ, 806 sc->re_ldata.re_rx_listnseg, RE_RX_DMAMEM_SZ,
783 (void **)&sc->re_ldata.re_rx_list, 807 (void **)&sc->re_ldata.re_rx_list,
784 BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) { 808 BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) {
785 aprint_error_dev(sc->sc_dev, 809 aprint_error_dev(sc->sc_dev,
786 "can't map rx list, error = %d\n", error); 810 "can't map rx list, error = %d\n", error);
787 goto fail_5; 811 goto fail_5;
788 } 812 }
789 memset(sc->re_ldata.re_rx_list, 0, RE_RX_DMAMEM_SZ); 813 memset(sc->re_ldata.re_rx_list, 0, RE_RX_DMAMEM_SZ);
790 814
791 if ((error = bus_dmamap_create(sc->sc_dmat, 815 if ((error = bus_dmamap_create(sc->sc_dmat,
792 RE_RX_DMAMEM_SZ, 1, RE_RX_DMAMEM_SZ, 0, 0, 816 RE_RX_DMAMEM_SZ, 1, RE_RX_DMAMEM_SZ, 0, 0,
793 &sc->re_ldata.re_rx_list_map)) != 0) { 817 &sc->re_ldata.re_rx_list_map)) != 0) {
794 aprint_error_dev(sc->sc_dev, 818 aprint_error_dev(sc->sc_dev,
795 "can't create rx list map, error = %d\n", error); 819 "can't create rx list map, error = %d\n", error);
796 goto fail_6; 820 goto fail_6;
797 } 821 }
798 822
799 if ((error = bus_dmamap_load(sc->sc_dmat, 823 if ((error = bus_dmamap_load(sc->sc_dmat,
800 sc->re_ldata.re_rx_list_map, sc->re_ldata.re_rx_list, 824 sc->re_ldata.re_rx_list_map, sc->re_ldata.re_rx_list,
801 RE_RX_DMAMEM_SZ, NULL, BUS_DMA_NOWAIT)) != 0) { 825 RE_RX_DMAMEM_SZ, NULL, BUS_DMA_NOWAIT)) != 0) {
802 aprint_error_dev(sc->sc_dev, 826 aprint_error_dev(sc->sc_dev,
803 "can't load rx list, error = %d\n", error); 827 "can't load rx list, error = %d\n", error);
804 goto fail_7; 828 goto fail_7;
805 } 829 }
806 830
807 /* Create DMA maps for RX buffers */ 831 /* Create DMA maps for RX buffers */
808 for (i = 0; i < RE_RX_DESC_CNT; i++) { 832 for (i = 0; i < RE_RX_DESC_CNT; i++) {
809 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 833 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
810 0, 0, &sc->re_ldata.re_rxsoft[i].rxs_dmamap); 834 0, 0, &sc->re_ldata.re_rxsoft[i].rxs_dmamap);
811 if (error) { 835 if (error) {
812 aprint_error_dev(sc->sc_dev, 836 aprint_error_dev(sc->sc_dev,
813 "can't create DMA map for RX\n"); 837 "can't create DMA map for RX\n");
814 goto fail_8; 838 goto fail_8;
815 } 839 }
816 } 840 }
817 841
818 /* 842 /*
819 * Record interface as attached. From here, we should not fail. 843 * Record interface as attached. From here, we should not fail.
820 */ 844 */
821 sc->sc_flags |= RTK_ATTACHED; 845 sc->sc_flags |= RTK_ATTACHED;
822 846
823 ifp = &sc->ethercom.ec_if; 847 ifp = &sc->ethercom.ec_if;
824 ifp->if_softc = sc; 848 ifp->if_softc = sc;
825 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); 849 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
826 ifp->if_mtu = ETHERMTU; 850 ifp->if_mtu = ETHERMTU;
827 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 851 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
828 ifp->if_ioctl = re_ioctl; 852 ifp->if_ioctl = re_ioctl;
829 sc->ethercom.ec_capabilities |= 853 sc->ethercom.ec_capabilities |=
830 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING; 854 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
831 ifp->if_start = re_start; 855 ifp->if_start = re_start;
832 ifp->if_stop = re_stop; 856 ifp->if_stop = re_stop;
833 857
834 /* 858 /*
835 * IFCAP_CSUM_IPv4_Tx on re(4) is broken for small packets, 859 * IFCAP_CSUM_IPv4_Tx on re(4) is broken for small packets,
836 * so we have a workaround to handle the bug by padding 860 * so we have a workaround to handle the bug by padding
837 * such packets manually. 861 * such packets manually.
838 */ 862 */
839 ifp->if_capabilities |= 863 ifp->if_capabilities |=
840 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 864 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
841 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 865 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
842 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx | 866 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
843 IFCAP_TSOv4; 867 IFCAP_TSOv4;
844 868
845 ifp->if_watchdog = re_watchdog; 869 ifp->if_watchdog = re_watchdog;
846 ifp->if_init = re_init; 870 ifp->if_init = re_init;
847 ifp->if_snd.ifq_maxlen = RE_IFQ_MAXLEN; 871 ifp->if_snd.ifq_maxlen = RE_IFQ_MAXLEN;
848 ifp->if_capenable = ifp->if_capabilities; 872 ifp->if_capenable = ifp->if_capabilities;
849 IFQ_SET_READY(&ifp->if_snd); 873 IFQ_SET_READY(&ifp->if_snd);
850 874
851 callout_init(&sc->rtk_tick_ch, 0); 875 callout_init(&sc->rtk_tick_ch, 0);
852 876
853 /* Do MII setup */ 877 /* Do MII setup */
854 mii->mii_ifp = ifp; 878 mii->mii_ifp = ifp;
855 mii->mii_readreg = re_miibus_readreg; 879 mii->mii_readreg = re_miibus_readreg;
856 mii->mii_writereg = re_miibus_writereg; 880 mii->mii_writereg = re_miibus_writereg;
857 mii->mii_statchg = re_miibus_statchg; 881 mii->mii_statchg = re_miibus_statchg;
858 sc->ethercom.ec_mii = mii; 882 sc->ethercom.ec_mii = mii;
859 ifmedia_init(&mii->mii_media, IFM_IMASK, ether_mediachange, 883 ifmedia_init(&mii->mii_media, IFM_IMASK, ether_mediachange,
860 ether_mediastatus); 884 ether_mediastatus);
861 mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, 885 mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
862 MII_OFFSET_ANY, 0); 886 MII_OFFSET_ANY, 0);
863 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO); 887 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
864 888
865 /* 889 /*
866 * Call MI attach routine. 890 * Call MI attach routine.
867 */ 891 */
868 if_attach(ifp); 892 if_attach(ifp);
869 if_deferred_start_init(ifp, NULL); 893 if_deferred_start_init(ifp, NULL);
870 ether_ifattach(ifp, eaddr); 894 ether_ifattach(ifp, eaddr);
871 895
872 rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev), 896 rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
873 RND_TYPE_NET, RND_FLAG_DEFAULT); 897 RND_TYPE_NET, RND_FLAG_DEFAULT);
874 898
875 if (pmf_device_register(sc->sc_dev, NULL, NULL)) 899 if (pmf_device_register(sc->sc_dev, NULL, NULL))
876 pmf_class_network_register(sc->sc_dev, ifp); 900 pmf_class_network_register(sc->sc_dev, ifp);
877 else 901 else
878 aprint_error_dev(sc->sc_dev, 902 aprint_error_dev(sc->sc_dev,
879 "couldn't establish power handler\n"); 903 "couldn't establish power handler\n");
880 904
881 return; 905 return;
882 906
883 fail_8: 907 fail_8:
884 /* Destroy DMA maps for RX buffers. */ 908 /* Destroy DMA maps for RX buffers. */
885 for (i = 0; i < RE_RX_DESC_CNT; i++) 909 for (i = 0; i < RE_RX_DESC_CNT; i++)
886 if (sc->re_ldata.re_rxsoft[i].rxs_dmamap != NULL) 910 if (sc->re_ldata.re_rxsoft[i].rxs_dmamap != NULL)
887 bus_dmamap_destroy(sc->sc_dmat, 911 bus_dmamap_destroy(sc->sc_dmat,
888 sc->re_ldata.re_rxsoft[i].rxs_dmamap); 912 sc->re_ldata.re_rxsoft[i].rxs_dmamap);
889 913
890 /* Free DMA'able memory for the RX ring. */ 914 /* Free DMA'able memory for the RX ring. */
891 bus_dmamap_unload(sc->sc_dmat, sc->re_ldata.re_rx_list_map); 915 bus_dmamap_unload(sc->sc_dmat, sc->re_ldata.re_rx_list_map);
892 fail_7: 916 fail_7:
893 bus_dmamap_destroy(sc->sc_dmat, sc->re_ldata.re_rx_list_map); 917 bus_dmamap_destroy(sc->sc_dmat, sc->re_ldata.re_rx_list_map);
894 fail_6: 918 fail_6:
895 bus_dmamem_unmap(sc->sc_dmat, 919 bus_dmamem_unmap(sc->sc_dmat,
896 (void *)sc->re_ldata.re_rx_list, RE_RX_DMAMEM_SZ); 920 (void *)sc->re_ldata.re_rx_list, RE_RX_DMAMEM_SZ);
897 fail_5: 921 fail_5:
898 bus_dmamem_free(sc->sc_dmat, 922 bus_dmamem_free(sc->sc_dmat,
899 &sc->re_ldata.re_rx_listseg, sc->re_ldata.re_rx_listnseg); 923 &sc->re_ldata.re_rx_listseg, sc->re_ldata.re_rx_listnseg);
900 924
901 fail_4: 925 fail_4:
902 /* Destroy DMA maps for TX buffers. */ 926 /* Destroy DMA maps for TX buffers. */
903 for (i = 0; i < RE_TX_QLEN; i++) 927 for (i = 0; i < RE_TX_QLEN; i++)
904 if (sc->re_ldata.re_txq[i].txq_dmamap != NULL) 928 if (sc->re_ldata.re_txq[i].txq_dmamap != NULL)
905 bus_dmamap_destroy(sc->sc_dmat, 929 bus_dmamap_destroy(sc->sc_dmat,
906 sc->re_ldata.re_txq[i].txq_dmamap); 930 sc->re_ldata.re_txq[i].txq_dmamap);
907 931
908 /* Free DMA'able memory for the TX ring. */ 932 /* Free DMA'able memory for the TX ring. */
909 bus_dmamap_unload(sc->sc_dmat, sc->re_ldata.re_tx_list_map); 933 bus_dmamap_unload(sc->sc_dmat, sc->re_ldata.re_tx_list_map);
910 fail_3: 934 fail_3:
911 bus_dmamap_destroy(sc->sc_dmat, sc->re_ldata.re_tx_list_map); 935 bus_dmamap_destroy(sc->sc_dmat, sc->re_ldata.re_tx_list_map);
912 fail_2: 936 fail_2:
913 bus_dmamem_unmap(sc->sc_dmat, 937 bus_dmamem_unmap(sc->sc_dmat,
914 (void *)sc->re_ldata.re_tx_list, RE_TX_LIST_SZ(sc)); 938 (void *)sc->re_ldata.re_tx_list, RE_TX_LIST_SZ(sc));
915 fail_1: 939 fail_1:
916 bus_dmamem_free(sc->sc_dmat, 940 bus_dmamem_free(sc->sc_dmat,
917 &sc->re_ldata.re_tx_listseg, sc->re_ldata.re_tx_listnseg); 941 &sc->re_ldata.re_tx_listseg, sc->re_ldata.re_tx_listnseg);
918 fail_0: 942 fail_0:
919 return; 943 return;
920} 944}
921 945
922 946
923/* 947/*
924 * re_activate: 948 * re_activate:
925 * Handle device activation/deactivation requests. 949 * Handle device activation/deactivation requests.
926 */ 950 */
927int 951int
928re_activate(device_t self, enum devact act) 952re_activate(device_t self, enum devact act)
929{ 953{
930 struct rtk_softc *sc = device_private(self); 954 struct rtk_softc *sc = device_private(self);
931 955
932 switch (act) { 956 switch (act) {
933 case DVACT_DEACTIVATE: 957 case DVACT_DEACTIVATE:
934 if_deactivate(&sc->ethercom.ec_if); 958 if_deactivate(&sc->ethercom.ec_if);
935 return 0; 959 return 0;
936 default: 960 default:
937 return EOPNOTSUPP; 961 return EOPNOTSUPP;
938 } 962 }
939} 963}
940 964
941/* 965/*
942 * re_detach: 966 * re_detach:
943 * Detach a rtk interface. 967 * Detach a rtk interface.
944 */ 968 */
945int 969int
946re_detach(struct rtk_softc *sc) 970re_detach(struct rtk_softc *sc)
947{ 971{
948 struct ifnet *ifp = &sc->ethercom.ec_if; 972 struct ifnet *ifp = &sc->ethercom.ec_if;
949 int i; 973 int i;
950 974
951 /* 975 /*
952 * Succeed now if there isn't any work to do. 976 * Succeed now if there isn't any work to do.
953 */ 977 */
954 if ((sc->sc_flags & RTK_ATTACHED) == 0) 978 if ((sc->sc_flags & RTK_ATTACHED) == 0)
955 return 0; 979 return 0;
956 980
957 /* Unhook our tick handler. */ 981 /* Unhook our tick handler. */
958 callout_stop(&sc->rtk_tick_ch); 982 callout_stop(&sc->rtk_tick_ch);
959 983
960 /* Detach all PHYs. */ 984 /* Detach all PHYs. */
961 mii_detach(&sc->mii, MII_PHY_ANY, MII_OFFSET_ANY); 985 mii_detach(&sc->mii, MII_PHY_ANY, MII_OFFSET_ANY);
962 986
963 /* Delete all remaining media. */ 987 /* Delete all remaining media. */
964 ifmedia_delete_instance(&sc->mii.mii_media, IFM_INST_ANY); 988 ifmedia_delete_instance(&sc->mii.mii_media, IFM_INST_ANY);
965 989
966 rnd_detach_source(&sc->rnd_source); 990 rnd_detach_source(&sc->rnd_source);
967 ether_ifdetach(ifp); 991 ether_ifdetach(ifp);
968 if_detach(ifp); 992 if_detach(ifp);
969 993
970 /* Destroy DMA maps for RX buffers. */ 994 /* Destroy DMA maps for RX buffers. */
971 for (i = 0; i < RE_RX_DESC_CNT; i++) 995 for (i = 0; i < RE_RX_DESC_CNT; i++)
972 if (sc->re_ldata.re_rxsoft[i].rxs_dmamap != NULL) 996 if (sc->re_ldata.re_rxsoft[i].rxs_dmamap != NULL)
973 bus_dmamap_destroy(sc->sc_dmat, 997 bus_dmamap_destroy(sc->sc_dmat,
974 sc->re_ldata.re_rxsoft[i].rxs_dmamap); 998 sc->re_ldata.re_rxsoft[i].rxs_dmamap);
975 999
976 /* Free DMA'able memory for the RX ring. */ 1000 /* Free DMA'able memory for the RX ring. */
977 bus_dmamap_unload(sc->sc_dmat, sc->re_ldata.re_rx_list_map); 1001 bus_dmamap_unload(sc->sc_dmat, sc->re_ldata.re_rx_list_map);
978 bus_dmamap_destroy(sc->sc_dmat, sc->re_ldata.re_rx_list_map); 1002 bus_dmamap_destroy(sc->sc_dmat, sc->re_ldata.re_rx_list_map);
979 bus_dmamem_unmap(sc->sc_dmat, 1003 bus_dmamem_unmap(sc->sc_dmat,
980 (void *)sc->re_ldata.re_rx_list, RE_RX_DMAMEM_SZ); 1004 (void *)sc->re_ldata.re_rx_list, RE_RX_DMAMEM_SZ);
981 bus_dmamem_free(sc->sc_dmat, 1005 bus_dmamem_free(sc->sc_dmat,
982 &sc->re_ldata.re_rx_listseg, sc->re_ldata.re_rx_listnseg); 1006 &sc->re_ldata.re_rx_listseg, sc->re_ldata.re_rx_listnseg);
983 1007
984 /* Destroy DMA maps for TX buffers. */ 1008 /* Destroy DMA maps for TX buffers. */
985 for (i = 0; i < RE_TX_QLEN; i++) 1009 for (i = 0; i < RE_TX_QLEN; i++)
986 if (sc->re_ldata.re_txq[i].txq_dmamap != NULL) 1010 if (sc->re_ldata.re_txq[i].txq_dmamap != NULL)
987 bus_dmamap_destroy(sc->sc_dmat, 1011 bus_dmamap_destroy(sc->sc_dmat,
988 sc->re_ldata.re_txq[i].txq_dmamap); 1012 sc->re_ldata.re_txq[i].txq_dmamap);
989 1013
990 /* Free DMA'able memory for the TX ring. */ 1014 /* Free DMA'able memory for the TX ring. */
991 bus_dmamap_unload(sc->sc_dmat, sc->re_ldata.re_tx_list_map); 1015 bus_dmamap_unload(sc->sc_dmat, sc->re_ldata.re_tx_list_map);
992 bus_dmamap_destroy(sc->sc_dmat, sc->re_ldata.re_tx_list_map); 1016 bus_dmamap_destroy(sc->sc_dmat, sc->re_ldata.re_tx_list_map);
993 bus_dmamem_unmap(sc->sc_dmat, 1017 bus_dmamem_unmap(sc->sc_dmat,
994 (void *)sc->re_ldata.re_tx_list, RE_TX_LIST_SZ(sc)); 1018 (void *)sc->re_ldata.re_tx_list, RE_TX_LIST_SZ(sc));
995 bus_dmamem_free(sc->sc_dmat, 1019 bus_dmamem_free(sc->sc_dmat,
996 &sc->re_ldata.re_tx_listseg, sc->re_ldata.re_tx_listnseg); 1020 &sc->re_ldata.re_tx_listseg, sc->re_ldata.re_tx_listnseg);
997 1021
998 pmf_device_deregister(sc->sc_dev); 1022 pmf_device_deregister(sc->sc_dev);
999 1023
1000 /* we don't want to run again */ 1024 /* we don't want to run again */
1001 sc->sc_flags &= ~RTK_ATTACHED; 1025 sc->sc_flags &= ~RTK_ATTACHED;
1002 1026
1003 return 0; 1027 return 0;
1004} 1028}
1005 1029
1006/* 1030/*
1007 * re_enable: 1031 * re_enable:
1008 * Enable the RTL81X9 chip. 1032 * Enable the RTL81X9 chip.
1009 */ 1033 */
1010static int 1034static int
1011re_enable(struct rtk_softc *sc) 1035re_enable(struct rtk_softc *sc)
1012{ 1036{
1013 1037
1014 if (RTK_IS_ENABLED(sc) == 0 && sc->sc_enable != NULL) { 1038 if (RTK_IS_ENABLED(sc) == 0 && sc->sc_enable != NULL) {
1015 if ((*sc->sc_enable)(sc) != 0) { 1039 if ((*sc->sc_enable)(sc) != 0) {
1016 printf("%s: device enable failed\n", 1040 printf("%s: device enable failed\n",
1017 device_xname(sc->sc_dev)); 1041 device_xname(sc->sc_dev));
1018 return EIO; 1042 return EIO;
1019 } 1043 }
1020 sc->sc_flags |= RTK_ENABLED; 1044 sc->sc_flags |= RTK_ENABLED;
1021 } 1045 }
1022 return 0; 1046 return 0;
1023} 1047}
1024 1048
1025/* 1049/*
1026 * re_disable: 1050 * re_disable:
1027 * Disable the RTL81X9 chip. 1051 * Disable the RTL81X9 chip.
1028 */ 1052 */
1029static void 1053static void
1030re_disable(struct rtk_softc *sc) 1054re_disable(struct rtk_softc *sc)
1031{ 1055{
1032 1056
1033 if (RTK_IS_ENABLED(sc) && sc->sc_disable != NULL) { 1057 if (RTK_IS_ENABLED(sc) && sc->sc_disable != NULL) {
1034 (*sc->sc_disable)(sc); 1058 (*sc->sc_disable)(sc);
1035 sc->sc_flags &= ~RTK_ENABLED; 1059 sc->sc_flags &= ~RTK_ENABLED;
1036 } 1060 }
1037} 1061}
1038 1062
/*
 * re_newbuf:
 *	Attach an mbuf cluster to RX descriptor 'idx' and hand the
 *	descriptor back to the chip.  If 'm' is NULL a fresh cluster is
 *	allocated; otherwise the caller's mbuf is recycled in place.
 *
 *	Returns 0 on success, ENOBUFS if no mbuf/cluster was available,
 *	or ENOMEM if the DMA map could not be loaded.
 */
static int
re_newbuf(struct rtk_softc *sc, int idx, struct mbuf *m)
{
	struct mbuf *n = NULL;
	bus_dmamap_t map;
	struct re_desc *d;
	struct re_rxsoft *rxs;
	uint32_t cmdstat;
	int error;

	if (m == NULL) {
		/* No recycled mbuf supplied: allocate header + cluster. */
		MGETHDR(n, M_DONTWAIT, MT_DATA);
		if (n == NULL)
			return ENOBUFS;

		MCLGET(n, M_DONTWAIT);
		if ((n->m_flags & M_EXT) == 0) {
			m_freem(n);
			return ENOBUFS;
		}
		m = n;
	} else
		/* Recycling: rewind the data pointer to the cluster start. */
		m->m_data = m->m_ext.ext_buf;

	/*
	 * Initialize mbuf length fields and fixup
	 * alignment so that the frame payload is
	 * longword aligned.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES - RE_ETHER_ALIGN;
	m->m_data += RE_ETHER_ALIGN;

	rxs = &sc->re_ldata.re_rxsoft[idx];
	map = rxs->rxs_dmamap;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);

	if (error)
		goto out;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	d = &sc->re_ldata.re_rx_list[idx];
#ifdef DIAGNOSTIC
	/* The chip must not still own this descriptor. */
	RE_RXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	cmdstat = le32toh(d->re_cmdstat);
	RE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
	if (cmdstat & RE_RDESC_STAT_OWN) {
		panic("%s: tried to map busy RX descriptor",
		    device_xname(sc->sc_dev));
	}
#endif

	rxs->rxs_mbuf = m;

	/*
	 * Two-phase descriptor update: write buffer address and length
	 * first and sync, THEN set the OWN bit and sync again, so the
	 * chip can never observe OWN before the rest of the descriptor
	 * is visible in memory.
	 */
	d->re_vlanctl = 0;
	cmdstat = map->dm_segs[0].ds_len;
	if (idx == (RE_RX_DESC_CNT - 1))
		cmdstat |= RE_RDESC_CMD_EOR;	/* last slot: end-of-ring */
	re_set_bufaddr(d, map->dm_segs[0].ds_addr);
	d->re_cmdstat = htole32(cmdstat);
	RE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	cmdstat |= RE_RDESC_CMD_OWN;
	d->re_cmdstat = htole32(cmdstat);
	RE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	return 0;
 out:
	/* Only free an mbuf we allocated ourselves, never the caller's. */
	if (n != NULL)
		m_freem(n);
	return ENOMEM;
}
1112 1136
1113static int 1137static int
1114re_tx_list_init(struct rtk_softc *sc) 1138re_tx_list_init(struct rtk_softc *sc)
1115{ 1139{
1116 int i; 1140 int i;
1117 1141
1118 memset(sc->re_ldata.re_tx_list, 0, RE_TX_LIST_SZ(sc)); 1142 memset(sc->re_ldata.re_tx_list, 0, RE_TX_LIST_SZ(sc));
1119 for (i = 0; i < RE_TX_QLEN; i++) { 1143 for (i = 0; i < RE_TX_QLEN; i++) {
1120 sc->re_ldata.re_txq[i].txq_mbuf = NULL; 1144 sc->re_ldata.re_txq[i].txq_mbuf = NULL;
1121 } 1145 }
1122 1146
1123 bus_dmamap_sync(sc->sc_dmat, 1147 bus_dmamap_sync(sc->sc_dmat,
1124 sc->re_ldata.re_tx_list_map, 0, 1148 sc->re_ldata.re_tx_list_map, 0,
1125 sc->re_ldata.re_tx_list_map->dm_mapsize, 1149 sc->re_ldata.re_tx_list_map->dm_mapsize,
1126 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1150 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1127 sc->re_ldata.re_txq_prodidx = 0; 1151 sc->re_ldata.re_txq_prodidx = 0;
1128 sc->re_ldata.re_txq_considx = 0; 1152 sc->re_ldata.re_txq_considx = 0;
1129 sc->re_ldata.re_txq_free = RE_TX_QLEN; 1153 sc->re_ldata.re_txq_free = RE_TX_QLEN;
1130 sc->re_ldata.re_tx_free = RE_TX_DESC_CNT(sc); 1154 sc->re_ldata.re_tx_free = RE_TX_DESC_CNT(sc);
1131 sc->re_ldata.re_tx_nextfree = 0; 1155 sc->re_ldata.re_tx_nextfree = 0;
1132 1156
1133 return 0; 1157 return 0;
1134} 1158}
1135 1159
1136static int 1160static int
1137re_rx_list_init(struct rtk_softc *sc) 1161re_rx_list_init(struct rtk_softc *sc)
1138{ 1162{
1139 int i; 1163 int i;
1140 1164
1141 memset(sc->re_ldata.re_rx_list, 0, RE_RX_LIST_SZ); 1165 memset(sc->re_ldata.re_rx_list, 0, RE_RX_LIST_SZ);
1142 1166
1143 for (i = 0; i < RE_RX_DESC_CNT; i++) { 1167 for (i = 0; i < RE_RX_DESC_CNT; i++) {
1144 if (re_newbuf(sc, i, NULL) == ENOBUFS) 1168 if (re_newbuf(sc, i, NULL) == ENOBUFS)
1145 return ENOBUFS; 1169 return ENOBUFS;
1146 } 1170 }
1147 1171
1148 sc->re_ldata.re_rx_prodidx = 0; 1172 sc->re_ldata.re_rx_prodidx = 0;
1149 sc->re_head = sc->re_tail = NULL; 1173 sc->re_head = sc->re_tail = NULL;
1150 1174
1151 return 0; 1175 return 0;
1152} 1176}
1153 1177
/*
 * re_rxeof:
 *	RX handler for C+ and 8169. For the gigE chips, we support
 *	the reception of jumbo frames that have been fragmented
 *	across multiple 2K mbuf cluster buffers.  Fragments of one
 *	frame are chained on sc->re_head/sc->re_tail until the
 *	descriptor with the EOF bit arrives.
 */
static void
re_rxeof(struct rtk_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	int i, total_len;
	struct re_desc *cur_rx;
	struct re_rxsoft *rxs;
	uint32_t rxstat, rxvlan;

	ifp = &sc->ethercom.ec_if;

	/* Walk the ring from the last processed slot until we hit a
	 * descriptor still owned by the chip. */
	for (i = sc->re_ldata.re_rx_prodidx;; i = RE_NEXT_RX_DESC(sc, i)) {
		cur_rx = &sc->re_ldata.re_rx_list[i];
		RE_RXDESCSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		rxstat = le32toh(cur_rx->re_cmdstat);
		rxvlan = le32toh(cur_rx->re_vlanctl);
		RE_RXDESCSYNC(sc, i, BUS_DMASYNC_PREREAD);
		if ((rxstat & RE_RDESC_STAT_OWN) != 0) {
			/* Chip still owns it: no more completed frames. */
			break;
		}
		total_len = rxstat & sc->re_rxlenmask;
		rxs = &sc->re_ldata.re_rxsoft[i];
		m = rxs->rxs_mbuf;

		/* Invalidate the RX mbuf and unload its map */

		bus_dmamap_sync(sc->sc_dmat,
		    rxs->rxs_dmamap, 0, rxs->rxs_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

		if ((rxstat & RE_RDESC_STAT_EOF) == 0) {
			/*
			 * Middle fragment of a jumbo frame: append it to
			 * the pending chain and move on to the next slot.
			 */
			m->m_len = MCLBYTES - RE_ETHER_ALIGN;
			if (sc->re_head == NULL)
				sc->re_head = sc->re_tail = m;
			else {
				m_remove_pkthdr(m);
				sc->re_tail->m_next = m;
				sc->re_tail = m;
			}
			re_newbuf(sc, i, NULL);
			continue;
		}

		/*
		 * NOTE: for the 8139C+, the frame length field
		 * is always 12 bits in size, but for the gigE chips,
		 * it is 13 bits (since the max RX frame length is 16K).
		 * Unfortunately, all 32 bits in the status word
		 * were already used, so to make room for the extra
		 * length bit, RealTek took out the 'frame alignment
		 * error' bit and shifted the other status bits
		 * over one slot. The OWN, EOR, FS and LS bits are
		 * still in the same places. We have already extracted
		 * the frame length and checked the OWN bit, so rather
		 * than using an alternate bit mapping, we shift the
		 * status bits one space to the right so we can evaluate
		 * them using the 8169 status as though it was in the
		 * same format as that of the 8139C+.
		 */
		if ((sc->sc_quirk & RTKQ_8139CPLUS) == 0)
			rxstat >>= 1;

		if (__predict_false((rxstat & RE_RDESC_STAT_RXERRSUM) != 0)) {
#ifdef RE_DEBUG
			printf("%s: RX error (rxstat = 0x%08x)",
			    device_xname(sc->sc_dev), rxstat);
			if (rxstat & RE_RDESC_STAT_FRALIGN)
				printf(", frame alignment error");
			if (rxstat & RE_RDESC_STAT_BUFOFLOW)
				printf(", out of buffer space");
			if (rxstat & RE_RDESC_STAT_FIFOOFLOW)
				printf(", FIFO overrun");
			if (rxstat & RE_RDESC_STAT_GIANT)
				printf(", giant packet");
			if (rxstat & RE_RDESC_STAT_RUNT)
				printf(", runt packet");
			if (rxstat & RE_RDESC_STAT_CRCERR)
				printf(", CRC error");
			printf("\n");
#endif
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->re_head != NULL) {
				m_freem(sc->re_head);
				sc->re_head = sc->re_tail = NULL;
			}
			/* Recycle the bad frame's mbuf into this slot. */
			re_newbuf(sc, i, m);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */

		if (__predict_false(re_newbuf(sc, i, NULL) != 0)) {
			ifp->if_ierrors++;
			if (sc->re_head != NULL) {
				m_freem(sc->re_head);
				sc->re_head = sc->re_tail = NULL;
			}
			re_newbuf(sc, i, m);
			continue;
		}

		if (sc->re_head != NULL) {
			/* Final fragment: compute its payload length. */
			m->m_len = total_len % (MCLBYTES - RE_ETHER_ALIGN);
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes is the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->re_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m_remove_pkthdr(m);
				sc->re_tail->m_next = m;
			}
			m = sc->re_head;
			sc->re_head = sc->re_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			/* Single-buffer frame: strip the trailing CRC. */
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

		m_set_rcvif(m, ifp);

		/* Do RX checksumming */
		if ((sc->sc_quirk & RTKQ_DESCV2) == 0) {
			/* Descriptor format v1: flags live in rxstat. */
			/* Check IP header checksum */
			if ((rxstat & RE_RDESC_STAT_PROTOID) != 0) {
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
				if (rxstat & RE_RDESC_STAT_IPSUMBAD)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_IPv4_BAD;

				/* Check TCP/UDP checksum */
				if (RE_TCPPKT(rxstat)) {
					m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
					if (rxstat & RE_RDESC_STAT_TCPSUMBAD)
						m->m_pkthdr.csum_flags |=
						    M_CSUM_TCP_UDP_BAD;
				} else if (RE_UDPPKT(rxstat)) {
					m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
					if (rxstat & RE_RDESC_STAT_UDPSUMBAD) {
						/*
						 * XXX: 8139C+ thinks UDP csum
						 * 0xFFFF is bad, force software
						 * calculation.
						 */
						if (sc->sc_quirk & RTKQ_8139CPLUS)
							m->m_pkthdr.csum_flags
							    &= ~M_CSUM_UDPv4;
						else
							m->m_pkthdr.csum_flags
							    |= M_CSUM_TCP_UDP_BAD;
					}
				}
			}
		} else {
			/* Descriptor format v2: IPv4 flag is in rxvlan. */
			/* Check IPv4 header checksum */
			if ((rxvlan & RE_RDESC_VLANCTL_IPV4) != 0) {
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
				if (rxstat & RE_RDESC_STAT_IPSUMBAD)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_IPv4_BAD;

				/* Check TCPv4/UDPv4 checksum */
				if (RE_TCPPKT(rxstat)) {
					m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
					if (rxstat & RE_RDESC_STAT_TCPSUMBAD)
						m->m_pkthdr.csum_flags |=
						    M_CSUM_TCP_UDP_BAD;
				} else if (RE_UDPPKT(rxstat)) {
					m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
					if (rxstat & RE_RDESC_STAT_UDPSUMBAD)
						m->m_pkthdr.csum_flags |=
						    M_CSUM_TCP_UDP_BAD;
				}
			}
			/* XXX Check TCPv6/UDPv6 checksum? */
		}

		if (rxvlan & RE_RDESC_VLANCTL_TAG) {
			/* Hardware-stripped VLAN tag arrives byte-swapped. */
			vlan_set_tag(m,
			    bswap16(rxvlan & RE_RDESC_VLANCTL_DATA));
		}
		if_percpuq_enqueue(ifp->if_percpuq, m);
	}

	/* Remember where to resume on the next interrupt. */
	sc->re_ldata.re_rx_prodidx = i;
}
1361 1385
/*
 * re_txeof:
 *	Reap completed TX queue entries: for each entry whose last
 *	descriptor has been released by the chip (OWN clear), unload the
 *	DMA map, free the mbuf, and update interface counters.  Clears
 *	IFF_OACTIVE once enough queue slots are free, and re-arms the
 *	TX timer / restarts the transmitter as required by chip quirks.
 */
static void
re_txeof(struct rtk_softc *sc)
{
	struct ifnet *ifp;
	struct re_txq *txq;
	uint32_t txstat;
	int idx, descidx;

	ifp = &sc->ethercom.ec_if;

	for (idx = sc->re_ldata.re_txq_considx;
	    sc->re_ldata.re_txq_free < RE_TX_QLEN;
	    idx = RE_NEXT_TXQ(sc, idx), sc->re_ldata.re_txq_free++) {
		txq = &sc->re_ldata.re_txq[idx];
		KASSERT(txq->txq_mbuf != NULL);

		/* Check the status word of the entry's LAST descriptor. */
		descidx = txq->txq_descidx;
		RE_TXDESCSYNC(sc, descidx,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		txstat =
		    le32toh(sc->re_ldata.re_tx_list[descidx].re_cmdstat);
		RE_TXDESCSYNC(sc, descidx, BUS_DMASYNC_PREREAD);
		KASSERT((txstat & RE_TDESC_CMD_EOF) != 0);
		if (txstat & RE_TDESC_CMD_OWN) {
			/* Chip hasn't finished this one yet; stop here. */
			break;
		}

		/* Return this entry's descriptors to the free pool. */
		sc->re_ldata.re_tx_free += txq->txq_nsegs;
		KASSERT(sc->re_ldata.re_tx_free <= RE_TX_DESC_CNT(sc));
		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap,
		    0, txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
		m_freem(txq->txq_mbuf);
		txq->txq_mbuf = NULL;

		if (txstat & (RE_TDESC_STAT_EXCESSCOL | RE_TDESC_STAT_COLCNT))
			ifp->if_collisions++;
		if (txstat & RE_TDESC_STAT_TXERRSUM)
			ifp->if_oerrors++;
		else
			ifp->if_opackets++;
	}

	sc->re_ldata.re_txq_considx = idx;

	if (sc->re_ldata.re_txq_free > RE_NTXDESC_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * If not all descriptors have been reaped yet,
	 * reload the timer so that we will eventually get another
	 * interrupt that will cause us to re-enter this routine.
	 * This is done in case the transmitter has gone idle.
	 */
	if (sc->re_ldata.re_txq_free < RE_TX_QLEN) {
		if ((sc->sc_quirk & RTKQ_IM_HW) == 0)
			CSR_WRITE_4(sc, RTK_TIMERCNT, 1);
		if ((sc->sc_quirk & RTKQ_PCIE) != 0) {
			/*
			 * Some chips will ignore a second TX request
			 * issued while an existing transmission is in
			 * progress. If the transmitter goes idle but
			 * there are still packets waiting to be sent,
			 * we need to restart the channel here to flush
			 * them out. This only seems to be required with
			 * the PCIe devices.
			 */
			CSR_WRITE_1(sc, RTK_GTXSTART, RTK_TXSTART_START);
		}
	} else
		ifp->if_timer = 0;	/* all done: cancel the watchdog */
}
1434 1458
1435static void 1459static void
1436re_tick(void *arg) 1460re_tick(void *arg)
1437{ 1461{
1438 struct rtk_softc *sc = arg; 1462 struct rtk_softc *sc = arg;
1439 int s; 1463 int s;
1440 1464
1441 /* XXX: just return for 8169S/8110S with rev 2 or newer phy */ 1465 /* XXX: just return for 8169S/8110S with rev 2 or newer phy */
1442 s = splnet(); 1466 s = splnet();
1443 1467
1444 mii_tick(&sc->mii); 1468 mii_tick(&sc->mii);
1445 splx(s); 1469 splx(s);
1446 1470
1447 callout_reset(&sc->rtk_tick_ch, hz, re_tick, sc); 1471 callout_reset(&sc->rtk_tick_ch, hz, re_tick, sc);
1448} 1472}
1449 1473
/*
 * re_intr:
 *	Interrupt service routine.  Loops reading and acknowledging the
 *	interrupt status register until no interesting bits remain,
 *	dispatching to the RX/TX completion handlers, reinitializing on
 *	system error, and kicking the link state machine on link change.
 *	Returns nonzero iff the interrupt was ours.
 */
int
re_intr(void *arg)
{
	struct rtk_softc *sc = arg;
	struct ifnet *ifp;
	uint16_t status;
	int handled = 0;

	/* Spurious interrupt while powered down: not ours. */
	if (!device_has_power(sc->sc_dev))
		return 0;

	ifp = &sc->ethercom.ec_if;

	if ((ifp->if_flags & IFF_UP) == 0)
		return 0;

	/* Chips with hardware interrupt moderation use a different mask. */
	const uint16_t status_mask = (sc->sc_quirk & RTKQ_IM_HW) ?
	    RTK_INTRS_IM_HW : RTK_INTRS_CPLUS;

	for (;;) {

		status = CSR_READ_2(sc, RTK_ISR);
		/* If the card has gone away the read returns 0xffff. */
		if (status == 0xffff)
			break;
		if (status) {
			handled = 1;
			/* Ack exactly the bits we saw, before servicing. */
			CSR_WRITE_2(sc, RTK_ISR, status);
		}

		if ((status & status_mask) == 0)
			break;

		if (status & (RTK_ISR_RX_OK | RTK_ISR_RX_ERR))
			re_rxeof(sc);

		if (status & (RTK_ISR_TIMEOUT_EXPIRED | RTK_ISR_TX_ERR |
		    RTK_ISR_TX_DESC_UNAVAIL | RTK_ISR_TX_OK))
			re_txeof(sc);

		if (status & RTK_ISR_SYSTEM_ERR) {
			/* Fatal chip error: full reinitialization. */
			re_init(ifp);
		}

		if (status & RTK_ISR_LINKCHG) {
			/* Run the MII tick immediately on link change. */
			callout_stop(&sc->rtk_tick_ch);
			re_tick(sc);
		}
	}

	if (handled)
		if_schedule_deferred_start(ifp);

	/* Feed the interrupt status into the entropy pool. */
	rnd_add_uint32(&sc->rnd_source, status);

	return handled;
}
1507 1531
1508 1532
1509 1533
1510/* 1534/*
1511 * Main transmit routine for C+ and gigE NICs. 1535 * Main transmit routine for C+ and gigE NICs.
1512 */ 1536 */
1513 1537
1514static void 1538static void
1515re_start(struct ifnet *ifp) 1539re_start(struct ifnet *ifp)
1516{ 1540{
1517 struct rtk_softc *sc; 1541 struct rtk_softc *sc;
1518 struct mbuf *m; 1542 struct mbuf *m;
1519 bus_dmamap_t map; 1543 bus_dmamap_t map;
1520 struct re_txq *txq; 1544 struct re_txq *txq;
1521 struct re_desc *d; 1545 struct re_desc *d;
1522 uint32_t cmdstat, re_flags, vlanctl; 1546 uint32_t cmdstat, re_flags, vlanctl;
1523 int ofree, idx, error, nsegs, seg; 1547 int ofree, idx, error, nsegs, seg;
1524 int startdesc, curdesc, lastdesc; 1548 int startdesc, curdesc, lastdesc;
1525 bool pad; 1549 bool pad;
1526 1550
1527 sc = ifp->if_softc; 1551 sc = ifp->if_softc;
1528 ofree = sc->re_ldata.re_txq_free; 1552 ofree = sc->re_ldata.re_txq_free;
1529 1553
1530 for (idx = sc->re_ldata.re_txq_prodidx;; idx = RE_NEXT_TXQ(sc, idx)) { 1554 for (idx = sc->re_ldata.re_txq_prodidx;; idx = RE_NEXT_TXQ(sc, idx)) {
1531 1555
1532 IFQ_POLL(&ifp->if_snd, m); 1556 IFQ_POLL(&ifp->if_snd, m);
1533 if (m == NULL) 1557 if (m == NULL)
1534 break; 1558 break;
1535 1559
1536 if (sc->re_ldata.re_txq_free == 0 || 1560 if (sc->re_ldata.re_txq_free == 0 ||
1537 sc->re_ldata.re_tx_free == 0) { 1561 sc->re_ldata.re_tx_free == 0) {
1538 /* no more free slots left */ 1562 /* no more free slots left */
1539 ifp->if_flags |= IFF_OACTIVE; 1563 ifp->if_flags |= IFF_OACTIVE;
1540 break; 1564 break;
1541 } 1565 }
1542 1566
1543 /* 1567 /*
1544 * Set up checksum offload. Note: checksum offload bits must 1568 * Set up checksum offload. Note: checksum offload bits must
1545 * appear in all descriptors of a multi-descriptor transmit 1569 * appear in all descriptors of a multi-descriptor transmit
1546 * attempt. (This is according to testing done with an 8169 1570 * attempt. (This is according to testing done with an 8169
1547 * chip. I'm not sure if this is a requirement or a bug.) 1571 * chip. I'm not sure if this is a requirement or a bug.)
1548 */ 1572 */
1549 1573
1550 vlanctl = 0; 1574 vlanctl = 0;
1551 if ((m->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0) { 1575 if ((m->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0) {
1552 uint32_t segsz = m->m_pkthdr.segsz; 1576 uint32_t segsz = m->m_pkthdr.segsz;
1553 1577
1554 if ((sc->sc_quirk & RTKQ_DESCV2) == 0) { 1578 if ((sc->sc_quirk & RTKQ_DESCV2) == 0) {
1555 re_flags = RE_TDESC_CMD_LGSEND | 1579 re_flags = RE_TDESC_CMD_LGSEND |
1556 (segsz << RE_TDESC_CMD_MSSVAL_SHIFT); 1580 (segsz << RE_TDESC_CMD_MSSVAL_SHIFT);
1557 } else { 1581 } else {
1558 re_flags = RE_TDESC_CMD_LGSEND_V4; 1582 re_flags = RE_TDESC_CMD_LGSEND_V4;
1559 vlanctl |= 1583 vlanctl |=
1560 (segsz << RE_TDESC_VLANCTL_MSSVAL_SHIFT); 1584 (segsz << RE_TDESC_VLANCTL_MSSVAL_SHIFT);
1561 } 1585 }
1562 } else { 1586 } else {
1563 /* 1587 /*
1564 * set RE_TDESC_CMD_IPCSUM if any checksum offloading 1588 * set RE_TDESC_CMD_IPCSUM if any checksum offloading
1565 * is requested. otherwise, RE_TDESC_CMD_TCPCSUM/ 1589 * is requested. otherwise, RE_TDESC_CMD_TCPCSUM/
1566 * RE_TDESC_CMD_UDPCSUM doesn't make effects. 1590 * RE_TDESC_CMD_UDPCSUM doesn't make effects.
1567 */ 1591 */
1568 re_flags = 0; 1592 re_flags = 0;
1569 if ((m->m_pkthdr.csum_flags & 1593 if ((m->m_pkthdr.csum_flags &
1570 (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) 1594 (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4))
1571 != 0) { 1595 != 0) {
1572 if ((sc->sc_quirk & RTKQ_DESCV2) == 0) { 1596 if ((sc->sc_quirk & RTKQ_DESCV2) == 0) {
1573 re_flags |= RE_TDESC_CMD_IPCSUM; 1597 re_flags |= RE_TDESC_CMD_IPCSUM;
1574 if (m->m_pkthdr.csum_flags & 1598 if (m->m_pkthdr.csum_flags &
1575 M_CSUM_TCPv4) { 1599 M_CSUM_TCPv4) {
1576 re_flags |= 1600 re_flags |=
1577 RE_TDESC_CMD_TCPCSUM; 1601 RE_TDESC_CMD_TCPCSUM;
1578 } else if (m->m_pkthdr.csum_flags & 1602 } else if (m->m_pkthdr.csum_flags &
1579 M_CSUM_UDPv4) { 1603 M_CSUM_UDPv4) {
1580 re_flags |= 1604 re_flags |=
1581 RE_TDESC_CMD_UDPCSUM; 1605 RE_TDESC_CMD_UDPCSUM;
1582 } 1606 }
1583 } else { 1607 } else {
1584 vlanctl |= RE_TDESC_VLANCTL_IPCSUM; 1608 vlanctl |= RE_TDESC_VLANCTL_IPCSUM;
1585 if (m->m_pkthdr.csum_flags & 1609 if (m->m_pkthdr.csum_flags &
1586 M_CSUM_TCPv4) { 1610 M_CSUM_TCPv4) {
1587 vlanctl |= 1611 vlanctl |=
1588 RE_TDESC_VLANCTL_TCPCSUM; 1612 RE_TDESC_VLANCTL_TCPCSUM;
1589 } else if (m->m_pkthdr.csum_flags & 1613 } else if (m->m_pkthdr.csum_flags &
1590 M_CSUM_UDPv4) { 1614 M_CSUM_UDPv4) {
1591 vlanctl |= 1615 vlanctl |=
1592 RE_TDESC_VLANCTL_UDPCSUM; 1616 RE_TDESC_VLANCTL_UDPCSUM;
1593 } 1617 }
1594 } 1618 }
1595 } 1619 }
1596 } 1620 }
1597 1621
1598 txq = &sc->re_ldata.re_txq[idx]; 1622 txq = &sc->re_ldata.re_txq[idx];
1599 map = txq->txq_dmamap; 1623 map = txq->txq_dmamap;
1600 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1624 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
1601 BUS_DMA_WRITE|BUS_DMA_NOWAIT); 1625 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1602 1626
1603 if (__predict_false(error)) { 1627 if (__predict_false(error)) {
1604 /* XXX try to defrag if EFBIG? */ 1628 /* XXX try to defrag if EFBIG? */
1605 printf("%s: can't map mbuf (error %d)\n", 1629 printf("%s: can't map mbuf (error %d)\n",
1606 device_xname(sc->sc_dev), error); 1630 device_xname(sc->sc_dev), error);
1607 1631
1608 IFQ_DEQUEUE(&ifp->if_snd, m); 1632 IFQ_DEQUEUE(&ifp->if_snd, m);
1609 m_freem(m); 1633 m_freem(m);
1610 ifp->if_oerrors++; 1634 ifp->if_oerrors++;
1611 continue; 1635 continue;
1612 } 1636 }
1613 1637
1614 nsegs = map->dm_nsegs; 1638 nsegs = map->dm_nsegs;
1615 pad = false; 1639 pad = false;
1616 if (__predict_false(m->m_pkthdr.len <= RE_IP4CSUMTX_PADLEN && 1640 if (__predict_false(m->m_pkthdr.len <= RE_IP4CSUMTX_PADLEN &&
1617 (re_flags & RE_TDESC_CMD_IPCSUM) != 0 && 1641 (re_flags & RE_TDESC_CMD_IPCSUM) != 0 &&
1618 (sc->sc_quirk & RTKQ_DESCV2) == 0)) { 1642 (sc->sc_quirk & RTKQ_DESCV2) == 0)) {
1619 pad = true; 1643 pad = true;
1620 nsegs++; 1644 nsegs++;
1621 } 1645 }
1622 1646
1623 if (nsegs > sc->re_ldata.re_tx_free) { 1647 if (nsegs > sc->re_ldata.re_tx_free) {
1624 /* 1648 /*
1625 * Not enough free descriptors to transmit this packet. 1649 * Not enough free descriptors to transmit this packet.
1626 */ 1650 */
1627 ifp->if_flags |= IFF_OACTIVE; 1651 ifp->if_flags |= IFF_OACTIVE;
1628 bus_dmamap_unload(sc->sc_dmat, map); 1652 bus_dmamap_unload(sc->sc_dmat, map);
1629 break; 1653 break;
1630 } 1654 }
1631 1655
1632 IFQ_DEQUEUE(&ifp->if_snd, m); 1656 IFQ_DEQUEUE(&ifp->if_snd, m);
1633 1657
1634 /* 1658 /*
1635 * Make sure that the caches are synchronized before we 1659 * Make sure that the caches are synchronized before we
1636 * ask the chip to start DMA for the packet data. 1660 * ask the chip to start DMA for the packet data.
1637 */ 1661 */
1638 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1662 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1639 BUS_DMASYNC_PREWRITE); 1663 BUS_DMASYNC_PREWRITE);
1640 1664
1641 /* 1665 /*
1642 * Set up hardware VLAN tagging. Note: vlan tag info must 1666 * Set up hardware VLAN tagging. Note: vlan tag info must
1643 * appear in all descriptors of a multi-descriptor 1667 * appear in all descriptors of a multi-descriptor
1644 * transmission attempt. 1668 * transmission attempt.
1645 */ 1669 */
1646 if (vlan_has_tag(m)) 1670 if (vlan_has_tag(m))
1647 vlanctl |= bswap16(vlan_get_tag(m)) | 1671 vlanctl |= bswap16(vlan_get_tag(m)) |
1648 RE_TDESC_VLANCTL_TAG; 1672 RE_TDESC_VLANCTL_TAG;
1649 1673
1650 /* 1674 /*
1651 * Map the segment array into descriptors. 1675 * Map the segment array into descriptors.
1652 * Note that we set the start-of-frame and 1676 * Note that we set the start-of-frame and
1653 * end-of-frame markers for either TX or RX, 1677 * end-of-frame markers for either TX or RX,
1654 * but they really only have meaning in the TX case. 1678 * but they really only have meaning in the TX case.
1655 * (In the RX case, it's the chip that tells us 1679 * (In the RX case, it's the chip that tells us
1656 * where packets begin and end.) 1680 * where packets begin and end.)
1657 * We also keep track of the end of the ring 1681 * We also keep track of the end of the ring
1658 * and set the end-of-ring bits as needed, 1682 * and set the end-of-ring bits as needed,
1659 * and we set the ownership bits in all except 1683 * and we set the ownership bits in all except
1660 * the very first descriptor. (The caller will 1684 * the very first descriptor. (The caller will
1661 * set this descriptor later when it start 1685 * set this descriptor later when it start
1662 * transmission or reception.) 1686 * transmission or reception.)
1663 */ 1687 */
1664 curdesc = startdesc = sc->re_ldata.re_tx_nextfree; 1688 curdesc = startdesc = sc->re_ldata.re_tx_nextfree;
1665 lastdesc = -1; 1689 lastdesc = -1;
1666 for (seg = 0; seg < map->dm_nsegs; 1690 for (seg = 0; seg < map->dm_nsegs;
1667 seg++, curdesc = RE_NEXT_TX_DESC(sc, curdesc)) { 1691 seg++, curdesc = RE_NEXT_TX_DESC(sc, curdesc)) {
1668 d = &sc->re_ldata.re_tx_list[curdesc]; 1692 d = &sc->re_ldata.re_tx_list[curdesc];
1669#ifdef DIAGNOSTIC 1693#ifdef DIAGNOSTIC
1670 RE_TXDESCSYNC(sc, curdesc, 1694 RE_TXDESCSYNC(sc, curdesc,
1671 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1695 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1672 cmdstat = le32toh(d->re_cmdstat); 1696 cmdstat = le32toh(d->re_cmdstat);
1673 RE_TXDESCSYNC(sc, curdesc, BUS_DMASYNC_PREREAD); 1697 RE_TXDESCSYNC(sc, curdesc, BUS_DMASYNC_PREREAD);
1674 if (cmdstat & RE_TDESC_STAT_OWN) { 1698 if (cmdstat & RE_TDESC_STAT_OWN) {
1675 panic("%s: tried to map busy TX descriptor", 1699 panic("%s: tried to map busy TX descriptor",
1676 device_xname(sc->sc_dev)); 1700 device_xname(sc->sc_dev));
1677 } 1701 }
1678#endif 1702#endif
1679 1703
1680 d->re_vlanctl = htole32(vlanctl); 1704 d->re_vlanctl = htole32(vlanctl);
1681 re_set_bufaddr(d, map->dm_segs[seg].ds_addr); 1705 re_set_bufaddr(d, map->dm_segs[seg].ds_addr);
1682 cmdstat = re_flags | map->dm_segs[seg].ds_len; 1706 cmdstat = re_flags | map->dm_segs[seg].ds_len;
1683 if (seg == 0) 1707 if (seg == 0)
1684 cmdstat |= RE_TDESC_CMD_SOF; 1708 cmdstat |= RE_TDESC_CMD_SOF;
1685 else 1709 else
1686 cmdstat |= RE_TDESC_CMD_OWN; 1710 cmdstat |= RE_TDESC_CMD_OWN;
1687 if (curdesc == (RE_TX_DESC_CNT(sc) - 1)) 1711 if (curdesc == (RE_TX_DESC_CNT(sc) - 1))
1688 cmdstat |= RE_TDESC_CMD_EOR; 1712 cmdstat |= RE_TDESC_CMD_EOR;
1689 if (seg == nsegs - 1) { 1713 if (seg == nsegs - 1) {
1690 cmdstat |= RE_TDESC_CMD_EOF; 1714 cmdstat |= RE_TDESC_CMD_EOF;
1691 lastdesc = curdesc; 1715 lastdesc = curdesc;
1692 } 1716 }
1693 d->re_cmdstat = htole32(cmdstat); 1717 d->re_cmdstat = htole32(cmdstat);
1694 RE_TXDESCSYNC(sc, curdesc, 1718 RE_TXDESCSYNC(sc, curdesc,
1695 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1719 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1696 } 1720 }
1697 if (__predict_false(pad)) { 1721 if (__predict_false(pad)) {
1698 d = &sc->re_ldata.re_tx_list[curdesc]; 1722 d = &sc->re_ldata.re_tx_list[curdesc];
1699 d->re_vlanctl = htole32(vlanctl); 1723 d->re_vlanctl = htole32(vlanctl);
1700 re_set_bufaddr(d, RE_TXPADDADDR(sc)); 1724 re_set_bufaddr(d, RE_TXPADDADDR(sc));
1701 cmdstat = re_flags | 1725 cmdstat = re_flags |
1702 RE_TDESC_CMD_OWN | RE_TDESC_CMD_EOF | 1726 RE_TDESC_CMD_OWN | RE_TDESC_CMD_EOF |
1703 (RE_IP4CSUMTX_PADLEN + 1 - m->m_pkthdr.len); 1727 (RE_IP4CSUMTX_PADLEN + 1 - m->m_pkthdr.len);
1704 if (curdesc == (RE_TX_DESC_CNT(sc) - 1)) 1728 if (curdesc == (RE_TX_DESC_CNT(sc) - 1))
1705 cmdstat |= RE_TDESC_CMD_EOR; 1729 cmdstat |= RE_TDESC_CMD_EOR;
1706 d->re_cmdstat = htole32(cmdstat); 1730 d->re_cmdstat = htole32(cmdstat);
1707 RE_TXDESCSYNC(sc, curdesc, 1731 RE_TXDESCSYNC(sc, curdesc,
1708 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1732 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1709 lastdesc = curdesc; 1733 lastdesc = curdesc;
1710 curdesc = RE_NEXT_TX_DESC(sc, curdesc); 1734 curdesc = RE_NEXT_TX_DESC(sc, curdesc);
1711 } 1735 }
1712 KASSERT(lastdesc != -1); 1736 KASSERT(lastdesc != -1);
1713 1737
1714 /* Transfer ownership of packet to the chip. */ 1738 /* Transfer ownership of packet to the chip. */
1715 1739
1716 sc->re_ldata.re_tx_list[startdesc].re_cmdstat |= 1740 sc->re_ldata.re_tx_list[startdesc].re_cmdstat |=
1717 htole32(RE_TDESC_CMD_OWN); 1741 htole32(RE_TDESC_CMD_OWN);
1718 RE_TXDESCSYNC(sc, startdesc, 1742 RE_TXDESCSYNC(sc, startdesc,
1719 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1743 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1720 1744
1721 /* update info of TX queue and descriptors */ 1745 /* update info of TX queue and descriptors */
1722 txq->txq_mbuf = m; 1746 txq->txq_mbuf = m;
1723 txq->txq_descidx = lastdesc; 1747 txq->txq_descidx = lastdesc;
1724 txq->txq_nsegs = nsegs; 1748 txq->txq_nsegs = nsegs;
1725 1749
1726 sc->re_ldata.re_txq_free--; 1750 sc->re_ldata.re_txq_free--;
1727 sc->re_ldata.re_tx_free -= nsegs; 1751 sc->re_ldata.re_tx_free -= nsegs;
1728 sc->re_ldata.re_tx_nextfree = curdesc; 1752 sc->re_ldata.re_tx_nextfree = curdesc;
1729 1753
1730 /* 1754 /*
1731 * If there's a BPF listener, bounce a copy of this frame 1755 * If there's a BPF listener, bounce a copy of this frame
1732 * to him. 1756 * to him.
1733 */ 1757 */
1734 bpf_mtap(ifp, m, BPF_D_OUT); 1758 bpf_mtap(ifp, m, BPF_D_OUT);
1735 } 1759 }
1736 1760
1737 if (sc->re_ldata.re_txq_free < ofree) { 1761 if (sc->re_ldata.re_txq_free < ofree) {
1738 /* 1762 /*
1739 * TX packets are enqueued. 1763 * TX packets are enqueued.
1740 */ 1764 */
1741 sc->re_ldata.re_txq_prodidx = idx; 1765 sc->re_ldata.re_txq_prodidx = idx;
1742 1766
1743 /* 1767 /*
1744 * Start the transmitter to poll. 1768 * Start the transmitter to poll.
1745 * 1769 *
1746 * RealTek put the TX poll request register in a different 1770 * RealTek put the TX poll request register in a different
1747 * location on the 8169 gigE chip. I don't know why. 1771 * location on the 8169 gigE chip. I don't know why.
1748 */ 1772 */
1749 if ((sc->sc_quirk & RTKQ_8139CPLUS) != 0) 1773 if ((sc->sc_quirk & RTKQ_8139CPLUS) != 0)
1750 CSR_WRITE_1(sc, RTK_TXSTART, RTK_TXSTART_START); 1774 CSR_WRITE_1(sc, RTK_TXSTART, RTK_TXSTART_START);
1751 else 1775 else
1752 CSR_WRITE_1(sc, RTK_GTXSTART, RTK_TXSTART_START); 1776 CSR_WRITE_1(sc, RTK_GTXSTART, RTK_TXSTART_START);
1753 1777
1754 if ((sc->sc_quirk & RTKQ_IM_HW) == 0) { 1778 if ((sc->sc_quirk & RTKQ_IM_HW) == 0) {
1755 /* 1779 /*
1756 * Use the countdown timer for interrupt moderation. 1780 * Use the countdown timer for interrupt moderation.
1757 * 'TX done' interrupts are disabled. Instead, we reset 1781 * 'TX done' interrupts are disabled. Instead, we reset
1758 * the countdown timer, which will begin counting until 1782 * the countdown timer, which will begin counting until
1759 * it hits the value in the TIMERINT register, and then 1783 * it hits the value in the TIMERINT register, and then
1760 * trigger an interrupt. Each time we write to the 1784 * trigger an interrupt. Each time we write to the
1761 * TIMERCNT register, the timer count is reset to 0. 1785 * TIMERCNT register, the timer count is reset to 0.
1762 */ 1786 */
1763 CSR_WRITE_4(sc, RTK_TIMERCNT, 1); 1787 CSR_WRITE_4(sc, RTK_TIMERCNT, 1);
1764 } 1788 }
1765 1789
1766 /* 1790 /*
1767 * Set a timeout in case the chip goes out to lunch. 1791 * Set a timeout in case the chip goes out to lunch.
1768 */ 1792 */
1769 ifp->if_timer = 5; 1793 ifp->if_timer = 5;
1770 } 1794 }
1771} 1795}
1772 1796
1773static int 1797static int
1774re_init(struct ifnet *ifp) 1798re_init(struct ifnet *ifp)
1775{ 1799{
1776 struct rtk_softc *sc = ifp->if_softc; 1800 struct rtk_softc *sc = ifp->if_softc;
1777 uint32_t rxcfg = 0; 1801 uint32_t rxcfg = 0;
1778 uint16_t cfg; 1802 uint16_t cfg;
1779 int error; 1803 int error;
1780#ifdef RE_USE_EECMD 1804#ifdef RE_USE_EECMD
1781 const uint8_t *enaddr; 1805 const uint8_t *enaddr;
1782 uint32_t reg; 1806 uint32_t reg;
1783#endif 1807#endif
1784 1808
1785 if ((error = re_enable(sc)) != 0) 1809 if ((error = re_enable(sc)) != 0)
1786 goto out; 1810 goto out;
1787 1811
1788 /* 1812 /*
1789 * Cancel pending I/O and free all RX/TX buffers. 1813 * Cancel pending I/O and free all RX/TX buffers.
1790 */ 1814 */
1791 re_stop(ifp, 0); 1815 re_stop(ifp, 0);
1792 1816
1793 re_reset(sc); 1817 re_reset(sc);
1794 1818
1795 /* 1819 /*
1796 * Enable C+ RX and TX mode, as well as VLAN stripping and 1820 * Enable C+ RX and TX mode, as well as VLAN stripping and
1797 * RX checksum offload. We must configure the C+ register 1821 * RX checksum offload. We must configure the C+ register
1798 * before all others. 1822 * before all others.
1799 */ 1823 */
1800 cfg = RE_CPLUSCMD_PCI_MRW; 1824 cfg = RE_CPLUSCMD_PCI_MRW;
1801 1825
1802 /* 1826 /*
1803 * XXX: For old 8169 set bit 14. 1827 * XXX: For old 8169 set bit 14.
1804 * For 8169S/8110S and above, do not set bit 14. 1828 * For 8169S/8110S and above, do not set bit 14.
1805 */ 1829 */
1806 if ((sc->sc_quirk & RTKQ_8169NONS) != 0) 1830 if ((sc->sc_quirk & RTKQ_8169NONS) != 0)
1807 cfg |= (0x1 << 14); 1831 cfg |= (0x1 << 14);
1808 1832
1809 if ((sc->ethercom.ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0) 1833 if ((sc->ethercom.ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0)
1810 cfg |= RE_CPLUSCMD_VLANSTRIP; 1834 cfg |= RE_CPLUSCMD_VLANSTRIP;
1811 if ((ifp->if_capenable & (IFCAP_CSUM_IPv4_Rx | 1835 if ((ifp->if_capenable & (IFCAP_CSUM_IPv4_Rx |
1812 IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx)) != 0) 1836 IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx)) != 0)
1813 cfg |= RE_CPLUSCMD_RXCSUM_ENB; 1837 cfg |= RE_CPLUSCMD_RXCSUM_ENB;
1814 if ((sc->sc_quirk & RTKQ_MACSTAT) != 0) { 1838 if ((sc->sc_quirk & RTKQ_MACSTAT) != 0) {
1815 cfg |= RE_CPLUSCMD_MACSTAT_DIS; 1839 cfg |= RE_CPLUSCMD_MACSTAT_DIS;
1816 cfg |= RE_CPLUSCMD_TXENB; 1840 cfg |= RE_CPLUSCMD_TXENB;
1817 } else 1841 } else
1818 cfg |= RE_CPLUSCMD_RXENB | RE_CPLUSCMD_TXENB; 1842 cfg |= RE_CPLUSCMD_RXENB | RE_CPLUSCMD_TXENB;
1819 1843
1820 CSR_WRITE_2(sc, RTK_CPLUS_CMD, cfg); 1844 CSR_WRITE_2(sc, RTK_CPLUS_CMD, cfg);
1821 1845
1822 /* XXX: from Realtek-supplied Linux driver. Wholly undocumented. */ 1846 /* XXX: from Realtek-supplied Linux driver. Wholly undocumented. */
1823 if ((sc->sc_quirk & RTKQ_8139CPLUS) == 0) { 1847 if ((sc->sc_quirk & RTKQ_8139CPLUS) == 0) {
1824 if ((sc->sc_quirk & RTKQ_IM_HW) == 0) { 1848 if ((sc->sc_quirk & RTKQ_IM_HW) == 0) {
1825 CSR_WRITE_2(sc, RTK_IM, 0x0000); 1849 CSR_WRITE_2(sc, RTK_IM, 0x0000);
1826 } else { 1850 } else {
1827 CSR_WRITE_2(sc, RTK_IM, 0x5151); 1851 CSR_WRITE_2(sc, RTK_IM, 0x5151);
1828 } 1852 }
1829 } 1853 }
1830 1854
1831 DELAY(10000); 1855 DELAY(10000);
1832 1856
1833#ifdef RE_USE_EECMD 1857#ifdef RE_USE_EECMD
1834 /* 1858 /*
1835 * Init our MAC address. Even though the chipset 1859 * Init our MAC address. Even though the chipset
1836 * documentation doesn't mention it, we need to enter "Config 1860 * documentation doesn't mention it, we need to enter "Config
1837 * register write enable" mode to modify the ID registers. 1861 * register write enable" mode to modify the ID registers.
1838 */ 1862 */
1839 CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_WRITECFG); 1863 CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_WRITECFG);
1840 enaddr = CLLADDR(ifp->if_sadl); 1864 enaddr = CLLADDR(ifp->if_sadl);
1841 reg = enaddr[0] | (enaddr[1] << 8) | 1865 reg = enaddr[0] | (enaddr[1] << 8) |
1842 (enaddr[2] << 16) | (enaddr[3] << 24); 1866 (enaddr[2] << 16) | (enaddr[3] << 24);
1843 CSR_WRITE_4(sc, RTK_IDR0, reg); 1867 CSR_WRITE_4(sc, RTK_IDR0, reg);
1844 reg = enaddr[4] | (enaddr[5] << 8); 1868 reg = enaddr[4] | (enaddr[5] << 8);
1845 CSR_WRITE_4(sc, RTK_IDR4, reg); 1869 CSR_WRITE_4(sc, RTK_IDR4, reg);
1846 CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_OFF); 1870 CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_OFF);
1847#endif 1871#endif
1848 1872
1849 /* 1873 /*
1850 * For C+ mode, initialize the RX descriptors and mbufs. 1874 * For C+ mode, initialize the RX descriptors and mbufs.
1851 */ 1875 */
1852 re_rx_list_init(sc); 1876 re_rx_list_init(sc);
1853 re_tx_list_init(sc); 1877 re_tx_list_init(sc);
1854 1878
1855 /* 1879 /*
1856 * Load the addresses of the RX and TX lists into the chip. 1880 * Load the addresses of the RX and TX lists into the chip.
1857 */ 1881 */
1858 CSR_WRITE_4(sc, RTK_RXLIST_ADDR_HI, 1882 CSR_WRITE_4(sc, RTK_RXLIST_ADDR_HI,
1859 RE_ADDR_HI(sc->re_ldata.re_rx_list_map->dm_segs[0].ds_addr)); 1883 RE_ADDR_HI(sc->re_ldata.re_rx_list_map->dm_segs[0].ds_addr));
1860 CSR_WRITE_4(sc, RTK_RXLIST_ADDR_LO, 1884 CSR_WRITE_4(sc, RTK_RXLIST_ADDR_LO,
1861 RE_ADDR_LO(sc->re_ldata.re_rx_list_map->dm_segs[0].ds_addr)); 1885 RE_ADDR_LO(sc->re_ldata.re_rx_list_map->dm_segs[0].ds_addr));
1862 1886
1863 CSR_WRITE_4(sc, RTK_TXLIST_ADDR_HI, 1887 CSR_WRITE_4(sc, RTK_TXLIST_ADDR_HI,
1864 RE_ADDR_HI(sc->re_ldata.re_tx_list_map->dm_segs[0].ds_addr)); 1888 RE_ADDR_HI(sc->re_ldata.re_tx_list_map->dm_segs[0].ds_addr));
1865 CSR_WRITE_4(sc, RTK_TXLIST_ADDR_LO, 1889 CSR_WRITE_4(sc, RTK_TXLIST_ADDR_LO,
1866 RE_ADDR_LO(sc->re_ldata.re_tx_list_map->dm_segs[0].ds_addr)); 1890 RE_ADDR_LO(sc->re_ldata.re_tx_list_map->dm_segs[0].ds_addr));
1867 1891
1868 if (sc->sc_quirk & RTKQ_RXDV_GATED) { 1892 if (sc->sc_quirk & RTKQ_RXDV_GATED) {
1869 CSR_WRITE_4(sc, RTK_MISC, 1893 CSR_WRITE_4(sc, RTK_MISC,
1870 CSR_READ_4(sc, RTK_MISC) & ~RTK_MISC_RXDV_GATED_EN); 1894 CSR_READ_4(sc, RTK_MISC) & ~RTK_MISC_RXDV_GATED_EN);
1871 } 1895 }
1872  1896
1873 /* 1897 /*
1874 * Enable transmit and receive. 1898 * Enable transmit and receive.
1875 */ 1899 */
1876 CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB | RTK_CMD_RX_ENB); 1900 if ((sc->sc_quirk & RTKQ_TXRXEN_LATER) == 0)
 1901 CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB | RTK_CMD_RX_ENB);
1877 1902
1878 /* 1903 /*
1879 * Set the initial TX and RX configuration. 1904 * Set the initial TX and RX configuration.
1880 */ 1905 */
1881 if (sc->re_testmode && (sc->sc_quirk & RTKQ_8169NONS) != 0) { 1906 if (sc->re_testmode && (sc->sc_quirk & RTKQ_8169NONS) != 0) {
1882 /* test mode is needed only for old 8169 */ 1907 /* test mode is needed only for old 8169 */
1883 CSR_WRITE_4(sc, RTK_TXCFG, 1908 CSR_WRITE_4(sc, RTK_TXCFG,
1884 RE_TXCFG_CONFIG | RTK_LOOPTEST_ON); 1909 RE_TXCFG_CONFIG | RTK_LOOPTEST_ON);
1885 } else 1910 } else
1886 CSR_WRITE_4(sc, RTK_TXCFG, RE_TXCFG_CONFIG); 1911 CSR_WRITE_4(sc, RTK_TXCFG, RE_TXCFG_CONFIG);
1887 1912
1888 CSR_WRITE_1(sc, RTK_EARLY_TX_THRESH, 16); 1913 CSR_WRITE_1(sc, RTK_EARLY_TX_THRESH, 16);
1889 1914
1890 CSR_WRITE_4(sc, RTK_RXCFG, RE_RXCFG_CONFIG); 1915 CSR_WRITE_4(sc, RTK_RXCFG, RE_RXCFG_CONFIG);
1891 1916
1892 /* Set the individual bit to receive frames for this host only. */ 1917 /* Set the individual bit to receive frames for this host only. */
1893 rxcfg = CSR_READ_4(sc, RTK_RXCFG); 1918 rxcfg = CSR_READ_4(sc, RTK_RXCFG);
1894 rxcfg |= RTK_RXCFG_RX_INDIV; 1919 rxcfg |= RTK_RXCFG_RX_INDIV;
1895 1920
1896 /* If we want promiscuous mode, set the allframes bit. */ 1921 /* If we want promiscuous mode, set the allframes bit. */
1897 if (ifp->if_flags & IFF_PROMISC) 1922 if (ifp->if_flags & IFF_PROMISC)
1898 rxcfg |= RTK_RXCFG_RX_ALLPHYS; 1923 rxcfg |= RTK_RXCFG_RX_ALLPHYS;
1899 else 1924 else
1900 rxcfg &= ~RTK_RXCFG_RX_ALLPHYS; 1925 rxcfg &= ~RTK_RXCFG_RX_ALLPHYS;
1901 CSR_WRITE_4(sc, RTK_RXCFG, rxcfg); 1926 CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1902 1927
1903 /* 1928 /*
1904 * Set capture broadcast bit to capture broadcast frames. 1929 * Set capture broadcast bit to capture broadcast frames.
1905 */ 1930 */
1906 if (ifp->if_flags & IFF_BROADCAST) 1931 if (ifp->if_flags & IFF_BROADCAST)
1907 rxcfg |= RTK_RXCFG_RX_BROAD; 1932 rxcfg |= RTK_RXCFG_RX_BROAD;
1908 else 1933 else
1909 rxcfg &= ~RTK_RXCFG_RX_BROAD; 1934 rxcfg &= ~RTK_RXCFG_RX_BROAD;
1910 CSR_WRITE_4(sc, RTK_RXCFG, rxcfg); 1935 CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1911 1936
1912 /* 1937 /*
1913 * Program the multicast filter, if necessary. 1938 * Program the multicast filter, if necessary.
1914 */ 1939 */
1915 rtk_setmulti(sc); 1940 rtk_setmulti(sc);
1916 1941
1917 /* 1942 /*
 1943 * some chips require to enable TX/RX *AFTER* TX/RX configuration
 1944 */
 1945 if ((sc->sc_quirk & RTKQ_TXRXEN_LATER) != 0)
 1946 CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB | RTK_CMD_RX_ENB);
 1947
 1948 /*
1918 * Enable interrupts. 1949 * Enable interrupts.
1919 */ 1950 */
1920 if (sc->re_testmode) 1951 if (sc->re_testmode)
1921 CSR_WRITE_2(sc, RTK_IMR, 0); 1952 CSR_WRITE_2(sc, RTK_IMR, 0);
1922 else if ((sc->sc_quirk & RTKQ_IM_HW) != 0) 1953 else if ((sc->sc_quirk & RTKQ_IM_HW) != 0)
1923 CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS_IM_HW); 1954 CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS_IM_HW);
1924 else 1955 else
1925 CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS_CPLUS); 1956 CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS_CPLUS);
1926 1957
1927 /* Start RX/TX process. */ 1958 /* Start RX/TX process. */
1928 CSR_WRITE_4(sc, RTK_MISSEDPKT, 0); 1959 CSR_WRITE_4(sc, RTK_MISSEDPKT, 0);
1929#ifdef notdef 1960#ifdef notdef
1930 /* Enable receiver and transmitter. */ 1961 /* Enable receiver and transmitter. */
1931 CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB | RTK_CMD_RX_ENB); 1962 CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB | RTK_CMD_RX_ENB);
1932#endif 1963#endif
1933 1964
1934 /* 1965 /*
1935 * Initialize the timer interrupt register so that 1966 * Initialize the timer interrupt register so that
1936 * a timer interrupt will be generated once the timer 1967 * a timer interrupt will be generated once the timer
1937 * reaches a certain number of ticks. The timer is 1968 * reaches a certain number of ticks. The timer is
1938 * reloaded on each transmit. This gives us TX interrupt 1969 * reloaded on each transmit. This gives us TX interrupt
1939 * moderation, which dramatically improves TX frame rate. 1970 * moderation, which dramatically improves TX frame rate.
1940 */ 1971 */
1941 1972
1942 unsigned defer; /* timer interval / ns */ 1973 unsigned defer; /* timer interval / ns */
1943 unsigned period; /* busclock period / ns */ 1974 unsigned period; /* busclock period / ns */
1944 1975
1945 /* 1976 /*
1946 * Maximum frame rate 1977 * Maximum frame rate
1947 * 1500 byte PDU -> 81274 Hz 1978 * 1500 byte PDU -> 81274 Hz
1948 * 46 byte PDU -> 1488096 Hz 1979 * 46 byte PDU -> 1488096 Hz
1949 * 1980 *
1950 * Deferring interrupts by up to 128us needs descriptors for 1981 * Deferring interrupts by up to 128us needs descriptors for
1951 * 1500 byte PDU -> 10.4 frames 1982 * 1500 byte PDU -> 10.4 frames
1952 * 46 byte PDU -> 190.4 frames 1983 * 46 byte PDU -> 190.4 frames
1953 * 1984 *
1954 */ 1985 */
1955 defer = 128000; 1986 defer = 128000;
1956 1987
1957 if ((sc->sc_quirk & RTKQ_IM_HW) != 0) { 1988 if ((sc->sc_quirk & RTKQ_IM_HW) != 0) {
1958 period = 1; 1989 period = 1;
1959 defer = 0; 1990 defer = 0;
1960 } else if ((sc->sc_quirk & RTKQ_PCIE) != 0) { 1991 } else if ((sc->sc_quirk & RTKQ_PCIE) != 0) {
1961 period = 8; 1992 period = 8;
1962 } else { 1993 } else {
1963 switch (CSR_READ_1(sc, RTK_CFG2_BUSFREQ) & 0x7) { 1994 switch (CSR_READ_1(sc, RTK_CFG2_BUSFREQ) & 0x7) {
1964 case RTK_BUSFREQ_33MHZ: 1995 case RTK_BUSFREQ_33MHZ:
1965 period = 30; 1996 period = 30;
1966 break; 1997 break;
1967 case RTK_BUSFREQ_66MHZ: 1998 case RTK_BUSFREQ_66MHZ:
1968 period = 15; 1999 period = 15;
1969 break; 2000 break;
1970 default: 2001 default:
1971 /* lowest possible clock */ 2002 /* lowest possible clock */
1972 period = 60; 2003 period = 60;
1973 break; 2004 break;
1974 } 2005 }
1975 } 2006 }
1976 2007
1977 /* Timer Interrupt register address varies */ 2008 /* Timer Interrupt register address varies */
1978 uint16_t re8139_reg; 2009 uint16_t re8139_reg;
1979 if ((sc->sc_quirk & RTKQ_8139CPLUS) != 0) 2010 if ((sc->sc_quirk & RTKQ_8139CPLUS) != 0)
1980 re8139_reg = RTK_TIMERINT; 2011 re8139_reg = RTK_TIMERINT;
1981 else 2012 else
1982 re8139_reg = RTK_TIMERINT_8169; 2013 re8139_reg = RTK_TIMERINT_8169;
1983 CSR_WRITE_4(sc, re8139_reg, defer / period); 2014 CSR_WRITE_4(sc, re8139_reg, defer / period);
1984 2015
1985 if ((sc->sc_quirk & RTKQ_8139CPLUS) == 0) { 2016 if ((sc->sc_quirk & RTKQ_8139CPLUS) == 0) {
1986 /* 2017 /*
1987 * For 8169 gigE NICs, set the max allowed RX packet 2018 * For 8169 gigE NICs, set the max allowed RX packet
1988 * size so we can receive jumbo frames. 2019 * size so we can receive jumbo frames.
1989 */ 2020 */
1990 CSR_WRITE_2(sc, RTK_MAXRXPKTLEN, 16383); 2021 CSR_WRITE_2(sc, RTK_MAXRXPKTLEN, 16383);
1991 } 2022 }
1992 2023
1993 if (sc->re_testmode) 2024 if (sc->re_testmode)
1994 return 0; 2025 return 0;
1995 2026
1996 CSR_WRITE_1(sc, RTK_CFG1, RTK_CFG1_DRVLOAD); 2027 CSR_WRITE_1(sc, RTK_CFG1, RTK_CFG1_DRVLOAD);
1997 2028
1998 ifp->if_flags |= IFF_RUNNING; 2029 ifp->if_flags |= IFF_RUNNING;
1999 ifp->if_flags &= ~IFF_OACTIVE; 2030 ifp->if_flags &= ~IFF_OACTIVE;
2000 2031
2001 callout_reset(&sc->rtk_tick_ch, hz, re_tick, sc); 2032 callout_reset(&sc->rtk_tick_ch, hz, re_tick, sc);
2002 2033
2003 out: 2034 out:
2004 if (error) { 2035 if (error) {
2005 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2036 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2006 ifp->if_timer = 0; 2037 ifp->if_timer = 0;
2007 printf("%s: interface not running\n", 2038 printf("%s: interface not running\n",
2008 device_xname(sc->sc_dev)); 2039 device_xname(sc->sc_dev));
2009 } 2040 }
2010 2041
2011 return error; 2042 return error;
2012} 2043}
2013 2044
2014static int 2045static int
2015re_ioctl(struct ifnet *ifp, u_long command, void *data) 2046re_ioctl(struct ifnet *ifp, u_long command, void *data)
2016{ 2047{
2017 struct rtk_softc *sc = ifp->if_softc; 2048 struct rtk_softc *sc = ifp->if_softc;
2018 struct ifreq *ifr = data; 2049 struct ifreq *ifr = data;
2019 int s, error = 0; 2050 int s, error = 0;
2020 2051
2021 s = splnet(); 2052 s = splnet();
2022 2053
2023 switch (command) { 2054 switch (command) {
2024 case SIOCSIFMTU: 2055 case SIOCSIFMTU:
2025 /* 2056 /*
2026 * Disable jumbo frames if it's not supported. 2057 * Disable jumbo frames if it's not supported.
2027 */ 2058 */
2028 if ((sc->sc_quirk & RTKQ_NOJUMBO) != 0 && 2059 if ((sc->sc_quirk & RTKQ_NOJUMBO) != 0 &&
2029 ifr->ifr_mtu > ETHERMTU) { 2060 ifr->ifr_mtu > ETHERMTU) {
2030 error = EINVAL; 2061 error = EINVAL;
2031 break; 2062 break;
2032 } 2063 }
2033 2064
2034 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU_JUMBO) 2065 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU_JUMBO)
2035 error = EINVAL; 2066 error = EINVAL;
2036 else if ((error = ifioctl_common(ifp, command, data)) == 2067 else if ((error = ifioctl_common(ifp, command, data)) ==
2037 ENETRESET) 2068 ENETRESET)
2038 error = 0; 2069 error = 0;
2039 break; 2070 break;
2040 default: 2071 default:
2041 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET) 2072 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
2042 break; 2073 break;
2043 2074
2044 error = 0; 2075 error = 0;
2045 2076
2046 if (command == SIOCSIFCAP) 2077 if (command == SIOCSIFCAP)
2047 error = (*ifp->if_init)(ifp); 2078 error = (*ifp->if_init)(ifp);
2048 else if (command != SIOCADDMULTI && command != SIOCDELMULTI) 2079 else if (command != SIOCADDMULTI && command != SIOCDELMULTI)
2049 ; 2080 ;
2050 else if (ifp->if_flags & IFF_RUNNING) 2081 else if (ifp->if_flags & IFF_RUNNING)
2051 rtk_setmulti(sc); 2082 rtk_setmulti(sc);
2052 break; 2083 break;
2053 } 2084 }
2054 2085
2055 splx(s); 2086 splx(s);
2056 2087
2057 return error; 2088 return error;
2058} 2089}
2059 2090
2060static void 2091static void
2061re_watchdog(struct ifnet *ifp) 2092re_watchdog(struct ifnet *ifp)
2062{ 2093{
2063 struct rtk_softc *sc; 2094 struct rtk_softc *sc;
2064 int s; 2095 int s;
2065 2096
2066 sc = ifp->if_softc; 2097 sc = ifp->if_softc;
2067 s = splnet(); 2098 s = splnet();
2068 printf("%s: watchdog timeout\n", device_xname(sc->sc_dev)); 2099 printf("%s: watchdog timeout\n", device_xname(sc->sc_dev));
2069 ifp->if_oerrors++; 2100 ifp->if_oerrors++;
2070 2101
2071 re_txeof(sc); 2102 re_txeof(sc);
2072 re_rxeof(sc); 2103 re_rxeof(sc);
2073 2104
2074 re_init(ifp); 2105 re_init(ifp);
2075 2106
2076 splx(s); 2107 splx(s);
2077} 2108}
2078 2109
2079/* 2110/*
2080 * Stop the adapter and free any mbufs allocated to the 2111 * Stop the adapter and free any mbufs allocated to the
2081 * RX and TX lists. 2112 * RX and TX lists.
2082 */ 2113 */
2083static void 2114static void
2084re_stop(struct ifnet *ifp, int disable) 2115re_stop(struct ifnet *ifp, int disable)
2085{ 2116{
2086 int i; 2117 int i;
2087 struct rtk_softc *sc = ifp->if_softc; 2118 struct rtk_softc *sc = ifp->if_softc;
2088 2119
2089 callout_stop(&sc->rtk_tick_ch); 2120 callout_stop(&sc->rtk_tick_ch);
2090 2121
2091 mii_down(&sc->mii); 2122 mii_down(&sc->mii);
2092 2123
2093 if ((sc->sc_quirk & RTKQ_CMDSTOP) != 0) 2124 if ((sc->sc_quirk & RTKQ_CMDSTOP) != 0)
2094 CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_STOPREQ | RTK_CMD_TX_ENB | 2125 CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_STOPREQ | RTK_CMD_TX_ENB |
2095 RTK_CMD_RX_ENB); 2126 RTK_CMD_RX_ENB);
2096 else 2127 else
2097 CSR_WRITE_1(sc, RTK_COMMAND, 0x00); 2128 CSR_WRITE_1(sc, RTK_COMMAND, 0x00);
2098 DELAY(1000); 2129 DELAY(1000);
2099 CSR_WRITE_2(sc, RTK_IMR, 0x0000); 2130 CSR_WRITE_2(sc, RTK_IMR, 0x0000);
2100 CSR_WRITE_2(sc, RTK_ISR, 0xFFFF); 2131 CSR_WRITE_2(sc, RTK_ISR, 0xFFFF);
2101 2132
2102 if (sc->re_head != NULL) { 2133 if (sc->re_head != NULL) {
2103 m_freem(sc->re_head); 2134 m_freem(sc->re_head);
2104 sc->re_head = sc->re_tail = NULL; 2135 sc->re_head = sc->re_tail = NULL;
2105 } 2136 }
2106 2137
2107 /* Free the TX list buffers. */ 2138 /* Free the TX list buffers. */
2108 for (i = 0; i < RE_TX_QLEN; i++) { 2139 for (i = 0; i < RE_TX_QLEN; i++) {
2109 if (sc->re_ldata.re_txq[i].txq_mbuf != NULL) { 2140 if (sc->re_ldata.re_txq[i].txq_mbuf != NULL) {
2110 bus_dmamap_unload(sc->sc_dmat, 2141 bus_dmamap_unload(sc->sc_dmat,
2111 sc->re_ldata.re_txq[i].txq_dmamap); 2142 sc->re_ldata.re_txq[i].txq_dmamap);
2112 m_freem(sc->re_ldata.re_txq[i].txq_mbuf); 2143 m_freem(sc->re_ldata.re_txq[i].txq_mbuf);
2113 sc->re_ldata.re_txq[i].txq_mbuf = NULL; 2144 sc->re_ldata.re_txq[i].txq_mbuf = NULL;
2114 } 2145 }
2115 } 2146 }
2116 2147
2117 /* Free the RX list buffers. */ 2148 /* Free the RX list buffers. */
2118 for (i = 0; i < RE_RX_DESC_CNT; i++) { 2149 for (i = 0; i < RE_RX_DESC_CNT; i++) {
2119 if (sc->re_ldata.re_rxsoft[i].rxs_mbuf != NULL) { 2150 if (sc->re_ldata.re_rxsoft[i].rxs_mbuf != NULL) {
2120 bus_dmamap_unload(sc->sc_dmat, 2151 bus_dmamap_unload(sc->sc_dmat,
2121 sc->re_ldata.re_rxsoft[i].rxs_dmamap); 2152 sc->re_ldata.re_rxsoft[i].rxs_dmamap);
2122 m_freem(sc->re_ldata.re_rxsoft[i].rxs_mbuf); 2153 m_freem(sc->re_ldata.re_rxsoft[i].rxs_mbuf);
2123 sc->re_ldata.re_rxsoft[i].rxs_mbuf = NULL; 2154 sc->re_ldata.re_rxsoft[i].rxs_mbuf = NULL;
2124 } 2155 }
2125 } 2156 }
2126 2157
2127 if (disable) 2158 if (disable)
2128 re_disable(sc); 2159 re_disable(sc);
2129 2160
2130 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2161 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2131 ifp->if_timer = 0; 2162 ifp->if_timer = 0;
2132} 2163}

cvs diff -r1.106 -r1.106.2.1 src/sys/dev/ic/rtl81x9.c (switch to unified diff)

--- src/sys/dev/ic/rtl81x9.c 2019/05/28 07:41:48 1.106
+++ src/sys/dev/ic/rtl81x9.c 2020/01/28 11:12:30 1.106.2.1
@@ -1,1515 +1,1515 @@ @@ -1,1515 +1,1515 @@
1/* $NetBSD: rtl81x9.c,v 1.106 2019/05/28 07:41:48 msaitoh Exp $ */ 1/* $NetBSD: rtl81x9.c,v 1.106.2.1 2020/01/28 11:12:30 martin Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1997, 1998 4 * Copyright (c) 1997, 1998
5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software 15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement: 16 * must display the following acknowledgement:
17 * This product includes software developed by Bill Paul. 17 * This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors 18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software 19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission. 20 * without specific prior written permission.
21 * 21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE. 32 * THE POSSIBILITY OF SUCH DAMAGE.
33 * 33 *
34 * FreeBSD Id: if_rl.c,v 1.17 1999/06/19 20:17:37 wpaul Exp 34 * FreeBSD Id: if_rl.c,v 1.17 1999/06/19 20:17:37 wpaul Exp
35 */ 35 */
36 36
37/* 37/*
38 * RealTek 8129/8139 PCI NIC driver 38 * RealTek 8129/8139 PCI NIC driver
39 * 39 *
40 * Supports several extremely cheap PCI 10/100 adapters based on 40 * Supports several extremely cheap PCI 10/100 adapters based on
41 * the RealTek chipset. Datasheets can be obtained from 41 * the RealTek chipset. Datasheets can be obtained from
42 * www.realtek.com.tw. 42 * www.realtek.com.tw.
43 * 43 *
44 * Written by Bill Paul <wpaul@ctr.columbia.edu> 44 * Written by Bill Paul <wpaul@ctr.columbia.edu>
45 * Electrical Engineering Department 45 * Electrical Engineering Department
46 * Columbia University, New York City 46 * Columbia University, New York City
47 */ 47 */
48 48
49/* 49/*
50 * The RealTek 8139 PCI NIC redefines the meaning of 'low end.' This is 50 * The RealTek 8139 PCI NIC redefines the meaning of 'low end.' This is
51 * probably the worst PCI ethernet controller ever made, with the possible 51 * probably the worst PCI ethernet controller ever made, with the possible
52 * exception of the FEAST chip made by SMC. The 8139 supports bus-master 52 * exception of the FEAST chip made by SMC. The 8139 supports bus-master
53 * DMA, but it has a terrible interface that nullifies any performance 53 * DMA, but it has a terrible interface that nullifies any performance
54 * gains that bus-master DMA usually offers. 54 * gains that bus-master DMA usually offers.
55 * 55 *
56 * For transmission, the chip offers a series of four TX descriptor 56 * For transmission, the chip offers a series of four TX descriptor
57 * registers. Each transmit frame must be in a contiguous buffer, aligned 57 * registers. Each transmit frame must be in a contiguous buffer, aligned
58 * on a longword (32-bit) boundary. This means we almost always have to 58 * on a longword (32-bit) boundary. This means we almost always have to
59 * do mbuf copies in order to transmit a frame, except in the unlikely 59 * do mbuf copies in order to transmit a frame, except in the unlikely
60 * case where a) the packet fits into a single mbuf, and b) the packet 60 * case where a) the packet fits into a single mbuf, and b) the packet
61 * is 32-bit aligned within the mbuf's data area. The presence of only 61 * is 32-bit aligned within the mbuf's data area. The presence of only
62 * four descriptor registers means that we can never have more than four 62 * four descriptor registers means that we can never have more than four
63 * packets queued for transmission at any one time. 63 * packets queued for transmission at any one time.
64 * 64 *
65 * Reception is not much better. The driver has to allocate a single large 65 * Reception is not much better. The driver has to allocate a single large
66 * buffer area (up to 64K in size) into which the chip will DMA received 66 * buffer area (up to 64K in size) into which the chip will DMA received
67 * frames. Because we don't know where within this region received packets 67 * frames. Because we don't know where within this region received packets
68 * will begin or end, we have no choice but to copy data from the buffer 68 * will begin or end, we have no choice but to copy data from the buffer
69 * area into mbufs in order to pass the packets up to the higher protocol 69 * area into mbufs in order to pass the packets up to the higher protocol
70 * levels. 70 * levels.
71 * 71 *
72 * It's impossible given this rotten design to really achieve decent 72 * It's impossible given this rotten design to really achieve decent
73 * performance at 100Mbps, unless you happen to have a 400MHz PII or 73 * performance at 100Mbps, unless you happen to have a 400MHz PII or
74 * some equally overmuscled CPU to drive it. 74 * some equally overmuscled CPU to drive it.
75 * 75 *
76 * On the bright side, the 8139 does have a built-in PHY, although 76 * On the bright side, the 8139 does have a built-in PHY, although
77 * rather than using an MDIO serial interface like most other NICs, the 77 * rather than using an MDIO serial interface like most other NICs, the
78 * PHY registers are directly accessible through the 8139's register 78 * PHY registers are directly accessible through the 8139's register
79 * space. The 8139 supports autonegotiation, as well as a 64-bit multicast 79 * space. The 8139 supports autonegotiation, as well as a 64-bit multicast
80 * filter. 80 * filter.
81 * 81 *
82 * The 8129 chip is an older version of the 8139 that uses an external PHY 82 * The 8129 chip is an older version of the 8139 that uses an external PHY
83 * chip. The 8129 has a serial MDIO interface for accessing the MII where 83 * chip. The 8129 has a serial MDIO interface for accessing the MII where
84 * the 8139 lets you directly access the on-board PHY registers. We need 84 * the 8139 lets you directly access the on-board PHY registers. We need
85 * to select which interface to use depending on the chip type. 85 * to select which interface to use depending on the chip type.
86 */ 86 */
87 87
88#include <sys/cdefs.h> 88#include <sys/cdefs.h>
89__KERNEL_RCSID(0, "$NetBSD: rtl81x9.c,v 1.106 2019/05/28 07:41:48 msaitoh Exp $"); 89__KERNEL_RCSID(0, "$NetBSD: rtl81x9.c,v 1.106.2.1 2020/01/28 11:12:30 martin Exp $");
90 90
91 91
92#include <sys/param.h> 92#include <sys/param.h>
93#include <sys/systm.h> 93#include <sys/systm.h>
94#include <sys/callout.h> 94#include <sys/callout.h>
95#include <sys/device.h> 95#include <sys/device.h>
96#include <sys/sockio.h> 96#include <sys/sockio.h>
97#include <sys/mbuf.h> 97#include <sys/mbuf.h>
98#include <sys/malloc.h> 98#include <sys/malloc.h>
99#include <sys/kernel.h> 99#include <sys/kernel.h>
100#include <sys/socket.h> 100#include <sys/socket.h>
101 101
102#include <net/if.h> 102#include <net/if.h>
103#include <net/if_arp.h> 103#include <net/if_arp.h>
104#include <net/if_ether.h> 104#include <net/if_ether.h>
105#include <net/if_dl.h> 105#include <net/if_dl.h>
106#include <net/if_media.h> 106#include <net/if_media.h>
107 107
108#include <net/bpf.h> 108#include <net/bpf.h>
109#include <sys/rndsource.h> 109#include <sys/rndsource.h>
110 110
111#include <sys/bus.h> 111#include <sys/bus.h>
112#include <machine/endian.h> 112#include <machine/endian.h>
113 113
114#include <dev/mii/mii.h> 114#include <dev/mii/mii.h>
115#include <dev/mii/miivar.h> 115#include <dev/mii/miivar.h>
116 116
117#include <dev/ic/rtl81x9reg.h> 117#include <dev/ic/rtl81x9reg.h>
118#include <dev/ic/rtl81x9var.h> 118#include <dev/ic/rtl81x9var.h>
119 119
120static void rtk_reset(struct rtk_softc *); 120static void rtk_reset(struct rtk_softc *);
121static void rtk_rxeof(struct rtk_softc *); 121static void rtk_rxeof(struct rtk_softc *);
122static void rtk_txeof(struct rtk_softc *); 122static void rtk_txeof(struct rtk_softc *);
123static void rtk_start(struct ifnet *); 123static void rtk_start(struct ifnet *);
124static int rtk_ioctl(struct ifnet *, u_long, void *); 124static int rtk_ioctl(struct ifnet *, u_long, void *);
125static int rtk_init(struct ifnet *); 125static int rtk_init(struct ifnet *);
126static void rtk_stop(struct ifnet *, int); 126static void rtk_stop(struct ifnet *, int);
127 127
128static void rtk_watchdog(struct ifnet *); 128static void rtk_watchdog(struct ifnet *);
129 129
130static void rtk_eeprom_putbyte(struct rtk_softc *, int, int); 130static void rtk_eeprom_putbyte(struct rtk_softc *, int, int);
131static void rtk_mii_sync(struct rtk_softc *); 131static void rtk_mii_sync(struct rtk_softc *);
132static void rtk_mii_send(struct rtk_softc *, uint32_t, int); 132static void rtk_mii_send(struct rtk_softc *, uint32_t, int);
133static int rtk_mii_readreg(struct rtk_softc *, struct rtk_mii_frame *); 133static int rtk_mii_readreg(struct rtk_softc *, struct rtk_mii_frame *);
134static int rtk_mii_writereg(struct rtk_softc *, struct rtk_mii_frame *); 134static int rtk_mii_writereg(struct rtk_softc *, struct rtk_mii_frame *);
135 135
136static int rtk_phy_readreg(device_t, int, int, uint16_t *); 136static int rtk_phy_readreg(device_t, int, int, uint16_t *);
137static int rtk_phy_writereg(device_t, int, int, uint16_t); 137static int rtk_phy_writereg(device_t, int, int, uint16_t);
138static void rtk_phy_statchg(struct ifnet *); 138static void rtk_phy_statchg(struct ifnet *);
139static void rtk_tick(void *); 139static void rtk_tick(void *);
140 140
141static int rtk_enable(struct rtk_softc *); 141static int rtk_enable(struct rtk_softc *);
142static void rtk_disable(struct rtk_softc *); 142static void rtk_disable(struct rtk_softc *);
143 143
144static void rtk_list_tx_init(struct rtk_softc *); 144static void rtk_list_tx_init(struct rtk_softc *);
145 145
146#define EE_SET(x) \ 146#define EE_SET(x) \
147 CSR_WRITE_1(sc, RTK_EECMD, \ 147 CSR_WRITE_1(sc, RTK_EECMD, \
148 CSR_READ_1(sc, RTK_EECMD) | (x)) 148 CSR_READ_1(sc, RTK_EECMD) | (x))
149 149
150#define EE_CLR(x) \ 150#define EE_CLR(x) \
151 CSR_WRITE_1(sc, RTK_EECMD, \ 151 CSR_WRITE_1(sc, RTK_EECMD, \
152 CSR_READ_1(sc, RTK_EECMD) & ~(x)) 152 CSR_READ_1(sc, RTK_EECMD) & ~(x))
153 153
154#define EE_DELAY() DELAY(100) 154#define EE_DELAY() DELAY(100)
155 155
156#define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN) 156#define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
157 157
158/* 158/*
159 * Send a read command and address to the EEPROM, check for ACK. 159 * Send a read command and address to the EEPROM, check for ACK.
160 */ 160 */
161static void 161static void
162rtk_eeprom_putbyte(struct rtk_softc *sc, int addr, int addr_len) 162rtk_eeprom_putbyte(struct rtk_softc *sc, int addr, int addr_len)
163{ 163{
164 int d, i; 164 int d, i;
165 165
166 d = (RTK_EECMD_READ << addr_len) | addr; 166 d = (RTK_EECMD_READ << addr_len) | addr;
167 167
168 /* 168 /*
169 * Feed in each bit and stobe the clock. 169 * Feed in each bit and stobe the clock.
170 */ 170 */
171 for (i = RTK_EECMD_LEN + addr_len; i > 0; i--) { 171 for (i = RTK_EECMD_LEN + addr_len; i > 0; i--) {
172 if (d & (1 << (i - 1))) { 172 if (d & (1 << (i - 1))) {
173 EE_SET(RTK_EE_DATAIN); 173 EE_SET(RTK_EE_DATAIN);
174 } else { 174 } else {
175 EE_CLR(RTK_EE_DATAIN); 175 EE_CLR(RTK_EE_DATAIN);
176 } 176 }
177 EE_DELAY(); 177 EE_DELAY();
178 EE_SET(RTK_EE_CLK); 178 EE_SET(RTK_EE_CLK);
179 EE_DELAY(); 179 EE_DELAY();
180 EE_CLR(RTK_EE_CLK); 180 EE_CLR(RTK_EE_CLK);
181 EE_DELAY(); 181 EE_DELAY();
182 } 182 }
183} 183}
184 184
185/* 185/*
186 * Read a word of data stored in the EEPROM at address 'addr.' 186 * Read a word of data stored in the EEPROM at address 'addr.'
187 */ 187 */
188uint16_t 188uint16_t
189rtk_read_eeprom(struct rtk_softc *sc, int addr, int addr_len) 189rtk_read_eeprom(struct rtk_softc *sc, int addr, int addr_len)
190{ 190{
191 uint16_t word; 191 uint16_t word;
192 int i; 192 int i;
193 193
194 /* Enter EEPROM access mode. */ 194 /* Enter EEPROM access mode. */
195 CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_PROGRAM); 195 CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_PROGRAM);
196 EE_DELAY(); 196 EE_DELAY();
197 EE_SET(RTK_EE_SEL); 197 EE_SET(RTK_EE_SEL);
198 198
199 /* 199 /*
200 * Send address of word we want to read. 200 * Send address of word we want to read.
201 */ 201 */
202 rtk_eeprom_putbyte(sc, addr, addr_len); 202 rtk_eeprom_putbyte(sc, addr, addr_len);
203 203
204 /* 204 /*
205 * Start reading bits from EEPROM. 205 * Start reading bits from EEPROM.
206 */ 206 */
207 word = 0; 207 word = 0;
208 for (i = 16; i > 0; i--) { 208 for (i = 16; i > 0; i--) {
209 EE_SET(RTK_EE_CLK); 209 EE_SET(RTK_EE_CLK);
210 EE_DELAY(); 210 EE_DELAY();
211 if (CSR_READ_1(sc, RTK_EECMD) & RTK_EE_DATAOUT) 211 if (CSR_READ_1(sc, RTK_EECMD) & RTK_EE_DATAOUT)
212 word |= 1 << (i - 1); 212 word |= 1 << (i - 1);
213 EE_CLR(RTK_EE_CLK); 213 EE_CLR(RTK_EE_CLK);
214 EE_DELAY(); 214 EE_DELAY();
215 } 215 }
216 216
217 /* Turn off EEPROM access mode. */ 217 /* Turn off EEPROM access mode. */
218 CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_OFF); 218 CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_OFF);
219 219
220 return word; 220 return word;
221} 221}
222 222
223/* 223/*
224 * MII access routines are provided for the 8129, which 224 * MII access routines are provided for the 8129, which
225 * doesn't have a built-in PHY. For the 8139, we fake things 225 * doesn't have a built-in PHY. For the 8139, we fake things
226 * up by diverting rtk_phy_readreg()/rtk_phy_writereg() to the 226 * up by diverting rtk_phy_readreg()/rtk_phy_writereg() to the
227 * direct access PHY registers. 227 * direct access PHY registers.
228 */ 228 */
229#define MII_SET(x) \ 229#define MII_SET(x) \
230 CSR_WRITE_1(sc, RTK_MII, \ 230 CSR_WRITE_1(sc, RTK_MII, \
231 CSR_READ_1(sc, RTK_MII) | (x)) 231 CSR_READ_1(sc, RTK_MII) | (x))
232 232
233#define MII_CLR(x) \ 233#define MII_CLR(x) \
234 CSR_WRITE_1(sc, RTK_MII, \ 234 CSR_WRITE_1(sc, RTK_MII, \
235 CSR_READ_1(sc, RTK_MII) & ~(x)) 235 CSR_READ_1(sc, RTK_MII) & ~(x))
236 236
237/* 237/*
238 * Sync the PHYs by setting data bit and strobing the clock 32 times. 238 * Sync the PHYs by setting data bit and strobing the clock 32 times.
239 */ 239 */
240static void 240static void
241rtk_mii_sync(struct rtk_softc *sc) 241rtk_mii_sync(struct rtk_softc *sc)
242{ 242{
243 int i; 243 int i;
244 244
245 MII_SET(RTK_MII_DIR | RTK_MII_DATAOUT); 245 MII_SET(RTK_MII_DIR | RTK_MII_DATAOUT);
246 246
247 for (i = 0; i < 32; i++) { 247 for (i = 0; i < 32; i++) {
248 MII_SET(RTK_MII_CLK); 248 MII_SET(RTK_MII_CLK);
249 DELAY(1); 249 DELAY(1);
250 MII_CLR(RTK_MII_CLK); 250 MII_CLR(RTK_MII_CLK);
251 DELAY(1); 251 DELAY(1);
252 } 252 }
253} 253}
254 254
255/* 255/*
256 * Clock a series of bits through the MII. 256 * Clock a series of bits through the MII.
257 */ 257 */
258static void 258static void
259rtk_mii_send(struct rtk_softc *sc, uint32_t bits, int cnt) 259rtk_mii_send(struct rtk_softc *sc, uint32_t bits, int cnt)
260{ 260{
261 int i; 261 int i;
262 262
263 MII_CLR(RTK_MII_CLK); 263 MII_CLR(RTK_MII_CLK);
264 264
265 for (i = cnt; i > 0; i--) { 265 for (i = cnt; i > 0; i--) {
266 if (bits & (1 << (i - 1))) { 266 if (bits & (1 << (i - 1))) {
267 MII_SET(RTK_MII_DATAOUT); 267 MII_SET(RTK_MII_DATAOUT);
268 } else { 268 } else {
269 MII_CLR(RTK_MII_DATAOUT); 269 MII_CLR(RTK_MII_DATAOUT);
270 } 270 }
271 DELAY(1); 271 DELAY(1);
272 MII_CLR(RTK_MII_CLK); 272 MII_CLR(RTK_MII_CLK);
273 DELAY(1); 273 DELAY(1);
274 MII_SET(RTK_MII_CLK); 274 MII_SET(RTK_MII_CLK);
275 } 275 }
276} 276}
277 277
278/* 278/*
279 * Read an PHY register through the MII. 279 * Read an PHY register through the MII.
280 */ 280 */
281static int 281static int
282rtk_mii_readreg(struct rtk_softc *sc, struct rtk_mii_frame *frame) 282rtk_mii_readreg(struct rtk_softc *sc, struct rtk_mii_frame *frame)
283{ 283{
284 int i, ack, s, rv = 0; 284 int i, ack, s, rv = 0;
285 285
286 s = splnet(); 286 s = splnet();
287 287
288 /* 288 /*
289 * Set up frame for RX. 289 * Set up frame for RX.
290 */ 290 */
291 frame->mii_stdelim = RTK_MII_STARTDELIM; 291 frame->mii_stdelim = RTK_MII_STARTDELIM;
292 frame->mii_opcode = RTK_MII_READOP; 292 frame->mii_opcode = RTK_MII_READOP;
293 frame->mii_turnaround = 0; 293 frame->mii_turnaround = 0;
294 frame->mii_data = 0; 294 frame->mii_data = 0;
295 295
296 CSR_WRITE_2(sc, RTK_MII, 0); 296 CSR_WRITE_2(sc, RTK_MII, 0);
297 297
298 /* 298 /*
299 * Turn on data xmit. 299 * Turn on data xmit.
300 */ 300 */
301 MII_SET(RTK_MII_DIR); 301 MII_SET(RTK_MII_DIR);
302 302
303 rtk_mii_sync(sc); 303 rtk_mii_sync(sc);
304 304
305 /* 305 /*
306 * Send command/address info. 306 * Send command/address info.
307 */ 307 */
308 rtk_mii_send(sc, frame->mii_stdelim, 2); 308 rtk_mii_send(sc, frame->mii_stdelim, 2);
309 rtk_mii_send(sc, frame->mii_opcode, 2); 309 rtk_mii_send(sc, frame->mii_opcode, 2);
310 rtk_mii_send(sc, frame->mii_phyaddr, 5); 310 rtk_mii_send(sc, frame->mii_phyaddr, 5);
311 rtk_mii_send(sc, frame->mii_regaddr, 5); 311 rtk_mii_send(sc, frame->mii_regaddr, 5);
312 312
313 /* Idle bit */ 313 /* Idle bit */
314 MII_CLR((RTK_MII_CLK | RTK_MII_DATAOUT)); 314 MII_CLR((RTK_MII_CLK | RTK_MII_DATAOUT));
315 DELAY(1); 315 DELAY(1);
316 MII_SET(RTK_MII_CLK); 316 MII_SET(RTK_MII_CLK);
317 DELAY(1); 317 DELAY(1);
318 318
319 /* Turn off xmit. */ 319 /* Turn off xmit. */
320 MII_CLR(RTK_MII_DIR); 320 MII_CLR(RTK_MII_DIR);
321 321
322 /* Check for ack */ 322 /* Check for ack */
323 MII_CLR(RTK_MII_CLK); 323 MII_CLR(RTK_MII_CLK);
324 DELAY(1); 324 DELAY(1);
325 ack = CSR_READ_2(sc, RTK_MII) & RTK_MII_DATAIN; 325 ack = CSR_READ_2(sc, RTK_MII) & RTK_MII_DATAIN;
326 MII_SET(RTK_MII_CLK); 326 MII_SET(RTK_MII_CLK);
327 DELAY(1); 327 DELAY(1);
328 328
329 /* 329 /*
330 * Now try reading data bits. If the ack failed, we still 330 * Now try reading data bits. If the ack failed, we still
331 * need to clock through 16 cycles to keep the PHY(s) in sync. 331 * need to clock through 16 cycles to keep the PHY(s) in sync.
332 */ 332 */
333 if (ack) { 333 if (ack) {
334 for (i = 0; i < 16; i++) { 334 for (i = 0; i < 16; i++) {
335 MII_CLR(RTK_MII_CLK); 335 MII_CLR(RTK_MII_CLK);
336 DELAY(1); 336 DELAY(1);
337 MII_SET(RTK_MII_CLK); 337 MII_SET(RTK_MII_CLK);
338 DELAY(1); 338 DELAY(1);
339 } 339 }
340 rv = -1; 340 rv = -1;
341 goto fail; 341 goto fail;
342 } 342 }
343 343
344 for (i = 16; i > 0; i--) { 344 for (i = 16; i > 0; i--) {
345 MII_CLR(RTK_MII_CLK); 345 MII_CLR(RTK_MII_CLK);
346 DELAY(1); 346 DELAY(1);
347 if (!ack) { 347 if (!ack) {
348 if (CSR_READ_2(sc, RTK_MII) & RTK_MII_DATAIN) 348 if (CSR_READ_2(sc, RTK_MII) & RTK_MII_DATAIN)
349 frame->mii_data |= 1 << (i - 1); 349 frame->mii_data |= 1 << (i - 1);
350 DELAY(1); 350 DELAY(1);
351 } 351 }
352 MII_SET(RTK_MII_CLK); 352 MII_SET(RTK_MII_CLK);
353 DELAY(1); 353 DELAY(1);
354 } 354 }
355 355
356 fail: 356 fail:
357 MII_CLR(RTK_MII_CLK); 357 MII_CLR(RTK_MII_CLK);
358 DELAY(1); 358 DELAY(1);
359 MII_SET(RTK_MII_CLK); 359 MII_SET(RTK_MII_CLK);
360 DELAY(1); 360 DELAY(1);
361 361
362 splx(s); 362 splx(s);
363 363
364 return rv; 364 return rv;
365} 365}
366 366
367/* 367/*
368 * Write to a PHY register through the MII. 368 * Write to a PHY register through the MII.
369 */ 369 */
370static int 370static int
371rtk_mii_writereg(struct rtk_softc *sc, struct rtk_mii_frame *frame) 371rtk_mii_writereg(struct rtk_softc *sc, struct rtk_mii_frame *frame)
372{ 372{
373 int s; 373 int s;
374 374
375 s = splnet(); 375 s = splnet();
376 /* 376 /*
377 * Set up frame for TX. 377 * Set up frame for TX.
378 */ 378 */
379 frame->mii_stdelim = RTK_MII_STARTDELIM; 379 frame->mii_stdelim = RTK_MII_STARTDELIM;
380 frame->mii_opcode = RTK_MII_WRITEOP; 380 frame->mii_opcode = RTK_MII_WRITEOP;
381 frame->mii_turnaround = RTK_MII_TURNAROUND; 381 frame->mii_turnaround = RTK_MII_TURNAROUND;
382 382
383 /* 383 /*
384 * Turn on data output. 384 * Turn on data output.
385 */ 385 */
386 MII_SET(RTK_MII_DIR); 386 MII_SET(RTK_MII_DIR);
387 387
388 rtk_mii_sync(sc); 388 rtk_mii_sync(sc);
389 389
390 rtk_mii_send(sc, frame->mii_stdelim, 2); 390 rtk_mii_send(sc, frame->mii_stdelim, 2);
391 rtk_mii_send(sc, frame->mii_opcode, 2); 391 rtk_mii_send(sc, frame->mii_opcode, 2);
392 rtk_mii_send(sc, frame->mii_phyaddr, 5); 392 rtk_mii_send(sc, frame->mii_phyaddr, 5);
393 rtk_mii_send(sc, frame->mii_regaddr, 5); 393 rtk_mii_send(sc, frame->mii_regaddr, 5);
394 rtk_mii_send(sc, frame->mii_turnaround, 2); 394 rtk_mii_send(sc, frame->mii_turnaround, 2);
395 rtk_mii_send(sc, frame->mii_data, 16); 395 rtk_mii_send(sc, frame->mii_data, 16);
396 396
397 /* Idle bit. */ 397 /* Idle bit. */
398 MII_SET(RTK_MII_CLK); 398 MII_SET(RTK_MII_CLK);
399 DELAY(1); 399 DELAY(1);
400 MII_CLR(RTK_MII_CLK); 400 MII_CLR(RTK_MII_CLK);
401 DELAY(1); 401 DELAY(1);
402 402
403 /* 403 /*
404 * Turn off xmit. 404 * Turn off xmit.
405 */ 405 */
406 MII_CLR(RTK_MII_DIR); 406 MII_CLR(RTK_MII_DIR);
407 407
408 splx(s); 408 splx(s);
409 409
410 return 0; 410 return 0;
411} 411}
412 412
413static int 413static int
414rtk_phy_readreg(device_t self, int phy, int reg, uint16_t *val) 414rtk_phy_readreg(device_t self, int phy, int reg, uint16_t *val)
415{ 415{
416 struct rtk_softc *sc = device_private(self); 416 struct rtk_softc *sc = device_private(self);
417 struct rtk_mii_frame frame; 417 struct rtk_mii_frame frame;
418 int rv; 418 int rv;
419 int rtk8139_reg; 419 int rtk8139_reg;
420 420
421 if ((sc->sc_quirk & RTKQ_8129) == 0) { 421 if ((sc->sc_quirk & RTKQ_8129) == 0) {
422 if (phy != 7) 422 if (phy != 7)
423 return -1; 423 return -1;
424 424
425 switch (reg) { 425 switch (reg) {
426 case MII_BMCR: 426 case MII_BMCR:
427 rtk8139_reg = RTK_BMCR; 427 rtk8139_reg = RTK_BMCR;
428 break; 428 break;
429 case MII_BMSR: 429 case MII_BMSR:
430 rtk8139_reg = RTK_BMSR; 430 rtk8139_reg = RTK_BMSR;
431 break; 431 break;
432 case MII_ANAR: 432 case MII_ANAR:
433 rtk8139_reg = RTK_ANAR; 433 rtk8139_reg = RTK_ANAR;
434 break; 434 break;
435 case MII_ANER: 435 case MII_ANER:
436 rtk8139_reg = RTK_ANER; 436 rtk8139_reg = RTK_ANER;
437 break; 437 break;
438 case MII_ANLPAR: 438 case MII_ANLPAR:
439 rtk8139_reg = RTK_LPAR; 439 rtk8139_reg = RTK_LPAR;
440 break; 440 break;
441 case MII_PHYIDR1: 441 case MII_PHYIDR1:
442 case MII_PHYIDR2: 442 case MII_PHYIDR2:
443 *val = 0; 443 *val = 0;
444 return 0; 444 return 0;
445 default: 445 default:
446#if 0 446#if 0
447 printf("%s: bad phy register\n", device_xname(self)); 447 printf("%s: bad phy register\n", device_xname(self));
448#endif 448#endif
449 return -1; 449 return -1;
450 } 450 }
451 *val = CSR_READ_2(sc, rtk8139_reg); 451 *val = CSR_READ_2(sc, rtk8139_reg);
452 return 0; 452 return 0;
453 } 453 }
454 454
455 memset(&frame, 0, sizeof(frame)); 455 memset(&frame, 0, sizeof(frame));
456 456
457 frame.mii_phyaddr = phy; 457 frame.mii_phyaddr = phy;
458 frame.mii_regaddr = reg; 458 frame.mii_regaddr = reg;
459 rv = rtk_mii_readreg(sc, &frame); 459 rv = rtk_mii_readreg(sc, &frame);
460 *val = frame.mii_data; 460 *val = frame.mii_data;
461 461
462 return rv; 462 return rv;
463} 463}
464 464
465static int 465static int
466rtk_phy_writereg(device_t self, int phy, int reg, uint16_t val) 466rtk_phy_writereg(device_t self, int phy, int reg, uint16_t val)
467{ 467{
468 struct rtk_softc *sc = device_private(self); 468 struct rtk_softc *sc = device_private(self);
469 struct rtk_mii_frame frame; 469 struct rtk_mii_frame frame;
470 int rtk8139_reg; 470 int rtk8139_reg;
471 471
472 if ((sc->sc_quirk & RTKQ_8129) == 0) { 472 if ((sc->sc_quirk & RTKQ_8129) == 0) {
473 if (phy != 7) 473 if (phy != 7)
474 return -1; 474 return -1;
475 475
476 switch (reg) { 476 switch (reg) {
477 case MII_BMCR: 477 case MII_BMCR:
478 rtk8139_reg = RTK_BMCR; 478 rtk8139_reg = RTK_BMCR;
479 break; 479 break;
480 case MII_BMSR: 480 case MII_BMSR:
481 rtk8139_reg = RTK_BMSR; 481 rtk8139_reg = RTK_BMSR;
482 break; 482 break;
483 case MII_ANAR: 483 case MII_ANAR:
484 rtk8139_reg = RTK_ANAR; 484 rtk8139_reg = RTK_ANAR;
485 break; 485 break;
486 case MII_ANER: 486 case MII_ANER:
487 rtk8139_reg = RTK_ANER; 487 rtk8139_reg = RTK_ANER;
488 break; 488 break;
489 case MII_ANLPAR: 489 case MII_ANLPAR:
490 rtk8139_reg = RTK_LPAR; 490 rtk8139_reg = RTK_LPAR;
491 break; 491 break;
492 default: 492 default:
493#if 0 493#if 0
494 printf("%s: bad phy register\n", device_xname(self)); 494 printf("%s: bad phy register\n", device_xname(self));
495#endif 495#endif
496 return -1; 496 return -1;
497 } 497 }
498 CSR_WRITE_2(sc, rtk8139_reg, val); 498 CSR_WRITE_2(sc, rtk8139_reg, val);
499 return 0; 499 return 0;
500 } 500 }
501 501
502 memset(&frame, 0, sizeof(frame)); 502 memset(&frame, 0, sizeof(frame));
503 503
504 frame.mii_phyaddr = phy; 504 frame.mii_phyaddr = phy;
505 frame.mii_regaddr = reg; 505 frame.mii_regaddr = reg;
506 frame.mii_data = val; 506 frame.mii_data = val;
507 507
508 return rtk_mii_writereg(sc, &frame); 508 return rtk_mii_writereg(sc, &frame);
509} 509}
510 510
/*
 * MII status-change callback.  The 8139/8129 need no reprogramming
 * when the link parameters change, so this is intentionally empty.
 */
static void
rtk_phy_statchg(struct ifnet *ifp)
{

	/* Nothing to do. */
}
517 517
518#define rtk_calchash(addr) \ 518#define rtk_calchash(addr) \
519 (ether_crc32_be((addr), ETHER_ADDR_LEN) >> 26) 519 (ether_crc32_be((addr), ETHER_ADDR_LEN) >> 26)
520 520
521/* 521/*
522 * Program the 64-bit multicast hash filter. 522 * Program the 64-bit multicast hash filter.
523 */ 523 */
524void 524void
525rtk_setmulti(struct rtk_softc *sc) 525rtk_setmulti(struct rtk_softc *sc)
526{ 526{
527 struct ethercom *ec = &sc->ethercom; 527 struct ethercom *ec = &sc->ethercom;
528 struct ifnet *ifp = &ec->ec_if; 528 struct ifnet *ifp = &ec->ec_if;
529 uint32_t hashes[2] = { 0, 0 }; 529 uint32_t hashes[2] = { 0, 0 };
530 uint32_t rxfilt; 530 uint32_t rxfilt;
531 struct ether_multi *enm; 531 struct ether_multi *enm;
532 struct ether_multistep step; 532 struct ether_multistep step;
533 int h, mcnt; 533 int h, mcnt;
534 534
535 rxfilt = CSR_READ_4(sc, RTK_RXCFG); 535 rxfilt = CSR_READ_4(sc, RTK_RXCFG);
536 536
537 if (ifp->if_flags & IFF_PROMISC) { 537 if (ifp->if_flags & IFF_PROMISC) {
538 allmulti: 538 allmulti:
539 ifp->if_flags |= IFF_ALLMULTI; 539 ifp->if_flags |= IFF_ALLMULTI;
540 rxfilt |= RTK_RXCFG_RX_MULTI; 540 rxfilt |= RTK_RXCFG_RX_MULTI;
541 CSR_WRITE_4(sc, RTK_RXCFG, rxfilt); 541 CSR_WRITE_4(sc, RTK_RXCFG, rxfilt);
542 CSR_WRITE_4(sc, RTK_MAR0, 0xFFFFFFFF); 542 CSR_WRITE_4(sc, RTK_MAR0, 0xFFFFFFFF);
543 CSR_WRITE_4(sc, RTK_MAR4, 0xFFFFFFFF); 543 CSR_WRITE_4(sc, RTK_MAR4, 0xFFFFFFFF);
544 return; 544 return;
545 } 545 }
546 546
547 /* first, zot all the existing hash bits */ 547 /* first, zot all the existing hash bits */
548 CSR_WRITE_4(sc, RTK_MAR0, 0); 548 CSR_WRITE_4(sc, RTK_MAR0, 0);
549 CSR_WRITE_4(sc, RTK_MAR4, 0); 549 CSR_WRITE_4(sc, RTK_MAR4, 0);
550 550
551 /* now program new ones */ 551 /* now program new ones */
552 ETHER_LOCK(ec); 552 ETHER_LOCK(ec);
553 ETHER_FIRST_MULTI(step, ec, enm); 553 ETHER_FIRST_MULTI(step, ec, enm);
554 mcnt = 0; 554 mcnt = 0;
555 while (enm != NULL) { 555 while (enm != NULL) {
556 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 556 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
557 ETHER_ADDR_LEN) != 0) { 557 ETHER_ADDR_LEN) != 0) {
558 ETHER_UNLOCK(ec); 558 ETHER_UNLOCK(ec);
559 goto allmulti; 559 goto allmulti;
560 } 560 }
561 561
562 h = rtk_calchash(enm->enm_addrlo); 562 h = rtk_calchash(enm->enm_addrlo);
563 if (h < 32) 563 if (h < 32)
564 hashes[0] |= (1 << h); 564 hashes[0] |= __BIT(h);
565 else 565 else
566 hashes[1] |= (1 << (h - 32)); 566 hashes[1] |= __BIT(h - 32);
567 mcnt++; 567 mcnt++;
568 ETHER_NEXT_MULTI(step, enm); 568 ETHER_NEXT_MULTI(step, enm);
569 } 569 }
570 ETHER_UNLOCK(ec); 570 ETHER_UNLOCK(ec);
571 571
572 ifp->if_flags &= ~IFF_ALLMULTI; 572 ifp->if_flags &= ~IFF_ALLMULTI;
573 573
574 if (mcnt) 574 if (mcnt)
575 rxfilt |= RTK_RXCFG_RX_MULTI; 575 rxfilt |= RTK_RXCFG_RX_MULTI;
576 else 576 else
577 rxfilt &= ~RTK_RXCFG_RX_MULTI; 577 rxfilt &= ~RTK_RXCFG_RX_MULTI;
578 578
579 CSR_WRITE_4(sc, RTK_RXCFG, rxfilt); 579 CSR_WRITE_4(sc, RTK_RXCFG, rxfilt);
580 580
581 /* 581 /*
582 * For some unfathomable reason, RealTek decided to reverse 582 * For some unfathomable reason, RealTek decided to reverse
583 * the order of the multicast hash registers in the PCI Express 583 * the order of the multicast hash registers in the PCI Express
584 * parts. This means we have to write the hash pattern in reverse 584 * parts. This means we have to write the hash pattern in reverse
585 * order for those devices. 585 * order for those devices.
586 */ 586 */
587 if ((sc->sc_quirk & RTKQ_PCIE) != 0) { 587 if ((sc->sc_quirk & RTKQ_PCIE) != 0) {
588 CSR_WRITE_4(sc, RTK_MAR0, bswap32(hashes[1])); 588 CSR_WRITE_4(sc, RTK_MAR0, bswap32(hashes[1]));
589 CSR_WRITE_4(sc, RTK_MAR4, bswap32(hashes[0])); 589 CSR_WRITE_4(sc, RTK_MAR4, bswap32(hashes[0]));
590 } else { 590 } else {
591 CSR_WRITE_4(sc, RTK_MAR0, hashes[0]); 591 CSR_WRITE_4(sc, RTK_MAR0, hashes[0]);
592 CSR_WRITE_4(sc, RTK_MAR4, hashes[1]); 592 CSR_WRITE_4(sc, RTK_MAR4, hashes[1]);
593 } 593 }
594} 594}
595 595
596void 596void
597rtk_reset(struct rtk_softc *sc) 597rtk_reset(struct rtk_softc *sc)
598{ 598{
599 int i; 599 int i;
600 600
601 CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_RESET); 601 CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_RESET);
602 602
603 for (i = 0; i < RTK_TIMEOUT; i++) { 603 for (i = 0; i < RTK_TIMEOUT; i++) {
604 DELAY(10); 604 DELAY(10);
605 if ((CSR_READ_1(sc, RTK_COMMAND) & RTK_CMD_RESET) == 0) 605 if ((CSR_READ_1(sc, RTK_COMMAND) & RTK_CMD_RESET) == 0)
606 break; 606 break;
607 } 607 }
608 if (i == RTK_TIMEOUT) 608 if (i == RTK_TIMEOUT)
609 printf("%s: reset never completed!\n", 609 printf("%s: reset never completed!\n",
610 device_xname(sc->sc_dev)); 610 device_xname(sc->sc_dev));
611} 611}
612 612
/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
void
rtk_attach(struct rtk_softc *sc)
{
	device_t self = sc->sc_dev;
	struct ifnet *ifp;
	struct mii_data * const mii = &sc->mii;
	struct rtk_tx_desc *txd;
	uint16_t val;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int error;
	int i, addr_len;

	callout_init(&sc->rtk_tick_ch, 0);

	/*
	 * Check EEPROM type 9346 or 9356.
	 * The ID word reads 0x8129 on parts with the larger 93C56
	 * EEPROM, which needs the wider address length.
	 */
	if (rtk_read_eeprom(sc, RTK_EE_ID, RTK_EEADDR_LEN1) == 0x8129)
		addr_len = RTK_EEADDR_LEN1;
	else
		addr_len = RTK_EEADDR_LEN0;

	/*
	 * Get station address: three 16-bit EEPROM words, each stored
	 * low byte first.
	 */
	val = rtk_read_eeprom(sc, RTK_EE_EADDR0, addr_len);
	eaddr[0] = val & 0xff;
	eaddr[1] = val >> 8;
	val = rtk_read_eeprom(sc, RTK_EE_EADDR1, addr_len);
	eaddr[2] = val & 0xff;
	eaddr[3] = val >> 8;
	val = rtk_read_eeprom(sc, RTK_EE_EADDR2, addr_len);
	eaddr[4] = val & 0xff;
	eaddr[5] = val >> 8;

	/*
	 * Allocate, map, and load the single contiguous receive DMA
	 * area (ring buffer plus 16 bytes of slack used when packets
	 * wrap past the end).
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    RTK_RXBUFLEN + 16, PAGE_SIZE, 0, &sc->sc_dmaseg, 1, &sc->sc_dmanseg,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(self,
		    "can't allocate recv buffer, error = %d\n", error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dmaseg, sc->sc_dmanseg,
	    RTK_RXBUFLEN + 16, (void **)&sc->rtk_rx_buf,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(self,
		    "can't map recv buffer, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    RTK_RXBUFLEN + 16, 1, RTK_RXBUFLEN + 16, 0, BUS_DMA_NOWAIT,
	    &sc->recv_dmamap)) != 0) {
		aprint_error_dev(self,
		    "can't create recv buffer DMA map, error = %d\n", error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->recv_dmamap,
	    sc->rtk_rx_buf, RTK_RXBUFLEN + 16,
	    NULL, BUS_DMA_READ | BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(self,
		    "can't load recv buffer DMA map, error = %d\n", error);
		goto fail_3;
	}

	/*
	 * One DMA map per TX slot; each slot owns a fixed pair of
	 * address/status registers spaced 4 bytes apart.
	 */
	for (i = 0; i < RTK_TX_LIST_CNT; i++) {
		txd = &sc->rtk_tx_descs[i];
		if ((error = bus_dmamap_create(sc->sc_dmat,
		    MCLBYTES, 1, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &txd->txd_dmamap)) != 0) {
			aprint_error_dev(self,
			    "can't create snd buffer DMA map, error = %d\n",
			    error);
			goto fail_4;
		}
		txd->txd_txaddr = RTK_TXADDR0 + (i * 4);
		txd->txd_txstat = RTK_TXSTAT0 + (i * 4);
	}
	SIMPLEQ_INIT(&sc->rtk_tx_free);
	SIMPLEQ_INIT(&sc->rtk_tx_dirty);

	/*
	 * From this point forward, the attachment cannot fail. A failure
	 * before this releases all resources that may have been
	 * allocated.
	 */
	sc->sc_flags |= RTK_ATTACHED;

	/* Reset the adapter. */
	rtk_reset(sc);

	aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(eaddr));

	/* Fill in the ifnet callbacks and flags. */
	ifp = &sc->ethercom.ec_if;
	ifp->if_softc = sc;
	strcpy(ifp->if_xname, device_xname(self));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = rtk_ioctl;
	ifp->if_start = rtk_start;
	ifp->if_watchdog = rtk_watchdog;
	ifp->if_init = rtk_init;
	ifp->if_stop = rtk_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Do ifmedia setup.
	 */
	mii->mii_ifp = ifp;
	mii->mii_readreg = rtk_phy_readreg;
	mii->mii_writereg = rtk_phy_writereg;
	mii->mii_statchg = rtk_phy_statchg;
	sc->ethercom.ec_mii = mii;
	ifmedia_init(&mii->mii_media, IFM_IMASK, ether_mediachange,
	    ether_mediastatus);
	mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);

	/* Choose a default media. */
	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY found: expose a single "none" medium. */
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, eaddr);

	rnd_attach_source(&sc->rnd_source, device_xname(self),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	return;

	/* Error unwinding: release resources in reverse order of setup. */
 fail_4:
	for (i = 0; i < RTK_TX_LIST_CNT; i++) {
		txd = &sc->rtk_tx_descs[i];
		if (txd->txd_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat, txd->txd_dmamap);
	}
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->recv_dmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, sc->rtk_rx_buf,
	    RTK_RXBUFLEN + 16);
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dmaseg, sc->sc_dmanseg);
 fail_0:
	return;
}
769 769
770/* 770/*
771 * Initialize the transmit descriptors. 771 * Initialize the transmit descriptors.
772 */ 772 */
773static void 773static void
774rtk_list_tx_init(struct rtk_softc *sc) 774rtk_list_tx_init(struct rtk_softc *sc)
775{ 775{
776 struct rtk_tx_desc *txd; 776 struct rtk_tx_desc *txd;
777 int i; 777 int i;
778 778
779 while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_dirty)) != NULL) 779 while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_dirty)) != NULL)
780 SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_dirty, txd_q); 780 SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_dirty, txd_q);
781 while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_free)) != NULL) 781 while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_free)) != NULL)
782 SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_free, txd_q); 782 SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_free, txd_q);
783 783
784 for (i = 0; i < RTK_TX_LIST_CNT; i++) { 784 for (i = 0; i < RTK_TX_LIST_CNT; i++) {
785 txd = &sc->rtk_tx_descs[i]; 785 txd = &sc->rtk_tx_descs[i];
786 CSR_WRITE_4(sc, txd->txd_txaddr, 0); 786 CSR_WRITE_4(sc, txd->txd_txaddr, 0);
787 SIMPLEQ_INSERT_TAIL(&sc->rtk_tx_free, txd, txd_q); 787 SIMPLEQ_INSERT_TAIL(&sc->rtk_tx_free, txd, txd_q);
788 } 788 }
789} 789}
790 790
791/* 791/*
792 * rtk_activate: 792 * rtk_activate:
793 * Handle device activation/deactivation requests. 793 * Handle device activation/deactivation requests.
794 */ 794 */
795int 795int
796rtk_activate(device_t self, enum devact act) 796rtk_activate(device_t self, enum devact act)
797{ 797{
798 struct rtk_softc *sc = device_private(self); 798 struct rtk_softc *sc = device_private(self);
799 799
800 switch (act) { 800 switch (act) {
801 case DVACT_DEACTIVATE: 801 case DVACT_DEACTIVATE:
802 if_deactivate(&sc->ethercom.ec_if); 802 if_deactivate(&sc->ethercom.ec_if);
803 return 0; 803 return 0;
804 default: 804 default:
805 return EOPNOTSUPP; 805 return EOPNOTSUPP;
806 } 806 }
807} 807}
808 808
/*
 * rtk_detach:
 *	Detach a rtk interface: stop timers, detach the MII layer
 *	and the network stack hooks, then release the DMA resources
 *	set up by rtk_attach().
 */
int
rtk_detach(struct rtk_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	struct rtk_tx_desc *txd;
	int i;

	/*
	 * Succeed now if there isn't any work to do.
	 */
	if ((sc->sc_flags & RTK_ATTACHED) == 0)
		return 0;

	/* Unhook our tick handler. */
	callout_stop(&sc->rtk_tick_ch);

	/* Detach all PHYs. */
	mii_detach(&sc->mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->mii.mii_media, IFM_INST_ANY);

	rnd_detach_source(&sc->rnd_source);

	ether_ifdetach(ifp);
	if_detach(ifp);

	/* Tear down DMA resources in reverse order of rtk_attach(). */
	for (i = 0; i < RTK_TX_LIST_CNT; i++) {
		txd = &sc->rtk_tx_descs[i];
		if (txd->txd_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat, txd->txd_dmamap);
	}
	bus_dmamap_destroy(sc->sc_dmat, sc->recv_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, sc->rtk_rx_buf,
	    RTK_RXBUFLEN + 16);
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dmaseg, sc->sc_dmanseg);

	/* we don't want to run again */
	sc->sc_flags &= ~RTK_ATTACHED;

	return 0;
}
855 855
856/* 856/*
857 * rtk_enable: 857 * rtk_enable:
858 * Enable the RTL81X9 chip. 858 * Enable the RTL81X9 chip.
859 */ 859 */
860int 860int
861rtk_enable(struct rtk_softc *sc) 861rtk_enable(struct rtk_softc *sc)
862{ 862{
863 863
864 if (RTK_IS_ENABLED(sc) == 0 && sc->sc_enable != NULL) { 864 if (RTK_IS_ENABLED(sc) == 0 && sc->sc_enable != NULL) {
865 if ((*sc->sc_enable)(sc) != 0) { 865 if ((*sc->sc_enable)(sc) != 0) {
866 printf("%s: device enable failed\n", 866 printf("%s: device enable failed\n",
867 device_xname(sc->sc_dev)); 867 device_xname(sc->sc_dev));
868 return EIO; 868 return EIO;
869 } 869 }
870 sc->sc_flags |= RTK_ENABLED; 870 sc->sc_flags |= RTK_ENABLED;
871 } 871 }
872 return 0; 872 return 0;
873} 873}
874 874
875/* 875/*
876 * rtk_disable: 876 * rtk_disable:
877 * Disable the RTL81X9 chip. 877 * Disable the RTL81X9 chip.
878 */ 878 */
879void 879void
880rtk_disable(struct rtk_softc *sc) 880rtk_disable(struct rtk_softc *sc)
881{ 881{
882 882
883 if (RTK_IS_ENABLED(sc) && sc->sc_disable != NULL) { 883 if (RTK_IS_ENABLED(sc) && sc->sc_disable != NULL) {
884 (*sc->sc_disable)(sc); 884 (*sc->sc_disable)(sc);
885 sc->sc_flags &= ~RTK_ENABLED; 885 sc->sc_flags &= ~RTK_ENABLED;
886 } 886 }
887} 887}
888 888
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 *
 * You know there's something wrong with a PCI bus-master chip design.
 *
 * The receive operation is badly documented in the datasheet, so I'll
 * attempt to document it here. The driver provides a buffer area and
 * places its base address in the RX buffer start address register.
 * The chip then begins copying frames into the RX buffer. Each frame
 * is preceded by a 32-bit RX status word which specifies the length
 * of the frame and certain other status bits. Each frame (starting with
 * the status word) is also 32-bit aligned. The frame length is in the
 * first 16 bits of the status word; the lower 15 bits correspond with
 * the 'rx status register' mentioned in the datasheet.
 *
 * Note: to make the Alpha happy, the frame payload needs to be aligned
 * on a 32-bit boundary. To achieve this, we copy the data to mbuf
 * shifted forward 2 bytes.
 */
static void
rtk_rxeof(struct rtk_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	uint8_t *rxbufpos, *dst;
	u_int total_len, wrap;
	uint32_t rxstat;
	uint16_t cur_rx, new_rx;
	uint16_t limit;
	uint16_t rx_bytes, max_bytes;

	ifp = &sc->ethercom.ec_if;

	/*
	 * CURRXADDR lags the true read position by 16 bytes (matching
	 * the -16 written back at next_packet below), so add it back
	 * to find where the next status word lives.
	 */
	cur_rx = (CSR_READ_2(sc, RTK_CURRXADDR) + 16) % RTK_RXBUFLEN;

	/* Do not try to read past this point. */
	limit = CSR_READ_2(sc, RTK_CURRXBUF) % RTK_RXBUFLEN;

	/* Bytes available between cur_rx and limit, accounting for wrap. */
	if (limit < cur_rx)
		max_bytes = (RTK_RXBUFLEN - cur_rx) + limit;
	else
		max_bytes = limit - cur_rx;
	rx_bytes = 0;

	/* Loop until the chip reports the RX buffer empty. */
	while ((CSR_READ_1(sc, RTK_COMMAND) & RTK_CMD_EMPTY_RXBUF) == 0) {
		rxbufpos = sc->rtk_rx_buf + cur_rx;
		/* Sync just the status word before reading it. */
		bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap, cur_rx,
		    RTK_RXSTAT_LEN, BUS_DMASYNC_POSTREAD);
		rxstat = le32toh(*(uint32_t *)rxbufpos);
		bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap, cur_rx,
		    RTK_RXSTAT_LEN, BUS_DMASYNC_PREREAD);

		/*
		 * Here's a totally undocumented fact for you. When the
		 * RealTek chip is in the process of copying a packet into
		 * RAM for you, the length will be 0xfff0. If you spot a
		 * packet header with this value, you need to stop. The
		 * datasheet makes absolutely no mention of this and
		 * RealTek should be shot for this.
		 */
		total_len = rxstat >> 16;
		if (total_len == RTK_RXSTAT_UNFINISHED)
			break;

		/* Reject errored, runt, or oversized frames. */
		if ((rxstat & RTK_RXSTAT_RXOK) == 0 ||
		    total_len < ETHER_MIN_LEN ||
		    total_len > (MCLBYTES - RTK_ETHER_ALIGN)) {
			ifp->if_ierrors++;

			/*
			 * submitted by:[netbsd-pcmcia:00484]
			 * Takahiro Kambe <taca@sky.yamashina.kyoto.jp>
			 * obtain from:
			 * FreeBSD if_rl.c rev 1.24->1.25
			 *
			 */
#if 0
			if (rxstat & (RTK_RXSTAT_BADSYM | RTK_RXSTAT_RUNT |
			    RTK_RXSTAT_GIANT | RTK_RXSTAT_CRCERR |
			    RTK_RXSTAT_ALIGNERR)) {
				CSR_WRITE_2(sc, RTK_COMMAND, RTK_CMD_TX_ENB);
				CSR_WRITE_2(sc, RTK_COMMAND,
				    RTK_CMD_TX_ENB | RTK_CMD_RX_ENB);
				CSR_WRITE_4(sc, RTK_RXCFG, RTK_RXCFG_CONFIG);
				CSR_WRITE_4(sc, RTK_RXADDR,
				    sc->recv_dmamap->dm_segs[0].ds_addr);
				cur_rx = 0;
			}
			break;
#else
			/* On RX error, reinitialize the whole interface. */
			rtk_init(ifp);
			return;
#endif
		}

		/* No errors; receive the packet. */
		rx_bytes += total_len + RTK_RXSTAT_LEN;

		/*
		 * Avoid trying to read more bytes than we know
		 * the chip has prepared for us.
		 */
		if (rx_bytes > max_bytes)
			break;

		/*
		 * Skip the status word, wrapping around to the beginning
		 * of the Rx area, if necessary.
		 */
		cur_rx = (cur_rx + RTK_RXSTAT_LEN) % RTK_RXBUFLEN;
		rxbufpos = sc->rtk_rx_buf + cur_rx;

		/*
		 * Compute the number of bytes at which the packet
		 * will wrap to the beginning of the ring buffer.
		 */
		wrap = RTK_RXBUFLEN - cur_rx;

		/*
		 * Compute where the next pending packet is.
		 */
		if (total_len > wrap)
			new_rx = total_len - wrap;
		else
			new_rx = cur_rx + total_len;
		/* Round up to 32-bit boundary. */
		new_rx = roundup2(new_rx, sizeof(uint32_t)) % RTK_RXBUFLEN;

		/*
		 * The RealTek chip includes the CRC with every
		 * incoming packet; trim it off here.
		 */
		total_len -= ETHER_CRC_LEN;

		/*
		 * Now allocate an mbuf (and possibly a cluster) to hold
		 * the packet. Note we offset the packet 2 bytes so that
		 * data after the Ethernet header will be 4-byte aligned.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			printf("%s: unable to allocate Rx mbuf\n",
			    device_xname(sc->sc_dev));
			ifp->if_ierrors++;
			goto next_packet;
		}
		if (total_len > (MHLEN - RTK_ETHER_ALIGN)) {
			/* Frame won't fit in an mbuf header; need a cluster. */
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				printf("%s: unable to allocate Rx cluster\n",
				    device_xname(sc->sc_dev));
				ifp->if_ierrors++;
				m_freem(m);
				m = NULL;
				goto next_packet;
			}
		}
		m->m_data += RTK_ETHER_ALIGN;	/* for alignment */
		m_set_rcvif(m, ifp);
		m->m_pkthdr.len = m->m_len = total_len;
		dst = mtod(m, void *);

		/*
		 * If the packet wraps, copy up to the wrapping point.
		 */
		if (total_len > wrap) {
			bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap,
			    cur_rx, wrap, BUS_DMASYNC_POSTREAD);
			memcpy(dst, rxbufpos, wrap);
			bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap,
			    cur_rx, wrap, BUS_DMASYNC_PREREAD);
			cur_rx = 0;
			rxbufpos = sc->rtk_rx_buf;
			total_len -= wrap;
			dst += wrap;
		}

		/*
		 * ...and now the rest.
		 */
		bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap,
		    cur_rx, total_len, BUS_DMASYNC_POSTREAD);
		memcpy(dst, rxbufpos, total_len);
		bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap,
		    cur_rx, total_len, BUS_DMASYNC_PREREAD);

 next_packet:
		/*
		 * Tell the chip how far we have consumed; the register
		 * is kept 16 bytes behind the real position (matching
		 * the +16 when cur_rx was first computed above).
		 */
		CSR_WRITE_2(sc, RTK_CURRXADDR, (new_rx - 16) % RTK_RXBUFLEN);
		cur_rx = new_rx;

		if (m == NULL)
			continue;

		/* pass it on. */
		if_percpuq_enqueue(ifp->if_percpuq, m);
	}
}
1087 1087
/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
static void
rtk_txeof(struct rtk_softc *sc)
{
	struct ifnet *ifp;
	struct rtk_tx_desc *txd;
	uint32_t txstat;

	ifp = &sc->ethercom.ec_if;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been uploaded.
	 */
	while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_dirty)) != NULL) {
		txstat = CSR_READ_4(sc, txd->txd_txstat);
		/* Stop at the first slot the chip has not finished with. */
		if ((txstat & (RTK_TXSTAT_TX_OK |
		    RTK_TXSTAT_TX_UNDERRUN | RTK_TXSTAT_TXABRT)) == 0)
			break;

		SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_dirty, txd_q);

		/* Unload the slot's DMA map and release its mbuf. */
		bus_dmamap_sync(sc->sc_dmat, txd->txd_dmamap, 0,
		    txd->txd_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txd->txd_dmamap);
		m_freem(txd->txd_mbuf);
		txd->txd_mbuf = NULL;

		/* Collision count lives in bits 24+ of the status word. */
		ifp->if_collisions += (txstat & RTK_TXSTAT_COLLCNT) >> 24;

		if (txstat & RTK_TXSTAT_TX_OK)
			ifp->if_opackets++;
		else {
			ifp->if_oerrors++;

			/*
			 * Increase Early TX threshold if underrun occurred.
			 * Increase step 64 bytes.
			 */
			if (txstat & RTK_TXSTAT_TX_UNDERRUN) {
#ifdef DEBUG
				printf("%s: transmit underrun;",
				    device_xname(sc->sc_dev));
#endif
				if (sc->sc_txthresh < RTK_TXTH_MAX) {
					/* Threshold is in units of 32 bytes. */
					sc->sc_txthresh += 2;
#ifdef DEBUG
					printf(" new threshold: %d bytes",
					    sc->sc_txthresh * 32);
#endif
				}
#ifdef DEBUG
				printf("\n");
#endif
			}
			/* Abort/out-of-window errors need TXCFG rewritten. */
			if (txstat & (RTK_TXSTAT_TXABRT | RTK_TXSTAT_OUTOFWIN))
				CSR_WRITE_4(sc, RTK_TXCFG, RTK_TXCFG_CONFIG);
		}
		/* Slot is free again; allow further transmits. */
		SIMPLEQ_INSERT_TAIL(&sc->rtk_tx_free, txd, txd_q);
		ifp->if_flags &= ~IFF_OACTIVE;
	}

	/* Clear the timeout timer if there is no pending packet. */
	if (SIMPLEQ_EMPTY(&sc->rtk_tx_dirty))
		ifp->if_timer = 0;

}
1158 1158
/*
 * Interrupt handler: service RX/TX completions and errors until no
 * interesting status bits remain, then re-arm the interrupt mask.
 * Returns nonzero if the interrupt was ours.
 */
int
rtk_intr(void *arg)
{
	struct rtk_softc *sc;
	struct ifnet *ifp;
	uint16_t status;
	int handled;

	sc = arg;
	ifp = &sc->ethercom.ec_if;

	/* Device without power cannot be interrupting; not ours. */
	if (!device_has_power(sc->sc_dev))
		return 0;

	/* Disable interrupts. */
	CSR_WRITE_2(sc, RTK_IMR, 0x0000);

	handled = 0;
	for (;;) {

		status = CSR_READ_2(sc, RTK_ISR);

		if (status == 0xffff)
			break;		/* Card is gone... */

		/* Acknowledge the conditions we are about to service. */
		if (status)
			CSR_WRITE_2(sc, RTK_ISR, status);

		if ((status & RTK_INTRS) == 0)
			break;

		handled = 1;

		if (status & RTK_ISR_RX_OK)
			rtk_rxeof(sc);

		/* RX errors are drained the same way as good frames. */
		if (status & RTK_ISR_RX_ERR)
			rtk_rxeof(sc);

		if (status & (RTK_ISR_TX_OK | RTK_ISR_TX_ERR))
			rtk_txeof(sc);

		/* Fatal chip error: reset and reinitialize. */
		if (status & RTK_ISR_SYSTEM_ERR) {
			rtk_reset(sc);
			rtk_init(ifp);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS);

	if_schedule_deferred_start(ifp);

	/* Feed interrupt status into the entropy pool. */
	rnd_add_uint32(&sc->rnd_source, status);

	return handled;
}
1216 1216
1217/* 1217/*
1218 * Main transmit routine. 1218 * Main transmit routine.
1219 */ 1219 */
1220 1220
1221static void 1221static void
1222rtk_start(struct ifnet *ifp) 1222rtk_start(struct ifnet *ifp)
1223{ 1223{
1224 struct rtk_softc *sc; 1224 struct rtk_softc *sc;
1225 struct rtk_tx_desc *txd; 1225 struct rtk_tx_desc *txd;
1226 struct mbuf *m_head, *m_new; 1226 struct mbuf *m_head, *m_new;
1227 int error, len; 1227 int error, len;
1228 1228
1229 sc = ifp->if_softc; 1229 sc = ifp->if_softc;
1230 1230
1231 while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_free)) != NULL) { 1231 while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_free)) != NULL) {
1232 IFQ_POLL(&ifp->if_snd, m_head); 1232 IFQ_POLL(&ifp->if_snd, m_head);
1233 if (m_head == NULL) 1233 if (m_head == NULL)
1234 break; 1234 break;
1235 m_new = NULL; 1235 m_new = NULL;
1236 1236
1237 /* 1237 /*
1238 * Load the DMA map. If this fails, the packet didn't 1238 * Load the DMA map. If this fails, the packet didn't
1239 * fit in one DMA segment, and we need to copy. Note, 1239 * fit in one DMA segment, and we need to copy. Note,
1240 * the packet must also be aligned. 1240 * the packet must also be aligned.
1241 * if the packet is too small, copy it too, so we're sure 1241 * if the packet is too small, copy it too, so we're sure
1242 * so have enough room for the pad buffer. 1242 * so have enough room for the pad buffer.
1243 */ 1243 */
1244 if ((mtod(m_head, uintptr_t) & 3) != 0 || 1244 if ((mtod(m_head, uintptr_t) & 3) != 0 ||
1245 m_head->m_pkthdr.len < ETHER_PAD_LEN || 1245 m_head->m_pkthdr.len < ETHER_PAD_LEN ||
1246 bus_dmamap_load_mbuf(sc->sc_dmat, txd->txd_dmamap, 1246 bus_dmamap_load_mbuf(sc->sc_dmat, txd->txd_dmamap,
1247 m_head, BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) { 1247 m_head, BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
1248 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 1248 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1249 if (m_new == NULL) { 1249 if (m_new == NULL) {
1250 printf("%s: unable to allocate Tx mbuf\n", 1250 printf("%s: unable to allocate Tx mbuf\n",
1251 device_xname(sc->sc_dev)); 1251 device_xname(sc->sc_dev));
1252 break; 1252 break;
1253 } 1253 }
1254 if (m_head->m_pkthdr.len > MHLEN) { 1254 if (m_head->m_pkthdr.len > MHLEN) {
1255 MCLGET(m_new, M_DONTWAIT); 1255 MCLGET(m_new, M_DONTWAIT);
1256 if ((m_new->m_flags & M_EXT) == 0) { 1256 if ((m_new->m_flags & M_EXT) == 0) {
1257 printf("%s: unable to allocate Tx " 1257 printf("%s: unable to allocate Tx "
1258 "cluster\n", 1258 "cluster\n",
1259 device_xname(sc->sc_dev)); 1259 device_xname(sc->sc_dev));
1260 m_freem(m_new); 1260 m_freem(m_new);
1261 break; 1261 break;
1262 } 1262 }
1263 } 1263 }
1264 m_copydata(m_head, 0, m_head->m_pkthdr.len, 1264 m_copydata(m_head, 0, m_head->m_pkthdr.len,
1265 mtod(m_new, void *)); 1265 mtod(m_new, void *));
1266 m_new->m_pkthdr.len = m_new->m_len = 1266 m_new->m_pkthdr.len = m_new->m_len =
1267 m_head->m_pkthdr.len; 1267 m_head->m_pkthdr.len;
1268 if (m_head->m_pkthdr.len < ETHER_PAD_LEN) { 1268 if (m_head->m_pkthdr.len < ETHER_PAD_LEN) {
1269 memset( 1269 memset(
1270 mtod(m_new, char *) + m_head->m_pkthdr.len, 1270 mtod(m_new, char *) + m_head->m_pkthdr.len,
1271 0, ETHER_PAD_LEN - m_head->m_pkthdr.len); 1271 0, ETHER_PAD_LEN - m_head->m_pkthdr.len);
1272 m_new->m_pkthdr.len = m_new->m_len = 1272 m_new->m_pkthdr.len = m_new->m_len =
1273 ETHER_PAD_LEN; 1273 ETHER_PAD_LEN;
1274 } 1274 }
1275 error = bus_dmamap_load_mbuf(sc->sc_dmat, 1275 error = bus_dmamap_load_mbuf(sc->sc_dmat,
1276 txd->txd_dmamap, m_new, 1276 txd->txd_dmamap, m_new,
1277 BUS_DMA_WRITE | BUS_DMA_NOWAIT); 1277 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1278 if (error) { 1278 if (error) {
1279 printf("%s: unable to load Tx buffer, " 1279 printf("%s: unable to load Tx buffer, "
1280 "error = %d\n", 1280 "error = %d\n",
1281 device_xname(sc->sc_dev), error); 1281 device_xname(sc->sc_dev), error);
1282 break; 1282 break;
1283 } 1283 }
1284 } 1284 }
1285 IFQ_DEQUEUE(&ifp->if_snd, m_head); 1285 IFQ_DEQUEUE(&ifp->if_snd, m_head);
1286 /* 1286 /*
1287 * If there's a BPF listener, bounce a copy of this frame 1287 * If there's a BPF listener, bounce a copy of this frame
1288 * to him. 1288 * to him.
1289 */ 1289 */
1290 bpf_mtap(ifp, m_head, BPF_D_OUT); 1290 bpf_mtap(ifp, m_head, BPF_D_OUT);
1291 if (m_new != NULL) { 1291 if (m_new != NULL) {
1292 m_freem(m_head); 1292 m_freem(m_head);
1293 m_head = m_new; 1293 m_head = m_new;
1294 } 1294 }
1295 txd->txd_mbuf = m_head; 1295 txd->txd_mbuf = m_head;
1296 1296
1297 SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_free, txd_q); 1297 SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_free, txd_q);
1298 SIMPLEQ_INSERT_TAIL(&sc->rtk_tx_dirty, txd, txd_q); 1298 SIMPLEQ_INSERT_TAIL(&sc->rtk_tx_dirty, txd, txd_q);
1299 1299
1300 /* 1300 /*
1301 * Transmit the frame. 1301 * Transmit the frame.
1302 */ 1302 */
1303 bus_dmamap_sync(sc->sc_dmat, 1303 bus_dmamap_sync(sc->sc_dmat,
1304 txd->txd_dmamap, 0, txd->txd_dmamap->dm_mapsize, 1304 txd->txd_dmamap, 0, txd->txd_dmamap->dm_mapsize,
1305 BUS_DMASYNC_PREWRITE); 1305 BUS_DMASYNC_PREWRITE);
1306 1306
1307 len = txd->txd_dmamap->dm_segs[0].ds_len; 1307 len = txd->txd_dmamap->dm_segs[0].ds_len;
1308 1308
1309 CSR_WRITE_4(sc, txd->txd_txaddr, 1309 CSR_WRITE_4(sc, txd->txd_txaddr,
1310 txd->txd_dmamap->dm_segs[0].ds_addr); 1310 txd->txd_dmamap->dm_segs[0].ds_addr);
1311 CSR_WRITE_4(sc, txd->txd_txstat, 1311 CSR_WRITE_4(sc, txd->txd_txstat,
1312 RTK_TXSTAT_THRESH(sc->sc_txthresh) | len); 1312 RTK_TXSTAT_THRESH(sc->sc_txthresh) | len);
1313 1313
1314 /* 1314 /*
1315 * Set a timeout in case the chip goes out to lunch. 1315 * Set a timeout in case the chip goes out to lunch.
1316 */ 1316 */
1317 ifp->if_timer = 5; 1317 ifp->if_timer = 5;
1318 } 1318 }
1319 1319
1320 /* 1320 /*
1321 * We broke out of the loop because all our TX slots are 1321 * We broke out of the loop because all our TX slots are
1322 * full. Mark the NIC as busy until it drains some of the 1322 * full. Mark the NIC as busy until it drains some of the
1323 * packets from the queue. 1323 * packets from the queue.
1324 */ 1324 */
1325 if (SIMPLEQ_EMPTY(&sc->rtk_tx_free)) 1325 if (SIMPLEQ_EMPTY(&sc->rtk_tx_free))
1326 ifp->if_flags |= IFF_OACTIVE; 1326 ifp->if_flags |= IFF_OACTIVE;
1327} 1327}
1328 1328
1329static int 1329static int
1330rtk_init(struct ifnet *ifp) 1330rtk_init(struct ifnet *ifp)
1331{ 1331{
1332 struct rtk_softc *sc = ifp->if_softc; 1332 struct rtk_softc *sc = ifp->if_softc;
1333 int error, i; 1333 int error, i;
1334 uint32_t rxcfg; 1334 uint32_t rxcfg;
1335 1335
1336 if ((error = rtk_enable(sc)) != 0) 1336 if ((error = rtk_enable(sc)) != 0)
1337 goto out; 1337 goto out;
1338 1338
1339 /* 1339 /*
1340 * Cancel pending I/O. 1340 * Cancel pending I/O.
1341 */ 1341 */
1342 rtk_stop(ifp, 0); 1342 rtk_stop(ifp, 0);
1343 1343
1344 /* Init our MAC address */ 1344 /* Init our MAC address */
1345 for (i = 0; i < ETHER_ADDR_LEN; i++) { 1345 for (i = 0; i < ETHER_ADDR_LEN; i++) {
1346 CSR_WRITE_1(sc, RTK_IDR0 + i, CLLADDR(ifp->if_sadl)[i]); 1346 CSR_WRITE_1(sc, RTK_IDR0 + i, CLLADDR(ifp->if_sadl)[i]);
1347 } 1347 }
1348 1348
1349 /* Init the RX buffer pointer register. */ 1349 /* Init the RX buffer pointer register. */
1350 bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap, 0, 1350 bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap, 0,
1351 sc->recv_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 1351 sc->recv_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1352 CSR_WRITE_4(sc, RTK_RXADDR, sc->recv_dmamap->dm_segs[0].ds_addr); 1352 CSR_WRITE_4(sc, RTK_RXADDR, sc->recv_dmamap->dm_segs[0].ds_addr);
1353 1353
1354 /* Init TX descriptors. */ 1354 /* Init TX descriptors. */
1355 rtk_list_tx_init(sc); 1355 rtk_list_tx_init(sc);
1356 1356
1357 /* Init Early TX threshold. */ 1357 /* Init Early TX threshold. */
1358 sc->sc_txthresh = RTK_TXTH_256; 1358 sc->sc_txthresh = RTK_TXTH_256;
1359 /* 1359 /*
1360 * Enable transmit and receive. 1360 * Enable transmit and receive.
1361 */ 1361 */
1362 CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB | RTK_CMD_RX_ENB); 1362 CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB | RTK_CMD_RX_ENB);
1363 1363
1364 /* 1364 /*
1365 * Set the initial TX and RX configuration. 1365 * Set the initial TX and RX configuration.
1366 */ 1366 */
1367 CSR_WRITE_4(sc, RTK_TXCFG, RTK_TXCFG_CONFIG); 1367 CSR_WRITE_4(sc, RTK_TXCFG, RTK_TXCFG_CONFIG);
1368 CSR_WRITE_4(sc, RTK_RXCFG, RTK_RXCFG_CONFIG); 1368 CSR_WRITE_4(sc, RTK_RXCFG, RTK_RXCFG_CONFIG);
1369 1369
1370 /* Set the individual bit to receive frames for this host only. */ 1370 /* Set the individual bit to receive frames for this host only. */
1371 rxcfg = CSR_READ_4(sc, RTK_RXCFG); 1371 rxcfg = CSR_READ_4(sc, RTK_RXCFG);
1372 rxcfg |= RTK_RXCFG_RX_INDIV; 1372 rxcfg |= RTK_RXCFG_RX_INDIV;
1373 1373
1374 /* If we want promiscuous mode, set the allframes bit. */ 1374 /* If we want promiscuous mode, set the allframes bit. */
1375 if (ifp->if_flags & IFF_PROMISC) { 1375 if (ifp->if_flags & IFF_PROMISC) {
1376 rxcfg |= RTK_RXCFG_RX_ALLPHYS; 1376 rxcfg |= RTK_RXCFG_RX_ALLPHYS;
1377 CSR_WRITE_4(sc, RTK_RXCFG, rxcfg); 1377 CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1378 } else { 1378 } else {
1379 rxcfg &= ~RTK_RXCFG_RX_ALLPHYS; 1379 rxcfg &= ~RTK_RXCFG_RX_ALLPHYS;
1380 CSR_WRITE_4(sc, RTK_RXCFG, rxcfg); 1380 CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1381 } 1381 }
1382 1382
1383 /* 1383 /*
1384 * Set capture broadcast bit to capture broadcast frames. 1384 * Set capture broadcast bit to capture broadcast frames.
1385 */ 1385 */
1386 if (ifp->if_flags & IFF_BROADCAST) { 1386 if (ifp->if_flags & IFF_BROADCAST) {
1387 rxcfg |= RTK_RXCFG_RX_BROAD; 1387 rxcfg |= RTK_RXCFG_RX_BROAD;
1388 CSR_WRITE_4(sc, RTK_RXCFG, rxcfg); 1388 CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1389 } else { 1389 } else {
1390 rxcfg &= ~RTK_RXCFG_RX_BROAD; 1390 rxcfg &= ~RTK_RXCFG_RX_BROAD;
1391 CSR_WRITE_4(sc, RTK_RXCFG, rxcfg); 1391 CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1392 } 1392 }
1393 1393
1394 /* 1394 /*
1395 * Program the multicast filter, if necessary. 1395 * Program the multicast filter, if necessary.
1396 */ 1396 */
1397 rtk_setmulti(sc); 1397 rtk_setmulti(sc);
1398 1398
1399 /* 1399 /*
1400 * Enable interrupts. 1400 * Enable interrupts.
1401 */ 1401 */
1402 CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS); 1402 CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS);
1403 1403
1404 /* Start RX/TX process. */ 1404 /* Start RX/TX process. */
1405 CSR_WRITE_4(sc, RTK_MISSEDPKT, 0); 1405 CSR_WRITE_4(sc, RTK_MISSEDPKT, 0);
1406 1406
1407 /* Enable receiver and transmitter. */ 1407 /* Enable receiver and transmitter. */
1408 CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB | RTK_CMD_RX_ENB); 1408 CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB | RTK_CMD_RX_ENB);
1409 1409
1410 CSR_WRITE_1(sc, RTK_CFG1, RTK_CFG1_DRVLOAD | RTK_CFG1_FULLDUPLEX); 1410 CSR_WRITE_1(sc, RTK_CFG1, RTK_CFG1_DRVLOAD | RTK_CFG1_FULLDUPLEX);
1411 1411
1412 /* 1412 /*
1413 * Set current media. 1413 * Set current media.
1414 */ 1414 */
1415 if ((error = ether_mediachange(ifp)) != 0) 1415 if ((error = ether_mediachange(ifp)) != 0)
1416 goto out; 1416 goto out;
1417 1417
1418 ifp->if_flags |= IFF_RUNNING; 1418 ifp->if_flags |= IFF_RUNNING;
1419 ifp->if_flags &= ~IFF_OACTIVE; 1419 ifp->if_flags &= ~IFF_OACTIVE;
1420 1420
1421 callout_reset(&sc->rtk_tick_ch, hz, rtk_tick, sc); 1421 callout_reset(&sc->rtk_tick_ch, hz, rtk_tick, sc);
1422 1422
1423 out: 1423 out:
1424 if (error) { 1424 if (error) {
1425 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1425 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1426 ifp->if_timer = 0; 1426 ifp->if_timer = 0;
1427 printf("%s: interface not running\n", device_xname(sc->sc_dev)); 1427 printf("%s: interface not running\n", device_xname(sc->sc_dev));
1428 } 1428 }
1429 return error; 1429 return error;
1430} 1430}
1431 1431
1432static int 1432static int
1433rtk_ioctl(struct ifnet *ifp, u_long command, void *data) 1433rtk_ioctl(struct ifnet *ifp, u_long command, void *data)
1434{ 1434{
1435 struct rtk_softc *sc = ifp->if_softc; 1435 struct rtk_softc *sc = ifp->if_softc;
1436 int s, error; 1436 int s, error;
1437 1437
1438 s = splnet(); 1438 s = splnet();
1439 error = ether_ioctl(ifp, command, data); 1439 error = ether_ioctl(ifp, command, data);
1440 if (error == ENETRESET) { 1440 if (error == ENETRESET) {
1441 if (ifp->if_flags & IFF_RUNNING) { 1441 if (ifp->if_flags & IFF_RUNNING) {
1442 /* 1442 /*
1443 * Multicast list has changed. Set the 1443 * Multicast list has changed. Set the
1444 * hardware filter accordingly. 1444 * hardware filter accordingly.
1445 */ 1445 */
1446 rtk_setmulti(sc); 1446 rtk_setmulti(sc);
1447 } 1447 }
1448 error = 0; 1448 error = 0;
1449 } 1449 }
1450 splx(s); 1450 splx(s);
1451 1451
1452 return error; 1452 return error;
1453} 1453}
1454 1454
1455static void 1455static void
1456rtk_watchdog(struct ifnet *ifp) 1456rtk_watchdog(struct ifnet *ifp)
1457{ 1457{
1458 struct rtk_softc *sc; 1458 struct rtk_softc *sc;
1459 1459
1460 sc = ifp->if_softc; 1460 sc = ifp->if_softc;
1461 1461
1462 printf("%s: watchdog timeout\n", device_xname(sc->sc_dev)); 1462 printf("%s: watchdog timeout\n", device_xname(sc->sc_dev));
1463 ifp->if_oerrors++; 1463 ifp->if_oerrors++;
1464 rtk_txeof(sc); 1464 rtk_txeof(sc);
1465 rtk_rxeof(sc); 1465 rtk_rxeof(sc);
1466 rtk_init(ifp); 1466 rtk_init(ifp);
1467} 1467}
1468 1468
1469/* 1469/*
1470 * Stop the adapter and free any mbufs allocated to the 1470 * Stop the adapter and free any mbufs allocated to the
1471 * RX and TX lists. 1471 * RX and TX lists.
1472 */ 1472 */
1473static void 1473static void
1474rtk_stop(struct ifnet *ifp, int disable) 1474rtk_stop(struct ifnet *ifp, int disable)
1475{ 1475{
1476 struct rtk_softc *sc = ifp->if_softc; 1476 struct rtk_softc *sc = ifp->if_softc;
1477 struct rtk_tx_desc *txd; 1477 struct rtk_tx_desc *txd;
1478 1478
1479 callout_stop(&sc->rtk_tick_ch); 1479 callout_stop(&sc->rtk_tick_ch);
1480 1480
1481 mii_down(&sc->mii); 1481 mii_down(&sc->mii);
1482 1482
1483 CSR_WRITE_1(sc, RTK_COMMAND, 0x00); 1483 CSR_WRITE_1(sc, RTK_COMMAND, 0x00);
1484 CSR_WRITE_2(sc, RTK_IMR, 0x0000); 1484 CSR_WRITE_2(sc, RTK_IMR, 0x0000);
1485 1485
1486 /* 1486 /*
1487 * Free the TX list buffers. 1487 * Free the TX list buffers.
1488 */ 1488 */
1489 while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_dirty)) != NULL) { 1489 while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_dirty)) != NULL) {
1490 SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_dirty, txd_q); 1490 SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_dirty, txd_q);
1491 bus_dmamap_unload(sc->sc_dmat, txd->txd_dmamap); 1491 bus_dmamap_unload(sc->sc_dmat, txd->txd_dmamap);
1492 m_freem(txd->txd_mbuf); 1492 m_freem(txd->txd_mbuf);
1493 txd->txd_mbuf = NULL; 1493 txd->txd_mbuf = NULL;
1494 CSR_WRITE_4(sc, txd->txd_txaddr, 0); 1494 CSR_WRITE_4(sc, txd->txd_txaddr, 0);
1495 } 1495 }
1496 1496
1497 if (disable) 1497 if (disable)
1498 rtk_disable(sc); 1498 rtk_disable(sc);
1499 1499
1500 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1500 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1501 ifp->if_timer = 0; 1501 ifp->if_timer = 0;
1502} 1502}
1503 1503
1504static void 1504static void
1505rtk_tick(void *arg) 1505rtk_tick(void *arg)
1506{ 1506{
1507 struct rtk_softc *sc = arg; 1507 struct rtk_softc *sc = arg;
1508 int s; 1508 int s;
1509 1509
1510 s = splnet(); 1510 s = splnet();
1511 mii_tick(&sc->mii); 1511 mii_tick(&sc->mii);
1512 splx(s); 1512 splx(s);
1513 1513
1514 callout_reset(&sc->rtk_tick_ch, hz, rtk_tick, sc); 1514 callout_reset(&sc->rtk_tick_ch, hz, rtk_tick, sc);
1515} 1515}

cvs diff -r1.50 -r1.50.4.1 src/sys/dev/ic/rtl81x9reg.h (switch to unified diff)

--- src/sys/dev/ic/rtl81x9reg.h 2019/04/05 23:46:04 1.50
+++ src/sys/dev/ic/rtl81x9reg.h 2020/01/28 11:12:30 1.50.4.1
@@ -1,610 +1,619 @@ @@ -1,610 +1,619 @@
1/* $NetBSD: rtl81x9reg.h,v 1.50 2019/04/05 23:46:04 uwe Exp $ */ 1/* $NetBSD: rtl81x9reg.h,v 1.50.4.1 2020/01/28 11:12:30 martin Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1997, 1998 4 * Copyright (c) 1997, 1998
5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software 15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement: 16 * must display the following acknowledgement:
17 * This product includes software developed by Bill Paul. 17 * This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors 18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software 19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission. 20 * without specific prior written permission.
21 * 21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE. 32 * THE POSSIBILITY OF SUCH DAMAGE.
33 * 33 *
34 * FreeBSD Id: if_rlreg.h,v 1.9 1999/06/20 18:56:09 wpaul Exp 34 * FreeBSD Id: if_rlreg.h,v 1.9 1999/06/20 18:56:09 wpaul Exp
35 */ 35 */
36 36
37/* 37/*
38 * RealTek 8129/8139 register offsets 38 * RealTek 8129/8139 register offsets
39 */ 39 */
40#define RTK_IDR0 0x0000 /* ID register 0 (station addr) */ 40#define RTK_IDR0 0x0000 /* ID register 0 (station addr) */
41#define RTK_IDR1 0x0001 /* Must use 32-bit accesses (?) */ 41#define RTK_IDR1 0x0001 /* Must use 32-bit accesses (?) */
42#define RTK_IDR2 0x0002 42#define RTK_IDR2 0x0002
43#define RTK_IDR3 0x0003 43#define RTK_IDR3 0x0003
44#define RTK_IDR4 0x0004 44#define RTK_IDR4 0x0004
45#define RTK_IDR5 0x0005 45#define RTK_IDR5 0x0005
46 /* 0006-0007 reserved */ 46 /* 0006-0007 reserved */
47#define RTK_MAR0 0x0008 /* Multicast hash table */ 47#define RTK_MAR0 0x0008 /* Multicast hash table */
48#define RTK_MAR1 0x0009 48#define RTK_MAR1 0x0009
49#define RTK_MAR2 0x000A 49#define RTK_MAR2 0x000A
50#define RTK_MAR3 0x000B 50#define RTK_MAR3 0x000B
51#define RTK_MAR4 0x000C 51#define RTK_MAR4 0x000C
52#define RTK_MAR5 0x000D 52#define RTK_MAR5 0x000D
53#define RTK_MAR6 0x000E 53#define RTK_MAR6 0x000E
54#define RTK_MAR7 0x000F 54#define RTK_MAR7 0x000F
55 55
56#define RTK_TXSTAT0 0x0010 /* status of TX descriptor 0 */ 56#define RTK_TXSTAT0 0x0010 /* status of TX descriptor 0 */
57#define RTK_TXSTAT1 0x0014 /* status of TX descriptor 1 */ 57#define RTK_TXSTAT1 0x0014 /* status of TX descriptor 1 */
58#define RTK_TXSTAT2 0x0018 /* status of TX descriptor 2 */ 58#define RTK_TXSTAT2 0x0018 /* status of TX descriptor 2 */
59#define RTK_TXSTAT3 0x001C /* status of TX descriptor 3 */ 59#define RTK_TXSTAT3 0x001C /* status of TX descriptor 3 */
60 60
61#define RTK_TXADDR0 0x0020 /* address of TX descriptor 0 */ 61#define RTK_TXADDR0 0x0020 /* address of TX descriptor 0 */
62#define RTK_TXADDR1 0x0024 /* address of TX descriptor 1 */ 62#define RTK_TXADDR1 0x0024 /* address of TX descriptor 1 */
63#define RTK_TXADDR2 0x0028 /* address of TX descriptor 2 */ 63#define RTK_TXADDR2 0x0028 /* address of TX descriptor 2 */
64#define RTK_TXADDR3 0x002C /* address of TX descriptor 3 */ 64#define RTK_TXADDR3 0x002C /* address of TX descriptor 3 */
65 65
66#define RTK_RXADDR 0x0030 /* RX ring start address */ 66#define RTK_RXADDR 0x0030 /* RX ring start address */
67#define RTK_RX_EARLY_BYTES 0x0034 /* RX early byte count */ 67#define RTK_RX_EARLY_BYTES 0x0034 /* RX early byte count */
68#define RTK_RX_EARLY_STAT 0x0036 /* RX early status */ 68#define RTK_RX_EARLY_STAT 0x0036 /* RX early status */
69#define RTK_COMMAND 0x0037 /* command register */ 69#define RTK_COMMAND 0x0037 /* command register */
70#define RTK_CURRXADDR 0x0038 /* current address of packet read */ 70#define RTK_CURRXADDR 0x0038 /* current address of packet read */
71#define RTK_CURRXBUF 0x003A /* current RX buffer address */ 71#define RTK_CURRXBUF 0x003A /* current RX buffer address */
72#define RTK_IMR 0x003C /* interrupt mask register */ 72#define RTK_IMR 0x003C /* interrupt mask register */
73#define RTK_ISR 0x003E /* interrupt status register */ 73#define RTK_ISR 0x003E /* interrupt status register */
74#define RTK_TXCFG 0x0040 /* transmit config */ 74#define RTK_TXCFG 0x0040 /* transmit config */
75#define RTK_RXCFG 0x0044 /* receive config */ 75#define RTK_RXCFG 0x0044 /* receive config */
76#define RTK_TIMERCNT 0x0048 /* timer count register */ 76#define RTK_TIMERCNT 0x0048 /* timer count register */
77#define RTK_MISSEDPKT 0x004C /* missed packet counter */ 77#define RTK_MISSEDPKT 0x004C /* missed packet counter */
78#define RTK_EECMD 0x0050 /* EEPROM command register */ 78#define RTK_EECMD 0x0050 /* EEPROM command register */
79#define RTK_CFG0 0x0051 /* config register #0 */ 79#define RTK_CFG0 0x0051 /* config register #0 */
80#define RTK_CFG1 0x0052 /* config register #1 */ 80#define RTK_CFG1 0x0052 /* config register #1 */
81 /* 0053-0057 reserved */ 81 /* 0053-0057 reserved */
82#define RTK_MEDIASTAT 0x0058 /* media status register (8139) */ 82#define RTK_MEDIASTAT 0x0058 /* media status register (8139) */
83 /* 0059-005A reserved */ 83 /* 0059-005A reserved */
84#define RTK_MII 0x005A /* 8129 chip only */ 84#define RTK_MII 0x005A /* 8129 chip only */
85#define RTK_HALTCLK 0x005B 85#define RTK_HALTCLK 0x005B
86#define RTK_MULTIINTR 0x005C /* multiple interrupt */ 86#define RTK_MULTIINTR 0x005C /* multiple interrupt */
87#define RTK_PCIREV 0x005E /* PCI revision value */ 87#define RTK_PCIREV 0x005E /* PCI revision value */
88 /* 005F reserved */ 88 /* 005F reserved */
89#define RTK_TXSTAT_ALL 0x0060 /* TX status of all descriptors */ 89#define RTK_TXSTAT_ALL 0x0060 /* TX status of all descriptors */
90 90
91/* Direct PHY access registers only available on 8139 */ 91/* Direct PHY access registers only available on 8139 */
92#define RTK_BMCR 0x0062 /* PHY basic mode control */ 92#define RTK_BMCR 0x0062 /* PHY basic mode control */
93#define RTK_BMSR 0x0064 /* PHY basic mode status */ 93#define RTK_BMSR 0x0064 /* PHY basic mode status */
94#define RTK_ANAR 0x0066 /* PHY autoneg advert */ 94#define RTK_ANAR 0x0066 /* PHY autoneg advert */
95#define RTK_LPAR 0x0068 /* PHY link partner ability */ 95#define RTK_LPAR 0x0068 /* PHY link partner ability */
96#define RTK_ANER 0x006A /* PHY autoneg expansion */ 96#define RTK_ANER 0x006A /* PHY autoneg expansion */
97 97
98#define RTK_DISCCNT 0x006C /* disconnect counter */ 98#define RTK_DISCCNT 0x006C /* disconnect counter */
99#define RTK_FALSECAR 0x006E /* false carrier counter */ 99#define RTK_FALSECAR 0x006E /* false carrier counter */
100#define RTK_NWAYTST 0x0070 /* NWAY test register */ 100#define RTK_NWAYTST 0x0070 /* NWAY test register */
101#define RTK_RX_ER 0x0072 /* RX_ER counter */ 101#define RTK_RX_ER 0x0072 /* RX_ER counter */
102#define RTK_CSCFG 0x0074 /* CS configuration register */ 102#define RTK_CSCFG 0x0074 /* CS configuration register */
103 103
104/* 104/*
105 * When operating in special C+ mode, some of the registers in an 105 * When operating in special C+ mode, some of the registers in an
106 * 8139C+ chip have different definitions. These are also used for 106 * 8139C+ chip have different definitions. These are also used for
107 * the 8169 gigE chip. 107 * the 8169 gigE chip.
108 */ 108 */
109#define RTK_DUMPSTATS_LO 0x0010 /* counter dump command register */ 109#define RTK_DUMPSTATS_LO 0x0010 /* counter dump command register */
110#define RTK_DUMPSTATS_HI 0x0014 /* counter dump command register */ 110#define RTK_DUMPSTATS_HI 0x0014 /* counter dump command register */
111#define RTK_TXLIST_ADDR_LO 0x0020 /* 64 bits, 256 byte alignment */ 111#define RTK_TXLIST_ADDR_LO 0x0020 /* 64 bits, 256 byte alignment */
112#define RTK_TXLIST_ADDR_HI 0x0024 /* 64 bits, 256 byte alignment */ 112#define RTK_TXLIST_ADDR_HI 0x0024 /* 64 bits, 256 byte alignment */
113#define RTK_TXLIST_ADDR_HPRIO_LO 0x0028 /* 64 bits, 256 byte aligned */ 113#define RTK_TXLIST_ADDR_HPRIO_LO 0x0028 /* 64 bits, 256 byte aligned */
114#define RTK_TXLIST_ADDR_HPRIO_HI 0x002C /* 64 bits, 256 byte aligned */ 114#define RTK_TXLIST_ADDR_HPRIO_HI 0x002C /* 64 bits, 256 byte aligned */
115#define RTK_CFG2 0x0053 115#define RTK_CFG2 0x0053
116#define RTK_TIMERINT 0x0054 /* interrupt on timer expire */ 116#define RTK_TIMERINT 0x0054 /* interrupt on timer expire */
117#define RTK_TXSTART 0x00D9 /* 8 bits */ 117#define RTK_TXSTART 0x00D9 /* 8 bits */
118#define RTK_CPLUS_CMD 0x00E0 /* 16 bits */ 118#define RTK_CPLUS_CMD 0x00E0 /* 16 bits */
119#define RTK_RXLIST_ADDR_LO 0x00E4 /* 64 bits, 256 byte alignment */ 119#define RTK_RXLIST_ADDR_LO 0x00E4 /* 64 bits, 256 byte alignment */
120#define RTK_RXLIST_ADDR_HI 0x00E8 /* 64 bits, 256 byte alignment */ 120#define RTK_RXLIST_ADDR_HI 0x00E8 /* 64 bits, 256 byte alignment */
121#define RTK_EARLY_TX_THRESH 0x00EC /* 8 bits */ 121#define RTK_EARLY_TX_THRESH 0x00EC /* 8 bits */
122 122
123/* 123/*
124 * Registers specific to the 8169 gigE chip 124 * Registers specific to the 8169 gigE chip
125 */ 125 */
126#define RTK_GTXSTART 0x0038 /* 8 bits */ 126#define RTK_GTXSTART 0x0038 /* 8 bits */
127#define RTK_TIMERINT_8169 0x0058 /* different offset than 8139 */ 127#define RTK_TIMERINT_8169 0x0058 /* different offset than 8139 */
128#define RTK_PHYAR 0x0060 128#define RTK_PHYAR 0x0060
129#define RTK_CSIDR 0x0064 129#define RTK_CSIDR 0x0064
130#define RTK_CSIAR 0x0068 130#define RTK_CSIAR 0x0068
131#define RTK_TBI_LPAR 0x006A 131#define RTK_TBI_LPAR 0x006A
132#define RTK_GMEDIASTAT 0x006C /* 8 bits */ 132#define RTK_GMEDIASTAT 0x006C /* 8 bits */
133#define RTK_PMCH 0x006F /* 8 bits */ 133#define RTK_PMCH 0x006F /* 8 bits */
134#define RTK_EPHYAR 0x0080 134#define RTK_EPHYAR 0x0080
135#define RTK_LDPS 0x0082 /* Link Down Power Saving */ 135#define RTK_LDPS 0x0082 /* Link Down Power Saving */
136#define RTK_DBG_REG 0x00D1 136#define RTK_DBG_REG 0x00D1
137#define RTK_MAXRXPKTLEN 0x00DA /* 16 bits, chip multiplies by 8 */ 137#define RTK_MAXRXPKTLEN 0x00DA /* 16 bits, chip multiplies by 8 */
138#define RTK_IM 0x00E2 138#define RTK_IM 0x00E2
139#define RTK_MISC 0x00F0 139#define RTK_MISC 0x00F0
140 140
141/* 141/*
142 * TX config register bits 142 * TX config register bits
143 */ 143 */
144#define RTK_TXCFG_CLRABRT 0x00000001 /* retransmit aborted pkt */ 144#define RTK_TXCFG_CLRABRT 0x00000001 /* retransmit aborted pkt */
145#define RTK_TXCFG_MAXDMA 0x00000700 /* max DMA burst size */ 145#define RTK_TXCFG_MAXDMA 0x00000700 /* max DMA burst size */
146#define RTK_TXCFG_CRCAPPEND 0x00010000 /* CRC append (0 = yes) */ 146#define RTK_TXCFG_CRCAPPEND 0x00010000 /* CRC append (0 = yes) */
147#define RTK_TXCFG_LOOPBKTST 0x00060000 /* loopback test */ 147#define RTK_TXCFG_LOOPBKTST 0x00060000 /* loopback test */
148#define RTK_TXCFG_IFG2 0x00080000 /* 8169 only */ 148#define RTK_TXCFG_IFG2 0x00080000 /* 8169 only */
149#define RTK_TXCFG_IFG 0x03000000 /* interframe gap */ 149#define RTK_TXCFG_IFG 0x03000000 /* interframe gap */
150#define RTK_TXCFG_HWREV 0x7CC00000 150#define RTK_TXCFG_HWREV 0x7CC00000
151 151
152#define RTK_LOOPTEST_OFF 0x00000000 152#define RTK_LOOPTEST_OFF 0x00000000
153#define RTK_LOOPTEST_ON 0x00020000 153#define RTK_LOOPTEST_ON 0x00020000
154#define RTK_LOOPTEST_ON_CPLUS 0x00060000 154#define RTK_LOOPTEST_ON_CPLUS 0x00060000
155 155
156/* Known revision codes. */ 156/* Known revision codes. */
157#define RTK_HWREV_8169 0x00000000 157#define RTK_HWREV_8169 0x00000000
158#define RTK_HWREV_8110S 0x00800000 158#define RTK_HWREV_8110S 0x00800000
159#define RTK_HWREV_8169S 0x04000000 159#define RTK_HWREV_8169S 0x04000000
160#define RTK_HWREV_8169_8110SB 0x10000000 160#define RTK_HWREV_8169_8110SB 0x10000000
161#define RTK_HWREV_8169_8110SC 0x18000000 161#define RTK_HWREV_8169_8110SC 0x18000000
 162#define RTK_HWREV_8401E 0x24000000
162#define RTK_HWREV_8102EL 0x24800000 163#define RTK_HWREV_8102EL 0x24800000
163#define RTK_HWREV_8103E 0x24C00000 164#define RTK_HWREV_8102EL_SPIN1 0x24C00000
164#define RTK_HWREV_8168D 0x28000000 165#define RTK_HWREV_8168D 0x28000000
165#define RTK_HWREV_8168DP 0x28800000 166#define RTK_HWREV_8168DP 0x28800000
166#define RTK_HWREV_8168E 0x2C000000 167#define RTK_HWREV_8168E 0x2C000000
167#define RTK_HWREV_8168E_VL 0x2C800000 168#define RTK_HWREV_8168E_VL 0x2C800000
168#define RTK_HWREV_8168_SPIN1 0x30000000 169#define RTK_HWREV_8168_SPIN1 0x30000000
169#define RTK_HWREV_8168G 0x4c000000 
170#define RTK_HWREV_8168G_SPIN1 0x4c100000 
171#define RTK_HWREV_8168G_SPIN2 0x50900000 
172#define RTK_HWREV_8168G_SPIN4 0x5c800000 
173#define RTK_HWREV_8168GU 0x50800000 
174#define RTK_HWREV_8100E 0x30800000 170#define RTK_HWREV_8100E 0x30800000
175#define RTK_HWREV_8101E 0x34000000 171#define RTK_HWREV_8101E 0x34000000
176#define RTK_HWREV_8102E 0x34800000 172#define RTK_HWREV_8102E 0x34800000
 173#define RTK_HWREV_8103E 0x34c00000
177#define RTK_HWREV_8168_SPIN2 0x38000000 174#define RTK_HWREV_8168_SPIN2 0x38000000
178#define RTK_HWREV_8168_SPIN3 0x38400000 175#define RTK_HWREV_8168_SPIN3 0x38400000
179#define RTK_HWREV_8100E_SPIN2 0x38800000 176#define RTK_HWREV_8100E_SPIN2 0x38800000
180#define RTK_HWREV_8168C 0x3C000000 177#define RTK_HWREV_8168C 0x3C000000
181#define RTK_HWREV_8168C_SPIN2 0x3C400000 178#define RTK_HWREV_8168C_SPIN2 0x3C400000
182#define RTK_HWREV_8168CP 0x3C800000 179#define RTK_HWREV_8168CP 0x3C800000
 180#define RTK_HWREV_8105E 0x40800000
 181#define RTK_HWREV_8105E_SPIN1 0x40C00000
 182#define RTK_HWREV_8402 0x44000000
 183#define RTK_HWREV_8106E 0x44800000
183#define RTK_HWREV_8168F 0x48000000 184#define RTK_HWREV_8168F 0x48000000
 185#define RTK_HWREV_8411 0x48800000
 186#define RTK_HWREV_8168G 0x4c000000
 187#define RTK_HWREV_8168G_SPIN1 0x4c100000
 188#define RTK_HWREV_8168EP 0x50000000
 189#define RTK_HWREV_8168GU 0x50800000
 190#define RTK_HWREV_8168G_SPIN2 0x50900000
184#define RTK_HWREV_8168H 0x54000000 191#define RTK_HWREV_8168H 0x54000000
185#define RTK_HWREV_8168H_SPIN1 0x54100000 192#define RTK_HWREV_8168H_SPIN1 0x54100000
 193#define RTK_HWREV_8168FP 0x54800000
 194#define RTK_HWREV_8168G_SPIN4 0x5c800000
186#define RTK_HWREV_8139 0x60000000 195#define RTK_HWREV_8139 0x60000000
187#define RTK_HWREV_8139A 0x70000000 196#define RTK_HWREV_8139A 0x70000000
188#define RTK_HWREV_8139AG 0x70800000 197#define RTK_HWREV_8139AG 0x70800000
189#define RTK_HWREV_8139B 0x78000000 
190#define RTK_HWREV_8130 0x7C000000 
191#define RTK_HWREV_8139C 0x74000000 198#define RTK_HWREV_8139C 0x74000000
192#define RTK_HWREV_8139D 0x74400000 199#define RTK_HWREV_8139D 0x74400000
193#define RTK_HWREV_8139CPLUS 0x74800000 200#define RTK_HWREV_8139CPLUS 0x74800000
194#define RTK_HWREV_8101 0x74c00000 201#define RTK_HWREV_8101 0x74c00000
 202#define RTK_HWREV_8139B 0x78000000
195#define RTK_HWREV_8100 0x78800000 203#define RTK_HWREV_8100 0x78800000
 204#define RTK_HWREV_8130 0x7C000000
196#define RTK_HWREV_8169_8110SBL 0x7cc00000 205#define RTK_HWREV_8169_8110SBL 0x7cc00000
197 206
198#define RTK_TXDMA_16BYTES 0x00000000 207#define RTK_TXDMA_16BYTES 0x00000000
199#define RTK_TXDMA_32BYTES 0x00000100 208#define RTK_TXDMA_32BYTES 0x00000100
200#define RTK_TXDMA_64BYTES 0x00000200 209#define RTK_TXDMA_64BYTES 0x00000200
201#define RTK_TXDMA_128BYTES 0x00000300 210#define RTK_TXDMA_128BYTES 0x00000300
202#define RTK_TXDMA_256BYTES 0x00000400 211#define RTK_TXDMA_256BYTES 0x00000400
203#define RTK_TXDMA_512BYTES 0x00000500 212#define RTK_TXDMA_512BYTES 0x00000500
204#define RTK_TXDMA_1024BYTES 0x00000600 213#define RTK_TXDMA_1024BYTES 0x00000600
205#define RTK_TXDMA_2048BYTES 0x00000700 214#define RTK_TXDMA_2048BYTES 0x00000700
206 215
207/* 216/*
208 * Transmit descriptor status register bits. 217 * Transmit descriptor status register bits.
209 */ 218 */
210#define RTK_TXSTAT_LENMASK 0x00001FFF 219#define RTK_TXSTAT_LENMASK 0x00001FFF
211#define RTK_TXSTAT_OWN 0x00002000 220#define RTK_TXSTAT_OWN 0x00002000
212#define RTK_TXSTAT_TX_UNDERRUN 0x00004000 221#define RTK_TXSTAT_TX_UNDERRUN 0x00004000
213#define RTK_TXSTAT_TX_OK 0x00008000 222#define RTK_TXSTAT_TX_OK 0x00008000
214#define RTK_TXSTAT_EARLY_THRESH 0x003F0000 223#define RTK_TXSTAT_EARLY_THRESH 0x003F0000
215#define RTK_TXSTAT_COLLCNT 0x0F000000 224#define RTK_TXSTAT_COLLCNT 0x0F000000
216#define RTK_TXSTAT_CARR_HBEAT 0x10000000 225#define RTK_TXSTAT_CARR_HBEAT 0x10000000
217#define RTK_TXSTAT_OUTOFWIN 0x20000000 226#define RTK_TXSTAT_OUTOFWIN 0x20000000
218#define RTK_TXSTAT_TXABRT 0x40000000 227#define RTK_TXSTAT_TXABRT 0x40000000
219#define RTK_TXSTAT_CARRLOSS 0x80000000 228#define RTK_TXSTAT_CARRLOSS 0x80000000
220 229
221#define RTK_TXSTAT_THRESH(x) (((x) << 16) & RTK_TXSTAT_EARLY_THRESH) 230#define RTK_TXSTAT_THRESH(x) (((x) << 16) & RTK_TXSTAT_EARLY_THRESH)
222#define RTK_TXTH_256 8 /* (x) * 32 bytes */ 231#define RTK_TXTH_256 8 /* (x) * 32 bytes */
223#define RTK_TXTH_1536 48 232#define RTK_TXTH_1536 48
224 233
225/* MISC register */ 234/* MISC register */
226#define RTK_MISC_TXPLA_RST __BIT(29) 235#define RTK_MISC_TXPLA_RST __BIT(29)
227#define RTK_MISC_DISABLE_LAN_EN __BIT(23) /* Enable GPIO pin */ 236#define RTK_MISC_DISABLE_LAN_EN __BIT(23) /* Enable GPIO pin */
228#define RTK_MISC_PWM_EN __BIT(22) 237#define RTK_MISC_PWM_EN __BIT(22)
229#define RTK_MISC_RXDV_GATED_EN __BIT(19) 238#define RTK_MISC_RXDV_GATED_EN __BIT(19)
230#define RTK_MISC_EARLY_TALLY_EN __BIT(16) 239#define RTK_MISC_EARLY_TALLY_EN __BIT(16)
231 240
232 241
233/* 242/*
234 * Interrupt status register bits. 243 * Interrupt status register bits.
235 */ 244 */
236#define RTK_ISR_RX_OK 0x0001 245#define RTK_ISR_RX_OK 0x0001
237#define RTK_ISR_RX_ERR 0x0002 246#define RTK_ISR_RX_ERR 0x0002
238#define RTK_ISR_TX_OK 0x0004 247#define RTK_ISR_TX_OK 0x0004
239#define RTK_ISR_TX_ERR 0x0008 248#define RTK_ISR_TX_ERR 0x0008
240#define RTK_ISR_RX_OVERRUN 0x0010 249#define RTK_ISR_RX_OVERRUN 0x0010
241#define RTK_ISR_PKT_UNDERRUN 0x0020 250#define RTK_ISR_PKT_UNDERRUN 0x0020
242#define RTK_ISR_LINKCHG 0x0020 /* 8169 only */ 251#define RTK_ISR_LINKCHG 0x0020 /* 8169 only */
243#define RTK_ISR_FIFO_OFLOW 0x0040 /* 8139 only */ 252#define RTK_ISR_FIFO_OFLOW 0x0040 /* 8139 only */
244#define RTK_ISR_TX_DESC_UNAVAIL 0x0080 /* C+ only */ 253#define RTK_ISR_TX_DESC_UNAVAIL 0x0080 /* C+ only */
245#define RTK_ISR_SWI 0x0100 /* C+ only */ 254#define RTK_ISR_SWI 0x0100 /* C+ only */
246#define RTK_ISR_CABLE_LEN_CHGD 0x2000 255#define RTK_ISR_CABLE_LEN_CHGD 0x2000
247#define RTK_ISR_PCS_TIMEOUT 0x4000 /* 8129 only */ 256#define RTK_ISR_PCS_TIMEOUT 0x4000 /* 8129 only */
248#define RTK_ISR_TIMEOUT_EXPIRED 0x4000 257#define RTK_ISR_TIMEOUT_EXPIRED 0x4000
249#define RTK_ISR_SYSTEM_ERR 0x8000 258#define RTK_ISR_SYSTEM_ERR 0x8000
250 259
251#define RTK_INTRS \ 260#define RTK_INTRS \
252 (RTK_ISR_TX_OK|RTK_ISR_RX_OK|RTK_ISR_RX_ERR|RTK_ISR_TX_ERR| \ 261 (RTK_ISR_TX_OK|RTK_ISR_RX_OK|RTK_ISR_RX_ERR|RTK_ISR_TX_ERR| \
253 RTK_ISR_RX_OVERRUN|RTK_ISR_PKT_UNDERRUN|RTK_ISR_FIFO_OFLOW| \ 262 RTK_ISR_RX_OVERRUN|RTK_ISR_PKT_UNDERRUN|RTK_ISR_FIFO_OFLOW| \
254 RTK_ISR_PCS_TIMEOUT|RTK_ISR_SYSTEM_ERR) 263 RTK_ISR_PCS_TIMEOUT|RTK_ISR_SYSTEM_ERR)
255 264
256#define RTK_INTRS_CPLUS \ 265#define RTK_INTRS_CPLUS \
257 (RTK_ISR_RX_OK|RTK_ISR_RX_ERR|RTK_ISR_TX_ERR| \ 266 (RTK_ISR_RX_OK|RTK_ISR_RX_ERR|RTK_ISR_TX_ERR| \
258 RTK_ISR_RX_OVERRUN|RTK_ISR_PKT_UNDERRUN|RTK_ISR_FIFO_OFLOW| \ 267 RTK_ISR_RX_OVERRUN|RTK_ISR_PKT_UNDERRUN|RTK_ISR_FIFO_OFLOW| \
259 RTK_ISR_PCS_TIMEOUT|RTK_ISR_SYSTEM_ERR|RTK_ISR_TIMEOUT_EXPIRED) 268 RTK_ISR_PCS_TIMEOUT|RTK_ISR_SYSTEM_ERR|RTK_ISR_TIMEOUT_EXPIRED)
260 269
261#define RTK_INTRS_IM_HW \ 270#define RTK_INTRS_IM_HW \
262 (RTK_INTRS_CPLUS|RTK_ISR_TX_OK) 271 (RTK_INTRS_CPLUS|RTK_ISR_TX_OK)
263 272
264/* 273/*
265 * Media status register. (8139 only) 274 * Media status register. (8139 only)
266 */ 275 */
267#define RTK_MEDIASTAT_RXPAUSE 0x01 276#define RTK_MEDIASTAT_RXPAUSE 0x01
268#define RTK_MEDIASTAT_TXPAUSE 0x02 277#define RTK_MEDIASTAT_TXPAUSE 0x02
269#define RTK_MEDIASTAT_LINK 0x04 278#define RTK_MEDIASTAT_LINK 0x04
270#define RTK_MEDIASTAT_SPEED10 0x08 279#define RTK_MEDIASTAT_SPEED10 0x08
271#define RTK_MEDIASTAT_RXFLOWCTL 0x40 /* duplex mode */ 280#define RTK_MEDIASTAT_RXFLOWCTL 0x40 /* duplex mode */
272#define RTK_MEDIASTAT_TXFLOWCTL 0x80 /* duplex mode */ 281#define RTK_MEDIASTAT_TXFLOWCTL 0x80 /* duplex mode */
273 282
274/* 283/*
275 * Receive config register. 284 * Receive config register.
276 */ 285 */
277#define RTK_RXCFG_RX_ALLPHYS 0x00000001 /* accept all nodes */ 286#define RTK_RXCFG_RX_ALLPHYS 0x00000001 /* accept all nodes */
278#define RTK_RXCFG_RX_INDIV 0x00000002 /* match filter */ 287#define RTK_RXCFG_RX_INDIV 0x00000002 /* match filter */
279#define RTK_RXCFG_RX_MULTI 0x00000004 /* accept all multicast */ 288#define RTK_RXCFG_RX_MULTI 0x00000004 /* accept all multicast */
280#define RTK_RXCFG_RX_BROAD 0x00000008 /* accept all broadcast */ 289#define RTK_RXCFG_RX_BROAD 0x00000008 /* accept all broadcast */
281#define RTK_RXCFG_RX_RUNT 0x00000010 290#define RTK_RXCFG_RX_RUNT 0x00000010
282#define RTK_RXCFG_RX_ERRPKT 0x00000020 291#define RTK_RXCFG_RX_ERRPKT 0x00000020
283#define RTK_RXCFG_WRAP 0x00000080 292#define RTK_RXCFG_WRAP 0x00000080
284#define RTK_RXCFG_MAXDMA 0x00000700 293#define RTK_RXCFG_MAXDMA 0x00000700
285#define RTK_RXCFG_BUFSZ 0x00001800 294#define RTK_RXCFG_BUFSZ 0x00001800
286#define RTK_RXCFG_FIFOTHRESH 0x0000E000 295#define RTK_RXCFG_FIFOTHRESH 0x0000E000
287#define RTK_RXCFG_EARLYTHRESH 0x07000000 296#define RTK_RXCFG_EARLYTHRESH 0x07000000
288 297
289#define RTK_RXDMA_16BYTES 0x00000000 298#define RTK_RXDMA_16BYTES 0x00000000
290#define RTK_RXDMA_32BYTES 0x00000100 299#define RTK_RXDMA_32BYTES 0x00000100
291#define RTK_RXDMA_64BYTES 0x00000200 300#define RTK_RXDMA_64BYTES 0x00000200
292#define RTK_RXDMA_128BYTES 0x00000300 301#define RTK_RXDMA_128BYTES 0x00000300
293#define RTK_RXDMA_256BYTES 0x00000400 302#define RTK_RXDMA_256BYTES 0x00000400
294#define RTK_RXDMA_512BYTES 0x00000500 303#define RTK_RXDMA_512BYTES 0x00000500
295#define RTK_RXDMA_1024BYTES 0x00000600 304#define RTK_RXDMA_1024BYTES 0x00000600
296#define RTK_RXDMA_UNLIMITED 0x00000700 305#define RTK_RXDMA_UNLIMITED 0x00000700
297 306
298#define RTK_RXBUF_8 0x00000000 307#define RTK_RXBUF_8 0x00000000
299#define RTK_RXBUF_16 0x00000800 308#define RTK_RXBUF_16 0x00000800
300#define RTK_RXBUF_32 0x00001000 309#define RTK_RXBUF_32 0x00001000
301#define RTK_RXBUF_64 0x00001800 310#define RTK_RXBUF_64 0x00001800
302#define RTK_RXBUF_LEN(x) (1 << (((x) >> 11) + 13)) 311#define RTK_RXBUF_LEN(x) (1 << (((x) >> 11) + 13))
303 312
304#define RTK_RXFIFO_16BYTES 0x00000000 313#define RTK_RXFIFO_16BYTES 0x00000000
305#define RTK_RXFIFO_32BYTES 0x00002000 314#define RTK_RXFIFO_32BYTES 0x00002000
306#define RTK_RXFIFO_64BYTES 0x00004000 315#define RTK_RXFIFO_64BYTES 0x00004000
307#define RTK_RXFIFO_128BYTES 0x00006000 316#define RTK_RXFIFO_128BYTES 0x00006000
308#define RTK_RXFIFO_256BYTES 0x00008000 317#define RTK_RXFIFO_256BYTES 0x00008000
309#define RTK_RXFIFO_512BYTES 0x0000A000 318#define RTK_RXFIFO_512BYTES 0x0000A000
310#define RTK_RXFIFO_1024BYTES 0x0000C000 319#define RTK_RXFIFO_1024BYTES 0x0000C000
311#define RTK_RXFIFO_NOTHRESH 0x0000E000 320#define RTK_RXFIFO_NOTHRESH 0x0000E000
312 321
313/* 322/*
314 * Bits in RX status header (included with RX'ed packet 323 * Bits in RX status header (included with RX'ed packet
315 * in ring buffer). 324 * in ring buffer).
316 */ 325 */
317#define RTK_RXSTAT_RXOK 0x00000001 326#define RTK_RXSTAT_RXOK 0x00000001
318#define RTK_RXSTAT_ALIGNERR 0x00000002 327#define RTK_RXSTAT_ALIGNERR 0x00000002
319#define RTK_RXSTAT_CRCERR 0x00000004 328#define RTK_RXSTAT_CRCERR 0x00000004
320#define RTK_RXSTAT_GIANT 0x00000008 329#define RTK_RXSTAT_GIANT 0x00000008
321#define RTK_RXSTAT_RUNT 0x00000010 330#define RTK_RXSTAT_RUNT 0x00000010
322#define RTK_RXSTAT_BADSYM 0x00000020 331#define RTK_RXSTAT_BADSYM 0x00000020
323#define RTK_RXSTAT_BROAD 0x00002000 332#define RTK_RXSTAT_BROAD 0x00002000
324#define RTK_RXSTAT_INDIV 0x00004000 333#define RTK_RXSTAT_INDIV 0x00004000
325#define RTK_RXSTAT_MULTI 0x00008000 334#define RTK_RXSTAT_MULTI 0x00008000
326#define RTK_RXSTAT_LENMASK 0xFFFF0000 335#define RTK_RXSTAT_LENMASK 0xFFFF0000
327 336
328#define RTK_RXSTAT_UNFINISHED 0xFFF0 /* DMA still in progress */ 337#define RTK_RXSTAT_UNFINISHED 0xFFF0 /* DMA still in progress */
329/* 338/*
330 * Command register. 339 * Command register.
331 */ 340 */
332#define RTK_CMD_EMPTY_RXBUF 0x0001 341#define RTK_CMD_EMPTY_RXBUF 0x0001
333#define RTK_CMD_TX_ENB 0x0004 342#define RTK_CMD_TX_ENB 0x0004
334#define RTK_CMD_RX_ENB 0x0008 343#define RTK_CMD_RX_ENB 0x0008
335#define RTK_CMD_RESET 0x0010 344#define RTK_CMD_RESET 0x0010
336#define RTK_CMD_STOPREQ 0x0080 345#define RTK_CMD_STOPREQ 0x0080
337 346
338/* 347/*
339 * EEPROM control register 348 * EEPROM control register
340 */ 349 */
341#define RTK_EE_DATAOUT 0x01 /* Data out */ 350#define RTK_EE_DATAOUT 0x01 /* Data out */
342#define RTK_EE_DATAIN 0x02 /* Data in */ 351#define RTK_EE_DATAIN 0x02 /* Data in */
343#define RTK_EE_CLK 0x04 /* clock */ 352#define RTK_EE_CLK 0x04 /* clock */
344#define RTK_EE_SEL 0x08 /* chip select */ 353#define RTK_EE_SEL 0x08 /* chip select */
345#define RTK_EE_MODE (0x40|0x80) 354#define RTK_EE_MODE (0x40|0x80)
346 355
347#define RTK_EEMODE_OFF 0x00 356#define RTK_EEMODE_OFF 0x00
348#define RTK_EEMODE_AUTOLOAD 0x40 357#define RTK_EEMODE_AUTOLOAD 0x40
349#define RTK_EEMODE_PROGRAM 0x80 358#define RTK_EEMODE_PROGRAM 0x80
350#define RTK_EEMODE_WRITECFG (0x80|0x40) 359#define RTK_EEMODE_WRITECFG (0x80|0x40)
351 360
352/* 9346/9356 EEPROM commands */ 361/* 9346/9356 EEPROM commands */
353#define RTK_EEADDR_LEN0 6 /* 9346 */ 362#define RTK_EEADDR_LEN0 6 /* 9346 */
354#define RTK_EEADDR_LEN1 8 /* 9356 */ 363#define RTK_EEADDR_LEN1 8 /* 9356 */
355#define RTK_EECMD_LEN 4 364#define RTK_EECMD_LEN 4
356 365
357#define RTK_EECMD_WRITE 0x5 /* 0101b */ 366#define RTK_EECMD_WRITE 0x5 /* 0101b */
358#define RTK_EECMD_READ 0x6 /* 0110b */ 367#define RTK_EECMD_READ 0x6 /* 0110b */
359#define RTK_EECMD_ERASE 0x7 /* 0111b */ 368#define RTK_EECMD_ERASE 0x7 /* 0111b */
360 369
361#define RTK_EE_ID 0x00 370#define RTK_EE_ID 0x00
362#define RTK_EE_PCI_VID 0x01 371#define RTK_EE_PCI_VID 0x01
363#define RTK_EE_PCI_DID 0x02 372#define RTK_EE_PCI_DID 0x02
364/* Location of station address inside EEPROM */ 373/* Location of station address inside EEPROM */
365#define RTK_EE_EADDR0 0x07 374#define RTK_EE_EADDR0 0x07
366#define RTK_EE_EADDR1 0x08 375#define RTK_EE_EADDR1 0x08
367#define RTK_EE_EADDR2 0x09 376#define RTK_EE_EADDR2 0x09
368 377
369/* 378/*
370 * MII register (8129 only) 379 * MII register (8129 only)
371 */ 380 */
372#define RTK_MII_CLK 0x01 381#define RTK_MII_CLK 0x01
373#define RTK_MII_DATAIN 0x02 382#define RTK_MII_DATAIN 0x02
374#define RTK_MII_DATAOUT 0x04 383#define RTK_MII_DATAOUT 0x04
375#define RTK_MII_DIR 0x80 /* 0 == input, 1 == output */ 384#define RTK_MII_DIR 0x80 /* 0 == input, 1 == output */
376 385
377/* 386/*
378 * Config 0 register 387 * Config 0 register
379 */ 388 */
380#define RTK_CFG0_ROM0 0x01 389#define RTK_CFG0_ROM0 0x01
381#define RTK_CFG0_ROM1 0x02 390#define RTK_CFG0_ROM1 0x02
382#define RTK_CFG0_ROM2 0x04 391#define RTK_CFG0_ROM2 0x04
383#define RTK_CFG0_PL0 0x08 392#define RTK_CFG0_PL0 0x08
384#define RTK_CFG0_PL1 0x10 393#define RTK_CFG0_PL1 0x10
385#define RTK_CFG0_10MBPS 0x20 /* 10 Mbps internal mode */ 394#define RTK_CFG0_10MBPS 0x20 /* 10 Mbps internal mode */
386#define RTK_CFG0_PCS 0x40 395#define RTK_CFG0_PCS 0x40
387#define RTK_CFG0_SCR 0x80 396#define RTK_CFG0_SCR 0x80
388 397
389/* 398/*
390 * Config 1 register 399 * Config 1 register
391 */ 400 */
392#define RTK_CFG1_PWRDWN 0x01 401#define RTK_CFG1_PWRDWN 0x01
393#define RTK_CFG1_SLEEP 0x02 402#define RTK_CFG1_SLEEP 0x02
394#define RTK_CFG1_IOMAP 0x04 403#define RTK_CFG1_IOMAP 0x04
395#define RTK_CFG1_MEMMAP 0x08 404#define RTK_CFG1_MEMMAP 0x08
396#define RTK_CFG1_RSVD 0x10 405#define RTK_CFG1_RSVD 0x10
397#define RTK_CFG1_DRVLOAD 0x20 406#define RTK_CFG1_DRVLOAD 0x20
398#define RTK_CFG1_LED0 0x40 407#define RTK_CFG1_LED0 0x40
399#define RTK_CFG1_FULLDUPLEX 0x40 /* 8129 only */ 408#define RTK_CFG1_FULLDUPLEX 0x40 /* 8129 only */
400#define RTK_CFG1_LED1 0x80 409#define RTK_CFG1_LED1 0x80
401 410
402/* 411/*
403 * 8139C+ register definitions 412 * 8139C+ register definitions
404 */ 413 */
405 414
406/* RTK_DUMPSTATS_LO register */ 415/* RTK_DUMPSTATS_LO register */
407 416
408#define RTK_DUMPSTATS_START 0x00000008 417#define RTK_DUMPSTATS_START 0x00000008
409 418
410/* Transmit start register */ 419/* Transmit start register */
411 420
412#define RTK_TXSTART_SWI 0x01 /* generate TX interrupt */ 421#define RTK_TXSTART_SWI 0x01 /* generate TX interrupt */
413#define RTK_TXSTART_START 0x40 /* start normal queue transmit */ 422#define RTK_TXSTART_START 0x40 /* start normal queue transmit */
414#define RTK_TXSTART_HPRIO_START 0x80 /* start hi prio queue transmit */ 423#define RTK_TXSTART_HPRIO_START 0x80 /* start hi prio queue transmit */
415 424
416/* 425/*
417 * Config 2 register, 8139C+/8169/8169S/8110S only 426 * Config 2 register, 8139C+/8169/8169S/8110S only
418 */ 427 */
419#define RTK_CFG2_BUSFREQ 0x07 428#define RTK_CFG2_BUSFREQ 0x07
420#define RTK_CFG2_BUSWIDTH 0x08 429#define RTK_CFG2_BUSWIDTH 0x08
421#define RTK_CFG2_AUXPWRSTS 0x10 430#define RTK_CFG2_AUXPWRSTS 0x10
422 431
423#define RTK_BUSFREQ_33MHZ 0x00 432#define RTK_BUSFREQ_33MHZ 0x00
424#define RTK_BUSFREQ_66MHZ 0x01 433#define RTK_BUSFREQ_66MHZ 0x01
425 434
426#define RTK_BUSWIDTH_32BITS 0x00 435#define RTK_BUSWIDTH_32BITS 0x00
427#define RTK_BUSWIDTH_64BITS 0x08 436#define RTK_BUSWIDTH_64BITS 0x08
428 437
429/* C+ mode command register */ 438/* C+ mode command register */
430 439
431#define RE_CPLUSCMD_TXENB 0x0001 /* enable C+ transmit mode */ 440#define RE_CPLUSCMD_TXENB 0x0001 /* enable C+ transmit mode */
432#define RE_CPLUSCMD_RXENB 0x0002 /* enable C+ receive mode */ 441#define RE_CPLUSCMD_RXENB 0x0002 /* enable C+ receive mode */
433#define RE_CPLUSCMD_PCI_MRW 0x0008 /* enable PCI multi-read/write */ 442#define RE_CPLUSCMD_PCI_MRW 0x0008 /* enable PCI multi-read/write */
434#define RE_CPLUSCMD_PCI_DAC 0x0010 /* PCI dual-address cycle only */ 443#define RE_CPLUSCMD_PCI_DAC 0x0010 /* PCI dual-address cycle only */
435#define RE_CPLUSCMD_RXCSUM_ENB 0x0020 /* enable RX checksum offload */ 444#define RE_CPLUSCMD_RXCSUM_ENB 0x0020 /* enable RX checksum offload */
436#define RE_CPLUSCMD_VLANSTRIP 0x0040 /* enable VLAN tag stripping */ 445#define RE_CPLUSCMD_VLANSTRIP 0x0040 /* enable VLAN tag stripping */
437#define RE_CPLUSCMD_MACSTAT_DIS 0x0080 /* 8168B/C/CP */ 446#define RE_CPLUSCMD_MACSTAT_DIS 0x0080 /* 8168B/C/CP */
438#define RE_CPLUSCMD_ASF 0x0100 /* 8168C/CP */ 447#define RE_CPLUSCMD_ASF 0x0100 /* 8168C/CP */
439#define RE_CPLUSCMD_DBG_SEL 0x0200 /* 8168C/CP */ 448#define RE_CPLUSCMD_DBG_SEL 0x0200 /* 8168C/CP */
440#define RE_CPLUSCMD_FORCE_TXFC 0x0400 /* 8168C/CP */ 449#define RE_CPLUSCMD_FORCE_TXFC 0x0400 /* 8168C/CP */
441#define RE_CPLUSCMD_FORCE_RXFC 0x0800 /* 8168C/CP */ 450#define RE_CPLUSCMD_FORCE_RXFC 0x0800 /* 8168C/CP */
442#define RE_CPLUSCMD_FORCE_HDPX 0x1000 /* 8168C/CP */ 451#define RE_CPLUSCMD_FORCE_HDPX 0x1000 /* 8168C/CP */
443#define RE_CPLUSCMD_NORMAL_MODE 0x2000 /* 8168C/CP */ 452#define RE_CPLUSCMD_NORMAL_MODE 0x2000 /* 8168C/CP */
444#define RE_CPLUSCMD_DBG_ENB 0x4000 /* 8168C/CP */ 453#define RE_CPLUSCMD_DBG_ENB 0x4000 /* 8168C/CP */
445#define RE_CPLUSCMD_BIST_ENB 0x8000 /* 8168C/CP */ 454#define RE_CPLUSCMD_BIST_ENB 0x8000 /* 8168C/CP */
446 455
447/* C+ early transmit threshold */ 456/* C+ early transmit threshold */
448 457
449#define RTK_EARLYTXTHRESH_CNT 0x003F /* byte count times 8 */ 458#define RTK_EARLYTXTHRESH_CNT 0x003F /* byte count times 8 */
450 459
451/* 460/*
452 * Gigabit PHY access register (8169 only) 461 * Gigabit PHY access register (8169 only)
453 */ 462 */
454 463
455#define RTK_PHYAR_PHYDATA 0x0000FFFF 464#define RTK_PHYAR_PHYDATA 0x0000FFFF
456#define RTK_PHYAR_PHYREG 0x001F0000 465#define RTK_PHYAR_PHYREG 0x001F0000
457#define RTK_PHYAR_BUSY 0x80000000 466#define RTK_PHYAR_BUSY 0x80000000
458 467
459/* 468/*
460 * Gigabit media status (8169 only) 469 * Gigabit media status (8169 only)
461 */ 470 */
462#define RTK_GMEDIASTAT_FDX 0x01 /* full duplex */ 471#define RTK_GMEDIASTAT_FDX 0x01 /* full duplex */
463#define RTK_GMEDIASTAT_LINK 0x02 /* link up */ 472#define RTK_GMEDIASTAT_LINK 0x02 /* link up */
464#define RTK_GMEDIASTAT_10MBPS 0x04 /* 10mps link */ 473#define RTK_GMEDIASTAT_10MBPS 0x04 /* 10mps link */
465#define RTK_GMEDIASTAT_100MBPS 0x08 /* 100mbps link */ 474#define RTK_GMEDIASTAT_100MBPS 0x08 /* 100mbps link */
466#define RTK_GMEDIASTAT_1000MBPS 0x10 /* gigE link */ 475#define RTK_GMEDIASTAT_1000MBPS 0x10 /* gigE link */
467#define RTK_GMEDIASTAT_RXFLOW 0x20 /* RX flow control on */ 476#define RTK_GMEDIASTAT_RXFLOW 0x20 /* RX flow control on */
468#define RTK_GMEDIASTAT_TXFLOW 0x40 /* TX flow control on */ 477#define RTK_GMEDIASTAT_TXFLOW 0x40 /* TX flow control on */
469#define RTK_GMEDIASTAT_TBI 0x80 /* TBI enabled */ 478#define RTK_GMEDIASTAT_TBI 0x80 /* TBI enabled */
470 479
471 480
472#define RTK_TX_EARLYTHRESH ((256 / 32) << 16) 481#define RTK_TX_EARLYTHRESH ((256 / 32) << 16)
473#define RTK_RX_FIFOTHRESH RTK_RXFIFO_256BYTES 482#define RTK_RX_FIFOTHRESH RTK_RXFIFO_256BYTES
474#define RTK_RX_MAXDMA RTK_RXDMA_256BYTES 483#define RTK_RX_MAXDMA RTK_RXDMA_256BYTES
475#define RTK_TX_MAXDMA RTK_TXDMA_256BYTES 484#define RTK_TX_MAXDMA RTK_TXDMA_256BYTES
476 485
477#define RTK_RXCFG_CONFIG (RTK_RX_FIFOTHRESH|RTK_RX_MAXDMA|RTK_RX_BUF_SZ) 486#define RTK_RXCFG_CONFIG (RTK_RX_FIFOTHRESH|RTK_RX_MAXDMA|RTK_RX_BUF_SZ)
478#define RTK_TXCFG_CONFIG (RTK_TXCFG_IFG|RTK_TX_MAXDMA) 487#define RTK_TXCFG_CONFIG (RTK_TXCFG_IFG|RTK_TX_MAXDMA)
479 488
480#define RE_RX_FIFOTHRESH RTK_RXFIFO_NOTHRESH 489#define RE_RX_FIFOTHRESH RTK_RXFIFO_NOTHRESH
481#define RE_RX_MAXDMA RTK_RXDMA_UNLIMITED 490#define RE_RX_MAXDMA RTK_RXDMA_UNLIMITED
482#define RE_TX_MAXDMA RTK_TXDMA_2048BYTES 491#define RE_TX_MAXDMA RTK_TXDMA_2048BYTES
483 492
484#define RE_RXCFG_CONFIG (RE_RX_FIFOTHRESH|RE_RX_MAXDMA|RTK_RX_BUF_SZ) 493#define RE_RXCFG_CONFIG (RE_RX_FIFOTHRESH|RE_RX_MAXDMA|RTK_RX_BUF_SZ)
485#define RE_TXCFG_CONFIG (RTK_TXCFG_IFG|RE_TX_MAXDMA) 494#define RE_TXCFG_CONFIG (RTK_TXCFG_IFG|RE_TX_MAXDMA)
486 495
487/* 496/*
488 * RX/TX descriptor definition. When large send mode is enabled, the 497 * RX/TX descriptor definition. When large send mode is enabled, the
489 * lower 11 bits of the TX rtk_cmd word are used to hold the MSS, and 498 * lower 11 bits of the TX rtk_cmd word are used to hold the MSS, and
490 * the checksum offload bits are disabled. The structure layout is 499 * the checksum offload bits are disabled. The structure layout is
491 * the same for RX and TX descriptors 500 * the same for RX and TX descriptors
492 */ 501 */
493 502
494struct re_desc { 503struct re_desc {
495 volatile uint32_t re_cmdstat; 504 volatile uint32_t re_cmdstat;
496 volatile uint32_t re_vlanctl; 505 volatile uint32_t re_vlanctl;
497 volatile uint32_t re_bufaddr_lo; 506 volatile uint32_t re_bufaddr_lo;
498 volatile uint32_t re_bufaddr_hi; 507 volatile uint32_t re_bufaddr_hi;
499}; 508};
500 509
501#define RE_TDESC_CMD_FRAGLEN 0x0000FFFF 510#define RE_TDESC_CMD_FRAGLEN 0x0000FFFF
502#define RE_TDESC_CMD_TCPCSUM 0x00010000 /* TCP checksum enable */ 511#define RE_TDESC_CMD_TCPCSUM 0x00010000 /* TCP checksum enable */
503#define RE_TDESC_CMD_UDPCSUM 0x00020000 /* UDP checksum enable */ 512#define RE_TDESC_CMD_UDPCSUM 0x00020000 /* UDP checksum enable */
504#define RE_TDESC_CMD_IPCSUM 0x00040000 /* IP header checksum enable */ 513#define RE_TDESC_CMD_IPCSUM 0x00040000 /* IP header checksum enable */
505#define RE_TDESC_CMD_MSSVAL 0x07FF0000 /* Large send MSS value */ 514#define RE_TDESC_CMD_MSSVAL 0x07FF0000 /* Large send MSS value */
506#define RE_TDESC_CMD_MSSVAL_SHIFT 16 /* Shift of the above */ 515#define RE_TDESC_CMD_MSSVAL_SHIFT 16 /* Shift of the above */
507#define RE_TDESC_CMD_LGSEND 0x08000000 /* TCP large send enb */ 516#define RE_TDESC_CMD_LGSEND 0x08000000 /* TCP large send enb */
508#define RE_TDESC_CMD_EOF 0x10000000 /* end of frame marker */ 517#define RE_TDESC_CMD_EOF 0x10000000 /* end of frame marker */
509#define RE_TDESC_CMD_SOF 0x20000000 /* start of frame marker */ 518#define RE_TDESC_CMD_SOF 0x20000000 /* start of frame marker */
510#define RE_TDESC_CMD_EOR 0x40000000 /* end of ring marker */ 519#define RE_TDESC_CMD_EOR 0x40000000 /* end of ring marker */
511#define RE_TDESC_CMD_OWN 0x80000000 /* chip owns descriptor */ 520#define RE_TDESC_CMD_OWN 0x80000000 /* chip owns descriptor */
512#define RE_TDESC_CMD_LGTCPHO 0x01fc0000 /* DESCV2 TCP hdr off lg send */ 521#define RE_TDESC_CMD_LGTCPHO 0x01fc0000 /* DESCV2 TCP hdr off lg send */
513#define RE_TDESC_CMD_LGTCPHO_SHIFT 18 522#define RE_TDESC_CMD_LGTCPHO_SHIFT 18
514#define RE_TDESC_CMD_LGSEND_V4 0x04000000 /* DESCV2 TCPv4 large send en */ 523#define RE_TDESC_CMD_LGSEND_V4 0x04000000 /* DESCV2 TCPv4 large send en */
515#define RE_TDESC_CMD_LGSEND_V6 0x02000000 /* DESCV2 TCPv6 large send en */ 524#define RE_TDESC_CMD_LGSEND_V6 0x02000000 /* DESCV2 TCPv6 large send en */
516 525
517#define RE_TDESC_VLANCTL_TAG 0x00020000 /* Insert VLAN tag */ 526#define RE_TDESC_VLANCTL_TAG 0x00020000 /* Insert VLAN tag */
518#define RE_TDESC_VLANCTL_DATA 0x0000FFFF /* TAG data */ 527#define RE_TDESC_VLANCTL_DATA 0x0000FFFF /* TAG data */
519#define RE_TDESC_VLANCTL_UDPCSUM 0x80000000 /* DESCV2 UDP cksum enable */ 528#define RE_TDESC_VLANCTL_UDPCSUM 0x80000000 /* DESCV2 UDP cksum enable */
520#define RE_TDESC_VLANCTL_TCPCSUM 0x40000000 /* DESCV2 TCP cksum enable */ 529#define RE_TDESC_VLANCTL_TCPCSUM 0x40000000 /* DESCV2 TCP cksum enable */
521#define RE_TDESC_VLANCTL_IPCSUM 0x20000000 /* DESCV2 IP hdr cksum enable */ 530#define RE_TDESC_VLANCTL_IPCSUM 0x20000000 /* DESCV2 IP hdr cksum enable */
522#define RE_TDESC_VLANCTL_MSSVAL 0x0ffc0000 /* DESCV2 large send MSS val */ 531#define RE_TDESC_VLANCTL_MSSVAL 0x0ffc0000 /* DESCV2 large send MSS val */
523#define RE_TDESC_VLANCTL_MSSVAL_SHIFT 18 532#define RE_TDESC_VLANCTL_MSSVAL_SHIFT 18
524 533
525/* 534/*
526 * Error bits are valid only on the last descriptor of a frame 535 * Error bits are valid only on the last descriptor of a frame
527 * (i.e. RE_TDESC_CMD_EOF == 1) 536 * (i.e. RE_TDESC_CMD_EOF == 1)
528 */ 537 */
529 538
530#define RE_TDESC_STAT_COLCNT 0x000F0000 /* collision count */ 539#define RE_TDESC_STAT_COLCNT 0x000F0000 /* collision count */
531#define RE_TDESC_STAT_EXCESSCOL 0x00100000 /* excessive collisions */ 540#define RE_TDESC_STAT_EXCESSCOL 0x00100000 /* excessive collisions */
532#define RE_TDESC_STAT_LINKFAIL 0x00200000 /* link faulure */ 541#define RE_TDESC_STAT_LINKFAIL 0x00200000 /* link faulure */
533#define RE_TDESC_STAT_OWINCOL 0x00400000 /* out-of-window collision */ 542#define RE_TDESC_STAT_OWINCOL 0x00400000 /* out-of-window collision */
534#define RE_TDESC_STAT_TXERRSUM 0x00800000 /* transmit error summary */ 543#define RE_TDESC_STAT_TXERRSUM 0x00800000 /* transmit error summary */
535#define RE_TDESC_STAT_UNDERRUN 0x02000000 /* TX underrun occurred */ 544#define RE_TDESC_STAT_UNDERRUN 0x02000000 /* TX underrun occurred */
536#define RE_TDESC_STAT_OWN 0x80000000 545#define RE_TDESC_STAT_OWN 0x80000000
537 546
538/* 547/*
539 * RX descriptor cmd/vlan definitions 548 * RX descriptor cmd/vlan definitions
540 */ 549 */
541 550
542#define RE_RDESC_CMD_EOR 0x40000000 551#define RE_RDESC_CMD_EOR 0x40000000
543#define RE_RDESC_CMD_OWN 0x80000000 552#define RE_RDESC_CMD_OWN 0x80000000
544#define RE_RDESC_CMD_BUFLEN 0x00001FFF 553#define RE_RDESC_CMD_BUFLEN 0x00001FFF
545 554
546#define RE_RDESC_STAT_OWN 0x80000000 555#define RE_RDESC_STAT_OWN 0x80000000
547#define RE_RDESC_STAT_EOR 0x40000000 556#define RE_RDESC_STAT_EOR 0x40000000
548#define RE_RDESC_STAT_SOF 0x20000000 557#define RE_RDESC_STAT_SOF 0x20000000
549#define RE_RDESC_STAT_EOF 0x10000000 558#define RE_RDESC_STAT_EOF 0x10000000
550#define RE_RDESC_STAT_FRALIGN 0x08000000 /* frame alignment error */ 559#define RE_RDESC_STAT_FRALIGN 0x08000000 /* frame alignment error */
551#define RE_RDESC_STAT_MCAST 0x04000000 /* multicast pkt received */ 560#define RE_RDESC_STAT_MCAST 0x04000000 /* multicast pkt received */
552#define RE_RDESC_STAT_UCAST 0x02000000 /* unicast pkt received */ 561#define RE_RDESC_STAT_UCAST 0x02000000 /* unicast pkt received */
553#define RE_RDESC_STAT_BCAST 0x01000000 /* broadcast pkt received */ 562#define RE_RDESC_STAT_BCAST 0x01000000 /* broadcast pkt received */
554#define RE_RDESC_STAT_BUFOFLOW 0x00800000 /* out of buffer space */ 563#define RE_RDESC_STAT_BUFOFLOW 0x00800000 /* out of buffer space */
555#define RE_RDESC_STAT_FIFOOFLOW 0x00400000 /* FIFO overrun */ 564#define RE_RDESC_STAT_FIFOOFLOW 0x00400000 /* FIFO overrun */
556#define RE_RDESC_STAT_GIANT 0x00200000 /* pkt > 4096 bytes */ 565#define RE_RDESC_STAT_GIANT 0x00200000 /* pkt > 4096 bytes */
557#define RE_RDESC_STAT_RXERRSUM 0x00100000 /* RX error summary */ 566#define RE_RDESC_STAT_RXERRSUM 0x00100000 /* RX error summary */
558#define RE_RDESC_STAT_RUNT 0x00080000 /* runt packet received */ 567#define RE_RDESC_STAT_RUNT 0x00080000 /* runt packet received */
559#define RE_RDESC_STAT_CRCERR 0x00040000 /* CRC error */ 568#define RE_RDESC_STAT_CRCERR 0x00040000 /* CRC error */
560#define RE_RDESC_STAT_PROTOID 0x00030000 /* Protocol type */ 569#define RE_RDESC_STAT_PROTOID 0x00030000 /* Protocol type */
561#define RE_RDESC_STAT_IPSUMBAD 0x00008000 /* IP header checksum bad */ 570#define RE_RDESC_STAT_IPSUMBAD 0x00008000 /* IP header checksum bad */
562#define RE_RDESC_STAT_UDPSUMBAD 0x00004000 /* UDP checksum bad */ 571#define RE_RDESC_STAT_UDPSUMBAD 0x00004000 /* UDP checksum bad */
563#define RE_RDESC_STAT_TCPSUMBAD 0x00002000 /* TCP checksum bad */ 572#define RE_RDESC_STAT_TCPSUMBAD 0x00002000 /* TCP checksum bad */
564#define RE_RDESC_STAT_FRAGLEN 0x00001FFF /* RX'ed frame/frag len */ 573#define RE_RDESC_STAT_FRAGLEN 0x00001FFF /* RX'ed frame/frag len */
565#define RE_RDESC_STAT_GFRAGLEN 0x00003FFF /* RX'ed frame/frag len */ 574#define RE_RDESC_STAT_GFRAGLEN 0x00003FFF /* RX'ed frame/frag len */
566 575
567#define RE_RDESC_VLANCTL_TAG 0x00010000 /* VLAN tag available 576#define RE_RDESC_VLANCTL_TAG 0x00010000 /* VLAN tag available
568 (re_vlandata valid)*/ 577 (re_vlandata valid)*/
569#define RE_RDESC_VLANCTL_DATA 0x0000FFFF /* TAG data */ 578#define RE_RDESC_VLANCTL_DATA 0x0000FFFF /* TAG data */
570#define RE_RDESC_VLANCTL_IPV6 0x80000000 /* DESCV2 IPV6 packet */ 579#define RE_RDESC_VLANCTL_IPV6 0x80000000 /* DESCV2 IPV6 packet */
571#define RE_RDESC_VLANCTL_IPV4 0x40000000 /* DESCV2 IPV4 packet */ 580#define RE_RDESC_VLANCTL_IPV4 0x40000000 /* DESCV2 IPV4 packet */
572 581
573#define RE_PROTOID_NONIP 0x00000000 582#define RE_PROTOID_NONIP 0x00000000
574#define RE_PROTOID_TCPIP 0x00010000 583#define RE_PROTOID_TCPIP 0x00010000
575#define RE_PROTOID_UDPIP 0x00020000 584#define RE_PROTOID_UDPIP 0x00020000
576#define RE_PROTOID_IP 0x00030000 585#define RE_PROTOID_IP 0x00030000
577#define RE_TCPPKT(x) (((x) & RE_RDESC_STAT_PROTOID) == \ 586#define RE_TCPPKT(x) (((x) & RE_RDESC_STAT_PROTOID) == \
578 RE_PROTOID_TCPIP) 587 RE_PROTOID_TCPIP)
579#define RE_UDPPKT(x) (((x) & RE_RDESC_STAT_PROTOID) == \ 588#define RE_UDPPKT(x) (((x) & RE_RDESC_STAT_PROTOID) == \
580 RE_PROTOID_UDPIP) 589 RE_PROTOID_UDPIP)
581 590
582#define RE_ADDR_LO(y) ((uint64_t)(y) & 0xFFFFFFFF) 591#define RE_ADDR_LO(y) ((uint64_t)(y) & 0xFFFFFFFF)
583#define RE_ADDR_HI(y) ((uint64_t)(y) >> 32) 592#define RE_ADDR_HI(y) ((uint64_t)(y) >> 32)
584 593
585/* 594/*
586 * Statistics counter structure (8139C+ and 8169 only) 595 * Statistics counter structure (8139C+ and 8169 only)
587 */ 596 */
588struct re_stats { 597struct re_stats {
589 uint32_t re_tx_pkts_lo; 598 uint32_t re_tx_pkts_lo;
590 uint32_t re_tx_pkts_hi; 599 uint32_t re_tx_pkts_hi;
591 uint32_t re_tx_errs_lo; 600 uint32_t re_tx_errs_lo;
592 uint32_t re_tx_errs_hi; 601 uint32_t re_tx_errs_hi;
593 uint32_t re_tx_errs; 602 uint32_t re_tx_errs;
594 uint16_t re_missed_pkts; 603 uint16_t re_missed_pkts;
595 uint16_t re_rx_framealign_errs; 604 uint16_t re_rx_framealign_errs;
596 uint32_t re_tx_onecoll; 605 uint32_t re_tx_onecoll;
597 uint32_t re_tx_multicolls; 606 uint32_t re_tx_multicolls;
598 uint32_t re_rx_ucasts_hi; 607 uint32_t re_rx_ucasts_hi;
599 uint32_t re_rx_ucasts_lo; 608 uint32_t re_rx_ucasts_lo;
600 uint32_t re_rx_bcasts_lo; 609 uint32_t re_rx_bcasts_lo;
601 uint32_t re_rx_bcasts_hi; 610 uint32_t re_rx_bcasts_hi;
602 uint32_t re_rx_mcasts; 611 uint32_t re_rx_mcasts;
603 uint16_t re_tx_aborts; 612 uint16_t re_tx_aborts;
604 uint16_t re_rx_underruns; 613 uint16_t re_rx_underruns;
605}; 614};
606 615
607#define RE_IFQ_MAXLEN 512 616#define RE_IFQ_MAXLEN 512
608 617
609#define RE_JUMBO_FRAMELEN ETHER_MAX_LEN_JUMBO 618#define RE_JUMBO_FRAMELEN ETHER_MAX_LEN_JUMBO
610#define RE_JUMBO_MTU ETHERMTU_JUMBO 619#define RE_JUMBO_MTU ETHERMTU_JUMBO

cvs diff -r1.56 -r1.56.18.1 src/sys/dev/ic/rtl81x9var.h (switch to unified diff)

--- src/sys/dev/ic/rtl81x9var.h 2017/04/19 00:20:02 1.56
+++ src/sys/dev/ic/rtl81x9var.h 2020/01/28 11:12:30 1.56.18.1
@@ -1,305 +1,306 @@ @@ -1,305 +1,306 @@
1/* $NetBSD: rtl81x9var.h,v 1.56 2017/04/19 00:20:02 jmcneill Exp $ */ 1/* $NetBSD: rtl81x9var.h,v 1.56.18.1 2020/01/28 11:12:30 martin Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1997, 1998 4 * Copyright (c) 1997, 1998
5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software 15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement: 16 * must display the following acknowledgement:
17 * This product includes software developed by Bill Paul. 17 * This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors 18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software 19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission. 20 * without specific prior written permission.
21 * 21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE. 32 * THE POSSIBILITY OF SUCH DAMAGE.
33 * 33 *
34 * FreeBSD Id: if_rlreg.h,v 1.9 1999/06/20 18:56:09 wpaul Exp 34 * FreeBSD Id: if_rlreg.h,v 1.9 1999/06/20 18:56:09 wpaul Exp
35 */ 35 */
36 36
37#include <sys/rndsource.h> 37#include <sys/rndsource.h>
38 38
39#define RTK_ETHER_ALIGN 2 39#define RTK_ETHER_ALIGN 2
40#define RTK_RXSTAT_LEN 4 40#define RTK_RXSTAT_LEN 4
41 41
42#ifdef __NO_STRICT_ALIGNMENT 42#ifdef __NO_STRICT_ALIGNMENT
43/* 43/*
44 * XXX According to PR kern/33763, some 8168 and variants can't DMA 44 * XXX According to PR kern/33763, some 8168 and variants can't DMA
45 * XXX RX packet data into unaligned buffer. This means such chips will 45 * XXX RX packet data into unaligned buffer. This means such chips will
46 * XXX never work on !__NO_STRICT_ALIGNMENT hosts without copying buffer. 46 * XXX never work on !__NO_STRICT_ALIGNMENT hosts without copying buffer.
47 */ 47 */
48#define RE_ETHER_ALIGN 0 48#define RE_ETHER_ALIGN 0
49#else 49#else
50#define RE_ETHER_ALIGN 2 50#define RE_ETHER_ALIGN 2
51#endif 51#endif
52 52
53struct rtk_type { 53struct rtk_type {
54 uint16_t rtk_vid; 54 uint16_t rtk_vid;
55 uint16_t rtk_did; 55 uint16_t rtk_did;
56 int rtk_basetype; 56 int rtk_basetype;
57#define RTK_8129 1 57#define RTK_8129 1
58#define RTK_8139 2 58#define RTK_8139 2
59#define RTK_8139CPLUS 3 59#define RTK_8139CPLUS 3
60#define RTK_8169 4 60#define RTK_8169 4
61#define RTK_8168 5 61#define RTK_8168 5
62#define RTK_8101E 6 62#define RTK_8101E 6
63 const char *rtk_name; 63 const char *rtk_name;
64}; 64};
65 65
66struct rtk_mii_frame { 66struct rtk_mii_frame {
67 uint8_t mii_stdelim; 67 uint8_t mii_stdelim;
68 uint8_t mii_opcode; 68 uint8_t mii_opcode;
69 uint8_t mii_phyaddr; 69 uint8_t mii_phyaddr;
70 uint8_t mii_regaddr; 70 uint8_t mii_regaddr;
71 uint8_t mii_turnaround; 71 uint8_t mii_turnaround;
72 uint16_t mii_data; 72 uint16_t mii_data;
73}; 73};
74 74
75/* 75/*
76 * MII constants 76 * MII constants
77 */ 77 */
78#define RTK_MII_STARTDELIM 0x01 78#define RTK_MII_STARTDELIM 0x01
79#define RTK_MII_READOP 0x02 79#define RTK_MII_READOP 0x02
80#define RTK_MII_WRITEOP 0x01 80#define RTK_MII_WRITEOP 0x01
81#define RTK_MII_TURNAROUND 0x02 81#define RTK_MII_TURNAROUND 0x02
82 82
83 83
84/* 84/*
85 * The RealTek doesn't use a fragment-based descriptor mechanism. 85 * The RealTek doesn't use a fragment-based descriptor mechanism.
86 * Instead, there are only four register sets, each or which represents 86 * Instead, there are only four register sets, each or which represents
87 * one 'descriptor.' Basically, each TX descriptor is just a contiguous 87 * one 'descriptor.' Basically, each TX descriptor is just a contiguous
88 * packet buffer (32-bit aligned!) and we place the buffer addresses in 88 * packet buffer (32-bit aligned!) and we place the buffer addresses in
89 * the registers so the chip knows where they are. 89 * the registers so the chip knows where they are.
90 * 90 *
91 * We can sort of kludge together the same kind of buffer management 91 * We can sort of kludge together the same kind of buffer management
92 * used in previous drivers, but we have to do buffer copies almost all 92 * used in previous drivers, but we have to do buffer copies almost all
93 * the time, so it doesn't really buy us much. 93 * the time, so it doesn't really buy us much.
94 * 94 *
95 * For reception, there's just one large buffer where the chip stores 95 * For reception, there's just one large buffer where the chip stores
96 * all received packets. 96 * all received packets.
97 */ 97 */
98 98
99#ifdef dreamcast 99#ifdef dreamcast
100/* 100/*
101 * XXX dreamcast has only 32KB DMA'able memory on its PCI bridge. 101 * XXX dreamcast has only 32KB DMA'able memory on its PCI bridge.
102 * XXX Maybe this should be handled by prop_dictionary, or 102 * XXX Maybe this should be handled by prop_dictionary, or
103 * XXX some other new API which returns available DMA resources. 103 * XXX some other new API which returns available DMA resources.
104 */ 104 */
105#define RTK_RX_BUF_SZ RTK_RXBUF_16 105#define RTK_RX_BUF_SZ RTK_RXBUF_16
106#else 106#else
107#define RTK_RX_BUF_SZ RTK_RXBUF_64 107#define RTK_RX_BUF_SZ RTK_RXBUF_64
108#endif 108#endif
109#define RTK_RXBUFLEN RTK_RXBUF_LEN(RTK_RX_BUF_SZ) 109#define RTK_RXBUFLEN RTK_RXBUF_LEN(RTK_RX_BUF_SZ)
110#define RTK_TX_LIST_CNT 4 110#define RTK_TX_LIST_CNT 4
111 111
112/* 112/*
113 * The 8139C+ and 8169 gigE chips support descriptor-based TX 113 * The 8139C+ and 8169 gigE chips support descriptor-based TX
114 * and RX. In fact, they even support TCP large send. Descriptors 114 * and RX. In fact, they even support TCP large send. Descriptors
115 * must be allocated in contiguous blocks that are aligned on a 115 * must be allocated in contiguous blocks that are aligned on a
116 * 256-byte boundary. The RX rings can hold a maximum of 64 descriptors. 116 * 256-byte boundary. The RX rings can hold a maximum of 64 descriptors.
117 * The TX rings can hold upto 64 descriptors on 8139C+, and 117 * The TX rings can hold upto 64 descriptors on 8139C+, and
118 * 1024 descriptors on 8169 gigE chips. 118 * 1024 descriptors on 8169 gigE chips.
119 */ 119 */
120#define RE_RING_ALIGN 256 120#define RE_RING_ALIGN 256
121 121
122/* 122/*
123 * Size of descriptors and TX queue. 123 * Size of descriptors and TX queue.
124 * These numbers must be power of two to simplify RE_NEXT_*() macro. 124 * These numbers must be power of two to simplify RE_NEXT_*() macro.
125 */ 125 */
126#define RE_RX_DESC_CNT 64 126#define RE_RX_DESC_CNT 64
127#define RE_TX_DESC_CNT_8139 64 127#define RE_TX_DESC_CNT_8139 64
128#define RE_TX_DESC_CNT_8169 1024 128#define RE_TX_DESC_CNT_8169 1024
129#define RE_TX_QLEN 64 129#define RE_TX_QLEN 64
130 130
131#define RE_NTXDESC_RSVD 4 131#define RE_NTXDESC_RSVD 4
132 132
133struct re_rxsoft { 133struct re_rxsoft {
134 struct mbuf *rxs_mbuf; 134 struct mbuf *rxs_mbuf;
135 bus_dmamap_t rxs_dmamap; 135 bus_dmamap_t rxs_dmamap;
136}; 136};
137 137
138struct re_txq { 138struct re_txq {
139 struct mbuf *txq_mbuf; 139 struct mbuf *txq_mbuf;
140 bus_dmamap_t txq_dmamap; 140 bus_dmamap_t txq_dmamap;
141 int txq_descidx; 141 int txq_descidx;
142 int txq_nsegs; 142 int txq_nsegs;
143}; 143};
144 144
145struct re_list_data { 145struct re_list_data {
146 struct re_txq re_txq[RE_TX_QLEN]; 146 struct re_txq re_txq[RE_TX_QLEN];
147 int re_txq_considx; 147 int re_txq_considx;
148 int re_txq_prodidx; 148 int re_txq_prodidx;
149 int re_txq_free; 149 int re_txq_free;
150 150
151 bus_dmamap_t re_tx_list_map; 151 bus_dmamap_t re_tx_list_map;
152 struct re_desc *re_tx_list; 152 struct re_desc *re_tx_list;
153 int re_tx_free; /* # of free descriptors */ 153 int re_tx_free; /* # of free descriptors */
154 int re_tx_nextfree; /* next descriptor to use */ 154 int re_tx_nextfree; /* next descriptor to use */
155 int re_tx_desc_cnt; /* # of descriptors */ 155 int re_tx_desc_cnt; /* # of descriptors */
156 bus_dma_segment_t re_tx_listseg; 156 bus_dma_segment_t re_tx_listseg;
157 int re_tx_listnseg; 157 int re_tx_listnseg;
158 158
159 struct re_rxsoft re_rxsoft[RE_RX_DESC_CNT]; 159 struct re_rxsoft re_rxsoft[RE_RX_DESC_CNT];
160 bus_dmamap_t re_rx_list_map; 160 bus_dmamap_t re_rx_list_map;
161 struct re_desc *re_rx_list; 161 struct re_desc *re_rx_list;
162 int re_rx_prodidx; 162 int re_rx_prodidx;
163 bus_dma_segment_t re_rx_listseg; 163 bus_dma_segment_t re_rx_listseg;
164 int re_rx_listnseg; 164 int re_rx_listnseg;
165}; 165};
166 166
167struct rtk_tx_desc { 167struct rtk_tx_desc {
168 SIMPLEQ_ENTRY(rtk_tx_desc) txd_q; 168 SIMPLEQ_ENTRY(rtk_tx_desc) txd_q;
169 struct mbuf *txd_mbuf; 169 struct mbuf *txd_mbuf;
170 bus_dmamap_t txd_dmamap; 170 bus_dmamap_t txd_dmamap;
171 bus_addr_t txd_txaddr; 171 bus_addr_t txd_txaddr;
172 bus_addr_t txd_txstat; 172 bus_addr_t txd_txstat;
173}; 173};
174 174
175struct rtk_softc { 175struct rtk_softc {
176 device_t sc_dev; 176 device_t sc_dev;
177 struct ethercom ethercom; /* interface info */ 177 struct ethercom ethercom; /* interface info */
178 struct mii_data mii; 178 struct mii_data mii;
179 struct callout rtk_tick_ch; /* tick callout */ 179 struct callout rtk_tick_ch; /* tick callout */
180 bus_space_tag_t rtk_btag; /* bus space tag */ 180 bus_space_tag_t rtk_btag; /* bus space tag */
181 bus_space_handle_t rtk_bhandle; /* bus space handle */ 181 bus_space_handle_t rtk_bhandle; /* bus space handle */
182 bus_size_t rtk_bsize; /* bus space mapping size */ 182 bus_size_t rtk_bsize; /* bus space mapping size */
183 u_int sc_quirk; /* chip quirks */ 183 u_int sc_quirk; /* chip quirks */
184#define RTKQ_8129 0x00000001 /* 8129 */ 184#define RTKQ_8129 0x00000001 /* 8129 */
185#define RTKQ_8139CPLUS 0x00000002 /* 8139C+ */ 185#define RTKQ_8139CPLUS 0x00000002 /* 8139C+ */
186#define RTKQ_8169NONS 0x00000004 /* old non-single 8169 */ 186#define RTKQ_8169NONS 0x00000004 /* old non-single 8169 */
187#define RTKQ_PCIE 0x00000008 /* PCIe variants */ 187#define RTKQ_PCIE 0x00000008 /* PCIe variants */
188#define RTKQ_MACLDPS 0x00000010 /* has LDPS register */ 188#define RTKQ_MACLDPS 0x00000010 /* has LDPS register */
189#define RTKQ_DESCV2 0x00000020 /* has V2 TX/RX descriptor */ 189#define RTKQ_DESCV2 0x00000020 /* has V2 TX/RX descriptor */
190#define RTKQ_NOJUMBO 0x00000040 /* no jumbo MTU support */ 190#define RTKQ_NOJUMBO 0x00000040 /* no jumbo MTU support */
191#define RTKQ_NOEECMD 0x00000080 /* unusable EEPROM command */ 191#define RTKQ_NOEECMD 0x00000080 /* unusable EEPROM command */
192#define RTKQ_MACSTAT 0x00000100 /* set MACSTAT_DIS on init */ 192#define RTKQ_MACSTAT 0x00000100 /* set MACSTAT_DIS on init */
193#define RTKQ_CMDSTOP 0x00000200 /* set STOPREQ on stop */ 193#define RTKQ_CMDSTOP 0x00000200 /* set STOPREQ on stop */
194#define RTKQ_PHYWAKE_PM 0x00000400 /* wake PHY from power down */ 194#define RTKQ_PHYWAKE_PM 0x00000400 /* wake PHY from power down */
195#define RTKQ_RXDV_GATED 0x00000800 195#define RTKQ_RXDV_GATED 0x00000800
196#define RTKQ_IM_HW 0x00001000 /* HW interrupt mitigation */ 196#define RTKQ_IM_HW 0x00001000 /* HW interrupt mitigation */
 197#define RTKQ_TXRXEN_LATER 0x00002000 /* TX/RX enable timing */
197 198
198 bus_dma_tag_t sc_dmat; 199 bus_dma_tag_t sc_dmat;
199 200
200 bus_dma_segment_t sc_dmaseg; /* for rtk(4) */ 201 bus_dma_segment_t sc_dmaseg; /* for rtk(4) */
201 int sc_dmanseg; /* for rtk(4) */ 202 int sc_dmanseg; /* for rtk(4) */
202 203
203 bus_dmamap_t recv_dmamap; /* for rtk(4) */ 204 bus_dmamap_t recv_dmamap; /* for rtk(4) */
204 uint8_t *rtk_rx_buf; 205 uint8_t *rtk_rx_buf;
205 206
206 struct rtk_tx_desc rtk_tx_descs[RTK_TX_LIST_CNT]; 207 struct rtk_tx_desc rtk_tx_descs[RTK_TX_LIST_CNT];
207 SIMPLEQ_HEAD(, rtk_tx_desc) rtk_tx_free; 208 SIMPLEQ_HEAD(, rtk_tx_desc) rtk_tx_free;
208 SIMPLEQ_HEAD(, rtk_tx_desc) rtk_tx_dirty; 209 SIMPLEQ_HEAD(, rtk_tx_desc) rtk_tx_dirty;
209 210
210 struct re_list_data re_ldata; 211 struct re_list_data re_ldata;
211 struct mbuf *re_head; 212 struct mbuf *re_head;
212 struct mbuf *re_tail; 213 struct mbuf *re_tail;
213 uint32_t re_rxlenmask; 214 uint32_t re_rxlenmask;
214 int re_testmode; 215 int re_testmode;
215 216
216 int sc_flags; /* misc flags */ 217 int sc_flags; /* misc flags */
217#define RTK_ATTACHED 0x00000001 /* attach has succeeded */ 218#define RTK_ATTACHED 0x00000001 /* attach has succeeded */
218#define RTK_ENABLED 0x00000002 /* chip is enabled */ 219#define RTK_ENABLED 0x00000002 /* chip is enabled */
219#define RTK_IS_ENABLED(sc) ((sc)->sc_flags & RTK_ENABLED) 220#define RTK_IS_ENABLED(sc) ((sc)->sc_flags & RTK_ENABLED)
220 221
221 int sc_txthresh; /* Early tx threshold */ 222 int sc_txthresh; /* Early tx threshold */
222 int sc_rev; /* MII revision */ 223 int sc_rev; /* MII revision */
223 224
224 /* Power management hooks. */ 225 /* Power management hooks. */
225 int (*sc_enable) (struct rtk_softc *); 226 int (*sc_enable) (struct rtk_softc *);
226 void (*sc_disable) (struct rtk_softc *); 227 void (*sc_disable) (struct rtk_softc *);
227 228
228 krndsource_t rnd_source; 229 krndsource_t rnd_source;
229}; 230};
230 231
231#define RE_TX_DESC_CNT(sc) ((sc)->re_ldata.re_tx_desc_cnt) 232#define RE_TX_DESC_CNT(sc) ((sc)->re_ldata.re_tx_desc_cnt)
232#define RE_TX_LIST_SZ(sc) (RE_TX_DESC_CNT(sc) * sizeof(struct re_desc)) 233#define RE_TX_LIST_SZ(sc) (RE_TX_DESC_CNT(sc) * sizeof(struct re_desc))
233#define RE_NEXT_TX_DESC(sc, x) (((x) + 1) & (RE_TX_DESC_CNT(sc) - 1)) 234#define RE_NEXT_TX_DESC(sc, x) (((x) + 1) & (RE_TX_DESC_CNT(sc) - 1))
234 235
235#define RE_RX_LIST_SZ (RE_RX_DESC_CNT * sizeof(struct re_desc)) 236#define RE_RX_LIST_SZ (RE_RX_DESC_CNT * sizeof(struct re_desc))
236#define RE_NEXT_RX_DESC(sc, x) (((x) + 1) & (RE_RX_DESC_CNT - 1)) 237#define RE_NEXT_RX_DESC(sc, x) (((x) + 1) & (RE_RX_DESC_CNT - 1))
237 238
238#define RE_NEXT_TXQ(sc, x) (((x) + 1) & (RE_TX_QLEN - 1)) 239#define RE_NEXT_TXQ(sc, x) (((x) + 1) & (RE_TX_QLEN - 1))
239 240
240#define RE_TXDESCSYNC(sc, idx, ops) \ 241#define RE_TXDESCSYNC(sc, idx, ops) \
241 bus_dmamap_sync((sc)->sc_dmat, \ 242 bus_dmamap_sync((sc)->sc_dmat, \
242 (sc)->re_ldata.re_tx_list_map, \ 243 (sc)->re_ldata.re_tx_list_map, \
243 sizeof(struct re_desc) * (idx), \ 244 sizeof(struct re_desc) * (idx), \
244 sizeof(struct re_desc), \ 245 sizeof(struct re_desc), \
245 (ops)) 246 (ops))
246#define RE_RXDESCSYNC(sc, idx, ops) \ 247#define RE_RXDESCSYNC(sc, idx, ops) \
247 bus_dmamap_sync((sc)->sc_dmat, \ 248 bus_dmamap_sync((sc)->sc_dmat, \
248 (sc)->re_ldata.re_rx_list_map, \ 249 (sc)->re_ldata.re_rx_list_map, \
249 sizeof(struct re_desc) * (idx), \ 250 sizeof(struct re_desc) * (idx), \
250 sizeof(struct re_desc), \ 251 sizeof(struct re_desc), \
251 (ops)) 252 (ops))
252 253
253/* 254/*
254 * re(4) hardware ip4csum-tx could be mangled with 28 byte or less IP packets 255 * re(4) hardware ip4csum-tx could be mangled with 28 byte or less IP packets
255 */ 256 */
256#define RE_IP4CSUMTX_MINLEN 28 257#define RE_IP4CSUMTX_MINLEN 28
257#define RE_IP4CSUMTX_PADLEN (ETHER_HDR_LEN + RE_IP4CSUMTX_MINLEN) 258#define RE_IP4CSUMTX_PADLEN (ETHER_HDR_LEN + RE_IP4CSUMTX_MINLEN)
258/* 259/*
259 * XXX 260 * XXX
260 * We are allocating pad DMA buffer after RX DMA descs for now 261 * We are allocating pad DMA buffer after RX DMA descs for now
261 * because RE_TX_LIST_SZ(sc) always occupies whole page but 262 * because RE_TX_LIST_SZ(sc) always occupies whole page but
262 * RE_RX_LIST_SZ is less than PAGE_SIZE so there is some unused region. 263 * RE_RX_LIST_SZ is less than PAGE_SIZE so there is some unused region.
263 */ 264 */
264#define RE_RX_DMAMEM_SZ (RE_RX_LIST_SZ + RE_IP4CSUMTX_PADLEN) 265#define RE_RX_DMAMEM_SZ (RE_RX_LIST_SZ + RE_IP4CSUMTX_PADLEN)
265#define RE_TXPADOFF RE_RX_LIST_SZ 266#define RE_TXPADOFF RE_RX_LIST_SZ
266#define RE_TXPADDADDR(sc) \ 267#define RE_TXPADDADDR(sc) \
267 ((sc)->re_ldata.re_rx_list_map->dm_segs[0].ds_addr + RE_TXPADOFF) 268 ((sc)->re_ldata.re_rx_list_map->dm_segs[0].ds_addr + RE_TXPADOFF)
268 269
269 270
270#define RTK_TXTH_MAX RTK_TXTH_1536 271#define RTK_TXTH_MAX RTK_TXTH_1536
271 272
272/* 273/*
273 * register space access macros 274 * register space access macros
274 */ 275 */
275#define CSR_WRITE_4(sc, reg, val) \ 276#define CSR_WRITE_4(sc, reg, val) \
276 bus_space_write_4(sc->rtk_btag, sc->rtk_bhandle, reg, val) 277 bus_space_write_4(sc->rtk_btag, sc->rtk_bhandle, reg, val)
277#define CSR_WRITE_2(sc, reg, val) \ 278#define CSR_WRITE_2(sc, reg, val) \
278 bus_space_write_2(sc->rtk_btag, sc->rtk_bhandle, reg, val) 279 bus_space_write_2(sc->rtk_btag, sc->rtk_bhandle, reg, val)
279#define CSR_WRITE_1(sc, reg, val) \ 280#define CSR_WRITE_1(sc, reg, val) \
280 bus_space_write_1(sc->rtk_btag, sc->rtk_bhandle, reg, val) 281 bus_space_write_1(sc->rtk_btag, sc->rtk_bhandle, reg, val)
281 282
282#define CSR_READ_4(sc, reg) \ 283#define CSR_READ_4(sc, reg) \
283 bus_space_read_4(sc->rtk_btag, sc->rtk_bhandle, reg) 284 bus_space_read_4(sc->rtk_btag, sc->rtk_bhandle, reg)
284#define CSR_READ_2(sc, reg) \ 285#define CSR_READ_2(sc, reg) \
285 bus_space_read_2(sc->rtk_btag, sc->rtk_bhandle, reg) 286 bus_space_read_2(sc->rtk_btag, sc->rtk_bhandle, reg)
286#define CSR_READ_1(sc, reg) \ 287#define CSR_READ_1(sc, reg) \
287 bus_space_read_1(sc->rtk_btag, sc->rtk_bhandle, reg) 288 bus_space_read_1(sc->rtk_btag, sc->rtk_bhandle, reg)
288 289
289#define RTK_TIMEOUT 1000 290#define RTK_TIMEOUT 1000
290 291
291/* 292/*
292 * PCI low memory base and low I/O base registers 293 * PCI low memory base and low I/O base registers
293 */ 294 */
294 295
295#define RTK_PCI_LOIO 0x10 296#define RTK_PCI_LOIO 0x10
296#define RTK_PCI_LOMEM 0x14 297#define RTK_PCI_LOMEM 0x14
297 298
298#ifdef _KERNEL 299#ifdef _KERNEL
299uint16_t rtk_read_eeprom(struct rtk_softc *, int, int); 300uint16_t rtk_read_eeprom(struct rtk_softc *, int, int);
300void rtk_setmulti(struct rtk_softc *); 301void rtk_setmulti(struct rtk_softc *);
301void rtk_attach(struct rtk_softc *); 302void rtk_attach(struct rtk_softc *);
302int rtk_detach(struct rtk_softc *); 303int rtk_detach(struct rtk_softc *);
303int rtk_activate(device_t, enum devact); 304int rtk_activate(device_t, enum devact);
304int rtk_intr(void *); 305int rtk_intr(void *);
305#endif /* _KERNEL */ 306#endif /* _KERNEL */