| @@ -1,14 +1,14 @@ | | | @@ -1,14 +1,14 @@ |
1 | /* $NetBSD: ralink_eth.c,v 1.9 2016/06/10 13:27:12 ozaki-r Exp $ */ | | 1 | /* $NetBSD: ralink_eth.c,v 1.10 2016/10/05 15:39:31 ryo Exp $ */ |
2 | /*- | | 2 | /*- |
3 | * Copyright (c) 2011 CradlePoint Technology, Inc. | | 3 | * Copyright (c) 2011 CradlePoint Technology, Inc. |
4 | * All rights reserved. | | 4 | * All rights reserved. |
5 | * | | 5 | * |
6 | * | | 6 | * |
7 | * Redistribution and use in source and binary forms, with or without | | 7 | * Redistribution and use in source and binary forms, with or without |
8 | * modification, are permitted provided that the following conditions | | 8 | * modification, are permitted provided that the following conditions |
9 | * are met: | | 9 | * are met: |
10 | * 1. Redistributions of source code must retain the above copyright | | 10 | * 1. Redistributions of source code must retain the above copyright |
11 | * notice, this list of conditions and the following disclaimer. | | 11 | * notice, this list of conditions and the following disclaimer. |
12 | * 2. Redistributions in binary form must reproduce the above copyright | | 12 | * 2. Redistributions in binary form must reproduce the above copyright |
13 | * notice, this list of conditions and the following disclaimer in the | | 13 | * notice, this list of conditions and the following disclaimer in the |
14 | * documentation and/or other materials provided with the distribution. | | 14 | * documentation and/or other materials provided with the distribution. |
| @@ -19,27 +19,27 @@ | | | @@ -19,27 +19,27 @@ |
19 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS | | 19 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS |
20 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 20 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
21 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 21 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
22 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 22 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
23 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 23 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
24 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 24 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
25 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 25 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
26 | * POSSIBILITY OF SUCH DAMAGE. | | 26 | * POSSIBILITY OF SUCH DAMAGE. |
27 | */ | | 27 | */ |
28 | | | 28 | |
29 | /* ralink_eth.c -- Ralink Ethernet Driver */ | | 29 | /* ralink_eth.c -- Ralink Ethernet Driver */ |
30 | | | 30 | |
31 | #include <sys/cdefs.h> | | 31 | #include <sys/cdefs.h> |
32 | __KERNEL_RCSID(0, "$NetBSD: ralink_eth.c,v 1.9 2016/06/10 13:27:12 ozaki-r Exp $"); | | 32 | __KERNEL_RCSID(0, "$NetBSD: ralink_eth.c,v 1.10 2016/10/05 15:39:31 ryo Exp $"); |
33 | | | 33 | |
34 | #include <sys/param.h> | | 34 | #include <sys/param.h> |
35 | #include <sys/bus.h> | | 35 | #include <sys/bus.h> |
36 | #include <sys/callout.h> | | 36 | #include <sys/callout.h> |
37 | #include <sys/device.h> | | 37 | #include <sys/device.h> |
38 | #include <sys/endian.h> | | 38 | #include <sys/endian.h> |
39 | #include <sys/errno.h> | | 39 | #include <sys/errno.h> |
40 | #include <sys/ioctl.h> | | 40 | #include <sys/ioctl.h> |
41 | #include <sys/intr.h> | | 41 | #include <sys/intr.h> |
42 | #include <sys/kernel.h> | | 42 | #include <sys/kernel.h> |
43 | #include <sys/malloc.h> | | 43 | #include <sys/malloc.h> |
44 | #include <sys/mbuf.h> | | 44 | #include <sys/mbuf.h> |
45 | #include <sys/socket.h> | | 45 | #include <sys/socket.h> |
| @@ -53,83 +53,83 @@ __KERNEL_RCSID(0, "$NetBSD: ralink_eth.c | | | @@ -53,83 +53,83 @@ __KERNEL_RCSID(0, "$NetBSD: ralink_eth.c |
53 | #include <net/if_ether.h> | | 53 | #include <net/if_ether.h> |
54 | #include <net/if_vlanvar.h> | | 54 | #include <net/if_vlanvar.h> |
55 | | | 55 | |
56 | #include <net/bpf.h> | | 56 | #include <net/bpf.h> |
57 | | | 57 | |
58 | #include <dev/mii/mii.h> | | 58 | #include <dev/mii/mii.h> |
59 | #include <dev/mii/miivar.h> | | 59 | #include <dev/mii/miivar.h> |
60 | #include <dev/mii/mii_bitbang.h> | | 60 | #include <dev/mii/mii_bitbang.h> |
61 | | | 61 | |
62 | #include <mips/ralink/ralink_var.h> | | 62 | #include <mips/ralink/ralink_var.h> |
63 | #include <mips/ralink/ralink_reg.h> | | 63 | #include <mips/ralink/ralink_reg.h> |
64 | #if 0 | | 64 | #if 0 |
65 | #define CPDEBUG /* XXX TMP DEBUG FIXME */ | | 65 | #define CPDEBUG /* XXX TMP DEBUG FIXME */ |
66 | #define RALINK_ETH_DEBUG /* XXX TMP DEBUG FIXME */ | | 66 | #define RALINK_ETH_DEBUG /* XXX TMP DEBUG FIXME */ |
67 | #define ENABLE_RALINK_DEBUG_ERROR 1 | | 67 | #define ENABLE_RALINK_DEBUG_ERROR 1 |
68 | #define ENABLE_RALINK_DEBUG_MISC 1 | | 68 | #define ENABLE_RALINK_DEBUG_MISC 1 |
69 | #define ENABLE_RALINK_DEBUG_INFO 1 | | 69 | #define ENABLE_RALINK_DEBUG_INFO 1 |
70 | #define ENABLE_RALINK_DEBUG_FORCE 1 | | 70 | #define ENABLE_RALINK_DEBUG_FORCE 1 |
71 | #define ENABLE_RALINK_DEBUG_REG 1 | | 71 | #define ENABLE_RALINK_DEBUG_REG 1 |
72 | #endif | | 72 | #endif |
73 | #include <mips/ralink/ralink_debug.h> | | 73 | #include <mips/ralink/ralink_debug.h> |
74 | | | 74 | |
75 | | | 75 | |
76 | /* PDMA RX Descriptor Format */ | | 76 | /* PDMA RX Descriptor Format */ |
77 | struct ralink_rx_desc { | | 77 | struct ralink_rx_desc { |
78 | uint32_t data_ptr; | | 78 | uint32_t data_ptr; |
79 | uint32_t rxd_info1; | | 79 | uint32_t rxd_info1; |
80 | #define RXD_LEN1(x) (((x) >> 0) & 0x3fff) | | 80 | #define RXD_LEN1(x) (((x) >> 0) & 0x3fff) |
81 | #define RXD_LAST1 (1 << 14) | | 81 | #define RXD_LAST1 (1 << 14) |
82 | #define RXD_LEN0(x) (((x) >> 16) & 0x3fff) | | 82 | #define RXD_LEN0(x) (((x) >> 16) & 0x3fff) |
83 | #define RXD_LAST0 (1 << 30) | | 83 | #define RXD_LAST0 (1 << 30) |
84 | #define RXD_DDONE (1 << 31) | | 84 | #define RXD_DDONE (1 << 31) |
85 | uint32_t unused; | | 85 | uint32_t unused; |
86 | uint32_t rxd_info2; | | 86 | uint32_t rxd_info2; |
87 | #define RXD_FOE(x) (((x) >> 0) & 0x3fff) | | 87 | #define RXD_FOE(x) (((x) >> 0) & 0x3fff) |
88 | #define RXD_FVLD (1 << 14) | | 88 | #define RXD_FVLD (1 << 14) |
89 | #define RXD_INFO(x) (((x) >> 16) & 0xff) | | 89 | #define RXD_INFO(x) (((x) >> 16) & 0xff) |
90 | #define RXD_PORT(x) (((x) >> 24) & 0x7) | | 90 | #define RXD_PORT(x) (((x) >> 24) & 0x7) |
91 | #define RXD_INFO_CPU (1 << 27) | | 91 | #define RXD_INFO_CPU (1 << 27) |
92 | #define RXD_L4_FAIL (1 << 28) | | 92 | #define RXD_L4_FAIL (1 << 28) |
93 | #define RXD_IP_FAIL (1 << 29) | | 93 | #define RXD_IP_FAIL (1 << 29) |
94 | #define RXD_L4_VLD (1 << 30) | | 94 | #define RXD_L4_VLD (1 << 30) |
95 | #define RXD_IP_VLD (1 << 31) | | 95 | #define RXD_IP_VLD (1 << 31) |
96 | }; | | 96 | }; |
97 | | | 97 | |
98 | /* PDMA RX Descriptor Format */ | | 98 | /* PDMA RX Descriptor Format */ |
99 | struct ralink_tx_desc { | | 99 | struct ralink_tx_desc { |
100 | uint32_t data_ptr0; | | 100 | uint32_t data_ptr0; |
101 | uint32_t txd_info1; | | 101 | uint32_t txd_info1; |
102 | #define TXD_LEN1(x) (((x) & 0x3fff) << 0) | | 102 | #define TXD_LEN1(x) (((x) & 0x3fff) << 0) |
103 | #define TXD_LAST1 (1 << 14) | | 103 | #define TXD_LAST1 (1 << 14) |
104 | #define TXD_BURST (1 << 15) | | 104 | #define TXD_BURST (1 << 15) |
105 | #define TXD_LEN0(x) (((x) & 0x3fff) << 16) | | 105 | #define TXD_LEN0(x) (((x) & 0x3fff) << 16) |
106 | #define TXD_LAST0 (1 << 30) | | 106 | #define TXD_LAST0 (1 << 30) |
107 | #define TXD_DDONE (1 << 31) | | 107 | #define TXD_DDONE (1 << 31) |
108 | uint32_t data_ptr1; | | 108 | uint32_t data_ptr1; |
109 | uint32_t txd_info2; | | 109 | uint32_t txd_info2; |
110 | #define TXD_VIDX(x) (((x) & 0xf) << 0) | | 110 | #define TXD_VIDX(x) (((x) & 0xf) << 0) |
111 | #define TXD_VPRI(x) (((x) & 0x7) << 4) | | 111 | #define TXD_VPRI(x) (((x) & 0x7) << 4) |
112 | #define TXD_VEN (1 << 7) | | 112 | #define TXD_VEN (1 << 7) |
113 | #define TXD_SIDX(x) (((x) & 0xf) << 8) | | 113 | #define TXD_SIDX(x) (((x) & 0xf) << 8) |
114 | #define TXD_SEN(x) (1 << 13) | | 114 | #define TXD_SEN(x) (1 << 13) |
115 | #define TXD_QN(x) (((x) & 0x7) << 16) | | 115 | #define TXD_QN(x) (((x) & 0x7) << 16) |
116 | #define TXD_PN(x) (((x) & 0x7) << 24) | | 116 | #define TXD_PN(x) (((x) & 0x7) << 24) |
117 | #define TXD_PN_CPU 0 | | 117 | #define TXD_PN_CPU 0 |
118 | #define TXD_PN_GDMA1 1 | | 118 | #define TXD_PN_GDMA1 1 |
119 | #define TXD_PN_GDMA2 2 | | 119 | #define TXD_PN_GDMA2 2 |
120 | #define TXD_TCP_EN (1 << 29) | | 120 | #define TXD_TCP_EN (1 << 29) |
121 | #define TXD_UDP_EN (1 << 30) | | 121 | #define TXD_UDP_EN (1 << 30) |
122 | #define TXD_IP_EN (1 << 31) | | 122 | #define TXD_IP_EN (1 << 31) |
123 | }; | | 123 | }; |
124 | | | 124 | |
125 | /* TODO: | | 125 | /* TODO: |
126 | * try to scale number of descriptors swith size of memory | | 126 | * try to scale number of descriptors swith size of memory |
127 | * these numbers may have a significant impact on performance/memory/mbuf usage | | 127 | * these numbers may have a significant impact on performance/memory/mbuf usage |
128 | */ | | 128 | */ |
129 | #if RTMEMSIZE >= 64 | | 129 | #if RTMEMSIZE >= 64 |
130 | #define RALINK_ETH_NUM_RX_DESC 256 | | 130 | #define RALINK_ETH_NUM_RX_DESC 256 |
131 | #define RALINK_ETH_NUM_TX_DESC 256 | | 131 | #define RALINK_ETH_NUM_TX_DESC 256 |
132 | #else | | 132 | #else |
133 | #define RALINK_ETH_NUM_RX_DESC 64 | | 133 | #define RALINK_ETH_NUM_RX_DESC 64 |
134 | #define RALINK_ETH_NUM_TX_DESC 64 | | 134 | #define RALINK_ETH_NUM_TX_DESC 64 |
135 | #endif | | 135 | #endif |
| @@ -245,27 +245,28 @@ static void ralink_eth_start(struct ifne | | | @@ -245,27 +245,28 @@ static void ralink_eth_start(struct ifne |
245 | static void ralink_eth_watchdog(struct ifnet *); | | 245 | static void ralink_eth_watchdog(struct ifnet *); |
246 | static int ralink_eth_ioctl(struct ifnet *, u_long, void *); | | 246 | static int ralink_eth_ioctl(struct ifnet *, u_long, void *); |
247 | | | 247 | |
248 | /* mii functions */ | | 248 | /* mii functions */ |
249 | #if defined(RT3050) || defined(RT3052) | | 249 | #if defined(RT3050) || defined(RT3052) |
250 | static void ralink_eth_mdio_enable(ralink_eth_softc_t *, bool); | | 250 | static void ralink_eth_mdio_enable(ralink_eth_softc_t *, bool); |
251 | #endif | | 251 | #endif |
252 | static void ralink_eth_mii_statchg(struct ifnet *); | | 252 | static void ralink_eth_mii_statchg(struct ifnet *); |
253 | static void ralink_eth_mii_tick(void *); | | 253 | static void ralink_eth_mii_tick(void *); |
254 | static int ralink_eth_mii_read(device_t, int, int); | | 254 | static int ralink_eth_mii_read(device_t, int, int); |
255 | static void ralink_eth_mii_write(device_t, int, int, int); | | 255 | static void ralink_eth_mii_write(device_t, int, int, int); |
256 | | | 256 | |
257 | CFATTACH_DECL_NEW(reth, sizeof(struct ralink_eth_softc), | | 257 | CFATTACH_DECL_NEW(reth, sizeof(struct ralink_eth_softc), |
258 | ralink_eth_match, ralink_eth_attach, ralink_eth_detach, ralink_eth_activate); | | 258 | ralink_eth_match, ralink_eth_attach, ralink_eth_detach, |
| | | 259 | ralink_eth_activate); |
259 | | | 260 | |
260 | static inline uint32_t | | 261 | static inline uint32_t |
261 | sy_read(const ralink_eth_softc_t *sc, const bus_size_t off) | | 262 | sy_read(const ralink_eth_softc_t *sc, const bus_size_t off) |
262 | { | | 263 | { |
263 | return bus_space_read_4(sc->sc_memt, sc->sc_sy_memh, off); | | 264 | return bus_space_read_4(sc->sc_memt, sc->sc_sy_memh, off); |
264 | } | | 265 | } |
265 | | | 266 | |
266 | static inline void | | 267 | static inline void |
267 | sy_write(const ralink_eth_softc_t *sc, const bus_size_t off, const uint32_t val) | | 268 | sy_write(const ralink_eth_softc_t *sc, const bus_size_t off, const uint32_t val) |
268 | { | | 269 | { |
269 | bus_space_write_4(sc->sc_memt, sc->sc_sy_memh, off, val); | | 270 | bus_space_write_4(sc->sc_memt, sc->sc_sy_memh, off, val); |
270 | } | | 271 | } |
271 | | | 272 | |
| @@ -307,255 +308,257 @@ ralink_eth_match(device_t parent, cfdata | | | @@ -307,255 +308,257 @@ ralink_eth_match(device_t parent, cfdata |
307 | */ | | 308 | */ |
308 | void | | 309 | void |
309 | ralink_eth_attach(device_t parent, device_t self, void *aux) | | 310 | ralink_eth_attach(device_t parent, device_t self, void *aux) |
310 | { | | 311 | { |
311 | ralink_eth_softc_t * const sc = device_private(self); | | 312 | ralink_eth_softc_t * const sc = device_private(self); |
312 | const struct mainbus_attach_args *ma = aux; | | 313 | const struct mainbus_attach_args *ma = aux; |
313 | int error; | | 314 | int error; |
314 | int i; | | 315 | int i; |
315 | | | 316 | |
316 | aprint_naive(": Ralink Ethernet\n"); | | 317 | aprint_naive(": Ralink Ethernet\n"); |
317 | aprint_normal(": Ralink Ethernet\n"); | | 318 | aprint_normal(": Ralink Ethernet\n"); |
318 | | | 319 | |
319 | evcnt_attach_dynamic(&sc->sc_evcnt_spurious_intr, EVCNT_TYPE_INTR, NULL, | | 320 | evcnt_attach_dynamic(&sc->sc_evcnt_spurious_intr, EVCNT_TYPE_INTR, NULL, |
320 | device_xname(self), "spurious intr"); | | 321 | device_xname(self), "spurious intr"); |
321 | evcnt_attach_dynamic(&sc->sc_evcnt_rxintr, EVCNT_TYPE_INTR, NULL, | | 322 | evcnt_attach_dynamic(&sc->sc_evcnt_rxintr, EVCNT_TYPE_INTR, NULL, |
322 | device_xname(self), "rxintr"); | | 323 | device_xname(self), "rxintr"); |
323 | evcnt_attach_dynamic(&sc->sc_evcnt_rxintr_skip_len, | | 324 | evcnt_attach_dynamic(&sc->sc_evcnt_rxintr_skip_len, |
324 | EVCNT_TYPE_INTR, &sc->sc_evcnt_rxintr, | | 325 | EVCNT_TYPE_INTR, &sc->sc_evcnt_rxintr, |
325 | device_xname(self), "rxintr skip: no room for VLAN header"); | | 326 | device_xname(self), "rxintr skip: no room for VLAN header"); |
326 | evcnt_attach_dynamic(&sc->sc_evcnt_rxintr_skip_tag_none, | | 327 | evcnt_attach_dynamic(&sc->sc_evcnt_rxintr_skip_tag_none, |
327 | EVCNT_TYPE_INTR, &sc->sc_evcnt_rxintr, | | 328 | EVCNT_TYPE_INTR, &sc->sc_evcnt_rxintr, |
328 | device_xname(self), "rxintr skip: no VLAN tag"); | | 329 | device_xname(self), "rxintr skip: no VLAN tag"); |
329 | evcnt_attach_dynamic(&sc->sc_evcnt_rxintr_skip_tag_inval, | | 330 | evcnt_attach_dynamic(&sc->sc_evcnt_rxintr_skip_tag_inval, |
330 | EVCNT_TYPE_INTR, &sc->sc_evcnt_rxintr, | | 331 | EVCNT_TYPE_INTR, &sc->sc_evcnt_rxintr, |
331 | device_xname(self), "rxintr skip: invalid VLAN tag"); | | 332 | device_xname(self), "rxintr skip: invalid VLAN tag"); |
332 | evcnt_attach_dynamic(&sc->sc_evcnt_rxintr_skip_inact, | | 333 | evcnt_attach_dynamic(&sc->sc_evcnt_rxintr_skip_inact, |
333 | EVCNT_TYPE_INTR, &sc->sc_evcnt_rxintr, | | 334 | EVCNT_TYPE_INTR, &sc->sc_evcnt_rxintr, |
334 | device_xname(self), "rxintr skip: partition inactive"); | | 335 | device_xname(self), "rxintr skip: partition inactive"); |
335 | evcnt_attach_dynamic(&sc->sc_evcnt_txintr, EVCNT_TYPE_INTR, NULL, | | 336 | evcnt_attach_dynamic(&sc->sc_evcnt_txintr, EVCNT_TYPE_INTR, NULL, |
336 | device_xname(self), "txintr"); | | 337 | device_xname(self), "txintr"); |
337 | evcnt_attach_dynamic(&sc->sc_evcnt_input, EVCNT_TYPE_INTR, NULL, | | 338 | evcnt_attach_dynamic(&sc->sc_evcnt_input, EVCNT_TYPE_INTR, NULL, |
338 | device_xname(self), "input"); | | 339 | device_xname(self), "input"); |
339 | evcnt_attach_dynamic(&sc->sc_evcnt_output, EVCNT_TYPE_INTR, NULL, | | 340 | evcnt_attach_dynamic(&sc->sc_evcnt_output, EVCNT_TYPE_INTR, NULL, |
340 | device_xname(self), "output"); | | 341 | device_xname(self), "output"); |
341 | evcnt_attach_dynamic(&sc->sc_evcnt_watchdog, EVCNT_TYPE_INTR, NULL, | | 342 | evcnt_attach_dynamic(&sc->sc_evcnt_watchdog, EVCNT_TYPE_INTR, NULL, |
342 | device_xname(self), "watchdog"); | | 343 | device_xname(self), "watchdog"); |
343 | evcnt_attach_dynamic(&sc->sc_evcnt_wd_tx, | | 344 | evcnt_attach_dynamic(&sc->sc_evcnt_wd_tx, |
344 | EVCNT_TYPE_INTR, &sc->sc_evcnt_watchdog, | | 345 | EVCNT_TYPE_INTR, &sc->sc_evcnt_watchdog, |
345 | device_xname(self), "watchdog TX timeout"); | | 346 | device_xname(self), "watchdog TX timeout"); |
346 | evcnt_attach_dynamic(&sc->sc_evcnt_wd_spurious, | | 347 | evcnt_attach_dynamic(&sc->sc_evcnt_wd_spurious, |
347 | EVCNT_TYPE_INTR, &sc->sc_evcnt_watchdog, | | 348 | EVCNT_TYPE_INTR, &sc->sc_evcnt_watchdog, |
348 | device_xname(self), "watchdog spurious"); | | 349 | device_xname(self), "watchdog spurious"); |
349 | evcnt_attach_dynamic(&sc->sc_evcnt_wd_reactivate, | | 350 | evcnt_attach_dynamic(&sc->sc_evcnt_wd_reactivate, |
350 | EVCNT_TYPE_INTR, &sc->sc_evcnt_watchdog, | | 351 | EVCNT_TYPE_INTR, &sc->sc_evcnt_watchdog, |
351 | device_xname(self), "watchdog reactivate"); | | 352 | device_xname(self), "watchdog reactivate"); |
352 | evcnt_attach_dynamic(&sc->sc_evcnt_add_rxbuf_hdr_fail, | | 353 | evcnt_attach_dynamic(&sc->sc_evcnt_add_rxbuf_hdr_fail, |
353 | EVCNT_TYPE_INTR, NULL, | | 354 | EVCNT_TYPE_INTR, NULL, |
354 | device_xname(self), "add rxbuf hdr fail"); | | 355 | device_xname(self), "add rxbuf hdr fail"); |
355 | evcnt_attach_dynamic(&sc->sc_evcnt_add_rxbuf_mcl_fail, | | 356 | evcnt_attach_dynamic(&sc->sc_evcnt_add_rxbuf_mcl_fail, |
356 | EVCNT_TYPE_INTR, NULL, | | 357 | EVCNT_TYPE_INTR, NULL, |
357 | device_xname(self), "add rxbuf mcl fail"); | | 358 | device_xname(self), "add rxbuf mcl fail"); |
358 | | | 359 | |
359 | /* | | 360 | /* |
360 | * In order to obtain unique initial Ethernet address on a host, | | 361 | * In order to obtain unique initial Ethernet address on a host, |
361 | * do some randomisation using the current uptime. It's not meant | | 362 | * do some randomisation using the current uptime. It's not meant |
362 | * for anything but avoiding hard-coding an address. | | 363 | * for anything but avoiding hard-coding an address. |
363 | */ | | 364 | */ |
364 | uint8_t enaddr[ETHER_ADDR_LEN] = { 0x00, 0x30, 0x44, 0x00, 0x00, 0x00 }; | | 365 | uint8_t enaddr[ETHER_ADDR_LEN] = { 0x00, 0x30, 0x44, 0x00, 0x00, 0x00 }; |
365 | | | 366 | |
366 | sc->sc_dev = self; | | 367 | sc->sc_dev = self; |
367 | sc->sc_dmat = ma->ma_dmat; | | 368 | sc->sc_dmat = ma->ma_dmat; |
368 | sc->sc_memt = ma->ma_memt; | | 369 | sc->sc_memt = ma->ma_memt; |
369 | sc->sc_sy_size = 0x10000; | | 370 | sc->sc_sy_size = 0x10000; |
370 | sc->sc_fe_size = 0x10000; | | 371 | sc->sc_fe_size = 0x10000; |
371 | sc->sc_sw_size = 0x08000; | | 372 | sc->sc_sw_size = 0x08000; |
372 | | | 373 | |
373 | /* | | 374 | /* |
374 | * map the registers | | 375 | * map the registers |
375 | * | | 376 | * |
376 | * we map the Sysctl, Frame Engine and Ether Switch registers | | 377 | * we map the Sysctl, Frame Engine and Ether Switch registers |
377 | * seperately so we can use the defined register offsets sanely | | 378 | * seperately so we can use the defined register offsets sanely |
378 | */ | | 379 | */ |
379 | if ((error = bus_space_map(sc->sc_memt, RA_SYSCTL_BASE, | | 380 | if ((error = bus_space_map(sc->sc_memt, RA_SYSCTL_BASE, |
380 | sc->sc_sy_size, 0, &sc->sc_sy_memh)) != 0) { | | 381 | sc->sc_sy_size, 0, &sc->sc_sy_memh)) != 0) { |
381 | aprint_error_dev(self, "unable to map Sysctl registers, " | | 382 | aprint_error_dev(self, "unable to map Sysctl registers, " |
382 | "error=%d\n", error); | | 383 | "error=%d\n", error); |
383 | goto fail_0a; | | 384 | goto fail_0a; |
384 | } | | 385 | } |
385 | if ((error = bus_space_map(sc->sc_memt, RA_FRAME_ENGINE_BASE, | | 386 | if ((error = bus_space_map(sc->sc_memt, RA_FRAME_ENGINE_BASE, |
386 | sc->sc_fe_size, 0, &sc->sc_fe_memh)) != 0) { | | 387 | sc->sc_fe_size, 0, &sc->sc_fe_memh)) != 0) { |
387 | aprint_error_dev(self, "unable to map Frame Engine registers, " | | 388 | aprint_error_dev(self, "unable to map Frame Engine registers, " |
388 | "error=%d\n", error); | | 389 | "error=%d\n", error); |
389 | goto fail_0b; | | 390 | goto fail_0b; |
390 | } | | 391 | } |
391 | if ((error = bus_space_map(sc->sc_memt, RA_ETH_SW_BASE, | | 392 | if ((error = bus_space_map(sc->sc_memt, RA_ETH_SW_BASE, |
392 | sc->sc_sw_size, 0, &sc->sc_sw_memh)) != 0) { | | 393 | sc->sc_sw_size, 0, &sc->sc_sw_memh)) != 0) { |
393 | aprint_error_dev(self, "unable to map Ether Switch registers, " | | 394 | aprint_error_dev(self, "unable to map Ether Switch registers, " |
394 | "error=%d\n", error); | | 395 | "error=%d\n", error); |
395 | goto fail_0c; | | 396 | goto fail_0c; |
396 | } | | 397 | } |
397 | | | 398 | |
398 | /* Allocate desc structures, and create & load the DMA map for them */ | | 399 | /* Allocate desc structures, and create & load the DMA map for them */ |
399 | if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct ralink_descs), | | 400 | if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct ralink_descs), |
400 | PAGE_SIZE, 0, &sc->sc_dseg, 1, &sc->sc_ndseg, 0)) != 0) { | | 401 | PAGE_SIZE, 0, &sc->sc_dseg, 1, &sc->sc_ndseg, 0)) != 0) { |
401 | aprint_error_dev(self, "unable to allocate transmit descs, " | | 402 | aprint_error_dev(self, "unable to allocate transmit descs, " |
402 | "error=%d\n", error); | | 403 | "error=%d\n", error); |
403 | goto fail_1; | | 404 | goto fail_1; |
404 | } | | 405 | } |
405 | | | 406 | |
406 | if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_ndseg, | | 407 | if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_ndseg, |
407 | sizeof(struct ralink_descs), (void **)&sc->sc_descs, BUS_DMA_COHERENT)) | | 408 | sizeof(struct ralink_descs), (void **)&sc->sc_descs, |
408 | != 0) { | | 409 | BUS_DMA_COHERENT)) != 0) { |
409 | aprint_error_dev(self, "unable to map control data, " | | 410 | aprint_error_dev(self, "unable to map control data, " |
410 | "error=%d\n", error); | | 411 | "error=%d\n", error); |
411 | goto fail_2; | | 412 | goto fail_2; |
412 | } | | 413 | } |
413 | | | 414 | |
414 | if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct ralink_descs), 1, | | 415 | if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct ralink_descs), |
415 | sizeof(struct ralink_descs), 0, 0, &sc->sc_pdmamap)) != 0) { | | 416 | 1, sizeof(struct ralink_descs), 0, 0, &sc->sc_pdmamap)) != 0) { |
416 | aprint_error_dev(self, "unable to create control data DMA map, " | | 417 | aprint_error_dev(self, "unable to create control data DMA map, " |
417 | "error=%d\n", error); | | 418 | "error=%d\n", error); |
418 | goto fail_3; | | 419 | goto fail_3; |
419 | } | | 420 | } |
420 | | | 421 | |
421 | if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_pdmamap, sc->sc_descs, | | 422 | if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_pdmamap, sc->sc_descs, |
422 | sizeof(struct ralink_descs), NULL, 0)) != 0) { | | 423 | sizeof(struct ralink_descs), NULL, 0)) != 0) { |
423 | aprint_error_dev(self, "unable to load control data DMA map, " | | 424 | aprint_error_dev(self, "unable to load control data DMA map, " |
424 | "error=%d\n", error); | | 425 | "error=%d\n", error); |
425 | goto fail_4; | | 426 | goto fail_4; |
426 | } | | 427 | } |
427 | | | 428 | |
428 | /* Create the transmit buffer DMA maps. */ | | 429 | /* Create the transmit buffer DMA maps. */ |
429 | for (i = 0; i < RALINK_ETH_NUM_TX_DESC; i++) { | | 430 | for (i = 0; i < RALINK_ETH_NUM_TX_DESC; i++) { |
430 | if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, | | 431 | if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, |
431 | RALINK_ETH_MAX_TX_SEGS, MCLBYTES, 0, 0, | | 432 | RALINK_ETH_MAX_TX_SEGS, MCLBYTES, 0, 0, |
432 | &sc->sc_txstate[i].txs_dmamap)) != 0) { | | 433 | &sc->sc_txstate[i].txs_dmamap)) != 0) { |
433 | aprint_error_dev(self, "unable to create tx DMA map %d, " | | 434 | aprint_error_dev(self, |
434 | "error=%d\n", i, error); | | 435 | "unable to create tx DMA map %d, error=%d\n", |
| | | 436 | i, error); |
435 | goto fail_5; | | 437 | goto fail_5; |
436 | } | | 438 | } |
437 | } | | 439 | } |
438 | | | 440 | |
439 | /* Create the receive buffer DMA maps. */ | | 441 | /* Create the receive buffer DMA maps. */ |
440 | for (i = 0; i < RALINK_ETH_NUM_RX_DESC; i++) { | | 442 | for (i = 0; i < RALINK_ETH_NUM_RX_DESC; i++) { |
441 | if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, | | 443 | if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, |
442 | MCLBYTES, 0, 0, &sc->sc_rxstate[i].rxs_dmamap)) != 0) { | | 444 | MCLBYTES, 0, 0, &sc->sc_rxstate[i].rxs_dmamap)) != 0) { |
443 | aprint_error_dev(self, "unable to create rx DMA map %d, " | | 445 | aprint_error_dev(self, |
444 | "error=%d\n", i, error); | | 446 | "unable to create rx DMA map %d, error=%d\n", |
| | | 447 | i, error); |
445 | goto fail_6; | | 448 | goto fail_6; |
446 | } | | 449 | } |
447 | sc->sc_rxstate[i].rxs_mbuf = NULL; | | 450 | sc->sc_rxstate[i].rxs_mbuf = NULL; |
448 | } | | 451 | } |
449 | | | 452 | |
450 | /* this is a zero buffer used for zero'ing out short packets */ | | 453 | /* this is a zero buffer used for zero'ing out short packets */ |
451 | memset(sc->ralink_zero_buf, 0, RALINK_MIN_BUF); | | 454 | memset(sc->ralink_zero_buf, 0, RALINK_MIN_BUF); |
452 | | | 455 | |
453 | /* setup some address in hardware */ | | 456 | /* setup some address in hardware */ |
454 | fe_write(sc, RA_FE_GDMA1_MAC_LSB, | | 457 | fe_write(sc, RA_FE_GDMA1_MAC_LSB, |
455 | (enaddr[5] | (enaddr[4] << 8) | | | 458 | (enaddr[5] | (enaddr[4] << 8) | |
456 | (enaddr[3] << 16) | (enaddr[2] << 24))); | | 459 | (enaddr[3] << 16) | (enaddr[2] << 24))); |
457 | fe_write(sc, RA_FE_GDMA1_MAC_MSB, | | 460 | fe_write(sc, RA_FE_GDMA1_MAC_MSB, |
458 | (enaddr[1] | (enaddr[0] << 8))); | | 461 | (enaddr[1] | (enaddr[0] << 8))); |
459 | | | 462 | |
460 | /* | | 463 | /* |
461 | * iterate through ports | | 464 | * iterate through ports |
462 | * slickrock must use specific non-linear sequence | | 465 | * slickrock must use specific non-linear sequence |
463 | * others are linear | | 466 | * others are linear |
464 | */ | | 467 | */ |
465 | struct ifnet * const ifp = &sc->sc_ethercom.ec_if; | | 468 | struct ifnet * const ifp = &sc->sc_ethercom.ec_if; |
466 | | | 469 | |
467 | strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ); | | 470 | strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ); |
468 | | | 471 | |
469 | /* | | 472 | /* |
470 | * Initialize our media structures. | | 473 | * Initialize our media structures. |
471 | * This may probe the PHY, if present. | | 474 | * This may probe the PHY, if present. |
472 | */ | | 475 | */ |
473 | sc->sc_mii.mii_ifp = ifp; | | 476 | sc->sc_mii.mii_ifp = ifp; |
474 | sc->sc_mii.mii_readreg = ralink_eth_mii_read; | | 477 | sc->sc_mii.mii_readreg = ralink_eth_mii_read; |
475 | sc->sc_mii.mii_writereg = ralink_eth_mii_write; | | 478 | sc->sc_mii.mii_writereg = ralink_eth_mii_write; |
476 | sc->sc_mii.mii_statchg = ralink_eth_mii_statchg; | | 479 | sc->sc_mii.mii_statchg = ralink_eth_mii_statchg; |
477 | sc->sc_ethercom.ec_mii = &sc->sc_mii; | | 480 | sc->sc_ethercom.ec_mii = &sc->sc_mii; |
478 | ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange, | | 481 | ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange, |
479 | ether_mediastatus); | | 482 | ether_mediastatus); |
480 | mii_attach(sc->sc_dev, &sc->sc_mii, ~0, i, MII_OFFSET_ANY, | | 483 | mii_attach(sc->sc_dev, &sc->sc_mii, ~0, i, MII_OFFSET_ANY, |
481 | MIIF_FORCEANEG|MIIF_DOPAUSE|MIIF_NOISOLATE); | | 484 | MIIF_FORCEANEG|MIIF_DOPAUSE|MIIF_NOISOLATE); |
482 | | | 485 | |
483 | if (LIST_EMPTY(&sc->sc_mii.mii_phys)) { | | 486 | if (LIST_EMPTY(&sc->sc_mii.mii_phys)) { |
484 | #if 1 | | 487 | #if 1 |
485 | ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_1000_T| | | 488 | ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_1000_T| |
486 | IFM_FDX|IFM_ETH_RXPAUSE|IFM_ETH_TXPAUSE, 0, NULL); | | 489 | IFM_FDX|IFM_ETH_RXPAUSE|IFM_ETH_TXPAUSE, 0, NULL); |
487 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_1000_T| | | 490 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_1000_T| |
488 | IFM_FDX|IFM_ETH_RXPAUSE|IFM_ETH_TXPAUSE); | | 491 | IFM_FDX|IFM_ETH_RXPAUSE|IFM_ETH_TXPAUSE); |
489 | #else | | 492 | #else |
490 | ifmedia_add(&sc->sc_mii.mii_media, | | 493 | ifmedia_add(&sc->sc_mii.mii_media, |
491 | IFM_ETHER|IFM_MANUAL, 0, NULL); | | 494 | IFM_ETHER|IFM_MANUAL, 0, NULL); |
492 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL); | | 495 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL); |
493 | #endif | | 496 | #endif |
494 | } else { | | 497 | } else { |
495 | /* Ensure we mask ok for the switch multiple phy's */ | | 498 | /* Ensure we mask ok for the switch multiple phy's */ |
496 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); | | 499 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); |
497 | } | | 500 | } |
498 | | | 501 | |
499 | ifp->if_softc = sc; | | 502 | ifp->if_softc = sc; |
500 | ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; | | 503 | ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; |
501 | ifp->if_init = ralink_eth_init; | | 504 | ifp->if_init = ralink_eth_init; |
502 | ifp->if_start = ralink_eth_start; | | 505 | ifp->if_start = ralink_eth_start; |
503 | ifp->if_ioctl = ralink_eth_ioctl; | | 506 | ifp->if_ioctl = ralink_eth_ioctl; |
504 | ifp->if_stop = ralink_eth_stop; | | 507 | ifp->if_stop = ralink_eth_stop; |
505 | ifp->if_watchdog = ralink_eth_watchdog; | | 508 | ifp->if_watchdog = ralink_eth_watchdog; |
506 | IFQ_SET_READY(&ifp->if_snd); | | 509 | IFQ_SET_READY(&ifp->if_snd); |
507 | | | 510 | |
508 | /* We can support 802.1Q VLAN-sized frames. */ | | 511 | /* We can support 802.1Q VLAN-sized frames. */ |
509 | sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; | | 512 | sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; |
510 | | | 513 | |
511 | /* We support IPV4 CRC Offload */ | | 514 | /* We support IPV4 CRC Offload */ |
512 | ifp->if_capabilities |= | | 515 | ifp->if_capabilities |= |
513 | (IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | | | 516 | (IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | |
514 | IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | | | 517 | IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | |
515 | IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx); | | 518 | IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx); |
516 | | | 519 | |
517 | /* Attach the interface. */ | | 520 | /* Attach the interface. */ |
518 | if_attach(ifp); | | 521 | if_attach(ifp); |
519 | ether_ifattach(ifp, enaddr); | | 522 | ether_ifattach(ifp, enaddr); |
520 | | | 523 | |
521 | /* init our mii ticker */ | | 524 | /* init our mii ticker */ |
522 | callout_init(&sc->sc_tick_callout, 0); | | 525 | callout_init(&sc->sc_tick_callout, 0); |
523 | callout_reset(&sc->sc_tick_callout, hz, ralink_eth_mii_tick, sc); | | 526 | callout_reset(&sc->sc_tick_callout, hz, ralink_eth_mii_tick, sc); |
524 | | | 527 | |
525 | return; | | 528 | return; |
526 | | | 529 | |
527 | /* | | 530 | /* |
528 | * Free any resources we've allocated during the failed attach | | 531 | * Free any resources we've allocated during the failed attach |
529 | * attempt. Do this in reverse order and fall through. | | 532 | * attempt. Do this in reverse order and fall through. |
530 | */ | | 533 | */ |
531 | fail_6: | | 534 | fail_6: |
532 | for (i = 0; i < RALINK_ETH_NUM_RX_DESC; i++) { | | 535 | for (i = 0; i < RALINK_ETH_NUM_RX_DESC; i++) { |
533 | if (sc->sc_rxstate[i].rxs_dmamap != NULL) | | 536 | if (sc->sc_rxstate[i].rxs_dmamap != NULL) |
534 | bus_dmamap_destroy(sc->sc_dmat, | | 537 | bus_dmamap_destroy(sc->sc_dmat, |
535 | sc->sc_rxstate[i].rxs_dmamap); | | 538 | sc->sc_rxstate[i].rxs_dmamap); |
536 | } | | 539 | } |
537 | fail_5: | | 540 | fail_5: |
538 | for (i = 0; i < RALINK_ETH_NUM_TX_DESC; i++) { | | 541 | for (i = 0; i < RALINK_ETH_NUM_TX_DESC; i++) { |
539 | if (sc->sc_txstate[i].txs_dmamap != NULL) | | 542 | if (sc->sc_txstate[i].txs_dmamap != NULL) |
540 | bus_dmamap_destroy(sc->sc_dmat, | | 543 | bus_dmamap_destroy(sc->sc_dmat, |
541 | sc->sc_txstate[i].txs_dmamap); | | 544 | sc->sc_txstate[i].txs_dmamap); |
542 | } | | 545 | } |
543 | bus_dmamap_unload(sc->sc_dmat, sc->sc_pdmamap); | | 546 | bus_dmamap_unload(sc->sc_dmat, sc->sc_pdmamap); |
544 | fail_4: | | 547 | fail_4: |
545 | bus_dmamap_destroy(sc->sc_dmat, sc->sc_pdmamap); | | 548 | bus_dmamap_destroy(sc->sc_dmat, sc->sc_pdmamap); |
546 | fail_3: | | 549 | fail_3: |
547 | bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_descs, | | 550 | bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_descs, |
548 | sizeof(struct ralink_descs)); | | 551 | sizeof(struct ralink_descs)); |
549 | fail_2: | | 552 | fail_2: |
550 | bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_ndseg); | | 553 | bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_ndseg); |
551 | fail_1: | | 554 | fail_1: |
552 | bus_space_unmap(sc->sc_memt, sc->sc_sw_memh, sc->sc_sw_size); | | 555 | bus_space_unmap(sc->sc_memt, sc->sc_sw_memh, sc->sc_sw_size); |
553 | fail_0c: | | 556 | fail_0c: |
554 | bus_space_unmap(sc->sc_memt, sc->sc_fe_memh, sc->sc_fe_size); | | 557 | bus_space_unmap(sc->sc_memt, sc->sc_fe_memh, sc->sc_fe_size); |
555 | fail_0b: | | 558 | fail_0b: |
556 | bus_space_unmap(sc->sc_memt, sc->sc_sy_memh, sc->sc_fe_size); | | 559 | bus_space_unmap(sc->sc_memt, sc->sc_sy_memh, sc->sc_fe_size); |
557 | fail_0a: | | 560 | fail_0a: |
558 | return; | | 561 | return; |
559 | } | | 562 | } |
560 | | | 563 | |
561 | /* | | 564 | /* |
| @@ -590,32 +593,32 @@ ralink_eth_enable(ralink_eth_softc_t *sc | | | @@ -590,32 +593,32 @@ ralink_eth_enable(ralink_eth_softc_t *sc |
590 | RALINK_DEBUG_FUNC_ENTRY(); | | 593 | RALINK_DEBUG_FUNC_ENTRY(); |
591 | | | 594 | |
592 | if (sc->sc_ih != NULL) { | | 595 | if (sc->sc_ih != NULL) { |
593 | RALINK_DEBUG(RALINK_DEBUG_MISC, "%s() already active", | | 596 | RALINK_DEBUG(RALINK_DEBUG_MISC, "%s() already active", |
594 | __func__); | | 597 | __func__); |
595 | return EALREADY; | | 598 | return EALREADY; |
596 | } | | 599 | } |
597 | | | 600 | |
598 | sc->sc_pending_tx = 0; | | 601 | sc->sc_pending_tx = 0; |
599 | | | 602 | |
600 | int s = splnet(); | | 603 | int s = splnet(); |
601 | ralink_eth_hw_init(sc); | | 604 | ralink_eth_hw_init(sc); |
602 | sc->sc_ih = ra_intr_establish(RA_IRQ_FENGINE, | | 605 | sc->sc_ih = ra_intr_establish(RA_IRQ_FENGINE, |
603 | ralink_eth_intr, sc, 1); | | 606 | ralink_eth_intr, sc, 1); |
604 | splx(s); | | 607 | splx(s); |
605 | if (sc->sc_ih == NULL) { | | 608 | if (sc->sc_ih == NULL) { |
606 | RALINK_DEBUG(RALINK_DEBUG_ERROR, | | 609 | RALINK_DEBUG(RALINK_DEBUG_ERROR, |
607 | "%s: unable to establish interrupt\n", | | 610 | "%s: unable to establish interrupt\n", |
608 | device_xname(sc->sc_dev)); | | 611 | device_xname(sc->sc_dev)); |
609 | return EIO; | | 612 | return EIO; |
610 | } | | 613 | } |
611 | | | 614 | |
612 | return 0; | | 615 | return 0; |
613 | } | | 616 | } |
614 | | | 617 | |
615 | /* | | 618 | /* |
616 | * ralink_eth_disable | | 619 | * ralink_eth_disable |
617 | */ | | 620 | */ |
618 | static void | | 621 | static void |
619 | ralink_eth_disable(ralink_eth_softc_t *sc) | | 622 | ralink_eth_disable(ralink_eth_softc_t *sc) |
620 | { | | 623 | { |
621 | RALINK_DEBUG_FUNC_ENTRY(); | | 624 | RALINK_DEBUG_FUNC_ENTRY(); |
| @@ -665,27 +668,27 @@ ralink_eth_detach(device_t self, int fla | | | @@ -665,27 +668,27 @@ ralink_eth_detach(device_t self, int fla |
665 | for (i = 0; i < RALINK_ETH_NUM_TX_DESC; i++) { | | 668 | for (i = 0; i < RALINK_ETH_NUM_TX_DESC; i++) { |
666 | txs = &sc->sc_txstate[i]; | | 669 | txs = &sc->sc_txstate[i]; |
667 | if (txs->txs_mbuf != NULL) { | | 670 | if (txs->txs_mbuf != NULL) { |
668 | bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); | | 671 | bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); |
669 | m_freem(txs->txs_mbuf); | | 672 | m_freem(txs->txs_mbuf); |
670 | txs->txs_mbuf = NULL; | | 673 | txs->txs_mbuf = NULL; |
671 | } | | 674 | } |
672 | bus_dmamap_destroy(sc->sc_dmat, txs->txs_dmamap); | | 675 | bus_dmamap_destroy(sc->sc_dmat, txs->txs_dmamap); |
673 | } | | 676 | } |
674 | | | 677 | |
675 | bus_dmamap_unload(sc->sc_dmat, sc->sc_pdmamap); | | 678 | bus_dmamap_unload(sc->sc_dmat, sc->sc_pdmamap); |
676 | bus_dmamap_destroy(sc->sc_dmat, sc->sc_pdmamap); | | 679 | bus_dmamap_destroy(sc->sc_dmat, sc->sc_pdmamap); |
677 | bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_descs, | | 680 | bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_descs, |
678 | sizeof(struct ralink_descs)); | | 681 | sizeof(struct ralink_descs)); |
679 | bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_ndseg); | | 682 | bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_ndseg); |
680 | | | 683 | |
681 | bus_space_unmap(sc->sc_memt, sc->sc_sw_memh, sc->sc_sw_size); | | 684 | bus_space_unmap(sc->sc_memt, sc->sc_sw_memh, sc->sc_sw_size); |
682 | bus_space_unmap(sc->sc_memt, sc->sc_fe_memh, sc->sc_fe_size); | | 685 | bus_space_unmap(sc->sc_memt, sc->sc_fe_memh, sc->sc_fe_size); |
683 | | | 686 | |
684 | return 0; | | 687 | return 0; |
685 | } | | 688 | } |
686 | | | 689 | |
687 | /* | | 690 | /* |
688 | * ralink_eth_reset | | 691 | * ralink_eth_reset |
689 | */ | | 692 | */ |
690 | static void | | 693 | static void |
691 | ralink_eth_reset(ralink_eth_softc_t *sc) | | 694 | ralink_eth_reset(ralink_eth_softc_t *sc) |
| @@ -722,61 +725,61 @@ static void | | | @@ -722,61 +725,61 @@ static void |
722 | ralink_eth_hw_init(ralink_eth_softc_t *sc) | | 725 | ralink_eth_hw_init(ralink_eth_softc_t *sc) |
723 | { | | 726 | { |
724 | RALINK_DEBUG_FUNC_ENTRY(); | | 727 | RALINK_DEBUG_FUNC_ENTRY(); |
725 | struct ralink_eth_txstate *txs; | | 728 | struct ralink_eth_txstate *txs; |
726 | uint32_t r; | | 729 | uint32_t r; |
727 | int i; | | 730 | int i; |
728 | | | 731 | |
729 | /* reset to a known good state */ | | 732 | /* reset to a known good state */ |
730 | ralink_eth_reset(sc); | | 733 | ralink_eth_reset(sc); |
731 | | | 734 | |
732 | #if defined(RT3050) || defined(RT3052) | | 735 | #if defined(RT3050) || defined(RT3052) |
733 | /* Bring the switch to a sane default state (from linux driver) */ | | 736 | /* Bring the switch to a sane default state (from linux driver) */ |
734 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_SGC2, | | 737 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_SGC2, |
735 | 0x00000000); | | 738 | 0x00000000); |
736 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_PFC1, | | 739 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_PFC1, |
737 | 0x00405555); /* check VLAN tag on port forward */ | | 740 | 0x00405555); /* check VLAN tag on port forward */ |
738 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_VLANI0, | | 741 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_VLANI0, |
739 | 0x00002001); | | 742 | 0x00002001); |
740 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_PVIDC0, | | 743 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_PVIDC0, |
741 | 0x00001002); | | 744 | 0x00001002); |
742 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_PVIDC1, | | 745 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_PVIDC1, |
743 | 0x00001001); | | 746 | 0x00001001); |
744 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_PVIDC2, | | 747 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_PVIDC2, |
745 | 0x00001001); | | 748 | 0x00001001); |
746 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_VMSC0, | | 749 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_VMSC0, |
747 | 0xffff417e); | | 750 | 0xffff417e); |
748 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_POC0, | | 751 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_POC0, |
749 | 0x00007f7f); | | 752 | 0x00007f7f); |
750 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_POC2, | | 753 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_POC2, |
751 | 0x00007f3f); | | 754 | 0x00007f3f); |
752 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_FTC2, | | 755 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_FTC2, |
753 | 0x00d6500c); | | 756 | 0x00d6500c); |
754 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_SWGC, | | 757 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_SWGC, |
755 | 0x0008a301); /* hashing algorithm=XOR48 */ | | 758 | 0x0008a301); /* hashing algorithm=XOR48 */ |
756 | /* aging interval=300sec */ | | 759 | /* aging interval=300sec */ |
757 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_SOCPC, | | 760 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_SOCPC, |
758 | 0x02404040); | | 761 | 0x02404040); |
759 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_FPORT, | | 762 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_FPORT, |
760 | 0x3f502b28); /* Change polling Ext PHY Addr=0x0 */ | | 763 | 0x3f502b28); /* Change polling Ext PHY Addr=0x0 */ |
761 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_FPA, | | 764 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_FPA, |
762 | 0x00000000); | | 765 | 0x00000000); |
763 | | | 766 | |
764 | /* do some mii magic TODO: define these registers/bits */ | | 767 | /* do some mii magic TODO: define these registers/bits */ |
765 | /* lower down PHY 10Mbps mode power */ | | 768 | /* lower down PHY 10Mbps mode power */ |
766 | /* select local register */ | | 769 | /* select local register */ |
767 | ralink_eth_mii_write(sc->sc_dev, 0, 31, 0x8000); | | 770 | ralink_eth_mii_write(sc->sc_dev, 0, 31, 0x8000); |
768 | | | 771 | |
769 | for (i=0;i<5;i++){ | | 772 | for (i=0; i < 5; i++) { |
770 | /* set TX10 waveform coefficient */ | | 773 | /* set TX10 waveform coefficient */ |
771 | ralink_eth_mii_write(sc->sc_dev, i, 26, 0x1601); | | 774 | ralink_eth_mii_write(sc->sc_dev, i, 26, 0x1601); |
772 | | | 775 | |
773 | /* set TX100/TX10 AD/DA current bias */ | | 776 | /* set TX100/TX10 AD/DA current bias */ |
774 | ralink_eth_mii_write(sc->sc_dev, i, 29, 0x7058); | | 777 | ralink_eth_mii_write(sc->sc_dev, i, 29, 0x7058); |
775 | | | 778 | |
776 | /* set TX100 slew rate control */ | | 779 | /* set TX100 slew rate control */ |
777 | ralink_eth_mii_write(sc->sc_dev, i, 30, 0x0018); | | 780 | ralink_eth_mii_write(sc->sc_dev, i, 30, 0x0018); |
778 | } | | 781 | } |
779 | | | 782 | |
780 | /* PHY IOT */ | | 783 | /* PHY IOT */ |
781 | | | 784 | |
782 | /* select global register */ | | 785 | /* select global register */ |
| @@ -789,34 +792,34 @@ ralink_eth_hw_init(ralink_eth_softc_t *s | | | @@ -789,34 +792,34 @@ ralink_eth_hw_init(ralink_eth_softc_t *s |
789 | ralink_eth_mii_write(sc->sc_dev, 0, 17, 0x0fe0); | | 792 | ralink_eth_mii_write(sc->sc_dev, 0, 17, 0x0fe0); |
790 | | | 793 | |
791 | /* set squelch amplitude to higher threshold */ | | 794 | /* set squelch amplitude to higher threshold */ |
792 | ralink_eth_mii_write(sc->sc_dev, 0, 18, 0x40ba); | | 795 | ralink_eth_mii_write(sc->sc_dev, 0, 18, 0x40ba); |
793 | | | 796 | |
794 | /* longer TP_IDL tail length */ | | 797 | /* longer TP_IDL tail length */ |
795 | ralink_eth_mii_write(sc->sc_dev, 0, 14, 0x65); | | 798 | ralink_eth_mii_write(sc->sc_dev, 0, 14, 0x65); |
796 | | | 799 | |
797 | /* select local register */ | | 800 | /* select local register */ |
798 | ralink_eth_mii_write(sc->sc_dev, 0, 31, 0x8000); | | 801 | ralink_eth_mii_write(sc->sc_dev, 0, 31, 0x8000); |
799 | #else | | 802 | #else |
800 | /* GE1 + GigSW */ | | 803 | /* GE1 + GigSW */ |
801 | fe_write(sc, RA_FE_MDIO_CFG1, | | 804 | fe_write(sc, RA_FE_MDIO_CFG1, |
802 | MDIO_CFG_PHY_ADDR(0x1f) | | | 805 | MDIO_CFG_PHY_ADDR(0x1f) | |
803 | MDIO_CFG_BP_EN | | | 806 | MDIO_CFG_BP_EN | |
804 | MDIO_CFG_FORCE_CFG | | | 807 | MDIO_CFG_FORCE_CFG | |
805 | MDIO_CFG_SPEED(MDIO_CFG_SPEED_1000M) | | | 808 | MDIO_CFG_SPEED(MDIO_CFG_SPEED_1000M) | |
806 | MDIO_CFG_FULL_DUPLEX | | | 809 | MDIO_CFG_FULL_DUPLEX | |
807 | MDIO_CFG_FC_TX | | | 810 | MDIO_CFG_FC_TX | |
808 | MDIO_CFG_FC_RX | | | 811 | MDIO_CFG_FC_RX | |
809 | MDIO_CFG_TX_CLK_MODE(MDIO_CFG_TX_CLK_MODE_3COM)); | | 812 | MDIO_CFG_TX_CLK_MODE(MDIO_CFG_TX_CLK_MODE_3COM)); |
810 | #endif | | 813 | #endif |
811 | | | 814 | |
812 | /* | | 815 | /* |
813 | * TODO: QOS - RT3052 has 4 TX queues for QOS, | | 816 | * TODO: QOS - RT3052 has 4 TX queues for QOS, |
814 | * forgoing for 1 for simplicity | | 817 | * forgoing for 1 for simplicity |
815 | */ | | 818 | */ |
816 | | | 819 | |
817 | /* | | 820 | /* |
818 | * Allocate DMA accessible memory for TX/RX descriptor rings | | 821 | * Allocate DMA accessible memory for TX/RX descriptor rings |
819 | */ | | 822 | */ |
820 | | | 823 | |
821 | /* Initialize the TX queues. */ | | 824 | /* Initialize the TX queues. */ |
822 | SIMPLEQ_INIT(&sc->sc_txfreeq); | | 825 | SIMPLEQ_INIT(&sc->sc_txfreeq); |
| @@ -831,152 +834,152 @@ ralink_eth_hw_init(ralink_eth_softc_t *s | | | @@ -831,152 +834,152 @@ ralink_eth_hw_init(ralink_eth_softc_t *s |
831 | /* setup the freeq as well */ | | 834 | /* setup the freeq as well */ |
832 | txs = &sc->sc_txstate[i]; | | 835 | txs = &sc->sc_txstate[i]; |
833 | txs->txs_mbuf = NULL; | | 836 | txs->txs_mbuf = NULL; |
834 | txs->txs_idx = i; | | 837 | txs->txs_idx = i; |
835 | SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); | | 838 | SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); |
836 | } | | 839 | } |
837 | | | 840 | |
838 | /* | | 841 | /* |
839 | * Flush the TX descriptors | | 842 | * Flush the TX descriptors |
840 | * - TODO: can we just access descriptors via KSEG1 | | 843 | * - TODO: can we just access descriptors via KSEG1 |
841 | * to avoid the flush? | | 844 | * to avoid the flush? |
842 | */ | | 845 | */ |
843 | bus_dmamap_sync(sc->sc_dmat, sc->sc_pdmamap, | | 846 | bus_dmamap_sync(sc->sc_dmat, sc->sc_pdmamap, |
844 | (int)&sc->sc_txdesc - (int)sc->sc_descs, sizeof(sc->sc_txdesc), | | 847 | (int)&sc->sc_txdesc - (int)sc->sc_descs, sizeof(sc->sc_txdesc), |
845 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); | | 848 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
846 | | | 849 | |
847 | /* Initialize the RX descriptor ring */ | | 850 | /* Initialize the RX descriptor ring */ |
848 | memset(sc->sc_rxdesc, 0, sizeof(sc->sc_rxdesc)); | | 851 | memset(sc->sc_rxdesc, 0, sizeof(sc->sc_rxdesc)); |
849 | for (i = 0; i < RALINK_ETH_NUM_RX_DESC; i++) { | | 852 | for (i = 0; i < RALINK_ETH_NUM_RX_DESC; i++) { |
850 | if (ralink_eth_add_rxbuf(sc, i)) { | | 853 | if (ralink_eth_add_rxbuf(sc, i)) { |
851 | panic("Can't allocate rx mbuf\n"); | | 854 | panic("Can't allocate rx mbuf\n"); |
852 | } | | 855 | } |
853 | } | | 856 | } |
854 | | | 857 | |
855 | /* | | 858 | /* |
856 | * Flush the RX descriptors | | 859 | * Flush the RX descriptors |
857 | * - TODO: can we just access descriptors via KSEG1 | | 860 | * - TODO: can we just access descriptors via KSEG1 |
858 | * to avoid the flush? | | 861 | * to avoid the flush? |
859 | */ | | 862 | */ |
860 | bus_dmamap_sync(sc->sc_dmat, sc->sc_pdmamap, | | 863 | bus_dmamap_sync(sc->sc_dmat, sc->sc_pdmamap, |
861 | (int)&sc->sc_rxdesc - (int)sc->sc_descs, sizeof(sc->sc_rxdesc), | | 864 | (int)&sc->sc_rxdesc - (int)sc->sc_descs, sizeof(sc->sc_rxdesc), |
862 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); | | 865 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
863 | | | 866 | |
864 | /* Clear the PDMA state */ | | 867 | /* Clear the PDMA state */ |
865 | r = fe_read(sc, RA_FE_PDMA_GLOBAL_CFG); | | 868 | r = fe_read(sc, RA_FE_PDMA_GLOBAL_CFG); |
866 | r &= 0xff; | | 869 | r &= 0xff; |
867 | fe_write(sc, RA_FE_PDMA_GLOBAL_CFG, r); | | 870 | fe_write(sc, RA_FE_PDMA_GLOBAL_CFG, r); |
868 | (void) fe_read(sc, RA_FE_PDMA_GLOBAL_CFG); | | 871 | (void) fe_read(sc, RA_FE_PDMA_GLOBAL_CFG); |
869 | | | 872 | |
870 | /* Setup the PDMA VLAN ID's */ | | 873 | /* Setup the PDMA VLAN ID's */ |
871 | fe_write(sc, RA_FE_VLAN_ID_0001, 0x00010000); | | 874 | fe_write(sc, RA_FE_VLAN_ID_0001, 0x00010000); |
872 | fe_write(sc, RA_FE_VLAN_ID_0203, 0x00030002); | | 875 | fe_write(sc, RA_FE_VLAN_ID_0203, 0x00030002); |
873 | fe_write(sc, RA_FE_VLAN_ID_0405, 0x00050004); | | 876 | fe_write(sc, RA_FE_VLAN_ID_0405, 0x00050004); |
874 | fe_write(sc, RA_FE_VLAN_ID_0607, 0x00070006); | | 877 | fe_write(sc, RA_FE_VLAN_ID_0607, 0x00070006); |
875 | fe_write(sc, RA_FE_VLAN_ID_0809, 0x00090008); | | 878 | fe_write(sc, RA_FE_VLAN_ID_0809, 0x00090008); |
876 | fe_write(sc, RA_FE_VLAN_ID_1011, 0x000b000a); | | 879 | fe_write(sc, RA_FE_VLAN_ID_1011, 0x000b000a); |
877 | fe_write(sc, RA_FE_VLAN_ID_1213, 0x000d000c); | | 880 | fe_write(sc, RA_FE_VLAN_ID_1213, 0x000d000c); |
878 | fe_write(sc, RA_FE_VLAN_ID_1415, 0x000f000e); | | 881 | fe_write(sc, RA_FE_VLAN_ID_1415, 0x000f000e); |
879 | | | 882 | |
880 | /* Give the TX and RX rings to the chip. */ | | 883 | /* Give the TX and RX rings to the chip. */ |
881 | fe_write(sc, RA_FE_PDMA_TX0_PTR, | | 884 | fe_write(sc, RA_FE_PDMA_TX0_PTR, |
882 | htole32(MIPS_KSEG0_TO_PHYS(&sc->sc_txdesc))); | | 885 | htole32(MIPS_KSEG0_TO_PHYS(&sc->sc_txdesc))); |
883 | fe_write(sc, RA_FE_PDMA_TX0_COUNT, htole32(RALINK_ETH_NUM_TX_DESC)); | | 886 | fe_write(sc, RA_FE_PDMA_TX0_COUNT, htole32(RALINK_ETH_NUM_TX_DESC)); |
884 | fe_write(sc, RA_FE_PDMA_TX0_CPU_IDX, 0); | | 887 | fe_write(sc, RA_FE_PDMA_TX0_CPU_IDX, 0); |
885 | fe_write(sc, RA_FE_PDMA_RESET_IDX, PDMA_RST_TX0); | | 888 | fe_write(sc, RA_FE_PDMA_RESET_IDX, PDMA_RST_TX0); |
886 | | | 889 | |
887 | fe_write(sc, RA_FE_PDMA_RX0_PTR, | | 890 | fe_write(sc, RA_FE_PDMA_RX0_PTR, |
888 | htole32(MIPS_KSEG0_TO_PHYS(&sc->sc_rxdesc))); | | 891 | htole32(MIPS_KSEG0_TO_PHYS(&sc->sc_rxdesc))); |
889 | fe_write(sc, RA_FE_PDMA_RX0_COUNT, htole32(RALINK_ETH_NUM_RX_DESC)); | | 892 | fe_write(sc, RA_FE_PDMA_RX0_COUNT, htole32(RALINK_ETH_NUM_RX_DESC)); |
890 | fe_write(sc, RA_FE_PDMA_RX0_CPU_IDX, | | 893 | fe_write(sc, RA_FE_PDMA_RX0_CPU_IDX, |
891 | htole32(RALINK_ETH_NUM_RX_DESC - 1)); | | 894 | htole32(RALINK_ETH_NUM_RX_DESC - 1)); |
892 | fe_write(sc, RA_FE_PDMA_RESET_IDX, PDMA_RST_RX0); | | 895 | fe_write(sc, RA_FE_PDMA_RESET_IDX, PDMA_RST_RX0); |
893 | fe_write(sc, RA_FE_PDMA_RX0_CPU_IDX, | | 896 | fe_write(sc, RA_FE_PDMA_RX0_CPU_IDX, |
894 | htole32(RALINK_ETH_NUM_RX_DESC - 1)); | | 897 | htole32(RALINK_ETH_NUM_RX_DESC - 1)); |
895 | | | 898 | |
896 | /* Start PDMA */ | | 899 | /* Start PDMA */ |
897 | fe_write(sc, RA_FE_PDMA_GLOBAL_CFG, | | 900 | fe_write(sc, RA_FE_PDMA_GLOBAL_CFG, |
898 | FE_PDMA_GLOBAL_CFG_TX_WB_DDONE | | | 901 | FE_PDMA_GLOBAL_CFG_TX_WB_DDONE | |
899 | FE_PDMA_GLOBAL_CFG_RX_DMA_EN | | | 902 | FE_PDMA_GLOBAL_CFG_RX_DMA_EN | |
900 | FE_PDMA_GLOBAL_CFG_TX_DMA_EN | | | 903 | FE_PDMA_GLOBAL_CFG_TX_DMA_EN | |
901 | FE_PDMA_GLOBAL_CFG_BURST_SZ_4); | | 904 | FE_PDMA_GLOBAL_CFG_BURST_SZ_4); |
902 | | | 905 | |
903 | /* Setup the clock for the Frame Engine */ | | 906 | /* Setup the clock for the Frame Engine */ |
904 | fe_write(sc, RA_FE_GLOBAL_CFG, | | 907 | fe_write(sc, RA_FE_GLOBAL_CFG, |
905 | FE_GLOBAL_CFG_EXT_VLAN(0x8100) | | | 908 | FE_GLOBAL_CFG_EXT_VLAN(0x8100) | |
906 | FE_GLOBAL_CFG_US_CLK(RA_BUS_FREQ / 1000000) | | | 909 | FE_GLOBAL_CFG_US_CLK(RA_BUS_FREQ / 1000000) | |
907 | FE_GLOBAL_CFG_L2_SPACE(0x8)); | | 910 | FE_GLOBAL_CFG_L2_SPACE(0x8)); |
908 | | | 911 | |
909 | /* Turn on all interrupts */ | | 912 | /* Turn on all interrupts */ |
910 | fe_write(sc, RA_FE_INT_ENABLE, | | 913 | fe_write(sc, RA_FE_INT_ENABLE, |
911 | FE_INT_RX | FE_INT_TX3 | FE_INT_TX2 | FE_INT_TX1 | FE_INT_TX0); | | 914 | FE_INT_RX | FE_INT_TX3 | FE_INT_TX2 | FE_INT_TX1 | FE_INT_TX0); |
912 | | | 915 | |
913 | /* | | 916 | /* |
914 | * Configure GDMA forwarding | | 917 | * Configure GDMA forwarding |
915 | * - default all packets to CPU | | 918 | * - default all packets to CPU |
916 | * - Turn on auto-CRC | | 919 | * - Turn on auto-CRC |
917 | */ | | 920 | */ |
918 | #if 0 | | 921 | #if 0 |
919 | fe_write(sc, RA_FE_GDMA1_FWD_CFG, | | 922 | fe_write(sc, RA_FE_GDMA1_FWD_CFG, |
920 | (FE_GDMA_FWD_CFG_DIS_TX_CRC | FE_GDMA_FWD_CFG_DIS_TX_PAD)); | | 923 | (FE_GDMA_FWD_CFG_DIS_TX_CRC | FE_GDMA_FWD_CFG_DIS_TX_PAD)); |
921 | #endif | | 924 | #endif |
922 | fe_write(sc, RA_FE_GDMA1_FWD_CFG, | | 925 | fe_write(sc, RA_FE_GDMA1_FWD_CFG, |
923 | FE_GDMA_FWD_CFG_JUMBO_LEN(MCLBYTES/1024) | | | 926 | FE_GDMA_FWD_CFG_JUMBO_LEN(MCLBYTES/1024) | |
924 | FE_GDMA_FWD_CFG_STRIP_RX_CRC | | | 927 | FE_GDMA_FWD_CFG_STRIP_RX_CRC | |
925 | FE_GDMA_FWD_CFG_IP4_CRC_EN | | | 928 | FE_GDMA_FWD_CFG_IP4_CRC_EN | |
926 | FE_GDMA_FWD_CFG_TCP_CRC_EN | | | 929 | FE_GDMA_FWD_CFG_TCP_CRC_EN | |
927 | FE_GDMA_FWD_CFG_UDP_CRC_EN); | | 930 | FE_GDMA_FWD_CFG_UDP_CRC_EN); |
928 | | | 931 | |
929 | /* CDMA also needs CRCs turned on */ | | 932 | /* CDMA also needs CRCs turned on */ |
930 | r = fe_read(sc, RA_FE_CDMA_CSG_CFG); | | 933 | r = fe_read(sc, RA_FE_CDMA_CSG_CFG); |
931 | r |= (FE_CDMA_CSG_CFG_IP4_CRC_EN | FE_CDMA_CSG_CFG_UDP_CRC_EN | | | 934 | r |= (FE_CDMA_CSG_CFG_IP4_CRC_EN | FE_CDMA_CSG_CFG_UDP_CRC_EN | |
932 | FE_CDMA_CSG_CFG_TCP_CRC_EN); | | 935 | FE_CDMA_CSG_CFG_TCP_CRC_EN); |
933 | fe_write(sc, RA_FE_CDMA_CSG_CFG, r); | | 936 | fe_write(sc, RA_FE_CDMA_CSG_CFG, r); |
934 | | | 937 | |
935 | /* Configure Flow Control Thresholds */ | | 938 | /* Configure Flow Control Thresholds */ |
936 | #ifdef RT3883 | | 939 | #ifdef RT3883 |
937 | fe_write(sc, RA_FE_PSE_FQ_CFG, | | 940 | fe_write(sc, RA_FE_PSE_FQ_CFG, |
938 | FE_PSE_FQ_MAX_COUNT(0xff) | | | 941 | FE_PSE_FQ_MAX_COUNT(0xff) | |
939 | FE_PSE_FQ_FC_RELEASE(0x90) | | | 942 | FE_PSE_FQ_FC_RELEASE(0x90) | |
940 | FE_PSE_FQ_FC_ASSERT(0x80)); | | 943 | FE_PSE_FQ_FC_ASSERT(0x80)); |
941 | #else | | 944 | #else |
942 | fe_write(sc, RA_FE_PSE_FQ_CFG, | | 945 | fe_write(sc, RA_FE_PSE_FQ_CFG, |
943 | FE_PSE_FQ_MAX_COUNT(0x80) | | | 946 | FE_PSE_FQ_MAX_COUNT(0x80) | |
944 | FE_PSE_FQ_FC_RELEASE(0x50) | | | 947 | FE_PSE_FQ_FC_RELEASE(0x50) | |
945 | FE_PSE_FQ_FC_ASSERT(0x40)); | | 948 | FE_PSE_FQ_FC_ASSERT(0x40)); |
946 | #endif | | 949 | #endif |
947 | | | 950 | |
948 | #ifdef RALINK_ETH_DEBUG | | 951 | #ifdef RALINK_ETH_DEBUG |
949 | printf("FE_MDIO_CFG1: 0x%08x\n", fe_read(sc, RA_FE_MDIO_CFG1)); | | 952 | printf("FE_MDIO_CFG1: 0x%08x\n", fe_read(sc, RA_FE_MDIO_CFG1)); |
950 | printf("FE_MDIO_CFG2: 0x%08x\n", fe_read(sc, RA_FE_MDIO_CFG2)); | | 953 | printf("FE_MDIO_CFG2: 0x%08x\n", fe_read(sc, RA_FE_MDIO_CFG2)); |
951 | printf("FE_PDMA_TX0_PTR: %08x\n", fe_read(sc, RA_FE_PDMA_TX0_PTR)); | | 954 | printf("FE_PDMA_TX0_PTR: %08x\n", fe_read(sc, RA_FE_PDMA_TX0_PTR)); |
952 | printf("FE_PDMA_TX0_COUNT: %08x\n", | | 955 | printf("FE_PDMA_TX0_COUNT: %08x\n", |
953 | fe_read(sc, RA_FE_PDMA_TX0_COUNT)); | | 956 | fe_read(sc, RA_FE_PDMA_TX0_COUNT)); |
954 | printf("FE_PDMA_TX0_CPU_IDX: %08x\n", | | 957 | printf("FE_PDMA_TX0_CPU_IDX: %08x\n", |
955 | fe_read(sc, RA_FE_PDMA_TX0_CPU_IDX)); | | 958 | fe_read(sc, RA_FE_PDMA_TX0_CPU_IDX)); |
956 | printf("FE_PDMA_TX0_DMA_IDX: %08x\n", | | 959 | printf("FE_PDMA_TX0_DMA_IDX: %08x\n", |
957 | fe_read(sc, RA_FE_PDMA_TX0_DMA_IDX)); | | 960 | fe_read(sc, RA_FE_PDMA_TX0_DMA_IDX)); |
958 | printf("FE_PDMA_RX0_PTR: %08x\n", fe_read(sc, RA_FE_PDMA_RX0_PTR)); | | 961 | printf("FE_PDMA_RX0_PTR: %08x\n", fe_read(sc, RA_FE_PDMA_RX0_PTR)); |
959 | printf("FE_PDMA_RX0_COUNT: %08x\n", | | 962 | printf("FE_PDMA_RX0_COUNT: %08x\n", |
960 | fe_read(sc, RA_FE_PDMA_RX0_COUNT)); | | 963 | fe_read(sc, RA_FE_PDMA_RX0_COUNT)); |
961 | printf("FE_PDMA_RX0_CPU_IDX: %08x\n", | | 964 | printf("FE_PDMA_RX0_CPU_IDX: %08x\n", |
962 | fe_read(sc, RA_FE_PDMA_RX0_CPU_IDX)); | | 965 | fe_read(sc, RA_FE_PDMA_RX0_CPU_IDX)); |
963 | printf("FE_PDMA_RX0_DMA_IDX: %08x\n", | | 966 | printf("FE_PDMA_RX0_DMA_IDX: %08x\n", |
964 | fe_read(sc, RA_FE_PDMA_RX0_DMA_IDX)); | | 967 | fe_read(sc, RA_FE_PDMA_RX0_DMA_IDX)); |
965 | printf("FE_PDMA_GLOBAL_CFG: %08x\n", | | 968 | printf("FE_PDMA_GLOBAL_CFG: %08x\n", |
966 | fe_read(sc, RA_FE_PDMA_GLOBAL_CFG)); | | 969 | fe_read(sc, RA_FE_PDMA_GLOBAL_CFG)); |
967 | printf("FE_GLOBAL_CFG: %08x\n", fe_read(sc, RA_FE_GLOBAL_CFG)); | | 970 | printf("FE_GLOBAL_CFG: %08x\n", fe_read(sc, RA_FE_GLOBAL_CFG)); |
968 | printf("FE_GDMA1_FWD_CFG: %08x\n", | | 971 | printf("FE_GDMA1_FWD_CFG: %08x\n", |
969 | fe_read(sc, RA_FE_GDMA1_FWD_CFG)); | | 972 | fe_read(sc, RA_FE_GDMA1_FWD_CFG)); |
970 | printf("FE_CDMA_CSG_CFG: %08x\n", fe_read(sc, RA_FE_CDMA_CSG_CFG)); | | 973 | printf("FE_CDMA_CSG_CFG: %08x\n", fe_read(sc, RA_FE_CDMA_CSG_CFG)); |
971 | printf("FE_PSE_FQ_CFG: %08x\n", fe_read(sc, RA_FE_PSE_FQ_CFG)); | | 974 | printf("FE_PSE_FQ_CFG: %08x\n", fe_read(sc, RA_FE_PSE_FQ_CFG)); |
972 | #endif | | 975 | #endif |
973 | | | 976 | |
974 | /* Force PSE Reset to get everything finalized */ | | 977 | /* Force PSE Reset to get everything finalized */ |
975 | fe_write(sc, RA_FE_GLOBAL_RESET, FE_GLOBAL_RESET_PSE); | | 978 | fe_write(sc, RA_FE_GLOBAL_RESET, FE_GLOBAL_RESET_PSE); |
976 | fe_write(sc, RA_FE_GLOBAL_RESET, 0); | | 979 | fe_write(sc, RA_FE_GLOBAL_RESET, 0); |
977 | } | | 980 | } |
978 | | | 981 | |
979 | /* | | 982 | /* |
980 | * ralink_eth_init | | 983 | * ralink_eth_init |
981 | */ | | 984 | */ |
982 | static int | | 985 | static int |
| @@ -1055,36 +1058,36 @@ ralink_eth_add_rxbuf(ralink_eth_softc_t | | | @@ -1055,36 +1058,36 @@ ralink_eth_add_rxbuf(ralink_eth_softc_t |
1055 | m_freem(m); | | 1058 | m_freem(m); |
1056 | printf("MCLGET failed\n"); | | 1059 | printf("MCLGET failed\n"); |
1057 | sc->sc_evcnt_add_rxbuf_mcl_fail.ev_count++; | | 1060 | sc->sc_evcnt_add_rxbuf_mcl_fail.ev_count++; |
1058 | return ENOBUFS; | | 1061 | return ENOBUFS; |
1059 | } | | 1062 | } |
1060 | | | 1063 | |
1061 | m->m_data = m->m_ext.ext_buf; | | 1064 | m->m_data = m->m_ext.ext_buf; |
1062 | rxs->rxs_mbuf = m; | | 1065 | rxs->rxs_mbuf = m; |
1063 | | | 1066 | |
1064 | error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf, | | 1067 | error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf, |
1065 | m->m_ext.ext_size, NULL, BUS_DMA_READ|BUS_DMA_NOWAIT); | | 1068 | m->m_ext.ext_size, NULL, BUS_DMA_READ|BUS_DMA_NOWAIT); |
1066 | if (error) { | | 1069 | if (error) { |
1067 | aprint_error_dev(sc->sc_dev, "can't load rx DMA map %d, " | | 1070 | aprint_error_dev(sc->sc_dev, "can't load rx DMA map %d, " |
1068 | "error=%d\n", idx, error); | | 1071 | "error=%d\n", idx, error); |
1069 | panic(__func__); /* XXX */ | | 1072 | panic(__func__); /* XXX */ |
1070 | } | | 1073 | } |
1071 | | | 1074 | |
1072 | sc->sc_rxdesc[idx].data_ptr = MIPS_KSEG0_TO_PHYS( | | 1075 | sc->sc_rxdesc[idx].data_ptr = MIPS_KSEG0_TO_PHYS( |
1073 | rxs->rxs_dmamap->dm_segs[0].ds_addr + RALINK_ETHER_ALIGN); | | 1076 | rxs->rxs_dmamap->dm_segs[0].ds_addr + RALINK_ETHER_ALIGN); |
1074 | sc->sc_rxdesc[idx].rxd_info1 = RXD_LAST0; | | 1077 | sc->sc_rxdesc[idx].rxd_info1 = RXD_LAST0; |
1075 | | | 1078 | |
1076 | bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, | | 1079 | bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, |
1077 | rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); | | 1080 | rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); |
1078 | | | 1081 | |
1079 | return 0; | | 1082 | return 0; |
1080 | } | | 1083 | } |
1081 | | | 1084 | |
1082 | | | 1085 | |
1083 | /* | | 1086 | /* |
1084 | * ralink_eth_start | | 1087 | * ralink_eth_start |
1085 | */ | | 1088 | */ |
1086 | static void | | 1089 | static void |
1087 | ralink_eth_start(struct ifnet *ifp) | | 1090 | ralink_eth_start(struct ifnet *ifp) |
1088 | { | | 1091 | { |
1089 | RALINK_DEBUG_FUNC_ENTRY(); | | 1092 | RALINK_DEBUG_FUNC_ENTRY(); |
1090 | ralink_eth_softc_t * const sc = ifp->if_softc; | | 1093 | ralink_eth_softc_t * const sc = ifp->if_softc; |
| @@ -1108,119 +1111,119 @@ ralink_eth_start(struct ifnet *ifp) | | | @@ -1108,119 +1111,119 @@ ralink_eth_start(struct ifnet *ifp) |
1108 | * transmit descriptors. | | 1111 | * transmit descriptors. |
1109 | */ | | 1112 | */ |
1110 | while ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) != NULL) { | | 1113 | while ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) != NULL) { |
1111 | /* Grab a packet off the queue. */ | | 1114 | /* Grab a packet off the queue. */ |
1112 | IFQ_POLL(&ifp->if_snd, m0); | | 1115 | IFQ_POLL(&ifp->if_snd, m0); |
1113 | if (m0 == NULL) | | 1116 | if (m0 == NULL) |
1114 | break; | | 1117 | break; |
1115 | | | 1118 | |
1116 | dmamap = txs->txs_dmamap; | | 1119 | dmamap = txs->txs_dmamap; |
1117 | | | 1120 | |
1118 | if (m0->m_pkthdr.len < RALINK_MIN_BUF) { | | 1121 | if (m0->m_pkthdr.len < RALINK_MIN_BUF) { |
1119 | int padlen = 64 - m0->m_pkthdr.len; | | 1122 | int padlen = 64 - m0->m_pkthdr.len; |
1120 | m_copyback(m0, m0->m_pkthdr.len, padlen, | | 1123 | m_copyback(m0, m0->m_pkthdr.len, padlen, |
1121 | sc->ralink_zero_buf); | | 1124 | sc->ralink_zero_buf); |
1122 | /* TODO : need some checking here */ | | 1125 | /* TODO : need some checking here */ |
1123 | } | | 1126 | } |
1124 | | | 1127 | |
1125 | /* | | 1128 | /* |
1126 | * Do we need to align the buffer | | 1129 | * Do we need to align the buffer |
1127 | * or does the DMA map load fail? | | 1130 | * or does the DMA map load fail? |
1128 | */ | | 1131 | */ |
1129 | if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, | | 1132 | if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, |
1130 | BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) { | | 1133 | BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) { |
1131 | | | 1134 | |
1132 | /* Allocate a new mbuf for re-alignment */ | | 1135 | /* Allocate a new mbuf for re-alignment */ |
1133 | MGETHDR(m, M_DONTWAIT, MT_DATA); | | 1136 | MGETHDR(m, M_DONTWAIT, MT_DATA); |
1134 | if (m == NULL) { | | 1137 | if (m == NULL) { |
1135 | aprint_error_dev(sc->sc_dev, | | 1138 | aprint_error_dev(sc->sc_dev, |
1136 | "unable to allocate aligned Tx mbuf\n"); | | 1139 | "unable to allocate aligned Tx mbuf\n"); |
1137 | break; | | 1140 | break; |
1138 | } | | 1141 | } |
1139 | MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner); | | 1142 | MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner); |
1140 | if (m0->m_pkthdr.len > MHLEN) { | | 1143 | if (m0->m_pkthdr.len > MHLEN) { |
1141 | MCLGET(m, M_DONTWAIT); | | 1144 | MCLGET(m, M_DONTWAIT); |
1142 | if ((m->m_flags & M_EXT) == 0) { | | 1145 | if ((m->m_flags & M_EXT) == 0) { |
1143 | aprint_error_dev(sc->sc_dev, | | 1146 | aprint_error_dev(sc->sc_dev, |
1144 | "unable to allocate Tx cluster\n"); | | 1147 | "unable to allocate Tx cluster\n"); |
1145 | m_freem(m); | | 1148 | m_freem(m); |
1146 | break; | | 1149 | break; |
1147 | } | | 1150 | } |
1148 | } | | 1151 | } |
1149 | m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *)); | | 1152 | m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *)); |
1150 | m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len; | | 1153 | m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len; |
1151 | error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m, | | 1154 | error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m, |
1152 | BUS_DMA_WRITE|BUS_DMA_NOWAIT); | | 1155 | BUS_DMA_WRITE|BUS_DMA_NOWAIT); |
1153 | if (error) { | | 1156 | if (error) { |
1154 | aprint_error_dev(sc->sc_dev, | | 1157 | aprint_error_dev(sc->sc_dev, |
1155 | "unable to load Tx buffer error=%d\n", | | 1158 | "unable to load Tx buffer error=%d\n", |
1156 | error); | | 1159 | error); |
1157 | m_freem(m); | | 1160 | m_freem(m); |
1158 | break; | | 1161 | break; |
1159 | } | | 1162 | } |
1160 | } | | 1163 | } |
1161 | | | 1164 | |
1162 | IFQ_DEQUEUE(&ifp->if_snd, m0); | | 1165 | IFQ_DEQUEUE(&ifp->if_snd, m0); |
1163 | /* did we copy the buffer out already? */ | | 1166 | /* did we copy the buffer out already? */ |
1164 | if (m != NULL) { | | 1167 | if (m != NULL) { |
1165 | m_freem(m0); | | 1168 | m_freem(m0); |
1166 | m0 = m; | | 1169 | m0 = m; |
1167 | } | | 1170 | } |
1168 | | | 1171 | |
1169 | /* Sync the DMA map. */ | | 1172 | /* Sync the DMA map. */ |
1170 | bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, | | 1173 | bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, |
1171 | BUS_DMASYNC_PREWRITE); | | 1174 | BUS_DMASYNC_PREWRITE); |
1172 | | | 1175 | |
1173 | /* Initialize the transmit descriptor */ | | 1176 | /* Initialize the transmit descriptor */ |
1174 | sc->sc_txdesc[tx_cpu_idx].data_ptr0 = | | 1177 | sc->sc_txdesc[tx_cpu_idx].data_ptr0 = |
1175 | MIPS_KSEG0_TO_PHYS(dmamap->dm_segs[0].ds_addr); | | 1178 | MIPS_KSEG0_TO_PHYS(dmamap->dm_segs[0].ds_addr); |
1176 | sc->sc_txdesc[tx_cpu_idx].txd_info1 = | | 1179 | sc->sc_txdesc[tx_cpu_idx].txd_info1 = |
1177 | TXD_LEN0(dmamap->dm_segs[0].ds_len) | TXD_LAST0; | | 1180 | TXD_LEN0(dmamap->dm_segs[0].ds_len) | TXD_LAST0; |
1178 | sc->sc_txdesc[tx_cpu_idx].txd_info2 = | | 1181 | sc->sc_txdesc[tx_cpu_idx].txd_info2 = |
1179 | TXD_QN(3) | TXD_PN(TXD_PN_GDMA1); | | 1182 | TXD_QN(3) | TXD_PN(TXD_PN_GDMA1); |
1180 | sc->sc_txdesc[tx_cpu_idx].txd_info2 = TXD_QN(3) | | | 1183 | sc->sc_txdesc[tx_cpu_idx].txd_info2 = TXD_QN(3) | |
1181 | TXD_PN(TXD_PN_GDMA1) | TXD_VEN | | | 1184 | TXD_PN(TXD_PN_GDMA1) | TXD_VEN | |
1182 | // TXD_VIDX(pt->vlan_id) | | | 1185 | // TXD_VIDX(pt->vlan_id) | |
1183 | TXD_TCP_EN | TXD_UDP_EN | TXD_IP_EN; | | 1186 | TXD_TCP_EN | TXD_UDP_EN | TXD_IP_EN; |
1184 | | | 1187 | |
1185 | RALINK_DEBUG(RALINK_DEBUG_REG,"+tx(%d) 0x%08x: 0x%08x\n", | | 1188 | RALINK_DEBUG(RALINK_DEBUG_REG,"+tx(%d) 0x%08x: 0x%08x\n", |
1186 | tx_cpu_idx, (int)&sc->sc_txdesc[tx_cpu_idx].data_ptr0, | | 1189 | tx_cpu_idx, (int)&sc->sc_txdesc[tx_cpu_idx].data_ptr0, |
1187 | sc->sc_txdesc[tx_cpu_idx].data_ptr0); | | 1190 | sc->sc_txdesc[tx_cpu_idx].data_ptr0); |
1188 | RALINK_DEBUG(RALINK_DEBUG_REG,"+tx(%d) 0x%08x: 0x%08x\n", | | 1191 | RALINK_DEBUG(RALINK_DEBUG_REG,"+tx(%d) 0x%08x: 0x%08x\n", |
1189 | tx_cpu_idx, (int)&sc->sc_txdesc[tx_cpu_idx].txd_info1, | | 1192 | tx_cpu_idx, (int)&sc->sc_txdesc[tx_cpu_idx].txd_info1, |
1190 | sc->sc_txdesc[tx_cpu_idx].txd_info1); | | 1193 | sc->sc_txdesc[tx_cpu_idx].txd_info1); |
1191 | RALINK_DEBUG(RALINK_DEBUG_REG,"+tx(%d) 0x%08x: 0x%08x\n", | | 1194 | RALINK_DEBUG(RALINK_DEBUG_REG,"+tx(%d) 0x%08x: 0x%08x\n", |
1192 | tx_cpu_idx, (int)&sc->sc_txdesc[tx_cpu_idx].data_ptr1, | | 1195 | tx_cpu_idx, (int)&sc->sc_txdesc[tx_cpu_idx].data_ptr1, |
1193 | sc->sc_txdesc[tx_cpu_idx].data_ptr1); | | 1196 | sc->sc_txdesc[tx_cpu_idx].data_ptr1); |
1194 | RALINK_DEBUG(RALINK_DEBUG_REG,"+tx(%d) 0x%08x: 0x%08x\n", tx_cpu_idx, | | 1197 | RALINK_DEBUG(RALINK_DEBUG_REG,"+tx(%d) 0x%08x: 0x%08x\n", |
1195 | (int)&sc->sc_txdesc[tx_cpu_idx].txd_info2, | | 1198 | tx_cpu_idx, (int)&sc->sc_txdesc[tx_cpu_idx].txd_info2, |
1196 | sc->sc_txdesc[tx_cpu_idx].txd_info2); | | 1199 | sc->sc_txdesc[tx_cpu_idx].txd_info2); |
1197 | | | 1200 | |
1198 | /* sync the descriptor we're using. */ | | 1201 | /* sync the descriptor we're using. */ |
1199 | bus_dmamap_sync(sc->sc_dmat, sc->sc_pdmamap, | | 1202 | bus_dmamap_sync(sc->sc_dmat, sc->sc_pdmamap, |
1200 | (int)&sc->sc_txdesc[tx_cpu_idx] - (int)sc->sc_descs, | | 1203 | (int)&sc->sc_txdesc[tx_cpu_idx] - (int)sc->sc_descs, |
1201 | sizeof(struct ralink_tx_desc), | | 1204 | sizeof(struct ralink_tx_desc), |
1202 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); | | 1205 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
1203 | | | 1206 | |
1204 | /* | | 1207 | /* |
1205 | * Store a pointer to the packet so we can free it later, | | 1208 | * Store a pointer to the packet so we can free it later, |
1206 | * and remember what txdirty will be once the packet is | | 1209 | * and remember what txdirty will be once the packet is |
1207 | * done. | | 1210 | * done. |
1208 | */ | | 1211 | */ |
1209 | txs->txs_mbuf = m0; | | 1212 | txs->txs_mbuf = m0; |
1210 | sc->sc_pending_tx++; | | 1213 | sc->sc_pending_tx++; |
1211 | if (txs->txs_idx != tx_cpu_idx) { | | 1214 | if (txs->txs_idx != tx_cpu_idx) { |
1212 | panic("txs_idx doesn't match %d != %d\n", | | 1215 | panic("txs_idx doesn't match %d != %d\n", |
1213 | txs->txs_idx, tx_cpu_idx); | | 1216 | txs->txs_idx, tx_cpu_idx); |
1214 | } | | 1217 | } |
1215 | | | 1218 | |
1216 | SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q); | | 1219 | SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q); |
1217 | SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q); | | 1220 | SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q); |
1218 | | | 1221 | |
1219 | /* Pass the packet to any BPF listeners. */ | | 1222 | /* Pass the packet to any BPF listeners. */ |
1220 | bpf_mtap(ifp, m0); | | 1223 | bpf_mtap(ifp, m0); |
1221 | | | 1224 | |
1222 | /* Set a watchdog timer in case the chip flakes out. */ | | 1225 | /* Set a watchdog timer in case the chip flakes out. */ |
1223 | ifp->if_timer = 5; | | 1226 | ifp->if_timer = 5; |
1224 | | | 1227 | |
1225 | tx_cpu_idx = (tx_cpu_idx + 1) % RALINK_ETH_NUM_TX_DESC; | | 1228 | tx_cpu_idx = (tx_cpu_idx + 1) % RALINK_ETH_NUM_TX_DESC; |
1226 | | | 1229 | |
| @@ -1243,32 +1246,32 @@ ralink_eth_start(struct ifnet *ifp) | | | @@ -1243,32 +1246,32 @@ ralink_eth_start(struct ifnet *ifp) |
1243 | */ | | 1246 | */ |
1244 | static void | | 1247 | static void |
1245 | ralink_eth_watchdog(struct ifnet *ifp) | | 1248 | ralink_eth_watchdog(struct ifnet *ifp) |
1246 | { | | 1249 | { |
1247 | RALINK_DEBUG_FUNC_ENTRY(); | | 1250 | RALINK_DEBUG_FUNC_ENTRY(); |
1248 | ralink_eth_softc_t * const sc = ifp->if_softc; | | 1251 | ralink_eth_softc_t * const sc = ifp->if_softc; |
1249 | bool doing_transmit; | | 1252 | bool doing_transmit; |
1250 | | | 1253 | |
1251 | sc->sc_evcnt_watchdog.ev_count++; | | 1254 | sc->sc_evcnt_watchdog.ev_count++; |
1252 | doing_transmit = !SIMPLEQ_EMPTY(&sc->sc_txdirtyq); | | 1255 | doing_transmit = !SIMPLEQ_EMPTY(&sc->sc_txdirtyq); |
1253 | | | 1256 | |
1254 | if (doing_transmit) { | | 1257 | if (doing_transmit) { |
1255 | RALINK_DEBUG(RALINK_DEBUG_ERROR, "%s: transmit timeout\n", | | 1258 | RALINK_DEBUG(RALINK_DEBUG_ERROR, "%s: transmit timeout\n", |
1256 | ifp->if_xname); | | 1259 | ifp->if_xname); |
1257 | ifp->if_oerrors++; | | 1260 | ifp->if_oerrors++; |
1258 | sc->sc_evcnt_wd_tx.ev_count++; | | 1261 | sc->sc_evcnt_wd_tx.ev_count++; |
1259 | } else { | | 1262 | } else { |
1260 | RALINK_DEBUG(RALINK_DEBUG_ERROR, "%s: spurious watchog timeout\n", | | 1263 | RALINK_DEBUG(RALINK_DEBUG_ERROR, |
1261 | ifp->if_xname); | | 1264 | "%s: spurious watchog timeout\n", ifp->if_xname); |
1262 | sc->sc_evcnt_wd_spurious.ev_count++; | | 1265 | sc->sc_evcnt_wd_spurious.ev_count++; |
1263 | return; | | 1266 | return; |
1264 | } | | 1267 | } |
1265 | | | 1268 | |
1266 | sc->sc_evcnt_wd_reactivate.ev_count++; | | 1269 | sc->sc_evcnt_wd_reactivate.ev_count++; |
1267 | const int s = splnet(); | | 1270 | const int s = splnet(); |
1268 | /* deactive the active partitions, retaining the active information */ | | 1271 | /* deactive the active partitions, retaining the active information */ |
1269 | ralink_eth_disable(sc); | | 1272 | ralink_eth_disable(sc); |
1270 | ralink_eth_enable(sc); | | 1273 | ralink_eth_enable(sc); |
1271 | splx(s); | | 1274 | splx(s); |
1272 | | | 1275 | |
1273 | /* Try to get more packets going. */ | | 1276 | /* Try to get more packets going. */ |
1274 | ralink_eth_start(ifp); | | 1277 | ralink_eth_start(ifp); |
| @@ -1333,31 +1336,31 @@ ralink_eth_ioctl(struct ifnet *ifp, u_lo | | | @@ -1333,31 +1336,31 @@ ralink_eth_ioctl(struct ifnet *ifp, u_lo |
1333 | return error; | | 1336 | return error; |
1334 | } | | 1337 | } |
1335 | | | 1338 | |
1336 | /* | | 1339 | /* |
1337 | * ralink_eth_intr | | 1340 | * ralink_eth_intr |
1338 | * | | 1341 | * |
1339 | */ | | 1342 | */ |
1340 | static int | | 1343 | static int |
1341 | ralink_eth_intr(void *arg) | | 1344 | ralink_eth_intr(void *arg) |
1342 | { | | 1345 | { |
1343 | RALINK_DEBUG_FUNC_ENTRY(); | | 1346 | RALINK_DEBUG_FUNC_ENTRY(); |
1344 | ralink_eth_softc_t * const sc = arg; | | 1347 | ralink_eth_softc_t * const sc = arg; |
1345 | | | 1348 | |
1346 | for (u_int n=0;; n = 1) { | | 1349 | for (u_int n = 0;; n = 1) { |
1347 | u_int32_t status = fe_read(sc, RA_FE_INT_STATUS); | | 1350 | u_int32_t status = fe_read(sc, RA_FE_INT_STATUS); |
1348 | fe_write(sc, RA_FE_INT_STATUS, ~0); | | 1351 | fe_write(sc, RA_FE_INT_STATUS, ~0); |
1349 | RALINK_DEBUG(RALINK_DEBUG_REG,"%s() status: 0x%08x\n", | | 1352 | RALINK_DEBUG(RALINK_DEBUG_REG,"%s() status: 0x%08x\n", |
1350 | __func__, status); | | 1353 | __func__, status); |
1351 | | | 1354 | |
1352 | if ((status & (FE_INT_RX | FE_INT_TX0)) == 0) { | | 1355 | if ((status & (FE_INT_RX | FE_INT_TX0)) == 0) { |
1353 | if (n == 0) | | 1356 | if (n == 0) |
1354 | sc->sc_evcnt_spurious_intr.ev_count++; | | 1357 | sc->sc_evcnt_spurious_intr.ev_count++; |
1355 | return (n != 0); | | 1358 | return (n != 0); |
1356 | } | | 1359 | } |
1357 | | | 1360 | |
1358 | if (status & FE_INT_RX) | | 1361 | if (status & FE_INT_RX) |
1359 | ralink_eth_rxintr(sc); | | 1362 | ralink_eth_rxintr(sc); |
1360 | | | 1363 | |
1361 | if (status & FE_INT_TX0) | | 1364 | if (status & FE_INT_TX0) |
1362 | ralink_eth_txintr(sc); | | 1365 | ralink_eth_txintr(sc); |
1363 | } | | 1366 | } |
| @@ -1381,108 +1384,108 @@ ralink_eth_rxintr(ralink_eth_softc_t *sc | | | @@ -1381,108 +1384,108 @@ ralink_eth_rxintr(ralink_eth_softc_t *sc |
1381 | int len; | | 1384 | int len; |
1382 | int rx_cpu_idx; | | 1385 | int rx_cpu_idx; |
1383 | | | 1386 | |
1384 | KASSERT(curcpu()->ci_cpl >= IPL_NET); | | 1387 | KASSERT(curcpu()->ci_cpl >= IPL_NET); |
1385 | sc->sc_evcnt_rxintr.ev_count++; | | 1388 | sc->sc_evcnt_rxintr.ev_count++; |
1386 | rx_cpu_idx = fe_read(sc, RA_FE_PDMA_RX0_CPU_IDX); | | 1389 | rx_cpu_idx = fe_read(sc, RA_FE_PDMA_RX0_CPU_IDX); |
1387 | | | 1390 | |
1388 | for (;;) { | | 1391 | for (;;) { |
1389 | rx_cpu_idx = (rx_cpu_idx + 1) % RALINK_ETH_NUM_RX_DESC; | | 1392 | rx_cpu_idx = (rx_cpu_idx + 1) % RALINK_ETH_NUM_RX_DESC; |
1390 | | | 1393 | |
1391 | rxs = &sc->sc_rxstate[rx_cpu_idx]; | | 1394 | rxs = &sc->sc_rxstate[rx_cpu_idx]; |
1392 | | | 1395 | |
1393 | bus_dmamap_sync(sc->sc_dmat, sc->sc_pdmamap, | | 1396 | bus_dmamap_sync(sc->sc_dmat, sc->sc_pdmamap, |
1394 | (int)&sc->sc_rxdesc[rx_cpu_idx] - (int)sc->sc_descs, | | 1397 | (int)&sc->sc_rxdesc[rx_cpu_idx] - (int)sc->sc_descs, |
1395 | sizeof(struct ralink_rx_desc), | | 1398 | sizeof(struct ralink_rx_desc), |
1396 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); | | 1399 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); |
1397 | | | 1400 | |
1398 | RALINK_DEBUG(RALINK_DEBUG_REG,"rx(%d) 0x%08x: 0x%08x\n", | | 1401 | RALINK_DEBUG(RALINK_DEBUG_REG,"rx(%d) 0x%08x: 0x%08x\n", |
1399 | rx_cpu_idx, (int)&sc->sc_rxdesc[rx_cpu_idx].data_ptr, | | 1402 | rx_cpu_idx, (int)&sc->sc_rxdesc[rx_cpu_idx].data_ptr, |
1400 | sc->sc_rxdesc[rx_cpu_idx].data_ptr); | | 1403 | sc->sc_rxdesc[rx_cpu_idx].data_ptr); |
1401 | RALINK_DEBUG(RALINK_DEBUG_REG,"rx(%d) 0x%08x: 0x%08x\n", | | 1404 | RALINK_DEBUG(RALINK_DEBUG_REG,"rx(%d) 0x%08x: 0x%08x\n", |
1402 | rx_cpu_idx, (int)&sc->sc_rxdesc[rx_cpu_idx].rxd_info1, | | 1405 | rx_cpu_idx, (int)&sc->sc_rxdesc[rx_cpu_idx].rxd_info1, |
1403 | sc->sc_rxdesc[rx_cpu_idx].rxd_info1); | | 1406 | sc->sc_rxdesc[rx_cpu_idx].rxd_info1); |
1404 | RALINK_DEBUG(RALINK_DEBUG_REG,"rx(%d) 0x%08x: 0x%08x\n", rx_cpu_idx, | | | |
1405 | (int)&sc->sc_rxdesc[rx_cpu_idx].unused, | | | |
1406 | sc->sc_rxdesc[rx_cpu_idx].unused); | | | |
1407 | RALINK_DEBUG(RALINK_DEBUG_REG,"rx(%d) 0x%08x: 0x%08x\n", | | 1407 | RALINK_DEBUG(RALINK_DEBUG_REG,"rx(%d) 0x%08x: 0x%08x\n", |
1408 | rx_cpu_idx, (int)&sc->sc_rxdesc[rx_cpu_idx].rxd_info2, | | 1408 | rx_cpu_idx, (int)&sc->sc_rxdesc[rx_cpu_idx].unused, |
1409 | sc->sc_rxdesc[rx_cpu_idx].rxd_info2); | | 1409 | sc->sc_rxdesc[rx_cpu_idx].unused); |
| | | 1410 | RALINK_DEBUG(RALINK_DEBUG_REG,"rx(%d) 0x%08x: 0x%08x\n", |
| | | 1411 | rx_cpu_idx, (int)&sc->sc_rxdesc[rx_cpu_idx].rxd_info2, |
| | | 1412 | sc->sc_rxdesc[rx_cpu_idx].rxd_info2); |
1410 | | | 1413 | |
1411 | if (!(sc->sc_rxdesc[rx_cpu_idx].rxd_info1 & RXD_DDONE)) | | 1414 | if (!(sc->sc_rxdesc[rx_cpu_idx].rxd_info1 & RXD_DDONE)) |
1412 | break; | | 1415 | break; |
1413 | | | 1416 | |
1414 | bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, | | 1417 | bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, |
1415 | rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); | | 1418 | rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); |
1416 | | | 1419 | |
1417 | /* | | 1420 | /* |
1418 | * No errors; receive the packet. | | 1421 | * No errors; receive the packet. |
1419 | * Note the chip includes the CRC with every packet. | | 1422 | * Note the chip includes the CRC with every packet. |
1420 | */ | | 1423 | */ |
1421 | len = RXD_LEN0(sc->sc_rxdesc[rx_cpu_idx].rxd_info1); | | 1424 | len = RXD_LEN0(sc->sc_rxdesc[rx_cpu_idx].rxd_info1); |
1422 | | | 1425 | |
1423 | RALINK_DEBUG(RALINK_DEBUG_REG,"rx(%d) packet rx %d bytes\n", | | 1426 | RALINK_DEBUG(RALINK_DEBUG_REG,"rx(%d) packet rx %d bytes\n", |
1424 | rx_cpu_idx, len); | | 1427 | rx_cpu_idx, len); |
1425 | | | 1428 | |
1426 | /* | | 1429 | /* |
1427 | * Allocate a new mbuf cluster. If that fails, we are | | 1430 | * Allocate a new mbuf cluster. If that fails, we are |
1428 | * out of memory, and must drop the packet and recycle | | 1431 | * out of memory, and must drop the packet and recycle |
1429 | * the buffer that's already attached to this descriptor. | | 1432 | * the buffer that's already attached to this descriptor. |
1430 | */ | | 1433 | */ |
1431 | m = rxs->rxs_mbuf; | | 1434 | m = rxs->rxs_mbuf; |
1432 | if (ralink_eth_add_rxbuf(sc, rx_cpu_idx) != 0) | | 1435 | if (ralink_eth_add_rxbuf(sc, rx_cpu_idx) != 0) |
1433 | break; | | 1436 | break; |
1434 | m->m_data += RALINK_ETHER_ALIGN; | | 1437 | m->m_data += RALINK_ETHER_ALIGN; |
1435 | m->m_pkthdr.len = m->m_len = len; | | 1438 | m->m_pkthdr.len = m->m_len = len; |
1436 | | | 1439 | |
1437 | #ifdef RALINK_ETH_DEBUG | | 1440 | #ifdef RALINK_ETH_DEBUG |
1438 | { | | 1441 | { |
1439 | struct ether_header *eh = mtod(m, struct ether_header *); | | 1442 | struct ether_header *eh = mtod(m, struct ether_header *); |
1440 | printf("rx: eth_dst: %s ", ether_sprintf(eh->ether_dhost)); | | 1443 | printf("rx: eth_dst: %s ", ether_sprintf(eh->ether_dhost)); |
1441 | printf("rx: eth_src: %s type: 0x%04x \n", | | 1444 | printf("rx: eth_src: %s type: 0x%04x \n", |
1442 | ether_sprintf(eh->ether_shost), ntohs(eh->ether_type)); | | 1445 | ether_sprintf(eh->ether_shost), ntohs(eh->ether_type)); |
1443 | printf("0x14: %08x\n", *(volatile unsigned int *)(0xb0110014)); | | 1446 | printf("0x14: %08x\n", *(volatile unsigned int *)(0xb0110014)); |
1444 | printf("0x98: %08x\n", *(volatile unsigned int *)(0xb0110098)); | | 1447 | printf("0x98: %08x\n", *(volatile unsigned int *)(0xb0110098)); |
1445 | | | 1448 | |
1446 | unsigned char * s = mtod(m, unsigned char *); | | 1449 | unsigned char * s = mtod(m, unsigned char *); |
1447 | for (int j = 0; j < 32; j++) | | 1450 | for (int j = 0; j < 32; j++) |
1448 | printf("%02x%c", *(s + j), | | 1451 | printf("%02x%c", *(s + j), |
1449 | (j == 15 || j == 31) ? '\n' : ' '); | | 1452 | (j == 15 || j == 31) ? '\n' : ' '); |
1450 | } | | 1453 | } |
1451 | #endif | | 1454 | #endif |
1452 | | | 1455 | |
1453 | /* | | 1456 | /* |
1454 | * claim the buffer here since we can't do it at | | 1457 | * claim the buffer here since we can't do it at |
1455 | * allocation time due to the SW partitions | | 1458 | * allocation time due to the SW partitions |
1456 | */ | | 1459 | */ |
1457 | MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner); | | 1460 | MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner); |
1458 | | | 1461 | |
1459 | /* push it up the inteface */ | | 1462 | /* push it up the inteface */ |
1460 | ifp->if_ipackets++; | | 1463 | ifp->if_ipackets++; |
1461 | m_set_rcvif(m, ifp); | | 1464 | m_set_rcvif(m, ifp); |
1462 | | | 1465 | |
1463 | #ifdef RALINK_ETH_DEBUG | | 1466 | #ifdef RALINK_ETH_DEBUG |
1464 | { | | 1467 | { |
1465 | struct ether_header *eh = mtod(m, struct ether_header *); | | 1468 | struct ether_header *eh = mtod(m, struct ether_header *); |
1466 | printf("rx: eth_dst: %s ", ether_sprintf(eh->ether_dhost)); | | 1469 | printf("rx: eth_dst: %s ", ether_sprintf(eh->ether_dhost)); |
1467 | printf("rx: eth_src: %s type: 0x%04x\n", | | 1470 | printf("rx: eth_src: %s type: 0x%04x\n", |
1468 | ether_sprintf(eh->ether_shost), ntohs(eh->ether_type)); | | 1471 | ether_sprintf(eh->ether_shost), ntohs(eh->ether_type)); |
1469 | printf("0x14: %08x\n", *(volatile unsigned int *)(0xb0110014)); | | 1472 | printf("0x14: %08x\n", *(volatile unsigned int *)(0xb0110014)); |
1470 | printf("0x98: %08x\n", *(volatile unsigned int *)(0xb0110098)); | | 1473 | printf("0x98: %08x\n", *(volatile unsigned int *)(0xb0110098)); |
1471 | | | 1474 | |
1472 | unsigned char * s = mtod(m, unsigned char *); | | 1475 | unsigned char * s = mtod(m, unsigned char *); |
1473 | for (int j = 0; j < 32; j++) | | 1476 | for (int j = 0; j < 32; j++) |
1474 | printf("%02x%c", *(s + j), | | 1477 | printf("%02x%c", *(s + j), |
1475 | (j == 15 || j == 31) ? '\n' : ' '); | | 1478 | (j == 15 || j == 31) ? '\n' : ' '); |
1476 | } | | 1479 | } |
1477 | #endif | | 1480 | #endif |
1478 | | | 1481 | |
1479 | /* | | 1482 | /* |
1480 | * XXX: M_CSUM_TCPv4 and M_CSUM_UDPv4 do not currently work when | | 1483 | * XXX: M_CSUM_TCPv4 and M_CSUM_UDPv4 do not currently work when |
1481 | * using PF's ROUTETO option for load balancing. | | 1484 | * using PF's ROUTETO option for load balancing. |
1482 | */ | | 1485 | */ |
1483 | m->m_pkthdr.csum_flags |= M_CSUM_IPv4; | | 1486 | m->m_pkthdr.csum_flags |= M_CSUM_IPv4; |
1484 | | | 1487 | |
1485 | /* | | 1488 | /* |
1486 | * Pass this up to any BPF listeners, but only | | 1489 | * Pass this up to any BPF listeners, but only |
1487 | * pass it up the stack if its for us. | | 1490 | * pass it up the stack if its for us. |
1488 | */ | | 1491 | */ |
| @@ -1504,53 +1507,54 @@ ralink_eth_txintr(ralink_eth_softc_t *sc | | | @@ -1504,53 +1507,54 @@ ralink_eth_txintr(ralink_eth_softc_t *sc |
1504 | { | | 1507 | { |
1505 | RALINK_DEBUG_FUNC_ENTRY(); | | 1508 | RALINK_DEBUG_FUNC_ENTRY(); |
1506 | struct ralink_eth_txstate *txs; | | 1509 | struct ralink_eth_txstate *txs; |
1507 | | | 1510 | |
1508 | KASSERT(curcpu()->ci_cpl >= IPL_NET); | | 1511 | KASSERT(curcpu()->ci_cpl >= IPL_NET); |
1509 | sc->sc_evcnt_txintr.ev_count++; | | 1512 | sc->sc_evcnt_txintr.ev_count++; |
1510 | | | 1513 | |
1511 | /* | | 1514 | /* |
1512 | * Go through our Tx list and free mbufs for those | | 1515 | * Go through our Tx list and free mbufs for those |
1513 | * frames that have been transmitted. | | 1516 | * frames that have been transmitted. |
1514 | */ | | 1517 | */ |
1515 | while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) { | | 1518 | while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) { |
1516 | bus_dmamap_sync(sc->sc_dmat, sc->sc_pdmamap, | | 1519 | bus_dmamap_sync(sc->sc_dmat, sc->sc_pdmamap, |
1517 | (int)&sc->sc_txdesc[txs->txs_idx] - (int)sc->sc_descs, | | 1520 | (int)&sc->sc_txdesc[txs->txs_idx] - (int)sc->sc_descs, |
1518 | sizeof(struct ralink_tx_desc), | | 1521 | sizeof(struct ralink_tx_desc), |
1519 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); | | 1522 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); |
1520 | | | 1523 | |
1521 | RALINK_DEBUG(RALINK_DEBUG_REG,"-tx(%d) 0x%08x: 0x%08x\n", txs->txs_idx, | | 1524 | RALINK_DEBUG(RALINK_DEBUG_REG,"-tx(%d) 0x%08x: 0x%08x\n", |
1522 | (int)&sc->sc_txdesc[txs->txs_idx].data_ptr0, | | 1525 | txs->txs_idx, (int)&sc->sc_txdesc[txs->txs_idx].data_ptr0, |
1523 | sc->sc_txdesc[txs->txs_idx].data_ptr0); | | 1526 | sc->sc_txdesc[txs->txs_idx].data_ptr0); |
1524 | RALINK_DEBUG(RALINK_DEBUG_REG,"-tx(%d) 0x%08x: 0x%08x\n", txs->txs_idx, | | 1527 | RALINK_DEBUG(RALINK_DEBUG_REG,"-tx(%d) 0x%08x: 0x%08x\n", |
1525 | (int)&sc->sc_txdesc[txs->txs_idx].txd_info1, | | 1528 | txs->txs_idx, (int)&sc->sc_txdesc[txs->txs_idx].txd_info1, |
1526 | sc->sc_txdesc[txs->txs_idx].txd_info1); | | 1529 | sc->sc_txdesc[txs->txs_idx].txd_info1); |
1527 | RALINK_DEBUG(RALINK_DEBUG_REG,"-tx(%d) 0x%08x: 0x%08x\n", txs->txs_idx, | | 1530 | RALINK_DEBUG(RALINK_DEBUG_REG,"-tx(%d) 0x%08x: 0x%08x\n", |
1528 | (int)&sc->sc_txdesc[txs->txs_idx].data_ptr1, | | 1531 | txs->txs_idx, (int)&sc->sc_txdesc[txs->txs_idx].data_ptr1, |
1529 | sc->sc_txdesc[txs->txs_idx].data_ptr1); | | 1532 | sc->sc_txdesc[txs->txs_idx].data_ptr1); |
1530 | RALINK_DEBUG(RALINK_DEBUG_REG,"-tx(%d) 0x%08x: 0x%08x\n", txs->txs_idx, | | 1533 | RALINK_DEBUG(RALINK_DEBUG_REG,"-tx(%d) 0x%08x: 0x%08x\n", |
1531 | (int)&sc->sc_txdesc[txs->txs_idx].txd_info2, | | 1534 | txs->txs_idx, (int)&sc->sc_txdesc[txs->txs_idx].txd_info2, |
1532 | sc->sc_txdesc[txs->txs_idx].txd_info2); | | 1535 | sc->sc_txdesc[txs->txs_idx].txd_info2); |
1533 | | | 1536 | |
1534 | /* we're finished if the current tx isn't done */ | | 1537 | /* we're finished if the current tx isn't done */ |
1535 | if (!(sc->sc_txdesc[txs->txs_idx].txd_info1 & TXD_DDONE)) | | 1538 | if (!(sc->sc_txdesc[txs->txs_idx].txd_info1 & TXD_DDONE)) |
1536 | break; | | 1539 | break; |
1537 | | | 1540 | |
1538 | RALINK_DEBUG(RALINK_DEBUG_REG,"-tx(%d) transmitted\n", txs->txs_idx); | | 1541 | RALINK_DEBUG(RALINK_DEBUG_REG,"-tx(%d) transmitted\n", |
| | | 1542 | txs->txs_idx); |
1539 | | | 1543 | |
1540 | SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); | | 1544 | SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); |
1541 | | | 1545 | |
1542 | bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap, 0, | | 1546 | bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap, 0, |
1543 | txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); | | 1547 | txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); |
1544 | bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); | | 1548 | bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); |
1545 | m_freem(txs->txs_mbuf); | | 1549 | m_freem(txs->txs_mbuf); |
1546 | txs->txs_mbuf = NULL; | | 1550 | txs->txs_mbuf = NULL; |
1547 | | | 1551 | |
1548 | SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); | | 1552 | SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); |
1549 | | | 1553 | |
1550 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; | | 1554 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
1551 | ifp->if_flags &= ~IFF_OACTIVE; | | 1555 | ifp->if_flags &= ~IFF_OACTIVE; |
1552 | ifp->if_opackets++; | | 1556 | ifp->if_opackets++; |
1553 | sc->sc_evcnt_output.ev_count++; | | 1557 | sc->sc_evcnt_output.ev_count++; |
1554 | | | 1558 | |
1555 | if (--sc->sc_pending_tx == 0) | | 1559 | if (--sc->sc_pending_tx == 0) |
1556 | ifp->if_timer = 0; | | 1560 | ifp->if_timer = 0; |
| @@ -1632,98 +1636,98 @@ ralink_eth_mii_read(device_t self, int p | | | @@ -1632,98 +1636,98 @@ ralink_eth_mii_read(device_t self, int p |
1632 | for (;;) { | | 1636 | for (;;) { |
1633 | /* rd_rdy: read operation is complete */ | | 1637 | /* rd_rdy: read operation is complete */ |
1634 | #if defined(RT3050) || defined(RT3052) | | 1638 | #if defined(RT3050) || defined(RT3052) |
1635 | if ((sw_read(sc, RA_ETH_SW_PCTL1) & PCTL1_RD_DONE) == 0) | | 1639 | if ((sw_read(sc, RA_ETH_SW_PCTL1) & PCTL1_RD_DONE) == 0) |
1636 | break; | | 1640 | break; |
1637 | #else | | 1641 | #else |
1638 | if ((fe_read(sc, RA_FE_MDIO_ACCESS) & MDIO_ACCESS_TRG) == 0) | | 1642 | if ((fe_read(sc, RA_FE_MDIO_ACCESS) & MDIO_ACCESS_TRG) == 0) |
1639 | break; | | 1643 | break; |
1640 | #endif | | 1644 | #endif |
1641 | } | | 1645 | } |
1642 | | | 1646 | |
1643 | #if defined(RT3050) || defined(RT3052) | | 1647 | #if defined(RT3050) || defined(RT3052) |
1644 | sw_write(sc, RA_ETH_SW_PCTL0, | | 1648 | sw_write(sc, RA_ETH_SW_PCTL0, |
1645 | PCTL0_RD_CMD | PCTL0_REG(phy_reg) | PCTL0_ADDR(phy_addr)); | | 1649 | PCTL0_RD_CMD | PCTL0_REG(phy_reg) | PCTL0_ADDR(phy_addr)); |
1646 | #else | | 1650 | #else |
1647 | fe_write(sc, RA_FE_MDIO_ACCESS, | | 1651 | fe_write(sc, RA_FE_MDIO_ACCESS, |
1648 | MDIO_ACCESS_PHY_ADDR(phy_addr) | MDIO_ACCESS_REG(phy_reg)); | | 1652 | MDIO_ACCESS_PHY_ADDR(phy_addr) | MDIO_ACCESS_REG(phy_reg)); |
1649 | fe_write(sc, RA_FE_MDIO_ACCESS, | | 1653 | fe_write(sc, RA_FE_MDIO_ACCESS, |
1650 | MDIO_ACCESS_PHY_ADDR(phy_addr) | MDIO_ACCESS_REG(phy_reg) | | | 1654 | MDIO_ACCESS_PHY_ADDR(phy_addr) | MDIO_ACCESS_REG(phy_reg) | |
1651 | MDIO_ACCESS_TRG); | | 1655 | MDIO_ACCESS_TRG); |
1652 | #endif | | 1656 | #endif |
1653 | | | 1657 | |
1654 | /* | | 1658 | /* |
1655 | * make sure read operation is complete | | 1659 | * make sure read operation is complete |
1656 | * TODO: timeout (linux uses jiffies to measure 5 seconds) | | 1660 | * TODO: timeout (linux uses jiffies to measure 5 seconds) |
1657 | */ | | 1661 | */ |
1658 | for (;;) { | | 1662 | for (;;) { |
1659 | #if defined(RT3050) || defined(RT3052) | | 1663 | #if defined(RT3050) || defined(RT3052) |
1660 | if ((sw_read(sc, RA_ETH_SW_PCTL1) & PCTL1_RD_DONE) != 0) { | | 1664 | if ((sw_read(sc, RA_ETH_SW_PCTL1) & PCTL1_RD_DONE) != 0) { |
1661 | int data = PCTL1_RD_VAL( | | 1665 | int data = PCTL1_RD_VAL( |
1662 | sw_read(sc, RA_ETH_SW_PCTL1)); | | 1666 | sw_read(sc, RA_ETH_SW_PCTL1)); |
1663 | ralink_eth_mdio_enable(sc, false); | | 1667 | ralink_eth_mdio_enable(sc, false); |
1664 | return data; | | 1668 | return data; |
1665 | } | | 1669 | } |
1666 | #else | | 1670 | #else |
1667 | if ((fe_read(sc, RA_FE_MDIO_ACCESS) & MDIO_ACCESS_TRG) == 0) { | | 1671 | if ((fe_read(sc, RA_FE_MDIO_ACCESS) & MDIO_ACCESS_TRG) == 0) { |
1668 | int data = MDIO_ACCESS_DATA( | | 1672 | int data = MDIO_ACCESS_DATA( |
1669 | fe_read(sc, RA_FE_MDIO_ACCESS)); | | 1673 | fe_read(sc, RA_FE_MDIO_ACCESS)); |
1670 | ralink_eth_mdio_enable(sc, false); | | 1674 | ralink_eth_mdio_enable(sc, false); |
1671 | return data; | | 1675 | return data; |
1672 | } | | 1676 | } |
1673 | #endif | | 1677 | #endif |
1674 | } | | 1678 | } |
1675 | } | | 1679 | } |
1676 | | | 1680 | |
1677 | /* | | 1681 | /* |
1678 | * ralink_eth_mii_write | | 1682 | * ralink_eth_mii_write |
1679 | */ | | 1683 | */ |
1680 | static void | | 1684 | static void |
1681 | ralink_eth_mii_write(device_t self, int phy_addr, int phy_reg, int val) | | 1685 | ralink_eth_mii_write(device_t self, int phy_addr, int phy_reg, int val) |
1682 | { | | 1686 | { |
1683 | ralink_eth_softc_t *sc = device_private(self); | | 1687 | ralink_eth_softc_t *sc = device_private(self); |
1684 | KASSERT(sc != NULL); | | 1688 | KASSERT(sc != NULL); |
1685 | #if 0 | | 1689 | #if 0 |
1686 | printf("%s() phy_addr: %d phy_reg: %d val: 0x%04x\n", | | 1690 | printf("%s() phy_addr: %d phy_reg: %d val: 0x%04x\n", |
1687 | __func__, phy_addr, phy_reg, val); | | 1691 | __func__, phy_addr, phy_reg, val); |
1688 | #endif | | 1692 | #endif |
1689 | ralink_eth_mdio_enable(sc, true); | | 1693 | ralink_eth_mdio_enable(sc, true); |
1690 | | | 1694 | |
1691 | /* | | 1695 | /* |
1692 | * make sure previous write operation is complete | | 1696 | * make sure previous write operation is complete |
1693 | * TODO: timeout (linux uses jiffies to measure 5 seconds) | | 1697 | * TODO: timeout (linux uses jiffies to measure 5 seconds) |
1694 | */ | | 1698 | */ |
1695 | for (;;) { | | 1699 | for (;;) { |
1696 | #if defined(RT3050) || defined(RT3052) | | 1700 | #if defined(RT3050) || defined(RT3052) |
1697 | if ((sw_read(sc, RA_ETH_SW_PCTL1) & PCTL1_RD_DONE) == 0) | | 1701 | if ((sw_read(sc, RA_ETH_SW_PCTL1) & PCTL1_RD_DONE) == 0) |
1698 | break; | | 1702 | break; |
1699 | #else | | 1703 | #else |
1700 | if ((fe_read(sc, RA_FE_MDIO_ACCESS) & MDIO_ACCESS_TRG) == 0) | | 1704 | if ((fe_read(sc, RA_FE_MDIO_ACCESS) & MDIO_ACCESS_TRG) == 0) |
1701 | break; | | 1705 | break; |
1702 | #endif | | 1706 | #endif |
1703 | } | | 1707 | } |
1704 | | | 1708 | |
1705 | #if defined(RT3050) || defined(RT3052) | | 1709 | #if defined(RT3050) || defined(RT3052) |
1706 | sw_write(sc, RA_ETH_SW_PCTL0, | | 1710 | sw_write(sc, RA_ETH_SW_PCTL0, |
1707 | PCTL0_WR_CMD | PCTL0_WR_VAL(val) | PCTL0_REG(phy_reg) | | | 1711 | PCTL0_WR_CMD | PCTL0_WR_VAL(val) | PCTL0_REG(phy_reg) | |
1708 | PCTL0_ADDR(phy_addr)); | | 1712 | PCTL0_ADDR(phy_addr)); |
1709 | #else | | 1713 | #else |
1710 | fe_write(sc, RA_FE_MDIO_ACCESS, | | 1714 | fe_write(sc, RA_FE_MDIO_ACCESS, |
1711 | MDIO_ACCESS_WR | MDIO_ACCESS_PHY_ADDR(phy_addr) | | | 1715 | MDIO_ACCESS_WR | MDIO_ACCESS_PHY_ADDR(phy_addr) | |
1712 | MDIO_ACCESS_REG(phy_reg) | MDIO_ACCESS_DATA(val)); | | 1716 | MDIO_ACCESS_REG(phy_reg) | MDIO_ACCESS_DATA(val)); |
1713 | fe_write(sc, RA_FE_MDIO_ACCESS, | | 1717 | fe_write(sc, RA_FE_MDIO_ACCESS, |
1714 | MDIO_ACCESS_WR | MDIO_ACCESS_PHY_ADDR(phy_addr) | | | 1718 | MDIO_ACCESS_WR | MDIO_ACCESS_PHY_ADDR(phy_addr) | |
1715 | MDIO_ACCESS_REG(phy_reg) | MDIO_ACCESS_DATA(val) | | | 1719 | MDIO_ACCESS_REG(phy_reg) | MDIO_ACCESS_DATA(val) | |
1716 | MDIO_ACCESS_TRG); | | 1720 | MDIO_ACCESS_TRG); |
1717 | #endif | | 1721 | #endif |
1718 | | | 1722 | |
1719 | | | 1723 | |
1720 | /* make sure write operation is complete */ | | 1724 | /* make sure write operation is complete */ |
1721 | for (;;) { | | 1725 | for (;;) { |
1722 | #if defined(RT3050) || defined(RT3052) | | 1726 | #if defined(RT3050) || defined(RT3052) |
1723 | if ((sw_read(sc, RA_ETH_SW_PCTL1) & PCTL1_WR_DONE) != 0) { | | 1727 | if ((sw_read(sc, RA_ETH_SW_PCTL1) & PCTL1_WR_DONE) != 0) { |
1724 | ralink_eth_mdio_enable(sc, false); | | 1728 | ralink_eth_mdio_enable(sc, false); |
1725 | return; | | 1729 | return; |
1726 | } | | 1730 | } |
1727 | #else | | 1731 | #else |
1728 | if ((fe_read(sc, RA_FE_MDIO_ACCESS) & MDIO_ACCESS_TRG) == 0){ | | 1732 | if ((fe_read(sc, RA_FE_MDIO_ACCESS) & MDIO_ACCESS_TRG) == 0){ |
1729 | ralink_eth_mdio_enable(sc, false); | | 1733 | ralink_eth_mdio_enable(sc, false); |