| @@ -1,1057 +1,1056 @@ | | | @@ -1,1057 +1,1056 @@ |
1 | /* $NetBSD: ralink_eth.c,v 1.2 2011/07/28 15:38:49 matt Exp $ */ | | 1 | /* $NetBSD: ralink_eth.c,v 1.3 2011/08/01 23:01:40 matt Exp $ */ |
2 | /*- | | 2 | /*- |
3 | * Copyright (c) 2011 CradlePoint Technology, Inc. | | 3 | * Copyright (c) 2011 CradlePoint Technology, Inc. |
4 | * All rights reserved. | | 4 | * All rights reserved. |
5 | * | | 5 | * |
6 | * | | 6 | * |
7 | * Redistribution and use in source and binary forms, with or without | | 7 | * Redistribution and use in source and binary forms, with or without |
8 | * modification, are permitted provided that the following conditions | | 8 | * modification, are permitted provided that the following conditions |
9 | * are met: | | 9 | * are met: |
10 | * 1. Redistributions of source code must retain the above copyright | | 10 | * 1. Redistributions of source code must retain the above copyright |
11 | * notice, this list of conditions and the following disclaimer. | | 11 | * notice, this list of conditions and the following disclaimer. |
12 | * 2. Redistributions in binary form must reproduce the above copyright | | 12 | * 2. Redistributions in binary form must reproduce the above copyright |
13 | * notice, this list of conditions and the following disclaimer in the | | 13 | * notice, this list of conditions and the following disclaimer in the |
14 | * documentation and/or other materials provided with the distribution. | | 14 | * documentation and/or other materials provided with the distribution. |
15 | * | | 15 | * |
16 | * THIS SOFTWARE IS PROVIDED BY CRADLEPOINT TECHNOLOGY, INC. AND CONTRIBUTORS | | 16 | * THIS SOFTWARE IS PROVIDED BY CRADLEPOINT TECHNOLOGY, INC. AND CONTRIBUTORS |
17 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 17 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
18 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 18 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
19 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS | | 19 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS |
20 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 20 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
21 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 21 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
22 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 22 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
23 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 23 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
24 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 24 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
25 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 25 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
26 | * POSSIBILITY OF SUCH DAMAGE. | | 26 | * POSSIBILITY OF SUCH DAMAGE. |
27 | */ | | 27 | */ |
28 | | | 28 | |
29 | /* ralink_eth.c -- Ralink Ethernet Driver */ | | 29 | /* ralink_eth.c -- Ralink Ethernet Driver */ |
30 | | | 30 | |
31 | #include <sys/cdefs.h> | | 31 | #include <sys/cdefs.h> |
32 | __KERNEL_RCSID(0, "$NetBSD: ralink_eth.c,v 1.2 2011/07/28 15:38:49 matt Exp $"); | | 32 | __KERNEL_RCSID(0, "$NetBSD: ralink_eth.c,v 1.3 2011/08/01 23:01:40 matt Exp $"); |
33 | | | 33 | |
34 | #include <sys/param.h> | | 34 | #include <sys/param.h> |
| | | 35 | #include <sys/bus.h> |
35 | #include <sys/callout.h> | | 36 | #include <sys/callout.h> |
36 | #include <sys/device.h> | | 37 | #include <sys/device.h> |
37 | #include <sys/endian.h> | | 38 | #include <sys/endian.h> |
38 | #include <sys/errno.h> | | 39 | #include <sys/errno.h> |
39 | #include <sys/ioctl.h> | | 40 | #include <sys/ioctl.h> |
| | | 41 | #include <sys/intr.h> |
40 | #include <sys/kernel.h> | | 42 | #include <sys/kernel.h> |
41 | #include <sys/malloc.h> | | 43 | #include <sys/malloc.h> |
42 | #include <sys/mbuf.h> | | 44 | #include <sys/mbuf.h> |
43 | #include <sys/socket.h> | | 45 | #include <sys/socket.h> |
44 | #include <sys/systm.h> | | 46 | #include <sys/systm.h> |
45 | | | 47 | |
46 | #include <uvm/uvm_extern.h> | | 48 | #include <uvm/uvm_extern.h> |
47 | | | 49 | |
48 | #include <net/if.h> | | 50 | #include <net/if.h> |
49 | #include <net/if_dl.h> | | 51 | #include <net/if_dl.h> |
50 | #include <net/if_media.h> | | 52 | #include <net/if_media.h> |
51 | #include <net/if_ether.h> | | 53 | #include <net/if_ether.h> |
52 | #include <net/if_vlanvar.h> | | 54 | #include <net/if_vlanvar.h> |
53 | | | 55 | |
54 | #include <net/bpf.h> | | 56 | #include <net/bpf.h> |
55 | | | 57 | |
56 | #include <machine/bus.h> | | | |
57 | #include <machine/intr.h> | | | |
58 | | | | |
59 | #include <dev/mii/mii.h> | | 58 | #include <dev/mii/mii.h> |
60 | #include <dev/mii/miivar.h> | | 59 | #include <dev/mii/miivar.h> |
61 | #include <dev/mii/mii_bitbang.h> | | 60 | #include <dev/mii/mii_bitbang.h> |
62 | | | 61 | |
63 | #include <mips/ralink/ralink_var.h> | | 62 | #include <mips/ralink/ralink_var.h> |
64 | #include <mips/ralink/ralink_reg.h> | | 63 | #include <mips/ralink/ralink_reg.h> |
65 | #if 0 | | 64 | #if 0 |
66 | #define CPDEBUG /* XXX TMP DEBUG FIXME */ | | 65 | #define CPDEBUG /* XXX TMP DEBUG FIXME */ |
67 | #define RALINK_ETH_DEBUG /* XXX TMP DEBUG FIXME */ | | 66 | #define RALINK_ETH_DEBUG /* XXX TMP DEBUG FIXME */ |
68 | #define ENABLE_RALINK_DEBUG_ERROR 1 | | 67 | #define ENABLE_RALINK_DEBUG_ERROR 1 |
69 | #define ENABLE_RALINK_DEBUG_MISC 1 | | 68 | #define ENABLE_RALINK_DEBUG_MISC 1 |
70 | #define ENABLE_RALINK_DEBUG_INFO 1 | | 69 | #define ENABLE_RALINK_DEBUG_INFO 1 |
71 | #define ENABLE_RALINK_DEBUG_FORCE 1 | | 70 | #define ENABLE_RALINK_DEBUG_FORCE 1 |
72 | #define ENABLE_RALINK_DEBUG_REG 1 | | 71 | #define ENABLE_RALINK_DEBUG_REG 1 |
73 | #endif | | 72 | #endif |
74 | #include <mips/ralink/ralink_debug.h> | | 73 | #include <mips/ralink/ralink_debug.h> |
75 | | | 74 | |
76 | | | 75 | |
77 | /* PDMA RX Descriptor Format */ | | 76 | /* PDMA RX Descriptor Format */ |
78 | struct ralink_rx_desc { | | 77 | struct ralink_rx_desc { |
79 | uint32_t data_ptr; | | 78 | uint32_t data_ptr; |
80 | uint32_t rxd_info1; | | 79 | uint32_t rxd_info1; |
81 | #define RXD_LEN1(x) (((x) >> 0) & 0x3fff) | | 80 | #define RXD_LEN1(x) (((x) >> 0) & 0x3fff) |
82 | #define RXD_LAST1 (1 << 14) | | 81 | #define RXD_LAST1 (1 << 14) |
83 | #define RXD_LEN0(x) (((x) >> 16) & 0x3fff) | | 82 | #define RXD_LEN0(x) (((x) >> 16) & 0x3fff) |
84 | #define RXD_LAST0 (1 << 30) | | 83 | #define RXD_LAST0 (1 << 30) |
85 | #define RXD_DDONE (1 << 31) | | 84 | #define RXD_DDONE (1 << 31) |
86 | uint32_t unused; | | 85 | uint32_t unused; |
87 | uint32_t rxd_info2; | | 86 | uint32_t rxd_info2; |
88 | #define RXD_FOE(x) (((x) >> 0) & 0x3fff) | | 87 | #define RXD_FOE(x) (((x) >> 0) & 0x3fff) |
89 | #define RXD_FVLD (1 << 14) | | 88 | #define RXD_FVLD (1 << 14) |
90 | #define RXD_INFO(x) (((x) >> 16) & 0xff) | | 89 | #define RXD_INFO(x) (((x) >> 16) & 0xff) |
91 | #define RXD_PORT(x) (((x) >> 24) & 0x7) | | 90 | #define RXD_PORT(x) (((x) >> 24) & 0x7) |
92 | #define RXD_INFO_CPU (1 << 27) | | 91 | #define RXD_INFO_CPU (1 << 27) |
93 | #define RXD_L4_FAIL (1 << 28) | | 92 | #define RXD_L4_FAIL (1 << 28) |
94 | #define RXD_IP_FAIL (1 << 29) | | 93 | #define RXD_IP_FAIL (1 << 29) |
95 | #define RXD_L4_VLD (1 << 30) | | 94 | #define RXD_L4_VLD (1 << 30) |
96 | #define RXD_IP_VLD (1 << 31) | | 95 | #define RXD_IP_VLD (1 << 31) |
97 | }; | | 96 | }; |
98 | | | 97 | |
99 | /* PDMA RX Descriptor Format */ | | 98 | /* PDMA RX Descriptor Format */ |
100 | struct ralink_tx_desc { | | 99 | struct ralink_tx_desc { |
101 | uint32_t data_ptr0; | | 100 | uint32_t data_ptr0; |
102 | uint32_t txd_info1; | | 101 | uint32_t txd_info1; |
103 | #define TXD_LEN1(x) (((x) & 0x3fff) << 0) | | 102 | #define TXD_LEN1(x) (((x) & 0x3fff) << 0) |
104 | #define TXD_LAST1 (1 << 14) | | 103 | #define TXD_LAST1 (1 << 14) |
105 | #define TXD_BURST (1 << 15) | | 104 | #define TXD_BURST (1 << 15) |
106 | #define TXD_LEN0(x) (((x) & 0x3fff) << 16) | | 105 | #define TXD_LEN0(x) (((x) & 0x3fff) << 16) |
107 | #define TXD_LAST0 (1 << 30) | | 106 | #define TXD_LAST0 (1 << 30) |
108 | #define TXD_DDONE (1 << 31) | | 107 | #define TXD_DDONE (1 << 31) |
109 | uint32_t data_ptr1; | | 108 | uint32_t data_ptr1; |
110 | uint32_t txd_info2; | | 109 | uint32_t txd_info2; |
111 | #define TXD_VIDX(x) (((x) & 0xf) << 0) | | 110 | #define TXD_VIDX(x) (((x) & 0xf) << 0) |
112 | #define TXD_VPRI(x) (((x) & 0x7) << 4) | | 111 | #define TXD_VPRI(x) (((x) & 0x7) << 4) |
113 | #define TXD_VEN (1 << 7) | | 112 | #define TXD_VEN (1 << 7) |
114 | #define TXD_SIDX(x) (((x) & 0xf) << 8) | | 113 | #define TXD_SIDX(x) (((x) & 0xf) << 8) |
115 | #define TXD_SEN(x) (1 << 13) | | 114 | #define TXD_SEN(x) (1 << 13) |
116 | #define TXD_QN(x) (((x) & 0x7) << 16) | | 115 | #define TXD_QN(x) (((x) & 0x7) << 16) |
117 | #define TXD_PN(x) (((x) & 0x7) << 24) | | 116 | #define TXD_PN(x) (((x) & 0x7) << 24) |
118 | #define TXD_PN_CPU 0 | | 117 | #define TXD_PN_CPU 0 |
119 | #define TXD_PN_GDMA1 1 | | 118 | #define TXD_PN_GDMA1 1 |
120 | #define TXD_PN_GDMA2 2 | | 119 | #define TXD_PN_GDMA2 2 |
121 | #define TXD_TCP_EN (1 << 29) | | 120 | #define TXD_TCP_EN (1 << 29) |
122 | #define TXD_UDP_EN (1 << 30) | | 121 | #define TXD_UDP_EN (1 << 30) |
123 | #define TXD_IP_EN (1 << 31) | | 122 | #define TXD_IP_EN (1 << 31) |
124 | }; | | 123 | }; |
125 | | | 124 | |
126 | /* TODO: | | 125 | /* TODO: |
127 | * try to scale number of descriptors swith size of memory | | 126 | * try to scale number of descriptors swith size of memory |
128 | * these numbers may have a significant impact on performance/memory/mbuf usage | | 127 | * these numbers may have a significant impact on performance/memory/mbuf usage |
129 | */ | | 128 | */ |
130 | #if RTMEMSIZE >= 64 | | 129 | #if RTMEMSIZE >= 64 |
131 | #define RALINK_ETH_NUM_RX_DESC 256 | | 130 | #define RALINK_ETH_NUM_RX_DESC 256 |
132 | #define RALINK_ETH_NUM_TX_DESC 256 | | 131 | #define RALINK_ETH_NUM_TX_DESC 256 |
133 | #elif RTMEMSIZE >= 32 | | 132 | #elif RTMEMSIZE >= 32 |
134 | #define RALINK_ETH_NUM_RX_DESC 64 | | 133 | #define RALINK_ETH_NUM_RX_DESC 64 |
135 | #define RALINK_ETH_NUM_TX_DESC 64 | | 134 | #define RALINK_ETH_NUM_TX_DESC 64 |
136 | #else | | 135 | #else |
137 | #error RTMEMSIZE invalid /* XXX */ | | 136 | #error RTMEMSIZE invalid /* XXX */ |
138 | #endif | | 137 | #endif |
139 | /* maximum segments per packet */ | | 138 | /* maximum segments per packet */ |
140 | #define RALINK_ETH_MAX_TX_SEGS 1 | | 139 | #define RALINK_ETH_MAX_TX_SEGS 1 |
141 | | | 140 | |
142 | /* define a struct for ease of dma memory allocation */ | | 141 | /* define a struct for ease of dma memory allocation */ |
143 | struct ralink_descs { | | 142 | struct ralink_descs { |
144 | struct ralink_rx_desc rxdesc[RALINK_ETH_NUM_RX_DESC]; | | 143 | struct ralink_rx_desc rxdesc[RALINK_ETH_NUM_RX_DESC]; |
145 | struct ralink_tx_desc txdesc[RALINK_ETH_NUM_TX_DESC]; | | 144 | struct ralink_tx_desc txdesc[RALINK_ETH_NUM_TX_DESC]; |
146 | }; | | 145 | }; |
147 | | | 146 | |
148 | /* Software state for transmit jobs. */ | | 147 | /* Software state for transmit jobs. */ |
149 | struct ralink_eth_txstate { | | 148 | struct ralink_eth_txstate { |
150 | struct mbuf *txs_mbuf; /* head of our mbuf chain */ | | 149 | struct mbuf *txs_mbuf; /* head of our mbuf chain */ |
151 | bus_dmamap_t txs_dmamap; /* our DMA map */ | | 150 | bus_dmamap_t txs_dmamap; /* our DMA map */ |
152 | int txs_idx; /* the index in txdesc ring that */ | | 151 | int txs_idx; /* the index in txdesc ring that */ |
153 | /* this state is tracking */ | | 152 | /* this state is tracking */ |
154 | SIMPLEQ_ENTRY(ralink_eth_txstate) txs_q; | | 153 | SIMPLEQ_ENTRY(ralink_eth_txstate) txs_q; |
155 | }; | | 154 | }; |
156 | | | 155 | |
157 | SIMPLEQ_HEAD(ralink_eth_txsq, ralink_eth_txstate); | | 156 | SIMPLEQ_HEAD(ralink_eth_txsq, ralink_eth_txstate); |
158 | | | 157 | |
159 | /* | | 158 | /* |
160 | * Software state for receive jobs. | | 159 | * Software state for receive jobs. |
161 | */ | | 160 | */ |
162 | struct ralink_eth_rxstate { | | 161 | struct ralink_eth_rxstate { |
163 | struct mbuf *rxs_mbuf; /* head of our mbuf chain */ | | 162 | struct mbuf *rxs_mbuf; /* head of our mbuf chain */ |
164 | bus_dmamap_t rxs_dmamap; /* our DMA map */ | | 163 | bus_dmamap_t rxs_dmamap; /* our DMA map */ |
165 | }; | | 164 | }; |
166 | | | 165 | |
167 | typedef struct ralink_eth_softc { | | 166 | typedef struct ralink_eth_softc { |
168 | device_t sc_dev; /* generic device information */ | | 167 | device_t sc_dev; /* generic device information */ |
169 | bus_space_tag_t sc_memt; /* bus space tag */ | | 168 | bus_space_tag_t sc_memt; /* bus space tag */ |
170 | bus_space_handle_t sc_sy_memh; /* handle at SYSCTL_BASE */ | | 169 | bus_space_handle_t sc_sy_memh; /* handle at SYSCTL_BASE */ |
171 | bus_space_handle_t sc_fe_memh; /* handle at FRAME_ENGINE_BASE */ | | 170 | bus_space_handle_t sc_fe_memh; /* handle at FRAME_ENGINE_BASE */ |
172 | bus_space_handle_t sc_sw_memh; /* handle at ETH_SW_BASE */ | | 171 | bus_space_handle_t sc_sw_memh; /* handle at ETH_SW_BASE */ |
173 | int sc_sy_size; /* size of Sysctl regs space */ | | 172 | int sc_sy_size; /* size of Sysctl regs space */ |
174 | int sc_fe_size; /* size of Frame Engine regs space */ | | 173 | int sc_fe_size; /* size of Frame Engine regs space */ |
175 | int sc_sw_size; /* size of Ether Switch regs space */ | | 174 | int sc_sw_size; /* size of Ether Switch regs space */ |
176 | bus_dma_tag_t sc_dmat; /* bus DMA tag */ | | 175 | bus_dma_tag_t sc_dmat; /* bus DMA tag */ |
177 | void *sc_ih; /* interrupt handle */ | | 176 | void *sc_ih; /* interrupt handle */ |
178 | | | 177 | |
179 | /* tx/rx dma mapping */ | | 178 | /* tx/rx dma mapping */ |
180 | bus_dma_segment_t sc_dseg; | | 179 | bus_dma_segment_t sc_dseg; |
181 | int sc_ndseg; | | 180 | int sc_ndseg; |
182 | bus_dmamap_t sc_pdmamap; /* PDMA DMA map */ | | 181 | bus_dmamap_t sc_pdmamap; /* PDMA DMA map */ |
183 | #define sc_pdma sc_pdmamap->dm_segs[0].ds_addr | | 182 | #define sc_pdma sc_pdmamap->dm_segs[0].ds_addr |
184 | | | 183 | |
185 | struct ralink_descs *sc_descs; | | 184 | struct ralink_descs *sc_descs; |
186 | #define sc_rxdesc sc_descs->rxdesc | | 185 | #define sc_rxdesc sc_descs->rxdesc |
187 | #define sc_txdesc sc_descs->txdesc | | 186 | #define sc_txdesc sc_descs->txdesc |
188 | | | 187 | |
189 | #define RALINK_MIN_BUF 64 | | 188 | #define RALINK_MIN_BUF 64 |
190 | char ralink_zero_buf[RALINK_MIN_BUF]; | | 189 | char ralink_zero_buf[RALINK_MIN_BUF]; |
191 | | | 190 | |
192 | struct ralink_eth_txstate sc_txstate[RALINK_ETH_NUM_TX_DESC]; | | 191 | struct ralink_eth_txstate sc_txstate[RALINK_ETH_NUM_TX_DESC]; |
193 | struct ralink_eth_rxstate sc_rxstate[RALINK_ETH_NUM_RX_DESC]; | | 192 | struct ralink_eth_rxstate sc_rxstate[RALINK_ETH_NUM_RX_DESC]; |
194 | | | 193 | |
195 | struct ralink_eth_txsq sc_txfreeq; /* free Tx descsofts */ | | 194 | struct ralink_eth_txsq sc_txfreeq; /* free Tx descsofts */ |
196 | struct ralink_eth_txsq sc_txdirtyq; /* dirty Tx descsofts */ | | 195 | struct ralink_eth_txsq sc_txdirtyq; /* dirty Tx descsofts */ |
197 | | | 196 | |
198 | struct ethercom sc_ethercom; /* ethernet common data */ | | 197 | struct ethercom sc_ethercom; /* ethernet common data */ |
199 | u_int sc_pending_tx; | | 198 | u_int sc_pending_tx; |
200 | | | 199 | |
201 | /* mii */ | | 200 | /* mii */ |
202 | struct mii_data sc_mii; | | 201 | struct mii_data sc_mii; |
203 | struct callout sc_tick_callout; | | 202 | struct callout sc_tick_callout; |
204 | | | 203 | |
205 | struct evcnt sc_evcnt_spurious_intr; | | 204 | struct evcnt sc_evcnt_spurious_intr; |
206 | struct evcnt sc_evcnt_rxintr; | | 205 | struct evcnt sc_evcnt_rxintr; |
207 | struct evcnt sc_evcnt_rxintr_skip_len; | | 206 | struct evcnt sc_evcnt_rxintr_skip_len; |
208 | struct evcnt sc_evcnt_rxintr_skip_tag_none; | | 207 | struct evcnt sc_evcnt_rxintr_skip_tag_none; |
209 | struct evcnt sc_evcnt_rxintr_skip_tag_inval; | | 208 | struct evcnt sc_evcnt_rxintr_skip_tag_inval; |
210 | struct evcnt sc_evcnt_rxintr_skip_inact; | | 209 | struct evcnt sc_evcnt_rxintr_skip_inact; |
211 | struct evcnt sc_evcnt_txintr; | | 210 | struct evcnt sc_evcnt_txintr; |
212 | struct evcnt sc_evcnt_input; | | 211 | struct evcnt sc_evcnt_input; |
213 | struct evcnt sc_evcnt_output; | | 212 | struct evcnt sc_evcnt_output; |
214 | struct evcnt sc_evcnt_watchdog; | | 213 | struct evcnt sc_evcnt_watchdog; |
215 | struct evcnt sc_evcnt_wd_reactivate; | | 214 | struct evcnt sc_evcnt_wd_reactivate; |
216 | struct evcnt sc_evcnt_wd_tx; | | 215 | struct evcnt sc_evcnt_wd_tx; |
217 | struct evcnt sc_evcnt_wd_spurious; | | 216 | struct evcnt sc_evcnt_wd_spurious; |
218 | struct evcnt sc_evcnt_add_rxbuf_hdr_fail; | | 217 | struct evcnt sc_evcnt_add_rxbuf_hdr_fail; |
219 | struct evcnt sc_evcnt_add_rxbuf_mcl_fail; | | 218 | struct evcnt sc_evcnt_add_rxbuf_mcl_fail; |
220 | } ralink_eth_softc_t; | | 219 | } ralink_eth_softc_t; |
221 | | | 220 | |
222 | /* alignment so the IP header is aligned */ | | 221 | /* alignment so the IP header is aligned */ |
223 | #define RALINK_ETHER_ALIGN 2 | | 222 | #define RALINK_ETHER_ALIGN 2 |
224 | | | 223 | |
225 | /* device functions */ | | 224 | /* device functions */ |
226 | static int ralink_eth_match(device_t, cfdata_t, void *); | | 225 | static int ralink_eth_match(device_t, cfdata_t, void *); |
227 | static void ralink_eth_attach(device_t, device_t, void *); | | 226 | static void ralink_eth_attach(device_t, device_t, void *); |
228 | static int ralink_eth_detach(device_t, int); | | 227 | static int ralink_eth_detach(device_t, int); |
229 | static int ralink_eth_activate(device_t, enum devact); | | 228 | static int ralink_eth_activate(device_t, enum devact); |
230 | | | 229 | |
231 | /* local driver functions */ | | 230 | /* local driver functions */ |
232 | static void ralink_eth_hw_init(ralink_eth_softc_t *); | | 231 | static void ralink_eth_hw_init(ralink_eth_softc_t *); |
233 | static int ralink_eth_intr(void *); | | 232 | static int ralink_eth_intr(void *); |
234 | static void ralink_eth_reset(ralink_eth_softc_t *); | | 233 | static void ralink_eth_reset(ralink_eth_softc_t *); |
235 | static void ralink_eth_rxintr(ralink_eth_softc_t *); | | 234 | static void ralink_eth_rxintr(ralink_eth_softc_t *); |
236 | static void ralink_eth_txintr(ralink_eth_softc_t *); | | 235 | static void ralink_eth_txintr(ralink_eth_softc_t *); |
237 | | | 236 | |
238 | /* partition functions */ | | 237 | /* partition functions */ |
239 | static int ralink_eth_enable(ralink_eth_softc_t *); | | 238 | static int ralink_eth_enable(ralink_eth_softc_t *); |
240 | static void ralink_eth_disable(ralink_eth_softc_t *); | | 239 | static void ralink_eth_disable(ralink_eth_softc_t *); |
241 | | | 240 | |
242 | /* ifnet functions */ | | 241 | /* ifnet functions */ |
243 | static int ralink_eth_init(struct ifnet *); | | 242 | static int ralink_eth_init(struct ifnet *); |
244 | static void ralink_eth_rxdrain(ralink_eth_softc_t *); | | 243 | static void ralink_eth_rxdrain(ralink_eth_softc_t *); |
245 | static void ralink_eth_stop(struct ifnet *, int); | | 244 | static void ralink_eth_stop(struct ifnet *, int); |
246 | static int ralink_eth_add_rxbuf(ralink_eth_softc_t *, int); | | 245 | static int ralink_eth_add_rxbuf(ralink_eth_softc_t *, int); |
247 | static void ralink_eth_start(struct ifnet *); | | 246 | static void ralink_eth_start(struct ifnet *); |
248 | static void ralink_eth_watchdog(struct ifnet *); | | 247 | static void ralink_eth_watchdog(struct ifnet *); |
249 | static int ralink_eth_ioctl(struct ifnet *, u_long, void *); | | 248 | static int ralink_eth_ioctl(struct ifnet *, u_long, void *); |
250 | | | 249 | |
251 | /* mii functions */ | | 250 | /* mii functions */ |
252 | #if defined(RT3050) || defined(RT3052) | | 251 | #if defined(RT3050) || defined(RT3052) |
253 | static void ralink_eth_mdio_enable(ralink_eth_softc_t *, bool); | | 252 | static void ralink_eth_mdio_enable(ralink_eth_softc_t *, bool); |
254 | #endif | | 253 | #endif |
255 | static void ralink_eth_mii_statchg(device_t); | | 254 | static void ralink_eth_mii_statchg(device_t); |
256 | static void ralink_eth_mii_tick(void *); | | 255 | static void ralink_eth_mii_tick(void *); |
257 | static int ralink_eth_mii_read(device_t, int, int); | | 256 | static int ralink_eth_mii_read(device_t, int, int); |
258 | static void ralink_eth_mii_write(device_t, int, int, int); | | 257 | static void ralink_eth_mii_write(device_t, int, int, int); |
259 | | | 258 | |
260 | CFATTACH_DECL_NEW(reth, sizeof(struct ralink_eth_softc), | | 259 | CFATTACH_DECL_NEW(reth, sizeof(struct ralink_eth_softc), |
261 | ralink_eth_match, ralink_eth_attach, ralink_eth_detach, ralink_eth_activate); | | 260 | ralink_eth_match, ralink_eth_attach, ralink_eth_detach, ralink_eth_activate); |
262 | | | 261 | |
263 | static inline uint32_t | | 262 | static inline uint32_t |
264 | sy_read(const ralink_eth_softc_t *sc, const bus_size_t off) | | 263 | sy_read(const ralink_eth_softc_t *sc, const bus_size_t off) |
265 | { | | 264 | { |
266 | return bus_space_read_4(sc->sc_memt, sc->sc_sy_memh, off); | | 265 | return bus_space_read_4(sc->sc_memt, sc->sc_sy_memh, off); |
267 | } | | 266 | } |
268 | | | 267 | |
269 | static inline void | | 268 | static inline void |
270 | sy_write(const ralink_eth_softc_t *sc, const bus_size_t off, const uint32_t val) | | 269 | sy_write(const ralink_eth_softc_t *sc, const bus_size_t off, const uint32_t val) |
271 | { | | 270 | { |
272 | bus_space_write_4(sc->sc_memt, sc->sc_sy_memh, off, val); | | 271 | bus_space_write_4(sc->sc_memt, sc->sc_sy_memh, off, val); |
273 | } | | 272 | } |
274 | | | 273 | |
275 | static inline uint32_t | | 274 | static inline uint32_t |
276 | fe_read(const ralink_eth_softc_t *sc, const bus_size_t off) | | 275 | fe_read(const ralink_eth_softc_t *sc, const bus_size_t off) |
277 | { | | 276 | { |
278 | return bus_space_read_4(sc->sc_memt, sc->sc_fe_memh, off); | | 277 | return bus_space_read_4(sc->sc_memt, sc->sc_fe_memh, off); |
279 | } | | 278 | } |
280 | | | 279 | |
281 | static inline void | | 280 | static inline void |
282 | fe_write(const ralink_eth_softc_t *sc, const bus_size_t off, const uint32_t val) | | 281 | fe_write(const ralink_eth_softc_t *sc, const bus_size_t off, const uint32_t val) |
283 | { | | 282 | { |
284 | bus_space_write_4(sc->sc_memt, sc->sc_fe_memh, off, val); | | 283 | bus_space_write_4(sc->sc_memt, sc->sc_fe_memh, off, val); |
285 | } | | 284 | } |
286 | | | 285 | |
287 | static inline uint32_t | | 286 | static inline uint32_t |
288 | sw_read(const ralink_eth_softc_t *sc, const bus_size_t off) | | 287 | sw_read(const ralink_eth_softc_t *sc, const bus_size_t off) |
289 | { | | 288 | { |
290 | return bus_space_read_4(sc->sc_memt, sc->sc_sw_memh, off); | | 289 | return bus_space_read_4(sc->sc_memt, sc->sc_sw_memh, off); |
291 | } | | 290 | } |
292 | | | 291 | |
293 | static inline void | | 292 | static inline void |
294 | sw_write(const ralink_eth_softc_t *sc, const bus_size_t off, const uint32_t val) | | 293 | sw_write(const ralink_eth_softc_t *sc, const bus_size_t off, const uint32_t val) |
295 | { | | 294 | { |
296 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, off, val); | | 295 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, off, val); |
297 | } | | 296 | } |
298 | | | 297 | |
299 | /* | | 298 | /* |
300 | * ralink_eth_match | | 299 | * ralink_eth_match |
301 | */ | | 300 | */ |
302 | int | | 301 | int |
303 | ralink_eth_match(device_t parent, cfdata_t cf, void *aux) | | 302 | ralink_eth_match(device_t parent, cfdata_t cf, void *aux) |
304 | { | | 303 | { |
305 | return 1; | | 304 | return 1; |
306 | } | | 305 | } |
307 | | | 306 | |
308 | /* | | 307 | /* |
309 | * ralink_eth_attach | | 308 | * ralink_eth_attach |
310 | */ | | 309 | */ |
311 | void | | 310 | void |
312 | ralink_eth_attach(device_t parent, device_t self, void *aux) | | 311 | ralink_eth_attach(device_t parent, device_t self, void *aux) |
313 | { | | 312 | { |
314 | ralink_eth_softc_t * const sc = device_private(self); | | 313 | ralink_eth_softc_t * const sc = device_private(self); |
315 | const struct mainbus_attach_args *ma = aux; | | 314 | const struct mainbus_attach_args *ma = aux; |
316 | int error; | | 315 | int error; |
317 | int i; | | 316 | int i; |
318 | | | 317 | |
319 | aprint_naive(": Ralink Ethernet\n"); | | 318 | aprint_naive(": Ralink Ethernet\n"); |
320 | aprint_normal(": Ralink Ethernet\n"); | | 319 | aprint_normal(": Ralink Ethernet\n"); |
321 | | | 320 | |
322 | evcnt_attach_dynamic(&sc->sc_evcnt_spurious_intr, EVCNT_TYPE_INTR, NULL, | | 321 | evcnt_attach_dynamic(&sc->sc_evcnt_spurious_intr, EVCNT_TYPE_INTR, NULL, |
323 | device_xname(self), "spurious intr"); | | 322 | device_xname(self), "spurious intr"); |
324 | evcnt_attach_dynamic(&sc->sc_evcnt_rxintr, EVCNT_TYPE_INTR, NULL, | | 323 | evcnt_attach_dynamic(&sc->sc_evcnt_rxintr, EVCNT_TYPE_INTR, NULL, |
325 | device_xname(self), "rxintr"); | | 324 | device_xname(self), "rxintr"); |
326 | evcnt_attach_dynamic(&sc->sc_evcnt_rxintr_skip_len, | | 325 | evcnt_attach_dynamic(&sc->sc_evcnt_rxintr_skip_len, |
327 | EVCNT_TYPE_INTR, &sc->sc_evcnt_rxintr, | | 326 | EVCNT_TYPE_INTR, &sc->sc_evcnt_rxintr, |
328 | device_xname(self), "rxintr skip: no room for VLAN header"); | | 327 | device_xname(self), "rxintr skip: no room for VLAN header"); |
329 | evcnt_attach_dynamic(&sc->sc_evcnt_rxintr_skip_tag_none, | | 328 | evcnt_attach_dynamic(&sc->sc_evcnt_rxintr_skip_tag_none, |
330 | EVCNT_TYPE_INTR, &sc->sc_evcnt_rxintr, | | 329 | EVCNT_TYPE_INTR, &sc->sc_evcnt_rxintr, |
331 | device_xname(self), "rxintr skip: no VLAN tag"); | | 330 | device_xname(self), "rxintr skip: no VLAN tag"); |
332 | evcnt_attach_dynamic(&sc->sc_evcnt_rxintr_skip_tag_inval, | | 331 | evcnt_attach_dynamic(&sc->sc_evcnt_rxintr_skip_tag_inval, |
333 | EVCNT_TYPE_INTR, &sc->sc_evcnt_rxintr, | | 332 | EVCNT_TYPE_INTR, &sc->sc_evcnt_rxintr, |
334 | device_xname(self), "rxintr skip: invalid VLAN tag"); | | 333 | device_xname(self), "rxintr skip: invalid VLAN tag"); |
335 | evcnt_attach_dynamic(&sc->sc_evcnt_rxintr_skip_inact, | | 334 | evcnt_attach_dynamic(&sc->sc_evcnt_rxintr_skip_inact, |
336 | EVCNT_TYPE_INTR, &sc->sc_evcnt_rxintr, | | 335 | EVCNT_TYPE_INTR, &sc->sc_evcnt_rxintr, |
337 | device_xname(self), "rxintr skip: partition inactive"); | | 336 | device_xname(self), "rxintr skip: partition inactive"); |
338 | evcnt_attach_dynamic(&sc->sc_evcnt_txintr, EVCNT_TYPE_INTR, NULL, | | 337 | evcnt_attach_dynamic(&sc->sc_evcnt_txintr, EVCNT_TYPE_INTR, NULL, |
339 | device_xname(self), "txintr"); | | 338 | device_xname(self), "txintr"); |
340 | evcnt_attach_dynamic(&sc->sc_evcnt_input, EVCNT_TYPE_INTR, NULL, | | 339 | evcnt_attach_dynamic(&sc->sc_evcnt_input, EVCNT_TYPE_INTR, NULL, |
341 | device_xname(self), "input"); | | 340 | device_xname(self), "input"); |
342 | evcnt_attach_dynamic(&sc->sc_evcnt_output, EVCNT_TYPE_INTR, NULL, | | 341 | evcnt_attach_dynamic(&sc->sc_evcnt_output, EVCNT_TYPE_INTR, NULL, |
343 | device_xname(self), "output"); | | 342 | device_xname(self), "output"); |
344 | evcnt_attach_dynamic(&sc->sc_evcnt_watchdog, EVCNT_TYPE_INTR, NULL, | | 343 | evcnt_attach_dynamic(&sc->sc_evcnt_watchdog, EVCNT_TYPE_INTR, NULL, |
345 | device_xname(self), "watchdog"); | | 344 | device_xname(self), "watchdog"); |
346 | evcnt_attach_dynamic(&sc->sc_evcnt_wd_tx, | | 345 | evcnt_attach_dynamic(&sc->sc_evcnt_wd_tx, |
347 | EVCNT_TYPE_INTR, &sc->sc_evcnt_watchdog, | | 346 | EVCNT_TYPE_INTR, &sc->sc_evcnt_watchdog, |
348 | device_xname(self), "watchdog TX timeout"); | | 347 | device_xname(self), "watchdog TX timeout"); |
349 | evcnt_attach_dynamic(&sc->sc_evcnt_wd_spurious, | | 348 | evcnt_attach_dynamic(&sc->sc_evcnt_wd_spurious, |
350 | EVCNT_TYPE_INTR, &sc->sc_evcnt_watchdog, | | 349 | EVCNT_TYPE_INTR, &sc->sc_evcnt_watchdog, |
351 | device_xname(self), "watchdog spurious"); | | 350 | device_xname(self), "watchdog spurious"); |
352 | evcnt_attach_dynamic(&sc->sc_evcnt_wd_reactivate, | | 351 | evcnt_attach_dynamic(&sc->sc_evcnt_wd_reactivate, |
353 | EVCNT_TYPE_INTR, &sc->sc_evcnt_watchdog, | | 352 | EVCNT_TYPE_INTR, &sc->sc_evcnt_watchdog, |
354 | device_xname(self), "watchdog reactivate"); | | 353 | device_xname(self), "watchdog reactivate"); |
355 | evcnt_attach_dynamic(&sc->sc_evcnt_add_rxbuf_hdr_fail, | | 354 | evcnt_attach_dynamic(&sc->sc_evcnt_add_rxbuf_hdr_fail, |
356 | EVCNT_TYPE_INTR, NULL, | | 355 | EVCNT_TYPE_INTR, NULL, |
357 | device_xname(self), "add rxbuf hdr fail"); | | 356 | device_xname(self), "add rxbuf hdr fail"); |
358 | evcnt_attach_dynamic(&sc->sc_evcnt_add_rxbuf_mcl_fail, | | 357 | evcnt_attach_dynamic(&sc->sc_evcnt_add_rxbuf_mcl_fail, |
359 | EVCNT_TYPE_INTR, NULL, | | 358 | EVCNT_TYPE_INTR, NULL, |
360 | device_xname(self), "add rxbuf mcl fail"); | | 359 | device_xname(self), "add rxbuf mcl fail"); |
361 | | | 360 | |
362 | /* | | 361 | /* |
363 | * In order to obtain unique initial Ethernet address on a host, | | 362 | * In order to obtain unique initial Ethernet address on a host, |
364 | * do some randomisation using the current uptime. It's not meant | | 363 | * do some randomisation using the current uptime. It's not meant |
365 | * for anything but avoiding hard-coding an address. | | 364 | * for anything but avoiding hard-coding an address. |
366 | */ | | 365 | */ |
367 | uint8_t enaddr[ETHER_ADDR_LEN] = { 0x00, 0x30, 0x44, 0x00, 0x00, 0x00 }; | | 366 | uint8_t enaddr[ETHER_ADDR_LEN] = { 0x00, 0x30, 0x44, 0x00, 0x00, 0x00 }; |
368 | | | 367 | |
369 | sc->sc_dev = self; | | 368 | sc->sc_dev = self; |
370 | sc->sc_dmat = ma->ma_dmat; | | 369 | sc->sc_dmat = ma->ma_dmat; |
371 | sc->sc_memt = ma->ma_memt; | | 370 | sc->sc_memt = ma->ma_memt; |
372 | sc->sc_sy_size = 0x10000; | | 371 | sc->sc_sy_size = 0x10000; |
373 | sc->sc_fe_size = 0x10000; | | 372 | sc->sc_fe_size = 0x10000; |
374 | sc->sc_sw_size = 0x08000; | | 373 | sc->sc_sw_size = 0x08000; |
375 | | | 374 | |
376 | /* | | 375 | /* |
377 | * map the registers | | 376 | * map the registers |
378 | * | | 377 | * |
379 | * we map the Sysctl, Frame Engine and Ether Switch registers | | 378 | * we map the Sysctl, Frame Engine and Ether Switch registers |
380 | * seperately so we can use the defined register offsets sanely | | 379 | * seperately so we can use the defined register offsets sanely |
381 | */ | | 380 | */ |
382 | if ((error = bus_space_map(sc->sc_memt, RA_SYSCTL_BASE, | | 381 | if ((error = bus_space_map(sc->sc_memt, RA_SYSCTL_BASE, |
383 | sc->sc_sy_size, 0, &sc->sc_sy_memh)) != 0) { | | 382 | sc->sc_sy_size, 0, &sc->sc_sy_memh)) != 0) { |
384 | aprint_error_dev(self, "unable to map Sysctl registers, " | | 383 | aprint_error_dev(self, "unable to map Sysctl registers, " |
385 | "error=%d\n", error); | | 384 | "error=%d\n", error); |
386 | goto fail_0a; | | 385 | goto fail_0a; |
387 | } | | 386 | } |
388 | if ((error = bus_space_map(sc->sc_memt, RA_FRAME_ENGINE_BASE, | | 387 | if ((error = bus_space_map(sc->sc_memt, RA_FRAME_ENGINE_BASE, |
389 | sc->sc_fe_size, 0, &sc->sc_fe_memh)) != 0) { | | 388 | sc->sc_fe_size, 0, &sc->sc_fe_memh)) != 0) { |
390 | aprint_error_dev(self, "unable to map Frame Engine registers, " | | 389 | aprint_error_dev(self, "unable to map Frame Engine registers, " |
391 | "error=%d\n", error); | | 390 | "error=%d\n", error); |
392 | goto fail_0b; | | 391 | goto fail_0b; |
393 | } | | 392 | } |
394 | if ((error = bus_space_map(sc->sc_memt, RA_ETH_SW_BASE, | | 393 | if ((error = bus_space_map(sc->sc_memt, RA_ETH_SW_BASE, |
395 | sc->sc_sw_size, 0, &sc->sc_sw_memh)) != 0) { | | 394 | sc->sc_sw_size, 0, &sc->sc_sw_memh)) != 0) { |
396 | aprint_error_dev(self, "unable to map Ether Switch registers, " | | 395 | aprint_error_dev(self, "unable to map Ether Switch registers, " |
397 | "error=%d\n", error); | | 396 | "error=%d\n", error); |
398 | goto fail_0c; | | 397 | goto fail_0c; |
399 | } | | 398 | } |
400 | | | 399 | |
401 | /* Allocate desc structures, and create & load the DMA map for them */ | | 400 | /* Allocate desc structures, and create & load the DMA map for them */ |
402 | if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct ralink_descs), | | 401 | if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct ralink_descs), |
403 | PAGE_SIZE, 0, &sc->sc_dseg, 1, &sc->sc_ndseg, 0)) != 0) { | | 402 | PAGE_SIZE, 0, &sc->sc_dseg, 1, &sc->sc_ndseg, 0)) != 0) { |
404 | aprint_error_dev(self, "unable to allocate transmit descs, " | | 403 | aprint_error_dev(self, "unable to allocate transmit descs, " |
405 | "error=%d\n", error); | | 404 | "error=%d\n", error); |
406 | goto fail_1; | | 405 | goto fail_1; |
407 | } | | 406 | } |
408 | | | 407 | |
409 | if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_ndseg, | | 408 | if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_ndseg, |
410 | sizeof(struct ralink_descs), (void **)&sc->sc_descs, BUS_DMA_COHERENT)) | | 409 | sizeof(struct ralink_descs), (void **)&sc->sc_descs, BUS_DMA_COHERENT)) |
411 | != 0) { | | 410 | != 0) { |
412 | aprint_error_dev(self, "unable to map control data, " | | 411 | aprint_error_dev(self, "unable to map control data, " |
413 | "error=%d\n", error); | | 412 | "error=%d\n", error); |
414 | goto fail_2; | | 413 | goto fail_2; |
415 | } | | 414 | } |
416 | | | 415 | |
417 | if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct ralink_descs), 1, | | 416 | if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct ralink_descs), 1, |
418 | sizeof(struct ralink_descs), 0, 0, &sc->sc_pdmamap)) != 0) { | | 417 | sizeof(struct ralink_descs), 0, 0, &sc->sc_pdmamap)) != 0) { |
419 | aprint_error_dev(self, "unable to create control data DMA map, " | | 418 | aprint_error_dev(self, "unable to create control data DMA map, " |
420 | "error=%d\n", error); | | 419 | "error=%d\n", error); |
421 | goto fail_3; | | 420 | goto fail_3; |
422 | } | | 421 | } |
423 | | | 422 | |
424 | if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_pdmamap, sc->sc_descs, | | 423 | if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_pdmamap, sc->sc_descs, |
425 | sizeof(struct ralink_descs), NULL, 0)) != 0) { | | 424 | sizeof(struct ralink_descs), NULL, 0)) != 0) { |
426 | aprint_error_dev(self, "unable to load control data DMA map, " | | 425 | aprint_error_dev(self, "unable to load control data DMA map, " |
427 | "error=%d\n", error); | | 426 | "error=%d\n", error); |
428 | goto fail_4; | | 427 | goto fail_4; |
429 | } | | 428 | } |
430 | | | 429 | |
431 | /* Create the transmit buffer DMA maps. */ | | 430 | /* Create the transmit buffer DMA maps. */ |
432 | for (i = 0; i < RALINK_ETH_NUM_TX_DESC; i++) { | | 431 | for (i = 0; i < RALINK_ETH_NUM_TX_DESC; i++) { |
433 | if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, | | 432 | if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, |
434 | RALINK_ETH_MAX_TX_SEGS, MCLBYTES, 0, 0, | | 433 | RALINK_ETH_MAX_TX_SEGS, MCLBYTES, 0, 0, |
435 | &sc->sc_txstate[i].txs_dmamap)) != 0) { | | 434 | &sc->sc_txstate[i].txs_dmamap)) != 0) { |
436 | aprint_error_dev(self, "unable to create tx DMA map %d, " | | 435 | aprint_error_dev(self, "unable to create tx DMA map %d, " |
437 | "error=%d\n", i, error); | | 436 | "error=%d\n", i, error); |
438 | goto fail_5; | | 437 | goto fail_5; |
439 | } | | 438 | } |
440 | } | | 439 | } |
441 | | | 440 | |
442 | /* Create the receive buffer DMA maps. */ | | 441 | /* Create the receive buffer DMA maps. */ |
443 | for (i = 0; i < RALINK_ETH_NUM_RX_DESC; i++) { | | 442 | for (i = 0; i < RALINK_ETH_NUM_RX_DESC; i++) { |
444 | if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, | | 443 | if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, |
445 | MCLBYTES, 0, 0, &sc->sc_rxstate[i].rxs_dmamap)) != 0) { | | 444 | MCLBYTES, 0, 0, &sc->sc_rxstate[i].rxs_dmamap)) != 0) { |
446 | aprint_error_dev(self, "unable to create rx DMA map %d, " | | 445 | aprint_error_dev(self, "unable to create rx DMA map %d, " |
447 | "error=%d\n", i, error); | | 446 | "error=%d\n", i, error); |
448 | goto fail_6; | | 447 | goto fail_6; |
449 | } | | 448 | } |
450 | sc->sc_rxstate[i].rxs_mbuf = NULL; | | 449 | sc->sc_rxstate[i].rxs_mbuf = NULL; |
451 | } | | 450 | } |
452 | | | 451 | |
453 | /* this is a zero buffer used for zero'ing out short packets */ | | 452 | /* this is a zero buffer used for zero'ing out short packets */ |
454 | memset(sc->ralink_zero_buf, 0, RALINK_MIN_BUF); | | 453 | memset(sc->ralink_zero_buf, 0, RALINK_MIN_BUF); |
455 | | | 454 | |
456 | /* setup some address in hardware */ | | 455 | /* setup some address in hardware */ |
457 | fe_write(sc, RA_FE_GDMA1_MAC_LSB, | | 456 | fe_write(sc, RA_FE_GDMA1_MAC_LSB, |
458 | (enaddr[5] | (enaddr[4] << 8) | | | 457 | (enaddr[5] | (enaddr[4] << 8) | |
459 | (enaddr[3] << 16) | (enaddr[2] << 24))); | | 458 | (enaddr[3] << 16) | (enaddr[2] << 24))); |
460 | fe_write(sc, RA_FE_GDMA1_MAC_MSB, | | 459 | fe_write(sc, RA_FE_GDMA1_MAC_MSB, |
461 | (enaddr[1] | (enaddr[0] << 8))); | | 460 | (enaddr[1] | (enaddr[0] << 8))); |
462 | | | 461 | |
463 | /* | | 462 | /* |
464 | * iterate through ports | | 463 | * iterate through ports |
465 | * slickrock must use specific non-linear sequence | | 464 | * slickrock must use specific non-linear sequence |
466 | * others are linear | | 465 | * others are linear |
467 | */ | | 466 | */ |
468 | struct ifnet * const ifp = &sc->sc_ethercom.ec_if; | | 467 | struct ifnet * const ifp = &sc->sc_ethercom.ec_if; |
469 | | | 468 | |
470 | strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ); | | 469 | strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ); |
471 | | | 470 | |
472 | /* | | 471 | /* |
473 | * Initialize our media structures. | | 472 | * Initialize our media structures. |
474 | * This may probe the PHY, if present. | | 473 | * This may probe the PHY, if present. |
475 | */ | | 474 | */ |
476 | sc->sc_mii.mii_ifp = ifp; | | 475 | sc->sc_mii.mii_ifp = ifp; |
477 | sc->sc_mii.mii_readreg = ralink_eth_mii_read; | | 476 | sc->sc_mii.mii_readreg = ralink_eth_mii_read; |
478 | sc->sc_mii.mii_writereg = ralink_eth_mii_write; | | 477 | sc->sc_mii.mii_writereg = ralink_eth_mii_write; |
479 | sc->sc_mii.mii_statchg = ralink_eth_mii_statchg; | | 478 | sc->sc_mii.mii_statchg = ralink_eth_mii_statchg; |
480 | sc->sc_ethercom.ec_mii = &sc->sc_mii; | | 479 | sc->sc_ethercom.ec_mii = &sc->sc_mii; |
481 | ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange, | | 480 | ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange, |
482 | ether_mediastatus); | | 481 | ether_mediastatus); |
483 | mii_attach(sc->sc_dev, &sc->sc_mii, ~0, i, MII_OFFSET_ANY, | | 482 | mii_attach(sc->sc_dev, &sc->sc_mii, ~0, i, MII_OFFSET_ANY, |
484 | MIIF_FORCEANEG|MIIF_DOPAUSE|MIIF_NOISOLATE); | | 483 | MIIF_FORCEANEG|MIIF_DOPAUSE|MIIF_NOISOLATE); |
485 | | | 484 | |
486 | if (LIST_EMPTY(&sc->sc_mii.mii_phys)) { | | 485 | if (LIST_EMPTY(&sc->sc_mii.mii_phys)) { |
487 | #if 1 | | 486 | #if 1 |
488 | ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_1000_T| | | 487 | ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_1000_T| |
489 | IFM_FDX|IFM_ETH_RXPAUSE|IFM_ETH_TXPAUSE, 0, NULL); | | 488 | IFM_FDX|IFM_ETH_RXPAUSE|IFM_ETH_TXPAUSE, 0, NULL); |
490 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_1000_T| | | 489 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_1000_T| |
491 | IFM_FDX|IFM_ETH_RXPAUSE|IFM_ETH_TXPAUSE); | | 490 | IFM_FDX|IFM_ETH_RXPAUSE|IFM_ETH_TXPAUSE); |
492 | #else | | 491 | #else |
493 | ifmedia_add(&sc->sc_mii.mii_media, | | 492 | ifmedia_add(&sc->sc_mii.mii_media, |
494 | IFM_ETHER|IFM_MANUAL, 0, NULL); | | 493 | IFM_ETHER|IFM_MANUAL, 0, NULL); |
495 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL); | | 494 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL); |
496 | #endif | | 495 | #endif |
497 | } else { | | 496 | } else { |
498 | /* Ensure we mask ok for the switch multiple phy's */ | | 497 | /* Ensure we mask ok for the switch multiple phy's */ |
499 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); | | 498 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); |
500 | } | | 499 | } |
501 | | | 500 | |
502 | ifp->if_softc = sc; | | 501 | ifp->if_softc = sc; |
503 | ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; | | 502 | ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; |
504 | ifp->if_init = ralink_eth_init; | | 503 | ifp->if_init = ralink_eth_init; |
505 | ifp->if_start = ralink_eth_start; | | 504 | ifp->if_start = ralink_eth_start; |
506 | ifp->if_ioctl = ralink_eth_ioctl; | | 505 | ifp->if_ioctl = ralink_eth_ioctl; |
507 | ifp->if_stop = ralink_eth_stop; | | 506 | ifp->if_stop = ralink_eth_stop; |
508 | ifp->if_watchdog = ralink_eth_watchdog; | | 507 | ifp->if_watchdog = ralink_eth_watchdog; |
509 | IFQ_SET_READY(&ifp->if_snd); | | 508 | IFQ_SET_READY(&ifp->if_snd); |
510 | | | 509 | |
511 | /* We can support 802.1Q VLAN-sized frames. */ | | 510 | /* We can support 802.1Q VLAN-sized frames. */ |
512 | sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; | | 511 | sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; |
513 | | | 512 | |
514 | /* We support IPV4 CRC Offload */ | | 513 | /* We support IPV4 CRC Offload */ |
515 | ifp->if_capabilities |= | | 514 | ifp->if_capabilities |= |
516 | (IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | | | 515 | (IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | |
517 | IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | | | 516 | IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | |
518 | IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx); | | 517 | IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx); |
519 | | | 518 | |
520 | /* Attach the interface. */ | | 519 | /* Attach the interface. */ |
521 | if_attach(ifp); | | 520 | if_attach(ifp); |
522 | ether_ifattach(ifp, enaddr); | | 521 | ether_ifattach(ifp, enaddr); |
523 | | | 522 | |
524 | /* init our mii ticker */ | | 523 | /* init our mii ticker */ |
525 | callout_init(&sc->sc_tick_callout, 0); | | 524 | callout_init(&sc->sc_tick_callout, 0); |
526 | callout_reset(&sc->sc_tick_callout, hz, ralink_eth_mii_tick, sc); | | 525 | callout_reset(&sc->sc_tick_callout, hz, ralink_eth_mii_tick, sc); |
527 | | | 526 | |
528 | return; | | 527 | return; |
529 | | | 528 | |
530 | /* | | 529 | /* |
531 | * Free any resources we've allocated during the failed attach | | 530 | * Free any resources we've allocated during the failed attach |
532 | * attempt. Do this in reverse order and fall through. | | 531 | * attempt. Do this in reverse order and fall through. |
533 | */ | | 532 | */ |
534 | fail_6: | | 533 | fail_6: |
535 | for (i = 0; i < RALINK_ETH_NUM_RX_DESC; i++) { | | 534 | for (i = 0; i < RALINK_ETH_NUM_RX_DESC; i++) { |
536 | if (sc->sc_rxstate[i].rxs_dmamap != NULL) | | 535 | if (sc->sc_rxstate[i].rxs_dmamap != NULL) |
537 | bus_dmamap_destroy(sc->sc_dmat, | | 536 | bus_dmamap_destroy(sc->sc_dmat, |
538 | sc->sc_rxstate[i].rxs_dmamap); | | 537 | sc->sc_rxstate[i].rxs_dmamap); |
539 | } | | 538 | } |
540 | fail_5: | | 539 | fail_5: |
541 | for (i = 0; i < RALINK_ETH_NUM_TX_DESC; i++) { | | 540 | for (i = 0; i < RALINK_ETH_NUM_TX_DESC; i++) { |
542 | if (sc->sc_txstate[i].txs_dmamap != NULL) | | 541 | if (sc->sc_txstate[i].txs_dmamap != NULL) |
543 | bus_dmamap_destroy(sc->sc_dmat, | | 542 | bus_dmamap_destroy(sc->sc_dmat, |
544 | sc->sc_txstate[i].txs_dmamap); | | 543 | sc->sc_txstate[i].txs_dmamap); |
545 | } | | 544 | } |
546 | bus_dmamap_unload(sc->sc_dmat, sc->sc_pdmamap); | | 545 | bus_dmamap_unload(sc->sc_dmat, sc->sc_pdmamap); |
547 | fail_4: | | 546 | fail_4: |
548 | bus_dmamap_destroy(sc->sc_dmat, sc->sc_pdmamap); | | 547 | bus_dmamap_destroy(sc->sc_dmat, sc->sc_pdmamap); |
549 | fail_3: | | 548 | fail_3: |
550 | bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_descs, | | 549 | bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_descs, |
551 | sizeof(struct ralink_descs)); | | 550 | sizeof(struct ralink_descs)); |
552 | fail_2: | | 551 | fail_2: |
553 | bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_ndseg); | | 552 | bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_ndseg); |
554 | fail_1: | | 553 | fail_1: |
555 | bus_space_unmap(sc->sc_memt, sc->sc_sw_memh, sc->sc_sw_size); | | 554 | bus_space_unmap(sc->sc_memt, sc->sc_sw_memh, sc->sc_sw_size); |
556 | fail_0c: | | 555 | fail_0c: |
557 | bus_space_unmap(sc->sc_memt, sc->sc_fe_memh, sc->sc_fe_size); | | 556 | bus_space_unmap(sc->sc_memt, sc->sc_fe_memh, sc->sc_fe_size); |
558 | fail_0b: | | 557 | fail_0b: |
559 | bus_space_unmap(sc->sc_memt, sc->sc_sy_memh, sc->sc_fe_size); | | 558 | bus_space_unmap(sc->sc_memt, sc->sc_sy_memh, sc->sc_fe_size); |
560 | fail_0a: | | 559 | fail_0a: |
561 | return; | | 560 | return; |
562 | } | | 561 | } |
563 | | | 562 | |
564 | /* | | 563 | /* |
565 | * ralink_eth_activate: | | 564 | * ralink_eth_activate: |
566 | * | | 565 | * |
567 | * Handle device activation/deactivation requests. | | 566 | * Handle device activation/deactivation requests. |
568 | */ | | 567 | */ |
569 | int | | 568 | int |
570 | ralink_eth_activate(device_t self, enum devact act) | | 569 | ralink_eth_activate(device_t self, enum devact act) |
571 | { | | 570 | { |
572 | ralink_eth_softc_t * const sc = device_private(self); | | 571 | ralink_eth_softc_t * const sc = device_private(self); |
573 | int error = 0; | | 572 | int error = 0; |
574 | int s; | | 573 | int s; |
575 | | | 574 | |
576 | s = splnet(); | | 575 | s = splnet(); |
577 | switch (act) { | | 576 | switch (act) { |
578 | case DVACT_DEACTIVATE: | | 577 | case DVACT_DEACTIVATE: |
579 | if_deactivate(&sc->sc_ethercom.ec_if); | | 578 | if_deactivate(&sc->sc_ethercom.ec_if); |
580 | break; | | 579 | break; |
581 | } | | 580 | } |
582 | splx(s); | | 581 | splx(s); |
583 | | | 582 | |
584 | return error; | | 583 | return error; |
585 | } | | 584 | } |
586 | | | 585 | |
587 | /* | | 586 | /* |
588 | * ralink_eth_partition_enable | | 587 | * ralink_eth_partition_enable |
589 | */ | | 588 | */ |
590 | static int | | 589 | static int |
591 | ralink_eth_enable(ralink_eth_softc_t *sc) | | 590 | ralink_eth_enable(ralink_eth_softc_t *sc) |
592 | { | | 591 | { |
593 | RALINK_DEBUG_FUNC_ENTRY(); | | 592 | RALINK_DEBUG_FUNC_ENTRY(); |
594 | | | 593 | |
595 | if (sc->sc_ih != NULL) { | | 594 | if (sc->sc_ih != NULL) { |
596 | RALINK_DEBUG(RALINK_DEBUG_MISC, "%s() already active", | | 595 | RALINK_DEBUG(RALINK_DEBUG_MISC, "%s() already active", |
597 | __func__); | | 596 | __func__); |
598 | return EALREADY; | | 597 | return EALREADY; |
599 | } | | 598 | } |
600 | | | 599 | |
601 | sc->sc_pending_tx = 0; | | 600 | sc->sc_pending_tx = 0; |
602 | | | 601 | |
603 | int s = splnet(); | | 602 | int s = splnet(); |
604 | ralink_eth_hw_init(sc); | | 603 | ralink_eth_hw_init(sc); |
605 | sc->sc_ih = ra_intr_establish(RA_IRQ_FENGINE, | | 604 | sc->sc_ih = ra_intr_establish(RA_IRQ_FENGINE, |
606 | ralink_eth_intr, sc, 1); | | 605 | ralink_eth_intr, sc, 1); |
607 | splx(s); | | 606 | splx(s); |
608 | if (sc->sc_ih == NULL) { | | 607 | if (sc->sc_ih == NULL) { |
609 | RALINK_DEBUG(RALINK_DEBUG_ERROR, | | 608 | RALINK_DEBUG(RALINK_DEBUG_ERROR, |
610 | "%s: unable to establish interrupt\n", | | 609 | "%s: unable to establish interrupt\n", |
611 | device_xname(sc->sc_dev)); | | 610 | device_xname(sc->sc_dev)); |
612 | return EIO; | | 611 | return EIO; |
613 | } | | 612 | } |
614 | | | 613 | |
615 | return 0; | | 614 | return 0; |
616 | } | | 615 | } |
617 | | | 616 | |
618 | /* | | 617 | /* |
619 | * ralink_eth_partition_disable | | 618 | * ralink_eth_partition_disable |
620 | */ | | 619 | */ |
621 | static void | | 620 | static void |
622 | ralink_eth_disable(ralink_eth_softc_t *sc) | | 621 | ralink_eth_disable(ralink_eth_softc_t *sc) |
623 | { | | 622 | { |
624 | RALINK_DEBUG_FUNC_ENTRY(); | | 623 | RALINK_DEBUG_FUNC_ENTRY(); |
625 | | | 624 | |
626 | int s = splnet(); | | 625 | int s = splnet(); |
627 | ralink_eth_rxdrain(sc); | | 626 | ralink_eth_rxdrain(sc); |
628 | ra_intr_disestablish(sc->sc_ih); | | 627 | ra_intr_disestablish(sc->sc_ih); |
629 | sc->sc_ih = NULL; | | 628 | sc->sc_ih = NULL; |
630 | | | 629 | |
631 | /* stop the mii ticker */ | | 630 | /* stop the mii ticker */ |
632 | callout_stop(&sc->sc_tick_callout); | | 631 | callout_stop(&sc->sc_tick_callout); |
633 | | | 632 | |
634 | /* quiesce the block */ | | 633 | /* quiesce the block */ |
635 | ralink_eth_reset(sc); | | 634 | ralink_eth_reset(sc); |
636 | splx(s); | | 635 | splx(s); |
637 | } | | 636 | } |
638 | | | 637 | |
639 | /* | | 638 | /* |
640 | * ralink_eth_detach | | 639 | * ralink_eth_detach |
641 | */ | | 640 | */ |
642 | static int | | 641 | static int |
643 | ralink_eth_detach(device_t self, int flags) | | 642 | ralink_eth_detach(device_t self, int flags) |
644 | { | | 643 | { |
645 | RALINK_DEBUG_FUNC_ENTRY(); | | 644 | RALINK_DEBUG_FUNC_ENTRY(); |
646 | ralink_eth_softc_t * const sc = device_private(self); | | 645 | ralink_eth_softc_t * const sc = device_private(self); |
647 | struct ifnet * const ifp = &sc->sc_ethercom.ec_if; | | 646 | struct ifnet * const ifp = &sc->sc_ethercom.ec_if; |
648 | struct ralink_eth_rxstate *rxs; | | 647 | struct ralink_eth_rxstate *rxs; |
649 | struct ralink_eth_txstate *txs; | | 648 | struct ralink_eth_txstate *txs; |
650 | int i; | | 649 | int i; |
651 | | | 650 | |
652 | ralink_eth_disable(sc); | | 651 | ralink_eth_disable(sc); |
653 | mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY); | | 652 | mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY); |
654 | ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY); | | 653 | ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY); |
655 | ether_ifdetach(ifp); | | 654 | ether_ifdetach(ifp); |
656 | if_detach(ifp); | | 655 | if_detach(ifp); |
657 | | | 656 | |
658 | for (i = 0; i < RALINK_ETH_NUM_RX_DESC; i++) { | | 657 | for (i = 0; i < RALINK_ETH_NUM_RX_DESC; i++) { |
659 | rxs = &sc->sc_rxstate[i]; | | 658 | rxs = &sc->sc_rxstate[i]; |
660 | if (rxs->rxs_mbuf != NULL) { | | 659 | if (rxs->rxs_mbuf != NULL) { |
661 | bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); | | 660 | bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); |
662 | m_freem(rxs->rxs_mbuf); | | 661 | m_freem(rxs->rxs_mbuf); |
663 | rxs->rxs_mbuf = NULL; | | 662 | rxs->rxs_mbuf = NULL; |
664 | } | | 663 | } |
665 | bus_dmamap_destroy(sc->sc_dmat, rxs->rxs_dmamap); | | 664 | bus_dmamap_destroy(sc->sc_dmat, rxs->rxs_dmamap); |
666 | } | | 665 | } |
667 | | | 666 | |
668 | for (i = 0; i < RALINK_ETH_NUM_TX_DESC; i++) { | | 667 | for (i = 0; i < RALINK_ETH_NUM_TX_DESC; i++) { |
669 | txs = &sc->sc_txstate[i]; | | 668 | txs = &sc->sc_txstate[i]; |
670 | if (txs->txs_mbuf != NULL) { | | 669 | if (txs->txs_mbuf != NULL) { |
671 | bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); | | 670 | bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); |
672 | m_freem(txs->txs_mbuf); | | 671 | m_freem(txs->txs_mbuf); |
673 | txs->txs_mbuf = NULL; | | 672 | txs->txs_mbuf = NULL; |
674 | } | | 673 | } |
675 | bus_dmamap_destroy(sc->sc_dmat, txs->txs_dmamap); | | 674 | bus_dmamap_destroy(sc->sc_dmat, txs->txs_dmamap); |
676 | } | | 675 | } |
677 | | | 676 | |
678 | bus_dmamap_unload(sc->sc_dmat, sc->sc_pdmamap); | | 677 | bus_dmamap_unload(sc->sc_dmat, sc->sc_pdmamap); |
679 | bus_dmamap_destroy(sc->sc_dmat, sc->sc_pdmamap); | | 678 | bus_dmamap_destroy(sc->sc_dmat, sc->sc_pdmamap); |
680 | bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_descs, | | 679 | bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_descs, |
681 | sizeof(struct ralink_descs)); | | 680 | sizeof(struct ralink_descs)); |
682 | bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_ndseg); | | 681 | bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_ndseg); |
683 | | | 682 | |
684 | bus_space_unmap(sc->sc_memt, sc->sc_sw_memh, sc->sc_sw_size); | | 683 | bus_space_unmap(sc->sc_memt, sc->sc_sw_memh, sc->sc_sw_size); |
685 | bus_space_unmap(sc->sc_memt, sc->sc_fe_memh, sc->sc_fe_size); | | 684 | bus_space_unmap(sc->sc_memt, sc->sc_fe_memh, sc->sc_fe_size); |
686 | | | 685 | |
687 | return 0; | | 686 | return 0; |
688 | } | | 687 | } |
689 | | | 688 | |
690 | /* | | 689 | /* |
691 | * ralink_eth_reset | | 690 | * ralink_eth_reset |
692 | */ | | 691 | */ |
693 | static void | | 692 | static void |
694 | ralink_eth_reset(ralink_eth_softc_t *sc) | | 693 | ralink_eth_reset(ralink_eth_softc_t *sc) |
695 | { | | 694 | { |
696 | RALINK_DEBUG_FUNC_ENTRY(); | | 695 | RALINK_DEBUG_FUNC_ENTRY(); |
697 | uint32_t r; | | 696 | uint32_t r; |
698 | | | 697 | |
699 | /* Reset the frame engine */ | | 698 | /* Reset the frame engine */ |
700 | r = sy_read(sc, RA_SYSCTL_RST); | | 699 | r = sy_read(sc, RA_SYSCTL_RST); |
701 | r |= RST_FE; | | 700 | r |= RST_FE; |
702 | sy_write(sc, RA_SYSCTL_RST, r); | | 701 | sy_write(sc, RA_SYSCTL_RST, r); |
703 | r ^= RST_FE; | | 702 | r ^= RST_FE; |
704 | sy_write(sc, RA_SYSCTL_RST, r); | | 703 | sy_write(sc, RA_SYSCTL_RST, r); |
705 | | | 704 | |
706 | /* Wait until the PDMA is quiscent */ | | 705 | /* Wait until the PDMA is quiscent */ |
707 | for (;;) { | | 706 | for (;;) { |
708 | r = fe_read(sc, RA_FE_PDMA_GLOBAL_CFG); | | 707 | r = fe_read(sc, RA_FE_PDMA_GLOBAL_CFG); |
709 | if (r & FE_PDMA_GLOBAL_CFG_RX_DMA_BUSY) { | | 708 | if (r & FE_PDMA_GLOBAL_CFG_RX_DMA_BUSY) { |
710 | aprint_normal_dev(sc->sc_dev, "RX DMA BUSY\n"); | | 709 | aprint_normal_dev(sc->sc_dev, "RX DMA BUSY\n"); |
711 | continue; | | 710 | continue; |
712 | } | | 711 | } |
713 | if (r & FE_PDMA_GLOBAL_CFG_TX_DMA_BUSY) { | | 712 | if (r & FE_PDMA_GLOBAL_CFG_TX_DMA_BUSY) { |
714 | aprint_normal_dev(sc->sc_dev, "TX DMA BUSY\n"); | | 713 | aprint_normal_dev(sc->sc_dev, "TX DMA BUSY\n"); |
715 | continue; | | 714 | continue; |
716 | } | | 715 | } |
717 | break; | | 716 | break; |
718 | } | | 717 | } |
719 | } | | 718 | } |
720 | | | 719 | |
721 | /* | | 720 | /* |
722 | * ralink_eth_hw_init | | 721 | * ralink_eth_hw_init |
723 | */ | | 722 | */ |
724 | static void | | 723 | static void |
725 | ralink_eth_hw_init(ralink_eth_softc_t *sc) | | 724 | ralink_eth_hw_init(ralink_eth_softc_t *sc) |
726 | { | | 725 | { |
727 | RALINK_DEBUG_FUNC_ENTRY(); | | 726 | RALINK_DEBUG_FUNC_ENTRY(); |
728 | struct ralink_eth_txstate *txs; | | 727 | struct ralink_eth_txstate *txs; |
729 | uint32_t r; | | 728 | uint32_t r; |
730 | int i; | | 729 | int i; |
731 | | | 730 | |
732 | /* reset to a known good state */ | | 731 | /* reset to a known good state */ |
733 | ralink_eth_reset(sc); | | 732 | ralink_eth_reset(sc); |
734 | | | 733 | |
735 | #if defined(RT3050) || defined(RT3052) | | 734 | #if defined(RT3050) || defined(RT3052) |
736 | /* Bring the switch to a sane default state (from linux driver) */ | | 735 | /* Bring the switch to a sane default state (from linux driver) */ |
737 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_SGC2, | | 736 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_SGC2, |
738 | 0x00000000); | | 737 | 0x00000000); |
739 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_PFC1, | | 738 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_PFC1, |
740 | 0x00405555); /* check VLAN tag on port forward */); | | 739 | 0x00405555); /* check VLAN tag on port forward */); |
741 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_VLANI0, | | 740 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_VLANI0, |
742 | 0x00002001); | | 741 | 0x00002001); |
743 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_PVIDC0, | | 742 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_PVIDC0, |
744 | 0x00001002); | | 743 | 0x00001002); |
745 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_PVIDC1, | | 744 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_PVIDC1, |
746 | 0x00001001); | | 745 | 0x00001001); |
747 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_PVIDC2, | | 746 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_PVIDC2, |
748 | 0x00001001); | | 747 | 0x00001001); |
749 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_VMSC0, | | 748 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_VMSC0, |
750 | 0xffff417e); | | 749 | 0xffff417e); |
751 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_POC0, | | 750 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_POC0, |
752 | 0x00007f7f); | | 751 | 0x00007f7f); |
753 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_POC2, | | 752 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_POC2, |
754 | 0x00007f3f); | | 753 | 0x00007f3f); |
755 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_FTC2, | | 754 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_FTC2, |
756 | 0x00d6500c); | | 755 | 0x00d6500c); |
757 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_SWGC, | | 756 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_SWGC, |
758 | 0x0008a301); /* hashing algorithm=XOR48 */ | | 757 | 0x0008a301); /* hashing algorithm=XOR48 */ |
759 | /* aging interval=300sec */ | | 758 | /* aging interval=300sec */ |
760 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_SOCPC, | | 759 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_SOCPC, |
761 | 0x02404040); | | 760 | 0x02404040); |
762 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_FPORT, | | 761 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_FPORT, |
763 | 0x3f502b28); /* Change polling Ext PHY Addr=0x0 */ | | 762 | 0x3f502b28); /* Change polling Ext PHY Addr=0x0 */ |
764 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_FPA, | | 763 | bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_FPA, |
765 | 0x00000000); | | 764 | 0x00000000); |
766 | | | 765 | |
767 | /* do some mii magic TODO: define these registers/bits */ | | 766 | /* do some mii magic TODO: define these registers/bits */ |
768 | /* lower down PHY 10Mbps mode power */ | | 767 | /* lower down PHY 10Mbps mode power */ |
769 | /* select local register */ | | 768 | /* select local register */ |
770 | ralink_eth_mii_write(&sc->sc_dev, 0, 31, 0x8000); | | 769 | ralink_eth_mii_write(&sc->sc_dev, 0, 31, 0x8000); |
771 | | | 770 | |
772 | for (i=0;i<5;i++){ | | 771 | for (i=0;i<5;i++){ |
773 | /* set TX10 waveform coefficient */ | | 772 | /* set TX10 waveform coefficient */ |
774 | ralink_eth_mii_write(&sc->sc_dev, i, 26, 0x1601); | | 773 | ralink_eth_mii_write(&sc->sc_dev, i, 26, 0x1601); |
775 | | | 774 | |
776 | /* set TX100/TX10 AD/DA current bias */ | | 775 | /* set TX100/TX10 AD/DA current bias */ |
777 | ralink_eth_mii_write(&sc->sc_dev, i, 29, 0x7058); | | 776 | ralink_eth_mii_write(&sc->sc_dev, i, 29, 0x7058); |
778 | | | 777 | |
779 | /* set TX100 slew rate control */ | | 778 | /* set TX100 slew rate control */ |
780 | ralink_eth_mii_write(&sc->sc_dev, i, 30, 0x0018); | | 779 | ralink_eth_mii_write(&sc->sc_dev, i, 30, 0x0018); |
781 | } | | 780 | } |
782 | | | 781 | |
783 | /* PHY IOT */ | | 782 | /* PHY IOT */ |
784 | | | 783 | |
785 | /* select global register */ | | 784 | /* select global register */ |
786 | ralink_eth_mii_write(&sc->sc_dev, 0, 31, 0x0); | | 785 | ralink_eth_mii_write(&sc->sc_dev, 0, 31, 0x0); |
787 | | | 786 | |
788 | /* tune TP_IDL tail and head waveform */ | | 787 | /* tune TP_IDL tail and head waveform */ |
789 | ralink_eth_mii_write(&sc->sc_dev, 0, 22, 0x052f); | | 788 | ralink_eth_mii_write(&sc->sc_dev, 0, 22, 0x052f); |
790 | | | 789 | |
791 | /* set TX10 signal amplitude threshold to minimum */ | | 790 | /* set TX10 signal amplitude threshold to minimum */ |
792 | ralink_eth_mii_write(&sc->sc_dev, 0, 17, 0x0fe0); | | 791 | ralink_eth_mii_write(&sc->sc_dev, 0, 17, 0x0fe0); |
793 | | | 792 | |
794 | /* set squelch amplitude to higher threshold */ | | 793 | /* set squelch amplitude to higher threshold */ |
795 | ralink_eth_mii_write(&sc->sc_dev, 0, 18, 0x40ba); | | 794 | ralink_eth_mii_write(&sc->sc_dev, 0, 18, 0x40ba); |
796 | | | 795 | |
797 | /* longer TP_IDL tail length */ | | 796 | /* longer TP_IDL tail length */ |
798 | ralink_eth_mii_write(&sc->sc_dev, 0, 14, 0x65); | | 797 | ralink_eth_mii_write(&sc->sc_dev, 0, 14, 0x65); |
799 | | | 798 | |
800 | /* select local register */ | | 799 | /* select local register */ |
801 | ralink_eth_mii_write(&sc->sc_dev, 0, 31, 0x8000); | | 800 | ralink_eth_mii_write(&sc->sc_dev, 0, 31, 0x8000); |
802 | #else | | 801 | #else |
803 | /* GE1 + GigSW */ | | 802 | /* GE1 + GigSW */ |
804 | fe_write(sc, RA_FE_MDIO_CFG1, | | 803 | fe_write(sc, RA_FE_MDIO_CFG1, |
805 | MDIO_CFG_PHY_ADDR(0x1f) | | | 804 | MDIO_CFG_PHY_ADDR(0x1f) | |
806 | MDIO_CFG_BP_EN | | | 805 | MDIO_CFG_BP_EN | |
807 | MDIO_CFG_FORCE_CFG | | | 806 | MDIO_CFG_FORCE_CFG | |
808 | MDIO_CFG_SPEED(MDIO_CFG_SPEED_1000M) | | | 807 | MDIO_CFG_SPEED(MDIO_CFG_SPEED_1000M) | |
809 | MDIO_CFG_FULL_DUPLEX | | | 808 | MDIO_CFG_FULL_DUPLEX | |
810 | MDIO_CFG_FC_TX | | | 809 | MDIO_CFG_FC_TX | |
811 | MDIO_CFG_FC_RX | | | 810 | MDIO_CFG_FC_RX | |
812 | MDIO_CFG_TX_CLK_MODE(MDIO_CFG_TX_CLK_MODE_3COM)); | | 811 | MDIO_CFG_TX_CLK_MODE(MDIO_CFG_TX_CLK_MODE_3COM)); |
813 | #endif | | 812 | #endif |
814 | | | 813 | |
815 | /* | | 814 | /* |
816 | * TODO: QOS - RT3052 has 4 TX queues for QOS, | | 815 | * TODO: QOS - RT3052 has 4 TX queues for QOS, |
817 | * forgoing for 1 for simplicity | | 816 | * forgoing for 1 for simplicity |
818 | */ | | 817 | */ |
819 | | | 818 | |
820 | /* | | 819 | /* |
821 | * Allocate DMA accessible memory for TX/RX descriptor rings | | 820 | * Allocate DMA accessible memory for TX/RX descriptor rings |
822 | */ | | 821 | */ |
823 | | | 822 | |
824 | /* Initialize the TX queues. */ | | 823 | /* Initialize the TX queues. */ |
825 | SIMPLEQ_INIT(&sc->sc_txfreeq); | | 824 | SIMPLEQ_INIT(&sc->sc_txfreeq); |
826 | SIMPLEQ_INIT(&sc->sc_txdirtyq); | | 825 | SIMPLEQ_INIT(&sc->sc_txdirtyq); |
827 | | | 826 | |
828 | /* Initialize the TX descriptor ring. */ | | 827 | /* Initialize the TX descriptor ring. */ |
829 | memset(sc->sc_txdesc, 0, sizeof(sc->sc_txdesc)); | | 828 | memset(sc->sc_txdesc, 0, sizeof(sc->sc_txdesc)); |
830 | for (i = 0; i < RALINK_ETH_NUM_TX_DESC; i++) { | | 829 | for (i = 0; i < RALINK_ETH_NUM_TX_DESC; i++) { |
831 | | | 830 | |
832 | sc->sc_txdesc[i].txd_info1 = TXD_LAST0 | TXD_DDONE; | | 831 | sc->sc_txdesc[i].txd_info1 = TXD_LAST0 | TXD_DDONE; |
833 | | | 832 | |
834 | /* setup the freeq as well */ | | 833 | /* setup the freeq as well */ |
835 | txs = &sc->sc_txstate[i]; | | 834 | txs = &sc->sc_txstate[i]; |
836 | txs->txs_mbuf = NULL; | | 835 | txs->txs_mbuf = NULL; |
837 | txs->txs_idx = i; | | 836 | txs->txs_idx = i; |
838 | SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); | | 837 | SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); |
839 | } | | 838 | } |
840 | | | 839 | |
841 | /* | | 840 | /* |
842 | * Flush the TX descriptors | | 841 | * Flush the TX descriptors |
843 | * - TODO: can we just access descriptors via KSEG1 | | 842 | * - TODO: can we just access descriptors via KSEG1 |
844 | * to avoid the flush? | | 843 | * to avoid the flush? |
845 | */ | | 844 | */ |
846 | bus_dmamap_sync(sc->sc_dmat, sc->sc_pdmamap, | | 845 | bus_dmamap_sync(sc->sc_dmat, sc->sc_pdmamap, |
847 | (int)&sc->sc_txdesc - (int)sc->sc_descs, sizeof(sc->sc_txdesc), | | 846 | (int)&sc->sc_txdesc - (int)sc->sc_descs, sizeof(sc->sc_txdesc), |
848 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); | | 847 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
849 | | | 848 | |
850 | /* Initialize the RX descriptor ring */ | | 849 | /* Initialize the RX descriptor ring */ |
851 | memset(sc->sc_rxdesc, 0, sizeof(sc->sc_rxdesc)); | | 850 | memset(sc->sc_rxdesc, 0, sizeof(sc->sc_rxdesc)); |
852 | for (i = 0; i < RALINK_ETH_NUM_RX_DESC; i++) { | | 851 | for (i = 0; i < RALINK_ETH_NUM_RX_DESC; i++) { |
853 | if (ralink_eth_add_rxbuf(sc, i)) { | | 852 | if (ralink_eth_add_rxbuf(sc, i)) { |
854 | panic("Can't allocate rx mbuf\n"); | | 853 | panic("Can't allocate rx mbuf\n"); |
855 | } | | 854 | } |
856 | } | | 855 | } |
857 | | | 856 | |
858 | /* | | 857 | /* |
859 | * Flush the RX descriptors | | 858 | * Flush the RX descriptors |
860 | * - TODO: can we just access descriptors via KSEG1 | | 859 | * - TODO: can we just access descriptors via KSEG1 |
861 | * to avoid the flush? | | 860 | * to avoid the flush? |
862 | */ | | 861 | */ |
863 | bus_dmamap_sync(sc->sc_dmat, sc->sc_pdmamap, | | 862 | bus_dmamap_sync(sc->sc_dmat, sc->sc_pdmamap, |
864 | (int)&sc->sc_rxdesc - (int)sc->sc_descs, sizeof(sc->sc_rxdesc), | | 863 | (int)&sc->sc_rxdesc - (int)sc->sc_descs, sizeof(sc->sc_rxdesc), |
865 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); | | 864 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
866 | | | 865 | |
867 | /* Clear the PDMA state */ | | 866 | /* Clear the PDMA state */ |
868 | r = fe_read(sc, RA_FE_PDMA_GLOBAL_CFG); | | 867 | r = fe_read(sc, RA_FE_PDMA_GLOBAL_CFG); |
869 | r &= 0xff; | | 868 | r &= 0xff; |
870 | fe_write(sc, RA_FE_PDMA_GLOBAL_CFG, r); | | 869 | fe_write(sc, RA_FE_PDMA_GLOBAL_CFG, r); |
871 | (void) fe_read(sc, RA_FE_PDMA_GLOBAL_CFG); | | 870 | (void) fe_read(sc, RA_FE_PDMA_GLOBAL_CFG); |
872 | | | 871 | |
873 | /* Setup the PDMA VLAN ID's */ | | 872 | /* Setup the PDMA VLAN ID's */ |
874 | fe_write(sc, RA_FE_VLAN_ID_0001, 0x00010000); | | 873 | fe_write(sc, RA_FE_VLAN_ID_0001, 0x00010000); |
875 | fe_write(sc, RA_FE_VLAN_ID_0203, 0x00030002); | | 874 | fe_write(sc, RA_FE_VLAN_ID_0203, 0x00030002); |
876 | fe_write(sc, RA_FE_VLAN_ID_0405, 0x00050004); | | 875 | fe_write(sc, RA_FE_VLAN_ID_0405, 0x00050004); |
877 | fe_write(sc, RA_FE_VLAN_ID_0607, 0x00070006); | | 876 | fe_write(sc, RA_FE_VLAN_ID_0607, 0x00070006); |
878 | fe_write(sc, RA_FE_VLAN_ID_0809, 0x00090008); | | 877 | fe_write(sc, RA_FE_VLAN_ID_0809, 0x00090008); |
879 | fe_write(sc, RA_FE_VLAN_ID_1011, 0x000b000a); | | 878 | fe_write(sc, RA_FE_VLAN_ID_1011, 0x000b000a); |
880 | fe_write(sc, RA_FE_VLAN_ID_1213, 0x000d000c); | | 879 | fe_write(sc, RA_FE_VLAN_ID_1213, 0x000d000c); |
881 | fe_write(sc, RA_FE_VLAN_ID_1415, 0x000f000e); | | 880 | fe_write(sc, RA_FE_VLAN_ID_1415, 0x000f000e); |
882 | | | 881 | |
883 | /* Give the TX and RX rings to the chip. */ | | 882 | /* Give the TX and RX rings to the chip. */ |
884 | fe_write(sc, RA_FE_PDMA_TX0_PTR, | | 883 | fe_write(sc, RA_FE_PDMA_TX0_PTR, |
885 | htole32(MIPS_KSEG0_TO_PHYS(&sc->sc_txdesc))); | | 884 | htole32(MIPS_KSEG0_TO_PHYS(&sc->sc_txdesc))); |
886 | fe_write(sc, RA_FE_PDMA_TX0_COUNT, htole32(RALINK_ETH_NUM_TX_DESC)); | | 885 | fe_write(sc, RA_FE_PDMA_TX0_COUNT, htole32(RALINK_ETH_NUM_TX_DESC)); |
887 | fe_write(sc, RA_FE_PDMA_TX0_CPU_IDX, 0); | | 886 | fe_write(sc, RA_FE_PDMA_TX0_CPU_IDX, 0); |
888 | fe_write(sc, RA_FE_PDMA_RESET_IDX, PDMA_RST_TX0); | | 887 | fe_write(sc, RA_FE_PDMA_RESET_IDX, PDMA_RST_TX0); |
889 | | | 888 | |
890 | fe_write(sc, RA_FE_PDMA_RX0_PTR, | | 889 | fe_write(sc, RA_FE_PDMA_RX0_PTR, |
891 | htole32(MIPS_KSEG0_TO_PHYS(&sc->sc_rxdesc))); | | 890 | htole32(MIPS_KSEG0_TO_PHYS(&sc->sc_rxdesc))); |
892 | fe_write(sc, RA_FE_PDMA_RX0_COUNT, htole32(RALINK_ETH_NUM_RX_DESC)); | | 891 | fe_write(sc, RA_FE_PDMA_RX0_COUNT, htole32(RALINK_ETH_NUM_RX_DESC)); |
893 | fe_write(sc, RA_FE_PDMA_RX0_CPU_IDX, | | 892 | fe_write(sc, RA_FE_PDMA_RX0_CPU_IDX, |
894 | htole32(RALINK_ETH_NUM_RX_DESC - 1)); | | 893 | htole32(RALINK_ETH_NUM_RX_DESC - 1)); |
895 | fe_write(sc, RA_FE_PDMA_RESET_IDX, PDMA_RST_RX0); | | 894 | fe_write(sc, RA_FE_PDMA_RESET_IDX, PDMA_RST_RX0); |
896 | fe_write(sc, RA_FE_PDMA_RX0_CPU_IDX, | | 895 | fe_write(sc, RA_FE_PDMA_RX0_CPU_IDX, |
897 | htole32(RALINK_ETH_NUM_RX_DESC - 1)); | | 896 | htole32(RALINK_ETH_NUM_RX_DESC - 1)); |
898 | | | 897 | |
899 | /* Start PDMA */ | | 898 | /* Start PDMA */ |
900 | fe_write(sc, RA_FE_PDMA_GLOBAL_CFG, | | 899 | fe_write(sc, RA_FE_PDMA_GLOBAL_CFG, |
901 | FE_PDMA_GLOBAL_CFG_TX_WB_DDONE | | | 900 | FE_PDMA_GLOBAL_CFG_TX_WB_DDONE | |
902 | FE_PDMA_GLOBAL_CFG_RX_DMA_EN | | | 901 | FE_PDMA_GLOBAL_CFG_RX_DMA_EN | |
903 | FE_PDMA_GLOBAL_CFG_TX_DMA_EN | | | 902 | FE_PDMA_GLOBAL_CFG_TX_DMA_EN | |
904 | FE_PDMA_GLOBAL_CFG_BURST_SZ_4); | | 903 | FE_PDMA_GLOBAL_CFG_BURST_SZ_4); |
905 | | | 904 | |
906 | /* Setup the clock for the Frame Engine */ | | 905 | /* Setup the clock for the Frame Engine */ |
907 | fe_write(sc, RA_FE_GLOBAL_CFG, | | 906 | fe_write(sc, RA_FE_GLOBAL_CFG, |
908 | FE_GLOBAL_CFG_EXT_VLAN(0x8100) | | | 907 | FE_GLOBAL_CFG_EXT_VLAN(0x8100) | |
909 | FE_GLOBAL_CFG_US_CLK(RA_BUS_FREQ / 1000000) | | | 908 | FE_GLOBAL_CFG_US_CLK(RA_BUS_FREQ / 1000000) | |
910 | FE_GLOBAL_CFG_L2_SPACE(0x8)); | | 909 | FE_GLOBAL_CFG_L2_SPACE(0x8)); |
911 | | | 910 | |
912 | /* Turn on all interrupts */ | | 911 | /* Turn on all interrupts */ |
913 | fe_write(sc, RA_FE_INT_ENABLE, | | 912 | fe_write(sc, RA_FE_INT_ENABLE, |
914 | FE_INT_RX | FE_INT_TX3 | FE_INT_TX2 | FE_INT_TX1 | FE_INT_TX0); | | 913 | FE_INT_RX | FE_INT_TX3 | FE_INT_TX2 | FE_INT_TX1 | FE_INT_TX0); |
915 | | | 914 | |
916 | /* | | 915 | /* |
917 | * Configure GDMA forwarding | | 916 | * Configure GDMA forwarding |
918 | * - default all packets to CPU | | 917 | * - default all packets to CPU |
919 | * - Turn on auto-CRC | | 918 | * - Turn on auto-CRC |
920 | */ | | 919 | */ |
921 | #if 0 | | 920 | #if 0 |
922 | fe_write(sc, RA_FE_GDMA1_FWD_CFG, | | 921 | fe_write(sc, RA_FE_GDMA1_FWD_CFG, |
923 | (FE_GDMA_FWD_CFG_DIS_TX_CRC | FE_GDMA_FWD_CFG_DIS_TX_PAD)); | | 922 | (FE_GDMA_FWD_CFG_DIS_TX_CRC | FE_GDMA_FWD_CFG_DIS_TX_PAD)); |
924 | #endif | | 923 | #endif |
925 | fe_write(sc, RA_FE_GDMA1_FWD_CFG, | | 924 | fe_write(sc, RA_FE_GDMA1_FWD_CFG, |
926 | FE_GDMA_FWD_CFG_JUMBO_LEN(MCLBYTES/1024) | | | 925 | FE_GDMA_FWD_CFG_JUMBO_LEN(MCLBYTES/1024) | |
927 | FE_GDMA_FWD_CFG_STRIP_RX_CRC | | | 926 | FE_GDMA_FWD_CFG_STRIP_RX_CRC | |
928 | FE_GDMA_FWD_CFG_IP4_CRC_EN | | | 927 | FE_GDMA_FWD_CFG_IP4_CRC_EN | |
929 | FE_GDMA_FWD_CFG_TCP_CRC_EN | | | 928 | FE_GDMA_FWD_CFG_TCP_CRC_EN | |
930 | FE_GDMA_FWD_CFG_UDP_CRC_EN); | | 929 | FE_GDMA_FWD_CFG_UDP_CRC_EN); |
931 | | | 930 | |
932 | /* CDMA also needs CRCs turned on */ | | 931 | /* CDMA also needs CRCs turned on */ |
933 | r = fe_read(sc, RA_FE_CDMA_CSG_CFG); | | 932 | r = fe_read(sc, RA_FE_CDMA_CSG_CFG); |
934 | r |= (FE_CDMA_CSG_CFG_IP4_CRC_EN | FE_CDMA_CSG_CFG_UDP_CRC_EN | | | 933 | r |= (FE_CDMA_CSG_CFG_IP4_CRC_EN | FE_CDMA_CSG_CFG_UDP_CRC_EN | |
935 | FE_CDMA_CSG_CFG_TCP_CRC_EN); | | 934 | FE_CDMA_CSG_CFG_TCP_CRC_EN); |
936 | fe_write(sc, RA_FE_CDMA_CSG_CFG, r); | | 935 | fe_write(sc, RA_FE_CDMA_CSG_CFG, r); |
937 | | | 936 | |
938 | /* Configure Flow Control Thresholds */ | | 937 | /* Configure Flow Control Thresholds */ |
939 | #ifdef RT3883 | | 938 | #ifdef RT3883 |
940 | fe_write(sc, RA_FE_PSE_FQ_CFG, | | 939 | fe_write(sc, RA_FE_PSE_FQ_CFG, |
941 | FE_PSE_FQ_MAX_COUNT(0xff) | | | 940 | FE_PSE_FQ_MAX_COUNT(0xff) | |
942 | FE_PSE_FQ_FC_RELEASE(0x90) | | | 941 | FE_PSE_FQ_FC_RELEASE(0x90) | |
943 | FE_PSE_FQ_FC_ASSERT(0x80)); | | 942 | FE_PSE_FQ_FC_ASSERT(0x80)); |
944 | #else | | 943 | #else |
945 | fe_write(sc, RA_FE_PSE_FQ_CFG, | | 944 | fe_write(sc, RA_FE_PSE_FQ_CFG, |
946 | FE_PSE_FQ_MAX_COUNT(0x80) | | | 945 | FE_PSE_FQ_MAX_COUNT(0x80) | |
947 | FE_PSE_FQ_FC_RELEASE(0x50) | | | 946 | FE_PSE_FQ_FC_RELEASE(0x50) | |
948 | FE_PSE_FQ_FC_ASSERT(0x40)); | | 947 | FE_PSE_FQ_FC_ASSERT(0x40)); |
949 | #endif | | 948 | #endif |
950 | | | 949 | |
951 | #ifdef RALINK_ETH_DEBUG | | 950 | #ifdef RALINK_ETH_DEBUG |
952 | printf("FE_MDIO_CFG1: 0x%08x\n", fe_read(sc, RA_FE_MDIO_CFG1)); | | 951 | printf("FE_MDIO_CFG1: 0x%08x\n", fe_read(sc, RA_FE_MDIO_CFG1)); |
953 | printf("FE_MDIO_CFG2: 0x%08x\n", fe_read(sc, RA_FE_MDIO_CFG2)); | | 952 | printf("FE_MDIO_CFG2: 0x%08x\n", fe_read(sc, RA_FE_MDIO_CFG2)); |
954 | printf("FE_PDMA_TX0_PTR: %08x\n", fe_read(sc, RA_FE_PDMA_TX0_PTR)); | | 953 | printf("FE_PDMA_TX0_PTR: %08x\n", fe_read(sc, RA_FE_PDMA_TX0_PTR)); |
955 | printf("FE_PDMA_TX0_COUNT: %08x\n", | | 954 | printf("FE_PDMA_TX0_COUNT: %08x\n", |
956 | fe_read(sc, RA_FE_PDMA_TX0_COUNT)); | | 955 | fe_read(sc, RA_FE_PDMA_TX0_COUNT)); |
957 | printf("FE_PDMA_TX0_CPU_IDX: %08x\n", | | 956 | printf("FE_PDMA_TX0_CPU_IDX: %08x\n", |
958 | fe_read(sc, RA_FE_PDMA_TX0_CPU_IDX)); | | 957 | fe_read(sc, RA_FE_PDMA_TX0_CPU_IDX)); |
959 | printf("FE_PDMA_TX0_DMA_IDX: %08x\n", | | 958 | printf("FE_PDMA_TX0_DMA_IDX: %08x\n", |
960 | fe_read(sc, RA_FE_PDMA_TX0_DMA_IDX)); | | 959 | fe_read(sc, RA_FE_PDMA_TX0_DMA_IDX)); |
961 | printf("FE_PDMA_RX0_PTR: %08x\n", fe_read(sc, RA_FE_PDMA_RX0_PTR)); | | 960 | printf("FE_PDMA_RX0_PTR: %08x\n", fe_read(sc, RA_FE_PDMA_RX0_PTR)); |
962 | printf("FE_PDMA_RX0_COUNT: %08x\n", | | 961 | printf("FE_PDMA_RX0_COUNT: %08x\n", |
963 | fe_read(sc, RA_FE_PDMA_RX0_COUNT)); | | 962 | fe_read(sc, RA_FE_PDMA_RX0_COUNT)); |
964 | printf("FE_PDMA_RX0_CPU_IDX: %08x\n", | | 963 | printf("FE_PDMA_RX0_CPU_IDX: %08x\n", |
965 | fe_read(sc, RA_FE_PDMA_RX0_CPU_IDX)); | | 964 | fe_read(sc, RA_FE_PDMA_RX0_CPU_IDX)); |
966 | printf("FE_PDMA_RX0_DMA_IDX: %08x\n", | | 965 | printf("FE_PDMA_RX0_DMA_IDX: %08x\n", |
967 | fe_read(sc, RA_FE_PDMA_RX0_DMA_IDX)); | | 966 | fe_read(sc, RA_FE_PDMA_RX0_DMA_IDX)); |
968 | printf("FE_PDMA_GLOBAL_CFG: %08x\n", | | 967 | printf("FE_PDMA_GLOBAL_CFG: %08x\n", |
969 | fe_read(sc, RA_FE_PDMA_GLOBAL_CFG)); | | 968 | fe_read(sc, RA_FE_PDMA_GLOBAL_CFG)); |
970 | printf("FE_GLOBAL_CFG: %08x\n", fe_read(sc, RA_FE_GLOBAL_CFG)); | | 969 | printf("FE_GLOBAL_CFG: %08x\n", fe_read(sc, RA_FE_GLOBAL_CFG)); |
971 | printf("FE_GDMA1_FWD_CFG: %08x\n", | | 970 | printf("FE_GDMA1_FWD_CFG: %08x\n", |
972 | fe_read(sc, RA_FE_GDMA1_FWD_CFG)); | | 971 | fe_read(sc, RA_FE_GDMA1_FWD_CFG)); |
973 | printf("FE_CDMA_CSG_CFG: %08x\n", fe_read(sc, RA_FE_CDMA_CSG_CFG)); | | 972 | printf("FE_CDMA_CSG_CFG: %08x\n", fe_read(sc, RA_FE_CDMA_CSG_CFG)); |
974 | printf("FE_PSE_FQ_CFG: %08x\n", fe_read(sc, RA_FE_PSE_FQ_CFG)); | | 973 | printf("FE_PSE_FQ_CFG: %08x\n", fe_read(sc, RA_FE_PSE_FQ_CFG)); |
975 | #endif | | 974 | #endif |
976 | | | 975 | |
977 | /* Force PSE Reset to get everything finalized */ | | 976 | /* Force PSE Reset to get everything finalized */ |
978 | fe_write(sc, RA_FE_GLOBAL_RESET, FE_GLOBAL_RESET_PSE); | | 977 | fe_write(sc, RA_FE_GLOBAL_RESET, FE_GLOBAL_RESET_PSE); |
979 | fe_write(sc, RA_FE_GLOBAL_RESET, 0); | | 978 | fe_write(sc, RA_FE_GLOBAL_RESET, 0); |
980 | } | | 979 | } |
981 | | | 980 | |
982 | /* | | 981 | /* |
983 | * ralink_eth_init | | 982 | * ralink_eth_init |
984 | */ | | 983 | */ |
985 | static int | | 984 | static int |
986 | ralink_eth_init(struct ifnet *ifp) | | 985 | ralink_eth_init(struct ifnet *ifp) |
987 | { | | 986 | { |
988 | RALINK_DEBUG_FUNC_ENTRY(); | | 987 | RALINK_DEBUG_FUNC_ENTRY(); |
989 | ralink_eth_softc_t * const sc = ifp->if_softc; | | 988 | ralink_eth_softc_t * const sc = ifp->if_softc; |
990 | int error; | | 989 | int error; |
991 | | | 990 | |
992 | error = ralink_eth_enable(sc); | | 991 | error = ralink_eth_enable(sc); |
993 | if (!error) { | | 992 | if (!error) { |
994 | /* Note that the interface is now running. */ | | 993 | /* Note that the interface is now running. */ |
995 | ifp->if_flags |= IFF_RUNNING; | | 994 | ifp->if_flags |= IFF_RUNNING; |
996 | ifp->if_flags &= ~IFF_OACTIVE; | | 995 | ifp->if_flags &= ~IFF_OACTIVE; |
997 | } | | 996 | } |
998 | | | 997 | |
999 | return error; | | 998 | return error; |
1000 | } | | 999 | } |
1001 | | | 1000 | |
1002 | /* | | 1001 | /* |
1003 | * ralink_eth_rxdrain | | 1002 | * ralink_eth_rxdrain |
1004 | * | | 1003 | * |
1005 | * Drain the receive queue. | | 1004 | * Drain the receive queue. |
1006 | */ | | 1005 | */ |
1007 | static void | | 1006 | static void |
1008 | ralink_eth_rxdrain(ralink_eth_softc_t *sc) | | 1007 | ralink_eth_rxdrain(ralink_eth_softc_t *sc) |
1009 | { | | 1008 | { |
1010 | RALINK_DEBUG_FUNC_ENTRY(); | | 1009 | RALINK_DEBUG_FUNC_ENTRY(); |
1011 | | | 1010 | |
1012 | for (int i = 0; i < RALINK_ETH_NUM_RX_DESC; i++) { | | 1011 | for (int i = 0; i < RALINK_ETH_NUM_RX_DESC; i++) { |
1013 | struct ralink_eth_rxstate *rxs = &sc->sc_rxstate[i]; | | 1012 | struct ralink_eth_rxstate *rxs = &sc->sc_rxstate[i]; |
1014 | if (rxs->rxs_mbuf != NULL) { | | 1013 | if (rxs->rxs_mbuf != NULL) { |
1015 | bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); | | 1014 | bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); |
1016 | m_freem(rxs->rxs_mbuf); | | 1015 | m_freem(rxs->rxs_mbuf); |
1017 | rxs->rxs_mbuf = NULL; | | 1016 | rxs->rxs_mbuf = NULL; |
1018 | } | | 1017 | } |
1019 | } | | 1018 | } |
1020 | } | | 1019 | } |
1021 | | | 1020 | |
1022 | /* | | 1021 | /* |
1023 | * ralink_eth_stop | | 1022 | * ralink_eth_stop |
1024 | */ | | 1023 | */ |
1025 | static void | | 1024 | static void |
1026 | ralink_eth_stop(struct ifnet *ifp, int disable) | | 1025 | ralink_eth_stop(struct ifnet *ifp, int disable) |
1027 | { | | 1026 | { |
1028 | RALINK_DEBUG_FUNC_ENTRY(); | | 1027 | RALINK_DEBUG_FUNC_ENTRY(); |
1029 | ralink_eth_softc_t * const sc = ifp->if_softc; | | 1028 | ralink_eth_softc_t * const sc = ifp->if_softc; |
1030 | | | 1029 | |
1031 | ralink_eth_disable(sc); | | 1030 | ralink_eth_disable(sc); |
1032 | | | 1031 | |
1033 | /* Mark the interface down and cancel the watchdog timer. */ | | 1032 | /* Mark the interface down and cancel the watchdog timer. */ |
1034 | ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); | | 1033 | ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); |
1035 | ifp->if_timer = 0; | | 1034 | ifp->if_timer = 0; |
1036 | } | | 1035 | } |
1037 | | | 1036 | |
1038 | /* | | 1037 | /* |
1039 | * ralink_eth_add_rxbuf | | 1038 | * ralink_eth_add_rxbuf |
1040 | */ | | 1039 | */ |
1041 | static int | | 1040 | static int |
1042 | ralink_eth_add_rxbuf(ralink_eth_softc_t *sc, int idx) | | 1041 | ralink_eth_add_rxbuf(ralink_eth_softc_t *sc, int idx) |
1043 | { | | 1042 | { |
1044 | RALINK_DEBUG_FUNC_ENTRY(); | | 1043 | RALINK_DEBUG_FUNC_ENTRY(); |
1045 | struct ralink_eth_rxstate * const rxs = &sc->sc_rxstate[idx]; | | 1044 | struct ralink_eth_rxstate * const rxs = &sc->sc_rxstate[idx]; |
1046 | struct mbuf *m; | | 1045 | struct mbuf *m; |
1047 | int error; | | 1046 | int error; |
1048 | | | 1047 | |
1049 | MGETHDR(m, M_DONTWAIT, MT_DATA); | | 1048 | MGETHDR(m, M_DONTWAIT, MT_DATA); |
1050 | if (m == NULL) { | | 1049 | if (m == NULL) { |
1051 | printf("MGETHDR failed\n"); | | 1050 | printf("MGETHDR failed\n"); |
1052 | sc->sc_evcnt_add_rxbuf_hdr_fail.ev_count++; | | 1051 | sc->sc_evcnt_add_rxbuf_hdr_fail.ev_count++; |
1053 | return ENOBUFS; | | 1052 | return ENOBUFS; |
1054 | } | | 1053 | } |
1055 | | | 1054 | |
1056 | MCLGET(m, M_DONTWAIT); | | 1055 | MCLGET(m, M_DONTWAIT); |
1057 | if ((m->m_flags & M_EXT) == 0) { | | 1056 | if ((m->m_flags & M_EXT) == 0) { |