| @@ -1,1938 +1,1939 @@ | | | @@ -1,1938 +1,1939 @@ |
1 | /* $NetBSD: if_mvgbe.c,v 1.10 2011/07/30 19:06:57 rjs Exp $ */ | | 1 | /* $NetBSD: if_mvgbe.c,v 1.11 2011/09/01 14:39:03 jakllsch Exp $ */ |
2 | /* | | 2 | /* |
3 | * Copyright (c) 2007, 2008 KIYOHARA Takashi | | 3 | * Copyright (c) 2007, 2008 KIYOHARA Takashi |
4 | * All rights reserved. | | 4 | * All rights reserved. |
5 | * | | 5 | * |
6 | * Redistribution and use in source and binary forms, with or without | | 6 | * Redistribution and use in source and binary forms, with or without |
7 | * modification, are permitted provided that the following conditions | | 7 | * modification, are permitted provided that the following conditions |
8 | * are met: | | 8 | * are met: |
9 | * 1. Redistributions of source code must retain the above copyright | | 9 | * 1. Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. | | 10 | * notice, this list of conditions and the following disclaimer. |
11 | * 2. Redistributions in binary form must reproduce the above copyright | | 11 | * 2. Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the | | 12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. | | 13 | * documentation and/or other materials provided with the distribution. |
14 | * | | 14 | * |
15 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | | 15 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
16 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | | 16 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
17 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | | 17 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE |
18 | * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, | | 18 | * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, |
19 | * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | | 19 | * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
20 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR | | 20 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR |
21 | * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | | 21 | * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
22 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | | 22 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, |
23 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN | | 23 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN |
24 | * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 24 | * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
25 | * POSSIBILITY OF SUCH DAMAGE. | | 25 | * POSSIBILITY OF SUCH DAMAGE. |
26 | */ | | 26 | */ |
27 | #include <sys/cdefs.h> | | 27 | #include <sys/cdefs.h> |
28 | __KERNEL_RCSID(0, "$NetBSD: if_mvgbe.c,v 1.10 2011/07/30 19:06:57 rjs Exp $"); | | 28 | __KERNEL_RCSID(0, "$NetBSD: if_mvgbe.c,v 1.11 2011/09/01 14:39:03 jakllsch Exp $"); |
29 | | | 29 | |
30 | #include "rnd.h" | | 30 | #include "rnd.h" |
31 | | | 31 | |
32 | #include <sys/param.h> | | 32 | #include <sys/param.h> |
33 | #include <sys/bus.h> | | 33 | #include <sys/bus.h> |
34 | #include <sys/device.h> | | 34 | #include <sys/device.h> |
35 | #include <sys/endian.h> | | 35 | #include <sys/endian.h> |
36 | #include <sys/errno.h> | | 36 | #include <sys/errno.h> |
37 | #include <sys/kmem.h> | | 37 | #include <sys/kmem.h> |
38 | #include <sys/mutex.h> | | 38 | #include <sys/mutex.h> |
39 | #include <sys/sockio.h> | | 39 | #include <sys/sockio.h> |
40 | | | 40 | |
41 | #include <dev/marvell/marvellreg.h> | | 41 | #include <dev/marvell/marvellreg.h> |
42 | #include <dev/marvell/marvellvar.h> | | 42 | #include <dev/marvell/marvellvar.h> |
43 | #include <dev/marvell/mvgbereg.h> | | 43 | #include <dev/marvell/mvgbereg.h> |
44 | | | 44 | |
45 | #include <net/if.h> | | 45 | #include <net/if.h> |
46 | #include <net/if_ether.h> | | 46 | #include <net/if_ether.h> |
47 | #include <net/if_media.h> | | 47 | #include <net/if_media.h> |
48 | | | 48 | |
49 | #include <netinet/in.h> | | 49 | #include <netinet/in.h> |
50 | #include <netinet/in_systm.h> | | 50 | #include <netinet/in_systm.h> |
51 | #include <netinet/ip.h> | | 51 | #include <netinet/ip.h> |
52 | | | 52 | |
53 | #include <net/bpf.h> | | 53 | #include <net/bpf.h> |
54 | #if NRND > 0 | | 54 | #if NRND > 0 |
55 | #include <sys/rnd.h> | | 55 | #include <sys/rnd.h> |
56 | #endif | | 56 | #endif |
57 | | | 57 | |
58 | #include <dev/mii/mii.h> | | 58 | #include <dev/mii/mii.h> |
59 | #include <dev/mii/miivar.h> | | 59 | #include <dev/mii/miivar.h> |
60 | | | 60 | |
61 | #include "locators.h" | | 61 | #include "locators.h" |
62 | | | 62 | |
63 | /* #define MVGBE_DEBUG 3 */ | | 63 | /* #define MVGBE_DEBUG 3 */ |
64 | #ifdef MVGBE_DEBUG | | 64 | #ifdef MVGBE_DEBUG |
65 | #define DPRINTF(x) if (mvgbe_debug) printf x | | 65 | #define DPRINTF(x) if (mvgbe_debug) printf x |
66 | #define DPRINTFN(n,x) if (mvgbe_debug >= (n)) printf x | | 66 | #define DPRINTFN(n,x) if (mvgbe_debug >= (n)) printf x |
67 | int mvgbe_debug = MVGBE_DEBUG; | | 67 | int mvgbe_debug = MVGBE_DEBUG; |
68 | #else | | 68 | #else |
69 | #define DPRINTF(x) | | 69 | #define DPRINTF(x) |
70 | #define DPRINTFN(n,x) | | 70 | #define DPRINTFN(n,x) |
71 | #endif | | 71 | #endif |
72 | | | 72 | |
73 | | | 73 | |
74 | #define MVGBE_READ(sc, reg) \ | | 74 | #define MVGBE_READ(sc, reg) \ |
75 | bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg)) | | 75 | bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg)) |
76 | #define MVGBE_WRITE(sc, reg, val) \ | | 76 | #define MVGBE_WRITE(sc, reg, val) \ |
77 | bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val)) | | 77 | bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val)) |
78 | #define MVGBE_READ_FILTER(sc, reg, val, c) \ | | 78 | #define MVGBE_READ_FILTER(sc, reg, val, c) \ |
79 | bus_space_read_region_4((sc)->sc_iot, (sc)->sc_dafh, (reg), (val), (c)) | | 79 | bus_space_read_region_4((sc)->sc_iot, (sc)->sc_dafh, (reg), (val), (c)) |
80 | #define MVGBE_WRITE_FILTER(sc, reg, val, c) \ | | 80 | #define MVGBE_WRITE_FILTER(sc, reg, val, c) \ |
81 | bus_space_write_region_4((sc)->sc_iot, (sc)->sc_dafh, (reg), (val), (c)) | | 81 | bus_space_write_region_4((sc)->sc_iot, (sc)->sc_dafh, (reg), (val), (c)) |
82 | | | 82 | |
83 | #define MVGBE_TX_RING_CNT 256 | | 83 | #define MVGBE_TX_RING_CNT 256 |
84 | #define MVGBE_TX_RING_MSK (MVGBE_TX_RING_CNT - 1) | | 84 | #define MVGBE_TX_RING_MSK (MVGBE_TX_RING_CNT - 1) |
85 | #define MVGBE_TX_RING_NEXT(x) (((x) + 1) & MVGBE_TX_RING_MSK) | | 85 | #define MVGBE_TX_RING_NEXT(x) (((x) + 1) & MVGBE_TX_RING_MSK) |
86 | #define MVGBE_RX_RING_CNT 256 | | 86 | #define MVGBE_RX_RING_CNT 256 |
87 | #define MVGBE_RX_RING_MSK (MVGBE_RX_RING_CNT - 1) | | 87 | #define MVGBE_RX_RING_MSK (MVGBE_RX_RING_CNT - 1) |
88 | #define MVGBE_RX_RING_NEXT(x) (((x) + 1) & MVGBE_RX_RING_MSK) | | 88 | #define MVGBE_RX_RING_NEXT(x) (((x) + 1) & MVGBE_RX_RING_MSK) |
89 | | | 89 | |
90 | CTASSERT(MVGBE_TX_RING_CNT > 1 && MVGBE_TX_RING_NEXT(MVGBE_TX_RING_CNT) == | | 90 | CTASSERT(MVGBE_TX_RING_CNT > 1 && MVGBE_TX_RING_NEXT(MVGBE_TX_RING_CNT) == |
91 | (MVGBE_TX_RING_CNT + 1) % MVGBE_TX_RING_CNT); | | 91 | (MVGBE_TX_RING_CNT + 1) % MVGBE_TX_RING_CNT); |
92 | CTASSERT(MVGBE_RX_RING_CNT > 1 && MVGBE_RX_RING_NEXT(MVGBE_RX_RING_CNT) == | | 92 | CTASSERT(MVGBE_RX_RING_CNT > 1 && MVGBE_RX_RING_NEXT(MVGBE_RX_RING_CNT) == |
93 | (MVGBE_RX_RING_CNT + 1) % MVGBE_RX_RING_CNT); | | 93 | (MVGBE_RX_RING_CNT + 1) % MVGBE_RX_RING_CNT); |
94 | | | 94 | |
95 | #define MVGBE_JSLOTS 384 /* XXXX */ | | 95 | #define MVGBE_JSLOTS 384 /* XXXX */ |
96 | #define MVGBE_JLEN ((MVGBE_MRU + MVGBE_RXBUF_ALIGN)&~MVGBE_RXBUF_MASK) | | 96 | #define MVGBE_JLEN ((MVGBE_MRU + MVGBE_RXBUF_ALIGN)&~MVGBE_RXBUF_MASK) |
97 | #define MVGBE_NTXSEG 30 | | 97 | #define MVGBE_NTXSEG 30 |
98 | #define MVGBE_JPAGESZ PAGE_SIZE | | 98 | #define MVGBE_JPAGESZ PAGE_SIZE |
99 | #define MVGBE_RESID \ | | 99 | #define MVGBE_RESID \ |
100 | (MVGBE_JPAGESZ - (MVGBE_JLEN * MVGBE_JSLOTS) % MVGBE_JPAGESZ) | | 100 | (MVGBE_JPAGESZ - (MVGBE_JLEN * MVGBE_JSLOTS) % MVGBE_JPAGESZ) |
101 | #define MVGBE_JMEM \ | | 101 | #define MVGBE_JMEM \ |
102 | ((MVGBE_JLEN * MVGBE_JSLOTS) + MVGBE_RESID) | | 102 | ((MVGBE_JLEN * MVGBE_JSLOTS) + MVGBE_RESID) |
103 | | | 103 | |
104 | #define MVGBE_TX_RING_ADDR(sc, i) \ | | 104 | #define MVGBE_TX_RING_ADDR(sc, i) \ |
105 | ((sc)->sc_ring_map->dm_segs[0].ds_addr + \ | | 105 | ((sc)->sc_ring_map->dm_segs[0].ds_addr + \ |
106 | offsetof(struct mvgbe_ring_data, mvgbe_tx_ring[(i)])) | | 106 | offsetof(struct mvgbe_ring_data, mvgbe_tx_ring[(i)])) |
107 | | | 107 | |
108 | #define MVGBE_RX_RING_ADDR(sc, i) \ | | 108 | #define MVGBE_RX_RING_ADDR(sc, i) \ |
109 | ((sc)->sc_ring_map->dm_segs[0].ds_addr + \ | | 109 | ((sc)->sc_ring_map->dm_segs[0].ds_addr + \ |
110 | offsetof(struct mvgbe_ring_data, mvgbe_rx_ring[(i)])) | | 110 | offsetof(struct mvgbe_ring_data, mvgbe_rx_ring[(i)])) |
111 | | | 111 | |
112 | #define MVGBE_CDOFF(x) offsetof(struct mvgbe_ring_data, x) | | 112 | #define MVGBE_CDOFF(x) offsetof(struct mvgbe_ring_data, x) |
113 | #define MVGBE_CDTXOFF(x) MVGBE_CDOFF(mvgbe_tx_ring[(x)]) | | 113 | #define MVGBE_CDTXOFF(x) MVGBE_CDOFF(mvgbe_tx_ring[(x)]) |
114 | #define MVGBE_CDRXOFF(x) MVGBE_CDOFF(mvgbe_rx_ring[(x)]) | | 114 | #define MVGBE_CDRXOFF(x) MVGBE_CDOFF(mvgbe_rx_ring[(x)]) |
115 | | | 115 | |
116 | #define MVGBE_CDTXSYNC(sc, x, n, ops) \ | | 116 | #define MVGBE_CDTXSYNC(sc, x, n, ops) \ |
117 | do { \ | | 117 | do { \ |
118 | int __x, __n; \ | | 118 | int __x, __n; \ |
119 | const int __descsize = sizeof(struct mvgbe_tx_desc); \ | | 119 | const int __descsize = sizeof(struct mvgbe_tx_desc); \ |
120 | \ | | 120 | \ |
121 | __x = (x); \ | | 121 | __x = (x); \ |
122 | __n = (n); \ | | 122 | __n = (n); \ |
123 | \ | | 123 | \ |
124 | /* If it will wrap around, sync to the end of the ring. */ \ | | 124 | /* If it will wrap around, sync to the end of the ring. */ \ |
125 | if ((__x + __n) > MVGBE_TX_RING_CNT) { \ | | 125 | if ((__x + __n) > MVGBE_TX_RING_CNT) { \ |
126 | bus_dmamap_sync((sc)->sc_dmat, \ | | 126 | bus_dmamap_sync((sc)->sc_dmat, \ |
127 | (sc)->sc_ring_map, MVGBE_CDTXOFF(__x), \ | | 127 | (sc)->sc_ring_map, MVGBE_CDTXOFF(__x), \ |
128 | __descsize * (MVGBE_TX_RING_CNT - __x), (ops)); \ | | 128 | __descsize * (MVGBE_TX_RING_CNT - __x), (ops)); \ |
129 | __n -= (MVGBE_TX_RING_CNT - __x); \ | | 129 | __n -= (MVGBE_TX_RING_CNT - __x); \ |
130 | __x = 0; \ | | 130 | __x = 0; \ |
131 | } \ | | 131 | } \ |
132 | \ | | 132 | \ |
133 | /* Now sync whatever is left. */ \ | | 133 | /* Now sync whatever is left. */ \ |
134 | bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_ring_map, \ | | 134 | bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_ring_map, \ |
135 | MVGBE_CDTXOFF((__x)), __descsize * __n, (ops)); \ | | 135 | MVGBE_CDTXOFF((__x)), __descsize * __n, (ops)); \ |
136 | } while (0 /*CONSTCOND*/) | | 136 | } while (0 /*CONSTCOND*/) |
137 | | | 137 | |
138 | #define MVGBE_CDRXSYNC(sc, x, ops) \ | | 138 | #define MVGBE_CDRXSYNC(sc, x, ops) \ |
139 | do { \ | | 139 | do { \ |
140 | bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_ring_map, \ | | 140 | bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_ring_map, \ |
141 | MVGBE_CDRXOFF((x)), sizeof(struct mvgbe_rx_desc), (ops)); \ | | 141 | MVGBE_CDRXOFF((x)), sizeof(struct mvgbe_rx_desc), (ops)); \ |
142 | } while (/*CONSTCOND*/0) | | 142 | } while (/*CONSTCOND*/0) |
143 | | | 143 | |
144 | | | 144 | |
145 | struct mvgbe_jpool_entry { | | 145 | struct mvgbe_jpool_entry { |
146 | int slot; | | 146 | int slot; |
147 | LIST_ENTRY(mvgbe_jpool_entry) jpool_entries; | | 147 | LIST_ENTRY(mvgbe_jpool_entry) jpool_entries; |
148 | }; | | 148 | }; |
149 | | | 149 | |
150 | struct mvgbe_chain { | | 150 | struct mvgbe_chain { |
151 | void *mvgbe_desc; | | 151 | void *mvgbe_desc; |
152 | struct mbuf *mvgbe_mbuf; | | 152 | struct mbuf *mvgbe_mbuf; |
153 | struct mvgbe_chain *mvgbe_next; | | 153 | struct mvgbe_chain *mvgbe_next; |
154 | }; | | 154 | }; |
155 | | | 155 | |
156 | struct mvgbe_txmap_entry { | | 156 | struct mvgbe_txmap_entry { |
157 | bus_dmamap_t dmamap; | | 157 | bus_dmamap_t dmamap; |
158 | SIMPLEQ_ENTRY(mvgbe_txmap_entry) link; | | 158 | SIMPLEQ_ENTRY(mvgbe_txmap_entry) link; |
159 | }; | | 159 | }; |
160 | | | 160 | |
161 | struct mvgbe_chain_data { | | 161 | struct mvgbe_chain_data { |
162 | struct mvgbe_chain mvgbe_tx_chain[MVGBE_TX_RING_CNT]; | | 162 | struct mvgbe_chain mvgbe_tx_chain[MVGBE_TX_RING_CNT]; |
163 | struct mvgbe_txmap_entry *mvgbe_tx_map[MVGBE_TX_RING_CNT]; | | 163 | struct mvgbe_txmap_entry *mvgbe_tx_map[MVGBE_TX_RING_CNT]; |
164 | int mvgbe_tx_prod; | | 164 | int mvgbe_tx_prod; |
165 | int mvgbe_tx_cons; | | 165 | int mvgbe_tx_cons; |
166 | int mvgbe_tx_cnt; | | 166 | int mvgbe_tx_cnt; |
167 | | | 167 | |
168 | struct mvgbe_chain mvgbe_rx_chain[MVGBE_RX_RING_CNT]; | | 168 | struct mvgbe_chain mvgbe_rx_chain[MVGBE_RX_RING_CNT]; |
169 | bus_dmamap_t mvgbe_rx_map[MVGBE_RX_RING_CNT]; | | 169 | bus_dmamap_t mvgbe_rx_map[MVGBE_RX_RING_CNT]; |
170 | bus_dmamap_t mvgbe_rx_jumbo_map; | | 170 | bus_dmamap_t mvgbe_rx_jumbo_map; |
171 | int mvgbe_rx_prod; | | 171 | int mvgbe_rx_prod; |
172 | int mvgbe_rx_cons; | | 172 | int mvgbe_rx_cons; |
173 | int mvgbe_rx_cnt; | | 173 | int mvgbe_rx_cnt; |
174 | | | 174 | |
175 | /* Stick the jumbo mem management stuff here too. */ | | 175 | /* Stick the jumbo mem management stuff here too. */ |
176 | void *mvgbe_jslots[MVGBE_JSLOTS]; | | 176 | void *mvgbe_jslots[MVGBE_JSLOTS]; |
177 | void *mvgbe_jumbo_buf; | | 177 | void *mvgbe_jumbo_buf; |
178 | }; | | 178 | }; |
179 | | | 179 | |
180 | struct mvgbe_ring_data { | | 180 | struct mvgbe_ring_data { |
181 | struct mvgbe_tx_desc mvgbe_tx_ring[MVGBE_TX_RING_CNT]; | | 181 | struct mvgbe_tx_desc mvgbe_tx_ring[MVGBE_TX_RING_CNT]; |
182 | struct mvgbe_rx_desc mvgbe_rx_ring[MVGBE_RX_RING_CNT]; | | 182 | struct mvgbe_rx_desc mvgbe_rx_ring[MVGBE_RX_RING_CNT]; |
183 | }; | | 183 | }; |
184 | | | 184 | |
185 | struct mvgbec_softc { | | 185 | struct mvgbec_softc { |
186 | device_t sc_dev; | | 186 | device_t sc_dev; |
187 | | | 187 | |
188 | bus_space_tag_t sc_iot; | | 188 | bus_space_tag_t sc_iot; |
189 | bus_space_handle_t sc_ioh; | | 189 | bus_space_handle_t sc_ioh; |
190 | | | 190 | |
191 | kmutex_t sc_mtx; | | 191 | kmutex_t sc_mtx; |
192 | | | 192 | |
193 | int sc_fix_tqtb; | | 193 | int sc_fix_tqtb; |
194 | }; | | 194 | }; |
195 | | | 195 | |
196 | struct mvgbe_softc { | | 196 | struct mvgbe_softc { |
197 | device_t sc_dev; | | 197 | device_t sc_dev; |
198 | int sc_port; | | 198 | int sc_port; |
199 | | | 199 | |
200 | bus_space_tag_t sc_iot; | | 200 | bus_space_tag_t sc_iot; |
201 | bus_space_handle_t sc_ioh; | | 201 | bus_space_handle_t sc_ioh; |
202 | bus_space_handle_t sc_dafh; /* dest address filter handle */ | | 202 | bus_space_handle_t sc_dafh; /* dest address filter handle */ |
203 | bus_dma_tag_t sc_dmat; | | 203 | bus_dma_tag_t sc_dmat; |
204 | | | 204 | |
205 | struct ethercom sc_ethercom; | | 205 | struct ethercom sc_ethercom; |
206 | struct mii_data sc_mii; | | 206 | struct mii_data sc_mii; |
207 | u_int8_t sc_enaddr[ETHER_ADDR_LEN]; /* station addr */ | | 207 | u_int8_t sc_enaddr[ETHER_ADDR_LEN]; /* station addr */ |
208 | | | 208 | |
209 | struct mvgbe_chain_data sc_cdata; | | 209 | struct mvgbe_chain_data sc_cdata; |
210 | struct mvgbe_ring_data *sc_rdata; | | 210 | struct mvgbe_ring_data *sc_rdata; |
211 | bus_dmamap_t sc_ring_map; | | 211 | bus_dmamap_t sc_ring_map; |
212 | int sc_if_flags; | | 212 | int sc_if_flags; |
213 | | | 213 | |
214 | LIST_HEAD(__mvgbe_jfreehead, mvgbe_jpool_entry) sc_jfree_listhead; | | 214 | LIST_HEAD(__mvgbe_jfreehead, mvgbe_jpool_entry) sc_jfree_listhead; |
215 | LIST_HEAD(__mvgbe_jinusehead, mvgbe_jpool_entry) sc_jinuse_listhead; | | 215 | LIST_HEAD(__mvgbe_jinusehead, mvgbe_jpool_entry) sc_jinuse_listhead; |
216 | SIMPLEQ_HEAD(__mvgbe_txmaphead, mvgbe_txmap_entry) sc_txmap_head; | | 216 | SIMPLEQ_HEAD(__mvgbe_txmaphead, mvgbe_txmap_entry) sc_txmap_head; |
217 | | | 217 | |
218 | #if NRND > 0 | | 218 | #if NRND > 0 |
219 | rndsource_element_t sc_rnd_source; | | 219 | rndsource_element_t sc_rnd_source; |
220 | #endif | | 220 | #endif |
221 | }; | | 221 | }; |
222 | | | 222 | |
223 | | | 223 | |
224 | /* Gigabit Ethernet Unit Global part functions */ | | 224 | /* Gigabit Ethernet Unit Global part functions */ |
225 | | | 225 | |
226 | static int mvgbec_match(device_t, struct cfdata *, void *); | | 226 | static int mvgbec_match(device_t, struct cfdata *, void *); |
227 | static void mvgbec_attach(device_t, device_t, void *); | | 227 | static void mvgbec_attach(device_t, device_t, void *); |
228 | | | 228 | |
229 | static int mvgbec_print(void *, const char *); | | 229 | static int mvgbec_print(void *, const char *); |
230 | static int mvgbec_search(device_t, cfdata_t, const int *, void *); | | 230 | static int mvgbec_search(device_t, cfdata_t, const int *, void *); |
231 | | | 231 | |
232 | /* MII funcstions */ | | 232 | /* MII funcstions */ |
233 | static int mvgbec_miibus_readreg(device_t, int, int); | | 233 | static int mvgbec_miibus_readreg(device_t, int, int); |
234 | static void mvgbec_miibus_writereg(device_t, int, int, int); | | 234 | static void mvgbec_miibus_writereg(device_t, int, int, int); |
235 | static void mvgbec_miibus_statchg(device_t); | | 235 | static void mvgbec_miibus_statchg(device_t); |
236 | | | 236 | |
237 | static void mvgbec_wininit(struct mvgbec_softc *); | | 237 | static void mvgbec_wininit(struct mvgbec_softc *); |
238 | | | 238 | |
239 | /* Gigabit Ethernet Port part functions */ | | 239 | /* Gigabit Ethernet Port part functions */ |
240 | | | 240 | |
241 | static int mvgbe_match(device_t, struct cfdata *, void *); | | 241 | static int mvgbe_match(device_t, struct cfdata *, void *); |
242 | static void mvgbe_attach(device_t, device_t, void *); | | 242 | static void mvgbe_attach(device_t, device_t, void *); |
243 | | | 243 | |
244 | static int mvgbe_intr(void *); | | 244 | static int mvgbe_intr(void *); |
245 | | | 245 | |
246 | static void mvgbe_start(struct ifnet *); | | 246 | static void mvgbe_start(struct ifnet *); |
247 | static int mvgbe_ioctl(struct ifnet *, u_long, void *); | | 247 | static int mvgbe_ioctl(struct ifnet *, u_long, void *); |
248 | static int mvgbe_init(struct ifnet *); | | 248 | static int mvgbe_init(struct ifnet *); |
249 | static void mvgbe_stop(struct ifnet *, int); | | 249 | static void mvgbe_stop(struct ifnet *, int); |
250 | static void mvgbe_watchdog(struct ifnet *); | | 250 | static void mvgbe_watchdog(struct ifnet *); |
251 | | | 251 | |
252 | static int mvgbe_ifflags_cb(struct ethercom *); | | 252 | static int mvgbe_ifflags_cb(struct ethercom *); |
253 | | | 253 | |
254 | static int mvgbe_mediachange(struct ifnet *); | | 254 | static int mvgbe_mediachange(struct ifnet *); |
255 | static void mvgbe_mediastatus(struct ifnet *, struct ifmediareq *); | | 255 | static void mvgbe_mediastatus(struct ifnet *, struct ifmediareq *); |
256 | | | 256 | |
257 | static int mvgbe_init_rx_ring(struct mvgbe_softc *); | | 257 | static int mvgbe_init_rx_ring(struct mvgbe_softc *); |
258 | static int mvgbe_init_tx_ring(struct mvgbe_softc *); | | 258 | static int mvgbe_init_tx_ring(struct mvgbe_softc *); |
259 | static int mvgbe_newbuf(struct mvgbe_softc *, int, struct mbuf *, bus_dmamap_t); | | 259 | static int mvgbe_newbuf(struct mvgbe_softc *, int, struct mbuf *, bus_dmamap_t); |
260 | static int mvgbe_alloc_jumbo_mem(struct mvgbe_softc *); | | 260 | static int mvgbe_alloc_jumbo_mem(struct mvgbe_softc *); |
261 | static void *mvgbe_jalloc(struct mvgbe_softc *); | | 261 | static void *mvgbe_jalloc(struct mvgbe_softc *); |
262 | static void mvgbe_jfree(struct mbuf *, void *, size_t, void *); | | 262 | static void mvgbe_jfree(struct mbuf *, void *, size_t, void *); |
263 | static int mvgbe_encap(struct mvgbe_softc *, struct mbuf *, uint32_t *); | | 263 | static int mvgbe_encap(struct mvgbe_softc *, struct mbuf *, uint32_t *); |
264 | static void mvgbe_rxeof(struct mvgbe_softc *); | | 264 | static void mvgbe_rxeof(struct mvgbe_softc *); |
265 | static void mvgbe_txeof(struct mvgbe_softc *); | | 265 | static void mvgbe_txeof(struct mvgbe_softc *); |
266 | static uint8_t mvgbe_crc8(const uint8_t *, size_t); | | 266 | static uint8_t mvgbe_crc8(const uint8_t *, size_t); |
267 | static void mvgbe_filter_setup(struct mvgbe_softc *); | | 267 | static void mvgbe_filter_setup(struct mvgbe_softc *); |
268 | #ifdef MVGBE_DEBUG | | 268 | #ifdef MVGBE_DEBUG |
269 | static void mvgbe_dump_txdesc(struct mvgbe_tx_desc *, int); | | 269 | static void mvgbe_dump_txdesc(struct mvgbe_tx_desc *, int); |
270 | #endif | | 270 | #endif |
271 | | | 271 | |
272 | CFATTACH_DECL_NEW(mvgbec_gt, sizeof(struct mvgbec_softc), | | 272 | CFATTACH_DECL_NEW(mvgbec_gt, sizeof(struct mvgbec_softc), |
273 | mvgbec_match, mvgbec_attach, NULL, NULL); | | 273 | mvgbec_match, mvgbec_attach, NULL, NULL); |
274 | CFATTACH_DECL_NEW(mvgbec_mbus, sizeof(struct mvgbec_softc), | | 274 | CFATTACH_DECL_NEW(mvgbec_mbus, sizeof(struct mvgbec_softc), |
275 | mvgbec_match, mvgbec_attach, NULL, NULL); | | 275 | mvgbec_match, mvgbec_attach, NULL, NULL); |
276 | | | 276 | |
277 | CFATTACH_DECL_NEW(mvgbe, sizeof(struct mvgbe_softc), | | 277 | CFATTACH_DECL_NEW(mvgbe, sizeof(struct mvgbe_softc), |
278 | mvgbe_match, mvgbe_attach, NULL, NULL); | | 278 | mvgbe_match, mvgbe_attach, NULL, NULL); |
279 | | | 279 | |
280 | device_t mvgbec0 = NULL; | | 280 | device_t mvgbec0 = NULL; |
281 | | | 281 | |
282 | struct mvgbe_port { | | 282 | struct mvgbe_port { |
283 | int model; | | 283 | int model; |
284 | int unit; | | 284 | int unit; |
285 | int ports; | | 285 | int ports; |
286 | int irqs[3]; | | 286 | int irqs[3]; |
287 | int flags; | | 287 | int flags; |
288 | #define FLAGS_FIX_TQTB (1 << 0) | | 288 | #define FLAGS_FIX_TQTB (1 << 0) |
289 | } mvgbe_ports[] = { | | 289 | } mvgbe_ports[] = { |
290 | { MARVELL_DISCOVERY_II, 0, 3, { 32, 33, 34 }, 0 }, | | 290 | { MARVELL_DISCOVERY_II, 0, 3, { 32, 33, 34 }, 0 }, |
291 | { MARVELL_DISCOVERY_III, 0, 3, { 32, 33, 34 }, 0 }, | | 291 | { MARVELL_DISCOVERY_III, 0, 3, { 32, 33, 34 }, 0 }, |
292 | #if 0 | | 292 | #if 0 |
293 | { MARVELL_DISCOVERY_LT, 0, ?, { }, 0 }, | | 293 | { MARVELL_DISCOVERY_LT, 0, ?, { }, 0 }, |
294 | { MARVELL_DISCOVERY_V, 0, ?, { }, 0 }, | | 294 | { MARVELL_DISCOVERY_V, 0, ?, { }, 0 }, |
295 | { MARVELL_DISCOVERY_VI, 0, ?, { }, 0 }, | | 295 | { MARVELL_DISCOVERY_VI, 0, ?, { }, 0 }, |
296 | #endif | | 296 | #endif |
297 | { MARVELL_ORION_1_88F5082, 0, 1, { 21 }, 0 }, | | 297 | { MARVELL_ORION_1_88F5082, 0, 1, { 21 }, 0 }, |
298 | { MARVELL_ORION_1_88F5180N, 0, 1, { 21 }, 0 }, | | 298 | { MARVELL_ORION_1_88F5180N, 0, 1, { 21 }, 0 }, |
299 | { MARVELL_ORION_1_88F5181, 0, 1, { 21 }, 0 }, | | 299 | { MARVELL_ORION_1_88F5181, 0, 1, { 21 }, 0 }, |
300 | { MARVELL_ORION_1_88F5182, 0, 1, { 21 }, 0 }, | | 300 | { MARVELL_ORION_1_88F5182, 0, 1, { 21 }, 0 }, |
301 | { MARVELL_ORION_2_88F5281, 0, 1, { 21 }, 0 }, | | 301 | { MARVELL_ORION_2_88F5281, 0, 1, { 21 }, 0 }, |
302 | { MARVELL_ORION_1_88F6082, 0, 1, { 21 }, 0 }, | | 302 | { MARVELL_ORION_1_88F6082, 0, 1, { 21 }, 0 }, |
303 | { MARVELL_ORION_1_88W8660, 0, 1, { 21 }, 0 }, | | 303 | { MARVELL_ORION_1_88W8660, 0, 1, { 21 }, 0 }, |
304 | | | 304 | |
305 | { MARVELL_KIRKWOOD_88F6180, 0, 1, { 11 }, FLAGS_FIX_TQTB }, | | 305 | { MARVELL_KIRKWOOD_88F6180, 0, 1, { 11 }, FLAGS_FIX_TQTB }, |
306 | { MARVELL_KIRKWOOD_88F6192, 0, 1, { 11 }, FLAGS_FIX_TQTB }, | | 306 | { MARVELL_KIRKWOOD_88F6192, 0, 1, { 11 }, FLAGS_FIX_TQTB }, |
307 | { MARVELL_KIRKWOOD_88F6192, 1, 1, { 14 }, FLAGS_FIX_TQTB }, | | 307 | { MARVELL_KIRKWOOD_88F6192, 1, 1, { 14 }, FLAGS_FIX_TQTB }, |
308 | { MARVELL_KIRKWOOD_88F6281, 0, 1, { 11 }, FLAGS_FIX_TQTB }, | | 308 | { MARVELL_KIRKWOOD_88F6281, 0, 1, { 11 }, FLAGS_FIX_TQTB }, |
309 | { MARVELL_KIRKWOOD_88F6281, 1, 1, { 15 }, FLAGS_FIX_TQTB }, | | 309 | { MARVELL_KIRKWOOD_88F6281, 1, 1, { 15 }, FLAGS_FIX_TQTB }, |
310 | | | 310 | |
311 | { MARVELL_MV78XX0_MV78100, 0, 1, { 40 }, FLAGS_FIX_TQTB }, | | 311 | { MARVELL_MV78XX0_MV78100, 0, 1, { 40 }, FLAGS_FIX_TQTB }, |
312 | { MARVELL_MV78XX0_MV78100, 1, 1, { 44 }, FLAGS_FIX_TQTB }, | | 312 | { MARVELL_MV78XX0_MV78100, 1, 1, { 44 }, FLAGS_FIX_TQTB }, |
313 | { MARVELL_MV78XX0_MV78200, 0, 1, { 40 }, FLAGS_FIX_TQTB }, | | 313 | { MARVELL_MV78XX0_MV78200, 0, 1, { 40 }, FLAGS_FIX_TQTB }, |
314 | { MARVELL_MV78XX0_MV78200, 1, 1, { 44 }, FLAGS_FIX_TQTB }, | | 314 | { MARVELL_MV78XX0_MV78200, 1, 1, { 44 }, FLAGS_FIX_TQTB }, |
315 | { MARVELL_MV78XX0_MV78200, 2, 1, { 48 }, FLAGS_FIX_TQTB }, | | 315 | { MARVELL_MV78XX0_MV78200, 2, 1, { 48 }, FLAGS_FIX_TQTB }, |
316 | { MARVELL_MV78XX0_MV78200, 3, 1, { 52 }, FLAGS_FIX_TQTB }, | | 316 | { MARVELL_MV78XX0_MV78200, 3, 1, { 52 }, FLAGS_FIX_TQTB }, |
317 | }; | | 317 | }; |
318 | | | 318 | |
319 | | | 319 | |
320 | /* ARGSUSED */ | | 320 | /* ARGSUSED */ |
321 | static int | | 321 | static int |
322 | mvgbec_match(device_t parent, cfdata_t match, void *aux) | | 322 | mvgbec_match(device_t parent, cfdata_t match, void *aux) |
323 | { | | 323 | { |
324 | struct marvell_attach_args *mva = aux; | | 324 | struct marvell_attach_args *mva = aux; |
325 | int i; | | 325 | int i; |
326 | | | 326 | |
327 | if (strcmp(mva->mva_name, match->cf_name) != 0) | | 327 | if (strcmp(mva->mva_name, match->cf_name) != 0) |
328 | return 0; | | 328 | return 0; |
329 | if (mva->mva_offset == MVA_OFFSET_DEFAULT) | | 329 | if (mva->mva_offset == MVA_OFFSET_DEFAULT) |
330 | return 0; | | 330 | return 0; |
331 | | | 331 | |
332 | for (i = 0; i < __arraycount(mvgbe_ports); i++) | | 332 | for (i = 0; i < __arraycount(mvgbe_ports); i++) |
333 | if (mva->mva_model == mvgbe_ports[i].model) { | | 333 | if (mva->mva_model == mvgbe_ports[i].model) { |
334 | mva->mva_size = MVGBE_SIZE; | | 334 | mva->mva_size = MVGBE_SIZE; |
335 | return 1; | | 335 | return 1; |
336 | } | | 336 | } |
337 | return 0; | | 337 | return 0; |
338 | } | | 338 | } |
339 | | | 339 | |
340 | /* ARGSUSED */ | | 340 | /* ARGSUSED */ |
341 | static void | | 341 | static void |
342 | mvgbec_attach(device_t parent, device_t self, void *aux) | | 342 | mvgbec_attach(device_t parent, device_t self, void *aux) |
343 | { | | 343 | { |
344 | struct mvgbec_softc *sc = device_private(self); | | 344 | struct mvgbec_softc *sc = device_private(self); |
345 | struct marvell_attach_args *mva = aux, gbea; | | 345 | struct marvell_attach_args *mva = aux, gbea; |
346 | struct mvgbe_softc *port; | | 346 | struct mvgbe_softc *port; |
347 | struct mii_softc *mii; | | 347 | struct mii_softc *mii; |
348 | device_t child; | | 348 | device_t child; |
349 | uint32_t phyaddr; | | 349 | uint32_t phyaddr; |
350 | int i, j; | | 350 | int i, j; |
351 | | | 351 | |
352 | aprint_naive("\n"); | | 352 | aprint_naive("\n"); |
353 | aprint_normal(": Marvell Gigabit Ethernet Controller\n"); | | 353 | aprint_normal(": Marvell Gigabit Ethernet Controller\n"); |
354 | | | 354 | |
355 | sc->sc_dev = self; | | 355 | sc->sc_dev = self; |
356 | sc->sc_iot = mva->mva_iot; | | 356 | sc->sc_iot = mva->mva_iot; |
357 | if (bus_space_subregion(mva->mva_iot, mva->mva_ioh, mva->mva_offset, | | 357 | if (bus_space_subregion(mva->mva_iot, mva->mva_ioh, mva->mva_offset, |
358 | mva->mva_size, &sc->sc_ioh)) { | | 358 | mva->mva_size, &sc->sc_ioh)) { |
359 | aprint_error_dev(self, "Cannot map registers\n"); | | 359 | aprint_error_dev(self, "Cannot map registers\n"); |
360 | return; | | 360 | return; |
361 | } | | 361 | } |
362 | | | 362 | |
363 | if (mvgbec0 == NULL) | | 363 | if (mvgbec0 == NULL) |
364 | mvgbec0 = self; | | 364 | mvgbec0 = self; |
365 | | | 365 | |
366 | phyaddr = 0; | | 366 | phyaddr = 0; |
367 | MVGBE_WRITE(sc, MVGBE_PHYADDR, phyaddr); | | 367 | MVGBE_WRITE(sc, MVGBE_PHYADDR, phyaddr); |
368 | | | 368 | |
369 | mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NET); | | 369 | mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NET); |
370 | | | 370 | |
371 | /* Disable and clear Gigabit Ethernet Unit interrupts */ | | 371 | /* Disable and clear Gigabit Ethernet Unit interrupts */ |
372 | MVGBE_WRITE(sc, MVGBE_EUIM, 0); | | 372 | MVGBE_WRITE(sc, MVGBE_EUIM, 0); |
373 | MVGBE_WRITE(sc, MVGBE_EUIC, 0); | | 373 | MVGBE_WRITE(sc, MVGBE_EUIC, 0); |
374 | | | 374 | |
375 | mvgbec_wininit(sc); | | 375 | mvgbec_wininit(sc); |
376 | | | 376 | |
377 | memset(&gbea, 0, sizeof(gbea)); | | 377 | memset(&gbea, 0, sizeof(gbea)); |
378 | for (i = 0; i < __arraycount(mvgbe_ports); i++) { | | 378 | for (i = 0; i < __arraycount(mvgbe_ports); i++) { |
379 | if (mvgbe_ports[i].model != mva->mva_model || | | 379 | if (mvgbe_ports[i].model != mva->mva_model || |
380 | mvgbe_ports[i].unit != mva->mva_unit) | | 380 | mvgbe_ports[i].unit != mva->mva_unit) |
381 | continue; | | 381 | continue; |
382 | | | 382 | |
383 | sc->sc_fix_tqtb = mvgbe_ports[i].flags & FLAGS_FIX_TQTB; | | 383 | sc->sc_fix_tqtb = mvgbe_ports[i].flags & FLAGS_FIX_TQTB; |
384 | | | 384 | |
385 | for (j = 0; j < mvgbe_ports[i].ports; j++) { | | 385 | for (j = 0; j < mvgbe_ports[i].ports; j++) { |
386 | gbea.mva_name = "mvgbe"; | | 386 | gbea.mva_name = "mvgbe"; |
387 | gbea.mva_model = mva->mva_model; | | 387 | gbea.mva_model = mva->mva_model; |
388 | gbea.mva_iot = sc->sc_iot; | | 388 | gbea.mva_iot = sc->sc_iot; |
389 | gbea.mva_ioh = sc->sc_ioh; | | 389 | gbea.mva_ioh = sc->sc_ioh; |
390 | gbea.mva_unit = j; | | 390 | gbea.mva_unit = j; |
391 | gbea.mva_dmat = mva->mva_dmat; | | 391 | gbea.mva_dmat = mva->mva_dmat; |
392 | gbea.mva_irq = mvgbe_ports[i].irqs[j]; | | 392 | gbea.mva_irq = mvgbe_ports[i].irqs[j]; |
393 | child = config_found_sm_loc(sc->sc_dev, "mvgbec", NULL, | | 393 | child = config_found_sm_loc(sc->sc_dev, "mvgbec", NULL, |
394 | &gbea, mvgbec_print, mvgbec_search); | | 394 | &gbea, mvgbec_print, mvgbec_search); |
395 | if (child) { | | 395 | if (child) { |
396 | port = device_private(child); | | 396 | port = device_private(child); |
397 | mii = LIST_FIRST(&port->sc_mii.mii_phys); | | 397 | mii = LIST_FIRST(&port->sc_mii.mii_phys); |
398 | phyaddr |= MVGBE_PHYADDR_PHYAD(j, mii->mii_phy); | | 398 | phyaddr |= MVGBE_PHYADDR_PHYAD(j, mii->mii_phy); |
399 | } | | 399 | } |
400 | } | | 400 | } |
401 | break; | | 401 | break; |
402 | } | | 402 | } |
403 | MVGBE_WRITE(sc, MVGBE_PHYADDR, phyaddr); | | 403 | MVGBE_WRITE(sc, MVGBE_PHYADDR, phyaddr); |
404 | } | | 404 | } |
405 | | | 405 | |
406 | static int | | 406 | static int |
407 | mvgbec_print(void *aux, const char *pnp) | | 407 | mvgbec_print(void *aux, const char *pnp) |
408 | { | | 408 | { |
409 | struct marvell_attach_args *gbea = aux; | | 409 | struct marvell_attach_args *gbea = aux; |
410 | | | 410 | |
411 | if (pnp) | | 411 | if (pnp) |
412 | aprint_normal("%s at %s port %d", | | 412 | aprint_normal("%s at %s port %d", |
413 | gbea->mva_name, pnp, gbea->mva_unit); | | 413 | gbea->mva_name, pnp, gbea->mva_unit); |
414 | else { | | 414 | else { |
415 | if (gbea->mva_unit != MVGBECCF_PORT_DEFAULT) | | 415 | if (gbea->mva_unit != MVGBECCF_PORT_DEFAULT) |
416 | aprint_normal(" port %d", gbea->mva_unit); | | 416 | aprint_normal(" port %d", gbea->mva_unit); |
417 | if (gbea->mva_irq != MVGBECCF_IRQ_DEFAULT) | | 417 | if (gbea->mva_irq != MVGBECCF_IRQ_DEFAULT) |
418 | aprint_normal(" irq %d", gbea->mva_irq); | | 418 | aprint_normal(" irq %d", gbea->mva_irq); |
419 | } | | 419 | } |
420 | return UNCONF; | | 420 | return UNCONF; |
421 | } | | 421 | } |
422 | | | 422 | |
423 | /* ARGSUSED */ | | 423 | /* ARGSUSED */ |
424 | static int | | 424 | static int |
425 | mvgbec_search(device_t parent, cfdata_t cf, const int *ldesc, void *aux) | | 425 | mvgbec_search(device_t parent, cfdata_t cf, const int *ldesc, void *aux) |
426 | { | | 426 | { |
427 | struct marvell_attach_args *gbea = aux; | | 427 | struct marvell_attach_args *gbea = aux; |
428 | | | 428 | |
429 | if (cf->cf_loc[MVGBECCF_PORT] == gbea->mva_unit && | | 429 | if (cf->cf_loc[MVGBECCF_PORT] == gbea->mva_unit && |
430 | cf->cf_loc[MVGBECCF_IRQ] != MVGBECCF_IRQ_DEFAULT) | | 430 | cf->cf_loc[MVGBECCF_IRQ] != MVGBECCF_IRQ_DEFAULT) |
431 | gbea->mva_irq = cf->cf_loc[MVGBECCF_IRQ]; | | 431 | gbea->mva_irq = cf->cf_loc[MVGBECCF_IRQ]; |
432 | | | 432 | |
433 | return config_match(parent, cf, aux); | | 433 | return config_match(parent, cf, aux); |
434 | } | | 434 | } |
435 | | | 435 | |
436 | static int | | 436 | static int |
437 | mvgbec_miibus_readreg(device_t dev, int phy, int reg) | | 437 | mvgbec_miibus_readreg(device_t dev, int phy, int reg) |
438 | { | | 438 | { |
439 | struct mvgbe_softc *sc = device_private(dev); | | 439 | struct mvgbe_softc *sc = device_private(dev); |
440 | struct mvgbec_softc *csc; | | 440 | struct mvgbec_softc *csc; |
441 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; | | 441 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
442 | uint32_t smi, val; | | 442 | uint32_t smi, val; |
443 | int i; | | 443 | int i; |
444 | | | 444 | |
445 | if (mvgbec0 == NULL) { | | 445 | if (mvgbec0 == NULL) { |
446 | aprint_error_ifnet(ifp, "SMI mvgbec0 not found\n"); | | 446 | aprint_error_ifnet(ifp, "SMI mvgbec0 not found\n"); |
447 | return -1; | | 447 | return -1; |
448 | } | | 448 | } |
449 | csc = device_private(mvgbec0); | | 449 | csc = device_private(mvgbec0); |
450 | | | 450 | |
451 | mutex_enter(&csc->sc_mtx); | | 451 | mutex_enter(&csc->sc_mtx); |
452 | | | 452 | |
453 | for (i = 0; i < MVGBE_PHY_TIMEOUT; i++) { | | 453 | for (i = 0; i < MVGBE_PHY_TIMEOUT; i++) { |
454 | DELAY(1); | | 454 | DELAY(1); |
455 | if (!(MVGBE_READ(csc, MVGBE_SMI) & MVGBE_SMI_BUSY)) | | 455 | if (!(MVGBE_READ(csc, MVGBE_SMI) & MVGBE_SMI_BUSY)) |
456 | break; | | 456 | break; |
457 | } | | 457 | } |
458 | if (i == MVGBE_PHY_TIMEOUT) { | | 458 | if (i == MVGBE_PHY_TIMEOUT) { |
459 | aprint_error_ifnet(ifp, "SMI busy timeout\n"); | | 459 | aprint_error_ifnet(ifp, "SMI busy timeout\n"); |
460 | mutex_exit(&csc->sc_mtx); | | 460 | mutex_exit(&csc->sc_mtx); |
461 | return -1; | | 461 | return -1; |
462 | } | | 462 | } |
463 | | | 463 | |
464 | smi = | | 464 | smi = |
465 | MVGBE_SMI_PHYAD(phy) | MVGBE_SMI_REGAD(reg) | MVGBE_SMI_OPCODE_READ; | | 465 | MVGBE_SMI_PHYAD(phy) | MVGBE_SMI_REGAD(reg) | MVGBE_SMI_OPCODE_READ; |
466 | MVGBE_WRITE(csc, MVGBE_SMI, smi); | | 466 | MVGBE_WRITE(csc, MVGBE_SMI, smi); |
467 | | | 467 | |
468 | for (i = 0; i < MVGBE_PHY_TIMEOUT; i++) { | | 468 | for (i = 0; i < MVGBE_PHY_TIMEOUT; i++) { |
469 | DELAY(1); | | 469 | DELAY(1); |
470 | smi = MVGBE_READ(csc, MVGBE_SMI); | | 470 | smi = MVGBE_READ(csc, MVGBE_SMI); |
471 | if (smi & MVGBE_SMI_READVALID) | | 471 | if (smi & MVGBE_SMI_READVALID) |
472 | break; | | 472 | break; |
473 | } | | 473 | } |
474 | | | 474 | |
475 | mutex_exit(&csc->sc_mtx); | | 475 | mutex_exit(&csc->sc_mtx); |
476 | | | 476 | |
477 | DPRINTFN(9, ("mvgbec_miibus_readreg: i=%d, timeout=%d\n", | | 477 | DPRINTFN(9, ("mvgbec_miibus_readreg: i=%d, timeout=%d\n", |
478 | i, MVGBE_PHY_TIMEOUT)); | | 478 | i, MVGBE_PHY_TIMEOUT)); |
479 | | | 479 | |
480 | val = smi & MVGBE_SMI_DATA_MASK; | | 480 | val = smi & MVGBE_SMI_DATA_MASK; |
481 | | | 481 | |
482 | DPRINTFN(9, ("mvgbec_miibus_readreg phy=%d, reg=%#x, val=%#x\n", | | 482 | DPRINTFN(9, ("mvgbec_miibus_readreg phy=%d, reg=%#x, val=%#x\n", |
483 | phy, reg, val)); | | 483 | phy, reg, val)); |
484 | | | 484 | |
485 | return val; | | 485 | return val; |
486 | } | | 486 | } |
487 | | | 487 | |
488 | static void | | 488 | static void |
489 | mvgbec_miibus_writereg(device_t dev, int phy, int reg, int val) | | 489 | mvgbec_miibus_writereg(device_t dev, int phy, int reg, int val) |
490 | { | | 490 | { |
491 | struct mvgbe_softc *sc = device_private(dev); | | 491 | struct mvgbe_softc *sc = device_private(dev); |
492 | struct mvgbec_softc *csc; | | 492 | struct mvgbec_softc *csc; |
493 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; | | 493 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
494 | uint32_t smi; | | 494 | uint32_t smi; |
495 | int i; | | 495 | int i; |
496 | | | 496 | |
497 | if (mvgbec0 == NULL) { | | 497 | if (mvgbec0 == NULL) { |
498 | aprint_error_ifnet(ifp, "SMI mvgbec0 not found\n"); | | 498 | aprint_error_ifnet(ifp, "SMI mvgbec0 not found\n"); |
499 | return; | | 499 | return; |
500 | } | | 500 | } |
501 | csc = device_private(mvgbec0); | | 501 | csc = device_private(mvgbec0); |
502 | | | 502 | |
503 | DPRINTFN(9, ("mvgbec_miibus_writereg phy=%d reg=%#x val=%#x\n", | | 503 | DPRINTFN(9, ("mvgbec_miibus_writereg phy=%d reg=%#x val=%#x\n", |
504 | phy, reg, val)); | | 504 | phy, reg, val)); |
505 | | | 505 | |
506 | mutex_enter(&csc->sc_mtx); | | 506 | mutex_enter(&csc->sc_mtx); |
507 | | | 507 | |
508 | for (i = 0; i < MVGBE_PHY_TIMEOUT; i++) { | | 508 | for (i = 0; i < MVGBE_PHY_TIMEOUT; i++) { |
509 | DELAY(1); | | 509 | DELAY(1); |
510 | if (!(MVGBE_READ(csc, MVGBE_SMI) & MVGBE_SMI_BUSY)) | | 510 | if (!(MVGBE_READ(csc, MVGBE_SMI) & MVGBE_SMI_BUSY)) |
511 | break; | | 511 | break; |
512 | } | | 512 | } |
513 | if (i == MVGBE_PHY_TIMEOUT) { | | 513 | if (i == MVGBE_PHY_TIMEOUT) { |
514 | aprint_error_ifnet(ifp, "SMI busy timeout\n"); | | 514 | aprint_error_ifnet(ifp, "SMI busy timeout\n"); |
515 | mutex_exit(&csc->sc_mtx); | | 515 | mutex_exit(&csc->sc_mtx); |
516 | return; | | 516 | return; |
517 | } | | 517 | } |
518 | | | 518 | |
519 | smi = MVGBE_SMI_PHYAD(phy) | MVGBE_SMI_REGAD(reg) | | | 519 | smi = MVGBE_SMI_PHYAD(phy) | MVGBE_SMI_REGAD(reg) | |
520 | MVGBE_SMI_OPCODE_WRITE | (val & MVGBE_SMI_DATA_MASK); | | 520 | MVGBE_SMI_OPCODE_WRITE | (val & MVGBE_SMI_DATA_MASK); |
521 | MVGBE_WRITE(csc, MVGBE_SMI, smi); | | 521 | MVGBE_WRITE(csc, MVGBE_SMI, smi); |
522 | | | 522 | |
523 | for (i = 0; i < MVGBE_PHY_TIMEOUT; i++) { | | 523 | for (i = 0; i < MVGBE_PHY_TIMEOUT; i++) { |
524 | DELAY(1); | | 524 | DELAY(1); |
525 | if (!(MVGBE_READ(csc, MVGBE_SMI) & MVGBE_SMI_BUSY)) | | 525 | if (!(MVGBE_READ(csc, MVGBE_SMI) & MVGBE_SMI_BUSY)) |
526 | break; | | 526 | break; |
527 | } | | 527 | } |
528 | | | 528 | |
529 | mutex_exit(&csc->sc_mtx); | | 529 | mutex_exit(&csc->sc_mtx); |
530 | | | 530 | |
531 | if (i == MVGBE_PHY_TIMEOUT) | | 531 | if (i == MVGBE_PHY_TIMEOUT) |
532 | aprint_error_ifnet(ifp, "phy write timed out\n"); | | 532 | aprint_error_ifnet(ifp, "phy write timed out\n"); |
533 | } | | 533 | } |
534 | | | 534 | |
/*
 * MII status-change callback.  Intentionally empty: link state changes
 * are handled by the port interrupt (MVGBE_ICE_LINKCHG in mvgbe_intr),
 * which enables/disables the RX/TX queues directly.
 */
static void
mvgbec_miibus_statchg(device_t dev)
{

	/* nothing to do */
}
541 | | | 541 | |
542 | | | 542 | |
/*
 * Program the controller's address decode windows so that the GbE DMA
 * engine can reach the SDRAM chip selects exported by the parent bus.
 * Windows are configured one per usable SDRAM tag, all others are left
 * disabled, and each configured window is granted full (r/w) access.
 */
static void
mvgbec_wininit(struct mvgbec_softc *sc)
{
	device_t pdev = device_parent(sc->sc_dev);
	uint64_t base;
	uint32_t en, ac, size;
	int window, target, attr, rv, i;
	/* SDRAM chip selects to expose through the decode windows. */
	static int tags[] = {
		MARVELL_TAG_SDRAM_CS0,
		MARVELL_TAG_SDRAM_CS1,
		MARVELL_TAG_SDRAM_CS2,
		MARVELL_TAG_SDRAM_CS3,

		MARVELL_TAG_UNDEFINED,
	};

	/* First disable all address decode windows */
	en = MVGBE_BARE_EN_MASK;
	MVGBE_WRITE(sc, MVGBE_BARE, en);

	ac = 0;
	for (window = 0, i = 0;
	    tags[i] != MARVELL_TAG_UNDEFINED && window < MVGBE_NWINDOW; i++) {
		/* Ask the parent for this tag's target/attr/base/size. */
		rv = marvell_winparams_by_tag(pdev, tags[i],
		    &target, &attr, &base, &size);
		if (rv != 0 || size == 0)
			continue;

		/* Bases above 4GB need a high-address remap register. */
		if (base > 0xffffffffULL) {
			if (window >= MVGBE_NREMAP) {
				aprint_error_dev(sc->sc_dev,
				    "can't remap window %d\n", window);
				continue;
			}
			MVGBE_WRITE(sc, MVGBE_HA(window),
			    (base >> 32) & 0xffffffff);
		}

		MVGBE_WRITE(sc, MVGBE_BASEADDR(window),
		    MVGBE_BASEADDR_TARGET(target)	|
		    MVGBE_BASEADDR_ATTR(attr)		|
		    MVGBE_BASEADDR_BASE(base));
		MVGBE_WRITE(sc, MVGBE_S(window), MVGBE_S_SIZE(size));

		/* Clearing the bit in MVGBE_BARE enables the window. */
		en &= ~(1 << window);
		/* set full access (r/w) */
		ac |= MVGBE_EPAP_EPAR(window, MVGBE_EPAP_AC_FA);
		window++;
	}
	/* allow to access decode window */
	MVGBE_WRITE(sc, MVGBE_EPAP, ac);

	MVGBE_WRITE(sc, MVGBE_BARE, en);
}
597 | | | 597 | |
598 | | | 598 | |
599 | /* ARGSUSED */ | | 599 | /* ARGSUSED */ |
600 | static int | | 600 | static int |
601 | mvgbe_match(device_t parent, cfdata_t match, void *aux) | | 601 | mvgbe_match(device_t parent, cfdata_t match, void *aux) |
602 | { | | 602 | { |
603 | struct marvell_attach_args *mva = aux; | | 603 | struct marvell_attach_args *mva = aux; |
604 | uint32_t pbase, maddrh, maddrl; | | 604 | uint32_t pbase, maddrh, maddrl; |
605 | | | 605 | |
606 | pbase = MVGBE_PORTR_BASE + mva->mva_unit * MVGBE_PORTR_SIZE; | | 606 | pbase = MVGBE_PORTR_BASE + mva->mva_unit * MVGBE_PORTR_SIZE; |
607 | maddrh = | | 607 | maddrh = |
608 | bus_space_read_4(mva->mva_iot, mva->mva_ioh, pbase + MVGBE_MACAH); | | 608 | bus_space_read_4(mva->mva_iot, mva->mva_ioh, pbase + MVGBE_MACAH); |
609 | maddrl = | | 609 | maddrl = |
610 | bus_space_read_4(mva->mva_iot, mva->mva_ioh, pbase + MVGBE_MACAL); | | 610 | bus_space_read_4(mva->mva_iot, mva->mva_ioh, pbase + MVGBE_MACAL); |
611 | if ((maddrh | maddrl) == 0) | | 611 | if ((maddrh | maddrl) == 0) |
612 | return 0; | | 612 | return 0; |
613 | | | 613 | |
614 | return 1; | | 614 | return 1; |
615 | } | | 615 | } |
616 | | | 616 | |
/* ARGSUSED */
/*
 * Attach one GbE port: map its register subregions, read the station
 * address left in the MAC registers, allocate/map/load the DMA ring
 * memory and per-packet TX dmamaps, set up the ifnet and MII, and
 * register with the network stack.  On failure the acquired DMA
 * resources are released in reverse order via the fail* labels.
 */
static void
mvgbe_attach(device_t parent, device_t self, void *aux)
{
	struct mvgbe_softc *sc = device_private(self);
	struct marvell_attach_args *mva = aux;
	struct mvgbe_txmap_entry *entry;
	struct ifnet *ifp;
	bus_dma_segment_t seg;
	bus_dmamap_t dmamap;
	int rseg, i;
	uint32_t maddrh, maddrl;
	void *kva;

	aprint_naive("\n");
	aprint_normal("\n");

	sc->sc_dev = self;
	sc->sc_port = mva->mva_unit;
	sc->sc_iot = mva->mva_iot;
	/* Map this port's register block within the parent's space. */
	if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
	    MVGBE_PORTR_BASE + mva->mva_unit * MVGBE_PORTR_SIZE,
	    MVGBE_PORTR_SIZE, &sc->sc_ioh)) {
		aprint_error_dev(self, "Cannot map registers\n");
		return;
	}
	/* Separate subregion for the destination address filter tables. */
	if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
	    MVGBE_PORTDAFR_BASE + mva->mva_unit * MVGBE_PORTDAFR_SIZE,
	    MVGBE_PORTDAFR_SIZE, &sc->sc_dafh)) {
		aprint_error_dev(self,
		    "Cannot map destination address filter registers\n");
		return;
	}
	sc->sc_dmat = mva->mva_dmat;

	/*
	 * Recover the Ethernet address from the MAC address registers
	 * (big-endian packing: MACAH holds bytes 0-3, MACAL bytes 4-5).
	 */
	maddrh = MVGBE_READ(sc, MVGBE_MACAH);
	maddrl = MVGBE_READ(sc, MVGBE_MACAL);
	sc->sc_enaddr[0] = maddrh >> 24;
	sc->sc_enaddr[1] = maddrh >> 16;
	sc->sc_enaddr[2] = maddrh >> 8;
	sc->sc_enaddr[3] = maddrh >> 0;
	sc->sc_enaddr[4] = maddrl >> 8;
	sc->sc_enaddr[5] = maddrl >> 0;
	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	/* clear all ethernet port interrupts */
	MVGBE_WRITE(sc, MVGBE_IC, 0);
	MVGBE_WRITE(sc, MVGBE_ICE, 0);

	marvell_intr_establish(mva->mva_irq, IPL_NET, mvgbe_intr, sc);

	/* Allocate the descriptor queues. */
	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct mvgbe_ring_data),
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error_dev(self, "can't alloc rx buffers\n");
		return;
	}
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct mvgbe_ring_data), &kva, BUS_DMA_NOWAIT)) {
		aprint_error_dev(self, "can't map dma buffers (%lu bytes)\n",
		    (u_long)sizeof(struct mvgbe_ring_data));
		goto fail1;
	}
	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct mvgbe_ring_data), 1,
	    sizeof(struct mvgbe_ring_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_ring_map)) {
		aprint_error_dev(self, "can't create dma map\n");
		goto fail2;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_ring_map, kva,
	    sizeof(struct mvgbe_ring_data), NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(self, "can't load dma map\n");
		goto fail3;
	}
	for (i = 0; i < MVGBE_RX_RING_CNT; i++)
		sc->sc_cdata.mvgbe_rx_chain[i].mvgbe_mbuf = NULL;

	/* Pre-create one dmamap per TX ring slot and keep them on a free list. */
	SIMPLEQ_INIT(&sc->sc_txmap_head);
	for (i = 0; i < MVGBE_TX_RING_CNT; i++) {
		sc->sc_cdata.mvgbe_tx_chain[i].mvgbe_mbuf = NULL;

		if (bus_dmamap_create(sc->sc_dmat,
		    MVGBE_JLEN, MVGBE_NTXSEG, MVGBE_JLEN, 0,
		    BUS_DMA_NOWAIT, &dmamap)) {
			aprint_error_dev(self, "Can't create TX dmamap\n");
			goto fail4;
		}

		/* NOTE(review): KM_SLEEP allocations should not fail,
		 * so this NULL check is defensive. */
		entry = kmem_alloc(sizeof(*entry), KM_SLEEP);
		if (!entry) {
			aprint_error_dev(self, "Can't alloc txmap entry\n");
			bus_dmamap_destroy(sc->sc_dmat, dmamap);
			goto fail4;
		}
		entry->dmamap = dmamap;
		SIMPLEQ_INSERT_HEAD(&sc->sc_txmap_head, entry, link);
	}

	sc->sc_rdata = (struct mvgbe_ring_data *)kva;
	memset(sc->sc_rdata, 0, sizeof(struct mvgbe_ring_data));

	/*
	 * We can support 802.1Q VLAN-sized frames and jumbo
	 * Ethernet frames.
	 */
	sc->sc_ethercom.ec_capabilities |=
	    ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;

	/* Try to allocate memory for jumbo buffers. */
	if (mvgbe_alloc_jumbo_mem(sc)) {
		aprint_error_dev(self, "jumbo buffer allocation failed\n");
		goto fail4;
	}

	/* Initialize the ifnet callbacks and flags. */
	ifp = &sc->sc_ethercom.ec_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = mvgbe_start;
	ifp->if_ioctl = mvgbe_ioctl;
	ifp->if_init = mvgbe_init;
	ifp->if_stop = mvgbe_stop;
	ifp->if_watchdog = mvgbe_watchdog;
	/*
	 * We can do IPv4/TCPv4/UDPv4 checksums in hardware.
	 */
	sc->sc_ethercom.ec_if.if_capabilities |=
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
	/*
	 * But, IPv6 packets in the stream can cause incorrect TCPv4 Tx sums.
	 */
	sc->sc_ethercom.ec_if.if_capabilities &= ~IFCAP_CSUM_TCPv4_Tx;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(MVGBE_TX_RING_CNT - 1, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);
	strcpy(ifp->if_xname, device_xname(sc->sc_dev));

	/* Quiesce the port before MII/ifnet attach. */
	mvgbe_stop(ifp, 0);

	/*
	 * Do MII setup.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = mvgbec_miibus_readreg;
	sc->sc_mii.mii_writereg = mvgbec_miibus_writereg;
	sc->sc_mii.mii_statchg = mvgbec_miibus_statchg;

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, 0,
	    mvgbe_mediachange, mvgbe_mediastatus);
	/* Ports behind mvgbec0 start at PHY instance 0, others at 1. */
	mii_attach(self, &sc->sc_mii, 0xffffffff,
	    MII_PHY_ANY, parent == mvgbec0 ? 0 : 1, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&sc->sc_mii.mii_media,
		    IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);

	ether_ifattach(ifp, sc->sc_enaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, mvgbe_ifflags_cb);

#if NRND > 0
	rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, 0);
#endif

	return;

/* Error unwind: release DMA resources in reverse acquisition order. */
fail4:
	while ((entry = SIMPLEQ_FIRST(&sc->sc_txmap_head)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_txmap_head, link);
		bus_dmamap_destroy(sc->sc_dmat, entry->dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_ring_map);
fail3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_ring_map);
fail2:
	bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(struct mvgbe_ring_data));
fail1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
	return;
}
807 | | | 807 | |
808 | | | 808 | |
809 | static int | | 809 | static int |
810 | mvgbe_intr(void *arg) | | 810 | mvgbe_intr(void *arg) |
811 | { | | 811 | { |
812 | struct mvgbe_softc *sc = arg; | | 812 | struct mvgbe_softc *sc = arg; |
813 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; | | 813 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
814 | uint32_t ic, ice, datum = 0; | | 814 | uint32_t ic, ice, datum = 0; |
815 | int claimed = 0; | | 815 | int claimed = 0; |
816 | | | 816 | |
817 | for (;;) { | | 817 | for (;;) { |
818 | ice = MVGBE_READ(sc, MVGBE_ICE); | | 818 | ice = MVGBE_READ(sc, MVGBE_ICE); |
819 | ic = MVGBE_READ(sc, MVGBE_IC); | | 819 | ic = MVGBE_READ(sc, MVGBE_IC); |
820 | | | 820 | |
821 | DPRINTFN(3, ("mvgbe_intr: ic=%#x, ice=%#x\n", ic, ice)); | | 821 | DPRINTFN(3, ("mvgbe_intr: ic=%#x, ice=%#x\n", ic, ice)); |
822 | if (ic == 0 && ice == 0) | | 822 | if (ic == 0 && ice == 0) |
823 | break; | | 823 | break; |
824 | | | 824 | |
825 | datum = datum ^ ic ^ ice; | | 825 | datum = datum ^ ic ^ ice; |
826 | | | 826 | |
827 | MVGBE_WRITE(sc, MVGBE_IC, ~ic); | | 827 | MVGBE_WRITE(sc, MVGBE_IC, ~ic); |
828 | MVGBE_WRITE(sc, MVGBE_ICE, ~ice); | | 828 | MVGBE_WRITE(sc, MVGBE_ICE, ~ice); |
829 | | | 829 | |
830 | claimed = 1; | | 830 | claimed = 1; |
831 | | | 831 | |
832 | if (ice & MVGBE_ICE_LINKCHG) { | | 832 | if (ice & MVGBE_ICE_LINKCHG) { |
833 | if (MVGBE_READ(sc, MVGBE_PS) & MVGBE_PS_LINKUP) { | | 833 | if (MVGBE_READ(sc, MVGBE_PS) & MVGBE_PS_LINKUP) { |
834 | /* Enable port RX and TX. */ | | 834 | /* Enable port RX and TX. */ |
835 | MVGBE_WRITE(sc, MVGBE_RQC, MVGBE_RQC_ENQ(0)); | | 835 | MVGBE_WRITE(sc, MVGBE_RQC, MVGBE_RQC_ENQ(0)); |
836 | MVGBE_WRITE(sc, MVGBE_TQC, MVGBE_TQC_ENQ); | | 836 | MVGBE_WRITE(sc, MVGBE_TQC, MVGBE_TQC_ENQ); |
837 | } else { | | 837 | } else { |
838 | MVGBE_WRITE(sc, MVGBE_RQC, MVGBE_RQC_DISQ(0)); | | 838 | MVGBE_WRITE(sc, MVGBE_RQC, MVGBE_RQC_DISQ(0)); |
839 | MVGBE_WRITE(sc, MVGBE_TQC, MVGBE_TQC_DISQ); | | 839 | MVGBE_WRITE(sc, MVGBE_TQC, MVGBE_TQC_DISQ); |
840 | } | | 840 | } |
841 | } | | 841 | } |
842 | | | 842 | |
843 | if (ic & (MVGBE_IC_RXBUF | MVGBE_IC_RXERROR)) | | 843 | if (ic & (MVGBE_IC_RXBUF | MVGBE_IC_RXERROR)) |
844 | mvgbe_rxeof(sc); | | 844 | mvgbe_rxeof(sc); |
845 | | | 845 | |
846 | if (ice & (MVGBE_ICE_TXBUF | MVGBE_ICE_TXERR)) | | 846 | if (ice & (MVGBE_ICE_TXBUF | MVGBE_ICE_TXERR)) |
847 | mvgbe_txeof(sc); | | 847 | mvgbe_txeof(sc); |
848 | } | | 848 | } |
849 | | | 849 | |
850 | if (!IFQ_IS_EMPTY(&ifp->if_snd)) | | 850 | if (!IFQ_IS_EMPTY(&ifp->if_snd)) |
851 | mvgbe_start(ifp); | | 851 | mvgbe_start(ifp); |
852 | | | 852 | |
853 | #if NRND > 0 | | 853 | #if NRND > 0 |
854 | if (RND_ENABLED(&sc->sc_rnd_source)) | | 854 | if (RND_ENABLED(&sc->sc_rnd_source)) |
855 | rnd_add_uint32(&sc->sc_rnd_source, datum); | | 855 | rnd_add_uint32(&sc->sc_rnd_source, datum); |
856 | #endif | | 856 | #endif |
857 | | | 857 | |
858 | return claimed; | | 858 | return claimed; |
859 | } | | 859 | } |
860 | | | 860 | |
/*
 * Transmit-start routine.  Drains the interface send queue into the TX
 * ring: each packet is encapsulated first (IFQ_POLL) and only dequeued
 * once it is committed to the ring, so a full ring leaves the packet
 * queued.  Finally kicks TX queue 0 and arms the watchdog.
 */
static void
mvgbe_start(struct ifnet *ifp)
{
	struct mvgbe_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	uint32_t idx = sc->sc_cdata.mvgbe_tx_prod;
	int pkts = 0;

	DPRINTFN(3, ("mvgbe_start (idx %d, tx_chain[idx] %p)\n", idx,
	    sc->sc_cdata.mvgbe_tx_chain[idx].mvgbe_mbuf));

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;
	/* If Link is DOWN, can't start TX */
	if (!(MVGBE_READ(sc, MVGBE_PS) & MVGBE_PS_LINKUP))
		return;

	/* Loop while the next producer slot is free. */
	while (sc->sc_cdata.mvgbe_tx_chain[idx].mvgbe_mbuf == NULL) {
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (mvgbe_encap(sc, m_head, &idx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* now we are committed to transmit the packet */
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		pkts++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		bpf_mtap(ifp, m_head);
	}
	if (pkts == 0)
		return;

	/* Transmit at Queue 0 */
	if (idx != sc->sc_cdata.mvgbe_tx_prod) {
		sc->sc_cdata.mvgbe_tx_prod = idx;
		MVGBE_WRITE(sc, MVGBE_TQC, MVGBE_TQC_ENQ);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		ifp->if_timer = 5;
	}
}
917 | | | 917 | |
918 | static int | | 918 | static int |
919 | mvgbe_ioctl(struct ifnet *ifp, u_long cmd, void *data) | | 919 | mvgbe_ioctl(struct ifnet *ifp, u_long cmd, void *data) |
920 | { | | 920 | { |
921 | struct mvgbe_softc *sc = ifp->if_softc; | | 921 | struct mvgbe_softc *sc = ifp->if_softc; |
922 | struct ifreq *ifr = data; | | 922 | struct ifreq *ifr = data; |
923 | int s, error = 0; | | 923 | int s, error = 0; |
924 | | | 924 | |
925 | s = splnet(); | | 925 | s = splnet(); |
926 | | | 926 | |
927 | switch (cmd) { | | 927 | switch (cmd) { |
928 | case SIOCGIFMEDIA: | | 928 | case SIOCGIFMEDIA: |
929 | case SIOCSIFMEDIA: | | 929 | case SIOCSIFMEDIA: |
930 | DPRINTFN(2, ("mvgbe_ioctl MEDIA\n")); | | 930 | DPRINTFN(2, ("mvgbe_ioctl MEDIA\n")); |
931 | error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd); | | 931 | error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd); |
932 | break; | | 932 | break; |
933 | default: | | 933 | default: |
934 | DPRINTFN(2, ("mvgbe_ioctl ETHER\n")); | | 934 | DPRINTFN(2, ("mvgbe_ioctl ETHER\n")); |
935 | error = ether_ioctl(ifp, cmd, data); | | 935 | error = ether_ioctl(ifp, cmd, data); |
936 | if (error == ENETRESET) { | | 936 | if (error == ENETRESET) { |
937 | if (ifp->if_flags & IFF_RUNNING) { | | 937 | if (ifp->if_flags & IFF_RUNNING) { |
938 | mvgbe_filter_setup(sc); | | 938 | mvgbe_filter_setup(sc); |
939 | } | | 939 | } |
940 | error = 0; | | 940 | error = 0; |
941 | } | | 941 | } |
942 | break; | | 942 | break; |
943 | } | | 943 | } |
944 | | | 944 | |
945 | splx(s); | | 945 | splx(s); |
946 | | | 946 | |
947 | return error; | | 947 | return error; |
948 | } | | 948 | } |
949 | | | 949 | |
/*
 * Interrupt mitigation tunables; patchable.  mvgbe_init() programs
 * mvgbe_rximt into the SDC IPGINTRX field and mvgbe_tximt into the
 * PTFUT IPGINTTX field.  NOTE(review): the unit appears to be the
 * chip's inter-packet-gap tick -- confirm against the Marvell
 * datasheet before tuning.
 */
int mvgbe_rximt = 0;
int mvgbe_tximt = 0;
952 | | | 952 | |
953 | static int | | 953 | static int |
954 | mvgbe_init(struct ifnet *ifp) | | 954 | mvgbe_init(struct ifnet *ifp) |
955 | { | | 955 | { |
956 | struct mvgbe_softc *sc = ifp->if_softc; | | 956 | struct mvgbe_softc *sc = ifp->if_softc; |
957 | struct mvgbec_softc *csc = device_private(device_parent(sc->sc_dev)); | | 957 | struct mvgbec_softc *csc = device_private(device_parent(sc->sc_dev)); |
958 | struct mii_data *mii = &sc->sc_mii; | | 958 | struct mii_data *mii = &sc->sc_mii; |
959 | uint32_t reg; | | 959 | uint32_t reg; |
960 | int i; | | 960 | int i; |
961 | | | 961 | |
962 | DPRINTFN(2, ("mvgbe_init\n")); | | 962 | DPRINTFN(2, ("mvgbe_init\n")); |
963 | | | 963 | |
964 | /* Cancel pending I/O and free all RX/TX buffers. */ | | 964 | /* Cancel pending I/O and free all RX/TX buffers. */ |
965 | mvgbe_stop(ifp, 0); | | 965 | mvgbe_stop(ifp, 0); |
966 | | | 966 | |
967 | /* clear all ethernet port interrupts */ | | 967 | /* clear all ethernet port interrupts */ |
968 | MVGBE_WRITE(sc, MVGBE_IC, 0); | | 968 | MVGBE_WRITE(sc, MVGBE_IC, 0); |
969 | MVGBE_WRITE(sc, MVGBE_ICE, 0); | | 969 | MVGBE_WRITE(sc, MVGBE_ICE, 0); |
970 | | | 970 | |
971 | /* Init TX/RX descriptors */ | | 971 | /* Init TX/RX descriptors */ |
972 | if (mvgbe_init_tx_ring(sc) == ENOBUFS) { | | 972 | if (mvgbe_init_tx_ring(sc) == ENOBUFS) { |
973 | aprint_error_ifnet(ifp, | | 973 | aprint_error_ifnet(ifp, |
974 | "initialization failed: no memory for tx buffers\n"); | | 974 | "initialization failed: no memory for tx buffers\n"); |
975 | return ENOBUFS; | | 975 | return ENOBUFS; |
976 | } | | 976 | } |
977 | if (mvgbe_init_rx_ring(sc) == ENOBUFS) { | | 977 | if (mvgbe_init_rx_ring(sc) == ENOBUFS) { |
978 | aprint_error_ifnet(ifp, | | 978 | aprint_error_ifnet(ifp, |
979 | "initialization failed: no memory for rx buffers\n"); | | 979 | "initialization failed: no memory for rx buffers\n"); |
980 | return ENOBUFS; | | 980 | return ENOBUFS; |
981 | } | | 981 | } |
982 | | | 982 | |
| | | 983 | MVGBE_WRITE(sc, MVGBE_MTU, 0); /* hw reset value is wrong */ |
983 | MVGBE_WRITE(sc, MVGBE_PSC, | | 984 | MVGBE_WRITE(sc, MVGBE_PSC, |
984 | MVGBE_PSC_ANFC | /* Enable Auto-Neg Flow Ctrl */ | | 985 | MVGBE_PSC_ANFC | /* Enable Auto-Neg Flow Ctrl */ |
985 | MVGBE_PSC_RESERVED | /* Must be set to 1 */ | | 986 | MVGBE_PSC_RESERVED | /* Must be set to 1 */ |
986 | MVGBE_PSC_FLFAIL | /* Do NOT Force Link Fail */ | | 987 | MVGBE_PSC_FLFAIL | /* Do NOT Force Link Fail */ |
987 | MVGBE_PSC_MRU(MVGBE_PSC_MRU_9022) | /* we want 9k */ | | 988 | MVGBE_PSC_MRU(MVGBE_PSC_MRU_9022) | /* we want 9k */ |
988 | MVGBE_PSC_SETFULLDX); /* Set_FullDx */ | | 989 | MVGBE_PSC_SETFULLDX); /* Set_FullDx */ |
989 | /* XXXX: mvgbe(4) always use RGMII. */ | | 990 | /* XXXX: mvgbe(4) always use RGMII. */ |
990 | MVGBE_WRITE(sc, MVGBE_PSC1, | | 991 | MVGBE_WRITE(sc, MVGBE_PSC1, |
991 | MVGBE_READ(sc, MVGBE_PSC1) | MVGBE_PSC1_RGMIIEN); | | 992 | MVGBE_READ(sc, MVGBE_PSC1) | MVGBE_PSC1_RGMIIEN); |
992 | /* XXXX: Also always Weighted Round-Robin Priority Mode */ | | 993 | /* XXXX: Also always Weighted Round-Robin Priority Mode */ |
993 | MVGBE_WRITE(sc, MVGBE_TQFPC, MVGBE_TQFPC_EN(0)); | | 994 | MVGBE_WRITE(sc, MVGBE_TQFPC, MVGBE_TQFPC_EN(0)); |
994 | | | 995 | |
995 | MVGBE_WRITE(sc, MVGBE_CRDP(0), MVGBE_RX_RING_ADDR(sc, 0)); | | 996 | MVGBE_WRITE(sc, MVGBE_CRDP(0), MVGBE_RX_RING_ADDR(sc, 0)); |
996 | MVGBE_WRITE(sc, MVGBE_TCQDP, MVGBE_TX_RING_ADDR(sc, 0)); | | 997 | MVGBE_WRITE(sc, MVGBE_TCQDP, MVGBE_TX_RING_ADDR(sc, 0)); |
997 | | | 998 | |
998 | if (csc->sc_fix_tqtb) { | | 999 | if (csc->sc_fix_tqtb) { |
999 | /* | | 1000 | /* |
1000 | * Queue 0 (offset 0x72700) must be programmed to 0x3fffffff. | | 1001 | * Queue 0 (offset 0x72700) must be programmed to 0x3fffffff. |
1001 | * And offset 0x72704 must be programmed to 0x03ffffff. | | 1002 | * And offset 0x72704 must be programmed to 0x03ffffff. |
1002 | * Queue 1 through 7 must be programmed to 0x0. | | 1003 | * Queue 1 through 7 must be programmed to 0x0. |
1003 | */ | | 1004 | */ |
1004 | MVGBE_WRITE(sc, MVGBE_TQTBCOUNT(0), 0x3fffffff); | | 1005 | MVGBE_WRITE(sc, MVGBE_TQTBCOUNT(0), 0x3fffffff); |
1005 | MVGBE_WRITE(sc, MVGBE_TQTBCONFIG(0), 0x03ffffff); | | 1006 | MVGBE_WRITE(sc, MVGBE_TQTBCONFIG(0), 0x03ffffff); |
1006 | for (i = 1; i < 8; i++) { | | 1007 | for (i = 1; i < 8; i++) { |
1007 | MVGBE_WRITE(sc, MVGBE_TQTBCOUNT(i), 0x0); | | 1008 | MVGBE_WRITE(sc, MVGBE_TQTBCOUNT(i), 0x0); |
1008 | MVGBE_WRITE(sc, MVGBE_TQTBCONFIG(i), 0x0); | | 1009 | MVGBE_WRITE(sc, MVGBE_TQTBCONFIG(i), 0x0); |
1009 | } | | 1010 | } |
1010 | } else | | 1011 | } else |
1011 | for (i = 1; i < 8; i++) { | | 1012 | for (i = 1; i < 8; i++) { |
1012 | MVGBE_WRITE(sc, MVGBE_TQTBCOUNT(i), 0x3fffffff); | | 1013 | MVGBE_WRITE(sc, MVGBE_TQTBCOUNT(i), 0x3fffffff); |
1013 | MVGBE_WRITE(sc, MVGBE_TQTBCONFIG(i), 0xffff7fff); | | 1014 | MVGBE_WRITE(sc, MVGBE_TQTBCONFIG(i), 0xffff7fff); |
1014 | MVGBE_WRITE(sc, MVGBE_TQAC(i), 0xfc0000ff); | | 1015 | MVGBE_WRITE(sc, MVGBE_TQAC(i), 0xfc0000ff); |
1015 | } | | 1016 | } |
1016 | | | 1017 | |
1017 | MVGBE_WRITE(sc, MVGBE_PXC, MVGBE_PXC_RXCS); | | 1018 | MVGBE_WRITE(sc, MVGBE_PXC, MVGBE_PXC_RXCS); |
1018 | MVGBE_WRITE(sc, MVGBE_PXCX, 0); | | 1019 | MVGBE_WRITE(sc, MVGBE_PXCX, 0); |
1019 | MVGBE_WRITE(sc, MVGBE_SDC, | | 1020 | MVGBE_WRITE(sc, MVGBE_SDC, |
1020 | MVGBE_SDC_RXBSZ_16_64BITWORDS | | | 1021 | MVGBE_SDC_RXBSZ_16_64BITWORDS | |
1021 | #if BYTE_ORDER == LITTLE_ENDIAN | | 1022 | #if BYTE_ORDER == LITTLE_ENDIAN |
1022 | MVGBE_SDC_BLMR | /* Big/Little Endian Receive Mode: No swap */ | | 1023 | MVGBE_SDC_BLMR | /* Big/Little Endian Receive Mode: No swap */ |
1023 | MVGBE_SDC_BLMT | /* Big/Little Endian Transmit Mode: No swap */ | | 1024 | MVGBE_SDC_BLMT | /* Big/Little Endian Transmit Mode: No swap */ |
1024 | #endif | | 1025 | #endif |
1025 | MVGBE_SDC_IPGINTRX(mvgbe_rximt) | | | 1026 | MVGBE_SDC_IPGINTRX(mvgbe_rximt) | |
1026 | MVGBE_SDC_TXBSZ_16_64BITWORDS); | | 1027 | MVGBE_SDC_TXBSZ_16_64BITWORDS); |
1027 | MVGBE_WRITE(sc, MVGBE_PTFUT, MVGBE_PTFUT_IPGINTTX(mvgbe_tximt)); | | 1028 | MVGBE_WRITE(sc, MVGBE_PTFUT, MVGBE_PTFUT_IPGINTTX(mvgbe_tximt)); |
1028 | | | 1029 | |
1029 | mvgbe_filter_setup(sc); | | 1030 | mvgbe_filter_setup(sc); |
1030 | | | 1031 | |
1031 | mii_mediachg(mii); | | 1032 | mii_mediachg(mii); |
1032 | | | 1033 | |
1033 | /* Enable port */ | | 1034 | /* Enable port */ |
1034 | reg = MVGBE_READ(sc, MVGBE_PSC); | | 1035 | reg = MVGBE_READ(sc, MVGBE_PSC); |
1035 | MVGBE_WRITE(sc, MVGBE_PSC, reg | MVGBE_PSC_PORTEN); | | 1036 | MVGBE_WRITE(sc, MVGBE_PSC, reg | MVGBE_PSC_PORTEN); |
1036 | | | 1037 | |
1037 | /* If Link is UP, Start RX and TX traffic */ | | 1038 | /* If Link is UP, Start RX and TX traffic */ |
1038 | if (MVGBE_READ(sc, MVGBE_PS) & MVGBE_PS_LINKUP) { | | 1039 | if (MVGBE_READ(sc, MVGBE_PS) & MVGBE_PS_LINKUP) { |
1039 | /* Enable port RX/TX. */ | | 1040 | /* Enable port RX/TX. */ |
1040 | MVGBE_WRITE(sc, MVGBE_RQC, MVGBE_RQC_ENQ(0)); | | 1041 | MVGBE_WRITE(sc, MVGBE_RQC, MVGBE_RQC_ENQ(0)); |
1041 | MVGBE_WRITE(sc, MVGBE_TQC, MVGBE_TQC_ENQ); | | 1042 | MVGBE_WRITE(sc, MVGBE_TQC, MVGBE_TQC_ENQ); |
1042 | } | | 1043 | } |
1043 | | | 1044 | |
1044 | /* Enable interrupt masks */ | | 1045 | /* Enable interrupt masks */ |
1045 | MVGBE_WRITE(sc, MVGBE_PIM, | | 1046 | MVGBE_WRITE(sc, MVGBE_PIM, |
1046 | MVGBE_IC_RXBUF | | | 1047 | MVGBE_IC_RXBUF | |
1047 | MVGBE_IC_EXTEND | | | 1048 | MVGBE_IC_EXTEND | |
1048 | MVGBE_IC_RXBUFQ_MASK | | | 1049 | MVGBE_IC_RXBUFQ_MASK | |
1049 | MVGBE_IC_RXERROR | | | 1050 | MVGBE_IC_RXERROR | |
1050 | MVGBE_IC_RXERRQ_MASK); | | 1051 | MVGBE_IC_RXERRQ_MASK); |
1051 | MVGBE_WRITE(sc, MVGBE_PEIM, | | 1052 | MVGBE_WRITE(sc, MVGBE_PEIM, |
1052 | MVGBE_ICE_TXBUF | | | 1053 | MVGBE_ICE_TXBUF | |
1053 | MVGBE_ICE_TXERR | | | 1054 | MVGBE_ICE_TXERR | |
1054 | MVGBE_ICE_LINKCHG); | | 1055 | MVGBE_ICE_LINKCHG); |
1055 | | | 1056 | |
1056 | ifp->if_flags |= IFF_RUNNING; | | 1057 | ifp->if_flags |= IFF_RUNNING; |
1057 | ifp->if_flags &= ~IFF_OACTIVE; | | 1058 | ifp->if_flags &= ~IFF_OACTIVE; |
1058 | | | 1059 | |
1059 | return 0; | | 1060 | return 0; |
1060 | } | | 1061 | } |
1061 | | | 1062 | |
/*
 * mvgbe_stop:
 *
 *	Quiesce the interface: disable the RX/TX queues, wait for RX
 *	activity and the TX FIFO to drain, turn off the port and its
 *	interrupts, and free every mbuf still held by the rings.
 *	The 'disable' argument is currently unused (hence ARGSUSED).
 */
/* ARGSUSED */
static void
mvgbe_stop(struct ifnet *ifp, int disable)
{
	struct mvgbe_softc *sc = ifp->if_softc;
	struct mvgbe_chain_data *cdata = &sc->sc_cdata;
	uint32_t reg;
	int i, cnt;

	DPRINTFN(2, ("mvgbe_stop\n"));

	/* Stop Rx port activity. Check port Rx activity. */
	reg = MVGBE_READ(sc, MVGBE_RQC);
	if (reg & MVGBE_RQC_ENQ_MASK)
		/* Issue stop command for active channels only */
		MVGBE_WRITE(sc, MVGBE_RQC, MVGBE_RQC_DISQ_DISABLE(reg));

	/* Stop Tx port activity. Check port Tx activity. */
	if (MVGBE_READ(sc, MVGBE_TQC) & MVGBE_TQC_ENQ)
		MVGBE_WRITE(sc, MVGBE_TQC, MVGBE_TQC_DISQ);

	/* Force link down */
	reg = MVGBE_READ(sc, MVGBE_PSC);
	MVGBE_WRITE(sc, MVGBE_PSC, reg & ~MVGBE_PSC_FLFAIL);

/* Poll-loop iteration limits, not wall-clock times. */
#define RX_DISABLE_TIMEOUT 0x1000000
#define TX_FIFO_EMPTY_TIMEOUT 0x1000000
	/* Wait for all Rx activity to terminate. */
	cnt = 0;
	do {
		if (cnt >= RX_DISABLE_TIMEOUT) {
			aprint_error_ifnet(ifp,
			    "timeout for RX stopped. rqc 0x%x\n", reg);
			break;
		}
		cnt++;

		/*
		 * Check Receive Queue Command register that all Rx queues
		 * are stopped
		 */
		reg = MVGBE_READ(sc, MVGBE_RQC);
	} while (reg & 0xff);

	/* Double check to verify that TX FIFO is empty */
	cnt = 0;
	while (1) {
		/* Spin until FIFO empty and no transmission in progress. */
		do {
			if (cnt >= TX_FIFO_EMPTY_TIMEOUT) {
				aprint_error_ifnet(ifp,
				    "timeout for TX FIFO empty. status 0x%x\n",
				    reg);
				break;
			}
			cnt++;

			reg = MVGBE_READ(sc, MVGBE_PS);
		} while
		    (!(reg & MVGBE_PS_TXFIFOEMP) || reg & MVGBE_PS_TXINPROG);

		if (cnt >= TX_FIFO_EMPTY_TIMEOUT)
			break;

		/* Double check */
		reg = MVGBE_READ(sc, MVGBE_PS);
		if (reg & MVGBE_PS_TXFIFOEMP && !(reg & MVGBE_PS_TXINPROG))
			break;
		else
			aprint_error_ifnet(ifp,
			    "TX FIFO empty double check failed."
			    " %d loops, status 0x%x\n", cnt, reg);
	}

	/* Reset the Enable bit in the Port Serial Control Register */
	reg = MVGBE_READ(sc, MVGBE_PSC);
	MVGBE_WRITE(sc, MVGBE_PSC, reg & ~MVGBE_PSC_PORTEN);

	/* Disable interrupts */
	MVGBE_WRITE(sc, MVGBE_PIM, 0);
	MVGBE_WRITE(sc, MVGBE_PEIM, 0);

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < MVGBE_RX_RING_CNT; i++) {
		if (cdata->mvgbe_rx_chain[i].mvgbe_mbuf != NULL) {
			m_freem(cdata->mvgbe_rx_chain[i].mvgbe_mbuf);
			cdata->mvgbe_rx_chain[i].mvgbe_mbuf = NULL;
		}
	}
	for (i = 0; i < MVGBE_TX_RING_CNT; i++) {
		if (cdata->mvgbe_tx_chain[i].mvgbe_mbuf != NULL) {
			m_freem(cdata->mvgbe_tx_chain[i].mvgbe_mbuf);
			cdata->mvgbe_tx_chain[i].mvgbe_mbuf = NULL;
		}
	}

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}
1159 | | | 1160 | |
1160 | static void | | 1161 | static void |
1161 | mvgbe_watchdog(struct ifnet *ifp) | | 1162 | mvgbe_watchdog(struct ifnet *ifp) |
1162 | { | | 1163 | { |
1163 | struct mvgbe_softc *sc = ifp->if_softc; | | 1164 | struct mvgbe_softc *sc = ifp->if_softc; |
1164 | | | 1165 | |
1165 | /* | | 1166 | /* |
1166 | * Reclaim first as there is a possibility of losing Tx completion | | 1167 | * Reclaim first as there is a possibility of losing Tx completion |
1167 | * interrupts. | | 1168 | * interrupts. |
1168 | */ | | 1169 | */ |
1169 | mvgbe_txeof(sc); | | 1170 | mvgbe_txeof(sc); |
1170 | if (sc->sc_cdata.mvgbe_tx_cnt != 0) { | | 1171 | if (sc->sc_cdata.mvgbe_tx_cnt != 0) { |
1171 | aprint_error_ifnet(ifp, "watchdog timeout\n"); | | 1172 | aprint_error_ifnet(ifp, "watchdog timeout\n"); |
1172 | | | 1173 | |
1173 | ifp->if_oerrors++; | | 1174 | ifp->if_oerrors++; |
1174 | | | 1175 | |
1175 | mvgbe_init(ifp); | | 1176 | mvgbe_init(ifp); |
1176 | } | | 1177 | } |
1177 | } | | 1178 | } |
1178 | | | 1179 | |
1179 | static int | | 1180 | static int |
1180 | mvgbe_ifflags_cb(struct ethercom *ec) | | 1181 | mvgbe_ifflags_cb(struct ethercom *ec) |
1181 | { | | 1182 | { |
1182 | struct ifnet *ifp = &ec->ec_if; | | 1183 | struct ifnet *ifp = &ec->ec_if; |
1183 | struct mvgbe_softc *sc = ifp->if_softc; | | 1184 | struct mvgbe_softc *sc = ifp->if_softc; |
1184 | int change = ifp->if_flags ^ sc->sc_if_flags; | | 1185 | int change = ifp->if_flags ^ sc->sc_if_flags; |
1185 | | | 1186 | |
1186 | if (change != 0) | | 1187 | if (change != 0) |
1187 | sc->sc_if_flags = ifp->if_flags; | | 1188 | sc->sc_if_flags = ifp->if_flags; |
1188 | | | 1189 | |
1189 | if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) | | 1190 | if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) |
1190 | return ENETRESET; | | 1191 | return ENETRESET; |
1191 | | | 1192 | |
1192 | if ((change & IFF_PROMISC) != 0) | | 1193 | if ((change & IFF_PROMISC) != 0) |
1193 | mvgbe_filter_setup(sc); | | 1194 | mvgbe_filter_setup(sc); |
1194 | | | 1195 | |
1195 | return 0; | | 1196 | return 0; |
1196 | } | | 1197 | } |
1197 | | | 1198 | |
1198 | /* | | 1199 | /* |
1199 | * Set media options. | | 1200 | * Set media options. |
1200 | */ | | 1201 | */ |
1201 | static int | | 1202 | static int |
1202 | mvgbe_mediachange(struct ifnet *ifp) | | 1203 | mvgbe_mediachange(struct ifnet *ifp) |
1203 | { | | 1204 | { |
1204 | return ether_mediachange(ifp); | | 1205 | return ether_mediachange(ifp); |
1205 | } | | 1206 | } |
1206 | | | 1207 | |
1207 | /* | | 1208 | /* |
1208 | * Report current media status. | | 1209 | * Report current media status. |
1209 | */ | | 1210 | */ |
1210 | static void | | 1211 | static void |
1211 | mvgbe_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) | | 1212 | mvgbe_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) |
1212 | { | | 1213 | { |
1213 | ether_mediastatus(ifp, ifmr); | | 1214 | ether_mediastatus(ifp, ifmr); |
1214 | } | | 1215 | } |
1215 | | | 1216 | |
1216 | | | 1217 | |
1217 | static int | | 1218 | static int |
1218 | mvgbe_init_rx_ring(struct mvgbe_softc *sc) | | 1219 | mvgbe_init_rx_ring(struct mvgbe_softc *sc) |
1219 | { | | 1220 | { |
1220 | struct mvgbe_chain_data *cd = &sc->sc_cdata; | | 1221 | struct mvgbe_chain_data *cd = &sc->sc_cdata; |
1221 | struct mvgbe_ring_data *rd = sc->sc_rdata; | | 1222 | struct mvgbe_ring_data *rd = sc->sc_rdata; |
1222 | int i; | | 1223 | int i; |
1223 | | | 1224 | |
1224 | memset(rd->mvgbe_rx_ring, 0, | | 1225 | memset(rd->mvgbe_rx_ring, 0, |
1225 | sizeof(struct mvgbe_rx_desc) * MVGBE_RX_RING_CNT); | | 1226 | sizeof(struct mvgbe_rx_desc) * MVGBE_RX_RING_CNT); |
1226 | | | 1227 | |
1227 | for (i = 0; i < MVGBE_RX_RING_CNT; i++) { | | 1228 | for (i = 0; i < MVGBE_RX_RING_CNT; i++) { |
1228 | cd->mvgbe_rx_chain[i].mvgbe_desc = | | 1229 | cd->mvgbe_rx_chain[i].mvgbe_desc = |
1229 | &rd->mvgbe_rx_ring[i]; | | 1230 | &rd->mvgbe_rx_ring[i]; |
1230 | if (i == MVGBE_RX_RING_CNT - 1) { | | 1231 | if (i == MVGBE_RX_RING_CNT - 1) { |
1231 | cd->mvgbe_rx_chain[i].mvgbe_next = | | 1232 | cd->mvgbe_rx_chain[i].mvgbe_next = |
1232 | &cd->mvgbe_rx_chain[0]; | | 1233 | &cd->mvgbe_rx_chain[0]; |
1233 | rd->mvgbe_rx_ring[i].nextdescptr = | | 1234 | rd->mvgbe_rx_ring[i].nextdescptr = |
1234 | MVGBE_RX_RING_ADDR(sc, 0); | | 1235 | MVGBE_RX_RING_ADDR(sc, 0); |
1235 | } else { | | 1236 | } else { |
1236 | cd->mvgbe_rx_chain[i].mvgbe_next = | | 1237 | cd->mvgbe_rx_chain[i].mvgbe_next = |
1237 | &cd->mvgbe_rx_chain[i + 1]; | | 1238 | &cd->mvgbe_rx_chain[i + 1]; |
1238 | rd->mvgbe_rx_ring[i].nextdescptr = | | 1239 | rd->mvgbe_rx_ring[i].nextdescptr = |
1239 | MVGBE_RX_RING_ADDR(sc, i + 1); | | 1240 | MVGBE_RX_RING_ADDR(sc, i + 1); |
1240 | } | | 1241 | } |
1241 | } | | 1242 | } |
1242 | | | 1243 | |
1243 | for (i = 0; i < MVGBE_RX_RING_CNT; i++) { | | 1244 | for (i = 0; i < MVGBE_RX_RING_CNT; i++) { |
1244 | if (mvgbe_newbuf(sc, i, NULL, | | 1245 | if (mvgbe_newbuf(sc, i, NULL, |
1245 | sc->sc_cdata.mvgbe_rx_jumbo_map) == ENOBUFS) { | | 1246 | sc->sc_cdata.mvgbe_rx_jumbo_map) == ENOBUFS) { |
1246 | aprint_error_ifnet(&sc->sc_ethercom.ec_if, | | 1247 | aprint_error_ifnet(&sc->sc_ethercom.ec_if, |
1247 | "failed alloc of %dth mbuf\n", i); | | 1248 | "failed alloc of %dth mbuf\n", i); |
1248 | return ENOBUFS; | | 1249 | return ENOBUFS; |
1249 | } | | 1250 | } |
1250 | } | | 1251 | } |
1251 | sc->sc_cdata.mvgbe_rx_prod = 0; | | 1252 | sc->sc_cdata.mvgbe_rx_prod = 0; |
1252 | sc->sc_cdata.mvgbe_rx_cons = 0; | | 1253 | sc->sc_cdata.mvgbe_rx_cons = 0; |
1253 | | | 1254 | |
1254 | return 0; | | 1255 | return 0; |
1255 | } | | 1256 | } |
1256 | | | 1257 | |
1257 | static int | | 1258 | static int |
1258 | mvgbe_init_tx_ring(struct mvgbe_softc *sc) | | 1259 | mvgbe_init_tx_ring(struct mvgbe_softc *sc) |
1259 | { | | 1260 | { |
1260 | struct mvgbe_chain_data *cd = &sc->sc_cdata; | | 1261 | struct mvgbe_chain_data *cd = &sc->sc_cdata; |
1261 | struct mvgbe_ring_data *rd = sc->sc_rdata; | | 1262 | struct mvgbe_ring_data *rd = sc->sc_rdata; |
1262 | int i; | | 1263 | int i; |
1263 | | | 1264 | |
1264 | memset(sc->sc_rdata->mvgbe_tx_ring, 0, | | 1265 | memset(sc->sc_rdata->mvgbe_tx_ring, 0, |
1265 | sizeof(struct mvgbe_tx_desc) * MVGBE_TX_RING_CNT); | | 1266 | sizeof(struct mvgbe_tx_desc) * MVGBE_TX_RING_CNT); |
1266 | | | 1267 | |
1267 | for (i = 0; i < MVGBE_TX_RING_CNT; i++) { | | 1268 | for (i = 0; i < MVGBE_TX_RING_CNT; i++) { |
1268 | cd->mvgbe_tx_chain[i].mvgbe_desc = | | 1269 | cd->mvgbe_tx_chain[i].mvgbe_desc = |
1269 | &rd->mvgbe_tx_ring[i]; | | 1270 | &rd->mvgbe_tx_ring[i]; |
1270 | if (i == MVGBE_TX_RING_CNT - 1) { | | 1271 | if (i == MVGBE_TX_RING_CNT - 1) { |
1271 | cd->mvgbe_tx_chain[i].mvgbe_next = | | 1272 | cd->mvgbe_tx_chain[i].mvgbe_next = |
1272 | &cd->mvgbe_tx_chain[0]; | | 1273 | &cd->mvgbe_tx_chain[0]; |
1273 | rd->mvgbe_tx_ring[i].nextdescptr = | | 1274 | rd->mvgbe_tx_ring[i].nextdescptr = |
1274 | MVGBE_TX_RING_ADDR(sc, 0); | | 1275 | MVGBE_TX_RING_ADDR(sc, 0); |
1275 | } else { | | 1276 | } else { |
1276 | cd->mvgbe_tx_chain[i].mvgbe_next = | | 1277 | cd->mvgbe_tx_chain[i].mvgbe_next = |
1277 | &cd->mvgbe_tx_chain[i + 1]; | | 1278 | &cd->mvgbe_tx_chain[i + 1]; |
1278 | rd->mvgbe_tx_ring[i].nextdescptr = | | 1279 | rd->mvgbe_tx_ring[i].nextdescptr = |
1279 | MVGBE_TX_RING_ADDR(sc, i + 1); | | 1280 | MVGBE_TX_RING_ADDR(sc, i + 1); |
1280 | } | | 1281 | } |
1281 | rd->mvgbe_tx_ring[i].cmdsts = MVGBE_BUFFER_OWNED_BY_HOST; | | 1282 | rd->mvgbe_tx_ring[i].cmdsts = MVGBE_BUFFER_OWNED_BY_HOST; |
1282 | } | | 1283 | } |
1283 | | | 1284 | |
1284 | sc->sc_cdata.mvgbe_tx_prod = 0; | | 1285 | sc->sc_cdata.mvgbe_tx_prod = 0; |
1285 | sc->sc_cdata.mvgbe_tx_cons = 0; | | 1286 | sc->sc_cdata.mvgbe_tx_cons = 0; |
1286 | sc->sc_cdata.mvgbe_tx_cnt = 0; | | 1287 | sc->sc_cdata.mvgbe_tx_cnt = 0; |
1287 | | | 1288 | |
1288 | MVGBE_CDTXSYNC(sc, 0, MVGBE_TX_RING_CNT, | | 1289 | MVGBE_CDTXSYNC(sc, 0, MVGBE_TX_RING_CNT, |
1289 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); | | 1290 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
1290 | | | 1291 | |
1291 | return 0; | | 1292 | return 0; |
1292 | } | | 1293 | } |
1293 | | | 1294 | |
/*
 * mvgbe_newbuf:
 *
 *	Attach a receive buffer to RX descriptor slot 'i' and hand the
 *	descriptor over to the DMA engine.  If 'm' is NULL, a fresh mbuf
 *	header is allocated and backed by a buffer from the driver's
 *	private jumbo pool; otherwise the caller's mbuf is recycled in
 *	place.  'dmamap' is the map covering the whole jumbo area.
 *	Returns 0 on success or ENOBUFS on allocation failure.
 */
static int
mvgbe_newbuf(struct mvgbe_softc *sc, int i, struct mbuf *m,
    bus_dmamap_t dmamap)
{
	struct mbuf *m_new = NULL;
	struct mvgbe_chain *c;
	struct mvgbe_rx_desc *r;
	int align;

	if (m == NULL) {
		void *buf = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			aprint_error_ifnet(&sc->sc_ethercom.ec_if,
			    "no memory for rx list -- packet dropped!\n");
			return ENOBUFS;
		}

		/* Allocate the jumbo buffer */
		buf = mvgbe_jalloc(sc);
		if (buf == NULL) {
			/* Pool exhausted; give the mbuf header back. */
			m_freem(m_new);
			DPRINTFN(1, ("%s jumbo allocation failed -- packet "
			    "dropped!\n", sc->sc_ethercom.ec_if.if_xname));
			return ENOBUFS;
		}

		/* Attach the buffer to the mbuf (freed via mvgbe_jfree) */
		m_new->m_len = m_new->m_pkthdr.len = MVGBE_JLEN;
		MEXTADD(m_new, buf, MVGBE_JLEN, 0, mvgbe_jfree, sc);
	} else {
		/*
		 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MVGBE_JLEN;
		m_new->m_data = m_new->m_ext.ext_buf;
	}
	/* Advance m_data so the buffer meets the chip's RX alignment. */
	align = (u_long)m_new->m_data & MVGBE_RXBUF_MASK;
	if (align != 0) {
		DPRINTFN(1,("align = %d\n", align));
		m_adj(m_new, MVGBE_RXBUF_ALIGN - align);
	}

	c = &sc->sc_cdata.mvgbe_rx_chain[i];
	r = c->mvgbe_desc;
	c->mvgbe_mbuf = m_new;
	/*
	 * DMA address = base of the jumbo-area map plus the offset of
	 * this buffer's data within the jumbo area.
	 */
	r->bufptr = dmamap->dm_segs[0].ds_addr +
	    (((vaddr_t)m_new->m_data - (vaddr_t)sc->sc_cdata.mvgbe_jumbo_buf));
	r->bufsize = MVGBE_JLEN & ~MVGBE_RXBUF_MASK;
	/* Hand the descriptor to the hardware, interrupt on completion. */
	r->cmdsts = MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_ENABLE_INTERRUPT;

	MVGBE_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}
1353 | | | 1354 | |
1354 | /* | | 1355 | /* |
1355 | * Memory management for jumbo frames. | | 1356 | * Memory management for jumbo frames. |
1356 | */ | | 1357 | */ |
1357 | | | 1358 | |
1358 | static int | | 1359 | static int |
1359 | mvgbe_alloc_jumbo_mem(struct mvgbe_softc *sc) | | 1360 | mvgbe_alloc_jumbo_mem(struct mvgbe_softc *sc) |
1360 | { | | 1361 | { |
1361 | char *ptr, *kva; | | 1362 | char *ptr, *kva; |
1362 | bus_dma_segment_t seg; | | 1363 | bus_dma_segment_t seg; |
1363 | int i, rseg, state, error; | | 1364 | int i, rseg, state, error; |
1364 | struct mvgbe_jpool_entry *entry; | | 1365 | struct mvgbe_jpool_entry *entry; |
1365 | | | 1366 | |
1366 | state = error = 0; | | 1367 | state = error = 0; |
1367 | | | 1368 | |
1368 | /* Grab a big chunk o' storage. */ | | 1369 | /* Grab a big chunk o' storage. */ |
1369 | if (bus_dmamem_alloc(sc->sc_dmat, MVGBE_JMEM, PAGE_SIZE, 0, | | 1370 | if (bus_dmamem_alloc(sc->sc_dmat, MVGBE_JMEM, PAGE_SIZE, 0, |
1370 | &seg, 1, &rseg, BUS_DMA_NOWAIT)) { | | 1371 | &seg, 1, &rseg, BUS_DMA_NOWAIT)) { |
1371 | aprint_error_dev(sc->sc_dev, "can't alloc rx buffers\n"); | | 1372 | aprint_error_dev(sc->sc_dev, "can't alloc rx buffers\n"); |
1372 | return ENOBUFS; | | 1373 | return ENOBUFS; |
1373 | } | | 1374 | } |
1374 | | | 1375 | |
1375 | state = 1; | | 1376 | state = 1; |
1376 | if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, MVGBE_JMEM, | | 1377 | if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, MVGBE_JMEM, |
1377 | (void **)&kva, BUS_DMA_NOWAIT)) { | | 1378 | (void **)&kva, BUS_DMA_NOWAIT)) { |
1378 | aprint_error_dev(sc->sc_dev, | | 1379 | aprint_error_dev(sc->sc_dev, |
1379 | "can't map dma buffers (%d bytes)\n", MVGBE_JMEM); | | 1380 | "can't map dma buffers (%d bytes)\n", MVGBE_JMEM); |
1380 | error = ENOBUFS; | | 1381 | error = ENOBUFS; |
1381 | goto out; | | 1382 | goto out; |
1382 | } | | 1383 | } |
1383 | | | 1384 | |
1384 | state = 2; | | 1385 | state = 2; |
1385 | if (bus_dmamap_create(sc->sc_dmat, MVGBE_JMEM, 1, MVGBE_JMEM, 0, | | 1386 | if (bus_dmamap_create(sc->sc_dmat, MVGBE_JMEM, 1, MVGBE_JMEM, 0, |
1386 | BUS_DMA_NOWAIT, &sc->sc_cdata.mvgbe_rx_jumbo_map)) { | | 1387 | BUS_DMA_NOWAIT, &sc->sc_cdata.mvgbe_rx_jumbo_map)) { |
1387 | aprint_error_dev(sc->sc_dev, "can't create dma map\n"); | | 1388 | aprint_error_dev(sc->sc_dev, "can't create dma map\n"); |
1388 | error = ENOBUFS; | | 1389 | error = ENOBUFS; |
1389 | goto out; | | 1390 | goto out; |
1390 | } | | 1391 | } |
1391 | | | 1392 | |
1392 | state = 3; | | 1393 | state = 3; |
1393 | if (bus_dmamap_load(sc->sc_dmat, sc->sc_cdata.mvgbe_rx_jumbo_map, | | 1394 | if (bus_dmamap_load(sc->sc_dmat, sc->sc_cdata.mvgbe_rx_jumbo_map, |
1394 | kva, MVGBE_JMEM, NULL, BUS_DMA_NOWAIT)) { | | 1395 | kva, MVGBE_JMEM, NULL, BUS_DMA_NOWAIT)) { |
1395 | aprint_error_dev(sc->sc_dev, "can't load dma map\n"); | | 1396 | aprint_error_dev(sc->sc_dev, "can't load dma map\n"); |
1396 | error = ENOBUFS; | | 1397 | error = ENOBUFS; |
1397 | goto out; | | 1398 | goto out; |
1398 | } | | 1399 | } |
1399 | | | 1400 | |
1400 | state = 4; | | 1401 | state = 4; |
1401 | sc->sc_cdata.mvgbe_jumbo_buf = (void *)kva; | | 1402 | sc->sc_cdata.mvgbe_jumbo_buf = (void *)kva; |
1402 | DPRINTFN(1,("mvgbe_jumbo_buf = %p\n", sc->sc_cdata.mvgbe_jumbo_buf)); | | 1403 | DPRINTFN(1,("mvgbe_jumbo_buf = %p\n", sc->sc_cdata.mvgbe_jumbo_buf)); |
1403 | | | 1404 | |
1404 | LIST_INIT(&sc->sc_jfree_listhead); | | 1405 | LIST_INIT(&sc->sc_jfree_listhead); |
1405 | LIST_INIT(&sc->sc_jinuse_listhead); | | 1406 | LIST_INIT(&sc->sc_jinuse_listhead); |
1406 | | | 1407 | |
1407 | /* | | 1408 | /* |
1408 | * Now divide it up into 9K pieces and save the addresses | | 1409 | * Now divide it up into 9K pieces and save the addresses |
1409 | * in an array. | | 1410 | * in an array. |
1410 | */ | | 1411 | */ |
1411 | ptr = sc->sc_cdata.mvgbe_jumbo_buf; | | 1412 | ptr = sc->sc_cdata.mvgbe_jumbo_buf; |
1412 | for (i = 0; i < MVGBE_JSLOTS; i++) { | | 1413 | for (i = 0; i < MVGBE_JSLOTS; i++) { |
1413 | sc->sc_cdata.mvgbe_jslots[i] = ptr; | | 1414 | sc->sc_cdata.mvgbe_jslots[i] = ptr; |
1414 | ptr += MVGBE_JLEN; | | 1415 | ptr += MVGBE_JLEN; |
1415 | entry = kmem_alloc(sizeof(struct mvgbe_jpool_entry), KM_SLEEP); | | 1416 | entry = kmem_alloc(sizeof(struct mvgbe_jpool_entry), KM_SLEEP); |
1416 | if (entry == NULL) { | | 1417 | if (entry == NULL) { |
1417 | aprint_error_dev(sc->sc_dev, | | 1418 | aprint_error_dev(sc->sc_dev, |
1418 | "no memory for jumbo buffer queue!\n"); | | 1419 | "no memory for jumbo buffer queue!\n"); |
1419 | error = ENOBUFS; | | 1420 | error = ENOBUFS; |
1420 | goto out; | | 1421 | goto out; |
1421 | } | | 1422 | } |
1422 | entry->slot = i; | | 1423 | entry->slot = i; |
1423 | if (i) | | 1424 | if (i) |
1424 | LIST_INSERT_HEAD(&sc->sc_jfree_listhead, entry, | | 1425 | LIST_INSERT_HEAD(&sc->sc_jfree_listhead, entry, |
1425 | jpool_entries); | | 1426 | jpool_entries); |
1426 | else | | 1427 | else |
1427 | LIST_INSERT_HEAD(&sc->sc_jinuse_listhead, entry, | | 1428 | LIST_INSERT_HEAD(&sc->sc_jinuse_listhead, entry, |
1428 | jpool_entries); | | 1429 | jpool_entries); |
1429 | } | | 1430 | } |
1430 | out: | | 1431 | out: |
1431 | if (error != 0) { | | 1432 | if (error != 0) { |
1432 | switch (state) { | | 1433 | switch (state) { |
1433 | case 4: | | 1434 | case 4: |
1434 | bus_dmamap_unload(sc->sc_dmat, | | 1435 | bus_dmamap_unload(sc->sc_dmat, |
1435 | sc->sc_cdata.mvgbe_rx_jumbo_map); | | 1436 | sc->sc_cdata.mvgbe_rx_jumbo_map); |
1436 | case 3: | | 1437 | case 3: |
1437 | bus_dmamap_destroy(sc->sc_dmat, | | 1438 | bus_dmamap_destroy(sc->sc_dmat, |
1438 | sc->sc_cdata.mvgbe_rx_jumbo_map); | | 1439 | sc->sc_cdata.mvgbe_rx_jumbo_map); |
1439 | case 2: | | 1440 | case 2: |
1440 | bus_dmamem_unmap(sc->sc_dmat, kva, MVGBE_JMEM); | | 1441 | bus_dmamem_unmap(sc->sc_dmat, kva, MVGBE_JMEM); |
1441 | case 1: | | 1442 | case 1: |
1442 | bus_dmamem_free(sc->sc_dmat, &seg, rseg); | | 1443 | bus_dmamem_free(sc->sc_dmat, &seg, rseg); |
1443 | break; | | 1444 | break; |
1444 | default: | | 1445 | default: |
1445 | break; | | 1446 | break; |
1446 | } | | 1447 | } |
1447 | } | | 1448 | } |
1448 | | | 1449 | |
1449 | return error; | | 1450 | return error; |
1450 | } | | 1451 | } |
1451 | | | 1452 | |
1452 | /* | | 1453 | /* |
1453 | * Allocate a jumbo buffer. | | 1454 | * Allocate a jumbo buffer. |
1454 | */ | | 1455 | */ |
1455 | static void * | | 1456 | static void * |
1456 | mvgbe_jalloc(struct mvgbe_softc *sc) | | 1457 | mvgbe_jalloc(struct mvgbe_softc *sc) |
1457 | { | | 1458 | { |
1458 | struct mvgbe_jpool_entry *entry; | | 1459 | struct mvgbe_jpool_entry *entry; |
1459 | | | 1460 | |
1460 | entry = LIST_FIRST(&sc->sc_jfree_listhead); | | 1461 | entry = LIST_FIRST(&sc->sc_jfree_listhead); |
1461 | | | 1462 | |
1462 | if (entry == NULL) | | 1463 | if (entry == NULL) |
1463 | return NULL; | | 1464 | return NULL; |
1464 | | | 1465 | |
1465 | LIST_REMOVE(entry, jpool_entries); | | 1466 | LIST_REMOVE(entry, jpool_entries); |
1466 | LIST_INSERT_HEAD(&sc->sc_jinuse_listhead, entry, jpool_entries); | | 1467 | LIST_INSERT_HEAD(&sc->sc_jinuse_listhead, entry, jpool_entries); |
1467 | return sc->sc_cdata.mvgbe_jslots[entry->slot]; | | 1468 | return sc->sc_cdata.mvgbe_jslots[entry->slot]; |
1468 | } | | 1469 | } |
1469 | | | 1470 | |
1470 | /* | | 1471 | /* |
1471 | * Release a jumbo buffer. | | 1472 | * Release a jumbo buffer. |
1472 | */ | | 1473 | */ |
1473 | static void | | 1474 | static void |
1474 | mvgbe_jfree(struct mbuf *m, void *buf, size_t size, void *arg) | | 1475 | mvgbe_jfree(struct mbuf *m, void *buf, size_t size, void *arg) |
1475 | { | | 1476 | { |
1476 | struct mvgbe_jpool_entry *entry; | | 1477 | struct mvgbe_jpool_entry *entry; |
1477 | struct mvgbe_softc *sc; | | 1478 | struct mvgbe_softc *sc; |
1478 | int i, s; | | 1479 | int i, s; |
1479 | | | 1480 | |
1480 | /* Extract the softc struct pointer. */ | | 1481 | /* Extract the softc struct pointer. */ |
1481 | sc = (struct mvgbe_softc *)arg; | | 1482 | sc = (struct mvgbe_softc *)arg; |
1482 | | | 1483 | |
1483 | if (sc == NULL) | | 1484 | if (sc == NULL) |
1484 | panic("%s: can't find softc pointer!", __func__); | | 1485 | panic("%s: can't find softc pointer!", __func__); |
1485 | | | 1486 | |
1486 | /* calculate the slot this buffer belongs to */ | | 1487 | /* calculate the slot this buffer belongs to */ |
1487 | | | 1488 | |
1488 | i = ((vaddr_t)buf - (vaddr_t)sc->sc_cdata.mvgbe_jumbo_buf) / MVGBE_JLEN; | | 1489 | i = ((vaddr_t)buf - (vaddr_t)sc->sc_cdata.mvgbe_jumbo_buf) / MVGBE_JLEN; |
1489 | | | 1490 | |
1490 | if ((i < 0) || (i >= MVGBE_JSLOTS)) | | 1491 | if ((i < 0) || (i >= MVGBE_JSLOTS)) |
1491 | panic("%s: asked to free buffer that we don't manage!", | | 1492 | panic("%s: asked to free buffer that we don't manage!", |
1492 | __func__); | | 1493 | __func__); |
1493 | | | 1494 | |
1494 | s = splvm(); | | 1495 | s = splvm(); |
1495 | entry = LIST_FIRST(&sc->sc_jinuse_listhead); | | 1496 | entry = LIST_FIRST(&sc->sc_jinuse_listhead); |
1496 | if (entry == NULL) | | 1497 | if (entry == NULL) |
1497 | panic("%s: buffer not in use!", __func__); | | 1498 | panic("%s: buffer not in use!", __func__); |
1498 | entry->slot = i; | | 1499 | entry->slot = i; |
1499 | LIST_REMOVE(entry, jpool_entries); | | 1500 | LIST_REMOVE(entry, jpool_entries); |
1500 | LIST_INSERT_HEAD(&sc->sc_jfree_listhead, entry, jpool_entries); | | 1501 | LIST_INSERT_HEAD(&sc->sc_jfree_listhead, entry, jpool_entries); |
1501 | | | 1502 | |
1502 | if (__predict_true(m != NULL)) | | 1503 | if (__predict_true(m != NULL)) |
1503 | pool_cache_put(mb_cache, m); | | 1504 | pool_cache_put(mb_cache, m); |
1504 | splx(s); | | 1505 | splx(s); |
1505 | } | | 1506 | } |
1506 | | | 1507 | |
/*
 * Load the mbuf chain "m_head" into the tx descriptor ring starting at
 * index *txidx, one descriptor per DMA segment.  On success, *txidx is
 * advanced past the descriptors consumed and 0 is returned; on failure
 * ENOBUFS is returned and nothing in the ring has been committed.  The
 * mbuf is recorded on the LAST descriptor so mvgbe_txeof() can free it.
 */
static int
mvgbe_encap(struct mvgbe_softc *sc, struct mbuf *m_head,
    uint32_t *txidx)
{
	struct mvgbe_tx_desc *f = NULL;
	struct mvgbe_txmap_entry *entry;
	bus_dma_segment_t *txseg;
	bus_dmamap_t txmap;
	uint32_t first, current, last, cmdsts = 0;
	int m_csumflags, i;

	DPRINTFN(3, ("mvgbe_encap\n"));

	/* Grab a spare DMA map off the free list. */
	entry = SIMPLEQ_FIRST(&sc->sc_txmap_head);
	if (entry == NULL) {
		DPRINTFN(2, ("mvgbe_encap: no txmap available\n"));
		return ENOBUFS;
	}
	txmap = entry->dmamap;

	first = current = last = *txidx;

	/*
	 * Preserve m_pkthdr.csum_flags here since m_head might be
	 * updated by m_defrag()
	 */
	m_csumflags = m_head->m_pkthdr.csum_flags;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	if (bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m_head, BUS_DMA_NOWAIT)) {
		DPRINTFN(1, ("mvgbe_encap: dmamap failed\n"));
		return ENOBUFS;
	}

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Refuse the frame if the ring cannot hold every segment. */
	if (sc->sc_cdata.mvgbe_tx_cnt + txmap->dm_nsegs >=
	    MVGBE_TX_RING_CNT) {
		DPRINTFN(2, ("mvgbe_encap: too few descriptors free\n"));
		bus_dmamap_unload(sc->sc_dmat, txmap);
		return ENOBUFS;
	}

	txseg = txmap->dm_segs;

	DPRINTFN(2, ("mvgbe_encap: dm_nsegs=%d\n", txmap->dm_nsegs));

	/*
	 * Fill one descriptor per DMA segment, handing each to the DMA
	 * engine as we go; "last" remembers the final descriptor used.
	 */
	for (i = 0; i < txmap->dm_nsegs; i++) {
		f = &sc->sc_rdata->mvgbe_tx_ring[current];
		f->bufptr = txseg[i].ds_addr;
		f->bytecnt = txseg[i].ds_len;
		f->cmdsts = MVGBE_BUFFER_OWNED_BY_DMA;
		last = current;
		current = MVGBE_TX_RING_NEXT(current);
	}

	/* Translate mbuf checksum-offload requests into descriptor bits. */
	if (m_csumflags & M_CSUM_IPv4)
		cmdsts |= MVGBE_TX_GENERATE_IP_CHKSUM;
	if (m_csumflags & M_CSUM_TCPv4)
		cmdsts |=
		    MVGBE_TX_GENERATE_L4_CHKSUM | MVGBE_TX_L4_TYPE_TCP;
	if (m_csumflags & M_CSUM_UDPv4)
		cmdsts |=
		    MVGBE_TX_GENERATE_L4_CHKSUM | MVGBE_TX_L4_TYPE_UDP;
	if (m_csumflags & (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
		const int iphdr_unitlen = sizeof(struct ip) / sizeof(uint32_t);

		cmdsts |= MVGBE_TX_IP_NO_FRAG |
		    MVGBE_TX_IP_HEADER_LEN(iphdr_unitlen);	/* unit is 4B */
	}
	/*
	 * A single-segment frame gets FIRST|LAST on its one descriptor;
	 * otherwise FIRST (with the checksum bits) goes on the first
	 * descriptor and LAST (with the interrupt request) on the final
	 * one.  "f" still points at the last descriptor from the loop.
	 */
	if (txmap->dm_nsegs == 1)
		f->cmdsts = cmdsts |
		    MVGBE_BUFFER_OWNED_BY_DMA |
		    MVGBE_TX_GENERATE_CRC |
		    MVGBE_TX_ENABLE_INTERRUPT |
		    MVGBE_TX_ZERO_PADDING |
		    MVGBE_TX_FIRST_DESC |
		    MVGBE_TX_LAST_DESC;
	else {
		f = &sc->sc_rdata->mvgbe_tx_ring[first];
		f->cmdsts = cmdsts |
		    MVGBE_BUFFER_OWNED_BY_DMA |
		    MVGBE_TX_GENERATE_CRC |
		    MVGBE_TX_FIRST_DESC;

		f = &sc->sc_rdata->mvgbe_tx_ring[last];
		f->cmdsts =
		    MVGBE_BUFFER_OWNED_BY_DMA |
		    MVGBE_TX_ENABLE_INTERRUPT |
		    MVGBE_TX_ZERO_PADDING |
		    MVGBE_TX_LAST_DESC;
	}

	/* Park the mbuf and the DMA map on the last descriptor for txeof. */
	sc->sc_cdata.mvgbe_tx_chain[last].mvgbe_mbuf = m_head;
	SIMPLEQ_REMOVE_HEAD(&sc->sc_txmap_head, link);
	sc->sc_cdata.mvgbe_tx_map[last] = entry;

	/* Sync descriptors before handing to chip */
	MVGBE_CDTXSYNC(sc, *txidx, txmap->dm_nsegs,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* i == dm_nsegs here: account for every descriptor consumed. */
	sc->sc_cdata.mvgbe_tx_cnt += i;
	*txidx = current;

	DPRINTFN(3, ("mvgbe_encap: completed successfully\n"));

	return 0;
}
1621 | | | 1622 | |
/*
 * Receive-completion handler.  Walk the rx ring from mvgbe_rx_prod,
 * harvest every descriptor the DMA engine has handed back, translate
 * the hardware checksum status into mbuf csum_flags, refill the ring
 * slot (copying the packet out if no fresh jumbo buffer is available),
 * and pass each packet up through if_input.
 */
static void
mvgbe_rxeof(struct mvgbe_softc *sc)
{
	struct mvgbe_chain_data *cdata = &sc->sc_cdata;
	struct mvgbe_rx_desc *cur_rx;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	bus_dmamap_t dmamap;
	uint32_t rxstat;
	int idx, cur, total_len;

	idx = sc->sc_cdata.mvgbe_rx_prod;

	DPRINTFN(3, ("mvgbe_rxeof %d\n", idx));

	for (;;) {
		cur = idx;

		/* Sync the descriptor */
		MVGBE_CDRXSYNC(sc, idx,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->sc_rdata->mvgbe_rx_ring[idx];

		/* Stop at the first descriptor still owned by the DMA. */
		if ((cur_rx->cmdsts & MVGBE_BUFFER_OWNED_MASK) ==
		    MVGBE_BUFFER_OWNED_BY_DMA) {
			/* Invalidate the descriptor -- it's not ready yet */
			MVGBE_CDRXSYNC(sc, idx, BUS_DMASYNC_PREREAD);
			sc->sc_cdata.mvgbe_rx_prod = idx;
			break;
		}
#ifdef DIAGNOSTIC
		/* Jumbo buffers should always hold a whole frame. */
		if ((cur_rx->cmdsts &
		    (MVGBE_RX_LAST_DESC | MVGBE_RX_FIRST_DESC)) !=
		    (MVGBE_RX_LAST_DESC | MVGBE_RX_FIRST_DESC))
			panic(
			    "mvgbe_rxeof: buffer size is smaller than packet");
#endif

		dmamap = sc->sc_cdata.mvgbe_rx_jumbo_map;

		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		/* Detach the mbuf from the ring slot before refilling. */
		m = cdata->mvgbe_rx_chain[idx].mvgbe_mbuf;
		cdata->mvgbe_rx_chain[idx].mvgbe_mbuf = NULL;
		total_len = cur_rx->bytecnt;
		rxstat = cur_rx->cmdsts;

		cdata->mvgbe_rx_map[idx] = NULL;

		idx = MVGBE_RX_RING_NEXT(idx);

		if (rxstat & MVGBE_ERROR_SUMMARY) {
#if 0
			int err = rxstat & MVGBE_RX_ERROR_CODE_MASK;

			if (err == MVGBE_RX_CRC_ERROR)
				ifp->if_ierrors++;
			if (err == MVGBE_RX_OVERRUN_ERROR)
				ifp->if_ierrors++;
			if (err == MVGBE_RX_MAX_FRAME_LEN_ERROR)
				ifp->if_ierrors++;
			if (err == MVGBE_RX_RESOURCE_ERROR)
				ifp->if_ierrors++;
#else
			ifp->if_ierrors++;
#endif
			/* Error frame: recycle the buffer and move on. */
			mvgbe_newbuf(sc, cur, m, dmamap);
			continue;
		}

		/* Short frames: skip hardware checksum results. */
		if (total_len <= MVGBE_RX_CSUM_MIN_BYTE)  /* XXX documented? */
			goto sw_csum;

		if (rxstat & MVGBE_RX_IP_FRAME_TYPE) {
			/* Check IPv4 header checksum */
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (!(rxstat & MVGBE_RX_IP_HEADER_OK))
				m->m_pkthdr.csum_flags |=
				    M_CSUM_IPv4_BAD;
			/* Check TCPv4/UDPv4 checksum */
			if ((rxstat & MVGBE_RX_L4_TYPE_MASK) ==
			    MVGBE_RX_L4_TYPE_TCP)
				m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
			else if ((rxstat & MVGBE_RX_L4_TYPE_MASK) ==
			    MVGBE_RX_L4_TYPE_UDP)
				m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
			if (!(rxstat & MVGBE_RX_L4_CHECKSUM))
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}
sw_csum:

		/*
		 * Try to allocate a new jumbo buffer. If that
		 * fails, copy the packet to mbufs and put the
		 * jumbo buffer back in the ring so it can be
		 * re-used. If allocating mbufs fails, then we
		 * have to drop the packet.
		 */
		if (mvgbe_newbuf(sc, cur, NULL, dmamap) == ENOBUFS) {
			struct mbuf *m0;

			m0 = m_devget(mtod(m, char *), total_len, 0, ifp, NULL);
			mvgbe_newbuf(sc, cur, m, dmamap);
			if (m0 == NULL) {
				aprint_error_ifnet(ifp,
				    "no receive buffers available --"
				    " packet dropped!\n");
				ifp->if_ierrors++;
				continue;
			}
			m = m0;
		} else {
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		}

		/* Skip on first 2byte (HW header) */
		m_adj(m, MVGBE_HWHEADER_SIZE);
		m->m_flags |= M_HASFCS;

		ifp->if_ipackets++;

		bpf_mtap(ifp, m);

		/* pass it on. */
		(*ifp->if_input)(ifp, m);
	}
}
1752 | | | 1753 | |
/*
 * Transmit-completion handler.  Reap descriptors from mvgbe_tx_cons up
 * to mvgbe_tx_prod that the DMA engine has released, free the attached
 * mbufs, return their DMA maps to the free list, update the interface
 * statistics, and clear IFF_OACTIVE/if_timer once the ring has drained.
 */
static void
mvgbe_txeof(struct mvgbe_softc *sc)
{
	struct mvgbe_chain_data *cdata = &sc->sc_cdata;
	struct mvgbe_tx_desc *cur_tx;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mvgbe_txmap_entry *entry;
	int idx;

	DPRINTFN(3, ("mvgbe_txeof\n"));

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	idx = cdata->mvgbe_tx_cons;
	while (idx != cdata->mvgbe_tx_prod) {
		MVGBE_CDTXSYNC(sc, idx, 1,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_tx = &sc->sc_rdata->mvgbe_tx_ring[idx];
#ifdef MVGBE_DEBUG
		if (mvgbe_debug >= 3)
			mvgbe_dump_txdesc(cur_tx, idx);
#endif
		/* Stop at the first descriptor the DMA engine still owns. */
		if ((cur_tx->cmdsts & MVGBE_BUFFER_OWNED_MASK) ==
		    MVGBE_BUFFER_OWNED_BY_DMA) {
			MVGBE_CDTXSYNC(sc, idx, 1, BUS_DMASYNC_PREREAD);
			break;
		}
		/* Only the LAST descriptor of a frame counts as a packet. */
		if (cur_tx->cmdsts & MVGBE_TX_LAST_DESC)
			ifp->if_opackets++;
		if (cur_tx->cmdsts & MVGBE_ERROR_SUMMARY) {
			int err = cur_tx->cmdsts & MVGBE_TX_ERROR_CODE_MASK;

			if (err == MVGBE_TX_LATE_COLLISION_ERROR)
				ifp->if_collisions++;
			if (err == MVGBE_TX_UNDERRUN_ERROR)
				ifp->if_oerrors++;
			if (err == MVGBE_TX_EXCESSIVE_COLLISION_ERRO)
				ifp->if_collisions++;
		}
		/*
		 * The mbuf and DMA map live on the frame's last
		 * descriptor (see mvgbe_encap()); release them here.
		 */
		if (cdata->mvgbe_tx_chain[idx].mvgbe_mbuf != NULL) {
			entry = cdata->mvgbe_tx_map[idx];

			m_freem(cdata->mvgbe_tx_chain[idx].mvgbe_mbuf);
			cdata->mvgbe_tx_chain[idx].mvgbe_mbuf = NULL;

			bus_dmamap_sync(sc->sc_dmat, entry->dmamap, 0,
			    entry->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);

			bus_dmamap_unload(sc->sc_dmat, entry->dmamap);
			SIMPLEQ_INSERT_TAIL(&sc->sc_txmap_head, entry, link);
			cdata->mvgbe_tx_map[idx] = NULL;
		}
		cdata->mvgbe_tx_cnt--;
		idx = MVGBE_TX_RING_NEXT(idx);
	}
	/* Ring empty: cancel the watchdog timer. */
	if (cdata->mvgbe_tx_cnt == 0)
		ifp->if_timer = 0;

	/* Enough room freed up to accept more frames from the stack. */
	if (cdata->mvgbe_tx_cnt < MVGBE_TX_RING_CNT - 2)
		ifp->if_flags &= ~IFF_OACTIVE;

	cdata->mvgbe_tx_cons = idx;
}
1819 | | | 1820 | |
/*
 * Compute an 8-bit CRC (polynomial 0x07, initial value 0, MSB first)
 * over "size" bytes of "data".  Used below as the hash index into the
 * "other" multicast filter table.
 */
static uint8_t
mvgbe_crc8(const uint8_t *data, size_t size)
{
	const uint8_t poly = 0x07;
	uint8_t crc = 0;
	size_t n;
	int bit;

	for (n = 0; n < size; n++) {
		uint8_t byte = data[n];

		for (bit = NBBY - 1; bit >= 0; bit--) {
			/* Feed the next message bit into the register. */
			if (((crc >> 7) ^ (byte >> bit)) & 1) {
				crc = (crc << 1) ^ poly;
			} else {
				crc = crc << 1;
			}
		}
	}

	return crc;
}
1834 | | | 1835 | |
1835 | CTASSERT(MVGBE_NDFSMT == MVGBE_NDFOMT); | | 1836 | CTASSERT(MVGBE_NDFSMT == MVGBE_NDFOMT); |
1836 | | | 1837 | |
1837 | static void | | 1838 | static void |
1838 | mvgbe_filter_setup(struct mvgbe_softc *sc) | | 1839 | mvgbe_filter_setup(struct mvgbe_softc *sc) |
1839 | { | | 1840 | { |
1840 | struct ethercom *ec = &sc->sc_ethercom; | | 1841 | struct ethercom *ec = &sc->sc_ethercom; |
1841 | struct ifnet *ifp= &sc->sc_ethercom.ec_if; | | 1842 | struct ifnet *ifp= &sc->sc_ethercom.ec_if; |
1842 | struct ether_multi *enm; | | 1843 | struct ether_multi *enm; |
1843 | struct ether_multistep step; | | 1844 | struct ether_multistep step; |
1844 | uint32_t *dfut, *dfsmt, *dfomt; | | 1845 | uint32_t *dfut, *dfsmt, *dfomt; |
1845 | uint32_t pxc; | | 1846 | uint32_t pxc; |
1846 | int i; | | 1847 | int i; |
1847 | const uint8_t special[ETHER_ADDR_LEN] = {0x01,0x00,0x5e,0x00,0x00,0x00}; | | 1848 | const uint8_t special[ETHER_ADDR_LEN] = {0x01,0x00,0x5e,0x00,0x00,0x00}; |
1848 | | | 1849 | |
1849 | dfut = kmem_zalloc(sizeof(*dfut) * MVGBE_NDFUT, KM_SLEEP); | | 1850 | dfut = kmem_zalloc(sizeof(*dfut) * MVGBE_NDFUT, KM_SLEEP); |
1850 | dfsmt = kmem_zalloc(sizeof(*dfsmt) * MVGBE_NDFSMT, KM_SLEEP); | | 1851 | dfsmt = kmem_zalloc(sizeof(*dfsmt) * MVGBE_NDFSMT, KM_SLEEP); |
1851 | dfomt = kmem_zalloc(sizeof(*dfomt) * MVGBE_NDFOMT, KM_SLEEP); | | 1852 | dfomt = kmem_zalloc(sizeof(*dfomt) * MVGBE_NDFOMT, KM_SLEEP); |
1852 | | | 1853 | |
1853 | if (ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC)) { | | 1854 | if (ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC)) { |
1854 | goto allmulti; | | 1855 | goto allmulti; |
1855 | } | | 1856 | } |
1856 | | | 1857 | |
1857 | ETHER_FIRST_MULTI(step, ec, enm); | | 1858 | ETHER_FIRST_MULTI(step, ec, enm); |
1858 | while (enm != NULL) { | | 1859 | while (enm != NULL) { |
1859 | if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { | | 1860 | if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { |
1860 | /* ranges are complex and somewhat rare */ | | 1861 | /* ranges are complex and somewhat rare */ |
1861 | goto allmulti; | | 1862 | goto allmulti; |
1862 | } | | 1863 | } |
1863 | /* chip handles some IPv4 multicast specially */ | | 1864 | /* chip handles some IPv4 multicast specially */ |
1864 | if (memcmp(enm->enm_addrlo, special, 5) == 0) { | | 1865 | if (memcmp(enm->enm_addrlo, special, 5) == 0) { |
1865 | i = enm->enm_addrlo[5]; | | 1866 | i = enm->enm_addrlo[5]; |
1866 | dfsmt[i>>2] = | | 1867 | dfsmt[i>>2] = |
1867 | MVGBE_DF(i&3, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS); | | 1868 | MVGBE_DF(i&3, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS); |
1868 | } else { | | 1869 | } else { |
1869 | i = mvgbe_crc8(enm->enm_addrlo, ETHER_ADDR_LEN); | | 1870 | i = mvgbe_crc8(enm->enm_addrlo, ETHER_ADDR_LEN); |
1870 | dfomt[i>>2] = | | 1871 | dfomt[i>>2] = |
1871 | MVGBE_DF(i&3, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS); | | 1872 | MVGBE_DF(i&3, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS); |
1872 | } | | 1873 | } |
1873 | | | 1874 | |
1874 | ETHER_NEXT_MULTI(step, enm); | | 1875 | ETHER_NEXT_MULTI(step, enm); |
1875 | } | | 1876 | } |
1876 | goto set; | | 1877 | goto set; |
1877 | | | 1878 | |
1878 | allmulti: | | 1879 | allmulti: |
1879 | if (ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC)) { | | 1880 | if (ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC)) { |
1880 | for (i = 0; i < MVGBE_NDFSMT; i++) { | | 1881 | for (i = 0; i < MVGBE_NDFSMT; i++) { |
1881 | dfsmt[i] = dfomt[i] = | | 1882 | dfsmt[i] = dfomt[i] = |
1882 | MVGBE_DF(0, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS) | | | 1883 | MVGBE_DF(0, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS) | |
1883 | MVGBE_DF(1, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS) | | | 1884 | MVGBE_DF(1, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS) | |
1884 | MVGBE_DF(2, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS) | | | 1885 | MVGBE_DF(2, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS) | |
1885 | MVGBE_DF(3, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS); | | 1886 | MVGBE_DF(3, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS); |
1886 | } | | 1887 | } |
1887 | } | | 1888 | } |
1888 | | | 1889 | |
1889 | set: | | 1890 | set: |
1890 | pxc = MVGBE_READ(sc, MVGBE_PXC); | | 1891 | pxc = MVGBE_READ(sc, MVGBE_PXC); |
1891 | pxc &= ~MVGBE_PXC_UPM; | | 1892 | pxc &= ~MVGBE_PXC_UPM; |
1892 | pxc |= MVGBE_PXC_RB | MVGBE_PXC_RBIP | MVGBE_PXC_RBARP; | | 1893 | pxc |= MVGBE_PXC_RB | MVGBE_PXC_RBIP | MVGBE_PXC_RBARP; |
1893 | if (ifp->if_flags & IFF_BROADCAST) { | | 1894 | if (ifp->if_flags & IFF_BROADCAST) { |
1894 | pxc &= ~(MVGBE_PXC_RB | MVGBE_PXC_RBIP | MVGBE_PXC_RBARP); | | 1895 | pxc &= ~(MVGBE_PXC_RB | MVGBE_PXC_RBIP | MVGBE_PXC_RBARP); |
1895 | } | | 1896 | } |
1896 | if (ifp->if_flags & IFF_PROMISC) { | | 1897 | if (ifp->if_flags & IFF_PROMISC) { |
1897 | pxc |= MVGBE_PXC_UPM; | | 1898 | pxc |= MVGBE_PXC_UPM; |
1898 | } | | 1899 | } |
1899 | MVGBE_WRITE(sc, MVGBE_PXC, pxc); | | 1900 | MVGBE_WRITE(sc, MVGBE_PXC, pxc); |
1900 | | | 1901 | |
1901 | /* Set Destination Address Filter Unicast Table */ | | 1902 | /* Set Destination Address Filter Unicast Table */ |
1902 | i = sc->sc_enaddr[5] & 0xf; /* last nibble */ | | 1903 | i = sc->sc_enaddr[5] & 0xf; /* last nibble */ |
1903 | dfut[i>>2] = MVGBE_DF(i&3, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS); | | 1904 | dfut[i>>2] = MVGBE_DF(i&3, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS); |
1904 | MVGBE_WRITE_FILTER(sc, MVGBE_DFUT, dfut, MVGBE_NDFUT); | | 1905 | MVGBE_WRITE_FILTER(sc, MVGBE_DFUT, dfut, MVGBE_NDFUT); |
1905 | | | 1906 | |
1906 | /* Set Destination Address Filter Multicast Tables */ | | 1907 | /* Set Destination Address Filter Multicast Tables */ |
1907 | MVGBE_WRITE_FILTER(sc, MVGBE_DFSMT, dfsmt, MVGBE_NDFSMT); | | 1908 | MVGBE_WRITE_FILTER(sc, MVGBE_DFSMT, dfsmt, MVGBE_NDFSMT); |
1908 | MVGBE_WRITE_FILTER(sc, MVGBE_DFOMT, dfomt, MVGBE_NDFOMT); | | 1909 | MVGBE_WRITE_FILTER(sc, MVGBE_DFOMT, dfomt, MVGBE_NDFOMT); |
1909 | | | 1910 | |
1910 | kmem_free(dfut, sizeof(dfut[0]) * MVGBE_NDFUT); | | 1911 | kmem_free(dfut, sizeof(dfut[0]) * MVGBE_NDFUT); |
1911 | kmem_free(dfsmt, sizeof(dfsmt[0]) * MVGBE_NDFSMT); | | 1912 | kmem_free(dfsmt, sizeof(dfsmt[0]) * MVGBE_NDFSMT); |
1912 | kmem_free(dfomt, sizeof(dfsmt[0]) * MVGBE_NDFOMT); | | 1913 | kmem_free(dfomt, sizeof(dfsmt[0]) * MVGBE_NDFOMT); |
1913 | } | | 1914 | } |
1914 | | | 1915 | |
#ifdef MVGBE_DEBUG
/*
 * Dump the non-zero fields of a transmit descriptor for debugging.
 * Fields are printed in their in-memory order, which differs between
 * big- and little-endian builds because the descriptor layout is
 * byte-order dependent.
 */
static void
mvgbe_dump_txdesc(struct mvgbe_tx_desc *desc, int idx)
{
/*
 * Print "txdesc[idx].<field>=<value>" when <field> is non-zero.
 * The do { } while (0) wrapper makes the macro a single statement
 * (no dangling-else or stray-semicolon hazard at the call site), and
 * the temporary ensures the argument is evaluated exactly once.
 */
#define DESC_PRINT(X)							\
	do {								\
		const uint32_t x_ = (X);				\
		if (x_ != 0)						\
			printf("txdesc[%d]." #X "=%#x\n", idx, x_);	\
	} while (0)

#if BYTE_ORDER == BIG_ENDIAN
	DESC_PRINT(desc->bytecnt);
	DESC_PRINT(desc->l4ichk);
	DESC_PRINT(desc->cmdsts);
	DESC_PRINT(desc->nextdescptr);
	DESC_PRINT(desc->bufptr);
#else	/* LITTLE_ENDIAN */
	DESC_PRINT(desc->cmdsts);
	DESC_PRINT(desc->l4ichk);
	DESC_PRINT(desc->bytecnt);
	DESC_PRINT(desc->bufptr);
	DESC_PRINT(desc->nextdescptr);
#endif
#undef DESC_PRINT
}
#endif