| @@ -1,1303 +1,1303 @@ | | | @@ -1,1303 +1,1303 @@ |
1 | /* $NetBSD: dwc_eqos.c,v 1.4 2022/02/13 18:29:00 riastradh Exp $ */ | | 1 | /* $NetBSD: dwc_eqos.c,v 1.5 2022/02/13 18:29:15 riastradh Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 2022 Jared McNeill <jmcneill@invisible.ca> | | 4 | * Copyright (c) 2022 Jared McNeill <jmcneill@invisible.ca> |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * Redistribution and use in source and binary forms, with or without | | 7 | * Redistribution and use in source and binary forms, with or without |
8 | * modification, are permitted provided that the following conditions | | 8 | * modification, are permitted provided that the following conditions |
9 | * are met: | | 9 | * are met: |
10 | * 1. Redistributions of source code must retain the above copyright | | 10 | * 1. Redistributions of source code must retain the above copyright |
11 | * notice, this list of conditions and the following disclaimer. | | 11 | * notice, this list of conditions and the following disclaimer. |
12 | * 2. Redistributions in binary form must reproduce the above copyright | | 12 | * 2. Redistributions in binary form must reproduce the above copyright |
13 | * notice, this list of conditions and the following disclaimer in the | | 13 | * notice, this list of conditions and the following disclaimer in the |
14 | * documentation and/or other materials provided with the distribution. | | 14 | * documentation and/or other materials provided with the distribution. |
15 | * | | 15 | * |
16 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | | 16 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
17 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | | 17 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
18 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | | 18 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
19 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, | | 19 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
20 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, | | 20 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, |
21 | * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | | 21 | * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
22 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED | | 22 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED |
23 | * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, | | 23 | * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, |
24 | * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | | 24 | * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
25 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | | 25 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
26 | * SUCH DAMAGE. | | 26 | * SUCH DAMAGE. |
27 | */ | | 27 | */ |
28 | | | 28 | |
29 | /* | | 29 | /* |
30 | * DesignWare Ethernet Quality-of-Service controller | | 30 | * DesignWare Ethernet Quality-of-Service controller |
31 | */ | | 31 | */ |
32 | | | 32 | |
33 | #include "opt_net_mpsafe.h" | | 33 | #include "opt_net_mpsafe.h" |
34 | | | 34 | |
35 | #include <sys/cdefs.h> | | 35 | #include <sys/cdefs.h> |
36 | __KERNEL_RCSID(0, "$NetBSD: dwc_eqos.c,v 1.4 2022/02/13 18:29:00 riastradh Exp $"); | | 36 | __KERNEL_RCSID(0, "$NetBSD: dwc_eqos.c,v 1.5 2022/02/13 18:29:15 riastradh Exp $"); |
37 | | | 37 | |
38 | #include <sys/param.h> | | 38 | #include <sys/param.h> |
39 | #include <sys/bus.h> | | 39 | #include <sys/bus.h> |
40 | #include <sys/device.h> | | 40 | #include <sys/device.h> |
41 | #include <sys/intr.h> | | 41 | #include <sys/intr.h> |
42 | #include <sys/systm.h> | | 42 | #include <sys/systm.h> |
43 | #include <sys/kernel.h> | | 43 | #include <sys/kernel.h> |
44 | #include <sys/mutex.h> | | 44 | #include <sys/mutex.h> |
45 | #include <sys/callout.h> | | 45 | #include <sys/callout.h> |
46 | #include <sys/cprng.h> | | 46 | #include <sys/cprng.h> |
47 | #include <sys/evcnt.h> | | 47 | #include <sys/evcnt.h> |
48 | | | 48 | |
49 | #include <sys/rndsource.h> | | 49 | #include <sys/rndsource.h> |
50 | | | 50 | |
51 | #include <net/if.h> | | 51 | #include <net/if.h> |
52 | #include <net/if_dl.h> | | 52 | #include <net/if_dl.h> |
53 | #include <net/if_ether.h> | | 53 | #include <net/if_ether.h> |
54 | #include <net/if_media.h> | | 54 | #include <net/if_media.h> |
55 | #include <net/bpf.h> | | 55 | #include <net/bpf.h> |
56 | | | 56 | |
57 | #include <dev/mii/miivar.h> | | 57 | #include <dev/mii/miivar.h> |
58 | | | 58 | |
59 | #include <dev/ic/dwc_eqos_reg.h> | | 59 | #include <dev/ic/dwc_eqos_reg.h> |
60 | #include <dev/ic/dwc_eqos_var.h> | | 60 | #include <dev/ic/dwc_eqos_var.h> |
61 | | | 61 | |
62 | CTASSERT(MCLBYTES == 2048); | | 62 | CTASSERT(MCLBYTES == 2048); |
63 | #ifdef EQOS_DEBUG | | 63 | #ifdef EQOS_DEBUG |
64 | #define DPRINTF(...) printf(##__VA_ARGS__) | | 64 | #define DPRINTF(...) printf(##__VA_ARGS__) |
65 | #else | | 65 | #else |
66 | #define DPRINTF(...) ((void)0) | | 66 | #define DPRINTF(...) ((void)0) |
67 | #endif | | 67 | #endif |
68 | | | 68 | |
69 | #ifdef NET_MPSAFE | | 69 | #ifdef NET_MPSAFE |
70 | #define EQOS_MPSAFE 1 | | 70 | #define EQOS_MPSAFE 1 |
71 | #define CALLOUT_FLAGS CALLOUT_MPSAFE | | 71 | #define CALLOUT_FLAGS CALLOUT_MPSAFE |
72 | #else | | 72 | #else |
73 | #define CALLOUT_FLAGS 0 | | 73 | #define CALLOUT_FLAGS 0 |
74 | #endif | | 74 | #endif |
75 | | | 75 | |
76 | #define DESC_BOUNDARY (1ULL << 32) | | 76 | #define DESC_BOUNDARY (1ULL << 32) |
77 | #define DESC_ALIGN sizeof(struct eqos_dma_desc) | | 77 | #define DESC_ALIGN sizeof(struct eqos_dma_desc) |
78 | #define TX_DESC_COUNT EQOS_DMA_DESC_COUNT | | 78 | #define TX_DESC_COUNT EQOS_DMA_DESC_COUNT |
79 | #define TX_DESC_SIZE (TX_DESC_COUNT * DESC_ALIGN) | | 79 | #define TX_DESC_SIZE (TX_DESC_COUNT * DESC_ALIGN) |
80 | #define RX_DESC_COUNT EQOS_DMA_DESC_COUNT | | 80 | #define RX_DESC_COUNT EQOS_DMA_DESC_COUNT |
81 | #define RX_DESC_SIZE (RX_DESC_COUNT * DESC_ALIGN) | | 81 | #define RX_DESC_SIZE (RX_DESC_COUNT * DESC_ALIGN) |
82 | #define MII_BUSY_RETRY 1000 | | 82 | #define MII_BUSY_RETRY 1000 |
83 | | | 83 | |
84 | #define DESC_OFF(n) ((n) * sizeof(struct eqos_dma_desc)) | | 84 | #define DESC_OFF(n) ((n) * sizeof(struct eqos_dma_desc)) |
85 | #define TX_SKIP(n, o) (((n) + (o)) % TX_DESC_COUNT) | | 85 | #define TX_SKIP(n, o) (((n) + (o)) % TX_DESC_COUNT) |
86 | #define TX_NEXT(n) TX_SKIP(n, 1) | | 86 | #define TX_NEXT(n) TX_SKIP(n, 1) |
87 | #define RX_NEXT(n) (((n) + 1) % RX_DESC_COUNT) | | 87 | #define RX_NEXT(n) (((n) + 1) % RX_DESC_COUNT) |
88 | | | 88 | |
89 | #define TX_MAX_SEGS 128 | | 89 | #define TX_MAX_SEGS 128 |
90 | | | 90 | |
91 | #define EQOS_LOCK(sc) mutex_enter(&(sc)->sc_lock) | | 91 | #define EQOS_LOCK(sc) mutex_enter(&(sc)->sc_lock) |
92 | #define EQOS_UNLOCK(sc) mutex_exit(&(sc)->sc_lock) | | 92 | #define EQOS_UNLOCK(sc) mutex_exit(&(sc)->sc_lock) |
93 | #define EQOS_ASSERT_LOCKED(sc) KASSERT(mutex_owned(&(sc)->sc_lock)) | | 93 | #define EQOS_ASSERT_LOCKED(sc) KASSERT(mutex_owned(&(sc)->sc_lock)) |
94 | | | 94 | |
95 | #define EQOS_TXLOCK(sc) mutex_enter(&(sc)->sc_txlock) | | 95 | #define EQOS_TXLOCK(sc) mutex_enter(&(sc)->sc_txlock) |
96 | #define EQOS_TXUNLOCK(sc) mutex_exit(&(sc)->sc_txlock) | | 96 | #define EQOS_TXUNLOCK(sc) mutex_exit(&(sc)->sc_txlock) |
97 | #define EQOS_ASSERT_TXLOCKED(sc) KASSERT(mutex_owned(&(sc)->sc_txlock)) | | 97 | #define EQOS_ASSERT_TXLOCKED(sc) KASSERT(mutex_owned(&(sc)->sc_txlock)) |
98 | | | 98 | |
99 | #define EQOS_HW_FEATURE_ADDR64_32BIT(sc) \ | | 99 | #define EQOS_HW_FEATURE_ADDR64_32BIT(sc) \ |
100 | (((sc)->sc_hw_feature[1] & GMAC_MAC_HW_FEATURE1_ADDR64_MASK) == \ | | 100 | (((sc)->sc_hw_feature[1] & GMAC_MAC_HW_FEATURE1_ADDR64_MASK) == \ |
101 | GMAC_MAC_HW_FEATURE1_ADDR64_32BIT) | | 101 | GMAC_MAC_HW_FEATURE1_ADDR64_32BIT) |
102 | | | 102 | |
103 | | | 103 | |
104 | #define RD4(sc, reg) \ | | 104 | #define RD4(sc, reg) \ |
105 | bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg)) | | 105 | bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg)) |
106 | #define WR4(sc, reg, val) \ | | 106 | #define WR4(sc, reg, val) \ |
107 | bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val)) | | 107 | bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val)) |
108 | | | 108 | |
109 | #define STUB(...) \ | | 109 | #define STUB(...) \ |
110 | printf("%s: TODO\n", __func__); \ | | 110 | printf("%s: TODO\n", __func__); \ |
111 | | | 111 | |
/*
 * MII read: issue a clause-22 read transaction on the MDIO bus and poll
 * the GB (busy) bit until the controller completes it.
 *
 * Returns 0 on success with the 16-bit result in *val, or ETIMEDOUT if
 * the busy bit never clears within MII_BUSY_RETRY polls.
 */
static int
eqos_mii_readreg(device_t dev, int phy, int reg, uint16_t *val)
{
	struct eqos_softc *sc = device_private(dev);
	uint32_t addr;
	int retry;

	/* Compose the command: MDC clock range, PHY addr, register, read op. */
	addr = sc->sc_clock_range |
	    (phy << GMAC_MAC_MDIO_ADDRESS_PA_SHIFT) |
	    (reg << GMAC_MAC_MDIO_ADDRESS_RDA_SHIFT) |
	    GMAC_MAC_MDIO_ADDRESS_GOC_READ |
	    GMAC_MAC_MDIO_ADDRESS_GB;
	WR4(sc, GMAC_MAC_MDIO_ADDRESS, addr);

	delay(10000);

	/* Poll the busy bit; on completion latch the 16-bit data word. */
	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		addr = RD4(sc, GMAC_MAC_MDIO_ADDRESS);
		if ((addr & GMAC_MAC_MDIO_ADDRESS_GB) == 0) {
			*val = RD4(sc, GMAC_MAC_MDIO_DATA) & 0xFFFF;
			break;
		}
		delay(10);
	}
	if (retry == 0) {
		device_printf(dev, "phy read timeout, phy=%d reg=%d\n",
		    phy, reg);
		return ETIMEDOUT;
	}

	return 0;
}
144 | | | 144 | |
145 | static int | | 145 | static int |
146 | eqos_mii_writereg(device_t dev, int phy, int reg, uint16_t val) | | 146 | eqos_mii_writereg(device_t dev, int phy, int reg, uint16_t val) |
147 | { | | 147 | { |
148 | struct eqos_softc *sc = device_private(dev); | | 148 | struct eqos_softc *sc = device_private(dev); |
149 | uint32_t addr; | | 149 | uint32_t addr; |
150 | int retry; | | 150 | int retry; |
151 | | | 151 | |
152 | WR4(sc, GMAC_MAC_MDIO_DATA, val); | | 152 | WR4(sc, GMAC_MAC_MDIO_DATA, val); |
153 | | | 153 | |
154 | addr = sc->sc_clock_range | | | 154 | addr = sc->sc_clock_range | |
155 | (phy << GMAC_MAC_MDIO_ADDRESS_PA_SHIFT) | | | 155 | (phy << GMAC_MAC_MDIO_ADDRESS_PA_SHIFT) | |
156 | (reg << GMAC_MAC_MDIO_ADDRESS_RDA_SHIFT) | | | 156 | (reg << GMAC_MAC_MDIO_ADDRESS_RDA_SHIFT) | |
157 | GMAC_MAC_MDIO_ADDRESS_GOC_WRITE | | | 157 | GMAC_MAC_MDIO_ADDRESS_GOC_WRITE | |
158 | GMAC_MAC_MDIO_ADDRESS_GB; | | 158 | GMAC_MAC_MDIO_ADDRESS_GB; |
159 | WR4(sc, GMAC_MAC_MDIO_ADDRESS, addr); | | 159 | WR4(sc, GMAC_MAC_MDIO_ADDRESS, addr); |
160 | | | 160 | |
161 | delay(10000); | | 161 | delay(10000); |
162 | | | 162 | |
163 | for (retry = MII_BUSY_RETRY; retry > 0; retry--) { | | 163 | for (retry = MII_BUSY_RETRY; retry > 0; retry--) { |
164 | addr = RD4(sc, GMAC_MAC_MDIO_ADDRESS); | | 164 | addr = RD4(sc, GMAC_MAC_MDIO_ADDRESS); |
165 | if ((addr & GMAC_MAC_MDIO_ADDRESS_GB) == 0) { | | 165 | if ((addr & GMAC_MAC_MDIO_ADDRESS_GB) == 0) { |
166 | break; | | 166 | break; |
167 | } | | 167 | } |
168 | delay(10); | | 168 | delay(10); |
169 | } | | 169 | } |
170 | if (retry == 0) { | | 170 | if (retry == 0) { |
171 | device_printf(dev, "phy write timeout, phy=%d reg=%d\n", | | 171 | device_printf(dev, "phy write timeout, phy=%d reg=%d\n", |
172 | phy, reg); | | 172 | phy, reg); |
173 | return ETIMEDOUT; | | 173 | return ETIMEDOUT; |
174 | } | | 174 | } |
175 | | | 175 | |
176 | return 0; | | 176 | return 0; |
177 | } | | 177 | } |
178 | | | 178 | |
/*
 * Reprogram the MAC speed/duplex configuration from the currently
 * negotiated MII media.
 *
 * NOTE(review): PS/FES bit meanings taken from their use here -- PS set
 * for 10/100, FES set for the faster of each pair, DM for full duplex;
 * confirm against the DWC EQOS databook / dwc_eqos_reg.h.
 */
static void
eqos_update_link(struct eqos_softc *sc)
{
	struct mii_data *mii = &sc->sc_mii;
	uint64_t baudrate;
	uint32_t conf;

	baudrate = ifmedia_baudrate(mii->mii_media_active);

	/* Read-modify-write: only the speed/duplex bits are touched. */
	conf = RD4(sc, GMAC_MAC_CONFIGURATION);
	switch (baudrate) {
	case IF_Mbps(10):
		conf |= GMAC_MAC_CONFIGURATION_PS;
		conf &= ~GMAC_MAC_CONFIGURATION_FES;
		break;
	case IF_Mbps(100):
		conf |= GMAC_MAC_CONFIGURATION_PS;
		conf |= GMAC_MAC_CONFIGURATION_FES;
		break;
	case IF_Gbps(1):
		conf &= ~GMAC_MAC_CONFIGURATION_PS;
		conf &= ~GMAC_MAC_CONFIGURATION_FES;
		break;
	case IF_Mbps(2500ULL):
		conf &= ~GMAC_MAC_CONFIGURATION_PS;
		conf |= GMAC_MAC_CONFIGURATION_FES;
		break;
	}
	/* Unrecognized baudrates (including 0/no link) leave conf as-is. */

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		conf |= GMAC_MAC_CONFIGURATION_DM;
	} else {
		conf &= ~GMAC_MAC_CONFIGURATION_DM;
	}

	WR4(sc, GMAC_MAC_CONFIGURATION, conf);
}
216 | | | 216 | |
217 | static void | | 217 | static void |
218 | eqos_mii_statchg(struct ifnet *ifp) | | 218 | eqos_mii_statchg(struct ifnet *ifp) |
219 | { | | 219 | { |
220 | struct eqos_softc * const sc = ifp->if_softc; | | 220 | struct eqos_softc * const sc = ifp->if_softc; |
221 | | | 221 | |
222 | eqos_update_link(sc); | | 222 | eqos_update_link(sc); |
223 | } | | 223 | } |
224 | | | 224 | |
/*
 * bus_dmamap_sync() the descriptor range [start, end) of a ring with
 * `total' entries.  If the range wraps past the end of the ring
 * (end <= start), the sync is split into [start, total) and [0, end).
 */
static void
eqos_dma_sync(struct eqos_softc *sc, bus_dmamap_t map,
    u_int start, u_int end, u_int total, int flags)
{
	if (end > start) {
		/* Contiguous range -- one sync covers it. */
		bus_dmamap_sync(sc->sc_dmat, map, DESC_OFF(start),
		    DESC_OFF(end) - DESC_OFF(start), flags);
	} else {
		/* Wrapped: tail of the ring first, then the head up to end. */
		bus_dmamap_sync(sc->sc_dmat, map, DESC_OFF(start),
		    DESC_OFF(total) - DESC_OFF(start), flags);
		if (DESC_OFF(end) - DESC_OFF(0) > 0) {
			bus_dmamap_sync(sc->sc_dmat, map, DESC_OFF(0),
			    DESC_OFF(end) - DESC_OFF(0), flags);
		}
	}
}
241 | | | 241 | |
/*
 * Fill in TX descriptor `index'.
 *
 * A zero paddr/len clears the descriptor and releases its ring slot
 * (sc_tx.queued--); otherwise the slot is consumed (sc_tx.queued++).
 * `flags' carries the caller's TDES3 bits (FD/LD/OWN); interrupt on
 * completion (IOC) is requested only on a packet's last descriptor.
 */
static void
eqos_setup_txdesc(struct eqos_softc *sc, int index, int flags,
    bus_addr_t paddr, u_int len, u_int total_len)
{
	uint32_t tdes2, tdes3;

	if (paddr == 0 || len == 0) {
		KASSERT(flags == 0);
		tdes2 = 0;
		tdes3 = 0;
		--sc->sc_tx.queued;
	} else {
		tdes2 = (flags & EQOS_TDES3_LD) ? EQOS_TDES2_IOC : 0;
		tdes3 = flags;
		++sc->sc_tx.queued;
	}

	/* A 32-bit-only DMA engine must never see an address above 4GB. */
	KASSERT(!EQOS_HW_FEATURE_ADDR64_32BIT(sc) || (paddr >> 32) == 0);

	/* tdes0/tdes1 hold the low/high halves of the buffer address. */
	sc->sc_tx.desc_ring[index].tdes0 = htole32((uint32_t)paddr);
	sc->sc_tx.desc_ring[index].tdes1 = htole32((uint32_t)(paddr >> 32));
	sc->sc_tx.desc_ring[index].tdes2 = htole32(tdes2 | len);
	sc->sc_tx.desc_ring[index].tdes3 = htole32(tdes3 | total_len);
}
266 | | | 266 | |
/*
 * Map mbuf chain `m' for TX starting at ring slot `index' and fill one
 * descriptor per DMA segment.
 *
 * Returns:
 *   >0  number of descriptors consumed (success)
 *   0   transient mapping failure; caller should retry later
 *   -1  not enough free descriptors; caller should retry later
 *   -2  packet needs too many segments; caller should drop it
 */
static int
eqos_setup_txbuf(struct eqos_softc *sc, int index, struct mbuf *m)
{
	bus_dma_segment_t *segs;
	int error, nsegs, cur, i;
	uint32_t flags;
	bool nospace;

	/* at least one descriptor free ? */
	if (sc->sc_tx.queued >= TX_DESC_COUNT - 1)
		return -1;

	error = bus_dmamap_load_mbuf(sc->sc_dmat,
	    sc->sc_tx.buf_map[index].map, m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		device_printf(sc->sc_dev,
		    "TX packet needs too many DMA segments, dropping...\n");
		return -2;
	}
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "TX packet cannot be mapped, retried...\n");
		return 0;
	}

	segs = sc->sc_tx.buf_map[index].map->dm_segs;
	nsegs = sc->sc_tx.buf_map[index].map->dm_nsegs;

	/* Re-check now that the actual segment count is known. */
	nospace = sc->sc_tx.queued >= TX_DESC_COUNT - nsegs;
	if (nospace) {
		bus_dmamap_unload(sc->sc_dmat,
		    sc->sc_tx.buf_map[index].map);
		/* XXX coalesce and retry ? */
		return -1;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_tx.buf_map[index].map,
	    0, sc->sc_tx.buf_map[index].map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	/* stored in same index as loaded map */
	sc->sc_tx.buf_map[index].mbuf = m;

	/* First descriptor gets FD; last gets LD (and IOC via txdesc). */
	flags = EQOS_TDES3_FD;

	for (cur = index, i = 0; i < nsegs; i++) {
		if (i == nsegs - 1)
			flags |= EQOS_TDES3_LD;

		eqos_setup_txdesc(sc, cur, flags, segs[i].ds_addr,
		    segs[i].ds_len, m->m_pkthdr.len);
		flags &= ~EQOS_TDES3_FD;
		cur = TX_NEXT(cur);

		/* Every descriptor after the first is handed over immediately. */
		flags |= EQOS_TDES3_OWN;
	}

	/*
	 * Defer setting OWN bit on the first descriptor until all
	 * descriptors have been updated.  The hardware will not try to
	 * process any descriptors past the first one still owned by
	 * software (i.e., with the OWN bit clear).
	 *
	 * NOTE(review): this sync covers only tdes0..tdes2 of the first
	 * descriptor; presumably the caller's subsequent ring sync /
	 * doorbell write covers tdes3 -- confirm against the caller.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_tx.desc_map,
	    DESC_OFF(index), offsetof(struct eqos_dma_desc, tdes3),
	    BUS_DMASYNC_PREWRITE);
	sc->sc_tx.desc_ring[index].tdes3 |= htole32(EQOS_TDES3_OWN);

	return nsegs;
}
336 | | | 336 | |
/*
 * Point RX descriptor `index' at buffer `paddr' and hand it to the
 * hardware.  The address/control words are synced before OWN is set in
 * tdes3 so the DMA engine cannot observe a half-written descriptor.
 */
static void
eqos_setup_rxdesc(struct eqos_softc *sc, int index, bus_addr_t paddr)
{

	sc->sc_rx.desc_ring[index].tdes0 = htole32((uint32_t)paddr);
	sc->sc_rx.desc_ring[index].tdes1 = htole32((uint32_t)(paddr >> 32));
	sc->sc_rx.desc_ring[index].tdes2 = htole32(0);
	/* Sync covers tdes0..tdes2 only; tdes3 is written afterwards. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rx.desc_map,
	    DESC_OFF(index), offsetof(struct eqos_dma_desc, tdes3),
	    BUS_DMASYNC_PREWRITE);
	/* OWN: give to hardware; IOC: interrupt on fill; BUF1V: addr valid. */
	sc->sc_rx.desc_ring[index].tdes3 =
	    htole32(EQOS_TDES3_OWN | EQOS_TDES3_IOC | EQOS_TDES3_BUF1V);
}
350 | | | 350 | |
/*
 * Load mbuf `m' into RX slot `index': DMA-map it, record it, and arm
 * the corresponding RX descriptor.  Returns 0 or the bus_dma error.
 */
static int
eqos_setup_rxbuf(struct eqos_softc *sc, int index, struct mbuf *m)
{
	int error;

	/* Offset payload so the IP header lands on a natural boundary. */
	m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf(sc->sc_dmat,
	    sc->sc_rx.buf_map[index].map, m, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0)
		return error;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rx.buf_map[index].map,
	    0, sc->sc_rx.buf_map[index].map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Remember the mbuf so rxintr can find it in the same slot. */
	sc->sc_rx.buf_map[index].mbuf = m;
	eqos_setup_rxdesc(sc, index,
	    sc->sc_rx.buf_map[index].map->dm_segs[0].ds_addr);

	return 0;
}
373 | | | 373 | |
374 | static struct mbuf * | | 374 | static struct mbuf * |
375 | eqos_alloc_mbufcl(struct eqos_softc *sc) | | 375 | eqos_alloc_mbufcl(struct eqos_softc *sc) |
376 | { | | 376 | { |
377 | struct mbuf *m; | | 377 | struct mbuf *m; |
378 | | | 378 | |
379 | m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); | | 379 | m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); |
380 | if (m != NULL) | | 380 | if (m != NULL) |
381 | m->m_pkthdr.len = m->m_len = m->m_ext.ext_size; | | 381 | m->m_pkthdr.len = m->m_len = m->m_ext.ext_size; |
382 | | | 382 | |
383 | return m; | | 383 | return m; |
384 | } | | 384 | } |
385 | | | 385 | |
386 | static void | | 386 | static void |
387 | eqos_enable_intr(struct eqos_softc *sc) | | 387 | eqos_enable_intr(struct eqos_softc *sc) |
388 | { | | 388 | { |
389 | WR4(sc, GMAC_DMA_CHAN0_INTR_ENABLE, | | 389 | WR4(sc, GMAC_DMA_CHAN0_INTR_ENABLE, |
390 | GMAC_DMA_CHAN0_INTR_ENABLE_NIE | | | 390 | GMAC_DMA_CHAN0_INTR_ENABLE_NIE | |
391 | GMAC_DMA_CHAN0_INTR_ENABLE_AIE | | | 391 | GMAC_DMA_CHAN0_INTR_ENABLE_AIE | |
392 | GMAC_DMA_CHAN0_INTR_ENABLE_FBE | | | 392 | GMAC_DMA_CHAN0_INTR_ENABLE_FBE | |
393 | GMAC_DMA_CHAN0_INTR_ENABLE_RIE | | | 393 | GMAC_DMA_CHAN0_INTR_ENABLE_RIE | |
394 | GMAC_DMA_CHAN0_INTR_ENABLE_TIE); | | 394 | GMAC_DMA_CHAN0_INTR_ENABLE_TIE); |
395 | } | | 395 | } |
396 | | | 396 | |
/* Mask all DMA channel 0 interrupts. */
static void
eqos_disable_intr(struct eqos_softc *sc)
{
	WR4(sc, GMAC_DMA_CHAN0_INTR_ENABLE, 0);
}
402 | | | 402 | |
/*
 * Periodic (1 Hz) callout: drive the MII state machine and
 * reschedule ourselves.
 */
static void
eqos_tick(void *softc)
{
	struct eqos_softc *sc = softc;
	struct mii_data *mii = &sc->sc_mii;
#ifndef EQOS_MPSAFE
	/* Non-MPSAFE configs additionally block network interrupts. */
	int s = splnet();
#endif

	EQOS_LOCK(sc);
	mii_tick(mii);
	callout_schedule(&sc->sc_stat_ch, hz);
	EQOS_UNLOCK(sc);

#ifndef EQOS_MPSAFE
	splx(s);
#endif
}
421 | | | 421 | |
/*
 * Reverse the bit order of a 32-bit word (bit 0 <-> bit 31, bit 1 <->
 * bit 30, and so on).
 */
static uint32_t
eqos_bitrev32(uint32_t x)
{
	uint32_t result = 0;
	unsigned int bit;

	for (bit = 0; bit < 32; bit++) {
		result = (result << 1) | (x & 1);
		x >>= 1;
	}

	return result;
}
432 | | | 432 | |
433 | static void | | 433 | static void |
434 | eqos_setup_rxfilter(struct eqos_softc *sc) | | 434 | eqos_setup_rxfilter(struct eqos_softc *sc) |
435 | { | | 435 | { |
436 | struct ethercom *ec = &sc->sc_ec; | | 436 | struct ethercom *ec = &sc->sc_ec; |
437 | struct ifnet *ifp = &ec->ec_if; | | 437 | struct ifnet *ifp = &ec->ec_if; |
438 | uint32_t pfil, crc, hashreg, hashbit, hash[2]; | | 438 | uint32_t pfil, crc, hashreg, hashbit, hash[2]; |
439 | struct ether_multi *enm; | | 439 | struct ether_multi *enm; |
440 | struct ether_multistep step; | | 440 | struct ether_multistep step; |
441 | const uint8_t *eaddr; | | 441 | const uint8_t *eaddr; |
442 | uint32_t val; | | 442 | uint32_t val; |
443 | | | 443 | |
444 | EQOS_ASSERT_LOCKED(sc); | | 444 | EQOS_ASSERT_LOCKED(sc); |
445 | | | 445 | |
446 | pfil = RD4(sc, GMAC_MAC_PACKET_FILTER); | | 446 | pfil = RD4(sc, GMAC_MAC_PACKET_FILTER); |
447 | pfil &= ~(GMAC_MAC_PACKET_FILTER_PR | | | 447 | pfil &= ~(GMAC_MAC_PACKET_FILTER_PR | |
448 | GMAC_MAC_PACKET_FILTER_PM | | | 448 | GMAC_MAC_PACKET_FILTER_PM | |
449 | GMAC_MAC_PACKET_FILTER_HMC | | | 449 | GMAC_MAC_PACKET_FILTER_HMC | |
450 | GMAC_MAC_PACKET_FILTER_PCF_MASK); | | 450 | GMAC_MAC_PACKET_FILTER_PCF_MASK); |
451 | hash[0] = hash[1] = ~0U; | | 451 | hash[0] = hash[1] = ~0U; |
452 | | | 452 | |
453 | if ((ifp->if_flags & IFF_PROMISC) != 0) { | | 453 | if ((ifp->if_flags & IFF_PROMISC) != 0) { |
454 | pfil |= GMAC_MAC_PACKET_FILTER_PR | | | 454 | pfil |= GMAC_MAC_PACKET_FILTER_PR | |
455 | GMAC_MAC_PACKET_FILTER_PCF_ALL; | | 455 | GMAC_MAC_PACKET_FILTER_PCF_ALL; |
456 | } else if ((ifp->if_flags & IFF_ALLMULTI) != 0) { | | 456 | } else if ((ifp->if_flags & IFF_ALLMULTI) != 0) { |
457 | pfil |= GMAC_MAC_PACKET_FILTER_PM; | | 457 | pfil |= GMAC_MAC_PACKET_FILTER_PM; |
458 | } else { | | 458 | } else { |
459 | hash[0] = hash[1] = 0; | | 459 | hash[0] = hash[1] = 0; |
460 | pfil |= GMAC_MAC_PACKET_FILTER_HMC; | | 460 | pfil |= GMAC_MAC_PACKET_FILTER_HMC; |
461 | ETHER_LOCK(ec); | | 461 | ETHER_LOCK(ec); |
462 | ETHER_FIRST_MULTI(step, ec, enm); | | 462 | ETHER_FIRST_MULTI(step, ec, enm); |
463 | while (enm != NULL) { | | 463 | while (enm != NULL) { |
464 | crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN); | | 464 | crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN); |
465 | crc &= 0x7f; | | 465 | crc &= 0x7f; |
466 | crc = eqos_bitrev32(~crc) >> 26; | | 466 | crc = eqos_bitrev32(~crc) >> 26; |
467 | hashreg = (crc >> 5); | | 467 | hashreg = (crc >> 5); |
468 | hashbit = (crc & 0x1f); | | 468 | hashbit = (crc & 0x1f); |
469 | hash[hashreg] |= (1 << hashbit); | | 469 | hash[hashreg] |= (1 << hashbit); |
470 | ETHER_NEXT_MULTI(step, enm); | | 470 | ETHER_NEXT_MULTI(step, enm); |
471 | } | | 471 | } |
472 | ETHER_UNLOCK(ec); | | 472 | ETHER_UNLOCK(ec); |
473 | } | | 473 | } |
474 | | | 474 | |
475 | /* Write our unicast address */ | | 475 | /* Write our unicast address */ |
476 | eaddr = CLLADDR(ifp->if_sadl); | | 476 | eaddr = CLLADDR(ifp->if_sadl); |
477 | val = eaddr[4] | (eaddr[5] << 8); | | 477 | val = eaddr[4] | (eaddr[5] << 8); |
478 | WR4(sc, GMAC_MAC_ADDRESS0_HIGH, val); | | 478 | WR4(sc, GMAC_MAC_ADDRESS0_HIGH, val); |
479 | val = eaddr[0] | (eaddr[1] << 8) | (eaddr[2] << 16) | | | 479 | val = eaddr[0] | (eaddr[1] << 8) | (eaddr[2] << 16) | |
480 | (eaddr[3] << 24); | | 480 | (eaddr[3] << 24); |
481 | WR4(sc, GMAC_MAC_ADDRESS0_LOW, val); | | 481 | WR4(sc, GMAC_MAC_ADDRESS0_LOW, val); |
482 | | | 482 | |
483 | /* Multicast hash filters */ | | 483 | /* Multicast hash filters */ |
484 | WR4(sc, GMAC_MAC_HASH_TABLE_REG0, hash[1]); | | 484 | WR4(sc, GMAC_MAC_HASH_TABLE_REG0, hash[1]); |
485 | WR4(sc, GMAC_MAC_HASH_TABLE_REG1, hash[0]); | | 485 | WR4(sc, GMAC_MAC_HASH_TABLE_REG1, hash[0]); |
486 | | | 486 | |
487 | /* Packet filter config */ | | 487 | /* Packet filter config */ |
488 | WR4(sc, GMAC_MAC_PACKET_FILTER, pfil); | | 488 | WR4(sc, GMAC_MAC_PACKET_FILTER, pfil); |
489 | } | | 489 | } |
490 | | | 490 | |
491 | static int | | 491 | static int |
492 | eqos_reset(struct eqos_softc *sc) | | 492 | eqos_reset(struct eqos_softc *sc) |
493 | { | | 493 | { |
494 | uint32_t val; | | 494 | uint32_t val; |
495 | int retry; | | 495 | int retry; |
496 | | | 496 | |
497 | WR4(sc, GMAC_DMA_MODE, GMAC_DMA_MODE_SWR); | | 497 | WR4(sc, GMAC_DMA_MODE, GMAC_DMA_MODE_SWR); |
498 | for (retry = 2000; retry > 0; retry--) { | | 498 | for (retry = 2000; retry > 0; retry--) { |
499 | delay(1000); | | 499 | delay(1000); |
500 | val = RD4(sc, GMAC_DMA_MODE); | | 500 | val = RD4(sc, GMAC_DMA_MODE); |
501 | if ((val & GMAC_DMA_MODE_SWR) == 0) { | | 501 | if ((val & GMAC_DMA_MODE_SWR) == 0) { |
502 | return 0; | | 502 | return 0; |
503 | } | | 503 | } |
504 | } | | 504 | } |
505 | | | 505 | |
506 | device_printf(sc->sc_dev, "reset timeout!\n"); | | 506 | device_printf(sc->sc_dev, "reset timeout!\n"); |
507 | return ETIMEDOUT; | | 507 | return ETIMEDOUT; |
508 | } | | 508 | } |
509 | | | 509 | |
510 | static void | | 510 | static void |
511 | eqos_init_rings(struct eqos_softc *sc, int qid) | | 511 | eqos_init_rings(struct eqos_softc *sc, int qid) |
512 | { | | 512 | { |
513 | sc->sc_tx.queued = 0; | | 513 | sc->sc_tx.queued = 0; |
514 | | | 514 | |
515 | WR4(sc, GMAC_DMA_CHAN0_TX_BASE_ADDR_HI, | | 515 | WR4(sc, GMAC_DMA_CHAN0_TX_BASE_ADDR_HI, |
516 | (uint32_t)(sc->sc_tx.desc_ring_paddr >> 32)); | | 516 | (uint32_t)(sc->sc_tx.desc_ring_paddr >> 32)); |
517 | WR4(sc, GMAC_DMA_CHAN0_TX_BASE_ADDR, | | 517 | WR4(sc, GMAC_DMA_CHAN0_TX_BASE_ADDR, |
518 | (uint32_t)sc->sc_tx.desc_ring_paddr); | | 518 | (uint32_t)sc->sc_tx.desc_ring_paddr); |
519 | WR4(sc, GMAC_DMA_CHAN0_TX_RING_LEN, TX_DESC_COUNT - 1); | | 519 | WR4(sc, GMAC_DMA_CHAN0_TX_RING_LEN, TX_DESC_COUNT - 1); |
520 | | | 520 | |
521 | WR4(sc, GMAC_DMA_CHAN0_RX_BASE_ADDR_HI, | | 521 | WR4(sc, GMAC_DMA_CHAN0_RX_BASE_ADDR_HI, |
522 | (uint32_t)(sc->sc_rx.desc_ring_paddr >> 32)); | | 522 | (uint32_t)(sc->sc_rx.desc_ring_paddr >> 32)); |
523 | WR4(sc, GMAC_DMA_CHAN0_RX_BASE_ADDR, | | 523 | WR4(sc, GMAC_DMA_CHAN0_RX_BASE_ADDR, |
524 | (uint32_t)sc->sc_rx.desc_ring_paddr); | | 524 | (uint32_t)sc->sc_rx.desc_ring_paddr); |
525 | WR4(sc, GMAC_DMA_CHAN0_RX_RING_LEN, RX_DESC_COUNT - 1); | | 525 | WR4(sc, GMAC_DMA_CHAN0_RX_RING_LEN, RX_DESC_COUNT - 1); |
526 | WR4(sc, GMAC_DMA_CHAN0_RX_END_ADDR, | | 526 | WR4(sc, GMAC_DMA_CHAN0_RX_END_ADDR, |
527 | (uint32_t)sc->sc_rx.desc_ring_paddr + | | 527 | (uint32_t)sc->sc_rx.desc_ring_paddr + |
528 | DESC_OFF((sc->sc_rx.cur - 1) % RX_DESC_COUNT)); | | 528 | DESC_OFF((sc->sc_rx.cur - 1) % RX_DESC_COUNT)); |
529 | } | | 529 | } |
530 | | | 530 | |
/*
 * Bring the interface up: initialize descriptor rings and the RX
 * filter, start the DMA channel, configure the MTL TX/RX queues and
 * flow control, and finally enable the MAC transmitter and receiver.
 * Called with both the core lock and the TX lock held.  Always
 * returns 0.
 */
static int
eqos_init_locked(struct eqos_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t val;

	EQOS_ASSERT_LOCKED(sc);
	EQOS_ASSERT_TXLOCKED(sc);

	/* Nothing to do if the interface is already running. */
	if ((ifp->if_flags & IFF_RUNNING) != 0)
		return 0;

	/* Setup TX/RX rings */
	eqos_init_rings(sc, 0);

	/* Setup RX filter */
	eqos_setup_rxfilter(sc);

	/* Program the 1us tick reference from the CSR clock rate. */
	WR4(sc, GMAC_MAC_1US_TIC_COUNTER, (sc->sc_csr_clock / 1000000) - 1);

	/* Enable transmit and receive DMA */
	val = RD4(sc, GMAC_DMA_CHAN0_CONTROL);
	val &= ~GMAC_DMA_CHAN0_CONTROL_DSL_MASK;
	/* Descriptor skip length derived from the descriptor alignment. */
	val |= ((DESC_ALIGN - 16) / 8) << GMAC_DMA_CHAN0_CONTROL_DSL_SHIFT;
	val |= GMAC_DMA_CHAN0_CONTROL_PBLX8;
	WR4(sc, GMAC_DMA_CHAN0_CONTROL, val);
	val = RD4(sc, GMAC_DMA_CHAN0_TX_CONTROL);
	val |= GMAC_DMA_CHAN0_TX_CONTROL_OSP;
	val |= GMAC_DMA_CHAN0_TX_CONTROL_START;
	WR4(sc, GMAC_DMA_CHAN0_TX_CONTROL, val);
	val = RD4(sc, GMAC_DMA_CHAN0_RX_CONTROL);
	val &= ~GMAC_DMA_CHAN0_RX_CONTROL_RBSZ_MASK;
	/* RX buffers are mbuf clusters, so MCLBYTES per buffer. */
	val |= (MCLBYTES << GMAC_DMA_CHAN0_RX_CONTROL_RBSZ_SHIFT);
	val |= GMAC_DMA_CHAN0_RX_CONTROL_START;
	WR4(sc, GMAC_DMA_CHAN0_RX_CONTROL, val);

	/* Configure operation modes */
	WR4(sc, GMAC_MTL_TXQ0_OPERATION_MODE,
	    GMAC_MTL_TXQ0_OPERATION_MODE_TSF |
	    GMAC_MTL_TXQ0_OPERATION_MODE_TXQEN_EN);
	WR4(sc, GMAC_MTL_RXQ0_OPERATION_MODE,
	    GMAC_MTL_RXQ0_OPERATION_MODE_RSF |
	    GMAC_MTL_RXQ0_OPERATION_MODE_FEP |
	    GMAC_MTL_RXQ0_OPERATION_MODE_FUP);

	/* Enable flow control */
	val = RD4(sc, GMAC_MAC_Q0_TX_FLOW_CTRL);
	/* Maximum pause time. */
	val |= 0xFFFFU << GMAC_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT;
	val |= GMAC_MAC_Q0_TX_FLOW_CTRL_TFE;
	WR4(sc, GMAC_MAC_Q0_TX_FLOW_CTRL, val);
	val = RD4(sc, GMAC_MAC_RX_FLOW_CTRL);
	val |= GMAC_MAC_RX_FLOW_CTRL_RFE;
	WR4(sc, GMAC_MAC_RX_FLOW_CTRL, val);

	/* Enable transmitter and receiver */
	val = RD4(sc, GMAC_MAC_CONFIGURATION);
	val |= GMAC_MAC_CONFIGURATION_BE;
	val |= GMAC_MAC_CONFIGURATION_JD;
	val |= GMAC_MAC_CONFIGURATION_JE;
	val |= GMAC_MAC_CONFIGURATION_DCRS;
	val |= GMAC_MAC_CONFIGURATION_TE;
	val |= GMAC_MAC_CONFIGURATION_RE;
	WR4(sc, GMAC_MAC_CONFIGURATION, val);

	/* Enable interrupts */
	eqos_enable_intr(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Kick the PHY and start the periodic statistics callout. */
	mii_mediachg(mii);
	callout_schedule(&sc->sc_stat_ch, hz);

	return 0;
}
607 | | | 607 | |
608 | static int | | 608 | static int |
609 | eqos_init(struct ifnet *ifp) | | 609 | eqos_init(struct ifnet *ifp) |
610 | { | | 610 | { |
611 | struct eqos_softc *sc = ifp->if_softc; | | 611 | struct eqos_softc *sc = ifp->if_softc; |
612 | int error; | | 612 | int error; |
613 | | | 613 | |
614 | EQOS_LOCK(sc); | | 614 | EQOS_LOCK(sc); |
615 | EQOS_TXLOCK(sc); | | 615 | EQOS_TXLOCK(sc); |
616 | error = eqos_init_locked(sc); | | 616 | error = eqos_init_locked(sc); |
617 | EQOS_TXUNLOCK(sc); | | 617 | EQOS_TXUNLOCK(sc); |
618 | EQOS_UNLOCK(sc); | | 618 | EQOS_UNLOCK(sc); |
619 | | | 619 | |
620 | return error; | | 620 | return error; |
621 | } | | 621 | } |
622 | | | 622 | |
/*
 * Bring the interface down with the core lock held: stop the stats
 * callout and the PHY, disable the receiver, halt RX then TX DMA,
 * optionally flush the TX FIFO (when "disable" is set), then disable
 * the transmitter and all interrupts.
 */
static void
eqos_stop_locked(struct eqos_softc *sc, int disable)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint32_t val;
	int retry;

	EQOS_ASSERT_LOCKED(sc);

	callout_stop(&sc->sc_stat_ch);

	mii_down(&sc->sc_mii);

	/* Disable receiver */
	val = RD4(sc, GMAC_MAC_CONFIGURATION);
	val &= ~GMAC_MAC_CONFIGURATION_RE;
	WR4(sc, GMAC_MAC_CONFIGURATION, val);

	/* Stop receive DMA */
	val = RD4(sc, GMAC_DMA_CHAN0_RX_CONTROL);
	val &= ~GMAC_DMA_CHAN0_RX_CONTROL_START;
	WR4(sc, GMAC_DMA_CHAN0_RX_CONTROL, val);

	/* Stop transmit DMA */
	val = RD4(sc, GMAC_DMA_CHAN0_TX_CONTROL);
	val &= ~GMAC_DMA_CHAN0_TX_CONTROL_START;
	WR4(sc, GMAC_DMA_CHAN0_TX_CONTROL, val);

	if (disable) {
		/* Flush data in the TX FIFO */
		val = RD4(sc, GMAC_MTL_TXQ0_OPERATION_MODE);
		val |= GMAC_MTL_TXQ0_OPERATION_MODE_FTQ;
		WR4(sc, GMAC_MTL_TXQ0_OPERATION_MODE, val);
		/* Wait for flush to complete */
		/* The FTQ bit self-clears when the flush finishes. */
		for (retry = 10000; retry > 0; retry--) {
			val = RD4(sc, GMAC_MTL_TXQ0_OPERATION_MODE);
			if ((val & GMAC_MTL_TXQ0_OPERATION_MODE_FTQ) == 0) {
				break;
			}
			delay(1);
		}
		if (retry == 0) {
			device_printf(sc->sc_dev,
			    "timeout flushing TX queue\n");
		}
	}

	/* Disable transmitter */
	val = RD4(sc, GMAC_MAC_CONFIGURATION);
	val &= ~GMAC_MAC_CONFIGURATION_TE;
	WR4(sc, GMAC_MAC_CONFIGURATION, val);

	/* Disable interrupts */
	eqos_disable_intr(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}
680 | | | 680 | |
681 | static void | | 681 | static void |
682 | eqos_stop(struct ifnet *ifp, int disable) | | 682 | eqos_stop(struct ifnet *ifp, int disable) |
683 | { | | 683 | { |
684 | struct eqos_softc * const sc = ifp->if_softc; | | 684 | struct eqos_softc * const sc = ifp->if_softc; |
685 | | | 685 | |
686 | EQOS_LOCK(sc); | | 686 | EQOS_LOCK(sc); |
687 | eqos_stop_locked(sc, disable); | | 687 | eqos_stop_locked(sc, disable); |
688 | EQOS_UNLOCK(sc); | | 688 | EQOS_UNLOCK(sc); |
689 | } | | 689 | } |
690 | | | 690 | |
/*
 * RX interrupt service: walk the RX ring from sc_rx.cur, harvesting
 * every descriptor the hardware has given back (OWN bit clear).
 * Completed buffers are handed to the network stack, each slot is
 * refilled with a fresh mbuf cluster, and the descriptor is returned
 * to the hardware.  Called from eqos_intr() with the core lock held.
 */
static void
eqos_rxintr(struct eqos_softc *sc, int qid)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	int error, index, len, pkts = 0;
	struct mbuf *m, *m0;
	uint32_t tdes3;	/* word 3 of the RX descriptor (status on writeback) */

	for (index = sc->sc_rx.cur; ; index = RX_NEXT(index)) {
		/* Pick up the hardware's writeback of this descriptor. */
		eqos_dma_sync(sc, sc->sc_rx.desc_map,
		    index, index + 1, RX_DESC_COUNT,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		tdes3 = le32toh(sc->sc_rx.desc_ring[index].tdes3);
		/* Descriptor still owned by the DMA engine: nothing more. */
		if ((tdes3 & EQOS_TDES3_OWN) != 0) {
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, sc->sc_rx.buf_map[index].map,
		    0, sc->sc_rx.buf_map[index].map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat,
		    sc->sc_rx.buf_map[index].map);

		len = tdes3 & EQOS_TDES3_LENGTH_MASK;
		if (len != 0) {
			m = sc->sc_rx.buf_map[index].mbuf;
			m_set_rcvif(m, ifp);
			/* Hardware leaves the FCS on the frame. */
			m->m_flags |= M_HASFCS;
			m->m_pkthdr.len = len;
			m->m_len = len;
			m->m_nextpkt = NULL;

			if_percpuq_enqueue(ifp->if_percpuq, m);
			++pkts;
		}

		/* Refill the slot with a fresh cluster. */
		if ((m0 = eqos_alloc_mbufcl(sc)) != NULL) {
			error = eqos_setup_rxbuf(sc, index, m0);
			if (error != 0) {
				/* XXX hole in RX ring */
			}
		} else {
			if_statinc(ifp, if_ierrors);
		}
		/* Give the refilled descriptor back to the hardware. */
		eqos_dma_sync(sc, sc->sc_rx.desc_map,
		    index, index + 1, RX_DESC_COUNT,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		/*
		 * Advance the RX tail pointer.  NOTE(review): this uses
		 * sc_rx.cur, which is not updated until after the loop,
		 * so every iteration writes the same pre-loop tail value
		 * -- confirm this is the intended tail-pointer protocol.
		 */
		WR4(sc, GMAC_DMA_CHAN0_RX_END_ADDR,
		    (uint32_t)sc->sc_rx.desc_ring_paddr +
		    DESC_OFF(sc->sc_rx.cur));
	}

	/* Resume the scan here on the next interrupt. */
	sc->sc_rx.cur = index;

	if (pkts != 0) {
		rnd_add_uint32(&sc->sc_rndsource, pkts);
	}
}
751 | | | 751 | |
/*
 * TX completion interrupt service: reclaim descriptors from sc_tx.next
 * onward that the hardware has finished with (OWN bit clear), unload
 * and free the transmitted mbufs, and record per-packet completion
 * status from the last descriptor of each frame.  Called from
 * eqos_intr() with the core lock held.
 */
static void
eqos_txintr(struct eqos_softc *sc, int qid)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct eqos_bufmap *bmap;
	struct eqos_dma_desc *desc;
	uint32_t tdes3;
	int i, pkts = 0;

	EQOS_ASSERT_LOCKED(sc);

	for (i = sc->sc_tx.next; sc->sc_tx.queued > 0; i = TX_NEXT(i)) {
		KASSERT(sc->sc_tx.queued > 0);
		KASSERT(sc->sc_tx.queued <= TX_DESC_COUNT);
		eqos_dma_sync(sc, sc->sc_tx.desc_map,
		    i, i + 1, TX_DESC_COUNT,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_tx.desc_ring[i];
		tdes3 = le32toh(desc->tdes3);
		/* Still owned by the DMA engine: stop reclaiming here. */
		if ((tdes3 & EQOS_TDES3_OWN) != 0) {
			break;
		}
		bmap = &sc->sc_tx.buf_map[i];
		/* Only the first descriptor of a packet carries the mbuf. */
		if (bmap->mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmat, bmap->map,
			    0, bmap->map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, bmap->map);
			m_freem(bmap->mbuf);
			bmap->mbuf = NULL;
			++pkts;
		}

		/* Clear the descriptor for reuse and flush it out. */
		eqos_setup_txdesc(sc, i, 0, 0, 0, 0);
		eqos_dma_sync(sc, sc->sc_tx.desc_map,
		    i, i + 1, TX_DESC_COUNT,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* At least one slot freed, so transmission can resume. */
		ifp->if_flags &= ~IFF_OACTIVE;

		/* Last descriptor in a packet contains DMA status */
		if ((tdes3 & EQOS_TDES3_LD) != 0) {
			if ((tdes3 & EQOS_TDES3_DE) != 0) {
				device_printf(sc->sc_dev,
				    "TX [%u] desc error: 0x%08x\n",
				    i, tdes3);
				if_statinc(ifp, if_oerrors);
			} else if ((tdes3 & EQOS_TDES3_ES) != 0) {
				device_printf(sc->sc_dev,
				    "TX [%u] tx error: 0x%08x\n",
				    i, tdes3);
				if_statinc(ifp, if_oerrors);
			} else {
				if_statinc(ifp, if_opackets);
			}
		}

	}

	/* Resume reclaiming here on the next interrupt. */
	sc->sc_tx.next = i;

	if (pkts != 0) {
		rnd_add_uint32(&sc->sc_rndsource, pkts);
	}
}
817 | | | 817 | |
/*
 * Drain the interface send queue into the TX descriptor ring and, if
 * anything was queued, advance the TX tail pointer to start the DMA
 * engine on the new descriptors.  Sets IFF_OACTIVE when the ring
 * cannot accept a maximally-fragmented packet.  Called with the TX
 * lock held.
 */
static void
eqos_start_locked(struct eqos_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct mbuf *m;
	int cnt, nsegs, start;

	EQOS_ASSERT_TXLOCKED(sc);

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (cnt = 0, start = sc->sc_tx.cur; ; cnt++) {
		/* Leave room for a worst-case (TX_MAX_SEGS) packet. */
		if (sc->sc_tx.queued >= TX_DESC_COUNT - TX_MAX_SEGS) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* Peek first; only dequeue once the map/setup succeeds. */
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL) {
			break;
		}

		nsegs = eqos_setup_txbuf(sc, sc->sc_tx.cur, m);
		if (nsegs <= 0) {
			if (nsegs == -1) {
				/* Temporary shortage: retry later. */
				ifp->if_flags |= IFF_OACTIVE;
			} else if (nsegs == -2) {
				/* Unrecoverable packet: drop it. */
				IFQ_DEQUEUE(&ifp->if_snd, m);
				m_freem(m);
			}
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		bpf_mtap(ifp, m, BPF_D_OUT);

		sc->sc_tx.cur = TX_SKIP(sc->sc_tx.cur, nsegs);
	}

	if (cnt != 0) {
		/* Flush the new descriptors before the hardware sees them. */
		eqos_dma_sync(sc, sc->sc_tx.desc_map,
		    start, sc->sc_tx.cur, TX_DESC_COUNT,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Start and run TX DMA */
		WR4(sc, GMAC_DMA_CHAN0_TX_END_ADDR,
		    (uint32_t)sc->sc_tx.desc_ring_paddr +
		    DESC_OFF(sc->sc_tx.cur));
	}
}
869 | | | 869 | |
870 | static void | | 870 | static void |
871 | eqos_start(struct ifnet *ifp) | | 871 | eqos_start(struct ifnet *ifp) |
872 | { | | 872 | { |
873 | struct eqos_softc *sc = ifp->if_softc; | | 873 | struct eqos_softc *sc = ifp->if_softc; |
874 | | | 874 | |
875 | EQOS_TXLOCK(sc); | | 875 | EQOS_TXLOCK(sc); |
876 | eqos_start_locked(sc); | | 876 | eqos_start_locked(sc); |
877 | EQOS_TXUNLOCK(sc); | | 877 | EQOS_TXUNLOCK(sc); |
878 | } | | 878 | } |
879 | | | 879 | |
/*
 * Drain and acknowledge MTL (MAC Transaction Layer) interrupt status:
 * count FIFO debug events and queue-0 RX overflow / TX underflow
 * conditions, writing the status bits back to clear them.
 */
static void
eqos_intr_mtl(struct eqos_softc *sc, uint32_t mtl_status)
{
	uint32_t debug_data __unused = 0, ictrl = 0;

	if (mtl_status == 0)
		return;

	/* Drain the errors reported by MTL_INTERRUPT_STATUS */
	sc->sc_ev_mtl.ev_count++;

	if ((mtl_status & GMAC_MTL_INTERRUPT_STATUS_DBGIS) != 0) {
		debug_data = RD4(sc, GMAC_MTL_FIFO_DEBUG_DATA);
		sc->sc_ev_mtl_debugdata.ev_count++;
	}
	if ((mtl_status & GMAC_MTL_INTERRUPT_STATUS_Q0IS) != 0) {
		uint32_t new_status = 0;

		ictrl = RD4(sc, GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS);
		if ((ictrl & GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_RXOVFIS) != 0) {
			new_status |= GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_RXOVFIS;
			sc->sc_ev_mtl_rxovfis.ev_count++;
		}
		if ((ictrl & GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_TXUNFIS) != 0) {
			new_status |= GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_TXUNFIS;
			sc->sc_ev_mtl_txovfis.ev_count++;
		}
		if (new_status) {
			/*
			 * Preserve the interrupt-enable bits while
			 * writing the status bits back to clear them.
			 */
			new_status |= (ictrl &
			    (GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_RXOIE|
			     GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_TXUIE));
			WR4(sc, GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS, new_status);
		}
	}
#ifdef DEBUG_LOUD
	device_printf(sc->sc_dev,
	    "GMAC_MTL_INTERRUPT_STATUS = 0x%08X, "
	    "GMAC_MTL_FIFO_DEBUG_DATA = 0x%08X, "
	    "GMAC_MTL_INTERRUPT_STATUS_Q0IS = 0x%08X\n",
	    mtl_status, debug_data, ictrl);
#endif
}
922 | | | 922 | |
/*
 * Top-level interrupt handler.  Reads and acknowledges the MAC, MTL,
 * and DMA channel 0 status registers, dispatches RX and TX completion
 * processing under the core lock, and counts RX/TX status events for
 * diagnostics.  Always claims the interrupt (returns 1).
 */
int
eqos_intr(void *arg)
{
	struct eqos_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint32_t mac_status, mtl_status, dma_status, rx_tx_status;

	sc->sc_ev_intr.ev_count++;

	/* Consider only MAC interrupt sources that are enabled. */
	mac_status = RD4(sc, GMAC_MAC_INTERRUPT_STATUS);
	mac_status &= RD4(sc, GMAC_MAC_INTERRUPT_ENABLE);

	if (mac_status) {
		sc->sc_ev_mac.ev_count++;
#ifdef DEBUG_LOUD
		device_printf(sc->sc_dev,
		    "GMAC_MAC_INTERRUPT_STATUS = 0x%08X\n", mac_status);
#endif
	}

	mtl_status = RD4(sc, GMAC_MTL_INTERRUPT_STATUS);
	eqos_intr_mtl(sc, mtl_status);

	/* Masked DMA status; write back to acknowledge. */
	dma_status = RD4(sc, GMAC_DMA_CHAN0_STATUS);
	dma_status &= RD4(sc, GMAC_DMA_CHAN0_INTR_ENABLE);
	if (dma_status) {
		WR4(sc, GMAC_DMA_CHAN0_STATUS, dma_status);
	}

	EQOS_LOCK(sc);
	if ((dma_status & GMAC_DMA_CHAN0_STATUS_RI) != 0) {
		eqos_rxintr(sc, 0);
		sc->sc_ev_rxintr.ev_count++;
	}

	if ((dma_status & GMAC_DMA_CHAN0_STATUS_TI) != 0) {
		eqos_txintr(sc, 0);
		/* TX slots were freed; restart the transmit path. */
		if_schedule_deferred_start(ifp);
		sc->sc_ev_txintr.ev_count++;
	}
	EQOS_UNLOCK(sc);

#ifdef DEBUG_LOUD
	if ((mac_status | mtl_status | dma_status) == 0) {
		device_printf(sc->sc_dev, "spurious interrupt?!\n");
	}
#endif

	/* Count per-condition RX/TX status events for diagnostics. */
	rx_tx_status = RD4(sc, GMAC_MAC_RX_TX_STATUS);
	if (rx_tx_status) {
		sc->sc_ev_status.ev_count++;
		if ((rx_tx_status & GMAC_MAC_RX_TX_STATUS_RWT) != 0)
			sc->sc_ev_rwt.ev_count++;
		if ((rx_tx_status & GMAC_MAC_RX_TX_STATUS_EXCOL) != 0)
			sc->sc_ev_excol.ev_count++;
		if ((rx_tx_status & GMAC_MAC_RX_TX_STATUS_LCOL) != 0)
			sc->sc_ev_lcol.ev_count++;
		if ((rx_tx_status & GMAC_MAC_RX_TX_STATUS_EXDEF) != 0)
			sc->sc_ev_exdef.ev_count++;
		if ((rx_tx_status & GMAC_MAC_RX_TX_STATUS_LCARR) != 0)
			sc->sc_ev_lcarr.ev_count++;
		if ((rx_tx_status & GMAC_MAC_RX_TX_STATUS_NCARR) != 0)
			sc->sc_ev_ncarr.ev_count++;
		if ((rx_tx_status & GMAC_MAC_RX_TX_STATUS_TJT) != 0)
			sc->sc_ev_tjt.ev_count++;
#ifdef DEBUG_LOUD
		device_printf(sc->sc_dev, "GMAC_MAC_RX_TX_STATUS = 0x%08x\n",
		    rx_tx_status);
#endif
	}

	return 1;
}
996 | | | 996 | |
997 | static int | | 997 | static int |
998 | eqos_ioctl(struct ifnet *ifp, u_long cmd, void *data) | | 998 | eqos_ioctl(struct ifnet *ifp, u_long cmd, void *data) |
999 | { | | 999 | { |
1000 | struct eqos_softc *sc = ifp->if_softc; | | 1000 | struct eqos_softc *sc = ifp->if_softc; |
1001 | int error, s; | | 1001 | int error, s; |
1002 | | | 1002 | |
1003 | #ifndef EQOS_MPSAFE | | 1003 | #ifndef EQOS_MPSAFE |
1004 | s = splnet(); | | 1004 | s = splnet(); |
1005 | #endif | | 1005 | #endif |
1006 | | | 1006 | |
1007 | switch (cmd) { | | 1007 | switch (cmd) { |
1008 | default: | | 1008 | default: |
1009 | #ifdef EQOS_MPSAFE | | 1009 | #ifdef EQOS_MPSAFE |
1010 | s = splnet(); | | 1010 | s = splnet(); |
1011 | #endif | | 1011 | #endif |
1012 | error = ether_ioctl(ifp, cmd, data); | | 1012 | error = ether_ioctl(ifp, cmd, data); |
1013 | #ifdef EQOS_MPSAFE | | 1013 | #ifdef EQOS_MPSAFE |
1014 | splx(s); | | 1014 | splx(s); |
1015 | #endif | | 1015 | #endif |
1016 | if (error != ENETRESET) | | 1016 | if (error != ENETRESET) |
1017 | break; | | 1017 | break; |
1018 | | | 1018 | |
1019 | error = 0; | | 1019 | error = 0; |
1020 | | | 1020 | |
1021 | if (cmd == SIOCSIFCAP) | | 1021 | if (cmd == SIOCSIFCAP) |
1022 | error = (*ifp->if_init)(ifp); | | 1022 | error = (*ifp->if_init)(ifp); |
1023 | else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI) | | 1023 | else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI) |
1024 | ; | | 1024 | ; |
1025 | else if ((ifp->if_flags & IFF_RUNNING) != 0) { | | 1025 | else if ((ifp->if_flags & IFF_RUNNING) != 0) { |
1026 | EQOS_LOCK(sc); | | 1026 | EQOS_LOCK(sc); |
1027 | eqos_setup_rxfilter(sc); | | 1027 | eqos_setup_rxfilter(sc); |
1028 | EQOS_UNLOCK(sc); | | 1028 | EQOS_UNLOCK(sc); |
1029 | } | | 1029 | } |
1030 | break; | | 1030 | break; |
1031 | } | | 1031 | } |
1032 | | | 1032 | |
1033 | #ifndef EQOS_MPSAFE | | 1033 | #ifndef EQOS_MPSAFE |
1034 | splx(s); | | 1034 | splx(s); |
1035 | #endif | | 1035 | #endif |
1036 | | | 1036 | |
1037 | return error; | | 1037 | return error; |
1038 | } | | 1038 | } |
1039 | | | 1039 | |
1040 | static void | | 1040 | static void |
1041 | eqos_get_eaddr(struct eqos_softc *sc, uint8_t *eaddr) | | 1041 | eqos_get_eaddr(struct eqos_softc *sc, uint8_t *eaddr) |
1042 | { | | 1042 | { |
1043 | prop_dictionary_t prop = device_properties(sc->sc_dev); | | 1043 | prop_dictionary_t prop = device_properties(sc->sc_dev); |
1044 | uint32_t maclo, machi; | | 1044 | uint32_t maclo, machi; |
1045 | prop_data_t eaprop; | | 1045 | prop_data_t eaprop; |
1046 | | | 1046 | |
1047 | eaprop = prop_dictionary_get(prop, "mac-address"); | | 1047 | eaprop = prop_dictionary_get(prop, "mac-address"); |
1048 | if (eaprop != NULL) { | | 1048 | if (eaprop != NULL) { |
1049 | KASSERT(prop_object_type(eaprop) == PROP_TYPE_DATA); | | 1049 | KASSERT(prop_object_type(eaprop) == PROP_TYPE_DATA); |
1050 | KASSERT(prop_data_size(eaprop) == ETHER_ADDR_LEN); | | 1050 | KASSERT(prop_data_size(eaprop) == ETHER_ADDR_LEN); |
1051 | memcpy(eaddr, prop_data_value(eaprop), | | 1051 | memcpy(eaddr, prop_data_value(eaprop), |
1052 | ETHER_ADDR_LEN); | | 1052 | ETHER_ADDR_LEN); |
1053 | return; | | 1053 | return; |
1054 | } | | 1054 | } |
1055 | | | 1055 | |
1056 | maclo = htobe32(RD4(sc, GMAC_MAC_ADDRESS0_LOW)); | | 1056 | maclo = htobe32(RD4(sc, GMAC_MAC_ADDRESS0_LOW)); |
1057 | machi = htobe16(RD4(sc, GMAC_MAC_ADDRESS0_HIGH) & 0xFFFF); | | 1057 | machi = htobe16(RD4(sc, GMAC_MAC_ADDRESS0_HIGH) & 0xFFFF); |
1058 | | | 1058 | |
1059 | if (maclo == 0xFFFFFFFF && machi == 0xFFFF) { | | 1059 | if (maclo == 0xFFFFFFFF && machi == 0xFFFF) { |
1060 | /* Create one */ | | 1060 | /* Create one */ |
1061 | maclo = 0x00f2 | (cprng_strong32() & 0xffff0000); | | 1061 | maclo = 0x00f2 | (cprng_strong32() & 0xffff0000); |
1062 | machi = cprng_strong32() & 0xffff; | | 1062 | machi = cprng_strong32() & 0xffff; |
1063 | } | | 1063 | } |
1064 | | | 1064 | |
1065 | eaddr[0] = maclo & 0xff; | | 1065 | eaddr[0] = maclo & 0xff; |
1066 | eaddr[1] = (maclo >> 8) & 0xff; | | 1066 | eaddr[1] = (maclo >> 8) & 0xff; |
1067 | eaddr[2] = (maclo >> 16) & 0xff; | | 1067 | eaddr[2] = (maclo >> 16) & 0xff; |
1068 | eaddr[3] = (maclo >> 24) & 0xff; | | 1068 | eaddr[3] = (maclo >> 24) & 0xff; |
1069 | eaddr[4] = machi & 0xff; | | 1069 | eaddr[4] = machi & 0xff; |
1070 | eaddr[5] = (machi >> 8) & 0xff; | | 1070 | eaddr[5] = (machi >> 8) & 0xff; |
1071 | } | | 1071 | } |
1072 | | | 1072 | |
1073 | static void | | 1073 | static void |
1074 | eqos_axi_configure(struct eqos_softc *sc) | | 1074 | eqos_axi_configure(struct eqos_softc *sc) |
1075 | { | | 1075 | { |
1076 | prop_dictionary_t prop = device_properties(sc->sc_dev); | | 1076 | prop_dictionary_t prop = device_properties(sc->sc_dev); |
1077 | uint32_t val; | | 1077 | uint32_t val; |
1078 | u_int uival; | | 1078 | u_int uival; |
1079 | bool bval; | | 1079 | bool bval; |
1080 | | | 1080 | |
1081 | val = RD4(sc, GMAC_DMA_SYSBUS_MODE); | | 1081 | val = RD4(sc, GMAC_DMA_SYSBUS_MODE); |
1082 | if (prop_dictionary_get_bool(prop, "snps,mixed-burst", &bval) && bval) { | | 1082 | if (prop_dictionary_get_bool(prop, "snps,mixed-burst", &bval) && bval) { |
1083 | val |= GMAC_DMA_SYSBUS_MODE_MB; | | 1083 | val |= GMAC_DMA_SYSBUS_MODE_MB; |
1084 | } | | 1084 | } |
1085 | if (prop_dictionary_get_bool(prop, "snps,fixed-burst", &bval) && bval) { | | 1085 | if (prop_dictionary_get_bool(prop, "snps,fixed-burst", &bval) && bval) { |
1086 | val |= GMAC_DMA_SYSBUS_MODE_FB; | | 1086 | val |= GMAC_DMA_SYSBUS_MODE_FB; |
1087 | } | | 1087 | } |
1088 | if (prop_dictionary_get_uint(prop, "snps,wr_osr_lmt", &uival)) { | | 1088 | if (prop_dictionary_get_uint(prop, "snps,wr_osr_lmt", &uival)) { |
1089 | val &= ~GMAC_DMA_SYSBUS_MODE_WR_OSR_LMT_MASK; | | 1089 | val &= ~GMAC_DMA_SYSBUS_MODE_WR_OSR_LMT_MASK; |
1090 | val |= uival << GMAC_DMA_SYSBUS_MODE_WR_OSR_LMT_SHIFT; | | 1090 | val |= uival << GMAC_DMA_SYSBUS_MODE_WR_OSR_LMT_SHIFT; |
1091 | } | | 1091 | } |
1092 | if (prop_dictionary_get_uint(prop, "snps,rd_osr_lmt", &uival)) { | | 1092 | if (prop_dictionary_get_uint(prop, "snps,rd_osr_lmt", &uival)) { |
1093 | val &= ~GMAC_DMA_SYSBUS_MODE_RD_OSR_LMT_MASK; | | 1093 | val &= ~GMAC_DMA_SYSBUS_MODE_RD_OSR_LMT_MASK; |
1094 | val |= uival << GMAC_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT; | | 1094 | val |= uival << GMAC_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT; |
1095 | } | | 1095 | } |
1096 | | | 1096 | |
1097 | if (!EQOS_HW_FEATURE_ADDR64_32BIT(sc)) { | | 1097 | if (!EQOS_HW_FEATURE_ADDR64_32BIT(sc)) { |
1098 | val |= GMAC_DMA_SYSBUS_MODE_EAME; | | 1098 | val |= GMAC_DMA_SYSBUS_MODE_EAME; |
1099 | } | | 1099 | } |
1100 | | | 1100 | |
1101 | /* XXX */ | | 1101 | /* XXX */ |
1102 | val |= GMAC_DMA_SYSBUS_MODE_BLEN16; | | 1102 | val |= GMAC_DMA_SYSBUS_MODE_BLEN16; |
1103 | val |= GMAC_DMA_SYSBUS_MODE_BLEN8; | | 1103 | val |= GMAC_DMA_SYSBUS_MODE_BLEN8; |
1104 | val |= GMAC_DMA_SYSBUS_MODE_BLEN4; | | 1104 | val |= GMAC_DMA_SYSBUS_MODE_BLEN4; |
1105 | | | 1105 | |
1106 | WR4(sc, GMAC_DMA_SYSBUS_MODE, val); | | 1106 | WR4(sc, GMAC_DMA_SYSBUS_MODE, val); |
1107 | } | | 1107 | } |
1108 | | | 1108 | |
/*
 * eqos_setup_dma --
 *	Allocate, map, and load the TX and RX descriptor rings, create
 *	one bus_dma map per ring slot, and pre-load every RX slot with
 *	a freshly allocated mbuf cluster.
 *
 *	Returns 0 on success or an errno from the bus_dma(9) layer
 *	(ENOMEM if an RX mbuf cluster cannot be allocated).
 *
 *	NOTE(review): on failure, resources allocated by earlier steps
 *	are not released before returning — presumably acceptable
 *	because this only runs from attach and failure is fatal there;
 *	confirm before calling from any other path.
 *
 *	NOTE(review): the `qid' parameter is not referenced in the body;
 *	only the single sc_tx/sc_rx queue pair is set up.
 */
static int
eqos_setup_dma(struct eqos_softc *sc, int qid)
{
	struct mbuf *m;
	int error, nsegs, i;

	/* Setup TX ring */
	error = bus_dmamap_create(sc->sc_dmat, TX_DESC_SIZE, 1, TX_DESC_SIZE,
	    DESC_BOUNDARY, BUS_DMA_WAITOK, &sc->sc_tx.desc_map);
	if (error) {
		return error;
	}
	error = bus_dmamem_alloc(sc->sc_dmat, TX_DESC_SIZE, DESC_ALIGN,
	    DESC_BOUNDARY, &sc->sc_tx.desc_dmaseg, 1, &nsegs, BUS_DMA_WAITOK);
	if (error) {
		return error;
	}
	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_tx.desc_dmaseg, nsegs,
	    TX_DESC_SIZE, (void *)&sc->sc_tx.desc_ring, BUS_DMA_WAITOK);
	if (error) {
		return error;
	}
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_tx.desc_map,
	    sc->sc_tx.desc_ring, TX_DESC_SIZE, NULL, BUS_DMA_WAITOK);
	if (error) {
		return error;
	}
	/* Record the ring's bus address for programming the DMA engine. */
	sc->sc_tx.desc_ring_paddr = sc->sc_tx.desc_map->dm_segs[0].ds_addr;

	/* Zero the ring and push the zeros to the device's view. */
	memset(sc->sc_tx.desc_ring, 0, TX_DESC_SIZE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_tx.desc_map, 0, TX_DESC_SIZE,
	    BUS_DMASYNC_PREWRITE);

	sc->sc_tx.queued = TX_DESC_COUNT;
	/* One DMA map per TX slot; descriptors start out empty. */
	for (i = 0; i < TX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    TX_MAX_SEGS, MCLBYTES, 0, BUS_DMA_WAITOK,
		    &sc->sc_tx.buf_map[i].map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "cannot create TX buffer map\n");
			return error;
		}
		eqos_setup_txdesc(sc, i, 0, 0, 0, 0);
	}

	/* Setup RX ring */
	error = bus_dmamap_create(sc->sc_dmat, RX_DESC_SIZE, 1, RX_DESC_SIZE,
	    DESC_BOUNDARY, BUS_DMA_WAITOK, &sc->sc_rx.desc_map);
	if (error) {
		return error;
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RX_DESC_SIZE, DESC_ALIGN,
	    DESC_BOUNDARY, &sc->sc_rx.desc_dmaseg, 1, &nsegs, BUS_DMA_WAITOK);
	if (error) {
		return error;
	}
	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_rx.desc_dmaseg, nsegs,
	    RX_DESC_SIZE, (void *)&sc->sc_rx.desc_ring, BUS_DMA_WAITOK);
	if (error) {
		return error;
	}
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rx.desc_map,
	    sc->sc_rx.desc_ring, RX_DESC_SIZE, NULL, BUS_DMA_WAITOK);
	if (error) {
		return error;
	}
	sc->sc_rx.desc_ring_paddr = sc->sc_rx.desc_map->dm_segs[0].ds_addr;

	memset(sc->sc_rx.desc_ring, 0, RX_DESC_SIZE);

	/* One DMA map and one mbuf cluster per RX slot. */
	for (i = 0; i < RX_DESC_COUNT; i++) {
		/*
		 * NOTE(review): nsegs here is RX_DESC_COUNT, whereas the
		 * TX side uses TX_MAX_SEGS; an RX buffer is a single
		 * MCLBYTES cluster, so 1 segment would appear sufficient —
		 * confirm whether RX_DESC_COUNT is intentional.
		 */
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    RX_DESC_COUNT, MCLBYTES, 0, BUS_DMA_WAITOK,
		    &sc->sc_rx.buf_map[i].map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "cannot create RX buffer map\n");
			return error;
		}
		if ((m = eqos_alloc_mbufcl(sc)) == NULL) {
			device_printf(sc->sc_dev, "cannot allocate RX mbuf\n");
			return ENOMEM;
		}
		error = eqos_setup_rxbuf(sc, i, m);
		if (error != 0) {
			device_printf(sc->sc_dev, "cannot create RX buffer\n");
			return error;
		}
	}
	/* Flush the fully populated RX ring to the device. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rx.desc_map,
	    0, sc->sc_rx.desc_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * NOTE(review): %lX assumes bus_addr_t is unsigned-long-sized;
	 * verify on 32-bit platforms with 64-bit bus addresses.
	 */
	aprint_debug_dev(sc->sc_dev, "TX ring @ 0x%lX, RX ring @ 0x%lX\n",
	    sc->sc_tx.desc_ring_paddr, sc->sc_rx.desc_ring_paddr);

	return 0;
}
1208 | | | 1208 | |
1209 | int | | 1209 | int |
1210 | eqos_attach(struct eqos_softc *sc) | | 1210 | eqos_attach(struct eqos_softc *sc) |
1211 | { | | 1211 | { |
1212 | struct mii_data *mii = &sc->sc_mii; | | 1212 | struct mii_data *mii = &sc->sc_mii; |
1213 | struct ifnet *ifp = &sc->sc_ec.ec_if; | | 1213 | struct ifnet *ifp = &sc->sc_ec.ec_if; |
1214 | uint8_t eaddr[ETHER_ADDR_LEN]; | | 1214 | uint8_t eaddr[ETHER_ADDR_LEN]; |
1215 | u_int userver, snpsver; | | 1215 | u_int userver, snpsver; |
1216 | int mii_flags = 0; | | 1216 | int mii_flags = 0; |
1217 | int error; | | 1217 | int error; |
1218 | int n; | | 1218 | int n; |
1219 | | | 1219 | |
1220 | const uint32_t ver = RD4(sc, GMAC_MAC_VERSION); | | 1220 | const uint32_t ver = RD4(sc, GMAC_MAC_VERSION); |
1221 | userver = (ver & GMAC_MAC_VERSION_USERVER_MASK) >> | | 1221 | userver = (ver & GMAC_MAC_VERSION_USERVER_MASK) >> |
1222 | GMAC_MAC_VERSION_USERVER_SHIFT; | | 1222 | GMAC_MAC_VERSION_USERVER_SHIFT; |
1223 | snpsver = ver & GMAC_MAC_VERSION_SNPSVER_MASK; | | 1223 | snpsver = ver & GMAC_MAC_VERSION_SNPSVER_MASK; |
1224 | | | 1224 | |
1225 | if (snpsver != 0x51) { | | 1225 | if (snpsver != 0x51) { |
1226 | aprint_error(": EQOS version 0x%02xx not supported\n", | | 1226 | aprint_error(": EQOS version 0x%02xx not supported\n", |
1227 | snpsver); | | 1227 | snpsver); |
1228 | return ENXIO; | | 1228 | return ENXIO; |
1229 | } | | 1229 | } |
1230 | | | 1230 | |
1231 | if (sc->sc_csr_clock < 20000000) { | | 1231 | if (sc->sc_csr_clock < 20000000) { |
1232 | aprint_error(": CSR clock too low\n"); | | 1232 | aprint_error(": CSR clock too low\n"); |
1233 | return EINVAL; | | 1233 | return EINVAL; |
1234 | } else if (sc->sc_csr_clock < 35000000) { | | 1234 | } else if (sc->sc_csr_clock < 35000000) { |
1235 | sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_20_35; | | 1235 | sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_20_35; |
1236 | } else if (sc->sc_csr_clock < 60000000) { | | 1236 | } else if (sc->sc_csr_clock < 60000000) { |
1237 | sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_35_60; | | 1237 | sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_35_60; |
1238 | } else if (sc->sc_csr_clock < 100000000) { | | 1238 | } else if (sc->sc_csr_clock < 100000000) { |
1239 | sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_60_100; | | 1239 | sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_60_100; |
1240 | } else if (sc->sc_csr_clock < 150000000) { | | 1240 | } else if (sc->sc_csr_clock < 150000000) { |
1241 | sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_100_150; | | 1241 | sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_100_150; |
1242 | } else if (sc->sc_csr_clock < 250000000) { | | 1242 | } else if (sc->sc_csr_clock < 250000000) { |
1243 | sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_150_250; | | 1243 | sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_150_250; |
1244 | } else if (sc->sc_csr_clock < 300000000) { | | 1244 | } else if (sc->sc_csr_clock < 300000000) { |
1245 | sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_300_500; | | 1245 | sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_300_500; |
1246 | } else if (sc->sc_csr_clock < 800000000) { | | 1246 | } else if (sc->sc_csr_clock < 800000000) { |
1247 | sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_500_800; | | 1247 | sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_500_800; |
1248 | } else { | | 1248 | } else { |
1249 | aprint_error(": CSR clock too high\n"); | | 1249 | aprint_error(": CSR clock too high\n"); |
1250 | return EINVAL; | | 1250 | return EINVAL; |
1251 | } | | 1251 | } |
1252 | | | 1252 | |
1253 | for (n = 0; n < 4; n++) { | | 1253 | for (n = 0; n < 4; n++) { |
1254 | sc->sc_hw_feature[n] = RD4(sc, GMAC_MAC_HW_FEATURE(n)); | | 1254 | sc->sc_hw_feature[n] = RD4(sc, GMAC_MAC_HW_FEATURE(n)); |
1255 | } | | 1255 | } |
1256 | | | 1256 | |
1257 | aprint_naive("\n"); | | 1257 | aprint_naive("\n"); |
1258 | aprint_normal(": DesignWare EQOS ver 0x%02x (0x%02x)\n", | | 1258 | aprint_normal(": DesignWare EQOS ver 0x%02x (0x%02x)\n", |
1259 | snpsver, userver); | | 1259 | snpsver, userver); |
1260 | aprint_verbose_dev(sc->sc_dev, "hw features %08x %08x %08x %08x\n", | | 1260 | aprint_verbose_dev(sc->sc_dev, "hw features %08x %08x %08x %08x\n", |
1261 | sc->sc_hw_feature[0], sc->sc_hw_feature[1], | | 1261 | sc->sc_hw_feature[0], sc->sc_hw_feature[1], |
1262 | sc->sc_hw_feature[2], sc->sc_hw_feature[3]); | | 1262 | sc->sc_hw_feature[2], sc->sc_hw_feature[3]); |
1263 | | | 1263 | |
1264 | if (EQOS_HW_FEATURE_ADDR64_32BIT(sc)) { | | 1264 | if (EQOS_HW_FEATURE_ADDR64_32BIT(sc)) { |
1265 | bus_dma_tag_t ntag; | | 1265 | bus_dma_tag_t ntag; |
1266 | | | 1266 | |
1267 | error = bus_dmatag_subregion(sc->sc_dmat, 0, UINT32_MAX, | | 1267 | error = bus_dmatag_subregion(sc->sc_dmat, 0, UINT32_MAX, |
1268 | &ntag, 0); | | 1268 | &ntag, 0); |
1269 | if (error) { | | 1269 | if (error) { |
1270 | aprint_error_dev(sc->sc_dev, | | 1270 | aprint_error_dev(sc->sc_dev, |
1271 | "failed to restrict DMA: %d\n", error); | | 1271 | "failed to restrict DMA: %d\n", error); |
1272 | return error; | | 1272 | return error; |
1273 | } | | 1273 | } |
1274 | aprint_verbose_dev(sc->sc_dev, "using 32-bit DMA\n"); | | 1274 | aprint_verbose_dev(sc->sc_dev, "using 32-bit DMA\n"); |
1275 | sc->sc_dmat = ntag; | | 1275 | sc->sc_dmat = ntag; |
1276 | } | | 1276 | } |
1277 | | | 1277 | |
1278 | mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NET); | | 1278 | mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NET); |
1279 | mutex_init(&sc->sc_txlock, MUTEX_DEFAULT, IPL_NET); | | 1279 | mutex_init(&sc->sc_txlock, MUTEX_DEFAULT, IPL_NET); |
1280 | callout_init(&sc->sc_stat_ch, CALLOUT_FLAGS); | | 1280 | callout_init(&sc->sc_stat_ch, CALLOUT_FLAGS); |
1281 | callout_setfunc(&sc->sc_stat_ch, eqos_tick, sc); | | 1281 | callout_setfunc(&sc->sc_stat_ch, eqos_tick, sc); |
1282 | | | 1282 | |
1283 | eqos_get_eaddr(sc, eaddr); | | 1283 | eqos_get_eaddr(sc, eaddr); |
1284 | aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n", ether_sprintf(eaddr)); | | 1284 | aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n", ether_sprintf(eaddr)); |
1285 | | | 1285 | |
1286 | /* Soft reset EMAC core */ | | 1286 | /* Soft reset EMAC core */ |
1287 | error = eqos_reset(sc); | | 1287 | error = eqos_reset(sc); |
1288 | if (error != 0) { | | 1288 | if (error != 0) { |
1289 | return error; | | 1289 | return error; |
1290 | } | | 1290 | } |
1291 | | | 1291 | |
1292 | /* Configure AXI Bus mode parameters */ | | 1292 | /* Configure AXI Bus mode parameters */ |
1293 | eqos_axi_configure(sc); | | 1293 | eqos_axi_configure(sc); |
1294 | | | 1294 | |
1295 | /* Setup DMA descriptors */ | | 1295 | /* Setup DMA descriptors */ |
1296 | if (eqos_setup_dma(sc, 0) != 0) { | | 1296 | if (eqos_setup_dma(sc, 0) != 0) { |
1297 | aprint_error_dev(sc->sc_dev, "failed to setup DMA descriptors\n"); | | 1297 | aprint_error_dev(sc->sc_dev, "failed to setup DMA descriptors\n"); |
1298 | return EINVAL; | | 1298 | return EINVAL; |
1299 | } | | 1299 | } |
1300 | | | 1300 | |
1301 | /* Setup ethernet interface */ | | 1301 | /* Setup ethernet interface */ |
1302 | ifp->if_softc = sc; | | 1302 | ifp->if_softc = sc; |
1303 | snprintf(ifp->if_xname, IFNAMSIZ, "%s", device_xname(sc->sc_dev)); | | 1303 | snprintf(ifp->if_xname, IFNAMSIZ, "%s", device_xname(sc->sc_dev)); |