| @@ -1,1465 +1,1480 @@ | | | @@ -1,1465 +1,1480 @@ |
1 | /* $NetBSD: sunxi_emac.c,v 1.10 2017/11/30 21:36:11 jmcneill Exp $ */ | | 1 | /* $NetBSD: sunxi_emac.c,v 1.11 2017/12/01 17:47:51 jmcneill Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 2016-2017 Jared McNeill <jmcneill@invisible.ca> | | 4 | * Copyright (c) 2016-2017 Jared McNeill <jmcneill@invisible.ca> |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * Redistribution and use in source and binary forms, with or without | | 7 | * Redistribution and use in source and binary forms, with or without |
8 | * modification, are permitted provided that the following conditions | | 8 | * modification, are permitted provided that the following conditions |
9 | * are met: | | 9 | * are met: |
10 | * 1. Redistributions of source code must retain the above copyright | | 10 | * 1. Redistributions of source code must retain the above copyright |
11 | * notice, this list of conditions and the following disclaimer. | | 11 | * notice, this list of conditions and the following disclaimer. |
12 | * 2. Redistributions in binary form must reproduce the above copyright | | 12 | * 2. Redistributions in binary form must reproduce the above copyright |
13 | * notice, this list of conditions and the following disclaimer in the | | 13 | * notice, this list of conditions and the following disclaimer in the |
14 | * documentation and/or other materials provided with the distribution. | | 14 | * documentation and/or other materials provided with the distribution. |
15 | * | | 15 | * |
16 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | | 16 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
17 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | | 17 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
18 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | | 18 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
19 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, | | 19 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
20 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, | | 20 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, |
21 | * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | | 21 | * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
22 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED | | 22 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED |
23 | * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, | | 23 | * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, |
24 | * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | | 24 | * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
25 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | | 25 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
26 | * SUCH DAMAGE. | | 26 | * SUCH DAMAGE. |
27 | */ | | 27 | */ |
28 | | | 28 | |
29 | /* | | 29 | /* |
30 | * Allwinner Gigabit Ethernet MAC (EMAC) controller | | 30 | * Allwinner Gigabit Ethernet MAC (EMAC) controller |
31 | */ | | 31 | */ |
32 | | | 32 | |
33 | #include "opt_net_mpsafe.h" | | 33 | #include "opt_net_mpsafe.h" |
34 | | | 34 | |
35 | #include <sys/cdefs.h> | | 35 | #include <sys/cdefs.h> |
36 | __KERNEL_RCSID(0, "$NetBSD: sunxi_emac.c,v 1.10 2017/11/30 21:36:11 jmcneill Exp $"); | | 36 | __KERNEL_RCSID(0, "$NetBSD: sunxi_emac.c,v 1.11 2017/12/01 17:47:51 jmcneill Exp $"); |
37 | | | 37 | |
38 | #include <sys/param.h> | | 38 | #include <sys/param.h> |
39 | #include <sys/bus.h> | | 39 | #include <sys/bus.h> |
40 | #include <sys/device.h> | | 40 | #include <sys/device.h> |
41 | #include <sys/intr.h> | | 41 | #include <sys/intr.h> |
42 | #include <sys/systm.h> | | 42 | #include <sys/systm.h> |
43 | #include <sys/kernel.h> | | 43 | #include <sys/kernel.h> |
44 | #include <sys/mutex.h> | | 44 | #include <sys/mutex.h> |
45 | #include <sys/callout.h> | | 45 | #include <sys/callout.h> |
46 | #include <sys/gpio.h> | | 46 | #include <sys/gpio.h> |
47 | #include <sys/cprng.h> | | 47 | #include <sys/cprng.h> |
48 | | | 48 | |
49 | #include <net/if.h> | | 49 | #include <net/if.h> |
50 | #include <net/if_dl.h> | | 50 | #include <net/if_dl.h> |
51 | #include <net/if_ether.h> | | 51 | #include <net/if_ether.h> |
52 | #include <net/if_media.h> | | 52 | #include <net/if_media.h> |
53 | #include <net/bpf.h> | | 53 | #include <net/bpf.h> |
54 | | | 54 | |
55 | #include <dev/mii/miivar.h> | | 55 | #include <dev/mii/miivar.h> |
56 | | | 56 | |
57 | #include <dev/fdt/fdtvar.h> | | 57 | #include <dev/fdt/fdtvar.h> |
58 | | | 58 | |
59 | #include <arm/sunxi/sunxi_emac.h> | | 59 | #include <arm/sunxi/sunxi_emac.h> |
60 | | | 60 | |
61 | #ifdef NET_MPSAFE | | 61 | #ifdef NET_MPSAFE |
62 | #define EMAC_MPSAFE 1 | | 62 | #define EMAC_MPSAFE 1 |
63 | #define CALLOUT_FLAGS CALLOUT_MPSAFE | | 63 | #define CALLOUT_FLAGS CALLOUT_MPSAFE |
64 | #define FDT_INTR_FLAGS FDT_INTR_MPSAFE | | 64 | #define FDT_INTR_FLAGS FDT_INTR_MPSAFE |
65 | #else | | 65 | #else |
66 | #define CALLOUT_FLAGS 0 | | 66 | #define CALLOUT_FLAGS 0 |
67 | #define FDT_INTR_FLAGS 0 | | 67 | #define FDT_INTR_FLAGS 0 |
68 | #endif | | 68 | #endif |
69 | | | 69 | |
70 | #define EMAC_IFNAME "emac%d" | | 70 | #define EMAC_IFNAME "emac%d" |
71 | | | 71 | |
72 | #define ETHER_ALIGN 2 | | 72 | #define ETHER_ALIGN 2 |
73 | | | 73 | |
74 | #define EMAC_LOCK(sc) mutex_enter(&(sc)->mtx) | | 74 | #define EMAC_LOCK(sc) mutex_enter(&(sc)->mtx) |
75 | #define EMAC_UNLOCK(sc) mutex_exit(&(sc)->mtx) | | 75 | #define EMAC_UNLOCK(sc) mutex_exit(&(sc)->mtx) |
76 | #define EMAC_ASSERT_LOCKED(sc) KASSERT(mutex_owned(&(sc)->mtx)) | | 76 | #define EMAC_ASSERT_LOCKED(sc) KASSERT(mutex_owned(&(sc)->mtx)) |
77 | | | 77 | |
78 | #define DESC_ALIGN sizeof(struct sunxi_emac_desc) | | 78 | #define DESC_ALIGN sizeof(struct sunxi_emac_desc) |
79 | #define TX_DESC_COUNT 1024 | | 79 | #define TX_DESC_COUNT 1024 |
80 | #define TX_DESC_SIZE (sizeof(struct sunxi_emac_desc) * TX_DESC_COUNT) | | 80 | #define TX_DESC_SIZE (sizeof(struct sunxi_emac_desc) * TX_DESC_COUNT) |
81 | #define RX_DESC_COUNT 256 | | 81 | #define RX_DESC_COUNT 256 |
82 | #define RX_DESC_SIZE (sizeof(struct sunxi_emac_desc) * RX_DESC_COUNT) | | 82 | #define RX_DESC_SIZE (sizeof(struct sunxi_emac_desc) * RX_DESC_COUNT) |
83 | | | 83 | |
84 | #define DESC_OFF(n) ((n) * sizeof(struct sunxi_emac_desc)) | | 84 | #define DESC_OFF(n) ((n) * sizeof(struct sunxi_emac_desc)) |
85 | #define TX_NEXT(n) (((n) + 1) & (TX_DESC_COUNT - 1)) | | 85 | #define TX_NEXT(n) (((n) + 1) & (TX_DESC_COUNT - 1)) |
86 | #define TX_SKIP(n, o) (((n) + (o)) & (TX_DESC_COUNT - 1)) | | 86 | #define TX_SKIP(n, o) (((n) + (o)) & (TX_DESC_COUNT - 1)) |
87 | #define RX_NEXT(n) (((n) + 1) & (RX_DESC_COUNT - 1)) | | 87 | #define RX_NEXT(n) (((n) + 1) & (RX_DESC_COUNT - 1)) |
88 | | | 88 | |
89 | #define TX_MAX_SEGS 128 | | 89 | #define TX_MAX_SEGS 128 |
90 | | | 90 | |
91 | #define SOFT_RST_RETRY 1000 | | 91 | #define SOFT_RST_RETRY 1000 |
92 | #define MII_BUSY_RETRY 1000 | | 92 | #define MII_BUSY_RETRY 1000 |
93 | #define MDIO_FREQ 2500000 | | 93 | #define MDIO_FREQ 2500000 |
94 | | | 94 | |
95 | #define BURST_LEN_DEFAULT 8 | | 95 | #define BURST_LEN_DEFAULT 8 |
96 | #define RX_TX_PRI_DEFAULT 0 | | 96 | #define RX_TX_PRI_DEFAULT 0 |
97 | #define PAUSE_TIME_DEFAULT 0x400 | | 97 | #define PAUSE_TIME_DEFAULT 0x400 |
98 | | | 98 | |
99 | /* syscon EMAC clock register */ | | 99 | /* syscon EMAC clock register */ |
100 | #define EMAC_CLK_REG 0x30 | | 100 | #define EMAC_CLK_REG 0x30 |
101 | #define EMAC_CLK_EPHY_ADDR (0x1f << 20) /* H3 */ | | 101 | #define EMAC_CLK_EPHY_ADDR (0x1f << 20) /* H3 */ |
102 | #define EMAC_CLK_EPHY_ADDR_SHIFT 20 | | 102 | #define EMAC_CLK_EPHY_ADDR_SHIFT 20 |
103 | #define EMAC_CLK_EPHY_LED_POL (1 << 17) /* H3 */ | | 103 | #define EMAC_CLK_EPHY_LED_POL (1 << 17) /* H3 */ |
104 | #define EMAC_CLK_EPHY_SHUTDOWN (1 << 16) /* H3 */ | | 104 | #define EMAC_CLK_EPHY_SHUTDOWN (1 << 16) /* H3 */ |
105 | #define EMAC_CLK_EPHY_SELECT (1 << 15) /* H3 */ | | 105 | #define EMAC_CLK_EPHY_SELECT (1 << 15) /* H3 */ |
106 | #define EMAC_CLK_RMII_EN (1 << 13) | | 106 | #define EMAC_CLK_RMII_EN (1 << 13) |
107 | #define EMAC_CLK_ETXDC (0x7 << 10) | | 107 | #define EMAC_CLK_ETXDC (0x7 << 10) |
108 | #define EMAC_CLK_ETXDC_SHIFT 10 | | 108 | #define EMAC_CLK_ETXDC_SHIFT 10 |
109 | #define EMAC_CLK_ERXDC (0x1f << 5) | | 109 | #define EMAC_CLK_ERXDC (0x1f << 5) |
110 | #define EMAC_CLK_ERXDC_SHIFT 5 | | 110 | #define EMAC_CLK_ERXDC_SHIFT 5 |
111 | #define EMAC_CLK_PIT (0x1 << 2) | | 111 | #define EMAC_CLK_PIT (0x1 << 2) |
112 | #define EMAC_CLK_PIT_MII (0 << 2) | | 112 | #define EMAC_CLK_PIT_MII (0 << 2) |
113 | #define EMAC_CLK_PIT_RGMII (1 << 2) | | 113 | #define EMAC_CLK_PIT_RGMII (1 << 2) |
114 | #define EMAC_CLK_SRC (0x3 << 0) | | 114 | #define EMAC_CLK_SRC (0x3 << 0) |
115 | #define EMAC_CLK_SRC_MII (0 << 0) | | 115 | #define EMAC_CLK_SRC_MII (0 << 0) |
116 | #define EMAC_CLK_SRC_EXT_RGMII (1 << 0) | | 116 | #define EMAC_CLK_SRC_EXT_RGMII (1 << 0) |
117 | #define EMAC_CLK_SRC_RGMII (2 << 0) | | 117 | #define EMAC_CLK_SRC_RGMII (2 << 0) |
118 | | | 118 | |
119 | /* Burst length of RX and TX DMA transfers */ | | 119 | /* Burst length of RX and TX DMA transfers */ |
120 | static int sunxi_emac_burst_len = BURST_LEN_DEFAULT; | | 120 | static int sunxi_emac_burst_len = BURST_LEN_DEFAULT; |
121 | | | 121 | |
122 | /* RX / TX DMA priority. If 1, RX DMA has priority over TX DMA. */ | | 122 | /* RX / TX DMA priority. If 1, RX DMA has priority over TX DMA. */ |
123 | static int sunxi_emac_rx_tx_pri = RX_TX_PRI_DEFAULT; | | 123 | static int sunxi_emac_rx_tx_pri = RX_TX_PRI_DEFAULT; |
124 | | | 124 | |
125 | /* Pause time field in the transmitted control frame */ | | 125 | /* Pause time field in the transmitted control frame */ |
126 | static int sunxi_emac_pause_time = PAUSE_TIME_DEFAULT; | | 126 | static int sunxi_emac_pause_time = PAUSE_TIME_DEFAULT; |
127 | | | 127 | |
128 | enum sunxi_emac_type { | | 128 | enum sunxi_emac_type { |
129 | EMAC_A83T = 1, | | 129 | EMAC_A83T = 1, |
130 | EMAC_H3, | | 130 | EMAC_H3, |
131 | EMAC_A64, | | 131 | EMAC_A64, |
132 | }; | | 132 | }; |
133 | | | 133 | |
134 | static const struct of_compat_data compat_data[] = { | | 134 | static const struct of_compat_data compat_data[] = { |
135 | { "allwinner,sun8i-a83t-emac", EMAC_A83T }, | | 135 | { "allwinner,sun8i-a83t-emac", EMAC_A83T }, |
136 | { "allwinner,sun8i-h3-emac", EMAC_H3 }, | | 136 | { "allwinner,sun8i-h3-emac", EMAC_H3 }, |
137 | { "allwinner,sun50i-a64-emac", EMAC_A64 }, | | 137 | { "allwinner,sun50i-a64-emac", EMAC_A64 }, |
138 | { NULL } | | 138 | { NULL } |
139 | }; | | 139 | }; |
140 | | | 140 | |
141 | struct sunxi_emac_bufmap { | | 141 | struct sunxi_emac_bufmap { |
142 | bus_dmamap_t map; | | 142 | bus_dmamap_t map; |
143 | struct mbuf *mbuf; | | 143 | struct mbuf *mbuf; |
144 | }; | | 144 | }; |
145 | | | 145 | |
146 | struct sunxi_emac_txring { | | 146 | struct sunxi_emac_txring { |
147 | bus_dma_tag_t desc_tag; | | 147 | bus_dma_tag_t desc_tag; |
148 | bus_dmamap_t desc_map; | | 148 | bus_dmamap_t desc_map; |
149 | bus_dma_segment_t desc_dmaseg; | | 149 | bus_dma_segment_t desc_dmaseg; |
150 | struct sunxi_emac_desc *desc_ring; | | 150 | struct sunxi_emac_desc *desc_ring; |
151 | bus_addr_t desc_ring_paddr; | | 151 | bus_addr_t desc_ring_paddr; |
152 | bus_dma_tag_t buf_tag; | | 152 | bus_dma_tag_t buf_tag; |
153 | struct sunxi_emac_bufmap buf_map[TX_DESC_COUNT]; | | 153 | struct sunxi_emac_bufmap buf_map[TX_DESC_COUNT]; |
154 | u_int cur, next, queued; | | 154 | u_int cur, next, queued; |
155 | }; | | 155 | }; |
156 | | | 156 | |
157 | struct sunxi_emac_rxring { | | 157 | struct sunxi_emac_rxring { |
158 | bus_dma_tag_t desc_tag; | | 158 | bus_dma_tag_t desc_tag; |
159 | bus_dmamap_t desc_map; | | 159 | bus_dmamap_t desc_map; |
160 | bus_dma_segment_t desc_dmaseg; | | 160 | bus_dma_segment_t desc_dmaseg; |
161 | struct sunxi_emac_desc *desc_ring; | | 161 | struct sunxi_emac_desc *desc_ring; |
162 | bus_addr_t desc_ring_paddr; | | 162 | bus_addr_t desc_ring_paddr; |
163 | bus_dma_tag_t buf_tag; | | 163 | bus_dma_tag_t buf_tag; |
164 | struct sunxi_emac_bufmap buf_map[RX_DESC_COUNT]; | | 164 | struct sunxi_emac_bufmap buf_map[RX_DESC_COUNT]; |
165 | u_int cur; | | 165 | u_int cur; |
166 | }; | | 166 | }; |
167 | | | 167 | |
168 | enum { | | 168 | enum { |
169 | _RES_EMAC, | | 169 | _RES_EMAC, |
170 | _RES_SYSCON, | | 170 | _RES_SYSCON, |
171 | _RES_NITEMS | | 171 | _RES_NITEMS |
172 | }; | | 172 | }; |
173 | | | 173 | |
174 | struct sunxi_emac_softc { | | 174 | struct sunxi_emac_softc { |
175 | device_t dev; | | 175 | device_t dev; |
176 | int phandle; | | 176 | int phandle; |
177 | enum sunxi_emac_type type; | | 177 | enum sunxi_emac_type type; |
178 | bus_space_tag_t bst; | | 178 | bus_space_tag_t bst; |
179 | bus_dma_tag_t dmat; | | 179 | bus_dma_tag_t dmat; |
180 | | | 180 | |
181 | bus_space_handle_t bsh[_RES_NITEMS]; | | 181 | bus_space_handle_t bsh[_RES_NITEMS]; |
182 | struct clk *clk_ahb; | | 182 | struct clk *clk_ahb; |
183 | struct clk *clk_ephy; | | 183 | struct clk *clk_ephy; |
184 | struct fdtbus_reset *rst_ahb; | | 184 | struct fdtbus_reset *rst_ahb; |
185 | struct fdtbus_reset *rst_ephy; | | 185 | struct fdtbus_reset *rst_ephy; |
186 | struct fdtbus_regulator *reg_phy; | | 186 | struct fdtbus_regulator *reg_phy; |
187 | struct fdtbus_gpio_pin *pin_reset; | | 187 | struct fdtbus_gpio_pin *pin_reset; |
188 | | | 188 | |
189 | int phy_id; | | 189 | int phy_id; |
190 | | | 190 | |
191 | kmutex_t mtx; | | 191 | kmutex_t mtx; |
192 | struct ethercom ec; | | 192 | struct ethercom ec; |
193 | struct mii_data mii; | | 193 | struct mii_data mii; |
194 | callout_t stat_ch; | | 194 | callout_t stat_ch; |
195 | void *ih; | | 195 | void *ih; |
196 | u_int mdc_div_ratio_m; | | 196 | u_int mdc_div_ratio_m; |
197 | | | 197 | |
198 | struct sunxi_emac_txring tx; | | 198 | struct sunxi_emac_txring tx; |
199 | struct sunxi_emac_rxring rx; | | 199 | struct sunxi_emac_rxring rx; |
200 | }; | | 200 | }; |
201 | | | 201 | |
202 | #define RD4(sc, reg) \ | | 202 | #define RD4(sc, reg) \ |
203 | bus_space_read_4((sc)->bst, (sc)->bsh[_RES_EMAC], (reg)) | | 203 | bus_space_read_4((sc)->bst, (sc)->bsh[_RES_EMAC], (reg)) |
204 | #define WR4(sc, reg, val) \ | | 204 | #define WR4(sc, reg, val) \ |
205 | bus_space_write_4((sc)->bst, (sc)->bsh[_RES_EMAC], (reg), (val)) | | 205 | bus_space_write_4((sc)->bst, (sc)->bsh[_RES_EMAC], (reg), (val)) |
206 | | | 206 | |
207 | #define SYSCONRD4(sc, reg) \ | | 207 | #define SYSCONRD4(sc, reg) \ |
208 | bus_space_read_4((sc)->bst, (sc)->bsh[_RES_SYSCON], (reg)) | | 208 | bus_space_read_4((sc)->bst, (sc)->bsh[_RES_SYSCON], (reg)) |
209 | #define SYSCONWR4(sc, reg, val) \ | | 209 | #define SYSCONWR4(sc, reg, val) \ |
210 | bus_space_write_4((sc)->bst, (sc)->bsh[_RES_SYSCON], (reg), (val)) | | 210 | bus_space_write_4((sc)->bst, (sc)->bsh[_RES_SYSCON], (reg), (val)) |
211 | | | 211 | |
212 | static int | | 212 | static int |
213 | sunxi_emac_mii_readreg(device_t dev, int phy, int reg) | | 213 | sunxi_emac_mii_readreg(device_t dev, int phy, int reg) |
214 | { | | 214 | { |
215 | struct sunxi_emac_softc *sc = device_private(dev); | | 215 | struct sunxi_emac_softc *sc = device_private(dev); |
216 | int retry, val; | | 216 | int retry, val; |
217 | | | 217 | |
218 | val = 0; | | 218 | val = 0; |
219 | | | 219 | |
220 | WR4(sc, EMAC_MII_CMD, | | 220 | WR4(sc, EMAC_MII_CMD, |
221 | (sc->mdc_div_ratio_m << MDC_DIV_RATIO_M_SHIFT) | | | 221 | (sc->mdc_div_ratio_m << MDC_DIV_RATIO_M_SHIFT) | |
222 | (phy << PHY_ADDR_SHIFT) | | | 222 | (phy << PHY_ADDR_SHIFT) | |
223 | (reg << PHY_REG_ADDR_SHIFT) | | | 223 | (reg << PHY_REG_ADDR_SHIFT) | |
224 | MII_BUSY); | | 224 | MII_BUSY); |
225 | for (retry = MII_BUSY_RETRY; retry > 0; retry--) { | | 225 | for (retry = MII_BUSY_RETRY; retry > 0; retry--) { |
226 | if ((RD4(sc, EMAC_MII_CMD) & MII_BUSY) == 0) { | | 226 | if ((RD4(sc, EMAC_MII_CMD) & MII_BUSY) == 0) { |
227 | val = RD4(sc, EMAC_MII_DATA); | | 227 | val = RD4(sc, EMAC_MII_DATA); |
228 | break; | | 228 | break; |
229 | } | | 229 | } |
230 | delay(10); | | 230 | delay(10); |
231 | } | | 231 | } |
232 | | | 232 | |
233 | if (retry == 0) | | 233 | if (retry == 0) |
234 | device_printf(dev, "phy read timeout, phy=%d reg=%d\n", | | 234 | device_printf(dev, "phy read timeout, phy=%d reg=%d\n", |
235 | phy, reg); | | 235 | phy, reg); |
236 | | | 236 | |
237 | return val; | | 237 | return val; |
238 | } | | 238 | } |
239 | | | 239 | |
240 | static void | | 240 | static void |
241 | sunxi_emac_mii_writereg(device_t dev, int phy, int reg, int val) | | 241 | sunxi_emac_mii_writereg(device_t dev, int phy, int reg, int val) |
242 | { | | 242 | { |
243 | struct sunxi_emac_softc *sc = device_private(dev); | | 243 | struct sunxi_emac_softc *sc = device_private(dev); |
244 | int retry; | | 244 | int retry; |
245 | | | 245 | |
246 | WR4(sc, EMAC_MII_DATA, val); | | 246 | WR4(sc, EMAC_MII_DATA, val); |
247 | WR4(sc, EMAC_MII_CMD, | | 247 | WR4(sc, EMAC_MII_CMD, |
248 | (sc->mdc_div_ratio_m << MDC_DIV_RATIO_M_SHIFT) | | | 248 | (sc->mdc_div_ratio_m << MDC_DIV_RATIO_M_SHIFT) | |
249 | (phy << PHY_ADDR_SHIFT) | | | 249 | (phy << PHY_ADDR_SHIFT) | |
250 | (reg << PHY_REG_ADDR_SHIFT) | | | 250 | (reg << PHY_REG_ADDR_SHIFT) | |
251 | MII_WR | MII_BUSY); | | 251 | MII_WR | MII_BUSY); |
252 | for (retry = MII_BUSY_RETRY; retry > 0; retry--) { | | 252 | for (retry = MII_BUSY_RETRY; retry > 0; retry--) { |
253 | if ((RD4(sc, EMAC_MII_CMD) & MII_BUSY) == 0) | | 253 | if ((RD4(sc, EMAC_MII_CMD) & MII_BUSY) == 0) |
254 | break; | | 254 | break; |
255 | delay(10); | | 255 | delay(10); |
256 | } | | 256 | } |
257 | | | 257 | |
258 | if (retry == 0) | | 258 | if (retry == 0) |
259 | device_printf(dev, "phy write timeout, phy=%d reg=%d\n", | | 259 | device_printf(dev, "phy write timeout, phy=%d reg=%d\n", |
260 | phy, reg); | | 260 | phy, reg); |
261 | } | | 261 | } |
262 | | | 262 | |
263 | static void | | 263 | static void |
264 | sunxi_emac_update_link(struct sunxi_emac_softc *sc) | | 264 | sunxi_emac_update_link(struct sunxi_emac_softc *sc) |
265 | { | | 265 | { |
266 | struct mii_data *mii = &sc->mii; | | 266 | struct mii_data *mii = &sc->mii; |
267 | uint32_t val; | | 267 | uint32_t val; |
268 | | | 268 | |
269 | val = RD4(sc, EMAC_BASIC_CTL_0); | | 269 | val = RD4(sc, EMAC_BASIC_CTL_0); |
270 | val &= ~(BASIC_CTL_SPEED | BASIC_CTL_DUPLEX); | | 270 | val &= ~(BASIC_CTL_SPEED | BASIC_CTL_DUPLEX); |
271 | | | 271 | |
272 | if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || | | 272 | if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || |
273 | IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) | | 273 | IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) |
274 | val |= BASIC_CTL_SPEED_1000 << BASIC_CTL_SPEED_SHIFT; | | 274 | val |= BASIC_CTL_SPEED_1000 << BASIC_CTL_SPEED_SHIFT; |
275 | else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) | | 275 | else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) |
276 | val |= BASIC_CTL_SPEED_100 << BASIC_CTL_SPEED_SHIFT; | | 276 | val |= BASIC_CTL_SPEED_100 << BASIC_CTL_SPEED_SHIFT; |
277 | else | | 277 | else |
278 | val |= BASIC_CTL_SPEED_10 << BASIC_CTL_SPEED_SHIFT; | | 278 | val |= BASIC_CTL_SPEED_10 << BASIC_CTL_SPEED_SHIFT; |
279 | | | 279 | |
280 | if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) | | 280 | if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) |
281 | val |= BASIC_CTL_DUPLEX; | | 281 | val |= BASIC_CTL_DUPLEX; |
282 | | | 282 | |
283 | WR4(sc, EMAC_BASIC_CTL_0, val); | | 283 | WR4(sc, EMAC_BASIC_CTL_0, val); |
284 | | | 284 | |
285 | val = RD4(sc, EMAC_RX_CTL_0); | | 285 | val = RD4(sc, EMAC_RX_CTL_0); |
286 | val &= ~RX_FLOW_CTL_EN; | | 286 | val &= ~RX_FLOW_CTL_EN; |
287 | if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) | | 287 | if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) |
288 | val |= RX_FLOW_CTL_EN; | | 288 | val |= RX_FLOW_CTL_EN; |
289 | WR4(sc, EMAC_RX_CTL_0, val); | | 289 | WR4(sc, EMAC_RX_CTL_0, val); |
290 | | | 290 | |
291 | val = RD4(sc, EMAC_TX_FLOW_CTL); | | 291 | val = RD4(sc, EMAC_TX_FLOW_CTL); |
292 | val &= ~(PAUSE_TIME|TX_FLOW_CTL_EN); | | 292 | val &= ~(PAUSE_TIME|TX_FLOW_CTL_EN); |
293 | if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) | | 293 | if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) |
294 | val |= TX_FLOW_CTL_EN; | | 294 | val |= TX_FLOW_CTL_EN; |
295 | if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) | | 295 | if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) |
296 | val |= sunxi_emac_pause_time << PAUSE_TIME_SHIFT; | | 296 | val |= sunxi_emac_pause_time << PAUSE_TIME_SHIFT; |
297 | WR4(sc, EMAC_TX_FLOW_CTL, val); | | 297 | WR4(sc, EMAC_TX_FLOW_CTL, val); |
298 | } | | 298 | } |
299 | | | 299 | |
300 | static void | | 300 | static void |
301 | sunxi_emac_mii_statchg(struct ifnet *ifp) | | 301 | sunxi_emac_mii_statchg(struct ifnet *ifp) |
302 | { | | 302 | { |
303 | struct sunxi_emac_softc * const sc = ifp->if_softc; | | 303 | struct sunxi_emac_softc * const sc = ifp->if_softc; |
304 | | | 304 | |
305 | sunxi_emac_update_link(sc); | | 305 | sunxi_emac_update_link(sc); |
306 | } | | 306 | } |
307 | | | 307 | |
308 | static void | | 308 | static void |
309 | sunxi_emac_dma_sync(struct sunxi_emac_softc *sc, bus_dma_tag_t dmat, | | 309 | sunxi_emac_dma_sync(struct sunxi_emac_softc *sc, bus_dma_tag_t dmat, |
310 | bus_dmamap_t map, int start, int end, int total, int flags) | | 310 | bus_dmamap_t map, int start, int end, int total, int flags) |
311 | { | | 311 | { |
312 | if (end > start) { | | 312 | if (end > start) { |
313 | bus_dmamap_sync(dmat, map, DESC_OFF(start), | | 313 | bus_dmamap_sync(dmat, map, DESC_OFF(start), |
314 | DESC_OFF(end) - DESC_OFF(start), flags); | | 314 | DESC_OFF(end) - DESC_OFF(start), flags); |
315 | } else { | | 315 | } else { |
316 | bus_dmamap_sync(dmat, map, DESC_OFF(start), | | 316 | bus_dmamap_sync(dmat, map, DESC_OFF(start), |
317 | DESC_OFF(total) - DESC_OFF(start), flags); | | 317 | DESC_OFF(total) - DESC_OFF(start), flags); |
318 | if (DESC_OFF(end) - DESC_OFF(0) > 0) | | 318 | if (DESC_OFF(end) - DESC_OFF(0) > 0) |
319 | bus_dmamap_sync(dmat, map, DESC_OFF(0), | | 319 | bus_dmamap_sync(dmat, map, DESC_OFF(0), |
320 | DESC_OFF(end) - DESC_OFF(0), flags); | | 320 | DESC_OFF(end) - DESC_OFF(0), flags); |
321 | } | | 321 | } |
322 | } | | 322 | } |
323 | | | 323 | |
324 | static void | | 324 | static void |
325 | sunxi_emac_setup_txdesc(struct sunxi_emac_softc *sc, int index, int flags, | | 325 | sunxi_emac_setup_txdesc(struct sunxi_emac_softc *sc, int index, int flags, |
326 | bus_addr_t paddr, u_int len) | | 326 | bus_addr_t paddr, u_int len) |
327 | { | | 327 | { |
328 | uint32_t status, size; | | 328 | uint32_t status, size; |
329 | | | 329 | |
330 | if (paddr == 0 || len == 0) { | | 330 | if (paddr == 0 || len == 0) { |
331 | status = 0; | | 331 | status = 0; |
332 | size = 0; | | 332 | size = 0; |
333 | --sc->tx.queued; | | 333 | --sc->tx.queued; |
334 | } else { | | 334 | } else { |
335 | status = TX_DESC_CTL; | | 335 | status = TX_DESC_CTL; |
336 | size = flags | len; | | 336 | size = flags | len; |
337 | ++sc->tx.queued; | | 337 | ++sc->tx.queued; |
338 | } | | 338 | } |
339 | | | 339 | |
340 | sc->tx.desc_ring[index].addr = htole32((uint32_t)paddr); | | 340 | sc->tx.desc_ring[index].addr = htole32((uint32_t)paddr); |
341 | sc->tx.desc_ring[index].size = htole32(size); | | 341 | sc->tx.desc_ring[index].size = htole32(size); |
342 | sc->tx.desc_ring[index].status = htole32(status); | | 342 | sc->tx.desc_ring[index].status = htole32(status); |
343 | } | | 343 | } |
344 | | | 344 | |
345 | static int | | 345 | static int |
346 | sunxi_emac_setup_txbuf(struct sunxi_emac_softc *sc, int index, struct mbuf *m) | | 346 | sunxi_emac_setup_txbuf(struct sunxi_emac_softc *sc, int index, struct mbuf *m) |
347 | { | | 347 | { |
348 | bus_dma_segment_t *segs; | | 348 | bus_dma_segment_t *segs; |
349 | int error, nsegs, cur, i, flags; | | 349 | int error, nsegs, cur, i, flags; |
350 | u_int csum_flags; | | 350 | u_int csum_flags; |
351 | | | 351 | |
352 | error = bus_dmamap_load_mbuf(sc->tx.buf_tag, | | 352 | error = bus_dmamap_load_mbuf(sc->tx.buf_tag, |
353 | sc->tx.buf_map[index].map, m, BUS_DMA_WRITE|BUS_DMA_NOWAIT); | | 353 | sc->tx.buf_map[index].map, m, BUS_DMA_WRITE|BUS_DMA_NOWAIT); |
354 | if (error == EFBIG) { | | 354 | if (error == EFBIG) { |
355 | device_printf(sc->dev, | | 355 | device_printf(sc->dev, |
356 | "TX packet needs too many DMA segments, dropping...\n"); | | 356 | "TX packet needs too many DMA segments, dropping...\n"); |
357 | m_freem(m); | | 357 | m_freem(m); |
358 | return 0; | | 358 | return 0; |
359 | } | | 359 | } |
360 | if (error != 0) | | 360 | if (error != 0) |
361 | return 0; | | 361 | return 0; |
362 | | | 362 | |
363 | segs = sc->tx.buf_map[index].map->dm_segs; | | 363 | segs = sc->tx.buf_map[index].map->dm_segs; |
364 | nsegs = sc->tx.buf_map[index].map->dm_nsegs; | | 364 | nsegs = sc->tx.buf_map[index].map->dm_nsegs; |
365 | | | 365 | |
366 | flags = TX_FIR_DESC; | | 366 | flags = TX_FIR_DESC; |
367 | if ((m->m_pkthdr.csum_flags & M_CSUM_IPv4) != 0) { | | 367 | if ((m->m_pkthdr.csum_flags & M_CSUM_IPv4) != 0) { |
368 | if ((m->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) != 0) | | 368 | if ((m->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) != 0) |
369 | csum_flags = TX_CHECKSUM_CTL_FULL; | | 369 | csum_flags = TX_CHECKSUM_CTL_FULL; |
370 | else | | 370 | else |
371 | csum_flags = TX_CHECKSUM_CTL_IP; | | 371 | csum_flags = TX_CHECKSUM_CTL_IP; |
372 | flags |= (csum_flags << TX_CHECKSUM_CTL_SHIFT); | | 372 | flags |= (csum_flags << TX_CHECKSUM_CTL_SHIFT); |
373 | } | | 373 | } |
374 | | | 374 | |
375 | for (cur = index, i = 0; i < nsegs; i++) { | | 375 | for (cur = index, i = 0; i < nsegs; i++) { |
376 | sc->tx.buf_map[cur].mbuf = (i == 0 ? m : NULL); | | 376 | sc->tx.buf_map[cur].mbuf = (i == 0 ? m : NULL); |
377 | if (i == nsegs - 1) | | 377 | if (i == nsegs - 1) |
378 | flags |= TX_LAST_DESC | TX_INT_CTL; | | 378 | flags |= TX_LAST_DESC | TX_INT_CTL; |
379 | | | 379 | |
380 | sunxi_emac_setup_txdesc(sc, cur, flags, segs[i].ds_addr, | | 380 | sunxi_emac_setup_txdesc(sc, cur, flags, segs[i].ds_addr, |
381 | segs[i].ds_len); | | 381 | segs[i].ds_len); |
382 | flags &= ~TX_FIR_DESC; | | 382 | flags &= ~TX_FIR_DESC; |
383 | cur = TX_NEXT(cur); | | 383 | cur = TX_NEXT(cur); |
384 | } | | 384 | } |
385 | | | 385 | |
386 | bus_dmamap_sync(sc->tx.buf_tag, sc->tx.buf_map[index].map, | | 386 | bus_dmamap_sync(sc->tx.buf_tag, sc->tx.buf_map[index].map, |
387 | 0, sc->tx.buf_map[index].map->dm_mapsize, BUS_DMASYNC_PREWRITE); | | 387 | 0, sc->tx.buf_map[index].map->dm_mapsize, BUS_DMASYNC_PREWRITE); |
388 | | | 388 | |
389 | return nsegs; | | 389 | return nsegs; |
390 | } | | 390 | } |
391 | | | 391 | |
392 | static void | | 392 | static void |
393 | sunxi_emac_setup_rxdesc(struct sunxi_emac_softc *sc, int index, | | 393 | sunxi_emac_setup_rxdesc(struct sunxi_emac_softc *sc, int index, |
394 | bus_addr_t paddr) | | 394 | bus_addr_t paddr) |
395 | { | | 395 | { |
396 | uint32_t status, size; | | 396 | uint32_t status, size; |
397 | | | 397 | |
398 | status = RX_DESC_CTL; | | 398 | status = RX_DESC_CTL; |
399 | size = MCLBYTES - 1; | | 399 | size = MCLBYTES - 1; |
400 | | | 400 | |
401 | sc->rx.desc_ring[index].addr = htole32((uint32_t)paddr); | | 401 | sc->rx.desc_ring[index].addr = htole32((uint32_t)paddr); |
402 | sc->rx.desc_ring[index].size = htole32(size); | | 402 | sc->rx.desc_ring[index].size = htole32(size); |
403 | sc->rx.desc_ring[index].next = | | 403 | sc->rx.desc_ring[index].next = |
404 | htole32(sc->rx.desc_ring_paddr + DESC_OFF(RX_NEXT(index))); | | 404 | htole32(sc->rx.desc_ring_paddr + DESC_OFF(RX_NEXT(index))); |
405 | sc->rx.desc_ring[index].status = htole32(status); | | 405 | sc->rx.desc_ring[index].status = htole32(status); |
406 | } | | 406 | } |
407 | | | 407 | |
408 | static int | | 408 | static int |
409 | sunxi_emac_setup_rxbuf(struct sunxi_emac_softc *sc, int index, struct mbuf *m) | | 409 | sunxi_emac_setup_rxbuf(struct sunxi_emac_softc *sc, int index, struct mbuf *m) |
410 | { | | 410 | { |
411 | int error; | | 411 | int error; |
412 | | | 412 | |
413 | m_adj(m, ETHER_ALIGN); | | 413 | m_adj(m, ETHER_ALIGN); |
414 | | | 414 | |
415 | error = bus_dmamap_load_mbuf(sc->rx.buf_tag, | | 415 | error = bus_dmamap_load_mbuf(sc->rx.buf_tag, |
416 | sc->rx.buf_map[index].map, m, BUS_DMA_READ|BUS_DMA_NOWAIT); | | 416 | sc->rx.buf_map[index].map, m, BUS_DMA_READ|BUS_DMA_NOWAIT); |
417 | if (error != 0) | | 417 | if (error != 0) |
418 | return error; | | 418 | return error; |
419 | | | 419 | |
420 | bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map, | | 420 | bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map, |
421 | 0, sc->rx.buf_map[index].map->dm_mapsize, | | 421 | 0, sc->rx.buf_map[index].map->dm_mapsize, |
422 | BUS_DMASYNC_PREREAD); | | 422 | BUS_DMASYNC_PREREAD); |
423 | | | 423 | |
424 | sc->rx.buf_map[index].mbuf = m; | | 424 | sc->rx.buf_map[index].mbuf = m; |
425 | sunxi_emac_setup_rxdesc(sc, index, | | 425 | sunxi_emac_setup_rxdesc(sc, index, |
426 | sc->rx.buf_map[index].map->dm_segs[0].ds_addr); | | 426 | sc->rx.buf_map[index].map->dm_segs[0].ds_addr); |
427 | | | 427 | |
428 | return 0; | | 428 | return 0; |
429 | } | | 429 | } |
430 | | | 430 | |
431 | static struct mbuf * | | 431 | static struct mbuf * |
432 | sunxi_emac_alloc_mbufcl(struct sunxi_emac_softc *sc) | | 432 | sunxi_emac_alloc_mbufcl(struct sunxi_emac_softc *sc) |
433 | { | | 433 | { |
434 | struct mbuf *m; | | 434 | struct mbuf *m; |
435 | | | 435 | |
436 | m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); | | 436 | m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); |
437 | if (m != NULL) | | 437 | if (m != NULL) |
438 | m->m_pkthdr.len = m->m_len = m->m_ext.ext_size; | | 438 | m->m_pkthdr.len = m->m_len = m->m_ext.ext_size; |
439 | | | 439 | |
440 | return m; | | 440 | return m; |
441 | } | | 441 | } |
442 | | | 442 | |
/*
 * Drain the interface send queue into the TX descriptor ring and kick
 * the TX DMA engine.  Caller must hold the driver lock.
 */
static void
sunxi_emac_start_locked(struct sunxi_emac_softc *sc)
{
	struct ifnet *ifp = &sc->ec.ec_if;
	struct mbuf *m;
	uint32_t val;
	int cnt, nsegs, start;

	EMAC_ASSERT_LOCKED(sc);

	/* Do nothing unless running and not already flow-controlled. */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (cnt = 0, start = sc->tx.cur; ; cnt++) {
		/*
		 * Stop if a worst-case packet (TX_MAX_SEGS segments) might
		 * not fit; the TX completion interrupt clears IFF_OACTIVE
		 * and restarts us.
		 */
		if (sc->tx.queued >= TX_DESC_COUNT - TX_MAX_SEGS) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* Peek first; only dequeue once descriptors are set up. */
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		nsegs = sunxi_emac_setup_txbuf(sc, sc->tx.cur, m);
		if (nsegs == 0) {
			/* DMA mapping failed; leave the packet queued. */
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m);
		bpf_mtap(ifp, m);

		sc->tx.cur = TX_SKIP(sc->tx.cur, nsegs);
	}

	if (cnt != 0) {
		/* Flush the newly written descriptors before starting DMA. */
		sunxi_emac_dma_sync(sc, sc->tx.desc_tag, sc->tx.desc_map,
		    start, sc->tx.cur, TX_DESC_COUNT,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start and run TX DMA */
		val = RD4(sc, EMAC_TX_CTL_1);
		WR4(sc, EMAC_TX_CTL_1, val | TX_DMA_START);
	}
}
487 | | | 487 | |
488 | static void | | 488 | static void |
489 | sunxi_emac_start(struct ifnet *ifp) | | 489 | sunxi_emac_start(struct ifnet *ifp) |
490 | { | | 490 | { |
491 | struct sunxi_emac_softc *sc = ifp->if_softc; | | 491 | struct sunxi_emac_softc *sc = ifp->if_softc; |
492 | | | 492 | |
493 | EMAC_LOCK(sc); | | 493 | EMAC_LOCK(sc); |
494 | sunxi_emac_start_locked(sc); | | 494 | sunxi_emac_start_locked(sc); |
495 | EMAC_UNLOCK(sc); | | 495 | EMAC_UNLOCK(sc); |
496 | } | | 496 | } |
497 | | | 497 | |
/*
 * Periodic (1 Hz) callout: drive the MII link state machine and
 * re-arm ourselves.
 */
static void
sunxi_emac_tick(void *softc)
{
	struct sunxi_emac_softc *sc = softc;
	struct mii_data *mii = &sc->mii;
#ifndef EMAC_MPSAFE
	/* Non-MPSAFE configuration: block network interrupts as well. */
	int s = splnet();
#endif

	EMAC_LOCK(sc);
	mii_tick(mii);
	callout_schedule(&sc->stat_ch, hz);	/* run again in one second */
	EMAC_UNLOCK(sc);

#ifndef EMAC_MPSAFE
	splx(s);
#endif
}
516 | | | 516 | |
/*
 * Reverse the order of the 32 bits in a word, one bit at a time:
 * repeatedly shift the lowest source bit into the result from the right.
 */
static uint32_t
bitrev32(uint32_t x)
{
	uint32_t r;
	unsigned int i;

	r = 0;
	for (i = 0; i < 32; i++) {
		r = (r << 1) | (x & 1);
		x >>= 1;
	}

	return r;
}
528 | | | 528 | |
/*
 * Program the RX frame filter: station (unicast) address, promiscuous /
 * all-multicast modes, and the 64-bit multicast hash table.  Caller must
 * hold the driver lock.
 */
static void
sunxi_emac_setup_rxfilter(struct sunxi_emac_softc *sc)
{
	struct ifnet *ifp = &sc->ec.ec_if;
	uint32_t val, crc, hashreg, hashbit, hash[2], machi, maclo;
	struct ether_multi *enm;
	struct ether_multistep step;
	const uint8_t *eaddr;

	EMAC_ASSERT_LOCKED(sc);

	val = 0;
	hash[0] = hash[1] = 0;

	if ((ifp->if_flags & IFF_PROMISC) != 0)
		val |= DIS_ADDR_FILTER;
	else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		/* Accept all multicast: set every hash table bit. */
		val |= RX_ALL_MULTICAST;
		hash[0] = hash[1] = ~0;
	} else {
		val |= HASH_MULTICAST;
		/* Fold each group address into a 6-bit hash table index. */
		ETHER_FIRST_MULTI(step, &sc->ec, enm);
		while (enm != NULL) {
			crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
			crc &= 0x7f;
			crc = bitrev32(~crc) >> 26;
			hashreg = (crc >> 5);
			hashbit = (crc & 0x1f);
			hash[hashreg] |= (1 << hashbit);
			ETHER_NEXT_MULTI(step, enm);
		}
	}

	/* Write our unicast address */
	eaddr = CLLADDR(ifp->if_sadl);
	machi = (eaddr[5] << 8) | eaddr[4];
	maclo = (eaddr[3] << 24) | (eaddr[2] << 16) | (eaddr[1] << 8) |
	    (eaddr[0] << 0);
	WR4(sc, EMAC_ADDR_HIGH(0), machi);
	WR4(sc, EMAC_ADDR_LOW(0), maclo);

	/* Multicast hash filters; note hash[1] goes to HASH_0 and vice versa. */
	WR4(sc, EMAC_RX_HASH_0, hash[1]);
	WR4(sc, EMAC_RX_HASH_1, hash[0]);

	/* RX frame filter config */
	WR4(sc, EMAC_RX_FRM_FLT, val);
}
577 | | | 577 | |
578 | static void | | 578 | static void |
579 | sunxi_emac_enable_intr(struct sunxi_emac_softc *sc) | | 579 | sunxi_emac_enable_intr(struct sunxi_emac_softc *sc) |
580 | { | | 580 | { |
581 | /* Enable interrupts */ | | 581 | /* Enable interrupts */ |
582 | WR4(sc, EMAC_INT_EN, RX_INT_EN | TX_INT_EN | TX_BUF_UA_INT_EN); | | 582 | WR4(sc, EMAC_INT_EN, RX_INT_EN | TX_INT_EN | TX_BUF_UA_INT_EN); |
583 | } | | 583 | } |
584 | | | 584 | |
585 | static void | | 585 | static void |
586 | sunxi_emac_disable_intr(struct sunxi_emac_softc *sc) | | 586 | sunxi_emac_disable_intr(struct sunxi_emac_softc *sc) |
587 | { | | 587 | { |
588 | /* Disable interrupts */ | | 588 | /* Disable interrupts */ |
589 | WR4(sc, EMAC_INT_EN, 0); | | 589 | WR4(sc, EMAC_INT_EN, 0); |
590 | } | | 590 | } |
591 | | | 591 | |
/*
 * Bring the interface up: program the RX filter, configure and enable
 * the DMA engines, then the MAC transmitter/receiver, and start the
 * periodic tick.  Caller must hold the driver lock.  Returns 0.
 */
static int
sunxi_emac_init_locked(struct sunxi_emac_softc *sc)
{
	struct ifnet *ifp = &sc->ec.ec_if;
	struct mii_data *mii = &sc->mii;
	uint32_t val;

	EMAC_ASSERT_LOCKED(sc);

	/* Nothing to do if the interface is already running. */
	if ((ifp->if_flags & IFF_RUNNING) != 0)
		return 0;

	sunxi_emac_setup_rxfilter(sc);

	/* Configure DMA burst length and priorities */
	val = sunxi_emac_burst_len << BASIC_CTL_BURST_LEN_SHIFT;
	if (sunxi_emac_rx_tx_pri)
		val |= BASIC_CTL_RX_TX_PRI;
	WR4(sc, EMAC_BASIC_CTL_1, val);

	/* Enable interrupts */
	sunxi_emac_enable_intr(sc);

	/* Enable transmit DMA */
	val = RD4(sc, EMAC_TX_CTL_1);
	WR4(sc, EMAC_TX_CTL_1, val | TX_DMA_EN | TX_MD | TX_NEXT_FRAME);

	/* Enable receive DMA */
	val = RD4(sc, EMAC_RX_CTL_1);
	WR4(sc, EMAC_RX_CTL_1, val | RX_DMA_EN | RX_MD);

	/* Enable transmitter */
	val = RD4(sc, EMAC_TX_CTL_0);
	WR4(sc, EMAC_TX_CTL_0, val | TX_EN);

	/* Enable receiver */
	val = RD4(sc, EMAC_RX_CTL_0);
	WR4(sc, EMAC_RX_CTL_0, val | RX_EN | CHECK_CRC);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Negotiate the media and start the 1 Hz tick callout. */
	mii_mediachg(mii);
	callout_schedule(&sc->stat_ch, hz);

	return 0;
}
639 | | | 639 | |
640 | static int | | 640 | static int |
641 | sunxi_emac_init(struct ifnet *ifp) | | 641 | sunxi_emac_init(struct ifnet *ifp) |
642 | { | | 642 | { |
643 | struct sunxi_emac_softc *sc = ifp->if_softc; | | 643 | struct sunxi_emac_softc *sc = ifp->if_softc; |
644 | int error; | | 644 | int error; |
645 | | | 645 | |
646 | EMAC_LOCK(sc); | | 646 | EMAC_LOCK(sc); |
647 | error = sunxi_emac_init_locked(sc); | | 647 | error = sunxi_emac_init_locked(sc); |
648 | EMAC_UNLOCK(sc); | | 648 | EMAC_UNLOCK(sc); |
649 | | | 649 | |
650 | return error; | | 650 | return error; |
651 | } | | 651 | } |
652 | | | 652 | |
/*
 * Take the interface down: stop the tick callout and MII, flush and
 * disable the TX path, then the RX path, and mask interrupts.
 * Caller must hold the driver lock.
 * NOTE(review): the "disable" argument is currently unused here.
 */
static void
sunxi_emac_stop_locked(struct sunxi_emac_softc *sc, int disable)
{
	struct ifnet *ifp = &sc->ec.ec_if;
	uint32_t val;

	EMAC_ASSERT_LOCKED(sc);

	callout_stop(&sc->stat_ch);

	mii_down(&sc->mii);

	/* Stop transmit DMA and flush data in the TX FIFO */
	val = RD4(sc, EMAC_TX_CTL_1);
	val &= ~TX_DMA_EN;
	val |= FLUSH_TX_FIFO;
	WR4(sc, EMAC_TX_CTL_1, val);

	/* Disable transmitter */
	val = RD4(sc, EMAC_TX_CTL_0);
	WR4(sc, EMAC_TX_CTL_0, val & ~TX_EN);

	/* Disable receiver */
	val = RD4(sc, EMAC_RX_CTL_0);
	WR4(sc, EMAC_RX_CTL_0, val & ~RX_EN);

	/* Disable interrupts */
	sunxi_emac_disable_intr(sc);

	/* Disable transmit DMA */
	val = RD4(sc, EMAC_TX_CTL_1);
	WR4(sc, EMAC_TX_CTL_1, val & ~TX_DMA_EN);

	/* Disable receive DMA */
	val = RD4(sc, EMAC_RX_CTL_1);
	WR4(sc, EMAC_RX_CTL_1, val & ~RX_DMA_EN);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}
692 | | | 692 | |
693 | static void | | 693 | static void |
694 | sunxi_emac_stop(struct ifnet *ifp, int disable) | | 694 | sunxi_emac_stop(struct ifnet *ifp, int disable) |
695 | { | | 695 | { |
696 | struct sunxi_emac_softc * const sc = ifp->if_softc; | | 696 | struct sunxi_emac_softc * const sc = ifp->if_softc; |
697 | | | 697 | |
698 | EMAC_LOCK(sc); | | 698 | EMAC_LOCK(sc); |
699 | sunxi_emac_stop_locked(sc, disable); | | 699 | sunxi_emac_stop_locked(sc, disable); |
700 | EMAC_UNLOCK(sc); | | 700 | EMAC_UNLOCK(sc); |
701 | } | | 701 | } |
702 | | | 702 | |
/*
 * Harvest completed RX descriptors: pass each received frame up the
 * stack, replace its cluster, and hand the descriptor back to the
 * hardware.  Returns the number of packets received.
 */
static int
sunxi_emac_rxintr(struct sunxi_emac_softc *sc)
{
	struct ifnet *ifp = &sc->ec.ec_if;
	int error, index, len, npkt;
	struct mbuf *m, *m0;
	uint32_t status;

	npkt = 0;

	for (index = sc->rx.cur; ; index = RX_NEXT(index)) {
		/* Pull this descriptor's status out of DMA memory. */
		sunxi_emac_dma_sync(sc, sc->rx.desc_tag, sc->rx.desc_map,
		    index, index + 1,
		    RX_DESC_COUNT, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = le32toh(sc->rx.desc_ring[index].status);
		/* RX_DESC_CTL set means the hardware still owns it: done. */
		if ((status & RX_DESC_CTL) != 0)
			break;

		bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map,
		    0, sc->rx.buf_map[index].map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->rx.buf_tag, sc->rx.buf_map[index].map);

		len = (status & RX_FRM_LEN) >> RX_FRM_LEN_SHIFT;
		if (len != 0) {
			/* Hand the received frame up the stack. */
			m = sc->rx.buf_map[index].mbuf;
			m_set_rcvif(m, ifp);
			m->m_flags |= M_HASFCS;
			m->m_pkthdr.len = len;
			m->m_len = len;
			m->m_nextpkt = NULL;

			/* Translate hardware checksum status, if enabled. */
			if ((ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) != 0 &&
			    (status & RX_FRM_TYPE) != 0) {
				m->m_pkthdr.csum_flags = M_CSUM_IPv4 |
				    M_CSUM_TCPv4 | M_CSUM_UDPv4;
				if ((status & RX_HEADER_ERR) != 0)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_IPv4_BAD;
				if ((status & RX_PAYLOAD_ERR) != 0)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_TCP_UDP_BAD;
			}

			++npkt;

			if_percpuq_enqueue(ifp->if_percpuq, m);
		}

		/* Attach a fresh cluster and re-arm this descriptor. */
		if ((m0 = sunxi_emac_alloc_mbufcl(sc)) != NULL) {
			error = sunxi_emac_setup_rxbuf(sc, index, m0);
			if (error != 0) {
				/* XXX hole in RX ring */
			}
		} else
			ifp->if_ierrors++;

		/* Give the descriptor back to the hardware. */
		sunxi_emac_dma_sync(sc, sc->rx.desc_tag, sc->rx.desc_map,
		    index, index + 1,
		    RX_DESC_COUNT, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	}

	sc->rx.cur = index;

	return npkt;
}
770 | | | 770 | |
/*
 * Reap completed TX descriptors: free the transmitted mbufs, reset the
 * descriptors, and clear IFF_OACTIVE so transmission can resume.
 * Caller must hold the driver lock.
 */
static void
sunxi_emac_txintr(struct sunxi_emac_softc *sc)
{
	struct ifnet *ifp = &sc->ec.ec_if;
	struct sunxi_emac_bufmap *bmap;
	struct sunxi_emac_desc *desc;
	uint32_t status;
	int i;

	EMAC_ASSERT_LOCKED(sc);

	for (i = sc->tx.next; sc->tx.queued > 0; i = TX_NEXT(i)) {
		KASSERT(sc->tx.queued > 0 && sc->tx.queued <= TX_DESC_COUNT);
		/* Pull this descriptor's status out of DMA memory. */
		sunxi_emac_dma_sync(sc, sc->tx.desc_tag, sc->tx.desc_map,
		    i, i + 1, TX_DESC_COUNT,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		desc = &sc->tx.desc_ring[i];
		status = le32toh(desc->status);
		/* TX_DESC_CTL set means the hardware still owns it: done. */
		if ((status & TX_DESC_CTL) != 0)
			break;
		bmap = &sc->tx.buf_map[i];
		if (bmap->mbuf != NULL) {
			/* Unload the DMA map and free the sent mbuf. */
			bus_dmamap_sync(sc->tx.buf_tag, bmap->map,
			    0, bmap->map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->tx.buf_tag, bmap->map);
			m_freem(bmap->mbuf);
			bmap->mbuf = NULL;
		}

		/* Reset the descriptor and hand it back to the hardware. */
		sunxi_emac_setup_txdesc(sc, i, 0, 0, 0);
		sunxi_emac_dma_sync(sc, sc->tx.desc_tag, sc->tx.desc_map,
		    i, i + 1, TX_DESC_COUNT,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_opackets++;
	}

	sc->tx.next = i;
}
812 | | | 812 | |
/*
 * Interrupt handler: acknowledge pending sources, then service the
 * RX and TX rings as indicated.  Always claims the interrupt.
 */
static int
sunxi_emac_intr(void *arg)
{
	struct sunxi_emac_softc *sc = arg;
	struct ifnet *ifp = &sc->ec.ec_if;
	uint32_t val;

	EMAC_LOCK(sc);

	/* Read and acknowledge (write-1-to-clear) pending interrupts. */
	val = RD4(sc, EMAC_INT_STA);
	WR4(sc, EMAC_INT_STA, val);

	if (val & RX_INT)
		sunxi_emac_rxintr(sc);

	if (val & (TX_INT|TX_BUF_UA_INT)) {
		/* Reap finished TX work, then restart queued transmits. */
		sunxi_emac_txintr(sc);
		if_schedule_deferred_start(ifp);
	}

	EMAC_UNLOCK(sc);

	return 1;
}
837 | | | 837 | |
/*
 * if_ioctl callback.  Media ioctls go to ifmedia; everything else is
 * passed to ether_ioctl(), and an ENETRESET result is handled by either
 * re-initializing the interface (SIOCSIFCAP) or reprogramming the RX
 * filter (multicast changes while running).
 */
static int
sunxi_emac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct sunxi_emac_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->mii;
	struct ifreq *ifr = data;
	int error, s;

	/* Non-MPSAFE: hold splnet() across the whole ioctl. */
#ifndef EMAC_MPSAFE
	s = splnet();
#endif

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* MPSAFE: raise spl only around the ifmedia call. */
#ifdef EMAC_MPSAFE
		s = splnet();
#endif
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
#ifdef EMAC_MPSAFE
		splx(s);
#endif
		break;
	default:
#ifdef EMAC_MPSAFE
		s = splnet();
#endif
		error = ether_ioctl(ifp, cmd, data);
#ifdef EMAC_MPSAFE
		splx(s);
#endif
		/* ENETRESET means a configuration change needs follow-up. */
		if (error != ENETRESET)
			break;

		error = 0;

		if (cmd == SIOCSIFCAP)
			error = (*ifp->if_init)(ifp);
		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if ((ifp->if_flags & IFF_RUNNING) != 0) {
			/* Multicast list changed: reprogram the RX filter. */
			EMAC_LOCK(sc);
			sunxi_emac_setup_rxfilter(sc);
			EMAC_UNLOCK(sc);
		}
		break;
	}

#ifndef EMAC_MPSAFE
	splx(s);
#endif

	return error;
}
892 | | | 892 | |
893 | static bool | | 893 | static bool |
894 | sunxi_emac_has_internal_phy(struct sunxi_emac_softc *sc) | | 894 | sunxi_emac_has_internal_phy(struct sunxi_emac_softc *sc) |
895 | { | | 895 | { |
896 | const char * mdio_internal_compat[] = { | | 896 | const char * mdio_internal_compat[] = { |
897 | "allwinner,sun8i-h3-mdio-internal", | | 897 | "allwinner,sun8i-h3-mdio-internal", |
898 | NULL | | 898 | NULL |
899 | }; | | 899 | }; |
900 | int phy; | | 900 | int phy; |
901 | | | 901 | |
902 | /* Non-standard property, for compatible with old dts files */ | | 902 | /* Non-standard property, for compatible with old dts files */ |
903 | if (of_hasprop(sc->phandle, "allwinner,use-internal-phy")) | | 903 | if (of_hasprop(sc->phandle, "allwinner,use-internal-phy")) |
904 | return true; | | 904 | return true; |
905 | | | 905 | |
906 | phy = fdtbus_get_phandle(sc->phandle, "phy-handle"); | | 906 | phy = fdtbus_get_phandle(sc->phandle, "phy-handle"); |
907 | if (phy == -1) | | 907 | if (phy == -1) |
908 | return false; | | 908 | return false; |
909 | | | 909 | |
910 | /* For internal PHY, check compatible string of parent node */ | | 910 | /* For internal PHY, check compatible string of parent node */ |
911 | return of_compatible(OF_parent(phy), mdio_internal_compat) >= 0; | | 911 | return of_compatible(OF_parent(phy), mdio_internal_compat) >= 0; |
912 | } | | 912 | } |
913 | | | 913 | |
/*
 * Configure the EMAC clock register in system control for the PHY
 * connection type described by the device tree ("phy-mode", optional
 * "tx-delay"/"rx-delay", and internal-PHY setup on H3).  Returns 0.
 */
static int
sunxi_emac_setup_phy(struct sunxi_emac_softc *sc)
{
	uint32_t reg, tx_delay, rx_delay;
	const char *phy_type;

	phy_type = fdtbus_get_string(sc->phandle, "phy-mode");
	if (phy_type == NULL)
		return 0;

	aprint_debug_dev(sc->dev, "PHY type: %s\n", phy_type);

	reg = SYSCONRD4(sc, 0);

	/* Select the PHY interface type and clock source. */
	reg &= ~(EMAC_CLK_PIT | EMAC_CLK_SRC | EMAC_CLK_RMII_EN);
	if (strcmp(phy_type, "rgmii") == 0)
		reg |= EMAC_CLK_PIT_RGMII | EMAC_CLK_SRC_RGMII;
	else if (strcmp(phy_type, "rmii") == 0)
		reg |= EMAC_CLK_RMII_EN;
	else
		reg |= EMAC_CLK_PIT_MII | EMAC_CLK_SRC_MII;

	/* Optional TX/RX clock delay chains from the device tree. */
	if (of_getprop_uint32(sc->phandle, "tx-delay", &tx_delay) == 0) {
		reg &= ~EMAC_CLK_ETXDC;
		reg |= (tx_delay << EMAC_CLK_ETXDC_SHIFT);
	}
	if (of_getprop_uint32(sc->phandle, "rx-delay", &rx_delay) == 0) {
		reg &= ~EMAC_CLK_ERXDC;
		reg |= (rx_delay << EMAC_CLK_ERXDC_SHIFT);
	}

	if (sc->type == EMAC_H3) {
		if (sunxi_emac_has_internal_phy(sc)) {
			/* Power up and select the internal PHY. */
			reg |= EMAC_CLK_EPHY_SELECT;
			reg &= ~EMAC_CLK_EPHY_SHUTDOWN;
			if (of_hasprop(sc->phandle,
			    "allwinner,leds-active-low"))
				reg |= EMAC_CLK_EPHY_LED_POL;
			else
				reg &= ~EMAC_CLK_EPHY_LED_POL;

			/* Set internal PHY addr to 1 */
			reg &= ~EMAC_CLK_EPHY_ADDR;
			reg |= (1 << EMAC_CLK_EPHY_ADDR_SHIFT);
		} else {
			reg &= ~EMAC_CLK_EPHY_SELECT;
		}
	}

	aprint_debug_dev(sc->dev, "EMAC clock: 0x%08x\n", reg);

	SYSCONWR4(sc, 0, reg);

	return 0;
}
969 | | | 969 | |
970 | static int | | 970 | static int |
971 | sunxi_emac_setup_resources(struct sunxi_emac_softc *sc) | | 971 | sunxi_emac_setup_resources(struct sunxi_emac_softc *sc) |
972 | { | | 972 | { |
973 | u_int freq; | | 973 | u_int freq; |
974 | int error, div; | | 974 | int error, div; |
975 | | | 975 | |
976 | /* Configure PHY for MII or RGMII mode */ | | 976 | /* Configure PHY for MII or RGMII mode */ |
977 | if (sunxi_emac_setup_phy(sc) != 0) | | 977 | if (sunxi_emac_setup_phy(sc) != 0) |
978 | return ENXIO; | | 978 | return ENXIO; |
979 | | | 979 | |
980 | /* Enable clocks */ | | 980 | /* Enable clocks */ |
981 | error = clk_enable(sc->clk_ahb); | | 981 | error = clk_enable(sc->clk_ahb); |
982 | if (error != 0) { | | 982 | if (error != 0) { |
983 | aprint_error_dev(sc->dev, "cannot enable ahb clock\n"); | | 983 | aprint_error_dev(sc->dev, "cannot enable ahb clock\n"); |
984 | return error; | | 984 | return error; |
985 | } | | 985 | } |
986 | | | 986 | |
987 | if (sc->clk_ephy != NULL) { | | 987 | if (sc->clk_ephy != NULL) { |
988 | error = clk_enable(sc->clk_ephy); | | 988 | error = clk_enable(sc->clk_ephy); |
989 | if (error != 0) { | | 989 | if (error != 0) { |
990 | aprint_error_dev(sc->dev, "cannot enable ephy clock\n"); | | 990 | aprint_error_dev(sc->dev, "cannot enable ephy clock\n"); |
991 | return error; | | 991 | return error; |
992 | } | | 992 | } |
993 | } | | 993 | } |
994 | | | 994 | |
995 | /* De-assert reset */ | | 995 | /* De-assert reset */ |
996 | error = fdtbus_reset_deassert(sc->rst_ahb); | | 996 | error = fdtbus_reset_deassert(sc->rst_ahb); |
997 | if (error != 0) { | | 997 | if (error != 0) { |
998 | aprint_error_dev(sc->dev, "cannot de-assert ahb reset\n"); | | 998 | aprint_error_dev(sc->dev, "cannot de-assert ahb reset\n"); |
999 | return error; | | 999 | return error; |
1000 | } | | 1000 | } |
1001 | if (sc->rst_ephy != NULL) { | | 1001 | if (sc->rst_ephy != NULL) { |
1002 | error = fdtbus_reset_deassert(sc->rst_ephy); | | 1002 | error = fdtbus_reset_deassert(sc->rst_ephy); |
1003 | if (error != 0) { | | 1003 | if (error != 0) { |
1004 | aprint_error_dev(sc->dev, | | 1004 | aprint_error_dev(sc->dev, |
1005 | "cannot de-assert ephy reset\n"); | | 1005 | "cannot de-assert ephy reset\n"); |
1006 | return error; | | 1006 | return error; |
1007 | } | | 1007 | } |
1008 | } | | 1008 | } |
1009 | | | 1009 | |
1010 | /* Enable PHY regulator if applicable */ | | 1010 | /* Enable PHY regulator if applicable */ |
1011 | if (sc->reg_phy != NULL) { | | 1011 | if (sc->reg_phy != NULL) { |
1012 | error = fdtbus_regulator_enable(sc->reg_phy); | | 1012 | error = fdtbus_regulator_enable(sc->reg_phy); |
1013 | if (error != 0) { | | 1013 | if (error != 0) { |
1014 | aprint_error_dev(sc->dev, | | 1014 | aprint_error_dev(sc->dev, |
1015 | "cannot enable PHY regulator\n"); | | 1015 | "cannot enable PHY regulator\n"); |
1016 | return error; | | 1016 | return error; |
1017 | } | | 1017 | } |
1018 | } | | 1018 | } |
1019 | | | 1019 | |
1020 | /* Determine MDC clock divide ratio based on AHB clock */ | | 1020 | /* Determine MDC clock divide ratio based on AHB clock */ |
1021 | freq = clk_get_rate(sc->clk_ahb); | | 1021 | freq = clk_get_rate(sc->clk_ahb); |
1022 | if (freq == 0) { | | 1022 | if (freq == 0) { |
1023 | aprint_error_dev(sc->dev, "cannot get AHB clock frequency\n"); | | 1023 | aprint_error_dev(sc->dev, "cannot get AHB clock frequency\n"); |
1024 | return ENXIO; | | 1024 | return ENXIO; |
1025 | } | | 1025 | } |
1026 | div = freq / MDIO_FREQ; | | 1026 | div = freq / MDIO_FREQ; |
1027 | if (div <= 16) | | 1027 | if (div <= 16) |
1028 | sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_16; | | 1028 | sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_16; |
1029 | else if (div <= 32) | | 1029 | else if (div <= 32) |
1030 | sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_32; | | 1030 | sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_32; |
1031 | else if (div <= 64) | | 1031 | else if (div <= 64) |
1032 | sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_64; | | 1032 | sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_64; |
1033 | else if (div <= 128) | | 1033 | else if (div <= 128) |
1034 | sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_128; | | 1034 | sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_128; |
1035 | else { | | 1035 | else { |
1036 | aprint_error_dev(sc->dev, | | 1036 | aprint_error_dev(sc->dev, |
1037 | "cannot determine MDC clock divide ratio\n"); | | 1037 | "cannot determine MDC clock divide ratio\n"); |
1038 | return ENXIO; | | 1038 | return ENXIO; |
1039 | } | | 1039 | } |
1040 | | | 1040 | |
1041 | aprint_debug_dev(sc->dev, "AHB frequency %u Hz, MDC div: 0x%x\n", | | 1041 | aprint_debug_dev(sc->dev, "AHB frequency %u Hz, MDC div: 0x%x\n", |
1042 | freq, sc->mdc_div_ratio_m); | | 1042 | freq, sc->mdc_div_ratio_m); |
1043 | | | 1043 | |
1044 | return 0; | | 1044 | return 0; |
1045 | } | | 1045 | } |
1046 | | | 1046 | |
1047 | static void | | 1047 | static void |
1048 | sunxi_emac_get_eaddr(struct sunxi_emac_softc *sc, uint8_t *eaddr) | | 1048 | sunxi_emac_get_eaddr(struct sunxi_emac_softc *sc, uint8_t *eaddr) |
1049 | { | | 1049 | { |
1050 | uint32_t maclo, machi; | | 1050 | uint32_t maclo, machi; |
1051 | #if notyet | | 1051 | #if notyet |
1052 | u_char rootkey[16]; | | 1052 | u_char rootkey[16]; |
1053 | #endif | | 1053 | #endif |
1054 | | | 1054 | |
1055 | machi = RD4(sc, EMAC_ADDR_HIGH(0)) & 0xffff; | | 1055 | machi = RD4(sc, EMAC_ADDR_HIGH(0)) & 0xffff; |
1056 | maclo = RD4(sc, EMAC_ADDR_LOW(0)); | | 1056 | maclo = RD4(sc, EMAC_ADDR_LOW(0)); |
1057 | | | 1057 | |
1058 | if (maclo == 0xffffffff && machi == 0xffff) { | | 1058 | if (maclo == 0xffffffff && machi == 0xffff) { |
1059 | #if notyet | | 1059 | #if notyet |
1060 | /* MAC address in hardware is invalid, create one */ | | 1060 | /* MAC address in hardware is invalid, create one */ |
1061 | if (aw_sid_get_rootkey(rootkey) == 0 && | | 1061 | if (aw_sid_get_rootkey(rootkey) == 0 && |
1062 | (rootkey[3] | rootkey[12] | rootkey[13] | rootkey[14] | | | 1062 | (rootkey[3] | rootkey[12] | rootkey[13] | rootkey[14] | |
1063 | rootkey[15]) != 0) { | | 1063 | rootkey[15]) != 0) { |
1064 | /* MAC address is derived from the root key in SID */ | | 1064 | /* MAC address is derived from the root key in SID */ |
1065 | maclo = (rootkey[13] << 24) | (rootkey[12] << 16) | | | 1065 | maclo = (rootkey[13] << 24) | (rootkey[12] << 16) | |
1066 | (rootkey[3] << 8) | 0x02; | | 1066 | (rootkey[3] << 8) | 0x02; |
1067 | machi = (rootkey[15] << 8) | rootkey[14]; | | 1067 | machi = (rootkey[15] << 8) | rootkey[14]; |
1068 | } else { | | 1068 | } else { |
1069 | #endif | | 1069 | #endif |
1070 | /* Create one */ | | 1070 | /* Create one */ |
1071 | maclo = 0x00f2 | (cprng_strong32() & 0xffff0000); | | 1071 | maclo = 0x00f2 | (cprng_strong32() & 0xffff0000); |
1072 | machi = cprng_strong32() & 0xffff; | | 1072 | machi = cprng_strong32() & 0xffff; |
1073 | #if notyet | | 1073 | #if notyet |
1074 | } | | 1074 | } |
1075 | #endif | | 1075 | #endif |
1076 | } | | 1076 | } |
1077 | | | 1077 | |
1078 | eaddr[0] = maclo & 0xff; | | 1078 | eaddr[0] = maclo & 0xff; |
1079 | eaddr[1] = (maclo >> 8) & 0xff; | | 1079 | eaddr[1] = (maclo >> 8) & 0xff; |
1080 | eaddr[2] = (maclo >> 16) & 0xff; | | 1080 | eaddr[2] = (maclo >> 16) & 0xff; |
1081 | eaddr[3] = (maclo >> 24) & 0xff; | | 1081 | eaddr[3] = (maclo >> 24) & 0xff; |
1082 | eaddr[4] = machi & 0xff; | | 1082 | eaddr[4] = machi & 0xff; |
1083 | eaddr[5] = (machi >> 8) & 0xff; | | 1083 | eaddr[5] = (machi >> 8) & 0xff; |
1084 | } | | 1084 | } |
1085 | | | 1085 | |
1086 | #ifdef SUNXI_EMAC_DEBUG | | 1086 | #ifdef SUNXI_EMAC_DEBUG |
1087 | static void | | 1087 | static void |
1088 | sunxi_emac_dump_regs(struct sunxi_emac_softc *sc) | | 1088 | sunxi_emac_dump_regs(struct sunxi_emac_softc *sc) |
1089 | { | | 1089 | { |
1090 | static const struct { | | 1090 | static const struct { |
1091 | const char *name; | | 1091 | const char *name; |
1092 | u_int reg; | | 1092 | u_int reg; |
1093 | } regs[] = { | | 1093 | } regs[] = { |
1094 | { "BASIC_CTL_0", EMAC_BASIC_CTL_0 }, | | 1094 | { "BASIC_CTL_0", EMAC_BASIC_CTL_0 }, |
1095 | { "BASIC_CTL_1", EMAC_BASIC_CTL_1 }, | | 1095 | { "BASIC_CTL_1", EMAC_BASIC_CTL_1 }, |
1096 | { "INT_STA", EMAC_INT_STA }, | | 1096 | { "INT_STA", EMAC_INT_STA }, |
1097 | { "INT_EN", EMAC_INT_EN }, | | 1097 | { "INT_EN", EMAC_INT_EN }, |
1098 | { "TX_CTL_0", EMAC_TX_CTL_0 }, | | 1098 | { "TX_CTL_0", EMAC_TX_CTL_0 }, |
1099 | { "TX_CTL_1", EMAC_TX_CTL_1 }, | | 1099 | { "TX_CTL_1", EMAC_TX_CTL_1 }, |
1100 | { "TX_FLOW_CTL", EMAC_TX_FLOW_CTL }, | | 1100 | { "TX_FLOW_CTL", EMAC_TX_FLOW_CTL }, |
1101 | { "TX_DMA_LIST", EMAC_TX_DMA_LIST }, | | 1101 | { "TX_DMA_LIST", EMAC_TX_DMA_LIST }, |
1102 | { "RX_CTL_0", EMAC_RX_CTL_0 }, | | 1102 | { "RX_CTL_0", EMAC_RX_CTL_0 }, |
1103 | { "RX_CTL_1", EMAC_RX_CTL_1 }, | | 1103 | { "RX_CTL_1", EMAC_RX_CTL_1 }, |
1104 | { "RX_DMA_LIST", EMAC_RX_DMA_LIST }, | | 1104 | { "RX_DMA_LIST", EMAC_RX_DMA_LIST }, |
1105 | { "RX_FRM_FLT", EMAC_RX_FRM_FLT }, | | 1105 | { "RX_FRM_FLT", EMAC_RX_FRM_FLT }, |
1106 | { "RX_HASH_0", EMAC_RX_HASH_0 }, | | 1106 | { "RX_HASH_0", EMAC_RX_HASH_0 }, |
1107 | { "RX_HASH_1", EMAC_RX_HASH_1 }, | | 1107 | { "RX_HASH_1", EMAC_RX_HASH_1 }, |
1108 | { "MII_CMD", EMAC_MII_CMD }, | | 1108 | { "MII_CMD", EMAC_MII_CMD }, |
1109 | { "ADDR_HIGH0", EMAC_ADDR_HIGH(0) }, | | 1109 | { "ADDR_HIGH0", EMAC_ADDR_HIGH(0) }, |
1110 | { "ADDR_LOW0", EMAC_ADDR_LOW(0) }, | | 1110 | { "ADDR_LOW0", EMAC_ADDR_LOW(0) }, |
1111 | { "TX_DMA_STA", EMAC_TX_DMA_STA }, | | 1111 | { "TX_DMA_STA", EMAC_TX_DMA_STA }, |
1112 | { "TX_DMA_CUR_DESC", EMAC_TX_DMA_CUR_DESC }, | | 1112 | { "TX_DMA_CUR_DESC", EMAC_TX_DMA_CUR_DESC }, |
1113 | { "TX_DMA_CUR_BUF", EMAC_TX_DMA_CUR_BUF }, | | 1113 | { "TX_DMA_CUR_BUF", EMAC_TX_DMA_CUR_BUF }, |
1114 | { "RX_DMA_STA", EMAC_RX_DMA_STA }, | | 1114 | { "RX_DMA_STA", EMAC_RX_DMA_STA }, |
1115 | { "RX_DMA_CUR_DESC", EMAC_RX_DMA_CUR_DESC }, | | 1115 | { "RX_DMA_CUR_DESC", EMAC_RX_DMA_CUR_DESC }, |
1116 | { "RX_DMA_CUR_BUF", EMAC_RX_DMA_CUR_BUF }, | | 1116 | { "RX_DMA_CUR_BUF", EMAC_RX_DMA_CUR_BUF }, |
1117 | { "RGMII_STA", EMAC_RGMII_STA }, | | 1117 | { "RGMII_STA", EMAC_RGMII_STA }, |
1118 | }; | | 1118 | }; |
1119 | u_int n; | | 1119 | u_int n; |
1120 | | | 1120 | |
1121 | for (n = 0; n < __arraycount(regs); n++) | | 1121 | for (n = 0; n < __arraycount(regs); n++) |
1122 | 		device_printf(sc->dev, " %-20s %08x\n", regs[n].name, | | 1122 | 		device_printf(sc->dev, " %-20s %08x\n", regs[n].name,
1123 | RD4(sc, regs[n].reg)); | | 1123 | RD4(sc, regs[n].reg)); |
1124 | } | | 1124 | } |
1125 | #endif | | 1125 | #endif |
1126 | | | 1126 | |
1127 | static int | | 1127 | static int |
1128 | sunxi_emac_phy_reset(struct sunxi_emac_softc *sc) | | 1128 | sunxi_emac_phy_reset(struct sunxi_emac_softc *sc) |
1129 | { | | 1129 | { |
1130 | uint32_t delay_prop[3]; | | 1130 | uint32_t delay_prop[3]; |
1131 | int pin_value; | | 1131 | int pin_value; |
1132 | | | 1132 | |
1133 | if (sc->pin_reset == NULL) | | 1133 | if (sc->pin_reset == NULL) |
1134 | return 0; | | 1134 | return 0; |
1135 | | | 1135 | |
1136 | if (OF_getprop(sc->phandle, "allwinner,reset-delays-us", delay_prop, | | 1136 | if (OF_getprop(sc->phandle, "allwinner,reset-delays-us", delay_prop, |
1137 | sizeof(delay_prop)) <= 0) | | 1137 | sizeof(delay_prop)) <= 0) |
1138 | return ENXIO; | | 1138 | return ENXIO; |
1139 | | | 1139 | |
1140 | pin_value = of_hasprop(sc->phandle, "allwinner,reset-active-low"); | | 1140 | pin_value = of_hasprop(sc->phandle, "allwinner,reset-active-low"); |
1141 | | | 1141 | |
1142 | fdtbus_gpio_write(sc->pin_reset, pin_value); | | 1142 | fdtbus_gpio_write(sc->pin_reset, pin_value); |
1143 | 	delay(be32toh(delay_prop[0])); | | 1143 | 	delay(be32toh(delay_prop[0]));
1144 | 	fdtbus_gpio_write(sc->pin_reset, !pin_value); | | 1144 | 	fdtbus_gpio_write(sc->pin_reset, !pin_value);
1145 | 	delay(be32toh(delay_prop[1])); | | 1145 | 	delay(be32toh(delay_prop[1]));
1146 | 	fdtbus_gpio_write(sc->pin_reset, pin_value); | | 1146 | 	fdtbus_gpio_write(sc->pin_reset, pin_value);
1147 | 	delay(be32toh(delay_prop[2])); | | 1147 | 	delay(be32toh(delay_prop[2]));
1148 | | | 1148 | |
1149 | return 0; | | 1149 | return 0; |
1150 | } | | 1150 | } |
1151 | | | 1151 | |
1152 | static int | | 1152 | static int |
1153 | sunxi_emac_reset(struct sunxi_emac_softc *sc) | | 1153 | sunxi_emac_reset(struct sunxi_emac_softc *sc) |
1154 | { | | 1154 | { |
1155 | int retry; | | 1155 | int retry; |
1156 | | | 1156 | |
1157 | /* Reset PHY if necessary */ | | 1157 | /* Reset PHY if necessary */ |
1158 | if (sunxi_emac_phy_reset(sc) != 0) { | | 1158 | if (sunxi_emac_phy_reset(sc) != 0) { |
1159 | aprint_error_dev(sc->dev, "failed to reset PHY\n"); | | 1159 | aprint_error_dev(sc->dev, "failed to reset PHY\n"); |
1160 | return ENXIO; | | 1160 | return ENXIO; |
1161 | } | | 1161 | } |
1162 | | | 1162 | |
1163 | /* Soft reset all registers and logic */ | | 1163 | /* Soft reset all registers and logic */ |
1164 | WR4(sc, EMAC_BASIC_CTL_1, BASIC_CTL_SOFT_RST); | | 1164 | WR4(sc, EMAC_BASIC_CTL_1, BASIC_CTL_SOFT_RST); |
1165 | | | 1165 | |
1166 | /* Wait for soft reset bit to self-clear */ | | 1166 | /* Wait for soft reset bit to self-clear */ |
1167 | for (retry = SOFT_RST_RETRY; retry > 0; retry--) { | | 1167 | for (retry = SOFT_RST_RETRY; retry > 0; retry--) { |
1168 | if ((RD4(sc, EMAC_BASIC_CTL_1) & BASIC_CTL_SOFT_RST) == 0) | | 1168 | if ((RD4(sc, EMAC_BASIC_CTL_1) & BASIC_CTL_SOFT_RST) == 0) |
1169 | break; | | 1169 | break; |
1170 | delay(10); | | 1170 | delay(10); |
1171 | } | | 1171 | } |
1172 | if (retry == 0) { | | 1172 | if (retry == 0) { |
1173 | aprint_error_dev(sc->dev, "soft reset timed out\n"); | | 1173 | aprint_error_dev(sc->dev, "soft reset timed out\n"); |
1174 | #ifdef SUNXI_EMAC_DEBUG | | 1174 | #ifdef SUNXI_EMAC_DEBUG |
1175 | sunxi_emac_dump_regs(sc); | | 1175 | sunxi_emac_dump_regs(sc); |
1176 | #endif | | 1176 | #endif |
1177 | return ETIMEDOUT; | | 1177 | return ETIMEDOUT; |
1178 | } | | 1178 | } |
1179 | | | 1179 | |
1180 | return 0; | | 1180 | return 0; |
1181 | } | | 1181 | } |
1182 | | | 1182 | |
1183 | static int | | 1183 | static int |
1184 | sunxi_emac_setup_dma(struct sunxi_emac_softc *sc) | | 1184 | sunxi_emac_setup_dma(struct sunxi_emac_softc *sc) |
1185 | { | | 1185 | { |
1186 | struct mbuf *m; | | 1186 | struct mbuf *m; |
1187 | int error, nsegs, i; | | 1187 | int error, nsegs, i; |
1188 | | | 1188 | |
1189 | /* Setup TX ring */ | | 1189 | /* Setup TX ring */ |
1190 | sc->tx.buf_tag = sc->tx.desc_tag = sc->dmat; | | 1190 | sc->tx.buf_tag = sc->tx.desc_tag = sc->dmat; |
1191 | error = bus_dmamap_create(sc->dmat, TX_DESC_SIZE, 1, TX_DESC_SIZE, 0, | | 1191 | error = bus_dmamap_create(sc->dmat, TX_DESC_SIZE, 1, TX_DESC_SIZE, 0, |
1192 | BUS_DMA_WAITOK, &sc->tx.desc_map); | | 1192 | BUS_DMA_WAITOK, &sc->tx.desc_map); |
1193 | if (error) | | 1193 | if (error) |
1194 | return error; | | 1194 | return error; |
1195 | error = bus_dmamem_alloc(sc->dmat, TX_DESC_SIZE, DESC_ALIGN, 0, | | 1195 | error = bus_dmamem_alloc(sc->dmat, TX_DESC_SIZE, DESC_ALIGN, 0, |
1196 | &sc->tx.desc_dmaseg, 1, &nsegs, BUS_DMA_WAITOK); | | 1196 | &sc->tx.desc_dmaseg, 1, &nsegs, BUS_DMA_WAITOK); |
1197 | if (error) | | 1197 | if (error) |
1198 | return error; | | 1198 | return error; |
1199 | error = bus_dmamem_map(sc->dmat, &sc->tx.desc_dmaseg, nsegs, | | 1199 | error = bus_dmamem_map(sc->dmat, &sc->tx.desc_dmaseg, nsegs, |
1200 | TX_DESC_SIZE, (void *)&sc->tx.desc_ring, | | 1200 | TX_DESC_SIZE, (void *)&sc->tx.desc_ring, |
1201 | BUS_DMA_WAITOK); | | 1201 | BUS_DMA_WAITOK); |
1202 | if (error) | | 1202 | if (error) |
1203 | return error; | | 1203 | return error; |
1204 | error = bus_dmamap_load(sc->dmat, sc->tx.desc_map, sc->tx.desc_ring, | | 1204 | error = bus_dmamap_load(sc->dmat, sc->tx.desc_map, sc->tx.desc_ring, |
1205 | TX_DESC_SIZE, NULL, BUS_DMA_WAITOK); | | 1205 | TX_DESC_SIZE, NULL, BUS_DMA_WAITOK); |
1206 | if (error) | | 1206 | if (error) |
1207 | return error; | | 1207 | return error; |
1208 | sc->tx.desc_ring_paddr = sc->tx.desc_map->dm_segs[0].ds_addr; | | 1208 | sc->tx.desc_ring_paddr = sc->tx.desc_map->dm_segs[0].ds_addr; |
1209 | | | 1209 | |
1210 | memset(sc->tx.desc_ring, 0, TX_DESC_SIZE); | | 1210 | memset(sc->tx.desc_ring, 0, TX_DESC_SIZE); |
1211 | bus_dmamap_sync(sc->dmat, sc->tx.desc_map, 0, TX_DESC_SIZE, | | 1211 | bus_dmamap_sync(sc->dmat, sc->tx.desc_map, 0, TX_DESC_SIZE, |
1212 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); | | 1212 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
1213 | | | 1213 | |
1214 | for (i = 0; i < TX_DESC_COUNT; i++) | | 1214 | for (i = 0; i < TX_DESC_COUNT; i++) |
1215 | sc->tx.desc_ring[i].next = | | 1215 | sc->tx.desc_ring[i].next = |
1216 | htole32(sc->tx.desc_ring_paddr + DESC_OFF(TX_NEXT(i))); | | 1216 | htole32(sc->tx.desc_ring_paddr + DESC_OFF(TX_NEXT(i))); |
1217 | | | 1217 | |
1218 | sc->tx.queued = TX_DESC_COUNT; | | 1218 | sc->tx.queued = TX_DESC_COUNT; |
1219 | for (i = 0; i < TX_DESC_COUNT; i++) { | | 1219 | for (i = 0; i < TX_DESC_COUNT; i++) { |
1220 | error = bus_dmamap_create(sc->tx.buf_tag, MCLBYTES, | | 1220 | error = bus_dmamap_create(sc->tx.buf_tag, MCLBYTES, |
1221 | TX_MAX_SEGS, MCLBYTES, 0, BUS_DMA_WAITOK, | | 1221 | TX_MAX_SEGS, MCLBYTES, 0, BUS_DMA_WAITOK, |
1222 | &sc->tx.buf_map[i].map); | | 1222 | &sc->tx.buf_map[i].map); |
1223 | if (error != 0) { | | 1223 | if (error != 0) { |
1224 | device_printf(sc->dev, "cannot create TX buffer map\n"); | | 1224 | device_printf(sc->dev, "cannot create TX buffer map\n"); |
1225 | return error; | | 1225 | return error; |
1226 | } | | 1226 | } |
1227 | sunxi_emac_setup_txdesc(sc, i, 0, 0, 0); | | 1227 | sunxi_emac_setup_txdesc(sc, i, 0, 0, 0); |
1228 | } | | 1228 | } |
1229 | | | 1229 | |
1230 | /* Setup RX ring */ | | 1230 | /* Setup RX ring */ |
1231 | sc->rx.buf_tag = sc->rx.desc_tag = sc->dmat; | | 1231 | sc->rx.buf_tag = sc->rx.desc_tag = sc->dmat; |
1232 | error = bus_dmamap_create(sc->dmat, RX_DESC_SIZE, 1, RX_DESC_SIZE, 0, | | 1232 | error = bus_dmamap_create(sc->dmat, RX_DESC_SIZE, 1, RX_DESC_SIZE, 0, |
1233 | BUS_DMA_WAITOK, &sc->rx.desc_map); | | 1233 | BUS_DMA_WAITOK, &sc->rx.desc_map); |
1234 | if (error) | | 1234 | if (error) |
1235 | return error; | | 1235 | return error; |
1236 | error = bus_dmamem_alloc(sc->dmat, RX_DESC_SIZE, DESC_ALIGN, 0, | | 1236 | error = bus_dmamem_alloc(sc->dmat, RX_DESC_SIZE, DESC_ALIGN, 0, |
1237 | &sc->rx.desc_dmaseg, 1, &nsegs, BUS_DMA_WAITOK); | | 1237 | &sc->rx.desc_dmaseg, 1, &nsegs, BUS_DMA_WAITOK); |
1238 | if (error) | | 1238 | if (error) |
1239 | return error; | | 1239 | return error; |
1240 | error = bus_dmamem_map(sc->dmat, &sc->rx.desc_dmaseg, nsegs, | | 1240 | error = bus_dmamem_map(sc->dmat, &sc->rx.desc_dmaseg, nsegs, |
1241 | RX_DESC_SIZE, (void *)&sc->rx.desc_ring, | | 1241 | RX_DESC_SIZE, (void *)&sc->rx.desc_ring, |
1242 | BUS_DMA_WAITOK); | | 1242 | BUS_DMA_WAITOK); |
1243 | if (error) | | 1243 | if (error) |
1244 | return error; | | 1244 | return error; |
1245 | error = bus_dmamap_load(sc->dmat, sc->rx.desc_map, sc->rx.desc_ring, | | 1245 | error = bus_dmamap_load(sc->dmat, sc->rx.desc_map, sc->rx.desc_ring, |
1246 | RX_DESC_SIZE, NULL, BUS_DMA_WAITOK); | | 1246 | RX_DESC_SIZE, NULL, BUS_DMA_WAITOK); |
1247 | if (error) | | 1247 | if (error) |
1248 | return error; | | 1248 | return error; |
1249 | sc->rx.desc_ring_paddr = sc->rx.desc_map->dm_segs[0].ds_addr; | | 1249 | sc->rx.desc_ring_paddr = sc->rx.desc_map->dm_segs[0].ds_addr; |
1250 | | | 1250 | |
1251 | memset(sc->rx.desc_ring, 0, RX_DESC_SIZE); | | 1251 | memset(sc->rx.desc_ring, 0, RX_DESC_SIZE); |
1252 | | | 1252 | |
1253 | for (i = 0; i < RX_DESC_COUNT; i++) { | | 1253 | for (i = 0; i < RX_DESC_COUNT; i++) { |
1254 | error = bus_dmamap_create(sc->rx.buf_tag, MCLBYTES, | | 1254 | error = bus_dmamap_create(sc->rx.buf_tag, MCLBYTES, |
1255 | RX_DESC_COUNT, MCLBYTES, 0, BUS_DMA_WAITOK, | | 1255 | RX_DESC_COUNT, MCLBYTES, 0, BUS_DMA_WAITOK, |
1256 | &sc->rx.buf_map[i].map); | | 1256 | &sc->rx.buf_map[i].map); |
1257 | if (error != 0) { | | 1257 | if (error != 0) { |
1258 | device_printf(sc->dev, "cannot create RX buffer map\n"); | | 1258 | device_printf(sc->dev, "cannot create RX buffer map\n"); |
1259 | return error; | | 1259 | return error; |
1260 | } | | 1260 | } |
1261 | if ((m = sunxi_emac_alloc_mbufcl(sc)) == NULL) { | | 1261 | if ((m = sunxi_emac_alloc_mbufcl(sc)) == NULL) { |
1262 | device_printf(sc->dev, "cannot allocate RX mbuf\n"); | | 1262 | device_printf(sc->dev, "cannot allocate RX mbuf\n"); |
1263 | return ENOMEM; | | 1263 | return ENOMEM; |
1264 | } | | 1264 | } |
1265 | error = sunxi_emac_setup_rxbuf(sc, i, m); | | 1265 | error = sunxi_emac_setup_rxbuf(sc, i, m); |
1266 | if (error != 0) { | | 1266 | if (error != 0) { |
1267 | device_printf(sc->dev, "cannot create RX buffer\n"); | | 1267 | device_printf(sc->dev, "cannot create RX buffer\n"); |
1268 | return error; | | 1268 | return error; |
1269 | } | | 1269 | } |
1270 | } | | 1270 | } |
1271 | bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map, | | 1271 | bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map, |
1272 | 0, sc->rx.desc_map->dm_mapsize, | | 1272 | 0, sc->rx.desc_map->dm_mapsize, |
1273 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); | | 1273 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
1274 | | | 1274 | |
1275 | /* Write transmit and receive descriptor base address registers */ | | 1275 | /* Write transmit and receive descriptor base address registers */ |
1276 | WR4(sc, EMAC_TX_DMA_LIST, sc->tx.desc_ring_paddr); | | 1276 | WR4(sc, EMAC_TX_DMA_LIST, sc->tx.desc_ring_paddr); |
1277 | WR4(sc, EMAC_RX_DMA_LIST, sc->rx.desc_ring_paddr); | | 1277 | WR4(sc, EMAC_RX_DMA_LIST, sc->rx.desc_ring_paddr); |
1278 | | | 1278 | |
1279 | return 0; | | 1279 | return 0; |
1280 | } | | 1280 | } |
1281 | | | 1281 | |
1282 | static int | | 1282 | static int |
1283 | sunxi_emac_get_resources(struct sunxi_emac_softc *sc) | | 1283 | sunxi_emac_get_resources(struct sunxi_emac_softc *sc) |
1284 | { | | 1284 | { |
1285 | const int phandle = sc->phandle; | | 1285 | const int phandle = sc->phandle; |
1286 | bus_addr_t addr, size; | | 1286 | bus_addr_t addr, size; |
1287 | | | 1287 | |
1288 | /* Map EMAC registers */ | | 1288 | /* Map EMAC registers */ |
1289 | if (fdtbus_get_reg(phandle, 0, &addr, &size) != 0) | | 1289 | if (fdtbus_get_reg(phandle, 0, &addr, &size) != 0) |
1290 | return ENXIO; | | 1290 | return ENXIO; |
1291 | if (bus_space_map(sc->bst, addr, size, 0, &sc->bsh[_RES_EMAC]) != 0) | | 1291 | if (bus_space_map(sc->bst, addr, size, 0, &sc->bsh[_RES_EMAC]) != 0) |
1292 | return ENXIO; | | 1292 | return ENXIO; |
1293 | | | 1293 | |
1294 | /* Map SYSCON registers */ | | 1294 | /* Map SYSCON registers */ |
1295 | if (of_hasprop(phandle, "syscon")) { | | 1295 | if (of_hasprop(phandle, "syscon")) { |
1296 | const int syscon_phandle = fdtbus_get_phandle(phandle, | | 1296 | const int syscon_phandle = fdtbus_get_phandle(phandle, |
1297 | "syscon"); | | 1297 | "syscon"); |
1298 | if (syscon_phandle == -1) | | 1298 | if (syscon_phandle == -1) |
1299 | return ENXIO; | | 1299 | return ENXIO; |
1300 | if (fdtbus_get_reg(syscon_phandle, 0, &addr, &size) != 0) | | 1300 | if (fdtbus_get_reg(syscon_phandle, 0, &addr, &size) != 0) |
1301 | return ENXIO; | | 1301 | return ENXIO; |
1302 | if (size < EMAC_CLK_REG + 4) | | 1302 | if (size < EMAC_CLK_REG + 4) |
1303 | return ENXIO; | | 1303 | return ENXIO; |
1304 | addr += EMAC_CLK_REG; | | 1304 | addr += EMAC_CLK_REG; |
1305 | size -= EMAC_CLK_REG; | | 1305 | size -= EMAC_CLK_REG; |
1306 | } else { | | 1306 | } else { |
1307 | if (fdtbus_get_reg(phandle, 1, &addr, &size) != 0) | | 1307 | if (fdtbus_get_reg(phandle, 1, &addr, &size) != 0) |
1308 | return ENXIO; | | 1308 | return ENXIO; |
1309 | } | | 1309 | } |
1310 | if (bus_space_map(sc->bst, addr, size, 0, &sc->bsh[_RES_SYSCON]) != 0) | | 1310 | if (bus_space_map(sc->bst, addr, size, 0, &sc->bsh[_RES_SYSCON]) != 0) |
1311 | return ENXIO; | | 1311 | return ENXIO; |
1312 | | | 1312 | |
1313 | /* Get clocks and resets. "ahb" is required, "ephy" is optional. */ | | 1313 | /* The "ahb"/"stmmaceth" clock and reset is required */ |
1314 | | | 1314 | if ((sc->clk_ahb = fdtbus_clock_get(phandle, "ahb")) == NULL && |
1315 | if ((sc->clk_ahb = fdtbus_clock_get(phandle, "ahb")) == NULL) | | 1315 | (sc->clk_ahb = fdtbus_clock_get(phandle, "stmmaceth")) == NULL) |
1316 | return ENXIO; | | 1316 | return ENXIO; |
1317 | sc->clk_ephy = fdtbus_clock_get(phandle, "ephy"); | | 1317 | if ((sc->rst_ahb = fdtbus_reset_get(phandle, "ahb")) == NULL && |
1318 | | | 1318 | (sc->rst_ahb = fdtbus_reset_get(phandle, "stmmaceth")) == NULL) |
1319 | if ((sc->rst_ahb = fdtbus_reset_get(phandle, "ahb")) == NULL) | | | |
1320 | return ENXIO; | | 1319 | return ENXIO; |
| | | 1320 | |
| | | 1321 | /* Internal PHY clock and reset are optional properties. */ |
| | | 1322 | sc->clk_ephy = fdtbus_clock_get(phandle, "ephy"); |
| | | 1323 | if (sc->clk_ephy == NULL) { |
| | | 1324 | int phy_phandle = fdtbus_get_phandle(phandle, "phy-handle"); |
| | | 1325 | if (phy_phandle != -1) |
| | | 1326 | sc->clk_ephy = fdtbus_clock_get_index(phy_phandle, 0); |
| | | 1327 | } |
1321 | sc->rst_ephy = fdtbus_reset_get(phandle, "ephy"); | | 1328 | sc->rst_ephy = fdtbus_reset_get(phandle, "ephy"); |
| | | 1329 | if (sc->rst_ephy == NULL) { |
 | | | 1330 | 		int phy_phandle = fdtbus_get_phandle(phandle, "phy-handle");
| | | 1331 | if (phy_phandle != -1) |
| | | 1332 | sc->rst_ephy = fdtbus_reset_get_index(phy_phandle, 0); |
| | | 1333 | } |
1322 | | | 1334 | |
1323 | /* Regulator is optional */ | | 1335 | /* Regulator is optional */ |
1324 | sc->reg_phy = fdtbus_regulator_acquire(phandle, "phy-supply"); | | 1336 | sc->reg_phy = fdtbus_regulator_acquire(phandle, "phy-supply"); |
1325 | | | 1337 | |
1326 | /* Reset GPIO is optional */ | | 1338 | /* Reset GPIO is optional */ |
1327 | sc->pin_reset = fdtbus_gpio_acquire(sc->phandle, | | 1339 | sc->pin_reset = fdtbus_gpio_acquire(sc->phandle, |
1328 | "allwinner,reset-gpio", GPIO_PIN_OUTPUT); | | 1340 | "allwinner,reset-gpio", GPIO_PIN_OUTPUT); |
1329 | | | 1341 | |
1330 | return 0; | | 1342 | return 0; |
1331 | } | | 1343 | } |
1332 | | | 1344 | |
1333 | static int | | 1345 | static int |
1334 | sunxi_emac_get_phyid(struct sunxi_emac_softc *sc) | | 1346 | sunxi_emac_get_phyid(struct sunxi_emac_softc *sc) |
1335 | { | | 1347 | { |
1336 | bus_addr_t addr; | | 1348 | bus_addr_t addr; |
| | | 1349 | int phy_phandle; |
1337 | | | 1350 | |
1338 | const int phy_phandle = fdtbus_get_phandle(sc->phandle, "phy"); | | 1351 | phy_phandle = fdtbus_get_phandle(sc->phandle, "phy"); |
| | | 1352 | if (phy_phandle == -1) |
| | | 1353 | phy_phandle = fdtbus_get_phandle(sc->phandle, "phy-handle"); |
1339 | if (phy_phandle == -1) | | 1354 | if (phy_phandle == -1) |
1340 | return MII_PHY_ANY; | | 1355 | return MII_PHY_ANY; |
1341 | | | 1356 | |
1342 | if (fdtbus_get_reg(phy_phandle, 0, &addr, NULL) != 0) | | 1357 | if (fdtbus_get_reg(phy_phandle, 0, &addr, NULL) != 0) |
1343 | return MII_PHY_ANY; | | 1358 | return MII_PHY_ANY; |
1344 | | | 1359 | |
1345 | return (int)addr; | | 1360 | return (int)addr; |
1346 | } | | 1361 | } |
1347 | | | 1362 | |
1348 | static int | | 1363 | static int |
1349 | sunxi_emac_match(device_t parent, cfdata_t cf, void *aux) | | 1364 | sunxi_emac_match(device_t parent, cfdata_t cf, void *aux) |
1350 | { | | 1365 | { |
1351 | struct fdt_attach_args * const faa = aux; | | 1366 | struct fdt_attach_args * const faa = aux; |
1352 | | | 1367 | |
1353 | return of_match_compat_data(faa->faa_phandle, compat_data); | | 1368 | return of_match_compat_data(faa->faa_phandle, compat_data); |
1354 | } | | 1369 | } |
1355 | | | 1370 | |
1356 | static void | | 1371 | static void |
1357 | sunxi_emac_attach(device_t parent, device_t self, void *aux) | | 1372 | sunxi_emac_attach(device_t parent, device_t self, void *aux) |
1358 | { | | 1373 | { |
1359 | struct fdt_attach_args * const faa = aux; | | 1374 | struct fdt_attach_args * const faa = aux; |
1360 | struct sunxi_emac_softc * const sc = device_private(self); | | 1375 | struct sunxi_emac_softc * const sc = device_private(self); |
1361 | const int phandle = faa->faa_phandle; | | 1376 | const int phandle = faa->faa_phandle; |
1362 | struct mii_data *mii = &sc->mii; | | 1377 | struct mii_data *mii = &sc->mii; |
1363 | struct ifnet *ifp = &sc->ec.ec_if; | | 1378 | struct ifnet *ifp = &sc->ec.ec_if; |
1364 | uint8_t eaddr[ETHER_ADDR_LEN]; | | 1379 | uint8_t eaddr[ETHER_ADDR_LEN]; |
1365 | char intrstr[128]; | | 1380 | char intrstr[128]; |
1366 | | | 1381 | |
1367 | sc->dev = self; | | 1382 | sc->dev = self; |
1368 | sc->phandle = phandle; | | 1383 | sc->phandle = phandle; |
1369 | sc->bst = faa->faa_bst; | | 1384 | sc->bst = faa->faa_bst; |
1370 | sc->dmat = faa->faa_dmat; | | 1385 | sc->dmat = faa->faa_dmat; |
1371 | sc->type = of_search_compatible(phandle, compat_data)->data; | | 1386 | sc->type = of_search_compatible(phandle, compat_data)->data; |
1372 | sc->phy_id = sunxi_emac_get_phyid(sc); | | 1387 | sc->phy_id = sunxi_emac_get_phyid(sc); |
1373 | | | 1388 | |
1374 | if (sunxi_emac_get_resources(sc) != 0) { | | 1389 | if (sunxi_emac_get_resources(sc) != 0) { |
1375 | aprint_error(": cannot allocate resources for device\n"); | | 1390 | aprint_error(": cannot allocate resources for device\n"); |
1376 | return; | | 1391 | return; |
1377 | } | | 1392 | } |
1378 | if (!fdtbus_intr_str(phandle, 0, intrstr, sizeof(intrstr))) { | | 1393 | if (!fdtbus_intr_str(phandle, 0, intrstr, sizeof(intrstr))) { |
1379 | aprint_error(": cannot decode interrupt\n"); | | 1394 | aprint_error(": cannot decode interrupt\n"); |
1380 | return; | | 1395 | return; |
1381 | } | | 1396 | } |
1382 | | | 1397 | |
1383 | mutex_init(&sc->mtx, MUTEX_DEFAULT, IPL_NET); | | 1398 | mutex_init(&sc->mtx, MUTEX_DEFAULT, IPL_NET); |
1384 | callout_init(&sc->stat_ch, CALLOUT_FLAGS); | | 1399 | callout_init(&sc->stat_ch, CALLOUT_FLAGS); |
1385 | callout_setfunc(&sc->stat_ch, sunxi_emac_tick, sc); | | 1400 | callout_setfunc(&sc->stat_ch, sunxi_emac_tick, sc); |
1386 | | | 1401 | |
1387 | aprint_naive("\n"); | | 1402 | aprint_naive("\n"); |
1388 | aprint_normal(": EMAC\n"); | | 1403 | aprint_normal(": EMAC\n"); |
1389 | | | 1404 | |
1390 | /* Setup clocks and regulators */ | | 1405 | /* Setup clocks and regulators */ |
1391 | if (sunxi_emac_setup_resources(sc) != 0) | | 1406 | if (sunxi_emac_setup_resources(sc) != 0) |
1392 | return; | | 1407 | return; |
1393 | | | 1408 | |
1394 | /* Read MAC address before resetting the chip */ | | 1409 | /* Read MAC address before resetting the chip */ |
1395 | sunxi_emac_get_eaddr(sc, eaddr); | | 1410 | sunxi_emac_get_eaddr(sc, eaddr); |
1396 | | | 1411 | |
1397 | /* Soft reset EMAC core */ | | 1412 | /* Soft reset EMAC core */ |
1398 | if (sunxi_emac_reset(sc) != 0) | | 1413 | if (sunxi_emac_reset(sc) != 0) |
1399 | return; | | 1414 | return; |
1400 | | | 1415 | |
1401 | /* Setup DMA descriptors */ | | 1416 | /* Setup DMA descriptors */ |
1402 | if (sunxi_emac_setup_dma(sc) != 0) { | | 1417 | if (sunxi_emac_setup_dma(sc) != 0) { |
1403 | aprint_error_dev(self, "failed to setup DMA descriptors\n"); | | 1418 | aprint_error_dev(self, "failed to setup DMA descriptors\n"); |
1404 | return; | | 1419 | return; |
1405 | } | | 1420 | } |
1406 | | | 1421 | |
1407 | /* Install interrupt handler */ | | 1422 | /* Install interrupt handler */ |
1408 | sc->ih = fdtbus_intr_establish(phandle, 0, IPL_NET, | | 1423 | sc->ih = fdtbus_intr_establish(phandle, 0, IPL_NET, |
1409 | FDT_INTR_FLAGS, sunxi_emac_intr, sc); | | 1424 | FDT_INTR_FLAGS, sunxi_emac_intr, sc); |
1410 | if (sc->ih == NULL) { | | 1425 | if (sc->ih == NULL) { |
1411 | aprint_error_dev(self, "failed to establish interrupt on %s\n", | | 1426 | aprint_error_dev(self, "failed to establish interrupt on %s\n", |
1412 | intrstr); | | 1427 | intrstr); |
1413 | return; | | 1428 | return; |
1414 | } | | 1429 | } |
1415 | aprint_normal_dev(self, "interrupting on %s\n", intrstr); | | 1430 | aprint_normal_dev(self, "interrupting on %s\n", intrstr); |
1416 | | | 1431 | |
1417 | /* Setup ethernet interface */ | | 1432 | /* Setup ethernet interface */ |
1418 | ifp->if_softc = sc; | | 1433 | ifp->if_softc = sc; |
1419 | snprintf(ifp->if_xname, IFNAMSIZ, EMAC_IFNAME, device_unit(self)); | | 1434 | snprintf(ifp->if_xname, IFNAMSIZ, EMAC_IFNAME, device_unit(self)); |
1420 | ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; | | 1435 | ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; |
1421 | #ifdef EMAC_MPSAFE | | 1436 | #ifdef EMAC_MPSAFE |
1422 | ifp->if_extflags = IFEF_MPSAFE; | | 1437 | ifp->if_extflags = IFEF_MPSAFE; |
1423 | #endif | | 1438 | #endif |
1424 | ifp->if_start = sunxi_emac_start; | | 1439 | ifp->if_start = sunxi_emac_start; |
1425 | ifp->if_ioctl = sunxi_emac_ioctl; | | 1440 | ifp->if_ioctl = sunxi_emac_ioctl; |
1426 | ifp->if_init = sunxi_emac_init; | | 1441 | ifp->if_init = sunxi_emac_init; |
1427 | ifp->if_stop = sunxi_emac_stop; | | 1442 | ifp->if_stop = sunxi_emac_stop; |
1428 | ifp->if_capabilities = IFCAP_CSUM_IPv4_Rx | | | 1443 | ifp->if_capabilities = IFCAP_CSUM_IPv4_Rx | |
1429 | IFCAP_CSUM_IPv4_Tx | | | 1444 | IFCAP_CSUM_IPv4_Tx | |
1430 | IFCAP_CSUM_TCPv4_Rx | | | 1445 | IFCAP_CSUM_TCPv4_Rx | |
1431 | IFCAP_CSUM_TCPv4_Tx | | | 1446 | IFCAP_CSUM_TCPv4_Tx | |
1432 | IFCAP_CSUM_UDPv4_Rx | | | 1447 | IFCAP_CSUM_UDPv4_Rx | |
1433 | IFCAP_CSUM_UDPv4_Tx; | | 1448 | IFCAP_CSUM_UDPv4_Tx; |
1434 | IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN); | | 1449 | IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN); |
1435 | IFQ_SET_READY(&ifp->if_snd); | | 1450 | IFQ_SET_READY(&ifp->if_snd); |
1436 | | | 1451 | |
1437 | /* 802.1Q VLAN-sized frames are supported */ | | 1452 | /* 802.1Q VLAN-sized frames are supported */ |
1438 | sc->ec.ec_capabilities |= ETHERCAP_VLAN_MTU; | | 1453 | sc->ec.ec_capabilities |= ETHERCAP_VLAN_MTU; |
1439 | | | 1454 | |
1440 | /* Attach MII driver */ | | 1455 | /* Attach MII driver */ |
1441 | sc->ec.ec_mii = mii; | | 1456 | sc->ec.ec_mii = mii; |
1442 | ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus); | | 1457 | ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus); |
1443 | mii->mii_ifp = ifp; | | 1458 | mii->mii_ifp = ifp; |
1444 | mii->mii_readreg = sunxi_emac_mii_readreg; | | 1459 | mii->mii_readreg = sunxi_emac_mii_readreg; |
1445 | mii->mii_writereg = sunxi_emac_mii_writereg; | | 1460 | mii->mii_writereg = sunxi_emac_mii_writereg; |
1446 | mii->mii_statchg = sunxi_emac_mii_statchg; | | 1461 | mii->mii_statchg = sunxi_emac_mii_statchg; |
1447 | mii_attach(self, mii, 0xffffffff, sc->phy_id, MII_OFFSET_ANY, | | 1462 | mii_attach(self, mii, 0xffffffff, sc->phy_id, MII_OFFSET_ANY, |
1448 | MIIF_DOPAUSE); | | 1463 | MIIF_DOPAUSE); |
1449 | | | 1464 | |
1450 | if (LIST_EMPTY(&mii->mii_phys)) { | | 1465 | if (LIST_EMPTY(&mii->mii_phys)) { |
1451 | aprint_error_dev(self, "no PHY found!\n"); | | 1466 | aprint_error_dev(self, "no PHY found!\n"); |
1452 | return; | | 1467 | return; |
1453 | } | | 1468 | } |
1454 | ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO); | | 1469 | ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO); |
1455 | | | 1470 | |
1456 | /* Attach interface */ | | 1471 | /* Attach interface */ |
1457 | if_attach(ifp); | | 1472 | if_attach(ifp); |
1458 | if_deferred_start_init(ifp, NULL); | | 1473 | if_deferred_start_init(ifp, NULL); |
1459 | | | 1474 | |
1460 | /* Attach ethernet interface */ | | 1475 | /* Attach ethernet interface */ |
1461 | ether_ifattach(ifp, eaddr); | | 1476 | ether_ifattach(ifp, eaddr); |
1462 | } | | 1477 | } |
1463 | | | 1478 | |
1464 | CFATTACH_DECL_NEW(sunxi_emac, sizeof(struct sunxi_emac_softc), | | 1479 | CFATTACH_DECL_NEW(sunxi_emac, sizeof(struct sunxi_emac_softc), |
1465 | sunxi_emac_match, sunxi_emac_attach, NULL, NULL); | | 1480 | sunxi_emac_match, sunxi_emac_attach, NULL, NULL); |