| @@ -1,1259 +1,1262 @@ | | | @@ -1,1259 +1,1262 @@ |
1 | /* $NetBSD: if_age.c,v 1.31 2009/08/05 14:41:12 cegger Exp $ */ | | 1 | /* $NetBSD: if_age.c,v 1.32 2009/08/05 15:29:51 cegger Exp $ */ |
2 | /* $OpenBSD: if_age.c,v 1.1 2009/01/16 05:00:34 kevlo Exp $ */ | | 2 | /* $OpenBSD: if_age.c,v 1.1 2009/01/16 05:00:34 kevlo Exp $ */ |
3 | | | 3 | |
4 | /*- | | 4 | /*- |
5 | * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org> | | 5 | * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org> |
6 | * All rights reserved. | | 6 | * All rights reserved. |
7 | * | | 7 | * |
8 | * Redistribution and use in source and binary forms, with or without | | 8 | * Redistribution and use in source and binary forms, with or without |
9 | * modification, are permitted provided that the following conditions | | 9 | * modification, are permitted provided that the following conditions |
10 | * are met: | | 10 | * are met: |
11 | * 1. Redistributions of source code must retain the above copyright | | 11 | * 1. Redistributions of source code must retain the above copyright |
12 | * notice unmodified, this list of conditions, and the following | | 12 | * notice unmodified, this list of conditions, and the following |
13 | * disclaimer. | | 13 | * disclaimer. |
14 | * 2. Redistributions in binary form must reproduce the above copyright | | 14 | * 2. Redistributions in binary form must reproduce the above copyright |
15 | * notice, this list of conditions and the following disclaimer in the | | 15 | * notice, this list of conditions and the following disclaimer in the |
16 | * documentation and/or other materials provided with the distribution. | | 16 | * documentation and/or other materials provided with the distribution. |
17 | * | | 17 | * |
18 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND | | 18 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
19 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | | 19 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
20 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | | 20 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
21 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE | | 21 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE |
22 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | | 22 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
23 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | | 23 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
24 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | | 24 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
25 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | | 25 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
26 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | | 26 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
27 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | | 27 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
28 | * SUCH DAMAGE. | | 28 | * SUCH DAMAGE. |
29 | */ | | 29 | */ |
30 | | | 30 | |
31 | /* Driver for Attansic Technology Corp. L1 Gigabit Ethernet. */ | | 31 | /* Driver for Attansic Technology Corp. L1 Gigabit Ethernet. */ |
32 | | | 32 | |
33 | #include <sys/cdefs.h> | | 33 | #include <sys/cdefs.h> |
34 | __KERNEL_RCSID(0, "$NetBSD: if_age.c,v 1.31 2009/08/05 14:41:12 cegger Exp $"); | | 34 | __KERNEL_RCSID(0, "$NetBSD: if_age.c,v 1.32 2009/08/05 15:29:51 cegger Exp $"); |
35 | | | 35 | |
36 | #include "bpfilter.h" | | 36 | #include "bpfilter.h" |
37 | #include "vlan.h" | | 37 | #include "vlan.h" |
38 | | | 38 | |
39 | #include <sys/param.h> | | 39 | #include <sys/param.h> |
40 | #include <sys/proc.h> | | 40 | #include <sys/proc.h> |
41 | #include <sys/endian.h> | | 41 | #include <sys/endian.h> |
42 | #include <sys/systm.h> | | 42 | #include <sys/systm.h> |
43 | #include <sys/types.h> | | 43 | #include <sys/types.h> |
44 | #include <sys/sockio.h> | | 44 | #include <sys/sockio.h> |
45 | #include <sys/mbuf.h> | | 45 | #include <sys/mbuf.h> |
46 | #include <sys/queue.h> | | 46 | #include <sys/queue.h> |
47 | #include <sys/kernel.h> | | 47 | #include <sys/kernel.h> |
48 | #include <sys/device.h> | | 48 | #include <sys/device.h> |
49 | #include <sys/callout.h> | | 49 | #include <sys/callout.h> |
50 | #include <sys/socket.h> | | 50 | #include <sys/socket.h> |
51 | | | 51 | |
52 | #include <net/if.h> | | 52 | #include <net/if.h> |
53 | #include <net/if_dl.h> | | 53 | #include <net/if_dl.h> |
54 | #include <net/if_media.h> | | 54 | #include <net/if_media.h> |
55 | #include <net/if_ether.h> | | 55 | #include <net/if_ether.h> |
56 | | | 56 | |
57 | #ifdef INET | | 57 | #ifdef INET |
58 | #include <netinet/in.h> | | 58 | #include <netinet/in.h> |
59 | #include <netinet/in_systm.h> | | 59 | #include <netinet/in_systm.h> |
60 | #include <netinet/in_var.h> | | 60 | #include <netinet/in_var.h> |
61 | #include <netinet/ip.h> | | 61 | #include <netinet/ip.h> |
62 | #endif | | 62 | #endif |
63 | | | 63 | |
64 | #include <net/if_types.h> | | 64 | #include <net/if_types.h> |
65 | #include <net/if_vlanvar.h> | | 65 | #include <net/if_vlanvar.h> |
66 | | | 66 | |
67 | #if NBPFILTER > 0 | | 67 | #if NBPFILTER > 0 |
68 | #include <net/bpf.h> | | 68 | #include <net/bpf.h> |
69 | #endif | | 69 | #endif |
70 | | | 70 | |
71 | #include <sys/rnd.h> | | 71 | #include <sys/rnd.h> |
72 | | | 72 | |
73 | #include <dev/mii/mii.h> | | 73 | #include <dev/mii/mii.h> |
74 | #include <dev/mii/miivar.h> | | 74 | #include <dev/mii/miivar.h> |
75 | | | 75 | |
76 | #include <dev/pci/pcireg.h> | | 76 | #include <dev/pci/pcireg.h> |
77 | #include <dev/pci/pcivar.h> | | 77 | #include <dev/pci/pcivar.h> |
78 | #include <dev/pci/pcidevs.h> | | 78 | #include <dev/pci/pcidevs.h> |
79 | | | 79 | |
80 | #include <dev/pci/if_agereg.h> | | 80 | #include <dev/pci/if_agereg.h> |
81 | | | 81 | |
82 | static int age_match(device_t, cfdata_t, void *); | | 82 | static int age_match(device_t, cfdata_t, void *); |
83 | static void age_attach(device_t, device_t, void *); | | 83 | static void age_attach(device_t, device_t, void *); |
84 | static int age_detach(device_t, int); | | 84 | static int age_detach(device_t, int); |
85 | | | 85 | |
86 | static bool age_resume(device_t PMF_FN_PROTO); | | 86 | static bool age_resume(device_t PMF_FN_PROTO); |
87 | | | 87 | |
88 | static int age_miibus_readreg(device_t, int, int); | | 88 | static int age_miibus_readreg(device_t, int, int); |
89 | static void age_miibus_writereg(device_t, int, int, int); | | 89 | static void age_miibus_writereg(device_t, int, int, int); |
90 | static void age_miibus_statchg(device_t); | | 90 | static void age_miibus_statchg(device_t); |
91 | | | 91 | |
92 | static int age_init(struct ifnet *); | | 92 | static int age_init(struct ifnet *); |
93 | static int age_ioctl(struct ifnet *, u_long, void *); | | 93 | static int age_ioctl(struct ifnet *, u_long, void *); |
94 | static void age_start(struct ifnet *); | | 94 | static void age_start(struct ifnet *); |
95 | static void age_watchdog(struct ifnet *); | | 95 | static void age_watchdog(struct ifnet *); |
96 | static void age_mediastatus(struct ifnet *, struct ifmediareq *); | | 96 | static void age_mediastatus(struct ifnet *, struct ifmediareq *); |
97 | static int age_mediachange(struct ifnet *); | | 97 | static int age_mediachange(struct ifnet *); |
98 | | | 98 | |
99 | static int age_intr(void *); | | 99 | static int age_intr(void *); |
100 | static int age_dma_alloc(struct age_softc *); | | 100 | static int age_dma_alloc(struct age_softc *); |
101 | static void age_dma_free(struct age_softc *); | | 101 | static void age_dma_free(struct age_softc *); |
102 | static void age_get_macaddr(struct age_softc *, uint8_t[]); | | 102 | static void age_get_macaddr(struct age_softc *, uint8_t[]); |
103 | static void age_phy_reset(struct age_softc *); | | 103 | static void age_phy_reset(struct age_softc *); |
104 | | | 104 | |
105 | static int age_encap(struct age_softc *, struct mbuf **); | | 105 | static int age_encap(struct age_softc *, struct mbuf **); |
106 | static void age_init_tx_ring(struct age_softc *); | | 106 | static void age_init_tx_ring(struct age_softc *); |
107 | static int age_init_rx_ring(struct age_softc *); | | 107 | static int age_init_rx_ring(struct age_softc *); |
108 | static void age_init_rr_ring(struct age_softc *); | | 108 | static void age_init_rr_ring(struct age_softc *); |
109 | static void age_init_cmb_block(struct age_softc *); | | 109 | static void age_init_cmb_block(struct age_softc *); |
110 | static void age_init_smb_block(struct age_softc *); | | 110 | static void age_init_smb_block(struct age_softc *); |
111 | static int age_newbuf(struct age_softc *, struct age_rxdesc *, int); | | 111 | static int age_newbuf(struct age_softc *, struct age_rxdesc *, int); |
112 | static void age_mac_config(struct age_softc *); | | 112 | static void age_mac_config(struct age_softc *); |
113 | static void age_txintr(struct age_softc *, int); | | 113 | static void age_txintr(struct age_softc *, int); |
114 | static void age_rxeof(struct age_softc *sc, struct rx_rdesc *); | | 114 | static void age_rxeof(struct age_softc *sc, struct rx_rdesc *); |
115 | static void age_rxintr(struct age_softc *, int); | | 115 | static void age_rxintr(struct age_softc *, int); |
116 | static void age_tick(void *); | | 116 | static void age_tick(void *); |
117 | static void age_reset(struct age_softc *); | | 117 | static void age_reset(struct age_softc *); |
118 | static void age_stop(struct ifnet *, int); | | 118 | static void age_stop(struct ifnet *, int); |
119 | static void age_stats_update(struct age_softc *); | | 119 | static void age_stats_update(struct age_softc *); |
120 | static void age_stop_txmac(struct age_softc *); | | 120 | static void age_stop_txmac(struct age_softc *); |
121 | static void age_stop_rxmac(struct age_softc *); | | 121 | static void age_stop_rxmac(struct age_softc *); |
122 | static void age_rxvlan(struct age_softc *sc); | | 122 | static void age_rxvlan(struct age_softc *sc); |
123 | static void age_rxfilter(struct age_softc *); | | 123 | static void age_rxfilter(struct age_softc *); |
124 | | | 124 | |
125 | CFATTACH_DECL_NEW(age, sizeof(struct age_softc), | | 125 | CFATTACH_DECL_NEW(age, sizeof(struct age_softc), |
126 | age_match, age_attach, age_detach, NULL); | | 126 | age_match, age_attach, age_detach, NULL); |
127 | | | 127 | |
128 | int agedebug = 0; | | 128 | int agedebug = 0; |
129 | #define DPRINTF(x) do { if (agedebug) printf x; } while (0) | | 129 | #define DPRINTF(x) do { if (agedebug) printf x; } while (0) |
130 | | | 130 | |
131 | #define ETHER_ALIGN 2 | | 131 | #define ETHER_ALIGN 2 |
132 | #define AGE_CSUM_FEATURES (M_CSUM_TCPv4 | M_CSUM_UDPv4) | | 132 | #define AGE_CSUM_FEATURES (M_CSUM_TCPv4 | M_CSUM_UDPv4) |
133 | | | 133 | |
134 | static int | | 134 | static int |
135 | age_match(device_t dev, cfdata_t match, void *aux) | | 135 | age_match(device_t dev, cfdata_t match, void *aux) |
136 | { | | 136 | { |
137 | struct pci_attach_args *pa = aux; | | 137 | struct pci_attach_args *pa = aux; |
138 | | | 138 | |
139 | return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ATTANSIC && | | 139 | return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ATTANSIC && |
140 | PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_ATTANSIC_ETHERNET_GIGA); | | 140 | PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_ATTANSIC_ETHERNET_GIGA); |
141 | } | | 141 | } |
142 | | | 142 | |
143 | static void | | 143 | static void |
144 | age_attach(device_t parent, device_t self, void *aux) | | 144 | age_attach(device_t parent, device_t self, void *aux) |
145 | { | | 145 | { |
146 | struct age_softc *sc = device_private(self); | | 146 | struct age_softc *sc = device_private(self); |
147 | struct pci_attach_args *pa = aux; | | 147 | struct pci_attach_args *pa = aux; |
148 | pci_intr_handle_t ih; | | 148 | pci_intr_handle_t ih; |
149 | const char *intrstr; | | 149 | const char *intrstr; |
150 | struct ifnet *ifp = &sc->sc_ec.ec_if; | | 150 | struct ifnet *ifp = &sc->sc_ec.ec_if; |
151 | pcireg_t memtype; | | 151 | pcireg_t memtype; |
152 | int error = 0; | | 152 | int error = 0; |
153 | | | 153 | |
154 | aprint_naive("\n"); | | 154 | aprint_naive("\n"); |
155 | aprint_normal(": Attansic/Atheros L1 Gigabit Ethernet\n"); | | 155 | aprint_normal(": Attansic/Atheros L1 Gigabit Ethernet\n"); |
156 | | | 156 | |
157 | sc->sc_dev = self; | | 157 | sc->sc_dev = self; |
158 | sc->sc_dmat = pa->pa_dmat; | | 158 | sc->sc_dmat = pa->pa_dmat; |
159 | sc->sc_pct = pa->pa_pc; | | 159 | sc->sc_pct = pa->pa_pc; |
160 | sc->sc_pcitag = pa->pa_tag; | | 160 | sc->sc_pcitag = pa->pa_tag; |
161 | | | 161 | |
162 | /* | | 162 | /* |
163 | * Allocate IO memory | | 163 | * Allocate IO memory |
164 | */ | | 164 | */ |
165 | memtype = pci_mapreg_type(sc->sc_pct, sc->sc_pcitag, AGE_PCIR_BAR); | | 165 | memtype = pci_mapreg_type(sc->sc_pct, sc->sc_pcitag, AGE_PCIR_BAR); |
166 | switch (memtype) { | | 166 | switch (memtype) { |
167 | case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: | | 167 | case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: |
168 | case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT_1M: | | 168 | case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT_1M: |
169 | case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: | | 169 | case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: |
170 | break; | | 170 | break; |
171 | default: | | 171 | default: |
172 | aprint_error_dev(self, "invalid base address register\n"); | | 172 | aprint_error_dev(self, "invalid base address register\n"); |
173 | break; | | 173 | break; |
174 | } | | 174 | } |
175 | | | 175 | |
176 | if (pci_mapreg_map(pa, AGE_PCIR_BAR, memtype, 0, &sc->sc_mem_bt, | | 176 | if (pci_mapreg_map(pa, AGE_PCIR_BAR, memtype, 0, &sc->sc_mem_bt, |
177 | &sc->sc_mem_bh, NULL, &sc->sc_mem_size) != 0) { | | 177 | &sc->sc_mem_bh, NULL, &sc->sc_mem_size) != 0) { |
178 | aprint_error_dev(self, "could not map mem space\n"); | | 178 | aprint_error_dev(self, "could not map mem space\n"); |
179 | return; | | 179 | return; |
180 | } | | 180 | } |
181 | | | 181 | |
182 | if (pci_intr_map(pa, &ih) != 0) { | | 182 | if (pci_intr_map(pa, &ih) != 0) { |
183 | aprint_error_dev(self, "could not map interrupt\n"); | | 183 | aprint_error_dev(self, "could not map interrupt\n"); |
184 | goto fail; | | 184 | goto fail; |
185 | } | | 185 | } |
186 | | | 186 | |
187 | /* | | 187 | /* |
188 | * Allocate IRQ | | 188 | * Allocate IRQ |
189 | */ | | 189 | */ |
190 | intrstr = pci_intr_string(sc->sc_pct, ih); | | 190 | intrstr = pci_intr_string(sc->sc_pct, ih); |
191 | sc->sc_irq_handle = pci_intr_establish(sc->sc_pct, ih, IPL_NET, | | 191 | sc->sc_irq_handle = pci_intr_establish(sc->sc_pct, ih, IPL_NET, |
192 | age_intr, sc); | | 192 | age_intr, sc); |
193 | if (sc->sc_irq_handle == NULL) { | | 193 | if (sc->sc_irq_handle == NULL) { |
194 | aprint_error_dev(self, "could not establish interrupt"); | | 194 | aprint_error_dev(self, "could not establish interrupt"); |
195 | if (intrstr != NULL) | | 195 | if (intrstr != NULL) |
196 | aprint_error(" at %s", intrstr); | | 196 | aprint_error(" at %s", intrstr); |
197 | aprint_error("\n"); | | 197 | aprint_error("\n"); |
198 | goto fail; | | 198 | goto fail; |
199 | } | | 199 | } |
200 | aprint_normal_dev(self, "%s\n", intrstr); | | 200 | aprint_normal_dev(self, "%s\n", intrstr); |
201 | | | 201 | |
202 | /* Set PHY address. */ | | 202 | /* Set PHY address. */ |
203 | sc->age_phyaddr = AGE_PHY_ADDR; | | 203 | sc->age_phyaddr = AGE_PHY_ADDR; |
204 | | | 204 | |
205 | /* Reset PHY. */ | | 205 | /* Reset PHY. */ |
206 | age_phy_reset(sc); | | 206 | age_phy_reset(sc); |
207 | | | 207 | |
208 | /* Reset the ethernet controller. */ | | 208 | /* Reset the ethernet controller. */ |
209 | age_reset(sc); | | 209 | age_reset(sc); |
210 | | | 210 | |
211 | /* Get PCI and chip id/revision. */ | | 211 | /* Get PCI and chip id/revision. */ |
212 | sc->age_rev = PCI_REVISION(pa->pa_class); | | 212 | sc->age_rev = PCI_REVISION(pa->pa_class); |
213 | sc->age_chip_rev = CSR_READ_4(sc, AGE_MASTER_CFG) >> | | 213 | sc->age_chip_rev = CSR_READ_4(sc, AGE_MASTER_CFG) >> |
214 | MASTER_CHIP_REV_SHIFT; | | 214 | MASTER_CHIP_REV_SHIFT; |
215 | | | 215 | |
216 | aprint_debug_dev(self, "PCI device revision : 0x%04x\n", sc->age_rev); | | 216 | aprint_debug_dev(self, "PCI device revision : 0x%04x\n", sc->age_rev); |
217 | aprint_debug_dev(self, "Chip id/revision : 0x%04x\n", sc->age_chip_rev); | | 217 | aprint_debug_dev(self, "Chip id/revision : 0x%04x\n", sc->age_chip_rev); |
218 | | | 218 | |
219 | if (agedebug) { | | 219 | if (agedebug) { |
220 | aprint_debug_dev(self, "%d Tx FIFO, %d Rx FIFO\n", | | 220 | aprint_debug_dev(self, "%d Tx FIFO, %d Rx FIFO\n", |
221 | CSR_READ_4(sc, AGE_SRAM_TX_FIFO_LEN), | | 221 | CSR_READ_4(sc, AGE_SRAM_TX_FIFO_LEN), |
222 | CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN)); | | 222 | CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN)); |
223 | } | | 223 | } |
224 | | | 224 | |
225 | /* Set max allowable DMA size. */ | | 225 | /* Set max allowable DMA size. */ |
226 | sc->age_dma_rd_burst = DMA_CFG_RD_BURST_128; | | 226 | sc->age_dma_rd_burst = DMA_CFG_RD_BURST_128; |
227 | sc->age_dma_wr_burst = DMA_CFG_WR_BURST_128; | | 227 | sc->age_dma_wr_burst = DMA_CFG_WR_BURST_128; |
228 | | | 228 | |
229 | /* Allocate DMA stuffs */ | | 229 | /* Allocate DMA stuffs */ |
230 | error = age_dma_alloc(sc); | | 230 | error = age_dma_alloc(sc); |
231 | if (error) | | 231 | if (error) |
232 | goto fail; | | 232 | goto fail; |
233 | | | 233 | |
234 | callout_init(&sc->sc_tick_ch, 0); | | 234 | callout_init(&sc->sc_tick_ch, 0); |
235 | callout_setfunc(&sc->sc_tick_ch, age_tick, sc); | | 235 | callout_setfunc(&sc->sc_tick_ch, age_tick, sc); |
236 | | | 236 | |
237 | /* Load station address. */ | | 237 | /* Load station address. */ |
238 | age_get_macaddr(sc, sc->sc_enaddr); | | 238 | age_get_macaddr(sc, sc->sc_enaddr); |
239 | | | 239 | |
240 | aprint_normal_dev(self, "Ethernet address %s\n", | | 240 | aprint_normal_dev(self, "Ethernet address %s\n", |
241 | ether_sprintf(sc->sc_enaddr)); | | 241 | ether_sprintf(sc->sc_enaddr)); |
242 | | | 242 | |
243 | ifp->if_softc = sc; | | 243 | ifp->if_softc = sc; |
244 | ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; | | 244 | ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; |
245 | ifp->if_init = age_init; | | 245 | ifp->if_init = age_init; |
246 | ifp->if_ioctl = age_ioctl; | | 246 | ifp->if_ioctl = age_ioctl; |
247 | ifp->if_start = age_start; | | 247 | ifp->if_start = age_start; |
248 | ifp->if_stop = age_stop; | | 248 | ifp->if_stop = age_stop; |
249 | ifp->if_watchdog = age_watchdog; | | 249 | ifp->if_watchdog = age_watchdog; |
250 | ifp->if_baudrate = IF_Gbps(1); | | 250 | ifp->if_baudrate = IF_Gbps(1); |
251 | IFQ_SET_MAXLEN(&ifp->if_snd, AGE_TX_RING_CNT - 1); | | 251 | IFQ_SET_MAXLEN(&ifp->if_snd, AGE_TX_RING_CNT - 1); |
252 | IFQ_SET_READY(&ifp->if_snd); | | 252 | IFQ_SET_READY(&ifp->if_snd); |
253 | strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); | | 253 | strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); |
254 | | | 254 | |
255 | sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU; | | 255 | sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU; |
256 | | | 256 | |
| | | 257 | ifp->if_capabilities |= IFCAP_CSUM_IPv4_Rx | |
| | | 258 | IFCAP_CSUM_TCPv4_Rx | |
| | | 259 | IFCAP_CSUM_UDPv4_Rx; |
257 | #ifdef AGE_CHECKSUM | | 260 | #ifdef AGE_CHECKSUM |
258 | ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | | | 261 | ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx | |
259 | IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | | | 262 | IFCAP_CSUM_TCPv4_Tx | |
260 | IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UCPv4_Rx; | | 263 | IFCAP_CSUM_UDPv4_Tx; |
261 | #endif | | 264 | #endif |
262 | | | 265 | |
263 | #if NVLAN > 0 | | 266 | #if NVLAN > 0 |
264 | sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING; | | 267 | sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING; |
265 | #endif | | 268 | #endif |
266 | | | 269 | |
267 | /* Set up MII bus. */ | | 270 | /* Set up MII bus. */ |
268 | sc->sc_miibus.mii_ifp = ifp; | | 271 | sc->sc_miibus.mii_ifp = ifp; |
269 | sc->sc_miibus.mii_readreg = age_miibus_readreg; | | 272 | sc->sc_miibus.mii_readreg = age_miibus_readreg; |
270 | sc->sc_miibus.mii_writereg = age_miibus_writereg; | | 273 | sc->sc_miibus.mii_writereg = age_miibus_writereg; |
271 | sc->sc_miibus.mii_statchg = age_miibus_statchg; | | 274 | sc->sc_miibus.mii_statchg = age_miibus_statchg; |
272 | | | 275 | |
273 | sc->sc_ec.ec_mii = &sc->sc_miibus; | | 276 | sc->sc_ec.ec_mii = &sc->sc_miibus; |
274 | ifmedia_init(&sc->sc_miibus.mii_media, 0, age_mediachange, | | 277 | ifmedia_init(&sc->sc_miibus.mii_media, 0, age_mediachange, |
275 | age_mediastatus); | | 278 | age_mediastatus); |
276 | mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY, | | 279 | mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY, |
277 | MII_OFFSET_ANY, MIIF_DOPAUSE); | | 280 | MII_OFFSET_ANY, MIIF_DOPAUSE); |
278 | | | 281 | |
279 | if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) { | | 282 | if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) { |
280 | aprint_error_dev(self, "no PHY found!\n"); | | 283 | aprint_error_dev(self, "no PHY found!\n"); |
281 | ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL, | | 284 | ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL, |
282 | 0, NULL); | | 285 | 0, NULL); |
283 | ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL); | | 286 | ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL); |
284 | } else | | 287 | } else |
285 | ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO); | | 288 | ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO); |
286 | | | 289 | |
287 | if_attach(ifp); | | 290 | if_attach(ifp); |
288 | ether_ifattach(ifp, sc->sc_enaddr); | | 291 | ether_ifattach(ifp, sc->sc_enaddr); |
289 | | | 292 | |
290 | if (!pmf_device_register(self, NULL, age_resume)) | | 293 | if (!pmf_device_register(self, NULL, age_resume)) |
291 | aprint_error_dev(self, "couldn't establish power handler\n"); | | 294 | aprint_error_dev(self, "couldn't establish power handler\n"); |
292 | else | | 295 | else |
293 | pmf_class_network_register(self, ifp); | | 296 | pmf_class_network_register(self, ifp); |
294 | | | 297 | |
295 | return; | | 298 | return; |
296 | | | 299 | |
297 | fail: | | 300 | fail: |
298 | age_dma_free(sc); | | 301 | age_dma_free(sc); |
299 | if (sc->sc_irq_handle != NULL) { | | 302 | if (sc->sc_irq_handle != NULL) { |
300 | pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle); | | 303 | pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle); |
301 | sc->sc_irq_handle = NULL; | | 304 | sc->sc_irq_handle = NULL; |
302 | } | | 305 | } |
303 | if (sc->sc_mem_size) { | | 306 | if (sc->sc_mem_size) { |
304 | bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size); | | 307 | bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size); |
305 | sc->sc_mem_size = 0; | | 308 | sc->sc_mem_size = 0; |
306 | } | | 309 | } |
307 | } | | 310 | } |
308 | | | 311 | |
309 | static int | | 312 | static int |
310 | age_detach(device_t self, int flags) | | 313 | age_detach(device_t self, int flags) |
311 | { | | 314 | { |
312 | struct age_softc *sc = device_private(self); | | 315 | struct age_softc *sc = device_private(self); |
313 | struct ifnet *ifp = &sc->sc_ec.ec_if; | | 316 | struct ifnet *ifp = &sc->sc_ec.ec_if; |
314 | int s; | | 317 | int s; |
315 | | | 318 | |
316 | pmf_device_deregister(self); | | 319 | pmf_device_deregister(self); |
317 | s = splnet(); | | 320 | s = splnet(); |
318 | age_stop(ifp, 0); | | 321 | age_stop(ifp, 0); |
319 | splx(s); | | 322 | splx(s); |
320 | | | 323 | |
321 | mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY); | | 324 | mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY); |
322 | | | 325 | |
323 | /* Delete all remaining media. */ | | 326 | /* Delete all remaining media. */ |
324 | ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY); | | 327 | ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY); |
325 | | | 328 | |
326 | ether_ifdetach(ifp); | | 329 | ether_ifdetach(ifp); |
327 | if_detach(ifp); | | 330 | if_detach(ifp); |
328 | age_dma_free(sc); | | 331 | age_dma_free(sc); |
329 | | | 332 | |
330 | if (sc->sc_irq_handle != NULL) { | | 333 | if (sc->sc_irq_handle != NULL) { |
331 | pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle); | | 334 | pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle); |
332 | sc->sc_irq_handle = NULL; | | 335 | sc->sc_irq_handle = NULL; |
333 | } | | 336 | } |
334 | if (sc->sc_mem_size) { | | 337 | if (sc->sc_mem_size) { |
335 | bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size); | | 338 | bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size); |
336 | sc->sc_mem_size = 0; | | 339 | sc->sc_mem_size = 0; |
337 | } | | 340 | } |
338 | return 0; | | 341 | return 0; |
339 | } | | 342 | } |
340 | | | 343 | |
341 | /* | | 344 | /* |
342 | * Read a PHY register on the MII of the L1. | | 345 | * Read a PHY register on the MII of the L1. |
343 | */ | | 346 | */ |
344 | static int | | 347 | static int |
345 | age_miibus_readreg(device_t dev, int phy, int reg) | | 348 | age_miibus_readreg(device_t dev, int phy, int reg) |
346 | { | | 349 | { |
347 | struct age_softc *sc = device_private(dev); | | 350 | struct age_softc *sc = device_private(dev); |
348 | uint32_t v; | | 351 | uint32_t v; |
349 | int i; | | 352 | int i; |
350 | | | 353 | |
351 | if (phy != sc->age_phyaddr) | | 354 | if (phy != sc->age_phyaddr) |
352 | return 0; | | 355 | return 0; |
353 | | | 356 | |
354 | CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ | | | 357 | CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ | |
355 | MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg)); | | 358 | MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg)); |
356 | for (i = AGE_PHY_TIMEOUT; i > 0; i--) { | | 359 | for (i = AGE_PHY_TIMEOUT; i > 0; i--) { |
357 | DELAY(1); | | 360 | DELAY(1); |
358 | v = CSR_READ_4(sc, AGE_MDIO); | | 361 | v = CSR_READ_4(sc, AGE_MDIO); |
359 | if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0) | | 362 | if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0) |
360 | break; | | 363 | break; |
361 | } | | 364 | } |
362 | | | 365 | |
363 | if (i == 0) { | | 366 | if (i == 0) { |
364 | printf("%s: phy read timeout: phy %d, reg %d\n", | | 367 | printf("%s: phy read timeout: phy %d, reg %d\n", |
365 | device_xname(sc->sc_dev), phy, reg); | | 368 | device_xname(sc->sc_dev), phy, reg); |
366 | return 0; | | 369 | return 0; |
367 | } | | 370 | } |
368 | | | 371 | |
369 | return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT); | | 372 | return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT); |
370 | } | | 373 | } |
371 | | | 374 | |
372 | /* | | 375 | /* |
373 | * Write a PHY register on the MII of the L1. | | 376 | * Write a PHY register on the MII of the L1. |
374 | */ | | 377 | */ |
375 | static void | | 378 | static void |
376 | age_miibus_writereg(device_t dev, int phy, int reg, int val) | | 379 | age_miibus_writereg(device_t dev, int phy, int reg, int val) |
377 | { | | 380 | { |
378 | struct age_softc *sc = device_private(dev); | | 381 | struct age_softc *sc = device_private(dev); |
379 | uint32_t v; | | 382 | uint32_t v; |
380 | int i; | | 383 | int i; |
381 | | | 384 | |
382 | if (phy != sc->age_phyaddr) | | 385 | if (phy != sc->age_phyaddr) |
383 | return; | | 386 | return; |
384 | | | 387 | |
385 | CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE | | | 388 | CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE | |
386 | (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT | | | 389 | (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT | |
387 | MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg)); | | 390 | MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg)); |
388 | | | 391 | |
389 | for (i = AGE_PHY_TIMEOUT; i > 0; i--) { | | 392 | for (i = AGE_PHY_TIMEOUT; i > 0; i--) { |
390 | DELAY(1); | | 393 | DELAY(1); |
391 | v = CSR_READ_4(sc, AGE_MDIO); | | 394 | v = CSR_READ_4(sc, AGE_MDIO); |
392 | if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0) | | 395 | if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0) |
393 | break; | | 396 | break; |
394 | } | | 397 | } |
395 | | | 398 | |
396 | if (i == 0) { | | 399 | if (i == 0) { |
397 | printf("%s: phy write timeout: phy %d, reg %d\n", | | 400 | printf("%s: phy write timeout: phy %d, reg %d\n", |
398 | device_xname(sc->sc_dev), phy, reg); | | 401 | device_xname(sc->sc_dev), phy, reg); |
399 | } | | 402 | } |
400 | } | | 403 | } |
401 | | | 404 | |
402 | /* | | 405 | /* |
403 | * Callback from MII layer when media changes. | | 406 | * Callback from MII layer when media changes. |
404 | */ | | 407 | */ |
405 | static void | | 408 | static void |
406 | age_miibus_statchg(device_t dev) | | 409 | age_miibus_statchg(device_t dev) |
407 | { | | 410 | { |
408 | struct age_softc *sc = device_private(dev); | | 411 | struct age_softc *sc = device_private(dev); |
409 | struct ifnet *ifp = &sc->sc_ec.ec_if; | | 412 | struct ifnet *ifp = &sc->sc_ec.ec_if; |
410 | struct mii_data *mii; | | 413 | struct mii_data *mii; |
411 | | | 414 | |
412 | if ((ifp->if_flags & IFF_RUNNING) == 0) | | 415 | if ((ifp->if_flags & IFF_RUNNING) == 0) |
413 | return; | | 416 | return; |
414 | | | 417 | |
415 | mii = &sc->sc_miibus; | | 418 | mii = &sc->sc_miibus; |
416 | | | 419 | |
417 | sc->age_flags &= ~AGE_FLAG_LINK; | | 420 | sc->age_flags &= ~AGE_FLAG_LINK; |
418 | if ((mii->mii_media_status & IFM_AVALID) != 0) { | | 421 | if ((mii->mii_media_status & IFM_AVALID) != 0) { |
419 | switch (IFM_SUBTYPE(mii->mii_media_active)) { | | 422 | switch (IFM_SUBTYPE(mii->mii_media_active)) { |
420 | case IFM_10_T: | | 423 | case IFM_10_T: |
421 | case IFM_100_TX: | | 424 | case IFM_100_TX: |
422 | case IFM_1000_T: | | 425 | case IFM_1000_T: |
423 | sc->age_flags |= AGE_FLAG_LINK; | | 426 | sc->age_flags |= AGE_FLAG_LINK; |
424 | break; | | 427 | break; |
425 | default: | | 428 | default: |
426 | break; | | 429 | break; |
427 | } | | 430 | } |
428 | } | | 431 | } |
429 | | | 432 | |
430 | /* Stop Rx/Tx MACs. */ | | 433 | /* Stop Rx/Tx MACs. */ |
431 | age_stop_rxmac(sc); | | 434 | age_stop_rxmac(sc); |
432 | age_stop_txmac(sc); | | 435 | age_stop_txmac(sc); |
433 | | | 436 | |
434 | /* Program MACs with resolved speed/duplex/flow-control. */ | | 437 | /* Program MACs with resolved speed/duplex/flow-control. */ |
435 | if ((sc->age_flags & AGE_FLAG_LINK) != 0) { | | 438 | if ((sc->age_flags & AGE_FLAG_LINK) != 0) { |
436 | uint32_t reg; | | 439 | uint32_t reg; |
437 | | | 440 | |
438 | age_mac_config(sc); | | 441 | age_mac_config(sc); |
439 | reg = CSR_READ_4(sc, AGE_MAC_CFG); | | 442 | reg = CSR_READ_4(sc, AGE_MAC_CFG); |
440 | /* Restart DMA engine and Tx/Rx MAC. */ | | 443 | /* Restart DMA engine and Tx/Rx MAC. */ |
441 | CSR_WRITE_4(sc, AGE_DMA_CFG, CSR_READ_4(sc, AGE_DMA_CFG) | | | 444 | CSR_WRITE_4(sc, AGE_DMA_CFG, CSR_READ_4(sc, AGE_DMA_CFG) | |
442 | DMA_CFG_RD_ENB | DMA_CFG_WR_ENB); | | 445 | DMA_CFG_RD_ENB | DMA_CFG_WR_ENB); |
443 | reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB; | | 446 | reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB; |
444 | CSR_WRITE_4(sc, AGE_MAC_CFG, reg); | | 447 | CSR_WRITE_4(sc, AGE_MAC_CFG, reg); |
445 | } | | 448 | } |
446 | } | | 449 | } |
447 | | | 450 | |
448 | /* | | 451 | /* |
449 | * Get the current interface media status. | | 452 | * Get the current interface media status. |
450 | */ | | 453 | */ |
451 | static void | | 454 | static void |
452 | age_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) | | 455 | age_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) |
453 | { | | 456 | { |
454 | struct age_softc *sc = ifp->if_softc; | | 457 | struct age_softc *sc = ifp->if_softc; |
455 | struct mii_data *mii = &sc->sc_miibus; | | 458 | struct mii_data *mii = &sc->sc_miibus; |
456 | | | 459 | |
457 | mii_pollstat(mii); | | 460 | mii_pollstat(mii); |
458 | ifmr->ifm_status = mii->mii_media_status; | | 461 | ifmr->ifm_status = mii->mii_media_status; |
459 | ifmr->ifm_active = mii->mii_media_active; | | 462 | ifmr->ifm_active = mii->mii_media_active; |
460 | } | | 463 | } |
461 | | | 464 | |
462 | /* | | 465 | /* |
463 | * Set hardware to newly-selected media. | | 466 | * Set hardware to newly-selected media. |
464 | */ | | 467 | */ |
465 | static int | | 468 | static int |
466 | age_mediachange(struct ifnet *ifp) | | 469 | age_mediachange(struct ifnet *ifp) |
467 | { | | 470 | { |
468 | struct age_softc *sc = ifp->if_softc; | | 471 | struct age_softc *sc = ifp->if_softc; |
469 | struct mii_data *mii = &sc->sc_miibus; | | 472 | struct mii_data *mii = &sc->sc_miibus; |
470 | int error; | | 473 | int error; |
471 | | | 474 | |
472 | if (mii->mii_instance != 0) { | | 475 | if (mii->mii_instance != 0) { |
473 | struct mii_softc *miisc; | | 476 | struct mii_softc *miisc; |
474 | | | 477 | |
475 | LIST_FOREACH(miisc, &mii->mii_phys, mii_list) | | 478 | LIST_FOREACH(miisc, &mii->mii_phys, mii_list) |
476 | mii_phy_reset(miisc); | | 479 | mii_phy_reset(miisc); |
477 | } | | 480 | } |
478 | error = mii_mediachg(mii); | | 481 | error = mii_mediachg(mii); |
479 | | | 482 | |
480 | return error; | | 483 | return error; |
481 | } | | 484 | } |
482 | | | 485 | |
/*
 * Interrupt handler.  The controller reports interrupt causes both in
 * the AGE_INTR_STATUS register and in the DMA'd Coalescing Message
 * Block (CMB); the CMB copy also carries the current Tx consumer and
 * Rx-return producer indices.  Returns 1 if the interrupt was ours,
 * 0 otherwise.
 */
static int
age_intr(void *arg)
{
	struct age_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct cmb *cmb;
	uint32_t status;

	/* Cheap register check first: bail out on shared-IRQ strays. */
	status = CSR_READ_4(sc, AGE_INTR_STATUS);
	if (status == 0 || (status & AGE_INTRS) == 0)
		return 0;

	cmb = sc->age_rdata.age_cmb_block;
	if (cmb == NULL) {
		/* Happens when bringing up the interface
		 * w/o having a carrier. Ack. the interrupt.
		 */
		CSR_WRITE_4(sc, AGE_INTR_STATUS, status);
		return 0;
	}

	/* Disable interrupts. */
	CSR_WRITE_4(sc, AGE_INTR_STATUS, status | INTR_DIS_INT);

	/* Pull the device-written CMB into CPU view before reading it. */
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
	    sc->age_cdata.age_cmb_block_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	status = le32toh(cmb->intr_status);
	if ((status & AGE_INTRS) == 0)
		goto back;

	/* Snapshot ring indices delivered via the CMB. */
	sc->age_tpd_cons = (le32toh(cmb->tpd_cons) & TPD_CONS_MASK) >>
	    TPD_CONS_SHIFT;
	sc->age_rr_prod = (le32toh(cmb->rprod_cons) & RRD_PROD_MASK) >>
	    RRD_PROD_SHIFT;

	/* Let hardware know CMB was served. */
	cmb->intr_status = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
	    sc->age_cdata.age_cmb_block_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	if (ifp->if_flags & IFF_RUNNING) {
		if (status & INTR_CMB_RX)
			age_rxintr(sc, sc->age_rr_prod);

		if (status & INTR_CMB_TX)
			age_txintr(sc, sc->age_tpd_cons);

		/* DMA engine timeouts are fatal; reinitialize the chip. */
		if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) {
			if (status & INTR_DMA_RD_TO_RST)
				printf("%s: DMA read error! -- resetting\n",
				    device_xname(sc->sc_dev));
			if (status & INTR_DMA_WR_TO_RST)
				printf("%s: DMA write error! -- resetting\n",
				    device_xname(sc->sc_dev));
			age_init(ifp);
		}

		/* Kick transmission if packets are queued. */
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			age_start(ifp);

		if (status & INTR_SMB)
			age_stats_update(sc);
	}

	/* Check whether CMB was updated while serving Tx/Rx/SMB handler. */
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
	    sc->age_cdata.age_cmb_block_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);

back:
	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);

	return 1;
}
559 | | | 562 | |
560 | static void | | 563 | static void |
561 | age_get_macaddr(struct age_softc *sc, uint8_t eaddr[]) | | 564 | age_get_macaddr(struct age_softc *sc, uint8_t eaddr[]) |
562 | { | | 565 | { |
563 | uint32_t ea[2], reg; | | 566 | uint32_t ea[2], reg; |
564 | int i, vpdc; | | 567 | int i, vpdc; |
565 | | | 568 | |
566 | reg = CSR_READ_4(sc, AGE_SPI_CTRL); | | 569 | reg = CSR_READ_4(sc, AGE_SPI_CTRL); |
567 | if ((reg & SPI_VPD_ENB) != 0) { | | 570 | if ((reg & SPI_VPD_ENB) != 0) { |
568 | /* Get VPD stored in TWSI EEPROM. */ | | 571 | /* Get VPD stored in TWSI EEPROM. */ |
569 | reg &= ~SPI_VPD_ENB; | | 572 | reg &= ~SPI_VPD_ENB; |
570 | CSR_WRITE_4(sc, AGE_SPI_CTRL, reg); | | 573 | CSR_WRITE_4(sc, AGE_SPI_CTRL, reg); |
571 | } | | 574 | } |
572 | | | 575 | |
573 | if (pci_get_capability(sc->sc_pct, sc->sc_pcitag, | | 576 | if (pci_get_capability(sc->sc_pct, sc->sc_pcitag, |
574 | PCI_CAP_VPD, &vpdc, NULL)) { | | 577 | PCI_CAP_VPD, &vpdc, NULL)) { |
575 | /* | | 578 | /* |
576 | * PCI VPD capability found, let TWSI reload EEPROM. | | 579 | * PCI VPD capability found, let TWSI reload EEPROM. |
577 | * This will set Ethernet address of controller. | | 580 | * This will set Ethernet address of controller. |
578 | */ | | 581 | */ |
579 | CSR_WRITE_4(sc, AGE_TWSI_CTRL, CSR_READ_4(sc, AGE_TWSI_CTRL) | | | 582 | CSR_WRITE_4(sc, AGE_TWSI_CTRL, CSR_READ_4(sc, AGE_TWSI_CTRL) | |
580 | TWSI_CTRL_SW_LD_START); | | 583 | TWSI_CTRL_SW_LD_START); |
581 | for (i = 100; i > 0; i++) { | | 584 | for (i = 100; i > 0; i++) { |
582 | DELAY(1000); | | 585 | DELAY(1000); |
583 | reg = CSR_READ_4(sc, AGE_TWSI_CTRL); | | 586 | reg = CSR_READ_4(sc, AGE_TWSI_CTRL); |
584 | if ((reg & TWSI_CTRL_SW_LD_START) == 0) | | 587 | if ((reg & TWSI_CTRL_SW_LD_START) == 0) |
585 | break; | | 588 | break; |
586 | } | | 589 | } |
587 | if (i == 0) | | 590 | if (i == 0) |
588 | printf("%s: reloading EEPROM timeout!\n", | | 591 | printf("%s: reloading EEPROM timeout!\n", |
589 | device_xname(sc->sc_dev)); | | 592 | device_xname(sc->sc_dev)); |
590 | } else { | | 593 | } else { |
591 | if (agedebug) | | 594 | if (agedebug) |
592 | printf("%s: PCI VPD capability not found!\n", | | 595 | printf("%s: PCI VPD capability not found!\n", |
593 | device_xname(sc->sc_dev)); | | 596 | device_xname(sc->sc_dev)); |
594 | } | | 597 | } |
595 | | | 598 | |
596 | ea[0] = CSR_READ_4(sc, AGE_PAR0); | | 599 | ea[0] = CSR_READ_4(sc, AGE_PAR0); |
597 | ea[1] = CSR_READ_4(sc, AGE_PAR1); | | 600 | ea[1] = CSR_READ_4(sc, AGE_PAR1); |
598 | | | 601 | |
599 | eaddr[0] = (ea[1] >> 8) & 0xFF; | | 602 | eaddr[0] = (ea[1] >> 8) & 0xFF; |
600 | eaddr[1] = (ea[1] >> 0) & 0xFF; | | 603 | eaddr[1] = (ea[1] >> 0) & 0xFF; |
601 | eaddr[2] = (ea[0] >> 24) & 0xFF; | | 604 | eaddr[2] = (ea[0] >> 24) & 0xFF; |
602 | eaddr[3] = (ea[0] >> 16) & 0xFF; | | 605 | eaddr[3] = (ea[0] >> 16) & 0xFF; |
603 | eaddr[4] = (ea[0] >> 8) & 0xFF; | | 606 | eaddr[4] = (ea[0] >> 8) & 0xFF; |
604 | eaddr[5] = (ea[0] >> 0) & 0xFF; | | 607 | eaddr[5] = (ea[0] >> 0) & 0xFF; |
605 | } | | 608 | } |
606 | | | 609 | |
/*
 * Reset the PHY and work around its power-saving mode.  After the
 * hardware reset, a cable-diagnostic test is run on each of the four
 * wire pairs; if none of them reports anything other than "open"
 * (i.e. no cable appears to be attached), undocumented debug
 * registers are poked to keep the PHY responsive.  The register
 * numbers and values are magic taken from the Linux atl1 driver.
 */
static void
age_phy_reset(struct age_softc *sc)
{
	uint16_t reg, pn;
	int i, linkup;

	/* Reset PHY. */
	CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_RST);
	DELAY(2000);
	CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_CLR);
	DELAY(2000);

	/* Vendor-specific PHY registers (Attansic F1/L1 PHY). */
#define ATPHY_DBG_ADDR		0x1D
#define ATPHY_DBG_DATA		0x1E
#define ATPHY_CDTC		0x16
#define PHY_CDTC_ENB		0x0001
#define PHY_CDTC_POFF		8
#define ATPHY_CDTS		0x1C
#define PHY_CDTS_STAT_OK	0x0000
#define PHY_CDTS_STAT_SHORT	0x0100
#define PHY_CDTS_STAT_OPEN	0x0200
#define PHY_CDTS_STAT_INVAL	0x0300
#define PHY_CDTS_STAT_MASK	0x0300

	/* Check power saving mode. Magic from Linux. */
	age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, MII_BMCR, BMCR_RESET);
	/* Run cable diagnostics on each pair (pn) until one shows a link. */
	for (linkup = 0, pn = 0; pn < 4; pn++) {
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, ATPHY_CDTC,
		    (pn << PHY_CDTC_POFF) | PHY_CDTC_ENB);
		/* Wait up to ~200ms for the diagnostic to complete. */
		for (i = 200; i > 0; i--) {
			DELAY(1000);
			reg = age_miibus_readreg(sc->sc_dev, sc->age_phyaddr,
			    ATPHY_CDTC);
			if ((reg & PHY_CDTC_ENB) == 0)
				break;
		}
		DELAY(1000);
		reg = age_miibus_readreg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_CDTS);
		/* Anything but "open" suggests a cable is present. */
		if ((reg & PHY_CDTS_STAT_MASK) != PHY_CDTS_STAT_OPEN) {
			linkup++;
			break;
		}
	}
	age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, MII_BMCR,
	    BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
	if (linkup == 0) {
		/*
		 * No cable detected: toggle undocumented debug registers
		 * to defeat the PHY's power-saving mode (magic values,
		 * presumably from the vendor's Linux driver).
		 */
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 0);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, 0x124E);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 1);
		reg = age_miibus_readreg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, reg | 0x03);
		/* XXX */
		DELAY(1500 * 1000);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 0);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, 0x024E);
	}

#undef ATPHY_DBG_ADDR
#undef ATPHY_DBG_DATA
#undef ATPHY_CDTC
#undef PHY_CDTC_ENB
#undef PHY_CDTC_POFF
#undef ATPHY_CDTS
#undef PHY_CDTS_STAT_OK
#undef PHY_CDTS_STAT_SHORT
#undef PHY_CDTS_STAT_OPEN
#undef PHY_CDTS_STAT_INVAL
#undef PHY_CDTS_STAT_MASK
}
684 | | | 687 | |
685 | static int | | 688 | static int |
686 | age_dma_alloc(struct age_softc *sc) | | 689 | age_dma_alloc(struct age_softc *sc) |
687 | { | | 690 | { |
688 | struct age_txdesc *txd; | | 691 | struct age_txdesc *txd; |
689 | struct age_rxdesc *rxd; | | 692 | struct age_rxdesc *rxd; |
690 | int nsegs, error, i; | | 693 | int nsegs, error, i; |
691 | | | 694 | |
692 | /* | | 695 | /* |
693 | * Create DMA stuffs for TX ring | | 696 | * Create DMA stuffs for TX ring |
694 | */ | | 697 | */ |
695 | error = bus_dmamap_create(sc->sc_dmat, AGE_TX_RING_SZ, 1, | | 698 | error = bus_dmamap_create(sc->sc_dmat, AGE_TX_RING_SZ, 1, |
696 | AGE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_tx_ring_map); | | 699 | AGE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_tx_ring_map); |
697 | if (error) { | | 700 | if (error) { |
698 | sc->age_cdata.age_tx_ring_map = NULL; | | 701 | sc->age_cdata.age_tx_ring_map = NULL; |
699 | return ENOBUFS; | | 702 | return ENOBUFS; |
700 | } | | 703 | } |
701 | | | 704 | |
702 | /* Allocate DMA'able memory for TX ring */ | | 705 | /* Allocate DMA'able memory for TX ring */ |
703 | error = bus_dmamem_alloc(sc->sc_dmat, AGE_TX_RING_SZ, | | 706 | error = bus_dmamem_alloc(sc->sc_dmat, AGE_TX_RING_SZ, |
704 | ETHER_ALIGN, 0, &sc->age_rdata.age_tx_ring_seg, 1, | | 707 | ETHER_ALIGN, 0, &sc->age_rdata.age_tx_ring_seg, 1, |
705 | &nsegs, BUS_DMA_WAITOK); | | 708 | &nsegs, BUS_DMA_WAITOK); |
706 | if (error) { | | 709 | if (error) { |
707 | printf("%s: could not allocate DMA'able memory for Tx ring, " | | 710 | printf("%s: could not allocate DMA'able memory for Tx ring, " |
708 | "error = %i\n", device_xname(sc->sc_dev), error); | | 711 | "error = %i\n", device_xname(sc->sc_dev), error); |
709 | return error; | | 712 | return error; |
710 | } | | 713 | } |
711 | | | 714 | |
712 | error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_tx_ring_seg, | | 715 | error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_tx_ring_seg, |
713 | nsegs, AGE_TX_RING_SZ, (void **)&sc->age_rdata.age_tx_ring, | | 716 | nsegs, AGE_TX_RING_SZ, (void **)&sc->age_rdata.age_tx_ring, |
714 | BUS_DMA_NOWAIT); | | 717 | BUS_DMA_NOWAIT); |
715 | if (error) | | 718 | if (error) |
716 | return ENOBUFS; | | 719 | return ENOBUFS; |
717 | | | 720 | |
718 | memset(sc->age_rdata.age_tx_ring, 0, AGE_TX_RING_SZ); | | 721 | memset(sc->age_rdata.age_tx_ring, 0, AGE_TX_RING_SZ); |
719 | | | 722 | |
720 | /* Load the DMA map for Tx ring. */ | | 723 | /* Load the DMA map for Tx ring. */ |
721 | error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, | | 724 | error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, |
722 | sc->age_rdata.age_tx_ring, AGE_TX_RING_SZ, NULL, BUS_DMA_WAITOK); | | 725 | sc->age_rdata.age_tx_ring, AGE_TX_RING_SZ, NULL, BUS_DMA_WAITOK); |
723 | if (error) { | | 726 | if (error) { |
724 | printf("%s: could not load DMA'able memory for Tx ring, " | | 727 | printf("%s: could not load DMA'able memory for Tx ring, " |
725 | "error = %i\n", device_xname(sc->sc_dev), error); | | 728 | "error = %i\n", device_xname(sc->sc_dev), error); |
726 | bus_dmamem_free(sc->sc_dmat, | | 729 | bus_dmamem_free(sc->sc_dmat, |
727 | &sc->age_rdata.age_tx_ring_seg, 1); | | 730 | &sc->age_rdata.age_tx_ring_seg, 1); |
728 | return error; | | 731 | return error; |
729 | } | | 732 | } |
730 | | | 733 | |
731 | sc->age_rdata.age_tx_ring_paddr = | | 734 | sc->age_rdata.age_tx_ring_paddr = |
732 | sc->age_cdata.age_tx_ring_map->dm_segs[0].ds_addr; | | 735 | sc->age_cdata.age_tx_ring_map->dm_segs[0].ds_addr; |
733 | | | 736 | |
734 | /* | | 737 | /* |
735 | * Create DMA stuffs for RX ring | | 738 | * Create DMA stuffs for RX ring |
736 | */ | | 739 | */ |
737 | error = bus_dmamap_create(sc->sc_dmat, AGE_RX_RING_SZ, 1, | | 740 | error = bus_dmamap_create(sc->sc_dmat, AGE_RX_RING_SZ, 1, |
738 | AGE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_ring_map); | | 741 | AGE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_ring_map); |
739 | if (error) { | | 742 | if (error) { |
740 | sc->age_cdata.age_rx_ring_map = NULL; | | 743 | sc->age_cdata.age_rx_ring_map = NULL; |
741 | return ENOBUFS; | | 744 | return ENOBUFS; |
742 | } | | 745 | } |
743 | | | 746 | |
744 | /* Allocate DMA'able memory for RX ring */ | | 747 | /* Allocate DMA'able memory for RX ring */ |
745 | error = bus_dmamem_alloc(sc->sc_dmat, AGE_RX_RING_SZ, | | 748 | error = bus_dmamem_alloc(sc->sc_dmat, AGE_RX_RING_SZ, |
746 | ETHER_ALIGN, 0, &sc->age_rdata.age_rx_ring_seg, 1, | | 749 | ETHER_ALIGN, 0, &sc->age_rdata.age_rx_ring_seg, 1, |
747 | &nsegs, BUS_DMA_WAITOK); | | 750 | &nsegs, BUS_DMA_WAITOK); |
748 | if (error) { | | 751 | if (error) { |
749 | printf("%s: could not allocate DMA'able memory for Rx ring, " | | 752 | printf("%s: could not allocate DMA'able memory for Rx ring, " |
750 | "error = %i.\n", device_xname(sc->sc_dev), error); | | 753 | "error = %i.\n", device_xname(sc->sc_dev), error); |
751 | return error; | | 754 | return error; |
752 | } | | 755 | } |
753 | | | 756 | |
754 | error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rx_ring_seg, | | 757 | error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rx_ring_seg, |
755 | nsegs, AGE_RX_RING_SZ, (void **)&sc->age_rdata.age_rx_ring, | | 758 | nsegs, AGE_RX_RING_SZ, (void **)&sc->age_rdata.age_rx_ring, |
756 | BUS_DMA_NOWAIT); | | 759 | BUS_DMA_NOWAIT); |
757 | if (error) | | 760 | if (error) |
758 | return ENOBUFS; | | 761 | return ENOBUFS; |
759 | | | 762 | |
760 | memset(sc->age_rdata.age_rx_ring, 0, AGE_RX_RING_SZ); | | 763 | memset(sc->age_rdata.age_rx_ring, 0, AGE_RX_RING_SZ); |
761 | | | 764 | |
762 | /* Load the DMA map for Rx ring. */ | | 765 | /* Load the DMA map for Rx ring. */ |
763 | error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rx_ring_map, | | 766 | error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rx_ring_map, |
764 | sc->age_rdata.age_rx_ring, AGE_RX_RING_SZ, NULL, BUS_DMA_WAITOK); | | 767 | sc->age_rdata.age_rx_ring, AGE_RX_RING_SZ, NULL, BUS_DMA_WAITOK); |
765 | if (error) { | | 768 | if (error) { |
766 | printf("%s: could not load DMA'able memory for Rx ring, " | | 769 | printf("%s: could not load DMA'able memory for Rx ring, " |
767 | "error = %i.\n", device_xname(sc->sc_dev), error); | | 770 | "error = %i.\n", device_xname(sc->sc_dev), error); |
768 | bus_dmamem_free(sc->sc_dmat, | | 771 | bus_dmamem_free(sc->sc_dmat, |
769 | &sc->age_rdata.age_rx_ring_seg, 1); | | 772 | &sc->age_rdata.age_rx_ring_seg, 1); |
770 | return error; | | 773 | return error; |
771 | } | | 774 | } |
772 | | | 775 | |
773 | sc->age_rdata.age_rx_ring_paddr = | | 776 | sc->age_rdata.age_rx_ring_paddr = |
774 | sc->age_cdata.age_rx_ring_map->dm_segs[0].ds_addr; | | 777 | sc->age_cdata.age_rx_ring_map->dm_segs[0].ds_addr; |
775 | | | 778 | |
776 | /* | | 779 | /* |
777 | * Create DMA stuffs for RX return ring | | 780 | * Create DMA stuffs for RX return ring |
778 | */ | | 781 | */ |
779 | error = bus_dmamap_create(sc->sc_dmat, AGE_RR_RING_SZ, 1, | | 782 | error = bus_dmamap_create(sc->sc_dmat, AGE_RR_RING_SZ, 1, |
780 | AGE_RR_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rr_ring_map); | | 783 | AGE_RR_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rr_ring_map); |
781 | if (error) { | | 784 | if (error) { |
782 | sc->age_cdata.age_rr_ring_map = NULL; | | 785 | sc->age_cdata.age_rr_ring_map = NULL; |
783 | return ENOBUFS; | | 786 | return ENOBUFS; |
784 | } | | 787 | } |
785 | | | 788 | |
786 | /* Allocate DMA'able memory for RX return ring */ | | 789 | /* Allocate DMA'able memory for RX return ring */ |
787 | error = bus_dmamem_alloc(sc->sc_dmat, AGE_RR_RING_SZ, | | 790 | error = bus_dmamem_alloc(sc->sc_dmat, AGE_RR_RING_SZ, |
788 | ETHER_ALIGN, 0, &sc->age_rdata.age_rr_ring_seg, 1, | | 791 | ETHER_ALIGN, 0, &sc->age_rdata.age_rr_ring_seg, 1, |
789 | &nsegs, BUS_DMA_WAITOK); | | 792 | &nsegs, BUS_DMA_WAITOK); |
790 | if (error) { | | 793 | if (error) { |
791 | printf("%s: could not allocate DMA'able memory for Rx " | | 794 | printf("%s: could not allocate DMA'able memory for Rx " |
792 | "return ring, error = %i.\n", | | 795 | "return ring, error = %i.\n", |
793 | device_xname(sc->sc_dev), error); | | 796 | device_xname(sc->sc_dev), error); |
794 | return error; | | 797 | return error; |
795 | } | | 798 | } |
796 | | | 799 | |
797 | error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rr_ring_seg, | | 800 | error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rr_ring_seg, |
798 | nsegs, AGE_RR_RING_SZ, (void **)&sc->age_rdata.age_rr_ring, | | 801 | nsegs, AGE_RR_RING_SZ, (void **)&sc->age_rdata.age_rr_ring, |
799 | BUS_DMA_NOWAIT); | | 802 | BUS_DMA_NOWAIT); |
800 | if (error) | | 803 | if (error) |
801 | return ENOBUFS; | | 804 | return ENOBUFS; |
802 | | | 805 | |
803 | memset(sc->age_rdata.age_rr_ring, 0, AGE_RR_RING_SZ); | | 806 | memset(sc->age_rdata.age_rr_ring, 0, AGE_RR_RING_SZ); |
804 | | | 807 | |
805 | /* Load the DMA map for Rx return ring. */ | | 808 | /* Load the DMA map for Rx return ring. */ |
806 | error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, | | 809 | error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, |
807 | sc->age_rdata.age_rr_ring, AGE_RR_RING_SZ, NULL, BUS_DMA_WAITOK); | | 810 | sc->age_rdata.age_rr_ring, AGE_RR_RING_SZ, NULL, BUS_DMA_WAITOK); |
808 | if (error) { | | 811 | if (error) { |
809 | printf("%s: could not load DMA'able memory for Rx return ring, " | | 812 | printf("%s: could not load DMA'able memory for Rx return ring, " |
810 | "error = %i\n", device_xname(sc->sc_dev), error); | | 813 | "error = %i\n", device_xname(sc->sc_dev), error); |
811 | bus_dmamem_free(sc->sc_dmat, | | 814 | bus_dmamem_free(sc->sc_dmat, |
812 | &sc->age_rdata.age_rr_ring_seg, 1); | | 815 | &sc->age_rdata.age_rr_ring_seg, 1); |
813 | return error; | | 816 | return error; |
814 | } | | 817 | } |
815 | | | 818 | |
816 | sc->age_rdata.age_rr_ring_paddr = | | 819 | sc->age_rdata.age_rr_ring_paddr = |
817 | sc->age_cdata.age_rr_ring_map->dm_segs[0].ds_addr; | | 820 | sc->age_cdata.age_rr_ring_map->dm_segs[0].ds_addr; |
818 | | | 821 | |
819 | /* | | 822 | /* |
820 | * Create DMA stuffs for CMB block | | 823 | * Create DMA stuffs for CMB block |
821 | */ | | 824 | */ |
822 | error = bus_dmamap_create(sc->sc_dmat, AGE_CMB_BLOCK_SZ, 1, | | 825 | error = bus_dmamap_create(sc->sc_dmat, AGE_CMB_BLOCK_SZ, 1, |
823 | AGE_CMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT, | | 826 | AGE_CMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT, |
824 | &sc->age_cdata.age_cmb_block_map); | | 827 | &sc->age_cdata.age_cmb_block_map); |
825 | if (error) { | | 828 | if (error) { |
826 | sc->age_cdata.age_cmb_block_map = NULL; | | 829 | sc->age_cdata.age_cmb_block_map = NULL; |
827 | return ENOBUFS; | | 830 | return ENOBUFS; |
828 | } | | 831 | } |
829 | | | 832 | |
830 | /* Allocate DMA'able memory for CMB block */ | | 833 | /* Allocate DMA'able memory for CMB block */ |
831 | error = bus_dmamem_alloc(sc->sc_dmat, AGE_CMB_BLOCK_SZ, | | 834 | error = bus_dmamem_alloc(sc->sc_dmat, AGE_CMB_BLOCK_SZ, |
832 | ETHER_ALIGN, 0, &sc->age_rdata.age_cmb_block_seg, 1, | | 835 | ETHER_ALIGN, 0, &sc->age_rdata.age_cmb_block_seg, 1, |
833 | &nsegs, BUS_DMA_WAITOK); | | 836 | &nsegs, BUS_DMA_WAITOK); |
834 | if (error) { | | 837 | if (error) { |
835 | printf("%s: could not allocate DMA'able memory for " | | 838 | printf("%s: could not allocate DMA'able memory for " |
836 | "CMB block, error = %i\n", device_xname(sc->sc_dev), error); | | 839 | "CMB block, error = %i\n", device_xname(sc->sc_dev), error); |
837 | return error; | | 840 | return error; |
838 | } | | 841 | } |
839 | | | 842 | |
840 | error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_cmb_block_seg, | | 843 | error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_cmb_block_seg, |
841 | nsegs, AGE_CMB_BLOCK_SZ, (void **)&sc->age_rdata.age_cmb_block, | | 844 | nsegs, AGE_CMB_BLOCK_SZ, (void **)&sc->age_rdata.age_cmb_block, |
842 | BUS_DMA_NOWAIT); | | 845 | BUS_DMA_NOWAIT); |
843 | if (error) | | 846 | if (error) |
844 | return ENOBUFS; | | 847 | return ENOBUFS; |
845 | | | 848 | |
846 | memset(sc->age_rdata.age_cmb_block, 0, AGE_CMB_BLOCK_SZ); | | 849 | memset(sc->age_rdata.age_cmb_block, 0, AGE_CMB_BLOCK_SZ); |
847 | | | 850 | |
848 | /* Load the DMA map for CMB block. */ | | 851 | /* Load the DMA map for CMB block. */ |
849 | error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, | | 852 | error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, |
850 | sc->age_rdata.age_cmb_block, AGE_CMB_BLOCK_SZ, NULL, | | 853 | sc->age_rdata.age_cmb_block, AGE_CMB_BLOCK_SZ, NULL, |
851 | BUS_DMA_WAITOK); | | 854 | BUS_DMA_WAITOK); |
852 | if (error) { | | 855 | if (error) { |
853 | printf("%s: could not load DMA'able memory for CMB block, " | | 856 | printf("%s: could not load DMA'able memory for CMB block, " |
854 | "error = %i\n", device_xname(sc->sc_dev), error); | | 857 | "error = %i\n", device_xname(sc->sc_dev), error); |
855 | bus_dmamem_free(sc->sc_dmat, | | 858 | bus_dmamem_free(sc->sc_dmat, |
856 | &sc->age_rdata.age_cmb_block_seg, 1); | | 859 | &sc->age_rdata.age_cmb_block_seg, 1); |
857 | return error; | | 860 | return error; |
858 | } | | 861 | } |
859 | | | 862 | |
860 | sc->age_rdata.age_cmb_block_paddr = | | 863 | sc->age_rdata.age_cmb_block_paddr = |
861 | sc->age_cdata.age_cmb_block_map->dm_segs[0].ds_addr; | | 864 | sc->age_cdata.age_cmb_block_map->dm_segs[0].ds_addr; |
862 | | | 865 | |
863 | /* | | 866 | /* |
864 | * Create DMA stuffs for SMB block | | 867 | * Create DMA stuffs for SMB block |
865 | */ | | 868 | */ |
866 | error = bus_dmamap_create(sc->sc_dmat, AGE_SMB_BLOCK_SZ, 1, | | 869 | error = bus_dmamap_create(sc->sc_dmat, AGE_SMB_BLOCK_SZ, 1, |
867 | AGE_SMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT, | | 870 | AGE_SMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT, |
868 | &sc->age_cdata.age_smb_block_map); | | 871 | &sc->age_cdata.age_smb_block_map); |
869 | if (error) { | | 872 | if (error) { |
870 | sc->age_cdata.age_smb_block_map = NULL; | | 873 | sc->age_cdata.age_smb_block_map = NULL; |
871 | return ENOBUFS; | | 874 | return ENOBUFS; |
872 | } | | 875 | } |
873 | | | 876 | |
874 | /* Allocate DMA'able memory for SMB block */ | | 877 | /* Allocate DMA'able memory for SMB block */ |
875 | error = bus_dmamem_alloc(sc->sc_dmat, AGE_SMB_BLOCK_SZ, | | 878 | error = bus_dmamem_alloc(sc->sc_dmat, AGE_SMB_BLOCK_SZ, |
876 | ETHER_ALIGN, 0, &sc->age_rdata.age_smb_block_seg, 1, | | 879 | ETHER_ALIGN, 0, &sc->age_rdata.age_smb_block_seg, 1, |
877 | &nsegs, BUS_DMA_WAITOK); | | 880 | &nsegs, BUS_DMA_WAITOK); |
878 | if (error) { | | 881 | if (error) { |
879 | printf("%s: could not allocate DMA'able memory for " | | 882 | printf("%s: could not allocate DMA'able memory for " |
880 | "SMB block, error = %i\n", device_xname(sc->sc_dev), error); | | 883 | "SMB block, error = %i\n", device_xname(sc->sc_dev), error); |
881 | return error; | | 884 | return error; |
882 | } | | 885 | } |
883 | | | 886 | |
884 | error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_smb_block_seg, | | 887 | error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_smb_block_seg, |
885 | nsegs, AGE_SMB_BLOCK_SZ, (void **)&sc->age_rdata.age_smb_block, | | 888 | nsegs, AGE_SMB_BLOCK_SZ, (void **)&sc->age_rdata.age_smb_block, |
886 | BUS_DMA_NOWAIT); | | 889 | BUS_DMA_NOWAIT); |
887 | if (error) | | 890 | if (error) |
888 | return ENOBUFS; | | 891 | return ENOBUFS; |
889 | | | 892 | |
890 | memset(sc->age_rdata.age_smb_block, 0, AGE_SMB_BLOCK_SZ); | | 893 | memset(sc->age_rdata.age_smb_block, 0, AGE_SMB_BLOCK_SZ); |
891 | | | 894 | |
892 | /* Load the DMA map for SMB block */ | | 895 | /* Load the DMA map for SMB block */ |
893 | error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_smb_block_map, | | 896 | error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_smb_block_map, |
894 | sc->age_rdata.age_smb_block, AGE_SMB_BLOCK_SZ, NULL, | | 897 | sc->age_rdata.age_smb_block, AGE_SMB_BLOCK_SZ, NULL, |
895 | BUS_DMA_WAITOK); | | 898 | BUS_DMA_WAITOK); |
896 | if (error) { | | 899 | if (error) { |
897 | printf("%s: could not load DMA'able memory for SMB block, " | | 900 | printf("%s: could not load DMA'able memory for SMB block, " |
898 | "error = %i\n", device_xname(sc->sc_dev), error); | | 901 | "error = %i\n", device_xname(sc->sc_dev), error); |
899 | bus_dmamem_free(sc->sc_dmat, | | 902 | bus_dmamem_free(sc->sc_dmat, |
900 | &sc->age_rdata.age_smb_block_seg, 1); | | 903 | &sc->age_rdata.age_smb_block_seg, 1); |
901 | return error; | | 904 | return error; |
902 | } | | 905 | } |
903 | | | 906 | |
904 | sc->age_rdata.age_smb_block_paddr = | | 907 | sc->age_rdata.age_smb_block_paddr = |
905 | sc->age_cdata.age_smb_block_map->dm_segs[0].ds_addr; | | 908 | sc->age_cdata.age_smb_block_map->dm_segs[0].ds_addr; |
906 | | | 909 | |
907 | /* Create DMA maps for Tx buffers. */ | | 910 | /* Create DMA maps for Tx buffers. */ |
908 | for (i = 0; i < AGE_TX_RING_CNT; i++) { | | 911 | for (i = 0; i < AGE_TX_RING_CNT; i++) { |
909 | txd = &sc->age_cdata.age_txdesc[i]; | | 912 | txd = &sc->age_cdata.age_txdesc[i]; |
910 | txd->tx_m = NULL; | | 913 | txd->tx_m = NULL; |
911 | txd->tx_dmamap = NULL; | | 914 | txd->tx_dmamap = NULL; |
912 | error = bus_dmamap_create(sc->sc_dmat, AGE_TSO_MAXSIZE, | | 915 | error = bus_dmamap_create(sc->sc_dmat, AGE_TSO_MAXSIZE, |
913 | AGE_MAXTXSEGS, AGE_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT, | | 916 | AGE_MAXTXSEGS, AGE_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT, |
914 | &txd->tx_dmamap); | | 917 | &txd->tx_dmamap); |
915 | if (error) { | | 918 | if (error) { |
916 | txd->tx_dmamap = NULL; | | 919 | txd->tx_dmamap = NULL; |
917 | printf("%s: could not create Tx dmamap, error = %i.\n", | | 920 | printf("%s: could not create Tx dmamap, error = %i.\n", |
918 | device_xname(sc->sc_dev), error); | | 921 | device_xname(sc->sc_dev), error); |
919 | return error; | | 922 | return error; |
920 | } | | 923 | } |
921 | } | | 924 | } |
922 | | | 925 | |
923 | /* Create DMA maps for Rx buffers. */ | | 926 | /* Create DMA maps for Rx buffers. */ |
924 | error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, | | 927 | error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, |
925 | BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_sparemap); | | 928 | BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_sparemap); |
926 | if (error) { | | 929 | if (error) { |
927 | sc->age_cdata.age_rx_sparemap = NULL; | | 930 | sc->age_cdata.age_rx_sparemap = NULL; |
928 | printf("%s: could not create spare Rx dmamap, error = %i.\n", | | 931 | printf("%s: could not create spare Rx dmamap, error = %i.\n", |
929 | device_xname(sc->sc_dev), error); | | 932 | device_xname(sc->sc_dev), error); |
930 | return error; | | 933 | return error; |
931 | } | | 934 | } |
932 | for (i = 0; i < AGE_RX_RING_CNT; i++) { | | 935 | for (i = 0; i < AGE_RX_RING_CNT; i++) { |
933 | rxd = &sc->age_cdata.age_rxdesc[i]; | | 936 | rxd = &sc->age_cdata.age_rxdesc[i]; |
934 | rxd->rx_m = NULL; | | 937 | rxd->rx_m = NULL; |
935 | rxd->rx_dmamap = NULL; | | 938 | rxd->rx_dmamap = NULL; |
936 | error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, | | 939 | error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, |
937 | MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap); | | 940 | MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap); |
938 | if (error) { | | 941 | if (error) { |
939 | rxd->rx_dmamap = NULL; | | 942 | rxd->rx_dmamap = NULL; |
940 | printf("%s: could not create Rx dmamap, error = %i.\n", | | 943 | printf("%s: could not create Rx dmamap, error = %i.\n", |
941 | device_xname(sc->sc_dev), error); | | 944 | device_xname(sc->sc_dev), error); |
942 | return error; | | 945 | return error; |
943 | } | | 946 | } |
944 | } | | 947 | } |
945 | | | 948 | |
946 | return 0; | | 949 | return 0; |
947 | } | | 950 | } |
948 | | | 951 | |
/*
 * age_dma_free: tear down the DMA state set up by age_dma_alloc().
 *
 * Per-packet Tx/Rx DMA maps are destroyed first, then each descriptor
 * ring and status block is unloaded and its wired memory freed.  Every
 * pointer is reset to NULL afterwards, so the routine is safe to call
 * on a partially-initialized softc (attach failure path) and safe to
 * call more than once.
 *
 * NOTE(review): the ring/block dmamaps are only bus_dmamap_unload()ed
 * and NULLed, never bus_dmamap_destroy()ed, and the memory mapped with
 * bus_dmamem_map() is never bus_dmamem_unmap()ed -- TODO confirm
 * against the allocation path whether those resources are reclaimed
 * elsewhere or leaked on detach.
 */
static void
age_dma_free(struct age_softc *sc)
{
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	int i;

	/* Tx buffers: destroy the per-packet DMA maps. */
	for (i = 0; i < AGE_TX_RING_CNT; i++) {
		txd = &sc->age_cdata.age_txdesc[i];
		if (txd->tx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
			txd->tx_dmamap = NULL;
		}
	}
	/* Rx buffers: per-packet maps plus the spare map. */
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		if (rxd->rx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
			rxd->rx_dmamap = NULL;
		}
	}
	if (sc->age_cdata.age_rx_sparemap != NULL) {
		bus_dmamap_destroy(sc->sc_dmat, sc->age_cdata.age_rx_sparemap);
		sc->age_cdata.age_rx_sparemap = NULL;
	}

	/* Tx ring: unload, then free the segment only if both halves exist. */
	if (sc->age_cdata.age_tx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_tx_ring_map);
	if (sc->age_cdata.age_tx_ring_map != NULL &&
	    sc->age_rdata.age_tx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_tx_ring_seg, 1);
	sc->age_rdata.age_tx_ring = NULL;
	sc->age_cdata.age_tx_ring_map = NULL;

	/* Rx ring. */
	if (sc->age_cdata.age_rx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rx_ring_map);
	if (sc->age_cdata.age_rx_ring_map != NULL &&
	    sc->age_rdata.age_rx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_rx_ring_seg, 1);
	sc->age_rdata.age_rx_ring = NULL;
	sc->age_cdata.age_rx_ring_map = NULL;

	/* Rx return ring. */
	if (sc->age_cdata.age_rr_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rr_ring_map);
	if (sc->age_cdata.age_rr_ring_map != NULL &&
	    sc->age_rdata.age_rr_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_rr_ring_seg, 1);
	sc->age_rdata.age_rr_ring = NULL;
	sc->age_cdata.age_rr_ring_map = NULL;

	/* CMB (coalescing message) block. */
	if (sc->age_cdata.age_cmb_block_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_cmb_block_map);
	if (sc->age_cdata.age_cmb_block_map != NULL &&
	    sc->age_rdata.age_cmb_block != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_cmb_block_seg, 1);
	sc->age_rdata.age_cmb_block = NULL;
	sc->age_cdata.age_cmb_block_map = NULL;

	/* SMB (statistics message) block. */
	if (sc->age_cdata.age_smb_block_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_smb_block_map);
	if (sc->age_cdata.age_smb_block_map != NULL &&
	    sc->age_rdata.age_smb_block != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_smb_block_seg, 1);
	sc->age_rdata.age_smb_block = NULL;
	sc->age_cdata.age_smb_block_map = NULL;
}
1027 | | | 1030 | |
1028 | static void | | 1031 | static void |
1029 | age_start(struct ifnet *ifp) | | 1032 | age_start(struct ifnet *ifp) |
1030 | { | | 1033 | { |
1031 | struct age_softc *sc = ifp->if_softc; | | 1034 | struct age_softc *sc = ifp->if_softc; |
1032 | struct mbuf *m_head; | | 1035 | struct mbuf *m_head; |
1033 | int enq; | | 1036 | int enq; |
1034 | | | 1037 | |
1035 | if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) | | 1038 | if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) |
1036 | return; | | 1039 | return; |
1037 | | | 1040 | |
1038 | enq = 0; | | 1041 | enq = 0; |
1039 | for (;;) { | | 1042 | for (;;) { |
1040 | IFQ_DEQUEUE(&ifp->if_snd, m_head); | | 1043 | IFQ_DEQUEUE(&ifp->if_snd, m_head); |
1041 | if (m_head == NULL) | | 1044 | if (m_head == NULL) |
1042 | break; | | 1045 | break; |
1043 | | | 1046 | |
1044 | /* | | 1047 | /* |
1045 | * Pack the data into the transmit ring. If we | | 1048 | * Pack the data into the transmit ring. If we |
1046 | * don't have room, set the OACTIVE flag and wait | | 1049 | * don't have room, set the OACTIVE flag and wait |
1047 | * for the NIC to drain the ring. | | 1050 | * for the NIC to drain the ring. |
1048 | */ | | 1051 | */ |
1049 | if (age_encap(sc, &m_head)) { | | 1052 | if (age_encap(sc, &m_head)) { |
1050 | if (m_head == NULL) | | 1053 | if (m_head == NULL) |
1051 | break; | | 1054 | break; |
1052 | ifp->if_flags |= IFF_OACTIVE; | | 1055 | ifp->if_flags |= IFF_OACTIVE; |
1053 | break; | | 1056 | break; |
1054 | } | | 1057 | } |
1055 | enq = 1; | | 1058 | enq = 1; |
1056 | | | 1059 | |
1057 | #if NBPFILTER > 0 | | 1060 | #if NBPFILTER > 0 |
1058 | /* | | 1061 | /* |
1059 | * If there's a BPF listener, bounce a copy of this frame | | 1062 | * If there's a BPF listener, bounce a copy of this frame |
1060 | * to him. | | 1063 | * to him. |
1061 | */ | | 1064 | */ |
1062 | if (ifp->if_bpf != NULL) | | 1065 | if (ifp->if_bpf != NULL) |
1063 | bpf_mtap(ifp->if_bpf, m_head); | | 1066 | bpf_mtap(ifp->if_bpf, m_head); |
1064 | #endif | | 1067 | #endif |
1065 | } | | 1068 | } |
1066 | | | 1069 | |
1067 | if (enq) { | | 1070 | if (enq) { |
1068 | /* Update mbox. */ | | 1071 | /* Update mbox. */ |
1069 | AGE_COMMIT_MBOX(sc); | | 1072 | AGE_COMMIT_MBOX(sc); |
1070 | /* Set a timeout in case the chip goes out to lunch. */ | | 1073 | /* Set a timeout in case the chip goes out to lunch. */ |
1071 | ifp->if_timer = AGE_TX_TIMEOUT; | | 1074 | ifp->if_timer = AGE_TX_TIMEOUT; |
1072 | } | | 1075 | } |
1073 | } | | 1076 | } |
1074 | | | 1077 | |
1075 | static void | | 1078 | static void |
1076 | age_watchdog(struct ifnet *ifp) | | 1079 | age_watchdog(struct ifnet *ifp) |
1077 | { | | 1080 | { |
1078 | struct age_softc *sc = ifp->if_softc; | | 1081 | struct age_softc *sc = ifp->if_softc; |
1079 | | | 1082 | |
1080 | if ((sc->age_flags & AGE_FLAG_LINK) == 0) { | | 1083 | if ((sc->age_flags & AGE_FLAG_LINK) == 0) { |
1081 | printf("%s: watchdog timeout (missed link)\n", | | 1084 | printf("%s: watchdog timeout (missed link)\n", |
1082 | device_xname(sc->sc_dev)); | | 1085 | device_xname(sc->sc_dev)); |
1083 | ifp->if_oerrors++; | | 1086 | ifp->if_oerrors++; |
1084 | age_init(ifp); | | 1087 | age_init(ifp); |
1085 | return; | | 1088 | return; |
1086 | } | | 1089 | } |
1087 | | | 1090 | |
1088 | if (sc->age_cdata.age_tx_cnt == 0) { | | 1091 | if (sc->age_cdata.age_tx_cnt == 0) { |
1089 | printf("%s: watchdog timeout (missed Tx interrupts) " | | 1092 | printf("%s: watchdog timeout (missed Tx interrupts) " |
1090 | "-- recovering\n", device_xname(sc->sc_dev)); | | 1093 | "-- recovering\n", device_xname(sc->sc_dev)); |
1091 | if (!IFQ_IS_EMPTY(&ifp->if_snd)) | | 1094 | if (!IFQ_IS_EMPTY(&ifp->if_snd)) |
1092 | age_start(ifp); | | 1095 | age_start(ifp); |
1093 | return; | | 1096 | return; |
1094 | } | | 1097 | } |
1095 | | | 1098 | |
1096 | printf("%s: watchdog timeout\n", device_xname(sc->sc_dev)); | | 1099 | printf("%s: watchdog timeout\n", device_xname(sc->sc_dev)); |
1097 | ifp->if_oerrors++; | | 1100 | ifp->if_oerrors++; |
1098 | age_init(ifp); | | 1101 | age_init(ifp); |
1099 | | | 1102 | |
1100 | if (!IFQ_IS_EMPTY(&ifp->if_snd)) | | 1103 | if (!IFQ_IS_EMPTY(&ifp->if_snd)) |
1101 | age_start(ifp); | | 1104 | age_start(ifp); |
1102 | } | | 1105 | } |
1103 | | | 1106 | |
1104 | static int | | 1107 | static int |
1105 | age_ioctl(struct ifnet *ifp, u_long cmd, void *data) | | 1108 | age_ioctl(struct ifnet *ifp, u_long cmd, void *data) |
1106 | { | | 1109 | { |
1107 | struct age_softc *sc = ifp->if_softc; | | 1110 | struct age_softc *sc = ifp->if_softc; |
1108 | int s, error; | | 1111 | int s, error; |
1109 | | | 1112 | |
1110 | s = splnet(); | | 1113 | s = splnet(); |
1111 | | | 1114 | |
1112 | error = ether_ioctl(ifp, cmd, data); | | 1115 | error = ether_ioctl(ifp, cmd, data); |
1113 | if (error == ENETRESET) { | | 1116 | if (error == ENETRESET) { |
1114 | if (ifp->if_flags & IFF_RUNNING) | | 1117 | if (ifp->if_flags & IFF_RUNNING) |
1115 | age_rxfilter(sc); | | 1118 | age_rxfilter(sc); |
1116 | error = 0; | | 1119 | error = 0; |
1117 | } | | 1120 | } |
1118 | | | 1121 | |
1119 | splx(s); | | 1122 | splx(s); |
1120 | return error; | | 1123 | return error; |
1121 | } | | 1124 | } |
1122 | | | 1125 | |
1123 | static void | | 1126 | static void |
1124 | age_mac_config(struct age_softc *sc) | | 1127 | age_mac_config(struct age_softc *sc) |
1125 | { | | 1128 | { |
1126 | struct mii_data *mii; | | 1129 | struct mii_data *mii; |
1127 | uint32_t reg; | | 1130 | uint32_t reg; |
1128 | | | 1131 | |
1129 | mii = &sc->sc_miibus; | | 1132 | mii = &sc->sc_miibus; |
1130 | | | 1133 | |
1131 | reg = CSR_READ_4(sc, AGE_MAC_CFG); | | 1134 | reg = CSR_READ_4(sc, AGE_MAC_CFG); |
1132 | reg &= ~MAC_CFG_FULL_DUPLEX; | | 1135 | reg &= ~MAC_CFG_FULL_DUPLEX; |
1133 | reg &= ~(MAC_CFG_TX_FC | MAC_CFG_RX_FC); | | 1136 | reg &= ~(MAC_CFG_TX_FC | MAC_CFG_RX_FC); |
1134 | reg &= ~MAC_CFG_SPEED_MASK; | | 1137 | reg &= ~MAC_CFG_SPEED_MASK; |
1135 | | | 1138 | |
1136 | /* Reprogram MAC with resolved speed/duplex. */ | | 1139 | /* Reprogram MAC with resolved speed/duplex. */ |
1137 | switch (IFM_SUBTYPE(mii->mii_media_active)) { | | 1140 | switch (IFM_SUBTYPE(mii->mii_media_active)) { |
1138 | case IFM_10_T: | | 1141 | case IFM_10_T: |
1139 | case IFM_100_TX: | | 1142 | case IFM_100_TX: |
1140 | reg |= MAC_CFG_SPEED_10_100; | | 1143 | reg |= MAC_CFG_SPEED_10_100; |
1141 | break; | | 1144 | break; |
1142 | case IFM_1000_T: | | 1145 | case IFM_1000_T: |
1143 | reg |= MAC_CFG_SPEED_1000; | | 1146 | reg |= MAC_CFG_SPEED_1000; |
1144 | break; | | 1147 | break; |
1145 | } | | 1148 | } |
1146 | if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { | | 1149 | if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { |
1147 | reg |= MAC_CFG_FULL_DUPLEX; | | 1150 | reg |= MAC_CFG_FULL_DUPLEX; |
1148 | if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) | | 1151 | if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) |
1149 | reg |= MAC_CFG_TX_FC; | | 1152 | reg |= MAC_CFG_TX_FC; |
1150 | if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) | | 1153 | if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) |
1151 | reg |= MAC_CFG_RX_FC; | | 1154 | reg |= MAC_CFG_RX_FC; |
1152 | } | | 1155 | } |
1153 | | | 1156 | |
1154 | CSR_WRITE_4(sc, AGE_MAC_CFG, reg); | | 1157 | CSR_WRITE_4(sc, AGE_MAC_CFG, reg); |
1155 | } | | 1158 | } |
1156 | | | 1159 | |
1157 | static bool | | 1160 | static bool |
1158 | age_resume(device_t dv PMF_FN_ARGS) | | 1161 | age_resume(device_t dv PMF_FN_ARGS) |
1159 | { | | 1162 | { |
1160 | struct age_softc *sc = device_private(dv); | | 1163 | struct age_softc *sc = device_private(dv); |
1161 | uint16_t cmd; | | 1164 | uint16_t cmd; |
1162 | | | 1165 | |
1163 | /* | | 1166 | /* |
1164 | * Clear INTx emulation disable for hardware that | | 1167 | * Clear INTx emulation disable for hardware that |
1165 | * is set in resume event. From Linux. | | 1168 | * is set in resume event. From Linux. |
1166 | */ | | 1169 | */ |
1167 | cmd = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG); | | 1170 | cmd = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG); |
1168 | if ((cmd & PCI_COMMAND_INTERRUPT_DISABLE) != 0) { | | 1171 | if ((cmd & PCI_COMMAND_INTERRUPT_DISABLE) != 0) { |
1169 | cmd &= ~PCI_COMMAND_INTERRUPT_DISABLE; | | 1172 | cmd &= ~PCI_COMMAND_INTERRUPT_DISABLE; |
1170 | pci_conf_write(sc->sc_pct, sc->sc_pcitag, | | 1173 | pci_conf_write(sc->sc_pct, sc->sc_pcitag, |
1171 | PCI_COMMAND_STATUS_REG, cmd); | | 1174 | PCI_COMMAND_STATUS_REG, cmd); |
1172 | } | | 1175 | } |
1173 | | | 1176 | |
1174 | return true; | | 1177 | return true; |
1175 | } | | 1178 | } |
1176 | | | 1179 | |
1177 | static int | | 1180 | static int |
1178 | age_encap(struct age_softc *sc, struct mbuf **m_head) | | 1181 | age_encap(struct age_softc *sc, struct mbuf **m_head) |
1179 | { | | 1182 | { |
1180 | struct age_txdesc *txd, *txd_last; | | 1183 | struct age_txdesc *txd, *txd_last; |
1181 | struct tx_desc *desc; | | 1184 | struct tx_desc *desc; |
1182 | struct mbuf *m; | | 1185 | struct mbuf *m; |
1183 | bus_dmamap_t map; | | 1186 | bus_dmamap_t map; |
1184 | uint32_t cflags, poff, vtag; | | 1187 | uint32_t cflags, poff, vtag; |
1185 | int error, i, nsegs, prod; | | 1188 | int error, i, nsegs, prod; |
1186 | #if NVLAN > 0 | | 1189 | #if NVLAN > 0 |
1187 | struct m_tag *mtag; | | 1190 | struct m_tag *mtag; |
1188 | #endif | | 1191 | #endif |
1189 | | | 1192 | |
1190 | m = *m_head; | | 1193 | m = *m_head; |
1191 | cflags = vtag = 0; | | 1194 | cflags = vtag = 0; |
1192 | poff = 0; | | 1195 | poff = 0; |
1193 | | | 1196 | |
1194 | prod = sc->age_cdata.age_tx_prod; | | 1197 | prod = sc->age_cdata.age_tx_prod; |
1195 | txd = &sc->age_cdata.age_txdesc[prod]; | | 1198 | txd = &sc->age_cdata.age_txdesc[prod]; |
1196 | txd_last = txd; | | 1199 | txd_last = txd; |
1197 | map = txd->tx_dmamap; | | 1200 | map = txd->tx_dmamap; |
1198 | | | 1201 | |
1199 | error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, BUS_DMA_NOWAIT); | | 1202 | error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, BUS_DMA_NOWAIT); |
1200 | | | 1203 | |
1201 | if (error == EFBIG) { | | 1204 | if (error == EFBIG) { |
1202 | error = 0; | | 1205 | error = 0; |
1203 | | | 1206 | |
1204 | MGETHDR(m, M_DONTWAIT, MT_DATA); | | 1207 | MGETHDR(m, M_DONTWAIT, MT_DATA); |
1205 | if (m == NULL) { | | 1208 | if (m == NULL) { |
1206 | printf("%s: can't defrag TX mbuf\n", | | 1209 | printf("%s: can't defrag TX mbuf\n", |
1207 | device_xname(sc->sc_dev)); | | 1210 | device_xname(sc->sc_dev)); |
1208 | m_freem(*m_head); | | 1211 | m_freem(*m_head); |
1209 | *m_head = NULL; | | 1212 | *m_head = NULL; |
1210 | return ENOBUFS; | | 1213 | return ENOBUFS; |
1211 | } | | 1214 | } |
1212 | | | 1215 | |
1213 | M_COPY_PKTHDR(m, *m_head); | | 1216 | M_COPY_PKTHDR(m, *m_head); |
1214 | if ((*m_head)->m_pkthdr.len > MHLEN) { | | 1217 | if ((*m_head)->m_pkthdr.len > MHLEN) { |
1215 | MCLGET(m, M_DONTWAIT); | | 1218 | MCLGET(m, M_DONTWAIT); |
1216 | if (!(m->m_flags & M_EXT)) { | | 1219 | if (!(m->m_flags & M_EXT)) { |
1217 | m_freem(*m_head); | | 1220 | m_freem(*m_head); |
1218 | m_freem(m); | | 1221 | m_freem(m); |
1219 | *m_head = NULL; | | 1222 | *m_head = NULL; |
1220 | return ENOBUFS; | | 1223 | return ENOBUFS; |
1221 | } | | 1224 | } |
1222 | } | | 1225 | } |
1223 | m_copydata(*m_head, 0, (*m_head)->m_pkthdr.len, | | 1226 | m_copydata(*m_head, 0, (*m_head)->m_pkthdr.len, |
1224 | mtod(m, void *)); | | 1227 | mtod(m, void *)); |
1225 | m_freem(*m_head); | | 1228 | m_freem(*m_head); |
1226 | m->m_len = m->m_pkthdr.len; | | 1229 | m->m_len = m->m_pkthdr.len; |
1227 | *m_head = m; | | 1230 | *m_head = m; |
1228 | | | 1231 | |
1229 | error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, | | 1232 | error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, |
1230 | BUS_DMA_NOWAIT); | | 1233 | BUS_DMA_NOWAIT); |
1231 | | | 1234 | |
1232 | if (error != 0) { | | 1235 | if (error != 0) { |
1233 | printf("%s: could not load defragged TX mbuf\n", | | 1236 | printf("%s: could not load defragged TX mbuf\n", |
1234 | device_xname(sc->sc_dev)); | | 1237 | device_xname(sc->sc_dev)); |
1235 | if (!error) { | | 1238 | if (!error) { |
1236 | bus_dmamap_unload(sc->sc_dmat, map); | | 1239 | bus_dmamap_unload(sc->sc_dmat, map); |
1237 | error = EFBIG; | | 1240 | error = EFBIG; |
1238 | } | | 1241 | } |
1239 | m_freem(*m_head); | | 1242 | m_freem(*m_head); |
1240 | *m_head = NULL; | | 1243 | *m_head = NULL; |
1241 | return error; | | 1244 | return error; |
1242 | } | | 1245 | } |
1243 | } else if (error) { | | 1246 | } else if (error) { |
1244 | printf("%s: could not load TX mbuf\n", device_xname(sc->sc_dev)); | | 1247 | printf("%s: could not load TX mbuf\n", device_xname(sc->sc_dev)); |
1245 | return error; | | 1248 | return error; |
1246 | } | | 1249 | } |
1247 | | | 1250 | |
1248 | nsegs = map->dm_nsegs; | | 1251 | nsegs = map->dm_nsegs; |
1249 | | | 1252 | |
1250 | if (nsegs == 0) { | | 1253 | if (nsegs == 0) { |
1251 | m_freem(*m_head); | | 1254 | m_freem(*m_head); |
1252 | *m_head = NULL; | | 1255 | *m_head = NULL; |
1253 | return EIO; | | 1256 | return EIO; |
1254 | } | | 1257 | } |
1255 | | | 1258 | |
1256 | /* Check descriptor overrun. */ | | 1259 | /* Check descriptor overrun. */ |
1257 | if (sc->age_cdata.age_tx_cnt + nsegs >= AGE_TX_RING_CNT - 2) { | | 1260 | if (sc->age_cdata.age_tx_cnt + nsegs >= AGE_TX_RING_CNT - 2) { |
1258 | bus_dmamap_unload(sc->sc_dmat, map); | | 1261 | bus_dmamap_unload(sc->sc_dmat, map); |
1259 | return ENOBUFS; | | 1262 | return ENOBUFS; |