| @@ -1,1325 +1,1328 @@ | | | @@ -1,1325 +1,1328 @@ |
1 | /* $NetBSD: if_nfe.c,v 1.56 2012/07/22 14:33:03 matt Exp $ */ | | 1 | /* $NetBSD: if_nfe.c,v 1.57 2012/09/23 01:12:01 chs Exp $ */ |
2 | /* $OpenBSD: if_nfe.c,v 1.77 2008/02/05 16:52:50 brad Exp $ */ | | 2 | /* $OpenBSD: if_nfe.c,v 1.77 2008/02/05 16:52:50 brad Exp $ */ |
3 | | | 3 | |
4 | /*- | | 4 | /*- |
5 | * Copyright (c) 2006, 2007 Damien Bergamini <damien.bergamini@free.fr> | | 5 | * Copyright (c) 2006, 2007 Damien Bergamini <damien.bergamini@free.fr> |
6 | * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org> | | 6 | * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org> |
7 | * | | 7 | * |
8 | * Permission to use, copy, modify, and distribute this software for any | | 8 | * Permission to use, copy, modify, and distribute this software for any |
9 | * purpose with or without fee is hereby granted, provided that the above | | 9 | * purpose with or without fee is hereby granted, provided that the above |
10 | * copyright notice and this permission notice appear in all copies. | | 10 | * copyright notice and this permission notice appear in all copies. |
11 | * | | 11 | * |
12 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | | 12 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES |
13 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | | 13 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF |
14 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | | 14 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR |
15 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | | 15 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES |
16 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | | 16 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN |
17 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | | 17 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF |
18 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | | 18 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |
19 | */ | | 19 | */ |
20 | | | 20 | |
21 | /* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */ | | 21 | /* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */ |
22 | | | 22 | |
23 | #include <sys/cdefs.h> | | 23 | #include <sys/cdefs.h> |
24 | __KERNEL_RCSID(0, "$NetBSD: if_nfe.c,v 1.56 2012/07/22 14:33:03 matt Exp $"); | | 24 | __KERNEL_RCSID(0, "$NetBSD: if_nfe.c,v 1.57 2012/09/23 01:12:01 chs Exp $"); |
25 | | | 25 | |
26 | #include "opt_inet.h" | | 26 | #include "opt_inet.h" |
27 | #include "vlan.h" | | 27 | #include "vlan.h" |
28 | | | 28 | |
29 | #include <sys/param.h> | | 29 | #include <sys/param.h> |
30 | #include <sys/endian.h> | | 30 | #include <sys/endian.h> |
31 | #include <sys/systm.h> | | 31 | #include <sys/systm.h> |
32 | #include <sys/types.h> | | 32 | #include <sys/types.h> |
33 | #include <sys/sockio.h> | | 33 | #include <sys/sockio.h> |
34 | #include <sys/mbuf.h> | | 34 | #include <sys/mbuf.h> |
35 | #include <sys/mutex.h> | | 35 | #include <sys/mutex.h> |
36 | #include <sys/queue.h> | | 36 | #include <sys/queue.h> |
37 | #include <sys/kernel.h> | | 37 | #include <sys/kernel.h> |
38 | #include <sys/device.h> | | 38 | #include <sys/device.h> |
39 | #include <sys/callout.h> | | 39 | #include <sys/callout.h> |
40 | #include <sys/socket.h> | | 40 | #include <sys/socket.h> |
41 | | | 41 | |
42 | #include <sys/bus.h> | | 42 | #include <sys/bus.h> |
43 | | | 43 | |
44 | #include <net/if.h> | | 44 | #include <net/if.h> |
45 | #include <net/if_dl.h> | | 45 | #include <net/if_dl.h> |
46 | #include <net/if_media.h> | | 46 | #include <net/if_media.h> |
47 | #include <net/if_ether.h> | | 47 | #include <net/if_ether.h> |
48 | #include <net/if_arp.h> | | 48 | #include <net/if_arp.h> |
49 | | | 49 | |
50 | #ifdef INET | | 50 | #ifdef INET |
51 | #include <netinet/in.h> | | 51 | #include <netinet/in.h> |
52 | #include <netinet/in_systm.h> | | 52 | #include <netinet/in_systm.h> |
53 | #include <netinet/in_var.h> | | 53 | #include <netinet/in_var.h> |
54 | #include <netinet/ip.h> | | 54 | #include <netinet/ip.h> |
55 | #include <netinet/if_inarp.h> | | 55 | #include <netinet/if_inarp.h> |
56 | #endif | | 56 | #endif |
57 | | | 57 | |
58 | #if NVLAN > 0 | | 58 | #if NVLAN > 0 |
59 | #include <net/if_types.h> | | 59 | #include <net/if_types.h> |
60 | #endif | | 60 | #endif |
61 | | | 61 | |
62 | #include <net/bpf.h> | | 62 | #include <net/bpf.h> |
63 | | | 63 | |
64 | #include <dev/mii/mii.h> | | 64 | #include <dev/mii/mii.h> |
65 | #include <dev/mii/miivar.h> | | 65 | #include <dev/mii/miivar.h> |
66 | | | 66 | |
67 | #include <dev/pci/pcireg.h> | | 67 | #include <dev/pci/pcireg.h> |
68 | #include <dev/pci/pcivar.h> | | 68 | #include <dev/pci/pcivar.h> |
69 | #include <dev/pci/pcidevs.h> | | 69 | #include <dev/pci/pcidevs.h> |
70 | | | 70 | |
71 | #include <dev/pci/if_nfereg.h> | | 71 | #include <dev/pci/if_nfereg.h> |
72 | #include <dev/pci/if_nfevar.h> | | 72 | #include <dev/pci/if_nfevar.h> |
73 | | | 73 | |
74 | static int nfe_ifflags_cb(struct ethercom *); | | 74 | static int nfe_ifflags_cb(struct ethercom *); |
75 | | | 75 | |
76 | int nfe_match(device_t, cfdata_t, void *); | | 76 | int nfe_match(device_t, cfdata_t, void *); |
77 | void nfe_attach(device_t, device_t, void *); | | 77 | void nfe_attach(device_t, device_t, void *); |
78 | int nfe_detach(device_t, int); | | 78 | int nfe_detach(device_t, int); |
79 | void nfe_power(int, void *); | | 79 | void nfe_power(int, void *); |
80 | void nfe_miibus_statchg(struct ifnet *); | | 80 | void nfe_miibus_statchg(struct ifnet *); |
81 | int nfe_miibus_readreg(device_t, int, int); | | 81 | int nfe_miibus_readreg(device_t, int, int); |
82 | void nfe_miibus_writereg(device_t, int, int, int); | | 82 | void nfe_miibus_writereg(device_t, int, int, int); |
83 | int nfe_intr(void *); | | 83 | int nfe_intr(void *); |
84 | int nfe_ioctl(struct ifnet *, u_long, void *); | | 84 | int nfe_ioctl(struct ifnet *, u_long, void *); |
85 | void nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int); | | 85 | void nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int); |
86 | void nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int); | | 86 | void nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int); |
87 | void nfe_txdesc32_rsync(struct nfe_softc *, int, int, int); | | 87 | void nfe_txdesc32_rsync(struct nfe_softc *, int, int, int); |
88 | void nfe_txdesc64_rsync(struct nfe_softc *, int, int, int); | | 88 | void nfe_txdesc64_rsync(struct nfe_softc *, int, int, int); |
89 | void nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int); | | 89 | void nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int); |
90 | void nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int); | | 90 | void nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int); |
91 | void nfe_rxeof(struct nfe_softc *); | | 91 | void nfe_rxeof(struct nfe_softc *); |
92 | void nfe_txeof(struct nfe_softc *); | | 92 | void nfe_txeof(struct nfe_softc *); |
93 | int nfe_encap(struct nfe_softc *, struct mbuf *); | | 93 | int nfe_encap(struct nfe_softc *, struct mbuf *); |
94 | void nfe_start(struct ifnet *); | | 94 | void nfe_start(struct ifnet *); |
95 | void nfe_watchdog(struct ifnet *); | | 95 | void nfe_watchdog(struct ifnet *); |
96 | int nfe_init(struct ifnet *); | | 96 | int nfe_init(struct ifnet *); |
97 | void nfe_stop(struct ifnet *, int); | | 97 | void nfe_stop(struct ifnet *, int); |
98 | struct nfe_jbuf *nfe_jalloc(struct nfe_softc *, int); | | 98 | struct nfe_jbuf *nfe_jalloc(struct nfe_softc *, int); |
99 | void nfe_jfree(struct mbuf *, void *, size_t, void *); | | 99 | void nfe_jfree(struct mbuf *, void *, size_t, void *); |
100 | int nfe_jpool_alloc(struct nfe_softc *); | | 100 | int nfe_jpool_alloc(struct nfe_softc *); |
101 | void nfe_jpool_free(struct nfe_softc *); | | 101 | void nfe_jpool_free(struct nfe_softc *); |
102 | int nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); | | 102 | int nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); |
103 | void nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); | | 103 | void nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); |
104 | void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); | | 104 | void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); |
105 | int nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); | | 105 | int nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); |
106 | void nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); | | 106 | void nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); |
107 | void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); | | 107 | void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); |
108 | void nfe_setmulti(struct nfe_softc *); | | 108 | void nfe_setmulti(struct nfe_softc *); |
109 | void nfe_get_macaddr(struct nfe_softc *, uint8_t *); | | 109 | void nfe_get_macaddr(struct nfe_softc *, uint8_t *); |
110 | void nfe_set_macaddr(struct nfe_softc *, const uint8_t *); | | 110 | void nfe_set_macaddr(struct nfe_softc *, const uint8_t *); |
111 | void nfe_tick(void *); | | 111 | void nfe_tick(void *); |
112 | void nfe_poweron(device_t); | | 112 | void nfe_poweron(device_t); |
113 | bool nfe_resume(device_t, const pmf_qual_t *); | | 113 | bool nfe_resume(device_t, const pmf_qual_t *); |
114 | | | 114 | |
115 | CFATTACH_DECL_NEW(nfe, sizeof(struct nfe_softc), | | 115 | CFATTACH_DECL_NEW(nfe, sizeof(struct nfe_softc), |
116 | nfe_match, nfe_attach, nfe_detach, NULL); | | 116 | nfe_match, nfe_attach, nfe_detach, NULL); |
117 | | | 117 | |
118 | /* #define NFE_NO_JUMBO */ | | 118 | /* #define NFE_NO_JUMBO */ |
119 | | | 119 | |
120 | #ifdef NFE_DEBUG | | 120 | #ifdef NFE_DEBUG |
121 | int nfedebug = 0; | | 121 | int nfedebug = 0; |
122 | #define DPRINTF(x) do { if (nfedebug) printf x; } while (0) | | 122 | #define DPRINTF(x) do { if (nfedebug) printf x; } while (0) |
123 | #define DPRINTFN(n,x) do { if (nfedebug >= (n)) printf x; } while (0) | | 123 | #define DPRINTFN(n,x) do { if (nfedebug >= (n)) printf x; } while (0) |
124 | #else | | 124 | #else |
125 | #define DPRINTF(x) | | 125 | #define DPRINTF(x) |
126 | #define DPRINTFN(n,x) | | 126 | #define DPRINTFN(n,x) |
127 | #endif | | 127 | #endif |
128 | | | 128 | |
129 | /* deal with naming differences */ | | 129 | /* deal with naming differences */ |
130 | | | 130 | |
131 | #define PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 \ | | 131 | #define PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 \ |
132 | PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1 | | 132 | PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1 |
133 | #define PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 \ | | 133 | #define PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 \ |
134 | PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2 | | 134 | PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2 |
135 | #define PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 \ | | 135 | #define PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 \ |
136 | PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN | | 136 | PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN |
137 | | | 137 | |
138 | #define PCI_PRODUCT_NVIDIA_CK804_LAN1 \ | | 138 | #define PCI_PRODUCT_NVIDIA_CK804_LAN1 \ |
139 | PCI_PRODUCT_NVIDIA_NFORCE4_LAN1 | | 139 | PCI_PRODUCT_NVIDIA_NFORCE4_LAN1 |
140 | #define PCI_PRODUCT_NVIDIA_CK804_LAN2 \ | | 140 | #define PCI_PRODUCT_NVIDIA_CK804_LAN2 \ |
141 | PCI_PRODUCT_NVIDIA_NFORCE4_LAN2 | | 141 | PCI_PRODUCT_NVIDIA_NFORCE4_LAN2 |
142 | | | 142 | |
143 | #define PCI_PRODUCT_NVIDIA_MCP51_LAN1 \ | | 143 | #define PCI_PRODUCT_NVIDIA_MCP51_LAN1 \ |
144 | PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 | | 144 | PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 |
145 | #define PCI_PRODUCT_NVIDIA_MCP51_LAN2 \ | | 145 | #define PCI_PRODUCT_NVIDIA_MCP51_LAN2 \ |
146 | PCI_PRODUCT_NVIDIA_NFORCE430_LAN2 | | 146 | PCI_PRODUCT_NVIDIA_NFORCE430_LAN2 |
147 | | | 147 | |
148 | #ifdef _LP64 | | 148 | #ifdef _LP64 |
149 | #define __LP64__ 1 | | 149 | #define __LP64__ 1 |
150 | #endif | | 150 | #endif |
151 | | | 151 | |
152 | const struct nfe_product { | | 152 | const struct nfe_product { |
153 | pci_vendor_id_t vendor; | | 153 | pci_vendor_id_t vendor; |
154 | pci_product_id_t product; | | 154 | pci_product_id_t product; |
155 | } nfe_devices[] = { | | 155 | } nfe_devices[] = { |
156 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN }, | | 156 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN }, |
157 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN }, | | 157 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN }, |
158 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 }, | | 158 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 }, |
159 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 }, | | 159 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 }, |
160 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 }, | | 160 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 }, |
161 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 }, | | 161 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 }, |
162 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 }, | | 162 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 }, |
163 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 }, | | 163 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 }, |
164 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 }, | | 164 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 }, |
165 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 }, | | 165 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 }, |
166 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 }, | | 166 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 }, |
167 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 }, | | 167 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 }, |
168 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 }, | | 168 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 }, |
169 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 }, | | 169 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 }, |
170 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 }, | | 170 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 }, |
171 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1 }, | | 171 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1 }, |
172 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2 }, | | 172 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2 }, |
173 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3 }, | | 173 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3 }, |
174 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4 }, | | 174 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4 }, |
175 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1 }, | | 175 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1 }, |
176 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2 }, | | 176 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2 }, |
177 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3 }, | | 177 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3 }, |
178 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4 }, | | 178 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4 }, |
179 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1 }, | | 179 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1 }, |
180 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2 }, | | 180 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2 }, |
181 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3 }, | | 181 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3 }, |
182 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4 }, | | 182 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4 }, |
183 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1 }, | | 183 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1 }, |
184 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2 }, | | 184 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2 }, |
185 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3 }, | | 185 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3 }, |
186 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4 }, | | 186 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4 }, |
187 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1 }, | | 187 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1 }, |
188 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2 }, | | 188 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2 }, |
189 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3 }, | | 189 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3 }, |
190 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4 }, | | 190 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4 }, |
191 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1 }, | | 191 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1 }, |
192 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2 }, | | 192 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2 }, |
193 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3 }, | | 193 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3 }, |
194 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4 } | | 194 | { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4 } |
195 | }; | | 195 | }; |
196 | | | 196 | |
197 | int | | 197 | int |
198 | nfe_match(device_t dev, cfdata_t match, void *aux) | | 198 | nfe_match(device_t dev, cfdata_t match, void *aux) |
199 | { | | 199 | { |
200 | struct pci_attach_args *pa = aux; | | 200 | struct pci_attach_args *pa = aux; |
201 | const struct nfe_product *np; | | 201 | const struct nfe_product *np; |
202 | int i; | | 202 | int i; |
203 | | | 203 | |
204 | for (i = 0; i < __arraycount(nfe_devices); i++) { | | 204 | for (i = 0; i < __arraycount(nfe_devices); i++) { |
205 | np = &nfe_devices[i]; | | 205 | np = &nfe_devices[i]; |
206 | if (PCI_VENDOR(pa->pa_id) == np->vendor && | | 206 | if (PCI_VENDOR(pa->pa_id) == np->vendor && |
207 | PCI_PRODUCT(pa->pa_id) == np->product) | | 207 | PCI_PRODUCT(pa->pa_id) == np->product) |
208 | return 1; | | 208 | return 1; |
209 | } | | 209 | } |
210 | return 0; | | 210 | return 0; |
211 | } | | 211 | } |
212 | | | 212 | |
213 | void | | 213 | void |
214 | nfe_attach(device_t parent, device_t self, void *aux) | | 214 | nfe_attach(device_t parent, device_t self, void *aux) |
215 | { | | 215 | { |
216 | struct nfe_softc *sc = device_private(self); | | 216 | struct nfe_softc *sc = device_private(self); |
217 | struct pci_attach_args *pa = aux; | | 217 | struct pci_attach_args *pa = aux; |
218 | pci_chipset_tag_t pc = pa->pa_pc; | | 218 | pci_chipset_tag_t pc = pa->pa_pc; |
219 | pci_intr_handle_t ih; | | 219 | pci_intr_handle_t ih; |
220 | const char *intrstr; | | 220 | const char *intrstr; |
221 | struct ifnet *ifp; | | 221 | struct ifnet *ifp; |
222 | pcireg_t memtype, csr; | | 222 | pcireg_t memtype, csr; |
223 | int mii_flags = 0; | | 223 | int mii_flags = 0; |
224 | | | 224 | |
225 | sc->sc_dev = self; | | 225 | sc->sc_dev = self; |
226 | sc->sc_pc = pa->pa_pc; | | 226 | sc->sc_pc = pa->pa_pc; |
227 | pci_aprint_devinfo(pa, NULL); | | 227 | pci_aprint_devinfo(pa, NULL); |
228 | | | 228 | |
229 | memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA); | | 229 | memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA); |
230 | switch (memtype) { | | 230 | switch (memtype) { |
231 | case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: | | 231 | case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: |
232 | case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: | | 232 | case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: |
233 | if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt, | | 233 | if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt, |
234 | &sc->sc_memh, NULL, &sc->sc_mems) == 0) | | 234 | &sc->sc_memh, NULL, &sc->sc_mems) == 0) |
235 | break; | | 235 | break; |
236 | /* FALLTHROUGH */ | | 236 | /* FALLTHROUGH */ |
237 | default: | | 237 | default: |
238 | aprint_error_dev(self, "could not map mem space\n"); | | 238 | aprint_error_dev(self, "could not map mem space\n"); |
239 | return; | | 239 | return; |
240 | } | | 240 | } |
241 | | | 241 | |
242 | if (pci_intr_map(pa, &ih) != 0) { | | 242 | if (pci_intr_map(pa, &ih) != 0) { |
243 | aprint_error_dev(self, "could not map interrupt\n"); | | 243 | aprint_error_dev(self, "could not map interrupt\n"); |
244 | goto fail; | | 244 | goto fail; |
245 | } | | 245 | } |
246 | | | 246 | |
247 | intrstr = pci_intr_string(pc, ih); | | 247 | intrstr = pci_intr_string(pc, ih); |
248 | sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc); | | 248 | sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc); |
249 | if (sc->sc_ih == NULL) { | | 249 | if (sc->sc_ih == NULL) { |
250 | aprint_error_dev(self, "could not establish interrupt"); | | 250 | aprint_error_dev(self, "could not establish interrupt"); |
251 | if (intrstr != NULL) | | 251 | if (intrstr != NULL) |
252 | aprint_error(" at %s", intrstr); | | 252 | aprint_error(" at %s", intrstr); |
253 | aprint_error("\n"); | | 253 | aprint_error("\n"); |
254 | goto fail; | | 254 | goto fail; |
255 | } | | 255 | } |
256 | aprint_normal_dev(self, "interrupting at %s\n", intrstr); | | 256 | aprint_normal_dev(self, "interrupting at %s\n", intrstr); |
257 | | | 257 | |
258 | sc->sc_dmat = pa->pa_dmat; | | | |
259 | | | | |
260 | csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); | | 258 | csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); |
261 | csr |= PCI_COMMAND_MASTER_ENABLE; | | 259 | csr |= PCI_COMMAND_MASTER_ENABLE; |
262 | pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr); | | 260 | pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr); |
263 | | | 261 | |
264 | sc->sc_flags = 0; | | 262 | sc->sc_flags = 0; |
265 | | | 263 | |
266 | switch (PCI_PRODUCT(pa->pa_id)) { | | 264 | switch (PCI_PRODUCT(pa->pa_id)) { |
267 | case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2: | | 265 | case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2: |
268 | case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3: | | 266 | case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3: |
269 | case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4: | | 267 | case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4: |
270 | case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5: | | 268 | case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5: |
271 | sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM; | | 269 | sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM; |
272 | break; | | 270 | break; |
273 | case PCI_PRODUCT_NVIDIA_MCP51_LAN1: | | 271 | case PCI_PRODUCT_NVIDIA_MCP51_LAN1: |
274 | case PCI_PRODUCT_NVIDIA_MCP51_LAN2: | | 272 | case PCI_PRODUCT_NVIDIA_MCP51_LAN2: |
275 | sc->sc_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT; | | 273 | sc->sc_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT; |
276 | break; | | 274 | break; |
277 | case PCI_PRODUCT_NVIDIA_MCP61_LAN1: | | 275 | case PCI_PRODUCT_NVIDIA_MCP61_LAN1: |
278 | case PCI_PRODUCT_NVIDIA_MCP61_LAN2: | | 276 | case PCI_PRODUCT_NVIDIA_MCP61_LAN2: |
279 | case PCI_PRODUCT_NVIDIA_MCP61_LAN3: | | 277 | case PCI_PRODUCT_NVIDIA_MCP61_LAN3: |
280 | case PCI_PRODUCT_NVIDIA_MCP61_LAN4: | | 278 | case PCI_PRODUCT_NVIDIA_MCP61_LAN4: |
281 | case PCI_PRODUCT_NVIDIA_MCP67_LAN1: | | 279 | case PCI_PRODUCT_NVIDIA_MCP67_LAN1: |
282 | case PCI_PRODUCT_NVIDIA_MCP67_LAN2: | | 280 | case PCI_PRODUCT_NVIDIA_MCP67_LAN2: |
283 | case PCI_PRODUCT_NVIDIA_MCP67_LAN3: | | 281 | case PCI_PRODUCT_NVIDIA_MCP67_LAN3: |
284 | case PCI_PRODUCT_NVIDIA_MCP67_LAN4: | | 282 | case PCI_PRODUCT_NVIDIA_MCP67_LAN4: |
285 | case PCI_PRODUCT_NVIDIA_MCP73_LAN1: | | 283 | case PCI_PRODUCT_NVIDIA_MCP73_LAN1: |
286 | case PCI_PRODUCT_NVIDIA_MCP73_LAN2: | | 284 | case PCI_PRODUCT_NVIDIA_MCP73_LAN2: |
287 | case PCI_PRODUCT_NVIDIA_MCP73_LAN3: | | 285 | case PCI_PRODUCT_NVIDIA_MCP73_LAN3: |
288 | case PCI_PRODUCT_NVIDIA_MCP73_LAN4: | | 286 | case PCI_PRODUCT_NVIDIA_MCP73_LAN4: |
289 | sc->sc_flags |= NFE_40BIT_ADDR | NFE_CORRECT_MACADDR | | | 287 | sc->sc_flags |= NFE_40BIT_ADDR | NFE_CORRECT_MACADDR | |
290 | NFE_PWR_MGMT; | | 288 | NFE_PWR_MGMT; |
291 | break; | | 289 | break; |
292 | case PCI_PRODUCT_NVIDIA_MCP77_LAN1: | | 290 | case PCI_PRODUCT_NVIDIA_MCP77_LAN1: |
293 | case PCI_PRODUCT_NVIDIA_MCP77_LAN2: | | 291 | case PCI_PRODUCT_NVIDIA_MCP77_LAN2: |
294 | case PCI_PRODUCT_NVIDIA_MCP77_LAN3: | | 292 | case PCI_PRODUCT_NVIDIA_MCP77_LAN3: |
295 | case PCI_PRODUCT_NVIDIA_MCP77_LAN4: | | 293 | case PCI_PRODUCT_NVIDIA_MCP77_LAN4: |
296 | sc->sc_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM | | | 294 | sc->sc_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM | |
297 | NFE_CORRECT_MACADDR | NFE_PWR_MGMT; | | 295 | NFE_CORRECT_MACADDR | NFE_PWR_MGMT; |
298 | break; | | 296 | break; |
299 | case PCI_PRODUCT_NVIDIA_MCP79_LAN1: | | 297 | case PCI_PRODUCT_NVIDIA_MCP79_LAN1: |
300 | case PCI_PRODUCT_NVIDIA_MCP79_LAN2: | | 298 | case PCI_PRODUCT_NVIDIA_MCP79_LAN2: |
301 | case PCI_PRODUCT_NVIDIA_MCP79_LAN3: | | 299 | case PCI_PRODUCT_NVIDIA_MCP79_LAN3: |
302 | case PCI_PRODUCT_NVIDIA_MCP79_LAN4: | | 300 | case PCI_PRODUCT_NVIDIA_MCP79_LAN4: |
303 | sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM | | | 301 | sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM | |
304 | NFE_CORRECT_MACADDR | NFE_PWR_MGMT; | | 302 | NFE_CORRECT_MACADDR | NFE_PWR_MGMT; |
305 | break; | | 303 | break; |
306 | case PCI_PRODUCT_NVIDIA_CK804_LAN1: | | 304 | case PCI_PRODUCT_NVIDIA_CK804_LAN1: |
307 | case PCI_PRODUCT_NVIDIA_CK804_LAN2: | | 305 | case PCI_PRODUCT_NVIDIA_CK804_LAN2: |
308 | case PCI_PRODUCT_NVIDIA_MCP04_LAN1: | | 306 | case PCI_PRODUCT_NVIDIA_MCP04_LAN1: |
309 | case PCI_PRODUCT_NVIDIA_MCP04_LAN2: | | 307 | case PCI_PRODUCT_NVIDIA_MCP04_LAN2: |
310 | sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM; | | 308 | sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM; |
311 | break; | | 309 | break; |
312 | case PCI_PRODUCT_NVIDIA_MCP65_LAN1: | | 310 | case PCI_PRODUCT_NVIDIA_MCP65_LAN1: |
313 | case PCI_PRODUCT_NVIDIA_MCP65_LAN2: | | 311 | case PCI_PRODUCT_NVIDIA_MCP65_LAN2: |
314 | case PCI_PRODUCT_NVIDIA_MCP65_LAN3: | | 312 | case PCI_PRODUCT_NVIDIA_MCP65_LAN3: |
315 | case PCI_PRODUCT_NVIDIA_MCP65_LAN4: | | 313 | case PCI_PRODUCT_NVIDIA_MCP65_LAN4: |
316 | sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | | | 314 | sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | |
317 | NFE_CORRECT_MACADDR | NFE_PWR_MGMT; | | 315 | NFE_CORRECT_MACADDR | NFE_PWR_MGMT; |
318 | mii_flags = MIIF_DOPAUSE; | | 316 | mii_flags = MIIF_DOPAUSE; |
319 | break; | | 317 | break; |
320 | case PCI_PRODUCT_NVIDIA_MCP55_LAN1: | | 318 | case PCI_PRODUCT_NVIDIA_MCP55_LAN1: |
321 | case PCI_PRODUCT_NVIDIA_MCP55_LAN2: | | 319 | case PCI_PRODUCT_NVIDIA_MCP55_LAN2: |
322 | sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM | | | 320 | sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM | |
323 | NFE_HW_VLAN | NFE_PWR_MGMT; | | 321 | NFE_HW_VLAN | NFE_PWR_MGMT; |
324 | break; | | 322 | break; |
325 | } | | 323 | } |
326 | | | 324 | |
| | | 325 | if (pci_dma64_available(pa) && (sc->sc_flags & NFE_40BIT_ADDR) != 0) |
| | | 326 | sc->sc_dmat = pa->pa_dmat64; |
| | | 327 | else |
| | | 328 | sc->sc_dmat = pa->pa_dmat; |
| | | 329 | |
327 | nfe_poweron(self); | | 330 | nfe_poweron(self); |
328 | | | 331 | |
329 | #ifndef NFE_NO_JUMBO | | 332 | #ifndef NFE_NO_JUMBO |
330 | /* enable jumbo frames for adapters that support it */ | | 333 | /* enable jumbo frames for adapters that support it */ |
331 | if (sc->sc_flags & NFE_JUMBO_SUP) | | 334 | if (sc->sc_flags & NFE_JUMBO_SUP) |
332 | sc->sc_flags |= NFE_USE_JUMBO; | | 335 | sc->sc_flags |= NFE_USE_JUMBO; |
333 | #endif | | 336 | #endif |
334 | | | 337 | |
335 | /* Check for reversed ethernet address */ | | 338 | /* Check for reversed ethernet address */ |
336 | if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0) | | 339 | if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0) |
337 | sc->sc_flags |= NFE_CORRECT_MACADDR; | | 340 | sc->sc_flags |= NFE_CORRECT_MACADDR; |
338 | | | 341 | |
339 | nfe_get_macaddr(sc, sc->sc_enaddr); | | 342 | nfe_get_macaddr(sc, sc->sc_enaddr); |
340 | aprint_normal_dev(self, "Ethernet address %s\n", | | 343 | aprint_normal_dev(self, "Ethernet address %s\n", |
341 | ether_sprintf(sc->sc_enaddr)); | | 344 | ether_sprintf(sc->sc_enaddr)); |
342 | | | 345 | |
343 | /* | | 346 | /* |
344 | * Allocate Tx and Rx rings. | | 347 | * Allocate Tx and Rx rings. |
345 | */ | | 348 | */ |
346 | if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) { | | 349 | if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) { |
347 | aprint_error_dev(self, "could not allocate Tx ring\n"); | | 350 | aprint_error_dev(self, "could not allocate Tx ring\n"); |
348 | goto fail; | | 351 | goto fail; |
349 | } | | 352 | } |
350 | | | 353 | |
351 | mutex_init(&sc->rxq.mtx, MUTEX_DEFAULT, IPL_NET); | | 354 | mutex_init(&sc->rxq.mtx, MUTEX_DEFAULT, IPL_NET); |
352 | | | 355 | |
353 | if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) { | | 356 | if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) { |
354 | aprint_error_dev(self, "could not allocate Rx ring\n"); | | 357 | aprint_error_dev(self, "could not allocate Rx ring\n"); |
355 | nfe_free_tx_ring(sc, &sc->txq); | | 358 | nfe_free_tx_ring(sc, &sc->txq); |
356 | goto fail; | | 359 | goto fail; |
357 | } | | 360 | } |
358 | | | 361 | |
359 | ifp = &sc->sc_ethercom.ec_if; | | 362 | ifp = &sc->sc_ethercom.ec_if; |
360 | ifp->if_softc = sc; | | 363 | ifp->if_softc = sc; |
361 | ifp->if_mtu = ETHERMTU; | | 364 | ifp->if_mtu = ETHERMTU; |
362 | ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; | | 365 | ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; |
363 | ifp->if_ioctl = nfe_ioctl; | | 366 | ifp->if_ioctl = nfe_ioctl; |
364 | ifp->if_start = nfe_start; | | 367 | ifp->if_start = nfe_start; |
365 | ifp->if_stop = nfe_stop; | | 368 | ifp->if_stop = nfe_stop; |
366 | ifp->if_watchdog = nfe_watchdog; | | 369 | ifp->if_watchdog = nfe_watchdog; |
367 | ifp->if_init = nfe_init; | | 370 | ifp->if_init = nfe_init; |
368 | ifp->if_baudrate = IF_Gbps(1); | | 371 | ifp->if_baudrate = IF_Gbps(1); |
369 | IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN); | | 372 | IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN); |
370 | IFQ_SET_READY(&ifp->if_snd); | | 373 | IFQ_SET_READY(&ifp->if_snd); |
371 | strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ); | | 374 | strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ); |
372 | | | 375 | |
373 | if (sc->sc_flags & NFE_USE_JUMBO) | | 376 | if (sc->sc_flags & NFE_USE_JUMBO) |
374 | sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; | | 377 | sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; |
375 | | | 378 | |
376 | #if NVLAN > 0 | | 379 | #if NVLAN > 0 |
377 | if (sc->sc_flags & NFE_HW_VLAN) | | 380 | if (sc->sc_flags & NFE_HW_VLAN) |
378 | sc->sc_ethercom.ec_capabilities |= | | 381 | sc->sc_ethercom.ec_capabilities |= |
379 | ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU; | | 382 | ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU; |
380 | #endif | | 383 | #endif |
381 | if (sc->sc_flags & NFE_HW_CSUM) { | | 384 | if (sc->sc_flags & NFE_HW_CSUM) { |
382 | ifp->if_capabilities |= | | 385 | ifp->if_capabilities |= |
383 | IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | | | 386 | IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | |
384 | IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | | | 387 | IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | |
385 | IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; | | 388 | IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; |
386 | } | | 389 | } |
387 | | | 390 | |
388 | sc->sc_mii.mii_ifp = ifp; | | 391 | sc->sc_mii.mii_ifp = ifp; |
389 | sc->sc_mii.mii_readreg = nfe_miibus_readreg; | | 392 | sc->sc_mii.mii_readreg = nfe_miibus_readreg; |
390 | sc->sc_mii.mii_writereg = nfe_miibus_writereg; | | 393 | sc->sc_mii.mii_writereg = nfe_miibus_writereg; |
391 | sc->sc_mii.mii_statchg = nfe_miibus_statchg; | | 394 | sc->sc_mii.mii_statchg = nfe_miibus_statchg; |
392 | | | 395 | |
393 | sc->sc_ethercom.ec_mii = &sc->sc_mii; | | 396 | sc->sc_ethercom.ec_mii = &sc->sc_mii; |
394 | ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange, | | 397 | ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange, |
395 | ether_mediastatus); | | 398 | ether_mediastatus); |
396 | | | 399 | |
397 | mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 0, mii_flags); | | 400 | mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 0, mii_flags); |
398 | | | 401 | |
399 | if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { | | 402 | if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { |
400 | aprint_error_dev(self, "no PHY found!\n"); | | 403 | aprint_error_dev(self, "no PHY found!\n"); |
401 | ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL, | | 404 | ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL, |
402 | 0, NULL); | | 405 | 0, NULL); |
403 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL); | | 406 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL); |
404 | } else | | 407 | } else |
405 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO); | | 408 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO); |
406 | | | 409 | |
407 | if_attach(ifp); | | 410 | if_attach(ifp); |
408 | ether_ifattach(ifp, sc->sc_enaddr); | | 411 | ether_ifattach(ifp, sc->sc_enaddr); |
409 | ether_set_ifflags_cb(&sc->sc_ethercom, nfe_ifflags_cb); | | 412 | ether_set_ifflags_cb(&sc->sc_ethercom, nfe_ifflags_cb); |
410 | | | 413 | |
411 | callout_init(&sc->sc_tick_ch, 0); | | 414 | callout_init(&sc->sc_tick_ch, 0); |
412 | callout_setfunc(&sc->sc_tick_ch, nfe_tick, sc); | | 415 | callout_setfunc(&sc->sc_tick_ch, nfe_tick, sc); |
413 | | | 416 | |
414 | if (pmf_device_register(self, NULL, nfe_resume)) | | 417 | if (pmf_device_register(self, NULL, nfe_resume)) |
415 | pmf_class_network_register(self, ifp); | | 418 | pmf_class_network_register(self, ifp); |
416 | else | | 419 | else |
417 | aprint_error_dev(self, "couldn't establish power handler\n"); | | 420 | aprint_error_dev(self, "couldn't establish power handler\n"); |
418 | | | 421 | |
419 | return; | | 422 | return; |
420 | | | 423 | |
421 | fail: | | 424 | fail: |
422 | if (sc->sc_ih != NULL) { | | 425 | if (sc->sc_ih != NULL) { |
423 | pci_intr_disestablish(pc, sc->sc_ih); | | 426 | pci_intr_disestablish(pc, sc->sc_ih); |
424 | sc->sc_ih = NULL; | | 427 | sc->sc_ih = NULL; |
425 | } | | 428 | } |
426 | if (sc->sc_mems != 0) { | | 429 | if (sc->sc_mems != 0) { |
427 | bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems); | | 430 | bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems); |
428 | sc->sc_mems = 0; | | 431 | sc->sc_mems = 0; |
429 | } | | 432 | } |
430 | } | | 433 | } |
431 | | | 434 | |
/*
 * nfe_detach:
 *	Autoconfiguration detach hook.  Stops the interface, tears down
 *	the network attachment and MII layer, frees the DMA rings,
 *	disestablishes the interrupt, writes the station address back to
 *	the MAC registers and unmaps the device registers.
 *	The teardown order mirrors the reverse of nfe_attach.
 */
int
nfe_detach(device_t self, int flags)
{
	struct nfe_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int s;

	s = splnet();

	/* Disable Rx/Tx and cancel pending work before tearing down. */
	nfe_stop(ifp, 1);

	pmf_device_deregister(self);
	callout_destroy(&sc->sc_tick_ch);
	ether_ifdetach(ifp);
	if_detach(ifp);
	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	nfe_free_rx_ring(sc, &sc->rxq);
	mutex_destroy(&sc->rxq.mtx);
	nfe_free_tx_ring(sc, &sc->txq);

	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
		sc->sc_ih = NULL;
	}

	/*
	 * Write the station address back into the MACADDR registers.
	 * On chips without NFE_CORRECT_MACADDR the bytes are laid out
	 * in the reverse order shown below (presumably the chip's
	 * native layout as found at attach time — see nfe_attach).
	 */
	if ((sc->sc_flags & NFE_CORRECT_MACADDR) != 0) {
		nfe_set_macaddr(sc, sc->sc_enaddr);
	} else {
		NFE_WRITE(sc, NFE_MACADDR_LO,
		    sc->sc_enaddr[0] << 8 | sc->sc_enaddr[1]);
		NFE_WRITE(sc, NFE_MACADDR_HI,
		    sc->sc_enaddr[2] << 24 | sc->sc_enaddr[3] << 16 |
		    sc->sc_enaddr[4] << 8 | sc->sc_enaddr[5]);
	}

	if (sc->sc_mems != 0) {
		bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
		sc->sc_mems = 0;
	}

	splx(s);

	return 0;
}
477 | | | 480 | |
/*
 * nfe_miibus_statchg:
 *	MII status-change callback.  Propagate the negotiated media
 *	(speed and duplex) from the MII layer into the MAC's PHY
 *	interface, random-seed, misc and link-speed registers.
 */
void
nfe_miibus_statchg(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	/* Read-modify-write: clear the duplex/speed bits we manage. */
	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}
519 | | | 522 | |
/*
 * nfe_miibus_readreg:
 *	MII read hook.  Reads register 'reg' of PHY 'phy' over the
 *	chip's MDIO interface.  Issues the command via NFE_PHY_CTL and
 *	polls the busy bit (up to 1000 * 100us).  Returns 0 on timeout
 *	or PHY error, otherwise the register value.  As a side effect,
 *	records the first responding PHY address in sc->mii_phyaddr.
 */
int
nfe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nfe_softc *sc = device_private(dev);
	uint32_t val;
	int ntries;

	/* Clear any stale status bits before issuing the command. */
	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* If a previous MDIO cycle is still in flight, cancel it. */
	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	/* Issue the read command. */
	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	/* Poll for completion. */
	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("%s: timeout waiting for PHY\n",
		    device_xname(sc->sc_dev)));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("%s: could not read PHY\n",
		    device_xname(sc->sc_dev)));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	/* Remember the address of a PHY that actually answered. */
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(2, ("%s: mii read phy %d reg 0x%x ret 0x%x\n",
	    device_xname(sc->sc_dev), phy, reg, val));

	return val;
}
562 | | | 565 | |
/*
 * nfe_miibus_writereg:
 *	MII write hook.  Writes 'val' to register 'reg' of PHY 'phy'
 *	over the chip's MDIO interface, then polls the busy bit (up to
 *	1000 * 100us).  A timeout is only reported under NFE_DEBUG;
 *	the write is otherwise fire-and-forget.
 */
void
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = device_private(dev);
	uint32_t ctl;
	int ntries;

	/* Clear any stale status bits before issuing the command. */
	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* If a previous MDIO cycle is still in flight, cancel it. */
	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	/* Load the data register, then issue the write command. */
	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	/* Poll for completion. */
	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}
591 | | | 594 | |
/*
 * nfe_intr:
 *	Interrupt handler.  Loops reading NFE_IRQ_STATUS, acknowledging
 *	and dispatching each cause (Rx, Tx, link change) until no wanted
 *	bits remain.  Returns nonzero iff the interrupt was ours, and
 *	kicks the transmitter if packets are queued.
 */
int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t r;
	int handled;

	/* Interface down: the interrupt cannot be for us. */
	if ((ifp->if_flags & IFF_UP) == 0)
		return 0;

	handled = 0;

	for (;;) {
		r = NFE_READ(sc, NFE_IRQ_STATUS);
		if ((r & NFE_IRQ_WANTED) == 0)
			break;

		/* Ack exactly the causes we observed, then service them. */
		NFE_WRITE(sc, NFE_IRQ_STATUS, r);
		handled = 1;
		DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

		if ((r & (NFE_IRQ_RXERR|NFE_IRQ_RX_NOBUF|NFE_IRQ_RX)) != 0) {
			/* check Rx ring */
			nfe_rxeof(sc);
		}
		if ((r & (NFE_IRQ_TXERR|NFE_IRQ_TXERR2|NFE_IRQ_TX_DONE)) != 0) {
			/* check Tx ring */
			nfe_txeof(sc);
		}
		if ((r & NFE_IRQ_LINK) != 0) {
			/* Read-then-clear the PHY status register. */
			NFE_READ(sc, NFE_PHY_STATUS);
			NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
			DPRINTF(("%s: link state changed\n",
			    device_xname(sc->sc_dev)));
		}
	}

	/* Tx descriptors may have freed up; restart output if queued. */
	if (handled && !IF_IS_EMPTY(&ifp->if_snd))
		nfe_start(ifp);

	return handled;
}
635 | | | 638 | |
636 | static int | | 639 | static int |
637 | nfe_ifflags_cb(struct ethercom *ec) | | 640 | nfe_ifflags_cb(struct ethercom *ec) |
638 | { | | 641 | { |
639 | struct ifnet *ifp = &ec->ec_if; | | 642 | struct ifnet *ifp = &ec->ec_if; |
640 | struct nfe_softc *sc = ifp->if_softc; | | 643 | struct nfe_softc *sc = ifp->if_softc; |
641 | int change = ifp->if_flags ^ sc->sc_if_flags; | | 644 | int change = ifp->if_flags ^ sc->sc_if_flags; |
642 | | | 645 | |
643 | /* | | 646 | /* |
644 | * If only the PROMISC flag changes, then | | 647 | * If only the PROMISC flag changes, then |
645 | * don't do a full re-init of the chip, just update | | 648 | * don't do a full re-init of the chip, just update |
646 | * the Rx filter. | | 649 | * the Rx filter. |
647 | */ | | 650 | */ |
648 | if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) | | 651 | if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) |
649 | return ENETRESET; | | 652 | return ENETRESET; |
650 | else if ((change & IFF_PROMISC) != 0) | | 653 | else if ((change & IFF_PROMISC) != 0) |
651 | nfe_setmulti(sc); | | 654 | nfe_setmulti(sc); |
652 | | | 655 | |
653 | return 0; | | 656 | return 0; |
654 | } | | 657 | } |
655 | | | 658 | |
656 | int | | 659 | int |
657 | nfe_ioctl(struct ifnet *ifp, u_long cmd, void *data) | | 660 | nfe_ioctl(struct ifnet *ifp, u_long cmd, void *data) |
658 | { | | 661 | { |
659 | struct nfe_softc *sc = ifp->if_softc; | | 662 | struct nfe_softc *sc = ifp->if_softc; |
660 | struct ifaddr *ifa = (struct ifaddr *)data; | | 663 | struct ifaddr *ifa = (struct ifaddr *)data; |
661 | int s, error = 0; | | 664 | int s, error = 0; |
662 | | | 665 | |
663 | s = splnet(); | | 666 | s = splnet(); |
664 | | | 667 | |
665 | switch (cmd) { | | 668 | switch (cmd) { |
666 | case SIOCINITIFADDR: | | 669 | case SIOCINITIFADDR: |
667 | ifp->if_flags |= IFF_UP; | | 670 | ifp->if_flags |= IFF_UP; |
668 | nfe_init(ifp); | | 671 | nfe_init(ifp); |
669 | switch (ifa->ifa_addr->sa_family) { | | 672 | switch (ifa->ifa_addr->sa_family) { |
670 | #ifdef INET | | 673 | #ifdef INET |
671 | case AF_INET: | | 674 | case AF_INET: |
672 | arp_ifinit(ifp, ifa); | | 675 | arp_ifinit(ifp, ifa); |
673 | break; | | 676 | break; |
674 | #endif | | 677 | #endif |
675 | default: | | 678 | default: |
676 | break; | | 679 | break; |
677 | } | | 680 | } |
678 | break; | | 681 | break; |
679 | default: | | 682 | default: |
680 | if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET) | | 683 | if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET) |
681 | break; | | 684 | break; |
682 | | | 685 | |
683 | error = 0; | | 686 | error = 0; |
684 | | | 687 | |
685 | if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI) | | 688 | if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI) |
686 | ; | | 689 | ; |
687 | else if (ifp->if_flags & IFF_RUNNING) | | 690 | else if (ifp->if_flags & IFF_RUNNING) |
688 | nfe_setmulti(sc); | | 691 | nfe_setmulti(sc); |
689 | break; | | 692 | break; |
690 | } | | 693 | } |
691 | sc->sc_if_flags = ifp->if_flags; | | 694 | sc->sc_if_flags = ifp->if_flags; |
692 | | | 695 | |
693 | splx(s); | | 696 | splx(s); |
694 | | | 697 | |
695 | return error; | | 698 | return error; |
696 | } | | 699 | } |
697 | | | 700 | |
698 | void | | 701 | void |
699 | nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops) | | 702 | nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops) |
700 | { | | 703 | { |
701 | bus_dmamap_sync(sc->sc_dmat, sc->txq.map, | | 704 | bus_dmamap_sync(sc->sc_dmat, sc->txq.map, |
702 | (char *)desc32 - (char *)sc->txq.desc32, | | 705 | (char *)desc32 - (char *)sc->txq.desc32, |
703 | sizeof (struct nfe_desc32), ops); | | 706 | sizeof (struct nfe_desc32), ops); |
704 | } | | 707 | } |
705 | | | 708 | |
706 | void | | 709 | void |
707 | nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops) | | 710 | nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops) |
708 | { | | 711 | { |
709 | bus_dmamap_sync(sc->sc_dmat, sc->txq.map, | | 712 | bus_dmamap_sync(sc->sc_dmat, sc->txq.map, |
710 | (char *)desc64 - (char *)sc->txq.desc64, | | 713 | (char *)desc64 - (char *)sc->txq.desc64, |
711 | sizeof (struct nfe_desc64), ops); | | 714 | sizeof (struct nfe_desc64), ops); |
712 | } | | 715 | } |
713 | | | 716 | |
714 | void | | 717 | void |
715 | nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops) | | 718 | nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops) |
716 | { | | 719 | { |
717 | if (end > start) { | | 720 | if (end > start) { |
718 | bus_dmamap_sync(sc->sc_dmat, sc->txq.map, | | 721 | bus_dmamap_sync(sc->sc_dmat, sc->txq.map, |
719 | (char *)&sc->txq.desc32[start] - (char *)sc->txq.desc32, | | 722 | (char *)&sc->txq.desc32[start] - (char *)sc->txq.desc32, |
720 | (char *)&sc->txq.desc32[end] - | | 723 | (char *)&sc->txq.desc32[end] - |
721 | (char *)&sc->txq.desc32[start], ops); | | 724 | (char *)&sc->txq.desc32[start], ops); |
722 | return; | | 725 | return; |
723 | } | | 726 | } |
724 | /* sync from 'start' to end of ring */ | | 727 | /* sync from 'start' to end of ring */ |
725 | bus_dmamap_sync(sc->sc_dmat, sc->txq.map, | | 728 | bus_dmamap_sync(sc->sc_dmat, sc->txq.map, |
726 | (char *)&sc->txq.desc32[start] - (char *)sc->txq.desc32, | | 729 | (char *)&sc->txq.desc32[start] - (char *)sc->txq.desc32, |
727 | (char *)&sc->txq.desc32[NFE_TX_RING_COUNT] - | | 730 | (char *)&sc->txq.desc32[NFE_TX_RING_COUNT] - |
728 | (char *)&sc->txq.desc32[start], ops); | | 731 | (char *)&sc->txq.desc32[start], ops); |
729 | | | 732 | |
730 | /* sync from start of ring to 'end' */ | | 733 | /* sync from start of ring to 'end' */ |
731 | bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0, | | 734 | bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0, |
732 | (char *)&sc->txq.desc32[end] - (char *)sc->txq.desc32, ops); | | 735 | (char *)&sc->txq.desc32[end] - (char *)sc->txq.desc32, ops); |
733 | } | | 736 | } |
734 | | | 737 | |
735 | void | | 738 | void |
736 | nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops) | | 739 | nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops) |
737 | { | | 740 | { |
738 | if (end > start) { | | 741 | if (end > start) { |
739 | bus_dmamap_sync(sc->sc_dmat, sc->txq.map, | | 742 | bus_dmamap_sync(sc->sc_dmat, sc->txq.map, |
740 | (char *)&sc->txq.desc64[start] - (char *)sc->txq.desc64, | | 743 | (char *)&sc->txq.desc64[start] - (char *)sc->txq.desc64, |
741 | (char *)&sc->txq.desc64[end] - | | 744 | (char *)&sc->txq.desc64[end] - |
742 | (char *)&sc->txq.desc64[start], ops); | | 745 | (char *)&sc->txq.desc64[start], ops); |
743 | return; | | 746 | return; |
744 | } | | 747 | } |
745 | /* sync from 'start' to end of ring */ | | 748 | /* sync from 'start' to end of ring */ |
746 | bus_dmamap_sync(sc->sc_dmat, sc->txq.map, | | 749 | bus_dmamap_sync(sc->sc_dmat, sc->txq.map, |
747 | (char *)&sc->txq.desc64[start] - (char *)sc->txq.desc64, | | 750 | (char *)&sc->txq.desc64[start] - (char *)sc->txq.desc64, |
748 | (char *)&sc->txq.desc64[NFE_TX_RING_COUNT] - | | 751 | (char *)&sc->txq.desc64[NFE_TX_RING_COUNT] - |
749 | (char *)&sc->txq.desc64[start], ops); | | 752 | (char *)&sc->txq.desc64[start], ops); |
750 | | | 753 | |
751 | /* sync from start of ring to 'end' */ | | 754 | /* sync from start of ring to 'end' */ |
752 | bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0, | | 755 | bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0, |
753 | (char *)&sc->txq.desc64[end] - (char *)sc->txq.desc64, ops); | | 756 | (char *)&sc->txq.desc64[end] - (char *)sc->txq.desc64, ops); |
754 | } | | 757 | } |
755 | | | 758 | |
756 | void | | 759 | void |
757 | nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops) | | 760 | nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops) |
758 | { | | 761 | { |
759 | bus_dmamap_sync(sc->sc_dmat, sc->rxq.map, | | 762 | bus_dmamap_sync(sc->sc_dmat, sc->rxq.map, |
760 | (char *)desc32 - (char *)sc->rxq.desc32, | | 763 | (char *)desc32 - (char *)sc->rxq.desc32, |
761 | sizeof (struct nfe_desc32), ops); | | 764 | sizeof (struct nfe_desc32), ops); |
762 | } | | 765 | } |
763 | | | 766 | |
764 | void | | 767 | void |
765 | nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops) | | 768 | nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops) |
766 | { | | 769 | { |
767 | bus_dmamap_sync(sc->sc_dmat, sc->rxq.map, | | 770 | bus_dmamap_sync(sc->sc_dmat, sc->rxq.map, |
768 | (char *)desc64 - (char *)sc->rxq.desc64, | | 771 | (char *)desc64 - (char *)sc->rxq.desc64, |
769 | sizeof (struct nfe_desc64), ops); | | 772 | sizeof (struct nfe_desc64), ops); |
770 | } | | 773 | } |
771 | | | 774 | |
772 | void | | 775 | void |
773 | nfe_rxeof(struct nfe_softc *sc) | | 776 | nfe_rxeof(struct nfe_softc *sc) |
774 | { | | 777 | { |
775 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; | | 778 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
776 | struct nfe_desc32 *desc32; | | 779 | struct nfe_desc32 *desc32; |
777 | struct nfe_desc64 *desc64; | | 780 | struct nfe_desc64 *desc64; |
778 | struct nfe_rx_data *data; | | 781 | struct nfe_rx_data *data; |
779 | struct nfe_jbuf *jbuf; | | 782 | struct nfe_jbuf *jbuf; |
780 | struct mbuf *m, *mnew; | | 783 | struct mbuf *m, *mnew; |
781 | bus_addr_t physaddr; | | 784 | bus_addr_t physaddr; |
782 | uint16_t flags; | | 785 | uint16_t flags; |
783 | int error, len, i; | | 786 | int error, len, i; |
784 | | | 787 | |
785 | desc32 = NULL; | | 788 | desc32 = NULL; |
786 | desc64 = NULL; | | 789 | desc64 = NULL; |
787 | for (i = sc->rxq.cur;; i = NFE_RX_NEXTDESC(i)) { | | 790 | for (i = sc->rxq.cur;; i = NFE_RX_NEXTDESC(i)) { |
788 | data = &sc->rxq.data[i]; | | 791 | data = &sc->rxq.data[i]; |
789 | | | 792 | |
790 | if (sc->sc_flags & NFE_40BIT_ADDR) { | | 793 | if (sc->sc_flags & NFE_40BIT_ADDR) { |
791 | desc64 = &sc->rxq.desc64[i]; | | 794 | desc64 = &sc->rxq.desc64[i]; |
792 | nfe_rxdesc64_sync(sc, desc64, | | 795 | nfe_rxdesc64_sync(sc, desc64, |
793 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); | | 796 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); |
794 | | | 797 | |
795 | flags = le16toh(desc64->flags); | | 798 | flags = le16toh(desc64->flags); |
796 | len = le16toh(desc64->length) & 0x3fff; | | 799 | len = le16toh(desc64->length) & 0x3fff; |
797 | } else { | | 800 | } else { |
798 | desc32 = &sc->rxq.desc32[i]; | | 801 | desc32 = &sc->rxq.desc32[i]; |
799 | nfe_rxdesc32_sync(sc, desc32, | | 802 | nfe_rxdesc32_sync(sc, desc32, |
800 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); | | 803 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); |
801 | | | 804 | |
802 | flags = le16toh(desc32->flags); | | 805 | flags = le16toh(desc32->flags); |
803 | len = le16toh(desc32->length) & 0x3fff; | | 806 | len = le16toh(desc32->length) & 0x3fff; |
804 | } | | 807 | } |
805 | | | 808 | |
806 | if ((flags & NFE_RX_READY) != 0) | | 809 | if ((flags & NFE_RX_READY) != 0) |
807 | break; | | 810 | break; |
808 | | | 811 | |
809 | if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { | | 812 | if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { |
810 | if ((flags & NFE_RX_VALID_V1) == 0) | | 813 | if ((flags & NFE_RX_VALID_V1) == 0) |
811 | goto skip; | | 814 | goto skip; |
812 | | | 815 | |
813 | if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) { | | 816 | if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) { |
814 | flags &= ~NFE_RX_ERROR; | | 817 | flags &= ~NFE_RX_ERROR; |
815 | len--; /* fix buffer length */ | | 818 | len--; /* fix buffer length */ |
816 | } | | 819 | } |
817 | } else { | | 820 | } else { |
818 | if ((flags & NFE_RX_VALID_V2) == 0) | | 821 | if ((flags & NFE_RX_VALID_V2) == 0) |
819 | goto skip; | | 822 | goto skip; |
820 | | | 823 | |
821 | if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) { | | 824 | if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) { |
822 | flags &= ~NFE_RX_ERROR; | | 825 | flags &= ~NFE_RX_ERROR; |
823 | len--; /* fix buffer length */ | | 826 | len--; /* fix buffer length */ |
824 | } | | 827 | } |
825 | } | | 828 | } |
826 | | | 829 | |
827 | if (flags & NFE_RX_ERROR) { | | 830 | if (flags & NFE_RX_ERROR) { |
828 | ifp->if_ierrors++; | | 831 | ifp->if_ierrors++; |
829 | goto skip; | | 832 | goto skip; |
830 | } | | 833 | } |
831 | | | 834 | |
832 | /* | | 835 | /* |
833 | * Try to allocate a new mbuf for this ring element and load | | 836 | * Try to allocate a new mbuf for this ring element and load |
834 | * it before processing the current mbuf. If the ring element | | 837 | * it before processing the current mbuf. If the ring element |
835 | * cannot be loaded, drop the received packet and reuse the | | 838 | * cannot be loaded, drop the received packet and reuse the |
836 | * old mbuf. In the unlikely case that the old mbuf can't be | | 839 | * old mbuf. In the unlikely case that the old mbuf can't be |
837 | * reloaded either, explicitly panic. | | 840 | * reloaded either, explicitly panic. |
838 | */ | | 841 | */ |
839 | MGETHDR(mnew, M_DONTWAIT, MT_DATA); | | 842 | MGETHDR(mnew, M_DONTWAIT, MT_DATA); |
840 | if (mnew == NULL) { | | 843 | if (mnew == NULL) { |
841 | ifp->if_ierrors++; | | 844 | ifp->if_ierrors++; |
842 | goto skip; | | 845 | goto skip; |
843 | } | | 846 | } |
844 | | | 847 | |
845 | if (sc->sc_flags & NFE_USE_JUMBO) { | | 848 | if (sc->sc_flags & NFE_USE_JUMBO) { |
846 | physaddr = | | 849 | physaddr = |
847 | sc->rxq.jbuf[sc->rxq.jbufmap[i]].physaddr; | | 850 | sc->rxq.jbuf[sc->rxq.jbufmap[i]].physaddr; |
848 | if ((jbuf = nfe_jalloc(sc, i)) == NULL) { | | 851 | if ((jbuf = nfe_jalloc(sc, i)) == NULL) { |
849 | if (len > MCLBYTES) { | | 852 | if (len > MCLBYTES) { |
850 | m_freem(mnew); | | 853 | m_freem(mnew); |
851 | ifp->if_ierrors++; | | 854 | ifp->if_ierrors++; |
852 | goto skip1; | | 855 | goto skip1; |
853 | } | | 856 | } |
854 | MCLGET(mnew, M_DONTWAIT); | | 857 | MCLGET(mnew, M_DONTWAIT); |
855 | if ((mnew->m_flags & M_EXT) == 0) { | | 858 | if ((mnew->m_flags & M_EXT) == 0) { |
856 | m_freem(mnew); | | 859 | m_freem(mnew); |
857 | ifp->if_ierrors++; | | 860 | ifp->if_ierrors++; |
858 | goto skip1; | | 861 | goto skip1; |
859 | } | | 862 | } |
860 | | | 863 | |
861 | (void)memcpy(mtod(mnew, void *), | | 864 | (void)memcpy(mtod(mnew, void *), |
862 | mtod(data->m, const void *), len); | | 865 | mtod(data->m, const void *), len); |
863 | m = mnew; | | 866 | m = mnew; |
864 | goto mbufcopied; | | 867 | goto mbufcopied; |
865 | } else { | | 868 | } else { |
866 | MEXTADD(mnew, jbuf->buf, NFE_JBYTES, 0, nfe_jfree, sc); | | 869 | MEXTADD(mnew, jbuf->buf, NFE_JBYTES, 0, nfe_jfree, sc); |
867 | bus_dmamap_sync(sc->sc_dmat, sc->rxq.jmap, | | 870 | bus_dmamap_sync(sc->sc_dmat, sc->rxq.jmap, |
868 | mtod(data->m, char *) - (char *)sc->rxq.jpool, | | 871 | mtod(data->m, char *) - (char *)sc->rxq.jpool, |
869 | NFE_JBYTES, BUS_DMASYNC_POSTREAD); | | 872 | NFE_JBYTES, BUS_DMASYNC_POSTREAD); |
870 | | | 873 | |
871 | physaddr = jbuf->physaddr; | | 874 | physaddr = jbuf->physaddr; |
872 | } | | 875 | } |
873 | } else { | | 876 | } else { |
874 | MCLGET(mnew, M_DONTWAIT); | | 877 | MCLGET(mnew, M_DONTWAIT); |
875 | if ((mnew->m_flags & M_EXT) == 0) { | | 878 | if ((mnew->m_flags & M_EXT) == 0) { |
876 | m_freem(mnew); | | 879 | m_freem(mnew); |
877 | ifp->if_ierrors++; | | 880 | ifp->if_ierrors++; |
878 | goto skip; | | 881 | goto skip; |
879 | } | | 882 | } |
880 | | | 883 | |
881 | bus_dmamap_sync(sc->sc_dmat, data->map, 0, | | 884 | bus_dmamap_sync(sc->sc_dmat, data->map, 0, |
882 | data->map->dm_mapsize, BUS_DMASYNC_POSTREAD); | | 885 | data->map->dm_mapsize, BUS_DMASYNC_POSTREAD); |
883 | bus_dmamap_unload(sc->sc_dmat, data->map); | | 886 | bus_dmamap_unload(sc->sc_dmat, data->map); |
884 | | | 887 | |
885 | error = bus_dmamap_load(sc->sc_dmat, data->map, | | 888 | error = bus_dmamap_load(sc->sc_dmat, data->map, |
886 | mtod(mnew, void *), MCLBYTES, NULL, | | 889 | mtod(mnew, void *), MCLBYTES, NULL, |
887 | BUS_DMA_READ | BUS_DMA_NOWAIT); | | 890 | BUS_DMA_READ | BUS_DMA_NOWAIT); |
888 | if (error != 0) { | | 891 | if (error != 0) { |
889 | m_freem(mnew); | | 892 | m_freem(mnew); |
890 | | | 893 | |
891 | /* try to reload the old mbuf */ | | 894 | /* try to reload the old mbuf */ |
892 | error = bus_dmamap_load(sc->sc_dmat, data->map, | | 895 | error = bus_dmamap_load(sc->sc_dmat, data->map, |
893 | mtod(data->m, void *), MCLBYTES, NULL, | | 896 | mtod(data->m, void *), MCLBYTES, NULL, |
894 | BUS_DMA_READ | BUS_DMA_NOWAIT); | | 897 | BUS_DMA_READ | BUS_DMA_NOWAIT); |
895 | if (error != 0) { | | 898 | if (error != 0) { |
896 | /* very unlikely that it will fail.. */ | | 899 | /* very unlikely that it will fail.. */ |
897 | panic("%s: could not load old rx mbuf", | | 900 | panic("%s: could not load old rx mbuf", |
898 | device_xname(sc->sc_dev)); | | 901 | device_xname(sc->sc_dev)); |
899 | } | | 902 | } |
900 | ifp->if_ierrors++; | | 903 | ifp->if_ierrors++; |
901 | goto skip; | | 904 | goto skip; |
902 | } | | 905 | } |
903 | physaddr = data->map->dm_segs[0].ds_addr; | | 906 | physaddr = data->map->dm_segs[0].ds_addr; |
904 | } | | 907 | } |
905 | | | 908 | |
906 | /* | | 909 | /* |
907 | * New mbuf successfully loaded, update Rx ring and continue | | 910 | * New mbuf successfully loaded, update Rx ring and continue |
908 | * processing. | | 911 | * processing. |
909 | */ | | 912 | */ |
910 | m = data->m; | | 913 | m = data->m; |
911 | data->m = mnew; | | 914 | data->m = mnew; |
912 | | | 915 | |
913 | mbufcopied: | | 916 | mbufcopied: |
914 | /* finalize mbuf */ | | 917 | /* finalize mbuf */ |
915 | m->m_pkthdr.len = m->m_len = len; | | 918 | m->m_pkthdr.len = m->m_len = len; |
916 | m->m_pkthdr.rcvif = ifp; | | 919 | m->m_pkthdr.rcvif = ifp; |
917 | | | 920 | |
918 | if ((sc->sc_flags & NFE_HW_CSUM) != 0) { | | 921 | if ((sc->sc_flags & NFE_HW_CSUM) != 0) { |
919 | /* | | 922 | /* |
920 | * XXX | | 923 | * XXX |
921 | * no way to check M_CSUM_IPv4_BAD or non-IPv4 packets? | | 924 | * no way to check M_CSUM_IPv4_BAD or non-IPv4 packets? |
922 | */ | | 925 | */ |
923 | if (flags & NFE_RX_IP_CSUMOK) { | | 926 | if (flags & NFE_RX_IP_CSUMOK) { |
924 | m->m_pkthdr.csum_flags |= M_CSUM_IPv4; | | 927 | m->m_pkthdr.csum_flags |= M_CSUM_IPv4; |
925 | DPRINTFN(3, ("%s: ip4csum-rx ok\n", | | 928 | DPRINTFN(3, ("%s: ip4csum-rx ok\n", |
926 | device_xname(sc->sc_dev))); | | 929 | device_xname(sc->sc_dev))); |
927 | } | | 930 | } |
928 | /* | | 931 | /* |
929 | * XXX | | 932 | * XXX |
930 | * no way to check M_CSUM_TCP_UDP_BAD or | | 933 | * no way to check M_CSUM_TCP_UDP_BAD or |
931 | * other protocols? | | 934 | * other protocols? |
932 | */ | | 935 | */ |
933 | if (flags & NFE_RX_UDP_CSUMOK) { | | 936 | if (flags & NFE_RX_UDP_CSUMOK) { |
934 | m->m_pkthdr.csum_flags |= M_CSUM_UDPv4; | | 937 | m->m_pkthdr.csum_flags |= M_CSUM_UDPv4; |
935 | DPRINTFN(3, ("%s: udp4csum-rx ok\n", | | 938 | DPRINTFN(3, ("%s: udp4csum-rx ok\n", |
936 | device_xname(sc->sc_dev))); | | 939 | device_xname(sc->sc_dev))); |
937 | } else if (flags & NFE_RX_TCP_CSUMOK) { | | 940 | } else if (flags & NFE_RX_TCP_CSUMOK) { |
938 | m->m_pkthdr.csum_flags |= M_CSUM_TCPv4; | | 941 | m->m_pkthdr.csum_flags |= M_CSUM_TCPv4; |
939 | DPRINTFN(3, ("%s: tcp4csum-rx ok\n", | | 942 | DPRINTFN(3, ("%s: tcp4csum-rx ok\n", |
940 | device_xname(sc->sc_dev))); | | 943 | device_xname(sc->sc_dev))); |
941 | } | | 944 | } |
942 | } | | 945 | } |
943 | bpf_mtap(ifp, m); | | 946 | bpf_mtap(ifp, m); |
944 | ifp->if_ipackets++; | | 947 | ifp->if_ipackets++; |
945 | (*ifp->if_input)(ifp, m); | | 948 | (*ifp->if_input)(ifp, m); |
946 | | | 949 | |
947 | skip1: | | 950 | skip1: |
948 | /* update mapping address in h/w descriptor */ | | 951 | /* update mapping address in h/w descriptor */ |
949 | if (sc->sc_flags & NFE_40BIT_ADDR) { | | 952 | if (sc->sc_flags & NFE_40BIT_ADDR) { |
950 | #if defined(__LP64__) | | 953 | #if defined(__LP64__) |
951 | desc64->physaddr[0] = htole32(physaddr >> 32); | | 954 | desc64->physaddr[0] = htole32(physaddr >> 32); |
952 | #endif | | 955 | #endif |
953 | desc64->physaddr[1] = htole32(physaddr & 0xffffffff); | | 956 | desc64->physaddr[1] = htole32(physaddr & 0xffffffff); |
954 | } else { | | 957 | } else { |
955 | desc32->physaddr = htole32(physaddr); | | 958 | desc32->physaddr = htole32(physaddr); |
956 | } | | 959 | } |
957 | | | 960 | |
958 | skip: | | 961 | skip: |
959 | if (sc->sc_flags & NFE_40BIT_ADDR) { | | 962 | if (sc->sc_flags & NFE_40BIT_ADDR) { |
960 | desc64->length = htole16(sc->rxq.bufsz); | | 963 | desc64->length = htole16(sc->rxq.bufsz); |
961 | desc64->flags = htole16(NFE_RX_READY); | | 964 | desc64->flags = htole16(NFE_RX_READY); |
962 | | | 965 | |
963 | nfe_rxdesc64_sync(sc, desc64, | | 966 | nfe_rxdesc64_sync(sc, desc64, |
964 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); | | 967 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
965 | } else { | | 968 | } else { |
966 | desc32->length = htole16(sc->rxq.bufsz); | | 969 | desc32->length = htole16(sc->rxq.bufsz); |
967 | desc32->flags = htole16(NFE_RX_READY); | | 970 | desc32->flags = htole16(NFE_RX_READY); |
968 | | | 971 | |
969 | nfe_rxdesc32_sync(sc, desc32, | | 972 | nfe_rxdesc32_sync(sc, desc32, |
970 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); | | 973 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
971 | } | | 974 | } |
972 | } | | 975 | } |
973 | /* update current RX pointer */ | | 976 | /* update current RX pointer */ |
974 | sc->rxq.cur = i; | | 977 | sc->rxq.cur = i; |
975 | } | | 978 | } |
976 | | | 979 | |
/*
 * nfe_txeof: transmit completion processing.
 *
 * Walk the tx ring from txq.next, reclaiming every descriptor the chip
 * has finished with (NFE_TX_VALID cleared).  Status is only valid on the
 * last fragment of a chain; when that fragment is seen, the DMA map is
 * unloaded and the mbuf chain freed.  Updates if_opackets/if_oerrors,
 * clears IFF_OACTIVE once a slot is free, and cancels the watchdog when
 * the ring drains completely.
 */
void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	int i;
	uint16_t flags;
	char buf[128];		/* scratch for snprintb() error decoding */

	/* txq.queued is decremented for every descriptor reclaimed */
	for (i = sc->txq.next;
	    sc->txq.queued > 0;
	    i = NFE_TX_NEXTDESC(i), sc->txq.queued--) {
		/* read back descriptor i in whichever format the chip uses */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[i];
			nfe_txdesc64_sync(sc, desc64,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			flags = le16toh(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[i];
			nfe_txdesc32_sync(sc, desc32,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			flags = le16toh(desc32->flags);
		}

		/* descriptor still owned by the hardware: stop here */
		if ((flags & NFE_TX_VALID) != 0)
			break;

		data = &sc->txq.data[i];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			/* v1 descriptors: only the last fragment has status */
			if ((flags & NFE_TX_LASTFRAG_V1) == 0 &&
			    data->m == NULL)
				continue;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				snprintb(buf, sizeof(buf), NFE_V1_TXERR, flags);
				aprint_error_dev(sc->sc_dev, "tx v1 error %s\n",
				    buf);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			/* v2 descriptors: same scheme, different bit layout */
			if ((flags & NFE_TX_LASTFRAG_V2) == 0 &&
			    data->m == NULL)
				continue;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				snprintb(buf, sizeof(buf), NFE_V2_TXERR, flags);
				aprint_error_dev(sc->sc_dev, "tx v2 error %s\n",
				    buf);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get there */
			aprint_error_dev(sc->sc_dev,
			    "last fragment bit w/o associated mbuf!\n");
			continue;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;
	}

	/* resume reclaiming from this slot next time */
	sc->txq.next = i;

	if (sc->txq.queued < NFE_TX_RING_COUNT) {
		/* at least one slot freed */
		ifp->if_flags &= ~IFF_OACTIVE;
	}

	if (sc->txq.queued == 0) {
		/* all queued packets are sent */
		ifp->if_timer = 0;
	}
}
1062 | | | 1065 | |
/*
 * nfe_encap: DMA-map mbuf chain m0 and fill one tx descriptor per
 * segment.
 *
 * Returns 0 on success, or the bus_dmamap_load_mbuf() error / ENOBUFS
 * if the ring lacks room for the whole chain (the caller keeps ownership
 * of m0 on failure).
 *
 * Descriptor-ordering subtleties (do not reorder):
 *  - NFE_TX_VALID is set on descriptors 2..n as they are written, but on
 *    the FIRST descriptor only after the whole chain is set up, so the
 *    chip never sees a half-built chain.
 *  - the DMA map comes from the FIRST slot's data entry, but data->m and
 *    data->active are stored in the LAST slot, which is where nfe_txeof()
 *    looks for them when the last fragment completes.
 *  - checksum flags (and the vtag on 40-bit chips) go on the first
 *    fragment only.
 */
int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	bus_dmamap_t map;
	uint16_t flags, csumflags;
#if NVLAN > 0
	struct m_tag *mtag;
	uint32_t vtag = 0;
#endif
	int error, i, first;

	desc32 = NULL;
	desc64 = NULL;
	data = NULL;

	flags = 0;
	csumflags = 0;
	first = sc->txq.cur;	/* index of the chain's first descriptor */

	map = sc->txq.data[first].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf (error %d)\n",
		    error);
		return error;
	}

	/* make sure the whole chain fits; keep one slot spare */
	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#if NVLAN > 0
	/* setup h/w VLAN tagging */
	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL)
		vtag = NFE_TX_VTAG | VLAN_TAG_VALUE(mtag);
#endif
	/* translate stack checksum-offload requests into descriptor bits */
	if ((sc->sc_flags & NFE_HW_CSUM) != 0) {
		if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4)
			csumflags |= NFE_TX_IP_CSUM;
		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_UDPv4))
			csumflags |= NFE_TX_TCP_UDP_CSUM;
	}

	/* one descriptor per DMA segment */
	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			/* hardware wants length - 1 */
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
			desc64->vtag = 0;
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/*
		 * Setting of the valid bit in the first descriptor is
		 * deferred until the whole chain is fully setup.
		 */
		flags |= NFE_TX_VALID;

		sc->txq.queued++;
		sc->txq.cur = NFE_TX_NEXTDESC(sc->txq.cur);
	}

	/* the whole mbuf chain has been setup */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		/* fix last descriptor */
		flags |= NFE_TX_LASTFRAG_V2;
		desc64->flags = htole16(flags);

		/* Checksum flags and vtag belong to the first fragment only. */
#if NVLAN > 0
		sc->txq.desc64[first].vtag = htole32(vtag);
#endif
		sc->txq.desc64[first].flags |= htole16(csumflags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc64[first].flags |= htole16(NFE_TX_VALID);
	} else {
		/* fix last descriptor */
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;
		desc32->flags = htole16(flags);

		/* Checksum flags belong to the first fragment only. */
		sc->txq.desc32[first].flags |= htole16(csumflags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc32[first].flags |= htole16(NFE_TX_VALID);
	}

	/* nfe_txeof() reclaims the chain from the LAST slot's data entry */
	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}
1180 | | | 1183 | |
1181 | void | | 1184 | void |
1182 | nfe_start(struct ifnet *ifp) | | 1185 | nfe_start(struct ifnet *ifp) |
1183 | { | | 1186 | { |
1184 | struct nfe_softc *sc = ifp->if_softc; | | 1187 | struct nfe_softc *sc = ifp->if_softc; |
1185 | int old = sc->txq.queued; | | 1188 | int old = sc->txq.queued; |
1186 | struct mbuf *m0; | | 1189 | struct mbuf *m0; |
1187 | | | 1190 | |
1188 | if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) | | 1191 | if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) |
1189 | return; | | 1192 | return; |
1190 | | | 1193 | |
1191 | for (;;) { | | 1194 | for (;;) { |
1192 | IFQ_POLL(&ifp->if_snd, m0); | | 1195 | IFQ_POLL(&ifp->if_snd, m0); |
1193 | if (m0 == NULL) | | 1196 | if (m0 == NULL) |
1194 | break; | | 1197 | break; |
1195 | | | 1198 | |
1196 | if (nfe_encap(sc, m0) != 0) { | | 1199 | if (nfe_encap(sc, m0) != 0) { |
1197 | ifp->if_flags |= IFF_OACTIVE; | | 1200 | ifp->if_flags |= IFF_OACTIVE; |
1198 | break; | | 1201 | break; |
1199 | } | | 1202 | } |
1200 | | | 1203 | |
1201 | /* packet put in h/w queue, remove from s/w queue */ | | 1204 | /* packet put in h/w queue, remove from s/w queue */ |
1202 | IFQ_DEQUEUE(&ifp->if_snd, m0); | | 1205 | IFQ_DEQUEUE(&ifp->if_snd, m0); |
1203 | | | 1206 | |
1204 | bpf_mtap(ifp, m0); | | 1207 | bpf_mtap(ifp, m0); |
1205 | } | | 1208 | } |
1206 | | | 1209 | |
1207 | if (sc->txq.queued != old) { | | 1210 | if (sc->txq.queued != old) { |
1208 | /* packets are queued */ | | 1211 | /* packets are queued */ |
1209 | if (sc->sc_flags & NFE_40BIT_ADDR) | | 1212 | if (sc->sc_flags & NFE_40BIT_ADDR) |
1210 | nfe_txdesc64_rsync(sc, old, sc->txq.cur, | | 1213 | nfe_txdesc64_rsync(sc, old, sc->txq.cur, |
1211 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); | | 1214 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
1212 | else | | 1215 | else |
1213 | nfe_txdesc32_rsync(sc, old, sc->txq.cur, | | 1216 | nfe_txdesc32_rsync(sc, old, sc->txq.cur, |
1214 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); | | 1217 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
1215 | /* kick Tx */ | | 1218 | /* kick Tx */ |
1216 | NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl); | | 1219 | NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl); |
1217 | | | 1220 | |
1218 | /* | | 1221 | /* |
1219 | * Set a timeout in case the chip goes out to lunch. | | 1222 | * Set a timeout in case the chip goes out to lunch. |
1220 | */ | | 1223 | */ |
1221 | ifp->if_timer = 5; | | 1224 | ifp->if_timer = 5; |
1222 | } | | 1225 | } |
1223 | } | | 1226 | } |
1224 | | | 1227 | |
1225 | void | | 1228 | void |
1226 | nfe_watchdog(struct ifnet *ifp) | | 1229 | nfe_watchdog(struct ifnet *ifp) |
1227 | { | | 1230 | { |
1228 | struct nfe_softc *sc = ifp->if_softc; | | 1231 | struct nfe_softc *sc = ifp->if_softc; |
1229 | | | 1232 | |
1230 | aprint_error_dev(sc->sc_dev, "watchdog timeout\n"); | | 1233 | aprint_error_dev(sc->sc_dev, "watchdog timeout\n"); |
1231 | | | 1234 | |
1232 | ifp->if_flags &= ~IFF_RUNNING; | | 1235 | ifp->if_flags &= ~IFF_RUNNING; |
1233 | nfe_init(ifp); | | 1236 | nfe_init(ifp); |
1234 | | | 1237 | |
1235 | ifp->if_oerrors++; | | 1238 | ifp->if_oerrors++; |
1236 | } | | 1239 | } |
1237 | | | 1240 | |
1238 | int | | 1241 | int |
1239 | nfe_init(struct ifnet *ifp) | | 1242 | nfe_init(struct ifnet *ifp) |
1240 | { | | 1243 | { |
1241 | struct nfe_softc *sc = ifp->if_softc; | | 1244 | struct nfe_softc *sc = ifp->if_softc; |
1242 | uint32_t tmp; | | 1245 | uint32_t tmp; |
1243 | int rc = 0, s; | | 1246 | int rc = 0, s; |
1244 | | | 1247 | |
1245 | if (ifp->if_flags & IFF_RUNNING) | | 1248 | if (ifp->if_flags & IFF_RUNNING) |
1246 | return 0; | | 1249 | return 0; |
1247 | | | 1250 | |
1248 | nfe_stop(ifp, 0); | | 1251 | nfe_stop(ifp, 0); |
1249 | | | 1252 | |
1250 | NFE_WRITE(sc, NFE_TX_UNK, 0); | | 1253 | NFE_WRITE(sc, NFE_TX_UNK, 0); |
1251 | NFE_WRITE(sc, NFE_STATUS, 0); | | 1254 | NFE_WRITE(sc, NFE_STATUS, 0); |
1252 | | | 1255 | |
1253 | sc->rxtxctl = NFE_RXTX_BIT2; | | 1256 | sc->rxtxctl = NFE_RXTX_BIT2; |
1254 | if (sc->sc_flags & NFE_40BIT_ADDR) | | 1257 | if (sc->sc_flags & NFE_40BIT_ADDR) |
1255 | sc->rxtxctl |= NFE_RXTX_V3MAGIC; | | 1258 | sc->rxtxctl |= NFE_RXTX_V3MAGIC; |
1256 | else if (sc->sc_flags & NFE_JUMBO_SUP) | | 1259 | else if (sc->sc_flags & NFE_JUMBO_SUP) |
1257 | sc->rxtxctl |= NFE_RXTX_V2MAGIC; | | 1260 | sc->rxtxctl |= NFE_RXTX_V2MAGIC; |
1258 | if (sc->sc_flags & NFE_HW_CSUM) | | 1261 | if (sc->sc_flags & NFE_HW_CSUM) |
1259 | sc->rxtxctl |= NFE_RXTX_RXCSUM; | | 1262 | sc->rxtxctl |= NFE_RXTX_RXCSUM; |
1260 | #if NVLAN > 0 | | 1263 | #if NVLAN > 0 |
1261 | /* | | 1264 | /* |
1262 | * Although the adapter is capable of stripping VLAN tags from received | | 1265 | * Although the adapter is capable of stripping VLAN tags from received |
1263 | * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on | | 1266 | * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on |
1264 | * purpose. This will be done in software by our network stack. | | 1267 | * purpose. This will be done in software by our network stack. |
1265 | */ | | 1268 | */ |
1266 | if (sc->sc_flags & NFE_HW_VLAN) | | 1269 | if (sc->sc_flags & NFE_HW_VLAN) |
1267 | sc->rxtxctl |= NFE_RXTX_VTAG_INSERT; | | 1270 | sc->rxtxctl |= NFE_RXTX_VTAG_INSERT; |
1268 | #endif | | 1271 | #endif |
1269 | NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl); | | 1272 | NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl); |
1270 | DELAY(10); | | 1273 | DELAY(10); |
1271 | NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl); | | 1274 | NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl); |
1272 | | | 1275 | |
1273 | #if NVLAN | | 1276 | #if NVLAN |
1274 | if (sc->sc_flags & NFE_HW_VLAN) | | 1277 | if (sc->sc_flags & NFE_HW_VLAN) |
1275 | NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE); | | 1278 | NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE); |
1276 | #endif | | 1279 | #endif |
1277 | | | 1280 | |
1278 | NFE_WRITE(sc, NFE_SETUP_R6, 0); | | 1281 | NFE_WRITE(sc, NFE_SETUP_R6, 0); |
1279 | | | 1282 | |
1280 | /* set MAC address */ | | 1283 | /* set MAC address */ |
1281 | nfe_set_macaddr(sc, sc->sc_enaddr); | | 1284 | nfe_set_macaddr(sc, sc->sc_enaddr); |
1282 | | | 1285 | |
1283 | /* tell MAC where rings are in memory */ | | 1286 | /* tell MAC where rings are in memory */ |
1284 | #ifdef __LP64__ | | 1287 | #ifdef __LP64__ |
1285 | NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32); | | 1288 | NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32); |
1286 | #endif | | 1289 | #endif |
1287 | NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff); | | 1290 | NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff); |
1288 | #ifdef __LP64__ | | 1291 | #ifdef __LP64__ |
1289 | NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32); | | 1292 | NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32); |
1290 | #endif | | 1293 | #endif |
1291 | NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff); | | 1294 | NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff); |
1292 | | | 1295 | |
1293 | NFE_WRITE(sc, NFE_RING_SIZE, | | 1296 | NFE_WRITE(sc, NFE_RING_SIZE, |
1294 | (NFE_RX_RING_COUNT - 1) << 16 | | | 1297 | (NFE_RX_RING_COUNT - 1) << 16 | |
1295 | (NFE_TX_RING_COUNT - 1)); | | 1298 | (NFE_TX_RING_COUNT - 1)); |
1296 | | | 1299 | |
1297 | NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz); | | 1300 | NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz); |
1298 | | | 1301 | |
1299 | /* force MAC to wakeup */ | | 1302 | /* force MAC to wakeup */ |
1300 | tmp = NFE_READ(sc, NFE_PWR_STATE); | | 1303 | tmp = NFE_READ(sc, NFE_PWR_STATE); |
1301 | NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP); | | 1304 | NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP); |
1302 | DELAY(10); | | 1305 | DELAY(10); |
1303 | tmp = NFE_READ(sc, NFE_PWR_STATE); | | 1306 | tmp = NFE_READ(sc, NFE_PWR_STATE); |
1304 | NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID); | | 1307 | NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID); |
1305 | | | 1308 | |
1306 | s = splnet(); | | 1309 | s = splnet(); |
1307 | NFE_WRITE(sc, NFE_IRQ_MASK, 0); | | 1310 | NFE_WRITE(sc, NFE_IRQ_MASK, 0); |
1308 | nfe_intr(sc); /* XXX clear IRQ status registers */ | | 1311 | nfe_intr(sc); /* XXX clear IRQ status registers */ |
1309 | NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED); | | 1312 | NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED); |
1310 | splx(s); | | 1313 | splx(s); |
1311 | | | 1314 | |
1312 | #if 1 | | 1315 | #if 1 |
1313 | /* configure interrupts coalescing/mitigation */ | | 1316 | /* configure interrupts coalescing/mitigation */ |
1314 | NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT); | | 1317 | NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT); |
1315 | #else | | 1318 | #else |
1316 | /* no interrupt mitigation: one interrupt per packet */ | | 1319 | /* no interrupt mitigation: one interrupt per packet */ |
1317 | NFE_WRITE(sc, NFE_IMTIMER, 970); | | 1320 | NFE_WRITE(sc, NFE_IMTIMER, 970); |
1318 | #endif | | 1321 | #endif |
1319 | | | 1322 | |
1320 | NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC); | | 1323 | NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC); |
1321 | NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC); | | 1324 | NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC); |
1322 | NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC); | | 1325 | NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC); |
1323 | | | 1326 | |
1324 | /* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */ | | 1327 | /* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */ |
1325 | NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC); | | 1328 | NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC); |