Sun Jan 9 00:12:45 2011 UTC ()
Try reading MAC addr from register if it fails to read from EEPROM.
Copied from FreeBSD driver.

Without this my JMC261 doesn't get MAC address properly.

OK'ed by bouyer@


(kochi)
diff -r1.15 -r1.16 src/sys/dev/pci/if_jme.c

cvs diff -r1.15 -r1.16 src/sys/dev/pci/if_jme.c (switch to unified diff)

--- src/sys/dev/pci/if_jme.c 2010/11/13 13:52:06 1.15
+++ src/sys/dev/pci/if_jme.c 2011/01/09 00:12:45 1.16
@@ -1,2156 +1,2179 @@ @@ -1,2156 +1,2179 @@
1/* $NetBSD: if_jme.c,v 1.15 2010/11/13 13:52:06 uebayasi Exp $ */ 1/* $NetBSD: if_jme.c,v 1.16 2011/01/09 00:12:45 kochi Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2008 Manuel Bouyer. All rights reserved. 4 * Copyright (c) 2008 Manuel Bouyer. All rights reserved.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions 7 * modification, are permitted provided that the following conditions
8 * are met: 8 * are met:
9 * 1. Redistributions of source code must retain the above copyright 9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer. 10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright 11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the 12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution. 13 * documentation and/or other materials provided with the distribution.
14 * 14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */ 25 */
26 26
27/*- 27/*-
28 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org> 28 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
29 * All rights reserved. 29 * All rights reserved.
30 * 30 *
31 * Redistribution and use in source and binary forms, with or without 31 * Redistribution and use in source and binary forms, with or without
32 * modification, are permitted provided that the following conditions 32 * modification, are permitted provided that the following conditions
33 * are met: 33 * are met:
34 * 1. Redistributions of source code must retain the above copyright 34 * 1. Redistributions of source code must retain the above copyright
35 * notice unmodified, this list of conditions, and the following 35 * notice unmodified, this list of conditions, and the following
36 * disclaimer. 36 * disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright 37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the 38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution. 39 * documentation and/or other materials provided with the distribution.
40 * 40 *
41 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 41 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
42 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 42 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
43 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 43 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
44 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 44 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
45 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 45 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
46 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 46 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
47 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 47 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
48 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 48 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
49 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 49 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
50 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 50 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
51 * SUCH DAMAGE. 51 * SUCH DAMAGE.
52 */ 52 */
53 53
54 54
55/* 55/*
56 * Driver for JMicron Technologies JMC250 (Gigabit) and JMC260 (Fast) 56 * Driver for JMicron Technologies JMC250 (Gigabit) and JMC260 (Fast)
57 * Ethernet Controllers. 57 * Ethernet Controllers.
58 */ 58 */
59 59
60#include <sys/cdefs.h> 60#include <sys/cdefs.h>
61__KERNEL_RCSID(0, "$NetBSD: if_jme.c,v 1.15 2010/11/13 13:52:06 uebayasi Exp $"); 61__KERNEL_RCSID(0, "$NetBSD: if_jme.c,v 1.16 2011/01/09 00:12:45 kochi Exp $");
62 62
63 63
64#include <sys/param.h> 64#include <sys/param.h>
65#include <sys/systm.h> 65#include <sys/systm.h>
66#include <sys/mbuf.h> 66#include <sys/mbuf.h>
67#include <sys/protosw.h> 67#include <sys/protosw.h>
68#include <sys/socket.h> 68#include <sys/socket.h>
69#include <sys/ioctl.h> 69#include <sys/ioctl.h>
70#include <sys/errno.h> 70#include <sys/errno.h>
71#include <sys/malloc.h> 71#include <sys/malloc.h>
72#include <sys/kernel.h> 72#include <sys/kernel.h>
73#include <sys/proc.h> /* only for declaration of wakeup() used by vm.h */ 73#include <sys/proc.h> /* only for declaration of wakeup() used by vm.h */
74#include <sys/device.h> 74#include <sys/device.h>
75#include <sys/syslog.h> 75#include <sys/syslog.h>
76#include <sys/sysctl.h> 76#include <sys/sysctl.h>
77 77
78#include <net/if.h> 78#include <net/if.h>
79#if defined(SIOCSIFMEDIA) 79#if defined(SIOCSIFMEDIA)
80#include <net/if_media.h> 80#include <net/if_media.h>
81#endif 81#endif
82#include <net/if_types.h> 82#include <net/if_types.h>
83#include <net/if_dl.h> 83#include <net/if_dl.h>
84#include <net/route.h> 84#include <net/route.h>
85#include <net/netisr.h> 85#include <net/netisr.h>
86 86
87#include <net/bpf.h> 87#include <net/bpf.h>
88#include <net/bpfdesc.h> 88#include <net/bpfdesc.h>
89 89
90#include "rnd.h" 90#include "rnd.h"
91#if NRND > 0 91#if NRND > 0
92#include <sys/rnd.h> 92#include <sys/rnd.h>
93#endif 93#endif
94 94
95#include <netinet/in.h> 95#include <netinet/in.h>
96#include <netinet/in_systm.h> 96#include <netinet/in_systm.h>
97#include <netinet/ip.h> 97#include <netinet/ip.h>
98 98
99#ifdef INET 99#ifdef INET
100#include <netinet/in_var.h> 100#include <netinet/in_var.h>
101#endif 101#endif
102 102
103#include <netinet/tcp.h> 103#include <netinet/tcp.h>
104 104
105#include <net/if_ether.h> 105#include <net/if_ether.h>
106#if defined(INET) 106#if defined(INET)
107#include <netinet/if_inarp.h> 107#include <netinet/if_inarp.h>
108#endif 108#endif
109 109
110#include <sys/bus.h> 110#include <sys/bus.h>
111#include <sys/intr.h> 111#include <sys/intr.h>
112 112
113#include <dev/pci/pcireg.h> 113#include <dev/pci/pcireg.h>
114#include <dev/pci/pcivar.h> 114#include <dev/pci/pcivar.h>
115#include <dev/pci/pcidevs.h> 115#include <dev/pci/pcidevs.h>
116#include <dev/pci/if_jmereg.h> 116#include <dev/pci/if_jmereg.h>
117 117
118#include <dev/mii/mii.h> 118#include <dev/mii/mii.h>
119#include <dev/mii/miivar.h> 119#include <dev/mii/miivar.h>
120 120
121struct jme_product_desc { 121struct jme_product_desc {
122 u_int32_t jme_product; 122 u_int32_t jme_product;
123 const char *jme_desc; 123 const char *jme_desc;
124}; 124};
125 125
126/* number of entries in transmit and receive rings */ 126/* number of entries in transmit and receive rings */
127#define JME_NBUFS (PAGE_SIZE / sizeof(struct jme_desc)) 127#define JME_NBUFS (PAGE_SIZE / sizeof(struct jme_desc))
128 128
129#define JME_DESC_INC(x, y) ((x) = ((x) + 1) % (y)) 129#define JME_DESC_INC(x, y) ((x) = ((x) + 1) % (y))
130 130
131/* Water mark to kick reclaiming Tx buffers. */ 131/* Water mark to kick reclaiming Tx buffers. */
132#define JME_TX_DESC_HIWAT (JME_NBUFS - (((JME_NBUFS) * 3) / 10)) 132#define JME_TX_DESC_HIWAT (JME_NBUFS - (((JME_NBUFS) * 3) / 10))
133 133
134 134
135struct jme_softc { 135struct jme_softc {
136 device_t jme_dev; /* base device */ 136 device_t jme_dev; /* base device */
137 bus_space_tag_t jme_bt_mac; 137 bus_space_tag_t jme_bt_mac;
138 bus_space_handle_t jme_bh_mac; /* Mac registers */ 138 bus_space_handle_t jme_bh_mac; /* Mac registers */
139 bus_space_tag_t jme_bt_phy; 139 bus_space_tag_t jme_bt_phy;
140 bus_space_handle_t jme_bh_phy; /* PHY registers */ 140 bus_space_handle_t jme_bh_phy; /* PHY registers */
141 bus_space_tag_t jme_bt_misc; 141 bus_space_tag_t jme_bt_misc;
142 bus_space_handle_t jme_bh_misc; /* Misc registers */ 142 bus_space_handle_t jme_bh_misc; /* Misc registers */
143 bus_dma_tag_t jme_dmatag; 143 bus_dma_tag_t jme_dmatag;
144 bus_dma_segment_t jme_txseg; /* transmit ring seg */ 144 bus_dma_segment_t jme_txseg; /* transmit ring seg */
145 bus_dmamap_t jme_txmap; /* transmit ring DMA map */ 145 bus_dmamap_t jme_txmap; /* transmit ring DMA map */
146 struct jme_desc* jme_txring; /* transmit ring */ 146 struct jme_desc* jme_txring; /* transmit ring */
147 bus_dmamap_t jme_txmbufm[JME_NBUFS]; /* transmit mbufs DMA map */ 147 bus_dmamap_t jme_txmbufm[JME_NBUFS]; /* transmit mbufs DMA map */
148 struct mbuf *jme_txmbuf[JME_NBUFS]; /* mbufs being transmitted */ 148 struct mbuf *jme_txmbuf[JME_NBUFS]; /* mbufs being transmitted */
149 int jme_tx_cons; /* transmit ring consumer */ 149 int jme_tx_cons; /* transmit ring consumer */
150 int jme_tx_prod; /* transmit ring producer */ 150 int jme_tx_prod; /* transmit ring producer */
151 int jme_tx_cnt; /* transmit ring active count */ 151 int jme_tx_cnt; /* transmit ring active count */
152 bus_dma_segment_t jme_rxseg; /* receive ring seg */ 152 bus_dma_segment_t jme_rxseg; /* receive ring seg */
153 bus_dmamap_t jme_rxmap; /* receive ring DMA map */ 153 bus_dmamap_t jme_rxmap; /* receive ring DMA map */
154 struct jme_desc* jme_rxring; /* receive ring */ 154 struct jme_desc* jme_rxring; /* receive ring */
155 bus_dmamap_t jme_rxmbufm[JME_NBUFS]; /* receive mbufs DMA map */ 155 bus_dmamap_t jme_rxmbufm[JME_NBUFS]; /* receive mbufs DMA map */
156 struct mbuf *jme_rxmbuf[JME_NBUFS]; /* mbufs being received */ 156 struct mbuf *jme_rxmbuf[JME_NBUFS]; /* mbufs being received */
157 int jme_rx_cons; /* receive ring consumer */ 157 int jme_rx_cons; /* receive ring consumer */
158 int jme_rx_prod; /* receive ring producer */ 158 int jme_rx_prod; /* receive ring producer */
159 void* jme_ih; /* our interrupt */ 159 void* jme_ih; /* our interrupt */
160 struct ethercom jme_ec; 160 struct ethercom jme_ec;
161 struct callout jme_tick_ch; /* tick callout */ 161 struct callout jme_tick_ch; /* tick callout */
162 u_int8_t jme_enaddr[ETHER_ADDR_LEN];/* hardware address */ 162 u_int8_t jme_enaddr[ETHER_ADDR_LEN];/* hardware address */
163 u_int8_t jme_phyaddr; /* address of integrated phy */ 163 u_int8_t jme_phyaddr; /* address of integrated phy */
164 u_int8_t jme_chip_rev; /* chip revision */ 164 u_int8_t jme_chip_rev; /* chip revision */
165 u_int8_t jme_rev; /* PCI revision */ 165 u_int8_t jme_rev; /* PCI revision */
166 mii_data_t jme_mii; /* mii bus */ 166 mii_data_t jme_mii; /* mii bus */
167 u_int32_t jme_flags; /* device features, see below */ 167 u_int32_t jme_flags; /* device features, see below */
168 uint32_t jme_txcsr; /* TX config register */ 168 uint32_t jme_txcsr; /* TX config register */
169 uint32_t jme_rxcsr; /* RX config register */ 169 uint32_t jme_rxcsr; /* RX config register */
170#if NRND > 0 170#if NRND > 0
171 rndsource_element_t rnd_source; 171 rndsource_element_t rnd_source;
172#endif 172#endif
173 /* interrupt coalition parameters */ 173 /* interrupt coalition parameters */
174 struct sysctllog *jme_clog; 174 struct sysctllog *jme_clog;
175 int jme_intrxto; /* interrupt RX timeout */ 175 int jme_intrxto; /* interrupt RX timeout */
176 int jme_intrxct; /* interrupt RX packets counter */ 176 int jme_intrxct; /* interrupt RX packets counter */
177 int jme_inttxto; /* interrupt TX timeout */ 177 int jme_inttxto; /* interrupt TX timeout */
178 int jme_inttxct; /* interrupt TX packets counter */ 178 int jme_inttxct; /* interrupt TX packets counter */
179}; 179};
180 180
181#define JME_FLAG_FPGA 0x0001 /* FPGA version */ 181#define JME_FLAG_FPGA 0x0001 /* FPGA version */
182#define JME_FLAG_GIGA 0x0002 /* giga Ethernet capable */ 182#define JME_FLAG_GIGA 0x0002 /* giga Ethernet capable */
183 183
184 184
185#define jme_if jme_ec.ec_if 185#define jme_if jme_ec.ec_if
186#define jme_bpf jme_if.if_bpf 186#define jme_bpf jme_if.if_bpf
187 187
188typedef struct jme_softc jme_softc_t; 188typedef struct jme_softc jme_softc_t;
189typedef u_long ioctl_cmd_t; 189typedef u_long ioctl_cmd_t;
190 190
191static int jme_pci_match(device_t, cfdata_t, void *); 191static int jme_pci_match(device_t, cfdata_t, void *);
192static void jme_pci_attach(device_t, device_t, void *); 192static void jme_pci_attach(device_t, device_t, void *);
193static void jme_intr_rx(jme_softc_t *); 193static void jme_intr_rx(jme_softc_t *);
194static int jme_intr(void *); 194static int jme_intr(void *);
195 195
196static int jme_ifioctl(struct ifnet *, ioctl_cmd_t, void *); 196static int jme_ifioctl(struct ifnet *, ioctl_cmd_t, void *);
197static int jme_mediachange(struct ifnet *); 197static int jme_mediachange(struct ifnet *);
198static void jme_ifwatchdog(struct ifnet *); 198static void jme_ifwatchdog(struct ifnet *);
199static bool jme_shutdown(device_t, int); 199static bool jme_shutdown(device_t, int);
200 200
201static void jme_txeof(struct jme_softc *); 201static void jme_txeof(struct jme_softc *);
202static void jme_ifstart(struct ifnet *); 202static void jme_ifstart(struct ifnet *);
203static void jme_reset(jme_softc_t *); 203static void jme_reset(jme_softc_t *);
204static int jme_ifinit(struct ifnet *); 204static int jme_ifinit(struct ifnet *);
205static int jme_init(struct ifnet *, int); 205static int jme_init(struct ifnet *, int);
206static void jme_stop(struct ifnet *, int); 206static void jme_stop(struct ifnet *, int);
207// static void jme_restart(void *); 207// static void jme_restart(void *);
208static void jme_ticks(void *); 208static void jme_ticks(void *);
209static void jme_mac_config(jme_softc_t *); 209static void jme_mac_config(jme_softc_t *);
210static void jme_set_filter(jme_softc_t *); 210static void jme_set_filter(jme_softc_t *);
211 211
212int jme_mii_read(device_t, int, int); 212int jme_mii_read(device_t, int, int);
213void jme_mii_write(device_t, int, int, int); 213void jme_mii_write(device_t, int, int, int);
214void jme_statchg(device_t); 214void jme_statchg(device_t);
215 215
216static int jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *); 216static int jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
217static int jme_eeprom_macaddr(struct jme_softc *); 217static int jme_eeprom_macaddr(struct jme_softc *);
 218static int jme_reg_macaddr(struct jme_softc *);
218 219
219#define JME_TIMEOUT 1000 220#define JME_TIMEOUT 1000
220#define JME_PHY_TIMEOUT 1000 221#define JME_PHY_TIMEOUT 1000
221#define JME_EEPROM_TIMEOUT 1000 222#define JME_EEPROM_TIMEOUT 1000
222 223
223static int jme_sysctl_intrxto(SYSCTLFN_PROTO); 224static int jme_sysctl_intrxto(SYSCTLFN_PROTO);
224static int jme_sysctl_intrxct(SYSCTLFN_PROTO); 225static int jme_sysctl_intrxct(SYSCTLFN_PROTO);
225static int jme_sysctl_inttxto(SYSCTLFN_PROTO); 226static int jme_sysctl_inttxto(SYSCTLFN_PROTO);
226static int jme_sysctl_inttxct(SYSCTLFN_PROTO); 227static int jme_sysctl_inttxct(SYSCTLFN_PROTO);
227static int jme_root_num; 228static int jme_root_num;
228 229
229 230
230CFATTACH_DECL_NEW(jme, sizeof(jme_softc_t), 231CFATTACH_DECL_NEW(jme, sizeof(jme_softc_t),
231 jme_pci_match, jme_pci_attach, NULL, NULL); 232 jme_pci_match, jme_pci_attach, NULL, NULL);
232 233
233static const struct jme_product_desc jme_products[] = { 234static const struct jme_product_desc jme_products[] = {
234 { PCI_PRODUCT_JMICRON_JMC250, 235 { PCI_PRODUCT_JMICRON_JMC250,
235 "JMicron JMC250 Gigabit Ethernet Controller" }, 236 "JMicron JMC250 Gigabit Ethernet Controller" },
236 { PCI_PRODUCT_JMICRON_JMC260, 237 { PCI_PRODUCT_JMICRON_JMC260,
237 "JMicron JMC260 Gigabit Ethernet Controller" }, 238 "JMicron JMC260 Gigabit Ethernet Controller" },
238 { 0, NULL }, 239 { 0, NULL },
239}; 240};
240 241
241static const struct jme_product_desc *jme_lookup_product(uint32_t); 242static const struct jme_product_desc *jme_lookup_product(uint32_t);
242 243
243static const struct jme_product_desc * 244static const struct jme_product_desc *
244jme_lookup_product(uint32_t id) 245jme_lookup_product(uint32_t id)
245{ 246{
246 const struct jme_product_desc *jp; 247 const struct jme_product_desc *jp;
247 248
248 for (jp = jme_products ; jp->jme_desc != NULL; jp++) 249 for (jp = jme_products ; jp->jme_desc != NULL; jp++)
249 if (PCI_PRODUCT(id) == jp->jme_product) 250 if (PCI_PRODUCT(id) == jp->jme_product)
250 return jp; 251 return jp;
251 252
252 return NULL; 253 return NULL;
253} 254}
254 255
255static int 256static int
256jme_pci_match(device_t parent, cfdata_t cf, void *aux) 257jme_pci_match(device_t parent, cfdata_t cf, void *aux)
257{ 258{
258 struct pci_attach_args *pa = (struct pci_attach_args *)aux; 259 struct pci_attach_args *pa = (struct pci_attach_args *)aux;
259 260
260 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_JMICRON) 261 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_JMICRON)
261 return 0; 262 return 0;
262 263
263 if (jme_lookup_product(pa->pa_id) != NULL) 264 if (jme_lookup_product(pa->pa_id) != NULL)
264 return 1; 265 return 1;
265 266
266 return 0; 267 return 0;
267} 268}
268 269
269static void 270static void
270jme_pci_attach(device_t parent, device_t self, void *aux) 271jme_pci_attach(device_t parent, device_t self, void *aux)
271{ 272{
272 jme_softc_t *sc = device_private(self); 273 jme_softc_t *sc = device_private(self);
273 struct pci_attach_args * const pa = (struct pci_attach_args *)aux; 274 struct pci_attach_args * const pa = (struct pci_attach_args *)aux;
274 const struct jme_product_desc *jp; 275 const struct jme_product_desc *jp;
275 struct ifnet * const ifp = &sc->jme_if; 276 struct ifnet * const ifp = &sc->jme_if;
276 bus_space_tag_t iot1, iot2, memt; 277 bus_space_tag_t iot1, iot2, memt;
277 bus_space_handle_t ioh1, ioh2, memh; 278 bus_space_handle_t ioh1, ioh2, memh;
278 bus_size_t size, size2; 279 bus_size_t size, size2;
279 pci_intr_handle_t intrhandle; 280 pci_intr_handle_t intrhandle;
280 const char *intrstr; 281 const char *intrstr;
281 pcireg_t csr; 282 pcireg_t csr;
282 int nsegs, i; 283 int nsegs, i;
283 const struct sysctlnode *node; 284 const struct sysctlnode *node;
284 int jme_nodenum; 285 int jme_nodenum;
285 286
286 sc->jme_dev = self; 287 sc->jme_dev = self;
287 aprint_normal("\n"); 288 aprint_normal("\n");
288 callout_init(&sc->jme_tick_ch, 0); 289 callout_init(&sc->jme_tick_ch, 0);
289 290
290 jp = jme_lookup_product(pa->pa_id); 291 jp = jme_lookup_product(pa->pa_id);
291 if (jp == NULL) 292 if (jp == NULL)
292 panic("jme_pci_attach: impossible"); 293 panic("jme_pci_attach: impossible");
293 294
294 if (jp->jme_product == PCI_PRODUCT_JMICRON_JMC250) 295 if (jp->jme_product == PCI_PRODUCT_JMICRON_JMC250)
295 sc->jme_flags = JME_FLAG_GIGA; 296 sc->jme_flags = JME_FLAG_GIGA;
296 297
297 /* 298 /*
298 * Map the card space. Try Mem first. 299 * Map the card space. Try Mem first.
299 */ 300 */
300 if (pci_mapreg_map(pa, JME_PCI_BAR0, 301 if (pci_mapreg_map(pa, JME_PCI_BAR0,
301 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 302 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT,
302 0, &memt, &memh, NULL, &size) == 0) { 303 0, &memt, &memh, NULL, &size) == 0) {
303 sc->jme_bt_mac = memt; 304 sc->jme_bt_mac = memt;
304 sc->jme_bh_mac = memh; 305 sc->jme_bh_mac = memh;
305 sc->jme_bt_phy = memt; 306 sc->jme_bt_phy = memt;
306 if (bus_space_subregion(memt, memh, JME_PHY_EEPROM_BASE_MEMOFF, 307 if (bus_space_subregion(memt, memh, JME_PHY_EEPROM_BASE_MEMOFF,
307 JME_PHY_EEPROM_SIZE, &sc->jme_bh_phy) != 0) { 308 JME_PHY_EEPROM_SIZE, &sc->jme_bh_phy) != 0) {
308 aprint_error_dev(self, "can't subregion PHY space\n"); 309 aprint_error_dev(self, "can't subregion PHY space\n");
309 bus_space_unmap(memt, memh, size); 310 bus_space_unmap(memt, memh, size);
310 return; 311 return;
311 } 312 }
312 sc->jme_bt_misc = memt; 313 sc->jme_bt_misc = memt;
313 if (bus_space_subregion(memt, memh, JME_MISC_BASE_MEMOFF, 314 if (bus_space_subregion(memt, memh, JME_MISC_BASE_MEMOFF,
314 JME_MISC_SIZE, &sc->jme_bh_misc) != 0) { 315 JME_MISC_SIZE, &sc->jme_bh_misc) != 0) {
315 aprint_error_dev(self, "can't subregion misc space\n"); 316 aprint_error_dev(self, "can't subregion misc space\n");
316 bus_space_unmap(memt, memh, size); 317 bus_space_unmap(memt, memh, size);
317 return; 318 return;
318 } 319 }
319 } else { 320 } else {
320 if (pci_mapreg_map(pa, JME_PCI_BAR1, PCI_MAPREG_TYPE_IO, 321 if (pci_mapreg_map(pa, JME_PCI_BAR1, PCI_MAPREG_TYPE_IO,
321 0, &iot1, &ioh1, NULL, &size) != 0) { 322 0, &iot1, &ioh1, NULL, &size) != 0) {
322 aprint_error_dev(self, "can't map I/O space 1\n"); 323 aprint_error_dev(self, "can't map I/O space 1\n");
323 return; 324 return;
324 } 325 }
325 sc->jme_bt_mac = iot1; 326 sc->jme_bt_mac = iot1;
326 sc->jme_bh_mac = ioh1; 327 sc->jme_bh_mac = ioh1;
327 if (pci_mapreg_map(pa, JME_PCI_BAR2, PCI_MAPREG_TYPE_IO, 328 if (pci_mapreg_map(pa, JME_PCI_BAR2, PCI_MAPREG_TYPE_IO,
328 0, &iot2, &ioh2, NULL, &size2) != 0) { 329 0, &iot2, &ioh2, NULL, &size2) != 0) {
329 aprint_error_dev(self, "can't map I/O space 2\n"); 330 aprint_error_dev(self, "can't map I/O space 2\n");
330 bus_space_unmap(iot1, ioh1, size); 331 bus_space_unmap(iot1, ioh1, size);
331 return; 332 return;
332 } 333 }
333 sc->jme_bt_phy = iot2; 334 sc->jme_bt_phy = iot2;
334 sc->jme_bh_phy = ioh2; 335 sc->jme_bh_phy = ioh2;
335 sc->jme_bt_misc = iot2; 336 sc->jme_bt_misc = iot2;
336 if (bus_space_subregion(iot2, ioh2, JME_MISC_BASE_IOOFF, 337 if (bus_space_subregion(iot2, ioh2, JME_MISC_BASE_IOOFF,
337 JME_MISC_SIZE, &sc->jme_bh_misc) != 0) { 338 JME_MISC_SIZE, &sc->jme_bh_misc) != 0) {
338 aprint_error_dev(self, "can't subregion misc space\n"); 339 aprint_error_dev(self, "can't subregion misc space\n");
339 bus_space_unmap(iot1, ioh1, size); 340 bus_space_unmap(iot1, ioh1, size);
340 bus_space_unmap(iot2, ioh2, size2); 341 bus_space_unmap(iot2, ioh2, size2);
341 return; 342 return;
342 } 343 }
343 } 344 }
344 345
345 if (pci_dma64_available(pa)) 346 if (pci_dma64_available(pa))
346 sc->jme_dmatag = pa->pa_dmat64; 347 sc->jme_dmatag = pa->pa_dmat64;
347 else 348 else
348 sc->jme_dmatag = pa->pa_dmat; 349 sc->jme_dmatag = pa->pa_dmat;
349 350
350 /* Enable the device. */ 351 /* Enable the device. */
351 csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 352 csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
352 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, 353 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
353 csr | PCI_COMMAND_MASTER_ENABLE); 354 csr | PCI_COMMAND_MASTER_ENABLE);
354 355
355 aprint_normal_dev(self, "%s\n", jp->jme_desc); 356 aprint_normal_dev(self, "%s\n", jp->jme_desc);
356 357
357 sc->jme_rev = PCI_REVISION(pa->pa_class); 358 sc->jme_rev = PCI_REVISION(pa->pa_class);
358 359
359 csr = bus_space_read_4(sc->jme_bt_misc, sc->jme_bh_misc, JME_CHIPMODE); 360 csr = bus_space_read_4(sc->jme_bt_misc, sc->jme_bh_misc, JME_CHIPMODE);
360 if (((csr & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) != 361 if (((csr & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
361 CHIPMODE_NOT_FPGA) 362 CHIPMODE_NOT_FPGA)
362 sc->jme_flags |= JME_FLAG_FPGA; 363 sc->jme_flags |= JME_FLAG_FPGA;
363 sc->jme_chip_rev = (csr & CHIPMODE_REV_MASK) >> CHIPMODE_REV_SHIFT; 364 sc->jme_chip_rev = (csr & CHIPMODE_REV_MASK) >> CHIPMODE_REV_SHIFT;
364 aprint_verbose_dev(self, "PCI device revision : 0x%x, Chip revision: " 365 aprint_verbose_dev(self, "PCI device revision : 0x%x, Chip revision: "
365 "0x%x", sc->jme_rev, sc->jme_chip_rev); 366 "0x%x", sc->jme_rev, sc->jme_chip_rev);
366 if (sc->jme_flags & JME_FLAG_FPGA) 367 if (sc->jme_flags & JME_FLAG_FPGA)
367 aprint_verbose(" FPGA revision: 0x%x", 368 aprint_verbose(" FPGA revision: 0x%x",
368 (csr & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT); 369 (csr & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT);
369 aprint_verbose("\n"); 370 aprint_verbose("\n");
370 371
371 /* 372 /*
372 * Save PHY address. 373 * Save PHY address.
373 * Integrated JR0211 has fixed PHY address whereas FPGA version 374 * Integrated JR0211 has fixed PHY address whereas FPGA version
374 * requires PHY probing to get correct PHY address. 375 * requires PHY probing to get correct PHY address.
375 */ 376 */
376 if ((sc->jme_flags & JME_FLAG_FPGA) == 0) { 377 if ((sc->jme_flags & JME_FLAG_FPGA) == 0) {
377 sc->jme_phyaddr = 378 sc->jme_phyaddr =
378 bus_space_read_4(sc->jme_bt_misc, sc->jme_bh_misc, 379 bus_space_read_4(sc->jme_bt_misc, sc->jme_bh_misc,
379 JME_GPREG0) & GPREG0_PHY_ADDR_MASK; 380 JME_GPREG0) & GPREG0_PHY_ADDR_MASK;
380 } else 381 } else
381 sc->jme_phyaddr = 0; 382 sc->jme_phyaddr = 0;
382 383
383 384
384 jme_reset(sc); 385 jme_reset(sc);
385 386
386 /* read mac addr */ 387 /* read mac addr */
387 if (jme_eeprom_macaddr(sc)) { 388 if (jme_eeprom_macaddr(sc) && jme_reg_macaddr(sc)) {
388 aprint_error_dev(self, "error reading Ethernet address\n"); 389 aprint_error_dev(self, "error reading Ethernet address\n");
389 /* return; */ 390 /* return; */
390 } 391 }
391 aprint_normal_dev(self, "Ethernet address %s\n", 392 aprint_normal_dev(self, "Ethernet address %s\n",
392 ether_sprintf(sc->jme_enaddr)); 393 ether_sprintf(sc->jme_enaddr));
393 394
394 /* Map and establish interrupts */ 395 /* Map and establish interrupts */
395 if (pci_intr_map(pa, &intrhandle)) { 396 if (pci_intr_map(pa, &intrhandle)) {
396 aprint_error_dev(self, "couldn't map interrupt\n"); 397 aprint_error_dev(self, "couldn't map interrupt\n");
397 return; 398 return;
398 } 399 }
399 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 400 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
400 sc->jme_if.if_softc = sc; 401 sc->jme_if.if_softc = sc;
401 sc->jme_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_NET, 402 sc->jme_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_NET,
402 jme_intr, sc); 403 jme_intr, sc);
403 if (sc->jme_ih == NULL) { 404 if (sc->jme_ih == NULL) {
404 aprint_error_dev(self, "couldn't establish interrupt"); 405 aprint_error_dev(self, "couldn't establish interrupt");
405 if (intrstr != NULL) 406 if (intrstr != NULL)
406 aprint_error(" at %s", intrstr); 407 aprint_error(" at %s", intrstr);
407 aprint_error("\n"); 408 aprint_error("\n");
408 return; 409 return;
409 } 410 }
410 aprint_normal_dev(self, "interrupting at %s\n", intrstr); 411 aprint_normal_dev(self, "interrupting at %s\n", intrstr);
411 412
412 /* allocate and map DMA-safe memory for transmit ring */ 413 /* allocate and map DMA-safe memory for transmit ring */
413 if (bus_dmamem_alloc(sc->jme_dmatag, PAGE_SIZE, 0, PAGE_SIZE, 414 if (bus_dmamem_alloc(sc->jme_dmatag, PAGE_SIZE, 0, PAGE_SIZE,
414 &sc->jme_txseg, 1, &nsegs, BUS_DMA_NOWAIT) != 0 || 415 &sc->jme_txseg, 1, &nsegs, BUS_DMA_NOWAIT) != 0 ||
415 bus_dmamem_map(sc->jme_dmatag, &sc->jme_txseg, 416 bus_dmamem_map(sc->jme_dmatag, &sc->jme_txseg,
416 nsegs, PAGE_SIZE, (void **)&sc->jme_txring, 417 nsegs, PAGE_SIZE, (void **)&sc->jme_txring,
417 BUS_DMA_NOWAIT | BUS_DMA_COHERENT) != 0 || 418 BUS_DMA_NOWAIT | BUS_DMA_COHERENT) != 0 ||
418 bus_dmamap_create(sc->jme_dmatag, PAGE_SIZE, 1, PAGE_SIZE, 0, 419 bus_dmamap_create(sc->jme_dmatag, PAGE_SIZE, 1, PAGE_SIZE, 0,
419 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->jme_txmap) != 0 || 420 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->jme_txmap) != 0 ||
420 bus_dmamap_load(sc->jme_dmatag, sc->jme_txmap, sc->jme_txring, 421 bus_dmamap_load(sc->jme_dmatag, sc->jme_txmap, sc->jme_txring,
421 PAGE_SIZE, NULL, BUS_DMA_NOWAIT) != 0) { 422 PAGE_SIZE, NULL, BUS_DMA_NOWAIT) != 0) {
422 aprint_error_dev(self, "can't allocate DMA memory TX ring\n"); 423 aprint_error_dev(self, "can't allocate DMA memory TX ring\n");
423 return; 424 return;
424 } 425 }
425 /* allocate and map DMA-safe memory for receive ring */ 426 /* allocate and map DMA-safe memory for receive ring */
426 if (bus_dmamem_alloc(sc->jme_dmatag, PAGE_SIZE, 0, PAGE_SIZE, 427 if (bus_dmamem_alloc(sc->jme_dmatag, PAGE_SIZE, 0, PAGE_SIZE,
427 &sc->jme_rxseg, 1, &nsegs, BUS_DMA_NOWAIT) != 0 || 428 &sc->jme_rxseg, 1, &nsegs, BUS_DMA_NOWAIT) != 0 ||
428 bus_dmamem_map(sc->jme_dmatag, &sc->jme_rxseg, 429 bus_dmamem_map(sc->jme_dmatag, &sc->jme_rxseg,
429 nsegs, PAGE_SIZE, (void **)&sc->jme_rxring, 430 nsegs, PAGE_SIZE, (void **)&sc->jme_rxring,
430 BUS_DMA_NOWAIT | BUS_DMA_COHERENT) != 0 || 431 BUS_DMA_NOWAIT | BUS_DMA_COHERENT) != 0 ||
431 bus_dmamap_create(sc->jme_dmatag, PAGE_SIZE, 1, PAGE_SIZE, 0, 432 bus_dmamap_create(sc->jme_dmatag, PAGE_SIZE, 1, PAGE_SIZE, 0,
432 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->jme_rxmap) != 0 || 433 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->jme_rxmap) != 0 ||
433 bus_dmamap_load(sc->jme_dmatag, sc->jme_rxmap, sc->jme_rxring, 434 bus_dmamap_load(sc->jme_dmatag, sc->jme_rxmap, sc->jme_rxring,
434 PAGE_SIZE, NULL, BUS_DMA_NOWAIT) != 0) { 435 PAGE_SIZE, NULL, BUS_DMA_NOWAIT) != 0) {
435 aprint_error_dev(self, "can't allocate DMA memory RX ring\n"); 436 aprint_error_dev(self, "can't allocate DMA memory RX ring\n");
436 return; 437 return;
437 } 438 }
438 for (i = 0; i < JME_NBUFS; i++) { 439 for (i = 0; i < JME_NBUFS; i++) {
439 sc->jme_txmbuf[i] = sc->jme_rxmbuf[i] = NULL; 440 sc->jme_txmbuf[i] = sc->jme_rxmbuf[i] = NULL;
440 if (bus_dmamap_create(sc->jme_dmatag, JME_MAX_TX_LEN, 441 if (bus_dmamap_create(sc->jme_dmatag, JME_MAX_TX_LEN,
441 JME_NBUFS, JME_MAX_TX_LEN, 0, BUS_DMA_NOWAIT, 442 JME_NBUFS, JME_MAX_TX_LEN, 0, BUS_DMA_NOWAIT,
442 &sc->jme_txmbufm[i]) != 0) { 443 &sc->jme_txmbufm[i]) != 0) {
443 aprint_error_dev(self, "can't allocate DMA TX map\n"); 444 aprint_error_dev(self, "can't allocate DMA TX map\n");
444 return; 445 return;
445 } 446 }
446 if (bus_dmamap_create(sc->jme_dmatag, JME_MAX_RX_LEN, 447 if (bus_dmamap_create(sc->jme_dmatag, JME_MAX_RX_LEN,
447 1, JME_MAX_RX_LEN, 0, BUS_DMA_NOWAIT, 448 1, JME_MAX_RX_LEN, 0, BUS_DMA_NOWAIT,
448 &sc->jme_rxmbufm[i]) != 0) { 449 &sc->jme_rxmbufm[i]) != 0) {
449 aprint_error_dev(self, "can't allocate DMA RX map\n"); 450 aprint_error_dev(self, "can't allocate DMA RX map\n");
450 return; 451 return;
451 } 452 }
452 } 453 }
453 /* 454 /*
454 * Initialize our media structures and probe the MII. 455 * Initialize our media structures and probe the MII.
455 * 456 *
456 * Note that we don't care about the media instance. We 457 * Note that we don't care about the media instance. We
457 * are expecting to have multiple PHYs on the 10/100 cards, 458 * are expecting to have multiple PHYs on the 10/100 cards,
458 * and on those cards we exclude the internal PHY from providing 459 * and on those cards we exclude the internal PHY from providing
459 * 10baseT. By ignoring the instance, it allows us to not have 460 * 10baseT. By ignoring the instance, it allows us to not have
460 * to specify it on the command line when switching media. 461 * to specify it on the command line when switching media.
461 */ 462 */
462 sc->jme_mii.mii_ifp = ifp; 463 sc->jme_mii.mii_ifp = ifp;
463 sc->jme_mii.mii_readreg = jme_mii_read; 464 sc->jme_mii.mii_readreg = jme_mii_read;
464 sc->jme_mii.mii_writereg = jme_mii_write; 465 sc->jme_mii.mii_writereg = jme_mii_write;
465 sc->jme_mii.mii_statchg = jme_statchg; 466 sc->jme_mii.mii_statchg = jme_statchg;
466 sc->jme_ec.ec_mii = &sc->jme_mii; 467 sc->jme_ec.ec_mii = &sc->jme_mii;
467 ifmedia_init(&sc->jme_mii.mii_media, IFM_IMASK, jme_mediachange, 468 ifmedia_init(&sc->jme_mii.mii_media, IFM_IMASK, jme_mediachange,
468 ether_mediastatus); 469 ether_mediastatus);
469 mii_attach(self, &sc->jme_mii, 0xffffffff, MII_PHY_ANY, 470 mii_attach(self, &sc->jme_mii, 0xffffffff, MII_PHY_ANY,
470 MII_OFFSET_ANY, 0); 471 MII_OFFSET_ANY, 0);
471 if (LIST_FIRST(&sc->jme_mii.mii_phys) == NULL) { 472 if (LIST_FIRST(&sc->jme_mii.mii_phys) == NULL) {
472 ifmedia_add(&sc->jme_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL); 473 ifmedia_add(&sc->jme_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
473 ifmedia_set(&sc->jme_mii.mii_media, IFM_ETHER|IFM_NONE); 474 ifmedia_set(&sc->jme_mii.mii_media, IFM_ETHER|IFM_NONE);
474 } else 475 } else
475 ifmedia_set(&sc->jme_mii.mii_media, IFM_ETHER|IFM_AUTO); 476 ifmedia_set(&sc->jme_mii.mii_media, IFM_ETHER|IFM_AUTO);
476 477
477 /* 478 /*
478 * We can support 802.1Q VLAN-sized frames. 479 * We can support 802.1Q VLAN-sized frames.
479 */ 480 */
480 sc->jme_ec.ec_capabilities |= 481 sc->jme_ec.ec_capabilities |=
481 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING; 482 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
482 483
483 if (sc->jme_flags & JME_FLAG_GIGA) 484 if (sc->jme_flags & JME_FLAG_GIGA)
484 sc->jme_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU; 485 sc->jme_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU;
485 486
486 487
487 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ); 488 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
488 ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_NOTRAILERS|IFF_MULTICAST; 489 ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_NOTRAILERS|IFF_MULTICAST;
489 ifp->if_ioctl = jme_ifioctl; 490 ifp->if_ioctl = jme_ifioctl;
490 ifp->if_start = jme_ifstart; 491 ifp->if_start = jme_ifstart;
491 ifp->if_watchdog = jme_ifwatchdog; 492 ifp->if_watchdog = jme_ifwatchdog;
492 ifp->if_init = jme_ifinit; 493 ifp->if_init = jme_ifinit;
493 ifp->if_stop = jme_stop; 494 ifp->if_stop = jme_stop;
494 ifp->if_timer = 0; 495 ifp->if_timer = 0;
495 ifp->if_capabilities |= 496 ifp->if_capabilities |=
496 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 497 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
497 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 498 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
498 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx | 499 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
499 IFCAP_CSUM_TCPv6_Tx | /* IFCAP_CSUM_TCPv6_Rx | hardware bug */ 500 IFCAP_CSUM_TCPv6_Tx | /* IFCAP_CSUM_TCPv6_Rx | hardware bug */
500 IFCAP_CSUM_UDPv6_Tx | /* IFCAP_CSUM_UDPv6_Rx | hardware bug */ 501 IFCAP_CSUM_UDPv6_Tx | /* IFCAP_CSUM_UDPv6_Rx | hardware bug */
501 IFCAP_TSOv4 | IFCAP_TSOv6; 502 IFCAP_TSOv4 | IFCAP_TSOv6;
502 IFQ_SET_READY(&ifp->if_snd); 503 IFQ_SET_READY(&ifp->if_snd);
503 if_attach(ifp); 504 if_attach(ifp);
504 ether_ifattach(&(sc)->jme_if, (sc)->jme_enaddr); 505 ether_ifattach(&(sc)->jme_if, (sc)->jme_enaddr);
505 506
506 /* 507 /*
507 * Add shutdown hook so that DMA is disabled prior to reboot. 508 * Add shutdown hook so that DMA is disabled prior to reboot.
508 */ 509 */
509 if (pmf_device_register1(self, NULL, NULL, jme_shutdown)) 510 if (pmf_device_register1(self, NULL, NULL, jme_shutdown))
510 pmf_class_network_register(self, ifp); 511 pmf_class_network_register(self, ifp);
511 else 512 else
512 aprint_error_dev(self, "couldn't establish power handler\n"); 513 aprint_error_dev(self, "couldn't establish power handler\n");
513 514
514#if NRND > 0 515#if NRND > 0
515 rnd_attach_source(&sc->rnd_source, device_xname(self), 516 rnd_attach_source(&sc->rnd_source, device_xname(self),
516 RND_TYPE_NET, 0); 517 RND_TYPE_NET, 0);
517#endif 518#endif
518 sc->jme_intrxto = PCCRX_COAL_TO_DEFAULT; 519 sc->jme_intrxto = PCCRX_COAL_TO_DEFAULT;
519 sc->jme_intrxct = PCCRX_COAL_PKT_DEFAULT; 520 sc->jme_intrxct = PCCRX_COAL_PKT_DEFAULT;
520 sc->jme_inttxto = PCCTX_COAL_TO_DEFAULT; 521 sc->jme_inttxto = PCCTX_COAL_TO_DEFAULT;
521 sc->jme_inttxct = PCCTX_COAL_PKT_DEFAULT; 522 sc->jme_inttxct = PCCTX_COAL_PKT_DEFAULT;
522 if (sysctl_createv(&sc->jme_clog, 0, NULL, &node, 523 if (sysctl_createv(&sc->jme_clog, 0, NULL, &node,
523 0, CTLTYPE_NODE, device_xname(sc->jme_dev), 524 0, CTLTYPE_NODE, device_xname(sc->jme_dev),
524 SYSCTL_DESCR("jme per-controller controls"), 525 SYSCTL_DESCR("jme per-controller controls"),
525 NULL, 0, NULL, 0, CTL_HW, jme_root_num, CTL_CREATE, 526 NULL, 0, NULL, 0, CTL_HW, jme_root_num, CTL_CREATE,
526 CTL_EOL) != 0) { 527 CTL_EOL) != 0) {
527 aprint_normal_dev(sc->jme_dev, "couldn't create sysctl node\n"); 528 aprint_normal_dev(sc->jme_dev, "couldn't create sysctl node\n");
528 return; 529 return;
529 } 530 }
530 jme_nodenum = node->sysctl_num; 531 jme_nodenum = node->sysctl_num;
531 532
532 /* interrupt moderation sysctls */ 533 /* interrupt moderation sysctls */
533 if (sysctl_createv(&sc->jme_clog, 0, NULL, &node, 534 if (sysctl_createv(&sc->jme_clog, 0, NULL, &node,
534 CTLFLAG_READWRITE, 535 CTLFLAG_READWRITE,
535 CTLTYPE_INT, "int_rxto", 536 CTLTYPE_INT, "int_rxto",
536 SYSCTL_DESCR("jme RX interrupt moderation timer"), 537 SYSCTL_DESCR("jme RX interrupt moderation timer"),
537 jme_sysctl_intrxto, 0, sc, 538 jme_sysctl_intrxto, 0, sc,
538 0, CTL_HW, jme_root_num, jme_nodenum, CTL_CREATE, 539 0, CTL_HW, jme_root_num, jme_nodenum, CTL_CREATE,
539 CTL_EOL) != 0) { 540 CTL_EOL) != 0) {
540 aprint_normal_dev(sc->jme_dev, 541 aprint_normal_dev(sc->jme_dev,
541 "couldn't create int_rxto sysctl node\n"); 542 "couldn't create int_rxto sysctl node\n");
542 } 543 }
543 if (sysctl_createv(&sc->jme_clog, 0, NULL, &node, 544 if (sysctl_createv(&sc->jme_clog, 0, NULL, &node,
544 CTLFLAG_READWRITE, 545 CTLFLAG_READWRITE,
545 CTLTYPE_INT, "int_rxct", 546 CTLTYPE_INT, "int_rxct",
546 SYSCTL_DESCR("jme RX interrupt moderation packet counter"), 547 SYSCTL_DESCR("jme RX interrupt moderation packet counter"),
547 jme_sysctl_intrxct, 0, sc, 548 jme_sysctl_intrxct, 0, sc,
548 0, CTL_HW, jme_root_num, jme_nodenum, CTL_CREATE, 549 0, CTL_HW, jme_root_num, jme_nodenum, CTL_CREATE,
549 CTL_EOL) != 0) { 550 CTL_EOL) != 0) {
550 aprint_normal_dev(sc->jme_dev, 551 aprint_normal_dev(sc->jme_dev,
551 "couldn't create int_rxct sysctl node\n"); 552 "couldn't create int_rxct sysctl node\n");
552 } 553 }
553 if (sysctl_createv(&sc->jme_clog, 0, NULL, &node, 554 if (sysctl_createv(&sc->jme_clog, 0, NULL, &node,
554 CTLFLAG_READWRITE, 555 CTLFLAG_READWRITE,
555 CTLTYPE_INT, "int_txto", 556 CTLTYPE_INT, "int_txto",
556 SYSCTL_DESCR("jme TX interrupt moderation timer"), 557 SYSCTL_DESCR("jme TX interrupt moderation timer"),
557 jme_sysctl_inttxto, 0, sc, 558 jme_sysctl_inttxto, 0, sc,
558 0, CTL_HW, jme_root_num, jme_nodenum, CTL_CREATE, 559 0, CTL_HW, jme_root_num, jme_nodenum, CTL_CREATE,
559 CTL_EOL) != 0) { 560 CTL_EOL) != 0) {
560 aprint_normal_dev(sc->jme_dev, 561 aprint_normal_dev(sc->jme_dev,
561 "couldn't create int_txto sysctl node\n"); 562 "couldn't create int_txto sysctl node\n");
562 } 563 }
563 if (sysctl_createv(&sc->jme_clog, 0, NULL, &node, 564 if (sysctl_createv(&sc->jme_clog, 0, NULL, &node,
564 CTLFLAG_READWRITE, 565 CTLFLAG_READWRITE,
565 CTLTYPE_INT, "int_txct", 566 CTLTYPE_INT, "int_txct",
566 SYSCTL_DESCR("jme TX interrupt moderation packet counter"), 567 SYSCTL_DESCR("jme TX interrupt moderation packet counter"),
567 jme_sysctl_inttxct, 0, sc, 568 jme_sysctl_inttxct, 0, sc,
568 0, CTL_HW, jme_root_num, jme_nodenum, CTL_CREATE, 569 0, CTL_HW, jme_root_num, jme_nodenum, CTL_CREATE,
569 CTL_EOL) != 0) { 570 CTL_EOL) != 0) {
570 aprint_normal_dev(sc->jme_dev, 571 aprint_normal_dev(sc->jme_dev,
571 "couldn't create int_txct sysctl node\n"); 572 "couldn't create int_txct sysctl node\n");
572 } 573 }
573} 574}
574 575
575static void 576static void
576jme_stop_rx(jme_softc_t *sc) 577jme_stop_rx(jme_softc_t *sc)
577{ 578{
578 uint32_t reg; 579 uint32_t reg;
579 int i; 580 int i;
580 581
581 reg = bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_RXCSR); 582 reg = bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_RXCSR);
582 if ((reg & RXCSR_RX_ENB) == 0) 583 if ((reg & RXCSR_RX_ENB) == 0)
583 return; 584 return;
584 reg &= ~RXCSR_RX_ENB; 585 reg &= ~RXCSR_RX_ENB;
585 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_RXCSR, reg); 586 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_RXCSR, reg);
586 for (i = JME_TIMEOUT / 10; i > 0; i--) { 587 for (i = JME_TIMEOUT / 10; i > 0; i--) {
587 DELAY(10); 588 DELAY(10);
588 if ((bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, 589 if ((bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac,
589 JME_RXCSR) & RXCSR_RX_ENB) == 0) 590 JME_RXCSR) & RXCSR_RX_ENB) == 0)
590 break; 591 break;
591 } 592 }
592 if (i == 0) 593 if (i == 0)
593 aprint_error_dev(sc->jme_dev, "stopping recevier timeout!\n"); 594 aprint_error_dev(sc->jme_dev, "stopping recevier timeout!\n");
594 595
595} 596}
596 597
597static void 598static void
598jme_stop_tx(jme_softc_t *sc) 599jme_stop_tx(jme_softc_t *sc)
599{ 600{
600 uint32_t reg; 601 uint32_t reg;
601 int i; 602 int i;
602 603
603 reg = bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXCSR); 604 reg = bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXCSR);
604 if ((reg & TXCSR_TX_ENB) == 0) 605 if ((reg & TXCSR_TX_ENB) == 0)
605 return; 606 return;
606 reg &= ~TXCSR_TX_ENB; 607 reg &= ~TXCSR_TX_ENB;
607 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXCSR, reg); 608 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXCSR, reg);
608 for (i = JME_TIMEOUT / 10; i > 0; i--) { 609 for (i = JME_TIMEOUT / 10; i > 0; i--) {
609 DELAY(10); 610 DELAY(10);
610 if ((bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, 611 if ((bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac,
611 JME_TXCSR) & TXCSR_TX_ENB) == 0) 612 JME_TXCSR) & TXCSR_TX_ENB) == 0)
612 break; 613 break;
613 } 614 }
614 if (i == 0) 615 if (i == 0)
615 aprint_error_dev(sc->jme_dev, 616 aprint_error_dev(sc->jme_dev,
616 "stopping transmitter timeout!\n"); 617 "stopping transmitter timeout!\n");
617} 618}
618 619
/*
 * jme_reset:
 *	Software-reset the MAC: assert GHC_RESET in the global host
 *	control register, hold it briefly, then release it.
 */
static void
jme_reset(jme_softc_t *sc)
{
	bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_GHC, GHC_RESET);
	DELAY(10);	/* keep reset asserted for 10us before releasing */
	bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_GHC, 0);
}
626 627
627static bool 628static bool
628jme_shutdown(device_t self, int howto) 629jme_shutdown(device_t self, int howto)
629{ 630{
630 jme_softc_t *sc; 631 jme_softc_t *sc;
631 struct ifnet *ifp; 632 struct ifnet *ifp;
632 633
633 sc = device_private(self); 634 sc = device_private(self);
634 ifp = &sc->jme_if; 635 ifp = &sc->jme_if;
635 jme_stop(ifp, 1); 636 jme_stop(ifp, 1);
636 637
637 return true; 638 return true;
638} 639}
639 640
640static void 641static void
641jme_stop(struct ifnet *ifp, int disable) 642jme_stop(struct ifnet *ifp, int disable)
642{ 643{
643 jme_softc_t *sc = ifp->if_softc; 644 jme_softc_t *sc = ifp->if_softc;
644 int i; 645 int i;
645 /* Stop receiver, transmitter. */ 646 /* Stop receiver, transmitter. */
646 jme_stop_rx(sc); 647 jme_stop_rx(sc);
647 jme_stop_tx(sc); 648 jme_stop_tx(sc);
648 /* free receive mbufs */ 649 /* free receive mbufs */
649 for (i = 0; i < JME_NBUFS; i++) { 650 for (i = 0; i < JME_NBUFS; i++) {
650 if (sc->jme_rxmbuf[i]) { 651 if (sc->jme_rxmbuf[i]) {
651 bus_dmamap_unload(sc->jme_dmatag, sc->jme_rxmbufm[i]); 652 bus_dmamap_unload(sc->jme_dmatag, sc->jme_rxmbufm[i]);
652 m_freem(sc->jme_rxmbuf[i]); 653 m_freem(sc->jme_rxmbuf[i]);
653 } 654 }
654 sc->jme_rxmbuf[i] = NULL; 655 sc->jme_rxmbuf[i] = NULL;
655 } 656 }
656 /* process completed transmits */ 657 /* process completed transmits */
657 jme_txeof(sc); 658 jme_txeof(sc);
658 /* free abort pending transmits */ 659 /* free abort pending transmits */
659 for (i = 0; i < JME_NBUFS; i++) { 660 for (i = 0; i < JME_NBUFS; i++) {
660 if (sc->jme_txmbuf[i]) { 661 if (sc->jme_txmbuf[i]) {
661 bus_dmamap_unload(sc->jme_dmatag, sc->jme_txmbufm[i]); 662 bus_dmamap_unload(sc->jme_dmatag, sc->jme_txmbufm[i]);
662 m_freem(sc->jme_txmbuf[i]); 663 m_freem(sc->jme_txmbuf[i]);
663 sc->jme_txmbuf[i] = NULL; 664 sc->jme_txmbuf[i] = NULL;
664 } 665 }
665 } 666 }
666 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 667 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
667 ifp->if_timer = 0; 668 ifp->if_timer = 0;
668} 669}
669 670
#if 0
/*
 * jme_restart:
 *	Re-initialize the chip.  Compiled out and unused.
 *	NOTE(review): the call below passes a single argument, but
 *	jme_init() in this file takes (struct ifnet *, int) — this
 *	would not compile if re-enabled as-is.
 */
static void
jme_restart(void *v)
{

	jme_init(v);
}
#endif
678 679
/*
 * jme_add_rxbuf:
 *	Install an mbuf cluster in the receive ring at the current
 *	producer index (sc->jme_rx_prod) and hand the descriptor to the
 *	hardware.  If 'm' is NULL a fresh cluster mbuf is allocated;
 *	otherwise the caller's mbuf is (re)used.  On success the
 *	producer index is advanced.  On failure the mbuf is freed and
 *	an errno value is returned.
 */
static int
jme_add_rxbuf(jme_softc_t *sc, struct mbuf *m)
{
	int error;
	bus_dmamap_t map;
	int i = sc->jme_rx_prod;

	/* The producer slot must be empty; otherwise ring bookkeeping
	 * is corrupted — report and bail rather than leak the old mbuf. */
	if (sc->jme_rxmbuf[i] != NULL) {
		aprint_error_dev(sc->jme_dev,
		    "mbuf already here: rxprod %d rxcons %d\n",
		    sc->jme_rx_prod, sc->jme_rx_cons);
		if (m)
			m_freem(m);
		return EINVAL;
	}

	if (m == NULL) {
		/* Allocate a packet-header mbuf and attach a cluster. */
		sc->jme_rxmbuf[i] = NULL;
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			return (ENOBUFS);
		}
	}
	map = sc->jme_rxmbufm[i];
	/* Offer the whole cluster to the hardware. */
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	error = bus_dmamap_load_mbuf(sc->jme_dmatag, map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		sc->jme_rxmbuf[i] = NULL;
		aprint_error_dev(sc->jme_dev,
		    "unable to load rx DMA map %d, error = %d\n",
		    i, error);
		m_freem(m);
		return (error);
	}
	/* Make the buffer visible to the device before it DMAs into it. */
	bus_dmamap_sync(sc->jme_dmatag, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	sc->jme_rxmbuf[i] = m;

	/* Fill in the descriptor and flip ownership to the hardware
	 * (JME_RD_OWN) last, after buflen/address are in place. */
	sc->jme_rxring[i].buflen = htole32(map->dm_segs[0].ds_len);
	sc->jme_rxring[i].addr_lo =
	    htole32(JME_ADDR_LO(map->dm_segs[0].ds_addr));
	sc->jme_rxring[i].addr_hi =
	    htole32(JME_ADDR_HI(map->dm_segs[0].ds_addr));
	sc->jme_rxring[i].flags =
	    htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
	/* Flush the descriptor itself to memory for the device. */
	bus_dmamap_sync(sc->jme_dmatag, sc->jme_rxmap,
	    i * sizeof(struct jme_desc), sizeof(struct jme_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	JME_DESC_INC(sc->jme_rx_prod, JME_NBUFS);
	return (0);
}
736 737
/*
 * jme_ifinit:
 *	ifnet if_init entry point; performs a full initialization via
 *	jme_init() with do_ifinit=1, which also programs the media
 *	(mii_mediachg).
 */
static int
jme_ifinit(struct ifnet *ifp)
{
	return jme_init(ifp, 1);
}
742 743
743static int 744static int
744jme_init(struct ifnet *ifp, int do_ifinit) 745jme_init(struct ifnet *ifp, int do_ifinit)
745{ 746{
746 jme_softc_t *sc = ifp->if_softc; 747 jme_softc_t *sc = ifp->if_softc;
747 int i, s; 748 int i, s;
748 uint8_t eaddr[ETHER_ADDR_LEN]; 749 uint8_t eaddr[ETHER_ADDR_LEN];
749 uint32_t reg; 750 uint32_t reg;
750 751
751 s = splnet(); 752 s = splnet();
752 /* cancel any pending IO */ 753 /* cancel any pending IO */
753 jme_stop(ifp, 1); 754 jme_stop(ifp, 1);
754 jme_reset(sc); 755 jme_reset(sc);
755 if ((sc->jme_if.if_flags & IFF_UP) == 0) { 756 if ((sc->jme_if.if_flags & IFF_UP) == 0) {
756 splx(s); 757 splx(s);
757 return 0; 758 return 0;
758 } 759 }
759 /* allocate receive ring */ 760 /* allocate receive ring */
760 sc->jme_rx_prod = 0; 761 sc->jme_rx_prod = 0;
761 for (i = 0; i < JME_NBUFS; i++) { 762 for (i = 0; i < JME_NBUFS; i++) {
762 if (jme_add_rxbuf(sc, NULL) < 0) { 763 if (jme_add_rxbuf(sc, NULL) < 0) {
763 aprint_error_dev(sc->jme_dev, 764 aprint_error_dev(sc->jme_dev,
764 "can't allocate rx mbuf\n"); 765 "can't allocate rx mbuf\n");
765 for (i--; i >= 0; i--) { 766 for (i--; i >= 0; i--) {
766 bus_dmamap_unload(sc->jme_dmatag, 767 bus_dmamap_unload(sc->jme_dmatag,
767 sc->jme_rxmbufm[i]); 768 sc->jme_rxmbufm[i]);
768 m_freem(sc->jme_rxmbuf[i]); 769 m_freem(sc->jme_rxmbuf[i]);
769 sc->jme_rxmbuf[i] = NULL; 770 sc->jme_rxmbuf[i] = NULL;
770 } 771 }
771 splx(s); 772 splx(s);
772 return ENOMEM; 773 return ENOMEM;
773 } 774 }
774 } 775 }
775 /* init TX ring */ 776 /* init TX ring */
776 memset(sc->jme_txring, 0, JME_NBUFS * sizeof(struct jme_desc)); 777 memset(sc->jme_txring, 0, JME_NBUFS * sizeof(struct jme_desc));
777 bus_dmamap_sync(sc->jme_dmatag, sc->jme_txmap, 778 bus_dmamap_sync(sc->jme_dmatag, sc->jme_txmap,
778 0, JME_NBUFS * sizeof(struct jme_desc), 779 0, JME_NBUFS * sizeof(struct jme_desc),
779 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 780 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
780 for (i = 0; i < JME_NBUFS; i++) 781 for (i = 0; i < JME_NBUFS; i++)
781 sc->jme_txmbuf[i] = NULL; 782 sc->jme_txmbuf[i] = NULL;
782 sc->jme_tx_cons = sc->jme_tx_prod = sc->jme_tx_cnt = 0; 783 sc->jme_tx_cons = sc->jme_tx_prod = sc->jme_tx_cnt = 0;
783 784
784 /* Reprogram the station address. */ 785 /* Reprogram the station address. */
785 memcpy(eaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN); 786 memcpy(eaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN);
786 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_PAR0, 787 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_PAR0,
787 eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]); 788 eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
788 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, 789 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac,
789 JME_PAR1, eaddr[5] << 8 | eaddr[4]); 790 JME_PAR1, eaddr[5] << 8 | eaddr[4]);
790 791
791 /* 792 /*
792 * Configure Tx queue. 793 * Configure Tx queue.
793 * Tx priority queue weight value : 0 794 * Tx priority queue weight value : 0
794 * Tx FIFO threshold for processing next packet : 16QW 795 * Tx FIFO threshold for processing next packet : 16QW
795 * Maximum Tx DMA length : 512 796 * Maximum Tx DMA length : 512
796 * Allow Tx DMA burst. 797 * Allow Tx DMA burst.
797 */ 798 */
798 sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0); 799 sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
799 sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN); 800 sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
800 sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW; 801 sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
801 sc->jme_txcsr |= TXCSR_DMA_SIZE_512; 802 sc->jme_txcsr |= TXCSR_DMA_SIZE_512;
802 sc->jme_txcsr |= TXCSR_DMA_BURST; 803 sc->jme_txcsr |= TXCSR_DMA_BURST;
803 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, 804 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac,
804 JME_TXCSR, sc->jme_txcsr); 805 JME_TXCSR, sc->jme_txcsr);
805 806
806 /* Set Tx descriptor counter. */ 807 /* Set Tx descriptor counter. */
807 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, 808 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac,
808 JME_TXQDC, JME_NBUFS); 809 JME_TXQDC, JME_NBUFS);
809 810
810 /* Set Tx ring address to the hardware. */ 811 /* Set Tx ring address to the hardware. */
811 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXDBA_HI, 812 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXDBA_HI,
812 JME_ADDR_HI(sc->jme_txmap->dm_segs[0].ds_addr)); 813 JME_ADDR_HI(sc->jme_txmap->dm_segs[0].ds_addr));
813 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXDBA_LO, 814 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXDBA_LO,
814 JME_ADDR_LO(sc->jme_txmap->dm_segs[0].ds_addr)); 815 JME_ADDR_LO(sc->jme_txmap->dm_segs[0].ds_addr));
815 816
816 /* Configure TxMAC parameters. */ 817 /* Configure TxMAC parameters. */
817 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXMAC, 818 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXMAC,
818 TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB | 819 TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB |
819 TXMAC_THRESH_1_PKT | TXMAC_CRC_ENB | TXMAC_PAD_ENB); 820 TXMAC_THRESH_1_PKT | TXMAC_CRC_ENB | TXMAC_PAD_ENB);
820 821
821 /* 822 /*
822 * Configure Rx queue. 823 * Configure Rx queue.
823 * FIFO full threshold for transmitting Tx pause packet : 128T 824 * FIFO full threshold for transmitting Tx pause packet : 128T
824 * FIFO threshold for processing next packet : 128QW 825 * FIFO threshold for processing next packet : 128QW
825 * Rx queue 0 select 826 * Rx queue 0 select
826 * Max Rx DMA length : 128 827 * Max Rx DMA length : 128
827 * Rx descriptor retry : 32 828 * Rx descriptor retry : 32
828 * Rx descriptor retry time gap : 256ns 829 * Rx descriptor retry time gap : 256ns
829 * Don't receive runt/bad frame. 830 * Don't receive runt/bad frame.
830 */ 831 */
831 sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T; 832 sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
832 /* 833 /*
833 * Since Rx FIFO size is 4K bytes, receiving frames larger 834 * Since Rx FIFO size is 4K bytes, receiving frames larger
834 * than 4K bytes will suffer from Rx FIFO overruns. So 835 * than 4K bytes will suffer from Rx FIFO overruns. So
835 * decrease FIFO threshold to reduce the FIFO overruns for 836 * decrease FIFO threshold to reduce the FIFO overruns for
836 * frames larger than 4000 bytes. 837 * frames larger than 4000 bytes.
837 * For best performance of standard MTU sized frames use 838 * For best performance of standard MTU sized frames use
838 * maximum allowable FIFO threshold, 128QW. 839 * maximum allowable FIFO threshold, 128QW.
839 */ 840 */
840 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + 841 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
841 ETHER_CRC_LEN) > JME_RX_FIFO_SIZE) 842 ETHER_CRC_LEN) > JME_RX_FIFO_SIZE)
842 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW; 843 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
843 else 844 else
844 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW; 845 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
845 sc->jme_rxcsr |= RXCSR_DMA_SIZE_128 | RXCSR_RXQ_N_SEL(RXCSR_RXQ0); 846 sc->jme_rxcsr |= RXCSR_DMA_SIZE_128 | RXCSR_RXQ_N_SEL(RXCSR_RXQ0);
846 sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT); 847 sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
847 sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK; 848 sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
848 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, 849 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac,
849 JME_RXCSR, sc->jme_rxcsr); 850 JME_RXCSR, sc->jme_rxcsr);
850 851
851 /* Set Rx descriptor counter. */ 852 /* Set Rx descriptor counter. */
852 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, 853 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac,
853 JME_RXQDC, JME_NBUFS); 854 JME_RXQDC, JME_NBUFS);
854 855
855 /* Set Rx ring address to the hardware. */ 856 /* Set Rx ring address to the hardware. */
856 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_RXDBA_HI, 857 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_RXDBA_HI,
857 JME_ADDR_HI(sc->jme_rxmap->dm_segs[0].ds_addr)); 858 JME_ADDR_HI(sc->jme_rxmap->dm_segs[0].ds_addr));
858 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_RXDBA_LO, 859 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_RXDBA_LO,
859 JME_ADDR_LO(sc->jme_rxmap->dm_segs[0].ds_addr)); 860 JME_ADDR_LO(sc->jme_rxmap->dm_segs[0].ds_addr));
860 861
861 /* Clear receive filter. */ 862 /* Clear receive filter. */
862 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_RXMAC, 0); 863 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_RXMAC, 0);
863 /* Set up the receive filter. */ 864 /* Set up the receive filter. */
864 jme_set_filter(sc); 865 jme_set_filter(sc);
865 866
866 /* 867 /*
867 * Disable all WOL bits as WOL can interfere normal Rx 868 * Disable all WOL bits as WOL can interfere normal Rx
868 * operation. Also clear WOL detection status bits. 869 * operation. Also clear WOL detection status bits.
869 */ 870 */
870 reg = bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_PMCS); 871 reg = bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_PMCS);
871 reg &= ~PMCS_WOL_ENB_MASK; 872 reg &= ~PMCS_WOL_ENB_MASK;
872 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_PMCS, reg); 873 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_PMCS, reg);
873 874
874 reg = bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_RXMAC); 875 reg = bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_RXMAC);
875 /* 876 /*
876 * Pad 10bytes right before received frame. This will greatly 877 * Pad 10bytes right before received frame. This will greatly
877 * help Rx performance on strict-alignment architectures as 878 * help Rx performance on strict-alignment architectures as
878 * it does not need to copy the frame to align the payload. 879 * it does not need to copy the frame to align the payload.
879 */ 880 */
880 reg |= RXMAC_PAD_10BYTES; 881 reg |= RXMAC_PAD_10BYTES;
881 if ((ifp->if_capenable & 882 if ((ifp->if_capenable &
882 (IFCAP_CSUM_IPv4_Rx|IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx| 883 (IFCAP_CSUM_IPv4_Rx|IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
883 IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx)) != 0) 884 IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx)) != 0)
884 reg |= RXMAC_CSUM_ENB; 885 reg |= RXMAC_CSUM_ENB;
885 reg |= RXMAC_VLAN_ENB; /* enable hardware vlan */ 886 reg |= RXMAC_VLAN_ENB; /* enable hardware vlan */
886 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_RXMAC, reg); 887 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_RXMAC, reg);
887 888
888 /* Configure general purpose reg0 */ 889 /* Configure general purpose reg0 */
889 reg = bus_space_read_4(sc->jme_bt_misc, sc->jme_bh_misc, JME_GPREG0); 890 reg = bus_space_read_4(sc->jme_bt_misc, sc->jme_bh_misc, JME_GPREG0);
890 reg &= ~GPREG0_PCC_UNIT_MASK; 891 reg &= ~GPREG0_PCC_UNIT_MASK;
891 /* Set PCC timer resolution to micro-seconds unit. */ 892 /* Set PCC timer resolution to micro-seconds unit. */
892 reg |= GPREG0_PCC_UNIT_US; 893 reg |= GPREG0_PCC_UNIT_US;
893 /* 894 /*
894 * Disable all shadow register posting as we have to read 895 * Disable all shadow register posting as we have to read
895 * JME_INTR_STATUS register in jme_int_task. Also it seems 896 * JME_INTR_STATUS register in jme_int_task. Also it seems
896 * that it's hard to synchronize interrupt status between 897 * that it's hard to synchronize interrupt status between
897 * hardware and software with shadow posting due to 898 * hardware and software with shadow posting due to
898 * requirements of bus_dmamap_sync(9). 899 * requirements of bus_dmamap_sync(9).
899 */ 900 */
900 reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS | 901 reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
901 GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS | 902 GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
902 GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS | 903 GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
903 GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS; 904 GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
904 /* Disable posting of DW0. */ 905 /* Disable posting of DW0. */
905 reg &= ~GPREG0_POST_DW0_ENB; 906 reg &= ~GPREG0_POST_DW0_ENB;
906 /* Clear PME message. */ 907 /* Clear PME message. */
907 reg &= ~GPREG0_PME_ENB; 908 reg &= ~GPREG0_PME_ENB;
908 /* Set PHY address. */ 909 /* Set PHY address. */
909 reg &= ~GPREG0_PHY_ADDR_MASK; 910 reg &= ~GPREG0_PHY_ADDR_MASK;
910 reg |= sc->jme_phyaddr; 911 reg |= sc->jme_phyaddr;
911 bus_space_write_4(sc->jme_bt_misc, sc->jme_bh_misc, JME_GPREG0, reg); 912 bus_space_write_4(sc->jme_bt_misc, sc->jme_bh_misc, JME_GPREG0, reg);
912 913
913 /* Configure Tx queue 0 packet completion coalescing. */ 914 /* Configure Tx queue 0 packet completion coalescing. */
914 reg = (sc->jme_inttxto << PCCTX_COAL_TO_SHIFT) & PCCTX_COAL_TO_MASK; 915 reg = (sc->jme_inttxto << PCCTX_COAL_TO_SHIFT) & PCCTX_COAL_TO_MASK;
915 reg |= (sc->jme_inttxct << PCCTX_COAL_PKT_SHIFT) & PCCTX_COAL_PKT_MASK; 916 reg |= (sc->jme_inttxct << PCCTX_COAL_PKT_SHIFT) & PCCTX_COAL_PKT_MASK;
916 reg |= PCCTX_COAL_TXQ0; 917 reg |= PCCTX_COAL_TXQ0;
917 bus_space_write_4(sc->jme_bt_misc, sc->jme_bh_misc, JME_PCCTX, reg); 918 bus_space_write_4(sc->jme_bt_misc, sc->jme_bh_misc, JME_PCCTX, reg);
918 919
919 /* Configure Rx queue 0 packet completion coalescing. */ 920 /* Configure Rx queue 0 packet completion coalescing. */
920 reg = (sc->jme_intrxto << PCCRX_COAL_TO_SHIFT) & PCCRX_COAL_TO_MASK; 921 reg = (sc->jme_intrxto << PCCRX_COAL_TO_SHIFT) & PCCRX_COAL_TO_MASK;
921 reg |= (sc->jme_intrxct << PCCRX_COAL_PKT_SHIFT) & PCCRX_COAL_PKT_MASK; 922 reg |= (sc->jme_intrxct << PCCRX_COAL_PKT_SHIFT) & PCCRX_COAL_PKT_MASK;
922 bus_space_write_4(sc->jme_bt_misc, sc->jme_bh_misc, JME_PCCRX0, reg); 923 bus_space_write_4(sc->jme_bt_misc, sc->jme_bh_misc, JME_PCCRX0, reg);
923 924
924 /* Disable Timers */ 925 /* Disable Timers */
925 bus_space_write_4(sc->jme_bt_misc, sc->jme_bh_misc, JME_TMCSR, 0); 926 bus_space_write_4(sc->jme_bt_misc, sc->jme_bh_misc, JME_TMCSR, 0);
926 bus_space_write_4(sc->jme_bt_misc, sc->jme_bh_misc, JME_TIMER1, 0); 927 bus_space_write_4(sc->jme_bt_misc, sc->jme_bh_misc, JME_TIMER1, 0);
927 bus_space_write_4(sc->jme_bt_misc, sc->jme_bh_misc, JME_TIMER2, 0); 928 bus_space_write_4(sc->jme_bt_misc, sc->jme_bh_misc, JME_TIMER2, 0);
928 929
929 /* Configure retry transmit period, retry limit value. */ 930 /* Configure retry transmit period, retry limit value. */
930 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXTRHD, 931 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXTRHD,
931 ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) & 932 ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
932 TXTRHD_RT_PERIOD_MASK) | 933 TXTRHD_RT_PERIOD_MASK) |
933 ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) & 934 ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
934 TXTRHD_RT_LIMIT_SHIFT)); 935 TXTRHD_RT_LIMIT_SHIFT));
935 936
936 /* Disable RSS. */ 937 /* Disable RSS. */
937 bus_space_write_4(sc->jme_bt_misc, sc->jme_bh_misc, 938 bus_space_write_4(sc->jme_bt_misc, sc->jme_bh_misc,
938 JME_RSSC, RSSC_DIS_RSS); 939 JME_RSSC, RSSC_DIS_RSS);
939 940
940 /* Initialize the interrupt mask. */ 941 /* Initialize the interrupt mask. */
941 bus_space_write_4(sc->jme_bt_misc, sc->jme_bh_misc, 942 bus_space_write_4(sc->jme_bt_misc, sc->jme_bh_misc,
942 JME_INTR_MASK_SET, JME_INTRS_ENABLE); 943 JME_INTR_MASK_SET, JME_INTRS_ENABLE);
943 bus_space_write_4(sc->jme_bt_misc, sc->jme_bh_misc, 944 bus_space_write_4(sc->jme_bt_misc, sc->jme_bh_misc,
944 JME_INTR_STATUS, 0xFFFFFFFF); 945 JME_INTR_STATUS, 0xFFFFFFFF);
945 946
946 /* set media, if not already handling a media change */ 947 /* set media, if not already handling a media change */
947 if (do_ifinit) { 948 if (do_ifinit) {
948 int error; 949 int error;
949 if ((error = mii_mediachg(&sc->jme_mii)) == ENXIO) 950 if ((error = mii_mediachg(&sc->jme_mii)) == ENXIO)
950 error = 0; 951 error = 0;
951 else if (error != 0) { 952 else if (error != 0) {
952 aprint_error_dev(sc->jme_dev, "could not set media\n"); 953 aprint_error_dev(sc->jme_dev, "could not set media\n");
953 return error; 954 return error;
954 } 955 }
955 } 956 }
956 957
957 /* Program MAC with resolved speed/duplex/flow-control. */ 958 /* Program MAC with resolved speed/duplex/flow-control. */
958 jme_mac_config(sc); 959 jme_mac_config(sc);
959 960
960 /* Start receiver/transmitter. */ 961 /* Start receiver/transmitter. */
961 sc->jme_rx_cons = 0; 962 sc->jme_rx_cons = 0;
962 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_RXCSR, 963 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_RXCSR,
963 sc->jme_rxcsr | RXCSR_RX_ENB | RXCSR_RXQ_START); 964 sc->jme_rxcsr | RXCSR_RX_ENB | RXCSR_RXQ_START);
964 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXCSR, 965 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXCSR,
965 sc->jme_txcsr | TXCSR_TX_ENB); 966 sc->jme_txcsr | TXCSR_TX_ENB);
966 967
967 /* start ticks calls */ 968 /* start ticks calls */
968 callout_reset(&sc->jme_tick_ch, hz, jme_ticks, sc); 969 callout_reset(&sc->jme_tick_ch, hz, jme_ticks, sc);
969 sc->jme_if.if_flags |= IFF_RUNNING; 970 sc->jme_if.if_flags |= IFF_RUNNING;
970 sc->jme_if.if_flags &= ~IFF_OACTIVE; 971 sc->jme_if.if_flags &= ~IFF_OACTIVE;
971 splx(s); 972 splx(s);
972 return 0; 973 return 0;
973} 974}
974 975
975 976
976int 977int
977jme_mii_read(device_t self, int phy, int reg) 978jme_mii_read(device_t self, int phy, int reg)
978{ 979{
979 struct jme_softc *sc = device_private(self); 980 struct jme_softc *sc = device_private(self);
980 int val, i; 981 int val, i;
981 982
982 /* For FPGA version, PHY address 0 should be ignored. */ 983 /* For FPGA version, PHY address 0 should be ignored. */
983 if ((sc->jme_flags & JME_FLAG_FPGA) != 0) { 984 if ((sc->jme_flags & JME_FLAG_FPGA) != 0) {
984 if (phy == 0) 985 if (phy == 0)
985 return (0); 986 return (0);
986 } else { 987 } else {
987 if (sc->jme_phyaddr != phy) 988 if (sc->jme_phyaddr != phy)
988 return (0); 989 return (0);
989 } 990 }
990 991
991 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_SMI, 992 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_SMI,
992 SMI_OP_READ | SMI_OP_EXECUTE | 993 SMI_OP_READ | SMI_OP_EXECUTE |
993 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg)); 994 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
994 for (i = JME_PHY_TIMEOUT / 10; i > 0; i--) { 995 for (i = JME_PHY_TIMEOUT / 10; i > 0; i--) {
995 delay(10); 996 delay(10);
996 if (((val = bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, 997 if (((val = bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac,
997 JME_SMI)) & SMI_OP_EXECUTE) == 0) 998 JME_SMI)) & SMI_OP_EXECUTE) == 0)
998 break; 999 break;
999 } 1000 }
1000 1001
1001 if (i == 0) { 1002 if (i == 0) {
1002 aprint_error_dev(sc->jme_dev, "phy read timeout : %d\n", reg); 1003 aprint_error_dev(sc->jme_dev, "phy read timeout : %d\n", reg);
1003 return (0); 1004 return (0);
1004 } 1005 }
1005 1006
1006 return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT); 1007 return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
1007} 1008}
1008 1009
1009void 1010void
1010jme_mii_write(device_t self, int phy, int reg, int val) 1011jme_mii_write(device_t self, int phy, int reg, int val)
1011{ 1012{
1012 struct jme_softc *sc = device_private(self); 1013 struct jme_softc *sc = device_private(self);
1013 int i; 1014 int i;
1014 1015
1015 /* For FPGA version, PHY address 0 should be ignored. */ 1016 /* For FPGA version, PHY address 0 should be ignored. */
1016 if ((sc->jme_flags & JME_FLAG_FPGA) != 0) { 1017 if ((sc->jme_flags & JME_FLAG_FPGA) != 0) {
1017 if (phy == 0) 1018 if (phy == 0)
1018 return; 1019 return;
1019 } else { 1020 } else {
1020 if (sc->jme_phyaddr != phy) 1021 if (sc->jme_phyaddr != phy)
1021 return; 1022 return;
1022 } 1023 }
1023 1024
1024 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_SMI, 1025 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_SMI,
1025 SMI_OP_WRITE | SMI_OP_EXECUTE | 1026 SMI_OP_WRITE | SMI_OP_EXECUTE |
1026 ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) | 1027 ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
1027 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg)); 1028 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
1028 for (i = JME_PHY_TIMEOUT / 10; i > 0; i--) { 1029 for (i = JME_PHY_TIMEOUT / 10; i > 0; i--) {
1029 delay(10); 1030 delay(10);
1030 if (((val = bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, 1031 if (((val = bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac,
1031 JME_SMI)) & SMI_OP_EXECUTE) == 0) 1032 JME_SMI)) & SMI_OP_EXECUTE) == 0)
1032 break; 1033 break;
1033 } 1034 }
1034 1035
1035 if (i == 0) 1036 if (i == 0)
1036 aprint_error_dev(sc->jme_dev, "phy write timeout : %d\n", reg); 1037 aprint_error_dev(sc->jme_dev, "phy write timeout : %d\n", reg);
1037 1038
1038 return; 1039 return;
1039} 1040}
1040 1041
1041void 1042void
1042jme_statchg(device_t self) 1043jme_statchg(device_t self)
1043{ 1044{
1044 jme_softc_t *sc = device_private(self); 1045 jme_softc_t *sc = device_private(self);
1045 struct ifnet *ifp = &sc->jme_if; 1046 struct ifnet *ifp = &sc->jme_if;
1046 if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) == (IFF_UP|IFF_RUNNING)) 1047 if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) == (IFF_UP|IFF_RUNNING))
1047 jme_init(ifp, 0); 1048 jme_init(ifp, 0);
1048} 1049}
1049 1050
1050static void 1051static void
1051jme_intr_rx(jme_softc_t *sc) { 1052jme_intr_rx(jme_softc_t *sc) {
1052 struct mbuf *m, *mhead; 1053 struct mbuf *m, *mhead;
1053 struct ifnet *ifp = &sc->jme_if; 1054 struct ifnet *ifp = &sc->jme_if;
1054 uint32_t flags, buflen; 1055 uint32_t flags, buflen;
1055 int i, ipackets, nsegs, seg, error; 1056 int i, ipackets, nsegs, seg, error;
1056 struct jme_desc *desc; 1057 struct jme_desc *desc;
1057 1058
1058 bus_dmamap_sync(sc->jme_dmatag, sc->jme_rxmap, 0, 1059 bus_dmamap_sync(sc->jme_dmatag, sc->jme_rxmap, 0,
1059 sizeof(struct jme_desc) * JME_NBUFS, 1060 sizeof(struct jme_desc) * JME_NBUFS,
1060 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1061 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1061#ifdef JMEDEBUG_RX 1062#ifdef JMEDEBUG_RX
1062 printf("rxintr sc->jme_rx_cons %d flags 0x%x\n", 1063 printf("rxintr sc->jme_rx_cons %d flags 0x%x\n",
1063 sc->jme_rx_cons, le32toh(sc->jme_rxring[sc->jme_rx_cons].flags)); 1064 sc->jme_rx_cons, le32toh(sc->jme_rxring[sc->jme_rx_cons].flags));
1064#endif 1065#endif
1065 ipackets = 0; 1066 ipackets = 0;
1066 while((le32toh(sc->jme_rxring[ sc->jme_rx_cons].flags) & JME_RD_OWN) 1067 while((le32toh(sc->jme_rxring[ sc->jme_rx_cons].flags) & JME_RD_OWN)
1067 == 0) { 1068 == 0) {
1068 i = sc->jme_rx_cons; 1069 i = sc->jme_rx_cons;
1069 desc = &sc->jme_rxring[i]; 1070 desc = &sc->jme_rxring[i];
1070#ifdef JMEDEBUG_RX 1071#ifdef JMEDEBUG_RX
1071 printf("rxintr i %d flags 0x%x buflen 0x%x\n", 1072 printf("rxintr i %d flags 0x%x buflen 0x%x\n",
1072 i, le32toh(desc->flags), le32toh(desc->buflen)); 1073 i, le32toh(desc->flags), le32toh(desc->buflen));
1073#endif 1074#endif
1074 if ((le32toh(desc->buflen) & JME_RD_VALID) == 0) 1075 if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
1075 break; 1076 break;
1076 bus_dmamap_sync(sc->jme_dmatag, sc->jme_rxmbufm[i], 0, 1077 bus_dmamap_sync(sc->jme_dmatag, sc->jme_rxmbufm[i], 0,
1077 sc->jme_rxmbufm[i]->dm_mapsize, BUS_DMASYNC_POSTREAD); 1078 sc->jme_rxmbufm[i]->dm_mapsize, BUS_DMASYNC_POSTREAD);
1078 bus_dmamap_unload(sc->jme_dmatag, sc->jme_rxmbufm[i]); 1079 bus_dmamap_unload(sc->jme_dmatag, sc->jme_rxmbufm[i]);
1079 1080
1080 buflen = le32toh(desc->buflen); 1081 buflen = le32toh(desc->buflen);
1081 nsegs = JME_RX_NSEGS(buflen); 1082 nsegs = JME_RX_NSEGS(buflen);
1082 flags = le32toh(desc->flags); 1083 flags = le32toh(desc->flags);
1083 if ((buflen & JME_RX_ERR_STAT) != 0 || 1084 if ((buflen & JME_RX_ERR_STAT) != 0 ||
1084 JME_RX_BYTES(buflen) < sizeof(struct ether_header) || 1085 JME_RX_BYTES(buflen) < sizeof(struct ether_header) ||
1085 JME_RX_BYTES(buflen) > 1086 JME_RX_BYTES(buflen) >
1086 (ifp->if_mtu + ETHER_HDR_LEN + JME_RX_PAD_BYTES)) { 1087 (ifp->if_mtu + ETHER_HDR_LEN + JME_RX_PAD_BYTES)) {
1087#ifdef JMEDEBUG_RX 1088#ifdef JMEDEBUG_RX
1088 printf("rx error flags 0x%x buflen 0x%x\n", 1089 printf("rx error flags 0x%x buflen 0x%x\n",
1089 flags, buflen); 1090 flags, buflen);
1090#endif 1091#endif
1091 ifp->if_ierrors++; 1092 ifp->if_ierrors++;
1092 /* reuse the mbufs */ 1093 /* reuse the mbufs */
1093 for (seg = 0; seg < nsegs; seg++) { 1094 for (seg = 0; seg < nsegs; seg++) {
1094 m = sc->jme_rxmbuf[i]; 1095 m = sc->jme_rxmbuf[i];
1095 sc->jme_rxmbuf[i] = NULL; 1096 sc->jme_rxmbuf[i] = NULL;
1096 if ((error = jme_add_rxbuf(sc, m)) != 0) 1097 if ((error = jme_add_rxbuf(sc, m)) != 0)
1097 aprint_error_dev(sc->jme_dev, 1098 aprint_error_dev(sc->jme_dev,
1098 "can't reuse mbuf: %d\n", error); 1099 "can't reuse mbuf: %d\n", error);
1099 JME_DESC_INC(sc->jme_rx_cons, JME_NBUFS); 1100 JME_DESC_INC(sc->jme_rx_cons, JME_NBUFS);
1100 i = sc->jme_rx_cons; 1101 i = sc->jme_rx_cons;
1101 } 1102 }
1102 continue; 1103 continue;
1103 } 1104 }
1104 /* receive this packet */ 1105 /* receive this packet */
1105 mhead = m = sc->jme_rxmbuf[i]; 1106 mhead = m = sc->jme_rxmbuf[i];
1106 sc->jme_rxmbuf[i] = NULL; 1107 sc->jme_rxmbuf[i] = NULL;
1107 /* add a new buffer to chain */ 1108 /* add a new buffer to chain */
1108 if (jme_add_rxbuf(sc, NULL) == ENOBUFS) { 1109 if (jme_add_rxbuf(sc, NULL) == ENOBUFS) {
1109 for (seg = 0; seg < nsegs; seg++) { 1110 for (seg = 0; seg < nsegs; seg++) {
1110 m = sc->jme_rxmbuf[i]; 1111 m = sc->jme_rxmbuf[i];
1111 sc->jme_rxmbuf[i] = NULL; 1112 sc->jme_rxmbuf[i] = NULL;
1112 if ((error = jme_add_rxbuf(sc, m)) != 0) 1113 if ((error = jme_add_rxbuf(sc, m)) != 0)
1113 aprint_error_dev(sc->jme_dev, 1114 aprint_error_dev(sc->jme_dev,
1114 "can't reuse mbuf: %d\n", error); 1115 "can't reuse mbuf: %d\n", error);
1115 JME_DESC_INC(sc->jme_rx_cons, JME_NBUFS); 1116 JME_DESC_INC(sc->jme_rx_cons, JME_NBUFS);
1116 i = sc->jme_rx_cons; 1117 i = sc->jme_rx_cons;
1117 } 1118 }
1118 ifp->if_ierrors++; 1119 ifp->if_ierrors++;
1119 continue; 1120 continue;
1120 } 1121 }
1121 1122
1122 /* build mbuf chain: head, then remaining segments */ 1123 /* build mbuf chain: head, then remaining segments */
1123 m->m_pkthdr.rcvif = ifp; 1124 m->m_pkthdr.rcvif = ifp;
1124 m->m_pkthdr.len = JME_RX_BYTES(buflen) - JME_RX_PAD_BYTES; 1125 m->m_pkthdr.len = JME_RX_BYTES(buflen) - JME_RX_PAD_BYTES;
1125 m->m_len = (nsegs > 1) ? (MCLBYTES - JME_RX_PAD_BYTES) : 1126 m->m_len = (nsegs > 1) ? (MCLBYTES - JME_RX_PAD_BYTES) :
1126 m->m_pkthdr.len; 1127 m->m_pkthdr.len;
1127 m->m_data = m->m_ext.ext_buf + JME_RX_PAD_BYTES; 1128 m->m_data = m->m_ext.ext_buf + JME_RX_PAD_BYTES;
1128 JME_DESC_INC(sc->jme_rx_cons, JME_NBUFS); 1129 JME_DESC_INC(sc->jme_rx_cons, JME_NBUFS);
1129 for (seg = 1; seg < nsegs; seg++) { 1130 for (seg = 1; seg < nsegs; seg++) {
1130 i = sc->jme_rx_cons; 1131 i = sc->jme_rx_cons;
1131 m = sc->jme_rxmbuf[i]; 1132 m = sc->jme_rxmbuf[i];
1132 sc->jme_rxmbuf[i] = NULL; 1133 sc->jme_rxmbuf[i] = NULL;
1133 (void)jme_add_rxbuf(sc, NULL); 1134 (void)jme_add_rxbuf(sc, NULL);
1134 m->m_flags &= ~M_PKTHDR; 1135 m->m_flags &= ~M_PKTHDR;
1135 m_cat(mhead, m); 1136 m_cat(mhead, m);
1136 JME_DESC_INC(sc->jme_rx_cons, JME_NBUFS); 1137 JME_DESC_INC(sc->jme_rx_cons, JME_NBUFS);
1137 } 1138 }
1138 /* and adjust last mbuf's size */ 1139 /* and adjust last mbuf's size */
1139 if (nsegs > 1) { 1140 if (nsegs > 1) {
1140 m->m_len = 1141 m->m_len =
1141 JME_RX_BYTES(buflen) - (MCLBYTES * (nsegs - 1)); 1142 JME_RX_BYTES(buflen) - (MCLBYTES * (nsegs - 1));
1142 } 1143 }
1143 ifp->if_ipackets++; 1144 ifp->if_ipackets++;
1144 ipackets++; 1145 ipackets++;
1145 bpf_mtap(ifp, mhead); 1146 bpf_mtap(ifp, mhead);
1146 1147
1147 if ((ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) && 1148 if ((ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) &&
1148 (flags & JME_RD_IPV4)) { 1149 (flags & JME_RD_IPV4)) {
1149 mhead->m_pkthdr.csum_flags |= M_CSUM_IPv4; 1150 mhead->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1150 if (!(flags & JME_RD_IPCSUM)) 1151 if (!(flags & JME_RD_IPCSUM))
1151 mhead->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 1152 mhead->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1152 } 1153 }
1153 if ((ifp->if_capenable & IFCAP_CSUM_TCPv4_Rx) && 1154 if ((ifp->if_capenable & IFCAP_CSUM_TCPv4_Rx) &&
1154 (flags & JME_RD_TCPV4) == JME_RD_TCPV4) { 1155 (flags & JME_RD_TCPV4) == JME_RD_TCPV4) {
1155 mhead->m_pkthdr.csum_flags |= M_CSUM_TCPv4; 1156 mhead->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
1156 if (!(flags & JME_RD_TCPCSUM)) 1157 if (!(flags & JME_RD_TCPCSUM))
1157 mhead->m_pkthdr.csum_flags |= 1158 mhead->m_pkthdr.csum_flags |=
1158 M_CSUM_TCP_UDP_BAD; 1159 M_CSUM_TCP_UDP_BAD;
1159 } 1160 }
1160 if ((ifp->if_capenable & IFCAP_CSUM_UDPv4_Rx) && 1161 if ((ifp->if_capenable & IFCAP_CSUM_UDPv4_Rx) &&
1161 (flags & JME_RD_UDPV4) == JME_RD_UDPV4) { 1162 (flags & JME_RD_UDPV4) == JME_RD_UDPV4) {
1162 mhead->m_pkthdr.csum_flags |= M_CSUM_UDPv4; 1163 mhead->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
1163 if (!(flags & JME_RD_UDPCSUM)) 1164 if (!(flags & JME_RD_UDPCSUM))
1164 mhead->m_pkthdr.csum_flags |= 1165 mhead->m_pkthdr.csum_flags |=
1165 M_CSUM_TCP_UDP_BAD; 1166 M_CSUM_TCP_UDP_BAD;
1166 } 1167 }
1167 if ((ifp->if_capenable & IFCAP_CSUM_TCPv6_Rx) && 1168 if ((ifp->if_capenable & IFCAP_CSUM_TCPv6_Rx) &&
1168 (flags & JME_RD_TCPV6) == JME_RD_TCPV6) { 1169 (flags & JME_RD_TCPV6) == JME_RD_TCPV6) {
1169 mhead->m_pkthdr.csum_flags |= M_CSUM_TCPv6; 1170 mhead->m_pkthdr.csum_flags |= M_CSUM_TCPv6;
1170 if (!(flags & JME_RD_TCPCSUM)) 1171 if (!(flags & JME_RD_TCPCSUM))
1171 mhead->m_pkthdr.csum_flags |= 1172 mhead->m_pkthdr.csum_flags |=
1172 M_CSUM_TCP_UDP_BAD; 1173 M_CSUM_TCP_UDP_BAD;
1173 } 1174 }
1174 if ((ifp->if_capenable & IFCAP_CSUM_UDPv6_Rx) && 1175 if ((ifp->if_capenable & IFCAP_CSUM_UDPv6_Rx) &&
1175 (flags & JME_RD_UDPV6) == JME_RD_UDPV6) { 1176 (flags & JME_RD_UDPV6) == JME_RD_UDPV6) {
1176 m->m_pkthdr.csum_flags |= M_CSUM_UDPv6; 1177 m->m_pkthdr.csum_flags |= M_CSUM_UDPv6;
1177 if (!(flags & JME_RD_UDPCSUM)) 1178 if (!(flags & JME_RD_UDPCSUM))
1178 mhead->m_pkthdr.csum_flags |= 1179 mhead->m_pkthdr.csum_flags |=
1179 M_CSUM_TCP_UDP_BAD; 1180 M_CSUM_TCP_UDP_BAD;
1180 } 1181 }
1181 if (flags & JME_RD_VLAN_TAG) { 1182 if (flags & JME_RD_VLAN_TAG) {
1182 /* pass to vlan_input() */ 1183 /* pass to vlan_input() */
1183 VLAN_INPUT_TAG(ifp, mhead, 1184 VLAN_INPUT_TAG(ifp, mhead,
1184 (flags & JME_RD_VLAN_MASK), continue); 1185 (flags & JME_RD_VLAN_MASK), continue);
1185 } 1186 }
1186 (*ifp->if_input)(ifp, mhead); 1187 (*ifp->if_input)(ifp, mhead);
1187 } 1188 }
1188#if NRND > 0 1189#if NRND > 0
1189 if (ipackets && RND_ENABLED(&sc->rnd_source)) 1190 if (ipackets && RND_ENABLED(&sc->rnd_source))
1190 rnd_add_uint32(&sc->rnd_source, ipackets); 1191 rnd_add_uint32(&sc->rnd_source, ipackets);
1191#endif /* NRND > 0 */ 1192#endif /* NRND > 0 */
1192 1193
1193} 1194}
1194 1195
/*
 * Interrupt service routine.
 *
 * Reads JME_INTR_STATUS to see whether this device raised the
 * interrupt (0 or all-ones means "not ours"/hardware gone), masks all
 * interrupts, then loops servicing Rx/Tx completion causes until the
 * status register shows nothing left in JME_INTRS_CHECK, and finally
 * re-enables the interrupt mask.  Returns 0 if the interrupt was not
 * ours, 1 if it was handled.
 */
static int
jme_intr(void *v)
{
	jme_softc_t *sc = v;
	uint32_t istatus;

	istatus = bus_space_read_4(sc->jme_bt_misc, sc->jme_bh_misc,
	    JME_INTR_STATUS);
	/* 0: not our interrupt; 0xFFFFFFFF: device detached/powered off */
	if (istatus == 0 || istatus == 0xFFFFFFFF)
		return 0;
	/* Disable interrupts. */
	bus_space_write_4(sc->jme_bt_misc, sc->jme_bh_misc,
	    JME_INTR_MASK_CLR, 0xFFFFFFFF);
again:
	/* and update istatus */
	istatus = bus_space_read_4(sc->jme_bt_misc, sc->jme_bh_misc,
	    JME_INTR_STATUS);
	if ((istatus & JME_INTRS_CHECK) == 0)
		goto done;
	/* Reset PCC counter/timer and Ack interrupts. */
	if ((istatus & (INTR_TXQ_COMP | INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) != 0)
		istatus |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
	if ((istatus & (INTR_RXQ_COMP | INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0)
		istatus |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO | INTR_RXQ_COMP;
	bus_space_write_4(sc->jme_bt_misc, sc->jme_bh_misc,
	    JME_INTR_STATUS, istatus);

	/* Interface went down while we were masked: skip processing. */
	if ((sc->jme_if.if_flags & IFF_RUNNING) == 0)
		goto done;
#ifdef JMEDEBUG_RX
	printf("jme_intr 0x%x RXCS 0x%x RXDBA 0x%x 0x%x RXQDC 0x%x RXNDA 0x%x RXMCS 0x%x\n", istatus,
	    bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_RXCSR),
	    bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_RXDBA_LO),
	    bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_RXDBA_HI),
	    bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_RXQDC),
	    bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_RXNDA),
	    bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_RXMAC));
	printf("jme_intr RXUMA 0x%x 0x%x RXMCHT 0x%x 0x%x GHC 0x%x\n",
	    bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_PAR0),
	    bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_PAR1),
	    bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_MAR0),
	    bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_MAR1),
	    bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_GHC));
#endif
	if ((istatus & (INTR_RXQ_COMP | INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0)
		jme_intr_rx(sc);
	if ((istatus & INTR_RXQ_DESC_EMPTY) != 0) {
		/*
		 * Notify hardware availability of new Rx
		 * buffers.
		 * Reading RXCSR takes very long time under
		 * heavy load so cache RXCSR value and writes
		 * the ORed value with the kick command to
		 * the RXCSR. This saves one register access
		 * cycle.
		 */
		sc->jme_rx_cons = 0;
		bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac,
		    JME_RXCSR,
		    sc->jme_rxcsr | RXCSR_RX_ENB | RXCSR_RXQ_START);
	}
	/* Tx completion: try to queue more packets. */
	if ((istatus & (INTR_TXQ_COMP | INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) != 0)
		jme_ifstart(&sc->jme_if);

	/* Re-check status in case more work arrived while we serviced. */
	goto again;

done:
	/* enable interrupts. */
	bus_space_write_4(sc->jme_bt_misc, sc->jme_bh_misc,
	    JME_INTR_MASK_SET, JME_INTRS_ENABLE);
	return 1;
}
1267 1268
1268 1269
1269static int 1270static int
1270jme_ifioctl(struct ifnet *ifp, unsigned long cmd, void *data) 1271jme_ifioctl(struct ifnet *ifp, unsigned long cmd, void *data)
1271{ 1272{
1272 struct jme_softc *sc = ifp->if_softc; 1273 struct jme_softc *sc = ifp->if_softc;
1273 int s, error; 1274 int s, error;
1274 struct ifreq *ifr; 1275 struct ifreq *ifr;
1275 struct ifcapreq *ifcr; 1276 struct ifcapreq *ifcr;
1276 1277
1277 s = splnet(); 1278 s = splnet();
1278 /* 1279 /*
1279 * we can't support at the same time jumbo frames and 1280 * we can't support at the same time jumbo frames and
1280 * TX checksums offload/TSO 1281 * TX checksums offload/TSO
1281 */ 1282 */
1282 switch(cmd) { 1283 switch(cmd) {
1283 case SIOCSIFMTU: 1284 case SIOCSIFMTU:
1284 ifr = data; 1285 ifr = data;
1285 if (ifr->ifr_mtu > JME_TX_FIFO_SIZE && 1286 if (ifr->ifr_mtu > JME_TX_FIFO_SIZE &&
1286 (ifp->if_capenable & ( 1287 (ifp->if_capenable & (
1287 IFCAP_CSUM_IPv4_Tx|IFCAP_CSUM_TCPv4_Tx|IFCAP_CSUM_UDPv4_Tx| 1288 IFCAP_CSUM_IPv4_Tx|IFCAP_CSUM_TCPv4_Tx|IFCAP_CSUM_UDPv4_Tx|
1288 IFCAP_CSUM_TCPv6_Tx|IFCAP_CSUM_UDPv6_Tx| 1289 IFCAP_CSUM_TCPv6_Tx|IFCAP_CSUM_UDPv6_Tx|
1289 IFCAP_TSOv4|IFCAP_TSOv6)) != 0) { 1290 IFCAP_TSOv4|IFCAP_TSOv6)) != 0) {
1290 splx(s); 1291 splx(s);
1291 return EINVAL; 1292 return EINVAL;
1292 } 1293 }
1293 break; 1294 break;
1294 case SIOCSIFCAP: 1295 case SIOCSIFCAP:
1295 ifcr = data; 1296 ifcr = data;
1296 if (ifp->if_mtu > JME_TX_FIFO_SIZE && 1297 if (ifp->if_mtu > JME_TX_FIFO_SIZE &&
1297 (ifcr->ifcr_capenable & ( 1298 (ifcr->ifcr_capenable & (
1298 IFCAP_CSUM_IPv4_Tx|IFCAP_CSUM_TCPv4_Tx|IFCAP_CSUM_UDPv4_Tx| 1299 IFCAP_CSUM_IPv4_Tx|IFCAP_CSUM_TCPv4_Tx|IFCAP_CSUM_UDPv4_Tx|
1299 IFCAP_CSUM_TCPv6_Tx|IFCAP_CSUM_UDPv6_Tx| 1300 IFCAP_CSUM_TCPv6_Tx|IFCAP_CSUM_UDPv6_Tx|
1300 IFCAP_TSOv4|IFCAP_TSOv6)) != 0) { 1301 IFCAP_TSOv4|IFCAP_TSOv6)) != 0) {
1301 splx(s); 1302 splx(s);
1302 return EINVAL; 1303 return EINVAL;
1303 } 1304 }
1304 break; 1305 break;
1305 } 1306 }
1306 1307
1307 error = ether_ioctl(ifp, cmd, data); 1308 error = ether_ioctl(ifp, cmd, data);
1308 if (error == ENETRESET && (ifp->if_flags & IFF_RUNNING)) { 1309 if (error == ENETRESET && (ifp->if_flags & IFF_RUNNING)) {
1309 if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) { 1310 if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
1310 jme_set_filter(sc); 1311 jme_set_filter(sc);
1311 error = 0; 1312 error = 0;
1312 } else { 1313 } else {
1313 error = jme_init(ifp, 0); 1314 error = jme_init(ifp, 0);
1314 } 1315 }
1315 } 1316 }
1316 splx(s); 1317 splx(s);
1317 return error; 1318 return error;
1318} 1319}
1319 1320
1320static int 1321static int
1321jme_encap(struct jme_softc *sc, struct mbuf **m_head) 1322jme_encap(struct jme_softc *sc, struct mbuf **m_head)
1322{ 1323{
1323 struct jme_desc *txd; 1324 struct jme_desc *txd;
1324 struct jme_desc *desc; 1325 struct jme_desc *desc;
1325 struct mbuf *m; 1326 struct mbuf *m;
1326 struct m_tag *mtag; 1327 struct m_tag *mtag;
1327 int error, i, prod, headdsc, nsegs; 1328 int error, i, prod, headdsc, nsegs;
1328 uint32_t cflags, tso_segsz; 1329 uint32_t cflags, tso_segsz;
1329 1330
1330 if (((*m_head)->m_pkthdr.csum_flags & (M_CSUM_TSOv4|M_CSUM_TSOv6)) != 0){ 1331 if (((*m_head)->m_pkthdr.csum_flags & (M_CSUM_TSOv4|M_CSUM_TSOv6)) != 0){
1331 /* 1332 /*
1332 * Due to the adherence to NDIS specification JMC250 1333 * Due to the adherence to NDIS specification JMC250
1333 * assumes upper stack computed TCP pseudo checksum 1334 * assumes upper stack computed TCP pseudo checksum
1334 * without including payload length. This breaks 1335 * without including payload length. This breaks
1335 * checksum offload for TSO case so recompute TCP 1336 * checksum offload for TSO case so recompute TCP
1336 * pseudo checksum for JMC250. Hopefully this wouldn't 1337 * pseudo checksum for JMC250. Hopefully this wouldn't
1337 * be much burden on modern CPUs. 1338 * be much burden on modern CPUs.
1338 */ 1339 */
1339 bool v4 = ((*m_head)->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0; 1340 bool v4 = ((*m_head)->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
1340 int iphl = v4 ? 1341 int iphl = v4 ?
1341 M_CSUM_DATA_IPv4_IPHL((*m_head)->m_pkthdr.csum_data) : 1342 M_CSUM_DATA_IPv4_IPHL((*m_head)->m_pkthdr.csum_data) :
1342 M_CSUM_DATA_IPv6_HL((*m_head)->m_pkthdr.csum_data); 1343 M_CSUM_DATA_IPv6_HL((*m_head)->m_pkthdr.csum_data);
1343 /* 1344 /*
1344 * note: we support vlan offloading, so we should never have 1345 * note: we support vlan offloading, so we should never have
1345 * a ETHERTYPE_VLAN packet here - so ETHER_HDR_LEN is always 1346 * a ETHERTYPE_VLAN packet here - so ETHER_HDR_LEN is always
1346 * right. 1347 * right.
1347 */ 1348 */
1348 int hlen = ETHER_HDR_LEN + iphl; 1349 int hlen = ETHER_HDR_LEN + iphl;
1349 1350
1350 if (__predict_false((*m_head)->m_len < 1351 if (__predict_false((*m_head)->m_len <
1351 (hlen + sizeof(struct tcphdr)))) { 1352 (hlen + sizeof(struct tcphdr)))) {
1352 /* 1353 /*
1353 * TCP/IP headers are not in the first mbuf; we need 1354 * TCP/IP headers are not in the first mbuf; we need
1354 * to do this the slow and painful way. Let's just 1355 * to do this the slow and painful way. Let's just
1355 * hope this doesn't happen very often. 1356 * hope this doesn't happen very often.
1356 */ 1357 */
1357 struct tcphdr th; 1358 struct tcphdr th;
1358 1359
1359 m_copydata((*m_head), hlen, sizeof(th), &th); 1360 m_copydata((*m_head), hlen, sizeof(th), &th);
1360 if (v4) { 1361 if (v4) {
1361 struct ip ip; 1362 struct ip ip;
1362 1363
1363 m_copydata((*m_head), ETHER_HDR_LEN, 1364 m_copydata((*m_head), ETHER_HDR_LEN,
1364 sizeof(ip), &ip); 1365 sizeof(ip), &ip);
1365 ip.ip_len = 0; 1366 ip.ip_len = 0;
1366 m_copyback((*m_head), 1367 m_copyback((*m_head),
1367 ETHER_HDR_LEN + offsetof(struct ip, ip_len), 1368 ETHER_HDR_LEN + offsetof(struct ip, ip_len),
1368 sizeof(ip.ip_len), &ip.ip_len); 1369 sizeof(ip.ip_len), &ip.ip_len);
1369 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr, 1370 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
1370 ip.ip_dst.s_addr, htons(IPPROTO_TCP)); 1371 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
1371 } else { 1372 } else {
1372#if INET6 1373#if INET6
1373 struct ip6_hdr ip6; 1374 struct ip6_hdr ip6;
1374 1375
1375 m_copydata((*m_head), ETHER_HDR_LEN, 1376 m_copydata((*m_head), ETHER_HDR_LEN,
1376 sizeof(ip6), &ip6); 1377 sizeof(ip6), &ip6);
1377 ip6.ip6_plen = 0; 1378 ip6.ip6_plen = 0;
1378 m_copyback((*m_head), ETHER_HDR_LEN + 1379 m_copyback((*m_head), ETHER_HDR_LEN +
1379 offsetof(struct ip6_hdr, ip6_plen), 1380 offsetof(struct ip6_hdr, ip6_plen),
1380 sizeof(ip6.ip6_plen), &ip6.ip6_plen); 1381 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
1381 th.th_sum = in6_cksum_phdr(&ip6.ip6_src, 1382 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
1382 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP)); 1383 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
1383#endif /* INET6 */ 1384#endif /* INET6 */
1384 } 1385 }
1385 m_copyback((*m_head), 1386 m_copyback((*m_head),
1386 hlen + offsetof(struct tcphdr, th_sum), 1387 hlen + offsetof(struct tcphdr, th_sum),
1387 sizeof(th.th_sum), &th.th_sum); 1388 sizeof(th.th_sum), &th.th_sum);
1388 1389
1389 hlen += th.th_off << 2; 1390 hlen += th.th_off << 2;
1390 } else { 1391 } else {
1391 /* 1392 /*
1392 * TCP/IP headers are in the first mbuf; we can do 1393 * TCP/IP headers are in the first mbuf; we can do
1393 * this the easy way. 1394 * this the easy way.
1394 */ 1395 */
1395 struct tcphdr *th; 1396 struct tcphdr *th;
1396 1397
1397 if (v4) { 1398 if (v4) {
1398 struct ip *ip = 1399 struct ip *ip =
1399 (void *)(mtod((*m_head), char *) + 1400 (void *)(mtod((*m_head), char *) +
1400 ETHER_HDR_LEN); 1401 ETHER_HDR_LEN);
1401 th = (void *)(mtod((*m_head), char *) + hlen); 1402 th = (void *)(mtod((*m_head), char *) + hlen);
1402 1403
1403 ip->ip_len = 0; 1404 ip->ip_len = 0;
1404 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, 1405 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
1405 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 1406 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1406 } else { 1407 } else {
1407#if INET6 1408#if INET6
1408 struct ip6_hdr *ip6 = 1409 struct ip6_hdr *ip6 =
1409 (void *)(mtod((*m_head), char *) + 1410 (void *)(mtod((*m_head), char *) +
1410 ETHER_HDR_LEN); 1411 ETHER_HDR_LEN);
1411 th = (void *)(mtod((*m_head), char *) + hlen); 1412 th = (void *)(mtod((*m_head), char *) + hlen);
1412 1413
1413 ip6->ip6_plen = 0; 1414 ip6->ip6_plen = 0;
1414 th->th_sum = in6_cksum_phdr(&ip6->ip6_src, 1415 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
1415 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP)); 1416 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
1416#endif /* INET6 */ 1417#endif /* INET6 */
1417 } 1418 }
1418 hlen += th->th_off << 2; 1419 hlen += th->th_off << 2;
1419 } 1420 }
1420 1421
1421 } 1422 }
1422 1423
1423 prod = sc->jme_tx_prod; 1424 prod = sc->jme_tx_prod;
1424 txd = &sc->jme_txring[prod]; 1425 txd = &sc->jme_txring[prod];
1425 1426
1426 error = bus_dmamap_load_mbuf(sc->jme_dmatag, sc->jme_txmbufm[prod], 1427 error = bus_dmamap_load_mbuf(sc->jme_dmatag, sc->jme_txmbufm[prod],
1427 *m_head, BUS_DMA_WRITE); 1428 *m_head, BUS_DMA_WRITE);
1428 if (error) { 1429 if (error) {
1429 if (error == EFBIG) { 1430 if (error == EFBIG) {
1430 log(LOG_ERR, "%s: Tx packet consumes too many " 1431 log(LOG_ERR, "%s: Tx packet consumes too many "
1431 "DMA segments, dropping...\n", 1432 "DMA segments, dropping...\n",
1432 device_xname(sc->jme_dev)); 1433 device_xname(sc->jme_dev));
1433 m_freem(*m_head); 1434 m_freem(*m_head);
1434 m_head = NULL; 1435 m_head = NULL;
1435 } 1436 }
1436 return (error); 1437 return (error);
1437 } 1438 }
1438 /* 1439 /*
1439 * Check descriptor overrun. Leave one free descriptor. 1440 * Check descriptor overrun. Leave one free descriptor.
1440 * Since we always use 64bit address mode for transmitting, 1441 * Since we always use 64bit address mode for transmitting,
1441 * each Tx request requires one more dummy descriptor. 1442 * each Tx request requires one more dummy descriptor.
1442 */ 1443 */
1443 nsegs = sc->jme_txmbufm[prod]->dm_nsegs; 1444 nsegs = sc->jme_txmbufm[prod]->dm_nsegs;
1444#ifdef JMEDEBUG_TX 1445#ifdef JMEDEBUG_TX
1445 printf("jme_encap prod %d nsegs %d jme_tx_cnt %d\n", prod, nsegs, sc->jme_tx_cnt); 1446 printf("jme_encap prod %d nsegs %d jme_tx_cnt %d\n", prod, nsegs, sc->jme_tx_cnt);
1446#endif 1447#endif
1447 if (sc->jme_tx_cnt + nsegs + 1 > JME_NBUFS - 1) { 1448 if (sc->jme_tx_cnt + nsegs + 1 > JME_NBUFS - 1) {
1448 bus_dmamap_unload(sc->jme_dmatag, sc->jme_txmbufm[prod]); 1449 bus_dmamap_unload(sc->jme_dmatag, sc->jme_txmbufm[prod]);
1449 return (ENOBUFS); 1450 return (ENOBUFS);
1450 } 1451 }
1451 bus_dmamap_sync(sc->jme_dmatag, sc->jme_txmbufm[prod], 1452 bus_dmamap_sync(sc->jme_dmatag, sc->jme_txmbufm[prod],
1452 0, sc->jme_txmbufm[prod]->dm_mapsize, BUS_DMASYNC_PREWRITE); 1453 0, sc->jme_txmbufm[prod]->dm_mapsize, BUS_DMASYNC_PREWRITE);
1453 1454
1454 m = *m_head; 1455 m = *m_head;
1455 cflags = 0; 1456 cflags = 0;
1456 tso_segsz = 0; 1457 tso_segsz = 0;
1457 /* Configure checksum offload and TSO. */ 1458 /* Configure checksum offload and TSO. */
1458 if ((m->m_pkthdr.csum_flags & (M_CSUM_TSOv4|M_CSUM_TSOv6)) != 0) { 1459 if ((m->m_pkthdr.csum_flags & (M_CSUM_TSOv4|M_CSUM_TSOv6)) != 0) {
1459 tso_segsz = (uint32_t)m->m_pkthdr.segsz << JME_TD_MSS_SHIFT; 1460 tso_segsz = (uint32_t)m->m_pkthdr.segsz << JME_TD_MSS_SHIFT;
1460 cflags |= JME_TD_TSO; 1461 cflags |= JME_TD_TSO;
1461 } else { 1462 } else {
1462 if ((m->m_pkthdr.csum_flags & M_CSUM_IPv4) != 0) 1463 if ((m->m_pkthdr.csum_flags & M_CSUM_IPv4) != 0)
1463 cflags |= JME_TD_IPCSUM; 1464 cflags |= JME_TD_IPCSUM;
1464 if ((m->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_TCPv6)) != 0) 1465 if ((m->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_TCPv6)) != 0)
1465 cflags |= JME_TD_TCPCSUM; 1466 cflags |= JME_TD_TCPCSUM;
1466 if ((m->m_pkthdr.csum_flags & (M_CSUM_UDPv4|M_CSUM_UDPv6)) != 0) 1467 if ((m->m_pkthdr.csum_flags & (M_CSUM_UDPv4|M_CSUM_UDPv6)) != 0)
1467 cflags |= JME_TD_UDPCSUM; 1468 cflags |= JME_TD_UDPCSUM;
1468 } 1469 }
1469 /* Configure VLAN. */ 1470 /* Configure VLAN. */
1470 if ((mtag = VLAN_OUTPUT_TAG(&sc->jme_ec, m)) != NULL) { 1471 if ((mtag = VLAN_OUTPUT_TAG(&sc->jme_ec, m)) != NULL) {
1471 cflags |= (VLAN_TAG_VALUE(mtag) & JME_TD_VLAN_MASK); 1472 cflags |= (VLAN_TAG_VALUE(mtag) & JME_TD_VLAN_MASK);
1472 cflags |= JME_TD_VLAN_TAG; 1473 cflags |= JME_TD_VLAN_TAG;
1473 } 1474 }
1474 1475
1475 desc = &sc->jme_txring[prod]; 1476 desc = &sc->jme_txring[prod];
1476 desc->flags = htole32(cflags); 1477 desc->flags = htole32(cflags);
1477 desc->buflen = htole32(tso_segsz); 1478 desc->buflen = htole32(tso_segsz);
1478 desc->addr_hi = htole32(m->m_pkthdr.len); 1479 desc->addr_hi = htole32(m->m_pkthdr.len);
1479 desc->addr_lo = 0; 1480 desc->addr_lo = 0;
1480 headdsc = prod; 1481 headdsc = prod;
1481 sc->jme_tx_cnt++; 1482 sc->jme_tx_cnt++;
1482 JME_DESC_INC(prod, JME_NBUFS); 1483 JME_DESC_INC(prod, JME_NBUFS);
1483 for (i = 0; i < nsegs; i++) { 1484 for (i = 0; i < nsegs; i++) {
1484 desc = &sc->jme_txring[prod]; 1485 desc = &sc->jme_txring[prod];
1485 desc->flags = htole32(JME_TD_OWN | JME_TD_64BIT); 1486 desc->flags = htole32(JME_TD_OWN | JME_TD_64BIT);
1486 desc->buflen = 1487 desc->buflen =
1487 htole32(sc->jme_txmbufm[headdsc]->dm_segs[i].ds_len); 1488 htole32(sc->jme_txmbufm[headdsc]->dm_segs[i].ds_len);
1488 desc->addr_hi = htole32( 1489 desc->addr_hi = htole32(
1489 JME_ADDR_HI(sc->jme_txmbufm[headdsc]->dm_segs[i].ds_addr)); 1490 JME_ADDR_HI(sc->jme_txmbufm[headdsc]->dm_segs[i].ds_addr));
1490 desc->addr_lo = htole32( 1491 desc->addr_lo = htole32(
1491 JME_ADDR_LO(sc->jme_txmbufm[headdsc]->dm_segs[i].ds_addr)); 1492 JME_ADDR_LO(sc->jme_txmbufm[headdsc]->dm_segs[i].ds_addr));
1492 bus_dmamap_sync(sc->jme_dmatag, sc->jme_txmap, 1493 bus_dmamap_sync(sc->jme_dmatag, sc->jme_txmap,
1493 prod * sizeof(struct jme_desc), sizeof(struct jme_desc), 1494 prod * sizeof(struct jme_desc), sizeof(struct jme_desc),
1494 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1495 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1495 sc->jme_txmbuf[prod] = NULL; 1496 sc->jme_txmbuf[prod] = NULL;
1496 sc->jme_tx_cnt++; 1497 sc->jme_tx_cnt++;
1497 JME_DESC_INC(prod, JME_NBUFS); 1498 JME_DESC_INC(prod, JME_NBUFS);
1498 } 1499 }
1499 1500
1500 /* Update producer index. */ 1501 /* Update producer index. */
1501 sc->jme_tx_prod = prod; 1502 sc->jme_tx_prod = prod;
1502#ifdef JMEDEBUG_TX 1503#ifdef JMEDEBUG_TX
1503 printf("jme_encap prod now %d\n", sc->jme_tx_prod); 1504 printf("jme_encap prod now %d\n", sc->jme_tx_prod);
1504#endif 1505#endif
1505 /* 1506 /*
1506 * Finally request interrupt and give the first descriptor 1507 * Finally request interrupt and give the first descriptor
1507 * owenership to hardware. 1508 * owenership to hardware.
1508 */ 1509 */
1509 desc = &sc->jme_txring[headdsc]; 1510 desc = &sc->jme_txring[headdsc];
1510 desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR); 1511 desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);
1511 bus_dmamap_sync(sc->jme_dmatag, sc->jme_txmap, 1512 bus_dmamap_sync(sc->jme_dmatag, sc->jme_txmap,
1512 headdsc * sizeof(struct jme_desc), sizeof(struct jme_desc), 1513 headdsc * sizeof(struct jme_desc), sizeof(struct jme_desc),
1513 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1514 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1514 1515
1515 sc->jme_txmbuf[headdsc] = m; 1516 sc->jme_txmbuf[headdsc] = m;
1516 return (0); 1517 return (0);
1517} 1518}
1518 1519
1519static void 1520static void
1520jme_txeof(struct jme_softc *sc) 1521jme_txeof(struct jme_softc *sc)
1521{ 1522{
1522 struct ifnet *ifp; 1523 struct ifnet *ifp;
1523 struct jme_desc *desc; 1524 struct jme_desc *desc;
1524 uint32_t status; 1525 uint32_t status;
1525 int cons, cons0, nsegs, seg; 1526 int cons, cons0, nsegs, seg;
1526 1527
1527 ifp = &sc->jme_if; 1528 ifp = &sc->jme_if;
1528 1529
1529#ifdef JMEDEBUG_TX 1530#ifdef JMEDEBUG_TX
1530 printf("jme_txeof cons %d prod %d\n", 1531 printf("jme_txeof cons %d prod %d\n",
1531 sc->jme_tx_cons, sc->jme_tx_prod); 1532 sc->jme_tx_cons, sc->jme_tx_prod);
1532 printf("jme_txeof JME_TXCSR 0x%x JME_TXDBA_LO 0x%x JME_TXDBA_HI 0x%x " 1533 printf("jme_txeof JME_TXCSR 0x%x JME_TXDBA_LO 0x%x JME_TXDBA_HI 0x%x "
1533 "JME_TXQDC 0x%x JME_TXNDA 0x%x JME_TXMAC 0x%x JME_TXPFC 0x%x " 1534 "JME_TXQDC 0x%x JME_TXNDA 0x%x JME_TXMAC 0x%x JME_TXPFC 0x%x "
1534 "JME_TXTRHD 0x%x\n", 1535 "JME_TXTRHD 0x%x\n",
1535 bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXCSR), 1536 bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXCSR),
1536 bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXDBA_LO), 1537 bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXDBA_LO),
1537 bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXDBA_HI), 1538 bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXDBA_HI),
1538 bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXQDC), 1539 bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXQDC),
1539 bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXNDA), 1540 bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXNDA),
1540 bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXMAC), 1541 bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXMAC),
1541 bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXPFC), 1542 bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXPFC),
1542 bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXTRHD)); 1543 bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXTRHD));
1543 for (cons = sc->jme_tx_cons; cons != sc->jme_tx_prod; ) { 1544 for (cons = sc->jme_tx_cons; cons != sc->jme_tx_prod; ) {
1544 desc = &sc->jme_txring[cons]; 1545 desc = &sc->jme_txring[cons];
1545 printf("ring[%d] 0x%x 0x%x 0x%x 0x%x\n", cons, 1546 printf("ring[%d] 0x%x 0x%x 0x%x 0x%x\n", cons,
1546 desc->flags, desc->buflen, desc->addr_hi, desc->addr_lo); 1547 desc->flags, desc->buflen, desc->addr_hi, desc->addr_lo);
1547 JME_DESC_INC(cons, JME_NBUFS); 1548 JME_DESC_INC(cons, JME_NBUFS);
1548 } 1549 }
1549#endif 1550#endif
1550 1551
1551 cons = sc->jme_tx_cons; 1552 cons = sc->jme_tx_cons;
1552 if (cons == sc->jme_tx_prod) 1553 if (cons == sc->jme_tx_prod)
1553 return; 1554 return;
1554 1555
1555 /* 1556 /*
1556 * Go through our Tx list and free mbufs for those 1557 * Go through our Tx list and free mbufs for those
1557 * frames which have been transmitted. 1558 * frames which have been transmitted.
1558 */ 1559 */
1559 for (; cons != sc->jme_tx_prod;) { 1560 for (; cons != sc->jme_tx_prod;) {
1560 bus_dmamap_sync(sc->jme_dmatag, sc->jme_txmap, 1561 bus_dmamap_sync(sc->jme_dmatag, sc->jme_txmap,
1561 cons * sizeof(struct jme_desc), sizeof(struct jme_desc), 1562 cons * sizeof(struct jme_desc), sizeof(struct jme_desc),
1562 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1563 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1563 1564
1564 desc = &sc->jme_txring[cons]; 1565 desc = &sc->jme_txring[cons];
1565 status = le32toh(desc->flags); 1566 status = le32toh(desc->flags);
1566#ifdef JMEDEBUG_TX 1567#ifdef JMEDEBUG_TX
1567 printf("jme_txeof %i status 0x%x nsegs %d\n", cons, status, 1568 printf("jme_txeof %i status 0x%x nsegs %d\n", cons, status,
1568 sc->jme_txmbufm[cons]->dm_nsegs); 1569 sc->jme_txmbufm[cons]->dm_nsegs);
1569#endif 1570#endif
1570 if (status & JME_TD_OWN) 1571 if (status & JME_TD_OWN)
1571 break; 1572 break;
1572 1573
1573 if ((status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) != 0) 1574 if ((status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) != 0)
1574 ifp->if_oerrors++; 1575 ifp->if_oerrors++;
1575 else { 1576 else {
1576 ifp->if_opackets++; 1577 ifp->if_opackets++;
1577 if ((status & JME_TD_COLLISION) != 0) 1578 if ((status & JME_TD_COLLISION) != 0)
1578 ifp->if_collisions += 1579 ifp->if_collisions +=
1579 le32toh(desc->buflen) & 1580 le32toh(desc->buflen) &
1580 JME_TD_BUF_LEN_MASK; 1581 JME_TD_BUF_LEN_MASK;
1581 } 1582 }
1582 /* 1583 /*
1583 * Only the first descriptor of multi-descriptor 1584 * Only the first descriptor of multi-descriptor
1584 * transmission is updated so driver have to skip entire 1585 * transmission is updated so driver have to skip entire
1585 * chained buffers for the transmiited frame. In other 1586 * chained buffers for the transmiited frame. In other
1586 * words, JME_TD_OWN bit is valid only at the first 1587 * words, JME_TD_OWN bit is valid only at the first
1587 * descriptor of a multi-descriptor transmission. 1588 * descriptor of a multi-descriptor transmission.
1588 */ 1589 */
1589 nsegs = sc->jme_txmbufm[cons]->dm_nsegs; 1590 nsegs = sc->jme_txmbufm[cons]->dm_nsegs;
1590 cons0 = cons; 1591 cons0 = cons;
1591 JME_DESC_INC(cons, JME_NBUFS); 1592 JME_DESC_INC(cons, JME_NBUFS);
1592 for (seg = 1; seg < nsegs + 1; seg++) { 1593 for (seg = 1; seg < nsegs + 1; seg++) {
1593 bus_dmamap_sync(sc->jme_dmatag, sc->jme_txmap, 1594 bus_dmamap_sync(sc->jme_dmatag, sc->jme_txmap,
1594 cons * sizeof(struct jme_desc), 1595 cons * sizeof(struct jme_desc),
1595 sizeof(struct jme_desc), 1596 sizeof(struct jme_desc),
1596 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1597 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1597 sc->jme_txring[cons].flags = 0; 1598 sc->jme_txring[cons].flags = 0;
1598 JME_DESC_INC(cons, JME_NBUFS); 1599 JME_DESC_INC(cons, JME_NBUFS);
1599 } 1600 }
1600 /* Reclaim transferred mbufs. */ 1601 /* Reclaim transferred mbufs. */
1601 bus_dmamap_sync(sc->jme_dmatag, sc->jme_txmbufm[cons0], 1602 bus_dmamap_sync(sc->jme_dmatag, sc->jme_txmbufm[cons0],
1602 0, sc->jme_txmbufm[cons0]->dm_mapsize, 1603 0, sc->jme_txmbufm[cons0]->dm_mapsize,
1603 BUS_DMASYNC_POSTWRITE); 1604 BUS_DMASYNC_POSTWRITE);
1604 bus_dmamap_unload(sc->jme_dmatag, sc->jme_txmbufm[cons0]); 1605 bus_dmamap_unload(sc->jme_dmatag, sc->jme_txmbufm[cons0]);
1605 1606
1606 KASSERT(sc->jme_txmbuf[cons0] != NULL); 1607 KASSERT(sc->jme_txmbuf[cons0] != NULL);
1607 m_freem(sc->jme_txmbuf[cons0]); 1608 m_freem(sc->jme_txmbuf[cons0]);
1608 sc->jme_txmbuf[cons0] = NULL; 1609 sc->jme_txmbuf[cons0] = NULL;
1609 sc->jme_tx_cnt -= nsegs + 1; 1610 sc->jme_tx_cnt -= nsegs + 1;
1610 KASSERT(sc->jme_tx_cnt >= 0); 1611 KASSERT(sc->jme_tx_cnt >= 0);
1611 sc->jme_if.if_flags &= ~IFF_OACTIVE; 1612 sc->jme_if.if_flags &= ~IFF_OACTIVE;
1612 } 1613 }
1613 sc->jme_tx_cons = cons; 1614 sc->jme_tx_cons = cons;
1614 /* Unarm watchog timer when there is no pending descriptors in queue. */ 1615 /* Unarm watchog timer when there is no pending descriptors in queue. */
1615 if (sc->jme_tx_cnt == 0) 1616 if (sc->jme_tx_cnt == 0)
1616 ifp->if_timer = 0; 1617 ifp->if_timer = 0;
1617#ifdef JMEDEBUG_TX 1618#ifdef JMEDEBUG_TX
1618 printf("jme_txeof jme_tx_cnt %d\n", sc->jme_tx_cnt); 1619 printf("jme_txeof jme_tx_cnt %d\n", sc->jme_tx_cnt);
1619#endif 1620#endif
1620} 1621}
1621 1622
1622static void 1623static void
1623jme_ifstart(struct ifnet *ifp) 1624jme_ifstart(struct ifnet *ifp)
1624{ 1625{
1625 jme_softc_t *sc = ifp->if_softc; 1626 jme_softc_t *sc = ifp->if_softc;
1626 struct mbuf *mb_head; 1627 struct mbuf *mb_head;
1627 int enq; 1628 int enq;
1628 1629
1629 /* 1630 /*
1630 * check if we can free some desc. 1631 * check if we can free some desc.
1631 * Clear TX interrupt status to reset TX coalescing counters. 1632 * Clear TX interrupt status to reset TX coalescing counters.
1632 */ 1633 */
1633 bus_space_write_4(sc->jme_bt_misc, sc->jme_bh_misc, 1634 bus_space_write_4(sc->jme_bt_misc, sc->jme_bh_misc,
1634 JME_INTR_STATUS, INTR_TXQ_COMP); 1635 JME_INTR_STATUS, INTR_TXQ_COMP);
1635 jme_txeof(sc); 1636 jme_txeof(sc);
1636 1637
1637 if ((sc->jme_if.if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) 1638 if ((sc->jme_if.if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
1638 return; 1639 return;
1639 for (enq = 0;; enq++) { 1640 for (enq = 0;; enq++) {
1640nexttx: 1641nexttx:
1641 /* Grab a paquet for output */ 1642 /* Grab a paquet for output */
1642 IFQ_DEQUEUE(&ifp->if_snd, mb_head); 1643 IFQ_DEQUEUE(&ifp->if_snd, mb_head);
1643 if (mb_head == NULL) { 1644 if (mb_head == NULL) {
1644#ifdef JMEDEBUG_TX 1645#ifdef JMEDEBUG_TX
1645 printf("%s: nothing to send\n", __func__); 1646 printf("%s: nothing to send\n", __func__);
1646#endif 1647#endif
1647 break; 1648 break;
1648 } 1649 }
1649 /* try to add this mbuf to the TX ring */ 1650 /* try to add this mbuf to the TX ring */
1650 if (jme_encap(sc, &mb_head)) { 1651 if (jme_encap(sc, &mb_head)) {
1651 if (mb_head == NULL) { 1652 if (mb_head == NULL) {
1652 ifp->if_oerrors++; 1653 ifp->if_oerrors++;
1653 /* packet dropped, try next one */ 1654 /* packet dropped, try next one */
1654 goto nexttx; 1655 goto nexttx;
1655 } 1656 }
1656 /* resource shortage, try again later */ 1657 /* resource shortage, try again later */
1657 IF_PREPEND(&ifp->if_snd, mb_head); 1658 IF_PREPEND(&ifp->if_snd, mb_head);
1658 ifp->if_flags |= IFF_OACTIVE; 1659 ifp->if_flags |= IFF_OACTIVE;
1659 break; 1660 break;
1660 } 1661 }
1661 /* Pass packet to bpf if there is a listener */ 1662 /* Pass packet to bpf if there is a listener */
1662 bpf_mtap(ifp, mb_head); 1663 bpf_mtap(ifp, mb_head);
1663 } 1664 }
1664#ifdef JMEDEBUG_TX 1665#ifdef JMEDEBUG_TX
1665 printf("jme_ifstart enq %d\n", enq); 1666 printf("jme_ifstart enq %d\n", enq);
1666#endif 1667#endif
1667 if (enq) { 1668 if (enq) {
1668 /* 1669 /*
1669 * Set a 5 second timer just in case we don't hear from 1670 * Set a 5 second timer just in case we don't hear from
1670 * the card again. 1671 * the card again.
1671 */ 1672 */
1672 ifp->if_timer = 5; 1673 ifp->if_timer = 5;
1673 /* 1674 /*
1674 * Reading TXCSR takes very long time under heavy load 1675 * Reading TXCSR takes very long time under heavy load
1675 * so cache TXCSR value and writes the ORed value with 1676 * so cache TXCSR value and writes the ORed value with
1676 * the kick command to the TXCSR. This saves one register 1677 * the kick command to the TXCSR. This saves one register
1677 * access cycle. 1678 * access cycle.
1678 */ 1679 */
1679 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXCSR, 1680 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXCSR,
1680 sc->jme_txcsr | TXCSR_TX_ENB | TXCSR_TXQ_N_START(TXCSR_TXQ0)); 1681 sc->jme_txcsr | TXCSR_TX_ENB | TXCSR_TXQ_N_START(TXCSR_TXQ0));
1681#ifdef JMEDEBUG_TX 1682#ifdef JMEDEBUG_TX
1682 printf("jme_ifstart JME_TXCSR 0x%x JME_TXDBA_LO 0x%x JME_TXDBA_HI 0x%x " 1683 printf("jme_ifstart JME_TXCSR 0x%x JME_TXDBA_LO 0x%x JME_TXDBA_HI 0x%x "
1683 "JME_TXQDC 0x%x JME_TXNDA 0x%x JME_TXMAC 0x%x JME_TXPFC 0x%x " 1684 "JME_TXQDC 0x%x JME_TXNDA 0x%x JME_TXMAC 0x%x JME_TXPFC 0x%x "
1684 "JME_TXTRHD 0x%x\n", 1685 "JME_TXTRHD 0x%x\n",
1685 bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXCSR), 1686 bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXCSR),
1686 bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXDBA_LO), 1687 bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXDBA_LO),
1687 bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXDBA_HI), 1688 bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXDBA_HI),
1688 bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXQDC), 1689 bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXQDC),
1689 bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXNDA), 1690 bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXNDA),
1690 bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXMAC), 1691 bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXMAC),
1691 bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXPFC), 1692 bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXPFC),
1692 bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXTRHD)); 1693 bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXTRHD));
1693#endif 1694#endif
1694 } 1695 }
1695} 1696}
1696 1697
1697static void 1698static void
1698jme_ifwatchdog(struct ifnet *ifp) 1699jme_ifwatchdog(struct ifnet *ifp)
1699{ 1700{
1700 jme_softc_t *sc = ifp->if_softc; 1701 jme_softc_t *sc = ifp->if_softc;
1701 1702
1702 if ((ifp->if_flags & IFF_RUNNING) == 0) 1703 if ((ifp->if_flags & IFF_RUNNING) == 0)
1703 return; 1704 return;
1704 printf("%s: device timeout\n", device_xname(sc->jme_dev)); 1705 printf("%s: device timeout\n", device_xname(sc->jme_dev));
1705 ifp->if_oerrors++; 1706 ifp->if_oerrors++;
1706 jme_init(ifp, 0); 1707 jme_init(ifp, 0);
1707} 1708}
1708 1709
1709static int 1710static int
1710jme_mediachange(struct ifnet *ifp) 1711jme_mediachange(struct ifnet *ifp)
1711{ 1712{
1712 int error; 1713 int error;
1713 jme_softc_t *sc = ifp->if_softc; 1714 jme_softc_t *sc = ifp->if_softc;
1714 1715
1715 if ((error = mii_mediachg(&sc->jme_mii)) == ENXIO) 1716 if ((error = mii_mediachg(&sc->jme_mii)) == ENXIO)
1716 error = 0; 1717 error = 0;
1717 else if (error != 0) { 1718 else if (error != 0) {
1718 aprint_error_dev(sc->jme_dev, "could not set media\n"); 1719 aprint_error_dev(sc->jme_dev, "could not set media\n");
1719 return error; 1720 return error;
1720 } 1721 }
1721 return 0; 1722 return 0;
1722} 1723}
1723 1724
1724static void 1725static void
1725jme_ticks(void *v) 1726jme_ticks(void *v)
1726{ 1727{
1727 jme_softc_t *sc = v; 1728 jme_softc_t *sc = v;
1728 int s = splnet(); 1729 int s = splnet();
1729 1730
1730 /* Tick the MII. */ 1731 /* Tick the MII. */
1731 mii_tick(&sc->jme_mii); 1732 mii_tick(&sc->jme_mii);
1732 1733
1733 /* every seconds */ 1734 /* every seconds */
1734 callout_reset(&sc->jme_tick_ch, hz, jme_ticks, sc); 1735 callout_reset(&sc->jme_tick_ch, hz, jme_ticks, sc);
1735 splx(s); 1736 splx(s);
1736} 1737}
1737 1738
1738static void 1739static void
1739jme_mac_config(jme_softc_t *sc) 1740jme_mac_config(jme_softc_t *sc)
1740{ 1741{
1741 uint32_t ghc, gpreg, rxmac, txmac, txpause; 1742 uint32_t ghc, gpreg, rxmac, txmac, txpause;
1742 struct mii_data *mii = &sc->jme_mii; 1743 struct mii_data *mii = &sc->jme_mii;
1743 1744
1744 ghc = 0; 1745 ghc = 0;
1745 rxmac = bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_RXMAC); 1746 rxmac = bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_RXMAC);
1746 rxmac &= ~RXMAC_FC_ENB; 1747 rxmac &= ~RXMAC_FC_ENB;
1747 txmac = bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXMAC); 1748 txmac = bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXMAC);
1748 txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST); 1749 txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
1749 txpause = bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXPFC); 1750 txpause = bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXPFC);
1750 txpause &= ~TXPFC_PAUSE_ENB; 1751 txpause &= ~TXPFC_PAUSE_ENB;
1751 1752
1752 if (mii->mii_media_active & IFM_FDX) { 1753 if (mii->mii_media_active & IFM_FDX) {
1753 ghc |= GHC_FULL_DUPLEX; 1754 ghc |= GHC_FULL_DUPLEX;
1754 rxmac &= ~RXMAC_COLL_DET_ENB; 1755 rxmac &= ~RXMAC_COLL_DET_ENB;
1755 txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | 1756 txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
1756 TXMAC_BACKOFF | TXMAC_CARRIER_EXT | 1757 TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
1757 TXMAC_FRAME_BURST); 1758 TXMAC_FRAME_BURST);
1758 /* Disable retry transmit timer/retry limit. */ 1759 /* Disable retry transmit timer/retry limit. */
1759 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXTRHD, 1760 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXTRHD,
1760 bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXTRHD) 1761 bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXTRHD)
1761 & ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB)); 1762 & ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
1762 } else { 1763 } else {
1763 rxmac |= RXMAC_COLL_DET_ENB; 1764 rxmac |= RXMAC_COLL_DET_ENB;
1764 txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF; 1765 txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
1765 /* Enable retry transmit timer/retry limit. */ 1766 /* Enable retry transmit timer/retry limit. */
1766 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXTRHD, 1767 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXTRHD,
1767 bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXTRHD) | TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB); 1768 bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXTRHD) | TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
1768 } 1769 }
1769 /* Reprogram Tx/Rx MACs with resolved speed/duplex. */ 1770 /* Reprogram Tx/Rx MACs with resolved speed/duplex. */
1770 switch (IFM_SUBTYPE(mii->mii_media_active)) { 1771 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1771 case IFM_10_T: 1772 case IFM_10_T:
1772 ghc |= GHC_SPEED_10 | GHC_CLKSRC_10_100; 1773 ghc |= GHC_SPEED_10 | GHC_CLKSRC_10_100;
1773 break; 1774 break;
1774 case IFM_100_TX: 1775 case IFM_100_TX:
1775 ghc |= GHC_SPEED_100 | GHC_CLKSRC_10_100; 1776 ghc |= GHC_SPEED_100 | GHC_CLKSRC_10_100;
1776 break; 1777 break;
1777 case IFM_1000_T: 1778 case IFM_1000_T:
1778 ghc |= GHC_SPEED_1000 | GHC_CLKSRC_1000; 1779 ghc |= GHC_SPEED_1000 | GHC_CLKSRC_1000;
1779 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0) 1780 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
1780 txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST; 1781 txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
1781 break; 1782 break;
1782 default: 1783 default:
1783 break; 1784 break;
1784 } 1785 }
1785 if ((sc->jme_flags & JME_FLAG_GIGA) && 1786 if ((sc->jme_flags & JME_FLAG_GIGA) &&
1786 sc->jme_chip_rev == DEVICEREVID_JMC250_A2) { 1787 sc->jme_chip_rev == DEVICEREVID_JMC250_A2) {
1787 /* 1788 /*
1788 * Workaround occasional packet loss issue of JMC250 A2 1789 * Workaround occasional packet loss issue of JMC250 A2
1789 * when it runs on half-duplex media. 1790 * when it runs on half-duplex media.
1790 */ 1791 */
1791#ifdef JMEDEBUG 1792#ifdef JMEDEBUG
1792 printf("JME250 A2 workaround\n"); 1793 printf("JME250 A2 workaround\n");
1793#endif 1794#endif
1794 gpreg = bus_space_read_4(sc->jme_bt_misc, sc->jme_bh_misc, 1795 gpreg = bus_space_read_4(sc->jme_bt_misc, sc->jme_bh_misc,
1795 JME_GPREG1); 1796 JME_GPREG1);
1796 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) 1797 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
1797 gpreg &= ~GPREG1_HDPX_FIX; 1798 gpreg &= ~GPREG1_HDPX_FIX;
1798 else 1799 else
1799 gpreg |= GPREG1_HDPX_FIX; 1800 gpreg |= GPREG1_HDPX_FIX;
1800 bus_space_write_4(sc->jme_bt_misc, sc->jme_bh_misc, 1801 bus_space_write_4(sc->jme_bt_misc, sc->jme_bh_misc,
1801 JME_GPREG1, gpreg); 1802 JME_GPREG1, gpreg);
1802 /* Workaround CRC errors at 100Mbps on JMC250 A2. */ 1803 /* Workaround CRC errors at 100Mbps on JMC250 A2. */
1803 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) { 1804 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
1804 /* Extend interface FIFO depth. */ 1805 /* Extend interface FIFO depth. */
1805 jme_mii_write(sc->jme_dev, sc->jme_phyaddr, 1806 jme_mii_write(sc->jme_dev, sc->jme_phyaddr,
1806 0x1B, 0x0000); 1807 0x1B, 0x0000);
1807 } else { 1808 } else {
1808 /* Select default interface FIFO depth. */ 1809 /* Select default interface FIFO depth. */
1809 jme_mii_write(sc->jme_dev, sc->jme_phyaddr, 1810 jme_mii_write(sc->jme_dev, sc->jme_phyaddr,
1810 0x1B, 0x0004); 1811 0x1B, 0x0004);
1811 } 1812 }
1812 } 1813 }
1813 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_GHC, ghc); 1814 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_GHC, ghc);
1814 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_RXMAC, rxmac); 1815 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_RXMAC, rxmac);
1815 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXMAC, txmac); 1816 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXMAC, txmac);
1816 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXPFC, txpause); 1817 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_TXPFC, txpause);
1817} 1818}
1818 1819
1819static void 1820static void
1820jme_set_filter(jme_softc_t *sc) 1821jme_set_filter(jme_softc_t *sc)
1821{ 1822{
1822 struct ifnet *ifp = &sc->jme_if; 1823 struct ifnet *ifp = &sc->jme_if;
1823 struct ether_multistep step; 1824 struct ether_multistep step;
1824 struct ether_multi *enm; 1825 struct ether_multi *enm;
1825 uint32_t hash[2] = {0, 0}; 1826 uint32_t hash[2] = {0, 0};
1826 int i; 1827 int i;
1827 uint32_t rxcfg; 1828 uint32_t rxcfg;
1828 1829
1829 rxcfg = bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_RXMAC); 1830 rxcfg = bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_RXMAC);
1830 rxcfg &= ~ (RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST | 1831 rxcfg &= ~ (RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
1831 RXMAC_ALLMULTI); 1832 RXMAC_ALLMULTI);
1832 /* Always accept frames destined to our station address. */ 1833 /* Always accept frames destined to our station address. */
1833 rxcfg |= RXMAC_UNICAST; 1834 rxcfg |= RXMAC_UNICAST;
1834 if ((ifp->if_flags & IFF_BROADCAST) != 0) 1835 if ((ifp->if_flags & IFF_BROADCAST) != 0)
1835 rxcfg |= RXMAC_BROADCAST; 1836 rxcfg |= RXMAC_BROADCAST;
1836 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { 1837 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
1837 if ((ifp->if_flags & IFF_PROMISC) != 0) 1838 if ((ifp->if_flags & IFF_PROMISC) != 0)
1838 rxcfg |= RXMAC_PROMISC; 1839 rxcfg |= RXMAC_PROMISC;
1839 if ((ifp->if_flags & IFF_ALLMULTI) != 0) 1840 if ((ifp->if_flags & IFF_ALLMULTI) != 0)
1840 rxcfg |= RXMAC_ALLMULTI; 1841 rxcfg |= RXMAC_ALLMULTI;
1841 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, 1842 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac,
1842 JME_MAR0, 0xFFFFFFFF); 1843 JME_MAR0, 0xFFFFFFFF);
1843 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, 1844 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac,
1844 JME_MAR1, 0xFFFFFFFF); 1845 JME_MAR1, 0xFFFFFFFF);
1845 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, 1846 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac,
1846 JME_RXMAC, rxcfg); 1847 JME_RXMAC, rxcfg);
1847 return; 1848 return;
1848 } 1849 }
1849 /* 1850 /*
1850 * Set up the multicast address filter by passing all multicast 1851 * Set up the multicast address filter by passing all multicast
1851 * addresses through a CRC generator, and then using the low-order 1852 * addresses through a CRC generator, and then using the low-order
1852 * 6 bits as an index into the 64 bit multicast hash table. The 1853 * 6 bits as an index into the 64 bit multicast hash table. The
1853 * high order bits select the register, while the rest of the bits 1854 * high order bits select the register, while the rest of the bits
1854 * select the bit within the register. 1855 * select the bit within the register.
1855 */ 1856 */
1856 rxcfg |= RXMAC_MULTICAST; 1857 rxcfg |= RXMAC_MULTICAST;
1857 memset(hash, 0, sizeof(hash)); 1858 memset(hash, 0, sizeof(hash));
1858 1859
1859 ETHER_FIRST_MULTI(step, &sc->jme_ec, enm); 1860 ETHER_FIRST_MULTI(step, &sc->jme_ec, enm);
1860 while (enm != NULL) { 1861 while (enm != NULL) {
1861#ifdef JEMDBUG 1862#ifdef JEMDBUG
1862 printf("%s: addrs %s %s\n", __func__, 1863 printf("%s: addrs %s %s\n", __func__,
1863 ether_sprintf(enm->enm_addrlo), 1864 ether_sprintf(enm->enm_addrlo),
1864 ether_sprintf(enm->enm_addrhi)); 1865 ether_sprintf(enm->enm_addrhi));
1865#endif 1866#endif
1866 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6) == 0) { 1867 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6) == 0) {
1867 i = ether_crc32_be(enm->enm_addrlo, 6); 1868 i = ether_crc32_be(enm->enm_addrlo, 6);
1868 /* Just want the 6 least significant bits. */ 1869 /* Just want the 6 least significant bits. */
1869 i &= 0x3f; 1870 i &= 0x3f;
1870 hash[i / 32] |= 1 << (i%32); 1871 hash[i / 32] |= 1 << (i%32);
1871 } else { 1872 } else {
1872 hash[0] = hash[1] = 0xffffffff; 1873 hash[0] = hash[1] = 0xffffffff;
1873 sc->jme_if.if_flags |= IFF_ALLMULTI; 1874 sc->jme_if.if_flags |= IFF_ALLMULTI;
1874 break; 1875 break;
1875 } 1876 }
1876 ETHER_NEXT_MULTI(step, enm); 1877 ETHER_NEXT_MULTI(step, enm);
1877 } 1878 }
1878#ifdef JMEDEBUG 1879#ifdef JMEDEBUG
1879 printf("%s: hash1 %x has2 %x\n", __func__, hash[0], hash[1]); 1880 printf("%s: hash1 %x has2 %x\n", __func__, hash[0], hash[1]);
1880#endif 1881#endif
1881 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_MAR0, hash[0]); 1882 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_MAR0, hash[0]);
1882 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_MAR1, hash[1]); 1883 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_MAR1, hash[1]);
1883 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_RXMAC, rxcfg); 1884 bus_space_write_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_RXMAC, rxcfg);
1884} 1885}
1885 1886
#if 0
/*
 * Reference implementation of the hardware's 6-bit multicast hash:
 * each output bit b is the XOR of destination-address bits
 * b, b+6, b+12, ..., b+42 (bit 0 = MSB of the first address byte).
 * Kept disabled; the driver uses ether_crc32_be() instead.
 */
static int
jme_multicast_hash(uint8_t *a)
{
	int hash;

/* Extract destination-address bit `bit' (network bit order). */
#define DA(addr,bit) (addr[5 - (bit / 8)] & (1 << (bit % 8)))
/* Parity (XOR) of eight bit-test results. */
#define xor8(a,b,c,d,e,f,g,h)						\
	(((a != 0) + (b != 0) + (c != 0) + (d != 0) +			\
	  (e != 0) + (f != 0) + (g != 0) + (h != 0)) & 1)

	hash  = xor8(DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24),
	    DA(a,30), DA(a,36), DA(a,42));
	hash |= xor8(DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25),
	    DA(a,31), DA(a,37), DA(a,43)) << 1;
	hash |= xor8(DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26),
	    DA(a,32), DA(a,38), DA(a,44)) << 2;
	hash |= xor8(DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27),
	    DA(a,33), DA(a,39), DA(a,45)) << 3;
	hash |= xor8(DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28),
	    DA(a,34), DA(a,40), DA(a,46)) << 4;
	hash |= xor8(DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29),
	    DA(a,35), DA(a,41), DA(a,47)) << 5;

	return hash;
}
#endif
1913 1914
1914static int 1915static int
1915jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val) 1916jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
1916{ 1917{
1917 uint32_t reg; 1918 uint32_t reg;
1918 int i; 1919 int i;
1919 1920
1920 *val = 0; 1921 *val = 0;
1921 for (i = JME_EEPROM_TIMEOUT / 10; i > 0; i--) { 1922 for (i = JME_EEPROM_TIMEOUT / 10; i > 0; i--) {
1922 reg = bus_space_read_4(sc->jme_bt_phy, sc->jme_bh_phy, 1923 reg = bus_space_read_4(sc->jme_bt_phy, sc->jme_bh_phy,
1923 JME_SMBCSR); 1924 JME_SMBCSR);
1924 if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE) 1925 if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
1925 break; 1926 break;
1926 delay(10); 1927 delay(10);
1927 } 1928 }
1928 1929
1929 if (i == 0) { 1930 if (i == 0) {
1930 aprint_error_dev(sc->jme_dev, "EEPROM idle timeout!\n"); 1931 aprint_error_dev(sc->jme_dev, "EEPROM idle timeout!\n");
1931 return (ETIMEDOUT); 1932 return (ETIMEDOUT);
1932 } 1933 }
1933 1934
1934 reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK; 1935 reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
1935 bus_space_write_4(sc->jme_bt_phy, sc->jme_bh_phy, 1936 bus_space_write_4(sc->jme_bt_phy, sc->jme_bh_phy,
1936 JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER); 1937 JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
1937 for (i = JME_EEPROM_TIMEOUT / 10; i > 0; i--) { 1938 for (i = JME_EEPROM_TIMEOUT / 10; i > 0; i--) {
1938 delay(10); 1939 delay(10);
1939 reg = bus_space_read_4(sc->jme_bt_phy, sc->jme_bh_phy, 1940 reg = bus_space_read_4(sc->jme_bt_phy, sc->jme_bh_phy,
1940 JME_SMBINTF); 1941 JME_SMBINTF);
1941 if ((reg & SMBINTF_CMD_TRIGGER) == 0) 1942 if ((reg & SMBINTF_CMD_TRIGGER) == 0)
1942 break; 1943 break;
1943 } 1944 }
1944 1945
1945 if (i == 0) { 1946 if (i == 0) {
1946 aprint_error_dev(sc->jme_dev, "EEPROM read timeout!\n"); 1947 aprint_error_dev(sc->jme_dev, "EEPROM read timeout!\n");
1947 return (ETIMEDOUT); 1948 return (ETIMEDOUT);
1948 } 1949 }
1949 1950
1950 reg = bus_space_read_4(sc->jme_bt_phy, sc->jme_bh_phy, JME_SMBINTF); 1951 reg = bus_space_read_4(sc->jme_bt_phy, sc->jme_bh_phy, JME_SMBINTF);
1951 *val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT; 1952 *val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;
1952 return (0); 1953 return (0);
1953} 1954}
1954 1955
1955 1956
/*
 * Try to recover the station address from the EEPROM by walking its
 * descriptor list.  Each descriptor is a (fup, reg, val) triple; a
 * descriptor whose function/page bits match FUNC0/BAR1 and whose
 * register offset falls inside the PAR0..PAR0+5 window contributes one
 * byte of the MAC address.  Only when all ETHER_ADDR_LEN bytes were
 * found is sc->jme_enaddr updated.  Returns 0 on success, ENOENT if
 * the EEPROM has no valid signature or an incomplete address.
 */
static int
jme_eeprom_macaddr(struct jme_softc *sc)
{
	uint8_t eaddr[ETHER_ADDR_LEN];
	uint8_t fup, reg, val;
	uint32_t offset;
	int match;	/* number of address bytes recovered so far */

	/* The EEPROM must start with the two-byte signature. */
	offset = 0;
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG0)
		return (ENOENT);
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG1)
		return (ENOENT);
	match = 0;
	do {
		/* First byte of a descriptor: function/page/end flags. */
		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
			break;
		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1)
		    == (fup & (JME_EEPROM_FUNC_MASK|JME_EEPROM_PAGE_MASK))) {
			/* Second byte: target register offset. */
			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
				break;
			if (reg >= JME_PAR0 &&
			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
				/* Third byte: one MAC address byte. */
				if (jme_eeprom_read_byte(sc, offset + 2,
				    &val) != 0)
					break;
				eaddr[reg - JME_PAR0] = val;
				match++;
			}
		}
		/* Stop at the end-of-list marker. */
		if (fup & JME_EEPROM_DESC_END)
			break;

		/* Try next eeprom descriptor. */
		offset += JME_EEPROM_DESC_BYTES;
	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

	if (match == ETHER_ADDR_LEN) {
		memcpy(sc->jme_enaddr, eaddr, ETHER_ADDR_LEN);
		return (0);
	}

	return (ENOENT);
}
2002 2003
 2004static int
 2005jme_reg_macaddr(struct jme_softc *sc)
 2006{
 2007 uint32_t par0, par1;
 2008
 2009 par0 = bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_PAR0);
 2010 par1 = bus_space_read_4(sc->jme_bt_mac, sc->jme_bh_mac, JME_PAR1);
 2011 par1 &= 0xffff;
 2012 if ((par0 == 0 && par1 == 0) ||
 2013 (par0 == 0xffffffff && par1 == 0xffff)) {
 2014 return (ENOENT);
 2015 } else {
 2016 sc->jme_enaddr[0] = (par0 >> 0) & 0xff;
 2017 sc->jme_enaddr[1] = (par0 >> 8) & 0xff;
 2018 sc->jme_enaddr[2] = (par0 >> 16) & 0xff;
 2019 sc->jme_enaddr[3] = (par0 >> 24) & 0xff;
 2020 sc->jme_enaddr[4] = (par1 >> 0) & 0xff;
 2021 sc->jme_enaddr[5] = (par1 >> 8) & 0xff;
 2022 }
 2023 return (0);
 2024}
 2025
2003/* 2026/*
2004 * Set up sysctl(3) MIB, hw.jme.* - Individual controllers will be 2027 * Set up sysctl(3) MIB, hw.jme.* - Individual controllers will be
2005 * set up in jme_pci_attach() 2028 * set up in jme_pci_attach()
2006 */ 2029 */
2007SYSCTL_SETUP(sysctl_jme, "sysctl jme subtree setup") 2030SYSCTL_SETUP(sysctl_jme, "sysctl jme subtree setup")
2008{ 2031{
2009 int rc; 2032 int rc;
2010 const struct sysctlnode *node; 2033 const struct sysctlnode *node;
2011 2034
2012 if ((rc = sysctl_createv(clog, 0, NULL, NULL, 2035 if ((rc = sysctl_createv(clog, 0, NULL, NULL,
2013 0, CTLTYPE_NODE, "hw", NULL, 2036 0, CTLTYPE_NODE, "hw", NULL,
2014 NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) { 2037 NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) {
2015 goto err; 2038 goto err;
2016 } 2039 }
2017 2040
2018 if ((rc = sysctl_createv(clog, 0, NULL, &node, 2041 if ((rc = sysctl_createv(clog, 0, NULL, &node,
2019 0, CTLTYPE_NODE, "jme", 2042 0, CTLTYPE_NODE, "jme",
2020 SYSCTL_DESCR("jme interface controls"), 2043 SYSCTL_DESCR("jme interface controls"),
2021 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) { 2044 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
2022 goto err; 2045 goto err;
2023 } 2046 }
2024 2047
2025 jme_root_num = node->sysctl_num; 2048 jme_root_num = node->sysctl_num;
2026 return; 2049 return;
2027 2050
2028err: 2051err:
2029 aprint_error("%s: syctl_createv failed (rc = %d)\n", __func__, rc); 2052 aprint_error("%s: syctl_createv failed (rc = %d)\n", __func__, rc);
2030} 2053}
2031 2054
2032static int 2055static int
2033jme_sysctl_intrxto(SYSCTLFN_ARGS) 2056jme_sysctl_intrxto(SYSCTLFN_ARGS)
2034{ 2057{
2035 int error, t; 2058 int error, t;
2036 struct sysctlnode node; 2059 struct sysctlnode node;
2037 struct jme_softc *sc; 2060 struct jme_softc *sc;
2038 uint32_t reg; 2061 uint32_t reg;
2039 2062
2040 node = *rnode; 2063 node = *rnode;
2041 sc = node.sysctl_data; 2064 sc = node.sysctl_data;
2042 t = sc->jme_intrxto; 2065 t = sc->jme_intrxto;
2043 node.sysctl_data = &t; 2066 node.sysctl_data = &t;
2044 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 2067 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2045 if (error || newp == NULL) 2068 if (error || newp == NULL)
2046 return error; 2069 return error;
2047 2070
2048 if (t < PCCRX_COAL_TO_MIN || t > PCCRX_COAL_TO_MAX) 2071 if (t < PCCRX_COAL_TO_MIN || t > PCCRX_COAL_TO_MAX)
2049 return EINVAL; 2072 return EINVAL;
2050 2073
2051 /* 2074 /*
2052 * update the softc with sysctl-changed value, and mark 2075 * update the softc with sysctl-changed value, and mark
2053 * for hardware update 2076 * for hardware update
2054 */ 2077 */
2055 sc->jme_intrxto = t; 2078 sc->jme_intrxto = t;
2056 /* Configure Rx queue 0 packet completion coalescing. */ 2079 /* Configure Rx queue 0 packet completion coalescing. */
2057 reg = (sc->jme_intrxto << PCCRX_COAL_TO_SHIFT) & PCCRX_COAL_TO_MASK; 2080 reg = (sc->jme_intrxto << PCCRX_COAL_TO_SHIFT) & PCCRX_COAL_TO_MASK;
2058 reg |= (sc->jme_intrxct << PCCRX_COAL_PKT_SHIFT) & PCCRX_COAL_PKT_MASK; 2081 reg |= (sc->jme_intrxct << PCCRX_COAL_PKT_SHIFT) & PCCRX_COAL_PKT_MASK;
2059 bus_space_write_4(sc->jme_bt_misc, sc->jme_bh_misc, JME_PCCRX0, reg); 2082 bus_space_write_4(sc->jme_bt_misc, sc->jme_bh_misc, JME_PCCRX0, reg);
2060 return 0; 2083 return 0;
2061} 2084}
2062 2085
2063static int 2086static int
2064jme_sysctl_intrxct(SYSCTLFN_ARGS) 2087jme_sysctl_intrxct(SYSCTLFN_ARGS)
2065{ 2088{
2066 int error, t; 2089 int error, t;
2067 struct sysctlnode node; 2090 struct sysctlnode node;
2068 struct jme_softc *sc; 2091 struct jme_softc *sc;
2069 uint32_t reg; 2092 uint32_t reg;
2070 2093
2071 node = *rnode; 2094 node = *rnode;
2072 sc = node.sysctl_data; 2095 sc = node.sysctl_data;
2073 t = sc->jme_intrxct; 2096 t = sc->jme_intrxct;
2074 node.sysctl_data = &t; 2097 node.sysctl_data = &t;
2075 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 2098 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2076 if (error || newp == NULL) 2099 if (error || newp == NULL)
2077 return error; 2100 return error;
2078 2101
2079 if (t < PCCRX_COAL_PKT_MIN || t > PCCRX_COAL_PKT_MAX) 2102 if (t < PCCRX_COAL_PKT_MIN || t > PCCRX_COAL_PKT_MAX)
2080 return EINVAL; 2103 return EINVAL;
2081 2104
2082 /* 2105 /*
2083 * update the softc with sysctl-changed value, and mark 2106 * update the softc with sysctl-changed value, and mark
2084 * for hardware update 2107 * for hardware update
2085 */ 2108 */
2086 sc->jme_intrxct = t; 2109 sc->jme_intrxct = t;
2087 /* Configure Rx queue 0 packet completion coalescing. */ 2110 /* Configure Rx queue 0 packet completion coalescing. */
2088 reg = (sc->jme_intrxto << PCCRX_COAL_TO_SHIFT) & PCCRX_COAL_TO_MASK; 2111 reg = (sc->jme_intrxto << PCCRX_COAL_TO_SHIFT) & PCCRX_COAL_TO_MASK;
2089 reg |= (sc->jme_intrxct << PCCRX_COAL_PKT_SHIFT) & PCCRX_COAL_PKT_MASK; 2112 reg |= (sc->jme_intrxct << PCCRX_COAL_PKT_SHIFT) & PCCRX_COAL_PKT_MASK;
2090 bus_space_write_4(sc->jme_bt_misc, sc->jme_bh_misc, JME_PCCRX0, reg); 2113 bus_space_write_4(sc->jme_bt_misc, sc->jme_bh_misc, JME_PCCRX0, reg);
2091 return 0; 2114 return 0;
2092} 2115}
2093 2116
2094static int 2117static int
2095jme_sysctl_inttxto(SYSCTLFN_ARGS) 2118jme_sysctl_inttxto(SYSCTLFN_ARGS)
2096{ 2119{
2097 int error, t; 2120 int error, t;
2098 struct sysctlnode node; 2121 struct sysctlnode node;
2099 struct jme_softc *sc; 2122 struct jme_softc *sc;
2100 uint32_t reg; 2123 uint32_t reg;
2101 2124
2102 node = *rnode; 2125 node = *rnode;
2103 sc = node.sysctl_data; 2126 sc = node.sysctl_data;
2104 t = sc->jme_inttxto; 2127 t = sc->jme_inttxto;
2105 node.sysctl_data = &t; 2128 node.sysctl_data = &t;
2106 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 2129 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2107 if (error || newp == NULL) 2130 if (error || newp == NULL)
2108 return error; 2131 return error;
2109 2132
2110 if (t < PCCTX_COAL_TO_MIN || t > PCCTX_COAL_TO_MAX) 2133 if (t < PCCTX_COAL_TO_MIN || t > PCCTX_COAL_TO_MAX)
2111 return EINVAL; 2134 return EINVAL;
2112 2135
2113 /* 2136 /*
2114 * update the softc with sysctl-changed value, and mark 2137 * update the softc with sysctl-changed value, and mark
2115 * for hardware update 2138 * for hardware update
2116 */ 2139 */
2117 sc->jme_inttxto = t; 2140 sc->jme_inttxto = t;
2118 /* Configure Tx queue 0 packet completion coalescing. */ 2141 /* Configure Tx queue 0 packet completion coalescing. */
2119 reg = (sc->jme_inttxto << PCCTX_COAL_TO_SHIFT) & PCCTX_COAL_TO_MASK; 2142 reg = (sc->jme_inttxto << PCCTX_COAL_TO_SHIFT) & PCCTX_COAL_TO_MASK;
2120 reg |= (sc->jme_inttxct << PCCTX_COAL_PKT_SHIFT) & PCCTX_COAL_PKT_MASK; 2143 reg |= (sc->jme_inttxct << PCCTX_COAL_PKT_SHIFT) & PCCTX_COAL_PKT_MASK;
2121 reg |= PCCTX_COAL_TXQ0; 2144 reg |= PCCTX_COAL_TXQ0;
2122 bus_space_write_4(sc->jme_bt_misc, sc->jme_bh_misc, JME_PCCTX, reg); 2145 bus_space_write_4(sc->jme_bt_misc, sc->jme_bh_misc, JME_PCCTX, reg);
2123 return 0; 2146 return 0;
2124} 2147}
2125 2148
2126static int 2149static int
2127jme_sysctl_inttxct(SYSCTLFN_ARGS) 2150jme_sysctl_inttxct(SYSCTLFN_ARGS)
2128{ 2151{
2129 int error, t; 2152 int error, t;
2130 struct sysctlnode node; 2153 struct sysctlnode node;
2131 struct jme_softc *sc; 2154 struct jme_softc *sc;
2132 uint32_t reg; 2155 uint32_t reg;
2133 2156
2134 node = *rnode; 2157 node = *rnode;
2135 sc = node.sysctl_data; 2158 sc = node.sysctl_data;
2136 t = sc->jme_inttxct; 2159 t = sc->jme_inttxct;
2137 node.sysctl_data = &t; 2160 node.sysctl_data = &t;
2138 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 2161 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2139 if (error || newp == NULL) 2162 if (error || newp == NULL)
2140 return error; 2163 return error;
2141 2164
2142 if (t < PCCTX_COAL_PKT_MIN || t > PCCTX_COAL_PKT_MAX) 2165 if (t < PCCTX_COAL_PKT_MIN || t > PCCTX_COAL_PKT_MAX)
2143 return EINVAL; 2166 return EINVAL;
2144 2167
2145 /* 2168 /*
2146 * update the softc with sysctl-changed value, and mark 2169 * update the softc with sysctl-changed value, and mark
2147 * for hardware update 2170 * for hardware update
2148 */ 2171 */
2149 sc->jme_inttxct = t; 2172 sc->jme_inttxct = t;
2150 /* Configure Tx queue 0 packet completion coalescing. */ 2173 /* Configure Tx queue 0 packet completion coalescing. */
2151 reg = (sc->jme_inttxto << PCCTX_COAL_TO_SHIFT) & PCCTX_COAL_TO_MASK; 2174 reg = (sc->jme_inttxto << PCCTX_COAL_TO_SHIFT) & PCCTX_COAL_TO_MASK;
2152 reg |= (sc->jme_inttxct << PCCTX_COAL_PKT_SHIFT) & PCCTX_COAL_PKT_MASK; 2175 reg |= (sc->jme_inttxct << PCCTX_COAL_PKT_SHIFT) & PCCTX_COAL_PKT_MASK;
2153 reg |= PCCTX_COAL_TXQ0; 2176 reg |= PCCTX_COAL_TXQ0;
2154 bus_space_write_4(sc->jme_bt_misc, sc->jme_bh_misc, JME_PCCTX, reg); 2177 bus_space_write_4(sc->jme_bt_misc, sc->jme_bh_misc, JME_PCCTX, reg);
2155 return 0; 2178 return 0;
2156} 2179}