Tue Oct 13 07:47:45 2015 UTC ()
refactor: change some macros to functions

Change the following macros into functions:
    - WM_CDTXSYNC
    - WM_CDRXSYNC
    - WM_INIT_RXDESC


(knakahara)
diff -r1.351 -r1.352 src/sys/dev/pci/if_wm.c

cvs diff -r1.351 -r1.352 src/sys/dev/pci/if_wm.c (expand / switch to unified diff)

--- src/sys/dev/pci/if_wm.c 2015/10/08 09:28:13 1.351
+++ src/sys/dev/pci/if_wm.c 2015/10/13 07:47:45 1.352
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: if_wm.c,v 1.351 2015/10/08 09:28:13 msaitoh Exp $ */ 1/* $NetBSD: if_wm.c,v 1.352 2015/10/13 07:47:45 knakahara Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc. 4 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc. 7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
@@ -73,27 +73,27 @@ @@ -73,27 +73,27 @@
73 * TODO (in order of importance): 73 * TODO (in order of importance):
74 * 74 *
75 * - Check XXX'ed comments 75 * - Check XXX'ed comments
76 * - EEE (Energy Efficiency Ethernet) 76 * - EEE (Energy Efficiency Ethernet)
77 * - Multi queue 77 * - Multi queue
78 * - Image Unique ID 78 * - Image Unique ID
79 * - LPLU other than PCH* 79 * - LPLU other than PCH*
80 * - Virtual Function 80 * - Virtual Function
81 * - Set LED correctly (based on contents in EEPROM) 81 * - Set LED correctly (based on contents in EEPROM)
82 * - Rework how parameters are loaded from the EEPROM. 82 * - Rework how parameters are loaded from the EEPROM.
83 */ 83 */
84 84
85#include <sys/cdefs.h> 85#include <sys/cdefs.h>
86__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.351 2015/10/08 09:28:13 msaitoh Exp $"); 86__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.352 2015/10/13 07:47:45 knakahara Exp $");
87 87
88#ifdef _KERNEL_OPT 88#ifdef _KERNEL_OPT
89#include "opt_net_mpsafe.h" 89#include "opt_net_mpsafe.h"
90#endif 90#endif
91 91
92#include <sys/param.h> 92#include <sys/param.h>
93#include <sys/systm.h> 93#include <sys/systm.h>
94#include <sys/callout.h> 94#include <sys/callout.h>
95#include <sys/mbuf.h> 95#include <sys/mbuf.h>
96#include <sys/malloc.h> 96#include <sys/malloc.h>
97#include <sys/kernel.h> 97#include <sys/kernel.h>
98#include <sys/socket.h> 98#include <sys/socket.h>
99#include <sys/ioctl.h> 99#include <sys/ioctl.h>
@@ -496,100 +496,46 @@ do { \ @@ -496,100 +496,46 @@ do { \
496#define WM_CDTXADDR(sc, x) ((sc)->sc_cddma + WM_CDTXOFF((x))) 496#define WM_CDTXADDR(sc, x) ((sc)->sc_cddma + WM_CDTXOFF((x)))
497#define WM_CDRXADDR(sc, x) ((sc)->sc_cddma + WM_CDRXOFF((x))) 497#define WM_CDRXADDR(sc, x) ((sc)->sc_cddma + WM_CDRXOFF((x)))
498 498
499#define WM_CDTXADDR_LO(sc, x) (WM_CDTXADDR((sc), (x)) & 0xffffffffU) 499#define WM_CDTXADDR_LO(sc, x) (WM_CDTXADDR((sc), (x)) & 0xffffffffU)
500#define WM_CDTXADDR_HI(sc, x) \ 500#define WM_CDTXADDR_HI(sc, x) \
501 (sizeof(bus_addr_t) == 8 ? \ 501 (sizeof(bus_addr_t) == 8 ? \
502 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0) 502 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)
503 503
504#define WM_CDRXADDR_LO(sc, x) (WM_CDRXADDR((sc), (x)) & 0xffffffffU) 504#define WM_CDRXADDR_LO(sc, x) (WM_CDRXADDR((sc), (x)) & 0xffffffffU)
505#define WM_CDRXADDR_HI(sc, x) \ 505#define WM_CDRXADDR_HI(sc, x) \
506 (sizeof(bus_addr_t) == 8 ? \ 506 (sizeof(bus_addr_t) == 8 ? \
507 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0) 507 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
508 508
509#define WM_CDTXSYNC(sc, x, n, ops) \ 
510do { \ 
511 int __x, __n; \ 
512 \ 
513 __x = (x); \ 
514 __n = (n); \ 
515 \ 
516 /* If it will wrap around, sync to the end of the ring. */ \ 
517 if ((__x + __n) > WM_NTXDESC(sc)) { \ 
518 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 
519 WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * \ 
520 (WM_NTXDESC(sc) - __x), (ops)); \ 
521 __n -= (WM_NTXDESC(sc) - __x); \ 
522 __x = 0; \ 
523 } \ 
524 \ 
525 /* Now sync whatever is left. */ \ 
526 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 
527 WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops)); \ 
528} while (/*CONSTCOND*/0) 
529 
530#define WM_CDRXSYNC(sc, x, ops) \ 
531do { \ 
532 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 
533 WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops)); \ 
534} while (/*CONSTCOND*/0) 
535 
536#define WM_INIT_RXDESC(sc, x) \ 
537do { \ 
538 struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \ 
539 wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)]; \ 
540 struct mbuf *__m = __rxs->rxs_mbuf; \ 
541 \ 
542 /* \ 
543 * Note: We scoot the packet forward 2 bytes in the buffer \ 
544 * so that the payload after the Ethernet header is aligned \ 
545 * to a 4-byte boundary. \ 
546 * \ 
547 * XXX BRAINDAMAGE ALERT! \ 
548 * The stupid chip uses the same size for every buffer, which \ 
549 * is set in the Receive Control register. We are using the 2K \ 
550 * size option, but what we REALLY want is (2K - 2)! For this \ 
551 * reason, we can't "scoot" packets longer than the standard \ 
552 * Ethernet MTU. On strict-alignment platforms, if the total \ 
553 * size exceeds (2K - 2) we set align_tweak to 0 and let \ 
554 * the upper layer copy the headers. \ 
555 */ \ 
556 __m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak; \ 
557 \ 
558 wm_set_dma_addr(&__rxd->wrx_addr, \ 
559 __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \ 
560 __rxd->wrx_len = 0; \ 
561 __rxd->wrx_cksum = 0; \ 
562 __rxd->wrx_status = 0; \ 
563 __rxd->wrx_errors = 0; \ 
564 __rxd->wrx_special = 0; \ 
565 WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \ 
566 \ 
567 CSR_WRITE((sc), (sc)->sc_rdt_reg, (x)); \ 
568} while (/*CONSTCOND*/0) 
569 
570/* 509/*
571 * Register read/write functions. 510 * Register read/write functions.
572 * Other than CSR_{READ|WRITE}(). 511 * Other than CSR_{READ|WRITE}().
573 */ 512 */
574#if 0 513#if 0
575static inline uint32_t wm_io_read(struct wm_softc *, int); 514static inline uint32_t wm_io_read(struct wm_softc *, int);
576#endif 515#endif
577static inline void wm_io_write(struct wm_softc *, int, uint32_t); 516static inline void wm_io_write(struct wm_softc *, int, uint32_t);
578static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t, 517static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
579 uint32_t, uint32_t); 518 uint32_t, uint32_t);
580static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t); 519static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
581 520
582/* 521/*
 522 * Descriptor sync/init functions.
 523 */
 524static inline void wm_cdtxsync(struct wm_softc *, int, int, int);
 525static inline void wm_cdrxsync(struct wm_softc *, int, int);
 526static inline void wm_init_rxdesc(struct wm_softc *, int);
 527
 528/*
583 * Device driver interface functions and commonly used functions. 529 * Device driver interface functions and commonly used functions.
584 * match, attach, detach, init, start, stop, ioctl, watchdog and so on. 530 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
585 */ 531 */
586static const struct wm_product *wm_lookup(const struct pci_attach_args *); 532static const struct wm_product *wm_lookup(const struct pci_attach_args *);
587static int wm_match(device_t, cfdata_t, void *); 533static int wm_match(device_t, cfdata_t, void *);
588static void wm_attach(device_t, device_t, void *); 534static void wm_attach(device_t, device_t, void *);
589static int wm_detach(device_t, int); 535static int wm_detach(device_t, int);
590static bool wm_suspend(device_t, const pmf_qual_t *); 536static bool wm_suspend(device_t, const pmf_qual_t *);
591static bool wm_resume(device_t, const pmf_qual_t *); 537static bool wm_resume(device_t, const pmf_qual_t *);
592static void wm_watchdog(struct ifnet *); 538static void wm_watchdog(struct ifnet *);
593static void wm_tick(void *); 539static void wm_tick(void *);
594static int wm_ifflags_cb(struct ethercom *); 540static int wm_ifflags_cb(struct ethercom *);
595static int wm_ioctl(struct ifnet *, u_long, void *); 541static int wm_ioctl(struct ifnet *, u_long, void *);
@@ -1373,26 +1319,90 @@ wm_82575_write_8bit_ctlr_reg(struct wm_s @@ -1373,26 +1319,90 @@ wm_82575_write_8bit_ctlr_reg(struct wm_s
1373} 1319}
1374 1320
1375static inline void 1321static inline void
1376wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v) 1322wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1377{ 1323{
1378 wa->wa_low = htole32(v & 0xffffffffU); 1324 wa->wa_low = htole32(v & 0xffffffffU);
1379 if (sizeof(bus_addr_t) == 8) 1325 if (sizeof(bus_addr_t) == 8)
1380 wa->wa_high = htole32((uint64_t) v >> 32); 1326 wa->wa_high = htole32((uint64_t) v >> 32);
1381 else 1327 else
1382 wa->wa_high = 0; 1328 wa->wa_high = 0;
1383} 1329}
1384 1330
1385/* 1331/*
 1332 * Descriptor sync/init functions.
 1333 */
 1334static inline void
 1335wm_cdtxsync(struct wm_softc *sc, int start, int num, int ops)
 1336{
 1337
 1338 /* If it will wrap around, sync to the end of the ring. */
 1339 if ((start + num) > WM_NTXDESC(sc)) {
 1340 bus_dmamap_sync(sc->sc_dmat, sc->sc_cddmamap,
 1341 WM_CDTXOFF(start), sizeof(wiseman_txdesc_t) *
 1342 (WM_NTXDESC(sc) - start), ops);
 1343 num -= (WM_NTXDESC(sc) - start);
 1344 start = 0;
 1345 }
 1346
 1347 /* Now sync whatever is left. */
 1348 bus_dmamap_sync(sc->sc_dmat, sc->sc_cddmamap,
 1349 WM_CDTXOFF(start), sizeof(wiseman_txdesc_t) * num, ops);
 1350}
 1351
 1352static inline void
 1353wm_cdrxsync(struct wm_softc *sc, int start, int ops)
 1354{
 1355
 1356 bus_dmamap_sync(sc->sc_dmat, sc->sc_cddmamap,
 1357 WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
 1358}
 1359
 1360static inline void
 1361wm_init_rxdesc(struct wm_softc *sc, int start)
 1362{
 1363 struct wm_rxsoft *rxs = &sc->sc_rxsoft[start];
 1364 wiseman_rxdesc_t *rxd = &sc->sc_rxdescs[start];
 1365 struct mbuf *m = rxs->rxs_mbuf;
 1366
 1367 /*
 1368 * Note: We scoot the packet forward 2 bytes in the buffer
 1369 * so that the payload after the Ethernet header is aligned
 1370 * to a 4-byte boundary.
 1371 *
 1372 * XXX BRAINDAMAGE ALERT!
 1373 * The stupid chip uses the same size for every buffer, which
 1374 * is set in the Receive Control register. We are using the 2K
 1375 * size option, but what we REALLY want is (2K - 2)! For this
 1376 * reason, we can't "scoot" packets longer than the standard
 1377 * Ethernet MTU. On strict-alignment platforms, if the total
 1378 * size exceeds (2K - 2) we set align_tweak to 0 and let
 1379 * the upper layer copy the headers.
 1380 */
 1381 m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
 1382
 1383 wm_set_dma_addr(&rxd->wrx_addr,
 1384 rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
 1385 rxd->wrx_len = 0;
 1386 rxd->wrx_cksum = 0;
 1387 rxd->wrx_status = 0;
 1388 rxd->wrx_errors = 0;
 1389 rxd->wrx_special = 0;
 1390 wm_cdrxsync(sc, start, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 1391
 1392 CSR_WRITE(sc, sc->sc_rdt_reg, start);
 1393}
 1394
 1395/*
1386 * Device driver interface functions and commonly used functions. 1396 * Device driver interface functions and commonly used functions.
1387 * match, attach, detach, init, start, stop, ioctl, watchdog and so on. 1397 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1388 */ 1398 */
1389 1399
1390/* Lookup supported device table */ 1400/* Lookup supported device table */
1391static const struct wm_product * 1401static const struct wm_product *
1392wm_lookup(const struct pci_attach_args *pa) 1402wm_lookup(const struct pci_attach_args *pa)
1393{ 1403{
1394 const struct wm_product *wmp; 1404 const struct wm_product *wmp;
1395 1405
1396 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) { 1406 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1397 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor && 1407 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1398 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product) 1408 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
@@ -4138,29 +4148,29 @@ wm_add_rxbuf(struct wm_softc *sc, int id @@ -4138,29 +4148,29 @@ wm_add_rxbuf(struct wm_softc *sc, int id
4138 if (error) { 4148 if (error) {
4139 /* XXX XXX XXX */ 4149 /* XXX XXX XXX */
4140 aprint_error_dev(sc->sc_dev, 4150 aprint_error_dev(sc->sc_dev,
4141 "unable to load rx DMA map %d, error = %d\n", 4151 "unable to load rx DMA map %d, error = %d\n",
4142 idx, error); 4152 idx, error);
4143 panic("wm_add_rxbuf"); 4153 panic("wm_add_rxbuf");
4144 } 4154 }
4145 4155
4146 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 4156 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4147 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 4157 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4148 4158
4149 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { 4159 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4150 if ((sc->sc_rctl & RCTL_EN) != 0) 4160 if ((sc->sc_rctl & RCTL_EN) != 0)
4151 WM_INIT_RXDESC(sc, idx); 4161 wm_init_rxdesc(sc, idx);
4152 } else 4162 } else
4153 WM_INIT_RXDESC(sc, idx); 4163 wm_init_rxdesc(sc, idx);
4154 4164
4155 return 0; 4165 return 0;
4156} 4166}
4157 4167
4158/* 4168/*
4159 * wm_rxdrain: 4169 * wm_rxdrain:
4160 * 4170 *
4161 * Drain the receive queue. 4171 * Drain the receive queue.
4162 */ 4172 */
4163static void 4173static void
4164wm_rxdrain(struct wm_softc *sc) 4174wm_rxdrain(struct wm_softc *sc)
4165{ 4175{
4166 struct wm_rxsoft *rxs; 4176 struct wm_rxsoft *rxs;
@@ -4280,27 +4290,27 @@ wm_init_locked(struct ifnet *ifp) @@ -4280,27 +4290,27 @@ wm_init_locked(struct ifnet *ifp)
4280 4290
4281 /* 4291 /*
4282 * For N interrupts/sec, set this value to: 4292 * For N interrupts/sec, set this value to:
4283 * 1000000000 / (N * 256). Note that we set the 4293 * 1000000000 / (N * 256). Note that we set the
4284 * absolute and packet timer values to this value 4294 * absolute and packet timer values to this value
4285 * divided by 4 to get "simple timer" behavior. 4295 * divided by 4 to get "simple timer" behavior.
4286 */ 4296 */
4287 4297
4288 sc->sc_itr = 1500; /* 2604 ints/sec */ 4298 sc->sc_itr = 1500; /* 2604 ints/sec */
4289 } 4299 }
4290 4300
4291 /* Initialize the transmit descriptor ring. */ 4301 /* Initialize the transmit descriptor ring. */
4292 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc)); 4302 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
4293 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc), 4303 wm_cdtxsync(sc, 0, WM_NTXDESC(sc),
4294 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 4304 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4295 sc->sc_txfree = WM_NTXDESC(sc); 4305 sc->sc_txfree = WM_NTXDESC(sc);
4296 sc->sc_txnext = 0; 4306 sc->sc_txnext = 0;
4297 4307
4298 if (sc->sc_type < WM_T_82543) { 4308 if (sc->sc_type < WM_T_82543) {
4299 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0)); 4309 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
4300 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0)); 4310 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
4301 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc)); 4311 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
4302 CSR_WRITE(sc, WMREG_OLD_TDH, 0); 4312 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
4303 CSR_WRITE(sc, WMREG_OLD_TDT, 0); 4313 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
4304 CSR_WRITE(sc, WMREG_OLD_TIDV, 128); 4314 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
4305 } else { 4315 } else {
4306 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0)); 4316 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
@@ -4385,27 +4395,27 @@ wm_init_locked(struct ifnet *ifp) @@ -4385,27 +4395,27 @@ wm_init_locked(struct ifnet *ifp)
4385 if ((error = wm_add_rxbuf(sc, i)) != 0) { 4395 if ((error = wm_add_rxbuf(sc, i)) != 0) {
4386 log(LOG_ERR, "%s: unable to allocate or map " 4396 log(LOG_ERR, "%s: unable to allocate or map "
4387 "rx buffer %d, error = %d\n", 4397 "rx buffer %d, error = %d\n",
4388 device_xname(sc->sc_dev), i, error); 4398 device_xname(sc->sc_dev), i, error);
4389 /* 4399 /*
4390 * XXX Should attempt to run with fewer receive 4400 * XXX Should attempt to run with fewer receive
4391 * XXX buffers instead of just failing. 4401 * XXX buffers instead of just failing.
4392 */ 4402 */
4393 wm_rxdrain(sc); 4403 wm_rxdrain(sc);
4394 goto out; 4404 goto out;
4395 } 4405 }
4396 } else { 4406 } else {
4397 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0) 4407 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4398 WM_INIT_RXDESC(sc, i); 4408 wm_init_rxdesc(sc, i);
4399 /* 4409 /*
4400 * For 82575 and newer device, the RX descriptors 4410 * For 82575 and newer device, the RX descriptors
4401 * must be initialized after the setting of RCTL.EN in 4411 * must be initialized after the setting of RCTL.EN in
4402 * wm_set_filter() 4412 * wm_set_filter()
4403 */ 4413 */
4404 } 4414 }
4405 } 4415 }
4406 sc->sc_rxptr = 0; 4416 sc->sc_rxptr = 0;
4407 sc->sc_rxdiscard = 0; 4417 sc->sc_rxdiscard = 0;
4408 WM_RXCHAIN_RESET(sc); 4418 WM_RXCHAIN_RESET(sc);
4409 4419
4410 /* 4420 /*
4411 * Clear out the VLAN table -- we don't use it (yet). 4421 * Clear out the VLAN table -- we don't use it (yet).
@@ -4760,27 +4770,27 @@ wm_init_locked(struct ifnet *ifp) @@ -4760,27 +4770,27 @@ wm_init_locked(struct ifnet *ifp)
4760 CSR_WRITE(sc, WMREG_PBECCSTS, reg); 4770 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4761 4771
4762 reg = CSR_READ(sc, WMREG_CTRL); 4772 reg = CSR_READ(sc, WMREG_CTRL);
4763 reg |= CTRL_MEHE; 4773 reg |= CTRL_MEHE;
4764 CSR_WRITE(sc, WMREG_CTRL, reg); 4774 CSR_WRITE(sc, WMREG_CTRL, reg);
4765 break; 4775 break;
4766 default: 4776 default:
4767 break; 4777 break;
4768 } 4778 }
4769 4779
4770 /* On 575 and later set RDT only if RX enabled */ 4780 /* On 575 and later set RDT only if RX enabled */
4771 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 4781 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4772 for (i = 0; i < WM_NRXDESC; i++) 4782 for (i = 0; i < WM_NRXDESC; i++)
4773 WM_INIT_RXDESC(sc, i); 4783 wm_init_rxdesc(sc, i);
4774 4784
4775 sc->sc_stopping = false; 4785 sc->sc_stopping = false;
4776 4786
4777 /* Start the one second link check clock. */ 4787 /* Start the one second link check clock. */
4778 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc); 4788 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4779 4789
4780 /* ...all done! */ 4790 /* ...all done! */
4781 ifp->if_flags |= IFF_RUNNING; 4791 ifp->if_flags |= IFF_RUNNING;
4782 ifp->if_flags &= ~IFF_OACTIVE; 4792 ifp->if_flags &= ~IFF_OACTIVE;
4783 4793
4784 out: 4794 out:
4785 sc->sc_if_flags = ifp->if_flags; 4795 sc->sc_if_flags = ifp->if_flags;
4786 if (error) 4796 if (error)
@@ -5050,27 +5060,27 @@ wm_tx_offload(struct wm_softc *sc, struc @@ -5050,27 +5060,27 @@ wm_tx_offload(struct wm_softc *sc, struc
5050 /* Just initialize it to a valid TCP context. */ 5060 /* Just initialize it to a valid TCP context. */
5051 tucs = WTX_TCPIP_TUCSS(offset) | 5061 tucs = WTX_TCPIP_TUCSS(offset) |
5052 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) | 5062 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
5053 WTX_TCPIP_TUCSE(0) /* rest of packet */; 5063 WTX_TCPIP_TUCSE(0) /* rest of packet */;
5054 } 5064 }
5055 5065
5056 /* Fill in the context descriptor. */ 5066 /* Fill in the context descriptor. */
5057 t = (struct livengood_tcpip_ctxdesc *) 5067 t = (struct livengood_tcpip_ctxdesc *)
5058 &sc->sc_txdescs[sc->sc_txnext]; 5068 &sc->sc_txdescs[sc->sc_txnext];
5059 t->tcpip_ipcs = htole32(ipcs); 5069 t->tcpip_ipcs = htole32(ipcs);
5060 t->tcpip_tucs = htole32(tucs); 5070 t->tcpip_tucs = htole32(tucs);
5061 t->tcpip_cmdlen = htole32(cmdlen); 5071 t->tcpip_cmdlen = htole32(cmdlen);
5062 t->tcpip_seg = htole32(seg); 5072 t->tcpip_seg = htole32(seg);
5063 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE); 5073 wm_cdtxsync(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
5064 5074
5065 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext); 5075 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
5066 txs->txs_ndesc++; 5076 txs->txs_ndesc++;
5067 5077
5068 *cmdp = cmd; 5078 *cmdp = cmd;
5069 *fieldsp = fields; 5079 *fieldsp = fields;
5070 5080
5071 return 0; 5081 return 0;
5072} 5082}
5073 5083
5074static void 5084static void
5075wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0) 5085wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
5076{ 5086{
@@ -5453,27 +5463,27 @@ wm_start_locked(struct ifnet *ifp) @@ -5453,27 +5463,27 @@ wm_start_locked(struct ifnet *ifp)
5453 htole32(WTX_CMD_VLE); 5463 htole32(WTX_CMD_VLE);
5454 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan 5464 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
5455 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff); 5465 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5456 } 5466 }
5457 5467
5458 txs->txs_lastdesc = lasttx; 5468 txs->txs_lastdesc = lasttx;
5459 5469
5460 DPRINTF(WM_DEBUG_TX, 5470 DPRINTF(WM_DEBUG_TX,
5461 ("%s: TX: desc %d: cmdlen 0x%08x\n", 5471 ("%s: TX: desc %d: cmdlen 0x%08x\n",
5462 device_xname(sc->sc_dev), 5472 device_xname(sc->sc_dev),
5463 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen))); 5473 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5464 5474
5465 /* Sync the descriptors we're using. */ 5475 /* Sync the descriptors we're using. */
5466 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc, 5476 wm_cdtxsync(sc, sc->sc_txnext, txs->txs_ndesc,
5467 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 5477 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5468 5478
5469 /* Give the packet to the chip. */ 5479 /* Give the packet to the chip. */
5470 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx); 5480 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5471 5481
5472 DPRINTF(WM_DEBUG_TX, 5482 DPRINTF(WM_DEBUG_TX,
5473 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx)); 5483 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5474 5484
5475 DPRINTF(WM_DEBUG_TX, 5485 DPRINTF(WM_DEBUG_TX,
5476 ("%s: TX: finished transmitting packet, job %d\n", 5486 ("%s: TX: finished transmitting packet, job %d\n",
5477 device_xname(sc->sc_dev), sc->sc_txsnext)); 5487 device_xname(sc->sc_dev), sc->sc_txsnext));
5478 5488
5479 /* Advance the tx pointer. */ 5489 /* Advance the tx pointer. */
@@ -5683,27 +5693,27 @@ wm_nq_tx_offload(struct wm_softc *sc, st @@ -5683,27 +5693,27 @@ wm_nq_tx_offload(struct wm_softc *sc, st
5683 } 5693 }
5684 cmdc |= NQTXC_CMD_IP6; 5694 cmdc |= NQTXC_CMD_IP6;
5685 *fieldsp |= NQTXD_FIELDS_TUXSM; 5695 *fieldsp |= NQTXD_FIELDS_TUXSM;
5686 } 5696 }
5687 5697
5688 /* Fill in the context descriptor. */ 5698 /* Fill in the context descriptor. */
5689 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len = 5699 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
5690 htole32(vl_len); 5700 htole32(vl_len);
5691 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0; 5701 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
5692 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd = 5702 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
5693 htole32(cmdc); 5703 htole32(cmdc);
5694 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx = 5704 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
5695 htole32(mssidx); 5705 htole32(mssidx);
5696 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE); 5706 wm_cdtxsync(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
5697 DPRINTF(WM_DEBUG_TX, 5707 DPRINTF(WM_DEBUG_TX,
5698 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev), 5708 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
5699 sc->sc_txnext, 0, vl_len)); 5709 sc->sc_txnext, 0, vl_len));
5700 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc)); 5710 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
5701 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext); 5711 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
5702 txs->txs_ndesc++; 5712 txs->txs_ndesc++;
5703 return 0; 5713 return 0;
5704} 5714}
5705 5715
5706/* 5716/*
5707 * wm_nq_start: [ifnet interface function] 5717 * wm_nq_start: [ifnet interface function]
5708 * 5718 *
5709 * Start packet transmission on the interface for NEWQUEUE devices 5719 * Start packet transmission on the interface for NEWQUEUE devices
@@ -5940,27 +5950,27 @@ wm_nq_start_locked(struct ifnet *ifp) @@ -5940,27 +5950,27 @@ wm_nq_start_locked(struct ifnet *ifp)
5940 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) == 5950 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
5941 (NQTX_CMD_EOP | NQTX_CMD_RS)); 5951 (NQTX_CMD_EOP | NQTX_CMD_RS));
5942 sc->sc_txdescs[lasttx].wtx_cmdlen |= 5952 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5943 htole32(WTX_CMD_EOP | WTX_CMD_RS); 5953 htole32(WTX_CMD_EOP | WTX_CMD_RS);
5944 5954
5945 txs->txs_lastdesc = lasttx; 5955 txs->txs_lastdesc = lasttx;
5946 5956
5947 DPRINTF(WM_DEBUG_TX, 5957 DPRINTF(WM_DEBUG_TX,
5948 ("%s: TX: desc %d: cmdlen 0x%08x\n", 5958 ("%s: TX: desc %d: cmdlen 0x%08x\n",
5949 device_xname(sc->sc_dev), 5959 device_xname(sc->sc_dev),
5950 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen))); 5960 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5951 5961
5952 /* Sync the descriptors we're using. */ 5962 /* Sync the descriptors we're using. */
5953 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc, 5963 wm_cdtxsync(sc, sc->sc_txnext, txs->txs_ndesc,
5954 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 5964 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5955 5965
5956 /* Give the packet to the chip. */ 5966 /* Give the packet to the chip. */
5957 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx); 5967 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5958 sent = true; 5968 sent = true;
5959 5969
5960 DPRINTF(WM_DEBUG_TX, 5970 DPRINTF(WM_DEBUG_TX,
5961 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx)); 5971 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5962 5972
5963 DPRINTF(WM_DEBUG_TX, 5973 DPRINTF(WM_DEBUG_TX,
5964 ("%s: TX: finished transmitting packet, job %d\n", 5974 ("%s: TX: finished transmitting packet, job %d\n",
5965 device_xname(sc->sc_dev), sc->sc_txsnext)); 5975 device_xname(sc->sc_dev), sc->sc_txsnext));
5966 5976
@@ -6016,33 +6026,33 @@ wm_txeof(struct wm_softc *sc) @@ -6016,33 +6026,33 @@ wm_txeof(struct wm_softc *sc)
6016 ifp->if_flags &= ~IFF_OACTIVE; 6026 ifp->if_flags &= ~IFF_OACTIVE;
6017 6027
6018 /* 6028 /*
6019 * Go through the Tx list and free mbufs for those 6029 * Go through the Tx list and free mbufs for those
6020 * frames which have been transmitted. 6030 * frames which have been transmitted.
6021 */ 6031 */
6022 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc); 6032 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
6023 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) { 6033 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
6024 txs = &sc->sc_txsoft[i]; 6034 txs = &sc->sc_txsoft[i];
6025 6035
6026 DPRINTF(WM_DEBUG_TX, 6036 DPRINTF(WM_DEBUG_TX,
6027 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i)); 6037 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
6028 6038
6029 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc, 6039 wm_cdtxsync(sc, txs->txs_firstdesc, txs->txs_ndesc,
6030 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 6040 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
6031 6041
6032 status = 6042 status =
6033 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status; 6043 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
6034 if ((status & WTX_ST_DD) == 0) { 6044 if ((status & WTX_ST_DD) == 0) {
6035 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1, 6045 wm_cdtxsync(sc, txs->txs_lastdesc, 1,
6036 BUS_DMASYNC_PREREAD); 6046 BUS_DMASYNC_PREREAD);
6037 break; 6047 break;
6038 } 6048 }
6039 6049
6040 processed = true; 6050 processed = true;
6041 count++; 6051 count++;
6042 DPRINTF(WM_DEBUG_TX, 6052 DPRINTF(WM_DEBUG_TX,
6043 ("%s: TX: job %d done: descs %d..%d\n", 6053 ("%s: TX: job %d done: descs %d..%d\n",
6044 device_xname(sc->sc_dev), i, txs->txs_firstdesc, 6054 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
6045 txs->txs_lastdesc)); 6055 txs->txs_lastdesc));
6046 6056
6047 /* 6057 /*
6048 * XXX We should probably be using the statistics 6058 * XXX We should probably be using the statistics
@@ -6107,45 +6117,45 @@ wm_rxeof(struct wm_softc *sc) @@ -6107,45 +6117,45 @@ wm_rxeof(struct wm_softc *sc)
6107 struct mbuf *m; 6117 struct mbuf *m;
6108 int i, len; 6118 int i, len;
6109 int count = 0; 6119 int count = 0;
6110 uint8_t status, errors; 6120 uint8_t status, errors;
6111 uint16_t vlantag; 6121 uint16_t vlantag;
6112 6122
6113 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) { 6123 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
6114 rxs = &sc->sc_rxsoft[i]; 6124 rxs = &sc->sc_rxsoft[i];
6115 6125
6116 DPRINTF(WM_DEBUG_RX, 6126 DPRINTF(WM_DEBUG_RX,
6117 ("%s: RX: checking descriptor %d\n", 6127 ("%s: RX: checking descriptor %d\n",
6118 device_xname(sc->sc_dev), i)); 6128 device_xname(sc->sc_dev), i));
6119 6129
6120 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 6130 wm_cdrxsync(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
6121 6131
6122 status = sc->sc_rxdescs[i].wrx_status; 6132 status = sc->sc_rxdescs[i].wrx_status;
6123 errors = sc->sc_rxdescs[i].wrx_errors; 6133 errors = sc->sc_rxdescs[i].wrx_errors;
6124 len = le16toh(sc->sc_rxdescs[i].wrx_len); 6134 len = le16toh(sc->sc_rxdescs[i].wrx_len);
6125 vlantag = sc->sc_rxdescs[i].wrx_special; 6135 vlantag = sc->sc_rxdescs[i].wrx_special;
6126 6136
6127 if ((status & WRX_ST_DD) == 0) { 6137 if ((status & WRX_ST_DD) == 0) {
6128 /* We have processed all of the receive descriptors. */ 6138 /* We have processed all of the receive descriptors. */
6129 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD); 6139 wm_cdrxsync(sc, i, BUS_DMASYNC_PREREAD);
6130 break; 6140 break;
6131 } 6141 }
6132 6142
6133 count++; 6143 count++;
6134 if (__predict_false(sc->sc_rxdiscard)) { 6144 if (__predict_false(sc->sc_rxdiscard)) {
6135 DPRINTF(WM_DEBUG_RX, 6145 DPRINTF(WM_DEBUG_RX,
6136 ("%s: RX: discarding contents of descriptor %d\n", 6146 ("%s: RX: discarding contents of descriptor %d\n",
6137 device_xname(sc->sc_dev), i)); 6147 device_xname(sc->sc_dev), i));
6138 WM_INIT_RXDESC(sc, i); 6148 wm_init_rxdesc(sc, i);
6139 if (status & WRX_ST_EOP) { 6149 if (status & WRX_ST_EOP) {
6140 /* Reset our state. */ 6150 /* Reset our state. */
6141 DPRINTF(WM_DEBUG_RX, 6151 DPRINTF(WM_DEBUG_RX,
6142 ("%s: RX: resetting rxdiscard -> 0\n", 6152 ("%s: RX: resetting rxdiscard -> 0\n",
6143 device_xname(sc->sc_dev))); 6153 device_xname(sc->sc_dev)));
6144 sc->sc_rxdiscard = 0; 6154 sc->sc_rxdiscard = 0;
6145 } 6155 }
6146 continue; 6156 continue;
6147 } 6157 }
6148 6158
6149 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 6159 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
6150 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 6160 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
6151 6161
@@ -6154,27 +6164,27 @@ wm_rxeof(struct wm_softc *sc) @@ -6154,27 +6164,27 @@ wm_rxeof(struct wm_softc *sc)
6154 /* 6164 /*
6155 * Add a new receive buffer to the ring, unless of 6165 * Add a new receive buffer to the ring, unless of
6156 * course the length is zero. Treat the latter as a 6166 * course the length is zero. Treat the latter as a
6157 * failed mapping. 6167 * failed mapping.
6158 */ 6168 */
6159 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) { 6169 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
6160 /* 6170 /*
6161 * Failed, throw away what we've done so 6171 * Failed, throw away what we've done so
6162 * far, and discard the rest of the packet. 6172 * far, and discard the rest of the packet.
6163 */ 6173 */
6164 ifp->if_ierrors++; 6174 ifp->if_ierrors++;
6165 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 6175 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
6166 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 6176 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
6167 WM_INIT_RXDESC(sc, i); 6177 wm_init_rxdesc(sc, i);
6168 if ((status & WRX_ST_EOP) == 0) 6178 if ((status & WRX_ST_EOP) == 0)
6169 sc->sc_rxdiscard = 1; 6179 sc->sc_rxdiscard = 1;
6170 if (sc->sc_rxhead != NULL) 6180 if (sc->sc_rxhead != NULL)
6171 m_freem(sc->sc_rxhead); 6181 m_freem(sc->sc_rxhead);
6172 WM_RXCHAIN_RESET(sc); 6182 WM_RXCHAIN_RESET(sc);
6173 DPRINTF(WM_DEBUG_RX, 6183 DPRINTF(WM_DEBUG_RX,
6174 ("%s: RX: Rx buffer allocation failed, " 6184 ("%s: RX: Rx buffer allocation failed, "
6175 "dropping packet%s\n", device_xname(sc->sc_dev), 6185 "dropping packet%s\n", device_xname(sc->sc_dev),
6176 sc->sc_rxdiscard ? " (discard)" : "")); 6186 sc->sc_rxdiscard ? " (discard)" : ""));
6177 continue; 6187 continue;
6178 } 6188 }
6179 6189
6180 m->m_len = len; 6190 m->m_len = len;