Tue Oct 13 08:27:11 2015 UTC
refactor: rearrange function arguments so per-queue helpers take wm_txqueue/wm_rxqueue pointers directly; interrupt handlers are excluded


(knakahara)
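
The pattern of the change: per-queue helpers that used to take the softc and reach for sc->sc_txq/sc->sc_rxq now take the queue structure itself, and a back-pointer (txq_sc/rxq_sc) recovers the softc where device state such as the DMA tag is still needed. A minimal sketch of the before/after convention, using abridged stand-in types rather than the driver's full definitions:

	/*
	 * Sketch only: the real wm_softc/wm_rxqueue carry many more
	 * fields.  "Before" derived the queue from the softc; "after"
	 * receives the queue and derives the softc.
	 */
	struct wm_softc;			/* device softc, opaque here */

	struct wm_rxqueue {
		struct wm_softc *rxq_sc;	/* back-pointer to the softc */
		/* ... descriptor ring, dmamaps, per-queue lock ... */
	};

	/* Before: static void wm_rxdrain(struct wm_softc *sc); */

	/* After: the caller names the queue, so a future multi-queue
	 * driver can invoke the helper once per queue. */
	static void
	wm_rxdrain(struct wm_rxqueue *rxq)
	{
		struct wm_softc *sc = rxq->rxq_sc;

		/* ... unload this queue's RX dmamaps via sc->sc_dmat ... */
		(void)sc;
	}
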
diff -r1.361 -r1.362 src/sys/dev/pci/if_wm.c


--- src/sys/dev/pci/if_wm.c 2015/10/13 08:23:31 1.361
+++ src/sys/dev/pci/if_wm.c 2015/10/13 08:27:11 1.362
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: if_wm.c,v 1.361 2015/10/13 08:23:31 knakahara Exp $ */ 1/* $NetBSD: if_wm.c,v 1.362 2015/10/13 08:27:11 knakahara Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc. 4 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc. 7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
@@ -73,27 +73,27 @@ @@ -73,27 +73,27 @@
73 * TODO (in order of importance): 73 * TODO (in order of importance):
74 * 74 *
75 * - Check XXX'ed comments 75 * - Check XXX'ed comments
 76 * - EEE (Energy Efficient Ethernet) 76 * - EEE (Energy Efficient Ethernet)
77 * - Multi queue 77 * - Multi queue
78 * - Image Unique ID 78 * - Image Unique ID
79 * - LPLU other than PCH* 79 * - LPLU other than PCH*
80 * - Virtual Function 80 * - Virtual Function
81 * - Set LED correctly (based on contents in EEPROM) 81 * - Set LED correctly (based on contents in EEPROM)
82 * - Rework how parameters are loaded from the EEPROM. 82 * - Rework how parameters are loaded from the EEPROM.
83 */ 83 */
84 84
85#include <sys/cdefs.h> 85#include <sys/cdefs.h>
86__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.361 2015/10/13 08:23:31 knakahara Exp $"); 86__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.362 2015/10/13 08:27:11 knakahara Exp $");
87 87
88#ifdef _KERNEL_OPT 88#ifdef _KERNEL_OPT
89#include "opt_net_mpsafe.h" 89#include "opt_net_mpsafe.h"
90#endif 90#endif
91 91
92#include <sys/param.h> 92#include <sys/param.h>
93#include <sys/systm.h> 93#include <sys/systm.h>
94#include <sys/callout.h> 94#include <sys/callout.h>
95#include <sys/mbuf.h> 95#include <sys/mbuf.h>
96#include <sys/malloc.h> 96#include <sys/malloc.h>
97#include <sys/kmem.h> 97#include <sys/kmem.h>
98#include <sys/kernel.h> 98#include <sys/kernel.h>
99#include <sys/socket.h> 99#include <sys/socket.h>
@@ -524,29 +524,29 @@ do { \ @@ -524,29 +524,29 @@ do { \
524 * Other than CSR_{READ|WRITE}(). 524 * Other than CSR_{READ|WRITE}().
525 */ 525 */
526#if 0 526#if 0
527static inline uint32_t wm_io_read(struct wm_softc *, int); 527static inline uint32_t wm_io_read(struct wm_softc *, int);
528#endif 528#endif
529static inline void wm_io_write(struct wm_softc *, int, uint32_t); 529static inline void wm_io_write(struct wm_softc *, int, uint32_t);
530static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t, 530static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
531 uint32_t, uint32_t); 531 uint32_t, uint32_t);
532static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t); 532static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
533 533
534/* 534/*
535 * Descriptor sync/init functions. 535 * Descriptor sync/init functions.
536 */ 536 */
537static inline void wm_cdtxsync(struct wm_softc *, int, int, int); 537static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
538static inline void wm_cdrxsync(struct wm_softc *, int, int); 538static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
539static inline void wm_init_rxdesc(struct wm_softc *, int); 539static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
540 540
541/* 541/*
542 * Device driver interface functions and commonly used functions. 542 * Device driver interface functions and commonly used functions.
543 * match, attach, detach, init, start, stop, ioctl, watchdog and so on. 543 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
544 */ 544 */
545static const struct wm_product *wm_lookup(const struct pci_attach_args *); 545static const struct wm_product *wm_lookup(const struct pci_attach_args *);
546static int wm_match(device_t, cfdata_t, void *); 546static int wm_match(device_t, cfdata_t, void *);
547static void wm_attach(device_t, device_t, void *); 547static void wm_attach(device_t, device_t, void *);
548static int wm_detach(device_t, int); 548static int wm_detach(device_t, int);
549static bool wm_suspend(device_t, const pmf_qual_t *); 549static bool wm_suspend(device_t, const pmf_qual_t *);
550static bool wm_resume(device_t, const pmf_qual_t *); 550static bool wm_resume(device_t, const pmf_qual_t *);
551static void wm_watchdog(struct ifnet *); 551static void wm_watchdog(struct ifnet *);
552static void wm_tick(void *); 552static void wm_tick(void *);
@@ -557,66 +557,66 @@ static uint16_t wm_check_alt_mac_addr(st @@ -557,66 +557,66 @@ static uint16_t wm_check_alt_mac_addr(st
557static int wm_read_mac_addr(struct wm_softc *, uint8_t *); 557static int wm_read_mac_addr(struct wm_softc *, uint8_t *);
558static void wm_set_ral(struct wm_softc *, const uint8_t *, int); 558static void wm_set_ral(struct wm_softc *, const uint8_t *, int);
559static uint32_t wm_mchash(struct wm_softc *, const uint8_t *); 559static uint32_t wm_mchash(struct wm_softc *, const uint8_t *);
560static void wm_set_filter(struct wm_softc *); 560static void wm_set_filter(struct wm_softc *);
561/* Reset and init related */ 561/* Reset and init related */
562static void wm_set_vlan(struct wm_softc *); 562static void wm_set_vlan(struct wm_softc *);
563static void wm_set_pcie_completion_timeout(struct wm_softc *); 563static void wm_set_pcie_completion_timeout(struct wm_softc *);
564static void wm_get_auto_rd_done(struct wm_softc *); 564static void wm_get_auto_rd_done(struct wm_softc *);
565static void wm_lan_init_done(struct wm_softc *); 565static void wm_lan_init_done(struct wm_softc *);
566static void wm_get_cfg_done(struct wm_softc *); 566static void wm_get_cfg_done(struct wm_softc *);
567static void wm_initialize_hardware_bits(struct wm_softc *); 567static void wm_initialize_hardware_bits(struct wm_softc *);
568static uint32_t wm_rxpbs_adjust_82580(uint32_t); 568static uint32_t wm_rxpbs_adjust_82580(uint32_t);
569static void wm_reset(struct wm_softc *); 569static void wm_reset(struct wm_softc *);
570static int wm_add_rxbuf(struct wm_softc *, int); 570static int wm_add_rxbuf(struct wm_rxqueue *, int);
571static void wm_rxdrain(struct wm_softc *); 571static void wm_rxdrain(struct wm_rxqueue *);
572static int wm_init(struct ifnet *); 572static int wm_init(struct ifnet *);
573static int wm_init_locked(struct ifnet *); 573static int wm_init_locked(struct ifnet *);
574static void wm_stop(struct ifnet *, int); 574static void wm_stop(struct ifnet *, int);
575static void wm_stop_locked(struct ifnet *, int); 575static void wm_stop_locked(struct ifnet *, int);
576static int wm_tx_offload(struct wm_softc *, struct wm_txsoft *, 576static int wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
577 uint32_t *, uint8_t *); 577 uint32_t *, uint8_t *);
578static void wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *); 578static void wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
579static void wm_82547_txfifo_stall(void *); 579static void wm_82547_txfifo_stall(void *);
580static int wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *); 580static int wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
581/* DMA related */ 581/* DMA related */
582static int wm_alloc_tx_descs(struct wm_softc *); 582static int wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
583static void wm_free_tx_descs(struct wm_softc *); 583static void wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
584static void wm_init_tx_descs(struct wm_softc *); 584static void wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
585static void wm_init_tx_regs(struct wm_softc *); 585static void wm_init_tx_regs(struct wm_softc *, struct wm_txqueue *);
586static int wm_alloc_rx_descs(struct wm_softc *); 586static int wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
587static void wm_free_rx_descs(struct wm_softc *); 587static void wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
588static void wm_init_rx_regs(struct wm_softc *); 588static void wm_init_rx_regs(struct wm_softc *, struct wm_rxqueue *);
589static int wm_alloc_tx_buffer(struct wm_softc *); 589static int wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
590static void wm_free_tx_buffer(struct wm_softc *); 590static void wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
591static void wm_init_tx_buffer(struct wm_softc *); 591static void wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
592static int wm_alloc_rx_buffer(struct wm_softc *); 592static int wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
593static void wm_free_rx_buffer(struct wm_softc *); 593static void wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
594static int wm_init_rx_buffer(struct wm_softc *); 594static int wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
595static void wm_init_tx_queue(struct wm_softc *); 595static void wm_init_tx_queue(struct wm_softc *, struct wm_txqueue *);
596static int wm_init_rx_queue(struct wm_softc *); 596static int wm_init_rx_queue(struct wm_softc *, struct wm_rxqueue *);
597static int wm_alloc_txrx_queues(struct wm_softc *); 597static int wm_alloc_txrx_queues(struct wm_softc *);
598static void wm_free_txrx_queues(struct wm_softc *); 598static void wm_free_txrx_queues(struct wm_softc *);
599static int wm_init_txrx_queues(struct wm_softc *); 599static int wm_init_txrx_queues(struct wm_softc *);
600/* Start */ 600/* Start */
601static void wm_start(struct ifnet *); 601static void wm_start(struct ifnet *);
602static void wm_start_locked(struct ifnet *); 602static void wm_start_locked(struct ifnet *);
603static int wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *, 603static int wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
604 uint32_t *, uint32_t *, bool *); 604 uint32_t *, uint32_t *, bool *);
605static void wm_nq_start(struct ifnet *); 605static void wm_nq_start(struct ifnet *);
606static void wm_nq_start_locked(struct ifnet *); 606static void wm_nq_start_locked(struct ifnet *);
607/* Interrupt */ 607/* Interrupt */
608static int wm_txeof(struct wm_softc *); 608static int wm_txeof(struct wm_softc *);
609static void wm_rxeof(struct wm_softc *); 609static void wm_rxeof(struct wm_rxqueue *);
610static void wm_linkintr_gmii(struct wm_softc *, uint32_t); 610static void wm_linkintr_gmii(struct wm_softc *, uint32_t);
611static void wm_linkintr_tbi(struct wm_softc *, uint32_t); 611static void wm_linkintr_tbi(struct wm_softc *, uint32_t);
612static void wm_linkintr_serdes(struct wm_softc *, uint32_t); 612static void wm_linkintr_serdes(struct wm_softc *, uint32_t);
613static void wm_linkintr(struct wm_softc *, uint32_t); 613static void wm_linkintr(struct wm_softc *, uint32_t);
614static int wm_intr_legacy(void *); 614static int wm_intr_legacy(void *);
615#ifdef WM_MSI_MSIX 615#ifdef WM_MSI_MSIX
616static int wm_setup_legacy(struct wm_softc *); 616static int wm_setup_legacy(struct wm_softc *);
617static int wm_setup_msix(struct wm_softc *); 617static int wm_setup_msix(struct wm_softc *);
618static int wm_txintr_msix(void *); 618static int wm_txintr_msix(void *);
619static int wm_rxintr_msix(void *); 619static int wm_rxintr_msix(void *);
620static int wm_linkintr_msix(void *); 620static int wm_linkintr_msix(void *);
621#endif 621#endif
622 622
@@ -1342,57 +1342,57 @@ static inline void @@ -1342,57 +1342,57 @@ static inline void
1342wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v) 1342wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1343{ 1343{
1344 wa->wa_low = htole32(v & 0xffffffffU); 1344 wa->wa_low = htole32(v & 0xffffffffU);
1345 if (sizeof(bus_addr_t) == 8) 1345 if (sizeof(bus_addr_t) == 8)
1346 wa->wa_high = htole32((uint64_t) v >> 32); 1346 wa->wa_high = htole32((uint64_t) v >> 32);
1347 else 1347 else
1348 wa->wa_high = 0; 1348 wa->wa_high = 0;
1349} 1349}
1350 1350
1351/* 1351/*
1352 * Descriptor sync/init functions. 1352 * Descriptor sync/init functions.
1353 */ 1353 */
1354static inline void 1354static inline void
1355wm_cdtxsync(struct wm_softc *sc, int start, int num, int ops) 1355wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
1356{ 1356{
1357 struct wm_txqueue *txq = sc->sc_txq; 1357 struct wm_softc *sc = txq->txq_sc;
1358 1358
1359 /* If it will wrap around, sync to the end of the ring. */ 1359 /* If it will wrap around, sync to the end of the ring. */
1360 if ((start + num) > WM_NTXDESC(txq)) { 1360 if ((start + num) > WM_NTXDESC(txq)) {
1361 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap, 1361 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1362 WM_CDTXOFF(start), sizeof(wiseman_txdesc_t) * 1362 WM_CDTXOFF(start), sizeof(wiseman_txdesc_t) *
1363 (WM_NTXDESC(txq) - start), ops); 1363 (WM_NTXDESC(txq) - start), ops);
1364 num -= (WM_NTXDESC(txq) - start); 1364 num -= (WM_NTXDESC(txq) - start);
1365 start = 0; 1365 start = 0;
1366 } 1366 }
1367 1367
1368 /* Now sync whatever is left. */ 1368 /* Now sync whatever is left. */
1369 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap, 1369 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1370 WM_CDTXOFF(start), sizeof(wiseman_txdesc_t) * num, ops); 1370 WM_CDTXOFF(start), sizeof(wiseman_txdesc_t) * num, ops);
1371} 1371}
1372 1372
1373static inline void 1373static inline void
1374wm_cdrxsync(struct wm_softc *sc, int start, int ops) 1374wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
1375{ 1375{
1376 struct wm_rxqueue *rxq = sc->sc_rxq; 1376 struct wm_softc *sc = rxq->rxq_sc;
1377 1377
1378 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap, 1378 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
1379 WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops); 1379 WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
1380} 1380}
1381 1381
1382static inline void 1382static inline void
1383wm_init_rxdesc(struct wm_softc *sc, int start) 1383wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
1384{ 1384{
1385 struct wm_rxqueue *rxq = sc->sc_rxq; 1385 struct wm_softc *sc = rxq->rxq_sc;
1386 struct wm_rxsoft *rxs = &rxq->rxq_soft[start]; 1386 struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
1387 wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start]; 1387 wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
1388 struct mbuf *m = rxs->rxs_mbuf; 1388 struct mbuf *m = rxs->rxs_mbuf;
1389 1389
1390 /* 1390 /*
1391 * Note: We scoot the packet forward 2 bytes in the buffer 1391 * Note: We scoot the packet forward 2 bytes in the buffer
1392 * so that the payload after the Ethernet header is aligned 1392 * so that the payload after the Ethernet header is aligned
1393 * to a 4-byte boundary. 1393 * to a 4-byte boundary.
1394 1394
1395 * XXX BRAINDAMAGE ALERT! 1395 * XXX BRAINDAMAGE ALERT!
1396 * The stupid chip uses the same size for every buffer, which 1396 * The stupid chip uses the same size for every buffer, which
1397 * is set in the Receive Control register. We are using the 2K 1397 * is set in the Receive Control register. We are using the 2K
1398 * size option, but what we REALLY want is (2K - 2)! For this 1398 * size option, but what we REALLY want is (2K - 2)! For this
@@ -1400,27 +1400,27 @@ wm_init_rxdesc(struct wm_softc *sc, int  @@ -1400,27 +1400,27 @@ wm_init_rxdesc(struct wm_softc *sc, int
1400 * Ethernet MTU. On strict-alignment platforms, if the total 1400 * Ethernet MTU. On strict-alignment platforms, if the total
1401 * size exceeds (2K - 2) we set align_tweak to 0 and let 1401 * size exceeds (2K - 2) we set align_tweak to 0 and let
1402 * the upper layer copy the headers. 1402 * the upper layer copy the headers.
1403 */ 1403 */
1404 m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak; 1404 m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
1405 1405
1406 wm_set_dma_addr(&rxd->wrx_addr, 1406 wm_set_dma_addr(&rxd->wrx_addr,
1407 rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak); 1407 rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1408 rxd->wrx_len = 0; 1408 rxd->wrx_len = 0;
1409 rxd->wrx_cksum = 0; 1409 rxd->wrx_cksum = 0;
1410 rxd->wrx_status = 0; 1410 rxd->wrx_status = 0;
1411 rxd->wrx_errors = 0; 1411 rxd->wrx_errors = 0;
1412 rxd->wrx_special = 0; 1412 rxd->wrx_special = 0;
1413 wm_cdrxsync(sc, start, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1413 wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1414 1414
1415 CSR_WRITE(sc, rxq->rxq_rdt_reg, start); 1415 CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
1416} 1416}
1417 1417
1418/* 1418/*
1419 * Device driver interface functions and commonly used functions. 1419 * Device driver interface functions and commonly used functions.
1420 * match, attach, detach, init, start, stop, ioctl, watchdog and so on. 1420 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1421 */ 1421 */
1422 1422
1423/* Lookup supported device table */ 1423/* Lookup supported device table */
1424static const struct wm_product * 1424static const struct wm_product *
1425wm_lookup(const struct pci_attach_args *pa) 1425wm_lookup(const struct pci_attach_args *pa)
1426{ 1426{
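
The align_tweak comment above reduces to one line of arithmetic: the 14-byte Ethernet header leaves the payload 2 bytes short of 4-byte alignment, so offsetting the buffer start by 2 puts the payload at offset 16. A standalone sketch of that check (ETHER_HDR_LEN here is a local stand-in for the standard 14-byte header, not the kernel macro):

	/* Alignment arithmetic behind sc_align_tweak; sketch only. */
	#include <assert.h>

	#define ETHER_HDR_LEN	14	/* standard Ethernet header */
	#define ALIGN_TWEAK	2	/* offset applied by the driver */

	int
	main(void)
	{
		/* offset 14 is misaligned; 14 + 2 = 16 is 4-byte aligned */
		assert(ETHER_HDR_LEN % 4 != 0);
		assert((ETHER_HDR_LEN + ALIGN_TWEAK) % 4 == 0);
		return 0;
	}
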
@@ -2591,27 +2591,27 @@ wm_detach(device_t self, int flags __unu @@ -2591,27 +2591,27 @@ wm_detach(device_t self, int flags __unu
2591 WM_CORE_UNLOCK(sc); 2591 WM_CORE_UNLOCK(sc);
2592 2592
2593 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY); 2593 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2594 2594
2595 /* Delete all remaining media. */ 2595 /* Delete all remaining media. */
2596 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY); 2596 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2597 2597
2598 ether_ifdetach(ifp); 2598 ether_ifdetach(ifp);
2599 if_detach(ifp); 2599 if_detach(ifp);
2600 2600
2601 2601
2602 /* Unload RX dmamaps and free mbufs */ 2602 /* Unload RX dmamaps and free mbufs */
2603 WM_RX_LOCK(rxq); 2603 WM_RX_LOCK(rxq);
2604 wm_rxdrain(sc); 2604 wm_rxdrain(rxq);
2605 WM_RX_UNLOCK(rxq); 2605 WM_RX_UNLOCK(rxq);
2606 /* Must unlock here */ 2606 /* Must unlock here */
2607 2607
2608 wm_free_txrx_queues(sc); 2608 wm_free_txrx_queues(sc);
2609 2609
2610 /* Disestablish the interrupt handler */ 2610 /* Disestablish the interrupt handler */
2611 for (i = 0; i < sc->sc_nintrs; i++) { 2611 for (i = 0; i < sc->sc_nintrs; i++) {
2612 if (sc->sc_ihs[i] != NULL) { 2612 if (sc->sc_ihs[i] != NULL) {
2613 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]); 2613 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
2614 sc->sc_ihs[i] = NULL; 2614 sc->sc_ihs[i] = NULL;
2615 } 2615 }
2616 } 2616 }
2617#ifdef WM_MSI_MSIX 2617#ifdef WM_MSI_MSIX
@@ -3952,29 +3952,29 @@ wm_reset(struct wm_softc *sc) @@ -3952,29 +3952,29 @@ wm_reset(struct wm_softc *sc)
3952 3952
3953 wm_reset_mdicnfg_82580(sc); 3953 wm_reset_mdicnfg_82580(sc);
3954 3954
3955 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0) 3955 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
3956 wm_pll_workaround_i210(sc); 3956 wm_pll_workaround_i210(sc);
3957} 3957}
3958 3958
3959/* 3959/*
3960 * wm_add_rxbuf: 3960 * wm_add_rxbuf:
3961 * 3961 *
 3962 * Add a receive buffer to the indicated descriptor. 3962 * Add a receive buffer to the indicated descriptor.
3963 */ 3963 */
3964static int 3964static int
3965wm_add_rxbuf(struct wm_softc *sc, int idx) 3965wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
3966{ 3966{
3967 struct wm_rxqueue *rxq = sc->sc_rxq; 3967 struct wm_softc *sc = rxq->rxq_sc;
3968 struct wm_rxsoft *rxs = &rxq->rxq_soft[idx]; 3968 struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
3969 struct mbuf *m; 3969 struct mbuf *m;
3970 int error; 3970 int error;
3971 3971
3972 KASSERT(WM_RX_LOCKED(rxq)); 3972 KASSERT(WM_RX_LOCKED(rxq));
3973 3973
3974 MGETHDR(m, M_DONTWAIT, MT_DATA); 3974 MGETHDR(m, M_DONTWAIT, MT_DATA);
3975 if (m == NULL) 3975 if (m == NULL)
3976 return ENOBUFS; 3976 return ENOBUFS;
3977 3977
3978 MCLGET(m, M_DONTWAIT); 3978 MCLGET(m, M_DONTWAIT);
3979 if ((m->m_flags & M_EXT) == 0) { 3979 if ((m->m_flags & M_EXT) == 0) {
3980 m_freem(m); 3980 m_freem(m);
@@ -3992,42 +3992,42 @@ wm_add_rxbuf(struct wm_softc *sc, int id @@ -3992,42 +3992,42 @@ wm_add_rxbuf(struct wm_softc *sc, int id
3992 if (error) { 3992 if (error) {
3993 /* XXX XXX XXX */ 3993 /* XXX XXX XXX */
3994 aprint_error_dev(sc->sc_dev, 3994 aprint_error_dev(sc->sc_dev,
3995 "unable to load rx DMA map %d, error = %d\n", 3995 "unable to load rx DMA map %d, error = %d\n",
3996 idx, error); 3996 idx, error);
3997 panic("wm_add_rxbuf"); 3997 panic("wm_add_rxbuf");
3998 } 3998 }
3999 3999
4000 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 4000 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4001 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 4001 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4002 4002
4003 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { 4003 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4004 if ((sc->sc_rctl & RCTL_EN) != 0) 4004 if ((sc->sc_rctl & RCTL_EN) != 0)
4005 wm_init_rxdesc(sc, idx); 4005 wm_init_rxdesc(rxq, idx);
4006 } else 4006 } else
4007 wm_init_rxdesc(sc, idx); 4007 wm_init_rxdesc(rxq, idx);
4008 4008
4009 return 0; 4009 return 0;
4010} 4010}
4011 4011
4012/* 4012/*
4013 * wm_rxdrain: 4013 * wm_rxdrain:
4014 * 4014 *
4015 * Drain the receive queue. 4015 * Drain the receive queue.
4016 */ 4016 */
4017static void 4017static void
4018wm_rxdrain(struct wm_softc *sc) 4018wm_rxdrain(struct wm_rxqueue *rxq)
4019{ 4019{
4020 struct wm_rxqueue *rxq = sc->sc_rxq; 4020 struct wm_softc *sc = rxq->rxq_sc;
4021 struct wm_rxsoft *rxs; 4021 struct wm_rxsoft *rxs;
4022 int i; 4022 int i;
4023 4023
4024 KASSERT(WM_RX_LOCKED(rxq)); 4024 KASSERT(WM_RX_LOCKED(rxq));
4025 4025
4026 for (i = 0; i < WM_NRXDESC; i++) { 4026 for (i = 0; i < WM_NRXDESC; i++) {
4027 rxs = &rxq->rxq_soft[i]; 4027 rxs = &rxq->rxq_soft[i];
4028 if (rxs->rxs_mbuf != NULL) { 4028 if (rxs->rxs_mbuf != NULL) {
4029 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 4029 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4030 m_freem(rxs->rxs_mbuf); 4030 m_freem(rxs->rxs_mbuf);
4031 rxs->rxs_mbuf = NULL; 4031 rxs->rxs_mbuf = NULL;
4032 } 4032 }
4033 } 4033 }
@@ -4603,29 +4603,33 @@ wm_init_locked(struct ifnet *ifp) @@ -4603,29 +4603,33 @@ wm_init_locked(struct ifnet *ifp)
4603 reg = CSR_READ(sc, WMREG_PBECCSTS); 4603 reg = CSR_READ(sc, WMREG_PBECCSTS);
4604 reg |= PBECCSTS_UNCORR_ECC_ENABLE; 4604 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4605 CSR_WRITE(sc, WMREG_PBECCSTS, reg); 4605 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4606 4606
4607 reg = CSR_READ(sc, WMREG_CTRL); 4607 reg = CSR_READ(sc, WMREG_CTRL);
4608 reg |= CTRL_MEHE; 4608 reg |= CTRL_MEHE;
4609 CSR_WRITE(sc, WMREG_CTRL, reg); 4609 CSR_WRITE(sc, WMREG_CTRL, reg);
4610 break; 4610 break;
4611 default: 4611 default:
4612 break; 4612 break;
4613 } 4613 }
4614 4614
4615 /* On 575 and later set RDT only if RX enabled */ 4615 /* On 575 and later set RDT only if RX enabled */
4616 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 4616 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
 4617 struct wm_rxqueue *rxq = sc->sc_rxq;
 4618 WM_RX_LOCK(rxq);
4617 for (i = 0; i < WM_NRXDESC; i++) 4619 for (i = 0; i < WM_NRXDESC; i++)
4618 wm_init_rxdesc(sc, i); 4620 wm_init_rxdesc(rxq, i);
 4621 WM_RX_UNLOCK(rxq);
 4622 }
4619 4623
4620 sc->sc_stopping = false; 4624 sc->sc_stopping = false;
4621 4625
4622 /* Start the one second link check clock. */ 4626 /* Start the one second link check clock. */
4623 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc); 4627 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4624 4628
4625 /* ...all done! */ 4629 /* ...all done! */
4626 ifp->if_flags |= IFF_RUNNING; 4630 ifp->if_flags |= IFF_RUNNING;
4627 ifp->if_flags &= ~IFF_OACTIVE; 4631 ifp->if_flags &= ~IFF_OACTIVE;
4628 4632
4629 out: 4633 out:
4630 sc->sc_if_flags = ifp->if_flags; 4634 sc->sc_if_flags = ifp->if_flags;
4631 if (error) 4635 if (error)
@@ -4708,27 +4712,27 @@ wm_stop_locked(struct ifnet *ifp, int di @@ -4708,27 +4712,27 @@ wm_stop_locked(struct ifnet *ifp, int di
4708 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 4712 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4709 m_freem(txs->txs_mbuf); 4713 m_freem(txs->txs_mbuf);
4710 txs->txs_mbuf = NULL; 4714 txs->txs_mbuf = NULL;
4711 } 4715 }
4712 } 4716 }
4713 WM_TX_UNLOCK(txq); 4717 WM_TX_UNLOCK(txq);
4714 4718
4715 /* Mark the interface as down and cancel the watchdog timer. */ 4719 /* Mark the interface as down and cancel the watchdog timer. */
4716 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 4720 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4717 ifp->if_timer = 0; 4721 ifp->if_timer = 0;
4718 4722
4719 if (disable) { 4723 if (disable) {
4720 WM_RX_LOCK(rxq); 4724 WM_RX_LOCK(rxq);
4721 wm_rxdrain(sc); 4725 wm_rxdrain(rxq);
4722 WM_RX_UNLOCK(rxq); 4726 WM_RX_UNLOCK(rxq);
4723 } 4727 }
4724 4728
4725#if 0 /* notyet */ 4729#if 0 /* notyet */
4726 if (sc->sc_type >= WM_T_82544) 4730 if (sc->sc_type >= WM_T_82544)
4727 CSR_WRITE(sc, WMREG_WUC, 0); 4731 CSR_WRITE(sc, WMREG_WUC, 0);
4728#endif 4732#endif
4729} 4733}
4730 4734
4731/* 4735/*
4732 * wm_tx_offload: 4736 * wm_tx_offload:
4733 * 4737 *
4734 * Set up TCP/IP checksumming parameters for the 4738 * Set up TCP/IP checksumming parameters for the
@@ -4903,27 +4907,27 @@ wm_tx_offload(struct wm_softc *sc, struc @@ -4903,27 +4907,27 @@ wm_tx_offload(struct wm_softc *sc, struc
4903 /* Just initialize it to a valid TCP context. */ 4907 /* Just initialize it to a valid TCP context. */
4904 tucs = WTX_TCPIP_TUCSS(offset) | 4908 tucs = WTX_TCPIP_TUCSS(offset) |
4905 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) | 4909 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
4906 WTX_TCPIP_TUCSE(0) /* rest of packet */; 4910 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4907 } 4911 }
4908 4912
4909 /* Fill in the context descriptor. */ 4913 /* Fill in the context descriptor. */
4910 t = (struct livengood_tcpip_ctxdesc *) 4914 t = (struct livengood_tcpip_ctxdesc *)
4911 &txq->txq_descs[txq->txq_next]; 4915 &txq->txq_descs[txq->txq_next];
4912 t->tcpip_ipcs = htole32(ipcs); 4916 t->tcpip_ipcs = htole32(ipcs);
4913 t->tcpip_tucs = htole32(tucs); 4917 t->tcpip_tucs = htole32(tucs);
4914 t->tcpip_cmdlen = htole32(cmdlen); 4918 t->tcpip_cmdlen = htole32(cmdlen);
4915 t->tcpip_seg = htole32(seg); 4919 t->tcpip_seg = htole32(seg);
4916 wm_cdtxsync(sc, txq->txq_next, 1, BUS_DMASYNC_PREWRITE); 4920 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
4917 4921
4918 txq->txq_next = WM_NEXTTX(txq, txq->txq_next); 4922 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
4919 txs->txs_ndesc++; 4923 txs->txs_ndesc++;
4920 4924
4921 *cmdp = cmd; 4925 *cmdp = cmd;
4922 *fieldsp = fields; 4926 *fieldsp = fields;
4923 4927
4924 return 0; 4928 return 0;
4925} 4929}
4926 4930
4927static void 4931static void
4928wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0) 4932wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
4929{ 4933{
@@ -5033,29 +5037,28 @@ wm_82547_txfifo_bugchk(struct wm_softc * @@ -5033,29 +5037,28 @@ wm_82547_txfifo_bugchk(struct wm_softc *
5033 callout_schedule(&sc->sc_txfifo_ch, 1); 5037 callout_schedule(&sc->sc_txfifo_ch, 1);
5034 return 1; 5038 return 1;
5035 } 5039 }
5036 5040
5037 send_packet: 5041 send_packet:
5038 txq->txq_fifo_head += len; 5042 txq->txq_fifo_head += len;
5039 if (txq->txq_fifo_head >= txq->txq_fifo_size) 5043 if (txq->txq_fifo_head >= txq->txq_fifo_size)
5040 txq->txq_fifo_head -= txq->txq_fifo_size; 5044 txq->txq_fifo_head -= txq->txq_fifo_size;
5041 5045
5042 return 0; 5046 return 0;
5043} 5047}
5044 5048
5045static int 5049static int
5046wm_alloc_tx_descs(struct wm_softc *sc) 5050wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5047{ 5051{
5048 struct wm_txqueue *txq = sc->sc_txq; 
5049 int error; 5052 int error;
5050 5053
5051 /* 5054 /*
5052 * Allocate the control data structures, and create and load the 5055 * Allocate the control data structures, and create and load the
5053 * DMA map for it. 5056 * DMA map for it.
5054 * 5057 *
5055 * NOTE: All Tx descriptors must be in the same 4G segment of 5058 * NOTE: All Tx descriptors must be in the same 4G segment of
5056 * memory. So must Rx descriptors. We simplify by allocating 5059 * memory. So must Rx descriptors. We simplify by allocating
5057 * both sets within the same 4G segment. 5060 * both sets within the same 4G segment.
5058 */ 5061 */
5059 if (sc->sc_type < WM_T_82544) { 5062 if (sc->sc_type < WM_T_82544) {
5060 WM_NTXDESC(txq) = WM_NTXDESC_82542; 5063 WM_NTXDESC(txq) = WM_NTXDESC_82542;
5061 txq->txq_desc_size = sizeof(wiseman_txdesc_t) * WM_NTXDESC(txq); 5064 txq->txq_desc_size = sizeof(wiseman_txdesc_t) * WM_NTXDESC(txq);
@@ -5101,41 +5104,39 @@ wm_alloc_tx_descs(struct wm_softc *sc) @@ -5101,41 +5104,39 @@ wm_alloc_tx_descs(struct wm_softc *sc)
5101 5104
5102 fail_3: 5105 fail_3:
5103 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap); 5106 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5104 fail_2: 5107 fail_2:
5105 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u, 5108 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5106 txq->txq_desc_size); 5109 txq->txq_desc_size);
5107 fail_1: 5110 fail_1:
5108 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg); 5111 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5109 fail_0: 5112 fail_0:
5110 return error; 5113 return error;
5111} 5114}
5112 5115
5113static void 5116static void
5114wm_free_tx_descs(struct wm_softc *sc) 5117wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5115{ 5118{
5116 struct wm_txqueue *txq = sc->sc_txq; 
5117 5119
5118 bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap); 5120 bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
5119 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap); 5121 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5120 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u, 5122 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5121 txq->txq_desc_size); 5123 txq->txq_desc_size);
5122 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg); 5124 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5123} 5125}
5124 5126
5125static int 5127static int
5126wm_alloc_rx_descs(struct wm_softc *sc) 5128wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5127{ 5129{
5128 struct wm_rxqueue *rxq = sc->sc_rxq; 
5129 int error; 5130 int error;
5130 5131
5131 /* 5132 /*
5132 * Allocate the control data structures, and create and load the 5133 * Allocate the control data structures, and create and load the
5133 * DMA map for it. 5134 * DMA map for it.
5134 * 5135 *
5135 * NOTE: All Tx descriptors must be in the same 4G segment of 5136 * NOTE: All Tx descriptors must be in the same 4G segment of
5136 * memory. So must Rx descriptors. We simplify by allocating 5137 * memory. So must Rx descriptors. We simplify by allocating
5137 * both sets within the same 4G segment. 5138 * both sets within the same 4G segment.
5138 */ 5139 */
5139 rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC; 5140 rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
5140 if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size, PAGE_SIZE, 5141 if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size, PAGE_SIZE,
5141 (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg, 1, 5142 (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg, 1,
@@ -5174,42 +5175,40 @@ wm_alloc_rx_descs(struct wm_softc *sc) @@ -5174,42 +5175,40 @@ wm_alloc_rx_descs(struct wm_softc *sc)
5174 5175
5175 fail_3: 5176 fail_3:
5176 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap); 5177 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5177 fail_2: 5178 fail_2:
5178 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs, 5179 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
5179 rxq->rxq_desc_size); 5180 rxq->rxq_desc_size);
5180 fail_1: 5181 fail_1:
5181 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg); 5182 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5182 fail_0: 5183 fail_0:
5183 return error; 5184 return error;
5184} 5185}
5185 5186
5186static void 5187static void
5187wm_free_rx_descs(struct wm_softc *sc) 5188wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5188{ 5189{
5189 struct wm_rxqueue *rxq = sc->sc_rxq; 
5190 5190
5191 bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap); 5191 bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
5192 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap); 5192 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5193 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs, 5193 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
5194 rxq->rxq_desc_size); 5194 rxq->rxq_desc_size);
5195 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg); 5195 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5196} 5196}
5197 5197
5198 5198
5199static int 5199static int
5200wm_alloc_tx_buffer(struct wm_softc *sc) 5200wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5201{ 5201{
5202 struct wm_txqueue *txq = sc->sc_txq; 
5203 int i, error; 5202 int i, error;
5204 5203
5205 /* Create the transmit buffer DMA maps. */ 5204 /* Create the transmit buffer DMA maps. */
5206 WM_TXQUEUELEN(txq) = 5205 WM_TXQUEUELEN(txq) =
5207 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ? 5206 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
5208 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX; 5207 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
5209 for (i = 0; i < WM_TXQUEUELEN(txq); i++) { 5208 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5210 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA, 5209 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
5211 WM_NTXSEGS, WTX_MAX_LEN, 0, 0, 5210 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
5212 &txq->txq_soft[i].txs_dmamap)) != 0) { 5211 &txq->txq_soft[i].txs_dmamap)) != 0) {
5213 aprint_error_dev(sc->sc_dev, 5212 aprint_error_dev(sc->sc_dev,
5214 "unable to create Tx DMA map %d, error = %d\n", 5213 "unable to create Tx DMA map %d, error = %d\n",
5215 i, error); 5214 i, error);
@@ -5219,42 +5218,40 @@ wm_alloc_tx_buffer(struct wm_softc *sc) @@ -5219,42 +5218,40 @@ wm_alloc_tx_buffer(struct wm_softc *sc)
5219 5218
5220 return 0; 5219 return 0;
5221 5220
5222 fail: 5221 fail:
5223 for (i = 0; i < WM_TXQUEUELEN(txq); i++) { 5222 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5224 if (txq->txq_soft[i].txs_dmamap != NULL) 5223 if (txq->txq_soft[i].txs_dmamap != NULL)
5225 bus_dmamap_destroy(sc->sc_dmat, 5224 bus_dmamap_destroy(sc->sc_dmat,
5226 txq->txq_soft[i].txs_dmamap); 5225 txq->txq_soft[i].txs_dmamap);
5227 } 5226 }
5228 return error; 5227 return error;
5229} 5228}
5230 5229
5231static void 5230static void
5232wm_free_tx_buffer(struct wm_softc *sc) 5231wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5233{ 5232{
5234 struct wm_txqueue *txq = sc->sc_txq; 
5235 int i; 5233 int i;
5236 5234
5237 for (i = 0; i < WM_TXQUEUELEN(txq); i++) { 5235 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5238 if (txq->txq_soft[i].txs_dmamap != NULL) 5236 if (txq->txq_soft[i].txs_dmamap != NULL)
5239 bus_dmamap_destroy(sc->sc_dmat, 5237 bus_dmamap_destroy(sc->sc_dmat,
5240 txq->txq_soft[i].txs_dmamap); 5238 txq->txq_soft[i].txs_dmamap);
5241 } 5239 }
5242} 5240}
5243 5241
5244static int 5242static int
5245wm_alloc_rx_buffer(struct wm_softc *sc) 5243wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5246{ 5244{
5247 struct wm_rxqueue *rxq = sc->sc_rxq; 
5248 int i, error; 5245 int i, error;
5249 5246
5250 /* Create the receive buffer DMA maps. */ 5247 /* Create the receive buffer DMA maps. */
5251 for (i = 0; i < WM_NRXDESC; i++) { 5248 for (i = 0; i < WM_NRXDESC; i++) {
5252 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 5249 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
5253 MCLBYTES, 0, 0, 5250 MCLBYTES, 0, 0,
5254 &rxq->rxq_soft[i].rxs_dmamap)) != 0) { 5251 &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
5255 aprint_error_dev(sc->sc_dev, 5252 aprint_error_dev(sc->sc_dev,
5256 "unable to create Rx DMA map %d error = %d\n", 5253 "unable to create Rx DMA map %d error = %d\n",
5257 i, error); 5254 i, error);
5258 goto fail; 5255 goto fail;
5259 } 5256 }
5260 rxq->rxq_soft[i].rxs_mbuf = NULL; 5257 rxq->rxq_soft[i].rxs_mbuf = NULL;
@@ -5262,161 +5259,160 @@ wm_alloc_rx_buffer(struct wm_softc *sc) @@ -5262,161 +5259,160 @@ wm_alloc_rx_buffer(struct wm_softc *sc)
5262 5259
5263 return 0; 5260 return 0;
5264 5261
5265 fail: 5262 fail:
5266 for (i = 0; i < WM_NRXDESC; i++) { 5263 for (i = 0; i < WM_NRXDESC; i++) {
5267 if (rxq->rxq_soft[i].rxs_dmamap != NULL) 5264 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5268 bus_dmamap_destroy(sc->sc_dmat, 5265 bus_dmamap_destroy(sc->sc_dmat,
5269 rxq->rxq_soft[i].rxs_dmamap); 5266 rxq->rxq_soft[i].rxs_dmamap);
5270 } 5267 }
5271 return error; 5268 return error;
5272} 5269}
5273 5270
5274static void 5271static void
5275wm_free_rx_buffer(struct wm_softc *sc) 5272wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5276{ 5273{
5277 struct wm_rxqueue *rxq = sc->sc_rxq; 
5278 int i; 5274 int i;
5279 5275
5280 for (i = 0; i < WM_NRXDESC; i++) { 5276 for (i = 0; i < WM_NRXDESC; i++) {
5281 if (rxq->rxq_soft[i].rxs_dmamap != NULL) 5277 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5282 bus_dmamap_destroy(sc->sc_dmat, 5278 bus_dmamap_destroy(sc->sc_dmat,
5283 rxq->rxq_soft[i].rxs_dmamap); 5279 rxq->rxq_soft[i].rxs_dmamap);
5284 } 5280 }
5285} 5281}
5286 5282
5287/* 5283/*
 5288 * wm_alloc_txrx_queues: 5284 * wm_alloc_txrx_queues:
5289 * Allocate {tx,rx}descs and {tx,rx} buffers 5285 * Allocate {tx,rx}descs and {tx,rx} buffers
5290 */ 5286 */
5291static int 5287static int
5292wm_alloc_txrx_queues(struct wm_softc *sc) 5288wm_alloc_txrx_queues(struct wm_softc *sc)
5293{ 5289{
5294 int error; 5290 int error;
 5291 struct wm_txqueue *txq;
 5292 struct wm_rxqueue *rxq;
5295 5293
5296 /* 5294 /*
5297 * For transmission 5295 * For transmission
5298 */ 5296 */
5299 sc->sc_txq = kmem_zalloc(sizeof(struct wm_txqueue) * sc->sc_ntxqueues, 5297 sc->sc_txq = kmem_zalloc(sizeof(struct wm_txqueue) * sc->sc_ntxqueues,
5300 KM_SLEEP); 5298 KM_SLEEP);
5301 if (sc->sc_txq == NULL) { 5299 if (sc->sc_txq == NULL) {
5302 aprint_error_dev(sc->sc_dev, "unable to allocate wm_txqueue\n"); 5300 aprint_error_dev(sc->sc_dev, "unable to allocate wm_txqueue\n");
5303 error = ENOMEM; 5301 error = ENOMEM;
5304 goto fail_0; 5302 goto fail_0;
5305 } 5303 }
 5304 txq = sc->sc_txq;
 5305 txq->txq_sc = sc;
5306#ifdef WM_MPSAFE 5306#ifdef WM_MPSAFE
5307 sc->sc_txq->txq_lock = 5307 txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5308 mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); 
5309#else 5308#else
5310 sc->sc_txq->txq_lock = NULL; 5309 txq->txq_lock = NULL;
5311#endif 5310#endif
5312 5311
5313 error = wm_alloc_tx_descs(sc); 5312 error = wm_alloc_tx_descs(sc, txq);
5314 if (error) 5313 if (error)
5315 goto fail_1; 5314 goto fail_1;
5316 5315
5317 error = wm_alloc_tx_buffer(sc); 5316 error = wm_alloc_tx_buffer(sc, txq);
5318 if (error) 5317 if (error)
5319 goto fail_2; 5318 goto fail_2;
5320 5319
5321 /* 5320 /*
 5322 * For receive 5321 * For receive
5323 */ 5322 */
5324 sc->sc_rxq = kmem_zalloc(sizeof(struct wm_rxqueue) * sc->sc_nrxqueues, 5323 sc->sc_rxq = kmem_zalloc(sizeof(struct wm_rxqueue) * sc->sc_nrxqueues,
5325 KM_SLEEP); 5324 KM_SLEEP);
5326 if (sc->sc_rxq == NULL) { 5325 if (sc->sc_rxq == NULL) {
5327 aprint_error_dev(sc->sc_dev, "unable to allocate wm_rxqueue\n"); 5326 aprint_error_dev(sc->sc_dev, "unable to allocate wm_rxqueue\n");
5328 error = ENOMEM; 5327 error = ENOMEM;
5329 goto fail_3; 5328 goto fail_3;
5330 } 5329 }
 5330 rxq = sc->sc_rxq;
 5331 rxq->rxq_sc = sc;
5331#ifdef WM_MPSAFE 5332#ifdef WM_MPSAFE
5332 sc->sc_rxq->rxq_lock = 5333 rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5333 mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); 
5334#else 5334#else
5335 sc->sc_rxq->rxq_lock = NULL; 5335 rxq->rxq_lock = NULL;
5336#endif 5336#endif
5337 5337
5338 error = wm_alloc_rx_descs(sc); 5338 error = wm_alloc_rx_descs(sc, rxq);
5339 if (error) 5339 if (error)
5340 goto fail_4; 5340 goto fail_4;
5341 5341
5342 error = wm_alloc_rx_buffer(sc); 5342 error = wm_alloc_rx_buffer(sc, rxq);
5343 if (error) 5343 if (error)
5344 goto fail_5; 5344 goto fail_5;
5345 5345
5346 return 0; 5346 return 0;
5347 5347
5348 fail_5: 5348 fail_5:
5349 wm_free_rx_descs(sc); 5349 wm_free_rx_descs(sc, rxq);
5350 fail_4: 5350 fail_4:
5351 if (sc->sc_rxq->rxq_lock) 5351 if (rxq->rxq_lock)
5352 mutex_obj_free(sc->sc_rxq->rxq_lock); 5352 mutex_obj_free(rxq->rxq_lock);
5353 kmem_free(sc->sc_rxq, 5353 kmem_free(rxq, sizeof(struct wm_rxqueue) * sc->sc_nrxqueues);
5354 sizeof(struct wm_rxqueue) * sc->sc_nrxqueues); 
5355 fail_3: 5354 fail_3:
5356 wm_free_tx_buffer(sc); 5355 wm_free_tx_buffer(sc, txq);
5357 fail_2: 5356 fail_2:
5358 wm_free_tx_descs(sc); 5357 wm_free_tx_descs(sc, txq);
5359 fail_1: 5358 fail_1:
5360 if (sc->sc_txq->txq_lock) 5359 if (txq->txq_lock)
5361 mutex_obj_free(sc->sc_txq->txq_lock); 5360 mutex_obj_free(txq->txq_lock);
5362 kmem_free(sc->sc_txq, 5361 kmem_free(txq, sizeof(struct wm_txqueue) * sc->sc_ntxqueues);
5363 sizeof(struct wm_txqueue) * sc->sc_ntxqueues); 
5364 fail_0: 5362 fail_0:
5365 return error; 5363 return error;
5366} 5364}
5367 5365
5368/* 5366/*
 5369 * wm_free_txrx_queues: 5367 * wm_free_txrx_queues:
5370 * Free {tx,rx}descs and {tx,rx} buffers 5368 * Free {tx,rx}descs and {tx,rx} buffers
5371 */ 5369 */
5372static void 5370static void
5373wm_free_txrx_queues(struct wm_softc *sc) 5371wm_free_txrx_queues(struct wm_softc *sc)
5374{ 5372{
 5373 struct wm_txqueue *txq = sc->sc_txq;
 5374 struct wm_rxqueue *rxq = sc->sc_rxq;
 5375
 5376 wm_free_rx_buffer(sc, rxq);
 5377 wm_free_rx_descs(sc, rxq);
 5378 if (rxq->rxq_lock)
 5379 mutex_obj_free(rxq->rxq_lock);
 5380 kmem_free(rxq, sizeof(struct wm_rxqueue) * sc->sc_nrxqueues);
5375 5381
5376 wm_free_rx_buffer(sc); 5382 wm_free_tx_buffer(sc, txq);
5377 wm_free_rx_descs(sc); 5383 wm_free_tx_descs(sc, txq);
5378 if (sc->sc_rxq->rxq_lock) 5384 if (txq->txq_lock)
5379 mutex_obj_free(sc->sc_rxq->rxq_lock); 5385 mutex_obj_free(txq->txq_lock);
5380 kmem_free(sc->sc_rxq, 5386 kmem_free(txq, sizeof(struct wm_txqueue) * sc->sc_ntxqueues);
5381 sizeof(struct wm_rxqueue) * sc->sc_nrxqueues); 
5382 
5383 wm_free_tx_buffer(sc); 
5384 wm_free_tx_descs(sc); 
5385 if (sc->sc_txq->txq_lock) 
5386 mutex_obj_free(sc->sc_txq->txq_lock); 
5387 kmem_free(sc->sc_txq, 
5388 sizeof(struct wm_txqueue) * sc->sc_ntxqueues); 
5389} 5387}
5390 5388
5391static void 5389static void
5392wm_init_tx_descs(struct wm_softc *sc) 5390wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
5393{ 5391{
5394 struct wm_txqueue *txq = sc->sc_txq; 
5395 5392
5396 KASSERT(WM_TX_LOCKED(txq)); 5393 KASSERT(WM_TX_LOCKED(txq));
5397 5394
5398 /* Initialize the transmit descriptor ring. */ 5395 /* Initialize the transmit descriptor ring. */
5399 memset(txq->txq_descs, 0, WM_TXDESCSIZE(txq)); 5396 memset(txq->txq_descs, 0, WM_TXDESCSIZE(txq));
5400 wm_cdtxsync(sc, 0, WM_NTXDESC(txq), 5397 wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
5401 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 5398 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5402 txq->txq_free = WM_NTXDESC(txq); 5399 txq->txq_free = WM_NTXDESC(txq);
5403 txq->txq_next = 0; 5400 txq->txq_next = 0;
5404} 5401}
5405 5402
5406static void 5403static void
5407wm_init_tx_regs(struct wm_softc *sc) 5404wm_init_tx_regs(struct wm_softc *sc, struct wm_txqueue *txq)
5408{ 5405{
5409 struct wm_txqueue *txq = sc->sc_txq; 
5410 5406
5411 KASSERT(WM_TX_LOCKED(txq)); 5407 KASSERT(WM_TX_LOCKED(txq));
5412 5408
5413 if (sc->sc_type < WM_T_82543) { 5409 if (sc->sc_type < WM_T_82543) {
5414 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0)); 5410 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
5415 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0)); 5411 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
5416 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(txq)); 5412 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(txq));
5417 CSR_WRITE(sc, WMREG_OLD_TDH, 0); 5413 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
5418 CSR_WRITE(sc, WMREG_OLD_TDT, 0); 5414 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
5419 CSR_WRITE(sc, WMREG_OLD_TIDV, 128); 5415 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
5420 } else { 5416 } else {
5421 CSR_WRITE(sc, WMREG_TDBAH(0), WM_CDTXADDR_HI(txq, 0)); 5417 CSR_WRITE(sc, WMREG_TDBAH(0), WM_CDTXADDR_HI(txq, 0));
5422 CSR_WRITE(sc, WMREG_TDBAL(0), WM_CDTXADDR_LO(txq, 0)); 5418 CSR_WRITE(sc, WMREG_TDBAL(0), WM_CDTXADDR_LO(txq, 0));
@@ -5437,67 +5433,64 @@ wm_init_tx_regs(struct wm_softc *sc) @@ -5437,67 +5433,64 @@ wm_init_tx_regs(struct wm_softc *sc)
5437 if (sc->sc_type >= WM_T_82540) { 5433 if (sc->sc_type >= WM_T_82540) {
5438 /* should be same */ 5434 /* should be same */
5439 CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4); 5435 CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
5440 } 5436 }
5441 5437
5442 CSR_WRITE(sc, WMREG_TDT(0), 0); 5438 CSR_WRITE(sc, WMREG_TDT(0), 0);
5443 CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_PTHRESH(0) | 5439 CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_PTHRESH(0) |
5444 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0)); 5440 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
5445 } 5441 }
5446 } 5442 }
5447} 5443}
5448 5444
5449static void 5445static void
5450wm_init_tx_buffer(struct wm_softc *sc) 5446wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
5451{ 5447{
5452 struct wm_txqueue *txq = sc->sc_txq; 
5453 int i; 5448 int i;
5454 5449
5455 KASSERT(WM_TX_LOCKED(txq)); 5450 KASSERT(WM_TX_LOCKED(txq));
5456 5451
5457 /* Initialize the transmit job descriptors. */ 5452 /* Initialize the transmit job descriptors. */
5458 for (i = 0; i < WM_TXQUEUELEN(txq); i++) 5453 for (i = 0; i < WM_TXQUEUELEN(txq); i++)
5459 txq->txq_soft[i].txs_mbuf = NULL; 5454 txq->txq_soft[i].txs_mbuf = NULL;
5460 txq->txq_sfree = WM_TXQUEUELEN(txq); 5455 txq->txq_sfree = WM_TXQUEUELEN(txq);
5461 txq->txq_snext = 0; 5456 txq->txq_snext = 0;
5462 txq->txq_sdirty = 0; 5457 txq->txq_sdirty = 0;
5463} 5458}
5464 5459
5465static void 5460static void
5466wm_init_tx_queue(struct wm_softc *sc) 5461wm_init_tx_queue(struct wm_softc *sc, struct wm_txqueue *txq)
5467{ 5462{
5468 struct wm_txqueue *txq = sc->sc_txq; 
5469 5463
5470 KASSERT(WM_TX_LOCKED(txq)); 5464 KASSERT(WM_TX_LOCKED(txq));
5471 5465
5472 /* 5466 /*
5473 * Set up some register offsets that are different between 5467 * Set up some register offsets that are different between
5474 * the i82542 and the i82543 and later chips. 5468 * the i82542 and the i82543 and later chips.
5475 */ 5469 */
5476 if (sc->sc_type < WM_T_82543) { 5470 if (sc->sc_type < WM_T_82543) {
5477 txq->txq_tdt_reg = WMREG_OLD_TDT; 5471 txq->txq_tdt_reg = WMREG_OLD_TDT;
5478 } else { 5472 } else {
5479 txq->txq_tdt_reg = WMREG_TDT(0); 5473 txq->txq_tdt_reg = WMREG_TDT(0);
5480 } 5474 }
5481 5475
5482 wm_init_tx_descs(sc); 5476 wm_init_tx_descs(sc, txq);
5483 wm_init_tx_regs(sc); 5477 wm_init_tx_regs(sc, txq);
5484 wm_init_tx_buffer(sc); 5478 wm_init_tx_buffer(sc, txq);
5485} 5479}
5486 5480
5487static void 5481static void
5488wm_init_rx_regs(struct wm_softc *sc) 5482wm_init_rx_regs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5489{ 5483{
5490 struct wm_rxqueue *rxq = sc->sc_rxq; 
5491 5484
5492 KASSERT(WM_RX_LOCKED(rxq)); 5485 KASSERT(WM_RX_LOCKED(rxq));
5493 5486
5494 /* 5487 /*
5495 * Initialize the receive descriptor and receive job 5488 * Initialize the receive descriptor and receive job
5496 * descriptor rings. 5489 * descriptor rings.
5497 */ 5490 */
5498 if (sc->sc_type < WM_T_82543) { 5491 if (sc->sc_type < WM_T_82543) {
5499 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0)); 5492 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
5500 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0)); 5493 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
5501 CSR_WRITE(sc, WMREG_OLD_RDLEN0, 5494 CSR_WRITE(sc, WMREG_OLD_RDLEN0,
5502 sizeof(wiseman_rxdesc_t) * WM_NRXDESC); 5495 sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
5503 CSR_WRITE(sc, WMREG_OLD_RDH0, 0); 5496 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
@@ -5526,103 +5519,101 @@ wm_init_rx_regs(struct wm_softc *sc) @@ -5526,103 +5519,101 @@ wm_init_rx_regs(struct wm_softc *sc)
5526 | RXDCTL_WTHRESH(1)); 5519 | RXDCTL_WTHRESH(1));
5527 } else { 5520 } else {
5528 CSR_WRITE(sc, WMREG_RDH(0), 0); 5521 CSR_WRITE(sc, WMREG_RDH(0), 0);
5529 CSR_WRITE(sc, WMREG_RDT(0), 0); 5522 CSR_WRITE(sc, WMREG_RDT(0), 0);
5530 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */ 5523 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
5531 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */ 5524 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
5532 CSR_WRITE(sc, WMREG_RXDCTL(0), RXDCTL_PTHRESH(0) | 5525 CSR_WRITE(sc, WMREG_RXDCTL(0), RXDCTL_PTHRESH(0) |
5533 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1)); 5526 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
5534 } 5527 }
5535 } 5528 }
5536} 5529}
5537 5530
5538static int 5531static int
5539wm_init_rx_buffer(struct wm_softc *sc) 5532wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5540{ 5533{
5541 struct wm_rxqueue *rxq = sc->sc_rxq; 
5542 struct wm_rxsoft *rxs; 5534 struct wm_rxsoft *rxs;
5543 int error, i; 5535 int error, i;
5544 5536
5545 KASSERT(WM_RX_LOCKED(rxq)); 5537 KASSERT(WM_RX_LOCKED(rxq));
5546 5538
5547 for (i = 0; i < WM_NRXDESC; i++) { 5539 for (i = 0; i < WM_NRXDESC; i++) {
5548 rxs = &rxq->rxq_soft[i]; 5540 rxs = &rxq->rxq_soft[i];
5549 if (rxs->rxs_mbuf == NULL) { 5541 if (rxs->rxs_mbuf == NULL) {
5550 if ((error = wm_add_rxbuf(sc, i)) != 0) { 5542 if ((error = wm_add_rxbuf(rxq, i)) != 0) {
5551 log(LOG_ERR, "%s: unable to allocate or map " 5543 log(LOG_ERR, "%s: unable to allocate or map "
5552 "rx buffer %d, error = %d\n", 5544 "rx buffer %d, error = %d\n",
5553 device_xname(sc->sc_dev), i, error); 5545 device_xname(sc->sc_dev), i, error);
5554 /* 5546 /*
5555 * XXX Should attempt to run with fewer receive 5547 * XXX Should attempt to run with fewer receive
5556 * XXX buffers instead of just failing. 5548 * XXX buffers instead of just failing.
5557 */ 5549 */
5558 wm_rxdrain(sc); 5550 wm_rxdrain(rxq);
5559 return ENOMEM; 5551 return ENOMEM;
5560 } 5552 }
5561 } else { 5553 } else {
5562 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0) 5554 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
5563 wm_init_rxdesc(sc, i); 5555 wm_init_rxdesc(rxq, i);
5564 /* 5556 /*
5565 * For 82575 and newer device, the RX descriptors 5557 * For 82575 and newer device, the RX descriptors
5566 * must be initialized after the setting of RCTL.EN in 5558 * must be initialized after the setting of RCTL.EN in
5567 * wm_set_filter() 5559 * wm_set_filter()
5568 */ 5560 */
5569 } 5561 }
5570 } 5562 }
5571 rxq->rxq_ptr = 0; 5563 rxq->rxq_ptr = 0;
5572 rxq->rxq_discard = 0; 5564 rxq->rxq_discard = 0;
5573 WM_RXCHAIN_RESET(rxq); 5565 WM_RXCHAIN_RESET(rxq);
5574 5566
5575 return 0; 5567 return 0;
5576} 5568}
5577 5569
5578static int 5570static int
5579wm_init_rx_queue(struct wm_softc *sc) 5571wm_init_rx_queue(struct wm_softc *sc, struct wm_rxqueue *rxq)
5580{ 5572{
5581 struct wm_rxqueue *rxq = sc->sc_rxq; 
5582 5573
5583 KASSERT(WM_RX_LOCKED(rxq)); 5574 KASSERT(WM_RX_LOCKED(rxq));
5584 5575
5585 /* 5576 /*
5586 * Set up some register offsets that are different between 5577 * Set up some register offsets that are different between
5587 * the i82542 and the i82543 and later chips. 5578 * the i82542 and the i82543 and later chips.
5588 */ 5579 */
5589 if (sc->sc_type < WM_T_82543) { 5580 if (sc->sc_type < WM_T_82543) {
5590 rxq->rxq_rdt_reg = WMREG_OLD_RDT0; 5581 rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
5591 } else { 5582 } else {
5592 rxq->rxq_rdt_reg = WMREG_RDT(0); 5583 rxq->rxq_rdt_reg = WMREG_RDT(0);
5593 } 5584 }
5594 5585
5595 wm_init_rx_regs(sc); 5586 wm_init_rx_regs(sc, rxq);
5596 return wm_init_rx_buffer(sc); 5587 return wm_init_rx_buffer(sc, rxq);
5597} 5588}
5598 5589
5599/* 5590/*
 5600 * wm_init_txrx_queues: 5591 * wm_init_txrx_queues:
5601 * Initialize {tx,rx}descs and {tx,rx} buffers 5592 * Initialize {tx,rx}descs and {tx,rx} buffers
5602 */ 5593 */
5603static int 5594static int
5604wm_init_txrx_queues(struct wm_softc *sc) 5595wm_init_txrx_queues(struct wm_softc *sc)
5605{ 5596{
5606 struct wm_txqueue *txq = sc->sc_txq; 5597 struct wm_txqueue *txq = sc->sc_txq;
5607 struct wm_rxqueue *rxq = sc->sc_rxq; 5598 struct wm_rxqueue *rxq = sc->sc_rxq;
5608 int error; 5599 int error;
5609 5600
5610 WM_TX_LOCK(txq); 5601 WM_TX_LOCK(txq);
5611 wm_init_tx_queue(sc); 5602 wm_init_tx_queue(sc, txq);
5612 WM_TX_UNLOCK(txq); 5603 WM_TX_UNLOCK(txq);
5613 5604
5614 WM_RX_LOCK(rxq); 5605 WM_RX_LOCK(rxq);
5615 error = wm_init_rx_queue(sc); 5606 error = wm_init_rx_queue(sc, rxq);
5616 WM_RX_UNLOCK(rxq); 5607 WM_RX_UNLOCK(rxq);
5617 5608
5618 return error; 5609 return error;
5619} 5610}
5620 5611
5621/* 5612/*
5622 * wm_start: [ifnet interface function] 5613 * wm_start: [ifnet interface function]
5623 * 5614 *
5624 * Start packet transmission on the interface. 5615 * Start packet transmission on the interface.
5625 */ 5616 */
5626static void 5617static void
5627wm_start(struct ifnet *ifp) 5618wm_start(struct ifnet *ifp)
5628{ 5619{
@@ -5886,27 +5877,27 @@ wm_start_locked(struct ifnet *ifp) @@ -5886,27 +5877,27 @@ wm_start_locked(struct ifnet *ifp)
5886 htole32(WTX_CMD_VLE); 5877 htole32(WTX_CMD_VLE);
5887 txq->txq_descs[lasttx].wtx_fields.wtxu_vlan 5878 txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
5888 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff); 5879 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5889 } 5880 }
5890 5881
5891 txs->txs_lastdesc = lasttx; 5882 txs->txs_lastdesc = lasttx;
5892 5883
5893 DPRINTF(WM_DEBUG_TX, 5884 DPRINTF(WM_DEBUG_TX,
5894 ("%s: TX: desc %d: cmdlen 0x%08x\n", 5885 ("%s: TX: desc %d: cmdlen 0x%08x\n",
5895 device_xname(sc->sc_dev), 5886 device_xname(sc->sc_dev),
5896 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen))); 5887 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5897 5888
5898 /* Sync the descriptors we're using. */ 5889 /* Sync the descriptors we're using. */
5899 wm_cdtxsync(sc, txq->txq_next, txs->txs_ndesc, 5890 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
5900 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 5891 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5901 5892
5902 /* Give the packet to the chip. */ 5893 /* Give the packet to the chip. */
5903 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx); 5894 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
5904 5895
5905 DPRINTF(WM_DEBUG_TX, 5896 DPRINTF(WM_DEBUG_TX,
5906 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx)); 5897 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5907 5898
5908 DPRINTF(WM_DEBUG_TX, 5899 DPRINTF(WM_DEBUG_TX,
5909 ("%s: TX: finished transmitting packet, job %d\n", 5900 ("%s: TX: finished transmitting packet, job %d\n",
5910 device_xname(sc->sc_dev), txq->txq_txsnext)); 5901 device_xname(sc->sc_dev), txq->txq_txsnext));
5911 5902
5912 /* Advance the tx pointer. */ 5903 /* Advance the tx pointer. */
@@ -6117,27 +6108,27 @@ wm_nq_tx_offload(struct wm_softc *sc, st @@ -6117,27 +6108,27 @@ wm_nq_tx_offload(struct wm_softc *sc, st
6117 } 6108 }
6118 cmdc |= NQTXC_CMD_IP6; 6109 cmdc |= NQTXC_CMD_IP6;
6119 *fieldsp |= NQTXD_FIELDS_TUXSM; 6110 *fieldsp |= NQTXD_FIELDS_TUXSM;
6120 } 6111 }
6121 6112
6122 /* Fill in the context descriptor. */ 6113 /* Fill in the context descriptor. */
6123 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len = 6114 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
6124 htole32(vl_len); 6115 htole32(vl_len);
6125 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0; 6116 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
6126 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd = 6117 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
6127 htole32(cmdc); 6118 htole32(cmdc);
6128 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx = 6119 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
6129 htole32(mssidx); 6120 htole32(mssidx);
6130 wm_cdtxsync(sc, txq->txq_next, 1, BUS_DMASYNC_PREWRITE); 6121 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
6131 DPRINTF(WM_DEBUG_TX, 6122 DPRINTF(WM_DEBUG_TX,
6132 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev), 6123 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
6133 txq->txq_txnext, 0, vl_len)); 6124 txq->txq_txnext, 0, vl_len));
6134 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc)); 6125 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
6135 txq->txq_next = WM_NEXTTX(txq, txq->txq_next); 6126 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
6136 txs->txs_ndesc++; 6127 txs->txs_ndesc++;
6137 return 0; 6128 return 0;
6138} 6129}
6139 6130
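wm_nq_tx_offload() ends by charging the context descriptor to the ring: txq_next advances with wraparound (the WM_NEXTTX() computation) and the job's txs_ndesc grows by one, so wm_txeof() later knows how many slots to reclaim. A self-contained sketch of that bookkeeping, with a stand-in ring size; the real macro takes the queue as an argument and the ring size is per-queue:

#define SKETCH_NTXDESC   256                    /* stand-in ring size */
#define SKETCH_NEXTTX(x) (((x) + 1) % SKETCH_NTXDESC)

struct sketch_txjob {
    int txs_ndesc;              /* descriptors consumed by this job */
};

static void
sketch_consume_slot(int *txq_next, struct sketch_txjob *txs)
{
    *txq_next = SKETCH_NEXTTX(*txq_next);       /* wrap at ring end */
    txs->txs_ndesc++;                           /* bill slot to the job */
}
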
6140/* 6131/*
6141 * wm_nq_start: [ifnet interface function] 6132 * wm_nq_start: [ifnet interface function]
6142 * 6133 *
6143 * Start packet transmission on the interface for NEWQUEUE devices 6134 * Start packet transmission on the interface for NEWQUEUE devices
@@ -6376,27 +6367,27 @@ wm_nq_start_locked(struct ifnet *ifp) @@ -6376,27 +6367,27 @@ wm_nq_start_locked(struct ifnet *ifp)
6376 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) == 6367 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
6377 (NQTX_CMD_EOP | NQTX_CMD_RS)); 6368 (NQTX_CMD_EOP | NQTX_CMD_RS));
6378 txq->txq_descs[lasttx].wtx_cmdlen |= 6369 txq->txq_descs[lasttx].wtx_cmdlen |=
6379 htole32(WTX_CMD_EOP | WTX_CMD_RS); 6370 htole32(WTX_CMD_EOP | WTX_CMD_RS);
6380 6371
6381 txs->txs_lastdesc = lasttx; 6372 txs->txs_lastdesc = lasttx;
6382 6373
6383 DPRINTF(WM_DEBUG_TX, 6374 DPRINTF(WM_DEBUG_TX,
6384 ("%s: TX: desc %d: cmdlen 0x%08x\n", 6375 ("%s: TX: desc %d: cmdlen 0x%08x\n",
6385 device_xname(sc->sc_dev), 6376 device_xname(sc->sc_dev),
6386 lasttx, le32toh(txq->txq_txdescs[lasttx].wtx_cmdlen))); 6377 lasttx, le32toh(txq->txq_txdescs[lasttx].wtx_cmdlen)));
6387 6378
6388 /* Sync the descriptors we're using. */ 6379 /* Sync the descriptors we're using. */
6389 wm_cdtxsync(sc, txq->txq_next, txs->txs_ndesc, 6380 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
6390 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 6381 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
6391 6382
6392 /* Give the packet to the chip. */ 6383 /* Give the packet to the chip. */
6393 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx); 6384 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
6394 sent = true; 6385 sent = true;
6395 6386
6396 DPRINTF(WM_DEBUG_TX, 6387 DPRINTF(WM_DEBUG_TX,
6397 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx)); 6388 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
6398 6389
6399 DPRINTF(WM_DEBUG_TX, 6390 DPRINTF(WM_DEBUG_TX,
6400 ("%s: TX: finished transmitting packet, job %d\n", 6391 ("%s: TX: finished transmitting packet, job %d\n",
6401 device_xname(sc->sc_dev), txq->txq_txsnext)); 6392 device_xname(sc->sc_dev), txq->txq_txsnext));
6402 6393
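The KASSERT in this hunk carries the justification for sharing one descriptor write between formats: the legacy WTX_* and NEWQUEUE NQTX_* encodings of EOP|RS coincide, so the same htole32() |= is valid for both. With C11 the same invariant can be pinned at compile time; a sketch with stand-in bit values, not the real register encodings:

#include <assert.h>

#define SK_WTX_CMD_EOP   (1U << 24)     /* stand-ins only */
#define SK_WTX_CMD_RS    (1U << 27)
#define SK_NQTX_CMD_EOP  (1U << 24)
#define SK_NQTX_CMD_RS   (1U << 27)

static_assert((SK_WTX_CMD_EOP | SK_WTX_CMD_RS) ==
    (SK_NQTX_CMD_EOP | SK_NQTX_CMD_RS),
    "legacy and NQ EOP/RS encodings must match");
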
@@ -6453,33 +6444,33 @@ wm_txeof(struct wm_softc *sc) @@ -6453,33 +6444,33 @@ wm_txeof(struct wm_softc *sc)
6453 ifp->if_flags &= ~IFF_OACTIVE; 6444 ifp->if_flags &= ~IFF_OACTIVE;
6454 6445
6455 /* 6446 /*
6456 * Go through the Tx list and free mbufs for those 6447 * Go through the Tx list and free mbufs for those
6457 * frames which have been transmitted. 6448 * frames which have been transmitted.
6458 */ 6449 */
6459 for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq); 6450 for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
6460 i = WM_NEXTTXS(txq, i), txq->txq_sfree++) { 6451 i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
6461 txs = &txq->txq_soft[i]; 6452 txs = &txq->txq_soft[i];
6462 6453
6463 DPRINTF(WM_DEBUG_TX, 6454 DPRINTF(WM_DEBUG_TX,
6464 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i)); 6455 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
6465 6456
6466 wm_cdtxsync(sc, txs->txs_firstdesc, txs->txs_ndesc, 6457 wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
6467 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 6458 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
6468 6459
6469 status = 6460 status =
6470 txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status; 6461 txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
6471 if ((status & WTX_ST_DD) == 0) { 6462 if ((status & WTX_ST_DD) == 0) {
6472 wm_cdtxsync(sc, txs->txs_lastdesc, 1, 6463 wm_cdtxsync(txq, txs->txs_lastdesc, 1,
6473 BUS_DMASYNC_PREREAD); 6464 BUS_DMASYNC_PREREAD);
6474 break; 6465 break;
6475 } 6466 }
6476 6467
6477 processed = true; 6468 processed = true;
6478 count++; 6469 count++;
6479 DPRINTF(WM_DEBUG_TX, 6470 DPRINTF(WM_DEBUG_TX,
6480 ("%s: TX: job %d done: descs %d..%d\n", 6471 ("%s: TX: job %d done: descs %d..%d\n",
6481 device_xname(sc->sc_dev), i, txs->txs_firstdesc, 6472 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
6482 txs->txs_lastdesc)); 6473 txs->txs_lastdesc));
6483 6474
6484 /* 6475 /*
6485 * XXX We should probably be using the statistics 6476 * XXX We should probably be using the statistics
@@ -6527,92 +6518,92 @@ wm_txeof(struct wm_softc *sc) @@ -6527,92 +6518,92 @@ wm_txeof(struct wm_softc *sc)
6527 */ 6518 */
6528 if (txq->txq_sfree == WM_TXQUEUELEN(txq)) 6519 if (txq->txq_sfree == WM_TXQUEUELEN(txq))
6529 ifp->if_timer = 0; 6520 ifp->if_timer = 0;
6530 6521
6531 return processed; 6522 return processed;
6532} 6523}
6533 6524
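wm_txeof()'s scan relies on in-order completion: after the POSTREAD|POSTWRITE sync, the done (DD) bit on a job's last descriptor decides everything, and a clear DD ends the walk because no later job can have completed either. A condensed, self-contained sketch of that loop shape; desc_status() abbreviates the sync-plus-read, and the DD value is a stand-in:

#include <stdint.h>

#define SK_WTX_ST_DD 0x01               /* stand-in for WTX_ST_DD */

static int
sketch_txeof(int sdirty, int sfree, int qlen,
    uint8_t (*desc_status)(int job))
{
    int i, reclaimed = 0;

    for (i = sdirty; sfree != qlen; i = (i + 1) % qlen, sfree++) {
        if ((desc_status(i) & SK_WTX_ST_DD) == 0)
            break;          /* not done; later jobs aren't either */
        reclaimed++;        /* unload the dmamap, free the mbuf here */
    }
    return reclaimed;
}
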
6534/* 6525/*
6535 * wm_rxeof: 6526 * wm_rxeof:
6536 * 6527 *
6537 * Helper; handle receive interrupts. 6528 * Helper; handle receive interrupts.
6538 */ 6529 */
6539static void 6530static void
6540wm_rxeof(struct wm_softc *sc) 6531wm_rxeof(struct wm_rxqueue *rxq)
6541{ 6532{
6542 struct wm_rxqueue *rxq = sc->sc_rxq; 6533 struct wm_softc *sc = rxq->rxq_sc;
6543 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 6534 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6544 struct wm_rxsoft *rxs; 6535 struct wm_rxsoft *rxs;
6545 struct mbuf *m; 6536 struct mbuf *m;
6546 int i, len; 6537 int i, len;
6547 int count = 0; 6538 int count = 0;
6548 uint8_t status, errors; 6539 uint8_t status, errors;
6549 uint16_t vlantag; 6540 uint16_t vlantag;
6550 6541
6551 for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) { 6542 for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
6552 rxs = &rxq->rxq_soft[i]; 6543 rxs = &rxq->rxq_soft[i];
6553 6544
6554 DPRINTF(WM_DEBUG_RX, 6545 DPRINTF(WM_DEBUG_RX,
6555 ("%s: RX: checking descriptor %d\n", 6546 ("%s: RX: checking descriptor %d\n",
6556 device_xname(sc->sc_dev), i)); 6547 device_xname(sc->sc_dev), i));
6557 6548
6558 wm_cdrxsync(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 6549 wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
6559 6550
6560 status = rxq->rxq_descs[i].wrx_status; 6551 status = rxq->rxq_descs[i].wrx_status;
6561 errors = rxq->rxq_descs[i].wrx_errors; 6552 errors = rxq->rxq_descs[i].wrx_errors;
6562 len = le16toh(rxq->rxq_descs[i].wrx_len); 6553 len = le16toh(rxq->rxq_descs[i].wrx_len);
6563 vlantag = rxq->rxq_descs[i].wrx_special; 6554 vlantag = rxq->rxq_descs[i].wrx_special;
6564 6555
6565 if ((status & WRX_ST_DD) == 0) { 6556 if ((status & WRX_ST_DD) == 0) {
6566 /* We have processed all of the receive descriptors. */ 6557 /* We have processed all of the receive descriptors. */
6567 wm_cdrxsync(sc, i, BUS_DMASYNC_PREREAD); 6558 wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
6568 break; 6559 break;
6569 } 6560 }
6570 6561
6571 count++; 6562 count++;
6572 if (__predict_false(rxq->rxq_discard)) { 6563 if (__predict_false(rxq->rxq_discard)) {
6573 DPRINTF(WM_DEBUG_RX, 6564 DPRINTF(WM_DEBUG_RX,
6574 ("%s: RX: discarding contents of descriptor %d\n", 6565 ("%s: RX: discarding contents of descriptor %d\n",
6575 device_xname(sc->sc_dev), i)); 6566 device_xname(sc->sc_dev), i));
6576 wm_init_rxdesc(sc, i); 6567 wm_init_rxdesc(rxq, i);
6577 if (status & WRX_ST_EOP) { 6568 if (status & WRX_ST_EOP) {
6578 /* Reset our state. */ 6569 /* Reset our state. */
6579 DPRINTF(WM_DEBUG_RX, 6570 DPRINTF(WM_DEBUG_RX,
6580 ("%s: RX: resetting rxdiscard -> 0\n", 6571 ("%s: RX: resetting rxdiscard -> 0\n",
6581 device_xname(sc->sc_dev))); 6572 device_xname(sc->sc_dev)));
6582 rxq->rxq_discard = 0; 6573 rxq->rxq_discard = 0;
6583 } 6574 }
6584 continue; 6575 continue;
6585 } 6576 }
6586 6577
6587 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 6578 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
6588 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 6579 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
6589 6580
6590 m = rxs->rxs_mbuf; 6581 m = rxs->rxs_mbuf;
6591 6582
6592 /* 6583 /*
6593 * Add a new receive buffer to the ring, unless of 6584 * Add a new receive buffer to the ring, unless of
6594 * course the length is zero. Treat the latter as a 6585 * course the length is zero. Treat the latter as a
6595 * failed mapping. 6586 * failed mapping.
6596 */ 6587 */
6597 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) { 6588 if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
6598 /* 6589 /*
6599 * Failed, throw away what we've done so 6590 * Failed, throw away what we've done so
6600 * far, and discard the rest of the packet. 6591 * far, and discard the rest of the packet.
6601 */ 6592 */
6602 ifp->if_ierrors++; 6593 ifp->if_ierrors++;
6603 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 6594 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
6604 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 6595 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
6605 wm_init_rxdesc(sc, i); 6596 wm_init_rxdesc(rxq, i);
6606 if ((status & WRX_ST_EOP) == 0) 6597 if ((status & WRX_ST_EOP) == 0)
6607 rxq->rxq_discard = 1; 6598 rxq->rxq_discard = 1;
6608 if (rxq->rxq_head != NULL) 6599 if (rxq->rxq_head != NULL)
6609 m_freem(rxq->rxq_head); 6600 m_freem(rxq->rxq_head);
6610 WM_RXCHAIN_RESET(rxq); 6601 WM_RXCHAIN_RESET(rxq);
6611 DPRINTF(WM_DEBUG_RX, 6602 DPRINTF(WM_DEBUG_RX,
6612 ("%s: RX: Rx buffer allocation failed, " 6603 ("%s: RX: Rx buffer allocation failed, "
6613 "dropping packet%s\n", device_xname(sc->sc_dev), 6604 "dropping packet%s\n", device_xname(sc->sc_dev),
6614 rxq->rxq_rxdiscard ? " (discard)" : "")); 6605 rxq->rxq_rxdiscard ? " (discard)" : ""));
6615 continue; 6606 continue;
6616 } 6607 }
6617 6608
6618 m->m_len = len; 6609 m->m_len = len;
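
The rxq_discard flag seen throughout this hunk implements a small state machine: once a frame is dropped mid-assembly (zero length or wm_add_rxbuf() failure), every following descriptor is re-initialized and skipped until one carrying EOP arrives, guaranteeing a multi-descriptor packet is discarded whole. A minimal sketch of that logic, with a stand-in status bit:

#include <stdbool.h>
#include <stdint.h>

#define SK_WRX_ST_EOP 0x02              /* stand-in for WRX_ST_EOP */

struct sketch_rxstate {
    bool discard;                       /* mirrors rxq_discard */
};

/* Returns true if this descriptor's data should be kept. */
static bool
sketch_rx_filter(struct sketch_rxstate *st, uint8_t status,
    bool alloc_failed)
{
    if (st->discard) {
        if (status & SK_WRX_ST_EOP)
            st->discard = false;        /* frame fully skipped */
        return false;
    }
    if (alloc_failed) {
        /* Drop this frame; keep dropping until EOP arrives. */
        if ((status & SK_WRX_ST_EOP) == 0)
            st->discard = true;
        return false;
    }
    return true;
}
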
@@ -7008,27 +6999,27 @@ wm_intr_legacy(void *arg) @@ -7008,27 +6999,27 @@ wm_intr_legacy(void *arg)
7008 } 6999 }
7009 7000
7010 handled = 1; 7001 handled = 1;
7011 7002
7012#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS) 7003#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
7013 if (icr & (ICR_RXDMT0|ICR_RXT0)) { 7004 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
7014 DPRINTF(WM_DEBUG_RX, 7005 DPRINTF(WM_DEBUG_RX,
7015 ("%s: RX: got Rx intr 0x%08x\n", 7006 ("%s: RX: got Rx intr 0x%08x\n",
7016 device_xname(sc->sc_dev), 7007 device_xname(sc->sc_dev),
7017 icr & (ICR_RXDMT0|ICR_RXT0))); 7008 icr & (ICR_RXDMT0|ICR_RXT0)));
7018 WM_EVCNT_INCR(&sc->sc_ev_rxintr); 7009 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
7019 } 7010 }
7020#endif 7011#endif
7021 wm_rxeof(sc); 7012 wm_rxeof(rxq);
7022 7013
7023 WM_RX_UNLOCK(rxq); 7014 WM_RX_UNLOCK(rxq);
7024 WM_TX_LOCK(txq); 7015 WM_TX_LOCK(txq);
7025 7016
7026#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS) 7017#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
7027 if (icr & ICR_TXDW) { 7018 if (icr & ICR_TXDW) {
7028 DPRINTF(WM_DEBUG_TX, 7019 DPRINTF(WM_DEBUG_TX,
7029 ("%s: TX: got TXDW interrupt\n", 7020 ("%s: TX: got TXDW interrupt\n",
7030 device_xname(sc->sc_dev))); 7021 device_xname(sc->sc_dev)));
7031 WM_EVCNT_INCR(&sc->sc_ev_txdw); 7022 WM_EVCNT_INCR(&sc->sc_ev_txdw);
7032 } 7023 }
7033#endif 7024#endif
7034 wm_txeof(sc); 7025 wm_txeof(sc);
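
True to the commit message, the handlers themselves keep their softc argument; wm_intr_legacy() merely swaps locks between the two halves, calling the now queue-centric wm_rxeof(rxq) under the RX lock and the still softc-centric wm_txeof(sc) under the TX lock. A kernel-side sketch of that hand-off, assuming kmutex(9); rxq_lock/txq_lock and sc_txq are stand-ins for what WM_RX_LOCK()/WM_TX_LOCK() and the softc actually use:

static int
sketch_intr_legacy(struct wm_softc *sc)
{
    struct wm_rxqueue *rxq = sc->sc_rxq;
    struct wm_txqueue *txq = sc->sc_txq;    /* assumed analog of sc_rxq */

    mutex_enter(rxq->rxq_lock);
    wm_rxeof(rxq);              /* queue-centric as of 1.362 */
    mutex_exit(rxq->rxq_lock);

    mutex_enter(txq->txq_lock);
    wm_txeof(sc);               /* interrupt side still softc-centric */
    mutex_exit(txq->txq_lock);

    return 1;                   /* interrupt claimed */
}
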
@@ -7128,27 +7119,27 @@ wm_rxintr_msix(void *arg) @@ -7128,27 +7119,27 @@ wm_rxintr_msix(void *arg)
7128 if (sc->sc_type == WM_T_82574) 7119 if (sc->sc_type == WM_T_82574)
7129 CSR_WRITE(sc, WMREG_IMC, ICR_RXQ(0)); /* 82574 only */ 7120 CSR_WRITE(sc, WMREG_IMC, ICR_RXQ(0)); /* 82574 only */
7130 else if (sc->sc_type == WM_T_82575) 7121 else if (sc->sc_type == WM_T_82575)
7131 CSR_WRITE(sc, WMREG_EIMC, EITR_RX_QUEUE(0)); 7122 CSR_WRITE(sc, WMREG_EIMC, EITR_RX_QUEUE(0));
7132 else 7123 else
7133 CSR_WRITE(sc, WMREG_EIMC, 1 << WM_MSIX_RXINTR_IDX); 7124 CSR_WRITE(sc, WMREG_EIMC, 1 << WM_MSIX_RXINTR_IDX);
7134 7125
7135 WM_RX_LOCK(rxq); 7126 WM_RX_LOCK(rxq);
7136 7127
7137 if (sc->sc_stopping) 7128 if (sc->sc_stopping)
7138 goto out; 7129 goto out;
7139 7130
7140 WM_EVCNT_INCR(&sc->sc_ev_rxintr); 7131 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
7141 wm_rxeof(sc); 7132 wm_rxeof(rxq);
7142 7133
7143out: 7134out:
7144 WM_RX_UNLOCK(rxq); 7135 WM_RX_UNLOCK(rxq);
7145 7136
7146 if (sc->sc_type == WM_T_82574) 7137 if (sc->sc_type == WM_T_82574)
7147 CSR_WRITE(sc, WMREG_IMS, ICR_RXQ(0)); 7138 CSR_WRITE(sc, WMREG_IMS, ICR_RXQ(0));
7148 else if (sc->sc_type == WM_T_82575) 7139 else if (sc->sc_type == WM_T_82575)
7149 CSR_WRITE(sc, WMREG_EIMS, EITR_RX_QUEUE(0)); 7140 CSR_WRITE(sc, WMREG_EIMS, EITR_RX_QUEUE(0));
7150 else 7141 else
7151 CSR_WRITE(sc, WMREG_EIMS, 1 << WM_MSIX_RXINTR_IDX); 7142 CSR_WRITE(sc, WMREG_EIMS, 1 << WM_MSIX_RXINTR_IDX);
7152 7143
7153 return 1; 7144 return 1;
7154} 7145}
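
wm_rxintr_msix() wraps the receive work in a mask/process/unmask bracket: the queue's interrupt is disabled (IMC on the 82574, EIMC elsewhere) before any work, and re-enabled (IMS/EIMS) only after the lock is dropped, so the handler cannot race itself on the same queue. A skeleton sketch of the bracket; the function pointers stand in for the chip-specific CSR_WRITE()s shown above, with the bit chosen per chip generation (ICR_RXQ(0), EITR_RX_QUEUE(0), or 1 << WM_MSIX_RXINTR_IDX):

#include <stdbool.h>

static int
sketch_rxintr_msix(struct wm_rxqueue *rxq, bool stopping,
    void (*disable_intr)(void), void (*enable_intr)(void))
{
    disable_intr();             /* block re-entry for this queue */

    mutex_enter(rxq->rxq_lock);
    if (!stopping)
        wm_rxeof(rxq);
    mutex_exit(rxq->rxq_lock);

    enable_intr();              /* let the queue interrupt again */

    return 1;                   /* handled */
}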