Mon Nov 2 09:21:50 2020 UTC ()
Workaround for ihphy and atphy (ICH*/PCH*, 82580 and I350).

These PHYs stop DMA while the link is down, which causes device timeouts.
Fixes PR kern/40981.

Reviewed and tested by msaitoh@n.o, thanks.

XXX pullup-[89]


(knakahara)
diff -r1.694 -r1.695 src/sys/dev/pci/if_wm.c

cvs diff -r1.694 -r1.695 src/sys/dev/pci/if_wm.c (expand / switch to unified diff)

--- src/sys/dev/pci/if_wm.c 2020/10/30 06:29:47 1.694
+++ src/sys/dev/pci/if_wm.c 2020/11/02 09:21:50 1.695
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: if_wm.c,v 1.694 2020/10/30 06:29:47 msaitoh Exp $ */ 1/* $NetBSD: if_wm.c,v 1.695 2020/11/02 09:21:50 knakahara Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc. 4 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc. 7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
@@ -72,27 +72,27 @@ @@ -72,27 +72,27 @@
72 * 72 *
73 * TODO (in order of importance): 73 * TODO (in order of importance):
74 * 74 *
75 * - Check XXX'ed comments 75 * - Check XXX'ed comments
76 * - TX Multi queue improvement (refine queue selection logic) 76 * - TX Multi queue improvement (refine queue selection logic)
77 * - Split header buffer for newer descriptors 77 * - Split header buffer for newer descriptors
78 * - EEE (Energy Efficiency Ethernet) for I354 78 * - EEE (Energy Efficiency Ethernet) for I354
79 * - Virtual Function 79 * - Virtual Function
80 * - Set LED correctly (based on contents in EEPROM) 80 * - Set LED correctly (based on contents in EEPROM)
81 * - Rework how parameters are loaded from the EEPROM. 81 * - Rework how parameters are loaded from the EEPROM.
82 */ 82 */
83 83
84#include <sys/cdefs.h> 84#include <sys/cdefs.h>
85__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.694 2020/10/30 06:29:47 msaitoh Exp $"); 85__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.695 2020/11/02 09:21:50 knakahara Exp $");
86 86
87#ifdef _KERNEL_OPT 87#ifdef _KERNEL_OPT
88#include "opt_net_mpsafe.h" 88#include "opt_net_mpsafe.h"
89#include "opt_if_wm.h" 89#include "opt_if_wm.h"
90#endif 90#endif
91 91
92#include <sys/param.h> 92#include <sys/param.h>
93#include <sys/systm.h> 93#include <sys/systm.h>
94#include <sys/callout.h> 94#include <sys/callout.h>
95#include <sys/mbuf.h> 95#include <sys/mbuf.h>
96#include <sys/malloc.h> 96#include <sys/malloc.h>
97#include <sys/kmem.h> 97#include <sys/kmem.h>
98#include <sys/kernel.h> 98#include <sys/kernel.h>
@@ -374,27 +374,28 @@ struct wm_txqueue { @@ -374,27 +374,28 @@ struct wm_txqueue {
374 int txq_fifo_stall; /* Tx FIFO is stalled */ 374 int txq_fifo_stall; /* Tx FIFO is stalled */
375 375
376 /* 376 /*
377 * When ncpu > number of Tx queues, a Tx queue is shared by multiple 377 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
378 * CPUs. This queue intermediate them without block. 378 * CPUs. This queue intermediate them without block.
379 */ 379 */
380 pcq_t *txq_interq; 380 pcq_t *txq_interq;
381 381
382 /* 382 /*
383 * NEWQUEUE devices must use not ifp->if_flags but txq->txq_flags 383 * NEWQUEUE devices must use not ifp->if_flags but txq->txq_flags
384 * to manage Tx H/W queue's busy flag. 384 * to manage Tx H/W queue's busy flag.
385 */ 385 */
386 int txq_flags; /* flags for H/W queue, see below */ 386 int txq_flags; /* flags for H/W queue, see below */
387#define WM_TXQ_NO_SPACE 0x1 387#define WM_TXQ_NO_SPACE 0x1
 388#define WM_TXQ_LINKDOWN_DISCARD 0x2
388 389
389 bool txq_stopping; 390 bool txq_stopping;
390 391
391 bool txq_sending; 392 bool txq_sending;
392 time_t txq_lastsent; 393 time_t txq_lastsent;
393 394
394 /* Checksum flags used for previous packet */ 395 /* Checksum flags used for previous packet */
395 uint32_t txq_last_hw_cmd; 396 uint32_t txq_last_hw_cmd;
396 uint8_t txq_last_hw_fields; 397 uint8_t txq_last_hw_fields;
397 uint16_t txq_last_hw_ipcs; 398 uint16_t txq_last_hw_ipcs;
398 uint16_t txq_last_hw_tucs; 399 uint16_t txq_last_hw_tucs;
399 400
400 uint32_t txq_packets; /* for AIM */ 401 uint32_t txq_packets; /* for AIM */
@@ -1034,26 +1035,29 @@ static int wm_lv_phy_workarounds_ich8lan @@ -1034,26 +1035,29 @@ static int wm_lv_phy_workarounds_ich8lan
1034static int wm_k1_workaround_lpt_lp(struct wm_softc *, bool); 1035static int wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
1035static int wm_k1_gig_workaround_hv(struct wm_softc *, int); 1036static int wm_k1_gig_workaround_hv(struct wm_softc *, int);
1036static int wm_k1_workaround_lv(struct wm_softc *); 1037static int wm_k1_workaround_lv(struct wm_softc *);
1037static int wm_link_stall_workaround_hv(struct wm_softc *); 1038static int wm_link_stall_workaround_hv(struct wm_softc *);
1038static int wm_set_mdio_slow_mode_hv(struct wm_softc *); 1039static int wm_set_mdio_slow_mode_hv(struct wm_softc *);
1039static void wm_configure_k1_ich8lan(struct wm_softc *, int); 1040static void wm_configure_k1_ich8lan(struct wm_softc *, int);
1040static void wm_reset_init_script_82575(struct wm_softc *); 1041static void wm_reset_init_script_82575(struct wm_softc *);
1041static void wm_reset_mdicnfg_82580(struct wm_softc *); 1042static void wm_reset_mdicnfg_82580(struct wm_softc *);
1042static bool wm_phy_is_accessible_pchlan(struct wm_softc *); 1043static bool wm_phy_is_accessible_pchlan(struct wm_softc *);
1043static void wm_toggle_lanphypc_pch_lpt(struct wm_softc *); 1044static void wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
1044static int wm_platform_pm_pch_lpt(struct wm_softc *, bool); 1045static int wm_platform_pm_pch_lpt(struct wm_softc *, bool);
1045static int wm_pll_workaround_i210(struct wm_softc *); 1046static int wm_pll_workaround_i210(struct wm_softc *);
1046static void wm_legacy_irq_quirk_spt(struct wm_softc *); 1047static void wm_legacy_irq_quirk_spt(struct wm_softc *);
 1048static bool wm_phy_need_linkdown_discard(struct wm_softc *);
 1049static void wm_set_linkdown_discard(struct wm_softc *);
 1050static void wm_clear_linkdown_discard(struct wm_softc *);
1047 1051
1048#ifdef WM_DEBUG 1052#ifdef WM_DEBUG
1049static int wm_sysctl_debug(SYSCTLFN_PROTO); 1053static int wm_sysctl_debug(SYSCTLFN_PROTO);
1050#endif 1054#endif
1051 1055
1052CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc), 1056CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
1053 wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN); 1057 wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
1054 1058
1055/* 1059/*
1056 * Devices supported by this driver. 1060 * Devices supported by this driver.
1057 */ 1061 */
1058static const struct wm_product { 1062static const struct wm_product {
1059 pci_vendor_id_t wmp_vendor; 1063 pci_vendor_id_t wmp_vendor;
@@ -3090,26 +3094,29 @@ alloc_retry: @@ -3090,26 +3094,29 @@ alloc_retry:
3090 NULL, xname, "tx_xoff"); 3094 NULL, xname, "tx_xoff");
3091 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC, 3095 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
3092 NULL, xname, "tx_xon"); 3096 NULL, xname, "tx_xon");
3093 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC, 3097 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
3094 NULL, xname, "rx_xoff"); 3098 NULL, xname, "rx_xoff");
3095 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC, 3099 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
3096 NULL, xname, "rx_xon"); 3100 NULL, xname, "rx_xon");
3097 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC, 3101 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
3098 NULL, xname, "rx_macctl"); 3102 NULL, xname, "rx_macctl");
3099#endif /* WM_EVENT_COUNTERS */ 3103#endif /* WM_EVENT_COUNTERS */
3100 3104
3101 sc->sc_txrx_use_workqueue = false; 3105 sc->sc_txrx_use_workqueue = false;
3102 3106
 3107 if (wm_phy_need_linkdown_discard(sc))
 3108 wm_set_linkdown_discard(sc);
 3109
3103 wm_init_sysctls(sc); 3110 wm_init_sysctls(sc);
3104 3111
3105 if (pmf_device_register(self, wm_suspend, wm_resume)) 3112 if (pmf_device_register(self, wm_suspend, wm_resume))
3106 pmf_class_network_register(self, ifp); 3113 pmf_class_network_register(self, ifp);
3107 else 3114 else
3108 aprint_error_dev(self, "couldn't establish power handler\n"); 3115 aprint_error_dev(self, "couldn't establish power handler\n");
3109 3116
3110 sc->sc_flags |= WM_F_ATTACHED; 3117 sc->sc_flags |= WM_F_ATTACHED;
3111out: 3118out:
3112 return; 3119 return;
3113} 3120}
3114 3121
3115/* The detach function (ca_detach) */ 3122/* The detach function (ca_detach) */
@@ -3473,26 +3480,69 @@ ec: @@ -3473,26 +3480,69 @@ ec:
3473 } 3480 }
3474 3481
3475 /* ec related updates */ 3482 /* ec related updates */
3476 wm_set_eee(sc); 3483 wm_set_eee(sc);
3477 3484
3478out: 3485out:
3479 if (needreset) 3486 if (needreset)
3480 rc = ENETRESET; 3487 rc = ENETRESET;
3481 WM_CORE_UNLOCK(sc); 3488 WM_CORE_UNLOCK(sc);
3482 3489
3483 return rc; 3490 return rc;
3484} 3491}
3485 3492
 3493static bool
 3494wm_phy_need_linkdown_discard(struct wm_softc *sc)
 3495{
 3496
 3497 switch(sc->sc_phytype) {
 3498 case WMPHY_82577: /* ihphy */
 3499 case WMPHY_82578: /* atphy */
 3500 case WMPHY_82579: /* ihphy */
 3501 case WMPHY_I217: /* ihphy */
 3502 case WMPHY_82580: /* ihphy */
 3503 case WMPHY_I350: /* ihphy */
 3504 return true;
 3505 default:
 3506 return false;
 3507 }
 3508}
 3509
 3510static void
 3511wm_set_linkdown_discard(struct wm_softc *sc)
 3512{
 3513
 3514 for (int i = 0; i < sc->sc_nqueues; i++) {
 3515 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
 3516
 3517 mutex_enter(txq->txq_lock);
 3518 txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
 3519 mutex_exit(txq->txq_lock);
 3520 }
 3521}
 3522
 3523static void
 3524wm_clear_linkdown_discard(struct wm_softc *sc)
 3525{
 3526
 3527 for (int i = 0; i < sc->sc_nqueues; i++) {
 3528 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
 3529
 3530 mutex_enter(txq->txq_lock);
 3531 txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
 3532 mutex_exit(txq->txq_lock);
 3533 }
 3534}
 3535
3486/* 3536/*
3487 * wm_ioctl: [ifnet interface function] 3537 * wm_ioctl: [ifnet interface function]
3488 * 3538 *
3489 * Handle control requests from the operator. 3539 * Handle control requests from the operator.
3490 */ 3540 */
3491static int 3541static int
3492wm_ioctl(struct ifnet *ifp, u_long cmd, void *data) 3542wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3493{ 3543{
3494 struct wm_softc *sc = ifp->if_softc; 3544 struct wm_softc *sc = ifp->if_softc;
3495 struct ifreq *ifr = (struct ifreq *)data; 3545 struct ifreq *ifr = (struct ifreq *)data;
3496 struct ifaddr *ifa = (struct ifaddr *)data; 3546 struct ifaddr *ifa = (struct ifaddr *)data;
3497 struct sockaddr_dl *sdl; 3547 struct sockaddr_dl *sdl;
3498 int s, error; 3548 int s, error;
@@ -3510,42 +3560,57 @@ wm_ioctl(struct ifnet *ifp, u_long cmd,  @@ -3510,42 +3560,57 @@ wm_ioctl(struct ifnet *ifp, u_long cmd,
3510 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 3560 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3511 (ifr->ifr_media & IFM_FDX) == 0) 3561 (ifr->ifr_media & IFM_FDX) == 0)
3512 ifr->ifr_media &= ~IFM_ETH_FMASK; 3562 ifr->ifr_media &= ~IFM_ETH_FMASK;
3513 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 3563 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3514 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 3564 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3515 /* We can do both TXPAUSE and RXPAUSE. */ 3565 /* We can do both TXPAUSE and RXPAUSE. */
3516 ifr->ifr_media |= 3566 ifr->ifr_media |=
3517 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 3567 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3518 } 3568 }
3519 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK; 3569 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3520 } 3570 }
3521 WM_CORE_UNLOCK(sc); 3571 WM_CORE_UNLOCK(sc);
3522 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd); 3572 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
 3573 if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
 3574 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE)
 3575 wm_set_linkdown_discard(sc);
 3576 else
 3577 wm_clear_linkdown_discard(sc);
 3578 }
3523 break; 3579 break;
3524 case SIOCINITIFADDR: 3580 case SIOCINITIFADDR:
3525 WM_CORE_LOCK(sc); 3581 WM_CORE_LOCK(sc);
3526 if (ifa->ifa_addr->sa_family == AF_LINK) { 3582 if (ifa->ifa_addr->sa_family == AF_LINK) {
3527 sdl = satosdl(ifp->if_dl->ifa_addr); 3583 sdl = satosdl(ifp->if_dl->ifa_addr);
3528 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len, 3584 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
3529 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen); 3585 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
3530 /* Unicast address is the first multicast entry */ 3586 /* Unicast address is the first multicast entry */
3531 wm_set_filter(sc); 3587 wm_set_filter(sc);
3532 error = 0; 3588 error = 0;
3533 WM_CORE_UNLOCK(sc); 3589 WM_CORE_UNLOCK(sc);
3534 break; 3590 break;
3535 } 3591 }
3536 WM_CORE_UNLOCK(sc); 3592 WM_CORE_UNLOCK(sc);
 3593 if (((ifp->if_flags & IFF_UP) == 0) && wm_phy_need_linkdown_discard(sc))
 3594 wm_clear_linkdown_discard(sc);
3537 /*FALLTHROUGH*/ 3595 /*FALLTHROUGH*/
3538 default: 3596 default:
 3597 if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
 3598 if (((ifp->if_flags & IFF_UP) == 0) && ((ifr->ifr_flags & IFF_UP) != 0)) {
 3599 wm_clear_linkdown_discard(sc);
 3600 } else if (((ifp->if_flags & IFF_UP) != 0) && ((ifr->ifr_flags & IFF_UP) == 0)) {
 3601 wm_set_linkdown_discard(sc);
 3602 }
 3603 }
3539#ifdef WM_MPSAFE 3604#ifdef WM_MPSAFE
3540 s = splnet(); 3605 s = splnet();
3541#endif 3606#endif
3542 /* It may call wm_start, so unlock here */ 3607 /* It may call wm_start, so unlock here */
3543 error = ether_ioctl(ifp, cmd, data); 3608 error = ether_ioctl(ifp, cmd, data);
3544#ifdef WM_MPSAFE 3609#ifdef WM_MPSAFE
3545 splx(s); 3610 splx(s);
3546#endif 3611#endif
3547 if (error != ENETRESET) 3612 if (error != ENETRESET)
3548 break; 3613 break;
3549 3614
3550 error = 0; 3615 error = 0;
3551 3616
@@ -7664,26 +7729,36 @@ static inline int @@ -7664,26 +7729,36 @@ static inline int
7664wm_select_txqueue(struct ifnet *ifp, struct mbuf *m) 7729wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
7665{ 7730{
7666 struct wm_softc *sc = ifp->if_softc; 7731 struct wm_softc *sc = ifp->if_softc;
7667 u_int cpuid = cpu_index(curcpu()); 7732 u_int cpuid = cpu_index(curcpu());
7668 7733
7669 /* 7734 /*
7670 * Currently, simple distribute strategy. 7735 * Currently, simple distribute strategy.
7671 * TODO: 7736 * TODO:
7672 * distribute by flowid(RSS has value). 7737 * distribute by flowid(RSS has value).
7673 */ 7738 */
7674 return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues; 7739 return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
7675} 7740}
7676 7741
 7742static inline bool
 7743wm_linkdown_discard(struct wm_txqueue *txq)
 7744{
 7745
 7746 if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
 7747 return true;
 7748
 7749 return false;
 7750}
 7751
7677/* 7752/*
7678 * wm_start: [ifnet interface function] 7753 * wm_start: [ifnet interface function]
7679 * 7754 *
7680 * Start packet transmission on the interface. 7755 * Start packet transmission on the interface.
7681 */ 7756 */
7682static void 7757static void
7683wm_start(struct ifnet *ifp) 7758wm_start(struct ifnet *ifp)
7684{ 7759{
7685 struct wm_softc *sc = ifp->if_softc; 7760 struct wm_softc *sc = ifp->if_softc;
7686 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq; 7761 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7687 7762
7688#ifdef WM_MPSAFE 7763#ifdef WM_MPSAFE
7689 KASSERT(if_is_mpsafe(ifp)); 7764 KASSERT(if_is_mpsafe(ifp));
@@ -7757,26 +7832,43 @@ wm_send_common_locked(struct ifnet *ifp, @@ -7757,26 +7832,43 @@ wm_send_common_locked(struct ifnet *ifp,
7757 bus_addr_t curaddr; 7832 bus_addr_t curaddr;
7758 bus_size_t seglen, curlen; 7833 bus_size_t seglen, curlen;
7759 uint32_t cksumcmd; 7834 uint32_t cksumcmd;
7760 uint8_t cksumfields; 7835 uint8_t cksumfields;
7761 bool remap = true; 7836 bool remap = true;
7762 7837
7763 KASSERT(mutex_owned(txq->txq_lock)); 7838 KASSERT(mutex_owned(txq->txq_lock));
7764 7839
7765 if ((ifp->if_flags & IFF_RUNNING) == 0) 7840 if ((ifp->if_flags & IFF_RUNNING) == 0)
7766 return; 7841 return;
7767 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0) 7842 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
7768 return; 7843 return;
7769 7844
 7845 if (__predict_false(wm_linkdown_discard(txq))) {
 7846 do {
 7847 if (is_transmit)
 7848 m0 = pcq_get(txq->txq_interq);
 7849 else
 7850 IFQ_DEQUEUE(&ifp->if_snd, m0);
 7851 /*
 7852 * increment successed packet counter as in the case
 7853 * which the packet is discarded by link down PHY.
 7854 */
 7855 if (m0 != NULL)
 7856 if_statinc(ifp, if_opackets);
 7857 m_freem(m0);
 7858 } while (m0 != NULL);
 7859 return;
 7860 }
 7861
7770 /* Remember the previous number of free descriptors. */ 7862 /* Remember the previous number of free descriptors. */
7771 ofree = txq->txq_free; 7863 ofree = txq->txq_free;
7772 7864
7773 /* 7865 /*
7774 * Loop through the send queue, setting up transmit descriptors 7866 * Loop through the send queue, setting up transmit descriptors
7775 * until we drain the queue, or use up all available transmit 7867 * until we drain the queue, or use up all available transmit
7776 * descriptors. 7868 * descriptors.
7777 */ 7869 */
7778 for (;;) { 7870 for (;;) {
7779 m0 = NULL; 7871 m0 = NULL;
7780 7872
7781 /* Get a work queue entry. */ 7873 /* Get a work queue entry. */
7782 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) { 7874 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
@@ -8357,26 +8449,43 @@ wm_nq_send_common_locked(struct ifnet *i @@ -8357,26 +8449,43 @@ wm_nq_send_common_locked(struct ifnet *i
8357 struct wm_txsoft *txs; 8449 struct wm_txsoft *txs;
8358 bus_dmamap_t dmamap; 8450 bus_dmamap_t dmamap;
8359 int error, nexttx, lasttx = -1, seg, segs_needed; 8451 int error, nexttx, lasttx = -1, seg, segs_needed;
8360 bool do_csum, sent; 8452 bool do_csum, sent;
8361 bool remap = true; 8453 bool remap = true;
8362 8454
8363 KASSERT(mutex_owned(txq->txq_lock)); 8455 KASSERT(mutex_owned(txq->txq_lock));
8364 8456
8365 if ((ifp->if_flags & IFF_RUNNING) == 0) 8457 if ((ifp->if_flags & IFF_RUNNING) == 0)
8366 return; 8458 return;
8367 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0) 8459 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
8368 return; 8460 return;
8369 8461
 8462 if (__predict_false(wm_linkdown_discard(txq))) {
 8463 do {
 8464 if (is_transmit)
 8465 m0 = pcq_get(txq->txq_interq);
 8466 else
 8467 IFQ_DEQUEUE(&ifp->if_snd, m0);
 8468 /*
 8469 * increment successed packet counter as in the case
 8470 * which the packet is discarded by link down PHY.
 8471 */
 8472 if (m0 != NULL)
 8473 if_statinc(ifp, if_opackets);
 8474 m_freem(m0);
 8475 } while (m0 != NULL);
 8476 return;
 8477 }
 8478
8370 sent = false; 8479 sent = false;
8371 8480
8372 /* 8481 /*
8373 * Loop through the send queue, setting up transmit descriptors 8482 * Loop through the send queue, setting up transmit descriptors
8374 * until we drain the queue, or use up all available transmit 8483 * until we drain the queue, or use up all available transmit
8375 * descriptors. 8484 * descriptors.
8376 */ 8485 */
8377 for (;;) { 8486 for (;;) {
8378 m0 = NULL; 8487 m0 = NULL;
8379 8488
8380 /* Get a work queue entry. */ 8489 /* Get a work queue entry. */
8381 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) { 8490 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
8382 wm_txeof(txq, UINT_MAX); 8491 wm_txeof(txq, UINT_MAX);
@@ -9224,29 +9333,33 @@ wm_linkintr_gmii(struct wm_softc *sc, ui @@ -9224,29 +9333,33 @@ wm_linkintr_gmii(struct wm_softc *sc, ui
9224 DPRINTF(sc, WM_DEBUG_LINK, 9333 DPRINTF(sc, WM_DEBUG_LINK,
9225 ("%s: LINK Receive sequence error\n", 9334 ("%s: LINK Receive sequence error\n",
9226 device_xname(dev))); 9335 device_xname(dev)));
9227 return; 9336 return;
9228 } 9337 }
9229 9338
9230 /* Link status changed */ 9339 /* Link status changed */
9231 status = CSR_READ(sc, WMREG_STATUS); 9340 status = CSR_READ(sc, WMREG_STATUS);
9232 link = status & STATUS_LU; 9341 link = status & STATUS_LU;
9233 if (link) { 9342 if (link) {
9234 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n", 9343 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
9235 device_xname(dev), 9344 device_xname(dev),
9236 (status & STATUS_FD) ? "FDX" : "HDX")); 9345 (status & STATUS_FD) ? "FDX" : "HDX"));
 9346 if (wm_phy_need_linkdown_discard(sc))
 9347 wm_clear_linkdown_discard(sc);
9237 } else { 9348 } else {
9238 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n", 9349 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
9239 device_xname(dev))); 9350 device_xname(dev)));
 9351 if (wm_phy_need_linkdown_discard(sc))
 9352 wm_set_linkdown_discard(sc);
9240 } 9353 }
9241 if ((sc->sc_type == WM_T_ICH8) && (link == false)) 9354 if ((sc->sc_type == WM_T_ICH8) && (link == false))
9242 wm_gig_downshift_workaround_ich8lan(sc); 9355 wm_gig_downshift_workaround_ich8lan(sc);
9243 9356
9244 if ((sc->sc_type == WM_T_ICH8) 9357 if ((sc->sc_type == WM_T_ICH8)
9245 && (sc->sc_phytype == WMPHY_IGP_3)) { 9358 && (sc->sc_phytype == WMPHY_IGP_3)) {
9246 wm_kmrn_lock_loss_workaround_ich8lan(sc); 9359 wm_kmrn_lock_loss_workaround_ich8lan(sc);
9247 } 9360 }
9248 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n", 9361 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
9249 device_xname(dev))); 9362 device_xname(dev)));
9250 mii_pollstat(&sc->sc_mii); 9363 mii_pollstat(&sc->sc_mii);
9251 if (sc->sc_type == WM_T_82543) { 9364 if (sc->sc_type == WM_T_82543) {
9252 int miistatus, active; 9365 int miistatus, active;