Wed Jan 29 06:44:27 2020 UTC
Adopt <net/if_stats.h>.


(thorpej)
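For reference, the pattern adopted here replaces direct writes to the ifnet counters (ifp->if_oerrors++, ifp->if_collisions += n, ...) with the accessors from <net/if_stats.h>. A minimal sketch of the single-counter forms, using only the calls that appear in the diff below; the helper function and its "failed" flag are invented for illustration, and the snippet only builds as part of a NetBSD kernel:

    #include <sys/param.h>
    #include <net/if.h>
    #include <net/if_stats.h>

    /*
     * Hypothetical helper: account one completed or one failed packet.
     * The if_statinc() calls are the same ones used throughout the diff.
     */
    static void
    example_count_packet(struct ifnet *ifp, int failed)
    {
            if (failed)
                    if_statinc(ifp, if_oerrors);   /* was ifp->if_oerrors++ */
            else
                    if_statinc(ifp, if_opackets);  /* was ifp->if_opackets++ */
    }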
diff -r1.662 -r1.663 src/sys/dev/pci/if_wm.c

cvs diff -r1.662 -r1.663 src/sys/dev/pci/if_wm.c

--- src/sys/dev/pci/if_wm.c 2020/01/24 02:50:41 1.662
+++ src/sys/dev/pci/if_wm.c 2020/01/29 06:44:27 1.663
@@ -1,14 +1,14 @@
-/* $NetBSD: if_wm.c,v 1.662 2020/01/24 02:50:41 knakahara Exp $ */
+/* $NetBSD: if_wm.c,v 1.663 2020/01/29 06:44:27 thorpej Exp $ */
 
 /*
  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
  * All rights reserved.
  *
  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
@@ -72,27 +72,27 @@
  *
  * TODO (in order of importance):
  *
  *	- Check XXX'ed comments
  *	- TX Multi queue improvement (refine queue selection logic)
  *	- Split header buffer for newer descriptors
  *	- EEE (Energy Efficiency Ethernet) for I354
  *	- Virtual Function
  *	- Set LED correctly (based on contents in EEPROM)
  *	- Rework how parameters are loaded from the EEPROM.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.662 2020/01/24 02:50:41 knakahara Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.663 2020/01/29 06:44:27 thorpej Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_net_mpsafe.h"
 #include "opt_if_wm.h"
 #endif
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/callout.h>
 #include <sys/mbuf.h>
 #include <sys/malloc.h>
 #include <sys/kmem.h>
 #include <sys/kernel.h>
@@ -3256,27 +3256,27 @@ wm_watchdog_txq_locked(struct ifnet *ifp
 
        if (txq->txq_free == WM_NTXDESC(txq)) {
                log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
                    device_xname(sc->sc_dev));
        } else {
 #ifdef WM_DEBUG
                int i, j;
                struct wm_txsoft *txs;
 #endif
                log(LOG_ERR,
                    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
                    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
                    txq->txq_next);
-               ifp->if_oerrors++;
+               if_statinc(ifp, if_oerrors);
 #ifdef WM_DEBUG
                for (i = txq->txq_sdirty; i != txq->txq_snext;
                    i = WM_NEXTTXS(txq, i)) {
                        txs = &txq->txq_soft[i];
                        printf("txs %d tx %d -> %d\n",
                            i, txs->txs_firstdesc, txs->txs_lastdesc);
                        for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
                                if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
                                        printf("\tdesc %d: 0x%" PRIx64 "\n", j,
                                            txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
                                        printf("\t %#08x%08x\n",
                                            txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
                                            txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
@@ -3321,45 +3321,47 @@ wm_tick(void *arg)
                splx(s);
 #endif
                return;
        }
 
        if (sc->sc_type >= WM_T_82542_2_1) {
                WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
                WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
                WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
                WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
                WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
        }
 
-       ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
-       ifp->if_ierrors += 0ULL /* ensure quad_t */
+       net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
+       if_statadd_ref(nsr, if_collisions, CSR_READ(sc, WMREG_COLC));
+       if_statadd_ref(nsr, if_ierrors, 0ULL /* ensure quad_t */
            + CSR_READ(sc, WMREG_CRCERRS)
            + CSR_READ(sc, WMREG_ALGNERRC)
            + CSR_READ(sc, WMREG_SYMERRC)
            + CSR_READ(sc, WMREG_RXERRC)
            + CSR_READ(sc, WMREG_SEC)
            + CSR_READ(sc, WMREG_CEXTERR)
-           + CSR_READ(sc, WMREG_RLEC);
+           + CSR_READ(sc, WMREG_RLEC));
        /*
         * WMREG_RNBC is incremented when there is no available buffers in host
         * memory. It does not mean the number of dropped packet. Because
         * ethernet controller can receive packets in such case if there is
         * space in phy's FIFO.
         *
         * If you want to know the nubmer of WMREG_RMBC, you should use such as
         * own EVCNT instead of if_iqdrops.
         */
-       ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
+       if_statadd_ref(nsr, if_iqdrops, CSR_READ(sc, WMREG_MPC));
+       IF_STAT_PUTREF(ifp);
 
        if (sc->sc_flags & WM_F_HAS_MII)
                mii_tick(&sc->sc_mii);
        else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
            && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
                wm_serdes_tick(sc);
        else
                wm_tbi_tick(sc);
 
        WM_CORE_UNLOCK(sc);
 
        wm_watchdog(ifp);
 
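Where several counters are touched together, as in wm_tick() above, the change batches them under one IF_STAT_GETREF()/IF_STAT_PUTREF() pair and uses the _ref variants. A rough sketch of that shape, assuming a NetBSD kernel context and the calls exactly as they appear in this revision; the function name and its arguments are made up, with plain uint64_t values standing in for the driver's CSR_READ() results:

    #include <sys/param.h>
    #include <net/if.h>
    #include <net/if_stats.h>

    /*
     * Hypothetical: fold a few already-read hardware error counters into
     * if_ierrors while holding a single stats reference, mirroring the
     * GETREF/PUTREF bracket in wm_tick().
     */
    static void
    example_harvest_errors(struct ifnet *ifp, uint64_t crc, uint64_t align,
        uint64_t symbol)
    {
            net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
            if_statadd_ref(nsr, if_ierrors, crc + align + symbol);
            IF_STAT_PUTREF(ifp);
    }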
@@ -5837,28 +5839,28 @@ wm_init_locked(struct ifnet *ifp)
 #ifdef __NO_STRICT_ALIGNMENT
        sc->sc_align_tweak = 0;
 #else
        if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
                sc->sc_align_tweak = 0;
        else
                sc->sc_align_tweak = 2;
 #endif /* __NO_STRICT_ALIGNMENT */
 
        /* Cancel any pending I/O. */
        wm_stop_locked(ifp, 0);
 
        /* Update statistics before reset */
-       ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
-       ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
+       if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
+           if_ierrors, CSR_READ(sc, WMREG_RXERRC));
 
        /* PCH_SPT hardware workaround */
        if (sc->sc_type == WM_T_PCH_SPT)
                wm_flush_desc_rings(sc);
 
        /* Reset the chip to a known state. */
        wm_reset(sc);
 
        /*
         * AMT based hardware can now take control from firmware
         * Do this after reset.
         */
        if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
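For exactly two counters, wm_init_locked() above uses the two-counter convenience form instead of taking a reference. A small sketch under the same assumptions; the function is hypothetical and the caller supplies the values the driver reads from WMREG_COLC/WMREG_RXERRC:

    #include <sys/param.h>
    #include <net/if.h>
    #include <net/if_stats.h>

    /* Hypothetical: charge collisions and input errors in one call. */
    static void
    example_account_before_reset(struct ifnet *ifp, uint64_t colc,
        uint64_t rxerrc)
    {
            if_statadd2(ifp, if_collisions, colc, if_ierrors, rxerrc);
    }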
@@ -7552,27 +7554,27 @@ wm_select_txqueue(struct ifnet *ifp, str
  *
  * Start packet transmission on the interface.
  */
 static void
 wm_start(struct ifnet *ifp)
 {
        struct wm_softc *sc = ifp->if_softc;
        struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
 
 #ifdef WM_MPSAFE
        KASSERT(if_is_mpsafe(ifp));
 #endif
        /*
-        * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
+        * if_obytes and if_omcasts are added in if_transmit()@if.c.
         */
 
        mutex_enter(txq->txq_lock);
        if (!txq->txq_stopping)
                wm_start_locked(ifp);
        mutex_exit(txq->txq_lock);
 }
 
 static void
 wm_start_locked(struct ifnet *ifp)
 {
        struct wm_softc *sc = ifp->if_softc;
        struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
@@ -7586,30 +7588,31 @@ wm_transmit(struct ifnet *ifp, struct mb
        int qid;
        struct wm_softc *sc = ifp->if_softc;
        struct wm_txqueue *txq;
 
        qid = wm_select_txqueue(ifp, m);
        txq = &sc->sc_queue[qid].wmq_txq;
 
        if (__predict_false(!pcq_put(txq->txq_interq, m))) {
                m_freem(m);
                WM_Q_EVCNT_INCR(txq, pcqdrop);
                return ENOBUFS;
        }
 
-       /* XXX NOMPSAFE: ifp->if_data should be percpu. */
-       ifp->if_obytes += m->m_pkthdr.len;
+       net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
+       if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
        if (m->m_flags & M_MCAST)
-               ifp->if_omcasts++;
+               if_statinc_ref(nsr, if_omcasts);
+       IF_STAT_PUTREF(ifp);
 
        if (mutex_tryenter(txq->txq_lock)) {
                if (!txq->txq_stopping)
                        wm_transmit_locked(ifp, txq);
                mutex_exit(txq->txq_lock);
        }
 
        return 0;
 }
 
 static void
 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
 {
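The transmit paths (wm_transmit() above and wm_nq_transmit() further down) keep the reference across a conditional, so the byte count and the optional multicast increment land under the same GETREF/PUTREF bracket. A sketch of just that accounting step; the function name is hypothetical, the mbuf fields are the ones used in the diff:

    #include <sys/param.h>
    #include <sys/mbuf.h>
    #include <net/if.h>
    #include <net/if_stats.h>

    /* Hypothetical: per-packet output accounting on the transmit path. */
    static void
    example_count_output(struct ifnet *ifp, struct mbuf *m)
    {
            net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
            if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
            if (m->m_flags & M_MCAST)
                    if_statinc_ref(nsr, if_omcasts);
            IF_STAT_PUTREF(ifp);
    }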
@@ -8157,27 +8160,27 @@ wm_nq_tx_offload(struct wm_softc *sc, st
  *
  * Start packet transmission on the interface for NEWQUEUE devices
  */
 static void
 wm_nq_start(struct ifnet *ifp)
 {
        struct wm_softc *sc = ifp->if_softc;
        struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
 
 #ifdef WM_MPSAFE
        KASSERT(if_is_mpsafe(ifp));
 #endif
        /*
-        * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
+        * if_obytes and if_omcasts are added in if_transmit()@if.c.
         */
 
        mutex_enter(txq->txq_lock);
        if (!txq->txq_stopping)
                wm_nq_start_locked(ifp);
        mutex_exit(txq->txq_lock);
 }
 
 static void
 wm_nq_start_locked(struct ifnet *ifp)
 {
        struct wm_softc *sc = ifp->if_softc;
        struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
@@ -8191,30 +8194,31 @@ wm_nq_transmit(struct ifnet *ifp, struct
        int qid;
        struct wm_softc *sc = ifp->if_softc;
        struct wm_txqueue *txq;
 
        qid = wm_select_txqueue(ifp, m);
        txq = &sc->sc_queue[qid].wmq_txq;
 
        if (__predict_false(!pcq_put(txq->txq_interq, m))) {
                m_freem(m);
                WM_Q_EVCNT_INCR(txq, pcqdrop);
                return ENOBUFS;
        }
 
-       /* XXX NOMPSAFE: ifp->if_data should be percpu. */
-       ifp->if_obytes += m->m_pkthdr.len;
+       net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
+       if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
        if (m->m_flags & M_MCAST)
-               ifp->if_omcasts++;
+               if_statinc_ref(nsr, if_omcasts);
+       IF_STAT_PUTREF(ifp);
 
        /*
         * The situations which this mutex_tryenter() fails at running time
         * are below two patterns.
         * (1) contention with interrupt handler(wm_txrxintr_msix())
         * (2) contention with deferred if_start softint(wm_handle_queue())
         * In the case of (1), the last packet enqueued to txq->txq_interq is
         * dequeued by wm_deferred_start_locked(). So, it does not get stuck.
         * In the case of (2), the last packet enqueued to txq->txq_interq is
         * also dequeued by wm_deferred_start_locked(). So, it does not get
         * stuck, either.
         */
        if (mutex_tryenter(txq->txq_lock)) {
@@ -8639,38 +8643,38 @@ wm_txeof(struct wm_txqueue *txq, u_int l
                 * 82574 and newer's document says the status field has neither
                 * EC (Excessive Collision) bit nor LC (Late Collision) bit
                 * (reserved). Refer "PCIe GbE Controller Open Source Software
                 * Developer's Manual", 82574 datasheet and newer.
                 *
                 * XXX I saw the LC bit was set on I218 even though the media
                 * was full duplex, so the bit might be used for other
                 * meaning ...(I have no document).
                 */
 
                if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
                    && ((sc->sc_type < WM_T_82574)
                        || (sc->sc_type == WM_T_80003))) {
-                       ifp->if_oerrors++;
+                       if_statinc(ifp, if_oerrors);
                        if (status & WTX_ST_LC)
                                log(LOG_WARNING, "%s: late collision\n",
                                    device_xname(sc->sc_dev));
                        else if (status & WTX_ST_EC) {
-                               ifp->if_collisions +=
-                                   TX_COLLISION_THRESHOLD + 1;
+                               if_statadd(ifp, if_collisions,
+                                   TX_COLLISION_THRESHOLD + 1);
                                log(LOG_WARNING, "%s: excessive collisions\n",
                                    device_xname(sc->sc_dev));
                        }
                } else
-                       ifp->if_opackets++;
+                       if_statinc(ifp, if_opackets);
 
                txq->txq_packets++;
                txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
 
                txq->txq_free += txs->txs_ndesc;
                bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
                    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
                m_freem(txs->txs_mbuf);
                txs->txs_mbuf = NULL;
        }
 
        /* Update the dirty transmit buffer pointer. */
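wm_txeof() above also shows the plain if_statadd() form for a one-off addition outside any GETREF/PUTREF bracket. A last sketch under the same kernel-build assumption; EXAMPLE_COLLISION_THRESHOLD is a stand-in constant, not the driver's TX_COLLISION_THRESHOLD:

    #include <sys/param.h>
    #include <net/if.h>
    #include <net/if_stats.h>

    /* Stand-in for the driver's collision threshold. */
    #define EXAMPLE_COLLISION_THRESHOLD     15

    /* Hypothetical: charge an excessive-collision drop to if_collisions. */
    static void
    example_count_excessive_collisions(struct ifnet *ifp)
    {
            if_statadd(ifp, if_collisions, EXAMPLE_COLLISION_THRESHOLD + 1);
    }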
@@ -8972,27 +8976,27 @@ wm_rxeof(struct wm_rxqueue *rxq, u_int l
 
                m = rxs->rxs_mbuf;
 
                /*
                 * Add a new receive buffer to the ring, unless of
                 * course the length is zero. Treat the latter as a
                 * failed mapping.
                 */
                if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
                        /*
                         * Failed, throw away what we've done so
                         * far, and discard the rest of the packet.
                         */
-                       ifp->if_ierrors++;
+                       if_statinc(ifp, if_ierrors);
                        bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
                            rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
                        wm_init_rxdesc(rxq, i);
                        if (!wm_rxdesc_is_eop(rxq, status))
                                rxq->rxq_discard = 1;
                        if (rxq->rxq_head != NULL)
                                m_freem(rxq->rxq_head);
                        WM_RXCHAIN_RESET(rxq);
                        DPRINTF(WM_DEBUG_RX,
                            ("%s: RX: Rx buffer allocation failed, "
                            "dropping packet%s\n", device_xname(sc->sc_dev),
                            rxq->rxq_discard ? " (discard)" : ""));
                        continue;