| @@ -1,14 +1,14 @@ | | | @@ -1,14 +1,14 @@ |
1 | /* $NetBSD: if_wm.c,v 1.508.4.54 2024/02/03 12:04:06 martin Exp $ */ | | 1 | /* $NetBSD: if_wm.c,v 1.508.4.55 2024/02/29 10:46:27 martin Exp $ */ |
2 | | | 2 | |
3 | /* | | 3 | /* |
4 | * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc. | | 4 | * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * Written by Jason R. Thorpe for Wasabi Systems, Inc. | | 7 | * Written by Jason R. Thorpe for Wasabi Systems, Inc. |
8 | * | | 8 | * |
9 | * Redistribution and use in source and binary forms, with or without | | 9 | * Redistribution and use in source and binary forms, with or without |
10 | * modification, are permitted provided that the following conditions | | 10 | * modification, are permitted provided that the following conditions |
11 | * are met: | | 11 | * are met: |
12 | * 1. Redistributions of source code must retain the above copyright | | 12 | * 1. Redistributions of source code must retain the above copyright |
13 | * notice, this list of conditions and the following disclaimer. | | 13 | * notice, this list of conditions and the following disclaimer. |
14 | * 2. Redistributions in binary form must reproduce the above copyright | | 14 | * 2. Redistributions in binary form must reproduce the above copyright |
| @@ -72,27 +72,27 @@ | | | @@ -72,27 +72,27 @@ |
72 | * | | 72 | * |
73 | * TODO (in order of importance): | | 73 | * TODO (in order of importance): |
74 | * | | 74 | * |
75 | * - Check XXX'ed comments | | 75 | * - Check XXX'ed comments |
76 | * - TX Multi queue improvement (refine queue selection logic) | | 76 | * - TX Multi queue improvement (refine queue selection logic) |
77 | * - Split header buffer for newer descriptors | | 77 | * - Split header buffer for newer descriptors |
78 | * - EEE (Energy Efficiency Ethernet) | | 78 | * - EEE (Energy Efficiency Ethernet) |
79 | * - Virtual Function | | 79 | * - Virtual Function |
80 | * - Set LED correctly (based on contents in EEPROM) | | 80 | * - Set LED correctly (based on contents in EEPROM) |
81 | * - Rework how parameters are loaded from the EEPROM. | | 81 | * - Rework how parameters are loaded from the EEPROM. |
82 | */ | | 82 | */ |
83 | | | 83 | |
84 | #include <sys/cdefs.h> | | 84 | #include <sys/cdefs.h> |
85 | __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.508.4.54 2024/02/03 12:04:06 martin Exp $"); | | 85 | __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.508.4.55 2024/02/29 10:46:27 martin Exp $"); |
86 | | | 86 | |
87 | #ifdef _KERNEL_OPT | | 87 | #ifdef _KERNEL_OPT |
88 | #include "opt_net_mpsafe.h" | | 88 | #include "opt_net_mpsafe.h" |
89 | #include "opt_if_wm.h" | | 89 | #include "opt_if_wm.h" |
90 | #endif | | 90 | #endif |
91 | | | 91 | |
92 | #include <sys/param.h> | | 92 | #include <sys/param.h> |
93 | #include <sys/callout.h> | | 93 | #include <sys/callout.h> |
94 | #include <sys/cpu.h> | | 94 | #include <sys/cpu.h> |
95 | #include <sys/device.h> | | 95 | #include <sys/device.h> |
96 | #include <sys/errno.h> | | 96 | #include <sys/errno.h> |
97 | #include <sys/interrupt.h> | | 97 | #include <sys/interrupt.h> |
98 | #include <sys/ioctl.h> | | 98 | #include <sys/ioctl.h> |
| @@ -459,29 +459,29 @@ struct wm_rxqueue { | | | @@ -459,29 +459,29 @@ struct wm_rxqueue { |
459 | int rxq_len; | | 459 | int rxq_len; |
460 | struct mbuf *rxq_head; | | 460 | struct mbuf *rxq_head; |
461 | struct mbuf *rxq_tail; | | 461 | struct mbuf *rxq_tail; |
462 | struct mbuf **rxq_tailp; | | 462 | struct mbuf **rxq_tailp; |
463 | | | 463 | |
464 | bool rxq_stopping; | | 464 | bool rxq_stopping; |
465 | | | 465 | |
466 | uint32_t rxq_packets; /* for AIM */ | | 466 | uint32_t rxq_packets; /* for AIM */ |
467 | uint32_t rxq_bytes; /* for AIM */ | | 467 | uint32_t rxq_bytes; /* for AIM */ |
468 | #ifdef WM_EVENT_COUNTERS | | 468 | #ifdef WM_EVENT_COUNTERS |
469 | /* RX event counters */ | | 469 | /* RX event counters */ |
470 | WM_Q_EVCNT_DEFINE(rxq, intr); /* Interrupts */ | | 470 | WM_Q_EVCNT_DEFINE(rxq, intr); /* Interrupts */ |
471 | WM_Q_EVCNT_DEFINE(rxq, defer); /* Rx deferred processing */ | | 471 | WM_Q_EVCNT_DEFINE(rxq, defer); /* Rx deferred processing */ |
472 | | | | |
473 | WM_Q_EVCNT_DEFINE(rxq, ipsum); /* IP checksums checked */ | | 472 | WM_Q_EVCNT_DEFINE(rxq, ipsum); /* IP checksums checked */ |
474 | WM_Q_EVCNT_DEFINE(rxq, tusum); /* TCP/UDP cksums checked */ | | 473 | WM_Q_EVCNT_DEFINE(rxq, tusum); /* TCP/UDP cksums checked */ |
| | | 474 | WM_Q_EVCNT_DEFINE(rxq, qdrop); /* Rx queue drop packet */ |
475 | #endif | | 475 | #endif |
476 | }; | | 476 | }; |
477 | | | 477 | |
478 | struct wm_queue { | | 478 | struct wm_queue { |
479 | int wmq_id; /* index of TX/RX queues */ | | 479 | int wmq_id; /* index of TX/RX queues */ |
480 | int wmq_intr_idx; /* index of MSI-X tables */ | | 480 | int wmq_intr_idx; /* index of MSI-X tables */ |
481 | | | 481 | |
482 | uint32_t wmq_itr; /* interrupt interval per queue. */ | | 482 | uint32_t wmq_itr; /* interrupt interval per queue. */ |
483 | bool wmq_set_itr; | | 483 | bool wmq_set_itr; |
484 | | | 484 | |
485 | struct wm_txqueue wmq_txq; | | 485 | struct wm_txqueue wmq_txq; |
486 | struct wm_rxqueue wmq_rxq; | | 486 | struct wm_rxqueue wmq_rxq; |
487 | char sysctlname[32]; /* Name for sysctl */ | | 487 | char sysctlname[32]; /* Name for sysctl */ |
| @@ -2683,26 +2683,30 @@ alloc_retry: | | | @@ -2683,26 +2683,30 @@ alloc_retry: |
2683 | case WM_T_PCH_TGP: | | 2683 | case WM_T_PCH_TGP: |
2684 | apme_mask = WUC_APME; | | 2684 | apme_mask = WUC_APME; |
2685 | eeprom_data = CSR_READ(sc, WMREG_WUC); | | 2685 | eeprom_data = CSR_READ(sc, WMREG_WUC); |
2686 | if ((eeprom_data & apme_mask) != 0) | | 2686 | if ((eeprom_data & apme_mask) != 0) |
2687 | sc->sc_flags |= WM_F_WOL; | | 2687 | sc->sc_flags |= WM_F_WOL; |
2688 | break; | | 2688 | break; |
2689 | default: | | 2689 | default: |
2690 | break; | | 2690 | break; |
2691 | } | | 2691 | } |
2692 | | | 2692 | |
2693 | /* Reset the chip to a known state. */ | | 2693 | /* Reset the chip to a known state. */ |
2694 | wm_reset(sc); | | 2694 | wm_reset(sc); |
2695 | | | 2695 | |
| | | 2696 | /* sc->sc_pba is set in wm_reset(). */ |
| | | 2697 | aprint_verbose_dev(sc->sc_dev, "RX packet buffer size: %uKB\n", |
| | | 2698 | sc->sc_pba); |
| | | 2699 | |
2696 | /* | | 2700 | /* |
2697 | * Check for I21[01] PLL workaround. | | 2701 | * Check for I21[01] PLL workaround. |
2698 | * | | 2702 | * |
2699 | * Three cases: | | 2703 | * Three cases: |
2700 | * a) Chip is I211. | | 2704 | * a) Chip is I211. |
2701 | * b) Chip is I210 and it uses INVM (not FLASH). | | 2705 | * b) Chip is I210 and it uses INVM (not FLASH). |
2702 | * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25 | | 2706 | * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25 |
2703 | */ | | 2707 | */ |
2704 | if (sc->sc_type == WM_T_I211) | | 2708 | if (sc->sc_type == WM_T_I211) |
2705 | sc->sc_flags |= WM_F_PLL_WA_I210; | | 2709 | sc->sc_flags |= WM_F_PLL_WA_I210; |
2706 | if (sc->sc_type == WM_T_I210) { | | 2710 | if (sc->sc_type == WM_T_I210) { |
2707 | if (!wm_nvm_flash_presence_i210(sc)) | | 2711 | if (!wm_nvm_flash_presence_i210(sc)) |
2708 | sc->sc_flags |= WM_F_PLL_WA_I210; | | 2712 | sc->sc_flags |= WM_F_PLL_WA_I210; |
| @@ -6516,26 +6520,27 @@ teardown: | | | @@ -6516,26 +6520,27 @@ teardown: |
6516 | sysctl_teardown(log); | | 6520 | sysctl_teardown(log); |
6517 | err: | | 6521 | err: |
6518 | sc->sc_sysctllog = NULL; | | 6522 | sc->sc_sysctllog = NULL; |
6519 | device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n", | | 6523 | device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n", |
6520 | __func__, rv); | | 6524 | __func__, rv); |
6521 | } | | 6525 | } |
6522 | | | 6526 | |
6523 | static void | | 6527 | static void |
6524 | wm_update_stats(struct wm_softc *sc) | | 6528 | wm_update_stats(struct wm_softc *sc) |
6525 | { | | 6529 | { |
6526 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; | | 6530 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
6527 | uint64_t crcerrs, algnerrc, symerrc, mpc, colc, sec, rlec, rxerrc, | | 6531 | uint64_t crcerrs, algnerrc, symerrc, mpc, colc, sec, rlec, rxerrc, |
6528 | cexterr; | | 6532 | cexterr; |
| | | 6533 | uint64_t total_qdrop = 0; |
6529 | | | 6534 | |
6530 | crcerrs = CSR_READ(sc, WMREG_CRCERRS); | | 6535 | crcerrs = CSR_READ(sc, WMREG_CRCERRS); |
6531 | symerrc = CSR_READ(sc, WMREG_SYMERRC); | | 6536 | symerrc = CSR_READ(sc, WMREG_SYMERRC); |
6532 | mpc = CSR_READ(sc, WMREG_MPC); | | 6537 | mpc = CSR_READ(sc, WMREG_MPC); |
6533 | colc = CSR_READ(sc, WMREG_COLC); | | 6538 | colc = CSR_READ(sc, WMREG_COLC); |
6534 | sec = CSR_READ(sc, WMREG_SEC); | | 6539 | sec = CSR_READ(sc, WMREG_SEC); |
6535 | rlec = CSR_READ(sc, WMREG_RLEC); | | 6540 | rlec = CSR_READ(sc, WMREG_RLEC); |
6536 | | | 6541 | |
6537 | WM_EVCNT_ADD(&sc->sc_ev_crcerrs, crcerrs); | | 6542 | WM_EVCNT_ADD(&sc->sc_ev_crcerrs, crcerrs); |
6538 | WM_EVCNT_ADD(&sc->sc_ev_symerrc, symerrc); | | 6543 | WM_EVCNT_ADD(&sc->sc_ev_symerrc, symerrc); |
6539 | WM_EVCNT_ADD(&sc->sc_ev_mpc, mpc); | | 6544 | WM_EVCNT_ADD(&sc->sc_ev_mpc, mpc); |
6540 | WM_EVCNT_ADD(&sc->sc_ev_colc, colc); | | 6545 | WM_EVCNT_ADD(&sc->sc_ev_colc, colc); |
6541 | WM_EVCNT_ADD(&sc->sc_ev_sec, sec); | | 6546 | WM_EVCNT_ADD(&sc->sc_ev_sec, sec); |
| @@ -6664,26 +6669,42 @@ wm_update_stats(struct wm_softc *sc) | | | @@ -6664,26 +6669,42 @@ wm_update_stats(struct wm_softc *sc) |
6664 | WM_EVCNT_ADD(&sc->sc_ev_debug4, CSR_READ(sc, WMREG_DEBUG4)); | | 6669 | WM_EVCNT_ADD(&sc->sc_ev_debug4, CSR_READ(sc, WMREG_DEBUG4)); |
6665 | WM_EVCNT_ADD(&sc->sc_ev_rxdmtc, CSR_READ(sc, WMREG_RXDMTC)); | | 6670 | WM_EVCNT_ADD(&sc->sc_ev_rxdmtc, CSR_READ(sc, WMREG_RXDMTC)); |
6666 | WM_EVCNT_ADD(&sc->sc_ev_htcbdpc, CSR_READ(sc, WMREG_HTCBDPC)); | | 6671 | WM_EVCNT_ADD(&sc->sc_ev_htcbdpc, CSR_READ(sc, WMREG_HTCBDPC)); |
6667 | | | 6672 | |
6668 | WM_EVCNT_ADD(&sc->sc_ev_hgorc, | | 6673 | WM_EVCNT_ADD(&sc->sc_ev_hgorc, |
6669 | CSR_READ(sc, WMREG_HGORCL) + | | 6674 | CSR_READ(sc, WMREG_HGORCL) + |
6670 | ((uint64_t)CSR_READ(sc, WMREG_HGORCH) << 32)); | | 6675 | ((uint64_t)CSR_READ(sc, WMREG_HGORCH) << 32)); |
6671 | WM_EVCNT_ADD(&sc->sc_ev_hgotc, | | 6676 | WM_EVCNT_ADD(&sc->sc_ev_hgotc, |
6672 | CSR_READ(sc, WMREG_HGOTCL) + | | 6677 | CSR_READ(sc, WMREG_HGOTCL) + |
6673 | ((uint64_t)CSR_READ(sc, WMREG_HGOTCH) << 32)); | | 6678 | ((uint64_t)CSR_READ(sc, WMREG_HGOTCH) << 32)); |
6674 | WM_EVCNT_ADD(&sc->sc_ev_lenerrs, CSR_READ(sc, WMREG_LENERRS)); | | 6679 | WM_EVCNT_ADD(&sc->sc_ev_lenerrs, CSR_READ(sc, WMREG_LENERRS)); |
6675 | WM_EVCNT_ADD(&sc->sc_ev_scvpc, CSR_READ(sc, WMREG_SCVPC)); | | 6680 | WM_EVCNT_ADD(&sc->sc_ev_scvpc, CSR_READ(sc, WMREG_SCVPC)); |
6676 | WM_EVCNT_ADD(&sc->sc_ev_hrmpc, CSR_READ(sc, WMREG_HRMPC)); | | 6681 | WM_EVCNT_ADD(&sc->sc_ev_hrmpc, CSR_READ(sc, WMREG_HRMPC)); |
| | | 6682 | #ifdef WM_EVENT_COUNTERS |
| | | 6683 | for (int i = 0; i < sc->sc_nqueues; i++) { |
| | | 6684 | struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq; |
| | | 6685 | uint32_t rqdpc; |
| | | 6686 | |
| | | 6687 | rqdpc = CSR_READ(sc, WMREG_RQDPC(i)); |
| | | 6688 | /* |
| | 6689 | * On I210 and newer devices, the RQDPC register is not |
| | 6690 | * cleared on read. |
| | | 6691 | */ |
| | | 6692 | if ((rqdpc != 0) && (sc->sc_type >= WM_T_I210)) |
| | | 6693 | CSR_WRITE(sc, WMREG_RQDPC(i), 0); |
| | | 6694 | WM_Q_EVCNT_ADD(rxq, qdrop, rqdpc); |
| | | 6695 | total_qdrop += rqdpc; |
| | | 6696 | } |
| | | 6697 | #endif |
6677 | } | | 6698 | } |
6678 | if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) { | | 6699 | if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) { |
6679 | WM_EVCNT_ADD(&sc->sc_ev_tlpic, CSR_READ(sc, WMREG_TLPIC)); | | 6700 | WM_EVCNT_ADD(&sc->sc_ev_tlpic, CSR_READ(sc, WMREG_TLPIC)); |
6680 | WM_EVCNT_ADD(&sc->sc_ev_rlpic, CSR_READ(sc, WMREG_RLPIC)); | | 6701 | WM_EVCNT_ADD(&sc->sc_ev_rlpic, CSR_READ(sc, WMREG_RLPIC)); |
6681 | if ((CSR_READ(sc, WMREG_MANC) & MANC_EN_BMC2OS) != 0) { | | 6702 | if ((CSR_READ(sc, WMREG_MANC) & MANC_EN_BMC2OS) != 0) { |
6682 | WM_EVCNT_ADD(&sc->sc_ev_b2ogprc, | | 6703 | WM_EVCNT_ADD(&sc->sc_ev_b2ogprc, |
6683 | CSR_READ(sc, WMREG_B2OGPRC)); | | 6704 | CSR_READ(sc, WMREG_B2OGPRC)); |
6684 | WM_EVCNT_ADD(&sc->sc_ev_o2bspc, | | 6705 | WM_EVCNT_ADD(&sc->sc_ev_o2bspc, |
6685 | CSR_READ(sc, WMREG_O2BSPC)); | | 6706 | CSR_READ(sc, WMREG_O2BSPC)); |
6686 | WM_EVCNT_ADD(&sc->sc_ev_b2ospc, | | 6707 | WM_EVCNT_ADD(&sc->sc_ev_b2ospc, |
6687 | CSR_READ(sc, WMREG_B2OSPC)); | | 6708 | CSR_READ(sc, WMREG_B2OSPC)); |
6688 | WM_EVCNT_ADD(&sc->sc_ev_o2bgptc, | | 6709 | WM_EVCNT_ADD(&sc->sc_ev_o2bgptc, |
6689 | CSR_READ(sc, WMREG_O2BGPTC)); | | 6710 | CSR_READ(sc, WMREG_O2BGPTC)); |
| @@ -6692,43 +6713,45 @@ wm_update_stats(struct wm_softc *sc) | | | @@ -6692,43 +6713,45 @@ wm_update_stats(struct wm_softc *sc) |
6692 | ifp->if_collisions += colc; | | 6713 | ifp->if_collisions += colc; |
6693 | ifp->if_ierrors += | | 6714 | ifp->if_ierrors += |
6694 | crcerrs + algnerrc + symerrc + rxerrc + sec + cexterr + rlec; | | 6715 | crcerrs + algnerrc + symerrc + rxerrc + sec + cexterr + rlec; |
6695 | | | 6716 | |
6696 | /* | | 6717 | /* |
6697 | * WMREG_RNBC is incremented when there are no available buffers in | | 6718 | * WMREG_RNBC is incremented when there are no available buffers in |
6698 | * host memory. It does not mean the number of dropped packets, because | | 6719 | * host memory. It does not mean the number of dropped packets, because |
6699 | * an Ethernet controller can receive packets in such case if there is | | 6720 | * an Ethernet controller can receive packets in such case if there is |
6700 | * space in the phy's FIFO. | | 6721 | * space in the phy's FIFO. |
6701 | * | | 6722 | * |
6702 | * If you want to know the number of WMREG_RNBC, you should use such as | | 6723 | * If you want to know the number of WMREG_RNBC, you should use such as |
6703 | * own EVCNT instead of if_iqdrops. | | 6724 | * own EVCNT instead of if_iqdrops. |
6704 | */ | | 6725 | */ |
6705 | ifp->if_iqdrops += mpc; | | 6726 | ifp->if_iqdrops += mpc + total_qdrop; |
6706 | } | | 6727 | } |
6707 | | | 6728 | |
6708 | void | | 6729 | void |
6709 | wm_clear_evcnt(struct wm_softc *sc) | | 6730 | wm_clear_evcnt(struct wm_softc *sc) |
6710 | { | | 6731 | { |
6711 | #ifdef WM_EVENT_COUNTERS | | 6732 | #ifdef WM_EVENT_COUNTERS |
6712 | int i; | | 6733 | int i; |
6713 | | | 6734 | |
6714 | /* RX queues */ | | 6735 | /* RX queues */ |
6715 | for (i = 0; i < sc->sc_nqueues; i++) { | | 6736 | for (i = 0; i < sc->sc_nqueues; i++) { |
6716 | struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq; | | 6737 | struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq; |
6717 | | | 6738 | |
6718 | WM_Q_EVCNT_STORE(rxq, intr, 0); | | 6739 | WM_Q_EVCNT_STORE(rxq, intr, 0); |
6719 | WM_Q_EVCNT_STORE(rxq, defer, 0); | | 6740 | WM_Q_EVCNT_STORE(rxq, defer, 0); |
6720 | WM_Q_EVCNT_STORE(rxq, ipsum, 0); | | 6741 | WM_Q_EVCNT_STORE(rxq, ipsum, 0); |
6721 | WM_Q_EVCNT_STORE(rxq, tusum, 0); | | 6742 | WM_Q_EVCNT_STORE(rxq, tusum, 0); |
| | | 6743 | if ((sc->sc_type >= WM_T_82575) && !WM_IS_ICHPCH(sc)) |
| | | 6744 | WM_Q_EVCNT_STORE(rxq, qdrop, 0); |
6722 | } | | 6745 | } |
6723 | | | 6746 | |
6724 | /* TX queues */ | | 6747 | /* TX queues */ |
6725 | for (i = 0; i < sc->sc_nqueues; i++) { | | 6748 | for (i = 0; i < sc->sc_nqueues; i++) { |
6726 | struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq; | | 6749 | struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq; |
6727 | int j; | | 6750 | int j; |
6728 | | | 6751 | |
6729 | WM_Q_EVCNT_STORE(txq, txsstall, 0); | | 6752 | WM_Q_EVCNT_STORE(txq, txsstall, 0); |
6730 | WM_Q_EVCNT_STORE(txq, txdstall, 0); | | 6753 | WM_Q_EVCNT_STORE(txq, txdstall, 0); |
6731 | WM_Q_EVCNT_STORE(txq, fifo_stall, 0); | | 6754 | WM_Q_EVCNT_STORE(txq, fifo_stall, 0); |
6732 | WM_Q_EVCNT_STORE(txq, txdw, 0); | | 6755 | WM_Q_EVCNT_STORE(txq, txdw, 0); |
6733 | WM_Q_EVCNT_STORE(txq, txqe, 0); | | 6756 | WM_Q_EVCNT_STORE(txq, txqe, 0); |
6734 | WM_Q_EVCNT_STORE(txq, ipsum, 0); | | 6757 | WM_Q_EVCNT_STORE(txq, ipsum, 0); |
| @@ -8042,29 +8065,30 @@ wm_alloc_txrx_queues(struct wm_softc *sc | | | @@ -8042,29 +8065,30 @@ wm_alloc_txrx_queues(struct wm_softc *sc |
8042 | break; | | 8065 | break; |
8043 | | | 8066 | |
8044 | error = wm_alloc_rx_buffer(sc, rxq); | | 8067 | error = wm_alloc_rx_buffer(sc, rxq); |
8045 | if (error) { | | 8068 | if (error) { |
8046 | wm_free_rx_descs(sc, rxq); | | 8069 | wm_free_rx_descs(sc, rxq); |
8047 | break; | | 8070 | break; |
8048 | } | | 8071 | } |
8049 | | | 8072 | |
8050 | #ifdef WM_EVENT_COUNTERS | | 8073 | #ifdef WM_EVENT_COUNTERS |
8051 | xname = device_xname(sc->sc_dev); | | 8074 | xname = device_xname(sc->sc_dev); |
8052 | | | 8075 | |
8053 | WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname); | | 8076 | WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname); |
8054 | WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname); | | 8077 | WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname); |
8055 | | | | |
8056 | WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname); | | 8078 | WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname); |
8057 | WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname); | | 8079 | WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname); |
| | | 8080 | if ((sc->sc_type >= WM_T_82575) && !WM_IS_ICHPCH(sc)) |
| | | 8081 | WM_Q_MISC_EVCNT_ATTACH(rxq, qdrop, rxq, i, xname); |
8058 | #endif /* WM_EVENT_COUNTERS */ | | 8082 | #endif /* WM_EVENT_COUNTERS */ |
8059 | | | 8083 | |
8060 | rx_done++; | | 8084 | rx_done++; |
8061 | } | | 8085 | } |
8062 | if (error) | | 8086 | if (error) |
8063 | goto fail_2; | | 8087 | goto fail_2; |
8064 | | | 8088 | |
8065 | for (i = 0; i < sc->sc_nqueues; i++) { | | 8089 | for (i = 0; i < sc->sc_nqueues; i++) { |
8066 | char rndname[16]; | | 8090 | char rndname[16]; |
8067 | | | 8091 | |
8068 | snprintf(rndname, sizeof(rndname), "%sTXRX%d", | | 8092 | snprintf(rndname, sizeof(rndname), "%sTXRX%d", |
8069 | device_xname(sc->sc_dev), i); | | 8093 | device_xname(sc->sc_dev), i); |
8070 | rnd_attach_source(&sc->sc_queue[i].rnd_source, rndname, | | 8094 | rnd_attach_source(&sc->sc_queue[i].rnd_source, rndname, |
| @@ -8107,26 +8131,28 @@ wm_free_txrx_queues(struct wm_softc *sc) | | | @@ -8107,26 +8131,28 @@ wm_free_txrx_queues(struct wm_softc *sc) |
8107 | int i; | | 8131 | int i; |
8108 | | | 8132 | |
8109 | for (i = 0; i < sc->sc_nqueues; i++) | | 8133 | for (i = 0; i < sc->sc_nqueues; i++) |
8110 | rnd_detach_source(&sc->sc_queue[i].rnd_source); | | 8134 | rnd_detach_source(&sc->sc_queue[i].rnd_source); |
8111 | | | 8135 | |
8112 | for (i = 0; i < sc->sc_nqueues; i++) { | | 8136 | for (i = 0; i < sc->sc_nqueues; i++) { |
8113 | struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq; | | 8137 | struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq; |
8114 | | | 8138 | |
8115 | #ifdef WM_EVENT_COUNTERS | | 8139 | #ifdef WM_EVENT_COUNTERS |
8116 | WM_Q_EVCNT_DETACH(rxq, intr, rxq, i); | | 8140 | WM_Q_EVCNT_DETACH(rxq, intr, rxq, i); |
8117 | WM_Q_EVCNT_DETACH(rxq, defer, rxq, i); | | 8141 | WM_Q_EVCNT_DETACH(rxq, defer, rxq, i); |
8118 | WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i); | | 8142 | WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i); |
8119 | WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i); | | 8143 | WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i); |
| | | 8144 | if ((sc->sc_type >= WM_T_82575) && !WM_IS_ICHPCH(sc)) |
| | | 8145 | WM_Q_EVCNT_DETACH(rxq, qdrop, rxq, i); |
8120 | #endif /* WM_EVENT_COUNTERS */ | | 8146 | #endif /* WM_EVENT_COUNTERS */ |
8121 | | | 8147 | |
8122 | wm_free_rx_buffer(sc, rxq); | | 8148 | wm_free_rx_buffer(sc, rxq); |
8123 | wm_free_rx_descs(sc, rxq); | | 8149 | wm_free_rx_descs(sc, rxq); |
8124 | if (rxq->rxq_lock) | | 8150 | if (rxq->rxq_lock) |
8125 | mutex_obj_free(rxq->rxq_lock); | | 8151 | mutex_obj_free(rxq->rxq_lock); |
8126 | } | | 8152 | } |
8127 | | | 8153 | |
8128 | for (i = 0; i < sc->sc_nqueues; i++) { | | 8154 | for (i = 0; i < sc->sc_nqueues; i++) { |
8129 | struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq; | | 8155 | struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq; |
8130 | struct mbuf *m; | | 8156 | struct mbuf *m; |
8131 | #ifdef WM_EVENT_COUNTERS | | 8157 | #ifdef WM_EVENT_COUNTERS |
8132 | int j; | | 8158 | int j; |
| @@ -8296,37 +8322,47 @@ wm_init_rx_regs(struct wm_softc *sc, str | | | @@ -8296,37 +8322,47 @@ wm_init_rx_regs(struct wm_softc *sc, str |
8296 | CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0); | | 8322 | CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0); |
8297 | CSR_WRITE(sc, WMREG_OLD_RDH1, 0); | | 8323 | CSR_WRITE(sc, WMREG_OLD_RDH1, 0); |
8298 | CSR_WRITE(sc, WMREG_OLD_RDT1, 0); | | 8324 | CSR_WRITE(sc, WMREG_OLD_RDT1, 0); |
8299 | CSR_WRITE(sc, WMREG_OLD_RDTR1, 0); | | 8325 | CSR_WRITE(sc, WMREG_OLD_RDTR1, 0); |
8300 | } else { | | 8326 | } else { |
8301 | int qid = wmq->wmq_id; | | 8327 | int qid = wmq->wmq_id; |
8302 | | | 8328 | |
8303 | CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0)); | | 8329 | CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0)); |
8304 | CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0)); | | 8330 | CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0)); |
8305 | CSR_WRITE(sc, WMREG_RDLEN(qid), | | 8331 | CSR_WRITE(sc, WMREG_RDLEN(qid), |
8306 | rxq->rxq_descsize * rxq->rxq_ndesc); | | 8332 | rxq->rxq_descsize * rxq->rxq_ndesc); |
8307 | | | 8333 | |
8308 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { | | 8334 | if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { |
| | | 8335 | uint32_t srrctl; |
| | | 8336 | |
8309 | if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1)) | | 8337 | if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1)) |
8310 | panic("%s: MCLBYTES %d unsupported for 82575 " | | 8338 | panic("%s: MCLBYTES %d unsupported for 82575 " |
8311 | "or higher\n", __func__, MCLBYTES); | | 8339 | "or higher\n", __func__, MCLBYTES); |
8312 | | | 8340 | |
8313 | /* | | 8341 | /* |
8314 | * Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF | | 8342 | * Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF |
8315 | * only. | | 8343 | * only. |
8316 | */ | | 8344 | */ |
8317 | CSR_WRITE(sc, WMREG_SRRCTL(qid), | | 8345 | srrctl = SRRCTL_DESCTYPE_ADV_ONEBUF |
8318 | SRRCTL_DESCTYPE_ADV_ONEBUF | | 8346 | | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT); |
8319 | | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT)); | | 8347 | /* |
| | | 8348 | * Drop frames if the RX descriptor ring has no room. |
| | | 8349 | * This is enabled only on multiqueue system to avoid |
| | | 8350 | * bad influence to other queues. |
| | | 8351 | */ |
| | | 8352 | if (sc->sc_nqueues > 1) |
| | | 8353 | srrctl |= SRRCTL_DROP_EN; |
| | | 8354 | CSR_WRITE(sc, WMREG_SRRCTL(qid), srrctl); |
| | | 8355 | |
8320 | CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE | | 8356 | CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE |
8321 | | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8) | | 8357 | | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8) |
8322 | | RXDCTL_WTHRESH(1)); | | 8358 | | RXDCTL_WTHRESH(1)); |
8323 | CSR_WRITE(sc, WMREG_RDH(qid), 0); | | 8359 | CSR_WRITE(sc, WMREG_RDH(qid), 0); |
8324 | CSR_WRITE(sc, WMREG_RDT(qid), 0); | | 8360 | CSR_WRITE(sc, WMREG_RDT(qid), 0); |
8325 | } else { | | 8361 | } else { |
8326 | CSR_WRITE(sc, WMREG_RDH(qid), 0); | | 8362 | CSR_WRITE(sc, WMREG_RDH(qid), 0); |
8327 | CSR_WRITE(sc, WMREG_RDT(qid), 0); | | 8363 | CSR_WRITE(sc, WMREG_RDT(qid), 0); |
8328 | /* XXX should update with AIM? */ | | 8364 | /* XXX should update with AIM? */ |
8329 | CSR_WRITE(sc, WMREG_RDTR, | | 8365 | CSR_WRITE(sc, WMREG_RDTR, |
8330 | (wmq->wmq_itr / 4) | RDTR_FPD); | | 8366 | (wmq->wmq_itr / 4) | RDTR_FPD); |
8331 | /* MUST be same */ | | 8367 | /* MUST be same */ |
8332 | CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4); | | 8368 | CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4); |
| @@ -10933,29 +10969,29 @@ wm_linkintr_msix(void *arg) | | | @@ -10933,29 +10969,29 @@ wm_linkintr_msix(void *arg) |
10933 | if (sc->sc_core_stopping) | | 10969 | if (sc->sc_core_stopping) |
10934 | goto out; | | 10970 | goto out; |
10935 | | | 10971 | |
10936 | if ((reg & ICR_LSC) != 0) { | | 10972 | if ((reg & ICR_LSC) != 0) { |
10937 | WM_EVCNT_INCR(&sc->sc_ev_linkintr); | | 10973 | WM_EVCNT_INCR(&sc->sc_ev_linkintr); |
10938 | wm_linkintr(sc, ICR_LSC); | | 10974 | wm_linkintr(sc, ICR_LSC); |
10939 | } | | 10975 | } |
10940 | if ((reg & ICR_GPI(0)) != 0) | | 10976 | if ((reg & ICR_GPI(0)) != 0) |
10941 | device_printf(sc->sc_dev, "got module interrupt\n"); | | 10977 | device_printf(sc->sc_dev, "got module interrupt\n"); |
10942 | | | 10978 | |
10943 | /* | | 10979 | /* |
10944 | * XXX 82574 MSI-X mode workaround | | 10980 | * XXX 82574 MSI-X mode workaround |
10945 | * | | 10981 | * |
10946 | * 82574 MSI-X mode causes receive overrun(RXO) interrupt as ICR_OTHER | | 10982 | * 82574 MSI-X mode causes a receive overrun(RXO) interrupt as an |
10947 | * MSI-X vector, furthermore it does not cause neigher ICR_RXQ(0) nor | | 10983 | * ICR_OTHER MSI-X vector; furthermore it causes neither ICR_RXQ(0) |
10948 | * ICR_RXQ(1) vector. So, we generate ICR_RXQ(0) and ICR_RXQ(1) | | 10984 | * nor ICR_RXQ(1) vectors. So, we generate ICR_RXQ(0) and ICR_RXQ(1) |
10949 | * interrupts by writing WMREG_ICS to process receive packets. | | 10985 | * interrupts by writing WMREG_ICS to process receive packets. |
10950 | */ | | 10986 | */ |
10951 | if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) { | | 10987 | if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) { |
10952 | #if defined(WM_DEBUG) | | 10988 | #if defined(WM_DEBUG) |
10953 | log(LOG_WARNING, "%s: Receive overrun\n", | | 10989 | log(LOG_WARNING, "%s: Receive overrun\n", |
10954 | device_xname(sc->sc_dev)); | | 10990 | device_xname(sc->sc_dev)); |
10955 | #endif /* defined(WM_DEBUG) */ | | 10991 | #endif /* defined(WM_DEBUG) */ |
10956 | | | 10992 | |
10957 | has_rxo = true; | | 10993 | has_rxo = true; |
10958 | /* | | 10994 | /* |
10959 | * The RXO interrupt is very high rate when receive traffic is | | 10995 | * The RXO interrupt is very high rate when receive traffic is |
10960 | * high rate. We use polling mode for ICR_OTHER like Tx/Rx | | 10996 | * high rate. We use polling mode for ICR_OTHER like Tx/Rx |
10961 | * interrupts. ICR_OTHER will be enabled at the end of | | 10997 | * interrupts. ICR_OTHER will be enabled at the end of |
| @@ -14963,36 +14999,37 @@ printver: | | | @@ -14963,36 +14999,37 @@ printver: |
14963 | sc->sc_nvm_ver_minor); | | 14999 | sc->sc_nvm_ver_minor); |
14964 | if (have_build) { | | 15000 | if (have_build) { |
14965 | sc->sc_nvm_ver_build = build; | | 15001 | sc->sc_nvm_ver_build = build; |
14966 | aprint_verbose(".%d", build); | | 15002 | aprint_verbose(".%d", build); |
14967 | } | | 15003 | } |
14968 | } | | 15004 | } |
14969 | | | 15005 | |
14970 | /* Assume the Option ROM area is above NVM_SIZE */ | | 15006 | /* Assume the Option ROM area is above NVM_SIZE */ |
14971 | if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom | | 15007 | if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom |
14972 | && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) { | | 15008 | && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) { |
14973 | /* Option ROM Version */ | | 15009 | /* Option ROM Version */ |
14974 | if ((off != 0x0000) && (off != 0xffff)) { | | 15010 | if ((off != 0x0000) && (off != 0xffff)) { |
14975 | int rv; | | 15011 | int rv; |
| | | 15012 | uint16_t oid0, oid1; |
14976 | | | 15013 | |
14977 | off += NVM_COMBO_VER_OFF; | | 15014 | off += NVM_COMBO_VER_OFF; |
14978 | rv = wm_nvm_read(sc, off + 1, 1, &uid1); | | 15015 | rv = wm_nvm_read(sc, off + 1, 1, &oid1); |
14979 | rv |= wm_nvm_read(sc, off, 1, &uid0); | | 15016 | rv |= wm_nvm_read(sc, off, 1, &oid0); |
14980 | if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff) | | 15017 | if ((rv == 0) && (oid0 != 0) && (oid0 != 0xffff) |
14981 | && (uid1 != 0) && (uid1 != 0xffff)) { | | 15018 | && (oid1 != 0) && (oid1 != 0xffff)) { |
14982 | /* 16bits */ | | 15019 | /* 16bits */ |
14983 | major = uid0 >> 8; | | 15020 | major = oid0 >> 8; |
14984 | build = (uid0 << 8) | (uid1 >> 8); | | 15021 | build = (oid0 << 8) | (oid1 >> 8); |
14985 | patch = uid1 & 0x00ff; | | 15022 | patch = oid1 & 0x00ff; |
14986 | aprint_verbose(", option ROM Version %d.%d.%d", | | 15023 | aprint_verbose(", option ROM Version %d.%d.%d", |
14987 | major, build, patch); | | 15024 | major, build, patch); |
14988 | } | | 15025 | } |
14989 | } | | 15026 | } |
14990 | } | | 15027 | } |
14991 | | | 15028 | |
14992 | if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0)) | | 15029 | if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0)) |
14993 | aprint_verbose(", Image Unique ID %08x", | | 15030 | aprint_verbose(", Image Unique ID %08x", |
14994 | ((uint32_t)uid1 << 16) | uid0); | | 15031 | ((uint32_t)uid1 << 16) | uid0); |
14995 | } | | 15032 | } |
14996 | | | 15033 | |
14997 | /* | | 15034 | /* |
14998 | * wm_nvm_read: | | 15035 | * wm_nvm_read: |