| @@ -1,14 +1,14 @@ | | | @@ -1,14 +1,14 @@ |
1 | /* $NetBSD: ix_txrx.c,v 1.100.4.4 2023/10/18 11:53:22 martin Exp $ */ | | 1 | /* $NetBSD: ix_txrx.c,v 1.100.4.5 2024/02/03 11:58:53 martin Exp $ */ |
2 | | | 2 | |
3 | /****************************************************************************** | | 3 | /****************************************************************************** |
4 | | | 4 | |
5 | Copyright (c) 2001-2017, Intel Corporation | | 5 | Copyright (c) 2001-2017, Intel Corporation |
6 | All rights reserved. | | 6 | All rights reserved. |
7 | | | 7 | |
8 | Redistribution and use in source and binary forms, with or without | | 8 | Redistribution and use in source and binary forms, with or without |
9 | modification, are permitted provided that the following conditions are met: | | 9 | modification, are permitted provided that the following conditions are met: |
10 | | | 10 | |
11 | 1. Redistributions of source code must retain the above copyright notice, | | 11 | 1. Redistributions of source code must retain the above copyright notice, |
12 | this list of conditions and the following disclaimer. | | 12 | this list of conditions and the following disclaimer. |
13 | | | 13 | |
14 | 2. Redistributions in binary form must reproduce the above copyright | | 14 | 2. Redistributions in binary form must reproduce the above copyright |
| @@ -54,57 +54,61 @@ | | | @@ -54,57 +54,61 @@ |
54 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 54 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
55 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 55 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
56 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 56 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
57 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 57 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
58 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 58 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
59 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 59 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
60 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 60 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
61 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 61 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
62 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 62 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
63 | * POSSIBILITY OF SUCH DAMAGE. | | 63 | * POSSIBILITY OF SUCH DAMAGE. |
64 | */ | | 64 | */ |
65 | | | 65 | |
66 | #include <sys/cdefs.h> | | 66 | #include <sys/cdefs.h> |
67 | __KERNEL_RCSID(0, "$NetBSD: ix_txrx.c,v 1.100.4.4 2023/10/18 11:53:22 martin Exp $"); | | 67 | __KERNEL_RCSID(0, "$NetBSD: ix_txrx.c,v 1.100.4.5 2024/02/03 11:58:53 martin Exp $"); |
68 | | | 68 | |
69 | #include "opt_inet.h" | | 69 | #include "opt_inet.h" |
70 | #include "opt_inet6.h" | | 70 | #include "opt_inet6.h" |
71 | | | 71 | |
72 | #include "ixgbe.h" | | 72 | #include "ixgbe.h" |
73 | | | 73 | |
| | | 74 | #ifdef RSC |
74 | /* | | 75 | /* |
75 | * HW RSC control: | | 76 | * HW RSC control: |
76 | * this feature only works with | | 77 | * this feature only works with |
77 | * IPv4, and only on 82599 and later. | | 78 | * IPv4, and only on 82599 and later. |
78 | * Also this will cause IP forwarding to | | 79 | * Also this will cause IP forwarding to |
79 | * fail and that can't be controlled by | | 80 | * fail and that can't be controlled by |
80 | * the stack as LRO can. For all these | | 81 | * the stack as LRO can. For all these |
81 | * reasons I've deemed it best to leave | | 82 | * reasons I've deemed it best to leave |
82 | * this off and not bother with a tuneable | | 83 | * this off and not bother with a tuneable |
83 | * interface; this would need to be compiled | | 84 | * interface; this would need to be compiled |
84 | * in to enable. | | 85 | * in to enable. |
85 | */ | | 86 | */ |
86 | static bool ixgbe_rsc_enable = FALSE; | | 87 | static bool ixgbe_rsc_enable = FALSE; |
| | | 88 | #endif |
87 | | | 89 | |
| | | 90 | #ifdef IXGBE_FDIR |
88 | /* | | 91 | /* |
89 | * For Flow Director: this is the | | 92 | * For Flow Director: this is the |
90 | * number of TX packets we sample | | 93 | * number of TX packets we sample |
91 | * for the filter pool, this means | | 94 | * for the filter pool, this means |
92 | * every 20th packet will be probed. | | 95 | * every 20th packet will be probed. |
93 | * | | 96 | * |
94 | * This feature can be disabled by | | 97 | * This feature can be disabled by |
95 | * setting this to 0. | | 98 | * setting this to 0. |
96 | */ | | 99 | */ |
97 | static int atr_sample_rate = 20; | | 100 | static int atr_sample_rate = 20; |
| | | 101 | #endif |
98 | | | 102 | |
99 | #define IXGBE_M_ADJ(sc, rxr, mp) \ | | 103 | #define IXGBE_M_ADJ(sc, rxr, mp) \ |
100 | if (sc->max_frame_size <= (rxr->mbuf_sz - ETHER_ALIGN)) \ | | 104 | if (sc->max_frame_size <= (rxr->mbuf_sz - ETHER_ALIGN)) \ |
101 | m_adj(mp, ETHER_ALIGN) | | 105 | m_adj(mp, ETHER_ALIGN) |
102 | | | 106 | |
103 | /************************************************************************ | | 107 | /************************************************************************ |
104 | * Local Function prototypes | | 108 | * Local Function prototypes |
105 | ************************************************************************/ | | 109 | ************************************************************************/ |
106 | static void ixgbe_setup_transmit_ring(struct tx_ring *); | | 110 | static void ixgbe_setup_transmit_ring(struct tx_ring *); |
107 | static void ixgbe_free_transmit_buffers(struct tx_ring *); | | 111 | static void ixgbe_free_transmit_buffers(struct tx_ring *); |
108 | static int ixgbe_setup_receive_ring(struct rx_ring *); | | 112 | static int ixgbe_setup_receive_ring(struct rx_ring *); |
109 | static void ixgbe_free_receive_buffers(struct rx_ring *); | | 113 | static void ixgbe_free_receive_buffers(struct rx_ring *); |
110 | static void ixgbe_rx_checksum(u32, struct mbuf *, u32, | | 114 | static void ixgbe_rx_checksum(u32, struct mbuf *, u32, |
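
A note on the IXGBE_M_ADJ macro above: it is the standard ETHER_ALIGN receive-alignment trick. A minimal sketch of the idea, assuming a freshly allocated RX cluster whose m_data/m_len still span the whole buffer (example_align_rx_cluster is a hypothetical helper, not driver code):

	/*
	 * ETHER_ALIGN is 2. Trimming two bytes from the front of an empty
	 * RX cluster before loading its DMA map makes the 14-byte Ethernet
	 * header end on a 4-byte boundary, so the IP header that follows it
	 * is 32-bit aligned on strict-alignment machines. The macro only
	 * does this when max_frame_size still fits after losing two bytes.
	 */
	static void
	example_align_rx_cluster(struct mbuf *mp)
	{
		m_adj(mp, ETHER_ALIGN);	/* advances m_data, shrinks m_len */
	}
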
| @@ -112,28 +116,29 @@ static void ixgbe_rx_checksum(u | | | @@ -112,28 +116,29 @@ static void ixgbe_rx_checksum(u |
112 | static void ixgbe_refresh_mbufs(struct rx_ring *, int); | | 116 | static void ixgbe_refresh_mbufs(struct rx_ring *, int); |
113 | static void ixgbe_drain(struct ifnet *, struct tx_ring *); | | 117 | static void ixgbe_drain(struct ifnet *, struct tx_ring *); |
114 | static int ixgbe_xmit(struct tx_ring *, struct mbuf *); | | 118 | static int ixgbe_xmit(struct tx_ring *, struct mbuf *); |
115 | static int ixgbe_tx_ctx_setup(struct tx_ring *, | | 119 | static int ixgbe_tx_ctx_setup(struct tx_ring *, |
116 | struct mbuf *, u32 *, u32 *); | | 120 | struct mbuf *, u32 *, u32 *); |
117 | static int ixgbe_tso_setup(struct tx_ring *, | | 121 | static int ixgbe_tso_setup(struct tx_ring *, |
118 | struct mbuf *, u32 *, u32 *); | | 122 | struct mbuf *, u32 *, u32 *); |
119 | static __inline void ixgbe_rx_discard(struct rx_ring *, int); | | 123 | static __inline void ixgbe_rx_discard(struct rx_ring *, int); |
120 | static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *, | | 124 | static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *, |
121 | struct mbuf *, u32); | | 125 | struct mbuf *, u32); |
122 | static int ixgbe_dma_malloc(struct ixgbe_softc *, bus_size_t, | | 126 | static int ixgbe_dma_malloc(struct ixgbe_softc *, bus_size_t, |
123 | struct ixgbe_dma_alloc *, int); | | 127 | struct ixgbe_dma_alloc *, int); |
124 | static void ixgbe_dma_free(struct ixgbe_softc *, struct ixgbe_dma_alloc *); | | 128 | static void ixgbe_dma_free(struct ixgbe_softc *, struct ixgbe_dma_alloc *); |
125 | | | 129 | #ifdef RSC |
126 | static void ixgbe_setup_hw_rsc(struct rx_ring *); | | 130 | static void ixgbe_setup_hw_rsc(struct rx_ring *); |
| | | 131 | #endif |
127 | | | 132 | |
128 | /************************************************************************ | | 133 | /************************************************************************ |
129 | * ixgbe_legacy_start_locked - Transmit entry point | | 134 | * ixgbe_legacy_start_locked - Transmit entry point |
130 | * | | 135 | * |
131 | * Called by the stack to initiate a transmit. | | 136 | * Called by the stack to initiate a transmit. |
132 | * The driver will remain in this routine as long as there are | | 137 | * The driver will remain in this routine as long as there are |
133 | * packets to transmit and transmit resources are available. | | 138 | * packets to transmit and transmit resources are available. |
134 | * In case resources are not available, the stack is notified | | 139 | * In case resources are not available, the stack is notified |
135 | * and the packet is requeued. | | 140 | * and the packet is requeued. |
136 | ************************************************************************/ | | 141 | ************************************************************************/ |
137 | int | | 142 | int |
138 | ixgbe_legacy_start_locked(struct ifnet *ifp, struct tx_ring *txr) | | 143 | ixgbe_legacy_start_locked(struct ifnet *ifp, struct tx_ring *txr) |
139 | { | | 144 | { |
| @@ -396,27 +401,27 @@ ixgbe_drain_all(struct ixgbe_softc *sc) | | | @@ -396,27 +401,27 @@ ixgbe_drain_all(struct ixgbe_softc *sc) |
396 | } | | 401 | } |
397 | | | 402 | |
398 | /************************************************************************ | | 403 | /************************************************************************ |
399 | * ixgbe_xmit | | 404 | * ixgbe_xmit |
400 | * | | 405 | * |
401 | * Maps the mbufs to tx descriptors, allowing the | | 406 | * Maps the mbufs to tx descriptors, allowing the |
402 | * TX engine to transmit the packets. | | 407 | * TX engine to transmit the packets. |
403 | * | | 408 | * |
404 | * Return 0 on success, positive on failure | | 409 | * Return 0 on success, positive on failure |
405 | ************************************************************************/ | | 410 | ************************************************************************/ |
406 | static int | | 411 | static int |
407 | ixgbe_xmit(struct tx_ring *txr, struct mbuf *m_head) | | 412 | ixgbe_xmit(struct tx_ring *txr, struct mbuf *m_head) |
408 | { | | 413 | { |
409 | struct ixgbe_softc *sc = txr->sc; | | 414 | struct ixgbe_softc *sc = txr->sc; |
410 | struct ixgbe_tx_buf *txbuf; | | 415 | struct ixgbe_tx_buf *txbuf; |
411 | union ixgbe_adv_tx_desc *txd = NULL; | | 416 | union ixgbe_adv_tx_desc *txd = NULL; |
412 | struct ifnet *ifp = sc->ifp; | | 417 | struct ifnet *ifp = sc->ifp; |
413 | int i, j, error; | | 418 | int i, j, error; |
414 | int first; | | 419 | int first; |
415 | u32 olinfo_status = 0, cmd_type_len; | | 420 | u32 olinfo_status = 0, cmd_type_len; |
416 | bool remap = TRUE; | | 421 | bool remap = TRUE; |
417 | bus_dmamap_t map; | | 422 | bus_dmamap_t map; |
418 | | | 423 | |
419 | /* Basic descriptor defines */ | | 424 | /* Basic descriptor defines */ |
420 | cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA | | | 425 | cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA | |
421 | IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT); | | 426 | IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT); |
422 | | | 427 | |
| @@ -477,28 +482,28 @@ retry: | | | @@ -477,28 +482,28 @@ retry: |
477 | return error; | | 482 | return error; |
478 | } | | 483 | } |
479 | } | | 484 | } |
480 | | | 485 | |
481 | /* Make certain there are enough descriptors */ | | 486 | /* Make certain there are enough descriptors */ |
482 | if (txr->tx_avail < (map->dm_nsegs + 2)) { | | 487 | if (txr->tx_avail < (map->dm_nsegs + 2)) { |
483 | txr->txr_no_space = true; | | 488 | txr->txr_no_space = true; |
484 | IXGBE_EVC_ADD(&txr->no_desc_avail, 1); | | 489 | IXGBE_EVC_ADD(&txr->no_desc_avail, 1); |
485 | ixgbe_dmamap_unload(txr->txtag, txbuf->map); | | 490 | ixgbe_dmamap_unload(txr->txtag, txbuf->map); |
486 | return EAGAIN; | | 491 | return EAGAIN; |
487 | } | | 492 | } |
488 | | | 493 | |
489 | /* | | 494 | /* |
490 | * Set up the appropriate offload context | | 495 | * Set up the appropriate offload context if requested; |
491 | * this will consume the first descriptor | | 496 | * this may consume one TX descriptor. |
492 | */ | | 497 | */ |
493 | error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status); | | 498 | error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status); |
494 | if (__predict_false(error)) { | | 499 | if (__predict_false(error)) { |
495 | return (error); | | 500 | return (error); |
496 | } | | 501 | } |
497 | | | 502 | |
498 | #ifdef IXGBE_FDIR | | 503 | #ifdef IXGBE_FDIR |
499 | /* Do the flow director magic */ | | 504 | /* Do the flow director magic */ |
500 | if ((sc->feat_en & IXGBE_FEATURE_FDIR) && | | 505 | if ((sc->feat_en & IXGBE_FEATURE_FDIR) && |
501 | (txr->atr_sample) && (!sc->fdir_reinit)) { | | 506 | (txr->atr_sample) && (!sc->fdir_reinit)) { |
502 | ++txr->atr_count; | | 507 | ++txr->atr_count; |
503 | if (txr->atr_count >= atr_sample_rate) { | | 508 | if (txr->atr_count >= atr_sample_rate) { |
504 | ixgbe_atr(txr, m_head); | | 509 | ixgbe_atr(txr, m_head); |
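
The reworded comment in this hunk ("may consume one TX descriptor") lines up with the availability check earlier in ixgbe_xmit(). A sketch of that accounting, with `needed` as a hypothetical local used only for illustration:

	/*
	 * Worst case for one packet: one data descriptor per DMA segment,
	 * plus at most one context descriptor from ixgbe_tx_ctx_setup()
	 * (VLAN/CSUM/TSO), plus one slot of headroom so the ring is never
	 * driven completely full; hence the tx_avail < dm_nsegs + 2 test.
	 */
	int needed = map->dm_nsegs + 1 /* context */ + 1 /* headroom */;
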
| @@ -614,28 +619,28 @@ ixgbe_allocate_transmit_buffers(struct t | | | @@ -614,28 +619,28 @@ ixgbe_allocate_transmit_buffers(struct t |
614 | /* parent */ sc->osdep.dmat, | | 619 | /* parent */ sc->osdep.dmat, |
615 | /* alignment */ 1, | | 620 | /* alignment */ 1, |
616 | /* bounds */ 0, | | 621 | /* bounds */ 0, |
617 | /* maxsize */ IXGBE_TSO_SIZE, | | 622 | /* maxsize */ IXGBE_TSO_SIZE, |
618 | /* nsegments */ sc->num_segs, | | 623 | /* nsegments */ sc->num_segs, |
619 | /* maxsegsize */ PAGE_SIZE, | | 624 | /* maxsegsize */ PAGE_SIZE, |
620 | /* flags */ 0, | | 625 | /* flags */ 0, |
621 | &txr->txtag); | | 626 | &txr->txtag); |
622 | if (error != 0) { | | 627 | if (error != 0) { |
623 | aprint_error_dev(dev,"Unable to allocate TX DMA tag\n"); | | 628 | aprint_error_dev(dev,"Unable to allocate TX DMA tag\n"); |
624 | goto fail; | | 629 | goto fail; |
625 | } | | 630 | } |
626 | | | 631 | |
627 | txr->tx_buffers = malloc(sizeof(struct ixgbe_tx_buf) * | | 632 | txr->tx_buffers = kmem_zalloc(sizeof(struct ixgbe_tx_buf) * |
628 | sc->num_tx_desc, M_DEVBUF, M_WAITOK | M_ZERO); | | 633 | sc->num_tx_desc, KM_SLEEP); |
629 | | | 634 | |
630 | /* Create the descriptor buffer dma maps */ | | 635 | /* Create the descriptor buffer dma maps */ |
631 | txbuf = txr->tx_buffers; | | 636 | txbuf = txr->tx_buffers; |
632 | for (i = 0; i < sc->num_tx_desc; i++, txbuf++) { | | 637 | for (i = 0; i < sc->num_tx_desc; i++, txbuf++) { |
633 | error = ixgbe_dmamap_create(txr->txtag, 0, &txbuf->map); | | 638 | error = ixgbe_dmamap_create(txr->txtag, 0, &txbuf->map); |
634 | if (error != 0) { | | 639 | if (error != 0) { |
635 | aprint_error_dev(dev, | | 640 | aprint_error_dev(dev, |
636 | "Unable to create TX DMA map (%d)\n", error); | | 641 | "Unable to create TX DMA map (%d)\n", error); |
637 | goto fail; | | 642 | goto fail; |
638 | } | | 643 | } |
639 | } | | 644 | } |
640 | | | 645 | |
641 | return 0; | | 646 | return 0; |
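
The malloc(9)-to-kmem(9) conversion in this hunk also changes the free contract: NetBSD's kmem_free() must be given the same size that was passed to kmem_zalloc(), which is why the free sites later in this diff recompute sizeof(...) * count instead of calling free(..., M_DEVBUF). A minimal sketch of the paired calls, using the driver's types:

	size_t sz = sizeof(struct ixgbe_tx_buf) * sc->num_tx_desc;

	txr->tx_buffers = kmem_zalloc(sz, KM_SLEEP);	/* zeroed; may sleep */
	/* ... use the array ... */
	kmem_free(txr->tx_buffers, sz);			/* size must match */
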
| @@ -705,29 +710,31 @@ ixgbe_setup_transmit_ring(struct tx_ring | | | @@ -705,29 +710,31 @@ ixgbe_setup_transmit_ring(struct tx_ring |
705 | * netmap_idx_n2k() handles wraparounds properly. | | 710 | * netmap_idx_n2k() handles wraparounds properly. |
706 | */ | | 711 | */ |
707 | if ((sc->feat_en & IXGBE_FEATURE_NETMAP) && slot) { | | 712 | if ((sc->feat_en & IXGBE_FEATURE_NETMAP) && slot) { |
708 | int si = netmap_idx_n2k(na->tx_rings[txr->me], i); | | 713 | int si = netmap_idx_n2k(na->tx_rings[txr->me], i); |
709 | netmap_load_map(na, txr->txtag, | | 714 | netmap_load_map(na, txr->txtag, |
710 | txbuf->map, NMB(na, slot + si)); | | 715 | txbuf->map, NMB(na, slot + si)); |
711 | } | | 716 | } |
712 | #endif /* DEV_NETMAP */ | | 717 | #endif /* DEV_NETMAP */ |
713 | | | 718 | |
714 | /* Clear the EOP descriptor pointer */ | | 719 | /* Clear the EOP descriptor pointer */ |
715 | txbuf->eop = NULL; | | 720 | txbuf->eop = NULL; |
716 | } | | 721 | } |
717 | | | 722 | |
| | | 723 | #ifdef IXGBE_FDIR |
718 | /* Set the rate at which we sample packets */ | | 724 | /* Set the rate at which we sample packets */ |
719 | if (sc->feat_en & IXGBE_FEATURE_FDIR) | | 725 | if (sc->feat_en & IXGBE_FEATURE_FDIR) |
720 | txr->atr_sample = atr_sample_rate; | | 726 | txr->atr_sample = atr_sample_rate; |
| | | 727 | #endif |
721 | | | 728 | |
722 | /* Set number of descriptors available */ | | 729 | /* Set number of descriptors available */ |
723 | txr->tx_avail = sc->num_tx_desc; | | 730 | txr->tx_avail = sc->num_tx_desc; |
724 | | | 731 | |
725 | ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, | | 732 | ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, |
726 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); | | 733 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
727 | IXGBE_TX_UNLOCK(txr); | | 734 | IXGBE_TX_UNLOCK(txr); |
728 | } /* ixgbe_setup_transmit_ring */ | | 735 | } /* ixgbe_setup_transmit_ring */ |
729 | | | 736 | |
730 | /************************************************************************ | | 737 | /************************************************************************ |
731 | * ixgbe_setup_transmit_structures - Initialize all transmit rings. | | 738 | * ixgbe_setup_transmit_structures - Initialize all transmit rings. |
732 | ************************************************************************/ | | 739 | ************************************************************************/ |
733 | int | | 740 | int |
| @@ -744,38 +751,38 @@ ixgbe_setup_transmit_structures(struct i | | | @@ -744,38 +751,38 @@ ixgbe_setup_transmit_structures(struct i |
744 | /************************************************************************ | | 751 | /************************************************************************ |
745 | * ixgbe_free_transmit_structures - Free all transmit rings. | | 752 | * ixgbe_free_transmit_structures - Free all transmit rings. |
746 | ************************************************************************/ | | 753 | ************************************************************************/ |
747 | void | | 754 | void |
748 | ixgbe_free_transmit_structures(struct ixgbe_softc *sc) | | 755 | ixgbe_free_transmit_structures(struct ixgbe_softc *sc) |
749 | { | | 756 | { |
750 | struct tx_ring *txr = sc->tx_rings; | | 757 | struct tx_ring *txr = sc->tx_rings; |
751 | | | 758 | |
752 | for (int i = 0; i < sc->num_queues; i++, txr++) { | | 759 | for (int i = 0; i < sc->num_queues; i++, txr++) { |
753 | ixgbe_free_transmit_buffers(txr); | | 760 | ixgbe_free_transmit_buffers(txr); |
754 | ixgbe_dma_free(sc, &txr->txdma); | | 761 | ixgbe_dma_free(sc, &txr->txdma); |
755 | IXGBE_TX_LOCK_DESTROY(txr); | | 762 | IXGBE_TX_LOCK_DESTROY(txr); |
756 | } | | 763 | } |
757 | free(sc->tx_rings, M_DEVBUF); | | 764 | kmem_free(sc->tx_rings, sizeof(struct tx_ring) * sc->num_queues); |
758 | } /* ixgbe_free_transmit_structures */ | | 765 | } /* ixgbe_free_transmit_structures */ |
759 | | | 766 | |
760 | /************************************************************************ | | 767 | /************************************************************************ |
761 | * ixgbe_free_transmit_buffers | | 768 | * ixgbe_free_transmit_buffers |
762 | * | | 769 | * |
763 | * Free transmit ring related data structures. | | 770 | * Free transmit ring related data structures. |
764 | ************************************************************************/ | | 771 | ************************************************************************/ |
765 | static void | | 772 | static void |
766 | ixgbe_free_transmit_buffers(struct tx_ring *txr) | | 773 | ixgbe_free_transmit_buffers(struct tx_ring *txr) |
767 | { | | 774 | { |
768 | struct ixgbe_softc *sc = txr->sc; | | 775 | struct ixgbe_softc *sc = txr->sc; |
769 | struct ixgbe_tx_buf *tx_buffer; | | 776 | struct ixgbe_tx_buf *tx_buffer; |
770 | int i; | | 777 | int i; |
771 | | | 778 | |
772 | INIT_DEBUGOUT("ixgbe_free_transmit_buffers: begin"); | | 779 | INIT_DEBUGOUT("ixgbe_free_transmit_buffers: begin"); |
773 | | | 780 | |
774 | if (txr->tx_buffers == NULL) | | 781 | if (txr->tx_buffers == NULL) |
775 | return; | | 782 | return; |
776 | | | 783 | |
777 | tx_buffer = txr->tx_buffers; | | 784 | tx_buffer = txr->tx_buffers; |
778 | for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) { | | 785 | for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) { |
779 | if (tx_buffer->m_head != NULL) { | | 786 | if (tx_buffer->m_head != NULL) { |
780 | bus_dmamap_sync(txr->txtag->dt_dmat, tx_buffer->map, | | 787 | bus_dmamap_sync(txr->txtag->dt_dmat, tx_buffer->map, |
781 | 0, tx_buffer->m_head->m_pkthdr.len, | | 788 | 0, tx_buffer->m_head->m_pkthdr.len, |
| @@ -792,27 +799,28 @@ ixgbe_free_transmit_buffers(struct tx_ri | | | @@ -792,27 +799,28 @@ ixgbe_free_transmit_buffers(struct tx_ri |
792 | ixgbe_dmamap_unload(txr->txtag, tx_buffer->map); | | 799 | ixgbe_dmamap_unload(txr->txtag, tx_buffer->map); |
793 | ixgbe_dmamap_destroy(txr->txtag, tx_buffer->map); | | 800 | ixgbe_dmamap_destroy(txr->txtag, tx_buffer->map); |
794 | tx_buffer->map = NULL; | | 801 | tx_buffer->map = NULL; |
795 | } | | 802 | } |
796 | } | | 803 | } |
797 | if (txr->txr_interq != NULL) { | | 804 | if (txr->txr_interq != NULL) { |
798 | struct mbuf *m; | | 805 | struct mbuf *m; |
799 | | | 806 | |
800 | while ((m = pcq_get(txr->txr_interq)) != NULL) | | 807 | while ((m = pcq_get(txr->txr_interq)) != NULL) |
801 | m_freem(m); | | 808 | m_freem(m); |
802 | pcq_destroy(txr->txr_interq); | | 809 | pcq_destroy(txr->txr_interq); |
803 | } | | 810 | } |
804 | if (txr->tx_buffers != NULL) { | | 811 | if (txr->tx_buffers != NULL) { |
805 | free(txr->tx_buffers, M_DEVBUF); | | 812 | kmem_free(txr->tx_buffers, |
| | | 813 | sizeof(struct ixgbe_tx_buf) * sc->num_tx_desc); |
806 | txr->tx_buffers = NULL; | | 814 | txr->tx_buffers = NULL; |
807 | } | | 815 | } |
808 | if (txr->txtag != NULL) { | | 816 | if (txr->txtag != NULL) { |
809 | ixgbe_dma_tag_destroy(txr->txtag); | | 817 | ixgbe_dma_tag_destroy(txr->txtag); |
810 | txr->txtag = NULL; | | 818 | txr->txtag = NULL; |
811 | } | | 819 | } |
812 | } /* ixgbe_free_transmit_buffers */ | | 820 | } /* ixgbe_free_transmit_buffers */ |
813 | | | 821 | |
814 | /************************************************************************ | | 822 | /************************************************************************ |
815 | * ixgbe_tx_ctx_setup | | 823 | * ixgbe_tx_ctx_setup |
816 | * | | 824 | * |
817 | * Advanced Context Descriptor setup for VLAN, CSUM or TSO | | 825 | * Advanced Context Descriptor setup for VLAN, CSUM or TSO |
818 | ************************************************************************/ | | 826 | ************************************************************************/ |
| @@ -844,29 +852,26 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr, | | | @@ -844,29 +852,26 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr, |
844 | int rv = ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status); | | 852 | int rv = ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status); |
845 | | | 853 | |
846 | if (rv != 0) | | 854 | if (rv != 0) |
847 | IXGBE_EVC_ADD(&sc->tso_err, 1); | | 855 | IXGBE_EVC_ADD(&sc->tso_err, 1); |
848 | return rv; | | 856 | return rv; |
849 | } | | 857 | } |
850 | | | 858 | |
851 | if ((mp->m_pkthdr.csum_flags & M_CSUM_OFFLOAD) == 0) | | 859 | if ((mp->m_pkthdr.csum_flags & M_CSUM_OFFLOAD) == 0) |
852 | offload = FALSE; | | 860 | offload = FALSE; |
853 | | | 861 | |
854 | /* Indicate the whole packet as payload when not doing TSO */ | | 862 | /* Indicate the whole packet as payload when not doing TSO */ |
855 | *olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT; | | 863 | *olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT; |
856 | | | 864 | |
857 | /* Now ready a context descriptor */ | | | |
858 | TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd]; | | | |
859 | | | | |
860 | /* | | 865 | /* |
861 | * In advanced descriptors the vlan tag must | | 866 | * In advanced descriptors the vlan tag must |
862 | * be placed into the context descriptor. Hence | | 867 | * be placed into the context descriptor. Hence |
863 | * we need to make one even if not doing offloads. | | 868 | * we need to make one even if not doing offloads. |
864 | */ | | 869 | */ |
865 | if (vlan_has_tag(mp)) { | | 870 | if (vlan_has_tag(mp)) { |
866 | vtag = htole16(vlan_get_tag(mp)); | | 871 | vtag = htole16(vlan_get_tag(mp)); |
867 | vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT); | | 872 | vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT); |
868 | } else if (!(txr->sc->feat_en & IXGBE_FEATURE_NEEDS_CTXD) && | | 873 | } else if (!(txr->sc->feat_en & IXGBE_FEATURE_NEEDS_CTXD) && |
869 | (offload == FALSE)) | | 874 | (offload == FALSE)) |
870 | return (0); | | 875 | return (0); |
871 | | | 876 | |
872 | /* | | 877 | /* |
| @@ -949,26 +954,29 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr, | | | @@ -949,26 +954,29 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr, |
949 | offload = false; | | 954 | offload = false; |
950 | break; | | 955 | break; |
951 | default: | | 956 | default: |
952 | offload = false; | | 957 | offload = false; |
953 | break; | | 958 | break; |
954 | } | | 959 | } |
955 | | | 960 | |
956 | if (offload) /* Insert L4 checksum into data descriptors */ | | 961 | if (offload) /* Insert L4 checksum into data descriptors */ |
957 | *olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8; | | 962 | *olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8; |
958 | | | 963 | |
959 | no_offloads: | | 964 | no_offloads: |
960 | type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; | | 965 | type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; |
961 | | | 966 | |
| | | 967 | /* Now ready a context descriptor */ |
| | | 968 | TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd]; |
| | | 969 | |
962 | /* Now copy bits into descriptor */ | | 970 | /* Now copy bits into descriptor */ |
963 | TXD->vlan_macip_lens = htole32(vlan_macip_lens); | | 971 | TXD->vlan_macip_lens = htole32(vlan_macip_lens); |
964 | TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl); | | 972 | TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl); |
965 | TXD->seqnum_seed = htole32(0); | | 973 | TXD->seqnum_seed = htole32(0); |
966 | TXD->mss_l4len_idx = htole32(0); | | 974 | TXD->mss_l4len_idx = htole32(0); |
967 | | | 975 | |
968 | /* We've consumed the first desc, adjust counters */ | | 976 | /* We've consumed the first desc, adjust counters */ |
969 | if (++ctxd == txr->num_desc) | | 977 | if (++ctxd == txr->num_desc) |
970 | ctxd = 0; | | 978 | ctxd = 0; |
971 | txr->next_avail_desc = ctxd; | | 979 | txr->next_avail_desc = ctxd; |
972 | --txr->tx_avail; | | 980 | --txr->tx_avail; |
973 | | | 981 | |
974 | return (0); | | 982 | return (0); |
| @@ -1098,26 +1106,27 @@ ixgbe_tso_setup(struct tx_ring *txr, str | | | @@ -1098,26 +1106,27 @@ ixgbe_tso_setup(struct tx_ring *txr, str |
1098 | * Examine each tx_buffer in the used queue. If the hardware is done | | 1106 | * Examine each tx_buffer in the used queue. If the hardware is done |
1099 | * processing the packet then free associated resources. The | | 1107 | * processing the packet then free associated resources. The |
1100 | * tx_buffer is put back on the free queue. | | 1108 | * tx_buffer is put back on the free queue. |
1101 | ************************************************************************/ | | 1109 | ************************************************************************/ |
1102 | bool | | 1110 | bool |
1103 | ixgbe_txeof(struct tx_ring *txr) | | 1111 | ixgbe_txeof(struct tx_ring *txr) |
1104 | { | | 1112 | { |
1105 | struct ixgbe_softc *sc = txr->sc; | | 1113 | struct ixgbe_softc *sc = txr->sc; |
1106 | struct ifnet *ifp = sc->ifp; | | 1114 | struct ifnet *ifp = sc->ifp; |
1107 | struct ixgbe_tx_buf *buf; | | 1115 | struct ixgbe_tx_buf *buf; |
1108 | union ixgbe_adv_tx_desc *txd; | | 1116 | union ixgbe_adv_tx_desc *txd; |
1109 | u32 work, processed = 0; | | 1117 | u32 work, processed = 0; |
1110 | u32 limit = sc->tx_process_limit; | | 1118 | u32 limit = sc->tx_process_limit; |
| | | 1119 | u16 avail; |
1111 | | | 1120 | |
1112 | KASSERT(mutex_owned(&txr->tx_mtx)); | | 1121 | KASSERT(mutex_owned(&txr->tx_mtx)); |
1113 | | | 1122 | |
1114 | #ifdef DEV_NETMAP | | 1123 | #ifdef DEV_NETMAP |
1115 | if ((sc->feat_en & IXGBE_FEATURE_NETMAP) && | | 1124 | if ((sc->feat_en & IXGBE_FEATURE_NETMAP) && |
1116 | (sc->ifp->if_capenable & IFCAP_NETMAP)) { | | 1125 | (sc->ifp->if_capenable & IFCAP_NETMAP)) { |
1117 | struct netmap_adapter *na = NA(sc->ifp); | | 1126 | struct netmap_adapter *na = NA(sc->ifp); |
1118 | struct netmap_kring *kring = na->tx_rings[txr->me]; | | 1127 | struct netmap_kring *kring = na->tx_rings[txr->me]; |
1119 | txd = txr->tx_base; | | 1128 | txd = txr->tx_base; |
1120 | bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, | | 1129 | bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, |
1121 | BUS_DMASYNC_POSTREAD); | | 1130 | BUS_DMASYNC_POSTREAD); |
1122 | /* | | 1131 | /* |
1123 | * In netmap mode, all the work is done in the context | | 1132 | * In netmap mode, all the work is done in the context |
| @@ -1141,123 +1150,128 @@ ixgbe_txeof(struct tx_ring *txr) | | | @@ -1141,123 +1150,128 @@ ixgbe_txeof(struct tx_ring *txr) |
1141 | } | | 1150 | } |
1142 | #endif /* DEV_NETMAP */ | | 1151 | #endif /* DEV_NETMAP */ |
1143 | | | 1152 | |
1144 | if (txr->tx_avail == txr->num_desc) { | | 1153 | if (txr->tx_avail == txr->num_desc) { |
1145 | txr->busy = 0; | | 1154 | txr->busy = 0; |
1146 | return false; | | 1155 | return false; |
1147 | } | | 1156 | } |
1148 | | | 1157 | |
1149 | /* Get work starting point */ | | 1158 | /* Get work starting point */ |
1150 | work = txr->next_to_clean; | | 1159 | work = txr->next_to_clean; |
1151 | buf = &txr->tx_buffers[work]; | | 1160 | buf = &txr->tx_buffers[work]; |
1152 | txd = &txr->tx_base[work]; | | 1161 | txd = &txr->tx_base[work]; |
1153 | work -= txr->num_desc; /* The distance to ring end */ | | 1162 | work -= txr->num_desc; /* The distance to ring end */ |
| | | 1163 | avail = txr->tx_avail; |
1154 | ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, | | 1164 | ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, |
1155 | BUS_DMASYNC_POSTREAD); | | 1165 | BUS_DMASYNC_POSTREAD); |
1156 | | | 1166 | |
1157 | do { | | 1167 | do { |
1158 | union ixgbe_adv_tx_desc *eop = buf->eop; | | 1168 | union ixgbe_adv_tx_desc *eop = buf->eop; |
1159 | if (eop == NULL) /* No work */ | | 1169 | if (eop == NULL) /* No work */ |
1160 | break; | | 1170 | break; |
1161 | | | 1171 | |
1162 | if ((le32toh(eop->wb.status) & IXGBE_TXD_STAT_DD) == 0) | | 1172 | if ((le32toh(eop->wb.status) & IXGBE_TXD_STAT_DD) == 0) |
1163 | break; /* I/O not complete */ | | 1173 | break; /* I/O not complete */ |
1164 | | | 1174 | |
1165 | if (buf->m_head) { | | 1175 | if (buf->m_head) { |
1166 | txr->bytes += buf->m_head->m_pkthdr.len; | | 1176 | txr->bytes += buf->m_head->m_pkthdr.len; |
1167 | bus_dmamap_sync(txr->txtag->dt_dmat, buf->map, | | 1177 | bus_dmamap_sync(txr->txtag->dt_dmat, buf->map, |
1168 | 0, buf->m_head->m_pkthdr.len, | | 1178 | 0, buf->m_head->m_pkthdr.len, |
1169 | BUS_DMASYNC_POSTWRITE); | | 1179 | BUS_DMASYNC_POSTWRITE); |
1170 | ixgbe_dmamap_unload(txr->txtag, buf->map); | | 1180 | ixgbe_dmamap_unload(txr->txtag, buf->map); |
1171 | m_freem(buf->m_head); | | 1181 | m_freem(buf->m_head); |
1172 | buf->m_head = NULL; | | 1182 | buf->m_head = NULL; |
1173 | } | | 1183 | } |
1174 | buf->eop = NULL; | | 1184 | buf->eop = NULL; |
1175 | txr->txr_no_space = false; | | 1185 | ++avail; |
1176 | ++txr->tx_avail; | | | |
1177 | | | 1186 | |
1178 | /* We clean the range if multi segment */ | | 1187 | /* We clean the range if multi segment */ |
1179 | while (txd != eop) { | | 1188 | while (txd != eop) { |
1180 | ++txd; | | 1189 | ++txd; |
1181 | ++buf; | | 1190 | ++buf; |
1182 | ++work; | | 1191 | ++work; |
1183 | /* wrap the ring? */ | | 1192 | /* wrap the ring? */ |
1184 | if (__predict_false(!work)) { | | 1193 | if (__predict_false(!work)) { |
1185 | work -= txr->num_desc; | | 1194 | work -= txr->num_desc; |
1186 | buf = txr->tx_buffers; | | 1195 | buf = txr->tx_buffers; |
1187 | txd = txr->tx_base; | | 1196 | txd = txr->tx_base; |
1188 | } | | 1197 | } |
1189 | if (buf->m_head) { | | 1198 | if (buf->m_head) { |
1190 | txr->bytes += | | 1199 | txr->bytes += |
1191 | buf->m_head->m_pkthdr.len; | | 1200 | buf->m_head->m_pkthdr.len; |
1192 | bus_dmamap_sync(txr->txtag->dt_dmat, | | 1201 | bus_dmamap_sync(txr->txtag->dt_dmat, |
1193 | buf->map, | | 1202 | buf->map, |
1194 | 0, buf->m_head->m_pkthdr.len, | | 1203 | 0, buf->m_head->m_pkthdr.len, |
1195 | BUS_DMASYNC_POSTWRITE); | | 1204 | BUS_DMASYNC_POSTWRITE); |
1196 | ixgbe_dmamap_unload(txr->txtag, | | 1205 | ixgbe_dmamap_unload(txr->txtag, |
1197 | buf->map); | | 1206 | buf->map); |
1198 | m_freem(buf->m_head); | | 1207 | m_freem(buf->m_head); |
1199 | buf->m_head = NULL; | | 1208 | buf->m_head = NULL; |
1200 | } | | 1209 | } |
1201 | ++txr->tx_avail; | | 1210 | ++avail; |
1202 | buf->eop = NULL; | | 1211 | buf->eop = NULL; |
1203 | | | 1212 | |
1204 | } | | 1213 | } |
1205 | ++txr->packets; | | | |
1206 | ++processed; | | 1214 | ++processed; |
1207 | if_statinc(ifp, if_opackets); | | | |
1208 | | | 1215 | |
1209 | /* Try the next packet */ | | 1216 | /* Try the next packet */ |
1210 | ++txd; | | 1217 | ++txd; |
1211 | ++buf; | | 1218 | ++buf; |
1212 | ++work; | | 1219 | ++work; |
1213 | /* reset with a wrap */ | | 1220 | /* reset with a wrap */ |
1214 | if (__predict_false(!work)) { | | 1221 | if (__predict_false(!work)) { |
1215 | work -= txr->num_desc; | | 1222 | work -= txr->num_desc; |
1216 | buf = txr->tx_buffers; | | 1223 | buf = txr->tx_buffers; |
1217 | txd = txr->tx_base; | | 1224 | txd = txr->tx_base; |
1218 | } | | 1225 | } |
1219 | prefetch(txd); | | 1226 | prefetch(txd); |
1220 | } while (__predict_true(--limit)); | | 1227 | } while (__predict_true(--limit)); |
1221 | | | 1228 | |
1222 | ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, | | 1229 | ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, |
1223 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); | | 1230 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
1224 | | | 1231 | |
1225 | work += txr->num_desc; | | 1232 | work += txr->num_desc; |
1226 | txr->next_to_clean = work; | | 1233 | txr->next_to_clean = work; |
| | | 1234 | if (processed) { |
| | | 1235 | txr->tx_avail = avail; |
| | | 1236 | txr->txr_no_space = false; |
| | | 1237 | txr->packets += processed; |
| | | 1238 | if_statadd(ifp, if_opackets, processed); |
| | | 1239 | } |
1227 | | | 1240 | |
1228 | /* | | 1241 | /* |
1229 | * Queue Hang detection, we know there's | | 1242 | * Queue Hang detection, we know there's |
1230 | * work outstanding or the first return | | 1243 | * work outstanding or the first return |
1231 | * would have been taken, so increment busy | | 1244 | * would have been taken, so increment busy |
1232 | * if nothing managed to get cleaned, then | | 1245 | * if nothing managed to get cleaned, then |
1233 | * in local_timer it will be checked and | | 1246 | * in local_timer it will be checked and |
1234 | * marked as HUNG if it exceeds a MAX attempt. | | 1247 | * marked as HUNG if it exceeds a MAX attempt. |
1235 | */ | | 1248 | */ |
1236 | if ((processed == 0) && (txr->busy != IXGBE_QUEUE_HUNG)) | | 1249 | if ((processed == 0) && (txr->busy != IXGBE_QUEUE_HUNG)) |
1237 | ++txr->busy; | | 1250 | ++txr->busy; |
1238 | /* | | 1251 | /* |
1239 | * If anything gets cleaned we reset state to 1, | | 1252 | * If anything gets cleaned we reset state to 1, |
1240 | * note this will turn off HUNG if it's set. | | 1253 | * note this will turn off HUNG if it's set. |
1241 | */ | | 1254 | */ |
1242 | if (processed) | | 1255 | if (processed) |
1243 | txr->busy = 1; | | 1256 | txr->busy = 1; |
1244 | | | 1257 | |
1245 | if (txr->tx_avail == txr->num_desc) | | 1258 | if (txr->tx_avail == txr->num_desc) |
1246 | txr->busy = 0; | | 1259 | txr->busy = 0; |
1247 | | | 1260 | |
1248 | return ((limit > 0) ? false : true); | | 1261 | return ((limit > 0) ? false : true); |
1249 | } /* ixgbe_txeof */ | | 1262 | } /* ixgbe_txeof */ |
1250 | | | 1263 | |
| | | 1264 | #ifdef RSC |
1251 | /************************************************************************ | | 1265 | /************************************************************************ |
1252 | * ixgbe_rsc_count | | 1266 | * ixgbe_rsc_count |
1253 | * | | 1267 | * |
1254 | * Used to detect a descriptor that has been merged by Hardware RSC. | | 1268 | * Used to detect a descriptor that has been merged by Hardware RSC. |
1255 | ************************************************************************/ | | 1269 | ************************************************************************/ |
1256 | static inline u32 | | 1270 | static inline u32 |
1257 | ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx) | | 1271 | ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx) |
1258 | { | | 1272 | { |
1259 | return (le32toh(rx->wb.lower.lo_dword.data) & | | 1273 | return (le32toh(rx->wb.lower.lo_dword.data) & |
1260 | IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT; | | 1274 | IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT; |
1261 | } /* ixgbe_rsc_count */ | | 1275 | } /* ixgbe_rsc_count */ |
1262 | | | 1276 | |
1263 | /************************************************************************ | | 1277 | /************************************************************************ |
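
The ixgbe_txeof() changes in this hunk replace the per-descriptor ++txr->tx_avail updates with a local `avail` that is published once after the clean loop, together with the packet counters. A sketch of the pattern, assuming (as the surrounding code suggests) that the transmit side may read tx_avail concurrently; clean_one_tx_desc() is a hypothetical helper standing in for the body of the loop:

	/*
	 * Accumulate locally, publish once: a concurrent reader of
	 * txr->tx_avail sees either the old count or the final count,
	 * never an intermediate value, and one if_statadd() replaces
	 * an if_statinc() per completed packet.
	 */
	u16 avail = txr->tx_avail;
	while (clean_one_tx_desc(txr))
		avail++;
	if (processed) {
		txr->tx_avail = avail;
		if_statadd(ifp, if_opackets, processed);
	}
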
| @@ -1314,26 +1328,27 @@ ixgbe_setup_hw_rsc(struct rx_ring *rxr) | | | @@ -1314,26 +1328,27 @@ ixgbe_setup_hw_rsc(struct rx_ring *rxr) |
1314 | | | 1328 | |
1315 | IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl); | | 1329 | IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl); |
1316 | | | 1330 | |
1317 | /* Enable TCP header recognition */ | | 1331 | /* Enable TCP header recognition */ |
1318 | IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), | | 1332 | IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), |
1319 | (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) | IXGBE_PSRTYPE_TCPHDR)); | | 1333 | (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) | IXGBE_PSRTYPE_TCPHDR)); |
1320 | | | 1334 | |
1321 | /* Disable RSC for ACK packets */ | | 1335 | /* Disable RSC for ACK packets */ |
1322 | IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, | | 1336 | IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, |
1323 | (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); | | 1337 | (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); |
1324 | | | 1338 | |
1325 | rxr->hw_rsc = TRUE; | | 1339 | rxr->hw_rsc = TRUE; |
1326 | } /* ixgbe_setup_hw_rsc */ | | 1340 | } /* ixgbe_setup_hw_rsc */ |
| | | 1341 | #endif |
1327 | | | 1342 | |
1328 | /************************************************************************ | | 1343 | /************************************************************************ |
1329 | * ixgbe_refresh_mbufs | | 1344 | * ixgbe_refresh_mbufs |
1330 | * | | 1345 | * |
1331 | * Refresh mbuf buffers for RX descriptor rings | | 1346 | * Refresh mbuf buffers for RX descriptor rings |
1332 | * - now keeps its own state so discards due to resource | | 1347 | * - now keeps its own state so discards due to resource |
1333 | * exhaustion are unnecessary, if an mbuf cannot be obtained | | 1348 | * exhaustion are unnecessary, if an mbuf cannot be obtained |
1334 | * it just returns, keeping its placeholder, thus it can simply | | 1349 | * it just returns, keeping its placeholder, thus it can simply |
1335 | * be recalled to try again. | | 1350 | * be recalled to try again. |
1336 | ************************************************************************/ | | 1351 | ************************************************************************/ |
1337 | static void | | 1352 | static void |
1338 | ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit) | | 1353 | ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit) |
1339 | { | | 1354 | { |
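
Per the header comment above, ixgbe_refresh_mbufs() is written so that it can simply be called again after an allocation failure. A condensed sketch of that shape (a hypothetical paraphrase; the real function also loads and syncs the DMA map and batches the tail-register update):

	for (i = rxr->next_to_refresh; i != limit;
	    i = (i + 1) % rxr->num_desc) {
		struct mbuf *mp = ixgbe_getcl();
		if (mp == NULL)
			break;	/* keep the placeholder; retry on next call */
		/* load DMA map, set rx_base[i].read.pkt_addr, sync ... */
		rxr->next_to_refresh = i;
	}
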
| @@ -1408,27 +1423,27 @@ update: | | | @@ -1408,27 +1423,27 @@ update: |
1408 | * rx_buffer per received packet, the maximum number of rx_buffer's | | 1423 | * rx_buffer per received packet, the maximum number of rx_buffer's |
1409 | * that we'll need is equal to the number of receive descriptors | | 1424 | * that we'll need is equal to the number of receive descriptors |
1410 | * that we've allocated. | | 1425 | * that we've allocated. |
1411 | ************************************************************************/ | | 1426 | ************************************************************************/ |
1412 | static int | | 1427 | static int |
1413 | ixgbe_allocate_receive_buffers(struct rx_ring *rxr) | | 1428 | ixgbe_allocate_receive_buffers(struct rx_ring *rxr) |
1414 | { | | 1429 | { |
1415 | struct ixgbe_softc *sc = rxr->sc; | | 1430 | struct ixgbe_softc *sc = rxr->sc; |
1416 | device_t dev = sc->dev; | | 1431 | device_t dev = sc->dev; |
1417 | struct ixgbe_rx_buf *rxbuf; | | 1432 | struct ixgbe_rx_buf *rxbuf; |
1418 | int bsize, error; | | 1433 | int bsize, error; |
1419 | | | 1434 | |
1420 | bsize = sizeof(struct ixgbe_rx_buf) * rxr->num_desc; | | 1435 | bsize = sizeof(struct ixgbe_rx_buf) * rxr->num_desc; |
1421 | rxr->rx_buffers = malloc(bsize, M_DEVBUF, M_WAITOK | M_ZERO); | | 1436 | rxr->rx_buffers = kmem_zalloc(bsize, KM_SLEEP); |
1422 | | | 1437 | |
1423 | error = ixgbe_dma_tag_create( | | 1438 | error = ixgbe_dma_tag_create( |
1424 | /* parent */ sc->osdep.dmat, | | 1439 | /* parent */ sc->osdep.dmat, |
1425 | /* alignment */ 1, | | 1440 | /* alignment */ 1, |
1426 | /* bounds */ 0, | | 1441 | /* bounds */ 0, |
1427 | /* maxsize */ MJUM16BYTES, | | 1442 | /* maxsize */ MJUM16BYTES, |
1428 | /* nsegments */ 1, | | 1443 | /* nsegments */ 1, |
1429 | /* maxsegsize */ MJUM16BYTES, | | 1444 | /* maxsegsize */ MJUM16BYTES, |
1430 | /* flags */ 0, | | 1445 | /* flags */ 0, |
1431 | &rxr->ptag); | | 1446 | &rxr->ptag); |
1432 | if (error != 0) { | | 1447 | if (error != 0) { |
1433 | aprint_error_dev(dev, "Unable to create RX DMA tag\n"); | | 1448 | aprint_error_dev(dev, "Unable to create RX DMA tag\n"); |
1434 | goto fail; | | 1449 | goto fail; |
| @@ -1486,58 +1501,58 @@ ixgbe_setup_receive_ring(struct rx_ring | | | @@ -1486,58 +1501,58 @@ ixgbe_setup_receive_ring(struct rx_ring |
1486 | sc = rxr->sc; | | 1501 | sc = rxr->sc; |
1487 | #ifdef LRO | | 1502 | #ifdef LRO |
1488 | ifp = sc->ifp; | | 1503 | ifp = sc->ifp; |
1489 | #endif /* LRO */ | | 1504 | #endif /* LRO */ |
1490 | | | 1505 | |
1491 | /* Clear the ring contents */ | | 1506 | /* Clear the ring contents */ |
1492 | IXGBE_RX_LOCK(rxr); | | 1507 | IXGBE_RX_LOCK(rxr); |
1493 | | | 1508 | |
1494 | #ifdef DEV_NETMAP | | 1509 | #ifdef DEV_NETMAP |
1495 | if (sc->feat_en & IXGBE_FEATURE_NETMAP) | | 1510 | if (sc->feat_en & IXGBE_FEATURE_NETMAP) |
1496 | slot = netmap_reset(na, NR_RX, rxr->me, 0); | | 1511 | slot = netmap_reset(na, NR_RX, rxr->me, 0); |
1497 | #endif /* DEV_NETMAP */ | | 1512 | #endif /* DEV_NETMAP */ |
1498 | | | 1513 | |
1499 | rsize = roundup2(sc->num_rx_desc * | | 1514 | rsize = sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc); |
1500 | sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN); | | 1515 | KASSERT((rsize % DBA_ALIGN) == 0); |
1501 | bzero((void *)rxr->rx_base, rsize); | | 1516 | bzero((void *)rxr->rx_base, rsize); |
1502 | /* Cache the size */ | | 1517 | /* Cache the size */ |
1503 | rxr->mbuf_sz = sc->rx_mbuf_sz; | | 1518 | rxr->mbuf_sz = sc->rx_mbuf_sz; |
1504 | | | 1519 | |
1505 | /* Free current RX buffer structs and their mbufs */ | | 1520 | /* Free current RX buffer structs and their mbufs */ |
1506 | ixgbe_free_receive_ring(rxr); | | 1521 | ixgbe_free_receive_ring(rxr); |
1507 | | | 1522 | |
1508 | /* Now replenish the mbufs */ | | 1523 | /* Now replenish the mbufs */ |
1509 | for (int j = 0; j != rxr->num_desc; ++j) { | | 1524 | for (int i = 0; i < rxr->num_desc; i++) { |
1510 | struct mbuf *mp; | | 1525 | struct mbuf *mp; |
1511 | | | 1526 | |
1512 | rxbuf = &rxr->rx_buffers[j]; | | 1527 | rxbuf = &rxr->rx_buffers[i]; |
1513 | | | 1528 | |
1514 | #ifdef DEV_NETMAP | | 1529 | #ifdef DEV_NETMAP |
1515 | /* | | 1530 | /* |
1516 | * In netmap mode, fill the map and set the buffer | | 1531 | * In netmap mode, fill the map and set the buffer |
1517 | * address in the NIC ring, considering the offset | | 1532 | * address in the NIC ring, considering the offset |
1518 | * between the netmap and NIC rings (see comment in | | 1533 | * between the netmap and NIC rings (see comment in |
1519 | * ixgbe_setup_transmit_ring() ). No need to allocate | | 1534 | * ixgbe_setup_transmit_ring() ). No need to allocate |
1520 | * an mbuf, so end the block with a continue; | | 1535 | * an mbuf, so end the block with a continue; |
1521 | */ | | 1536 | */ |
1522 | if ((sc->feat_en & IXGBE_FEATURE_NETMAP) && slot) { | | 1537 | if ((sc->feat_en & IXGBE_FEATURE_NETMAP) && slot) { |
1523 | int sj = netmap_idx_n2k(na->rx_rings[rxr->me], j); | | 1538 | int sj = netmap_idx_n2k(na->rx_rings[rxr->me], i); |
1524 | uint64_t paddr; | | 1539 | uint64_t paddr; |
1525 | void *addr; | | 1540 | void *addr; |
1526 | | | 1541 | |
1527 | addr = PNMB(na, slot + sj, &paddr); | | 1542 | addr = PNMB(na, slot + sj, &paddr); |
1528 | netmap_load_map(na, rxr->ptag, rxbuf->pmap, addr); | | 1543 | netmap_load_map(na, rxr->ptag, rxbuf->pmap, addr); |
1529 | /* Update descriptor and the cached value */ | | 1544 | /* Update descriptor and the cached value */ |
1530 | rxr->rx_base[j].read.pkt_addr = htole64(paddr); | | 1545 | rxr->rx_base[i].read.pkt_addr = htole64(paddr); |
1531 | rxbuf->addr = htole64(paddr); | | 1546 | rxbuf->addr = htole64(paddr); |
1532 | continue; | | 1547 | continue; |
1533 | } | | 1548 | } |
1534 | #endif /* DEV_NETMAP */ | | 1549 | #endif /* DEV_NETMAP */ |
1535 | | | 1550 | |
1536 | rxbuf->flags = 0; | | 1551 | rxbuf->flags = 0; |
1537 | rxbuf->buf = ixgbe_getcl(); | | 1552 | rxbuf->buf = ixgbe_getcl(); |
1538 | if (rxbuf->buf == NULL) { | | 1553 | if (rxbuf->buf == NULL) { |
1539 | IXGBE_EVC_ADD(&rxr->no_mbuf, 1); | | 1554 | IXGBE_EVC_ADD(&rxr->no_mbuf, 1); |
1540 | error = ENOBUFS; | | 1555 | error = ENOBUFS; |
1541 | goto fail; | | 1556 | goto fail; |
1542 | } | | 1557 | } |
1543 | mp = rxbuf->buf; | | 1558 | mp = rxbuf->buf; |
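
The hunk above drops the roundup2() and instead asserts that the RX descriptor array size is already a multiple of DBA_ALIGN. A quick check of why that holds, assuming (as in this hardware family) 16-byte advanced descriptors and a DBA_ALIGN of 128:

	/*
	 * 8 descriptors * 16 bytes = 128 bytes, so any ring length that
	 * is a multiple of 8 yields a DBA_ALIGN-aligned size, and
	 * num_rx_desc is constrained elsewhere to such multiples.
	 */
	CTASSERT(sizeof(union ixgbe_adv_rx_desc) == 16);
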
| @@ -1549,55 +1564,62 @@ ixgbe_setup_receive_ring(struct rx_ring | | | @@ -1549,55 +1564,62 @@ ixgbe_setup_receive_ring(struct rx_ring |
1549 | if (error != 0) { | | 1564 | if (error != 0) { |
1550 | /* | | 1565 | /* |
1551 | * Clear this entry for later cleanup in | | 1566 | * Clear this entry for later cleanup in |
1552 | * ixgbe_discard() which is called via | | 1567 | * ixgbe_discard() which is called via |
1553 | * ixgbe_free_receive_ring(). | | 1568 | * ixgbe_free_receive_ring(). |
1554 | */ | | 1569 | */ |
1555 | m_freem(mp); | | 1570 | m_freem(mp); |
1556 | rxbuf->buf = NULL; | | 1571 | rxbuf->buf = NULL; |
1557 | goto fail; | | 1572 | goto fail; |
1558 | } | | 1573 | } |
1559 | bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap, | | 1574 | bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap, |
1560 | 0, mp->m_pkthdr.len, BUS_DMASYNC_PREREAD); | | 1575 | 0, mp->m_pkthdr.len, BUS_DMASYNC_PREREAD); |
1561 | /* Update the descriptor and the cached value */ | | 1576 | /* Update the descriptor and the cached value */ |
1562 | rxr->rx_base[j].read.pkt_addr = | | 1577 | rxr->rx_base[i].read.pkt_addr = |
1563 | htole64(rxbuf->pmap->dm_segs[0].ds_addr); | | 1578 | htole64(rxbuf->pmap->dm_segs[0].ds_addr); |
1564 | rxbuf->addr = htole64(rxbuf->pmap->dm_segs[0].ds_addr); | | 1579 | rxbuf->addr = htole64(rxbuf->pmap->dm_segs[0].ds_addr); |
1565 | } | | 1580 | } |
1566 | | | 1581 | |
1567 | /* Setup our descriptor indices */ | | 1582 | /* Setup our descriptor indices */ |
1568 | rxr->next_to_check = 0; | | 1583 | rxr->next_to_check = 0; |
1569 | rxr->next_to_refresh = sc->num_rx_desc - 1; /* Fully allocated */ | | 1584 | rxr->next_to_refresh = sc->num_rx_desc - 1; /* Fully allocated */ |
| | | 1585 | #ifdef LRO |
1570 | rxr->lro_enabled = FALSE; | | 1586 | rxr->lro_enabled = FALSE; |
| | | 1587 | #endif |
1571 | rxr->discard_multidesc = false; | | 1588 | rxr->discard_multidesc = false; |
1572 | IXGBE_EVC_STORE(&rxr->rx_copies, 0); | | 1589 | IXGBE_EVC_STORE(&rxr->rx_copies, 0); |
1573 | #if 0 /* NetBSD */ | | 1590 | #if 0 /* NetBSD */ |
1574 | IXGBE_EVC_STORE(&rxr->rx_bytes, 0); | | 1591 | IXGBE_EVC_STORE(&rxr->rx_bytes, 0); |
1575 | #if 1 /* Fix inconsistency */ | | 1592 | #if 1 /* Fix inconsistency */ |
1576 | IXGBE_EVC_STORE(&rxr->rx_packets, 0); | | 1593 | IXGBE_EVC_STORE(&rxr->rx_packets, 0); |
1577 | #endif | | 1594 | #endif |
1578 | #endif | | 1595 | #endif |
1579 | rxr->vtag_strip = FALSE; | | 1596 | rxr->vtag_strip = FALSE; |
1580 | | | 1597 | |
1581 | ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, | | 1598 | ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, |
1582 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); | | 1599 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
1583 | | | 1600 | |
1584 | /* | | 1601 | /* |
1585 | * Now set up the LRO interface | | 1602 | * Now set up the LRO interface |
1586 | */ | | 1603 | */ |
| | | 1604 | #ifdef RSC |
1587 | if (ixgbe_rsc_enable) | | 1605 | if (ixgbe_rsc_enable) |
1588 | ixgbe_setup_hw_rsc(rxr); | | 1606 | ixgbe_setup_hw_rsc(rxr); |
| | | 1607 | #endif |
1589 | #ifdef LRO | | 1608 | #ifdef LRO |
1590 | else if (ifp->if_capenable & IFCAP_LRO) { | | 1609 | #ifdef RSC |
| | | 1610 | else |
| | | 1611 | #endif |
| | | 1612 | if (ifp->if_capenable & IFCAP_LRO) { |
1591 | device_t dev = sc->dev; | | 1613 | device_t dev = sc->dev; |
1592 | int err = tcp_lro_init(lro); | | 1614 | int err = tcp_lro_init(lro); |
1593 | if (err) { | | 1615 | if (err) { |
1594 | device_printf(dev, "LRO Initialization failed!\n"); | | 1616 | device_printf(dev, "LRO Initialization failed!\n"); |
1595 | goto fail; | | 1617 | goto fail; |
1596 | } | | 1618 | } |
1597 | INIT_DEBUGOUT("RX Soft LRO Initialized\n"); | | 1619 | INIT_DEBUGOUT("RX Soft LRO Initialized\n"); |
1598 | rxr->lro_enabled = TRUE; | | 1620 | rxr->lro_enabled = TRUE; |
1599 | lro->ifp = sc->ifp; | | 1621 | lro->ifp = sc->ifp; |
1600 | } | | 1622 | } |
1601 | #endif /* LRO */ | | 1623 | #endif /* LRO */ |
1602 | | | 1624 | |
1603 | IXGBE_RX_UNLOCK(rxr); | | 1625 | IXGBE_RX_UNLOCK(rxr); |
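
The RSC/LRO conditionals added in this hunk use a small preprocessor trick: the bare `else` is emitted only when RSC is compiled in, so every configuration still produces a well-formed if/else chain. The two interesting expansions look like this (illustrative, not compiler output):

	/* RSC and LRO both defined: */
	if (ixgbe_rsc_enable)
		ixgbe_setup_hw_rsc(rxr);
	else
	if (ifp->if_capenable & IFCAP_LRO) {
		/* ... tcp_lro_init() etc. ... */
	}

	/* only LRO defined: */
	if (ifp->if_capenable & IFCAP_LRO) {
		/* ... */
	}
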
| @@ -1654,54 +1676,55 @@ ixgbe_free_receive_structures(struct ixg | | | @@ -1654,54 +1676,55 @@ ixgbe_free_receive_structures(struct ixg |
1654 | INIT_DEBUGOUT("ixgbe_free_receive_structures: begin"); | | 1676 | INIT_DEBUGOUT("ixgbe_free_receive_structures: begin"); |
1655 | | | 1677 | |
1656 | for (int i = 0; i < sc->num_queues; i++, rxr++) { | | 1678 | for (int i = 0; i < sc->num_queues; i++, rxr++) { |
1657 | ixgbe_free_receive_buffers(rxr); | | 1679 | ixgbe_free_receive_buffers(rxr); |
1658 | #ifdef LRO | | 1680 | #ifdef LRO |
1659 | /* Free LRO memory */ | | 1681 | /* Free LRO memory */ |
1660 | tcp_lro_free(&rxr->lro); | | 1682 | tcp_lro_free(&rxr->lro); |
1661 | #endif /* LRO */ | | 1683 | #endif /* LRO */ |
1662 | /* Free the ring memory as well */ | | 1684 | /* Free the ring memory as well */ |
1663 | ixgbe_dma_free(sc, &rxr->rxdma); | | 1685 | ixgbe_dma_free(sc, &rxr->rxdma); |
1664 | IXGBE_RX_LOCK_DESTROY(rxr); | | 1686 | IXGBE_RX_LOCK_DESTROY(rxr); |
1665 | } | | 1687 | } |
1666 | | | 1688 | |
1667 | free(sc->rx_rings, M_DEVBUF); | | 1689 | kmem_free(sc->rx_rings, sizeof(struct rx_ring) * sc->num_queues); |
1668 | } /* ixgbe_free_receive_structures */ | | 1690 | } /* ixgbe_free_receive_structures */ |
1669 | | | 1691 | |
1670 | | | 1692 | |
1671 | /************************************************************************ | | 1693 | /************************************************************************ |
1672 | * ixgbe_free_receive_buffers - Free receive ring data structures | | 1694 | * ixgbe_free_receive_buffers - Free receive ring data structures |
1673 | ************************************************************************/ | | 1695 | ************************************************************************/ |
1674 | static void | | 1696 | static void |
1675 | ixgbe_free_receive_buffers(struct rx_ring *rxr) | | 1697 | ixgbe_free_receive_buffers(struct rx_ring *rxr) |
1676 | { | | 1698 | { |
1677 | struct ixgbe_softc *sc = rxr->sc; | | 1699 | struct ixgbe_softc *sc = rxr->sc; |
1678 | struct ixgbe_rx_buf *rxbuf; | | 1700 | struct ixgbe_rx_buf *rxbuf; |
1679 | | | 1701 | |
1680 | INIT_DEBUGOUT("ixgbe_free_receive_buffers: begin"); | | 1702 | INIT_DEBUGOUT("ixgbe_free_receive_buffers: begin"); |
1681 | | | 1703 | |
1682 | /* Cleanup any existing buffers */ | | 1704 | /* Cleanup any existing buffers */ |
1683 | if (rxr->rx_buffers != NULL) { | | 1705 | if (rxr->rx_buffers != NULL) { |
1684 | for (int i = 0; i < sc->num_rx_desc; i++) { | | 1706 | for (int i = 0; i < sc->num_rx_desc; i++) { |
1685 | rxbuf = &rxr->rx_buffers[i]; | | 1707 | rxbuf = &rxr->rx_buffers[i]; |
1686 | ixgbe_rx_discard(rxr, i); | | 1708 | ixgbe_rx_discard(rxr, i); |
1687 | if (rxbuf->pmap != NULL) { | | 1709 | if (rxbuf->pmap != NULL) { |
1688 | ixgbe_dmamap_destroy(rxr->ptag, rxbuf->pmap); | | 1710 | ixgbe_dmamap_destroy(rxr->ptag, rxbuf->pmap); |
1689 | rxbuf->pmap = NULL; | | 1711 | rxbuf->pmap = NULL; |
1690 | } | | 1712 | } |
1691 | } | | 1713 | } |
1692 | | | 1714 | |
1693 | if (rxr->rx_buffers != NULL) { | | 1715 | if (rxr->rx_buffers != NULL) { |
1694 | free(rxr->rx_buffers, M_DEVBUF); | | 1716 | kmem_free(rxr->rx_buffers, |
| | | 1717 | sizeof(struct ixgbe_rx_buf) * rxr->num_desc); |
1695 | rxr->rx_buffers = NULL; | | 1718 | rxr->rx_buffers = NULL; |
1696 | } | | 1719 | } |
1697 | } | | 1720 | } |
1698 | | | 1721 | |
1699 | if (rxr->ptag != NULL) { | | 1722 | if (rxr->ptag != NULL) { |
1700 | ixgbe_dma_tag_destroy(rxr->ptag); | | 1723 | ixgbe_dma_tag_destroy(rxr->ptag); |
1701 | rxr->ptag = NULL; | | 1724 | rxr->ptag = NULL; |
1702 | } | | 1725 | } |
1703 | | | 1726 | |
1704 | return; | | 1727 | return; |
1705 | } /* ixgbe_free_receive_buffers */ | | 1728 | } /* ixgbe_free_receive_buffers */ |
1706 | | | 1729 | |
1707 | /************************************************************************ | | 1730 | /************************************************************************ |
| @@ -1844,27 +1867,30 @@ ixgbe_rxeof(struct ix_queue *que) | | | @@ -1844,27 +1867,30 @@ ixgbe_rxeof(struct ix_queue *que) |
1844 | sizeof(union ixgbe_adv_rx_desc) * numdesc, | | 1867 | sizeof(union ixgbe_adv_rx_desc) * numdesc, |
1845 | BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); | | 1868 | BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); |
1846 | | | 1869 | |
1847 | /* | | 1870 | /* |
1848 | * The max number of loops is rx_process_limit. If discard_multidesc is | | 1871 | * The max number of loops is rx_process_limit. If discard_multidesc is |
1849 | * true, continue processing so as not to send a broken packet to the upper | | 1872 | * true, continue processing so as not to send a broken packet to the upper |
1850 | * layer. | | 1873 | * layer. |
1851 | */ | | 1874 | */ |
1852 | for (i = rxr->next_to_check; | | 1875 | for (i = rxr->next_to_check; |
1853 | (loopcount < limit) || (discard_multidesc == true);) { | | 1876 | (loopcount < limit) || (discard_multidesc == true);) { |
1854 | | | 1877 | |
1855 | struct mbuf *sendmp, *mp; | | 1878 | struct mbuf *sendmp, *mp; |
1856 | struct mbuf *newmp; | | 1879 | struct mbuf *newmp; |
1857 | u32 rsc, ptype; | | 1880 | #ifdef RSC |
| | | 1881 | u32 rsc; |
| | | 1882 | #endif |
| | | 1883 | u32 ptype; |
1858 | u16 len; | | 1884 | u16 len; |
1859 | u16 vtag = 0; | | 1885 | u16 vtag = 0; |
1860 | bool eop; | | 1886 | bool eop; |
1861 | bool discard = false; | | 1887 | bool discard = false; |
1862 | | | 1888 | |
1863 | if (wraparound) { | | 1889 | if (wraparound) { |
1864 | /* Sync the last half. */ | | 1890 | /* Sync the last half. */ |
1865 | KASSERT(syncremain != 0); | | 1891 | KASSERT(syncremain != 0); |
1866 | numdesc = syncremain; | | 1892 | numdesc = syncremain; |
1867 | wraparound = false; | | 1893 | wraparound = false; |
1868 | } else if (__predict_false(loopcount >= limit)) { | | 1894 | } else if (__predict_false(loopcount >= limit)) { |
1869 | KASSERT(discard_multidesc == true); | | 1895 | KASSERT(discard_multidesc == true); |
1870 | numdesc = 1; | | 1896 | numdesc = 1; |
| @@ -1879,27 +1905,29 @@ ixgbe_rxeof(struct ix_queue *que) | | | @@ -1879,27 +1905,29 @@ ixgbe_rxeof(struct ix_queue *que) |
1879 | | | 1905 | |
1880 | cur = &rxr->rx_base[i]; | | 1906 | cur = &rxr->rx_base[i]; |
1881 | staterr = le32toh(cur->wb.upper.status_error); | | 1907 | staterr = le32toh(cur->wb.upper.status_error); |
1882 | #ifdef RSS | | 1908 | #ifdef RSS |
1883 | pkt_info = le16toh(cur->wb.lower.lo_dword.hs_rss.pkt_info); | | 1909 | pkt_info = le16toh(cur->wb.lower.lo_dword.hs_rss.pkt_info); |
1884 | #endif | | 1910 | #endif |
1885 | | | 1911 | |
1886 | if ((staterr & IXGBE_RXD_STAT_DD) == 0) | | 1912 | if ((staterr & IXGBE_RXD_STAT_DD) == 0) |
1887 | break; | | 1913 | break; |
1888 | | | 1914 | |
1889 | loopcount++; | | 1915 | loopcount++; |
1890 | sendmp = newmp = NULL; | | 1916 | sendmp = newmp = NULL; |
1891 | nbuf = NULL; | | 1917 | nbuf = NULL; |
| | | 1918 | #ifdef RSC |
1892 | rsc = 0; | | 1919 | rsc = 0; |
| | | 1920 | #endif |
1893 | cur->wb.upper.status_error = 0; | | 1921 | cur->wb.upper.status_error = 0; |
1894 | rbuf = &rxr->rx_buffers[i]; | | 1922 | rbuf = &rxr->rx_buffers[i]; |
1895 | mp = rbuf->buf; | | 1923 | mp = rbuf->buf; |
1896 | | | 1924 | |
1897 | len = le16toh(cur->wb.upper.length); | | 1925 | len = le16toh(cur->wb.upper.length); |
1898 | ptype = le32toh(cur->wb.lower.lo_dword.data) & | | 1926 | ptype = le32toh(cur->wb.lower.lo_dword.data) & |
1899 | IXGBE_RXDADV_PKTTYPE_MASK; | | 1927 | IXGBE_RXDADV_PKTTYPE_MASK; |
1900 | eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0); | | 1928 | eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0); |
1901 | | | 1929 | |
1902 | /* Make sure bad packets are discarded */ | | 1930 | /* Make sure bad packets are discarded */ |
1903 | if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) { | | 1931 | if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) { |
1904 | #if __FreeBSD_version >= 1100036 | | 1932 | #if __FreeBSD_version >= 1100036 |
1905 | if (sc->feat_en & IXGBE_FEATURE_VF) | | 1933 | if (sc->feat_en & IXGBE_FEATURE_VF) |
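
The new #ifdef RSC guards in this hunk put the declaration and the initialization of rsc under the same conditional as the code that reads it, so builds without options RSC carry neither the variable nor a set-but-unused-variable warning. A generic sketch of the pattern, with FEATURE_X standing in for RSC and a made-up field layout:

    #include <stdint.h>

    /* FEATURE_X stands in for the driver's RSC option. */
    static uint32_t
    parse_desc(uint32_t raw)
    {
    #ifdef FEATURE_X
            uint32_t xcount;        /* exists only when its users do */
    #endif
            uint32_t type = raw & 0xff;

    #ifdef FEATURE_X
            xcount = (raw >> 8) & 0xf;      /* made-up field extract */
            if (xcount > 1)
                    type |= 0x100;          /* hypothetical: coalesced */
    #endif
            return type;
    }
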
| @@ -1965,34 +1993,37 @@ ixgbe_rxeof(struct ix_queue *que) | | | @@ -1965,34 +1993,37 @@ ixgbe_rxeof(struct ix_queue *que) |
1965 | * descriptors, rather the next descriptor | | 1993 | * descriptors, rather the next descriptor |
1966 | * is indicated in bits of the descriptor. | | 1994 | * is indicated in bits of the descriptor. |
1967 | * This also means that we might process | | 1995 | * This also means that we might process |
1968 | * more than one packet at a time, something | | 1996 | * more than one packet at a time, something |
1969 | * that had never been true before; it | | 1997 | * that had never been true before; it |
1970 | * required eliminating global chain pointers | | 1998 | * required eliminating global chain pointers |
1971 | * in favor of what we are doing here. -jfv | | 1999 | * in favor of what we are doing here. -jfv |
1972 | */ | | 2000 | */ |
1973 | if (!eop) { | | 2001 | if (!eop) { |
1974 | /* | | 2002 | /* |
1975 | * Figure out the next descriptor | | 2003 | * Figure out the next descriptor |
1976 | * of this frame. | | 2004 | * of this frame. |
1977 | */ | | 2005 | */ |
| | | 2006 | #ifdef RSC |
1978 | if (rxr->hw_rsc == TRUE) { | | 2007 | if (rxr->hw_rsc == TRUE) { |
1979 | rsc = ixgbe_rsc_count(cur); | | 2008 | rsc = ixgbe_rsc_count(cur); |
1980 | rxr->rsc_num += (rsc - 1); | | 2009 | rxr->rsc_num += (rsc - 1); |
1981 | } | | 2010 | } |
1982 | if (rsc) { /* Get hardware index */ | | 2011 | if (rsc) { /* Get hardware index */ |
1983 | nextp = ((staterr & IXGBE_RXDADV_NEXTP_MASK) >> | | 2012 | nextp = ((staterr & IXGBE_RXDADV_NEXTP_MASK) >> |
1984 | IXGBE_RXDADV_NEXTP_SHIFT); | | 2013 | IXGBE_RXDADV_NEXTP_SHIFT); |
1985 | } else { /* Just sequential */ | | 2014 | } else |
| | | 2015 | #endif |
| | | 2016 | { /* Just sequential */ |
1986 | nextp = i + 1; | | 2017 | nextp = i + 1; |
1987 | if (nextp == sc->num_rx_desc) | | 2018 | if (nextp == sc->num_rx_desc) |
1988 | nextp = 0; | | 2019 | nextp = 0; |
1989 | } | | 2020 | } |
1990 | nbuf = &rxr->rx_buffers[nextp]; | | 2021 | nbuf = &rxr->rx_buffers[nextp]; |
1991 | prefetch(nbuf); | | 2022 | prefetch(nbuf); |
1992 | } | | 2023 | } |
1993 | /* | | 2024 | /* |
1994 | * Rather than using the fmp/lmp global pointers | | 2025 | * Rather than using the fmp/lmp global pointers |
1995 | * we now keep the head of a packet chain in the | | 2026 | * we now keep the head of a packet chain in the |
1996 | * buffer struct and pass this along from one | | 2027 | * buffer struct and pass this along from one |
1997 | * descriptor to the next, until we get EOP. | | 2028 | * descriptor to the next, until we get EOP. |
1998 | */ | | 2029 | */ |
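
With the #ifdef RSC restructuring above, default builds keep only the sequential path: the next fragment of a multi-descriptor frame is simply the next ring slot, wrapping at num_rx_desc, while RSC hardware instead encodes the successor's index in the descriptor's NEXTP field. A toy model of the two lookups (the field layout here is illustrative, not the hardware's):

    #include <stdbool.h>
    #include <stdint.h>

    #define NEXTP_MASK      0x000f0000u     /* illustrative layout */
    #define NEXTP_SHIFT     16

    static unsigned
    next_desc_index(uint32_t staterr, unsigned i, unsigned nslots, bool rsc)
    {
            if (rsc)
                    /* RSC: hardware wrote the successor's index into
                     * the descriptor's NEXTP field. */
                    return (staterr & NEXTP_MASK) >> NEXTP_SHIFT;
            /* Default: fragments occupy consecutive ring slots. */
            return (i + 1 == nslots) ? 0 : i + 1;
    }
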
| @@ -2322,40 +2353,40 @@ ixgbe_dma_free(struct ixgbe_softc *sc, s | | | @@ -2322,40 +2353,40 @@ ixgbe_dma_free(struct ixgbe_softc *sc, s |
2322 | * the descriptors associated with each, called only once at attach. | | 2353 | * the descriptors associated with each, called only once at attach. |
2323 | ************************************************************************/ | | 2354 | ************************************************************************/ |
2324 | int | | 2355 | int |
2325 | ixgbe_allocate_queues(struct ixgbe_softc *sc) | | 2356 | ixgbe_allocate_queues(struct ixgbe_softc *sc) |
2326 | { | | 2357 | { |
2327 | device_t dev = sc->dev; | | 2358 | device_t dev = sc->dev; |
2328 | struct ix_queue *que; | | 2359 | struct ix_queue *que; |
2329 | struct tx_ring *txr; | | 2360 | struct tx_ring *txr; |
2330 | struct rx_ring *rxr; | | 2361 | struct rx_ring *rxr; |
2331 | int rsize, tsize, error = IXGBE_SUCCESS; | | 2362 | int rsize, tsize, error = IXGBE_SUCCESS; |
2332 | int txconf = 0, rxconf = 0; | | 2363 | int txconf = 0, rxconf = 0; |
2333 | | | 2364 | |
2334 | /* First, allocate the top level queue structs */ | | 2365 | /* First, allocate the top level queue structs */ |
2335 | sc->queues = (struct ix_queue *)malloc(sizeof(struct ix_queue) * | | 2366 | sc->queues = kmem_zalloc(sizeof(struct ix_queue) * sc->num_queues, |
2336 | sc->num_queues, M_DEVBUF, M_WAITOK | M_ZERO); | | 2367 | KM_SLEEP); |
2337 | | | 2368 | |
2338 | /* Second, allocate the TX ring struct memory */ | | 2369 | /* Second, allocate the TX ring struct memory */ |
2339 | sc->tx_rings = malloc(sizeof(struct tx_ring) * | | 2370 | sc->tx_rings = kmem_zalloc(sizeof(struct tx_ring) * sc->num_queues, |
2340 | sc->num_queues, M_DEVBUF, M_WAITOK | M_ZERO); | | 2371 | KM_SLEEP); |
2341 | | | 2372 | |
2342 | /* Third, allocate the RX ring */ | | 2373 | /* Third, allocate the RX ring */ |
2343 | sc->rx_rings = (struct rx_ring *)malloc(sizeof(struct rx_ring) * | | 2374 | sc->rx_rings = kmem_zalloc(sizeof(struct rx_ring) * sc->num_queues, |
2344 | sc->num_queues, M_DEVBUF, M_WAITOK | M_ZERO); | | 2375 | KM_SLEEP); |
2345 | | | 2376 | |
2346 | /* For the ring itself */ | | 2377 | /* For the ring itself */ |
2347 | tsize = roundup2(sc->num_tx_desc * sizeof(union ixgbe_adv_tx_desc), | | 2378 | tsize = sc->num_tx_desc * sizeof(union ixgbe_adv_tx_desc); |
2348 | DBA_ALIGN); | | 2379 | KASSERT((tsize % DBA_ALIGN) == 0); |
2349 | | | 2380 | |
2350 | /* | | 2381 | /* |
2351 | * Now set up the TX queues; txconf is needed to handle the | | 2382 | * Now set up the TX queues; txconf is needed to handle the |
2352 | * possibility that things fail midcourse and we need to | | 2383 | * possibility that things fail midcourse and we need to |
2353 | * unwind the allocations gracefully | | 2384 | * unwind the allocations gracefully |
2354 | */ | | 2385 | */ |
2355 | for (int i = 0; i < sc->num_queues; i++, txconf++) { | | 2386 | for (int i = 0; i < sc->num_queues; i++, txconf++) { |
2356 | /* Set up some basics */ | | 2387 | /* Set up some basics */ |
2357 | txr = &sc->tx_rings[i]; | | 2388 | txr = &sc->tx_rings[i]; |
2358 | txr->sc = sc; | | 2389 | txr->sc = sc; |
2359 | txr->txr_interq = NULL; | | 2390 | txr->txr_interq = NULL; |
2360 | /* In case SR-IOV is enabled, align the index properly */ | | 2391 | /* In case SR-IOV is enabled, align the index properly */ |
2361 | #ifdef PCI_IOV | | 2392 | #ifdef PCI_IOV |
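
Two conversions meet in the hunk above: the queue, TX-ring, and RX-ring arrays become zeroed kmem_zalloc(9) allocations (KM_SLEEP, with the sizes repeated at free time), and tsize is no longer rounded up with roundup2() but asserted to already be DBA_ALIGN-aligned, since a ring of 16-byte descriptors in the multiple-of-8 counts the hardware accepts is always a multiple of a 128-byte alignment; the same reasoning applies to rsize below. A userland check of that equivalence (the constants are stated here as assumptions about this driver, not verified values):

    #include <assert.h>

    #define DBA_ALIGN       128     /* assumed descriptor-base alignment */
    #define DESC_SIZE       16      /* assumed sizeof(union ixgbe_adv_rx_desc) */

    /* NetBSD's roundup2(); the identity when x is already a multiple
     * of power-of-two 'a' -- exactly what the new KASSERT captures. */
    #define roundup2(x, a)  (((x) + ((a) - 1)) & ~((a) - 1))

    int
    main(void)
    {
            for (int ndesc = 64; ndesc <= 4096; ndesc += 8) {
                    int size = ndesc * DESC_SIZE;
                    assert(size % DBA_ALIGN == 0);
                    assert(roundup2(size, DBA_ALIGN) == size);
            }
            return 0;
    }
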
| @@ -2391,28 +2422,28 @@ ixgbe_allocate_queues(struct ixgbe_softc | | | @@ -2391,28 +2422,28 @@ ixgbe_allocate_queues(struct ixgbe_softc |
2391 | txr->txr_interq = pcq_create(IXGBE_BR_SIZE, KM_SLEEP); | | 2422 | txr->txr_interq = pcq_create(IXGBE_BR_SIZE, KM_SLEEP); |
2392 | if (txr->txr_interq == NULL) { | | 2423 | if (txr->txr_interq == NULL) { |
2393 | aprint_error_dev(dev, | | 2424 | aprint_error_dev(dev, |
2394 | "Critical Failure setting up buf ring\n"); | | 2425 | "Critical Failure setting up buf ring\n"); |
2395 | error = ENOMEM; | | 2426 | error = ENOMEM; |
2396 | goto err_tx_desc; | | 2427 | goto err_tx_desc; |
2397 | } | | 2428 | } |
2398 | } | | 2429 | } |
2399 | } | | 2430 | } |
2400 | | | 2431 | |
2401 | /* | | 2432 | /* |
2402 | * Next the RX queues... | | 2433 | * Next the RX queues... |
2403 | */ | | 2434 | */ |
2404 | rsize = roundup2(sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc), | | 2435 | rsize = sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc); |
2405 | DBA_ALIGN); | | 2436 | KASSERT((rsize % DBA_ALIGN) == 0); |
2406 | for (int i = 0; i < sc->num_queues; i++, rxconf++) { | | 2437 | for (int i = 0; i < sc->num_queues; i++, rxconf++) { |
2407 | rxr = &sc->rx_rings[i]; | | 2438 | rxr = &sc->rx_rings[i]; |
2408 | /* Set up some basics */ | | 2439 | /* Set up some basics */ |
2409 | rxr->sc = sc; | | 2440 | rxr->sc = sc; |
2410 | #ifdef PCI_IOV | | 2441 | #ifdef PCI_IOV |
2411 | /* In case SR-IOV is enabled, align the index properly */ | | 2442 | /* In case SR-IOV is enabled, align the index properly */ |
2412 | rxr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, | | 2443 | rxr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, |
2413 | i); | | 2444 | i); |
2414 | #else | | 2445 | #else |
2415 | rxr->me = i; | | 2446 | rxr->me = i; |
2416 | #endif | | 2447 | #endif |
2417 | rxr->num_desc = sc->num_rx_desc; | | 2448 | rxr->num_desc = sc->num_rx_desc; |
2418 | | | 2449 | |
| @@ -2450,39 +2481,39 @@ ixgbe_allocate_queues(struct ixgbe_softc | | | @@ -2450,39 +2481,39 @@ ixgbe_allocate_queues(struct ixgbe_softc |
2450 | | | 2481 | |
2451 | mutex_init(&que->dc_mtx, MUTEX_DEFAULT, IPL_NET); | | 2482 | mutex_init(&que->dc_mtx, MUTEX_DEFAULT, IPL_NET); |
2452 | que->disabled_count = 0; | | 2483 | que->disabled_count = 0; |
2453 | } | | 2484 | } |
2454 | | | 2485 | |
2455 | return (0); | | 2486 | return (0); |
2456 | | | 2487 | |
2457 | err_rx_desc: | | 2488 | err_rx_desc: |
2458 | for (rxr = sc->rx_rings; rxconf > 0; rxr++, rxconf--) | | 2489 | for (rxr = sc->rx_rings; rxconf > 0; rxr++, rxconf--) |
2459 | ixgbe_dma_free(sc, &rxr->rxdma); | | 2490 | ixgbe_dma_free(sc, &rxr->rxdma); |
2460 | err_tx_desc: | | 2491 | err_tx_desc: |
2461 | for (txr = sc->tx_rings; txconf > 0; txr++, txconf--) | | 2492 | for (txr = sc->tx_rings; txconf > 0; txr++, txconf--) |
2462 | ixgbe_dma_free(sc, &txr->txdma); | | 2493 | ixgbe_dma_free(sc, &txr->txdma); |
2463 | free(sc->rx_rings, M_DEVBUF); | | 2494 | kmem_free(sc->rx_rings, sizeof(struct rx_ring) * sc->num_queues); |
2464 | free(sc->tx_rings, M_DEVBUF); | | 2495 | kmem_free(sc->tx_rings, sizeof(struct tx_ring) * sc->num_queues); |
2465 | free(sc->queues, M_DEVBUF); | | 2496 | kmem_free(sc->queues, sizeof(struct ix_queue) * sc->num_queues); |
2466 | return (error); | | 2497 | return (error); |
2467 | } /* ixgbe_allocate_queues */ | | 2498 | } /* ixgbe_allocate_queues */ |
2468 | | | 2499 | |
2469 | /************************************************************************ | | 2500 | /************************************************************************ |
2470 | * ixgbe_free_queues | | 2501 | * ixgbe_free_queues |
2471 | * | | 2502 | * |
2472 | * Free descriptors for the transmit and receive rings, and then | | 2503 | * Free descriptors for the transmit and receive rings, and then |
2473 | * the memory associated with each. | | 2504 | * the memory associated with each. |
2474 | ************************************************************************/ | | 2505 | ************************************************************************/ |
2475 | void | | 2506 | void |
2476 | ixgbe_free_queues(struct ixgbe_softc *sc) | | 2507 | ixgbe_free_queues(struct ixgbe_softc *sc) |
2477 | { | | 2508 | { |
2478 | struct ix_queue *que; | | 2509 | struct ix_queue *que; |
2479 | int i; | | 2510 | int i; |
2480 | | | 2511 | |
2481 | ixgbe_free_transmit_structures(sc); | | 2512 | ixgbe_free_transmit_structures(sc); |
2482 | ixgbe_free_receive_structures(sc); | | 2513 | ixgbe_free_receive_structures(sc); |
2483 | for (i = 0; i < sc->num_queues; i++) { | | 2514 | for (i = 0; i < sc->num_queues; i++) { |
2484 | que = &sc->queues[i]; | | 2515 | que = &sc->queues[i]; |
2485 | mutex_destroy(&que->dc_mtx); | | 2516 | mutex_destroy(&que->dc_mtx); |
2486 | } | | 2517 | } |
2487 | free(sc->queues, M_DEVBUF); | | 2518 | kmem_free(sc->queues, sizeof(struct ix_queue) * sc->num_queues); |
2488 | } /* ixgbe_free_queues */ | | 2519 | } /* ixgbe_free_queues */ |
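
The err_rx_desc/err_tx_desc unwind earlier in ixgbe_allocate_queues depends on txconf and rxconf recording how many rings were fully set up, so only those are torn down, and the final kmem_free(9) calls there and in ixgbe_free_queues must repeat the element counts used at allocation. A compact sketch of that counter-based unwind idiom (hypothetical ring helpers, not the driver's API):

    #include <sys/param.h>
    #include <sys/kmem.h>

    struct ring {
            void    *dma;
    };

    static int  ring_dma_alloc(struct ring *);  /* hypothetical */
    static void ring_dma_free(struct ring *);   /* hypothetical */

    static int
    alloc_rings(struct ring **out, int n)
    {
            struct ring *rings;
            int conf;

            rings = kmem_zalloc(sizeof(*rings) * n, KM_SLEEP);

            /* 'conf' counts fully initialized rings for the unwind. */
            for (conf = 0; conf < n; conf++) {
                    if (ring_dma_alloc(&rings[conf]) != 0)
                            goto fail;
            }
            *out = rings;
            return 0;

    fail:
            /* Tear down only what was successfully set up. */
            while (conf-- > 0)
                    ring_dma_free(&rings[conf]);
            kmem_free(rings, sizeof(*rings) * n);
            return ENOMEM;
    }
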