Sat Feb 3 11:58:53 2024 UTC
Pull up following revision(s) (requested by msaitoh in ticket #563):

	sys/dev/pci/ixgbe/ix_txrx.c: revision 1.110
	sys/dev/pci/ixgbe/ixgbe.c: revision 1.345
	sys/dev/pci/ixgbe/ix_txrx.c: revision 1.111
	sys/dev/pci/ixgbe/ixgbe.c: revision 1.346
	sys/dev/pci/ixgbe/ix_txrx.c: revision 1.112
	sys/dev/pci/ixgbe/ixgbe_type.h: revision 1.62
	sys/dev/pci/ixgbe/ix_txrx.c: revision 1.113
	sys/dev/pci/ixgbe/ixgbe.c: revision 1.348
	sys/dev/pci/ixgbe/ix_txrx.c: revision 1.114
	sys/dev/pci/ixgbe/ix_txrx.c: revision 1.115
	sys/dev/pci/ixgbe/ix_txrx.c: revision 1.116
	sys/dev/pci/ixgbe/ix_txrx.c: revision 1.105
	sys/dev/pci/ixgbe/ix_txrx.c: revision 1.106
	sys/dev/pci/ixgbe/ix_txrx.c: revision 1.107
	sys/dev/pci/ixgbe/ix_txrx.c: revision 1.108
	sys/dev/pci/ixgbe/ix_txrx.c: revision 1.109
	sys/dev/pci/ixgbe/ixv.c: revision 1.193
	sys/dev/pci/ixgbe/ixv.c: revision 1.195
	sys/dev/pci/ixgbe/ixv.c: revision 1.196
	sys/dev/pci/ixgbe/ixgbe.h: revision 1.94
	sys/dev/pci/ixgbe/ixgbe.h: revision 1.95
	sys/dev/pci/ixgbe/ixgbe.h: revision 1.96
	sys/dev/pci/ixgbe/ixgbe.h: revision 1.97
	sys/dev/pci/ixgbe/ixgbe.h: revision 1.98

ixgbe: Fix comment. No functional change.
ixgbe: Whitespace. No functional change.
ixgbe(4): Move assignment of TXD. NFCI.
ixgbe(4): Modify comment. No functional change.
 ixgbe_tx_ctx_setup() may or may not consume one TX descriptor.
ixv(4): Remove unused IFF_OACTIVE. No functional change.
ixgbe: Clear the WTHRESH bit field before writing it.
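WTHRESH is a multi-bit field of TXDCTL, so writing a new value without clearing
the old one first can leave stale bits behind across a reinit. Below is a
minimal sketch of the read-modify-write pattern in plain C; the field position
(bits 22:16) follows the 82599-class datasheets, and the macro names here are
made up for illustration rather than taken from ixgbe_type.h.

    /* Illustrative only: clear a register bit field before setting it. */
    #include <stdint.h>

    #define TXDCTL_WTHRESH_SHIFT 16                 /* WTHRESH is bits 22:16 */
    #define TXDCTL_WTHRESH_MASK  (0x7fU << TXDCTL_WTHRESH_SHIFT)

    static uint32_t
    txdctl_set_wthresh(uint32_t txdctl, uint32_t wthresh)
    {
        txdctl &= ~TXDCTL_WTHRESH_MASK;             /* drop the stale field */
        txdctl |= (wthresh << TXDCTL_WTHRESH_SHIFT) & TXDCTL_WTHRESH_MASK;
        return txdctl;
    }
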
ixgbe: Modify for readability. No functional change.
ixgbe: micro-optimize ixgbe_txeof()
 Update txr->tx_avail and txr->txr_no_space outside the loop in ixgbe_txeof().
ixgbe: Update if_opackets outside the loop in ixgbe_txeof().
ixgbe: micro-optimize ixgbe_txeof()
 Update txr->packets outside the loop in ixgbe_txeof().
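These ixgbe_txeof() micro-optimizations all follow the same idea: keep running
totals in local variables while walking the descriptor ring and store them to
the shared ring state and the interface statistics once, after the loop. A
compressed sketch of the pattern with generic types, not the driver's real
structures:

    /* Illustrative only: accumulate locally, publish once after the loop. */
    #include <stdbool.h>
    #include <stdint.h>

    struct ring {
        uint32_t tx_avail;              /* shared with the transmit path */
        uint64_t packets;
    };

    static void
    clean_ring(struct ring *r, const bool done[], int ndesc)
    {
        uint32_t avail = r->tx_avail;   /* local accumulator */
        uint64_t processed = 0;
        int i;

        for (i = 0; i < ndesc && done[i]; i++) {
            avail++;                    /* cheap: touches a local only */
            processed++;
        }
        if (processed > 0) {            /* one store to the shared state */
            r->tx_avail = avail;
            r->packets += processed;
        }
    }
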
ixgbe: Use #ifdef IXGBE_FDIR more
 Don't include the Flow Director-related members, to reduce the size of
struct tx_ring. On amd64 and aarch64 the real size does not change
because of alignment padding.
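Removing a member does not always shrink a structure: the compiler still pads
it to a multiple of its strictest member alignment, so a small member that sat
next to padding can disappear without changing sizeof. The same reasoning
covers the later u32 -> u8 change of "me". A small userland example with
hypothetical member names, compilable to check the effect on LP64 platforms
such as amd64 and aarch64:

    /* Illustrative only: tail padding can absorb a removed member. */
    #include <stdint.h>
    #include <stdio.h>

    struct with_fdir {
        void    *tx_base;               /* forces 8-byte struct alignment */
        uint32_t flags;
        uint16_t atr_sample;            /* the kind of member #ifdef removes */
    };                                  /* 8 + 4 + 2 + 2 (pad) = 16 bytes */

    struct without_fdir {
        void    *tx_base;
        uint32_t flags;
    };                                  /* 8 + 4 + 4 (pad) = 16 bytes, too */

    int
    main(void)
    {
        printf("%zu %zu\n", sizeof(struct with_fdir),
            sizeof(struct without_fdir));
        return 0;
    }
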
ixgbe: Simplify. No functional change.
 The descriptor ring size and alignment are checked in the attach
function, so using roundup2(size, DBA_ALIGN) is not required.
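roundup2(x, m) rounds x up to the next multiple of the power-of-two m, so it is
the identity whenever the size is already a DBA_ALIGN multiple; once attach has
verified that, the rounding can be replaced by an assertion documenting the
invariant. A standalone sketch, assuming DBA_ALIGN is 128 as in the driver and
a ROUNDUP2() equivalent to the sys/param.h macro:

    /* Illustrative only: assert the alignment invariant instead of
     * re-rounding an already aligned size. */
    #include <assert.h>
    #include <stddef.h>

    #define DBA_ALIGN       128         /* descriptor base address alignment */
    #define ROUNDUP2(x, m)  (((x) + ((m) - 1)) & ~((size_t)(m) - 1))

    size_t
    ring_bytes(size_t ndesc, size_t desc_size)
    {
        size_t size = ndesc * desc_size;

        /* Attach already rejected ring sizes that are not DBA_ALIGN
         * multiples, so ROUNDUP2(size, DBA_ALIGN) would be a no-op. */
        assert(size % DBA_ALIGN == 0);
        assert(ROUNDUP2(size, DBA_ALIGN) == size);
        return size;
    }
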
ixgbe: Use kmem_zalloc() instead of malloc(,M_ZERO).
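kmem(9) is the preferred NetBSD allocator for wired kernel memory:
kmem_zalloc(size, KM_SLEEP) replaces malloc(size, M_DEVBUF, M_WAITOK | M_ZERO),
and the matching kmem_free() must be passed the original allocation size, which
is why the free paths in the diff below recompute sizeof(...) * count. A
minimal kernel-side sketch of the pairing, using a stand-in struct rather than
the driver's ixgbe_tx_buf:

    /* Illustrative only (NetBSD kernel KPI): the sizes given to
     * kmem_zalloc() and kmem_free() must match. */
    #include <sys/kmem.h>

    struct txbuf {                      /* stand-in for struct ixgbe_tx_buf */
        void *map;
        void *m_head;
    };

    static struct txbuf *
    alloc_tx_buffers(int ndesc)
    {
        /* KM_SLEEP may wait for memory, like M_WAITOK; the memory is
         * returned zeroed, which replaces M_ZERO. */
        return kmem_zalloc(sizeof(struct txbuf) * ndesc, KM_SLEEP);
    }

    static void
    free_tx_buffers(struct txbuf *bufs, int ndesc)
    {
        kmem_free(bufs, sizeof(struct txbuf) * ndesc);
    }
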
ixgbe: Remove unused members to reduce the size of struct rx_ring.
ixgbe: Use #ifdef LRO more to reduce the size of struct rx_ring.
ixgbe: Change "me" from 32bit to 8bit because the max is 128.
 This commit doesn't change the real size of ix_queue, tx_ring and rx_ring
because of the alignment.
ixgbe: Use #ifdef RSC
 This feature (hardware receive side coalescing) has been disabled all along,
so enclose the code with #ifdef RSC.
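
Because the RSC enable flag has always been hard-wired to false, both the code
paths and the ring members they touch can move behind #ifdef RSC, so a kernel
built without the option carries neither the dead code nor the extra per-ring
state. A compressed sketch of the pattern, with shortened, hypothetical names:

    /* Illustrative only: guard a disabled feature's code and the state it
     * needs with the same #ifdef. */
    #include <stdbool.h>
    #include <stdint.h>

    #ifdef RSC
    static bool rsc_enable = false;     /* compile-time feature, off by default */
    #endif

    struct rxring_sketch {
        void    *rx_base;
    #ifdef RSC
        bool     hw_rsc;                /* exists only when RSC is built in */
        uint64_t rsc_num;
    #endif
    };

    static void
    setup_rx(struct rxring_sketch *rxr)
    {
    #ifdef RSC
        if (rsc_enable)
            rxr->hw_rsc = true;         /* would call ixgbe_setup_hw_rsc() */
    #endif
        (void)rxr;
    }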


(martin)
diff -r1.100.4.4 -r1.100.4.5 src/sys/dev/pci/ixgbe/ix_txrx.c
diff -r1.324.2.6 -r1.324.2.7 src/sys/dev/pci/ixgbe/ixgbe.c
diff -r1.86.4.4 -r1.86.4.5 src/sys/dev/pci/ixgbe/ixgbe.h
diff -r1.55.4.5 -r1.55.4.6 src/sys/dev/pci/ixgbe/ixgbe_type.h
diff -r1.183.4.5 -r1.183.4.6 src/sys/dev/pci/ixgbe/ixv.c

cvs diff -r1.100.4.4 -r1.100.4.5 src/sys/dev/pci/ixgbe/ix_txrx.c

--- src/sys/dev/pci/ixgbe/ix_txrx.c 2023/10/18 11:53:22 1.100.4.4
+++ src/sys/dev/pci/ixgbe/ix_txrx.c 2024/02/03 11:58:53 1.100.4.5
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: ix_txrx.c,v 1.100.4.4 2023/10/18 11:53:22 martin Exp $ */ 1/* $NetBSD: ix_txrx.c,v 1.100.4.5 2024/02/03 11:58:53 martin Exp $ */
2 2
3/****************************************************************************** 3/******************************************************************************
4 4
5 Copyright (c) 2001-2017, Intel Corporation 5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved. 6 All rights reserved.
7 7
8 Redistribution and use in source and binary forms, with or without 8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met: 9 modification, are permitted provided that the following conditions are met:
10 10
11 1. Redistributions of source code must retain the above copyright notice, 11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer. 12 this list of conditions and the following disclaimer.
13 13
14 2. Redistributions in binary form must reproduce the above copyright 14 2. Redistributions in binary form must reproduce the above copyright
@@ -54,57 +54,61 @@ @@ -54,57 +54,61 @@
54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 * POSSIBILITY OF SUCH DAMAGE. 63 * POSSIBILITY OF SUCH DAMAGE.
64 */ 64 */
65 65
66#include <sys/cdefs.h> 66#include <sys/cdefs.h>
67__KERNEL_RCSID(0, "$NetBSD: ix_txrx.c,v 1.100.4.4 2023/10/18 11:53:22 martin Exp $"); 67__KERNEL_RCSID(0, "$NetBSD: ix_txrx.c,v 1.100.4.5 2024/02/03 11:58:53 martin Exp $");
68 68
69#include "opt_inet.h" 69#include "opt_inet.h"
70#include "opt_inet6.h" 70#include "opt_inet6.h"
71 71
72#include "ixgbe.h" 72#include "ixgbe.h"
73 73
 74#ifdef RSC
74/* 75/*
75 * HW RSC control: 76 * HW RSC control:
76 * this feature only works with 77 * this feature only works with
77 * IPv4, and only on 82599 and later. 78 * IPv4, and only on 82599 and later.
78 * Also this will cause IP forwarding to 79 * Also this will cause IP forwarding to
79 * fail and that can't be controlled by 80 * fail and that can't be controlled by
80 * the stack as LRO can. For all these 81 * the stack as LRO can. For all these
81 * reasons I've deemed it best to leave 82 * reasons I've deemed it best to leave
82 * this off and not bother with a tuneable 83 * this off and not bother with a tuneable
83 * interface, this would need to be compiled 84 * interface, this would need to be compiled
84 * to enable. 85 * to enable.
85 */ 86 */
86static bool ixgbe_rsc_enable = FALSE; 87static bool ixgbe_rsc_enable = FALSE;
 88#endif
87 89
 90#ifdef IXGBE_FDIR
88/* 91/*
89 * For Flow Director: this is the 92 * For Flow Director: this is the
90 * number of TX packets we sample 93 * number of TX packets we sample
91 * for the filter pool, this means 94 * for the filter pool, this means
92 * every 20th packet will be probed. 95 * every 20th packet will be probed.
93 * 96 *
94 * This feature can be disabled by 97 * This feature can be disabled by
95 * setting this to 0. 98 * setting this to 0.
96 */ 99 */
97static int atr_sample_rate = 20; 100static int atr_sample_rate = 20;
 101#endif
98 102
99#define IXGBE_M_ADJ(sc, rxr, mp) \ 103#define IXGBE_M_ADJ(sc, rxr, mp) \
100 if (sc->max_frame_size <= (rxr->mbuf_sz - ETHER_ALIGN)) \ 104 if (sc->max_frame_size <= (rxr->mbuf_sz - ETHER_ALIGN)) \
101 m_adj(mp, ETHER_ALIGN) 105 m_adj(mp, ETHER_ALIGN)
102 106
103/************************************************************************ 107/************************************************************************
104 * Local Function prototypes 108 * Local Function prototypes
105 ************************************************************************/ 109 ************************************************************************/
106static void ixgbe_setup_transmit_ring(struct tx_ring *); 110static void ixgbe_setup_transmit_ring(struct tx_ring *);
107static void ixgbe_free_transmit_buffers(struct tx_ring *); 111static void ixgbe_free_transmit_buffers(struct tx_ring *);
108static int ixgbe_setup_receive_ring(struct rx_ring *); 112static int ixgbe_setup_receive_ring(struct rx_ring *);
109static void ixgbe_free_receive_buffers(struct rx_ring *); 113static void ixgbe_free_receive_buffers(struct rx_ring *);
110static void ixgbe_rx_checksum(u32, struct mbuf *, u32, 114static void ixgbe_rx_checksum(u32, struct mbuf *, u32,
@@ -112,28 +116,29 @@ static void ixgbe_rx_checksum(u @@ -112,28 +116,29 @@ static void ixgbe_rx_checksum(u
112static void ixgbe_refresh_mbufs(struct rx_ring *, int); 116static void ixgbe_refresh_mbufs(struct rx_ring *, int);
113static void ixgbe_drain(struct ifnet *, struct tx_ring *); 117static void ixgbe_drain(struct ifnet *, struct tx_ring *);
114static int ixgbe_xmit(struct tx_ring *, struct mbuf *); 118static int ixgbe_xmit(struct tx_ring *, struct mbuf *);
115static int ixgbe_tx_ctx_setup(struct tx_ring *, 119static int ixgbe_tx_ctx_setup(struct tx_ring *,
116 struct mbuf *, u32 *, u32 *); 120 struct mbuf *, u32 *, u32 *);
117static int ixgbe_tso_setup(struct tx_ring *, 121static int ixgbe_tso_setup(struct tx_ring *,
118 struct mbuf *, u32 *, u32 *); 122 struct mbuf *, u32 *, u32 *);
119static __inline void ixgbe_rx_discard(struct rx_ring *, int); 123static __inline void ixgbe_rx_discard(struct rx_ring *, int);
120static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *, 124static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
121 struct mbuf *, u32); 125 struct mbuf *, u32);
122static int ixgbe_dma_malloc(struct ixgbe_softc *, bus_size_t, 126static int ixgbe_dma_malloc(struct ixgbe_softc *, bus_size_t,
123 struct ixgbe_dma_alloc *, int); 127 struct ixgbe_dma_alloc *, int);
124static void ixgbe_dma_free(struct ixgbe_softc *, struct ixgbe_dma_alloc *); 128static void ixgbe_dma_free(struct ixgbe_softc *, struct ixgbe_dma_alloc *);
125 129#ifdef RSC
126static void ixgbe_setup_hw_rsc(struct rx_ring *); 130static void ixgbe_setup_hw_rsc(struct rx_ring *);
 131#endif
127 132
128/************************************************************************ 133/************************************************************************
129 * ixgbe_legacy_start_locked - Transmit entry point 134 * ixgbe_legacy_start_locked - Transmit entry point
130 * 135 *
131 * Called by the stack to initiate a transmit. 136 * Called by the stack to initiate a transmit.
132 * The driver will remain in this routine as long as there are 137 * The driver will remain in this routine as long as there are
133 * packets to transmit and transmit resources are available. 138 * packets to transmit and transmit resources are available.
134 * In case resources are not available, the stack is notified 139 * In case resources are not available, the stack is notified
135 * and the packet is requeued. 140 * and the packet is requeued.
136 ************************************************************************/ 141 ************************************************************************/
137int 142int
138ixgbe_legacy_start_locked(struct ifnet *ifp, struct tx_ring *txr) 143ixgbe_legacy_start_locked(struct ifnet *ifp, struct tx_ring *txr)
139{ 144{
@@ -396,27 +401,27 @@ ixgbe_drain_all(struct ixgbe_softc *sc) @@ -396,27 +401,27 @@ ixgbe_drain_all(struct ixgbe_softc *sc)
396} 401}
397 402
398/************************************************************************ 403/************************************************************************
399 * ixgbe_xmit 404 * ixgbe_xmit
400 * 405 *
401 * Maps the mbufs to tx descriptors, allowing the 406 * Maps the mbufs to tx descriptors, allowing the
402 * TX engine to transmit the packets. 407 * TX engine to transmit the packets.
403 * 408 *
404 * Return 0 on success, positive on failure 409 * Return 0 on success, positive on failure
405 ************************************************************************/ 410 ************************************************************************/
406static int 411static int
407ixgbe_xmit(struct tx_ring *txr, struct mbuf *m_head) 412ixgbe_xmit(struct tx_ring *txr, struct mbuf *m_head)
408{ 413{
409 struct ixgbe_softc *sc = txr->sc; 414 struct ixgbe_softc *sc = txr->sc;
410 struct ixgbe_tx_buf *txbuf; 415 struct ixgbe_tx_buf *txbuf;
411 union ixgbe_adv_tx_desc *txd = NULL; 416 union ixgbe_adv_tx_desc *txd = NULL;
412 struct ifnet *ifp = sc->ifp; 417 struct ifnet *ifp = sc->ifp;
413 int i, j, error; 418 int i, j, error;
414 int first; 419 int first;
415 u32 olinfo_status = 0, cmd_type_len; 420 u32 olinfo_status = 0, cmd_type_len;
416 bool remap = TRUE; 421 bool remap = TRUE;
417 bus_dmamap_t map; 422 bus_dmamap_t map;
418 423
419 /* Basic descriptor defines */ 424 /* Basic descriptor defines */
420 cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA | 425 cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
421 IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT); 426 IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
422 427
@@ -477,28 +482,28 @@ retry: @@ -477,28 +482,28 @@ retry:
477 return error; 482 return error;
478 } 483 }
479 } 484 }
480 485
481 /* Make certain there are enough descriptors */ 486 /* Make certain there are enough descriptors */
482 if (txr->tx_avail < (map->dm_nsegs + 2)) { 487 if (txr->tx_avail < (map->dm_nsegs + 2)) {
483 txr->txr_no_space = true; 488 txr->txr_no_space = true;
484 IXGBE_EVC_ADD(&txr->no_desc_avail, 1); 489 IXGBE_EVC_ADD(&txr->no_desc_avail, 1);
485 ixgbe_dmamap_unload(txr->txtag, txbuf->map); 490 ixgbe_dmamap_unload(txr->txtag, txbuf->map);
486 return EAGAIN; 491 return EAGAIN;
487 } 492 }
488 493
489 /* 494 /*
490 * Set up the appropriate offload context 495 * Set up the appropriate offload context if requested,
491 * this will consume the first descriptor 496 * this may consume one TX descriptor.
492 */ 497 */
493 error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status); 498 error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
494 if (__predict_false(error)) { 499 if (__predict_false(error)) {
495 return (error); 500 return (error);
496 } 501 }
497 502
498#ifdef IXGBE_FDIR 503#ifdef IXGBE_FDIR
499 /* Do the flow director magic */ 504 /* Do the flow director magic */
500 if ((sc->feat_en & IXGBE_FEATURE_FDIR) && 505 if ((sc->feat_en & IXGBE_FEATURE_FDIR) &&
501 (txr->atr_sample) && (!sc->fdir_reinit)) { 506 (txr->atr_sample) && (!sc->fdir_reinit)) {
502 ++txr->atr_count; 507 ++txr->atr_count;
503 if (txr->atr_count >= atr_sample_rate) { 508 if (txr->atr_count >= atr_sample_rate) {
504 ixgbe_atr(txr, m_head); 509 ixgbe_atr(txr, m_head);
@@ -614,28 +619,28 @@ ixgbe_allocate_transmit_buffers(struct t @@ -614,28 +619,28 @@ ixgbe_allocate_transmit_buffers(struct t
614 /* parent */ sc->osdep.dmat, 619 /* parent */ sc->osdep.dmat,
615 /* alignment */ 1, 620 /* alignment */ 1,
616 /* bounds */ 0, 621 /* bounds */ 0,
617 /* maxsize */ IXGBE_TSO_SIZE, 622 /* maxsize */ IXGBE_TSO_SIZE,
618 /* nsegments */ sc->num_segs, 623 /* nsegments */ sc->num_segs,
619 /* maxsegsize */ PAGE_SIZE, 624 /* maxsegsize */ PAGE_SIZE,
620 /* flags */ 0, 625 /* flags */ 0,
621 &txr->txtag); 626 &txr->txtag);
622 if (error != 0) { 627 if (error != 0) {
623 aprint_error_dev(dev,"Unable to allocate TX DMA tag\n"); 628 aprint_error_dev(dev,"Unable to allocate TX DMA tag\n");
624 goto fail; 629 goto fail;
625 } 630 }
626 631
627 txr->tx_buffers = malloc(sizeof(struct ixgbe_tx_buf) * 632 txr->tx_buffers = kmem_zalloc(sizeof(struct ixgbe_tx_buf) *
628 sc->num_tx_desc, M_DEVBUF, M_WAITOK | M_ZERO); 633 sc->num_tx_desc, KM_SLEEP);
629 634
630 /* Create the descriptor buffer dma maps */ 635 /* Create the descriptor buffer dma maps */
631 txbuf = txr->tx_buffers; 636 txbuf = txr->tx_buffers;
632 for (i = 0; i < sc->num_tx_desc; i++, txbuf++) { 637 for (i = 0; i < sc->num_tx_desc; i++, txbuf++) {
633 error = ixgbe_dmamap_create(txr->txtag, 0, &txbuf->map); 638 error = ixgbe_dmamap_create(txr->txtag, 0, &txbuf->map);
634 if (error != 0) { 639 if (error != 0) {
635 aprint_error_dev(dev, 640 aprint_error_dev(dev,
636 "Unable to create TX DMA map (%d)\n", error); 641 "Unable to create TX DMA map (%d)\n", error);
637 goto fail; 642 goto fail;
638 } 643 }
639 } 644 }
640 645
641 return 0; 646 return 0;
@@ -705,29 +710,31 @@ ixgbe_setup_transmit_ring(struct tx_ring @@ -705,29 +710,31 @@ ixgbe_setup_transmit_ring(struct tx_ring
705 * netmap_idx_n2k() handles wraparounds properly. 710 * netmap_idx_n2k() handles wraparounds properly.
706 */ 711 */
707 if ((sc->feat_en & IXGBE_FEATURE_NETMAP) && slot) { 712 if ((sc->feat_en & IXGBE_FEATURE_NETMAP) && slot) {
708 int si = netmap_idx_n2k(na->tx_rings[txr->me], i); 713 int si = netmap_idx_n2k(na->tx_rings[txr->me], i);
709 netmap_load_map(na, txr->txtag, 714 netmap_load_map(na, txr->txtag,
710 txbuf->map, NMB(na, slot + si)); 715 txbuf->map, NMB(na, slot + si));
711 } 716 }
712#endif /* DEV_NETMAP */ 717#endif /* DEV_NETMAP */
713 718
714 /* Clear the EOP descriptor pointer */ 719 /* Clear the EOP descriptor pointer */
715 txbuf->eop = NULL; 720 txbuf->eop = NULL;
716 } 721 }
717 722
 723#ifdef IXGBE_FDIR
718 /* Set the rate at which we sample packets */ 724 /* Set the rate at which we sample packets */
719 if (sc->feat_en & IXGBE_FEATURE_FDIR) 725 if (sc->feat_en & IXGBE_FEATURE_FDIR)
720 txr->atr_sample = atr_sample_rate; 726 txr->atr_sample = atr_sample_rate;
 727#endif
721 728
722 /* Set number of descriptors available */ 729 /* Set number of descriptors available */
723 txr->tx_avail = sc->num_tx_desc; 730 txr->tx_avail = sc->num_tx_desc;
724 731
725 ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 732 ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
726 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 733 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
727 IXGBE_TX_UNLOCK(txr); 734 IXGBE_TX_UNLOCK(txr);
728} /* ixgbe_setup_transmit_ring */ 735} /* ixgbe_setup_transmit_ring */
729 736
730/************************************************************************ 737/************************************************************************
731 * ixgbe_setup_transmit_structures - Initialize all transmit rings. 738 * ixgbe_setup_transmit_structures - Initialize all transmit rings.
732 ************************************************************************/ 739 ************************************************************************/
733int 740int
@@ -744,38 +751,38 @@ ixgbe_setup_transmit_structures(struct i @@ -744,38 +751,38 @@ ixgbe_setup_transmit_structures(struct i
744/************************************************************************ 751/************************************************************************
745 * ixgbe_free_transmit_structures - Free all transmit rings. 752 * ixgbe_free_transmit_structures - Free all transmit rings.
746 ************************************************************************/ 753 ************************************************************************/
747void 754void
748ixgbe_free_transmit_structures(struct ixgbe_softc *sc) 755ixgbe_free_transmit_structures(struct ixgbe_softc *sc)
749{ 756{
750 struct tx_ring *txr = sc->tx_rings; 757 struct tx_ring *txr = sc->tx_rings;
751 758
752 for (int i = 0; i < sc->num_queues; i++, txr++) { 759 for (int i = 0; i < sc->num_queues; i++, txr++) {
753 ixgbe_free_transmit_buffers(txr); 760 ixgbe_free_transmit_buffers(txr);
754 ixgbe_dma_free(sc, &txr->txdma); 761 ixgbe_dma_free(sc, &txr->txdma);
755 IXGBE_TX_LOCK_DESTROY(txr); 762 IXGBE_TX_LOCK_DESTROY(txr);
756 } 763 }
757 free(sc->tx_rings, M_DEVBUF); 764 kmem_free(sc->tx_rings, sizeof(struct tx_ring) * sc->num_queues);
758} /* ixgbe_free_transmit_structures */ 765} /* ixgbe_free_transmit_structures */
759 766
760/************************************************************************ 767/************************************************************************
761 * ixgbe_free_transmit_buffers 768 * ixgbe_free_transmit_buffers
762 * 769 *
763 * Free transmit ring related data structures. 770 * Free transmit ring related data structures.
764 ************************************************************************/ 771 ************************************************************************/
765static void 772static void
766ixgbe_free_transmit_buffers(struct tx_ring *txr) 773ixgbe_free_transmit_buffers(struct tx_ring *txr)
767{ 774{
768 struct ixgbe_softc *sc = txr->sc; 775 struct ixgbe_softc *sc = txr->sc;
769 struct ixgbe_tx_buf *tx_buffer; 776 struct ixgbe_tx_buf *tx_buffer;
770 int i; 777 int i;
771 778
772 INIT_DEBUGOUT("ixgbe_free_transmit_buffers: begin"); 779 INIT_DEBUGOUT("ixgbe_free_transmit_buffers: begin");
773 780
774 if (txr->tx_buffers == NULL) 781 if (txr->tx_buffers == NULL)
775 return; 782 return;
776 783
777 tx_buffer = txr->tx_buffers; 784 tx_buffer = txr->tx_buffers;
778 for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) { 785 for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
779 if (tx_buffer->m_head != NULL) { 786 if (tx_buffer->m_head != NULL) {
780 bus_dmamap_sync(txr->txtag->dt_dmat, tx_buffer->map, 787 bus_dmamap_sync(txr->txtag->dt_dmat, tx_buffer->map,
781 0, tx_buffer->m_head->m_pkthdr.len, 788 0, tx_buffer->m_head->m_pkthdr.len,
@@ -792,27 +799,28 @@ ixgbe_free_transmit_buffers(struct tx_ri @@ -792,27 +799,28 @@ ixgbe_free_transmit_buffers(struct tx_ri
792 ixgbe_dmamap_unload(txr->txtag, tx_buffer->map); 799 ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
793 ixgbe_dmamap_destroy(txr->txtag, tx_buffer->map); 800 ixgbe_dmamap_destroy(txr->txtag, tx_buffer->map);
794 tx_buffer->map = NULL; 801 tx_buffer->map = NULL;
795 } 802 }
796 } 803 }
797 if (txr->txr_interq != NULL) { 804 if (txr->txr_interq != NULL) {
798 struct mbuf *m; 805 struct mbuf *m;
799 806
800 while ((m = pcq_get(txr->txr_interq)) != NULL) 807 while ((m = pcq_get(txr->txr_interq)) != NULL)
801 m_freem(m); 808 m_freem(m);
802 pcq_destroy(txr->txr_interq); 809 pcq_destroy(txr->txr_interq);
803 } 810 }
804 if (txr->tx_buffers != NULL) { 811 if (txr->tx_buffers != NULL) {
805 free(txr->tx_buffers, M_DEVBUF); 812 kmem_free(txr->tx_buffers,
 813 sizeof(struct ixgbe_tx_buf) * sc->num_tx_desc);
806 txr->tx_buffers = NULL; 814 txr->tx_buffers = NULL;
807 } 815 }
808 if (txr->txtag != NULL) { 816 if (txr->txtag != NULL) {
809 ixgbe_dma_tag_destroy(txr->txtag); 817 ixgbe_dma_tag_destroy(txr->txtag);
810 txr->txtag = NULL; 818 txr->txtag = NULL;
811 } 819 }
812} /* ixgbe_free_transmit_buffers */ 820} /* ixgbe_free_transmit_buffers */
813 821
814/************************************************************************ 822/************************************************************************
815 * ixgbe_tx_ctx_setup 823 * ixgbe_tx_ctx_setup
816 * 824 *
817 * Advanced Context Descriptor setup for VLAN, CSUM or TSO 825 * Advanced Context Descriptor setup for VLAN, CSUM or TSO
818 ************************************************************************/ 826 ************************************************************************/
@@ -844,29 +852,26 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr,  @@ -844,29 +852,26 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr,
844 int rv = ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status); 852 int rv = ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status);
845 853
846 if (rv != 0) 854 if (rv != 0)
847 IXGBE_EVC_ADD(&sc->tso_err, 1); 855 IXGBE_EVC_ADD(&sc->tso_err, 1);
848 return rv; 856 return rv;
849 } 857 }
850 858
851 if ((mp->m_pkthdr.csum_flags & M_CSUM_OFFLOAD) == 0) 859 if ((mp->m_pkthdr.csum_flags & M_CSUM_OFFLOAD) == 0)
852 offload = FALSE; 860 offload = FALSE;
853 861
854 /* Indicate the whole packet as payload when not doing TSO */ 862 /* Indicate the whole packet as payload when not doing TSO */
855 *olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT; 863 *olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;
856 864
857 /* Now ready a context descriptor */ 
858 TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd]; 
859 
860 /* 865 /*
861 * In advanced descriptors the vlan tag must 866 * In advanced descriptors the vlan tag must
862 * be placed into the context descriptor. Hence 867 * be placed into the context descriptor. Hence
863 * we need to make one even if not doing offloads. 868 * we need to make one even if not doing offloads.
864 */ 869 */
865 if (vlan_has_tag(mp)) { 870 if (vlan_has_tag(mp)) {
866 vtag = htole16(vlan_get_tag(mp)); 871 vtag = htole16(vlan_get_tag(mp));
867 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT); 872 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
868 } else if (!(txr->sc->feat_en & IXGBE_FEATURE_NEEDS_CTXD) && 873 } else if (!(txr->sc->feat_en & IXGBE_FEATURE_NEEDS_CTXD) &&
869 (offload == FALSE)) 874 (offload == FALSE))
870 return (0); 875 return (0);
871 876
872 /* 877 /*
@@ -949,26 +954,29 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr,  @@ -949,26 +954,29 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr,
949 offload = false; 954 offload = false;
950 break; 955 break;
951 default: 956 default:
952 offload = false; 957 offload = false;
953 break; 958 break;
954 } 959 }
955 960
956 if (offload) /* Insert L4 checksum into data descriptors */ 961 if (offload) /* Insert L4 checksum into data descriptors */
957 *olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8; 962 *olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
958 963
959no_offloads: 964no_offloads:
960 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; 965 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
961 966
 967 /* Now ready a context descriptor */
 968 TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];
 969
962 /* Now copy bits into descriptor */ 970 /* Now copy bits into descriptor */
963 TXD->vlan_macip_lens = htole32(vlan_macip_lens); 971 TXD->vlan_macip_lens = htole32(vlan_macip_lens);
964 TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl); 972 TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
965 TXD->seqnum_seed = htole32(0); 973 TXD->seqnum_seed = htole32(0);
966 TXD->mss_l4len_idx = htole32(0); 974 TXD->mss_l4len_idx = htole32(0);
967 975
968 /* We've consumed the first desc, adjust counters */ 976 /* We've consumed the first desc, adjust counters */
969 if (++ctxd == txr->num_desc) 977 if (++ctxd == txr->num_desc)
970 ctxd = 0; 978 ctxd = 0;
971 txr->next_avail_desc = ctxd; 979 txr->next_avail_desc = ctxd;
972 --txr->tx_avail; 980 --txr->tx_avail;
973 981
974 return (0); 982 return (0);
@@ -1098,26 +1106,27 @@ ixgbe_tso_setup(struct tx_ring *txr, str @@ -1098,26 +1106,27 @@ ixgbe_tso_setup(struct tx_ring *txr, str
1098 * Examine each tx_buffer in the used queue. If the hardware is done 1106 * Examine each tx_buffer in the used queue. If the hardware is done
1099 * processing the packet then free associated resources. The 1107 * processing the packet then free associated resources. The
1100 * tx_buffer is put back on the free queue. 1108 * tx_buffer is put back on the free queue.
1101 ************************************************************************/ 1109 ************************************************************************/
1102bool 1110bool
1103ixgbe_txeof(struct tx_ring *txr) 1111ixgbe_txeof(struct tx_ring *txr)
1104{ 1112{
1105 struct ixgbe_softc *sc = txr->sc; 1113 struct ixgbe_softc *sc = txr->sc;
1106 struct ifnet *ifp = sc->ifp; 1114 struct ifnet *ifp = sc->ifp;
1107 struct ixgbe_tx_buf *buf; 1115 struct ixgbe_tx_buf *buf;
1108 union ixgbe_adv_tx_desc *txd; 1116 union ixgbe_adv_tx_desc *txd;
1109 u32 work, processed = 0; 1117 u32 work, processed = 0;
1110 u32 limit = sc->tx_process_limit; 1118 u32 limit = sc->tx_process_limit;
 1119 u16 avail;
1111 1120
1112 KASSERT(mutex_owned(&txr->tx_mtx)); 1121 KASSERT(mutex_owned(&txr->tx_mtx));
1113 1122
1114#ifdef DEV_NETMAP 1123#ifdef DEV_NETMAP
1115 if ((sc->feat_en & IXGBE_FEATURE_NETMAP) && 1124 if ((sc->feat_en & IXGBE_FEATURE_NETMAP) &&
1116 (sc->ifp->if_capenable & IFCAP_NETMAP)) { 1125 (sc->ifp->if_capenable & IFCAP_NETMAP)) {
1117 struct netmap_sc *na = NA(sc->ifp); 1126 struct netmap_sc *na = NA(sc->ifp);
1118 struct netmap_kring *kring = na->tx_rings[txr->me]; 1127 struct netmap_kring *kring = na->tx_rings[txr->me];
1119 txd = txr->tx_base; 1128 txd = txr->tx_base;
1120 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 1129 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1121 BUS_DMASYNC_POSTREAD); 1130 BUS_DMASYNC_POSTREAD);
1122 /* 1131 /*
1123 * In netmap mode, all the work is done in the context 1132 * In netmap mode, all the work is done in the context
@@ -1141,123 +1150,128 @@ ixgbe_txeof(struct tx_ring *txr) @@ -1141,123 +1150,128 @@ ixgbe_txeof(struct tx_ring *txr)
1141 } 1150 }
1142#endif /* DEV_NETMAP */ 1151#endif /* DEV_NETMAP */
1143 1152
1144 if (txr->tx_avail == txr->num_desc) { 1153 if (txr->tx_avail == txr->num_desc) {
1145 txr->busy = 0; 1154 txr->busy = 0;
1146 return false; 1155 return false;
1147 } 1156 }
1148 1157
1149 /* Get work starting point */ 1158 /* Get work starting point */
1150 work = txr->next_to_clean; 1159 work = txr->next_to_clean;
1151 buf = &txr->tx_buffers[work]; 1160 buf = &txr->tx_buffers[work];
1152 txd = &txr->tx_base[work]; 1161 txd = &txr->tx_base[work];
1153 work -= txr->num_desc; /* The distance to ring end */ 1162 work -= txr->num_desc; /* The distance to ring end */
 1163 avail = txr->tx_avail;
1154 ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 1164 ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1155 BUS_DMASYNC_POSTREAD); 1165 BUS_DMASYNC_POSTREAD);
1156 1166
1157 do { 1167 do {
1158 union ixgbe_adv_tx_desc *eop = buf->eop; 1168 union ixgbe_adv_tx_desc *eop = buf->eop;
1159 if (eop == NULL) /* No work */ 1169 if (eop == NULL) /* No work */
1160 break; 1170 break;
1161 1171
1162 if ((le32toh(eop->wb.status) & IXGBE_TXD_STAT_DD) == 0) 1172 if ((le32toh(eop->wb.status) & IXGBE_TXD_STAT_DD) == 0)
1163 break; /* I/O not complete */ 1173 break; /* I/O not complete */
1164 1174
1165 if (buf->m_head) { 1175 if (buf->m_head) {
1166 txr->bytes += buf->m_head->m_pkthdr.len; 1176 txr->bytes += buf->m_head->m_pkthdr.len;
1167 bus_dmamap_sync(txr->txtag->dt_dmat, buf->map, 1177 bus_dmamap_sync(txr->txtag->dt_dmat, buf->map,
1168 0, buf->m_head->m_pkthdr.len, 1178 0, buf->m_head->m_pkthdr.len,
1169 BUS_DMASYNC_POSTWRITE); 1179 BUS_DMASYNC_POSTWRITE);
1170 ixgbe_dmamap_unload(txr->txtag, buf->map); 1180 ixgbe_dmamap_unload(txr->txtag, buf->map);
1171 m_freem(buf->m_head); 1181 m_freem(buf->m_head);
1172 buf->m_head = NULL; 1182 buf->m_head = NULL;
1173 } 1183 }
1174 buf->eop = NULL; 1184 buf->eop = NULL;
1175 txr->txr_no_space = false; 1185 ++avail;
1176 ++txr->tx_avail; 
1177 1186
1178 /* We clean the range if multi segment */ 1187 /* We clean the range if multi segment */
1179 while (txd != eop) { 1188 while (txd != eop) {
1180 ++txd; 1189 ++txd;
1181 ++buf; 1190 ++buf;
1182 ++work; 1191 ++work;
1183 /* wrap the ring? */ 1192 /* wrap the ring? */
1184 if (__predict_false(!work)) { 1193 if (__predict_false(!work)) {
1185 work -= txr->num_desc; 1194 work -= txr->num_desc;
1186 buf = txr->tx_buffers; 1195 buf = txr->tx_buffers;
1187 txd = txr->tx_base; 1196 txd = txr->tx_base;
1188 } 1197 }
1189 if (buf->m_head) { 1198 if (buf->m_head) {
1190 txr->bytes += 1199 txr->bytes +=
1191 buf->m_head->m_pkthdr.len; 1200 buf->m_head->m_pkthdr.len;
1192 bus_dmamap_sync(txr->txtag->dt_dmat, 1201 bus_dmamap_sync(txr->txtag->dt_dmat,
1193 buf->map, 1202 buf->map,
1194 0, buf->m_head->m_pkthdr.len, 1203 0, buf->m_head->m_pkthdr.len,
1195 BUS_DMASYNC_POSTWRITE); 1204 BUS_DMASYNC_POSTWRITE);
1196 ixgbe_dmamap_unload(txr->txtag, 1205 ixgbe_dmamap_unload(txr->txtag,
1197 buf->map); 1206 buf->map);
1198 m_freem(buf->m_head); 1207 m_freem(buf->m_head);
1199 buf->m_head = NULL; 1208 buf->m_head = NULL;
1200 } 1209 }
1201 ++txr->tx_avail; 1210 ++avail;
1202 buf->eop = NULL; 1211 buf->eop = NULL;
1203 1212
1204 } 1213 }
1205 ++txr->packets; 
1206 ++processed; 1214 ++processed;
1207 if_statinc(ifp, if_opackets); 
1208 1215
1209 /* Try the next packet */ 1216 /* Try the next packet */
1210 ++txd; 1217 ++txd;
1211 ++buf; 1218 ++buf;
1212 ++work; 1219 ++work;
1213 /* reset with a wrap */ 1220 /* reset with a wrap */
1214 if (__predict_false(!work)) { 1221 if (__predict_false(!work)) {
1215 work -= txr->num_desc; 1222 work -= txr->num_desc;
1216 buf = txr->tx_buffers; 1223 buf = txr->tx_buffers;
1217 txd = txr->tx_base; 1224 txd = txr->tx_base;
1218 } 1225 }
1219 prefetch(txd); 1226 prefetch(txd);
1220 } while (__predict_true(--limit)); 1227 } while (__predict_true(--limit));
1221 1228
1222 ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 1229 ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1223 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1230 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1224 1231
1225 work += txr->num_desc; 1232 work += txr->num_desc;
1226 txr->next_to_clean = work; 1233 txr->next_to_clean = work;
 1234 if (processed) {
 1235 txr->tx_avail = avail;
 1236 txr->txr_no_space = false;
 1237 txr->packets += processed;
 1238 if_statadd(ifp, if_opackets, processed);
 1239 }
1227 1240
1228 /* 1241 /*
1229 * Queue Hang detection, we know there's 1242 * Queue Hang detection, we know there's
1230 * work outstanding or the first return 1243 * work outstanding or the first return
1231 * would have been taken, so increment busy 1244 * would have been taken, so increment busy
1232 * if nothing managed to get cleaned, then 1245 * if nothing managed to get cleaned, then
1233 * in local_timer it will be checked and 1246 * in local_timer it will be checked and
1234 * marked as HUNG if it exceeds a MAX attempt. 1247 * marked as HUNG if it exceeds a MAX attempt.
1235 */ 1248 */
1236 if ((processed == 0) && (txr->busy != IXGBE_QUEUE_HUNG)) 1249 if ((processed == 0) && (txr->busy != IXGBE_QUEUE_HUNG))
1237 ++txr->busy; 1250 ++txr->busy;
1238 /* 1251 /*
1239 * If anything gets cleaned we reset state to 1, 1252 * If anything gets cleaned we reset state to 1,
1240 * note this will turn off HUNG if its set. 1253 * note this will turn off HUNG if its set.
1241 */ 1254 */
1242 if (processed) 1255 if (processed)
1243 txr->busy = 1; 1256 txr->busy = 1;
1244 1257
1245 if (txr->tx_avail == txr->num_desc) 1258 if (txr->tx_avail == txr->num_desc)
1246 txr->busy = 0; 1259 txr->busy = 0;
1247 1260
1248 return ((limit > 0) ? false : true); 1261 return ((limit > 0) ? false : true);
1249} /* ixgbe_txeof */ 1262} /* ixgbe_txeof */
1250 1263
 1264#ifdef RSC
1251/************************************************************************ 1265/************************************************************************
1252 * ixgbe_rsc_count 1266 * ixgbe_rsc_count
1253 * 1267 *
1254 * Used to detect a descriptor that has been merged by Hardware RSC. 1268 * Used to detect a descriptor that has been merged by Hardware RSC.
1255 ************************************************************************/ 1269 ************************************************************************/
1256static inline u32 1270static inline u32
1257ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx) 1271ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
1258{ 1272{
1259 return (le32toh(rx->wb.lower.lo_dword.data) & 1273 return (le32toh(rx->wb.lower.lo_dword.data) &
1260 IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT; 1274 IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
1261} /* ixgbe_rsc_count */ 1275} /* ixgbe_rsc_count */
1262 1276
1263/************************************************************************ 1277/************************************************************************
@@ -1314,26 +1328,27 @@ ixgbe_setup_hw_rsc(struct rx_ring *rxr) @@ -1314,26 +1328,27 @@ ixgbe_setup_hw_rsc(struct rx_ring *rxr)
1314 1328
1315 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl); 1329 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
1316 1330
1317 /* Enable TCP header recognition */ 1331 /* Enable TCP header recognition */
1318 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), 1332 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
1319 (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) | IXGBE_PSRTYPE_TCPHDR)); 1333 (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) | IXGBE_PSRTYPE_TCPHDR));
1320 1334
1321 /* Disable RSC for ACK packets */ 1335 /* Disable RSC for ACK packets */
1322 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, 1336 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
1323 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); 1337 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
1324 1338
1325 rxr->hw_rsc = TRUE; 1339 rxr->hw_rsc = TRUE;
1326} /* ixgbe_setup_hw_rsc */ 1340} /* ixgbe_setup_hw_rsc */
 1341#endif
1327 1342
1328/************************************************************************ 1343/************************************************************************
1329 * ixgbe_refresh_mbufs 1344 * ixgbe_refresh_mbufs
1330 * 1345 *
1331 * Refresh mbuf buffers for RX descriptor rings 1346 * Refresh mbuf buffers for RX descriptor rings
1332 * - now keeps its own state so discards due to resource 1347 * - now keeps its own state so discards due to resource
1333 * exhaustion are unnecessary, if an mbuf cannot be obtained 1348 * exhaustion are unnecessary, if an mbuf cannot be obtained
1334 * it just returns, keeping its placeholder, thus it can simply 1349 * it just returns, keeping its placeholder, thus it can simply
1335 * be recalled to try again. 1350 * be recalled to try again.
1336 ************************************************************************/ 1351 ************************************************************************/
1337static void 1352static void
1338ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit) 1353ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
1339{ 1354{
@@ -1408,27 +1423,27 @@ update: @@ -1408,27 +1423,27 @@ update:
1408 * rx_buffer per received packet, the maximum number of rx_buffer's 1423 * rx_buffer per received packet, the maximum number of rx_buffer's
1409 * that we'll need is equal to the number of receive descriptors 1424 * that we'll need is equal to the number of receive descriptors
1410 * that we've allocated. 1425 * that we've allocated.
1411 ************************************************************************/ 1426 ************************************************************************/
1412static int 1427static int
1413ixgbe_allocate_receive_buffers(struct rx_ring *rxr) 1428ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
1414{ 1429{
1415 struct ixgbe_softc *sc = rxr->sc; 1430 struct ixgbe_softc *sc = rxr->sc;
1416 device_t dev = sc->dev; 1431 device_t dev = sc->dev;
1417 struct ixgbe_rx_buf *rxbuf; 1432 struct ixgbe_rx_buf *rxbuf;
1418 int bsize, error; 1433 int bsize, error;
1419 1434
1420 bsize = sizeof(struct ixgbe_rx_buf) * rxr->num_desc; 1435 bsize = sizeof(struct ixgbe_rx_buf) * rxr->num_desc;
1421 rxr->rx_buffers = malloc(bsize, M_DEVBUF, M_WAITOK | M_ZERO); 1436 rxr->rx_buffers = kmem_zalloc(bsize, KM_SLEEP);
1422 1437
1423 error = ixgbe_dma_tag_create( 1438 error = ixgbe_dma_tag_create(
1424 /* parent */ sc->osdep.dmat, 1439 /* parent */ sc->osdep.dmat,
1425 /* alignment */ 1, 1440 /* alignment */ 1,
1426 /* bounds */ 0, 1441 /* bounds */ 0,
1427 /* maxsize */ MJUM16BYTES, 1442 /* maxsize */ MJUM16BYTES,
1428 /* nsegments */ 1, 1443 /* nsegments */ 1,
1429 /* maxsegsize */ MJUM16BYTES, 1444 /* maxsegsize */ MJUM16BYTES,
1430 /* flags */ 0, 1445 /* flags */ 0,
1431 &rxr->ptag); 1446 &rxr->ptag);
1432 if (error != 0) { 1447 if (error != 0) {
1433 aprint_error_dev(dev, "Unable to create RX DMA tag\n"); 1448 aprint_error_dev(dev, "Unable to create RX DMA tag\n");
1434 goto fail; 1449 goto fail;
@@ -1486,58 +1501,58 @@ ixgbe_setup_receive_ring(struct rx_ring  @@ -1486,58 +1501,58 @@ ixgbe_setup_receive_ring(struct rx_ring
1486 sc = rxr->sc; 1501 sc = rxr->sc;
1487#ifdef LRO 1502#ifdef LRO
1488 ifp = sc->ifp; 1503 ifp = sc->ifp;
1489#endif /* LRO */ 1504#endif /* LRO */
1490 1505
1491 /* Clear the ring contents */ 1506 /* Clear the ring contents */
1492 IXGBE_RX_LOCK(rxr); 1507 IXGBE_RX_LOCK(rxr);
1493 1508
1494#ifdef DEV_NETMAP 1509#ifdef DEV_NETMAP
1495 if (sc->feat_en & IXGBE_FEATURE_NETMAP) 1510 if (sc->feat_en & IXGBE_FEATURE_NETMAP)
1496 slot = netmap_reset(na, NR_RX, rxr->me, 0); 1511 slot = netmap_reset(na, NR_RX, rxr->me, 0);
1497#endif /* DEV_NETMAP */ 1512#endif /* DEV_NETMAP */
1498 1513
1499 rsize = roundup2(sc->num_rx_desc * 1514 rsize = sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc);
1500 sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN); 1515 KASSERT((rsize % DBA_ALIGN) == 0);
1501 bzero((void *)rxr->rx_base, rsize); 1516 bzero((void *)rxr->rx_base, rsize);
1502 /* Cache the size */ 1517 /* Cache the size */
1503 rxr->mbuf_sz = sc->rx_mbuf_sz; 1518 rxr->mbuf_sz = sc->rx_mbuf_sz;
1504 1519
1505 /* Free current RX buffer structs and their mbufs */ 1520 /* Free current RX buffer structs and their mbufs */
1506 ixgbe_free_receive_ring(rxr); 1521 ixgbe_free_receive_ring(rxr);
1507 1522
1508 /* Now replenish the mbufs */ 1523 /* Now replenish the mbufs */
1509 for (int j = 0; j != rxr->num_desc; ++j) { 1524 for (int i = 0; i < rxr->num_desc; i++) {
1510 struct mbuf *mp; 1525 struct mbuf *mp;
1511 1526
1512 rxbuf = &rxr->rx_buffers[j]; 1527 rxbuf = &rxr->rx_buffers[i];
1513 1528
1514#ifdef DEV_NETMAP 1529#ifdef DEV_NETMAP
1515 /* 1530 /*
1516 * In netmap mode, fill the map and set the buffer 1531 * In netmap mode, fill the map and set the buffer
1517 * address in the NIC ring, considering the offset 1532 * address in the NIC ring, considering the offset
1518 * between the netmap and NIC rings (see comment in 1533 * between the netmap and NIC rings (see comment in
1519 * ixgbe_setup_transmit_ring() ). No need to allocate 1534 * ixgbe_setup_transmit_ring() ). No need to allocate
1520 * an mbuf, so end the block with a continue; 1535 * an mbuf, so end the block with a continue;
1521 */ 1536 */
1522 if ((sc->feat_en & IXGBE_FEATURE_NETMAP) && slot) { 1537 if ((sc->feat_en & IXGBE_FEATURE_NETMAP) && slot) {
1523 int sj = netmap_idx_n2k(na->rx_rings[rxr->me], j); 1538 int sj = netmap_idx_n2k(na->rx_rings[rxr->me], i);
1524 uint64_t paddr; 1539 uint64_t paddr;
1525 void *addr; 1540 void *addr;
1526 1541
1527 addr = PNMB(na, slot + sj, &paddr); 1542 addr = PNMB(na, slot + sj, &paddr);
1528 netmap_load_map(na, rxr->ptag, rxbuf->pmap, addr); 1543 netmap_load_map(na, rxr->ptag, rxbuf->pmap, addr);
1529 /* Update descriptor and the cached value */ 1544 /* Update descriptor and the cached value */
1530 rxr->rx_base[j].read.pkt_addr = htole64(paddr); 1545 rxr->rx_base[i].read.pkt_addr = htole64(paddr);
1531 rxbuf->addr = htole64(paddr); 1546 rxbuf->addr = htole64(paddr);
1532 continue; 1547 continue;
1533 } 1548 }
1534#endif /* DEV_NETMAP */ 1549#endif /* DEV_NETMAP */
1535 1550
1536 rxbuf->flags = 0; 1551 rxbuf->flags = 0;
1537 rxbuf->buf = ixgbe_getcl(); 1552 rxbuf->buf = ixgbe_getcl();
1538 if (rxbuf->buf == NULL) { 1553 if (rxbuf->buf == NULL) {
1539 IXGBE_EVC_ADD(&rxr->no_mbuf, 1); 1554 IXGBE_EVC_ADD(&rxr->no_mbuf, 1);
1540 error = ENOBUFS; 1555 error = ENOBUFS;
1541 goto fail; 1556 goto fail;
1542 } 1557 }
1543 mp = rxbuf->buf; 1558 mp = rxbuf->buf;
@@ -1549,55 +1564,62 @@ ixgbe_setup_receive_ring(struct rx_ring  @@ -1549,55 +1564,62 @@ ixgbe_setup_receive_ring(struct rx_ring
1549 if (error != 0) { 1564 if (error != 0) {
1550 /* 1565 /*
1551 * Clear this entry for later cleanup in 1566 * Clear this entry for later cleanup in
1552 * ixgbe_discard() which is called via 1567 * ixgbe_discard() which is called via
1553 * ixgbe_free_receive_ring(). 1568 * ixgbe_free_receive_ring().
1554 */ 1569 */
1555 m_freem(mp); 1570 m_freem(mp);
1556 rxbuf->buf = NULL; 1571 rxbuf->buf = NULL;
1557 goto fail; 1572 goto fail;
1558 } 1573 }
1559 bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap, 1574 bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
1560 0, mp->m_pkthdr.len, BUS_DMASYNC_PREREAD); 1575 0, mp->m_pkthdr.len, BUS_DMASYNC_PREREAD);
1561 /* Update the descriptor and the cached value */ 1576 /* Update the descriptor and the cached value */
1562 rxr->rx_base[j].read.pkt_addr = 1577 rxr->rx_base[i].read.pkt_addr =
1563 htole64(rxbuf->pmap->dm_segs[0].ds_addr); 1578 htole64(rxbuf->pmap->dm_segs[0].ds_addr);
1564 rxbuf->addr = htole64(rxbuf->pmap->dm_segs[0].ds_addr); 1579 rxbuf->addr = htole64(rxbuf->pmap->dm_segs[0].ds_addr);
1565 } 1580 }
1566 1581
1567 /* Setup our descriptor indices */ 1582 /* Setup our descriptor indices */
1568 rxr->next_to_check = 0; 1583 rxr->next_to_check = 0;
1569 rxr->next_to_refresh = sc->num_rx_desc - 1; /* Fully allocated */ 1584 rxr->next_to_refresh = sc->num_rx_desc - 1; /* Fully allocated */
 1585#ifdef LRO
1570 rxr->lro_enabled = FALSE; 1586 rxr->lro_enabled = FALSE;
 1587#endif
1571 rxr->discard_multidesc = false; 1588 rxr->discard_multidesc = false;
1572 IXGBE_EVC_STORE(&rxr->rx_copies, 0); 1589 IXGBE_EVC_STORE(&rxr->rx_copies, 0);
1573#if 0 /* NetBSD */ 1590#if 0 /* NetBSD */
1574 IXGBE_EVC_STORE(&rxr->rx_bytes, 0); 1591 IXGBE_EVC_STORE(&rxr->rx_bytes, 0);
1575#if 1 /* Fix inconsistency */ 1592#if 1 /* Fix inconsistency */
1576 IXGBE_EVC_STORE(&rxr->rx_packets, 0); 1593 IXGBE_EVC_STORE(&rxr->rx_packets, 0);
1577#endif 1594#endif
1578#endif 1595#endif
1579 rxr->vtag_strip = FALSE; 1596 rxr->vtag_strip = FALSE;
1580 1597
1581 ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 1598 ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
1582 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1599 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1583 1600
1584 /* 1601 /*
1585 * Now set up the LRO interface 1602 * Now set up the LRO interface
1586 */ 1603 */
 1604#ifdef RSC
1587 if (ixgbe_rsc_enable) 1605 if (ixgbe_rsc_enable)
1588 ixgbe_setup_hw_rsc(rxr); 1606 ixgbe_setup_hw_rsc(rxr);
 1607#endif
1589#ifdef LRO 1608#ifdef LRO
1590 else if (ifp->if_capenable & IFCAP_LRO) { 1609#ifdef RSC
 1610 else
 1611#endif
 1612 if (ifp->if_capenable & IFCAP_LRO) {
1591 device_t dev = sc->dev; 1613 device_t dev = sc->dev;
1592 int err = tcp_lro_init(lro); 1614 int err = tcp_lro_init(lro);
1593 if (err) { 1615 if (err) {
1594 device_printf(dev, "LRO Initialization failed!\n"); 1616 device_printf(dev, "LRO Initialization failed!\n");
1595 goto fail; 1617 goto fail;
1596 } 1618 }
1597 INIT_DEBUGOUT("RX Soft LRO Initialized\n"); 1619 INIT_DEBUGOUT("RX Soft LRO Initialized\n");
1598 rxr->lro_enabled = TRUE; 1620 rxr->lro_enabled = TRUE;
1599 lro->ifp = sc->ifp; 1621 lro->ifp = sc->ifp;
1600 } 1622 }
1601#endif /* LRO */ 1623#endif /* LRO */
1602 1624
1603 IXGBE_RX_UNLOCK(rxr); 1625 IXGBE_RX_UNLOCK(rxr);
@@ -1654,54 +1676,55 @@ ixgbe_free_receive_structures(struct ixg @@ -1654,54 +1676,55 @@ ixgbe_free_receive_structures(struct ixg
1654 INIT_DEBUGOUT("ixgbe_free_receive_structures: begin"); 1676 INIT_DEBUGOUT("ixgbe_free_receive_structures: begin");
1655 1677
1656 for (int i = 0; i < sc->num_queues; i++, rxr++) { 1678 for (int i = 0; i < sc->num_queues; i++, rxr++) {
1657 ixgbe_free_receive_buffers(rxr); 1679 ixgbe_free_receive_buffers(rxr);
1658#ifdef LRO 1680#ifdef LRO
1659 /* Free LRO memory */ 1681 /* Free LRO memory */
1660 tcp_lro_free(&rxr->lro); 1682 tcp_lro_free(&rxr->lro);
1661#endif /* LRO */ 1683#endif /* LRO */
1662 /* Free the ring memory as well */ 1684 /* Free the ring memory as well */
1663 ixgbe_dma_free(sc, &rxr->rxdma); 1685 ixgbe_dma_free(sc, &rxr->rxdma);
1664 IXGBE_RX_LOCK_DESTROY(rxr); 1686 IXGBE_RX_LOCK_DESTROY(rxr);
1665 } 1687 }
1666 1688
1667 free(sc->rx_rings, M_DEVBUF); 1689 kmem_free(sc->rx_rings, sizeof(struct rx_ring) * sc->num_queues);
1668} /* ixgbe_free_receive_structures */ 1690} /* ixgbe_free_receive_structures */
1669 1691
1670 1692
1671/************************************************************************ 1693/************************************************************************
1672 * ixgbe_free_receive_buffers - Free receive ring data structures 1694 * ixgbe_free_receive_buffers - Free receive ring data structures
1673 ************************************************************************/ 1695 ************************************************************************/
1674static void 1696static void
1675ixgbe_free_receive_buffers(struct rx_ring *rxr) 1697ixgbe_free_receive_buffers(struct rx_ring *rxr)
1676{ 1698{
1677 struct ixgbe_softc *sc = rxr->sc; 1699 struct ixgbe_softc *sc = rxr->sc;
1678 struct ixgbe_rx_buf *rxbuf; 1700 struct ixgbe_rx_buf *rxbuf;
1679 1701
1680 INIT_DEBUGOUT("ixgbe_free_receive_buffers: begin"); 1702 INIT_DEBUGOUT("ixgbe_free_receive_buffers: begin");
1681 1703
1682 /* Cleanup any existing buffers */ 1704 /* Cleanup any existing buffers */
1683 if (rxr->rx_buffers != NULL) { 1705 if (rxr->rx_buffers != NULL) {
1684 for (int i = 0; i < sc->num_rx_desc; i++) { 1706 for (int i = 0; i < sc->num_rx_desc; i++) {
1685 rxbuf = &rxr->rx_buffers[i]; 1707 rxbuf = &rxr->rx_buffers[i];
1686 ixgbe_rx_discard(rxr, i); 1708 ixgbe_rx_discard(rxr, i);
1687 if (rxbuf->pmap != NULL) { 1709 if (rxbuf->pmap != NULL) {
1688 ixgbe_dmamap_destroy(rxr->ptag, rxbuf->pmap); 1710 ixgbe_dmamap_destroy(rxr->ptag, rxbuf->pmap);
1689 rxbuf->pmap = NULL; 1711 rxbuf->pmap = NULL;
1690 } 1712 }
1691 } 1713 }
1692 1714
1693 if (rxr->rx_buffers != NULL) { 1715 if (rxr->rx_buffers != NULL) {
1694 free(rxr->rx_buffers, M_DEVBUF); 1716 kmem_free(rxr->rx_buffers,
 1717 sizeof(struct ixgbe_rx_buf) * rxr->num_desc);
1695 rxr->rx_buffers = NULL; 1718 rxr->rx_buffers = NULL;
1696 } 1719 }
1697 } 1720 }
1698 1721
1699 if (rxr->ptag != NULL) { 1722 if (rxr->ptag != NULL) {
1700 ixgbe_dma_tag_destroy(rxr->ptag); 1723 ixgbe_dma_tag_destroy(rxr->ptag);
1701 rxr->ptag = NULL; 1724 rxr->ptag = NULL;
1702 } 1725 }
1703 1726
1704 return; 1727 return;
1705} /* ixgbe_free_receive_buffers */ 1728} /* ixgbe_free_receive_buffers */
1706 1729
1707/************************************************************************ 1730/************************************************************************
@@ -1844,27 +1867,30 @@ ixgbe_rxeof(struct ix_queue *que) @@ -1844,27 +1867,30 @@ ixgbe_rxeof(struct ix_queue *que)
1844 sizeof(union ixgbe_adv_rx_desc) * numdesc, 1867 sizeof(union ixgbe_adv_rx_desc) * numdesc,
1845 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1868 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1846 1869
1847 /* 1870 /*
1848 * The max number of loop is rx_process_limit. If discard_multidesc is 1871 * The max number of loop is rx_process_limit. If discard_multidesc is
1849 * true, continue processing to not to send broken packet to the upper 1872 * true, continue processing to not to send broken packet to the upper
1850 * layer. 1873 * layer.
1851 */ 1874 */
1852 for (i = rxr->next_to_check; 1875 for (i = rxr->next_to_check;
1853 (loopcount < limit) || (discard_multidesc == true);) { 1876 (loopcount < limit) || (discard_multidesc == true);) {
1854 1877
1855 struct mbuf *sendmp, *mp; 1878 struct mbuf *sendmp, *mp;
1856 struct mbuf *newmp; 1879 struct mbuf *newmp;
1857 u32 rsc, ptype; 1880#ifdef RSC
 1881 u32 rsc;
 1882#endif
 1883 u32 ptype;
1858 u16 len; 1884 u16 len;
1859 u16 vtag = 0; 1885 u16 vtag = 0;
1860 bool eop; 1886 bool eop;
1861 bool discard = false; 1887 bool discard = false;
1862 1888
1863 if (wraparound) { 1889 if (wraparound) {
1864 /* Sync the last half. */ 1890 /* Sync the last half. */
1865 KASSERT(syncremain != 0); 1891 KASSERT(syncremain != 0);
1866 numdesc = syncremain; 1892 numdesc = syncremain;
1867 wraparound = false; 1893 wraparound = false;
1868 } else if (__predict_false(loopcount >= limit)) { 1894 } else if (__predict_false(loopcount >= limit)) {
1869 KASSERT(discard_multidesc == true); 1895 KASSERT(discard_multidesc == true);
1870 numdesc = 1; 1896 numdesc = 1;
@@ -1879,27 +1905,29 @@ ixgbe_rxeof(struct ix_queue *que) @@ -1879,27 +1905,29 @@ ixgbe_rxeof(struct ix_queue *que)
1879 1905
1880 cur = &rxr->rx_base[i]; 1906 cur = &rxr->rx_base[i];
1881 staterr = le32toh(cur->wb.upper.status_error); 1907 staterr = le32toh(cur->wb.upper.status_error);
1882#ifdef RSS 1908#ifdef RSS
1883 pkt_info = le16toh(cur->wb.lower.lo_dword.hs_rss.pkt_info); 1909 pkt_info = le16toh(cur->wb.lower.lo_dword.hs_rss.pkt_info);
1884#endif 1910#endif
1885 1911
1886 if ((staterr & IXGBE_RXD_STAT_DD) == 0) 1912 if ((staterr & IXGBE_RXD_STAT_DD) == 0)
1887 break; 1913 break;
1888 1914
1889 loopcount++; 1915 loopcount++;
1890 sendmp = newmp = NULL; 1916 sendmp = newmp = NULL;
1891 nbuf = NULL; 1917 nbuf = NULL;
 1918#ifdef RSC
1892 rsc = 0; 1919 rsc = 0;
 1920#endif
1893 cur->wb.upper.status_error = 0; 1921 cur->wb.upper.status_error = 0;
1894 rbuf = &rxr->rx_buffers[i]; 1922 rbuf = &rxr->rx_buffers[i];
1895 mp = rbuf->buf; 1923 mp = rbuf->buf;
1896 1924
1897 len = le16toh(cur->wb.upper.length); 1925 len = le16toh(cur->wb.upper.length);
1898 ptype = le32toh(cur->wb.lower.lo_dword.data) & 1926 ptype = le32toh(cur->wb.lower.lo_dword.data) &
1899 IXGBE_RXDADV_PKTTYPE_MASK; 1927 IXGBE_RXDADV_PKTTYPE_MASK;
1900 eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0); 1928 eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
1901 1929
1902 /* Make sure bad packets are discarded */ 1930 /* Make sure bad packets are discarded */
1903 if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) { 1931 if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
1904#if __FreeBSD_version >= 1100036 1932#if __FreeBSD_version >= 1100036
1905 if (sc->feat_en & IXGBE_FEATURE_VF) 1933 if (sc->feat_en & IXGBE_FEATURE_VF)
@@ -1965,34 +1993,37 @@ ixgbe_rxeof(struct ix_queue *que) @@ -1965,34 +1993,37 @@ ixgbe_rxeof(struct ix_queue *que)
1965 * descriptors, rather the next descriptor 1993 * descriptors, rather the next descriptor
1966 * is indicated in bits of the descriptor. 1994 * is indicated in bits of the descriptor.
1967 * This also means that we might process 1995 * This also means that we might process
1968 * more than one packet at a time, something 1996 * more than one packet at a time, something
1969 * that has never been true before, it 1997 * that has never been true before, it
1970 * required eliminating global chain pointers 1998 * required eliminating global chain pointers
1971 * in favor of what we are doing here. -jfv 1999 * in favor of what we are doing here. -jfv
1972 */ 2000 */
1973 if (!eop) { 2001 if (!eop) {
1974 /* 2002 /*
1975 * Figure out the next descriptor 2003 * Figure out the next descriptor
1976 * of this frame. 2004 * of this frame.
1977 */ 2005 */
 2006#ifdef RSC
1978 if (rxr->hw_rsc == TRUE) { 2007 if (rxr->hw_rsc == TRUE) {
1979 rsc = ixgbe_rsc_count(cur); 2008 rsc = ixgbe_rsc_count(cur);
1980 rxr->rsc_num += (rsc - 1); 2009 rxr->rsc_num += (rsc - 1);
1981 } 2010 }
1982 if (rsc) { /* Get hardware index */ 2011 if (rsc) { /* Get hardware index */
1983 nextp = ((staterr & IXGBE_RXDADV_NEXTP_MASK) >> 2012 nextp = ((staterr & IXGBE_RXDADV_NEXTP_MASK) >>
1984 IXGBE_RXDADV_NEXTP_SHIFT); 2013 IXGBE_RXDADV_NEXTP_SHIFT);
1985 } else { /* Just sequential */ 2014 } else
 2015#endif
 2016 { /* Just sequential */
1986 nextp = i + 1; 2017 nextp = i + 1;
1987 if (nextp == sc->num_rx_desc) 2018 if (nextp == sc->num_rx_desc)
1988 nextp = 0; 2019 nextp = 0;
1989 } 2020 }
1990 nbuf = &rxr->rx_buffers[nextp]; 2021 nbuf = &rxr->rx_buffers[nextp];
1991 prefetch(nbuf); 2022 prefetch(nbuf);
1992 } 2023 }
1993 /* 2024 /*
1994 * Rather than using the fmp/lmp global pointers 2025 * Rather than using the fmp/lmp global pointers
1995 * we now keep the head of a packet chain in the 2026 * we now keep the head of a packet chain in the
1996 * buffer struct and pass this along from one 2027 * buffer struct and pass this along from one
1997 * descriptor to the next, until we get EOP. 2028 * descriptor to the next, until we get EOP.
1998 */ 2029 */
@@ -2322,40 +2353,40 @@ ixgbe_dma_free(struct ixgbe_softc *sc, s @@ -2322,40 +2353,40 @@ ixgbe_dma_free(struct ixgbe_softc *sc, s
2322 * the descriptors associated with each, called only once at attach. 2353 * the descriptors associated with each, called only once at attach.
2323 ************************************************************************/ 2354 ************************************************************************/
2324int 2355int
2325ixgbe_allocate_queues(struct ixgbe_softc *sc) 2356ixgbe_allocate_queues(struct ixgbe_softc *sc)
2326{ 2357{
2327 device_t dev = sc->dev; 2358 device_t dev = sc->dev;
2328 struct ix_queue *que; 2359 struct ix_queue *que;
2329 struct tx_ring *txr; 2360 struct tx_ring *txr;
2330 struct rx_ring *rxr; 2361 struct rx_ring *rxr;
2331 int rsize, tsize, error = IXGBE_SUCCESS; 2362 int rsize, tsize, error = IXGBE_SUCCESS;
2332 int txconf = 0, rxconf = 0; 2363 int txconf = 0, rxconf = 0;
2333 2364
2334 /* First, allocate the top level queue structs */ 2365 /* First, allocate the top level queue structs */
2335 sc->queues = (struct ix_queue *)malloc(sizeof(struct ix_queue) * 2366 sc->queues = kmem_zalloc(sizeof(struct ix_queue) * sc->num_queues,
2336 sc->num_queues, M_DEVBUF, M_WAITOK | M_ZERO); 2367 KM_SLEEP);
2337 2368
2338 /* Second, allocate the TX ring struct memory */ 2369 /* Second, allocate the TX ring struct memory */
2339 sc->tx_rings = malloc(sizeof(struct tx_ring) * 2370 sc->tx_rings = kmem_zalloc(sizeof(struct tx_ring) * sc->num_queues,
2340 sc->num_queues, M_DEVBUF, M_WAITOK | M_ZERO); 2371 KM_SLEEP);
2341 2372
2342 /* Third, allocate the RX ring */ 2373 /* Third, allocate the RX ring */
2343 sc->rx_rings = (struct rx_ring *)malloc(sizeof(struct rx_ring) * 2374 sc->rx_rings = kmem_zalloc(sizeof(struct rx_ring) * sc->num_queues,
2344 sc->num_queues, M_DEVBUF, M_WAITOK | M_ZERO); 2375 KM_SLEEP);
2345 2376
2346 /* For the ring itself */ 2377 /* For the ring itself */
2347 tsize = roundup2(sc->num_tx_desc * sizeof(union ixgbe_adv_tx_desc), 2378 tsize = sc->num_tx_desc * sizeof(union ixgbe_adv_tx_desc);
2348 DBA_ALIGN); 2379 KASSERT((tsize % DBA_ALIGN) == 0);
2349 2380
2350 /* 2381 /*
2351 * Now set up the TX queues, txconf is needed to handle the 2382 * Now set up the TX queues, txconf is needed to handle the
2352 * possibility that things fail midcourse and we need to 2383 * possibility that things fail midcourse and we need to
2353 * undo memory gracefully 2384 * undo memory gracefully
2354 */ 2385 */
2355 for (int i = 0; i < sc->num_queues; i++, txconf++) { 2386 for (int i = 0; i < sc->num_queues; i++, txconf++) {
2356 /* Set up some basics */ 2387 /* Set up some basics */
2357 txr = &sc->tx_rings[i]; 2388 txr = &sc->tx_rings[i];
2358 txr->sc = sc; 2389 txr->sc = sc;
2359 txr->txr_interq = NULL; 2390 txr->txr_interq = NULL;
2360 /* In case SR-IOV is enabled, align the index properly */ 2391 /* In case SR-IOV is enabled, align the index properly */
2361#ifdef PCI_IOV 2392#ifdef PCI_IOV
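
The allocations above switch from malloc(9) with M_WAITOK | M_ZERO to NetBSD's kmem(9). One practical consequence, visible later in this function's error path and in ixgbe_free_queues(): kmem_free() must be passed the same size that was given to kmem_zalloc(), so the frees now recompute sizeof(...) * sc->num_queues. A hedged sketch of the pairing (the helper names are hypothetical; the driver open-codes the calls):

#include <sys/kmem.h>

static void *
zeroed_array_alloc(size_t nelem, size_t elemsize)
{
	/* Zeroed, may sleep: the kmem(9) counterpart of M_WAITOK | M_ZERO. */
	return kmem_zalloc(nelem * elemsize, KM_SLEEP);
}

static void
zeroed_array_free(void *p, size_t nelem, size_t elemsize)
{
	/* kmem(9) needs the original allocation size on free. */
	kmem_free(p, nelem * elemsize);
}
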
@@ -2391,28 +2422,28 @@ ixgbe_allocate_queues(struct ixgbe_softc @@ -2391,28 +2422,28 @@ ixgbe_allocate_queues(struct ixgbe_softc
2391 txr->txr_interq = pcq_create(IXGBE_BR_SIZE, KM_SLEEP); 2422 txr->txr_interq = pcq_create(IXGBE_BR_SIZE, KM_SLEEP);
2392 if (txr->txr_interq == NULL) { 2423 if (txr->txr_interq == NULL) {
2393 aprint_error_dev(dev, 2424 aprint_error_dev(dev,
2394 "Critical Failure setting up buf ring\n"); 2425 "Critical Failure setting up buf ring\n");
2395 error = ENOMEM; 2426 error = ENOMEM;
2396 goto err_tx_desc; 2427 goto err_tx_desc;
2397 } 2428 }
2398 } 2429 }
2399 } 2430 }
2400 2431
2401 /* 2432 /*
2402 * Next the RX queues... 2433 * Next the RX queues...
2403 */ 2434 */
2404 rsize = roundup2(sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc), 2435 rsize = sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc);
2405 DBA_ALIGN); 2436 KASSERT((rsize % DBA_ALIGN) == 0);
2406 for (int i = 0; i < sc->num_queues; i++, rxconf++) { 2437 for (int i = 0; i < sc->num_queues; i++, rxconf++) {
2407 rxr = &sc->rx_rings[i]; 2438 rxr = &sc->rx_rings[i];
2408 /* Set up some basics */ 2439 /* Set up some basics */
2409 rxr->sc = sc; 2440 rxr->sc = sc;
2410#ifdef PCI_IOV 2441#ifdef PCI_IOV
2411 /* In case SR-IOV is enabled, align the index properly */ 2442 /* In case SR-IOV is enabled, align the index properly */
2412 rxr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, 2443 rxr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool,
2413 i); 2444 i);
2414#else 2445#else
2415 rxr->me = i; 2446 rxr->me = i;
2416#endif 2447#endif
2417 rxr->num_desc = sc->num_rx_desc; 2448 rxr->num_desc = sc->num_rx_desc;
2418 2449
@@ -2450,39 +2481,39 @@ ixgbe_allocate_queues(struct ixgbe_softc @@ -2450,39 +2481,39 @@ ixgbe_allocate_queues(struct ixgbe_softc
2450 2481
2451 mutex_init(&que->dc_mtx, MUTEX_DEFAULT, IPL_NET); 2482 mutex_init(&que->dc_mtx, MUTEX_DEFAULT, IPL_NET);
2452 que->disabled_count = 0; 2483 que->disabled_count = 0;
2453 } 2484 }
2454 2485
2455 return (0); 2486 return (0);
2456 2487
2457err_rx_desc: 2488err_rx_desc:
2458 for (rxr = sc->rx_rings; rxconf > 0; rxr++, rxconf--) 2489 for (rxr = sc->rx_rings; rxconf > 0; rxr++, rxconf--)
2459 ixgbe_dma_free(sc, &rxr->rxdma); 2490 ixgbe_dma_free(sc, &rxr->rxdma);
2460err_tx_desc: 2491err_tx_desc:
2461 for (txr = sc->tx_rings; txconf > 0; txr++, txconf--) 2492 for (txr = sc->tx_rings; txconf > 0; txr++, txconf--)
2462 ixgbe_dma_free(sc, &txr->txdma); 2493 ixgbe_dma_free(sc, &txr->txdma);
2463 free(sc->rx_rings, M_DEVBUF); 2494 kmem_free(sc->rx_rings, sizeof(struct rx_ring) * sc->num_queues);
2464 free(sc->tx_rings, M_DEVBUF); 2495 kmem_free(sc->tx_rings, sizeof(struct tx_ring) * sc->num_queues);
2465 free(sc->queues, M_DEVBUF); 2496 kmem_free(sc->queues, sizeof(struct ix_queue) * sc->num_queues);
2466 return (error); 2497 return (error);
2467} /* ixgbe_allocate_queues */ 2498} /* ixgbe_allocate_queues */
2468 2499
2469/************************************************************************ 2500/************************************************************************
2470 * ixgbe_free_queues 2501 * ixgbe_free_queues
2471 * 2502 *
2472 * Free descriptors for the transmit and receive rings, and then 2503 * Free descriptors for the transmit and receive rings, and then
2473 * the memory associated with each. 2504 * the memory associated with each.
2474 ************************************************************************/ 2505 ************************************************************************/
2475void 2506void
2476ixgbe_free_queues(struct ixgbe_softc *sc) 2507ixgbe_free_queues(struct ixgbe_softc *sc)
2477{ 2508{
2478 struct ix_queue *que; 2509 struct ix_queue *que;
2479 int i; 2510 int i;
2480 2511
2481 ixgbe_free_transmit_structures(sc); 2512 ixgbe_free_transmit_structures(sc);
2482 ixgbe_free_receive_structures(sc); 2513 ixgbe_free_receive_structures(sc);
2483 for (i = 0; i < sc->num_queues; i++) { 2514 for (i = 0; i < sc->num_queues; i++) {
2484 que = &sc->queues[i]; 2515 que = &sc->queues[i];
2485 mutex_destroy(&que->dc_mtx); 2516 mutex_destroy(&que->dc_mtx);
2486 } 2517 }
2487 free(sc->queues, M_DEVBUF); 2518 kmem_free(sc->queues, sizeof(struct ix_queue) * sc->num_queues);
2488} /* ixgbe_free_queues */ 2519} /* ixgbe_free_queues */
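
On the dropped roundup2(): a quick arithmetic check, assuming DBA_ALIGN is 128 and the advanced descriptors are 16 bytes as in the stock headers, shows why the plain product already satisfies the KASSERT()s added in ixgbe_allocate_queues() for any descriptor count that is a multiple of eight.

#include <assert.h>

int
main(void)
{
	const unsigned dba_align = 128;	/* assumed value of DBA_ALIGN */
	const unsigned desc_size = 16;	/* assumed sizeof(union ixgbe_adv_tx_desc) */
	const unsigned num_desc = 1024;	/* a typical ring length */

	/* Mirrors KASSERT((tsize % DBA_ALIGN) == 0) above. */
	assert((num_desc * desc_size) % dba_align == 0);
	return 0;
}
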

cvs diff -r1.324.2.6 -r1.324.2.7 src/sys/dev/pci/ixgbe/ixgbe.c

--- src/sys/dev/pci/ixgbe/ixgbe.c 2023/11/03 10:10:49 1.324.2.6
+++ src/sys/dev/pci/ixgbe/ixgbe.c 2024/02/03 11:58:53 1.324.2.7
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: ixgbe.c,v 1.324.2.6 2023/11/03 10:10:49 martin Exp $ */ 1/* $NetBSD: ixgbe.c,v 1.324.2.7 2024/02/03 11:58:53 martin Exp $ */
2 2
3/****************************************************************************** 3/******************************************************************************
4 4
5 Copyright (c) 2001-2017, Intel Corporation 5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved. 6 All rights reserved.
7 7
8 Redistribution and use in source and binary forms, with or without 8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met: 9 modification, are permitted provided that the following conditions are met:
10 10
11 1. Redistributions of source code must retain the above copyright notice, 11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer. 12 this list of conditions and the following disclaimer.
13 13
14 2. Redistributions in binary form must reproduce the above copyright 14 2. Redistributions in binary form must reproduce the above copyright
@@ -54,27 +54,27 @@ @@ -54,27 +54,27 @@
54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 * POSSIBILITY OF SUCH DAMAGE. 63 * POSSIBILITY OF SUCH DAMAGE.
64 */ 64 */
65 65
66#include <sys/cdefs.h> 66#include <sys/cdefs.h>
67__KERNEL_RCSID(0, "$NetBSD: ixgbe.c,v 1.324.2.6 2023/11/03 10:10:49 martin Exp $"); 67__KERNEL_RCSID(0, "$NetBSD: ixgbe.c,v 1.324.2.7 2024/02/03 11:58:53 martin Exp $");
68 68
69#ifdef _KERNEL_OPT 69#ifdef _KERNEL_OPT
70#include "opt_inet.h" 70#include "opt_inet.h"
71#include "opt_inet6.h" 71#include "opt_inet6.h"
72#include "opt_net_mpsafe.h" 72#include "opt_net_mpsafe.h"
73#endif 73#endif
74 74
75#include "ixgbe.h" 75#include "ixgbe.h"
76#include "ixgbe_phy.h" 76#include "ixgbe_phy.h"
77#include "ixgbe_sriov.h" 77#include "ixgbe_sriov.h"
78 78
79#include <sys/cprng.h> 79#include <sys/cprng.h>
80#include <dev/mii/mii.h> 80#include <dev/mii/mii.h>
@@ -713,27 +713,27 @@ ixgbe_initialize_transmit_units(struct i @@ -713,27 +713,27 @@ ixgbe_initialize_transmit_units(struct i
713 reg &= ~(0x000000ffUL << (regshift * 8)); 713 reg &= ~(0x000000ffUL << (regshift * 8));
714 reg |= i << (regshift * 8); 714 reg |= i << (regshift * 8);
715 IXGBE_WRITE_REG(hw, tqsmreg, reg); 715 IXGBE_WRITE_REG(hw, tqsmreg, reg);
716 716
717 /* Setup the HW Tx Head and Tail descriptor pointers */ 717 /* Setup the HW Tx Head and Tail descriptor pointers */
718 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0); 718 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
719 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0); 719 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
720 720
721 /* Cache the tail address */ 721 /* Cache the tail address */
722 txr->tail = IXGBE_TDT(j); 722 txr->tail = IXGBE_TDT(j);
723 723
724 txr->txr_no_space = false; 724 txr->txr_no_space = false;
725 725
726 /* Disable Head Writeback */ 726 /* Disable relax ordering */
727 /* 727 /*
728 * Note: for X550 series devices, these registers are actually 728 * Note: for X550 series devices, these registers are actually
729 * prefixed with TPH_ instead of DCA_, but the addresses and 729 * prefixed with TPH_ instead of DCA_, but the addresses and
730 * fields remain the same. 730 * fields remain the same.
731 */ 731 */
732 switch (hw->mac.type) { 732 switch (hw->mac.type) {
733 case ixgbe_mac_82598EB: 733 case ixgbe_mac_82598EB:
734 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j)); 734 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
735 break; 735 break;
736 default: 736 default:
737 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j)); 737 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
738 break; 738 break;
739 } 739 }
@@ -3602,27 +3602,27 @@ ixgbe_add_device_sysctls(struct ixgbe_so @@ -3602,27 +3602,27 @@ ixgbe_add_device_sysctls(struct ixgbe_so
3602 ixgbe_sysctl_eee_state, 0, (void *)sc, 0, CTL_CREATE, 3602 ixgbe_sysctl_eee_state, 0, (void *)sc, 0, CTL_CREATE,
3603 CTL_EOL) != 0) 3603 CTL_EOL) != 0)
3604 aprint_error_dev(dev, "could not create sysctl\n"); 3604 aprint_error_dev(dev, "could not create sysctl\n");
3605 } 3605 }
3606} /* ixgbe_add_device_sysctls */ 3606} /* ixgbe_add_device_sysctls */
3607 3607
3608/************************************************************************ 3608/************************************************************************
3609 * ixgbe_allocate_pci_resources 3609 * ixgbe_allocate_pci_resources
3610 ************************************************************************/ 3610 ************************************************************************/
3611static int 3611static int
3612ixgbe_allocate_pci_resources(struct ixgbe_softc *sc, 3612ixgbe_allocate_pci_resources(struct ixgbe_softc *sc,
3613 const struct pci_attach_args *pa) 3613 const struct pci_attach_args *pa)
3614{ 3614{
3615 pcireg_t memtype, csr; 3615 pcireg_t memtype, csr;
3616 device_t dev = sc->dev; 3616 device_t dev = sc->dev;
3617 bus_addr_t addr; 3617 bus_addr_t addr;
3618 int flags; 3618 int flags;
3619 3619
3620 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0)); 3620 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
3621 switch (memtype) { 3621 switch (memtype) {
3622 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 3622 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
3623 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 3623 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
3624 sc->osdep.mem_bus_space_tag = pa->pa_memt; 3624 sc->osdep.mem_bus_space_tag = pa->pa_memt;
3625 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0), 3625 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
3626 memtype, &addr, &sc->osdep.mem_size, &flags) != 0) 3626 memtype, &addr, &sc->osdep.mem_size, &flags) != 0)
3627 goto map_err; 3627 goto map_err;
3628 if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) { 3628 if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
@@ -4147,26 +4147,27 @@ ixgbe_init_locked(struct ixgbe_softc *sc @@ -4147,26 +4147,27 @@ ixgbe_init_locked(struct ixgbe_softc *sc
4147 /* aka IXGBE_MAXFRS on 82599 and newer */ 4147 /* aka IXGBE_MAXFRS on 82599 and newer */
4148 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); 4148 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
4149 mhadd &= ~IXGBE_MHADD_MFS_MASK; 4149 mhadd &= ~IXGBE_MHADD_MFS_MASK;
4150 mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT; 4150 mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
4151 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); 4151 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
4152 } 4152 }
4153 4153
4154 /* Now enable all the queues */ 4154 /* Now enable all the queues */
4155 for (i = 0; i < sc->num_queues; i++) { 4155 for (i = 0; i < sc->num_queues; i++) {
4156 txr = &sc->tx_rings[i]; 4156 txr = &sc->tx_rings[i];
4157 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me)); 4157 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
4158 txdctl |= IXGBE_TXDCTL_ENABLE; 4158 txdctl |= IXGBE_TXDCTL_ENABLE;
4159 /* Set WTHRESH to 8, burst writeback */ 4159 /* Set WTHRESH to 8, burst writeback */
 4160 txdctl &= ~IXGBE_TXDCTL_WTHRESH_MASK;
4160 txdctl |= IXGBE_TX_WTHRESH << IXGBE_TXDCTL_WTHRESH_SHIFT; 4161 txdctl |= IXGBE_TX_WTHRESH << IXGBE_TXDCTL_WTHRESH_SHIFT;
4161 /* 4162 /*
4162 * When the internal queue falls below PTHRESH (32), 4163 * When the internal queue falls below PTHRESH (32),
4163 * start prefetching as long as there are at least 4164 * start prefetching as long as there are at least
4164 * HTHRESH (1) buffers ready. The values are taken 4165 * HTHRESH (1) buffers ready. The values are taken
4165 * from the Intel linux driver 3.8.21. 4166 * from the Intel linux driver 3.8.21.
4166 * Prefetching enables tx line rate even with 1 queue. 4167 * Prefetching enables tx line rate even with 1 queue.
4167 */ 4168 */
4168 txdctl |= (32 << 0) | (1 << 8); 4169 txdctl |= (32 << 0) | (1 << 8);
4169 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl); 4170 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
4170 } 4171 }
4171 4172
4172 for (i = 0; i < sc->num_queues; i++) { 4173 for (i = 0; i < sc->num_queues; i++) {
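
The one-line addition in ixgbe_init_locked() makes the TXDCTL update a proper read-modify-write: the whole WTHRESH field is cleared before the new value is OR-ed in, so a threshold programmed earlier cannot linger in the register. A standalone sketch of the pattern follows; set_wthresh() is illustrative, while the mask and shift values match the definitions added to ixgbe_type.h further down.

#include <stdint.h>

#define TXDCTL_WTHRESH_MASK	0x007f0000u	/* cf. IXGBE_TXDCTL_WTHRESH_MASK */
#define TXDCTL_WTHRESH_SHIFT	16		/* cf. IXGBE_TXDCTL_WTHRESH_SHIFT */

static uint32_t
set_wthresh(uint32_t txdctl, uint32_t wthresh)
{
	txdctl &= ~TXDCTL_WTHRESH_MASK;		/* drop any previously written value */
	txdctl |= (wthresh << TXDCTL_WTHRESH_SHIFT) & TXDCTL_WTHRESH_MASK;
	return txdctl;
}
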

cvs diff -r1.86.4.4 -r1.86.4.5 src/sys/dev/pci/ixgbe/ixgbe.h

--- src/sys/dev/pci/ixgbe/ixgbe.h 2023/10/18 11:53:22 1.86.4.4
+++ src/sys/dev/pci/ixgbe/ixgbe.h 2024/02/03 11:58:53 1.86.4.5
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: ixgbe.h,v 1.86.4.4 2023/10/18 11:53:22 martin Exp $ */ 1/* $NetBSD: ixgbe.h,v 1.86.4.5 2024/02/03 11:58:53 martin Exp $ */
2 2
3/****************************************************************************** 3/******************************************************************************
4 SPDX-License-Identifier: BSD-3-Clause 4 SPDX-License-Identifier: BSD-3-Clause
5 5
6 Copyright (c) 2001-2017, Intel Corporation 6 Copyright (c) 2001-2017, Intel Corporation
7 All rights reserved. 7 All rights reserved.
8 8
9 Redistribution and use in source and binary forms, with or without 9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met: 10 modification, are permitted provided that the following conditions are met:
11 11
12 1. Redistributions of source code must retain the above copyright notice, 12 1. Redistributions of source code must retain the above copyright notice,
13 this list of conditions and the following disclaimer. 13 this list of conditions and the following disclaimer.
14 14
@@ -315,27 +315,27 @@ struct ixgbe_dma_alloc { @@ -315,27 +315,27 @@ struct ixgbe_dma_alloc {
315struct ixgbe_mc_addr { 315struct ixgbe_mc_addr {
316 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; 316 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
317 u32 vmdq; 317 u32 vmdq;
318}; 318};
319 319
320/* 320/*
321 * Driver queue struct: this is the interrupt container 321 * Driver queue struct: this is the interrupt container
322 * for the associated tx and rx ring. 322 * for the associated tx and rx ring.
323 */ 323 */
324struct ix_queue { 324struct ix_queue {
325 struct ixgbe_softc *sc; 325 struct ixgbe_softc *sc;
326 u32 msix; /* This queue's MSI-X vector */ 326 u32 msix; /* This queue's MSI-X vector */
327 u32 eitr_setting; 327 u32 eitr_setting;
328 u32 me; 328 u8 me;
329 struct resource *res; 329 struct resource *res;
330 int busy; 330 int busy;
331 struct tx_ring *txr; 331 struct tx_ring *txr;
332 struct rx_ring *rxr; 332 struct rx_ring *rxr;
333 struct work wq_cookie; 333 struct work wq_cookie;
334 void *que_si; 334 void *que_si;
335 /* Per queue event conters */ 335 /* Per queue event conters */
336 struct evcnt irqs; /* Hardware interrupt */ 336 struct evcnt irqs; /* Hardware interrupt */
337 struct evcnt handleq; /* software_interrupt */ 337 struct evcnt handleq; /* software_interrupt */
338 struct evcnt req; /* deferred */ 338 struct evcnt req; /* deferred */
339 char namebuf[32]; /* Name for sysctl */ 339 char namebuf[32]; /* Name for sysctl */
340 char evnamebuf[32]; /* Name for evcnt */ 340 char evnamebuf[32]; /* Name for evcnt */
341 341
@@ -347,108 +347,111 @@ struct ix_queue { @@ -347,108 +347,111 @@ struct ix_queue {
347 * > 0 : this queue is disabled 347 * > 0 : this queue is disabled
348 * the value is ixgbe_disable_queue() called count 348 * the value is ixgbe_disable_queue() called count
349 */ 349 */
350 int disabled_count; 350 int disabled_count;
351 bool txrx_use_workqueue; 351 bool txrx_use_workqueue;
352}; 352};
353 353
354/* 354/*
355 * The transmit ring, one per queue 355 * The transmit ring, one per queue
356 */ 356 */
357struct tx_ring { 357struct tx_ring {
358 struct ixgbe_softc *sc; 358 struct ixgbe_softc *sc;
359 kmutex_t tx_mtx; 359 kmutex_t tx_mtx;
360 u32 me; 360 u8 me;
361 u32 tail; 361 u32 tail;
362 int busy; 362 int busy;
363 union ixgbe_adv_tx_desc *tx_base; 363 union ixgbe_adv_tx_desc *tx_base;
364 struct ixgbe_tx_buf *tx_buffers; 364 struct ixgbe_tx_buf *tx_buffers;
365 struct ixgbe_dma_alloc txdma; 365 struct ixgbe_dma_alloc txdma;
366 volatile u16 tx_avail; 366 volatile u16 tx_avail;
367 u16 next_avail_desc; 367 u16 next_avail_desc;
368 u16 next_to_clean; 368 u16 next_to_clean;
369 u16 num_desc; 369 u16 num_desc;
370 ixgbe_dma_tag_t *txtag; 370 ixgbe_dma_tag_t *txtag;
371#if 0 371#if 0
372 char mtx_name[16]; /* NetBSD has no mutex name */ 372 char mtx_name[16]; /* NetBSD has no mutex name */
373#endif 373#endif
374 pcq_t *txr_interq; 374 pcq_t *txr_interq;
375 struct work wq_cookie; 375 struct work wq_cookie;
376 void *txr_si; 376 void *txr_si;
377 bool txr_no_space; /* Like IFF_OACTIVE */ 377 bool txr_no_space; /* Like IFF_OACTIVE */
378 378
 379#ifdef IXGBE_FDIR
379 /* Flow Director */ 380 /* Flow Director */
380 u16 atr_sample; 381 u16 atr_sample;
381 u16 atr_count; 382 u16 atr_count;
 383#endif
382 384
383 u64 bytes; /* Used for AIM */ 385 u64 bytes; /* Used for AIM */
384 u64 packets; 386 u64 packets;
385 /* Soft Stats */ 387 /* Soft Stats */
386 struct evcnt total_packets; 388 struct evcnt total_packets;
387 struct evcnt pcq_drops; 389 struct evcnt pcq_drops;
388 struct evcnt no_desc_avail; 390 struct evcnt no_desc_avail;
389 struct evcnt tso_tx; 391 struct evcnt tso_tx;
390 /* Per queue conters. The adapter total is in struct adapter */ 392 /* Per queue conters. The adapter total is in struct adapter */
391 u64 q_efbig_tx_dma_setup; 393 u64 q_efbig_tx_dma_setup;
392 u64 q_mbuf_defrag_failed; 394 u64 q_mbuf_defrag_failed;
393 u64 q_efbig2_tx_dma_setup; 395 u64 q_efbig2_tx_dma_setup;
394 u64 q_einval_tx_dma_setup; 396 u64 q_einval_tx_dma_setup;
395 u64 q_other_tx_dma_setup; 397 u64 q_other_tx_dma_setup;
396 u64 q_eagain_tx_dma_setup; 398 u64 q_eagain_tx_dma_setup;
397 u64 q_enomem_tx_dma_setup; 399 u64 q_enomem_tx_dma_setup;
398 u64 q_tso_err; 400 u64 q_tso_err;
399}; 401};
400 402
401 403
402/* 404/*
403 * The Receive ring, one per rx queue 405 * The Receive ring, one per rx queue
404 */ 406 */
405struct rx_ring { 407struct rx_ring {
406 struct ixgbe_softc *sc; 408 struct ixgbe_softc *sc;
407 kmutex_t rx_mtx; 409 kmutex_t rx_mtx;
408 u32 me; 410 u8 me;
409 u32 tail; 411 u32 tail;
410 union ixgbe_adv_rx_desc *rx_base; 412 union ixgbe_adv_rx_desc *rx_base;
411 struct ixgbe_dma_alloc rxdma; 413 struct ixgbe_dma_alloc rxdma;
412#ifdef LRO 414#ifdef LRO
413 struct lro_ctrl lro; 415 struct lro_ctrl lro;
414#endif /* LRO */ 
415 bool lro_enabled; 416 bool lro_enabled;
 417#endif /* LRO */
 418#ifdef RSC
416 bool hw_rsc; 419 bool hw_rsc;
 420#endif
417 bool vtag_strip; 421 bool vtag_strip;
418 bool discard_multidesc; 422 bool discard_multidesc;
419 u16 next_to_refresh; 423 u16 next_to_refresh;
420 u16 next_to_check; 424 u16 next_to_check;
421 u16 num_desc; 425 u16 num_desc;
422 u16 mbuf_sz; 426 u16 mbuf_sz;
423#if 0 427#if 0
424 char mtx_name[16]; /* NetBSD has no mutex name */ 428 char mtx_name[16]; /* NetBSD has no mutex name */
425#endif 429#endif
426 struct ixgbe_rx_buf *rx_buffers; 430 struct ixgbe_rx_buf *rx_buffers;
427 ixgbe_dma_tag_t *ptag; 431 ixgbe_dma_tag_t *ptag;
428 432
429 u64 bytes; /* Used for AIM calc */ 433 u64 bytes; /* Used for AIM calc */
430 u64 packets; 434 u64 packets;
431 435
432 /* Soft stats */ 436 /* Soft stats */
433 struct evcnt rx_copies; 437 struct evcnt rx_copies;
434 struct evcnt rx_packets; 438 struct evcnt rx_packets;
435 struct evcnt rx_bytes; 439 struct evcnt rx_bytes;
436 struct evcnt rx_discarded; 440 struct evcnt rx_discarded;
437 struct evcnt no_mbuf; 441 struct evcnt no_mbuf;
 442#ifdef RSC
438 u64 rsc_num; 443 u64 rsc_num;
439 444#endif
440 /* Flow Director */ 
441 u64 flm; 
442}; 445};
443 446
444struct ixgbe_vf { 447struct ixgbe_vf {
445 u_int pool; 448 u_int pool;
446 u_int rar_index; 449 u_int rar_index;
447 u_int max_frame_size; 450 u_int max_frame_size;
448 uint32_t flags; 451 uint32_t flags;
449 uint8_t ether_addr[ETHER_ADDR_LEN]; 452 uint8_t ether_addr[ETHER_ADDR_LEN];
450 uint16_t mc_hash[IXGBE_MAX_VF_MC]; 453 uint16_t mc_hash[IXGBE_MAX_VF_MC];
451 uint16_t num_mc_hashes; 454 uint16_t num_mc_hashes;
452 uint16_t default_vlan; 455 uint16_t default_vlan;
453 uint16_t vlan_tag; 456 uint16_t vlan_tag;
454 uint16_t api_ver; 457 uint16_t api_ver;
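
On the u32 to u8 narrowing of "me" and the new #ifdef IXGBE_FDIR / LRO / RSC guards: on LP64 targets such as amd64 and aarch64 the member that follows is pointer-aligned, so the saved bytes usually become padding and sizeof() stays the same. A small illustration, with member names that only mimic the start of struct ix_queue (this is not driver code):

#include <stdint.h>
#include <stdio.h>

struct q_u32 { void *sc; uint32_t msix, eitr_setting; uint32_t me; void *res; };
struct q_u8  { void *sc; uint32_t msix, eitr_setting; uint8_t  me; void *res; };

int
main(void)
{
	/* On LP64 both typically print 32: the narrower "me" only turns data into padding. */
	printf("%zu %zu\n", sizeof(struct q_u32), sizeof(struct q_u8));
	return 0;
}
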

cvs diff -r1.55.4.5 -r1.55.4.6 src/sys/dev/pci/ixgbe/ixgbe_type.h

--- src/sys/dev/pci/ixgbe/ixgbe_type.h 2023/10/18 11:53:22 1.55.4.5
+++ src/sys/dev/pci/ixgbe/ixgbe_type.h 2024/02/03 11:58:53 1.55.4.6
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: ixgbe_type.h,v 1.55.4.5 2023/10/18 11:53:22 martin Exp $ */ 1/* $NetBSD: ixgbe_type.h,v 1.55.4.6 2024/02/03 11:58:53 martin Exp $ */
2 2
3/****************************************************************************** 3/******************************************************************************
4 SPDX-License-Identifier: BSD-3-Clause 4 SPDX-License-Identifier: BSD-3-Clause
5 5
6 Copyright (c) 2001-2020, Intel Corporation 6 Copyright (c) 2001-2020, Intel Corporation
7 All rights reserved. 7 All rights reserved.
8 8
9 Redistribution and use in source and binary forms, with or without 9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met: 10 modification, are permitted provided that the following conditions are met:
11 11
12 1. Redistributions of source code must retain the above copyright notice, 12 1. Redistributions of source code must retain the above copyright notice,
13 this list of conditions and the following disclaimer. 13 this list of conditions and the following disclaimer.
14 14
@@ -2615,26 +2615,27 @@ enum { @@ -2615,26 +2615,27 @@ enum {
2615#define IXGBE_RFCTL_NFS_VER_SHIFT 8 2615#define IXGBE_RFCTL_NFS_VER_SHIFT 8
2616#define IXGBE_RFCTL_NFS_VER_2 0 2616#define IXGBE_RFCTL_NFS_VER_2 0
2617#define IXGBE_RFCTL_NFS_VER_3 1 2617#define IXGBE_RFCTL_NFS_VER_3 1
2618#define IXGBE_RFCTL_NFS_VER_4 2 2618#define IXGBE_RFCTL_NFS_VER_4 2
2619#define IXGBE_RFCTL_IPV6_DIS 0x00000400 2619#define IXGBE_RFCTL_IPV6_DIS 0x00000400
2620#define IXGBE_RFCTL_IPV6_XSUM_DIS 0x00000800 2620#define IXGBE_RFCTL_IPV6_XSUM_DIS 0x00000800
2621#define IXGBE_RFCTL_IPFRSP_DIS 0x00004000 2621#define IXGBE_RFCTL_IPFRSP_DIS 0x00004000
2622#define IXGBE_RFCTL_IPV6_EX_DIS 0x00010000 2622#define IXGBE_RFCTL_IPV6_EX_DIS 0x00010000
2623#define IXGBE_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 2623#define IXGBE_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
2624 2624
2625/* Transmit Config masks */ 2625/* Transmit Config masks */
2626#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Ena specific Tx Queue */ 2626#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Ena specific Tx Queue */
2627#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wr-bk flushing */ 2627#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wr-bk flushing */
 2628#define IXGBE_TXDCTL_WTHRESH_MASK 0x007f0000
2628#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */ 2629#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */
2629/* Enable short packet padding to 64 bytes */ 2630/* Enable short packet padding to 64 bytes */
2630#define IXGBE_TX_PAD_ENABLE 0x00000400 2631#define IXGBE_TX_PAD_ENABLE 0x00000400
2631#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004 /* Allow jumbo frames */ 2632#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004 /* Allow jumbo frames */
2632/* This allows for 16K packets + 4k for vlan */ 2633/* This allows for 16K packets + 4k for vlan */
2633#define IXGBE_MAX_FRAME_SZ 0x40040000 2634#define IXGBE_MAX_FRAME_SZ 0x40040000
2634 2635
2635#define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */ 2636#define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */
2636#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq# write-back enable */ 2637#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq# write-back enable */
2637 2638
2638/* Receive Config masks */ 2639/* Receive Config masks */
2639#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ 2640#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */
2640#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Desc Monitor Bypass */ 2641#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Desc Monitor Bypass */
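
For reference, the new IXGBE_TXDCTL_WTHRESH_MASK is a seven-bit field positioned at IXGBE_TXDCTL_WTHRESH_SHIFT; the following one-liner verifies the constant added above.

#include <assert.h>

int
main(void)
{
	/* 0x007f0000 == 0x7f << 16: seven bits starting at bit 16. */
	assert(0x007f0000u == (0x7fu << 16));
	return 0;
}
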

cvs diff -r1.183.4.5 -r1.183.4.6 src/sys/dev/pci/ixgbe/ixv.c

--- src/sys/dev/pci/ixgbe/ixv.c 2023/11/03 10:10:49 1.183.4.5
+++ src/sys/dev/pci/ixgbe/ixv.c 2024/02/03 11:58:53 1.183.4.6
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: ixv.c,v 1.183.4.5 2023/11/03 10:10:49 martin Exp $ */ 1/* $NetBSD: ixv.c,v 1.183.4.6 2024/02/03 11:58:53 martin Exp $ */
2 2
3/****************************************************************************** 3/******************************************************************************
4 4
5 Copyright (c) 2001-2017, Intel Corporation 5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved. 6 All rights reserved.
7 7
8 Redistribution and use in source and binary forms, with or without 8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met: 9 modification, are permitted provided that the following conditions are met:
10 10
11 1. Redistributions of source code must retain the above copyright notice, 11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer. 12 this list of conditions and the following disclaimer.
13 13
14 2. Redistributions in binary form must reproduce the above copyright 14 2. Redistributions in binary form must reproduce the above copyright
@@ -25,27 +25,27 @@ @@ -25,27 +25,27 @@
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE. 32 POSSIBILITY OF SUCH DAMAGE.
33 33
34******************************************************************************/ 34******************************************************************************/
35/*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 331224 2018-03-19 20:55:05Z erj $*/ 35/*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 331224 2018-03-19 20:55:05Z erj $*/
36 36
37#include <sys/cdefs.h> 37#include <sys/cdefs.h>
38__KERNEL_RCSID(0, "$NetBSD: ixv.c,v 1.183.4.5 2023/11/03 10:10:49 martin Exp $"); 38__KERNEL_RCSID(0, "$NetBSD: ixv.c,v 1.183.4.6 2024/02/03 11:58:53 martin Exp $");
39 39
40#ifdef _KERNEL_OPT 40#ifdef _KERNEL_OPT
41#include "opt_inet.h" 41#include "opt_inet.h"
42#include "opt_inet6.h" 42#include "opt_inet6.h"
43#include "opt_net_mpsafe.h" 43#include "opt_net_mpsafe.h"
44#endif 44#endif
45 45
46#include "ixgbe.h" 46#include "ixgbe.h"
47 47
48/************************************************************************ 48/************************************************************************
49 * Driver version 49 * Driver version
50 ************************************************************************/ 50 ************************************************************************/
51static const char ixv_driver_version[] = "2.0.1-k"; 51static const char ixv_driver_version[] = "2.0.1-k";
@@ -820,27 +820,26 @@ ixv_init_locked(struct ixgbe_softc *sc) @@ -820,27 +820,26 @@ ixv_init_locked(struct ixgbe_softc *sc)
820 /* Start watchdog */ 820 /* Start watchdog */
821 callout_reset(&sc->timer, hz, ixv_local_timer, sc); 821 callout_reset(&sc->timer, hz, ixv_local_timer, sc);
822 atomic_store_relaxed(&sc->timer_pending, 0); 822 atomic_store_relaxed(&sc->timer_pending, 0);
823 823
824 /* OK to schedule workqueues. */ 824 /* OK to schedule workqueues. */
825 sc->schedule_wqs_ok = true; 825 sc->schedule_wqs_ok = true;
826 826
827 /* Update saved flags. See ixgbe_ifflags_cb() */ 827 /* Update saved flags. See ixgbe_ifflags_cb() */
828 sc->if_flags = ifp->if_flags; 828 sc->if_flags = ifp->if_flags;
829 sc->ec_capenable = sc->osdep.ec.ec_capenable; 829 sc->ec_capenable = sc->osdep.ec.ec_capenable;
830 830
831 /* Inform the stack we're ready */ 831 /* Inform the stack we're ready */
832 ifp->if_flags |= IFF_RUNNING; 832 ifp->if_flags |= IFF_RUNNING;
833 ifp->if_flags &= ~IFF_OACTIVE; 
834 833
835 /* And now turn on interrupts */ 834 /* And now turn on interrupts */
836 ixv_enable_intr(sc); 835 ixv_enable_intr(sc);
837 836
838 return; 837 return;
839} /* ixv_init_locked */ 838} /* ixv_init_locked */
840 839
841/************************************************************************ 840/************************************************************************
842 * ixv_enable_queue 841 * ixv_enable_queue
843 ************************************************************************/ 842 ************************************************************************/
844static inline void 843static inline void
845ixv_enable_queue(struct ixgbe_softc *sc, u32 vector) 844ixv_enable_queue(struct ixgbe_softc *sc, u32 vector)
846{ 845{
@@ -1485,52 +1484,52 @@ ixv_stop_locked(void *arg) @@ -1485,52 +1484,52 @@ ixv_stop_locked(void *arg)
1485{ 1484{
1486 struct ifnet *ifp; 1485 struct ifnet *ifp;
1487 struct ixgbe_softc *sc = arg; 1486 struct ixgbe_softc *sc = arg;
1488 struct ixgbe_hw *hw = &sc->hw; 1487 struct ixgbe_hw *hw = &sc->hw;
1489 1488
1490 ifp = sc->ifp; 1489 ifp = sc->ifp;
1491 1490
1492 KASSERT(mutex_owned(&sc->core_mtx)); 1491 KASSERT(mutex_owned(&sc->core_mtx));
1493 1492
1494 INIT_DEBUGOUT("ixv_stop_locked: begin\n"); 1493 INIT_DEBUGOUT("ixv_stop_locked: begin\n");
1495 ixv_disable_intr(sc); 1494 ixv_disable_intr(sc);
1496 1495
1497 /* Tell the stack that the interface is no longer active */ 1496 /* Tell the stack that the interface is no longer active */
1498 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1497 ifp->if_flags &= ~IFF_RUNNING;
1499 1498
1500 hw->mac.ops.reset_hw(hw); 1499 hw->mac.ops.reset_hw(hw);
1501 sc->hw.adapter_stopped = FALSE; 1500 sc->hw.adapter_stopped = FALSE;
1502 hw->mac.ops.stop_adapter(hw); 1501 hw->mac.ops.stop_adapter(hw);
1503 callout_stop(&sc->timer); 1502 callout_stop(&sc->timer);
1504 1503
1505 /* Don't schedule workqueues. */ 1504 /* Don't schedule workqueues. */
1506 sc->schedule_wqs_ok = false; 1505 sc->schedule_wqs_ok = false;
1507 1506
1508 /* reprogram the RAR[0] in case user changed it. */ 1507 /* reprogram the RAR[0] in case user changed it. */
1509 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 1508 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1510 1509
1511 return; 1510 return;
1512} /* ixv_stop_locked */ 1511} /* ixv_stop_locked */
1513 1512
1514 1513
1515/************************************************************************ 1514/************************************************************************
1516 * ixv_allocate_pci_resources 1515 * ixv_allocate_pci_resources
1517 ************************************************************************/ 1516 ************************************************************************/
1518static int 1517static int
1519ixv_allocate_pci_resources(struct ixgbe_softc *sc, 1518ixv_allocate_pci_resources(struct ixgbe_softc *sc,
1520 const struct pci_attach_args *pa) 1519 const struct pci_attach_args *pa)
1521{ 1520{
1522 pcireg_t memtype, csr; 1521 pcireg_t memtype, csr;
1523 device_t dev = sc->dev; 1522 device_t dev = sc->dev;
1524 bus_addr_t addr; 1523 bus_addr_t addr;
1525 int flags; 1524 int flags;
1526 1525
1527 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0)); 1526 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
1528 switch (memtype) { 1527 switch (memtype) {
1529 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 1528 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1530 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 1529 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1531 sc->osdep.mem_bus_space_tag = pa->pa_memt; 1530 sc->osdep.mem_bus_space_tag = pa->pa_memt;
1532 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0), 1531 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
1533 memtype, &addr, &sc->osdep.mem_size, &flags) != 0) 1532 memtype, &addr, &sc->osdep.mem_size, &flags) != 0)
1534 goto map_err; 1533 goto map_err;
1535 if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) { 1534 if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
1536 aprint_normal_dev(dev, "clearing prefetchable bit\n"); 1535 aprint_normal_dev(dev, "clearing prefetchable bit\n");
@@ -1739,26 +1738,27 @@ static void @@ -1739,26 +1738,27 @@ static void
1739ixv_initialize_transmit_units(struct ixgbe_softc *sc) 1738ixv_initialize_transmit_units(struct ixgbe_softc *sc)
1740{ 1739{
1741 struct tx_ring *txr = sc->tx_rings; 1740 struct tx_ring *txr = sc->tx_rings;
1742 struct ixgbe_hw *hw = &sc->hw; 1741 struct ixgbe_hw *hw = &sc->hw;
1743 int i; 1742 int i;
1744 1743
1745 for (i = 0; i < sc->num_queues; i++, txr++) { 1744 for (i = 0; i < sc->num_queues; i++, txr++) {
1746 u64 tdba = txr->txdma.dma_paddr; 1745 u64 tdba = txr->txdma.dma_paddr;
1747 u32 txctrl, txdctl; 1746 u32 txctrl, txdctl;
1748 int j = txr->me; 1747 int j = txr->me;
1749 1748
1750 /* Set WTHRESH to 8, burst writeback */ 1749 /* Set WTHRESH to 8, burst writeback */
1751 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); 1750 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
 1751 txdctl &= ~IXGBE_TXDCTL_WTHRESH_MASK;
1752 txdctl |= IXGBE_TX_WTHRESH << IXGBE_TXDCTL_WTHRESH_SHIFT; 1752 txdctl |= IXGBE_TX_WTHRESH << IXGBE_TXDCTL_WTHRESH_SHIFT;
1753 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl); 1753 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1754 1754
1755 /* Set the HW Tx Head and Tail indices */ 1755 /* Set the HW Tx Head and Tail indices */
1756 IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTDH(j), 0); 1756 IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTDH(j), 0);
1757 IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTDT(j), 0); 1757 IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTDT(j), 0);
1758 1758
1759 /* Set Tx Tail register */ 1759 /* Set Tx Tail register */
1760 txr->tail = IXGBE_VFTDT(j); 1760 txr->tail = IXGBE_VFTDT(j);
1761 1761
1762 txr->txr_no_space = false; 1762 txr->txr_no_space = false;
1763 1763
1764 /* Set Ring parameters */ 1764 /* Set Ring parameters */