Wed Oct 14 07:16:04 2015 UTC ()
- Update TODO:
  - "Multiqueue" -> "TX Multiqueue" because RX Multiqueue was added.
  - Sort entries.
- Group related functions together.


(msaitoh)
diff -r1.370 -r1.371 src/sys/dev/pci/if_wm.c

cvs diff -r1.370 -r1.371 src/sys/dev/pci/if_wm.c (expand / switch to unified diff)

--- src/sys/dev/pci/if_wm.c 2015/10/13 21:28:41 1.370
+++ src/sys/dev/pci/if_wm.c 2015/10/14 07:16:04 1.371
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: if_wm.c,v 1.370 2015/10/13 21:28:41 christos Exp $ */ 1/* $NetBSD: if_wm.c,v 1.371 2015/10/14 07:16:04 msaitoh Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc. 4 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc. 7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
@@ -63,37 +63,37 @@ @@ -63,37 +63,37 @@
63 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 63 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 64 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 65 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 66 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67 POSSIBILITY OF SUCH DAMAGE. 67 POSSIBILITY OF SUCH DAMAGE.
68 68
69*******************************************************************************/ 69*******************************************************************************/
70/* 70/*
71 * Device driver for the Intel i8254x family of Gigabit Ethernet chips. 71 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72 * 72 *
73 * TODO (in order of importance): 73 * TODO (in order of importance):
74 * 74 *
75 * - Check XXX'ed comments 75 * - Check XXX'ed comments
76 * - EEE (Energy Efficiency Ethernet) 
77 * - Multi queue 
78 * - Image Unique ID 
79 * - LPLU other than PCH* 76 * - LPLU other than PCH*
 77 * - TX Multi queue
 78 * - EEE (Energy Efficiency Ethernet)
80 * - Virtual Function 79 * - Virtual Function
81 * - Set LED correctly (based on contents in EEPROM) 80 * - Set LED correctly (based on contents in EEPROM)
82 * - Rework how parameters are loaded from the EEPROM. 81 * - Rework how parameters are loaded from the EEPROM.
 82 * - Image Unique ID
83 */ 83 */
84 84
85#include <sys/cdefs.h> 85#include <sys/cdefs.h>
86__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.370 2015/10/13 21:28:41 christos Exp $"); 86__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.371 2015/10/14 07:16:04 msaitoh Exp $");
87 87
88#ifdef _KERNEL_OPT 88#ifdef _KERNEL_OPT
89#include "opt_net_mpsafe.h" 89#include "opt_net_mpsafe.h"
90#endif 90#endif
91 91
92#include <sys/param.h> 92#include <sys/param.h>
93#include <sys/systm.h> 93#include <sys/systm.h>
94#include <sys/callout.h> 94#include <sys/callout.h>
95#include <sys/mbuf.h> 95#include <sys/mbuf.h>
96#include <sys/malloc.h> 96#include <sys/malloc.h>
97#include <sys/kmem.h> 97#include <sys/kmem.h>
98#include <sys/kernel.h> 98#include <sys/kernel.h>
99#include <sys/socket.h> 99#include <sys/socket.h>
@@ -556,73 +556,75 @@ static uint32_t wm_mchash(struct wm_soft @@ -556,73 +556,75 @@ static uint32_t wm_mchash(struct wm_soft
556static void wm_set_filter(struct wm_softc *); 556static void wm_set_filter(struct wm_softc *);
557/* Reset and init related */ 557/* Reset and init related */
558static void wm_set_vlan(struct wm_softc *); 558static void wm_set_vlan(struct wm_softc *);
559static void wm_set_pcie_completion_timeout(struct wm_softc *); 559static void wm_set_pcie_completion_timeout(struct wm_softc *);
560static void wm_get_auto_rd_done(struct wm_softc *); 560static void wm_get_auto_rd_done(struct wm_softc *);
561static void wm_lan_init_done(struct wm_softc *); 561static void wm_lan_init_done(struct wm_softc *);
562static void wm_get_cfg_done(struct wm_softc *); 562static void wm_get_cfg_done(struct wm_softc *);
563static void wm_initialize_hardware_bits(struct wm_softc *); 563static void wm_initialize_hardware_bits(struct wm_softc *);
564static uint32_t wm_rxpbs_adjust_82580(uint32_t); 564static uint32_t wm_rxpbs_adjust_82580(uint32_t);
565static void wm_reset(struct wm_softc *); 565static void wm_reset(struct wm_softc *);
566static int wm_add_rxbuf(struct wm_rxqueue *, int); 566static int wm_add_rxbuf(struct wm_rxqueue *, int);
567static void wm_rxdrain(struct wm_rxqueue *); 567static void wm_rxdrain(struct wm_rxqueue *);
568static void wm_init_rss(struct wm_softc *); 568static void wm_init_rss(struct wm_softc *);
 569#ifdef WM_MSI_MSIX
 570static void wm_adjust_qnum(struct wm_softc *, int);
 571static int wm_setup_legacy(struct wm_softc *);
 572static int wm_setup_msix(struct wm_softc *);
 573#endif
569static int wm_init(struct ifnet *); 574static int wm_init(struct ifnet *);
570static int wm_init_locked(struct ifnet *); 575static int wm_init_locked(struct ifnet *);
571static void wm_stop(struct ifnet *, int); 576static void wm_stop(struct ifnet *, int);
572static void wm_stop_locked(struct ifnet *, int); 577static void wm_stop_locked(struct ifnet *, int);
573static int wm_tx_offload(struct wm_softc *, struct wm_txsoft *, 
574 uint32_t *, uint8_t *); 
575static void wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *); 578static void wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
576static void wm_82547_txfifo_stall(void *); 579static void wm_82547_txfifo_stall(void *);
577static int wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *); 580static int wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
578/* DMA related */ 581/* DMA related */
579static int wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *); 582static int wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
580static void wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *); 583static void wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
581static void wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *); 584static void wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
582static void wm_init_tx_regs(struct wm_softc *, struct wm_txqueue *); 585static void wm_init_tx_regs(struct wm_softc *, struct wm_txqueue *);
583static int wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *); 586static int wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
584static void wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *); 587static void wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
585static void wm_init_rx_regs(struct wm_softc *, struct wm_rxqueue *); 588static void wm_init_rx_regs(struct wm_softc *, struct wm_rxqueue *);
586static int wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *); 589static int wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
587static void wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *); 590static void wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
588static void wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *); 591static void wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
589static int wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *); 592static int wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
590static void wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *); 593static void wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
591static int wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *); 594static int wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
592static void wm_init_tx_queue(struct wm_softc *, struct wm_txqueue *); 595static void wm_init_tx_queue(struct wm_softc *, struct wm_txqueue *);
593static int wm_init_rx_queue(struct wm_softc *, struct wm_rxqueue *); 596static int wm_init_rx_queue(struct wm_softc *, struct wm_rxqueue *);
594static int wm_alloc_txrx_queues(struct wm_softc *); 597static int wm_alloc_txrx_queues(struct wm_softc *);
595static void wm_free_txrx_queues(struct wm_softc *); 598static void wm_free_txrx_queues(struct wm_softc *);
596static int wm_init_txrx_queues(struct wm_softc *); 599static int wm_init_txrx_queues(struct wm_softc *);
597/* Start */ 600/* Start */
 601static int wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
 602 uint32_t *, uint8_t *);
598static void wm_start(struct ifnet *); 603static void wm_start(struct ifnet *);
599static void wm_start_locked(struct ifnet *); 604static void wm_start_locked(struct ifnet *);
600static int wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *, 605static int wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
601 uint32_t *, uint32_t *, bool *); 606 uint32_t *, uint32_t *, bool *);
602static void wm_nq_start(struct ifnet *); 607static void wm_nq_start(struct ifnet *);
603static void wm_nq_start_locked(struct ifnet *); 608static void wm_nq_start_locked(struct ifnet *);
604/* Interrupt */ 609/* Interrupt */
605static int wm_txeof(struct wm_softc *); 610static int wm_txeof(struct wm_softc *);
606static void wm_rxeof(struct wm_rxqueue *); 611static void wm_rxeof(struct wm_rxqueue *);
607static void wm_linkintr_gmii(struct wm_softc *, uint32_t); 612static void wm_linkintr_gmii(struct wm_softc *, uint32_t);
608static void wm_linkintr_tbi(struct wm_softc *, uint32_t); 613static void wm_linkintr_tbi(struct wm_softc *, uint32_t);
609static void wm_linkintr_serdes(struct wm_softc *, uint32_t); 614static void wm_linkintr_serdes(struct wm_softc *, uint32_t);
610static void wm_linkintr(struct wm_softc *, uint32_t); 615static void wm_linkintr(struct wm_softc *, uint32_t);
611static int wm_intr_legacy(void *); 616static int wm_intr_legacy(void *);
612#ifdef WM_MSI_MSIX 617#ifdef WM_MSI_MSIX
613static void wm_adjust_qnum(struct wm_softc *, int); 
614static int wm_setup_legacy(struct wm_softc *); 
615static int wm_setup_msix(struct wm_softc *); 
616static int wm_txintr_msix(void *); 618static int wm_txintr_msix(void *);
617static int wm_rxintr_msix(void *); 619static int wm_rxintr_msix(void *);
618static int wm_linkintr_msix(void *); 620static int wm_linkintr_msix(void *);
619#endif 621#endif
620 622
621/* 623/*
622 * Media related. 624 * Media related.
623 * GMII, SGMII, TBI, SERDES and SFP. 625 * GMII, SGMII, TBI, SERDES and SFP.
624 */ 626 */
625/* Common */ 627/* Common */
626static void wm_tbi_serdes_set_linkled(struct wm_softc *); 628static void wm_tbi_serdes_set_linkled(struct wm_softc *);
627/* GMII related */ 629/* GMII related */
628static void wm_gmii_reset(struct wm_softc *); 630static void wm_gmii_reset(struct wm_softc *);
@@ -5042,222 +5044,26 @@ wm_stop_locked(struct ifnet *ifp, int di @@ -5042,222 +5044,26 @@ wm_stop_locked(struct ifnet *ifp, int di
5042 struct wm_rxqueue *rxq = &sc->sc_rxq[i]; 5044 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
5043 WM_RX_LOCK(rxq); 5045 WM_RX_LOCK(rxq);
5044 wm_rxdrain(rxq); 5046 wm_rxdrain(rxq);
5045 WM_RX_UNLOCK(rxq); 5047 WM_RX_UNLOCK(rxq);
5046 } 5048 }
5047 } 5049 }
5048 5050
5049#if 0 /* notyet */ 5051#if 0 /* notyet */
5050 if (sc->sc_type >= WM_T_82544) 5052 if (sc->sc_type >= WM_T_82544)
5051 CSR_WRITE(sc, WMREG_WUC, 0); 5053 CSR_WRITE(sc, WMREG_WUC, 0);
5052#endif 5054#endif
5053} 5055}
5054 5056
5055/* 
5056 * wm_tx_offload: 
5057 * 
5058 * Set up TCP/IP checksumming parameters for the 
5059 * specified packet. 
5060 */ 
5061static int 
5062wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp, 
5063 uint8_t *fieldsp) 
5064{ 
5065 struct wm_txqueue *txq = &sc->sc_txq[0]; 
5066 struct mbuf *m0 = txs->txs_mbuf; 
5067 struct livengood_tcpip_ctxdesc *t; 
5068 uint32_t ipcs, tucs, cmd, cmdlen, seg; 
5069 uint32_t ipcse; 
5070 struct ether_header *eh; 
5071 int offset, iphl; 
5072 uint8_t fields; 
5073 
5074 /* 
5075 * XXX It would be nice if the mbuf pkthdr had offset 
5076 * fields for the protocol headers. 
5077 */ 
5078 
5079 eh = mtod(m0, struct ether_header *); 
5080 switch (htons(eh->ether_type)) { 
5081 case ETHERTYPE_IP: 
5082 case ETHERTYPE_IPV6: 
5083 offset = ETHER_HDR_LEN; 
5084 break; 
5085 
5086 case ETHERTYPE_VLAN: 
5087 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 
5088 break; 
5089 
5090 default: 
5091 /* 
5092 * Don't support this protocol or encapsulation. 
5093 */ 
5094 *fieldsp = 0; 
5095 *cmdp = 0; 
5096 return 0; 
5097 } 
5098 
5099 if ((m0->m_pkthdr.csum_flags & 
5100 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) { 
5101 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data); 
5102 } else { 
5103 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data); 
5104 } 
5105 ipcse = offset + iphl - 1; 
5106 
5107 cmd = WTX_CMD_DEXT | WTX_DTYP_D; 
5108 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE; 
5109 seg = 0; 
5110 fields = 0; 
5111 
5112 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) { 
5113 int hlen = offset + iphl; 
5114 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0; 
5115 
5116 if (__predict_false(m0->m_len < 
5117 (hlen + sizeof(struct tcphdr)))) { 
5118 /* 
5119 * TCP/IP headers are not in the first mbuf; we need 
5120 * to do this the slow and painful way. Let's just 
5121 * hope this doesn't happen very often. 
5122 */ 
5123 struct tcphdr th; 
5124 
5125 WM_EVCNT_INCR(&sc->sc_ev_txtsopain); 
5126 
5127 m_copydata(m0, hlen, sizeof(th), &th); 
5128 if (v4) { 
5129 struct ip ip; 
5130 
5131 m_copydata(m0, offset, sizeof(ip), &ip); 
5132 ip.ip_len = 0; 
5133 m_copyback(m0, 
5134 offset + offsetof(struct ip, ip_len), 
5135 sizeof(ip.ip_len), &ip.ip_len); 
5136 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr, 
5137 ip.ip_dst.s_addr, htons(IPPROTO_TCP)); 
5138 } else { 
5139 struct ip6_hdr ip6; 
5140 
5141 m_copydata(m0, offset, sizeof(ip6), &ip6); 
5142 ip6.ip6_plen = 0; 
5143 m_copyback(m0, 
5144 offset + offsetof(struct ip6_hdr, ip6_plen), 
5145 sizeof(ip6.ip6_plen), &ip6.ip6_plen); 
5146 th.th_sum = in6_cksum_phdr(&ip6.ip6_src, 
5147 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP)); 
5148 } 
5149 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum), 
5150 sizeof(th.th_sum), &th.th_sum); 
5151 
5152 hlen += th.th_off << 2; 
5153 } else { 
5154 /* 
5155 * TCP/IP headers are in the first mbuf; we can do 
5156 * this the easy way. 
5157 */ 
5158 struct tcphdr *th; 
5159 
5160 if (v4) { 
5161 struct ip *ip = 
5162 (void *)(mtod(m0, char *) + offset); 
5163 th = (void *)(mtod(m0, char *) + hlen); 
5164 
5165 ip->ip_len = 0; 
5166 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, 
5167 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 
5168 } else { 
5169 struct ip6_hdr *ip6 = 
5170 (void *)(mtod(m0, char *) + offset); 
5171 th = (void *)(mtod(m0, char *) + hlen); 
5172 
5173 ip6->ip6_plen = 0; 
5174 th->th_sum = in6_cksum_phdr(&ip6->ip6_src, 
5175 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP)); 
5176 } 
5177 hlen += th->th_off << 2; 
5178 } 
5179 
5180 if (v4) { 
5181 WM_EVCNT_INCR(&sc->sc_ev_txtso); 
5182 cmdlen |= WTX_TCPIP_CMD_IP; 
5183 } else { 
5184 WM_EVCNT_INCR(&sc->sc_ev_txtso6); 
5185 ipcse = 0; 
5186 } 
5187 cmd |= WTX_TCPIP_CMD_TSE; 
5188 cmdlen |= WTX_TCPIP_CMD_TSE | 
5189 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen); 
5190 seg = WTX_TCPIP_SEG_HDRLEN(hlen) | 
5191 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz); 
5192 } 
5193 
5194 /* 
5195 * NOTE: Even if we're not using the IP or TCP/UDP checksum 
5196 * offload feature, if we load the context descriptor, we 
5197 * MUST provide valid values for IPCSS and TUCSS fields. 
5198 */ 
5199 
5200 ipcs = WTX_TCPIP_IPCSS(offset) | 
5201 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) | 
5202 WTX_TCPIP_IPCSE(ipcse); 
5203 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) { 
5204 WM_EVCNT_INCR(&sc->sc_ev_txipsum); 
5205 fields |= WTX_IXSM; 
5206 } 
5207 
5208 offset += iphl; 
5209 
5210 if (m0->m_pkthdr.csum_flags & 
5211 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) { 
5212 WM_EVCNT_INCR(&sc->sc_ev_txtusum); 
5213 fields |= WTX_TXSM; 
5214 tucs = WTX_TCPIP_TUCSS(offset) | 
5215 WTX_TCPIP_TUCSO(offset + 
5216 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) | 
5217 WTX_TCPIP_TUCSE(0) /* rest of packet */; 
5218 } else if ((m0->m_pkthdr.csum_flags & 
5219 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) { 
5220 WM_EVCNT_INCR(&sc->sc_ev_txtusum6); 
5221 fields |= WTX_TXSM; 
5222 tucs = WTX_TCPIP_TUCSS(offset) | 
5223 WTX_TCPIP_TUCSO(offset + 
5224 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) | 
5225 WTX_TCPIP_TUCSE(0) /* rest of packet */; 
5226 } else { 
5227 /* Just initialize it to a valid TCP context. */ 
5228 tucs = WTX_TCPIP_TUCSS(offset) | 
5229 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) | 
5230 WTX_TCPIP_TUCSE(0) /* rest of packet */; 
5231 } 
5232 
5233 /* Fill in the context descriptor. */ 
5234 t = (struct livengood_tcpip_ctxdesc *) 
5235 &txq->txq_descs[txq->txq_next]; 
5236 t->tcpip_ipcs = htole32(ipcs); 
5237 t->tcpip_tucs = htole32(tucs); 
5238 t->tcpip_cmdlen = htole32(cmdlen); 
5239 t->tcpip_seg = htole32(seg); 
5240 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE); 
5241 
5242 txq->txq_next = WM_NEXTTX(txq, txq->txq_next); 
5243 txs->txs_ndesc++; 
5244 
5245 *cmdp = cmd; 
5246 *fieldsp = fields; 
5247 
5248 return 0; 
5249} 
5250 
5251static void 5057static void
5252wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0) 5058wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
5253{ 5059{
5254 struct mbuf *m; 5060 struct mbuf *m;
5255 int i; 5061 int i;
5256 5062
5257 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev)); 5063 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
5258 for (m = m0, i = 0; m != NULL; m = m->m_next, i++) 5064 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
5259 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, " 5065 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
5260 "m_flags = 0x%08x\n", device_xname(sc->sc_dev), 5066 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
5261 m->m_data, m->m_len, m->m_flags); 5067 m->m_data, m->m_len, m->m_flags);
5262 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev), 5068 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
5263 i, i == 1 ? "" : "s"); 5069 i, i == 1 ? "" : "s");
@@ -5961,26 +5767,222 @@ wm_init_txrx_queues(struct wm_softc *sc) @@ -5961,26 +5767,222 @@ wm_init_txrx_queues(struct wm_softc *sc)
5961 for (i = 0; i < sc->sc_nrxqueues; i++) { 5767 for (i = 0; i < sc->sc_nrxqueues; i++) {
5962 struct wm_rxqueue *rxq = &sc->sc_rxq[i]; 5768 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
5963 WM_RX_LOCK(rxq); 5769 WM_RX_LOCK(rxq);
5964 error = wm_init_rx_queue(sc, rxq); 5770 error = wm_init_rx_queue(sc, rxq);
5965 WM_RX_UNLOCK(rxq); 5771 WM_RX_UNLOCK(rxq);
5966 if (error) 5772 if (error)
5967 break; 5773 break;
5968 } 5774 }
5969 5775
5970 return error; 5776 return error;
5971} 5777}
5972 5778
5973/* 5779/*
 5780 * wm_tx_offload:
 5781 *
 5782 * Set up TCP/IP checksumming parameters for the
 5783 * specified packet.
 5784 */
 5785static int
 5786wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
 5787 uint8_t *fieldsp)
 5788{
 5789 struct wm_txqueue *txq = &sc->sc_txq[0];
 5790 struct mbuf *m0 = txs->txs_mbuf;
 5791 struct livengood_tcpip_ctxdesc *t;
 5792 uint32_t ipcs, tucs, cmd, cmdlen, seg;
 5793 uint32_t ipcse;
 5794 struct ether_header *eh;
 5795 int offset, iphl;
 5796 uint8_t fields;
 5797
 5798 /*
 5799 * XXX It would be nice if the mbuf pkthdr had offset
 5800 * fields for the protocol headers.
 5801 */
 5802
 5803 eh = mtod(m0, struct ether_header *);
 5804 switch (htons(eh->ether_type)) {
 5805 case ETHERTYPE_IP:
 5806 case ETHERTYPE_IPV6:
 5807 offset = ETHER_HDR_LEN;
 5808 break;
 5809
 5810 case ETHERTYPE_VLAN:
 5811 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
 5812 break;
 5813
 5814 default:
 5815 /*
 5816 * Don't support this protocol or encapsulation.
 5817 */
 5818 *fieldsp = 0;
 5819 *cmdp = 0;
 5820 return 0;
 5821 }
 5822
 5823 if ((m0->m_pkthdr.csum_flags &
 5824 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
 5825 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
 5826 } else {
 5827 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
 5828 }
 5829 ipcse = offset + iphl - 1;
 5830
 5831 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
 5832 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
 5833 seg = 0;
 5834 fields = 0;
 5835
 5836 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
 5837 int hlen = offset + iphl;
 5838 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
 5839
 5840 if (__predict_false(m0->m_len <
 5841 (hlen + sizeof(struct tcphdr)))) {
 5842 /*
 5843 * TCP/IP headers are not in the first mbuf; we need
 5844 * to do this the slow and painful way. Let's just
 5845 * hope this doesn't happen very often.
 5846 */
 5847 struct tcphdr th;
 5848
 5849 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
 5850
 5851 m_copydata(m0, hlen, sizeof(th), &th);
 5852 if (v4) {
 5853 struct ip ip;
 5854
 5855 m_copydata(m0, offset, sizeof(ip), &ip);
 5856 ip.ip_len = 0;
 5857 m_copyback(m0,
 5858 offset + offsetof(struct ip, ip_len),
 5859 sizeof(ip.ip_len), &ip.ip_len);
 5860 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
 5861 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
 5862 } else {
 5863 struct ip6_hdr ip6;
 5864
 5865 m_copydata(m0, offset, sizeof(ip6), &ip6);
 5866 ip6.ip6_plen = 0;
 5867 m_copyback(m0,
 5868 offset + offsetof(struct ip6_hdr, ip6_plen),
 5869 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
 5870 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
 5871 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
 5872 }
 5873 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
 5874 sizeof(th.th_sum), &th.th_sum);
 5875
 5876 hlen += th.th_off << 2;
 5877 } else {
 5878 /*
 5879 * TCP/IP headers are in the first mbuf; we can do
 5880 * this the easy way.
 5881 */
 5882 struct tcphdr *th;
 5883
 5884 if (v4) {
 5885 struct ip *ip =
 5886 (void *)(mtod(m0, char *) + offset);
 5887 th = (void *)(mtod(m0, char *) + hlen);
 5888
 5889 ip->ip_len = 0;
 5890 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
 5891 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
 5892 } else {
 5893 struct ip6_hdr *ip6 =
 5894 (void *)(mtod(m0, char *) + offset);
 5895 th = (void *)(mtod(m0, char *) + hlen);
 5896
 5897 ip6->ip6_plen = 0;
 5898 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
 5899 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
 5900 }
 5901 hlen += th->th_off << 2;
 5902 }
 5903
 5904 if (v4) {
 5905 WM_EVCNT_INCR(&sc->sc_ev_txtso);
 5906 cmdlen |= WTX_TCPIP_CMD_IP;
 5907 } else {
 5908 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
 5909 ipcse = 0;
 5910 }
 5911 cmd |= WTX_TCPIP_CMD_TSE;
 5912 cmdlen |= WTX_TCPIP_CMD_TSE |
 5913 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
 5914 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
 5915 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
 5916 }
 5917
 5918 /*
 5919 * NOTE: Even if we're not using the IP or TCP/UDP checksum
 5920 * offload feature, if we load the context descriptor, we
 5921 * MUST provide valid values for IPCSS and TUCSS fields.
 5922 */
 5923
 5924 ipcs = WTX_TCPIP_IPCSS(offset) |
 5925 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
 5926 WTX_TCPIP_IPCSE(ipcse);
 5927 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
 5928 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
 5929 fields |= WTX_IXSM;
 5930 }
 5931
 5932 offset += iphl;
 5933
 5934 if (m0->m_pkthdr.csum_flags &
 5935 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
 5936 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
 5937 fields |= WTX_TXSM;
 5938 tucs = WTX_TCPIP_TUCSS(offset) |
 5939 WTX_TCPIP_TUCSO(offset +
 5940 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
 5941 WTX_TCPIP_TUCSE(0) /* rest of packet */;
 5942 } else if ((m0->m_pkthdr.csum_flags &
 5943 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
 5944 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
 5945 fields |= WTX_TXSM;
 5946 tucs = WTX_TCPIP_TUCSS(offset) |
 5947 WTX_TCPIP_TUCSO(offset +
 5948 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
 5949 WTX_TCPIP_TUCSE(0) /* rest of packet */;
 5950 } else {
 5951 /* Just initialize it to a valid TCP context. */
 5952 tucs = WTX_TCPIP_TUCSS(offset) |
 5953 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
 5954 WTX_TCPIP_TUCSE(0) /* rest of packet */;
 5955 }
 5956
 5957 /* Fill in the context descriptor. */
 5958 t = (struct livengood_tcpip_ctxdesc *)
 5959 &txq->txq_descs[txq->txq_next];
 5960 t->tcpip_ipcs = htole32(ipcs);
 5961 t->tcpip_tucs = htole32(tucs);
 5962 t->tcpip_cmdlen = htole32(cmdlen);
 5963 t->tcpip_seg = htole32(seg);
 5964 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
 5965
 5966 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
 5967 txs->txs_ndesc++;
 5968
 5969 *cmdp = cmd;
 5970 *fieldsp = fields;
 5971
 5972 return 0;
 5973}
 5974
 5975/*
5974 * wm_start: [ifnet interface function] 5976 * wm_start: [ifnet interface function]
5975 * 5977 *
5976 * Start packet transmission on the interface. 5978 * Start packet transmission on the interface.
5977 */ 5979 */
5978static void 5980static void
5979wm_start(struct ifnet *ifp) 5981wm_start(struct ifnet *ifp)
5980{ 5982{
5981 struct wm_softc *sc = ifp->if_softc; 5983 struct wm_softc *sc = ifp->if_softc;
5982 struct wm_txqueue *txq = &sc->sc_txq[0]; 5984 struct wm_txqueue *txq = &sc->sc_txq[0];
5983 5985
5984 WM_TX_LOCK(txq); 5986 WM_TX_LOCK(txq);
5985 if (!sc->sc_stopping) 5987 if (!sc->sc_stopping)
5986 wm_start_locked(ifp); 5988 wm_start_locked(ifp);