Sun Dec 23 12:09:45 2018 UTC
Clean up the TX path:
- split it into sub-functions
- rate-limit the printf on mbuf allocation failure (see the sketch below)
- don't loop forever on mbuf allocation failure


(bouyer)
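
The rate limiting mentioned above uses the stock ratecheck(9) idiom. Here is a
minimal sketch of that pattern for reference; the helper name and the 30-second
interval are assumptions for illustration only (the committed code open-codes
the check in the TX loop and uses xni_pool_errintvl, which is defined elsewhere
in this file and not visible in the diff):

	#include <sys/time.h>	/* struct timeval, ratecheck(9) */

	/*
	 * Hypothetical helper showing the ratecheck(9) pattern: emit at
	 * most one diagnostic per interval, so a sustained mbuf shortage
	 * cannot flood the console.
	 */
	static void
	xennetback_tx_nomem_msg(struct ifnet *ifp)
	{
		static const struct timeval errintvl = { 30, 0 };	/* 30s, assumed */
		static struct timeval lasttime;

		if (ratecheck(&lasttime, &errintvl))
			printf("%s: cannot allocate new mbuf\n", ifp->if_xname);
	}
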
diff -r1.71 -r1.72 src/sys/arch/xen/xen/xennetback_xenbus.c

--- src/sys/arch/xen/xen/xennetback_xenbus.c 2018/10/26 05:33:21 1.71
+++ src/sys/arch/xen/xen/xennetback_xenbus.c 2018/12/23 12:09:45 1.72
@@ -1,4 +1,4 @@
-/*      $NetBSD: xennetback_xenbus.c,v 1.71 2018/10/26 05:33:21 cherry Exp $      */
+/*      $NetBSD: xennetback_xenbus.c,v 1.72 2018/12/23 12:09:45 bouyer Exp $      */
 
 /*
  * Copyright (c) 2006 Manuel Bouyer.
@@ -25,7 +25,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xennetback_xenbus.c,v 1.71 2018/10/26 05:33:21 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xennetback_xenbus.c,v 1.72 2018/12/23 12:09:45 bouyer Exp $");
 
 #include "opt_xen.h"
 
@@ -1215,7 +1215,65 @@
 	splx(s);
 }
 
+/*
+ * Slightly different from m_dup(); for some reason m_dup() can return
+ * a chain where the data area can cross a page boundary.
+ * This doesn't happen with the function below.
+ */
+static struct mbuf *
+xennetback_copymbuf(struct mbuf *m)
+{
+	struct mbuf *new_m;
+
+	MGETHDR(new_m, M_DONTWAIT, MT_DATA);
+	if (__predict_false(new_m == NULL)) {
+		return NULL;
+	}
+	if (m->m_pkthdr.len > MHLEN) {
+		MCLGET(new_m, M_DONTWAIT);
+		if (__predict_false(
+		    (new_m->m_flags & M_EXT) == 0)) {
+			m_freem(new_m);
+			return NULL;
+		}
+	}
+	m_copydata(m, 0, m->m_pkthdr.len,
+	    mtod(new_m, void *));
+	new_m->m_len = new_m->m_pkthdr.len =
+	    m->m_pkthdr.len;
+	return new_m;
+}
+
+/* return physical page address and offset of data area of an mbuf */
 static void
+xennetback_mbuf_addr(struct mbuf *m, paddr_t *xmit_pa, int *offset)
+{
+	switch (m->m_flags & (M_EXT|M_EXT_CLUSTER)) {
+	case M_EXT|M_EXT_CLUSTER:
+		KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
+		*xmit_pa = m->m_ext.ext_paddr;
+		*offset = m->m_data - m->m_ext.ext_buf;
+		break;
+	case 0:
+		KASSERT(m->m_paddr != M_PADDR_INVALID);
+		*xmit_pa = m->m_paddr;
+		*offset = M_BUFOFFSET(m) +
+		    (m->m_data - M_BUFADDR(m));
+		break;
+	default:
+		if (__predict_false(
+		    !pmap_extract(pmap_kernel(),
+		    (vaddr_t)m->m_data, xmit_pa))) {
+			panic("xennet_start: no pa");
+		}
+		*offset = 0;
+		break;
+	}
+	*offset += (*xmit_pa & ~PG_FRAME);
+	*xmit_pa = (*xmit_pa & PG_FRAME);
+}
+
+static void
 xennetback_ifsoftstart_copy(void *arg)
 {
 	struct xnetback_instance *xneti = arg;
@@ -1230,6 +1288,7 @@
 	int do_event = 0;
 	gnttab_copy_t *gop;
 	int id, offset;
+	bool abort;
 
 	XENPRINTF(("xennetback_ifsoftstart_copy "));
 	int s = splnet();
@@ -1246,6 +1305,7 @@
 		xen_rmb();
 
 		gop = xstart_gop_copy;
+		abort = false;
 		for (i = 0; !IFQ_IS_EMPTY(&ifp->if_snd);) {
 			XENPRINTF(("have a packet\n"));
 			IFQ_POLL(&ifp->if_snd, m);
@@ -1261,69 +1321,30 @@
 				    "0x%x\n",
 				    req_prod, xneti->xni_rxring.req_cons,
 				    resp_prod));
-				ifp->if_timer = 1;
+				abort = true;
 				break;
 			}
 			if (__predict_false(i == NB_XMIT_PAGES_BATCH))
 				break; /* we filled the array */
-			switch (m->m_flags & (M_EXT|M_EXT_CLUSTER)) {
-			case M_EXT|M_EXT_CLUSTER:
-				KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
-				xmit_pa = m->m_ext.ext_paddr;
-				offset = m->m_data - m->m_ext.ext_buf;
-				break;
-			case 0:
-				KASSERT(m->m_paddr != M_PADDR_INVALID);
-				xmit_pa = m->m_paddr;
-				offset = M_BUFOFFSET(m) +
-				    (m->m_data - M_BUFADDR(m));
-				break;
-			default:
-				if (__predict_false(
-				    !pmap_extract(pmap_kernel(),
-				    (vaddr_t)m->m_data, &xmit_pa))) {
-					panic("xennet_start: no pa");
-				}
-				offset = 0;
-				break;
-			}
-			offset += (xmit_pa & ~PG_FRAME);
-			xmit_pa = (xmit_pa & PG_FRAME);
+
+			xennetback_mbuf_addr(m, &xmit_pa, &offset);
 			if (m->m_pkthdr.len != m->m_len ||
 			    (offset + m->m_pkthdr.len) > PAGE_SIZE) {
-				MGETHDR(new_m, M_DONTWAIT, MT_DATA);
+				new_m = xennetback_copymbuf(m);
 				if (__predict_false(new_m == NULL)) {
-					printf("%s: cannot allocate new mbuf\n",
-					    ifp->if_xname);
+					static struct timeval lasttime;
+					if (ratecheck(&lasttime, &xni_pool_errintvl))
+						printf("%s: cannot allocate new mbuf\n",
+						    ifp->if_xname);
+					abort = true;
 					break;
-				}
-				if (m->m_pkthdr.len > MHLEN) {
-					MCLGET(new_m, M_DONTWAIT);
-					if (__predict_false(
-					    (new_m->m_flags & M_EXT) == 0)) {
-						XENPRINTF((
-						    "%s: no mbuf cluster\n",
-						    ifp->if_xname));
-						m_freem(new_m);
-						break;
-					}
-					xmit_pa = new_m->m_ext.ext_paddr;
-					offset = new_m->m_data -
-					    new_m->m_ext.ext_buf;
 				} else {
-					xmit_pa = new_m->m_paddr;
-					offset = M_BUFOFFSET(new_m) +
-					    (new_m->m_data - M_BUFADDR(new_m));
+					IFQ_DEQUEUE(&ifp->if_snd, m);
+					m_freem(m);
+					m = new_m;
+					xennetback_mbuf_addr(m,
+					    &xmit_pa, &offset);
 				}
-				offset += (xmit_pa & ~PG_FRAME);
-				xmit_pa = (xmit_pa & PG_FRAME);
-				m_copydata(m, 0, m->m_pkthdr.len,
-				    mtod(new_m, void *));
-				new_m->m_len = new_m->m_pkthdr.len =
-				    m->m_pkthdr.len;
-				IFQ_DEQUEUE(&ifp->if_snd, m);
-				m_freem(m);
-				m = new_m;
 			} else {
 				IFQ_DEQUEUE(&ifp->if_snd, m);
 			}
@@ -1421,9 +1442,10 @@
 		 * here, as the frontend doesn't notify when adding
 		 * requests anyway
 		 */
-		if (__predict_false(
+		if (__predict_false(abort ||
 		    !RING_HAS_UNCONSUMED_REQUESTS(&xneti->xni_rxring))) {
 			/* ring full */
+			ifp->if_timer = 1;
 			break;
 		}
 	}
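
A note on why xennetback_copymbuf() insists that the copied data not cross a
page boundary: each gnttab_copy_t in the batch describes a copy from a single
source frame, so an mbuf whose data spans two pages cannot be expressed as one
operation. The condensed illustration below shows how the TX loop fills one
descriptor from the values produced by xennetback_mbuf_addr(). The field names
come from the Xen grant-table ABI; otherend_id and rxreq are illustrative
stand-ins for the frontend's domain id and the pending RX request, and the
exact code in the file may differ from this sketch:

	/* One grant-copy descriptor per packet; data must fit in one page. */
	gop->flags = GNTCOPY_dest_gref;
	gop->source.domid = DOMID_SELF;
	gop->source.offset = offset;		/* from xennetback_mbuf_addr() */
	gop->source.u.gmfn = xpmap_ptom(xmit_pa) >> PAGE_SHIFT; /* machine frame */
	gop->dest.domid = otherend_id;		/* the frontend domain */
	gop->dest.offset = 0;
	gop->dest.u.ref = rxreq->gref;		/* grant ref from the RX request */
	gop->len = m->m_pkthdr.len;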
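
The bottom of the diff is the other half of the "don't loop forever" change:
on a transient failure (ring full, or the mbuf allocation failure above) the
loop now sets `abort', breaks out once the current batch is flushed, and arms
if_timer. NetBSD's if_slowtimo() decrements if_timer once per second and
invokes the interface watchdog when it reaches zero, so the retry happens
later instead of spinning at splnet. The driver's actual watchdog routine is
not part of this diff; the body below is an assumption about what such a
retry hook typically looks like:

	static void
	xennetback_ifwatchdog(struct ifnet *ifp)
	{
		/*
		 * A previous TX batch was aborted; kick the start routine
		 * again rather than busy-waiting in the soft-start loop.
		 */
		xennetback_ifstart(ifp);
	}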