Fri Jul 23 00:26:20 2021 UTC
getiobuf() can return NULL if there are no IO buffers available.
RAIDframe can't deal with that, so create a dedicated pool of buffers
to use for IO.  PR_WAITOK is fine here, as we pre-allocate more than
we need, guaranteeing that IO can make progress.  Tuning of the pool
is still to come.


(oster)
diff -r1.58 -r1.59 src/sys/dev/raidframe/rf_diskqueue.c
diff -r1.35 -r1.36 src/sys/dev/raidframe/rf_netbsd.h
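
A minimal sketch of the pool(9) pattern this commit moves to (the pool
name and reserve size below are hypothetical, not the committed
values): with PR_WAITOK, pool_get(9) sleeps until an item is available
instead of returning NULL, which is what lets the NULL checks in the
diff below be dropped.

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/pool.h>

static struct pool example_pl;		/* hypothetical pool */

static void
example_pool_setup(void)
{
	/* Prime the pool with a reserve so pool_get(..., PR_WAITOK)
	   can always make progress once IO is underway. */
	pool_init(&example_pl, sizeof(struct buf), 0, 0, 0,
	    "example_pl", NULL, IPL_BIO);
	pool_prime(&example_pl, 64);	/* pre-allocate 64 items */
}

static struct buf *
example_buf_get(void)
{
	/* PR_WAITOK sleeps rather than failing: never returns NULL. */
	return pool_get(&example_pl, PR_WAITOK | PR_ZERO);
}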

cvs diff -r1.58 -r1.59 src/sys/dev/raidframe/rf_diskqueue.c
--- src/sys/dev/raidframe/rf_diskqueue.c 2020/06/19 19:32:03 1.58
+++ src/sys/dev/raidframe/rf_diskqueue.c 2021/07/23 00:26:19 1.59
@@ -1,4 +1,4 @@
-/*	$NetBSD: rf_diskqueue.c,v 1.58 2020/06/19 19:32:03 jdolecek Exp $	*/
+/*	$NetBSD: rf_diskqueue.c,v 1.59 2021/07/23 00:26:19 oster Exp $	*/
 /*
  * Copyright (c) 1995 Carnegie-Mellon University.
  * All rights reserved.
@@ -66,7 +66,7 @@
  ****************************************************************************/
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: rf_diskqueue.c,v 1.58 2020/06/19 19:32:03 jdolecek Exp $");
+__KERNEL_RCSID(0, "$NetBSD: rf_diskqueue.c,v 1.59 2021/07/23 00:26:19 oster Exp $");
 
 #include <dev/raidframe/raidframevar.h>
 
@@ -84,6 +84,8 @@
 #include "rf_fifo.h"
 #include "rf_kintf.h"
 
+#include <sys/buf.h>
+
 static void rf_ShutdownDiskQueueSystem(void *);
 
 #ifndef RF_DEBUG_DISKQUEUE
@@ -147,11 +149,16 @@
 };
 #define NUM_DISK_QUEUE_TYPES (sizeof(diskqueuesw)/sizeof(RF_DiskQueueSW_t))
 
+
 #define RF_MAX_FREE_DQD 256
 #define RF_MIN_FREE_DQD  64
 
-#include <sys/buf.h>
+/* XXX: scale these... */
+#define RF_MAX_FREE_BUFIO 256
+#define RF_MIN_FREE_BUFIO  64
 
+
+
 /* configures a single disk queue */
 
 static void
@@ -189,6 +196,7 @@
 rf_ShutdownDiskQueueSystem(void *ignored)
 {
 	pool_destroy(&rf_pools.dqd);
+	pool_destroy(&rf_pools.bufio);
 }
 
 int
@@ -197,6 +205,8 @@
 
 	rf_pool_init(&rf_pools.dqd, sizeof(RF_DiskQueueData_t),
 		     "rf_dqd_pl", RF_MIN_FREE_DQD, RF_MAX_FREE_DQD);
+	rf_pool_init(&rf_pools.bufio, sizeof(buf_t),
+		     "rf_bufio_pl", RF_MIN_FREE_BUFIO, RF_MAX_FREE_BUFIO);
 	rf_ShutdownCreate(listp, rf_ShutdownDiskQueueSystem, NULL);
 
 	return (0);
@@ -367,19 +377,20 @@
 {
 	RF_DiskQueueData_t *p;
 
-	p = pool_get(&rf_pools.dqd, waitflag | PR_ZERO);
-	if (p == NULL)
-		return (NULL);
+	p = pool_get(&rf_pools.dqd, PR_WAITOK | PR_ZERO);
+	KASSERT(p != NULL);
 
-	if (waitflag == PR_WAITOK) {
-		p->bp = getiobuf(NULL, true);
-	} else {
-		p->bp = getiobuf(NULL, false);
-	}
-	if (p->bp == NULL) {
-		pool_put(&rf_pools.dqd, p);
-		return (NULL);
-	}
+	/* Obtain a buffer from our own pool.  It is possible for the
+	   regular getiobuf() to run out of memory and return NULL.
+	   We need to guarantee that never happens, as RAIDframe
+	   doesn't have a good way to recover if memory allocation
+	   fails here.
+	*/
+	p->bp = pool_get(&rf_pools.bufio, PR_WAITOK | PR_ZERO);
+	KASSERT(p->bp != NULL);
+	
+	buf_init(p->bp);
+		
 	SET(p->bp->b_cflags, BC_BUSY);	/* mark buffer busy */
 	if (mbp) {
 		SET(p->bp->b_flags, mbp->b_flags & rf_b_pass);
@@ -405,6 +416,6 @@
 void
 rf_FreeDiskQueueData(RF_DiskQueueData_t *p)
 {
-	putiobuf(p->bp);
+	pool_put(&rf_pools.bufio, p->bp);
 	pool_put(&rf_pools.dqd, p);
 }
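
Condensed into a pair of hypothetical helpers (not the committed code
verbatim, and omitting the mbp flag copying), the new buffer lifecycle
in rf_CreateDiskQueueData()/rf_FreeDiskQueueData() is roughly:

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/pool.h>

#include "rf_netbsd.h"		/* for the global rf_pools */

static struct buf *
rf_bufio_get(void)
{
	struct buf *bp;

	/* Sleeps until an item is free; cannot return NULL. */
	bp = pool_get(&rf_pools.bufio, PR_WAITOK | PR_ZERO);
	buf_init(bp);			/* set up buffer locks/condvars */
	SET(bp->b_cflags, BC_BUSY);	/* mark buffer busy */
	return bp;
}

static void
rf_bufio_put(struct buf *bp)
{
	pool_put(&rf_pools.bufio, bp);
}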

cvs diff -r1.35 -r1.36 src/sys/dev/raidframe/rf_netbsd.h
--- src/sys/dev/raidframe/rf_netbsd.h 2020/06/19 19:29:39 1.35
+++ src/sys/dev/raidframe/rf_netbsd.h 2021/07/23 00:26:19 1.36
@@ -1,4 +1,4 @@
-/*	$NetBSD: rf_netbsd.h,v 1.35 2020/06/19 19:29:39 jdolecek Exp $	*/
+/*	$NetBSD: rf_netbsd.h,v 1.36 2021/07/23 00:26:19 oster Exp $	*/
 
 /*-
  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
@@ -61,6 +61,7 @@
 	struct pool asm_hdr;     /* Access Stripe Map Header */
 	struct pool asmap;       /* Access Stripe Map */
 	struct pool asmhle;      /* Access Stripe Map Header List Elements */
+	struct pool bufio;       /* Buffer IO Pool */
 	struct pool callbackf;   /* Callback function descriptors */
 	struct pool callbackv;   /* Callback value descriptors */
 	struct pool dagh;        /* DAG headers */
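
For reference, the rf_pool_init() helper called in the rf_diskqueue.c
diff lives in rf_netbsdkintf.c and wraps pool(9) setup roughly as
sketched below; the exact body may differ, but the min/max arguments
become the pool's pre-primed reserve and high-water mark.

/* A rough sketch of rf_pool_init(); see rf_netbsdkintf.c for the
   authoritative version. */
void
rf_pool_init(struct pool *p, size_t size, const char *w_chan,
	     size_t xmin, size_t xmax)
{
	pool_init(p, size, 0, 0, 0, w_chan, NULL, IPL_BIO);
	pool_sethiwat(p, xmax);		/* cap cached items */
	pool_prime(p, xmin);		/* pre-allocate the reserve */
	pool_setlowat(p, xmin);		/* keep the reserve topped up */
}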