Tue Mar 13 16:38:28 2018 UTC
Pull up following revision(s) (requested by hannken in ticket #1516):
	sys/dev/fss.c: 1.101-1.103
Bounds check against media size for non-persistent snapshots.
--
Treat partial read from backing store as I/O error.
--
Pass residual back to b_resid for persistent snapshots.


(snj)
diff -r1.81.4.4 -r1.81.4.5 src/sys/dev/fss.c
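
The change summaries above are easiest to grasp with a small example. Below is a user-space C sketch, not the driver code itself, of the two patterns involved: a request that falls outside the medium is rejected or truncated before any I/O is issued, and a read that succeeds but transfers fewer bytes than requested is promoted to EIO, with the untransferred residual handed back to the caller the way the driver now hands it back in bp->b_resid. The file name backing.img, the helper names and the request sizes are made up for illustration; bounds_check() only mirrors the intent of the kernel's bounds_check_with_mediasize(), not its exact interface.

#include <sys/stat.h>
#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/*
 * Bounds check a request of *len bytes at byte offset "off" against a
 * medium of "mediasize" bytes: reject a request that starts beyond the
 * end, otherwise clamp *len so the request ends on the medium.
 */
static int
bounds_check(off_t off, size_t *len, uint64_t mediasize)
{
	if (off < 0 || (uint64_t)off >= mediasize)
		return -1;
	if ((uint64_t)off + *len > mediasize)
		*len = mediasize - (uint64_t)off;
	return 0;
}

/*
 * Read "len" bytes at "off" from the backing file.  On return *resid is
 * the number of bytes NOT transferred; a partial read is promoted to EIO,
 * in the spirit of the checks added after vn_rdwr() and biowait().
 */
static int
backing_read(int fd, void *buf, size_t len, off_t off, size_t *resid)
{
	ssize_t n = pread(fd, buf, len, off);

	if (n < 0) {
		*resid = len;			/* nothing transferred */
		return errno;
	}
	*resid = len - (size_t)n;
	if (*resid != 0)
		return EIO;			/* partial read counts as an I/O error */
	return 0;
}

int
main(void)
{
	char buf[4096];
	size_t len = sizeof(buf), resid;
	off_t off = 0;
	struct stat st;
	int fd, error;

	if ((fd = open("backing.img", O_RDONLY)) == -1 || fstat(fd, &st) == -1) {
		perror("backing.img");
		return EXIT_FAILURE;
	}

	if (bounds_check(off, &len, (uint64_t)st.st_size) == -1) {
		fprintf(stderr, "request outside the medium\n");
		return EXIT_FAILURE;
	}

	error = backing_read(fd, buf, len, off, &resid);
	if (error != 0)
		fprintf(stderr, "read failed: %s (resid %zu of %zu)\n",
		    strerror(error), resid, len);
	else
		printf("read %zu bytes\n", len);

	close(fd);
	return error ? EXIT_FAILURE : EXIT_SUCCESS;
}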

cvs diff -r1.81.4.4 -r1.81.4.5 src/sys/dev/fss.c

--- src/sys/dev/fss.c	2016/08/27 14:47:47	1.81.4.4
+++ src/sys/dev/fss.c	2018/03/13 16:38:28	1.81.4.5
@@ -1,14 +1,14 @@
-/*	$NetBSD: fss.c,v 1.81.4.4 2016/08/27 14:47:47 bouyer Exp $	*/
+/*	$NetBSD: fss.c,v 1.81.4.5 2018/03/13 16:38:28 snj Exp $	*/
 
 /*-
  * Copyright (c) 2003 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Juergen Hannken-Illjes.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -26,27 +26,27 @@
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 /*
  * File system snapshot disk driver.
  *
  * Block/character interface to the snapshot of a mounted file system.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: fss.c,v 1.81.4.4 2016/08/27 14:47:47 bouyer Exp $");
+__KERNEL_RCSID(0, "$NetBSD: fss.c,v 1.81.4.5 2018/03/13 16:38:28 snj Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/namei.h>
 #include <sys/proc.h>
 #include <sys/errno.h>
 #include <sys/malloc.h>
 #include <sys/buf.h>
 #include <sys/ioctl.h>
 #include <sys/disklabel.h>
 #include <sys/device.h>
 #include <sys/disk.h>
 #include <sys/stat.h>
@@ -80,27 +80,27 @@ dev_type_size(fss_size);
 static void fss_unmount_hook(struct mount *);
 static int fss_copy_on_write(void *, struct buf *, bool);
 static inline void fss_error(struct fss_softc *, const char *);
 static int fss_create_files(struct fss_softc *, struct fss_set *,
     off_t *, struct lwp *);
 static int fss_create_snapshot(struct fss_softc *, struct fss_set *,
     struct lwp *);
 static int fss_delete_snapshot(struct fss_softc *, struct lwp *);
 static int fss_softc_alloc(struct fss_softc *);
 static void fss_softc_free(struct fss_softc *);
 static int fss_read_cluster(struct fss_softc *, u_int32_t);
 static void fss_bs_thread(void *);
 static int fss_bs_io(struct fss_softc *, fss_io_type,
-    u_int32_t, off_t, int, void *);
+    u_int32_t, off_t, int, void *, size_t *);
 static u_int32_t *fss_bs_indir(struct fss_softc *, u_int32_t);
 
 static kmutex_t fss_device_lock;	/* Protect all units. */
 static int fss_num_attached = 0;	/* Number of attached devices. */
 static struct vfs_hooks fss_vfs_hooks = {
 	.vh_unmount = fss_unmount_hook
 };
 
 const struct bdevsw fss_bdevsw = {
 	fss_open, fss_close, fss_strategy, fss_ioctl,
 	fss_dump, fss_size, D_DISK | D_MPSAFE
 };
 
@@ -256,40 +256,46 @@ restart:
 
 	return error;
 }
 
 void
 fss_strategy(struct buf *bp)
 {
 	const bool write = ((bp->b_flags & B_READ) != B_READ);
 	struct fss_softc *sc = device_lookup_private(&fss_cd, minor(bp->b_dev));
 
 	mutex_enter(&sc->sc_slock);
 
 	if (write || !FSS_ISVALID(sc)) {
-
-		mutex_exit(&sc->sc_slock);
-
 		bp->b_error = (write ? EROFS : ENXIO);
-		bp->b_resid = bp->b_bcount;
-		biodone(bp);
-		return;
+		goto done;
 	}
+	/* Check bounds for non-persistent snapshots. */
+	if ((sc->sc_flags & FSS_PERSISTENT) == 0 &&
+	    bounds_check_with_mediasize(bp, DEV_BSIZE,
+	    btodb(FSS_CLTOB(sc, sc->sc_clcount - 1) + sc->sc_clresid)) <= 0)
+		goto done;
 
 	bp->b_rawblkno = bp->b_blkno;
 	bufq_put(sc->sc_bufq, bp);
 	cv_signal(&sc->sc_work_cv);
 
 	mutex_exit(&sc->sc_slock);
+	return;
+
+done:
+	mutex_exit(&sc->sc_slock);
+	bp->b_resid = bp->b_bcount;
+	biodone(bp);
 }
 
 int
 fss_read(dev_t dev, struct uio *uio, int flags)
 {
 	return physio(fss_strategy, NULL, dev, B_READ, minphys, uio);
 }
 
 int
 fss_write(dev_t dev, struct uio *uio, int flags)
 {
 	return physio(fss_strategy, NULL, dev, B_WRITE, minphys, uio);
 }
@@ -983,59 +989,61 @@ restart:
 			bp = mbp;
 		else {
 			bp = getiobuf(NULL, true);
 			nestiobuf_setup(mbp, bp, offset, len);
 		}
 		bp->b_lblkno = 0;
 		bp->b_blkno = dblk;
 		bdev_strategy(bp);
 		dblk += btodb(len);
 		offset += len;
 		todo -= len;
 	}
 	error = biowait(mbp);
+	if (error == 0 && mbp->b_resid != 0)
+		error = EIO;
 	putiobuf(mbp);
 
 	mutex_enter(&sc->sc_slock);
 	scp->fc_type = (error ? FSS_CACHE_FREE : FSS_CACHE_VALID);
 	cv_broadcast(&scp->fc_state_cv);
 	if (error == 0) {
 		setbit(sc->sc_copied, scp->fc_cluster);
 		cv_signal(&sc->sc_work_cv);
 	}
 	mutex_exit(&sc->sc_slock);
 
 	return error;
 }
 
 /*
  * Read/write clusters from/to backing store.
  * For persistent snapshots must be called with cl == 0. off is the
  * offset into the snapshot.
  */
 static int
 fss_bs_io(struct fss_softc *sc, fss_io_type rw,
-    u_int32_t cl, off_t off, int len, void *data)
+    u_int32_t cl, off_t off, int len, void *data, size_t *resid)
 {
 	int error;
 
 	off += FSS_CLTOB(sc, cl);
 
 	vn_lock(sc->sc_bs_vp, LK_EXCLUSIVE|LK_RETRY);
 
 	error = vn_rdwr((rw == FSS_READ ? UIO_READ : UIO_WRITE), sc->sc_bs_vp,
 	    data, len, off, UIO_SYSSPACE,
 	    IO_ADV_ENCODE(POSIX_FADV_NOREUSE) | IO_NODELOCKED,
-	    sc->sc_bs_lwp->l_cred, NULL, NULL);
+	    sc->sc_bs_lwp->l_cred, resid, NULL);
 	if (error == 0) {
 		mutex_enter(sc->sc_bs_vp->v_interlock);
 		error = VOP_PUTPAGES(sc->sc_bs_vp, trunc_page(off),
 		    round_page(off+len), PGO_CLEANIT | PGO_FREE | PGO_SYNCIO);
 	}
 
 	VOP_UNLOCK(sc->sc_bs_vp);
 
 	return error;
 }
 
 /*
  * Get a pointer to the indirect slot for this cluster.
@@ -1044,57 +1052,58 @@ static u_int32_t *
 fss_bs_indir(struct fss_softc *sc, u_int32_t cl)
 {
 	u_int32_t icl;
 	int ioff;
 
 	icl = cl/(FSS_CLSIZE(sc)/sizeof(u_int32_t));
 	ioff = cl%(FSS_CLSIZE(sc)/sizeof(u_int32_t));
 
 	if (sc->sc_indir_cur == icl)
 		return &sc->sc_indir_data[ioff];
 
 	if (sc->sc_indir_dirty) {
 		if (fss_bs_io(sc, FSS_WRITE, sc->sc_indir_cur, 0,
-		    FSS_CLSIZE(sc), (void *)sc->sc_indir_data) != 0)
+		    FSS_CLSIZE(sc), (void *)sc->sc_indir_data, NULL) != 0)
 			return NULL;
 		setbit(sc->sc_indir_valid, sc->sc_indir_cur);
 	}
 
 	sc->sc_indir_dirty = 0;
 	sc->sc_indir_cur = icl;
 
 	if (isset(sc->sc_indir_valid, sc->sc_indir_cur)) {
 		if (fss_bs_io(sc, FSS_READ, sc->sc_indir_cur, 0,
-		    FSS_CLSIZE(sc), (void *)sc->sc_indir_data) != 0)
+		    FSS_CLSIZE(sc), (void *)sc->sc_indir_data, NULL) != 0)
 			return NULL;
 	} else
 		memset(sc->sc_indir_data, 0, FSS_CLSIZE(sc));
 
 	return &sc->sc_indir_data[ioff];
 }
 
 /*
  * The kernel thread (one for every active snapshot).
  *
  * After wakeup it cleans the cache and runs the I/O requests.
  */
 static void
 fss_bs_thread(void *arg)
 {
 	bool thread_idle, is_valid;
 	int error, i, todo, len, crotor, is_read;
 	long off;
 	char *addr;
 	u_int32_t c, cl, ch, *indirp;
+	size_t resid;
 	struct buf *bp, *nbp;
 	struct fss_softc *sc;
 	struct fss_cache *scp, *scl;
 
 	sc = arg;
 	scl = sc->sc_cache+sc->sc_cache_size;
 	crotor = 0;
 	thread_idle = false;
 
 	mutex_enter(&sc->sc_slock);
 
 	for (;;) {
 		if (thread_idle)
@@ -1111,55 +1120,59 @@ fss_bs_thread(void *arg)
 
 		if (sc->sc_flags & FSS_PERSISTENT) {
 			if ((bp = bufq_get(sc->sc_bufq)) == NULL)
 				continue;
 			is_valid = FSS_ISVALID(sc);
 			is_read = (bp->b_flags & B_READ);
 			thread_idle = false;
 			mutex_exit(&sc->sc_slock);
 
 			if (is_valid) {
 				disk_busy(sc->sc_dkdev);
 				error = fss_bs_io(sc, FSS_READ, 0,
 				    dbtob(bp->b_blkno), bp->b_bcount,
-				    bp->b_data);
+				    bp->b_data, &resid);
+				if (error)
+					resid = bp->b_bcount;
 				disk_unbusy(sc->sc_dkdev,
 				    (error ? 0 : bp->b_bcount), is_read);
-			} else
+			} else {
 				error = ENXIO;
+				resid = bp->b_bcount;
+			}
 
 			bp->b_error = error;
-			bp->b_resid = (error ? bp->b_bcount : 0);
+			bp->b_resid = resid;
 			biodone(bp);
 
 			mutex_enter(&sc->sc_slock);
 			continue;
 		}
 
 		/*
 		 * Clean the cache
 		 */
 		for (i = 0; i < sc->sc_cache_size; i++) {
 			crotor = (crotor + 1) % sc->sc_cache_size;
 			scp = sc->sc_cache + crotor;
 			if (scp->fc_type != FSS_CACHE_VALID)
 				continue;
 			mutex_exit(&sc->sc_slock);
 
 			thread_idle = false;
 			indirp = fss_bs_indir(sc, scp->fc_cluster);
 			if (indirp != NULL) {
 				error = fss_bs_io(sc, FSS_WRITE, sc->sc_clnext,
-				    0, FSS_CLSIZE(sc), scp->fc_data);
+				    0, FSS_CLSIZE(sc), scp->fc_data, NULL);
 			} else
 				error = EIO;
 
 			mutex_enter(&sc->sc_slock);
 			if (error == 0) {
 				*indirp = sc->sc_clnext++;
 				sc->sc_indir_dirty = 1;
 			} else
 				fss_error(sc, "write error on backing store");
 
 			scp->fc_type = FSS_CACHE_FREE;
 			cv_signal(&sc->sc_cache_cv);
 			break;
@@ -1207,26 +1220,28 @@ fss_bs_thread(void *arg)
 			nbp = getiobuf(NULL, true);
 			nbp->b_flags = B_READ;
 			nbp->b_resid = nbp->b_bcount = bp->b_bcount;
 			nbp->b_bufsize = bp->b_bcount;
 			nbp->b_data = bp->b_data;
 			nbp->b_blkno = bp->b_blkno;
 			nbp->b_lblkno = 0;
 			nbp->b_dev = sc->sc_bdev;
 			SET(nbp->b_cflags, BC_BUSY);	/* mark buffer busy */
 
 			bdev_strategy(nbp);
 
 			error = biowait(nbp);
+			if (error == 0 && nbp->b_resid != 0)
+				error = EIO;
 			if (error != 0) {
 				bp->b_resid = bp->b_bcount;
 				bp->b_error = nbp->b_error;
 				disk_unbusy(sc->sc_dkdev, 0, is_read);
 				biodone(bp);
 			}
 			putiobuf(nbp);
 
 			mutex_enter(&sc->sc_slock);
 			break;
 		}
 		if (error)
 			continue;
@@ -1258,28 +1273,28 @@ fss_bs_thread(void *arg)
 					    scp->fc_cluster == c)
 						break;
 				if (scp < scl)
 					memcpy(addr, (char *)scp->fc_data+off,
 					    len);
 				else
 					memset(addr, 0, len);
 				continue;
 			}
 
 			/*
 			 * Read from backing store.
 			 */
-			error =
-			    fss_bs_io(sc, FSS_READ, *indirp, off, len, addr);
+			error = fss_bs_io(sc, FSS_READ,
+			    *indirp, off, len, addr, NULL);
 
 			mutex_enter(&sc->sc_slock);
 			if (error) {
 				bp->b_resid = bp->b_bcount;
 				bp->b_error = error;
 				break;
 			}
 		}
 		mutex_exit(&sc->sc_slock);
 
 		disk_unbusy(sc->sc_dkdev, (error ? 0 : bp->b_bcount), is_read);
 		biodone(bp);
 