Wed Aug 29 09:04:03 2018 UTC
Convert the flags FSS_ACTIVE and FSS_ERROR into a new member sc_state
with the states FSS_IDLE, FSS_ACTIVE and FSS_ERROR.

No functional change intended.


(hannken)
diff -r1.104 -r1.105 src/sys/dev/fss.c
diff -r1.29 -r1.30 src/sys/dev/fssvar.h
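
In short, the change replaces the two flag bits with a single three-valued state
member and rewrites the old flag tests as state comparisons. A minimal userland
sketch of that mapping (the type and constant names are taken from the diffs
below; the softc struct, the OLD_* macros and the main() harness are illustrative
only, not driver code):

#include <stdio.h>

#define OLD_FSS_ACTIVE	0x01	/* was: snapshot is active */
#define OLD_FSS_ERROR	0x02	/* was: I/O error occurred */

typedef enum {
	FSS_IDLE,	/* Device is unconfigured */
	FSS_ACTIVE,	/* Device is configured */
	FSS_ERROR	/* Device had errors */
} fss_state_t;

struct softc {
	int		sc_flags;	/* old representation */
	fss_state_t	sc_state;	/* new representation */
};

int
main(void)
{
	struct softc sc = { .sc_flags = OLD_FSS_ACTIVE, .sc_state = FSS_ACTIVE };

	/* "is configured": (sc_flags & FSS_ACTIVE) != 0  ->  sc_state != FSS_IDLE */
	printf("configured: %d -> %d\n",
	    (sc.sc_flags & OLD_FSS_ACTIVE) != 0, sc.sc_state != FSS_IDLE);

	/* FSS_ISVALID(sc): (sc_flags & (FSS_ACTIVE|FSS_ERROR)) == FSS_ACTIVE
	 *              ->  sc_state == FSS_ACTIVE */
	printf("valid:      %d -> %d\n",
	    (sc.sc_flags & (OLD_FSS_ACTIVE | OLD_FSS_ERROR)) == OLD_FSS_ACTIVE,
	    sc.sc_state == FSS_ACTIVE);
	return 0;
}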

cvs diff -r1.104 -r1.105 src/sys/dev/fss.c

--- src/sys/dev/fss.c 2018/01/23 22:42:29 1.104
+++ src/sys/dev/fss.c 2018/08/29 09:04:03 1.105
@@ -1,14 +1,14 @@
1/* $NetBSD: fss.c,v 1.104 2018/01/23 22:42:29 pgoyette Exp $ */ 1/* $NetBSD: fss.c,v 1.105 2018/08/29 09:04:03 hannken Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2003 The NetBSD Foundation, Inc. 4 * Copyright (c) 2003 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Juergen Hannken-Illjes. 8 * by Juergen Hannken-Illjes.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -26,27 +26,27 @@
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32/* 32/*
33 * File system snapshot disk driver. 33 * File system snapshot disk driver.
34 * 34 *
35 * Block/character interface to the snapshot of a mounted file system. 35 * Block/character interface to the snapshot of a mounted file system.
36 */ 36 */
37 37
38#include <sys/cdefs.h> 38#include <sys/cdefs.h>
39__KERNEL_RCSID(0, "$NetBSD: fss.c,v 1.104 2018/01/23 22:42:29 pgoyette Exp $"); 39__KERNEL_RCSID(0, "$NetBSD: fss.c,v 1.105 2018/08/29 09:04:03 hannken Exp $");
40 40
41#include <sys/param.h> 41#include <sys/param.h>
42#include <sys/systm.h> 42#include <sys/systm.h>
43#include <sys/namei.h> 43#include <sys/namei.h>
44#include <sys/proc.h> 44#include <sys/proc.h>
45#include <sys/errno.h> 45#include <sys/errno.h>
46#include <sys/malloc.h> 46#include <sys/malloc.h>
47#include <sys/buf.h> 47#include <sys/buf.h>
48#include <sys/ioctl.h> 48#include <sys/ioctl.h>
49#include <sys/disklabel.h> 49#include <sys/disklabel.h>
50#include <sys/device.h> 50#include <sys/device.h>
51#include <sys/disk.h> 51#include <sys/disk.h>
52#include <sys/stat.h> 52#include <sys/stat.h>
@@ -163,28 +163,32 @@ fss_attach(device_t parent, device_t sel
163 disk_init(sc->sc_dkdev, device_xname(self), NULL); 163 disk_init(sc->sc_dkdev, device_xname(self), NULL);
164 if (!pmf_device_register(self, NULL, NULL)) 164 if (!pmf_device_register(self, NULL, NULL))
165 aprint_error_dev(self, "couldn't establish power handler\n"); 165 aprint_error_dev(self, "couldn't establish power handler\n");
166 166
167 if (fss_num_attached++ == 0) 167 if (fss_num_attached++ == 0)
168 vfs_hooks_attach(&fss_vfs_hooks); 168 vfs_hooks_attach(&fss_vfs_hooks);
169} 169}
170 170
171static int 171static int
172fss_detach(device_t self, int flags) 172fss_detach(device_t self, int flags)
173{ 173{
174 struct fss_softc *sc = device_private(self); 174 struct fss_softc *sc = device_private(self);
175 175
176 if (sc->sc_flags & FSS_ACTIVE) 176 mutex_enter(&sc->sc_slock);
 177 if (sc->sc_state != FSS_IDLE) {
 178 mutex_exit(&sc->sc_slock);
177 return EBUSY; 179 return EBUSY;
 180 }
 181 mutex_exit(&sc->sc_slock);
178 182
179 if (--fss_num_attached == 0) 183 if (--fss_num_attached == 0)
180 vfs_hooks_detach(&fss_vfs_hooks); 184 vfs_hooks_detach(&fss_vfs_hooks);
181 185
182 pmf_device_deregister(self); 186 pmf_device_deregister(self);
183 mutex_destroy(&sc->sc_slock); 187 mutex_destroy(&sc->sc_slock);
184 mutex_destroy(&sc->sc_lock); 188 mutex_destroy(&sc->sc_lock);
185 cv_destroy(&sc->sc_work_cv); 189 cv_destroy(&sc->sc_work_cv);
186 cv_destroy(&sc->sc_cache_cv); 190 cv_destroy(&sc->sc_cache_cv);
187 bufq_drain(sc->sc_bufq); 191 bufq_drain(sc->sc_bufq);
188 bufq_free(sc->sc_bufq); 192 bufq_free(sc->sc_bufq);
189 disk_destroy(sc->sc_dkdev); 193 disk_destroy(sc->sc_dkdev);
190 free(sc->sc_dkdev, M_DEVBUF); 194 free(sc->sc_dkdev, M_DEVBUF);
@@ -205,26 +209,27 @@ fss_open(dev_t dev, int flags, int mode,
205 209
206 sc = device_lookup_private(&fss_cd, minor(dev)); 210 sc = device_lookup_private(&fss_cd, minor(dev));
207 if (sc == NULL) { 211 if (sc == NULL) {
208 cf = malloc(sizeof(*cf), M_DEVBUF, M_WAITOK); 212 cf = malloc(sizeof(*cf), M_DEVBUF, M_WAITOK);
209 cf->cf_name = fss_cd.cd_name; 213 cf->cf_name = fss_cd.cd_name;
210 cf->cf_atname = fss_cd.cd_name; 214 cf->cf_atname = fss_cd.cd_name;
211 cf->cf_unit = minor(dev); 215 cf->cf_unit = minor(dev);
212 cf->cf_fstate = FSTATE_STAR; 216 cf->cf_fstate = FSTATE_STAR;
213 sc = device_private(config_attach_pseudo(cf)); 217 sc = device_private(config_attach_pseudo(cf));
214 if (sc == NULL) { 218 if (sc == NULL) {
215 mutex_exit(&fss_device_lock); 219 mutex_exit(&fss_device_lock);
216 return ENOMEM; 220 return ENOMEM;
217 } 221 }
 222 sc->sc_state = FSS_IDLE;
218 } 223 }
219 224
220 mutex_enter(&sc->sc_slock); 225 mutex_enter(&sc->sc_slock);
221 226
222 sc->sc_flags |= mflag; 227 sc->sc_flags |= mflag;
223 228
224 mutex_exit(&sc->sc_slock); 229 mutex_exit(&sc->sc_slock);
225 mutex_exit(&fss_device_lock); 230 mutex_exit(&fss_device_lock);
226 231
227 return 0; 232 return 0;
228} 233}
229 234
230int 235int
@@ -236,60 +241,60 @@ fss_close(dev_t dev, int flags, int mode
236 241
237 mflag = (mode == S_IFCHR ? FSS_CDEV_OPEN : FSS_BDEV_OPEN); 242 mflag = (mode == S_IFCHR ? FSS_CDEV_OPEN : FSS_BDEV_OPEN);
238 error = 0; 243 error = 0;
239 244
240 mutex_enter(&fss_device_lock); 245 mutex_enter(&fss_device_lock);
241restart: 246restart:
242 mutex_enter(&sc->sc_slock); 247 mutex_enter(&sc->sc_slock);
243 if ((sc->sc_flags & (FSS_CDEV_OPEN|FSS_BDEV_OPEN)) != mflag) { 248 if ((sc->sc_flags & (FSS_CDEV_OPEN|FSS_BDEV_OPEN)) != mflag) {
244 sc->sc_flags &= ~mflag; 249 sc->sc_flags &= ~mflag;
245 mutex_exit(&sc->sc_slock); 250 mutex_exit(&sc->sc_slock);
246 mutex_exit(&fss_device_lock); 251 mutex_exit(&fss_device_lock);
247 return 0; 252 return 0;
248 } 253 }
249 if ((sc->sc_flags & FSS_ACTIVE) != 0 && 254 if (sc->sc_state != FSS_IDLE &&
250 (sc->sc_uflags & FSS_UNCONFIG_ON_CLOSE) != 0) { 255 (sc->sc_uflags & FSS_UNCONFIG_ON_CLOSE) != 0) {
251 sc->sc_uflags &= ~FSS_UNCONFIG_ON_CLOSE; 256 sc->sc_uflags &= ~FSS_UNCONFIG_ON_CLOSE;
252 mutex_exit(&sc->sc_slock); 257 mutex_exit(&sc->sc_slock);
253 error = fss_ioctl(dev, FSSIOCCLR, NULL, FWRITE, l); 258 error = fss_ioctl(dev, FSSIOCCLR, NULL, FWRITE, l);
254 goto restart; 259 goto restart;
255 } 260 }
256 if ((sc->sc_flags & FSS_ACTIVE) != 0) { 261 if (sc->sc_state != FSS_IDLE) {
257 mutex_exit(&sc->sc_slock); 262 mutex_exit(&sc->sc_slock);
258 mutex_exit(&fss_device_lock); 263 mutex_exit(&fss_device_lock);
259 return error; 264 return error;
260 } 265 }
261 266
262 KASSERT((sc->sc_flags & FSS_ACTIVE) == 0); 267 KASSERT(sc->sc_state == FSS_IDLE);
263 KASSERT((sc->sc_flags & (FSS_CDEV_OPEN|FSS_BDEV_OPEN)) == mflag); 268 KASSERT((sc->sc_flags & (FSS_CDEV_OPEN|FSS_BDEV_OPEN)) == mflag);
264 mutex_exit(&sc->sc_slock); 269 mutex_exit(&sc->sc_slock);
265 cf = device_cfdata(sc->sc_dev); 270 cf = device_cfdata(sc->sc_dev);
266 error = config_detach(sc->sc_dev, DETACH_QUIET); 271 error = config_detach(sc->sc_dev, DETACH_QUIET);
267 if (! error) 272 if (! error)
268 free(cf, M_DEVBUF); 273 free(cf, M_DEVBUF);
269 mutex_exit(&fss_device_lock); 274 mutex_exit(&fss_device_lock);
270 275
271 return error; 276 return error;
272} 277}
273 278
274void 279void
275fss_strategy(struct buf *bp) 280fss_strategy(struct buf *bp)
276{ 281{
277 const bool write = ((bp->b_flags & B_READ) != B_READ); 282 const bool write = ((bp->b_flags & B_READ) != B_READ);
278 struct fss_softc *sc = device_lookup_private(&fss_cd, minor(bp->b_dev)); 283 struct fss_softc *sc = device_lookup_private(&fss_cd, minor(bp->b_dev));
279 284
280 mutex_enter(&sc->sc_slock); 285 mutex_enter(&sc->sc_slock);
281 286
282 if (write || !FSS_ISVALID(sc)) { 287 if (write || sc->sc_state != FSS_ACTIVE) {
283 bp->b_error = (write ? EROFS : ENXIO); 288 bp->b_error = (write ? EROFS : ENXIO);
284 goto done; 289 goto done;
285 } 290 }
286 /* Check bounds for non-persistent snapshots. */ 291 /* Check bounds for non-persistent snapshots. */
287 if ((sc->sc_flags & FSS_PERSISTENT) == 0 && 292 if ((sc->sc_flags & FSS_PERSISTENT) == 0 &&
288 bounds_check_with_mediasize(bp, DEV_BSIZE, 293 bounds_check_with_mediasize(bp, DEV_BSIZE,
289 btodb(FSS_CLTOB(sc, sc->sc_clcount - 1) + sc->sc_clresid)) <= 0) 294 btodb(FSS_CLTOB(sc, sc->sc_clcount - 1) + sc->sc_clresid)) <= 0)
290 goto done; 295 goto done;
291 296
292 bp->b_rawblkno = bp->b_blkno; 297 bp->b_rawblkno = bp->b_blkno;
293 bufq_put(sc->sc_bufq, bp); 298 bufq_put(sc->sc_bufq, bp);
294 cv_signal(&sc->sc_work_cv); 299 cv_signal(&sc->sc_work_cv);
295 300
@@ -307,119 +312,119 @@ fss_read(dev_t dev, struct uio *uio, int
307{ 312{
308 return physio(fss_strategy, NULL, dev, B_READ, minphys, uio); 313 return physio(fss_strategy, NULL, dev, B_READ, minphys, uio);
309} 314}
310 315
311int 316int
312fss_write(dev_t dev, struct uio *uio, int flags) 317fss_write(dev_t dev, struct uio *uio, int flags)
313{ 318{
314 return physio(fss_strategy, NULL, dev, B_WRITE, minphys, uio); 319 return physio(fss_strategy, NULL, dev, B_WRITE, minphys, uio);
315} 320}
316 321
317int 322int
318fss_ioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l) 323fss_ioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
319{ 324{
320 int error; 325 int error = 0;
321 struct fss_softc *sc = device_lookup_private(&fss_cd, minor(dev)); 326 struct fss_softc *sc = device_lookup_private(&fss_cd, minor(dev));
322 struct fss_set _fss; 327 struct fss_set _fss;
323 struct fss_set *fss = (struct fss_set *)data; 328 struct fss_set *fss = (struct fss_set *)data;
324 struct fss_set50 *fss50 = (struct fss_set50 *)data; 329 struct fss_set50 *fss50 = (struct fss_set50 *)data;
325 struct fss_get *fsg = (struct fss_get *)data; 330 struct fss_get *fsg = (struct fss_get *)data;
326#ifndef _LP64 331#ifndef _LP64
327 struct fss_get50 *fsg50 = (struct fss_get50 *)data; 332 struct fss_get50 *fsg50 = (struct fss_get50 *)data;
328#endif 333#endif
329 334
330 switch (cmd) { 335 switch (cmd) {
331 case FSSIOCSET50: 336 case FSSIOCSET50:
332 fss = &_fss; 337 fss = &_fss;
333 fss->fss_mount = fss50->fss_mount; 338 fss->fss_mount = fss50->fss_mount;
334 fss->fss_bstore = fss50->fss_bstore; 339 fss->fss_bstore = fss50->fss_bstore;
335 fss->fss_csize = fss50->fss_csize; 340 fss->fss_csize = fss50->fss_csize;
336 fss->fss_flags = 0; 341 fss->fss_flags = 0;
337 /* Fall through */ 342 /* Fall through */
338 case FSSIOCSET: 343 case FSSIOCSET:
339 mutex_enter(&sc->sc_lock); 344 mutex_enter(&sc->sc_lock);
340 if ((flag & FWRITE) == 0) 345 if ((flag & FWRITE) == 0)
341 error = EPERM; 346 error = EPERM;
342 else if ((sc->sc_flags & FSS_ACTIVE) != 0) 347 mutex_enter(&sc->sc_slock);
 348 if (error == 0 && sc->sc_state != FSS_IDLE)
343 error = EBUSY; 349 error = EBUSY;
344 else 350 mutex_exit(&sc->sc_slock);
 351 if (error == 0)
345 error = fss_create_snapshot(sc, fss, l); 352 error = fss_create_snapshot(sc, fss, l);
346 if (error == 0) 353 if (error == 0)
347 sc->sc_uflags = fss->fss_flags; 354 sc->sc_uflags = fss->fss_flags;
348 mutex_exit(&sc->sc_lock); 355 mutex_exit(&sc->sc_lock);
349 break; 356 break;
350 357
351 case FSSIOCCLR: 358 case FSSIOCCLR:
352 mutex_enter(&sc->sc_lock); 359 mutex_enter(&sc->sc_lock);
353 if ((flag & FWRITE) == 0) 360 if ((flag & FWRITE) == 0)
354 error = EPERM; 361 error = EPERM;
355 else if ((sc->sc_flags & FSS_ACTIVE) == 0) 362 mutex_enter(&sc->sc_slock);
 363 if (error == 0 && sc->sc_state == FSS_IDLE)
356 error = ENXIO; 364 error = ENXIO;
357 else 365 mutex_exit(&sc->sc_slock);
 366 if (error == 0)
358 error = fss_delete_snapshot(sc, l); 367 error = fss_delete_snapshot(sc, l);
359 mutex_exit(&sc->sc_lock); 368 mutex_exit(&sc->sc_lock);
360 break; 369 break;
361 370
362#ifndef _LP64 371#ifndef _LP64
363 case FSSIOCGET50: 372 case FSSIOCGET50:
364 mutex_enter(&sc->sc_lock); 373 mutex_enter(&sc->sc_lock);
365 switch (sc->sc_flags & (FSS_PERSISTENT | FSS_ACTIVE)) { 374 mutex_enter(&sc->sc_slock);
366 case FSS_ACTIVE: 375 if (sc->sc_state == FSS_IDLE) {
 376 error = ENXIO;
 377 } else if ((sc->sc_flags & FSS_PERSISTENT) == 0) {
367 memcpy(fsg50->fsg_mount, sc->sc_mntname, MNAMELEN); 378 memcpy(fsg50->fsg_mount, sc->sc_mntname, MNAMELEN);
368 fsg50->fsg_csize = FSS_CLSIZE(sc); 379 fsg50->fsg_csize = FSS_CLSIZE(sc);
369 timeval_to_timeval50(&sc->sc_time, &fsg50->fsg_time); 380 timeval_to_timeval50(&sc->sc_time, &fsg50->fsg_time);
370 fsg50->fsg_mount_size = sc->sc_clcount; 381 fsg50->fsg_mount_size = sc->sc_clcount;
371 fsg50->fsg_bs_size = sc->sc_clnext; 382 fsg50->fsg_bs_size = sc->sc_clnext;
372 error = 0; 383 error = 0;
373 break; 384 } else {
374 case FSS_PERSISTENT | FSS_ACTIVE: 
375 memcpy(fsg50->fsg_mount, sc->sc_mntname, MNAMELEN); 385 memcpy(fsg50->fsg_mount, sc->sc_mntname, MNAMELEN);
376 fsg50->fsg_csize = 0; 386 fsg50->fsg_csize = 0;
377 timeval_to_timeval50(&sc->sc_time, &fsg50->fsg_time); 387 timeval_to_timeval50(&sc->sc_time, &fsg50->fsg_time);
378 fsg50->fsg_mount_size = 0; 388 fsg50->fsg_mount_size = 0;
379 fsg50->fsg_bs_size = 0; 389 fsg50->fsg_bs_size = 0;
380 error = 0; 390 error = 0;
381 break; 
382 default: 
383 error = ENXIO; 
384 break; 
385 } 391 }
 392 mutex_exit(&sc->sc_slock);
386 mutex_exit(&sc->sc_lock); 393 mutex_exit(&sc->sc_lock);
387 break; 394 break;
388#endif /* _LP64 */ 395#endif /* _LP64 */
389 396
390 case FSSIOCGET: 397 case FSSIOCGET:
391 mutex_enter(&sc->sc_lock); 398 mutex_enter(&sc->sc_lock);
392 switch (sc->sc_flags & (FSS_PERSISTENT | FSS_ACTIVE)) { 399 mutex_enter(&sc->sc_slock);
393 case FSS_ACTIVE: 400 if (sc->sc_state == FSS_IDLE) {
 401 error = ENXIO;
 402 } else if ((sc->sc_flags & FSS_PERSISTENT) == 0) {
394 memcpy(fsg->fsg_mount, sc->sc_mntname, MNAMELEN); 403 memcpy(fsg->fsg_mount, sc->sc_mntname, MNAMELEN);
395 fsg->fsg_csize = FSS_CLSIZE(sc); 404 fsg->fsg_csize = FSS_CLSIZE(sc);
396 fsg->fsg_time = sc->sc_time; 405 fsg->fsg_time = sc->sc_time;
397 fsg->fsg_mount_size = sc->sc_clcount; 406 fsg->fsg_mount_size = sc->sc_clcount;
398 fsg->fsg_bs_size = sc->sc_clnext; 407 fsg->fsg_bs_size = sc->sc_clnext;
399 error = 0; 408 error = 0;
400 break; 409 } else {
401 case FSS_PERSISTENT | FSS_ACTIVE: 
402 memcpy(fsg->fsg_mount, sc->sc_mntname, MNAMELEN); 410 memcpy(fsg->fsg_mount, sc->sc_mntname, MNAMELEN);
403 fsg->fsg_csize = 0; 411 fsg->fsg_csize = 0;
404 fsg->fsg_time = sc->sc_time; 412 fsg->fsg_time = sc->sc_time;
405 fsg->fsg_mount_size = 0; 413 fsg->fsg_mount_size = 0;
406 fsg->fsg_bs_size = 0; 414 fsg->fsg_bs_size = 0;
407 error = 0; 415 error = 0;
408 break; 
409 default: 
410 error = ENXIO; 
411 break; 
412 } 416 }
 417 mutex_exit(&sc->sc_slock);
413 mutex_exit(&sc->sc_lock); 418 mutex_exit(&sc->sc_lock);
414 break; 419 break;
415 420
416 case FSSIOFSET: 421 case FSSIOFSET:
417 mutex_enter(&sc->sc_slock); 422 mutex_enter(&sc->sc_slock);
418 sc->sc_uflags = *(int *)data; 423 sc->sc_uflags = *(int *)data;
419 mutex_exit(&sc->sc_slock); 424 mutex_exit(&sc->sc_slock);
420 error = 0; 425 error = 0;
421 break; 426 break;
422 427
423 case FSSIOFGET: 428 case FSSIOFGET:
424 mutex_enter(&sc->sc_slock); 429 mutex_enter(&sc->sc_slock);
425 *(int *)data = sc->sc_uflags; 430 *(int *)data = sc->sc_uflags;
@@ -447,33 +452,38 @@ fss_dump(dev_t dev, daddr_t blkno, void
447{ 452{
448 return EROFS; 453 return EROFS;
449} 454}
450 455
451/* 456/*
452 * An error occurred reading or writing the snapshot or backing store. 457 * An error occurred reading or writing the snapshot or backing store.
453 * If it is the first error log to console and disestablish cow handler. 458 * If it is the first error log to console and disestablish cow handler.
454 * The caller holds the mutex. 459 * The caller holds the mutex.
455 */ 460 */
456static inline void 461static inline void
457fss_error(struct fss_softc *sc, const char *msg) 462fss_error(struct fss_softc *sc, const char *msg)
458{ 463{
459 464
460 if ((sc->sc_flags & (FSS_ACTIVE | FSS_ERROR)) != FSS_ACTIVE) 465 KASSERT(mutex_owned(&sc->sc_slock));
 466
 467 if (sc->sc_state == FSS_ERROR)
461 return; 468 return;
462 469
463 aprint_error_dev(sc->sc_dev, "snapshot invalid: %s\n", msg); 470 aprint_error_dev(sc->sc_dev, "snapshot invalid: %s\n", msg);
464 if ((sc->sc_flags & FSS_PERSISTENT) == 0) 471 if ((sc->sc_flags & FSS_PERSISTENT) == 0) {
 472 mutex_exit(&sc->sc_slock);
465 fscow_disestablish(sc->sc_mount, fss_copy_on_write, sc); 473 fscow_disestablish(sc->sc_mount, fss_copy_on_write, sc);
466 sc->sc_flags |= FSS_ERROR; 474 mutex_enter(&sc->sc_slock);
 475 }
 476 sc->sc_state = FSS_ERROR;
467} 477}
468 478
469/* 479/*
470 * Allocate the variable sized parts of the softc and 480 * Allocate the variable sized parts of the softc and
471 * fork the kernel thread. 481 * fork the kernel thread.
472 * 482 *
473 * The fields sc_clcount, sc_clshift, sc_cache_size and sc_indir_size 483 * The fields sc_clcount, sc_clshift, sc_cache_size and sc_indir_size
474 * must be initialized. 484 * must be initialized.
475 */ 485 */
476static int 486static int
477fss_softc_alloc(struct fss_softc *sc) 487fss_softc_alloc(struct fss_softc *sc)
478{ 488{
479 int i, error; 489 int i, error;
@@ -560,46 +570,46 @@ fss_softc_free(struct fss_softc *sc)
560 * Set all active snapshots on this file system into ERROR state. 570 * Set all active snapshots on this file system into ERROR state.
561 */ 571 */
562static void 572static void
563fss_unmount_hook(struct mount *mp) 573fss_unmount_hook(struct mount *mp)
564{ 574{
565 int i; 575 int i;
566 struct fss_softc *sc; 576 struct fss_softc *sc;
567 577
568 mutex_enter(&fss_device_lock); 578 mutex_enter(&fss_device_lock);
569 for (i = 0; i < fss_cd.cd_ndevs; i++) { 579 for (i = 0; i < fss_cd.cd_ndevs; i++) {
570 if ((sc = device_lookup_private(&fss_cd, i)) == NULL) 580 if ((sc = device_lookup_private(&fss_cd, i)) == NULL)
571 continue; 581 continue;
572 mutex_enter(&sc->sc_slock); 582 mutex_enter(&sc->sc_slock);
573 if ((sc->sc_flags & FSS_ACTIVE) != 0 && sc->sc_mount == mp) 583 if (sc->sc_state != FSS_IDLE && sc->sc_mount == mp)
574 fss_error(sc, "forced by unmount"); 584 fss_error(sc, "forced by unmount");
575 mutex_exit(&sc->sc_slock); 585 mutex_exit(&sc->sc_slock);
576 } 586 }
577 mutex_exit(&fss_device_lock); 587 mutex_exit(&fss_device_lock);
578} 588}
579 589
580/* 590/*
581 * A buffer is written to the snapshotted block device. Copy to 591 * A buffer is written to the snapshotted block device. Copy to
582 * backing store if needed. 592 * backing store if needed.
583 */ 593 */
584static int 594static int
585fss_copy_on_write(void *v, struct buf *bp, bool data_valid) 595fss_copy_on_write(void *v, struct buf *bp, bool data_valid)
586{ 596{
587 int error; 597 int error;
588 u_int32_t cl, ch, c; 598 u_int32_t cl, ch, c;
589 struct fss_softc *sc = v; 599 struct fss_softc *sc = v;
590 600
591 mutex_enter(&sc->sc_slock); 601 mutex_enter(&sc->sc_slock);
592 if (!FSS_ISVALID(sc)) { 602 if (sc->sc_state != FSS_ACTIVE) {
593 mutex_exit(&sc->sc_slock); 603 mutex_exit(&sc->sc_slock);
594 return 0; 604 return 0;
595 } 605 }
596 606
597 cl = FSS_BTOCL(sc, dbtob(bp->b_blkno)); 607 cl = FSS_BTOCL(sc, dbtob(bp->b_blkno));
598 ch = FSS_BTOCL(sc, dbtob(bp->b_blkno)+bp->b_bcount-1); 608 ch = FSS_BTOCL(sc, dbtob(bp->b_blkno)+bp->b_bcount-1);
599 error = 0; 609 error = 0;
600 if (curlwp == uvm.pagedaemon_lwp) { 610 if (curlwp == uvm.pagedaemon_lwp) {
601 for (c = cl; c <= ch; c++) 611 for (c = cl; c <= ch; c++)
602 if (isclr(sc->sc_copied, c)) { 612 if (isclr(sc->sc_copied, c)) {
603 error = ENOMEM; 613 error = ENOMEM;
604 break; 614 break;
605 } 615 }
@@ -772,27 +782,29 @@ fss_create_snapshot(struct fss_softc *sc
772 u_int32_t csize; 782 u_int32_t csize;
773 off_t bsize; 783 off_t bsize;
774 784
775 bsize = 0; /* XXX gcc */ 785 bsize = 0; /* XXX gcc */
776 786
777 /* 787 /*
778 * Open needed files. 788 * Open needed files.
779 */ 789 */
780 if ((error = fss_create_files(sc, fss, &bsize, l)) != 0) 790 if ((error = fss_create_files(sc, fss, &bsize, l)) != 0)
781 goto bad; 791 goto bad;
782 792
783 if (sc->sc_flags & FSS_PERSISTENT) { 793 if (sc->sc_flags & FSS_PERSISTENT) {
784 fss_softc_alloc(sc); 794 fss_softc_alloc(sc);
785 sc->sc_flags |= FSS_ACTIVE; 795 mutex_enter(&sc->sc_slock);
 796 sc->sc_state = FSS_ACTIVE;
 797 mutex_exit(&sc->sc_slock);
786 return 0; 798 return 0;
787 } 799 }
788 800
789 /* 801 /*
790 * Set cluster size. Must be a power of two and 802 * Set cluster size. Must be a power of two and
791 * a multiple of backing store block size. 803 * a multiple of backing store block size.
792 */ 804 */
793 if (fss->fss_csize <= 0) 805 if (fss->fss_csize <= 0)
794 csize = MAXPHYS; 806 csize = MAXPHYS;
795 else 807 else
796 csize = fss->fss_csize; 808 csize = fss->fss_csize;
797 if (bsize/csize > FSS_CLUSTER_MAX) 809 if (bsize/csize > FSS_CLUSTER_MAX)
798 csize = bsize/FSS_CLUSTER_MAX+1; 810 csize = bsize/FSS_CLUSTER_MAX+1;
@@ -837,28 +849,31 @@ fss_create_snapshot(struct fss_softc *sc
837 /* 849 /*
838 * Activate the snapshot. 850 * Activate the snapshot.
839 */ 851 */
840 852
841 if ((error = vfs_suspend(sc->sc_mount, 0)) != 0) 853 if ((error = vfs_suspend(sc->sc_mount, 0)) != 0)
842 goto bad; 854 goto bad;
843 855
844 microtime(&sc->sc_time); 856 microtime(&sc->sc_time);
845 857
846 vrele_flush(sc->sc_mount); 858 vrele_flush(sc->sc_mount);
847 error = VFS_SYNC(sc->sc_mount, MNT_WAIT, curlwp->l_cred); 859 error = VFS_SYNC(sc->sc_mount, MNT_WAIT, curlwp->l_cred);
848 if (error == 0) 860 if (error == 0)
849 error = fscow_establish(sc->sc_mount, fss_copy_on_write, sc); 861 error = fscow_establish(sc->sc_mount, fss_copy_on_write, sc);
850 if (error == 0) 862 if (error == 0) {
851 sc->sc_flags |= FSS_ACTIVE; 863 mutex_enter(&sc->sc_slock);
 864 sc->sc_state = FSS_ACTIVE;
 865 mutex_exit(&sc->sc_slock);
 866 }
852 867
853 vfs_resume(sc->sc_mount); 868 vfs_resume(sc->sc_mount);
854 869
855 if (error != 0) 870 if (error != 0)
856 goto bad; 871 goto bad;
857 872
858 aprint_debug_dev(sc->sc_dev, "%s snapshot active\n", sc->sc_mntname); 873 aprint_debug_dev(sc->sc_dev, "%s snapshot active\n", sc->sc_mntname);
859 aprint_debug_dev(sc->sc_dev, 874 aprint_debug_dev(sc->sc_dev,
860 "%u clusters of %u, %u cache slots, %u indir clusters\n", 875 "%u clusters of %u, %u cache slots, %u indir clusters\n",
861 sc->sc_clcount, FSS_CLSIZE(sc), 876 sc->sc_clcount, FSS_CLSIZE(sc),
862 sc->sc_cache_size, sc->sc_indir_size); 877 sc->sc_cache_size, sc->sc_indir_size);
863 878
864 return 0; 879 return 0;
@@ -873,31 +888,33 @@ bad:
873 } 888 }
874 sc->sc_bs_vp = NULL; 889 sc->sc_bs_vp = NULL;
875 890
876 return error; 891 return error;
877} 892}
878 893
879/* 894/*
880 * Delete a snapshot. 895 * Delete a snapshot.
881 */ 896 */
882static int 897static int
883fss_delete_snapshot(struct fss_softc *sc, struct lwp *l) 898fss_delete_snapshot(struct fss_softc *sc, struct lwp *l)
884{ 899{
885 900
886 if ((sc->sc_flags & (FSS_PERSISTENT | FSS_ERROR)) == 0) 
887 fscow_disestablish(sc->sc_mount, fss_copy_on_write, sc); 
888 
889 mutex_enter(&sc->sc_slock); 901 mutex_enter(&sc->sc_slock);
890 sc->sc_flags &= ~(FSS_ACTIVE|FSS_ERROR); 902 if ((sc->sc_flags & FSS_PERSISTENT) == 0 && sc->sc_state != FSS_ERROR) {
 903 mutex_exit(&sc->sc_slock);
 904 fscow_disestablish(sc->sc_mount, fss_copy_on_write, sc);
 905 mutex_enter(&sc->sc_slock);
 906 }
 907 sc->sc_state = FSS_IDLE;
891 sc->sc_mount = NULL; 908 sc->sc_mount = NULL;
892 sc->sc_bdev = NODEV; 909 sc->sc_bdev = NODEV;
893 mutex_exit(&sc->sc_slock); 910 mutex_exit(&sc->sc_slock);
894 911
895 fss_softc_free(sc); 912 fss_softc_free(sc);
896 if (sc->sc_flags & FSS_PERSISTENT) 913 if (sc->sc_flags & FSS_PERSISTENT)
897 vrele(sc->sc_bs_vp); 914 vrele(sc->sc_bs_vp);
898 else 915 else
899 vn_close(sc->sc_bs_vp, FREAD|FWRITE, l->l_cred); 916 vn_close(sc->sc_bs_vp, FREAD|FWRITE, l->l_cred);
900 sc->sc_bs_vp = NULL; 917 sc->sc_bs_vp = NULL;
901 sc->sc_flags &= ~FSS_PERSISTENT; 918 sc->sc_flags &= ~FSS_PERSISTENT;
902 919
903 return 0; 920 return 0;
@@ -912,27 +929,27 @@ fss_read_cluster(struct fss_softc *sc, u
912 int error, todo, offset, len; 929 int error, todo, offset, len;
913 daddr_t dblk; 930 daddr_t dblk;
914 struct buf *bp, *mbp; 931 struct buf *bp, *mbp;
915 struct fss_cache *scp, *scl; 932 struct fss_cache *scp, *scl;
916 933
917 /* 934 /*
918 * Get a free cache slot. 935 * Get a free cache slot.
919 */ 936 */
920 scl = sc->sc_cache+sc->sc_cache_size; 937 scl = sc->sc_cache+sc->sc_cache_size;
921 938
922 mutex_enter(&sc->sc_slock); 939 mutex_enter(&sc->sc_slock);
923 940
924restart: 941restart:
925 if (isset(sc->sc_copied, cl) || !FSS_ISVALID(sc)) { 942 if (isset(sc->sc_copied, cl) || sc->sc_state != FSS_ACTIVE) {
926 mutex_exit(&sc->sc_slock); 943 mutex_exit(&sc->sc_slock);
927 return 0; 944 return 0;
928 } 945 }
929 946
930 for (scp = sc->sc_cache; scp < scl; scp++) 947 for (scp = sc->sc_cache; scp < scl; scp++)
931 if (scp->fc_cluster == cl) { 948 if (scp->fc_cluster == cl) {
932 if (scp->fc_type == FSS_CACHE_VALID) { 949 if (scp->fc_type == FSS_CACHE_VALID) {
933 mutex_exit(&sc->sc_slock); 950 mutex_exit(&sc->sc_slock);
934 return 0; 951 return 0;
935 } else if (scp->fc_type == FSS_CACHE_BUSY) { 952 } else if (scp->fc_type == FSS_CACHE_BUSY) {
936 cv_wait(&scp->fc_state_cv, &sc->sc_slock); 953 cv_wait(&scp->fc_state_cv, &sc->sc_slock);
937 goto restart; 954 goto restart;
938 } 955 }
@@ -1098,27 +1115,27 @@ fss_bs_thread(void *arg)
1098 thread_idle = true; 1115 thread_idle = true;
1099 if ((sc->sc_flags & FSS_BS_THREAD) == 0) { 1116 if ((sc->sc_flags & FSS_BS_THREAD) == 0) {
1100 mutex_exit(&sc->sc_slock); 1117 mutex_exit(&sc->sc_slock);
1101 kthread_exit(0); 1118 kthread_exit(0);
1102 } 1119 }
1103 1120
1104 /* 1121 /*
1105 * Process I/O requests (persistent) 1122 * Process I/O requests (persistent)
1106 */ 1123 */
1107 1124
1108 if (sc->sc_flags & FSS_PERSISTENT) { 1125 if (sc->sc_flags & FSS_PERSISTENT) {
1109 if ((bp = bufq_get(sc->sc_bufq)) == NULL) 1126 if ((bp = bufq_get(sc->sc_bufq)) == NULL)
1110 continue; 1127 continue;
1111 is_valid = FSS_ISVALID(sc); 1128 is_valid = (sc->sc_state == FSS_ACTIVE);
1112 is_read = (bp->b_flags & B_READ); 1129 is_read = (bp->b_flags & B_READ);
1113 thread_idle = false; 1130 thread_idle = false;
1114 mutex_exit(&sc->sc_slock); 1131 mutex_exit(&sc->sc_slock);
1115 1132
1116 if (is_valid) { 1133 if (is_valid) {
1117 disk_busy(sc->sc_dkdev); 1134 disk_busy(sc->sc_dkdev);
1118 error = fss_bs_io(sc, FSS_READ, 0, 1135 error = fss_bs_io(sc, FSS_READ, 0,
1119 dbtob(bp->b_blkno), bp->b_bcount, 1136 dbtob(bp->b_blkno), bp->b_bcount,
1120 bp->b_data, &resid); 1137 bp->b_data, &resid);
1121 if (error) 1138 if (error)
1122 resid = bp->b_bcount; 1139 resid = bp->b_bcount;
1123 disk_unbusy(sc->sc_dkdev, 1140 disk_unbusy(sc->sc_dkdev,
1124 (error ? 0 : bp->b_bcount), is_read); 1141 (error ? 0 : bp->b_bcount), is_read);
@@ -1160,27 +1177,27 @@ fss_bs_thread(void *arg)
1160 } else 1177 } else
1161 fss_error(sc, "write error on backing store"); 1178 fss_error(sc, "write error on backing store");
1162 1179
1163 scp->fc_type = FSS_CACHE_FREE; 1180 scp->fc_type = FSS_CACHE_FREE;
1164 cv_broadcast(&sc->sc_cache_cv); 1181 cv_broadcast(&sc->sc_cache_cv);
1165 break; 1182 break;
1166 } 1183 }
1167 1184
1168 /* 1185 /*
1169 * Process I/O requests 1186 * Process I/O requests
1170 */ 1187 */
1171 if ((bp = bufq_get(sc->sc_bufq)) == NULL) 1188 if ((bp = bufq_get(sc->sc_bufq)) == NULL)
1172 continue; 1189 continue;
1173 is_valid = FSS_ISVALID(sc); 1190 is_valid = (sc->sc_state == FSS_ACTIVE);
1174 is_read = (bp->b_flags & B_READ); 1191 is_read = (bp->b_flags & B_READ);
1175 thread_idle = false; 1192 thread_idle = false;
1176 1193
1177 if (!is_valid) { 1194 if (!is_valid) {
1178 mutex_exit(&sc->sc_slock); 1195 mutex_exit(&sc->sc_slock);
1179 1196
1180 bp->b_error = ENXIO; 1197 bp->b_error = ENXIO;
1181 bp->b_resid = bp->b_bcount; 1198 bp->b_resid = bp->b_bcount;
1182 biodone(bp); 1199 biodone(bp);
1183 1200
1184 mutex_enter(&sc->sc_slock); 1201 mutex_enter(&sc->sc_slock);
1185 continue; 1202 continue;
1186 } 1203 }
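
One consequence of keeping sc_state under sc_slock in the hunks above shows up in
fss_error() and fss_delete_snapshot(): both now drop sc_slock around the call to
fscow_disestablish() and retake it afterwards, so the short-term softc lock is not
held across a call that may block. A hedged userland analogy of that
unlock/call/relock idiom, using POSIX threads instead of the kernel mutex API
(slow_disestablish() and the state variable are made up for the example):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t slock = PTHREAD_MUTEX_INITIALIZER;
static int state = 1;			/* 1 ~ "active", 2 ~ "error" */

/* Stand-in for fscow_disestablish(): may block, so it must not be
 * called while the short-term lock is held. */
static void
slow_disestablish(void)
{
	usleep(1000);
}

static void
enter_error_state(void)
{
	pthread_mutex_lock(&slock);
	if (state == 2) {			/* already failed once */
		pthread_mutex_unlock(&slock);
		return;
	}
	pthread_mutex_unlock(&slock);		/* drop lock across the blocking call */
	slow_disestablish();
	pthread_mutex_lock(&slock);		/* retake it before updating state */
	state = 2;
	pthread_mutex_unlock(&slock);
}

int
main(void)
{
	enter_error_state();
	printf("state = %d\n", state);
	return 0;
}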

cvs diff -r1.29 -r1.30 src/sys/dev/fssvar.h

--- src/sys/dev/fssvar.h 2015/09/06 06:00:59 1.29
+++ src/sys/dev/fssvar.h 2018/08/29 09:04:03 1.30
@@ -1,14 +1,14 @@
1/* $NetBSD: fssvar.h,v 1.29 2015/09/06 06:00:59 dholland Exp $ */ 1/* $NetBSD: fssvar.h,v 1.30 2018/08/29 09:04:03 hannken Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2003, 2007 The NetBSD Foundation, Inc. 4 * Copyright (c) 2003, 2007 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Juergen Hannken-Illjes. 8 * by Juergen Hannken-Illjes.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -73,30 +73,26 @@ struct fss_get50 {
73 blkcnt_t fsg_mount_size; /* # clusters on file system */ 73 blkcnt_t fsg_mount_size; /* # clusters on file system */
74 blkcnt_t fsg_bs_size; /* # clusters on backing store */ 74 blkcnt_t fsg_bs_size; /* # clusters on backing store */
75}; 75};
76 76
77#define FSSIOCSET50 _IOW('F', 0, struct fss_set50) /* Old configure */ 77#define FSSIOCSET50 _IOW('F', 0, struct fss_set50) /* Old configure */
78#define FSSIOCGET50 _IOR('F', 1, struct fss_get50) /* Old Status */ 78#define FSSIOCGET50 _IOR('F', 1, struct fss_get50) /* Old Status */
79 79
80#include <sys/bufq.h> 80#include <sys/bufq.h>
81 81
82#define FSS_CLUSTER_MAX (1<<24) /* Upper bound of clusters. The 82#define FSS_CLUSTER_MAX (1<<24) /* Upper bound of clusters. The
83 sc_copied map uses up to 83 sc_copied map uses up to
84 FSS_CLUSTER_MAX/NBBY bytes */ 84 FSS_CLUSTER_MAX/NBBY bytes */
85 85
86/* Check if still valid */ 
87#define FSS_ISVALID(sc) \ 
88 (((sc)->sc_flags & (FSS_ACTIVE|FSS_ERROR)) == FSS_ACTIVE) 
89 
90/* Offset to cluster */ 86/* Offset to cluster */
91#define FSS_BTOCL(sc, off) \ 87#define FSS_BTOCL(sc, off) \
92 ((off) >> (sc)->sc_clshift) 88 ((off) >> (sc)->sc_clshift)
93 89
94/* Cluster to offset */ 90/* Cluster to offset */
95#define FSS_CLTOB(sc, cl) \ 91#define FSS_CLTOB(sc, cl) \
96 ((off_t)(cl) << (sc)->sc_clshift) 92 ((off_t)(cl) << (sc)->sc_clshift)
97 93
98/* Offset from start of cluster */ 94/* Offset from start of cluster */
99#define FSS_CLOFF(sc, off) \ 95#define FSS_CLOFF(sc, off) \
100 ((off) & (sc)->sc_clmask) 96 ((off) & (sc)->sc_clmask)
101 97
102/* Size of cluster */ 98/* Size of cluster */
@@ -127,35 +123,40 @@ typedef enum {
127typedef enum { 123typedef enum {
128 FSS_CACHE_FREE = 0, /* Cache entry is free */ 124 FSS_CACHE_FREE = 0, /* Cache entry is free */
129 FSS_CACHE_BUSY = 1, /* Cache entry is read from device */ 125 FSS_CACHE_BUSY = 1, /* Cache entry is read from device */
130 FSS_CACHE_VALID = 2 /* Cache entry contains valid data */ 126 FSS_CACHE_VALID = 2 /* Cache entry contains valid data */
131} fss_cache_type; 127} fss_cache_type;
132 128
133struct fss_cache { 129struct fss_cache {
134 fss_cache_type fc_type; /* Current state */ 130 fss_cache_type fc_type; /* Current state */
135 u_int32_t fc_cluster; /* Cluster number of this entry */ 131 u_int32_t fc_cluster; /* Cluster number of this entry */
136 kcondvar_t fc_state_cv; /* Signals state change from busy */ 132 kcondvar_t fc_state_cv; /* Signals state change from busy */
137 void * fc_data; /* Data */ 133 void * fc_data; /* Data */
138}; 134};
139 135
 136typedef enum {
 137 FSS_IDLE, /* Device is unconfigured */
 138 FSS_ACTIVE, /* Device is configured */
 139 FSS_ERROR /* Device had errors */
 140} fss_state_t;
 141
140struct fss_softc { 142struct fss_softc {
141 device_t sc_dev; /* Self */ 143 device_t sc_dev; /* Self */
142 kmutex_t sc_slock; /* Protect this softc */ 144 kmutex_t sc_slock; /* Protect this softc */
143 kmutex_t sc_lock; /* Sleep lock for fss_ioctl */ 145 kmutex_t sc_lock; /* Sleep lock for fss_ioctl */
144 kcondvar_t sc_work_cv; /* Signals work for the kernel thread */ 146 kcondvar_t sc_work_cv; /* Signals work for the kernel thread */
145 kcondvar_t sc_cache_cv; /* Signals free cache slot */ 147 kcondvar_t sc_cache_cv; /* Signals free cache slot */
 148 fss_state_t sc_state; /* Current state */
146 volatile int sc_flags; /* Flags */ 149 volatile int sc_flags; /* Flags */
147#define FSS_ACTIVE 0x01 /* Snapshot is active */ 
148#define FSS_ERROR 0x02 /* I/O error occurred */ 
149#define FSS_BS_THREAD 0x04 /* Kernel thread is running */ 150#define FSS_BS_THREAD 0x04 /* Kernel thread is running */
150#define FSS_PERSISTENT 0x20 /* File system internal snapshot */ 151#define FSS_PERSISTENT 0x20 /* File system internal snapshot */
151#define FSS_CDEV_OPEN 0x40 /* character device open */ 152#define FSS_CDEV_OPEN 0x40 /* character device open */
152#define FSS_BDEV_OPEN 0x80 /* block device open */ 153#define FSS_BDEV_OPEN 0x80 /* block device open */
153 int sc_uflags; /* User visible flags */ 154 int sc_uflags; /* User visible flags */
154 struct disk *sc_dkdev; /* Generic disk device info */ 155 struct disk *sc_dkdev; /* Generic disk device info */
155 struct mount *sc_mount; /* Mount point */ 156 struct mount *sc_mount; /* Mount point */
156 char sc_mntname[MNAMELEN]; /* Mount point */ 157 char sc_mntname[MNAMELEN]; /* Mount point */
157 struct timeval sc_time; /* Time this snapshot was taken */ 158 struct timeval sc_time; /* Time this snapshot was taken */
158 dev_t sc_bdev; /* Underlying block device */ 159 dev_t sc_bdev; /* Underlying block device */
159 struct vnode *sc_bs_vp; /* Our backing store */ 160 struct vnode *sc_bs_vp; /* Our backing store */
160 int sc_bs_bshift; /* Shift of backing store block */ 161 int sc_bs_bshift; /* Shift of backing store block */
161 u_int32_t sc_bs_bmask; /* Mask of backing store block */ 162 u_int32_t sc_bs_bmask; /* Mask of backing store block */