Wed May 24 09:53:00 2017 UTC
Remove the syncer dance from dounmount().  The syncer skips file systems
that are being unmounted, since they are suspended.

Remove now unused syncer_mutex.


(hannken)
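The change relies on the ordering sketched below.  This is a minimal sketch,
not the committed code: the busy and IMNT_GONE checks, the
mnt_unmounting/mnt_updating locking and the error paths are elided, and the
claim that the non-blocking mount iterator skips suspended file systems is
taken from the log message above.

	/* dounmount(): the mount stays suspended for the whole unmount. */
	error = vfs_suspend(mp, 0);
	if (error)
		return error;
	if (used_syncer)
		vfs_syncer_remove_from_worklist(mp);
	error = VFS_UNMOUNT(mp, flags);
	vfs_resume(mp);		/* resume on both success and failure */

	/* sched_sync(): mountlist_iterator_trynext() does not return a
	 * suspended mount, so the syncer never races with an unmount in
	 * progress and no global syncer_mutex freeze is needed. */
	mountlist_iterator_init(&iter);
	while ((mp = mountlist_iterator_trynext(iter)) != NULL)
		VFS_SYNC(mp, MNT_LAZY, curlwp->l_cred);
	mountlist_iterator_destroy(iter);
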
diff -r1.62 -r1.63 src/sys/kern/vfs_mount.c
diff -r1.464 -r1.465 src/sys/kern/vfs_subr.c
diff -r1.226 -r1.227 src/sys/sys/mount.h

cvs diff -r1.62 -r1.63 src/sys/kern/vfs_mount.c

--- src/sys/kern/vfs_mount.c 2017/05/17 12:45:03 1.62
+++ src/sys/kern/vfs_mount.c 2017/05/24 09:52:59 1.63
@@ -1,14 +1,14 @@
-/*	$NetBSD: vfs_mount.c,v 1.62 2017/05/17 12:45:03 hannken Exp $	*/
+/*	$NetBSD: vfs_mount.c,v 1.63 2017/05/24 09:52:59 hannken Exp $	*/
 
 /*-
  * Copyright (c) 1997-2011 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
  * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
@@ -57,27 +57,27 @@
  * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vfs_mount.c,v 1.62 2017/05/17 12:45:03 hannken Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vfs_mount.c,v 1.63 2017/05/24 09:52:59 hannken Exp $");
 
 #include <sys/param.h>
 #include <sys/kernel.h>
 
 #include <sys/atomic.h>
 #include <sys/buf.h>
 #include <sys/conf.h>
 #include <sys/fcntl.h>
 #include <sys/filedesc.h>
 #include <sys/device.h>
 #include <sys/kauth.h>
 #include <sys/kmem.h>
 #include <sys/module.h>
@@ -859,100 +859,74 @@ dounmount(struct mount *mp, int flags, s
 
 	/*
 	 * No unmount below layered mounts.
 	 */
 	mountlist_iterator_init(&iter);
 	while ((cmp = mountlist_iterator_next(iter)) != NULL) {
 		if (cmp->mnt_lower == mp) {
 			mountlist_iterator_destroy(iter);
 			return EBUSY;
 		}
 	}
 	mountlist_iterator_destroy(iter);
 
-	/*
-	 * XXX Freeze syncer.  Must do this before locking the
-	 * mount point.  See dounmount() for details.
-	 */
-	mutex_enter(&syncer_mutex);
-
 	error = vfs_suspend(mp, 0);
 	if (error) {
-		mutex_exit(&syncer_mutex);
 		return error;
 	}
 
 	/*
 	 * Abort unmount attempt when the filesystem is in use
 	 */
 	mutex_enter(&mp->mnt_unmounting);
 	if (mp->mnt_busynest != 0) {
 		mutex_exit(&mp->mnt_unmounting);
-		mutex_exit(&syncer_mutex);
 		vfs_resume(mp);
 		return EBUSY;
 	}
 
 	/*
 	 * Abort unmount attempt when the filesystem is not mounted
 	 */
 	if ((mp->mnt_iflag & IMNT_GONE) != 0) {
 		mutex_exit(&mp->mnt_unmounting);
-		mutex_exit(&syncer_mutex);
 		return ENOENT;
 	}
 
 	used_syncer = (mp->mnt_iflag & IMNT_ONWORKLIST) != 0;
 	used_extattr = mp->mnt_flag & MNT_EXTATTR;
 
-	/*
-	 * XXX Syncer must be frozen when we get here.  This should really
-	 * be done on a per-mountpoint basis, but the syncer doesn't work
-	 * like that.
-	 *
-	 * The caller of dounmount() must acquire syncer_mutex because
-	 * the syncer itself acquires locks in syncer_mutex -> vfs_busy
-	 * order, and we must preserve that order to avoid deadlock.
-	 *
-	 * So, if the file system did not use the syncer, now is
-	 * the time to release the syncer_mutex.
-	 */
-	if (used_syncer == 0) {
-		mutex_exit(&syncer_mutex);
-	}
 	mp->mnt_iflag |= IMNT_UNMOUNT;
 	mutex_enter(&mp->mnt_updating);
 	async = mp->mnt_flag & MNT_ASYNC;
 	mp->mnt_flag &= ~MNT_ASYNC;
 	cache_purgevfs(mp);	/* remove cache entries for this file sys */
 	if (used_syncer)
 		vfs_syncer_remove_from_worklist(mp);
 	error = 0;
 	if (((mp->mnt_flag & MNT_RDONLY) == 0) && ((flags & MNT_FORCE) == 0)) {
 		error = VFS_SYNC(mp, MNT_WAIT, l->l_cred);
 	}
 	if (error == 0 || (flags & MNT_FORCE)) {
 		error = VFS_UNMOUNT(mp, flags);
 	}
 	if (error) {
 		mp->mnt_iflag &= ~IMNT_UNMOUNT;
 		mutex_exit(&mp->mnt_unmounting);
 		if ((mp->mnt_flag & (MNT_RDONLY | MNT_ASYNC)) == 0)
 			vfs_syncer_add_to_worklist(mp);
 		mp->mnt_flag |= async;
 		mutex_exit(&mp->mnt_updating);
 		vfs_resume(mp);
-		if (used_syncer)
-			mutex_exit(&syncer_mutex);
 		if (used_extattr) {
 			if (start_extattr(mp) != 0)
 				mp->mnt_flag &= ~MNT_EXTATTR;
 			else
 				mp->mnt_flag |= MNT_EXTATTR;
 		}
 		return (error);
 	}
 	mutex_exit(&mp->mnt_updating);
 
 	/*
 	 * release mnt_umounting lock here, because other code calls
 	 * vfs_busy() while holding the mountlist_lock.
@@ -963,28 +937,26 @@ dounmount(struct mount *mp, int flags, s
 	 */
 	mp->mnt_iflag |= IMNT_GONE;
 	mutex_exit(&mp->mnt_unmounting);
 	vfs_resume(mp);
 
 	if ((coveredvp = mp->mnt_vnodecovered) != NULLVP) {
 		vn_lock(coveredvp, LK_EXCLUSIVE | LK_RETRY);
 		coveredvp->v_mountedhere = NULL;
 		VOP_UNLOCK(coveredvp);
 	}
 	mountlist_remove(mp);
 	if (TAILQ_FIRST(&mp->mnt_vnodelist) != NULL)
 		panic("unmount: dangling vnode");
-	if (used_syncer)
-		mutex_exit(&syncer_mutex);
 	vfs_hooks_unmount(mp);
 
 	fstrans_unmount(mp);
 	vfs_rele(mp);	/* reference from mount() */
 	if (coveredvp != NULLVP) {
 		vrele(coveredvp);
 	}
 	return (0);
 }
 
 /*
  * Unmount all file systems.
  * We traverse the list in reverse order under the assumption that doing so

cvs diff -r1.464 -r1.465 src/sys/kern/vfs_subr.c

--- src/sys/kern/vfs_subr.c 2017/05/07 08:26:58 1.464
+++ src/sys/kern/vfs_subr.c 2017/05/24 09:52:59 1.465
@@ -1,14 +1,14 @@
-/*	$NetBSD: vfs_subr.c,v 1.464 2017/05/07 08:26:58 hannken Exp $	*/
+/*	$NetBSD: vfs_subr.c,v 1.465 2017/05/24 09:52:59 hannken Exp $	*/
 
 /*-
  * Copyright (c) 1997, 1998, 2004, 2005, 2007, 2008 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
  * NASA Ames Research Center, by Charles M. Hannum, by Andrew Doran,
  * by Marshall Kirk McKusick and Greg Ganger at the University of Michigan.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
@@ -58,27 +58,27 @@
  * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vfs_subr.c,v 1.464 2017/05/07 08:26:58 hannken Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vfs_subr.c,v 1.465 2017/05/24 09:52:59 hannken Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_ddb.h"
 #include "opt_compat_netbsd.h"
 #include "opt_compat_43.h"
 #endif
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/conf.h>
 #include <sys/dirent.h>
 #include <sys/filedesc.h>
 #include <sys/kernel.h>
@@ -558,49 +558,47 @@ typedef TAILQ_HEAD(synclist, vnode_impl)
 static void	vn_syncer_add1(struct vnode *, int);
 static void	sysctl_vfs_syncfs_setup(struct sysctllog **);
 
 /*
  * Defines and variables for the syncer process.
  */
 int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
 time_t syncdelay = 30;			/* max time to delay syncing data */
 time_t filedelay = 30;			/* time to delay syncing files */
 time_t dirdelay = 15;			/* time to delay syncing directories */
 time_t metadelay = 10;			/* time to delay syncing metadata */
 time_t lockdelay = 1;			/* time to delay if locking fails */
 
-kmutex_t syncer_mutex;			/* used to freeze syncer, long term */
 static kmutex_t syncer_data_lock;	/* short term lock on data structs */
 
 static int syncer_delayno = 0;
 static long syncer_last;
 static synclist_t *	syncer_workitem_pending;
 
 static void
 vn_initialize_syncerd(void)
 {
 	int i;
 
 	syncer_last = SYNCER_MAXDELAY + 2;
 
 	sysctl_vfs_syncfs_setup(NULL);
 
 	syncer_workitem_pending =
 	    kmem_alloc(syncer_last * sizeof (struct synclist), KM_SLEEP);
 
 	for (i = 0; i < syncer_last; i++)
 		TAILQ_INIT(&syncer_workitem_pending[i]);
 
-	mutex_init(&syncer_mutex, MUTEX_DEFAULT, IPL_NONE);
 	mutex_init(&syncer_data_lock, MUTEX_DEFAULT, IPL_NONE);
 }
 
 /*
  * Return delay factor appropriate for the given file system.  For
  * WAPBL we use the sync vnode to burst out metadata updates: sync
  * those file systems more frequently.
  */
 static inline int
 sync_delay(struct mount *mp)
 {
 
 	return mp->mnt_wapbl != NULL ? metadelay : syncdelay;
@@ -757,28 +755,26 @@ lazy_sync_vnode(struct vnode *vp)
  * System filesystem synchronizer daemon.
  */
 void
 sched_sync(void *arg)
 {
 	mount_iterator_t *iter;
 	synclist_t *slp;
 	struct vnode *vp;
 	struct mount *mp;
 	time_t starttime;
 	bool synced;
 
 	for (;;) {
-		mutex_enter(&syncer_mutex);
-
 		starttime = time_second;
 
 		/*
 		 * Sync mounts whose dirty time has expired.
 		 */
 		mountlist_iterator_init(&iter);
 		while ((mp = mountlist_iterator_trynext(iter)) != NULL) {
 			if ((mp->mnt_iflag & IMNT_ONWORKLIST) == 0 ||
 			    mp->mnt_synclist_slot != syncer_delayno) {
 				continue;
 			}
 			mp->mnt_synclist_slot = sync_delay_slot(sync_delay(mp));
 			VFS_SYNC(mp, MNT_LAZY, curlwp->l_cred);
@@ -820,27 +816,26 @@ sched_sync(void *arg)
 				 * syncdelay is mainly to get this vnode out
 				 * of the way so we do not consider it again
 				 * "soon" in this loop, so the delay time is
 				 * not critical as long as it is not "soon".
 				 * While write-back strategy is the file
 				 * system's domain, we expect write-back to
 				 * occur no later than syncdelay seconds
 				 * into the future.
 				 */
 				vn_syncer_add1(vp,
 				    synced ? syncdelay : lockdelay);
 			}
 		}
-		mutex_exit(&syncer_mutex);
 
 		/*
 		 * If it has taken us less than a second to process the
 		 * current work, then wait.  Otherwise start right over
 		 * again.  We can still lose time if any single round
 		 * takes more than two seconds, but it does not really
 		 * matter as we are just trying to generally pace the
 		 * filesystem activity.
 		 */
 		if (time_second == starttime) {
 			kpause("syncer", false, hz, &syncer_data_lock);
 		}
 		mutex_exit(&syncer_data_lock);

cvs diff -r1.226 -r1.227 src/sys/sys/mount.h

--- src/sys/sys/mount.h 2017/04/17 08:34:27 1.226
+++ src/sys/sys/mount.h 2017/05/24 09:52:59 1.227
@@ -1,14 +1,14 @@
-/*	$NetBSD: mount.h,v 1.226 2017/04/17 08:34:27 hannken Exp $	*/
+/*	$NetBSD: mount.h,v 1.227 2017/05/24 09:52:59 hannken Exp $	*/
 
 /*
  * Copyright (c) 1989, 1991, 1993
  *	The Regents of the University of California.  All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
@@ -448,27 +448,26 @@ int vfs_quotactl_cursorget(struct mount
 int	vfs_quotactl_cursoratend(struct mount *, struct quotakcursor *, int *);
 int	vfs_quotactl_cursorrewind(struct mount *, struct quotakcursor *);
 int	vfs_quotactl_quotaon(struct mount *, int, const char *);
 int	vfs_quotactl_quotaoff(struct mount *, int);
 
 struct vnode_iterator; /* Opaque. */
 void	vfs_vnode_iterator_init(struct mount *, struct vnode_iterator **);
 void	vfs_vnode_iterator_destroy(struct vnode_iterator *);
 struct vnode *vfs_vnode_iterator_next(struct vnode_iterator *,
     bool (*)(void *, struct vnode *), void *);
 
 /* Syncer */
 extern int syncer_maxdelay;
-extern kmutex_t syncer_mutex;
 extern time_t syncdelay;
 extern time_t filedelay;
 extern time_t dirdelay;
 extern time_t metadelay;
 void	vfs_syncer_add_to_worklist(struct mount *);
 void	vfs_syncer_remove_from_worklist(struct mount *);
 
 extern struct vfsops *vfssw[];	/* filesystem type table */
 extern int nvfssw;
 extern kmutex_t vfs_list_lock;
 
 void	vfs_mount_sysinit(void);
 long	makefstype(const char *);