| @@ -1,489 +1,505 @@ | | | @@ -1,489 +1,505 @@ |
1 | /* $NetBSD: tmpfs_vfsops.c,v 1.68 2016/08/26 21:44:24 dholland Exp $ */ | | 1 | /* $NetBSD: tmpfs_vfsops.c,v 1.69 2017/01/27 10:47:54 hannken Exp $ */ |
2 | | | 2 | |
3 | /* | | 3 | /* |
4 | * Copyright (c) 2005, 2006, 2007 The NetBSD Foundation, Inc. | | 4 | * Copyright (c) 2005, 2006, 2007 The NetBSD Foundation, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation | | 7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Julio M. Merino Vidal, developed as part of Google's Summer of Code | | 8 | * by Julio M. Merino Vidal, developed as part of Google's Summer of Code |
9 | * 2005 program. | | 9 | * 2005 program. |
10 | * | | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | | 11 | * Redistribution and use in source and binary forms, with or without |
12 | * modification, are permitted provided that the following conditions | | 12 | * modification, are permitted provided that the following conditions |
13 | * are met: | | 13 | * are met: |
14 | * 1. Redistributions of source code must retain the above copyright | | 14 | * 1. Redistributions of source code must retain the above copyright |
15 | * notice, this list of conditions and the following disclaimer. | | 15 | * notice, this list of conditions and the following disclaimer. |
16 | * 2. Redistributions in binary form must reproduce the above copyright | | 16 | * 2. Redistributions in binary form must reproduce the above copyright |
17 | * notice, this list of conditions and the following disclaimer in the | | 17 | * notice, this list of conditions and the following disclaimer in the |
18 | * documentation and/or other materials provided with the distribution. | | 18 | * documentation and/or other materials provided with the distribution. |
19 | * | | 19 | * |
20 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 20 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
21 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 21 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
22 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 22 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
23 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 23 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
24 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 24 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
25 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 25 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
26 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 26 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
27 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 27 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
28 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 28 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
29 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 29 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
30 | * POSSIBILITY OF SUCH DAMAGE. | | 30 | * POSSIBILITY OF SUCH DAMAGE. |
31 | */ | | 31 | */ |
32 | | | 32 | |
33 | /* | | 33 | /* |
34 | * Efficient memory file system. | | 34 | * Efficient memory file system. |
35 | * | | 35 | * |
36 | * tmpfs is a file system that uses NetBSD's virtual memory sub-system | | 36 | * tmpfs is a file system that uses NetBSD's virtual memory sub-system |
37 | * (the well-known UVM) to store file data and metadata in an efficient | | 37 | * (the well-known UVM) to store file data and metadata in an efficient |
38 | * way. This means that it does not follow the structure of an on-disk | | 38 | * way. This means that it does not follow the structure of an on-disk |
39 | * file system because it simply does not need to. Instead, it uses | | 39 | * file system because it simply does not need to. Instead, it uses |
40 | * memory-specific data structures and algorithms to automatically | | 40 | * memory-specific data structures and algorithms to automatically |
41 | * allocate and release resources. | | 41 | * allocate and release resources. |
42 | */ | | 42 | */ |
43 | | | 43 | |
44 | #include <sys/cdefs.h> | | 44 | #include <sys/cdefs.h> |
45 | __KERNEL_RCSID(0, "$NetBSD: tmpfs_vfsops.c,v 1.68 2016/08/26 21:44:24 dholland Exp $"); | | 45 | __KERNEL_RCSID(0, "$NetBSD: tmpfs_vfsops.c,v 1.69 2017/01/27 10:47:54 hannken Exp $"); |
46 | | | 46 | |
47 | #include <sys/param.h> | | 47 | #include <sys/param.h> |
48 | #include <sys/atomic.h> | | 48 | #include <sys/atomic.h> |
49 | #include <sys/types.h> | | 49 | #include <sys/types.h> |
50 | #include <sys/kmem.h> | | 50 | #include <sys/kmem.h> |
51 | #include <sys/mount.h> | | 51 | #include <sys/mount.h> |
52 | #include <sys/stat.h> | | 52 | #include <sys/stat.h> |
53 | #include <sys/systm.h> | | 53 | #include <sys/systm.h> |
54 | #include <sys/vnode.h> | | 54 | #include <sys/vnode.h> |
55 | #include <sys/kauth.h> | | 55 | #include <sys/kauth.h> |
56 | #include <sys/module.h> | | 56 | #include <sys/module.h> |
57 | | | 57 | |
58 | #include <miscfs/genfs/genfs.h> | | 58 | #include <miscfs/genfs/genfs.h> |
59 | #include <fs/tmpfs/tmpfs.h> | | 59 | #include <fs/tmpfs/tmpfs.h> |
60 | #include <fs/tmpfs/tmpfs_args.h> | | 60 | #include <fs/tmpfs/tmpfs_args.h> |
61 | | | 61 | |
62 | MODULE(MODULE_CLASS_VFS, tmpfs, NULL); | | 62 | MODULE(MODULE_CLASS_VFS, tmpfs, NULL); |
63 | | | 63 | |
64 | struct pool tmpfs_dirent_pool; | | 64 | struct pool tmpfs_dirent_pool; |
65 | struct pool tmpfs_node_pool; | | 65 | struct pool tmpfs_node_pool; |
66 | | | 66 | |
67 | void | | 67 | void |
68 | tmpfs_init(void) | | 68 | tmpfs_init(void) |
69 | { | | 69 | { |
70 | | | 70 | |
71 | pool_init(&tmpfs_dirent_pool, sizeof(tmpfs_dirent_t), 0, 0, 0, | | 71 | pool_init(&tmpfs_dirent_pool, sizeof(tmpfs_dirent_t), 0, 0, 0, |
72 | "tmpfs_dirent", &pool_allocator_nointr, IPL_NONE); | | 72 | "tmpfs_dirent", &pool_allocator_nointr, IPL_NONE); |
73 | pool_init(&tmpfs_node_pool, sizeof(tmpfs_node_t), 0, 0, 0, | | 73 | pool_init(&tmpfs_node_pool, sizeof(tmpfs_node_t), 0, 0, 0, |
74 | "tmpfs_node", &pool_allocator_nointr, IPL_NONE); | | 74 | "tmpfs_node", &pool_allocator_nointr, IPL_NONE); |
75 | } | | 75 | } |
76 | | | 76 | |
77 | void | | 77 | void |
78 | tmpfs_done(void) | | 78 | tmpfs_done(void) |
79 | { | | 79 | { |
80 | | | 80 | |
81 | pool_destroy(&tmpfs_dirent_pool); | | 81 | pool_destroy(&tmpfs_dirent_pool); |
82 | pool_destroy(&tmpfs_node_pool); | | 82 | pool_destroy(&tmpfs_node_pool); |
83 | } | | 83 | } |
84 | | | 84 | |
85 | int | | 85 | int |
86 | tmpfs_mount(struct mount *mp, const char *path, void *data, size_t *data_len) | | 86 | tmpfs_mount(struct mount *mp, const char *path, void *data, size_t *data_len) |
87 | { | | 87 | { |
88 | struct tmpfs_args *args = data; | | 88 | struct tmpfs_args *args = data; |
89 | tmpfs_mount_t *tmp; | | 89 | tmpfs_mount_t *tmp; |
90 | tmpfs_node_t *root; | | 90 | tmpfs_node_t *root; |
91 | struct vattr va; | | 91 | struct vattr va; |
92 | struct vnode *vp; | | 92 | struct vnode *vp; |
93 | uint64_t memlimit; | | 93 | uint64_t memlimit; |
94 | ino_t nodes; | | 94 | ino_t nodes; |
95 | int error; | | 95 | int error, flags; |
96 | bool set_memlimit; | | 96 | bool set_memlimit; |
97 | bool set_nodes; | | 97 | bool set_nodes; |
98 | | | 98 | |
99 | if (args == NULL) | | 99 | if (args == NULL) |
100 | return EINVAL; | | 100 | return EINVAL; |
101 | | | 101 | |
102 | /* Validate the version. */ | | 102 | /* Validate the version. */ |
103 | if (*data_len < sizeof(*args) || | | 103 | if (*data_len < sizeof(*args) || |
104 | args->ta_version != TMPFS_ARGS_VERSION) | | 104 | args->ta_version != TMPFS_ARGS_VERSION) |
105 | return EINVAL; | | 105 | return EINVAL; |
106 | | | 106 | |
107 | /* Handle retrieval of mount point arguments. */ | | 107 | /* Handle retrieval of mount point arguments. */ |
108 | if (mp->mnt_flag & MNT_GETARGS) { | | 108 | if (mp->mnt_flag & MNT_GETARGS) { |
109 | if (mp->mnt_data == NULL) | | 109 | if (mp->mnt_data == NULL) |
110 | return EIO; | | 110 | return EIO; |
111 | tmp = VFS_TO_TMPFS(mp); | | 111 | tmp = VFS_TO_TMPFS(mp); |
112 | | | 112 | |
113 | args->ta_version = TMPFS_ARGS_VERSION; | | 113 | args->ta_version = TMPFS_ARGS_VERSION; |
114 | args->ta_nodes_max = tmp->tm_nodes_max; | | 114 | args->ta_nodes_max = tmp->tm_nodes_max; |
115 | args->ta_size_max = tmp->tm_mem_limit; | | 115 | args->ta_size_max = tmp->tm_mem_limit; |
116 | | | 116 | |
117 | root = tmp->tm_root; | | 117 | root = tmp->tm_root; |
118 | args->ta_root_uid = root->tn_uid; | | 118 | args->ta_root_uid = root->tn_uid; |
119 | args->ta_root_gid = root->tn_gid; | | 119 | args->ta_root_gid = root->tn_gid; |
120 | args->ta_root_mode = root->tn_mode; | | 120 | args->ta_root_mode = root->tn_mode; |
121 | | | 121 | |
122 | *data_len = sizeof(*args); | | 122 | *data_len = sizeof(*args); |
123 | return 0; | | 123 | return 0; |
124 | } | | 124 | } |
125 | | | 125 | |
126 | | | 126 | |
127 | /* Prohibit mounts if there is not enough memory. */ | | 127 | /* Prohibit mounts if there is not enough memory. */ |
128 | if (tmpfs_mem_info(true) < uvmexp.freetarg) | | 128 | if (tmpfs_mem_info(true) < uvmexp.freetarg) |
129 | return EINVAL; | | 129 | return EINVAL; |
130 | | | 130 | |
131 | /* Check for invalid uid and gid arguments */ | | 131 | /* Check for invalid uid and gid arguments */ |
132 | if (args->ta_root_uid == VNOVAL || args->ta_root_gid == VNOVAL) | | 132 | if (args->ta_root_uid == VNOVAL || args->ta_root_gid == VNOVAL) |
133 | return EINVAL; | | 133 | return EINVAL; |
134 | | | 134 | |
135 | /* This can never happen? */ | | 135 | /* This can never happen? */ |
136 | if ((args->ta_root_mode & ALLPERMS) == VNOVAL) | | 136 | if ((args->ta_root_mode & ALLPERMS) == VNOVAL) |
137 | return EINVAL; | | 137 | return EINVAL; |
138 | | | 138 | |
139 | /* Get the memory usage limit for this file-system. */ | | 139 | /* Get the memory usage limit for this file-system. */ |
140 | if (args->ta_size_max < PAGE_SIZE) { | | 140 | if (args->ta_size_max < PAGE_SIZE) { |
141 | memlimit = UINT64_MAX; | | 141 | memlimit = UINT64_MAX; |
142 | set_memlimit = false; | | 142 | set_memlimit = false; |
143 | } else { | | 143 | } else { |
144 | memlimit = args->ta_size_max; | | 144 | memlimit = args->ta_size_max; |
145 | set_memlimit = true; | | 145 | set_memlimit = true; |
146 | } | | 146 | } |
147 | KASSERT(memlimit > 0); | | 147 | KASSERT(memlimit > 0); |
148 | | | 148 | |
149 | if (args->ta_nodes_max <= 3) { | | 149 | if (args->ta_nodes_max <= 3) { |
150 | nodes = 3 + (memlimit / 1024); | | 150 | nodes = 3 + (memlimit / 1024); |
151 | set_nodes = false; | | 151 | set_nodes = false; |
152 | } else { | | 152 | } else { |
153 | nodes = args->ta_nodes_max; | | 153 | nodes = args->ta_nodes_max; |
154 | set_nodes = true; | | 154 | set_nodes = true; |
155 | } | | 155 | } |
156 | nodes = MIN(nodes, INT_MAX); | | 156 | nodes = MIN(nodes, INT_MAX); |
157 | KASSERT(nodes >= 3); | | 157 | KASSERT(nodes >= 3); |
158 | | | 158 | |
159 | if (mp->mnt_flag & MNT_UPDATE) { | | 159 | if (mp->mnt_flag & MNT_UPDATE) { |
160 | tmp = VFS_TO_TMPFS(mp); | | 160 | tmp = VFS_TO_TMPFS(mp); |
161 | if (set_nodes && nodes < tmp->tm_nodes_cnt) | | 161 | if (set_nodes && nodes < tmp->tm_nodes_cnt) |
162 | return EBUSY; | | 162 | return EBUSY; |
| | | 163 | if (!tmp->tm_rdonly && (mp->mnt_flag & MNT_RDONLY)) { |
| | | 164 | /* Changing from read/write to read-only. */ |
| | | 165 | flags = WRITECLOSE; |
| | | 166 | if ((mp->mnt_flag & MNT_FORCE)) |
| | | 167 | flags |= FORCECLOSE; |
| | | 168 | error = vflush(mp, NULL, flags); |
| | | 169 | if (error) |
| | | 170 | return error; |
| | | 171 | tmp->tm_rdonly = true; |
| | | 172 | } |
| | | 173 | if (tmp->tm_rdonly && (mp->mnt_flag & IMNT_WANTRDWR)) { |
| | | 174 | /* Changing from read-only to read/write. */ |
| | | 175 | tmp->tm_rdonly = false; |
| | | 176 | } |
163 | if (set_memlimit) { | | 177 | if (set_memlimit) { |
164 | if ((error = tmpfs_mntmem_set(tmp, memlimit)) != 0) | | 178 | if ((error = tmpfs_mntmem_set(tmp, memlimit)) != 0) |
165 | return error; | | 179 | return error; |
166 | } | | 180 | } |
167 | if (set_nodes) | | 181 | if (set_nodes) |
168 | tmp->tm_nodes_max = nodes; | | 182 | tmp->tm_nodes_max = nodes; |
169 | root = tmp->tm_root; | | 183 | root = tmp->tm_root; |
170 | root->tn_uid = args->ta_root_uid; | | 184 | root->tn_uid = args->ta_root_uid; |
171 | root->tn_gid = args->ta_root_gid; | | 185 | root->tn_gid = args->ta_root_gid; |
172 | root->tn_mode = args->ta_root_mode; | | 186 | root->tn_mode = args->ta_root_mode; |
173 | return 0; | | 187 | return 0; |
174 | } | | 188 | } |
175 | | | 189 | |
176 | /* Allocate the tmpfs mount structure and fill it. */ | | 190 | /* Allocate the tmpfs mount structure and fill it. */ |
177 | tmp = kmem_zalloc(sizeof(tmpfs_mount_t), KM_SLEEP); | | 191 | tmp = kmem_zalloc(sizeof(tmpfs_mount_t), KM_SLEEP); |
178 | if (tmp == NULL) | | 192 | if (tmp == NULL) |
179 | return ENOMEM; | | 193 | return ENOMEM; |
180 | | | 194 | |
| | | 195 | if ((mp->mnt_flag & MNT_RDONLY)) |
| | | 196 | tmp->tm_rdonly = true; |
181 | tmp->tm_nodes_max = nodes; | | 197 | tmp->tm_nodes_max = nodes; |
182 | tmp->tm_nodes_cnt = 0; | | 198 | tmp->tm_nodes_cnt = 0; |
183 | LIST_INIT(&tmp->tm_nodes); | | 199 | LIST_INIT(&tmp->tm_nodes); |
184 | | | 200 | |
185 | mutex_init(&tmp->tm_lock, MUTEX_DEFAULT, IPL_NONE); | | 201 | mutex_init(&tmp->tm_lock, MUTEX_DEFAULT, IPL_NONE); |
186 | tmpfs_mntmem_init(tmp, memlimit); | | 202 | tmpfs_mntmem_init(tmp, memlimit); |
187 | mp->mnt_data = tmp; | | 203 | mp->mnt_data = tmp; |
188 | | | 204 | |
189 | /* Allocate the root node. */ | | 205 | /* Allocate the root node. */ |
190 | vattr_null(&va); | | 206 | vattr_null(&va); |
191 | va.va_type = VDIR; | | 207 | va.va_type = VDIR; |
192 | va.va_mode = args->ta_root_mode & ALLPERMS; | | 208 | va.va_mode = args->ta_root_mode & ALLPERMS; |
193 | va.va_uid = args->ta_root_uid; | | 209 | va.va_uid = args->ta_root_uid; |
194 | va.va_gid = args->ta_root_gid; | | 210 | va.va_gid = args->ta_root_gid; |
195 | error = vcache_new(mp, NULL, &va, NOCRED, &vp); | | 211 | error = vcache_new(mp, NULL, &va, NOCRED, &vp); |
196 | if (error) { | | 212 | if (error) { |
197 | mp->mnt_data = NULL; | | 213 | mp->mnt_data = NULL; |
198 | tmpfs_mntmem_destroy(tmp); | | 214 | tmpfs_mntmem_destroy(tmp); |
199 | mutex_destroy(&tmp->tm_lock); | | 215 | mutex_destroy(&tmp->tm_lock); |
200 | kmem_free(tmp, sizeof(*tmp)); | | 216 | kmem_free(tmp, sizeof(*tmp)); |
201 | return error; | | 217 | return error; |
202 | } | | 218 | } |
203 | KASSERT(vp != NULL); | | 219 | KASSERT(vp != NULL); |
204 | root = VP_TO_TMPFS_NODE(vp); | | 220 | root = VP_TO_TMPFS_NODE(vp); |
205 | KASSERT(root != NULL); | | 221 | KASSERT(root != NULL); |
206 | | | 222 | |
207 | /* | | 223 | /* |
208 | * Parent of the root inode is itself. Also, root inode has no | | 224 | * Parent of the root inode is itself. Also, root inode has no |
209 | * directory entry (i.e. is never attached), thus hold an extra | | 225 | * directory entry (i.e. is never attached), thus hold an extra |
210 | * reference (link) for it. | | 226 | * reference (link) for it. |
211 | */ | | 227 | */ |
212 | root->tn_links++; | | 228 | root->tn_links++; |
213 | root->tn_spec.tn_dir.tn_parent = root; | | 229 | root->tn_spec.tn_dir.tn_parent = root; |
214 | tmp->tm_root = root; | | 230 | tmp->tm_root = root; |
215 | vrele(vp); | | 231 | vrele(vp); |
216 | | | 232 | |
217 | mp->mnt_flag |= MNT_LOCAL; | | 233 | mp->mnt_flag |= MNT_LOCAL; |
218 | mp->mnt_stat.f_namemax = TMPFS_MAXNAMLEN; | | 234 | mp->mnt_stat.f_namemax = TMPFS_MAXNAMLEN; |
219 | mp->mnt_fs_bshift = PAGE_SHIFT; | | 235 | mp->mnt_fs_bshift = PAGE_SHIFT; |
220 | mp->mnt_dev_bshift = DEV_BSHIFT; | | 236 | mp->mnt_dev_bshift = DEV_BSHIFT; |
221 | mp->mnt_iflag |= IMNT_MPSAFE | IMNT_CAN_RWTORO; | | 237 | mp->mnt_iflag |= IMNT_MPSAFE | IMNT_CAN_RWTORO; |
222 | vfs_getnewfsid(mp); | | 238 | vfs_getnewfsid(mp); |
223 | | | 239 | |
224 | error = set_statvfs_info(path, UIO_USERSPACE, "tmpfs", UIO_SYSSPACE, | | 240 | error = set_statvfs_info(path, UIO_USERSPACE, "tmpfs", UIO_SYSSPACE, |
225 | mp->mnt_op->vfs_name, mp, curlwp); | | 241 | mp->mnt_op->vfs_name, mp, curlwp); |
226 | if (error) { | | 242 | if (error) { |
227 | (void)tmpfs_unmount(mp, MNT_FORCE); | | 243 | (void)tmpfs_unmount(mp, MNT_FORCE); |
228 | } | | 244 | } |
229 | return error; | | 245 | return error; |
230 | } | | 246 | } |
231 | | | 247 | |
232 | int | | 248 | int |
233 | tmpfs_start(struct mount *mp, int flags) | | 249 | tmpfs_start(struct mount *mp, int flags) |
234 | { | | 250 | { |
235 | | | 251 | |
236 | return 0; | | 252 | return 0; |
237 | } | | 253 | } |
238 | | | 254 | |
/*
 * tmpfs_unmount: detach the file system.
 *
 * Flushes all vnodes, dismantles the directory tree, destroys every
 * inode and finally releases the per-mount structure.
 */
int
tmpfs_unmount(struct mount *mp, int mntflags)
{
	tmpfs_mount_t *tmp = VFS_TO_TMPFS(mp);
	tmpfs_node_t *node, *cnode;
	int error, flags = 0;

	/* Handle forced unmounts. */
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	/* Finalize all pending I/O. */
	error = vflush(mp, NULL, flags);
	if (error != 0)
		return error;

	/*
	 * First round, detach and destroy all directory entries.
	 * Also, clear the pointers to the vnodes - they are gone.
	 */
	LIST_FOREACH(node, &tmp->tm_nodes, tn_entries) {
		tmpfs_dirent_t *de;

		node->tn_vnode = NULL;
		if (node->tn_type != VDIR) {
			continue;
		}
		while ((de = TAILQ_FIRST(&node->tn_spec.tn_dir.tn_dir)) != NULL) {
			cnode = de->td_node;
			/* Whiteout entries carry no real inode. */
			if (cnode && cnode != TMPFS_NODE_WHITEOUT) {
				cnode->tn_vnode = NULL;
			}
			tmpfs_dir_detach(node, de);
			tmpfs_free_dirent(tmp, de);
		}
		/* Extra virtual entry (itself for the root). */
		node->tn_links--;
	}

	/* Release the reference on root (diagnostic). */
	node = tmp->tm_root;
	node->tn_links--;

	/* Second round, destroy all inodes. */
	while ((node = LIST_FIRST(&tmp->tm_nodes)) != NULL) {
		tmpfs_free_node(tmp, node);
	}

	/* Throw away the tmpfs_mount structure. */
	tmpfs_mntmem_destroy(tmp);
	mutex_destroy(&tmp->tm_lock);
	kmem_free(tmp, sizeof(*tmp));
	mp->mnt_data = NULL;

	return 0;
}
295 | | | 311 | |
/*
 * tmpfs_root: return a locked, referenced vnode for the root inode.
 *
 * => On success *vpp holds the root vnode, locked LK_EXCLUSIVE.
 * => On failure *vpp is NULL and an errno value is returned.
 */
int
tmpfs_root(struct mount *mp, vnode_t **vpp)
{
	tmpfs_node_t *node = VFS_TO_TMPFS(mp)->tm_root;
	int error;

	error = vcache_get(mp, &node, sizeof(node), vpp);
	if (error)
		return error;
	error = vn_lock(*vpp, LK_EXCLUSIVE);
	if (error) {
		/* Drop the reference taken by vcache_get(). */
		vrele(*vpp);
		*vpp = NULL;
		return error;
	}

	return 0;
}
314 | | | 330 | |
315 | int | | 331 | int |
316 | tmpfs_vget(struct mount *mp, ino_t ino, vnode_t **vpp) | | 332 | tmpfs_vget(struct mount *mp, ino_t ino, vnode_t **vpp) |
317 | { | | 333 | { |
318 | | | 334 | |
319 | return EOPNOTSUPP; | | 335 | return EOPNOTSUPP; |
320 | } | | 336 | } |
321 | | | 337 | |
/*
 * tmpfs_fhtovp: convert a file handle into a locked, referenced vnode.
 *
 * Scans the mount's inode list for the id stored in the handle, pins
 * the node with tn_holdcount while the vnode is being obtained, and
 * validates the handle's generation number against the inode.
 *
 * => Returns ESTALE if the inode is gone or the generation mismatches.
 */
int
tmpfs_fhtovp(struct mount *mp, struct fid *fhp, vnode_t **vpp)
{
	tmpfs_mount_t *tmp = VFS_TO_TMPFS(mp);
	tmpfs_node_t *node;
	tmpfs_fid_t tfh;
	int error;

	if (fhp->fid_len != sizeof(tmpfs_fid_t)) {
		return EINVAL;
	}
	memcpy(&tfh, fhp, sizeof(tmpfs_fid_t));

	mutex_enter(&tmp->tm_lock);
	LIST_FOREACH(node, &tmp->tm_nodes, tn_entries) {
		if (node->tn_id == tfh.tf_id) {
			/* Prevent this node from disappearing. */
			atomic_inc_32(&node->tn_holdcount);
			break;
		}
	}
	mutex_exit(&tmp->tm_lock);
	if (node == NULL)
		return ESTALE;

	error = vcache_get(mp, &node, sizeof(node), vpp);
	/* If this node has been reclaimed free it now. */
	if (atomic_dec_32_nv(&node->tn_holdcount) == TMPFS_NODE_RECLAIMED) {
		KASSERT(error != 0);
		tmpfs_free_node(tmp, node);
	}
	if (error)
		return (error == ENOENT ? ESTALE : error);
	error = vn_lock(*vpp, LK_EXCLUSIVE);
	if (error) {
		vrele(*vpp);
		*vpp = NULL;
		return error;
	}
	/* A generation mismatch means the handle refers to a reused id. */
	if (TMPFS_NODE_GEN(node) != tfh.tf_gen) {
		vput(*vpp);
		*vpp = NULL;
		return ESTALE;
	}

	return 0;
}
369 | | | 385 | |
370 | int | | 386 | int |
371 | tmpfs_vptofh(vnode_t *vp, struct fid *fhp, size_t *fh_size) | | 387 | tmpfs_vptofh(vnode_t *vp, struct fid *fhp, size_t *fh_size) |
372 | { | | 388 | { |
373 | tmpfs_fid_t tfh; | | 389 | tmpfs_fid_t tfh; |
374 | tmpfs_node_t *node; | | 390 | tmpfs_node_t *node; |
375 | | | 391 | |
376 | if (*fh_size < sizeof(tmpfs_fid_t)) { | | 392 | if (*fh_size < sizeof(tmpfs_fid_t)) { |
377 | *fh_size = sizeof(tmpfs_fid_t); | | 393 | *fh_size = sizeof(tmpfs_fid_t); |
378 | return E2BIG; | | 394 | return E2BIG; |
379 | } | | 395 | } |
380 | *fh_size = sizeof(tmpfs_fid_t); | | 396 | *fh_size = sizeof(tmpfs_fid_t); |
381 | node = VP_TO_TMPFS_NODE(vp); | | 397 | node = VP_TO_TMPFS_NODE(vp); |
382 | | | 398 | |
383 | memset(&tfh, 0, sizeof(tfh)); | | 399 | memset(&tfh, 0, sizeof(tfh)); |
384 | tfh.tf_len = sizeof(tmpfs_fid_t); | | 400 | tfh.tf_len = sizeof(tmpfs_fid_t); |
385 | tfh.tf_gen = TMPFS_NODE_GEN(node); | | 401 | tfh.tf_gen = TMPFS_NODE_GEN(node); |
386 | tfh.tf_id = node->tn_id; | | 402 | tfh.tf_id = node->tn_id; |
387 | memcpy(fhp, &tfh, sizeof(tfh)); | | 403 | memcpy(fhp, &tfh, sizeof(tfh)); |
388 | | | 404 | |
389 | return 0; | | 405 | return 0; |
390 | } | | 406 | } |
391 | | | 407 | |
/*
 * tmpfs_statvfs: report file system statistics.
 *
 * Block counts are in PAGE_SIZE units; free inode count is bounded both
 * by the configured node limit and by the memory still available.
 */
int
tmpfs_statvfs(struct mount *mp, struct statvfs *sbp)
{
	tmpfs_mount_t *tmp;
	fsfilcnt_t freenodes;
	size_t avail;

	tmp = VFS_TO_TMPFS(mp);

	sbp->f_iosize = sbp->f_frsize = sbp->f_bsize = PAGE_SIZE;

	/* tm_acc_lock keeps the usage counters consistent while we read. */
	mutex_enter(&tmp->tm_acc_lock);
	avail = tmpfs_pages_avail(tmp);
	sbp->f_blocks = (tmpfs_bytes_max(tmp) >> PAGE_SHIFT);
	sbp->f_bavail = sbp->f_bfree = avail;
	sbp->f_bresvd = 0;

	freenodes = MIN(tmp->tm_nodes_max - tmp->tm_nodes_cnt,
	    avail * PAGE_SIZE / sizeof(tmpfs_node_t));

	sbp->f_files = tmp->tm_nodes_cnt + freenodes;
	sbp->f_favail = sbp->f_ffree = freenodes;
	sbp->f_fresvd = 0;
	mutex_exit(&tmp->tm_acc_lock);

	copy_statvfs_info(sbp, mp);

	return 0;
}
421 | | | 437 | |
422 | int | | 438 | int |
423 | tmpfs_sync(struct mount *mp, int waitfor, kauth_cred_t uc) | | 439 | tmpfs_sync(struct mount *mp, int waitfor, kauth_cred_t uc) |
424 | { | | 440 | { |
425 | | | 441 | |
426 | return 0; | | 442 | return 0; |
427 | } | | 443 | } |
428 | | | 444 | |
429 | int | | 445 | int |
430 | tmpfs_snapshot(struct mount *mp, vnode_t *vp, struct timespec *ctime) | | 446 | tmpfs_snapshot(struct mount *mp, vnode_t *vp, struct timespec *ctime) |
431 | { | | 447 | { |
432 | | | 448 | |
433 | return EOPNOTSUPP; | | 449 | return EOPNOTSUPP; |
434 | } | | 450 | } |
435 | | | 451 | |
436 | /* | | 452 | /* |
437 | * tmpfs vfs operations. | | 453 | * tmpfs vfs operations. |
438 | */ | | 454 | */ |
439 | | | 455 | |
/* Vnode operation tables, defined in the per-type tmpfs vnode files. */
extern const struct vnodeopv_desc tmpfs_fifoop_opv_desc;
extern const struct vnodeopv_desc tmpfs_specop_opv_desc;
extern const struct vnodeopv_desc tmpfs_vnodeop_opv_desc;

const struct vnodeopv_desc * const tmpfs_vnodeopv_descs[] = {
	&tmpfs_fifoop_opv_desc,
	&tmpfs_specop_opv_desc,
	&tmpfs_vnodeop_opv_desc,
	NULL,
};

/* VFS operations vector; unsupported operations map to eopnotsupp. */
struct vfsops tmpfs_vfsops = {
	.vfs_name = MOUNT_TMPFS,
	.vfs_min_mount_data = sizeof (struct tmpfs_args),
	.vfs_mount = tmpfs_mount,
	.vfs_start = tmpfs_start,
	.vfs_unmount = tmpfs_unmount,
	.vfs_root = tmpfs_root,
	.vfs_quotactl = (void *)eopnotsupp,
	.vfs_statvfs = tmpfs_statvfs,
	.vfs_sync = tmpfs_sync,
	.vfs_vget = tmpfs_vget,
	.vfs_loadvnode = tmpfs_loadvnode,
	.vfs_newvnode = tmpfs_newvnode,
	.vfs_fhtovp = tmpfs_fhtovp,
	.vfs_vptofh = tmpfs_vptofh,
	.vfs_init = tmpfs_init,
	.vfs_done = tmpfs_done,
	.vfs_snapshot = tmpfs_snapshot,
	.vfs_extattrctl = vfs_stdextattrctl,
	.vfs_suspendctl = (void *)eopnotsupp,
	.vfs_renamelock_enter = genfs_renamelock_enter,
	.vfs_renamelock_exit = genfs_renamelock_exit,
	.vfs_fsync = (void *)eopnotsupp,
	.vfs_opv_descs = tmpfs_vnodeopv_descs
};
476 | | | 492 | |
477 | static int | | 493 | static int |
478 | tmpfs_modcmd(modcmd_t cmd, void *arg) | | 494 | tmpfs_modcmd(modcmd_t cmd, void *arg) |
479 | { | | 495 | { |
480 | | | 496 | |
481 | switch (cmd) { | | 497 | switch (cmd) { |
482 | case MODULE_CMD_INIT: | | 498 | case MODULE_CMD_INIT: |
483 | return vfs_attach(&tmpfs_vfsops); | | 499 | return vfs_attach(&tmpfs_vfsops); |
484 | case MODULE_CMD_FINI: | | 500 | case MODULE_CMD_FINI: |
485 | return vfs_detach(&tmpfs_vfsops); | | 501 | return vfs_detach(&tmpfs_vfsops); |
486 | default: | | 502 | default: |
487 | return ENOTTY; | | 503 | return ENOTTY; |
488 | } | | 504 | } |
489 | } | | 505 | } |