Fri Jan 27 10:47:54 2017 UTC
Run vflush() when going from read/write to read-only.


(hannken)
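For context, the change handles a mount update that turns a read/write tmpfs read-only: since tmpfs keeps all data in memory, the only way to guarantee that no writable vnodes survive the transition is to vflush() them at update time and remember the resulting mode in the mount structure. Below is a condensed sketch of the MNT_UPDATE handling added to tmpfs_mount(); the full context, including the surrounding argument checks, is in the tmpfs_vfsops.c diff further down.

	/*
	 * Sketch of the new read/write <-> read-only transitions inside
	 * the MNT_UPDATE branch of tmpfs_mount().  WRITECLOSE asks
	 * vflush() to flush only vnodes currently open for writing;
	 * adding FORCECLOSE (from MNT_FORCE) closes them even if busy.
	 */
	tmpfs_mount_t *tmp = VFS_TO_TMPFS(mp);
	int flags, error;

	if (!tmp->tm_rdonly && (mp->mnt_flag & MNT_RDONLY)) {
		/* Going from read/write to read-only: flush writers. */
		flags = WRITECLOSE;
		if ((mp->mnt_flag & MNT_FORCE))
			flags |= FORCECLOSE;
		error = vflush(mp, NULL, flags);
		if (error)
			return error;
		tmp->tm_rdonly = true;
	}
	if (tmp->tm_rdonly && (mp->mnt_flag & IMNT_WANTRDWR)) {
		/* Going from read-only back to read/write. */
		tmp->tm_rdonly = false;
	}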
diff -r1.52 -r1.53 src/sys/fs/tmpfs/tmpfs.h
diff -r1.68 -r1.69 src/sys/fs/tmpfs/tmpfs_vfsops.c

cvs diff -r1.52 -r1.53 src/sys/fs/tmpfs/tmpfs.h

--- src/sys/fs/tmpfs/tmpfs.h 2015/07/06 10:07:12 1.52
+++ src/sys/fs/tmpfs/tmpfs.h 2017/01/27 10:47:54 1.53
@@ -1,346 +1,349 @@
1/* $NetBSD: tmpfs.h,v 1.52 2015/07/06 10:07:12 hannken Exp $ */ 1/* $NetBSD: tmpfs.h,v 1.53 2017/01/27 10:47:54 hannken Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2005, 2006, 2007 The NetBSD Foundation, Inc. 4 * Copyright (c) 2005, 2006, 2007 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code 8 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
9 * 2005 program. 9 * 2005 program.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * are met: 13 * are met:
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer. 15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright 16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the 17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution. 18 * documentation and/or other materials provided with the distribution.
19 * 19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE. 30 * POSSIBILITY OF SUCH DAMAGE.
31 */ 31 */
32 32
33#ifndef _FS_TMPFS_TMPFS_H_ 33#ifndef _FS_TMPFS_TMPFS_H_
34#define _FS_TMPFS_TMPFS_H_ 34#define _FS_TMPFS_TMPFS_H_
35 35
36#if !defined(_KERNEL) && !defined(_KMEMUSER) 36#if !defined(_KERNEL) && !defined(_KMEMUSER)
37#error "not supposed to be exposed to userland" 37#error "not supposed to be exposed to userland"
38#endif 38#endif
39 39
40#include <sys/dirent.h> 40#include <sys/dirent.h>
41#include <sys/mount.h> 41#include <sys/mount.h>
42#include <sys/pool.h> 42#include <sys/pool.h>
43#include <sys/queue.h> 43#include <sys/queue.h>
44#include <sys/vnode.h> 44#include <sys/vnode.h>
45 45
46/* 46/*
47 * Internal representation of a tmpfs directory entry. 47 * Internal representation of a tmpfs directory entry.
48 * 48 *
49 * All fields are protected by vnode lock. 49 * All fields are protected by vnode lock.
50 */ 50 */
51typedef struct tmpfs_dirent { 51typedef struct tmpfs_dirent {
52 TAILQ_ENTRY(tmpfs_dirent) td_entries; 52 TAILQ_ENTRY(tmpfs_dirent) td_entries;
53 53
54 /* Pointer to the inode this entry refers to. */ 54 /* Pointer to the inode this entry refers to. */
55 struct tmpfs_node * td_node; 55 struct tmpfs_node * td_node;
56 56
57 /* Sequence number, see tmpfs_dir_getseq(). */ 57 /* Sequence number, see tmpfs_dir_getseq(). */
58 uint32_t td_seq; 58 uint32_t td_seq;
59 59
60 /* Name and its length. */ 60 /* Name and its length. */
61 char * td_name; 61 char * td_name;
62 uint16_t td_namelen; 62 uint16_t td_namelen;
63} tmpfs_dirent_t; 63} tmpfs_dirent_t;
64 64
65TAILQ_HEAD(tmpfs_dir, tmpfs_dirent); 65TAILQ_HEAD(tmpfs_dir, tmpfs_dirent);
66 66
67/* 67/*
68 * Internal representation of a tmpfs file system node -- inode. 68 * Internal representation of a tmpfs file system node -- inode.
69 * 69 *
70 * This structure is split in two parts: one holds attributes common 70 * This structure is split in two parts: one holds attributes common
71 * to all file types and the other holds data that is only applicable to 71 * to all file types and the other holds data that is only applicable to
72 * a particular type. 72 * a particular type.
73 * 73 *
74 * All fields are protected by vnode lock. The vnode association itself 74 * All fields are protected by vnode lock. The vnode association itself
75 * is protected by vcache. 75 * is protected by vcache.
76 */ 76 */
77typedef struct tmpfs_node { 77typedef struct tmpfs_node {
78 LIST_ENTRY(tmpfs_node) tn_entries; 78 LIST_ENTRY(tmpfs_node) tn_entries;
79 79
80 /* 80 /*
81 * Each inode has a corresponding vnode. It is a bi-directional 81 * Each inode has a corresponding vnode. It is a bi-directional
82 * association. Whenever vnode is allocated, its v_data field is 82 * association. Whenever vnode is allocated, its v_data field is
83 * set to the inode it reference, and tmpfs_node_t::tn_vnode is 83 * set to the inode it reference, and tmpfs_node_t::tn_vnode is
84 * set to point to the said vnode. 84 * set to point to the said vnode.
85 * 85 *
86 * Further attempts to allocate a vnode for this same node will 86 * Further attempts to allocate a vnode for this same node will
87 * result in returning a new reference to the value stored in 87 * result in returning a new reference to the value stored in
88 * tn_vnode. It may be NULL when the node is unused (that is, 88 * tn_vnode. It may be NULL when the node is unused (that is,
89 * no vnode has been allocated or it has been reclaimed). 89 * no vnode has been allocated or it has been reclaimed).
90 */ 90 */
91 vnode_t * tn_vnode; 91 vnode_t * tn_vnode;
92 92
93 /* Prevent node from being reclaimed. */ 93 /* Prevent node from being reclaimed. */
94 uint32_t tn_holdcount; 94 uint32_t tn_holdcount;
95 95
96 /* Directory entry. Only a hint, since hard link can have multiple. */ 96 /* Directory entry. Only a hint, since hard link can have multiple. */
97 tmpfs_dirent_t * tn_dirent_hint; 97 tmpfs_dirent_t * tn_dirent_hint;
98 98
99 /* The inode type: VBLK, VCHR, VDIR, VFIFO, VLNK, VREG or VSOCK. */ 99 /* The inode type: VBLK, VCHR, VDIR, VFIFO, VLNK, VREG or VSOCK. */
100 enum vtype tn_type; 100 enum vtype tn_type;
101 101
102 /* Inode identifier and generation number. */ 102 /* Inode identifier and generation number. */
103 ino_t tn_id; 103 ino_t tn_id;
104 uint32_t tn_gen; 104 uint32_t tn_gen;
105 105
106 /* The inode size. */ 106 /* The inode size. */
107 off_t tn_size; 107 off_t tn_size;
108 108
109 /* Generic node attributes. */ 109 /* Generic node attributes. */
110 uid_t tn_uid; 110 uid_t tn_uid;
111 gid_t tn_gid; 111 gid_t tn_gid;
112 mode_t tn_mode; 112 mode_t tn_mode;
113 int tn_flags; 113 int tn_flags;
114 nlink_t tn_links; 114 nlink_t tn_links;
115 struct timespec tn_atime; 115 struct timespec tn_atime;
116 struct timespec tn_mtime; 116 struct timespec tn_mtime;
117 struct timespec tn_ctime; 117 struct timespec tn_ctime;
118 struct timespec tn_birthtime; 118 struct timespec tn_birthtime;
119 119
120 /* Head of byte-level lock list (used by tmpfs_advlock). */ 120 /* Head of byte-level lock list (used by tmpfs_advlock). */
121 struct lockf * tn_lockf; 121 struct lockf * tn_lockf;
122 122
123 union { 123 union {
124 /* Type case: VBLK or VCHR. */ 124 /* Type case: VBLK or VCHR. */
125 struct { 125 struct {
126 dev_t tn_rdev; 126 dev_t tn_rdev;
127 } tn_dev; 127 } tn_dev;
128 128
129 /* Type case: VDIR. */ 129 /* Type case: VDIR. */
130 struct { 130 struct {
131 /* Parent directory (root inode points to itself). */ 131 /* Parent directory (root inode points to itself). */
132 struct tmpfs_node * tn_parent; 132 struct tmpfs_node * tn_parent;
133 133
134 /* List of directory entries. */ 134 /* List of directory entries. */
135 struct tmpfs_dir tn_dir; 135 struct tmpfs_dir tn_dir;
136 136
137 /* Last given sequence number and their arena. */ 137 /* Last given sequence number and their arena. */
138 uint32_t tn_next_seq; 138 uint32_t tn_next_seq;
139 void * tn_seq_arena; 139 void * tn_seq_arena;
140 140
141 /* 141 /*
142 * Pointer of the last directory entry returned 142 * Pointer of the last directory entry returned
143 * by the readdir(3) operation. 143 * by the readdir(3) operation.
144 */ 144 */
145 struct tmpfs_dirent * tn_readdir_lastp; 145 struct tmpfs_dirent * tn_readdir_lastp;
146 } tn_dir; 146 } tn_dir;
147 147
148 /* Type case: VLNK. */ 148 /* Type case: VLNK. */
149 struct tn_lnk { 149 struct tn_lnk {
150 /* The link's target. */ 150 /* The link's target. */
151 char * tn_link; 151 char * tn_link;
152 } tn_lnk; 152 } tn_lnk;
153 153
154 /* Type case: VREG. */ 154 /* Type case: VREG. */
155 struct tn_reg { 155 struct tn_reg {
156 /* Underlying UVM object to store contents. */ 156 /* Underlying UVM object to store contents. */
157 struct uvm_object * tn_aobj; 157 struct uvm_object * tn_aobj;
158 size_t tn_aobj_pages; 158 size_t tn_aobj_pages;
159 } tn_reg; 159 } tn_reg;
160 } tn_spec; 160 } tn_spec;
161} tmpfs_node_t; 161} tmpfs_node_t;
162 162
163#if defined(_KERNEL) 163#if defined(_KERNEL)
164 164
165VFS_PROTOS(tmpfs); 165VFS_PROTOS(tmpfs);
166 166
167LIST_HEAD(tmpfs_node_list, tmpfs_node); 167LIST_HEAD(tmpfs_node_list, tmpfs_node);
168 168
169#define TMPFS_MAXNAMLEN 255 169#define TMPFS_MAXNAMLEN 255
170/* Validate maximum td_namelen length. */ 170/* Validate maximum td_namelen length. */
171CTASSERT(TMPFS_MAXNAMLEN < UINT16_MAX); 171CTASSERT(TMPFS_MAXNAMLEN < UINT16_MAX);
172 172
173/* 173/*
174 * Reserved values for the virtual entries (the first must be 0) and EOF. 174 * Reserved values for the virtual entries (the first must be 0) and EOF.
175 * The start/end of the incremental range, see tmpfs_dir_getseq(). 175 * The start/end of the incremental range, see tmpfs_dir_getseq().
176 */ 176 */
177#define TMPFS_DIRSEQ_DOT 0 177#define TMPFS_DIRSEQ_DOT 0
178#define TMPFS_DIRSEQ_DOTDOT 1 178#define TMPFS_DIRSEQ_DOTDOT 1
179#define TMPFS_DIRSEQ_EOF 2 179#define TMPFS_DIRSEQ_EOF 2
180 180
181#define TMPFS_DIRSEQ_START 3 /* inclusive */ 181#define TMPFS_DIRSEQ_START 3 /* inclusive */
182#define TMPFS_DIRSEQ_END (1U << 30) /* exclusive */ 182#define TMPFS_DIRSEQ_END (1U << 30) /* exclusive */
183 183
184/* Mark to indicate that the number is not set. */ 184/* Mark to indicate that the number is not set. */
185#define TMPFS_DIRSEQ_NONE (1U << 31) 185#define TMPFS_DIRSEQ_NONE (1U << 31)
186 186
187/* Flags: time update requests. */ 187/* Flags: time update requests. */
188#define TMPFS_UPDATE_ATIME 0x01 188#define TMPFS_UPDATE_ATIME 0x01
189#define TMPFS_UPDATE_MTIME 0x02 189#define TMPFS_UPDATE_MTIME 0x02
190#define TMPFS_UPDATE_CTIME 0x04 190#define TMPFS_UPDATE_CTIME 0x04
191 191
192/* 192/*
193 * Bits indicating whiteout use for the directory. 193 * Bits indicating whiteout use for the directory.
194 * We abuse tmpfs_node_t::tn_gen for that. 194 * We abuse tmpfs_node_t::tn_gen for that.
195 */ 195 */
196#define TMPFS_WHITEOUT_BIT (1U << 31) 196#define TMPFS_WHITEOUT_BIT (1U << 31)
197#define TMPFS_NODE_GEN_MASK (TMPFS_WHITEOUT_BIT - 1) 197#define TMPFS_NODE_GEN_MASK (TMPFS_WHITEOUT_BIT - 1)
198 198
199#define TMPFS_NODE_GEN(node) \ 199#define TMPFS_NODE_GEN(node) \
200 ((node)->tn_gen & TMPFS_NODE_GEN_MASK) 200 ((node)->tn_gen & TMPFS_NODE_GEN_MASK)
201 201
202/* White-out inode indicator. */ 202/* White-out inode indicator. */
203#define TMPFS_NODE_WHITEOUT ((tmpfs_node_t *)-1) 203#define TMPFS_NODE_WHITEOUT ((tmpfs_node_t *)-1)
204 204
205/* 205/*
206 * Bit indicating this node must be reclaimed when holdcount reaches zero. 206 * Bit indicating this node must be reclaimed when holdcount reaches zero.
207 * Ored into tmpfs_node_t::tn_holdcount. 207 * Ored into tmpfs_node_t::tn_holdcount.
208 */ 208 */
209#define TMPFS_NODE_RECLAIMED (1U << 30) 209#define TMPFS_NODE_RECLAIMED (1U << 30)
210 210
211/* 211/*
212 * Internal representation of a tmpfs mount point. 212 * Internal representation of a tmpfs mount point.
213 */ 213 */
214typedef struct tmpfs_mount { 214typedef struct tmpfs_mount {
215 /* Limit and number of bytes in use by the file system. */ 215 /* Limit and number of bytes in use by the file system. */
216 uint64_t tm_mem_limit; 216 uint64_t tm_mem_limit;
217 uint64_t tm_bytes_used; 217 uint64_t tm_bytes_used;
218 kmutex_t tm_acc_lock; 218 kmutex_t tm_acc_lock;
219 219
 220 /* Read-only indicator. */
 221 bool tm_rdonly;
 222
220 /* Pointer to the root inode. */ 223 /* Pointer to the root inode. */
221 tmpfs_node_t * tm_root; 224 tmpfs_node_t * tm_root;
222 225
223 /* Maximum number of possible nodes for this file system. */ 226 /* Maximum number of possible nodes for this file system. */
224 unsigned int tm_nodes_max; 227 unsigned int tm_nodes_max;
225 228
226 /* Number of nodes currently allocated. */ 229 /* Number of nodes currently allocated. */
227 unsigned int tm_nodes_cnt; 230 unsigned int tm_nodes_cnt;
228 231
229 /* List of inodes and the lock protecting it. */ 232 /* List of inodes and the lock protecting it. */
230 kmutex_t tm_lock; 233 kmutex_t tm_lock;
231 struct tmpfs_node_list tm_nodes; 234 struct tmpfs_node_list tm_nodes;
232} tmpfs_mount_t; 235} tmpfs_mount_t;
233 236
234/* 237/*
235 * This structure maps a file identifier to a tmpfs node. Used by the 238 * This structure maps a file identifier to a tmpfs node. Used by the
236 * NFS code. 239 * NFS code.
237 */ 240 */
238typedef struct tmpfs_fid { 241typedef struct tmpfs_fid {
239 uint16_t tf_len; 242 uint16_t tf_len;
240 uint16_t tf_pad; 243 uint16_t tf_pad;
241 uint32_t tf_gen; 244 uint32_t tf_gen;
242 ino_t tf_id; 245 ino_t tf_id;
243} tmpfs_fid_t; 246} tmpfs_fid_t;
244 247
245/* 248/*
246 * Prototypes for tmpfs_subr.c. 249 * Prototypes for tmpfs_subr.c.
247 */ 250 */
248 251
249void tmpfs_free_node(tmpfs_mount_t *, tmpfs_node_t *); 252void tmpfs_free_node(tmpfs_mount_t *, tmpfs_node_t *);
250 253
251int tmpfs_construct_node(vnode_t *, vnode_t **, struct vattr *, 254int tmpfs_construct_node(vnode_t *, vnode_t **, struct vattr *,
252 struct componentname *, char *); 255 struct componentname *, char *);
253 256
254int tmpfs_alloc_dirent(tmpfs_mount_t *, const char *, uint16_t, 257int tmpfs_alloc_dirent(tmpfs_mount_t *, const char *, uint16_t,
255 tmpfs_dirent_t **); 258 tmpfs_dirent_t **);
256void tmpfs_free_dirent(tmpfs_mount_t *, tmpfs_dirent_t *); 259void tmpfs_free_dirent(tmpfs_mount_t *, tmpfs_dirent_t *);
257void tmpfs_dir_attach(tmpfs_node_t *, tmpfs_dirent_t *, tmpfs_node_t *); 260void tmpfs_dir_attach(tmpfs_node_t *, tmpfs_dirent_t *, tmpfs_node_t *);
258void tmpfs_dir_detach(tmpfs_node_t *, tmpfs_dirent_t *); 261void tmpfs_dir_detach(tmpfs_node_t *, tmpfs_dirent_t *);
259 262
260tmpfs_dirent_t *tmpfs_dir_lookup(tmpfs_node_t *, struct componentname *); 263tmpfs_dirent_t *tmpfs_dir_lookup(tmpfs_node_t *, struct componentname *);
261tmpfs_dirent_t *tmpfs_dir_cached(tmpfs_node_t *); 264tmpfs_dirent_t *tmpfs_dir_cached(tmpfs_node_t *);
262 265
263uint32_t tmpfs_dir_getseq(tmpfs_node_t *, tmpfs_dirent_t *); 266uint32_t tmpfs_dir_getseq(tmpfs_node_t *, tmpfs_dirent_t *);
264tmpfs_dirent_t *tmpfs_dir_lookupbyseq(tmpfs_node_t *, off_t); 267tmpfs_dirent_t *tmpfs_dir_lookupbyseq(tmpfs_node_t *, off_t);
265int tmpfs_dir_getdents(tmpfs_node_t *, struct uio *, off_t *); 268int tmpfs_dir_getdents(tmpfs_node_t *, struct uio *, off_t *);
266 269
267int tmpfs_reg_resize(vnode_t *, off_t); 270int tmpfs_reg_resize(vnode_t *, off_t);
268 271
269int tmpfs_chflags(vnode_t *, int, kauth_cred_t, lwp_t *); 272int tmpfs_chflags(vnode_t *, int, kauth_cred_t, lwp_t *);
270int tmpfs_chmod(vnode_t *, mode_t, kauth_cred_t, lwp_t *); 273int tmpfs_chmod(vnode_t *, mode_t, kauth_cred_t, lwp_t *);
271int tmpfs_chown(vnode_t *, uid_t, gid_t, kauth_cred_t, lwp_t *); 274int tmpfs_chown(vnode_t *, uid_t, gid_t, kauth_cred_t, lwp_t *);
272int tmpfs_chsize(vnode_t *, u_quad_t, kauth_cred_t, lwp_t *); 275int tmpfs_chsize(vnode_t *, u_quad_t, kauth_cred_t, lwp_t *);
273int tmpfs_chtimes(vnode_t *, const struct timespec *, 276int tmpfs_chtimes(vnode_t *, const struct timespec *,
274 const struct timespec *, const struct timespec *, int, 277 const struct timespec *, const struct timespec *, int,
275 kauth_cred_t, lwp_t *); 278 kauth_cred_t, lwp_t *);
276void tmpfs_update(vnode_t *, unsigned); 279void tmpfs_update(vnode_t *, unsigned);
277 280
278/* 281/*
279 * Prototypes for tmpfs_mem.c. 282 * Prototypes for tmpfs_mem.c.
280 */ 283 */
281 284
282void tmpfs_mntmem_init(tmpfs_mount_t *, uint64_t); 285void tmpfs_mntmem_init(tmpfs_mount_t *, uint64_t);
283void tmpfs_mntmem_destroy(tmpfs_mount_t *); 286void tmpfs_mntmem_destroy(tmpfs_mount_t *);
284int tmpfs_mntmem_set(tmpfs_mount_t *, uint64_t); 287int tmpfs_mntmem_set(tmpfs_mount_t *, uint64_t);
285 288
286size_t tmpfs_mem_info(bool); 289size_t tmpfs_mem_info(bool);
287uint64_t tmpfs_bytes_max(tmpfs_mount_t *); 290uint64_t tmpfs_bytes_max(tmpfs_mount_t *);
288size_t tmpfs_pages_avail(tmpfs_mount_t *); 291size_t tmpfs_pages_avail(tmpfs_mount_t *);
289bool tmpfs_mem_incr(tmpfs_mount_t *, size_t); 292bool tmpfs_mem_incr(tmpfs_mount_t *, size_t);
290void tmpfs_mem_decr(tmpfs_mount_t *, size_t); 293void tmpfs_mem_decr(tmpfs_mount_t *, size_t);
291 294
292tmpfs_dirent_t *tmpfs_dirent_get(tmpfs_mount_t *); 295tmpfs_dirent_t *tmpfs_dirent_get(tmpfs_mount_t *);
293void tmpfs_dirent_put(tmpfs_mount_t *, tmpfs_dirent_t *); 296void tmpfs_dirent_put(tmpfs_mount_t *, tmpfs_dirent_t *);
294 297
295tmpfs_node_t * tmpfs_node_get(tmpfs_mount_t *); 298tmpfs_node_t * tmpfs_node_get(tmpfs_mount_t *);
296void tmpfs_node_put(tmpfs_mount_t *, tmpfs_node_t *); 299void tmpfs_node_put(tmpfs_mount_t *, tmpfs_node_t *);
297 300
298char * tmpfs_strname_alloc(tmpfs_mount_t *, size_t); 301char * tmpfs_strname_alloc(tmpfs_mount_t *, size_t);
299void tmpfs_strname_free(tmpfs_mount_t *, char *, size_t); 302void tmpfs_strname_free(tmpfs_mount_t *, char *, size_t);
300bool tmpfs_strname_neqlen(struct componentname *, struct componentname *); 303bool tmpfs_strname_neqlen(struct componentname *, struct componentname *);
301 304
302/* 305/*
303 * Ensures that the node pointed by 'node' is a directory and that its 306 * Ensures that the node pointed by 'node' is a directory and that its
304 * contents are consistent with respect to directories. 307 * contents are consistent with respect to directories.
305 */ 308 */
306#define TMPFS_VALIDATE_DIR(node) \ 309#define TMPFS_VALIDATE_DIR(node) \
307 KASSERT((node)->tn_vnode == NULL || VOP_ISLOCKED((node)->tn_vnode)); \ 310 KASSERT((node)->tn_vnode == NULL || VOP_ISLOCKED((node)->tn_vnode)); \
308 KASSERT((node)->tn_type == VDIR); \ 311 KASSERT((node)->tn_type == VDIR); \
309 KASSERT((node)->tn_size % sizeof(tmpfs_dirent_t) == 0); 312 KASSERT((node)->tn_size % sizeof(tmpfs_dirent_t) == 0);
310 313
311/* 314/*
312 * Routines to convert VFS structures to tmpfs internal ones. 315 * Routines to convert VFS structures to tmpfs internal ones.
313 */ 316 */
314 317
315static inline tmpfs_mount_t * 318static inline tmpfs_mount_t *
316VFS_TO_TMPFS(struct mount *mp) 319VFS_TO_TMPFS(struct mount *mp)
317{ 320{
318 tmpfs_mount_t *tmp = mp->mnt_data; 321 tmpfs_mount_t *tmp = mp->mnt_data;
319 322
320 KASSERT(tmp != NULL); 323 KASSERT(tmp != NULL);
321 return tmp; 324 return tmp;
322} 325}
323 326
324static inline tmpfs_node_t * 327static inline tmpfs_node_t *
325VP_TO_TMPFS_DIR(vnode_t *vp) 328VP_TO_TMPFS_DIR(vnode_t *vp)
326{ 329{
327 tmpfs_node_t *node = vp->v_data; 330 tmpfs_node_t *node = vp->v_data;
328 331
329 KASSERT(node != NULL); 332 KASSERT(node != NULL);
330 TMPFS_VALIDATE_DIR(node); 333 TMPFS_VALIDATE_DIR(node);
331 return node; 334 return node;
332} 335}
333 336
334#endif /* defined(_KERNEL) */ 337#endif /* defined(_KERNEL) */
335 338
336static __inline tmpfs_node_t * 339static __inline tmpfs_node_t *
337VP_TO_TMPFS_NODE(vnode_t *vp) 340VP_TO_TMPFS_NODE(vnode_t *vp)
338{ 341{
339 tmpfs_node_t *node = vp->v_data; 342 tmpfs_node_t *node = vp->v_data;
340#ifdef KASSERT 343#ifdef KASSERT
341 KASSERT(node != NULL); 344 KASSERT(node != NULL);
342#endif 345#endif
343 return node; 346 return node;
344} 347}
345 348
346#endif /* _FS_TMPFS_TMPFS_H_ */ 349#endif /* _FS_TMPFS_TMPFS_H_ */
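
The header change above adds a single field, tm_rdonly, to tmpfs_mount_t so the mount remembers whether it is currently read-only across updates. As a minimal illustration of how such a flag could be consulted from elsewhere in the file system, a small helper is sketched below; tmpfs_is_rdonly() is hypothetical and not part of this commit.

	/*
	 * Hypothetical convenience helper, not part of this commit:
	 * report whether the tmpfs mount behind a vnode is read-only.
	 */
	static inline bool
	tmpfs_is_rdonly(vnode_t *vp)
	{
		tmpfs_mount_t *tmp = VFS_TO_TMPFS(vp->v_mount);

		return tmp->tm_rdonly;
	}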

cvs diff -r1.68 -r1.69 src/sys/fs/tmpfs/tmpfs_vfsops.c

--- src/sys/fs/tmpfs/tmpfs_vfsops.c 2016/08/26 21:44:24 1.68
+++ src/sys/fs/tmpfs/tmpfs_vfsops.c 2017/01/27 10:47:54 1.69
@@ -1,489 +1,505 @@
1/* $NetBSD: tmpfs_vfsops.c,v 1.68 2016/08/26 21:44:24 dholland Exp $ */ 1/* $NetBSD: tmpfs_vfsops.c,v 1.69 2017/01/27 10:47:54 hannken Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2005, 2006, 2007 The NetBSD Foundation, Inc. 4 * Copyright (c) 2005, 2006, 2007 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code 8 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
9 * 2005 program. 9 * 2005 program.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * are met: 13 * are met:
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer. 15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright 16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the 17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution. 18 * documentation and/or other materials provided with the distribution.
19 * 19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE. 30 * POSSIBILITY OF SUCH DAMAGE.
31 */ 31 */
32 32
33/* 33/*
34 * Efficient memory file system. 34 * Efficient memory file system.
35 * 35 *
36 * tmpfs is a file system that uses NetBSD's virtual memory sub-system 36 * tmpfs is a file system that uses NetBSD's virtual memory sub-system
37 * (the well-known UVM) to store file data and metadata in an efficient 37 * (the well-known UVM) to store file data and metadata in an efficient
38 * way. This means that it does not follow the structure of an on-disk 38 * way. This means that it does not follow the structure of an on-disk
39 * file system because it simply does not need to. Instead, it uses 39 * file system because it simply does not need to. Instead, it uses
40 * memory-specific data structures and algorithms to automatically 40 * memory-specific data structures and algorithms to automatically
41 * allocate and release resources. 41 * allocate and release resources.
42 */ 42 */
43 43
44#include <sys/cdefs.h> 44#include <sys/cdefs.h>
45__KERNEL_RCSID(0, "$NetBSD: tmpfs_vfsops.c,v 1.68 2016/08/26 21:44:24 dholland Exp $"); 45__KERNEL_RCSID(0, "$NetBSD: tmpfs_vfsops.c,v 1.69 2017/01/27 10:47:54 hannken Exp $");
46 46
47#include <sys/param.h> 47#include <sys/param.h>
48#include <sys/atomic.h> 48#include <sys/atomic.h>
49#include <sys/types.h> 49#include <sys/types.h>
50#include <sys/kmem.h> 50#include <sys/kmem.h>
51#include <sys/mount.h> 51#include <sys/mount.h>
52#include <sys/stat.h> 52#include <sys/stat.h>
53#include <sys/systm.h> 53#include <sys/systm.h>
54#include <sys/vnode.h> 54#include <sys/vnode.h>
55#include <sys/kauth.h> 55#include <sys/kauth.h>
56#include <sys/module.h> 56#include <sys/module.h>
57 57
58#include <miscfs/genfs/genfs.h> 58#include <miscfs/genfs/genfs.h>
59#include <fs/tmpfs/tmpfs.h> 59#include <fs/tmpfs/tmpfs.h>
60#include <fs/tmpfs/tmpfs_args.h> 60#include <fs/tmpfs/tmpfs_args.h>
61 61
62MODULE(MODULE_CLASS_VFS, tmpfs, NULL); 62MODULE(MODULE_CLASS_VFS, tmpfs, NULL);
63 63
64struct pool tmpfs_dirent_pool; 64struct pool tmpfs_dirent_pool;
65struct pool tmpfs_node_pool; 65struct pool tmpfs_node_pool;
66 66
67void 67void
68tmpfs_init(void) 68tmpfs_init(void)
69{ 69{
70 70
71 pool_init(&tmpfs_dirent_pool, sizeof(tmpfs_dirent_t), 0, 0, 0, 71 pool_init(&tmpfs_dirent_pool, sizeof(tmpfs_dirent_t), 0, 0, 0,
72 "tmpfs_dirent", &pool_allocator_nointr, IPL_NONE); 72 "tmpfs_dirent", &pool_allocator_nointr, IPL_NONE);
73 pool_init(&tmpfs_node_pool, sizeof(tmpfs_node_t), 0, 0, 0, 73 pool_init(&tmpfs_node_pool, sizeof(tmpfs_node_t), 0, 0, 0,
74 "tmpfs_node", &pool_allocator_nointr, IPL_NONE); 74 "tmpfs_node", &pool_allocator_nointr, IPL_NONE);
75} 75}
76 76
77void 77void
78tmpfs_done(void) 78tmpfs_done(void)
79{ 79{
80 80
81 pool_destroy(&tmpfs_dirent_pool); 81 pool_destroy(&tmpfs_dirent_pool);
82 pool_destroy(&tmpfs_node_pool); 82 pool_destroy(&tmpfs_node_pool);
83} 83}
84 84
85int 85int
86tmpfs_mount(struct mount *mp, const char *path, void *data, size_t *data_len) 86tmpfs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
87{ 87{
88 struct tmpfs_args *args = data; 88 struct tmpfs_args *args = data;
89 tmpfs_mount_t *tmp; 89 tmpfs_mount_t *tmp;
90 tmpfs_node_t *root; 90 tmpfs_node_t *root;
91 struct vattr va; 91 struct vattr va;
92 struct vnode *vp; 92 struct vnode *vp;
93 uint64_t memlimit; 93 uint64_t memlimit;
94 ino_t nodes; 94 ino_t nodes;
95 int error; 95 int error, flags;
96 bool set_memlimit; 96 bool set_memlimit;
97 bool set_nodes; 97 bool set_nodes;
98 98
99 if (args == NULL) 99 if (args == NULL)
100 return EINVAL; 100 return EINVAL;
101 101
102 /* Validate the version. */ 102 /* Validate the version. */
103 if (*data_len < sizeof(*args) || 103 if (*data_len < sizeof(*args) ||
104 args->ta_version != TMPFS_ARGS_VERSION) 104 args->ta_version != TMPFS_ARGS_VERSION)
105 return EINVAL; 105 return EINVAL;
106 106
107 /* Handle retrieval of mount point arguments. */ 107 /* Handle retrieval of mount point arguments. */
108 if (mp->mnt_flag & MNT_GETARGS) { 108 if (mp->mnt_flag & MNT_GETARGS) {
109 if (mp->mnt_data == NULL) 109 if (mp->mnt_data == NULL)
110 return EIO; 110 return EIO;
111 tmp = VFS_TO_TMPFS(mp); 111 tmp = VFS_TO_TMPFS(mp);
112 112
113 args->ta_version = TMPFS_ARGS_VERSION; 113 args->ta_version = TMPFS_ARGS_VERSION;
114 args->ta_nodes_max = tmp->tm_nodes_max; 114 args->ta_nodes_max = tmp->tm_nodes_max;
115 args->ta_size_max = tmp->tm_mem_limit; 115 args->ta_size_max = tmp->tm_mem_limit;
116 116
117 root = tmp->tm_root; 117 root = tmp->tm_root;
118 args->ta_root_uid = root->tn_uid; 118 args->ta_root_uid = root->tn_uid;
119 args->ta_root_gid = root->tn_gid; 119 args->ta_root_gid = root->tn_gid;
120 args->ta_root_mode = root->tn_mode; 120 args->ta_root_mode = root->tn_mode;
121 121
122 *data_len = sizeof(*args); 122 *data_len = sizeof(*args);
123 return 0; 123 return 0;
124 } 124 }
125 125
126 126
127 /* Prohibit mounts if there is not enough memory. */ 127 /* Prohibit mounts if there is not enough memory. */
128 if (tmpfs_mem_info(true) < uvmexp.freetarg) 128 if (tmpfs_mem_info(true) < uvmexp.freetarg)
129 return EINVAL; 129 return EINVAL;
130 130
131 /* Check for invalid uid and gid arguments */ 131 /* Check for invalid uid and gid arguments */
132 if (args->ta_root_uid == VNOVAL || args->ta_root_gid == VNOVAL) 132 if (args->ta_root_uid == VNOVAL || args->ta_root_gid == VNOVAL)
133 return EINVAL; 133 return EINVAL;
134 134
135 /* This can never happen? */ 135 /* This can never happen? */
136 if ((args->ta_root_mode & ALLPERMS) == VNOVAL) 136 if ((args->ta_root_mode & ALLPERMS) == VNOVAL)
137 return EINVAL; 137 return EINVAL;
138 138
139 /* Get the memory usage limit for this file-system. */ 139 /* Get the memory usage limit for this file-system. */
140 if (args->ta_size_max < PAGE_SIZE) { 140 if (args->ta_size_max < PAGE_SIZE) {
141 memlimit = UINT64_MAX; 141 memlimit = UINT64_MAX;
142 set_memlimit = false; 142 set_memlimit = false;
143 } else { 143 } else {
144 memlimit = args->ta_size_max; 144 memlimit = args->ta_size_max;
145 set_memlimit = true; 145 set_memlimit = true;
146 } 146 }
147 KASSERT(memlimit > 0); 147 KASSERT(memlimit > 0);
148 148
149 if (args->ta_nodes_max <= 3) { 149 if (args->ta_nodes_max <= 3) {
150 nodes = 3 + (memlimit / 1024); 150 nodes = 3 + (memlimit / 1024);
151 set_nodes = false; 151 set_nodes = false;
152 } else { 152 } else {
153 nodes = args->ta_nodes_max; 153 nodes = args->ta_nodes_max;
154 set_nodes = true; 154 set_nodes = true;
155 } 155 }
156 nodes = MIN(nodes, INT_MAX); 156 nodes = MIN(nodes, INT_MAX);
157 KASSERT(nodes >= 3); 157 KASSERT(nodes >= 3);
158 158
159 if (mp->mnt_flag & MNT_UPDATE) { 159 if (mp->mnt_flag & MNT_UPDATE) {
160 tmp = VFS_TO_TMPFS(mp); 160 tmp = VFS_TO_TMPFS(mp);
161 if (set_nodes && nodes < tmp->tm_nodes_cnt) 161 if (set_nodes && nodes < tmp->tm_nodes_cnt)
162 return EBUSY; 162 return EBUSY;
 163 if (!tmp->tm_rdonly && (mp->mnt_flag & MNT_RDONLY)) {
 164 /* Changing from read/write to read-only. */
 165 flags = WRITECLOSE;
 166 if ((mp->mnt_flag & MNT_FORCE))
 167 flags |= FORCECLOSE;
 168 error = vflush(mp, NULL, flags);
 169 if (error)
 170 return error;
 171 tmp->tm_rdonly = true;
 172 }
 173 if (tmp->tm_rdonly && (mp->mnt_flag & IMNT_WANTRDWR)) {
 174 /* Changing from read-only to read/write. */
 175 tmp->tm_rdonly = false;
 176 }
163 if (set_memlimit) { 177 if (set_memlimit) {
164 if ((error = tmpfs_mntmem_set(tmp, memlimit)) != 0) 178 if ((error = tmpfs_mntmem_set(tmp, memlimit)) != 0)
165 return error; 179 return error;
166 } 180 }
167 if (set_nodes) 181 if (set_nodes)
168 tmp->tm_nodes_max = nodes; 182 tmp->tm_nodes_max = nodes;
169 root = tmp->tm_root; 183 root = tmp->tm_root;
170 root->tn_uid = args->ta_root_uid; 184 root->tn_uid = args->ta_root_uid;
171 root->tn_gid = args->ta_root_gid; 185 root->tn_gid = args->ta_root_gid;
172 root->tn_mode = args->ta_root_mode; 186 root->tn_mode = args->ta_root_mode;
173 return 0; 187 return 0;
174 } 188 }
175 189
176 /* Allocate the tmpfs mount structure and fill it. */ 190 /* Allocate the tmpfs mount structure and fill it. */
177 tmp = kmem_zalloc(sizeof(tmpfs_mount_t), KM_SLEEP); 191 tmp = kmem_zalloc(sizeof(tmpfs_mount_t), KM_SLEEP);
178 if (tmp == NULL) 192 if (tmp == NULL)
179 return ENOMEM; 193 return ENOMEM;
180 194
 195 if ((mp->mnt_flag & MNT_RDONLY))
 196 tmp->tm_rdonly = true;
181 tmp->tm_nodes_max = nodes; 197 tmp->tm_nodes_max = nodes;
182 tmp->tm_nodes_cnt = 0; 198 tmp->tm_nodes_cnt = 0;
183 LIST_INIT(&tmp->tm_nodes); 199 LIST_INIT(&tmp->tm_nodes);
184 200
185 mutex_init(&tmp->tm_lock, MUTEX_DEFAULT, IPL_NONE); 201 mutex_init(&tmp->tm_lock, MUTEX_DEFAULT, IPL_NONE);
186 tmpfs_mntmem_init(tmp, memlimit); 202 tmpfs_mntmem_init(tmp, memlimit);
187 mp->mnt_data = tmp; 203 mp->mnt_data = tmp;
188 204
189 /* Allocate the root node. */ 205 /* Allocate the root node. */
190 vattr_null(&va); 206 vattr_null(&va);
191 va.va_type = VDIR; 207 va.va_type = VDIR;
192 va.va_mode = args->ta_root_mode & ALLPERMS; 208 va.va_mode = args->ta_root_mode & ALLPERMS;
193 va.va_uid = args->ta_root_uid; 209 va.va_uid = args->ta_root_uid;
194 va.va_gid = args->ta_root_gid; 210 va.va_gid = args->ta_root_gid;
195 error = vcache_new(mp, NULL, &va, NOCRED, &vp); 211 error = vcache_new(mp, NULL, &va, NOCRED, &vp);
196 if (error) { 212 if (error) {
197 mp->mnt_data = NULL; 213 mp->mnt_data = NULL;
198 tmpfs_mntmem_destroy(tmp); 214 tmpfs_mntmem_destroy(tmp);
199 mutex_destroy(&tmp->tm_lock); 215 mutex_destroy(&tmp->tm_lock);
200 kmem_free(tmp, sizeof(*tmp)); 216 kmem_free(tmp, sizeof(*tmp));
201 return error; 217 return error;
202 } 218 }
203 KASSERT(vp != NULL); 219 KASSERT(vp != NULL);
204 root = VP_TO_TMPFS_NODE(vp); 220 root = VP_TO_TMPFS_NODE(vp);
205 KASSERT(root != NULL); 221 KASSERT(root != NULL);
206 222
207 /* 223 /*
208 * Parent of the root inode is itself. Also, root inode has no 224 * Parent of the root inode is itself. Also, root inode has no
209 * directory entry (i.e. is never attached), thus hold an extra 225 * directory entry (i.e. is never attached), thus hold an extra
210 * reference (link) for it. 226 * reference (link) for it.
211 */ 227 */
212 root->tn_links++; 228 root->tn_links++;
213 root->tn_spec.tn_dir.tn_parent = root; 229 root->tn_spec.tn_dir.tn_parent = root;
214 tmp->tm_root = root; 230 tmp->tm_root = root;
215 vrele(vp); 231 vrele(vp);
216 232
217 mp->mnt_flag |= MNT_LOCAL; 233 mp->mnt_flag |= MNT_LOCAL;
218 mp->mnt_stat.f_namemax = TMPFS_MAXNAMLEN; 234 mp->mnt_stat.f_namemax = TMPFS_MAXNAMLEN;
219 mp->mnt_fs_bshift = PAGE_SHIFT; 235 mp->mnt_fs_bshift = PAGE_SHIFT;
220 mp->mnt_dev_bshift = DEV_BSHIFT; 236 mp->mnt_dev_bshift = DEV_BSHIFT;
221 mp->mnt_iflag |= IMNT_MPSAFE | IMNT_CAN_RWTORO; 237 mp->mnt_iflag |= IMNT_MPSAFE | IMNT_CAN_RWTORO;
222 vfs_getnewfsid(mp); 238 vfs_getnewfsid(mp);
223 239
224 error = set_statvfs_info(path, UIO_USERSPACE, "tmpfs", UIO_SYSSPACE, 240 error = set_statvfs_info(path, UIO_USERSPACE, "tmpfs", UIO_SYSSPACE,
225 mp->mnt_op->vfs_name, mp, curlwp); 241 mp->mnt_op->vfs_name, mp, curlwp);
226 if (error) { 242 if (error) {
227 (void)tmpfs_unmount(mp, MNT_FORCE); 243 (void)tmpfs_unmount(mp, MNT_FORCE);
228 } 244 }
229 return error; 245 return error;
230} 246}
231 247
232int 248int
233tmpfs_start(struct mount *mp, int flags) 249tmpfs_start(struct mount *mp, int flags)
234{ 250{
235 251
236 return 0; 252 return 0;
237} 253}
238 254
239int 255int
240tmpfs_unmount(struct mount *mp, int mntflags) 256tmpfs_unmount(struct mount *mp, int mntflags)
241{ 257{
242 tmpfs_mount_t *tmp = VFS_TO_TMPFS(mp); 258 tmpfs_mount_t *tmp = VFS_TO_TMPFS(mp);
243 tmpfs_node_t *node, *cnode; 259 tmpfs_node_t *node, *cnode;
244 int error, flags = 0; 260 int error, flags = 0;
245 261
246 /* Handle forced unmounts. */ 262 /* Handle forced unmounts. */
247 if (mntflags & MNT_FORCE) 263 if (mntflags & MNT_FORCE)
248 flags |= FORCECLOSE; 264 flags |= FORCECLOSE;
249 265
250 /* Finalize all pending I/O. */ 266 /* Finalize all pending I/O. */
251 error = vflush(mp, NULL, flags); 267 error = vflush(mp, NULL, flags);
252 if (error != 0) 268 if (error != 0)
253 return error; 269 return error;
254 270
255 /* 271 /*
256 * First round, detach and destroy all directory entries. 272 * First round, detach and destroy all directory entries.
257 * Also, clear the pointers to the vnodes - they are gone. 273 * Also, clear the pointers to the vnodes - they are gone.
258 */ 274 */
259 LIST_FOREACH(node, &tmp->tm_nodes, tn_entries) { 275 LIST_FOREACH(node, &tmp->tm_nodes, tn_entries) {
260 tmpfs_dirent_t *de; 276 tmpfs_dirent_t *de;
261 277
262 node->tn_vnode = NULL; 278 node->tn_vnode = NULL;
263 if (node->tn_type != VDIR) { 279 if (node->tn_type != VDIR) {
264 continue; 280 continue;
265 } 281 }
266 while ((de = TAILQ_FIRST(&node->tn_spec.tn_dir.tn_dir)) != NULL) { 282 while ((de = TAILQ_FIRST(&node->tn_spec.tn_dir.tn_dir)) != NULL) {
267 cnode = de->td_node; 283 cnode = de->td_node;
268 if (cnode && cnode != TMPFS_NODE_WHITEOUT) { 284 if (cnode && cnode != TMPFS_NODE_WHITEOUT) {
269 cnode->tn_vnode = NULL; 285 cnode->tn_vnode = NULL;
270 } 286 }
271 tmpfs_dir_detach(node, de); 287 tmpfs_dir_detach(node, de);
272 tmpfs_free_dirent(tmp, de); 288 tmpfs_free_dirent(tmp, de);
273 } 289 }
274 /* Extra virtual entry (itself for the root). */ 290 /* Extra virtual entry (itself for the root). */
275 node->tn_links--; 291 node->tn_links--;
276 } 292 }
277 293
278 /* Release the reference on root (diagnostic). */ 294 /* Release the reference on root (diagnostic). */
279 node = tmp->tm_root; 295 node = tmp->tm_root;
280 node->tn_links--; 296 node->tn_links--;
281 297
282 /* Second round, destroy all inodes. */ 298 /* Second round, destroy all inodes. */
283 while ((node = LIST_FIRST(&tmp->tm_nodes)) != NULL) { 299 while ((node = LIST_FIRST(&tmp->tm_nodes)) != NULL) {
284 tmpfs_free_node(tmp, node); 300 tmpfs_free_node(tmp, node);
285 } 301 }
286 302
287 /* Throw away the tmpfs_mount structure. */ 303 /* Throw away the tmpfs_mount structure. */
288 tmpfs_mntmem_destroy(tmp); 304 tmpfs_mntmem_destroy(tmp);
289 mutex_destroy(&tmp->tm_lock); 305 mutex_destroy(&tmp->tm_lock);
290 kmem_free(tmp, sizeof(*tmp)); 306 kmem_free(tmp, sizeof(*tmp));
291 mp->mnt_data = NULL; 307 mp->mnt_data = NULL;
292 308
293 return 0; 309 return 0;
294} 310}
295 311
296int 312int
297tmpfs_root(struct mount *mp, vnode_t **vpp) 313tmpfs_root(struct mount *mp, vnode_t **vpp)
298{ 314{
299 tmpfs_node_t *node = VFS_TO_TMPFS(mp)->tm_root; 315 tmpfs_node_t *node = VFS_TO_TMPFS(mp)->tm_root;
300 int error; 316 int error;
301 317
302 error = vcache_get(mp, &node, sizeof(node), vpp); 318 error = vcache_get(mp, &node, sizeof(node), vpp);
303 if (error) 319 if (error)
304 return error; 320 return error;
305 error = vn_lock(*vpp, LK_EXCLUSIVE); 321 error = vn_lock(*vpp, LK_EXCLUSIVE);
306 if (error) { 322 if (error) {
307 vrele(*vpp); 323 vrele(*vpp);
308 *vpp = NULL; 324 *vpp = NULL;
309 return error; 325 return error;
310 } 326 }
311 327
312 return 0; 328 return 0;
313} 329}
314 330
315int 331int
316tmpfs_vget(struct mount *mp, ino_t ino, vnode_t **vpp) 332tmpfs_vget(struct mount *mp, ino_t ino, vnode_t **vpp)
317{ 333{
318 334
319 return EOPNOTSUPP; 335 return EOPNOTSUPP;
320} 336}
321 337
322int 338int
323tmpfs_fhtovp(struct mount *mp, struct fid *fhp, vnode_t **vpp) 339tmpfs_fhtovp(struct mount *mp, struct fid *fhp, vnode_t **vpp)
324{ 340{
325 tmpfs_mount_t *tmp = VFS_TO_TMPFS(mp); 341 tmpfs_mount_t *tmp = VFS_TO_TMPFS(mp);
326 tmpfs_node_t *node; 342 tmpfs_node_t *node;
327 tmpfs_fid_t tfh; 343 tmpfs_fid_t tfh;
328 int error; 344 int error;
329 345
330 if (fhp->fid_len != sizeof(tmpfs_fid_t)) { 346 if (fhp->fid_len != sizeof(tmpfs_fid_t)) {
331 return EINVAL; 347 return EINVAL;
332 } 348 }
333 memcpy(&tfh, fhp, sizeof(tmpfs_fid_t)); 349 memcpy(&tfh, fhp, sizeof(tmpfs_fid_t));
334 350
335 mutex_enter(&tmp->tm_lock); 351 mutex_enter(&tmp->tm_lock);
336 LIST_FOREACH(node, &tmp->tm_nodes, tn_entries) { 352 LIST_FOREACH(node, &tmp->tm_nodes, tn_entries) {
337 if (node->tn_id == tfh.tf_id) { 353 if (node->tn_id == tfh.tf_id) {
338 /* Prevent this node from disappearing. */ 354 /* Prevent this node from disappearing. */
339 atomic_inc_32(&node->tn_holdcount); 355 atomic_inc_32(&node->tn_holdcount);
340 break; 356 break;
341 } 357 }
342 } 358 }
343 mutex_exit(&tmp->tm_lock); 359 mutex_exit(&tmp->tm_lock);
344 if (node == NULL) 360 if (node == NULL)
345 return ESTALE; 361 return ESTALE;
346 362
347 error = vcache_get(mp, &node, sizeof(node), vpp); 363 error = vcache_get(mp, &node, sizeof(node), vpp);
348 /* If this node has been reclaimed free it now. */ 364 /* If this node has been reclaimed free it now. */
349 if (atomic_dec_32_nv(&node->tn_holdcount) == TMPFS_NODE_RECLAIMED) { 365 if (atomic_dec_32_nv(&node->tn_holdcount) == TMPFS_NODE_RECLAIMED) {
350 KASSERT(error != 0); 366 KASSERT(error != 0);
351 tmpfs_free_node(tmp, node); 367 tmpfs_free_node(tmp, node);
352 } 368 }
353 if (error) 369 if (error)
354 return (error == ENOENT ? ESTALE : error); 370 return (error == ENOENT ? ESTALE : error);
355 error = vn_lock(*vpp, LK_EXCLUSIVE); 371 error = vn_lock(*vpp, LK_EXCLUSIVE);
356 if (error) { 372 if (error) {
357 vrele(*vpp); 373 vrele(*vpp);
358 *vpp = NULL; 374 *vpp = NULL;
359 return error; 375 return error;
360 } 376 }
361 if (TMPFS_NODE_GEN(node) != tfh.tf_gen) { 377 if (TMPFS_NODE_GEN(node) != tfh.tf_gen) {
362 vput(*vpp); 378 vput(*vpp);
363 *vpp = NULL; 379 *vpp = NULL;
364 return ESTALE; 380 return ESTALE;
365 } 381 }
366 382
367 return 0; 383 return 0;
368} 384}
369 385
370int 386int
371tmpfs_vptofh(vnode_t *vp, struct fid *fhp, size_t *fh_size) 387tmpfs_vptofh(vnode_t *vp, struct fid *fhp, size_t *fh_size)
372{ 388{
373 tmpfs_fid_t tfh; 389 tmpfs_fid_t tfh;
374 tmpfs_node_t *node; 390 tmpfs_node_t *node;
375 391
376 if (*fh_size < sizeof(tmpfs_fid_t)) { 392 if (*fh_size < sizeof(tmpfs_fid_t)) {
377 *fh_size = sizeof(tmpfs_fid_t); 393 *fh_size = sizeof(tmpfs_fid_t);
378 return E2BIG; 394 return E2BIG;
379 } 395 }
380 *fh_size = sizeof(tmpfs_fid_t); 396 *fh_size = sizeof(tmpfs_fid_t);
381 node = VP_TO_TMPFS_NODE(vp); 397 node = VP_TO_TMPFS_NODE(vp);
382 398
383 memset(&tfh, 0, sizeof(tfh)); 399 memset(&tfh, 0, sizeof(tfh));
384 tfh.tf_len = sizeof(tmpfs_fid_t); 400 tfh.tf_len = sizeof(tmpfs_fid_t);
385 tfh.tf_gen = TMPFS_NODE_GEN(node); 401 tfh.tf_gen = TMPFS_NODE_GEN(node);
386 tfh.tf_id = node->tn_id; 402 tfh.tf_id = node->tn_id;
387 memcpy(fhp, &tfh, sizeof(tfh)); 403 memcpy(fhp, &tfh, sizeof(tfh));
388 404
389 return 0; 405 return 0;
390} 406}
391 407
392int 408int
393tmpfs_statvfs(struct mount *mp, struct statvfs *sbp) 409tmpfs_statvfs(struct mount *mp, struct statvfs *sbp)
394{ 410{
395 tmpfs_mount_t *tmp; 411 tmpfs_mount_t *tmp;
396 fsfilcnt_t freenodes; 412 fsfilcnt_t freenodes;
397 size_t avail; 413 size_t avail;
398 414
399 tmp = VFS_TO_TMPFS(mp); 415 tmp = VFS_TO_TMPFS(mp);
400 416
401 sbp->f_iosize = sbp->f_frsize = sbp->f_bsize = PAGE_SIZE; 417 sbp->f_iosize = sbp->f_frsize = sbp->f_bsize = PAGE_SIZE;
402 418
403 mutex_enter(&tmp->tm_acc_lock); 419 mutex_enter(&tmp->tm_acc_lock);
404 avail = tmpfs_pages_avail(tmp); 420 avail = tmpfs_pages_avail(tmp);
405 sbp->f_blocks = (tmpfs_bytes_max(tmp) >> PAGE_SHIFT); 421 sbp->f_blocks = (tmpfs_bytes_max(tmp) >> PAGE_SHIFT);
406 sbp->f_bavail = sbp->f_bfree = avail; 422 sbp->f_bavail = sbp->f_bfree = avail;
407 sbp->f_bresvd = 0; 423 sbp->f_bresvd = 0;
408 424
409 freenodes = MIN(tmp->tm_nodes_max - tmp->tm_nodes_cnt, 425 freenodes = MIN(tmp->tm_nodes_max - tmp->tm_nodes_cnt,
410 avail * PAGE_SIZE / sizeof(tmpfs_node_t)); 426 avail * PAGE_SIZE / sizeof(tmpfs_node_t));
411 427
412 sbp->f_files = tmp->tm_nodes_cnt + freenodes; 428 sbp->f_files = tmp->tm_nodes_cnt + freenodes;
413 sbp->f_favail = sbp->f_ffree = freenodes; 429 sbp->f_favail = sbp->f_ffree = freenodes;
414 sbp->f_fresvd = 0; 430 sbp->f_fresvd = 0;
415 mutex_exit(&tmp->tm_acc_lock); 431 mutex_exit(&tmp->tm_acc_lock);
416 432
417 copy_statvfs_info(sbp, mp); 433 copy_statvfs_info(sbp, mp);
418 434
419 return 0; 435 return 0;
420} 436}
421 437
422int 438int
423tmpfs_sync(struct mount *mp, int waitfor, kauth_cred_t uc) 439tmpfs_sync(struct mount *mp, int waitfor, kauth_cred_t uc)
424{ 440{
425 441
426 return 0; 442 return 0;
427} 443}
428 444
429int 445int
430tmpfs_snapshot(struct mount *mp, vnode_t *vp, struct timespec *ctime) 446tmpfs_snapshot(struct mount *mp, vnode_t *vp, struct timespec *ctime)
431{ 447{
432 448
433 return EOPNOTSUPP; 449 return EOPNOTSUPP;
434} 450}
435 451
436/* 452/*
437 * tmpfs vfs operations. 453 * tmpfs vfs operations.
438 */ 454 */
439 455
440extern const struct vnodeopv_desc tmpfs_fifoop_opv_desc; 456extern const struct vnodeopv_desc tmpfs_fifoop_opv_desc;
441extern const struct vnodeopv_desc tmpfs_specop_opv_desc; 457extern const struct vnodeopv_desc tmpfs_specop_opv_desc;
442extern const struct vnodeopv_desc tmpfs_vnodeop_opv_desc; 458extern const struct vnodeopv_desc tmpfs_vnodeop_opv_desc;
443 459
444const struct vnodeopv_desc * const tmpfs_vnodeopv_descs[] = { 460const struct vnodeopv_desc * const tmpfs_vnodeopv_descs[] = {
445 &tmpfs_fifoop_opv_desc, 461 &tmpfs_fifoop_opv_desc,
446 &tmpfs_specop_opv_desc, 462 &tmpfs_specop_opv_desc,
447 &tmpfs_vnodeop_opv_desc, 463 &tmpfs_vnodeop_opv_desc,
448 NULL, 464 NULL,
449}; 465};
450 466
451struct vfsops tmpfs_vfsops = { 467struct vfsops tmpfs_vfsops = {
452 .vfs_name = MOUNT_TMPFS, 468 .vfs_name = MOUNT_TMPFS,
453 .vfs_min_mount_data = sizeof (struct tmpfs_args), 469 .vfs_min_mount_data = sizeof (struct tmpfs_args),
454 .vfs_mount = tmpfs_mount, 470 .vfs_mount = tmpfs_mount,
455 .vfs_start = tmpfs_start, 471 .vfs_start = tmpfs_start,
456 .vfs_unmount = tmpfs_unmount, 472 .vfs_unmount = tmpfs_unmount,
457 .vfs_root = tmpfs_root, 473 .vfs_root = tmpfs_root,
458 .vfs_quotactl = (void *)eopnotsupp, 474 .vfs_quotactl = (void *)eopnotsupp,
459 .vfs_statvfs = tmpfs_statvfs, 475 .vfs_statvfs = tmpfs_statvfs,
460 .vfs_sync = tmpfs_sync, 476 .vfs_sync = tmpfs_sync,
461 .vfs_vget = tmpfs_vget, 477 .vfs_vget = tmpfs_vget,
462 .vfs_loadvnode = tmpfs_loadvnode, 478 .vfs_loadvnode = tmpfs_loadvnode,
463 .vfs_newvnode = tmpfs_newvnode, 479 .vfs_newvnode = tmpfs_newvnode,
464 .vfs_fhtovp = tmpfs_fhtovp, 480 .vfs_fhtovp = tmpfs_fhtovp,
465 .vfs_vptofh = tmpfs_vptofh, 481 .vfs_vptofh = tmpfs_vptofh,
466 .vfs_init = tmpfs_init, 482 .vfs_init = tmpfs_init,
467 .vfs_done = tmpfs_done, 483 .vfs_done = tmpfs_done,
468 .vfs_snapshot = tmpfs_snapshot, 484 .vfs_snapshot = tmpfs_snapshot,
469 .vfs_extattrctl = vfs_stdextattrctl, 485 .vfs_extattrctl = vfs_stdextattrctl,
470 .vfs_suspendctl = (void *)eopnotsupp, 486 .vfs_suspendctl = (void *)eopnotsupp,
471 .vfs_renamelock_enter = genfs_renamelock_enter, 487 .vfs_renamelock_enter = genfs_renamelock_enter,
472 .vfs_renamelock_exit = genfs_renamelock_exit, 488 .vfs_renamelock_exit = genfs_renamelock_exit,
473 .vfs_fsync = (void *)eopnotsupp, 489 .vfs_fsync = (void *)eopnotsupp,
474 .vfs_opv_descs = tmpfs_vnodeopv_descs 490 .vfs_opv_descs = tmpfs_vnodeopv_descs
475}; 491};
476 492
477static int 493static int
478tmpfs_modcmd(modcmd_t cmd, void *arg) 494tmpfs_modcmd(modcmd_t cmd, void *arg)
479{ 495{
480 496
481 switch (cmd) { 497 switch (cmd) {
482 case MODULE_CMD_INIT: 498 case MODULE_CMD_INIT:
483 return vfs_attach(&tmpfs_vfsops); 499 return vfs_attach(&tmpfs_vfsops);
484 case MODULE_CMD_FINI: 500 case MODULE_CMD_FINI:
485 return vfs_detach(&tmpfs_vfsops); 501 return vfs_detach(&tmpfs_vfsops);
486 default: 502 default:
487 return ENOTTY; 503 return ENOTTY;
488 } 504 }
489} 505}
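
Finally, a userland sketch of how the new path gets exercised. The assumptions here are the NetBSD mount(2) signature that takes a data length, the <fs/tmpfs/tmpfs_args.h> header path, and the struct tmpfs_args fields referenced in the diff above; this is an illustration, not the mount_tmpfs(8) implementation. The current arguments are fetched with MNT_GETARGS first so the update does not reset the root node's owner and mode, then the same file system is re-mounted read-only, which sends tmpfs_mount() through the new vflush() branch.

	#include <sys/types.h>
	#include <sys/mount.h>
	#include <fs/tmpfs/tmpfs_args.h>
	#include <err.h>

	int
	main(void)
	{
		/* The kernel validates ta_version even for MNT_GETARGS. */
		struct tmpfs_args args = { .ta_version = TMPFS_ARGS_VERSION };

		/* Fetch the current limits and root attributes. */
		if (mount("tmpfs", "/tmp", MNT_GETARGS, &args,
		    sizeof(args)) == -1)
			err(1, "getargs /tmp");

		/* Re-mount the same tmpfs read-only. */
		if (mount("tmpfs", "/tmp", MNT_UPDATE | MNT_RDONLY, &args,
		    sizeof(args)) == -1)
			err(1, "remount /tmp read-only");

		return 0;
	}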