| @@ -1,1583 +1,1584 @@ | | | @@ -1,1583 +1,1584 @@ |
1 | /* $NetBSD: kern_ktrace.c,v 1.148 2009/01/11 02:45:52 christos Exp $ */ | | 1 | /* $NetBSD: kern_ktrace.c,v 1.149 2009/08/05 19:53:42 dsl Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc. | | 4 | * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation | | 7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Andrew Doran. | | 8 | * by Andrew Doran. |
9 | * | | 9 | * |
10 | * Redistribution and use in source and binary forms, with or without | | 10 | * Redistribution and use in source and binary forms, with or without |
11 | * modification, are permitted provided that the following conditions | | 11 | * modification, are permitted provided that the following conditions |
12 | * are met: | | 12 | * are met: |
13 | * 1. Redistributions of source code must retain the above copyright | | 13 | * 1. Redistributions of source code must retain the above copyright |
14 | * notice, this list of conditions and the following disclaimer. | | 14 | * notice, this list of conditions and the following disclaimer. |
15 | * 2. Redistributions in binary form must reproduce the above copyright | | 15 | * 2. Redistributions in binary form must reproduce the above copyright |
16 | * notice, this list of conditions and the following disclaimer in the | | 16 | * notice, this list of conditions and the following disclaimer in the |
17 | * documentation and/or other materials provided with the distribution. | | 17 | * documentation and/or other materials provided with the distribution. |
18 | * | | 18 | * |
19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
29 | * POSSIBILITY OF SUCH DAMAGE. | | 29 | * POSSIBILITY OF SUCH DAMAGE. |
30 | */ | | 30 | */ |
31 | | | 31 | |
32 | /* | | 32 | /* |
33 | * Copyright (c) 1989, 1993 | | 33 | * Copyright (c) 1989, 1993 |
34 | * The Regents of the University of California. All rights reserved. | | 34 | * The Regents of the University of California. All rights reserved. |
35 | * | | 35 | * |
36 | * Redistribution and use in source and binary forms, with or without | | 36 | * Redistribution and use in source and binary forms, with or without |
37 | * modification, are permitted provided that the following conditions | | 37 | * modification, are permitted provided that the following conditions |
38 | * are met: | | 38 | * are met: |
39 | * 1. Redistributions of source code must retain the above copyright | | 39 | * 1. Redistributions of source code must retain the above copyright |
40 | * notice, this list of conditions and the following disclaimer. | | 40 | * notice, this list of conditions and the following disclaimer. |
41 | * 2. Redistributions in binary form must reproduce the above copyright | | 41 | * 2. Redistributions in binary form must reproduce the above copyright |
42 | * notice, this list of conditions and the following disclaimer in the | | 42 | * notice, this list of conditions and the following disclaimer in the |
43 | * documentation and/or other materials provided with the distribution. | | 43 | * documentation and/or other materials provided with the distribution. |
44 | * 3. Neither the name of the University nor the names of its contributors | | 44 | * 3. Neither the name of the University nor the names of its contributors |
45 | * may be used to endorse or promote products derived from this software | | 45 | * may be used to endorse or promote products derived from this software |
46 | * without specific prior written permission. | | 46 | * without specific prior written permission. |
47 | * | | 47 | * |
48 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | | 48 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
49 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | | 49 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
50 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | | 50 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
51 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | | 51 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
52 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | | 52 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
53 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | | 53 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
54 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | | 54 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
55 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | | 55 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
56 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | | 56 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
57 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | | 57 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
58 | * SUCH DAMAGE. | | 58 | * SUCH DAMAGE. |
59 | * | | 59 | * |
60 | * @(#)kern_ktrace.c 8.5 (Berkeley) 5/14/95 | | 60 | * @(#)kern_ktrace.c 8.5 (Berkeley) 5/14/95 |
61 | */ | | 61 | */ |
62 | | | 62 | |
63 | #include <sys/cdefs.h> | | 63 | #include <sys/cdefs.h> |
64 | __KERNEL_RCSID(0, "$NetBSD: kern_ktrace.c,v 1.148 2009/01/11 02:45:52 christos Exp $"); | | 64 | __KERNEL_RCSID(0, "$NetBSD: kern_ktrace.c,v 1.149 2009/08/05 19:53:42 dsl Exp $"); |
65 | | | 65 | |
66 | #include <sys/param.h> | | 66 | #include <sys/param.h> |
67 | #include <sys/systm.h> | | 67 | #include <sys/systm.h> |
68 | #include <sys/proc.h> | | 68 | #include <sys/proc.h> |
69 | #include <sys/file.h> | | 69 | #include <sys/file.h> |
70 | #include <sys/namei.h> | | 70 | #include <sys/namei.h> |
71 | #include <sys/vnode.h> | | 71 | #include <sys/vnode.h> |
72 | #include <sys/kernel.h> | | 72 | #include <sys/kernel.h> |
73 | #include <sys/kthread.h> | | 73 | #include <sys/kthread.h> |
74 | #include <sys/ktrace.h> | | 74 | #include <sys/ktrace.h> |
75 | #include <sys/kmem.h> | | 75 | #include <sys/kmem.h> |
76 | #include <sys/syslog.h> | | 76 | #include <sys/syslog.h> |
77 | #include <sys/filedesc.h> | | 77 | #include <sys/filedesc.h> |
78 | #include <sys/ioctl.h> | | 78 | #include <sys/ioctl.h> |
79 | #include <sys/callout.h> | | 79 | #include <sys/callout.h> |
80 | #include <sys/kauth.h> | | 80 | #include <sys/kauth.h> |
81 | | | 81 | |
82 | #include <sys/mount.h> | | 82 | #include <sys/mount.h> |
83 | #include <sys/sa.h> | | 83 | #include <sys/sa.h> |
84 | #include <sys/syscallargs.h> | | 84 | #include <sys/syscallargs.h> |
85 | | | 85 | |
86 | /* | | 86 | /* |
87 | * TODO: | | 87 | * TODO: |
88 | * - need better error reporting? | | 88 | * - need better error reporting? |
89 | * - userland utility to sort ktrace.out by timestamp. | | 89 | * - userland utility to sort ktrace.out by timestamp. |
90 | * - keep minimum information in ktrace_entry when rest of alloc failed. | | 90 | * - keep minimum information in ktrace_entry when rest of alloc failed. |
91 | * - per trace control of configurable parameters. | | 91 | * - per trace control of configurable parameters. |
92 | */ | | 92 | */ |
93 | | | 93 | |
94 | struct ktrace_entry { | | 94 | struct ktrace_entry { |
95 | TAILQ_ENTRY(ktrace_entry) kte_list; | | 95 | TAILQ_ENTRY(ktrace_entry) kte_list; |
96 | struct ktr_header kte_kth; | | 96 | struct ktr_header kte_kth; |
97 | void *kte_buf; | | 97 | void *kte_buf; |
98 | size_t kte_bufsz; | | 98 | size_t kte_bufsz; |
99 | #define KTE_SPACE 32 | | 99 | #define KTE_SPACE 32 |
100 | uint8_t kte_space[KTE_SPACE]; | | 100 | uint8_t kte_space[KTE_SPACE]; |
101 | }; | | 101 | }; |
102 | | | 102 | |
103 | struct ktr_desc { | | 103 | struct ktr_desc { |
104 | TAILQ_ENTRY(ktr_desc) ktd_list; | | 104 | TAILQ_ENTRY(ktr_desc) ktd_list; |
105 | int ktd_flags; | | 105 | int ktd_flags; |
106 | #define KTDF_WAIT 0x0001 | | 106 | #define KTDF_WAIT 0x0001 |
107 | #define KTDF_DONE 0x0002 | | 107 | #define KTDF_DONE 0x0002 |
108 | #define KTDF_BLOCKING 0x0004 | | 108 | #define KTDF_BLOCKING 0x0004 |
109 | #define KTDF_INTERACTIVE 0x0008 | | 109 | #define KTDF_INTERACTIVE 0x0008 |
110 | int ktd_error; | | 110 | int ktd_error; |
111 | #define KTDE_ENOMEM 0x0001 | | 111 | #define KTDE_ENOMEM 0x0001 |
112 | #define KTDE_ENOSPC 0x0002 | | 112 | #define KTDE_ENOSPC 0x0002 |
113 | int ktd_errcnt; | | 113 | int ktd_errcnt; |
114 | int ktd_ref; /* # of reference */ | | 114 | int ktd_ref; /* # of reference */ |
115 | int ktd_qcount; /* # of entry in the queue */ | | 115 | int ktd_qcount; /* # of entry in the queue */ |
116 | | | 116 | |
117 | /* | | 117 | /* |
118 | * Params to control behaviour. | | 118 | * Params to control behaviour. |
119 | */ | | 119 | */ |
120 | int ktd_delayqcnt; /* # of entry allowed to delay */ | | 120 | int ktd_delayqcnt; /* # of entry allowed to delay */ |
121 | int ktd_wakedelay; /* delay of wakeup in *tick* */ | | 121 | int ktd_wakedelay; /* delay of wakeup in *tick* */ |
122 | int ktd_intrwakdl; /* ditto, but when interactive */ | | 122 | int ktd_intrwakdl; /* ditto, but when interactive */ |
123 | | | 123 | |
124 | file_t *ktd_fp; /* trace output file */ | | 124 | file_t *ktd_fp; /* trace output file */ |
125 | lwp_t *ktd_lwp; /* our kernel thread */ | | 125 | lwp_t *ktd_lwp; /* our kernel thread */ |
126 | TAILQ_HEAD(, ktrace_entry) ktd_queue; | | 126 | TAILQ_HEAD(, ktrace_entry) ktd_queue; |
127 | callout_t ktd_wakch; /* delayed wakeup */ | | 127 | callout_t ktd_wakch; /* delayed wakeup */ |
128 | kcondvar_t ktd_sync_cv; | | 128 | kcondvar_t ktd_sync_cv; |
129 | kcondvar_t ktd_cv; | | 129 | kcondvar_t ktd_cv; |
130 | }; | | 130 | }; |
131 | | | 131 | |
132 | static int ktealloc(struct ktrace_entry **, void **, lwp_t *, int, | | 132 | static int ktealloc(struct ktrace_entry **, void **, lwp_t *, int, |
133 | size_t); | | 133 | size_t); |
134 | static void ktrwrite(struct ktr_desc *, struct ktrace_entry *); | | 134 | static void ktrwrite(struct ktr_desc *, struct ktrace_entry *); |
135 | static int ktrace_common(lwp_t *, int, int, int, file_t *); | | 135 | static int ktrace_common(lwp_t *, int, int, int, file_t *); |
136 | static int ktrops(lwp_t *, struct proc *, int, int, | | 136 | static int ktrops(lwp_t *, struct proc *, int, int, |
137 | struct ktr_desc *); | | 137 | struct ktr_desc *); |
138 | static int ktrsetchildren(lwp_t *, struct proc *, int, int, | | 138 | static int ktrsetchildren(lwp_t *, struct proc *, int, int, |
139 | struct ktr_desc *); | | 139 | struct ktr_desc *); |
140 | static int ktrcanset(lwp_t *, struct proc *); | | 140 | static int ktrcanset(lwp_t *, struct proc *); |
141 | static int ktrsamefile(file_t *, file_t *); | | 141 | static int ktrsamefile(file_t *, file_t *); |
142 | static void ktr_kmem(lwp_t *, int, const void *, size_t); | | 142 | static void ktr_kmem(lwp_t *, int, const void *, size_t); |
143 | static void ktr_io(lwp_t *, int, enum uio_rw, struct iovec *, size_t); | | 143 | static void ktr_io(lwp_t *, int, enum uio_rw, struct iovec *, size_t); |
144 | | | 144 | |
145 | static struct ktr_desc * | | 145 | static struct ktr_desc * |
146 | ktd_lookup(file_t *); | | 146 | ktd_lookup(file_t *); |
147 | static void ktdrel(struct ktr_desc *); | | 147 | static void ktdrel(struct ktr_desc *); |
148 | static void ktdref(struct ktr_desc *); | | 148 | static void ktdref(struct ktr_desc *); |
149 | static void ktraddentry(lwp_t *, struct ktrace_entry *, int); | | 149 | static void ktraddentry(lwp_t *, struct ktrace_entry *, int); |
150 | /* Flags for ktraddentry (3rd arg) */ | | 150 | /* Flags for ktraddentry (3rd arg) */ |
151 | #define KTA_NOWAIT 0x0000 | | 151 | #define KTA_NOWAIT 0x0000 |
152 | #define KTA_WAITOK 0x0001 | | 152 | #define KTA_WAITOK 0x0001 |
153 | #define KTA_LARGE 0x0002 | | 153 | #define KTA_LARGE 0x0002 |
154 | static void ktefree(struct ktrace_entry *); | | 154 | static void ktefree(struct ktrace_entry *); |
155 | static void ktd_logerrl(struct ktr_desc *, int); | | 155 | static void ktd_logerrl(struct ktr_desc *, int); |
156 | static void ktrace_thread(void *); | | 156 | static void ktrace_thread(void *); |
157 | static int ktrderefall(struct ktr_desc *, int); | | 157 | static int ktrderefall(struct ktr_desc *, int); |
158 | | | 158 | |
159 | /* | | 159 | /* |
160 | * Default vaules. | | 160 | * Default vaules. |
161 | */ | | 161 | */ |
162 | #define KTD_MAXENTRY 1000 /* XXX: tune */ | | 162 | #define KTD_MAXENTRY 1000 /* XXX: tune */ |
163 | #define KTD_TIMEOUT 5 /* XXX: tune */ | | 163 | #define KTD_TIMEOUT 5 /* XXX: tune */ |
164 | #define KTD_DELAYQCNT 100 /* XXX: tune */ | | 164 | #define KTD_DELAYQCNT 100 /* XXX: tune */ |
165 | #define KTD_WAKEDELAY 5000 /* XXX: tune */ | | 165 | #define KTD_WAKEDELAY 5000 /* XXX: tune */ |
166 | #define KTD_INTRWAKDL 100 /* XXX: tune */ | | 166 | #define KTD_INTRWAKDL 100 /* XXX: tune */ |
167 | | | 167 | |
168 | /* | | 168 | /* |
169 | * Patchable variables. | | 169 | * Patchable variables. |
170 | */ | | 170 | */ |
171 | int ktd_maxentry = KTD_MAXENTRY; /* max # of entry in the queue */ | | 171 | int ktd_maxentry = KTD_MAXENTRY; /* max # of entry in the queue */ |
172 | int ktd_timeout = KTD_TIMEOUT; /* timeout in seconds */ | | 172 | int ktd_timeout = KTD_TIMEOUT; /* timeout in seconds */ |
173 | int ktd_delayqcnt = KTD_DELAYQCNT; /* # of entry allowed to delay */ | | 173 | int ktd_delayqcnt = KTD_DELAYQCNT; /* # of entry allowed to delay */ |
174 | int ktd_wakedelay = KTD_WAKEDELAY; /* delay of wakeup in *ms* */ | | 174 | int ktd_wakedelay = KTD_WAKEDELAY; /* delay of wakeup in *ms* */ |
175 | int ktd_intrwakdl = KTD_INTRWAKDL; /* ditto, but when interactive */ | | 175 | int ktd_intrwakdl = KTD_INTRWAKDL; /* ditto, but when interactive */ |
176 | | | 176 | |
177 | kmutex_t ktrace_lock; | | 177 | kmutex_t ktrace_lock; |
178 | int ktrace_on; | | 178 | int ktrace_on; |
179 | static TAILQ_HEAD(, ktr_desc) ktdq = TAILQ_HEAD_INITIALIZER(ktdq); | | 179 | static TAILQ_HEAD(, ktr_desc) ktdq = TAILQ_HEAD_INITIALIZER(ktdq); |
180 | static pool_cache_t kte_cache; | | 180 | static pool_cache_t kte_cache; |
181 | | | 181 | |
182 | static void | | 182 | static void |
183 | ktd_wakeup(struct ktr_desc *ktd) | | 183 | ktd_wakeup(struct ktr_desc *ktd) |
184 | { | | 184 | { |
185 | | | 185 | |
186 | callout_stop(&ktd->ktd_wakch); | | 186 | callout_stop(&ktd->ktd_wakch); |
187 | cv_signal(&ktd->ktd_cv); | | 187 | cv_signal(&ktd->ktd_cv); |
188 | } | | 188 | } |
189 | | | 189 | |
190 | static void | | 190 | static void |
191 | ktd_callout(void *arg) | | 191 | ktd_callout(void *arg) |
192 | { | | 192 | { |
193 | | | 193 | |
194 | mutex_enter(&ktrace_lock); | | 194 | mutex_enter(&ktrace_lock); |
195 | ktd_wakeup(arg); | | 195 | ktd_wakeup(arg); |
196 | mutex_exit(&ktrace_lock); | | 196 | mutex_exit(&ktrace_lock); |
197 | } | | 197 | } |
198 | | | 198 | |
199 | static void | | 199 | static void |
200 | ktd_logerrl(struct ktr_desc *ktd, int error) | | 200 | ktd_logerrl(struct ktr_desc *ktd, int error) |
201 | { | | 201 | { |
202 | | | 202 | |
203 | ktd->ktd_error |= error; | | 203 | ktd->ktd_error |= error; |
204 | ktd->ktd_errcnt++; | | 204 | ktd->ktd_errcnt++; |
205 | } | | 205 | } |
206 | | | 206 | |
207 | #if 0 | | 207 | #if 0 |
208 | static void | | 208 | static void |
209 | ktd_logerr(struct proc *p, int error) | | 209 | ktd_logerr(struct proc *p, int error) |
210 | { | | 210 | { |
211 | struct ktr_desc *ktd; | | 211 | struct ktr_desc *ktd; |
212 | | | 212 | |
213 | KASSERT(mutex_owned(&ktrace_lock)); | | 213 | KASSERT(mutex_owned(&ktrace_lock)); |
214 | | | 214 | |
215 | ktd = p->p_tracep; | | 215 | ktd = p->p_tracep; |
216 | if (ktd == NULL) | | 216 | if (ktd == NULL) |
217 | return; | | 217 | return; |
218 | | | 218 | |
219 | ktd_logerrl(ktd, error); | | 219 | ktd_logerrl(ktd, error); |
220 | } | | 220 | } |
221 | #endif | | 221 | #endif |
222 | | | 222 | |
223 | static inline int | | 223 | static inline int |
224 | ktrenter(lwp_t *l) | | 224 | ktrenter(lwp_t *l) |
225 | { | | 225 | { |
226 | | | 226 | |
227 | if ((l->l_pflag & LP_KTRACTIVE) != 0) | | 227 | if ((l->l_pflag & LP_KTRACTIVE) != 0) |
228 | return 1; | | 228 | return 1; |
229 | l->l_pflag |= LP_KTRACTIVE; | | 229 | l->l_pflag |= LP_KTRACTIVE; |
230 | return 0; | | 230 | return 0; |
231 | } | | 231 | } |
232 | | | 232 | |
233 | static inline void | | 233 | static inline void |
234 | ktrexit(lwp_t *l) | | 234 | ktrexit(lwp_t *l) |
235 | { | | 235 | { |
236 | | | 236 | |
237 | l->l_pflag &= ~LP_KTRACTIVE; | | 237 | l->l_pflag &= ~LP_KTRACTIVE; |
238 | } | | 238 | } |
239 | | | 239 | |
240 | /* | | 240 | /* |
241 | * Initialise the ktrace system. | | 241 | * Initialise the ktrace system. |
242 | */ | | 242 | */ |
243 | void | | 243 | void |
244 | ktrinit(void) | | 244 | ktrinit(void) |
245 | { | | 245 | { |
246 | | | 246 | |
247 | mutex_init(&ktrace_lock, MUTEX_DEFAULT, IPL_NONE); | | 247 | mutex_init(&ktrace_lock, MUTEX_DEFAULT, IPL_NONE); |
248 | kte_cache = pool_cache_init(sizeof(struct ktrace_entry), 0, 0, 0, | | 248 | kte_cache = pool_cache_init(sizeof(struct ktrace_entry), 0, 0, 0, |
249 | "ktrace", &pool_allocator_nointr, IPL_NONE, NULL, NULL, NULL); | | 249 | "ktrace", &pool_allocator_nointr, IPL_NONE, NULL, NULL, NULL); |
250 | } | | 250 | } |
251 | | | 251 | |
252 | /* | | 252 | /* |
253 | * Release a reference. Called with ktrace_lock held. | | 253 | * Release a reference. Called with ktrace_lock held. |
254 | */ | | 254 | */ |
255 | void | | 255 | void |
256 | ktdrel(struct ktr_desc *ktd) | | 256 | ktdrel(struct ktr_desc *ktd) |
257 | { | | 257 | { |
258 | | | 258 | |
259 | KASSERT(mutex_owned(&ktrace_lock)); | | 259 | KASSERT(mutex_owned(&ktrace_lock)); |
260 | | | 260 | |
261 | KDASSERT(ktd->ktd_ref != 0); | | 261 | KDASSERT(ktd->ktd_ref != 0); |
262 | KASSERT(ktd->ktd_ref > 0); | | 262 | KASSERT(ktd->ktd_ref > 0); |
263 | KASSERT(ktrace_on > 0); | | 263 | KASSERT(ktrace_on > 0); |
264 | ktrace_on--; | | 264 | ktrace_on--; |
265 | if (--ktd->ktd_ref <= 0) { | | 265 | if (--ktd->ktd_ref <= 0) { |
266 | ktd->ktd_flags |= KTDF_DONE; | | 266 | ktd->ktd_flags |= KTDF_DONE; |
267 | cv_signal(&ktd->ktd_cv); | | 267 | cv_signal(&ktd->ktd_cv); |
268 | } | | 268 | } |
269 | } | | 269 | } |
270 | | | 270 | |
271 | void | | 271 | void |
272 | ktdref(struct ktr_desc *ktd) | | 272 | ktdref(struct ktr_desc *ktd) |
273 | { | | 273 | { |
274 | | | 274 | |
275 | KASSERT(mutex_owned(&ktrace_lock)); | | 275 | KASSERT(mutex_owned(&ktrace_lock)); |
276 | | | 276 | |
277 | ktd->ktd_ref++; | | 277 | ktd->ktd_ref++; |
278 | ktrace_on++; | | 278 | ktrace_on++; |
279 | } | | 279 | } |
280 | | | 280 | |
281 | struct ktr_desc * | | 281 | struct ktr_desc * |
282 | ktd_lookup(file_t *fp) | | 282 | ktd_lookup(file_t *fp) |
283 | { | | 283 | { |
284 | struct ktr_desc *ktd; | | 284 | struct ktr_desc *ktd; |
285 | | | 285 | |
286 | KASSERT(mutex_owned(&ktrace_lock)); | | 286 | KASSERT(mutex_owned(&ktrace_lock)); |
287 | | | 287 | |
288 | for (ktd = TAILQ_FIRST(&ktdq); ktd != NULL; | | 288 | for (ktd = TAILQ_FIRST(&ktdq); ktd != NULL; |
289 | ktd = TAILQ_NEXT(ktd, ktd_list)) { | | 289 | ktd = TAILQ_NEXT(ktd, ktd_list)) { |
290 | if (ktrsamefile(ktd->ktd_fp, fp)) { | | 290 | if (ktrsamefile(ktd->ktd_fp, fp)) { |
291 | ktdref(ktd); | | 291 | ktdref(ktd); |
292 | break; | | 292 | break; |
293 | } | | 293 | } |
294 | } | | 294 | } |
295 | | | 295 | |
296 | return (ktd); | | 296 | return (ktd); |
297 | } | | 297 | } |
298 | | | 298 | |
299 | void | | 299 | void |
300 | ktraddentry(lwp_t *l, struct ktrace_entry *kte, int flags) | | 300 | ktraddentry(lwp_t *l, struct ktrace_entry *kte, int flags) |
301 | { | | 301 | { |
302 | struct proc *p = l->l_proc; | | 302 | struct proc *p = l->l_proc; |
303 | struct ktr_desc *ktd; | | 303 | struct ktr_desc *ktd; |
304 | #ifdef DEBUG | | 304 | #ifdef DEBUG |
305 | struct timeval t1, t2; | | 305 | struct timeval t1, t2; |
306 | #endif | | 306 | #endif |
307 | | | 307 | |
308 | mutex_enter(&ktrace_lock); | | 308 | mutex_enter(&ktrace_lock); |
309 | | | 309 | |
310 | if (p->p_traceflag & KTRFAC_TRC_EMUL) { | | 310 | if (p->p_traceflag & KTRFAC_TRC_EMUL) { |
311 | /* Add emulation trace before first entry for this process */ | | 311 | /* Add emulation trace before first entry for this process */ |
312 | p->p_traceflag &= ~KTRFAC_TRC_EMUL; | | 312 | p->p_traceflag &= ~KTRFAC_TRC_EMUL; |
313 | mutex_exit(&ktrace_lock); | | 313 | mutex_exit(&ktrace_lock); |
314 | ktrexit(l); | | 314 | ktrexit(l); |
315 | ktremul(); | | 315 | ktremul(); |
316 | (void)ktrenter(l); | | 316 | (void)ktrenter(l); |
317 | mutex_enter(&ktrace_lock); | | 317 | mutex_enter(&ktrace_lock); |
318 | } | | 318 | } |
319 | | | 319 | |
320 | /* Tracing may have been cancelled. */ | | 320 | /* Tracing may have been cancelled. */ |
321 | ktd = p->p_tracep; | | 321 | ktd = p->p_tracep; |
322 | if (ktd == NULL) | | 322 | if (ktd == NULL) |
323 | goto freekte; | | 323 | goto freekte; |
324 | | | 324 | |
325 | /* | | 325 | /* |
326 | * Bump reference count so that the object will remain while | | 326 | * Bump reference count so that the object will remain while |
327 | * we are here. Note that the trace is controlled by other | | 327 | * we are here. Note that the trace is controlled by other |
328 | * process. | | 328 | * process. |
329 | */ | | 329 | */ |
330 | ktdref(ktd); | | 330 | ktdref(ktd); |
331 | | | 331 | |
332 | if (ktd->ktd_flags & KTDF_DONE) | | 332 | if (ktd->ktd_flags & KTDF_DONE) |
333 | goto relktd; | | 333 | goto relktd; |
334 | | | 334 | |
335 | if (ktd->ktd_qcount > ktd_maxentry) { | | 335 | if (ktd->ktd_qcount > ktd_maxentry) { |
336 | ktd_logerrl(ktd, KTDE_ENOSPC); | | 336 | ktd_logerrl(ktd, KTDE_ENOSPC); |
337 | goto relktd; | | 337 | goto relktd; |
338 | } | | 338 | } |
339 | TAILQ_INSERT_TAIL(&ktd->ktd_queue, kte, kte_list); | | 339 | TAILQ_INSERT_TAIL(&ktd->ktd_queue, kte, kte_list); |
340 | ktd->ktd_qcount++; | | 340 | ktd->ktd_qcount++; |
341 | if (ktd->ktd_flags & KTDF_BLOCKING) | | 341 | if (ktd->ktd_flags & KTDF_BLOCKING) |
342 | goto skip_sync; | | 342 | goto skip_sync; |
343 | | | 343 | |
344 | if (flags & KTA_WAITOK && | | 344 | if (flags & KTA_WAITOK && |
345 | (/* flags & KTA_LARGE */0 || ktd->ktd_flags & KTDF_WAIT || | | 345 | (/* flags & KTA_LARGE */0 || ktd->ktd_flags & KTDF_WAIT || |
346 | ktd->ktd_qcount > ktd_maxentry >> 1)) | | 346 | ktd->ktd_qcount > ktd_maxentry >> 1)) |
347 | /* | | 347 | /* |
348 | * Sync with writer thread since we're requesting rather | | 348 | * Sync with writer thread since we're requesting rather |
349 | * big one or many requests are pending. | | 349 | * big one or many requests are pending. |
350 | */ | | 350 | */ |
351 | do { | | 351 | do { |
352 | ktd->ktd_flags |= KTDF_WAIT; | | 352 | ktd->ktd_flags |= KTDF_WAIT; |
353 | ktd_wakeup(ktd); | | 353 | ktd_wakeup(ktd); |
354 | #ifdef DEBUG | | 354 | #ifdef DEBUG |
355 | getmicrouptime(&t1); | | 355 | getmicrouptime(&t1); |
356 | #endif | | 356 | #endif |
357 | if (cv_timedwait(&ktd->ktd_sync_cv, &ktrace_lock, | | 357 | if (cv_timedwait(&ktd->ktd_sync_cv, &ktrace_lock, |
358 | ktd_timeout * hz) != 0) { | | 358 | ktd_timeout * hz) != 0) { |
359 | ktd->ktd_flags |= KTDF_BLOCKING; | | 359 | ktd->ktd_flags |= KTDF_BLOCKING; |
360 | /* | | 360 | /* |
361 | * Maybe the writer thread is blocking | | 361 | * Maybe the writer thread is blocking |
362 | * completely for some reason, but | | 362 | * completely for some reason, but |
363 | * don't stop target process forever. | | 363 | * don't stop target process forever. |
364 | */ | | 364 | */ |
365 | log(LOG_NOTICE, "ktrace timeout\n"); | | 365 | log(LOG_NOTICE, "ktrace timeout\n"); |
366 | break; | | 366 | break; |
367 | } | | 367 | } |
368 | #ifdef DEBUG | | 368 | #ifdef DEBUG |
369 | getmicrouptime(&t2); | | 369 | getmicrouptime(&t2); |
370 | timersub(&t2, &t1, &t2); | | 370 | timersub(&t2, &t1, &t2); |
371 | if (t2.tv_sec > 0) | | 371 | if (t2.tv_sec > 0) |
372 | log(LOG_NOTICE, | | 372 | log(LOG_NOTICE, |
373 | "ktrace long wait: %lld.%06ld\n", | | 373 | "ktrace long wait: %lld.%06ld\n", |
374 | (long long)t2.tv_sec, (long)t2.tv_usec); | | 374 | (long long)t2.tv_sec, (long)t2.tv_usec); |
375 | #endif | | 375 | #endif |
376 | } while (p->p_tracep == ktd && | | 376 | } while (p->p_tracep == ktd && |
377 | (ktd->ktd_flags & (KTDF_WAIT | KTDF_DONE)) == KTDF_WAIT); | | 377 | (ktd->ktd_flags & (KTDF_WAIT | KTDF_DONE)) == KTDF_WAIT); |
378 | else { | | 378 | else { |
379 | /* Schedule delayed wakeup */ | | 379 | /* Schedule delayed wakeup */ |
380 | if (ktd->ktd_qcount > ktd->ktd_delayqcnt) | | 380 | if (ktd->ktd_qcount > ktd->ktd_delayqcnt) |
381 | ktd_wakeup(ktd); /* Wakeup now */ | | 381 | ktd_wakeup(ktd); /* Wakeup now */ |
382 | else if (!callout_pending(&ktd->ktd_wakch)) | | 382 | else if (!callout_pending(&ktd->ktd_wakch)) |
383 | callout_reset(&ktd->ktd_wakch, | | 383 | callout_reset(&ktd->ktd_wakch, |
384 | ktd->ktd_flags & KTDF_INTERACTIVE ? | | 384 | ktd->ktd_flags & KTDF_INTERACTIVE ? |
385 | ktd->ktd_intrwakdl : ktd->ktd_wakedelay, | | 385 | ktd->ktd_intrwakdl : ktd->ktd_wakedelay, |
386 | ktd_callout, ktd); | | 386 | ktd_callout, ktd); |
387 | } | | 387 | } |
388 | | | 388 | |
389 | skip_sync: | | 389 | skip_sync: |
390 | ktdrel(ktd); | | 390 | ktdrel(ktd); |
391 | mutex_exit(&ktrace_lock); | | 391 | mutex_exit(&ktrace_lock); |
392 | ktrexit(l); | | 392 | ktrexit(l); |
393 | return; | | 393 | return; |
394 | | | 394 | |
395 | relktd: | | 395 | relktd: |
396 | ktdrel(ktd); | | 396 | ktdrel(ktd); |
397 | | | 397 | |
398 | freekte: | | 398 | freekte: |
399 | mutex_exit(&ktrace_lock); | | 399 | mutex_exit(&ktrace_lock); |
400 | ktefree(kte); | | 400 | ktefree(kte); |
401 | ktrexit(l); | | 401 | ktrexit(l); |
402 | } | | 402 | } |
403 | | | 403 | |
404 | void | | 404 | void |
405 | ktefree(struct ktrace_entry *kte) | | 405 | ktefree(struct ktrace_entry *kte) |
406 | { | | 406 | { |
407 | | | 407 | |
408 | if (kte->kte_buf != kte->kte_space) | | 408 | if (kte->kte_buf != kte->kte_space) |
409 | kmem_free(kte->kte_buf, kte->kte_bufsz); | | 409 | kmem_free(kte->kte_buf, kte->kte_bufsz); |
410 | pool_cache_put(kte_cache, kte); | | 410 | pool_cache_put(kte_cache, kte); |
411 | } | | 411 | } |
412 | | | 412 | |
413 | /* | | 413 | /* |
414 | * "deep" compare of two files for the purposes of clearing a trace. | | 414 | * "deep" compare of two files for the purposes of clearing a trace. |
415 | * Returns true if they're the same open file, or if they point at the | | 415 | * Returns true if they're the same open file, or if they point at the |
416 | * same underlying vnode/socket. | | 416 | * same underlying vnode/socket. |
417 | */ | | 417 | */ |
418 | | | 418 | |
419 | int | | 419 | int |
420 | ktrsamefile(file_t *f1, file_t *f2) | | 420 | ktrsamefile(file_t *f1, file_t *f2) |
421 | { | | 421 | { |
422 | | | 422 | |
423 | return ((f1 == f2) || | | 423 | return ((f1 == f2) || |
424 | ((f1 != NULL) && (f2 != NULL) && | | 424 | ((f1 != NULL) && (f2 != NULL) && |
425 | (f1->f_type == f2->f_type) && | | 425 | (f1->f_type == f2->f_type) && |
426 | (f1->f_data == f2->f_data))); | | 426 | (f1->f_data == f2->f_data))); |
427 | } | | 427 | } |
428 | | | 428 | |
429 | void | | 429 | void |
430 | ktrderef(struct proc *p) | | 430 | ktrderef(struct proc *p) |
431 | { | | 431 | { |
432 | struct ktr_desc *ktd = p->p_tracep; | | 432 | struct ktr_desc *ktd = p->p_tracep; |
433 | | | 433 | |
434 | KASSERT(mutex_owned(&ktrace_lock)); | | 434 | KASSERT(mutex_owned(&ktrace_lock)); |
435 | | | 435 | |
436 | p->p_traceflag = 0; | | 436 | p->p_traceflag = 0; |
437 | if (ktd == NULL) | | 437 | if (ktd == NULL) |
438 | return; | | 438 | return; |
439 | p->p_tracep = NULL; | | 439 | p->p_tracep = NULL; |
440 | | | 440 | |
441 | cv_broadcast(&ktd->ktd_sync_cv); | | 441 | cv_broadcast(&ktd->ktd_sync_cv); |
442 | ktdrel(ktd); | | 442 | ktdrel(ktd); |
443 | } | | 443 | } |
444 | | | 444 | |
445 | void | | 445 | void |
446 | ktradref(struct proc *p) | | 446 | ktradref(struct proc *p) |
447 | { | | 447 | { |
448 | struct ktr_desc *ktd = p->p_tracep; | | 448 | struct ktr_desc *ktd = p->p_tracep; |
449 | | | 449 | |
450 | KASSERT(mutex_owned(&ktrace_lock)); | | 450 | KASSERT(mutex_owned(&ktrace_lock)); |
451 | | | 451 | |
452 | ktdref(ktd); | | 452 | ktdref(ktd); |
453 | } | | 453 | } |
454 | | | 454 | |
455 | int | | 455 | int |
456 | ktrderefall(struct ktr_desc *ktd, int auth) | | 456 | ktrderefall(struct ktr_desc *ktd, int auth) |
457 | { | | 457 | { |
458 | lwp_t *curl = curlwp; | | 458 | lwp_t *curl = curlwp; |
459 | struct proc *p; | | 459 | struct proc *p; |
460 | int error = 0; | | 460 | int error = 0; |
461 | | | 461 | |
462 | mutex_enter(proc_lock); | | 462 | mutex_enter(proc_lock); |
463 | PROCLIST_FOREACH(p, &allproc) { | | 463 | PROCLIST_FOREACH(p, &allproc) { |
464 | if ((p->p_flag & PK_MARKER) != 0 || p->p_tracep != ktd) | | 464 | if ((p->p_flag & PK_MARKER) != 0 || p->p_tracep != ktd) |
465 | continue; | | 465 | continue; |
466 | mutex_enter(p->p_lock); | | 466 | mutex_enter(p->p_lock); |
467 | mutex_enter(&ktrace_lock); | | 467 | mutex_enter(&ktrace_lock); |
468 | if (p->p_tracep == ktd) { | | 468 | if (p->p_tracep == ktd) { |
469 | if (!auth || ktrcanset(curl, p)) | | 469 | if (!auth || ktrcanset(curl, p)) |
470 | ktrderef(p); | | 470 | ktrderef(p); |
471 | else | | 471 | else |
472 | error = EPERM; | | 472 | error = EPERM; |
473 | } | | 473 | } |
474 | mutex_exit(&ktrace_lock); | | 474 | mutex_exit(&ktrace_lock); |
475 | mutex_exit(p->p_lock); | | 475 | mutex_exit(p->p_lock); |
476 | } | | 476 | } |
477 | mutex_exit(proc_lock); | | 477 | mutex_exit(proc_lock); |
478 | | | 478 | |
479 | return error; | | 479 | return error; |
480 | } | | 480 | } |
481 | | | 481 | |
/*
 * Allocate and initialize a trace entry of the given type with room
 * for "sz" payload bytes.  On success, *ktep is the new entry and
 * *bufp points at its payload area; the entry header is filled in with
 * the pid, command name, timestamp and per-process trace format
 * version.
 *
 * Returns 0 on success, EAGAIN if this LWP is already inside ktrace
 * (recursion guard via ktrenter()), or ENOMEM if the payload buffer
 * cannot be allocated.  On success the LWP remains marked as inside
 * ktrace until the entry is queued.
 */
int
ktealloc(struct ktrace_entry **ktep, void **bufp, lwp_t *l, int type,
    size_t sz)
{
	struct proc *p = l->l_proc;
	struct ktrace_entry *kte;
	struct ktr_header *kth;
	struct timespec ts;
	void *buf;

	/* Guard against ktrace recursion (e.g. from allocator paths). */
	if (ktrenter(l))
		return EAGAIN;

	kte = pool_cache_get(kte_cache, PR_WAITOK);
	/* Small payloads use the inline buffer, large ones kmem_alloc(). */
	if (sz > sizeof(kte->kte_space)) {
		if ((buf = kmem_alloc(sz, KM_SLEEP)) == NULL) {
			pool_cache_put(kte_cache, kte);
			ktrexit(l);
			return ENOMEM;
		}
	} else
		buf = kte->kte_space;

	kte->kte_bufsz = sz;
	kte->kte_buf = buf;

	kth = &kte->kte_kth;
	(void)memset(kth, 0, sizeof(*kth));
	kth->ktr_len = sz;
	kth->ktr_type = type;
	kth->ktr_pid = p->p_pid;
	memcpy(kth->ktr_comm, p->p_comm, MAXCOMLEN);
	kth->ktr_version = KTRFAC_VERSION(p->p_traceflag);

	/* The timestamp/LWP-id layout depends on the trace format version. */
	nanotime(&ts);
	switch (KTRFAC_VERSION(p->p_traceflag)) {
	case 0:
		/* This is the original format */
		kth->ktr_otv.tv_sec = ts.tv_sec;
		kth->ktr_otv.tv_usec = ts.tv_nsec / 1000;
		break;
	case 1:
		kth->ktr_olid = l->l_lid;
		kth->ktr_ots.tv_sec = ts.tv_sec;
		kth->ktr_ots.tv_nsec = ts.tv_nsec;
		break;
	case 2:
		kth->ktr_lid = l->l_lid;
		kth->ktr_ts.tv_sec = ts.tv_sec;
		kth->ktr_ts.tv_nsec = ts.tv_nsec;
		break;
	default:
		break;
	}

	*ktep = kte;
	*bufp = buf;

	return 0;
}
542 | | | 542 | |
543 | void | | 543 | void |
544 | ktr_syscall(register_t code, const register_t args[], int narg) | | 544 | ktr_syscall(register_t code, const register_t args[], int narg) |
545 | { | | 545 | { |
546 | lwp_t *l = curlwp; | | 546 | lwp_t *l = curlwp; |
547 | struct proc *p = l->l_proc; | | 547 | struct proc *p = l->l_proc; |
548 | struct ktrace_entry *kte; | | 548 | struct ktrace_entry *kte; |
549 | struct ktr_syscall *ktp; | | 549 | struct ktr_syscall *ktp; |
550 | register_t *argp; | | 550 | register_t *argp; |
551 | size_t len; | | 551 | size_t len; |
552 | u_int i; | | 552 | u_int i; |
553 | | | 553 | |
554 | if (!KTRPOINT(p, KTR_SYSCALL)) | | 554 | if (!KTRPOINT(p, KTR_SYSCALL)) |
555 | return; | | 555 | return; |
556 | | | 556 | |
557 | len = sizeof(struct ktr_syscall) + narg * sizeof argp[0]; | | 557 | len = sizeof(struct ktr_syscall) + narg * sizeof argp[0]; |
558 | | | 558 | |
559 | if (ktealloc(&kte, (void *)&ktp, l, KTR_SYSCALL, len)) | | 559 | if (ktealloc(&kte, (void *)&ktp, l, KTR_SYSCALL, len)) |
560 | return; | | 560 | return; |
561 | | | 561 | |
562 | ktp->ktr_code = code; | | 562 | ktp->ktr_code = code; |
563 | ktp->ktr_argsize = narg * sizeof argp[0]; | | 563 | ktp->ktr_argsize = narg * sizeof argp[0]; |
564 | argp = (register_t *)(ktp + 1); | | 564 | argp = (register_t *)(ktp + 1); |
565 | for (i = 0; i < narg; i++) | | 565 | for (i = 0; i < narg; i++) |
566 | *argp++ = args[i]; | | 566 | *argp++ = args[i]; |
567 | | | 567 | |
568 | ktraddentry(l, kte, KTA_WAITOK); | | 568 | ktraddentry(l, kte, KTA_WAITOK); |
569 | } | | 569 | } |
570 | | | 570 | |
571 | void | | 571 | void |
572 | ktr_sysret(register_t code, int error, register_t *retval) | | 572 | ktr_sysret(register_t code, int error, register_t *retval) |
573 | { | | 573 | { |
574 | lwp_t *l = curlwp; | | 574 | lwp_t *l = curlwp; |
575 | struct ktrace_entry *kte; | | 575 | struct ktrace_entry *kte; |
576 | struct ktr_sysret *ktp; | | 576 | struct ktr_sysret *ktp; |
577 | | | 577 | |
578 | if (!KTRPOINT(l->l_proc, KTR_SYSRET)) | | 578 | if (!KTRPOINT(l->l_proc, KTR_SYSRET)) |
579 | return; | | 579 | return; |
580 | | | 580 | |
581 | if (ktealloc(&kte, (void *)&ktp, l, KTR_SYSRET, | | 581 | if (ktealloc(&kte, (void *)&ktp, l, KTR_SYSRET, |
582 | sizeof(struct ktr_sysret))) | | 582 | sizeof(struct ktr_sysret))) |
583 | return; | | 583 | return; |
584 | | | 584 | |
585 | ktp->ktr_code = code; | | 585 | ktp->ktr_code = code; |
586 | ktp->ktr_eosys = 0; /* XXX unused */ | | 586 | ktp->ktr_eosys = 0; /* XXX unused */ |
587 | ktp->ktr_error = error; | | 587 | ktp->ktr_error = error; |
588 | ktp->ktr_retval = retval ? retval[0] : 0; | | 588 | ktp->ktr_retval = retval ? retval[0] : 0; |
589 | ktp->ktr_retval_1 = retval ? retval[1] : 0; | | 589 | ktp->ktr_retval_1 = retval ? retval[1] : 0; |
590 | | | 590 | |
591 | ktraddentry(l, kte, KTA_WAITOK); | | 591 | ktraddentry(l, kte, KTA_WAITOK); |
592 | } | | 592 | } |
593 | | | 593 | |
594 | void | | 594 | void |
595 | ktr_namei(const char *path, size_t pathlen) | | 595 | ktr_namei(const char *path, size_t pathlen) |
596 | { | | 596 | { |
597 | lwp_t *l = curlwp; | | 597 | lwp_t *l = curlwp; |
598 | | | 598 | |
599 | if (!KTRPOINT(l->l_proc, KTR_NAMEI)) | | 599 | if (!KTRPOINT(l->l_proc, KTR_NAMEI)) |
600 | return; | | 600 | return; |
601 | | | 601 | |
602 | ktr_kmem(l, KTR_NAMEI, path, pathlen); | | 602 | ktr_kmem(l, KTR_NAMEI, path, pathlen); |
603 | } | | 603 | } |
604 | | | 604 | |
605 | void | | 605 | void |
606 | ktr_namei2(const char *eroot, size_t erootlen, | | 606 | ktr_namei2(const char *eroot, size_t erootlen, |
607 | const char *path, size_t pathlen) | | 607 | const char *path, size_t pathlen) |
608 | { | | 608 | { |
609 | lwp_t *l = curlwp; | | 609 | lwp_t *l = curlwp; |
610 | struct ktrace_entry *kte; | | 610 | struct ktrace_entry *kte; |
611 | void *buf; | | 611 | void *buf; |
612 | | | 612 | |
613 | if (!KTRPOINT(l->l_proc, KTR_NAMEI)) | | 613 | if (!KTRPOINT(l->l_proc, KTR_NAMEI)) |
614 | return; | | 614 | return; |
615 | | | 615 | |
616 | if (ktealloc(&kte, &buf, l, KTR_NAMEI, erootlen + pathlen)) | | 616 | if (ktealloc(&kte, &buf, l, KTR_NAMEI, erootlen + pathlen)) |
617 | return; | | 617 | return; |
618 | memcpy(buf, eroot, erootlen); | | 618 | memcpy(buf, eroot, erootlen); |
619 | buf = (char *)buf + erootlen; | | 619 | buf = (char *)buf + erootlen; |
620 | memcpy(buf, path, pathlen); | | 620 | memcpy(buf, path, pathlen); |
621 | ktraddentry(l, kte, KTA_WAITOK); | | 621 | ktraddentry(l, kte, KTA_WAITOK); |
622 | } | | 622 | } |
623 | | | 623 | |
624 | void | | 624 | void |
625 | ktr_emul(void) | | 625 | ktr_emul(void) |
626 | { | | 626 | { |
627 | lwp_t *l = curlwp; | | 627 | lwp_t *l = curlwp; |
628 | const char *emul = l->l_proc->p_emul->e_name; | | 628 | const char *emul = l->l_proc->p_emul->e_name; |
629 | | | 629 | |
630 | if (!KTRPOINT(l->l_proc, KTR_EMUL)) | | 630 | if (!KTRPOINT(l->l_proc, KTR_EMUL)) |
631 | return; | | 631 | return; |
632 | | | 632 | |
633 | ktr_kmem(l, KTR_EMUL, emul, strlen(emul)); | | 633 | ktr_kmem(l, KTR_EMUL, emul, strlen(emul)); |
634 | } | | 634 | } |
635 | | | 635 | |
636 | void | | 636 | void |
637 | ktr_execarg(const void *bf, size_t len) | | 637 | ktr_execarg(const void *bf, size_t len) |
638 | { | | 638 | { |
639 | lwp_t *l = curlwp; | | 639 | lwp_t *l = curlwp; |
640 | | | 640 | |
641 | if (!KTRPOINT(l->l_proc, KTR_EXEC_ARG)) | | 641 | if (!KTRPOINT(l->l_proc, KTR_EXEC_ARG)) |
642 | return; | | 642 | return; |
643 | | | 643 | |
644 | ktr_kmem(l, KTR_EXEC_ARG, bf, len); | | 644 | ktr_kmem(l, KTR_EXEC_ARG, bf, len); |
645 | } | | 645 | } |
646 | | | 646 | |
647 | void | | 647 | void |
648 | ktr_execenv(const void *bf, size_t len) | | 648 | ktr_execenv(const void *bf, size_t len) |
649 | { | | 649 | { |
650 | lwp_t *l = curlwp; | | 650 | lwp_t *l = curlwp; |
651 | | | 651 | |
652 | if (!KTRPOINT(l->l_proc, KTR_EXEC_ENV)) | | 652 | if (!KTRPOINT(l->l_proc, KTR_EXEC_ENV)) |
653 | return; | | 653 | return; |
654 | | | 654 | |
655 | ktr_kmem(l, KTR_EXEC_ENV, bf, len); | | 655 | ktr_kmem(l, KTR_EXEC_ENV, bf, len); |
656 | } | | 656 | } |
657 | | | 657 | |
658 | static void | | 658 | static void |
659 | ktr_kmem(lwp_t *l, int type, const void *bf, size_t len) | | 659 | ktr_kmem(lwp_t *l, int type, const void *bf, size_t len) |
660 | { | | 660 | { |
661 | struct ktrace_entry *kte; | | 661 | struct ktrace_entry *kte; |
662 | void *buf; | | 662 | void *buf; |
663 | | | 663 | |
664 | if (ktealloc(&kte, &buf, l, type, len)) | | 664 | if (ktealloc(&kte, &buf, l, type, len)) |
665 | return; | | 665 | return; |
666 | memcpy(buf, bf, len); | | 666 | memcpy(buf, bf, len); |
667 | ktraddentry(l, kte, KTA_WAITOK); | | 667 | ktraddentry(l, kte, KTA_WAITOK); |
668 | } | | 668 | } |
669 | | | 669 | |
/*
 * Record generic process I/O (KTR_GENIO).  The user data described by
 * the iovec array is copied in and queued in chunks of at most
 * PAGE_SIZE bytes (ktr_genio header included), yielding between chunks
 * if preemption is pending.
 *
 * NOTE(review): "iov" is consumed destructively (iov_base/iov_len are
 * advanced), so callers must pass a scratch copy they do not reuse.
 */
static void
ktr_io(lwp_t *l, int fd, enum uio_rw rw, struct iovec *iov, size_t len)
{
	struct ktrace_entry *kte;
	struct ktr_genio *ktp;
	size_t resid = len, cnt, buflen;
	char *cp;

next:
	/* At most one page per entry, including the ktr_genio header. */
	buflen = min(PAGE_SIZE, resid + sizeof(struct ktr_genio));

	if (ktealloc(&kte, (void *)&ktp, l, KTR_GENIO, buflen))
		return;

	ktp->ktr_fd = fd;
	ktp->ktr_rw = rw;

	cp = (void *)(ktp + 1);
	buflen -= sizeof(struct ktr_genio);
	/* ktr_len grows below as user data is successfully copied in. */
	kte->kte_kth.ktr_len = sizeof(struct ktr_genio);

	while (buflen > 0) {
		cnt = min(iov->iov_len, buflen);
		if (copyin(iov->iov_base, cp, cnt) != 0)
			goto out;
		kte->kte_kth.ktr_len += cnt;
		cp += cnt;
		buflen -= cnt;
		resid -= cnt;
		/* Advance within the current iovec, or move to the next. */
		iov->iov_len -= cnt;
		if (iov->iov_len == 0)
			iov++;
		else
			iov->iov_base = (char *)iov->iov_base + cnt;
	}

	/*
	 * Don't push too many entries at once.  It would cause kmem map
	 * shortage.
	 */
	ktraddentry(l, kte, KTA_WAITOK | KTA_LARGE);
	if (resid > 0) {
		/* Yield between chunks if we are asked to. */
		if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD) {
			(void)ktrenter(l);
			preempt();
			ktrexit(l);
		}

		goto next;
	}

	return;

out:
	/* copyin() failed: discard the partially built entry. */
	ktefree(kte);
	ktrexit(l);
}
726 | | | 727 | |
727 | void | | 728 | void |
728 | ktr_genio(int fd, enum uio_rw rw, const void *addr, size_t len, int error) | | 729 | ktr_genio(int fd, enum uio_rw rw, const void *addr, size_t len, int error) |
729 | { | | 730 | { |
730 | lwp_t *l = curlwp; | | 731 | lwp_t *l = curlwp; |
731 | struct iovec iov; | | 732 | struct iovec iov; |
732 | | | 733 | |
733 | if (!KTRPOINT(l->l_proc, KTR_GENIO) || error != 0) | | 734 | if (!KTRPOINT(l->l_proc, KTR_GENIO) || error != 0) |
734 | return; | | 735 | return; |
735 | iov.iov_base = __UNCONST(addr); | | 736 | iov.iov_base = __UNCONST(addr); |
736 | iov.iov_len = len; | | 737 | iov.iov_len = len; |
737 | ktr_io(l, fd, rw, &iov, len); | | 738 | ktr_io(l, fd, rw, &iov, len); |
738 | } | | 739 | } |
739 | | | 740 | |
740 | void | | 741 | void |
741 | ktr_geniov(int fd, enum uio_rw rw, struct iovec *iov, size_t len, int error) | | 742 | ktr_geniov(int fd, enum uio_rw rw, struct iovec *iov, size_t len, int error) |
742 | { | | 743 | { |
743 | lwp_t *l = curlwp; | | 744 | lwp_t *l = curlwp; |
744 | | | 745 | |
745 | if (!KTRPOINT(l->l_proc, KTR_GENIO) || error != 0) | | 746 | if (!KTRPOINT(l->l_proc, KTR_GENIO) || error != 0) |
746 | return; | | 747 | return; |
747 | ktr_io(l, fd, rw, iov, len); | | 748 | ktr_io(l, fd, rw, iov, len); |
748 | } | | 749 | } |
749 | | | 750 | |
750 | void | | 751 | void |
751 | ktr_mibio(int fd, enum uio_rw rw, const void *addr, size_t len, int error) | | 752 | ktr_mibio(int fd, enum uio_rw rw, const void *addr, size_t len, int error) |
752 | { | | 753 | { |
753 | lwp_t *l = curlwp; | | 754 | lwp_t *l = curlwp; |
754 | struct iovec iov; | | 755 | struct iovec iov; |
755 | | | 756 | |
756 | if (!KTRPOINT(l->l_proc, KTR_MIB) || error != 0) | | 757 | if (!KTRPOINT(l->l_proc, KTR_MIB) || error != 0) |
757 | return; | | 758 | return; |
758 | iov.iov_base = __UNCONST(addr); | | 759 | iov.iov_base = __UNCONST(addr); |
759 | iov.iov_len = len; | | 760 | iov.iov_len = len; |
760 | ktr_io(l, fd, rw, &iov, len); | | 761 | ktr_io(l, fd, rw, &iov, len); |
761 | } | | 762 | } |
762 | | | 763 | |
/*
 * Record signal delivery (KTR_PSIG).  The entry carries the signal
 * number, handler, blocked mask and trap code; when ksiginfo is
 * supplied, the full siginfo is appended and ktr_len is extended to
 * cover it.
 */
void
ktr_psig(int sig, sig_t action, const sigset_t *mask,
    const ksiginfo_t *ksi)
{
	struct ktrace_entry *kte;
	lwp_t *l = curlwp;
	/* Payload layout: ktr_psig header optionally followed by siginfo. */
	struct {
		struct ktr_psig	kp;
		siginfo_t	si;
	} *kbuf;

	if (!KTRPOINT(l->l_proc, KTR_PSIG))
		return;

	/* Always allocate the full size; ktr_len is trimmed below. */
	if (ktealloc(&kte, (void *)&kbuf, l, KTR_PSIG, sizeof(*kbuf)))
		return;

	kbuf->kp.signo = (char)sig;
	kbuf->kp.action = action;
	kbuf->kp.mask = *mask;

	if (ksi) {
		kbuf->kp.code = KSI_TRAPCODE(ksi);
		(void)memset(&kbuf->si, 0, sizeof(kbuf->si));
		kbuf->si._info = ksi->ksi_info;
		kte->kte_kth.ktr_len = sizeof(*kbuf);
	} else {
		kbuf->kp.code = 0;
		/* No siginfo: record only the ktr_psig header. */
		kte->kte_kth.ktr_len = sizeof(struct ktr_psig);
	}

	ktraddentry(l, kte, KTA_WAITOK);
}
796 | | | 797 | |
797 | void | | 798 | void |
798 | ktr_csw(int out, int user) | | 799 | ktr_csw(int out, int user) |
799 | { | | 800 | { |
800 | lwp_t *l = curlwp; | | 801 | lwp_t *l = curlwp; |
801 | struct proc *p = l->l_proc; | | 802 | struct proc *p = l->l_proc; |
802 | struct ktrace_entry *kte; | | 803 | struct ktrace_entry *kte; |
803 | struct ktr_csw *kc; | | 804 | struct ktr_csw *kc; |
804 | | | 805 | |
805 | if (!KTRPOINT(p, KTR_CSW)) | | 806 | if (!KTRPOINT(p, KTR_CSW)) |
806 | return; | | 807 | return; |
807 | | | 808 | |
808 | /* | | 809 | /* |
809 | * Don't record context switches resulting from blocking on | | 810 | * Don't record context switches resulting from blocking on |
810 | * locks; it's too easy to get duff results. | | 811 | * locks; it's too easy to get duff results. |
811 | */ | | 812 | */ |
812 | if (l->l_syncobj == &mutex_syncobj || l->l_syncobj == &rw_syncobj) | | 813 | if (l->l_syncobj == &mutex_syncobj || l->l_syncobj == &rw_syncobj) |
813 | return; | | 814 | return; |
814 | | | 815 | |
815 | /* | | 816 | /* |
816 | * We can't sleep if we're already going to sleep (if original | | 817 | * We can't sleep if we're already going to sleep (if original |
817 | * condition is met during sleep, we hang up). | | 818 | * condition is met during sleep, we hang up). |
818 | * | | 819 | * |
819 | * XXX This is not ideal: it would be better to maintain a pool | | 820 | * XXX This is not ideal: it would be better to maintain a pool |
820 | * of ktes and actually push this to the kthread when context | | 821 | * of ktes and actually push this to the kthread when context |
821 | * switch happens, however given the points where we are called | | 822 | * switch happens, however given the points where we are called |
822 | * from that is difficult to do. | | 823 | * from that is difficult to do. |
823 | */ | | 824 | */ |
824 | if (out) { | | 825 | if (out) { |
825 | struct timespec ts; | | 826 | struct timespec ts; |
826 | if (ktrenter(l)) | | 827 | if (ktrenter(l)) |
827 | return; | | 828 | return; |
828 | | | 829 | |
829 | nanotime(&l->l_ktrcsw); | | 830 | nanotime(&l->l_ktrcsw); |
830 | l->l_pflag |= LP_KTRCSW; | | 831 | l->l_pflag |= LP_KTRCSW; |
831 | nanotime(&ts); | | 832 | nanotime(&ts); |
832 | if (user) | | 833 | if (user) |
833 | l->l_pflag |= LP_KTRCSWUSER; | | 834 | l->l_pflag |= LP_KTRCSWUSER; |
834 | else | | 835 | else |
835 | l->l_pflag &= ~LP_KTRCSWUSER; | | 836 | l->l_pflag &= ~LP_KTRCSWUSER; |
836 | | | 837 | |
837 | ktrexit(l); | | 838 | ktrexit(l); |
838 | return; | | 839 | return; |
839 | } | | 840 | } |
840 | | | 841 | |
841 | /* | | 842 | /* |
842 | * On the way back in, we need to record twice: once for entry, and | | 843 | * On the way back in, we need to record twice: once for entry, and |
843 | * once for exit. | | 844 | * once for exit. |
844 | */ | | 845 | */ |
845 | if ((l->l_pflag & LP_KTRCSW) != 0) { | | 846 | if ((l->l_pflag & LP_KTRCSW) != 0) { |
846 | struct timespec *ts; | | 847 | struct timespec *ts; |
847 | l->l_pflag &= ~LP_KTRCSW; | | 848 | l->l_pflag &= ~LP_KTRCSW; |
848 | | | 849 | |
849 | if (ktealloc(&kte, (void *)&kc, l, KTR_CSW, sizeof(*kc))) | | 850 | if (ktealloc(&kte, (void *)&kc, l, KTR_CSW, sizeof(*kc))) |
850 | return; | | 851 | return; |
851 | | | 852 | |
852 | kc->out = 1; | | 853 | kc->out = 1; |
853 | kc->user = ((l->l_pflag & LP_KTRCSWUSER) != 0); | | 854 | kc->user = ((l->l_pflag & LP_KTRCSWUSER) != 0); |
854 | | | 855 | |
855 | ts = &l->l_ktrcsw; | | 856 | ts = &l->l_ktrcsw; |
856 | switch (KTRFAC_VERSION(p->p_traceflag)) { | | 857 | switch (KTRFAC_VERSION(p->p_traceflag)) { |
857 | case 0: | | 858 | case 0: |
858 | kte->kte_kth.ktr_otv.tv_sec = ts->tv_sec; | | 859 | kte->kte_kth.ktr_otv.tv_sec = ts->tv_sec; |
859 | kte->kte_kth.ktr_otv.tv_usec = ts->tv_nsec / 1000; | | 860 | kte->kte_kth.ktr_otv.tv_usec = ts->tv_nsec / 1000; |
860 | break; | | 861 | break; |
861 | case 1: | | 862 | case 1: |
862 | kte->kte_kth.ktr_ots.tv_sec = ts->tv_sec; | | 863 | kte->kte_kth.ktr_ots.tv_sec = ts->tv_sec; |
863 | kte->kte_kth.ktr_ots.tv_nsec = ts->tv_nsec; | | 864 | kte->kte_kth.ktr_ots.tv_nsec = ts->tv_nsec; |
864 | break; | | 865 | break; |
865 | case 2: | | 866 | case 2: |
866 | kte->kte_kth.ktr_ts.tv_sec = ts->tv_sec; | | 867 | kte->kte_kth.ktr_ts.tv_sec = ts->tv_sec; |
867 | kte->kte_kth.ktr_ts.tv_nsec = ts->tv_nsec; | | 868 | kte->kte_kth.ktr_ts.tv_nsec = ts->tv_nsec; |
868 | break; | | 869 | break; |
869 | default: | | 870 | default: |
870 | break; | | 871 | break; |
871 | } | | 872 | } |
872 | | | 873 | |
873 | ktraddentry(l, kte, KTA_WAITOK); | | 874 | ktraddentry(l, kte, KTA_WAITOK); |
874 | } | | 875 | } |
875 | | | 876 | |
876 | if (ktealloc(&kte, (void *)&kc, l, KTR_CSW, sizeof(*kc))) | | 877 | if (ktealloc(&kte, (void *)&kc, l, KTR_CSW, sizeof(*kc))) |
877 | return; | | 878 | return; |
878 | | | 879 | |
879 | kc->out = 0; | | 880 | kc->out = 0; |
880 | kc->user = user; | | 881 | kc->user = user; |
881 | | | 882 | |
882 | ktraddentry(l, kte, KTA_WAITOK); | | 883 | ktraddentry(l, kte, KTA_WAITOK); |
883 | } | | 884 | } |
884 | | | 885 | |
885 | bool | | 886 | bool |
886 | ktr_point(int fac_bit) | | 887 | ktr_point(int fac_bit) |
887 | { | | 888 | { |
888 | return curlwp->l_proc->p_traceflag & fac_bit; | | 889 | return curlwp->l_proc->p_traceflag & fac_bit; |
889 | } | | 890 | } |
890 | | | 891 | |
891 | int | | 892 | int |
892 | ktruser(const char *id, void *addr, size_t len, int ustr) | | 893 | ktruser(const char *id, void *addr, size_t len, int ustr) |
893 | { | | 894 | { |
894 | struct ktrace_entry *kte; | | 895 | struct ktrace_entry *kte; |
895 | struct ktr_user *ktp; | | 896 | struct ktr_user *ktp; |
896 | lwp_t *l = curlwp; | | 897 | lwp_t *l = curlwp; |
897 | void *user_dta; | | 898 | void *user_dta; |
898 | int error; | | 899 | int error; |
899 | | | 900 | |
900 | if (!KTRPOINT(l->l_proc, KTR_USER)) | | 901 | if (!KTRPOINT(l->l_proc, KTR_USER)) |
901 | return 0; | | 902 | return 0; |
902 | | | 903 | |
903 | if (len > KTR_USER_MAXLEN) | | 904 | if (len > KTR_USER_MAXLEN) |
904 | return ENOSPC; | | 905 | return ENOSPC; |
905 | | | 906 | |
906 | error = ktealloc(&kte, (void *)&ktp, l, KTR_USER, sizeof(*ktp) + len); | | 907 | error = ktealloc(&kte, (void *)&ktp, l, KTR_USER, sizeof(*ktp) + len); |
907 | if (error != 0) | | 908 | if (error != 0) |
908 | return error; | | 909 | return error; |
909 | | | 910 | |
910 | if (ustr) { | | 911 | if (ustr) { |
911 | if (copyinstr(id, ktp->ktr_id, KTR_USER_MAXIDLEN, NULL) != 0) | | 912 | if (copyinstr(id, ktp->ktr_id, KTR_USER_MAXIDLEN, NULL) != 0) |
912 | ktp->ktr_id[0] = '\0'; | | 913 | ktp->ktr_id[0] = '\0'; |
913 | } else | | 914 | } else |
914 | strncpy(ktp->ktr_id, id, KTR_USER_MAXIDLEN); | | 915 | strncpy(ktp->ktr_id, id, KTR_USER_MAXIDLEN); |
915 | ktp->ktr_id[KTR_USER_MAXIDLEN-1] = '\0'; | | 916 | ktp->ktr_id[KTR_USER_MAXIDLEN-1] = '\0'; |
916 | | | 917 | |
917 | user_dta = (void *)(ktp + 1); | | 918 | user_dta = (void *)(ktp + 1); |
918 | if ((error = copyin(addr, (void *)user_dta, len)) != 0) | | 919 | if ((error = copyin(addr, (void *)user_dta, len)) != 0) |
919 | len = 0; | | 920 | len = 0; |
920 | | | 921 | |
921 | ktraddentry(l, kte, KTA_WAITOK); | | 922 | ktraddentry(l, kte, KTA_WAITOK); |
922 | return error; | | 923 | return error; |
923 | } | | 924 | } |
924 | | | 925 | |
925 | void | | 926 | void |
926 | ktr_kuser(const char *id, void *addr, size_t len) | | 927 | ktr_kuser(const char *id, void *addr, size_t len) |
927 | { | | 928 | { |
928 | struct ktrace_entry *kte; | | 929 | struct ktrace_entry *kte; |
929 | struct ktr_user *ktp; | | 930 | struct ktr_user *ktp; |
930 | lwp_t *l = curlwp; | | 931 | lwp_t *l = curlwp; |
931 | int error; | | 932 | int error; |
932 | | | 933 | |
933 | if (!KTRPOINT(l->l_proc, KTR_USER)) | | 934 | if (!KTRPOINT(l->l_proc, KTR_USER)) |
934 | return; | | 935 | return; |
935 | | | 936 | |
936 | if (len > KTR_USER_MAXLEN) | | 937 | if (len > KTR_USER_MAXLEN) |
937 | return; | | 938 | return; |
938 | | | 939 | |
939 | error = ktealloc(&kte, (void *)&ktp, l, KTR_USER, sizeof(*ktp) + len); | | 940 | error = ktealloc(&kte, (void *)&ktp, l, KTR_USER, sizeof(*ktp) + len); |
940 | if (error != 0) | | 941 | if (error != 0) |
941 | return; | | 942 | return; |
942 | | | 943 | |
943 | strlcpy(ktp->ktr_id, id, KTR_USER_MAXIDLEN); | | 944 | strlcpy(ktp->ktr_id, id, KTR_USER_MAXIDLEN); |
944 | | | 945 | |
945 | memcpy(ktp + 1, addr, len); | | 946 | memcpy(ktp + 1, addr, len); |
946 | | | 947 | |
947 | ktraddentry(l, kte, KTA_WAITOK); | | 948 | ktraddentry(l, kte, KTA_WAITOK); |
948 | } | | 949 | } |
949 | | | 950 | |
950 | void | | 951 | void |
951 | ktr_mmsg(const void *msgh, size_t size) | | 952 | ktr_mmsg(const void *msgh, size_t size) |
952 | { | | 953 | { |
953 | lwp_t *l = curlwp; | | 954 | lwp_t *l = curlwp; |
954 | | | 955 | |
955 | if (!KTRPOINT(l->l_proc, KTR_MMSG)) | | 956 | if (!KTRPOINT(l->l_proc, KTR_MMSG)) |
956 | return; | | 957 | return; |
957 | | | 958 | |
958 | ktr_kmem(l, KTR_MMSG, msgh, size); | | 959 | ktr_kmem(l, KTR_MMSG, msgh, size); |
959 | } | | 960 | } |
960 | | | 961 | |
961 | void | | 962 | void |
962 | ktr_mool(const void *kaddr, size_t size, const void *uaddr) | | 963 | ktr_mool(const void *kaddr, size_t size, const void *uaddr) |
963 | { | | 964 | { |
964 | struct ktrace_entry *kte; | | 965 | struct ktrace_entry *kte; |
965 | struct ktr_mool *kp; | | 966 | struct ktr_mool *kp; |
966 | struct ktr_mool *bf; | | 967 | struct ktr_mool *bf; |
967 | lwp_t *l = curlwp; | | 968 | lwp_t *l = curlwp; |
968 | | | 969 | |
969 | if (!KTRPOINT(l->l_proc, KTR_MOOL)) | | 970 | if (!KTRPOINT(l->l_proc, KTR_MOOL)) |
970 | return; | | 971 | return; |
971 | | | 972 | |
972 | if (ktealloc(&kte, (void *)&kp, l, KTR_MOOL, size + sizeof(*kp))) | | 973 | if (ktealloc(&kte, (void *)&kp, l, KTR_MOOL, size + sizeof(*kp))) |
973 | return; | | 974 | return; |
974 | | | 975 | |
975 | kp->uaddr = uaddr; | | 976 | kp->uaddr = uaddr; |
976 | kp->size = size; | | 977 | kp->size = size; |
977 | bf = kp + 1; /* Skip uaddr and size */ | | 978 | bf = kp + 1; /* Skip uaddr and size */ |
978 | (void)memcpy(bf, kaddr, size); | | 979 | (void)memcpy(bf, kaddr, size); |
979 | | | 980 | |
980 | ktraddentry(l, kte, KTA_WAITOK); | | 981 | ktraddentry(l, kte, KTA_WAITOK); |
981 | } | | 982 | } |
982 | | | 983 | |
/*
 * Record a scheduler-activations upcall (KTR_SAUPCALL).  The entry
 * holds the upcall parameters followed by copies of nevent + nint + 1
 * sa_t structures taken via the kernel pointer array "ksas".
 */
void
ktr_saupcall(struct lwp *l, int type, int nevent, int nint, void *sas,
    void *ap, void *ksas)
{
	struct ktrace_entry *kte;
	struct ktr_saupcall *ktp;
	size_t len, sz;
	struct sa_t **sapp;
	int i;

	if (!KTRPOINT(l->l_proc, KTR_SAUPCALL))
		return;

	len = sizeof(struct ktr_saupcall);
	/* Room for the header plus nevent + nint + 1 sa_t records. */
	sz = len + sizeof(struct sa_t) * (nevent + nint + 1);

	if (ktealloc(&kte, (void *)&ktp, l, KTR_SAUPCALL, sz))
		return;

	ktp->ktr_type = type;
	ktp->ktr_nevent = nevent;
	ktp->ktr_nint = nint;
	ktp->ktr_sas = sas;
	ktp->ktr_ap = ap;

	/* Copy the sa_t's */
	sapp = (struct sa_t **) ksas;

	for (i = nevent + nint; i >= 0; i--) {
		memcpy((char *)ktp + len, *sapp, sizeof(struct sa_t));
		len += sizeof(struct sa_t);
		sapp++;
	}

	/* Record the final length actually filled in. */
	kte->kte_kth.ktr_len = len;
	ktraddentry(l, kte, KTA_WAITOK);
}
1020 | | | 1021 | |
1021 | void | | 1022 | void |
1022 | ktr_mib(const int *name, u_int namelen) | | 1023 | ktr_mib(const int *name, u_int namelen) |
1023 | { | | 1024 | { |
1024 | struct ktrace_entry *kte; | | 1025 | struct ktrace_entry *kte; |
1025 | int *namep; | | 1026 | int *namep; |
1026 | size_t size; | | 1027 | size_t size; |
1027 | lwp_t *l = curlwp; | | 1028 | lwp_t *l = curlwp; |
1028 | | | 1029 | |
1029 | if (!KTRPOINT(l->l_proc, KTR_MIB)) | | 1030 | if (!KTRPOINT(l->l_proc, KTR_MIB)) |
1030 | return; | | 1031 | return; |
1031 | | | 1032 | |
1032 | size = namelen * sizeof(*name); | | 1033 | size = namelen * sizeof(*name); |
1033 | | | 1034 | |
1034 | if (ktealloc(&kte, (void *)&namep, l, KTR_MIB, size)) | | 1035 | if (ktealloc(&kte, (void *)&namep, l, KTR_MIB, size)) |
1035 | return; | | 1036 | return; |
1036 | | | 1037 | |
1037 | (void)memcpy(namep, name, namelen * sizeof(*name)); | | 1038 | (void)memcpy(namep, name, namelen * sizeof(*name)); |
1038 | | | 1039 | |
1039 | ktraddentry(l, kte, KTA_WAITOK); | | 1040 | ktraddentry(l, kte, KTA_WAITOK); |
1040 | } | | 1041 | } |
1041 | | | 1042 | |
1042 | /* Interface and common routines */ | | 1043 | /* Interface and common routines */ |
1043 | | | 1044 | |
1044 | int | | 1045 | int |
1045 | ktrace_common(lwp_t *curl, int ops, int facs, int pid, file_t *fp) | | 1046 | ktrace_common(lwp_t *curl, int ops, int facs, int pid, file_t *fp) |
1046 | { | | 1047 | { |
1047 | struct proc *curp; | | 1048 | struct proc *curp; |
1048 | struct proc *p; | | 1049 | struct proc *p; |
1049 | struct pgrp *pg; | | 1050 | struct pgrp *pg; |
1050 | struct ktr_desc *ktd = NULL; | | 1051 | struct ktr_desc *ktd = NULL; |
1051 | int ret = 0; | | 1052 | int ret = 0; |
1052 | int error = 0; | | 1053 | int error = 0; |
1053 | int descend; | | 1054 | int descend; |
1054 | | | 1055 | |
1055 | curp = curl->l_proc; | | 1056 | curp = curl->l_proc; |
1056 | descend = ops & KTRFLAG_DESCEND; | | 1057 | descend = ops & KTRFLAG_DESCEND; |
1057 | facs = facs & ~((unsigned) KTRFAC_PERSISTENT); | | 1058 | facs = facs & ~((unsigned) KTRFAC_PERSISTENT); |
1058 | | | 1059 | |
1059 | (void)ktrenter(curl); | | 1060 | (void)ktrenter(curl); |
1060 | | | 1061 | |
1061 | switch (KTROP(ops)) { | | 1062 | switch (KTROP(ops)) { |
1062 | | | 1063 | |
1063 | case KTROP_CLEARFILE: | | 1064 | case KTROP_CLEARFILE: |
1064 | /* | | 1065 | /* |
1065 | * Clear all uses of the tracefile | | 1066 | * Clear all uses of the tracefile |
1066 | */ | | 1067 | */ |
1067 | mutex_enter(&ktrace_lock); | | 1068 | mutex_enter(&ktrace_lock); |
1068 | ktd = ktd_lookup(fp); | | 1069 | ktd = ktd_lookup(fp); |
1069 | mutex_exit(&ktrace_lock); | | 1070 | mutex_exit(&ktrace_lock); |
1070 | if (ktd == NULL) | | 1071 | if (ktd == NULL) |
1071 | goto done; | | 1072 | goto done; |
1072 | error = ktrderefall(ktd, 1); | | 1073 | error = ktrderefall(ktd, 1); |
1073 | goto done; | | 1074 | goto done; |
1074 | | | 1075 | |
1075 | case KTROP_SET: | | 1076 | case KTROP_SET: |
1076 | mutex_enter(&ktrace_lock); | | 1077 | mutex_enter(&ktrace_lock); |
1077 | ktd = ktd_lookup(fp); | | 1078 | ktd = ktd_lookup(fp); |
1078 | mutex_exit(&ktrace_lock); | | 1079 | mutex_exit(&ktrace_lock); |
1079 | if (ktd == NULL) { | | 1080 | if (ktd == NULL) { |
1080 | ktd = kmem_alloc(sizeof(*ktd), KM_SLEEP); | | 1081 | ktd = kmem_alloc(sizeof(*ktd), KM_SLEEP); |
1081 | TAILQ_INIT(&ktd->ktd_queue); | | 1082 | TAILQ_INIT(&ktd->ktd_queue); |
1082 | callout_init(&ktd->ktd_wakch, CALLOUT_MPSAFE); | | 1083 | callout_init(&ktd->ktd_wakch, CALLOUT_MPSAFE); |
1083 | cv_init(&ktd->ktd_cv, "ktrwait"); | | 1084 | cv_init(&ktd->ktd_cv, "ktrwait"); |
1084 | cv_init(&ktd->ktd_sync_cv, "ktrsync"); | | 1085 | cv_init(&ktd->ktd_sync_cv, "ktrsync"); |
1085 | ktd->ktd_flags = 0; | | 1086 | ktd->ktd_flags = 0; |
1086 | ktd->ktd_qcount = 0; | | 1087 | ktd->ktd_qcount = 0; |
1087 | ktd->ktd_error = 0; | | 1088 | ktd->ktd_error = 0; |
1088 | ktd->ktd_errcnt = 0; | | 1089 | ktd->ktd_errcnt = 0; |
1089 | ktd->ktd_delayqcnt = ktd_delayqcnt; | | 1090 | ktd->ktd_delayqcnt = ktd_delayqcnt; |
1090 | ktd->ktd_wakedelay = mstohz(ktd_wakedelay); | | 1091 | ktd->ktd_wakedelay = mstohz(ktd_wakedelay); |
1091 | ktd->ktd_intrwakdl = mstohz(ktd_intrwakdl); | | 1092 | ktd->ktd_intrwakdl = mstohz(ktd_intrwakdl); |
1092 | ktd->ktd_ref = 0; | | 1093 | ktd->ktd_ref = 0; |
1093 | ktd->ktd_fp = fp; | | 1094 | ktd->ktd_fp = fp; |
1094 | mutex_enter(&ktrace_lock); | | 1095 | mutex_enter(&ktrace_lock); |
1095 | ktdref(ktd); | | 1096 | ktdref(ktd); |
1096 | mutex_exit(&ktrace_lock); | | 1097 | mutex_exit(&ktrace_lock); |
1097 | | | 1098 | |
1098 | /* | | 1099 | /* |
1099 | * XXX: not correct. needs an way to detect | | 1100 | * XXX: not correct. needs an way to detect |
1100 | * whether ktruss or ktrace. | | 1101 | * whether ktruss or ktrace. |
1101 | */ | | 1102 | */ |
1102 | if (fp->f_type == DTYPE_PIPE) | | 1103 | if (fp->f_type == DTYPE_PIPE) |
1103 | ktd->ktd_flags |= KTDF_INTERACTIVE; | | 1104 | ktd->ktd_flags |= KTDF_INTERACTIVE; |
1104 | | | 1105 | |
1105 | mutex_enter(&fp->f_lock); | | 1106 | mutex_enter(&fp->f_lock); |
1106 | fp->f_count++; | | 1107 | fp->f_count++; |
1107 | mutex_exit(&fp->f_lock); | | 1108 | mutex_exit(&fp->f_lock); |
1108 | error = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL, | | 1109 | error = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL, |
1109 | ktrace_thread, ktd, &ktd->ktd_lwp, "ktrace"); | | 1110 | ktrace_thread, ktd, &ktd->ktd_lwp, "ktrace"); |
1110 | if (error != 0) { | | 1111 | if (error != 0) { |
1111 | kmem_free(ktd, sizeof(*ktd)); | | 1112 | kmem_free(ktd, sizeof(*ktd)); |
1112 | mutex_enter(&fp->f_lock); | | 1113 | mutex_enter(&fp->f_lock); |
1113 | fp->f_count--; | | 1114 | fp->f_count--; |
1114 | mutex_exit(&fp->f_lock); | | 1115 | mutex_exit(&fp->f_lock); |
1115 | goto done; | | 1116 | goto done; |
1116 | } | | 1117 | } |
1117 | | | 1118 | |
1118 | mutex_enter(&ktrace_lock); | | 1119 | mutex_enter(&ktrace_lock); |
1119 | if (ktd_lookup(fp) != NULL) { | | 1120 | if (ktd_lookup(fp) != NULL) { |
1120 | ktdrel(ktd); | | 1121 | ktdrel(ktd); |
1121 | ktd = NULL; | | 1122 | ktd = NULL; |
1122 | } else | | 1123 | } else |
1123 | TAILQ_INSERT_TAIL(&ktdq, ktd, ktd_list); | | 1124 | TAILQ_INSERT_TAIL(&ktdq, ktd, ktd_list); |
1124 | if (ktd == NULL) | | 1125 | if (ktd == NULL) |
1125 | cv_wait(&lbolt, &ktrace_lock); | | 1126 | cv_wait(&lbolt, &ktrace_lock); |
1126 | mutex_exit(&ktrace_lock); | | 1127 | mutex_exit(&ktrace_lock); |
1127 | if (ktd == NULL) | | 1128 | if (ktd == NULL) |
1128 | goto done; | | 1129 | goto done; |
1129 | } | | 1130 | } |
1130 | break; | | 1131 | break; |
1131 | | | 1132 | |
1132 | case KTROP_CLEAR: | | 1133 | case KTROP_CLEAR: |
1133 | break; | | 1134 | break; |
1134 | } | | 1135 | } |
1135 | | | 1136 | |
1136 | /* | | 1137 | /* |
1137 | * need something to (un)trace (XXX - why is this here?) | | 1138 | * need something to (un)trace (XXX - why is this here?) |
1138 | */ | | 1139 | */ |
1139 | if (!facs) { | | 1140 | if (!facs) { |
1140 | error = EINVAL; | | 1141 | error = EINVAL; |
1141 | goto done; | | 1142 | goto done; |
1142 | } | | 1143 | } |
1143 | | | 1144 | |
1144 | /* | | 1145 | /* |
1145 | * do it | | 1146 | * do it |
1146 | */ | | 1147 | */ |
1147 | mutex_enter(proc_lock); | | 1148 | mutex_enter(proc_lock); |
1148 | if (pid < 0) { | | 1149 | if (pid < 0) { |
1149 | /* | | 1150 | /* |
1150 | * by process group | | 1151 | * by process group |
1151 | */ | | 1152 | */ |
1152 | pg = pg_find(-pid, PFIND_LOCKED); | | 1153 | pg = pg_find(-pid, PFIND_LOCKED); |
1153 | if (pg == NULL) | | 1154 | if (pg == NULL) |
1154 | error = ESRCH; | | 1155 | error = ESRCH; |
1155 | else { | | 1156 | else { |
1156 | LIST_FOREACH(p, &pg->pg_members, p_pglist) { | | 1157 | LIST_FOREACH(p, &pg->pg_members, p_pglist) { |
1157 | if (descend) | | 1158 | if (descend) |
1158 | ret |= ktrsetchildren(curl, p, ops, | | 1159 | ret |= ktrsetchildren(curl, p, ops, |
1159 | facs, ktd); | | 1160 | facs, ktd); |
1160 | else | | 1161 | else |
1161 | ret |= ktrops(curl, p, ops, facs, | | 1162 | ret |= ktrops(curl, p, ops, facs, |
1162 | ktd); | | 1163 | ktd); |
1163 | } | | 1164 | } |
1164 | } | | 1165 | } |
1165 | | | 1166 | |
1166 | } else { | | 1167 | } else { |
1167 | /* | | 1168 | /* |
1168 | * by pid | | 1169 | * by pid |
1169 | */ | | 1170 | */ |
1170 | p = p_find(pid, PFIND_LOCKED); | | 1171 | p = p_find(pid, PFIND_LOCKED); |
1171 | if (p == NULL) | | 1172 | if (p == NULL) |
1172 | error = ESRCH; | | 1173 | error = ESRCH; |
1173 | else if (descend) | | 1174 | else if (descend) |
1174 | ret |= ktrsetchildren(curl, p, ops, facs, ktd); | | 1175 | ret |= ktrsetchildren(curl, p, ops, facs, ktd); |
1175 | else | | 1176 | else |
1176 | ret |= ktrops(curl, p, ops, facs, ktd); | | 1177 | ret |= ktrops(curl, p, ops, facs, ktd); |
1177 | } | | 1178 | } |
1178 | mutex_exit(proc_lock); | | 1179 | mutex_exit(proc_lock); |
1179 | if (error == 0 && !ret) | | 1180 | if (error == 0 && !ret) |
1180 | error = EPERM; | | 1181 | error = EPERM; |
1181 | done: | | 1182 | done: |
1182 | if (ktd != NULL) { | | 1183 | if (ktd != NULL) { |
1183 | mutex_enter(&ktrace_lock); | | 1184 | mutex_enter(&ktrace_lock); |
1184 | if (error != 0) { | | 1185 | if (error != 0) { |
1185 | /* | | 1186 | /* |
1186 | * Wakeup the thread so that it can be die if we | | 1187 | * Wakeup the thread so that it can be die if we |
1187 | * can't trace any process. | | 1188 | * can't trace any process. |
1188 | */ | | 1189 | */ |
1189 | ktd_wakeup(ktd); | | 1190 | ktd_wakeup(ktd); |
1190 | } | | 1191 | } |
1191 | if (KTROP(ops) == KTROP_SET || KTROP(ops) == KTROP_CLEARFILE) | | 1192 | if (KTROP(ops) == KTROP_SET || KTROP(ops) == KTROP_CLEARFILE) |
1192 | ktdrel(ktd); | | 1193 | ktdrel(ktd); |
1193 | mutex_exit(&ktrace_lock); | | 1194 | mutex_exit(&ktrace_lock); |
1194 | } | | 1195 | } |
1195 | ktrexit(curl); | | 1196 | ktrexit(curl); |
1196 | return (error); | | 1197 | return (error); |
1197 | } | | 1198 | } |
1198 | | | 1199 | |
1199 | /* | | 1200 | /* |
1200 | * fktrace system call | | 1201 | * fktrace system call |
1201 | */ | | 1202 | */ |
1202 | /* ARGSUSED */ | | 1203 | /* ARGSUSED */ |
1203 | int | | 1204 | int |
1204 | sys_fktrace(struct lwp *l, const struct sys_fktrace_args *uap, register_t *retval) | | 1205 | sys_fktrace(struct lwp *l, const struct sys_fktrace_args *uap, register_t *retval) |
1205 | { | | 1206 | { |
1206 | /* { | | 1207 | /* { |
1207 | syscallarg(int) fd; | | 1208 | syscallarg(int) fd; |
1208 | syscallarg(int) ops; | | 1209 | syscallarg(int) ops; |
1209 | syscallarg(int) facs; | | 1210 | syscallarg(int) facs; |
1210 | syscallarg(int) pid; | | 1211 | syscallarg(int) pid; |
1211 | } */ | | 1212 | } */ |
1212 | file_t *fp; | | 1213 | file_t *fp; |
1213 | int error, fd; | | 1214 | int error, fd; |
1214 | | | 1215 | |
1215 | fd = SCARG(uap, fd); | | 1216 | fd = SCARG(uap, fd); |
1216 | if ((fp = fd_getfile(fd)) == NULL) | | 1217 | if ((fp = fd_getfile(fd)) == NULL) |
1217 | return (EBADF); | | 1218 | return (EBADF); |
1218 | if ((fp->f_flag & FWRITE) == 0) | | 1219 | if ((fp->f_flag & FWRITE) == 0) |
1219 | error = EBADF; | | 1220 | error = EBADF; |
1220 | else | | 1221 | else |
1221 | error = ktrace_common(l, SCARG(uap, ops), | | 1222 | error = ktrace_common(l, SCARG(uap, ops), |
1222 | SCARG(uap, facs), SCARG(uap, pid), fp); | | 1223 | SCARG(uap, facs), SCARG(uap, pid), fp); |
1223 | fd_putfile(fd); | | 1224 | fd_putfile(fd); |
1224 | return error; | | 1225 | return error; |
1225 | } | | 1226 | } |
1226 | | | 1227 | |
1227 | /* | | 1228 | /* |
1228 | * ktrace system call | | 1229 | * ktrace system call |
1229 | */ | | 1230 | */ |
1230 | /* ARGSUSED */ | | 1231 | /* ARGSUSED */ |
1231 | int | | 1232 | int |
1232 | sys_ktrace(struct lwp *l, const struct sys_ktrace_args *uap, register_t *retval) | | 1233 | sys_ktrace(struct lwp *l, const struct sys_ktrace_args *uap, register_t *retval) |
1233 | { | | 1234 | { |
1234 | /* { | | 1235 | /* { |
1235 | syscallarg(const char *) fname; | | 1236 | syscallarg(const char *) fname; |
1236 | syscallarg(int) ops; | | 1237 | syscallarg(int) ops; |
1237 | syscallarg(int) facs; | | 1238 | syscallarg(int) facs; |
1238 | syscallarg(int) pid; | | 1239 | syscallarg(int) pid; |
1239 | } */ | | 1240 | } */ |
1240 | struct vnode *vp = NULL; | | 1241 | struct vnode *vp = NULL; |
1241 | file_t *fp = NULL; | | 1242 | file_t *fp = NULL; |
1242 | struct nameidata nd; | | 1243 | struct nameidata nd; |
1243 | int error = 0; | | 1244 | int error = 0; |
1244 | int fd; | | 1245 | int fd; |
1245 | | | 1246 | |
1246 | if (ktrenter(l)) | | 1247 | if (ktrenter(l)) |
1247 | return EAGAIN; | | 1248 | return EAGAIN; |
1248 | | | 1249 | |
1249 | if (KTROP(SCARG(uap, ops)) != KTROP_CLEAR) { | | 1250 | if (KTROP(SCARG(uap, ops)) != KTROP_CLEAR) { |
1250 | /* | | 1251 | /* |
1251 | * an operation which requires a file argument. | | 1252 | * an operation which requires a file argument. |
1252 | */ | | 1253 | */ |
1253 | NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, fname)); | | 1254 | NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, fname)); |
1254 | if ((error = vn_open(&nd, FREAD|FWRITE, 0)) != 0) { | | 1255 | if ((error = vn_open(&nd, FREAD|FWRITE, 0)) != 0) { |
1255 | ktrexit(l); | | 1256 | ktrexit(l); |
1256 | return (error); | | 1257 | return (error); |
1257 | } | | 1258 | } |
1258 | vp = nd.ni_vp; | | 1259 | vp = nd.ni_vp; |
1259 | VOP_UNLOCK(vp, 0); | | 1260 | VOP_UNLOCK(vp, 0); |
1260 | if (vp->v_type != VREG) { | | 1261 | if (vp->v_type != VREG) { |
1261 | vn_close(vp, FREAD|FWRITE, l->l_cred); | | 1262 | vn_close(vp, FREAD|FWRITE, l->l_cred); |
1262 | ktrexit(l); | | 1263 | ktrexit(l); |
1263 | return (EACCES); | | 1264 | return (EACCES); |
1264 | } | | 1265 | } |
1265 | /* | | 1266 | /* |
1266 | * This uses up a file descriptor slot in the | | 1267 | * This uses up a file descriptor slot in the |
1267 | * tracing process for the duration of this syscall. | | 1268 | * tracing process for the duration of this syscall. |
1268 | * This is not expected to be a problem. | | 1269 | * This is not expected to be a problem. |
1269 | */ | | 1270 | */ |
1270 | if ((error = fd_allocfile(&fp, &fd)) != 0) { | | 1271 | if ((error = fd_allocfile(&fp, &fd)) != 0) { |
1271 | vn_close(vp, FWRITE, l->l_cred); | | 1272 | vn_close(vp, FWRITE, l->l_cred); |
1272 | ktrexit(l); | | 1273 | ktrexit(l); |
1273 | return error; | | 1274 | return error; |
1274 | } | | 1275 | } |
1275 | fp->f_flag = FWRITE; | | 1276 | fp->f_flag = FWRITE; |
1276 | fp->f_type = DTYPE_VNODE; | | 1277 | fp->f_type = DTYPE_VNODE; |
1277 | fp->f_ops = &vnops; | | 1278 | fp->f_ops = &vnops; |
1278 | fp->f_data = (void *)vp; | | 1279 | fp->f_data = (void *)vp; |
1279 | vp = NULL; | | 1280 | vp = NULL; |
1280 | } | | 1281 | } |
1281 | error = ktrace_common(l, SCARG(uap, ops), SCARG(uap, facs), | | 1282 | error = ktrace_common(l, SCARG(uap, ops), SCARG(uap, facs), |
1282 | SCARG(uap, pid), fp); | | 1283 | SCARG(uap, pid), fp); |
1283 | if (fp != NULL) { | | 1284 | if (fp != NULL) { |
1284 | if (error != 0) { | | 1285 | if (error != 0) { |
1285 | /* File unused. */ | | 1286 | /* File unused. */ |
1286 | fd_abort(curproc, fp, fd); | | 1287 | fd_abort(curproc, fp, fd); |
1287 | } else { | | 1288 | } else { |
1288 | /* File was used. */ | | 1289 | /* File was used. */ |
1289 | fd_abort(curproc, NULL, fd); | | 1290 | fd_abort(curproc, NULL, fd); |
1290 | } | | 1291 | } |
1291 | } | | 1292 | } |
1292 | return (error); | | 1293 | return (error); |
1293 | } | | 1294 | } |
1294 | | | 1295 | |
1295 | int | | 1296 | int |
1296 | ktrops(lwp_t *curl, struct proc *p, int ops, int facs, | | 1297 | ktrops(lwp_t *curl, struct proc *p, int ops, int facs, |
1297 | struct ktr_desc *ktd) | | 1298 | struct ktr_desc *ktd) |
1298 | { | | 1299 | { |
1299 | int vers = ops & KTRFAC_VER_MASK; | | 1300 | int vers = ops & KTRFAC_VER_MASK; |
1300 | int error = 0; | | 1301 | int error = 0; |
1301 | | | 1302 | |
1302 | mutex_enter(p->p_lock); | | 1303 | mutex_enter(p->p_lock); |
1303 | mutex_enter(&ktrace_lock); | | 1304 | mutex_enter(&ktrace_lock); |
1304 | | | 1305 | |
1305 | if (!ktrcanset(curl, p)) | | 1306 | if (!ktrcanset(curl, p)) |
1306 | goto out; | | 1307 | goto out; |
1307 | | | 1308 | |
1308 | switch (vers) { | | 1309 | switch (vers) { |
1309 | case KTRFACv0: | | 1310 | case KTRFACv0: |
1310 | case KTRFACv1: | | 1311 | case KTRFACv1: |
1311 | case KTRFACv2: | | 1312 | case KTRFACv2: |
1312 | break; | | 1313 | break; |
1313 | default: | | 1314 | default: |
1314 | error = EINVAL; | | 1315 | error = EINVAL; |
1315 | goto out; | | 1316 | goto out; |
1316 | } | | 1317 | } |
1317 | | | 1318 | |
1318 | if (KTROP(ops) == KTROP_SET) { | | 1319 | if (KTROP(ops) == KTROP_SET) { |
1319 | if (p->p_tracep != ktd) { | | 1320 | if (p->p_tracep != ktd) { |
1320 | /* | | 1321 | /* |
1321 | * if trace file already in use, relinquish | | 1322 | * if trace file already in use, relinquish |
1322 | */ | | 1323 | */ |
1323 | ktrderef(p); | | 1324 | ktrderef(p); |
1324 | p->p_tracep = ktd; | | 1325 | p->p_tracep = ktd; |
1325 | ktradref(p); | | 1326 | ktradref(p); |
1326 | } | | 1327 | } |
1327 | p->p_traceflag |= facs; | | 1328 | p->p_traceflag |= facs; |
1328 | if (kauth_authorize_process(curl->l_cred, KAUTH_PROCESS_KTRACE, | | 1329 | if (kauth_authorize_process(curl->l_cred, KAUTH_PROCESS_KTRACE, |
1329 | p, KAUTH_ARG(KAUTH_REQ_PROCESS_KTRACE_PERSISTENT), NULL, | | 1330 | p, KAUTH_ARG(KAUTH_REQ_PROCESS_KTRACE_PERSISTENT), NULL, |
1330 | NULL) == 0) | | 1331 | NULL) == 0) |
1331 | p->p_traceflag |= KTRFAC_PERSISTENT; | | 1332 | p->p_traceflag |= KTRFAC_PERSISTENT; |
1332 | } else { | | 1333 | } else { |
1333 | /* KTROP_CLEAR */ | | 1334 | /* KTROP_CLEAR */ |
1334 | if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) { | | 1335 | if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) { |
1335 | /* no more tracing */ | | 1336 | /* no more tracing */ |
1336 | ktrderef(p); | | 1337 | ktrderef(p); |
1337 | } | | 1338 | } |
1338 | } | | 1339 | } |
1339 | | | 1340 | |
1340 | if (p->p_traceflag) | | 1341 | if (p->p_traceflag) |
1341 | p->p_traceflag |= vers; | | 1342 | p->p_traceflag |= vers; |
1342 | /* | | 1343 | /* |
1343 | * Emit an emulation record, every time there is a ktrace | | 1344 | * Emit an emulation record, every time there is a ktrace |
1344 | * change/attach request. | | 1345 | * change/attach request. |
1345 | */ | | 1346 | */ |
1346 | if (KTRPOINT(p, KTR_EMUL)) | | 1347 | if (KTRPOINT(p, KTR_EMUL)) |
1347 | p->p_traceflag |= KTRFAC_TRC_EMUL; | | 1348 | p->p_traceflag |= KTRFAC_TRC_EMUL; |
1348 | | | 1349 | |
1349 | p->p_trace_enabled = trace_is_enabled(p); | | 1350 | p->p_trace_enabled = trace_is_enabled(p); |
1350 | #ifdef __HAVE_SYSCALL_INTERN | | 1351 | #ifdef __HAVE_SYSCALL_INTERN |
1351 | (*p->p_emul->e_syscall_intern)(p); | | 1352 | (*p->p_emul->e_syscall_intern)(p); |
1352 | #endif | | 1353 | #endif |
1353 | | | 1354 | |
1354 | out: | | 1355 | out: |
1355 | mutex_exit(&ktrace_lock); | | 1356 | mutex_exit(&ktrace_lock); |
1356 | mutex_exit(p->p_lock); | | 1357 | mutex_exit(p->p_lock); |
1357 | | | 1358 | |
1358 | return (1); | | 1359 | return (1); |
1359 | } | | 1360 | } |
1360 | | | 1361 | |
/*
 * Apply a ktrace operation to "top" and to every descendant of it,
 * using a pre-order walk over the p_children/p_sibling/p_pptr links.
 * Returns non-zero if ktrops() succeeded for at least one process.
 * The caller must hold proc_lock so the process tree is stable.
 */
int
ktrsetchildren(lwp_t *curl, struct proc *top, int ops, int facs,
    struct ktr_desc *ktd)
{
	struct proc *p;
	int ret = 0;

	KASSERT(mutex_owned(proc_lock));

	p = top;
	for (;;) {
		ret |= ktrops(curl, p, ops, facs, ktd);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (LIST_FIRST(&p->p_children) != NULL) {
			p = LIST_FIRST(&p->p_children);
			continue;
		}
		/* No children: climb until we find a sibling or reach top. */
		for (;;) {
			if (p == top)
				return (ret);
			if (LIST_NEXT(p, p_sibling) != NULL) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
	}
	/*NOTREACHED*/
}
1394 | | | 1395 | |
/*
 * Write a chain of ktrace entries (linked via kte_list) to the trace
 * file backing "ktd", batching as many entries as fit into one iovec
 * array per write.  All entries from "kte" onward are consumed and
 * freed, regardless of whether the write succeeds.
 */
void
ktrwrite(struct ktr_desc *ktd, struct ktrace_entry *kte)
{
	size_t hlen;
	struct uio auio;
	struct iovec aiov[64], *iov;
	struct ktrace_entry *top = kte;
	struct ktr_header *kth;
	file_t *fp = ktd->ktd_fp;
	int error;
next:
	/* Assemble one gathered-write request from the next batch. */
	auio.uio_iov = iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_rw = UIO_WRITE;
	auio.uio_resid = 0;
	auio.uio_iovcnt = 0;
	UIO_SETUP_SYSSPACE(&auio);
	do {
		struct timespec ts;
		lwpid_t lid;
		kth = &kte->kte_kth;

		hlen = sizeof(struct ktr_header);
		switch (kth->ktr_version) {
		case 0:
			/* Rewrite into the v0 header layout (timeval). */
			ts = kth->ktr_time;

			kth->ktr_otv.tv_sec = ts.tv_sec;
			kth->ktr_otv.tv_usec = ts.tv_nsec / 1000;
			kth->ktr_unused = NULL;
			/* Old headers are shorter: trim the output length. */
			hlen -= sizeof(kth->_v) -
			    MAX(sizeof(kth->_v._v0), sizeof(kth->_v._v1));
			break;
		case 1:
			/* Rewrite into the v1 header layout (timespec+lid). */
			ts = kth->ktr_time;
			lid = kth->ktr_lid;

			kth->ktr_ots.tv_sec = ts.tv_sec;
			kth->ktr_ots.tv_nsec = ts.tv_nsec;
			kth->ktr_olid = lid;
			hlen -= sizeof(kth->_v) -
			    MAX(sizeof(kth->_v._v0), sizeof(kth->_v._v1));
			break;
		}
		/* One iovec for the header ... */
		iov->iov_base = (void *)kth;
		iov++->iov_len = hlen;
		auio.uio_resid += hlen;
		auio.uio_iovcnt++;
		/* ... and one for the payload, if any. */
		if (kth->ktr_len > 0) {
			iov->iov_base = kte->kte_buf;
			iov++->iov_len = kth->ktr_len;
			auio.uio_resid += kth->ktr_len;
			auio.uio_iovcnt++;
		}
	} while ((kte = TAILQ_NEXT(kte, kte_list)) != NULL &&
	    auio.uio_iovcnt < sizeof(aiov) / sizeof(aiov[0]) - 1);

again:
	error = (*fp->f_ops->fo_write)(fp, &fp->f_offset, &auio,
	    fp->f_cred, FOF_UPDATE_OFFSET);
	switch (error) {

	case 0:
		/* Short write: retry; more entries pending: next batch. */
		if (auio.uio_resid > 0)
			goto again;
		if (kte != NULL)
			goto next;
		break;

	case EWOULDBLOCK:
		/* Non-blocking target full: back off briefly and retry. */
		kpause("ktrzzz", false, 1, NULL);
		goto again;

	default:
		/*
		 * If error encountered, give up tracing on this
		 * vnode. Don't report EPIPE as this can easily
		 * happen with fktrace()/ktruss.
		 */
#ifndef DEBUG
		if (error != EPIPE)
#endif
			log(LOG_NOTICE,
			    "ktrace write failed, errno %d, tracing stopped\n",
			    error);
		(void)ktrderefall(ktd, 0);
	}

	/* Free every entry we were handed, written or not. */
	while ((kte = top) != NULL) {
		top = TAILQ_NEXT(top, kte_list);
		ktefree(kte);
	}
}
1488 | | | 1489 | |
/*
 * Per-descriptor writer thread: drains queued trace entries to the
 * trace file, and tears the descriptor down once the last reference
 * is gone and the queue is empty.
 */
void
ktrace_thread(void *arg)
{
	struct ktr_desc *ktd = arg;
	file_t *fp = ktd->ktd_fp;
	struct ktrace_entry *kte;
	int ktrerr, errcnt;

	mutex_enter(&ktrace_lock);
	for (;;) {
		kte = TAILQ_FIRST(&ktd->ktd_queue);
		if (kte == NULL) {
			/* Queue drained: release any ktrace-sync waiters. */
			if (ktd->ktd_flags & KTDF_WAIT) {
				ktd->ktd_flags &= ~(KTDF_WAIT | KTDF_BLOCKING);
				cv_broadcast(&ktd->ktd_sync_cv);
			}
			/* No work and no users left: time to exit. */
			if (ktd->ktd_ref == 0)
				break;
			cv_wait(&ktd->ktd_cv, &ktrace_lock);
			continue;
		}
		/* Take the whole queue, then write it with the lock dropped. */
		TAILQ_INIT(&ktd->ktd_queue);
		ktd->ktd_qcount = 0;
		ktrerr = ktd->ktd_error;
		errcnt = ktd->ktd_errcnt;
		ktd->ktd_error = ktd->ktd_errcnt = 0;
		mutex_exit(&ktrace_lock);

		if (ktrerr) {
			log(LOG_NOTICE,
			    "ktrace failed, fp %p, error 0x%x, total %d\n",
			    fp, ktrerr, errcnt);
		}
		ktrwrite(ktd, kte);
		mutex_enter(&ktrace_lock);
	}

	TAILQ_REMOVE(&ktdq, ktd, ktd_list);
	mutex_exit(&ktrace_lock);

	/*
	 * ktrace file descriptor can't be watched (are not visible to
	 * userspace), so no kqueue stuff here
	 * XXX: The above comment is wrong, because the fktrace file
	 * descriptor is available in userland.
	 */
	closef(fp);

	cv_destroy(&ktd->ktd_sync_cv);
	cv_destroy(&ktd->ktd_cv);

	callout_stop(&ktd->ktd_wakch);
	callout_destroy(&ktd->ktd_wakch);
	kmem_free(ktd, sizeof(*ktd));

	kthread_exit(0);
}
1546 | | | 1547 | |
1547 | /* | | 1548 | /* |
1548 | * Return true if caller has permission to set the ktracing state | | 1549 | * Return true if caller has permission to set the ktracing state |
1549 | * of target. Essentially, the target can't possess any | | 1550 | * of target. Essentially, the target can't possess any |
1550 | * more permissions than the caller. KTRFAC_PERSISTENT signifies that | | 1551 | * more permissions than the caller. KTRFAC_PERSISTENT signifies that |
1551 | * the tracing will persist on sugid processes during exec; it is only | | 1552 | * the tracing will persist on sugid processes during exec; it is only |
1552 | * settable by a process with appropriate credentials. | | 1553 | * settable by a process with appropriate credentials. |
1553 | * | | 1554 | * |
1554 | * TODO: check groups. use caller effective gid. | | 1555 | * TODO: check groups. use caller effective gid. |
1555 | */ | | 1556 | */ |
1556 | int | | 1557 | int |
1557 | ktrcanset(lwp_t *calll, struct proc *targetp) | | 1558 | ktrcanset(lwp_t *calll, struct proc *targetp) |
1558 | { | | 1559 | { |
1559 | KASSERT(mutex_owned(targetp->p_lock)); | | 1560 | KASSERT(mutex_owned(targetp->p_lock)); |
1560 | KASSERT(mutex_owned(&ktrace_lock)); | | 1561 | KASSERT(mutex_owned(&ktrace_lock)); |
1561 | | | 1562 | |
1562 | if (kauth_authorize_process(calll->l_cred, KAUTH_PROCESS_KTRACE, | | 1563 | if (kauth_authorize_process(calll->l_cred, KAUTH_PROCESS_KTRACE, |
1563 | targetp, NULL, NULL, NULL) == 0) | | 1564 | targetp, NULL, NULL, NULL) == 0) |
1564 | return (1); | | 1565 | return (1); |
1565 | | | 1566 | |
1566 | return (0); | | 1567 | return (0); |
1567 | } | | 1568 | } |
1568 | | | 1569 | |
1569 | /* | | 1570 | /* |
1570 | * Put user defined entry to ktrace records. | | 1571 | * Put user defined entry to ktrace records. |
1571 | */ | | 1572 | */ |
1572 | int | | 1573 | int |
1573 | sys_utrace(struct lwp *l, const struct sys_utrace_args *uap, register_t *retval) | | 1574 | sys_utrace(struct lwp *l, const struct sys_utrace_args *uap, register_t *retval) |
1574 | { | | 1575 | { |
1575 | /* { | | 1576 | /* { |
1576 | syscallarg(const char *) label; | | 1577 | syscallarg(const char *) label; |
1577 | syscallarg(void *) addr; | | 1578 | syscallarg(void *) addr; |
1578 | syscallarg(size_t) len; | | 1579 | syscallarg(size_t) len; |
1579 | } */ | | 1580 | } */ |
1580 | | | 1581 | |
1581 | return ktruser(SCARG(uap, label), SCARG(uap, addr), | | 1582 | return ktruser(SCARG(uap, label), SCARG(uap, addr), |
1582 | SCARG(uap, len), 1); | | 1583 | SCARG(uap, len), 1); |
1583 | } | | 1584 | } |