| @@ -1,1248 +1,1286 @@ | | | @@ -1,1248 +1,1286 @@ |
1 | /* $NetBSD: kern_ktrace.c,v 1.149 2009/08/05 19:53:42 dsl Exp $ */ | | 1 | /* $NetBSD: kern_ktrace.c,v 1.150 2009/10/02 21:47:35 elad Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc. | | 4 | * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation | | 7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Andrew Doran. | | 8 | * by Andrew Doran. |
9 | * | | 9 | * |
10 | * Redistribution and use in source and binary forms, with or without | | 10 | * Redistribution and use in source and binary forms, with or without |
11 | * modification, are permitted provided that the following conditions | | 11 | * modification, are permitted provided that the following conditions |
12 | * are met: | | 12 | * are met: |
13 | * 1. Redistributions of source code must retain the above copyright | | 13 | * 1. Redistributions of source code must retain the above copyright |
14 | * notice, this list of conditions and the following disclaimer. | | 14 | * notice, this list of conditions and the following disclaimer. |
15 | * 2. Redistributions in binary form must reproduce the above copyright | | 15 | * 2. Redistributions in binary form must reproduce the above copyright |
16 | * notice, this list of conditions and the following disclaimer in the | | 16 | * notice, this list of conditions and the following disclaimer in the |
17 | * documentation and/or other materials provided with the distribution. | | 17 | * documentation and/or other materials provided with the distribution. |
18 | * | | 18 | * |
19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
29 | * POSSIBILITY OF SUCH DAMAGE. | | 29 | * POSSIBILITY OF SUCH DAMAGE. |
30 | */ | | 30 | */ |
31 | | | 31 | |
32 | /* | | 32 | /* |
33 | * Copyright (c) 1989, 1993 | | 33 | * Copyright (c) 1989, 1993 |
34 | * The Regents of the University of California. All rights reserved. | | 34 | * The Regents of the University of California. All rights reserved. |
35 | * | | 35 | * |
36 | * Redistribution and use in source and binary forms, with or without | | 36 | * Redistribution and use in source and binary forms, with or without |
37 | * modification, are permitted provided that the following conditions | | 37 | * modification, are permitted provided that the following conditions |
38 | * are met: | | 38 | * are met: |
39 | * 1. Redistributions of source code must retain the above copyright | | 39 | * 1. Redistributions of source code must retain the above copyright |
40 | * notice, this list of conditions and the following disclaimer. | | 40 | * notice, this list of conditions and the following disclaimer. |
41 | * 2. Redistributions in binary form must reproduce the above copyright | | 41 | * 2. Redistributions in binary form must reproduce the above copyright |
42 | * notice, this list of conditions and the following disclaimer in the | | 42 | * notice, this list of conditions and the following disclaimer in the |
43 | * documentation and/or other materials provided with the distribution. | | 43 | * documentation and/or other materials provided with the distribution. |
44 | * 3. Neither the name of the University nor the names of its contributors | | 44 | * 3. Neither the name of the University nor the names of its contributors |
45 | * may be used to endorse or promote products derived from this software | | 45 | * may be used to endorse or promote products derived from this software |
46 | * without specific prior written permission. | | 46 | * without specific prior written permission. |
47 | * | | 47 | * |
48 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | | 48 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
49 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | | 49 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
50 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | | 50 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
51 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | | 51 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
52 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | | 52 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
53 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | | 53 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
54 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | | 54 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
55 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | | 55 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
56 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | | 56 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
57 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | | 57 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
58 | * SUCH DAMAGE. | | 58 | * SUCH DAMAGE. |
59 | * | | 59 | * |
60 | * @(#)kern_ktrace.c 8.5 (Berkeley) 5/14/95 | | 60 | * @(#)kern_ktrace.c 8.5 (Berkeley) 5/14/95 |
61 | */ | | 61 | */ |
62 | | | 62 | |
63 | #include <sys/cdefs.h> | | 63 | #include <sys/cdefs.h> |
64 | __KERNEL_RCSID(0, "$NetBSD: kern_ktrace.c,v 1.149 2009/08/05 19:53:42 dsl Exp $"); | | 64 | __KERNEL_RCSID(0, "$NetBSD: kern_ktrace.c,v 1.150 2009/10/02 21:47:35 elad Exp $"); |
65 | | | 65 | |
66 | #include <sys/param.h> | | 66 | #include <sys/param.h> |
67 | #include <sys/systm.h> | | 67 | #include <sys/systm.h> |
68 | #include <sys/proc.h> | | 68 | #include <sys/proc.h> |
69 | #include <sys/file.h> | | 69 | #include <sys/file.h> |
70 | #include <sys/namei.h> | | 70 | #include <sys/namei.h> |
71 | #include <sys/vnode.h> | | 71 | #include <sys/vnode.h> |
72 | #include <sys/kernel.h> | | 72 | #include <sys/kernel.h> |
73 | #include <sys/kthread.h> | | 73 | #include <sys/kthread.h> |
74 | #include <sys/ktrace.h> | | 74 | #include <sys/ktrace.h> |
75 | #include <sys/kmem.h> | | 75 | #include <sys/kmem.h> |
76 | #include <sys/syslog.h> | | 76 | #include <sys/syslog.h> |
77 | #include <sys/filedesc.h> | | 77 | #include <sys/filedesc.h> |
78 | #include <sys/ioctl.h> | | 78 | #include <sys/ioctl.h> |
79 | #include <sys/callout.h> | | 79 | #include <sys/callout.h> |
80 | #include <sys/kauth.h> | | 80 | #include <sys/kauth.h> |
81 | | | 81 | |
82 | #include <sys/mount.h> | | 82 | #include <sys/mount.h> |
83 | #include <sys/sa.h> | | 83 | #include <sys/sa.h> |
84 | #include <sys/syscallargs.h> | | 84 | #include <sys/syscallargs.h> |
85 | | | 85 | |
86 | /* | | 86 | /* |
87 | * TODO: | | 87 | * TODO: |
88 | * - need better error reporting? | | 88 | * - need better error reporting? |
89 | * - userland utility to sort ktrace.out by timestamp. | | 89 | * - userland utility to sort ktrace.out by timestamp. |
90 | * - keep minimum information in ktrace_entry when rest of alloc failed. | | 90 | * - keep minimum information in ktrace_entry when rest of alloc failed. |
91 | * - per trace control of configurable parameters. | | 91 | * - per trace control of configurable parameters. |
92 | */ | | 92 | */ |
93 | | | 93 | |
94 | struct ktrace_entry { | | 94 | struct ktrace_entry { |
95 | TAILQ_ENTRY(ktrace_entry) kte_list; | | 95 | TAILQ_ENTRY(ktrace_entry) kte_list; |
96 | struct ktr_header kte_kth; | | 96 | struct ktr_header kte_kth; |
97 | void *kte_buf; | | 97 | void *kte_buf; |
98 | size_t kte_bufsz; | | 98 | size_t kte_bufsz; |
99 | #define KTE_SPACE 32 | | 99 | #define KTE_SPACE 32 |
100 | uint8_t kte_space[KTE_SPACE]; | | 100 | uint8_t kte_space[KTE_SPACE]; |
101 | }; | | 101 | }; |
102 | | | 102 | |
103 | struct ktr_desc { | | 103 | struct ktr_desc { |
104 | TAILQ_ENTRY(ktr_desc) ktd_list; | | 104 | TAILQ_ENTRY(ktr_desc) ktd_list; |
105 | int ktd_flags; | | 105 | int ktd_flags; |
106 | #define KTDF_WAIT 0x0001 | | 106 | #define KTDF_WAIT 0x0001 |
107 | #define KTDF_DONE 0x0002 | | 107 | #define KTDF_DONE 0x0002 |
108 | #define KTDF_BLOCKING 0x0004 | | 108 | #define KTDF_BLOCKING 0x0004 |
109 | #define KTDF_INTERACTIVE 0x0008 | | 109 | #define KTDF_INTERACTIVE 0x0008 |
110 | int ktd_error; | | 110 | int ktd_error; |
111 | #define KTDE_ENOMEM 0x0001 | | 111 | #define KTDE_ENOMEM 0x0001 |
112 | #define KTDE_ENOSPC 0x0002 | | 112 | #define KTDE_ENOSPC 0x0002 |
113 | int ktd_errcnt; | | 113 | int ktd_errcnt; |
114 | int ktd_ref; /* # of reference */ | | 114 | int ktd_ref; /* # of reference */ |
115 | int ktd_qcount; /* # of entry in the queue */ | | 115 | int ktd_qcount; /* # of entry in the queue */ |
116 | | | 116 | |
117 | /* | | 117 | /* |
118 | * Params to control behaviour. | | 118 | * Params to control behaviour. |
119 | */ | | 119 | */ |
120 | int ktd_delayqcnt; /* # of entry allowed to delay */ | | 120 | int ktd_delayqcnt; /* # of entry allowed to delay */ |
121 | int ktd_wakedelay; /* delay of wakeup in *tick* */ | | 121 | int ktd_wakedelay; /* delay of wakeup in *tick* */ |
122 | int ktd_intrwakdl; /* ditto, but when interactive */ | | 122 | int ktd_intrwakdl; /* ditto, but when interactive */ |
123 | | | 123 | |
124 | file_t *ktd_fp; /* trace output file */ | | 124 | file_t *ktd_fp; /* trace output file */ |
125 | lwp_t *ktd_lwp; /* our kernel thread */ | | 125 | lwp_t *ktd_lwp; /* our kernel thread */ |
126 | TAILQ_HEAD(, ktrace_entry) ktd_queue; | | 126 | TAILQ_HEAD(, ktrace_entry) ktd_queue; |
127 | callout_t ktd_wakch; /* delayed wakeup */ | | 127 | callout_t ktd_wakch; /* delayed wakeup */ |
128 | kcondvar_t ktd_sync_cv; | | 128 | kcondvar_t ktd_sync_cv; |
129 | kcondvar_t ktd_cv; | | 129 | kcondvar_t ktd_cv; |
130 | }; | | 130 | }; |
131 | | | 131 | |
132 | static int ktealloc(struct ktrace_entry **, void **, lwp_t *, int, | | 132 | static int ktealloc(struct ktrace_entry **, void **, lwp_t *, int, |
133 | size_t); | | 133 | size_t); |
134 | static void ktrwrite(struct ktr_desc *, struct ktrace_entry *); | | 134 | static void ktrwrite(struct ktr_desc *, struct ktrace_entry *); |
135 | static int ktrace_common(lwp_t *, int, int, int, file_t *); | | 135 | static int ktrace_common(lwp_t *, int, int, int, file_t *); |
136 | static int ktrops(lwp_t *, struct proc *, int, int, | | 136 | static int ktrops(lwp_t *, struct proc *, int, int, |
137 | struct ktr_desc *); | | 137 | struct ktr_desc *); |
138 | static int ktrsetchildren(lwp_t *, struct proc *, int, int, | | 138 | static int ktrsetchildren(lwp_t *, struct proc *, int, int, |
139 | struct ktr_desc *); | | 139 | struct ktr_desc *); |
140 | static int ktrcanset(lwp_t *, struct proc *); | | 140 | static int ktrcanset(lwp_t *, struct proc *); |
141 | static int ktrsamefile(file_t *, file_t *); | | 141 | static int ktrsamefile(file_t *, file_t *); |
142 | static void ktr_kmem(lwp_t *, int, const void *, size_t); | | 142 | static void ktr_kmem(lwp_t *, int, const void *, size_t); |
143 | static void ktr_io(lwp_t *, int, enum uio_rw, struct iovec *, size_t); | | 143 | static void ktr_io(lwp_t *, int, enum uio_rw, struct iovec *, size_t); |
144 | | | 144 | |
145 | static struct ktr_desc * | | 145 | static struct ktr_desc * |
146 | ktd_lookup(file_t *); | | 146 | ktd_lookup(file_t *); |
147 | static void ktdrel(struct ktr_desc *); | | 147 | static void ktdrel(struct ktr_desc *); |
148 | static void ktdref(struct ktr_desc *); | | 148 | static void ktdref(struct ktr_desc *); |
149 | static void ktraddentry(lwp_t *, struct ktrace_entry *, int); | | 149 | static void ktraddentry(lwp_t *, struct ktrace_entry *, int); |
150 | /* Flags for ktraddentry (3rd arg) */ | | 150 | /* Flags for ktraddentry (3rd arg) */ |
151 | #define KTA_NOWAIT 0x0000 | | 151 | #define KTA_NOWAIT 0x0000 |
152 | #define KTA_WAITOK 0x0001 | | 152 | #define KTA_WAITOK 0x0001 |
153 | #define KTA_LARGE 0x0002 | | 153 | #define KTA_LARGE 0x0002 |
154 | static void ktefree(struct ktrace_entry *); | | 154 | static void ktefree(struct ktrace_entry *); |
155 | static void ktd_logerrl(struct ktr_desc *, int); | | 155 | static void ktd_logerrl(struct ktr_desc *, int); |
156 | static void ktrace_thread(void *); | | 156 | static void ktrace_thread(void *); |
157 | static int ktrderefall(struct ktr_desc *, int); | | 157 | static int ktrderefall(struct ktr_desc *, int); |
158 | | | 158 | |
159 | /* | | 159 | /* |
160 | * Default values. | | 160 | * Default values. |
161 | */ | | 161 | */ |
162 | #define KTD_MAXENTRY 1000 /* XXX: tune */ | | 162 | #define KTD_MAXENTRY 1000 /* XXX: tune */ |
163 | #define KTD_TIMEOUT 5 /* XXX: tune */ | | 163 | #define KTD_TIMEOUT 5 /* XXX: tune */ |
164 | #define KTD_DELAYQCNT 100 /* XXX: tune */ | | 164 | #define KTD_DELAYQCNT 100 /* XXX: tune */ |
165 | #define KTD_WAKEDELAY 5000 /* XXX: tune */ | | 165 | #define KTD_WAKEDELAY 5000 /* XXX: tune */ |
166 | #define KTD_INTRWAKDL 100 /* XXX: tune */ | | 166 | #define KTD_INTRWAKDL 100 /* XXX: tune */ |
167 | | | 167 | |
168 | /* | | 168 | /* |
169 | * Patchable variables. | | 169 | * Patchable variables. |
170 | */ | | 170 | */ |
171 | int ktd_maxentry = KTD_MAXENTRY; /* max # of entry in the queue */ | | 171 | int ktd_maxentry = KTD_MAXENTRY; /* max # of entry in the queue */ |
172 | int ktd_timeout = KTD_TIMEOUT; /* timeout in seconds */ | | 172 | int ktd_timeout = KTD_TIMEOUT; /* timeout in seconds */ |
173 | int ktd_delayqcnt = KTD_DELAYQCNT; /* # of entry allowed to delay */ | | 173 | int ktd_delayqcnt = KTD_DELAYQCNT; /* # of entry allowed to delay */ |
174 | int ktd_wakedelay = KTD_WAKEDELAY; /* delay of wakeup in *ms* */ | | 174 | int ktd_wakedelay = KTD_WAKEDELAY; /* delay of wakeup in *ms* */ |
175 | int ktd_intrwakdl = KTD_INTRWAKDL; /* ditto, but when interactive */ | | 175 | int ktd_intrwakdl = KTD_INTRWAKDL; /* ditto, but when interactive */ |
176 | | | 176 | |
177 | kmutex_t ktrace_lock; | | 177 | kmutex_t ktrace_lock; |
178 | int ktrace_on; | | 178 | int ktrace_on; |
179 | static TAILQ_HEAD(, ktr_desc) ktdq = TAILQ_HEAD_INITIALIZER(ktdq); | | 179 | static TAILQ_HEAD(, ktr_desc) ktdq = TAILQ_HEAD_INITIALIZER(ktdq); |
180 | static pool_cache_t kte_cache; | | 180 | static pool_cache_t kte_cache; |
181 | | | 181 | |
| | | 182 | static kauth_listener_t ktrace_listener; |
| | | 183 | |
182 | static void | | 184 | static void |
183 | ktd_wakeup(struct ktr_desc *ktd) | | 185 | ktd_wakeup(struct ktr_desc *ktd) |
184 | { | | 186 | { |
185 | | | 187 | |
186 | callout_stop(&ktd->ktd_wakch); | | 188 | callout_stop(&ktd->ktd_wakch); |
187 | cv_signal(&ktd->ktd_cv); | | 189 | cv_signal(&ktd->ktd_cv); |
188 | } | | 190 | } |
189 | | | 191 | |
190 | static void | | 192 | static void |
191 | ktd_callout(void *arg) | | 193 | ktd_callout(void *arg) |
192 | { | | 194 | { |
193 | | | 195 | |
194 | mutex_enter(&ktrace_lock); | | 196 | mutex_enter(&ktrace_lock); |
195 | ktd_wakeup(arg); | | 197 | ktd_wakeup(arg); |
196 | mutex_exit(&ktrace_lock); | | 198 | mutex_exit(&ktrace_lock); |
197 | } | | 199 | } |
198 | | | 200 | |
199 | static void | | 201 | static void |
200 | ktd_logerrl(struct ktr_desc *ktd, int error) | | 202 | ktd_logerrl(struct ktr_desc *ktd, int error) |
201 | { | | 203 | { |
202 | | | 204 | |
203 | ktd->ktd_error |= error; | | 205 | ktd->ktd_error |= error; |
204 | ktd->ktd_errcnt++; | | 206 | ktd->ktd_errcnt++; |
205 | } | | 207 | } |
206 | | | 208 | |
207 | #if 0 | | 209 | #if 0 |
208 | static void | | 210 | static void |
209 | ktd_logerr(struct proc *p, int error) | | 211 | ktd_logerr(struct proc *p, int error) |
210 | { | | 212 | { |
211 | struct ktr_desc *ktd; | | 213 | struct ktr_desc *ktd; |
212 | | | 214 | |
213 | KASSERT(mutex_owned(&ktrace_lock)); | | 215 | KASSERT(mutex_owned(&ktrace_lock)); |
214 | | | 216 | |
215 | ktd = p->p_tracep; | | 217 | ktd = p->p_tracep; |
216 | if (ktd == NULL) | | 218 | if (ktd == NULL) |
217 | return; | | 219 | return; |
218 | | | 220 | |
219 | ktd_logerrl(ktd, error); | | 221 | ktd_logerrl(ktd, error); |
220 | } | | 222 | } |
221 | #endif | | 223 | #endif |
222 | | | 224 | |
223 | static inline int | | 225 | static inline int |
224 | ktrenter(lwp_t *l) | | 226 | ktrenter(lwp_t *l) |
225 | { | | 227 | { |
226 | | | 228 | |
227 | if ((l->l_pflag & LP_KTRACTIVE) != 0) | | 229 | if ((l->l_pflag & LP_KTRACTIVE) != 0) |
228 | return 1; | | 230 | return 1; |
229 | l->l_pflag |= LP_KTRACTIVE; | | 231 | l->l_pflag |= LP_KTRACTIVE; |
230 | return 0; | | 232 | return 0; |
231 | } | | 233 | } |
232 | | | 234 | |
233 | static inline void | | 235 | static inline void |
234 | ktrexit(lwp_t *l) | | 236 | ktrexit(lwp_t *l) |
235 | { | | 237 | { |
236 | | | 238 | |
237 | l->l_pflag &= ~LP_KTRACTIVE; | | 239 | l->l_pflag &= ~LP_KTRACTIVE; |
238 | } | | 240 | } |
239 | | | 241 | |
| | | 242 | static int |
| | | 243 | ktrace_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie, |
| | | 244 | void *arg0, void *arg1, void *arg2, void *arg3) |
| | | 245 | { |
| | | 246 | struct proc *p; |
| | | 247 | int result; |
| | | 248 | enum kauth_process_req req; |
| | | 249 | |
| | | 250 | result = KAUTH_RESULT_DEFER; |
| | | 251 | p = arg0; |
| | | 252 | |
| | | 253 | if (action != KAUTH_PROCESS_KTRACE) |
| | | 254 | return result; |
| | | 255 | |
| | | 256 | req = (enum kauth_process_req)(unsigned long)arg1; |
| | | 257 | |
| | | 258 | /* Privileged; secmodel should handle these. */ |
| | | 259 | if (req == KAUTH_REQ_PROCESS_KTRACE_PERSISTENT) |
| | | 260 | return result; |
| | | 261 | |
| | | 262 | if ((p->p_traceflag & KTRFAC_PERSISTENT) || |
| | | 263 | (p->p_flag & PK_SUGID)) |
| | | 264 | return result; |
| | | 265 | |
| | | 266 | if (kauth_cred_geteuid(cred) == kauth_cred_getuid(p->p_cred) && |
| | | 267 | kauth_cred_getuid(cred) == kauth_cred_getsvuid(p->p_cred) && |
| | | 268 | kauth_cred_getgid(cred) == kauth_cred_getgid(p->p_cred) && |
| | | 269 | kauth_cred_getgid(cred) == kauth_cred_getsvgid(p->p_cred)) |
| | | 270 | result = KAUTH_RESULT_ALLOW; |
| | | 271 | |
| | | 272 | return result; |
| | | 273 | } |
| | | 274 | |
240 | /* | | 275 | /* |
241 | * Initialise the ktrace system. | | 276 | * Initialise the ktrace system. |
242 | */ | | 277 | */ |
243 | void | | 278 | void |
244 | ktrinit(void) | | 279 | ktrinit(void) |
245 | { | | 280 | { |
246 | | | 281 | |
247 | mutex_init(&ktrace_lock, MUTEX_DEFAULT, IPL_NONE); | | 282 | mutex_init(&ktrace_lock, MUTEX_DEFAULT, IPL_NONE); |
248 | kte_cache = pool_cache_init(sizeof(struct ktrace_entry), 0, 0, 0, | | 283 | kte_cache = pool_cache_init(sizeof(struct ktrace_entry), 0, 0, 0, |
249 | "ktrace", &pool_allocator_nointr, IPL_NONE, NULL, NULL, NULL); | | 284 | "ktrace", &pool_allocator_nointr, IPL_NONE, NULL, NULL, NULL); |
| | | 285 | |
| | | 286 | ktrace_listener = kauth_listen_scope(KAUTH_SCOPE_PROCESS, |
| | | 287 | ktrace_listener_cb, NULL); |
250 | } | | 288 | } |
251 | | | 289 | |
252 | /* | | 290 | /* |
253 | * Release a reference. Called with ktrace_lock held. | | 291 | * Release a reference. Called with ktrace_lock held. |
254 | */ | | 292 | */ |
255 | void | | 293 | void |
256 | ktdrel(struct ktr_desc *ktd) | | 294 | ktdrel(struct ktr_desc *ktd) |
257 | { | | 295 | { |
258 | | | 296 | |
259 | KASSERT(mutex_owned(&ktrace_lock)); | | 297 | KASSERT(mutex_owned(&ktrace_lock)); |
260 | | | 298 | |
261 | KDASSERT(ktd->ktd_ref != 0); | | 299 | KDASSERT(ktd->ktd_ref != 0); |
262 | KASSERT(ktd->ktd_ref > 0); | | 300 | KASSERT(ktd->ktd_ref > 0); |
263 | KASSERT(ktrace_on > 0); | | 301 | KASSERT(ktrace_on > 0); |
264 | ktrace_on--; | | 302 | ktrace_on--; |
265 | if (--ktd->ktd_ref <= 0) { | | 303 | if (--ktd->ktd_ref <= 0) { |
266 | ktd->ktd_flags |= KTDF_DONE; | | 304 | ktd->ktd_flags |= KTDF_DONE; |
267 | cv_signal(&ktd->ktd_cv); | | 305 | cv_signal(&ktd->ktd_cv); |
268 | } | | 306 | } |
269 | } | | 307 | } |
270 | | | 308 | |
271 | void | | 309 | void |
272 | ktdref(struct ktr_desc *ktd) | | 310 | ktdref(struct ktr_desc *ktd) |
273 | { | | 311 | { |
274 | | | 312 | |
275 | KASSERT(mutex_owned(&ktrace_lock)); | | 313 | KASSERT(mutex_owned(&ktrace_lock)); |
276 | | | 314 | |
277 | ktd->ktd_ref++; | | 315 | ktd->ktd_ref++; |
278 | ktrace_on++; | | 316 | ktrace_on++; |
279 | } | | 317 | } |
280 | | | 318 | |
281 | struct ktr_desc * | | 319 | struct ktr_desc * |
282 | ktd_lookup(file_t *fp) | | 320 | ktd_lookup(file_t *fp) |
283 | { | | 321 | { |
284 | struct ktr_desc *ktd; | | 322 | struct ktr_desc *ktd; |
285 | | | 323 | |
286 | KASSERT(mutex_owned(&ktrace_lock)); | | 324 | KASSERT(mutex_owned(&ktrace_lock)); |
287 | | | 325 | |
288 | for (ktd = TAILQ_FIRST(&ktdq); ktd != NULL; | | 326 | for (ktd = TAILQ_FIRST(&ktdq); ktd != NULL; |
289 | ktd = TAILQ_NEXT(ktd, ktd_list)) { | | 327 | ktd = TAILQ_NEXT(ktd, ktd_list)) { |
290 | if (ktrsamefile(ktd->ktd_fp, fp)) { | | 328 | if (ktrsamefile(ktd->ktd_fp, fp)) { |
291 | ktdref(ktd); | | 329 | ktdref(ktd); |
292 | break; | | 330 | break; |
293 | } | | 331 | } |
294 | } | | 332 | } |
295 | | | 333 | |
296 | return (ktd); | | 334 | return (ktd); |
297 | } | | 335 | } |
298 | | | 336 | |
299 | void | | 337 | void |
300 | ktraddentry(lwp_t *l, struct ktrace_entry *kte, int flags) | | 338 | ktraddentry(lwp_t *l, struct ktrace_entry *kte, int flags) |
301 | { | | 339 | { |
302 | struct proc *p = l->l_proc; | | 340 | struct proc *p = l->l_proc; |
303 | struct ktr_desc *ktd; | | 341 | struct ktr_desc *ktd; |
304 | #ifdef DEBUG | | 342 | #ifdef DEBUG |
305 | struct timeval t1, t2; | | 343 | struct timeval t1, t2; |
306 | #endif | | 344 | #endif |
307 | | | 345 | |
308 | mutex_enter(&ktrace_lock); | | 346 | mutex_enter(&ktrace_lock); |
309 | | | 347 | |
310 | if (p->p_traceflag & KTRFAC_TRC_EMUL) { | | 348 | if (p->p_traceflag & KTRFAC_TRC_EMUL) { |
311 | /* Add emulation trace before first entry for this process */ | | 349 | /* Add emulation trace before first entry for this process */ |
312 | p->p_traceflag &= ~KTRFAC_TRC_EMUL; | | 350 | p->p_traceflag &= ~KTRFAC_TRC_EMUL; |
313 | mutex_exit(&ktrace_lock); | | 351 | mutex_exit(&ktrace_lock); |
314 | ktrexit(l); | | 352 | ktrexit(l); |
315 | ktremul(); | | 353 | ktremul(); |
316 | (void)ktrenter(l); | | 354 | (void)ktrenter(l); |
317 | mutex_enter(&ktrace_lock); | | 355 | mutex_enter(&ktrace_lock); |
318 | } | | 356 | } |
319 | | | 357 | |
320 | /* Tracing may have been cancelled. */ | | 358 | /* Tracing may have been cancelled. */ |
321 | ktd = p->p_tracep; | | 359 | ktd = p->p_tracep; |
322 | if (ktd == NULL) | | 360 | if (ktd == NULL) |
323 | goto freekte; | | 361 | goto freekte; |
324 | | | 362 | |
325 | /* | | 363 | /* |
326 | * Bump reference count so that the object will remain while | | 364 | * Bump reference count so that the object will remain while |
327 | * we are here. Note that the trace is controlled by another | | 365 | * we are here. Note that the trace is controlled by another |
328 | * process. | | 366 | * process. |
329 | */ | | 367 | */ |
330 | ktdref(ktd); | | 368 | ktdref(ktd); |
331 | | | 369 | |
332 | if (ktd->ktd_flags & KTDF_DONE) | | 370 | if (ktd->ktd_flags & KTDF_DONE) |
333 | goto relktd; | | 371 | goto relktd; |
334 | | | 372 | |
335 | if (ktd->ktd_qcount > ktd_maxentry) { | | 373 | if (ktd->ktd_qcount > ktd_maxentry) { |
336 | ktd_logerrl(ktd, KTDE_ENOSPC); | | 374 | ktd_logerrl(ktd, KTDE_ENOSPC); |
337 | goto relktd; | | 375 | goto relktd; |
338 | } | | 376 | } |
339 | TAILQ_INSERT_TAIL(&ktd->ktd_queue, kte, kte_list); | | 377 | TAILQ_INSERT_TAIL(&ktd->ktd_queue, kte, kte_list); |
340 | ktd->ktd_qcount++; | | 378 | ktd->ktd_qcount++; |
341 | if (ktd->ktd_flags & KTDF_BLOCKING) | | 379 | if (ktd->ktd_flags & KTDF_BLOCKING) |
342 | goto skip_sync; | | 380 | goto skip_sync; |
343 | | | 381 | |
344 | if (flags & KTA_WAITOK && | | 382 | if (flags & KTA_WAITOK && |
345 | (/* flags & KTA_LARGE */0 || ktd->ktd_flags & KTDF_WAIT || | | 383 | (/* flags & KTA_LARGE */0 || ktd->ktd_flags & KTDF_WAIT || |
346 | ktd->ktd_qcount > ktd_maxentry >> 1)) | | 384 | ktd->ktd_qcount > ktd_maxentry >> 1)) |
347 | /* | | 385 | /* |
348 | * Sync with writer thread since we're requesting rather | | 386 | * Sync with writer thread since we're requesting rather |
349 | * big one or many requests are pending. | | 387 | * big one or many requests are pending. |
350 | */ | | 388 | */ |
351 | do { | | 389 | do { |
352 | ktd->ktd_flags |= KTDF_WAIT; | | 390 | ktd->ktd_flags |= KTDF_WAIT; |
353 | ktd_wakeup(ktd); | | 391 | ktd_wakeup(ktd); |
354 | #ifdef DEBUG | | 392 | #ifdef DEBUG |
355 | getmicrouptime(&t1); | | 393 | getmicrouptime(&t1); |
356 | #endif | | 394 | #endif |
357 | if (cv_timedwait(&ktd->ktd_sync_cv, &ktrace_lock, | | 395 | if (cv_timedwait(&ktd->ktd_sync_cv, &ktrace_lock, |
358 | ktd_timeout * hz) != 0) { | | 396 | ktd_timeout * hz) != 0) { |
359 | ktd->ktd_flags |= KTDF_BLOCKING; | | 397 | ktd->ktd_flags |= KTDF_BLOCKING; |
360 | /* | | 398 | /* |
361 | * Maybe the writer thread is blocking | | 399 | * Maybe the writer thread is blocking |
362 | * completely for some reason, but | | 400 | * completely for some reason, but |
363 | * don't stop target process forever. | | 401 | * don't stop target process forever. |
364 | */ | | 402 | */ |
365 | log(LOG_NOTICE, "ktrace timeout\n"); | | 403 | log(LOG_NOTICE, "ktrace timeout\n"); |
366 | break; | | 404 | break; |
367 | } | | 405 | } |
368 | #ifdef DEBUG | | 406 | #ifdef DEBUG |
369 | getmicrouptime(&t2); | | 407 | getmicrouptime(&t2); |
370 | timersub(&t2, &t1, &t2); | | 408 | timersub(&t2, &t1, &t2); |
371 | if (t2.tv_sec > 0) | | 409 | if (t2.tv_sec > 0) |
372 | log(LOG_NOTICE, | | 410 | log(LOG_NOTICE, |
373 | "ktrace long wait: %lld.%06ld\n", | | 411 | "ktrace long wait: %lld.%06ld\n", |
374 | (long long)t2.tv_sec, (long)t2.tv_usec); | | 412 | (long long)t2.tv_sec, (long)t2.tv_usec); |
375 | #endif | | 413 | #endif |
376 | } while (p->p_tracep == ktd && | | 414 | } while (p->p_tracep == ktd && |
377 | (ktd->ktd_flags & (KTDF_WAIT | KTDF_DONE)) == KTDF_WAIT); | | 415 | (ktd->ktd_flags & (KTDF_WAIT | KTDF_DONE)) == KTDF_WAIT); |
378 | else { | | 416 | else { |
379 | /* Schedule delayed wakeup */ | | 417 | /* Schedule delayed wakeup */ |
380 | if (ktd->ktd_qcount > ktd->ktd_delayqcnt) | | 418 | if (ktd->ktd_qcount > ktd->ktd_delayqcnt) |
381 | ktd_wakeup(ktd); /* Wakeup now */ | | 419 | ktd_wakeup(ktd); /* Wakeup now */ |
382 | else if (!callout_pending(&ktd->ktd_wakch)) | | 420 | else if (!callout_pending(&ktd->ktd_wakch)) |
383 | callout_reset(&ktd->ktd_wakch, | | 421 | callout_reset(&ktd->ktd_wakch, |
384 | ktd->ktd_flags & KTDF_INTERACTIVE ? | | 422 | ktd->ktd_flags & KTDF_INTERACTIVE ? |
385 | ktd->ktd_intrwakdl : ktd->ktd_wakedelay, | | 423 | ktd->ktd_intrwakdl : ktd->ktd_wakedelay, |
386 | ktd_callout, ktd); | | 424 | ktd_callout, ktd); |
387 | } | | 425 | } |
388 | | | 426 | |
389 | skip_sync: | | 427 | skip_sync: |
390 | ktdrel(ktd); | | 428 | ktdrel(ktd); |
391 | mutex_exit(&ktrace_lock); | | 429 | mutex_exit(&ktrace_lock); |
392 | ktrexit(l); | | 430 | ktrexit(l); |
393 | return; | | 431 | return; |
394 | | | 432 | |
395 | relktd: | | 433 | relktd: |
396 | ktdrel(ktd); | | 434 | ktdrel(ktd); |
397 | | | 435 | |
398 | freekte: | | 436 | freekte: |
399 | mutex_exit(&ktrace_lock); | | 437 | mutex_exit(&ktrace_lock); |
400 | ktefree(kte); | | 438 | ktefree(kte); |
401 | ktrexit(l); | | 439 | ktrexit(l); |
402 | } | | 440 | } |
403 | | | 441 | |
404 | void | | 442 | void |
405 | ktefree(struct ktrace_entry *kte) | | 443 | ktefree(struct ktrace_entry *kte) |
406 | { | | 444 | { |
407 | | | 445 | |
408 | if (kte->kte_buf != kte->kte_space) | | 446 | if (kte->kte_buf != kte->kte_space) |
409 | kmem_free(kte->kte_buf, kte->kte_bufsz); | | 447 | kmem_free(kte->kte_buf, kte->kte_bufsz); |
410 | pool_cache_put(kte_cache, kte); | | 448 | pool_cache_put(kte_cache, kte); |
411 | } | | 449 | } |
412 | | | 450 | |
413 | /* | | 451 | /* |
414 | * "deep" compare of two files for the purposes of clearing a trace. | | 452 | * "deep" compare of two files for the purposes of clearing a trace. |
415 | * Returns true if they're the same open file, or if they point at the | | 453 | * Returns true if they're the same open file, or if they point at the |
416 | * same underlying vnode/socket. | | 454 | * same underlying vnode/socket. |
417 | */ | | 455 | */ |
418 | | | 456 | |
419 | int | | 457 | int |
420 | ktrsamefile(file_t *f1, file_t *f2) | | 458 | ktrsamefile(file_t *f1, file_t *f2) |
421 | { | | 459 | { |
422 | | | 460 | |
423 | return ((f1 == f2) || | | 461 | return ((f1 == f2) || |
424 | ((f1 != NULL) && (f2 != NULL) && | | 462 | ((f1 != NULL) && (f2 != NULL) && |
425 | (f1->f_type == f2->f_type) && | | 463 | (f1->f_type == f2->f_type) && |
426 | (f1->f_data == f2->f_data))); | | 464 | (f1->f_data == f2->f_data))); |
427 | } | | 465 | } |
428 | | | 466 | |
429 | void | | 467 | void |
430 | ktrderef(struct proc *p) | | 468 | ktrderef(struct proc *p) |
431 | { | | 469 | { |
432 | struct ktr_desc *ktd = p->p_tracep; | | 470 | struct ktr_desc *ktd = p->p_tracep; |
433 | | | 471 | |
434 | KASSERT(mutex_owned(&ktrace_lock)); | | 472 | KASSERT(mutex_owned(&ktrace_lock)); |
435 | | | 473 | |
436 | p->p_traceflag = 0; | | 474 | p->p_traceflag = 0; |
437 | if (ktd == NULL) | | 475 | if (ktd == NULL) |
438 | return; | | 476 | return; |
439 | p->p_tracep = NULL; | | 477 | p->p_tracep = NULL; |
440 | | | 478 | |
441 | cv_broadcast(&ktd->ktd_sync_cv); | | 479 | cv_broadcast(&ktd->ktd_sync_cv); |
442 | ktdrel(ktd); | | 480 | ktdrel(ktd); |
443 | } | | 481 | } |
444 | | | 482 | |
445 | void | | 483 | void |
446 | ktradref(struct proc *p) | | 484 | ktradref(struct proc *p) |
447 | { | | 485 | { |
448 | struct ktr_desc *ktd = p->p_tracep; | | 486 | struct ktr_desc *ktd = p->p_tracep; |
449 | | | 487 | |
450 | KASSERT(mutex_owned(&ktrace_lock)); | | 488 | KASSERT(mutex_owned(&ktrace_lock)); |
451 | | | 489 | |
452 | ktdref(ktd); | | 490 | ktdref(ktd); |
453 | } | | 491 | } |
454 | | | 492 | |
/*
 * Detach every process traced to descriptor ktd.
 *
 * When auth is non-zero the caller (curlwp) must pass the ktrcanset()
 * permission check per process; processes it may not modify stay
 * attached and EPERM is returned after the full scan completes.
 */
int
ktrderefall(struct ktr_desc *ktd, int auth)
{
	lwp_t *curl = curlwp;
	struct proc *p;
	int error = 0;

	mutex_enter(proc_lock);
	PROCLIST_FOREACH(p, &allproc) {
		/* Skip scheduler marker entries and unrelated processes. */
		if ((p->p_flag & PK_MARKER) != 0 || p->p_tracep != ktd)
			continue;
		mutex_enter(p->p_lock);
		mutex_enter(&ktrace_lock);
		/* Re-check under the locks: p_tracep may have changed. */
		if (p->p_tracep == ktd) {
			if (!auth || ktrcanset(curl, p))
				ktrderef(p);
			else
				error = EPERM;
		}
		mutex_exit(&ktrace_lock);
		mutex_exit(p->p_lock);
	}
	mutex_exit(proc_lock);

	return error;
}
481 | | | 519 | |
/*
 * Allocate and initialize a trace entry of the given type with room
 * for sz bytes of payload.
 *
 * On success *ktep receives the new entry and *bufp its payload
 * buffer, and the per-LWP ktrace recursion guard is left held; it is
 * released by ktraddentry() or ktrexit().  Returns EAGAIN when the
 * LWP is already inside ktrace, ENOMEM on allocation failure.
 */
int
ktealloc(struct ktrace_entry **ktep, void **bufp, lwp_t *l, int type,
    size_t sz)
{
	struct proc *p = l->l_proc;
	struct ktrace_entry *kte;
	struct ktr_header *kth;
	struct timespec ts;
	void *buf;

	/* Recursion guard: never trace the act of tracing itself. */
	if (ktrenter(l))
		return EAGAIN;

	kte = pool_cache_get(kte_cache, PR_WAITOK);
	/* Small payloads fit in the entry; larger ones go to kmem. */
	if (sz > sizeof(kte->kte_space)) {
		if ((buf = kmem_alloc(sz, KM_SLEEP)) == NULL) {
			pool_cache_put(kte_cache, kte);
			ktrexit(l);
			return ENOMEM;
		}
	} else
		buf = kte->kte_space;

	kte->kte_bufsz = sz;
	kte->kte_buf = buf;

	kth = &kte->kte_kth;
	(void)memset(kth, 0, sizeof(*kth));
	kth->ktr_len = sz;
	kth->ktr_type = type;
	kth->ktr_pid = p->p_pid;
	memcpy(kth->ktr_comm, p->p_comm, MAXCOMLEN);
	kth->ktr_version = KTRFAC_VERSION(p->p_traceflag);

	/* Timestamp/LWP-id layout depends on the trace-format version. */
	nanotime(&ts);
	switch (KTRFAC_VERSION(p->p_traceflag)) {
	case 0:
		/* This is the original format */
		kth->ktr_otv.tv_sec = ts.tv_sec;
		kth->ktr_otv.tv_usec = ts.tv_nsec / 1000;
		break;
	case 1:
		kth->ktr_olid = l->l_lid;
		kth->ktr_ots.tv_sec = ts.tv_sec;
		kth->ktr_ots.tv_nsec = ts.tv_nsec;
		break;
	case 2:
		kth->ktr_lid = l->l_lid;
		kth->ktr_ts.tv_sec = ts.tv_sec;
		kth->ktr_ts.tv_nsec = ts.tv_nsec;
		break;
	default:
		break;
	}

	*ktep = kte;
	*bufp = buf;

	return 0;
}
542 | | | 580 | |
543 | void | | 581 | void |
544 | ktr_syscall(register_t code, const register_t args[], int narg) | | 582 | ktr_syscall(register_t code, const register_t args[], int narg) |
545 | { | | 583 | { |
546 | lwp_t *l = curlwp; | | 584 | lwp_t *l = curlwp; |
547 | struct proc *p = l->l_proc; | | 585 | struct proc *p = l->l_proc; |
548 | struct ktrace_entry *kte; | | 586 | struct ktrace_entry *kte; |
549 | struct ktr_syscall *ktp; | | 587 | struct ktr_syscall *ktp; |
550 | register_t *argp; | | 588 | register_t *argp; |
551 | size_t len; | | 589 | size_t len; |
552 | u_int i; | | 590 | u_int i; |
553 | | | 591 | |
554 | if (!KTRPOINT(p, KTR_SYSCALL)) | | 592 | if (!KTRPOINT(p, KTR_SYSCALL)) |
555 | return; | | 593 | return; |
556 | | | 594 | |
557 | len = sizeof(struct ktr_syscall) + narg * sizeof argp[0]; | | 595 | len = sizeof(struct ktr_syscall) + narg * sizeof argp[0]; |
558 | | | 596 | |
559 | if (ktealloc(&kte, (void *)&ktp, l, KTR_SYSCALL, len)) | | 597 | if (ktealloc(&kte, (void *)&ktp, l, KTR_SYSCALL, len)) |
560 | return; | | 598 | return; |
561 | | | 599 | |
562 | ktp->ktr_code = code; | | 600 | ktp->ktr_code = code; |
563 | ktp->ktr_argsize = narg * sizeof argp[0]; | | 601 | ktp->ktr_argsize = narg * sizeof argp[0]; |
564 | argp = (register_t *)(ktp + 1); | | 602 | argp = (register_t *)(ktp + 1); |
565 | for (i = 0; i < narg; i++) | | 603 | for (i = 0; i < narg; i++) |
566 | *argp++ = args[i]; | | 604 | *argp++ = args[i]; |
567 | | | 605 | |
568 | ktraddentry(l, kte, KTA_WAITOK); | | 606 | ktraddentry(l, kte, KTA_WAITOK); |
569 | } | | 607 | } |
570 | | | 608 | |
571 | void | | 609 | void |
572 | ktr_sysret(register_t code, int error, register_t *retval) | | 610 | ktr_sysret(register_t code, int error, register_t *retval) |
573 | { | | 611 | { |
574 | lwp_t *l = curlwp; | | 612 | lwp_t *l = curlwp; |
575 | struct ktrace_entry *kte; | | 613 | struct ktrace_entry *kte; |
576 | struct ktr_sysret *ktp; | | 614 | struct ktr_sysret *ktp; |
577 | | | 615 | |
578 | if (!KTRPOINT(l->l_proc, KTR_SYSRET)) | | 616 | if (!KTRPOINT(l->l_proc, KTR_SYSRET)) |
579 | return; | | 617 | return; |
580 | | | 618 | |
581 | if (ktealloc(&kte, (void *)&ktp, l, KTR_SYSRET, | | 619 | if (ktealloc(&kte, (void *)&ktp, l, KTR_SYSRET, |
582 | sizeof(struct ktr_sysret))) | | 620 | sizeof(struct ktr_sysret))) |
583 | return; | | 621 | return; |
584 | | | 622 | |
585 | ktp->ktr_code = code; | | 623 | ktp->ktr_code = code; |
586 | ktp->ktr_eosys = 0; /* XXX unused */ | | 624 | ktp->ktr_eosys = 0; /* XXX unused */ |
587 | ktp->ktr_error = error; | | 625 | ktp->ktr_error = error; |
588 | ktp->ktr_retval = retval ? retval[0] : 0; | | 626 | ktp->ktr_retval = retval ? retval[0] : 0; |
589 | ktp->ktr_retval_1 = retval ? retval[1] : 0; | | 627 | ktp->ktr_retval_1 = retval ? retval[1] : 0; |
590 | | | 628 | |
591 | ktraddentry(l, kte, KTA_WAITOK); | | 629 | ktraddentry(l, kte, KTA_WAITOK); |
592 | } | | 630 | } |
593 | | | 631 | |
594 | void | | 632 | void |
595 | ktr_namei(const char *path, size_t pathlen) | | 633 | ktr_namei(const char *path, size_t pathlen) |
596 | { | | 634 | { |
597 | lwp_t *l = curlwp; | | 635 | lwp_t *l = curlwp; |
598 | | | 636 | |
599 | if (!KTRPOINT(l->l_proc, KTR_NAMEI)) | | 637 | if (!KTRPOINT(l->l_proc, KTR_NAMEI)) |
600 | return; | | 638 | return; |
601 | | | 639 | |
602 | ktr_kmem(l, KTR_NAMEI, path, pathlen); | | 640 | ktr_kmem(l, KTR_NAMEI, path, pathlen); |
603 | } | | 641 | } |
604 | | | 642 | |
605 | void | | 643 | void |
606 | ktr_namei2(const char *eroot, size_t erootlen, | | 644 | ktr_namei2(const char *eroot, size_t erootlen, |
607 | const char *path, size_t pathlen) | | 645 | const char *path, size_t pathlen) |
608 | { | | 646 | { |
609 | lwp_t *l = curlwp; | | 647 | lwp_t *l = curlwp; |
610 | struct ktrace_entry *kte; | | 648 | struct ktrace_entry *kte; |
611 | void *buf; | | 649 | void *buf; |
612 | | | 650 | |
613 | if (!KTRPOINT(l->l_proc, KTR_NAMEI)) | | 651 | if (!KTRPOINT(l->l_proc, KTR_NAMEI)) |
614 | return; | | 652 | return; |
615 | | | 653 | |
616 | if (ktealloc(&kte, &buf, l, KTR_NAMEI, erootlen + pathlen)) | | 654 | if (ktealloc(&kte, &buf, l, KTR_NAMEI, erootlen + pathlen)) |
617 | return; | | 655 | return; |
618 | memcpy(buf, eroot, erootlen); | | 656 | memcpy(buf, eroot, erootlen); |
619 | buf = (char *)buf + erootlen; | | 657 | buf = (char *)buf + erootlen; |
620 | memcpy(buf, path, pathlen); | | 658 | memcpy(buf, path, pathlen); |
621 | ktraddentry(l, kte, KTA_WAITOK); | | 659 | ktraddentry(l, kte, KTA_WAITOK); |
622 | } | | 660 | } |
623 | | | 661 | |
624 | void | | 662 | void |
625 | ktr_emul(void) | | 663 | ktr_emul(void) |
626 | { | | 664 | { |
627 | lwp_t *l = curlwp; | | 665 | lwp_t *l = curlwp; |
628 | const char *emul = l->l_proc->p_emul->e_name; | | 666 | const char *emul = l->l_proc->p_emul->e_name; |
629 | | | 667 | |
630 | if (!KTRPOINT(l->l_proc, KTR_EMUL)) | | 668 | if (!KTRPOINT(l->l_proc, KTR_EMUL)) |
631 | return; | | 669 | return; |
632 | | | 670 | |
633 | ktr_kmem(l, KTR_EMUL, emul, strlen(emul)); | | 671 | ktr_kmem(l, KTR_EMUL, emul, strlen(emul)); |
634 | } | | 672 | } |
635 | | | 673 | |
636 | void | | 674 | void |
637 | ktr_execarg(const void *bf, size_t len) | | 675 | ktr_execarg(const void *bf, size_t len) |
638 | { | | 676 | { |
639 | lwp_t *l = curlwp; | | 677 | lwp_t *l = curlwp; |
640 | | | 678 | |
641 | if (!KTRPOINT(l->l_proc, KTR_EXEC_ARG)) | | 679 | if (!KTRPOINT(l->l_proc, KTR_EXEC_ARG)) |
642 | return; | | 680 | return; |
643 | | | 681 | |
644 | ktr_kmem(l, KTR_EXEC_ARG, bf, len); | | 682 | ktr_kmem(l, KTR_EXEC_ARG, bf, len); |
645 | } | | 683 | } |
646 | | | 684 | |
647 | void | | 685 | void |
648 | ktr_execenv(const void *bf, size_t len) | | 686 | ktr_execenv(const void *bf, size_t len) |
649 | { | | 687 | { |
650 | lwp_t *l = curlwp; | | 688 | lwp_t *l = curlwp; |
651 | | | 689 | |
652 | if (!KTRPOINT(l->l_proc, KTR_EXEC_ENV)) | | 690 | if (!KTRPOINT(l->l_proc, KTR_EXEC_ENV)) |
653 | return; | | 691 | return; |
654 | | | 692 | |
655 | ktr_kmem(l, KTR_EXEC_ENV, bf, len); | | 693 | ktr_kmem(l, KTR_EXEC_ENV, bf, len); |
656 | } | | 694 | } |
657 | | | 695 | |
658 | static void | | 696 | static void |
659 | ktr_kmem(lwp_t *l, int type, const void *bf, size_t len) | | 697 | ktr_kmem(lwp_t *l, int type, const void *bf, size_t len) |
660 | { | | 698 | { |
661 | struct ktrace_entry *kte; | | 699 | struct ktrace_entry *kte; |
662 | void *buf; | | 700 | void *buf; |
663 | | | 701 | |
664 | if (ktealloc(&kte, &buf, l, type, len)) | | 702 | if (ktealloc(&kte, &buf, l, type, len)) |
665 | return; | | 703 | return; |
666 | memcpy(buf, bf, len); | | 704 | memcpy(buf, bf, len); |
667 | ktraddentry(l, kte, KTA_WAITOK); | | 705 | ktraddentry(l, kte, KTA_WAITOK); |
668 | } | | 706 | } |
669 | | | 707 | |
/*
 * Record a GENIO (I/O data) event, copying the user data described by
 * iov/len in chunks of at most PAGE_SIZE (header included).  Each
 * chunk becomes its own trace record so a huge transfer never pins an
 * equally huge kernel buffer.
 */
static void
ktr_io(lwp_t *l, int fd, enum uio_rw rw, struct iovec *iov, size_t len)
{
	struct ktrace_entry *kte;
	struct ktr_genio *ktp;
	size_t resid = len, cnt, buflen;
	char *cp;

next:
	/* One record: fixed header plus at most a page of payload. */
	buflen = min(PAGE_SIZE, resid + sizeof(struct ktr_genio));

	if (ktealloc(&kte, (void *)&ktp, l, KTR_GENIO, buflen))
		return;

	ktp->ktr_fd = fd;
	ktp->ktr_rw = rw;

	cp = (void *)(ktp + 1);
	buflen -= sizeof(struct ktr_genio);
	/* ktr_len grows below as payload bytes are copied in. */
	kte->kte_kth.ktr_len = sizeof(struct ktr_genio);

	while (buflen > 0) {
		cnt = min(iov->iov_len, buflen);
		/* Source is user memory; abandon the record on a fault. */
		if (copyin(iov->iov_base, cp, cnt) != 0)
			goto out;
		kte->kte_kth.ktr_len += cnt;
		cp += cnt;
		buflen -= cnt;
		resid -= cnt;
		/* Advance within the current iovec, or step to the next. */
		iov->iov_len -= cnt;
		if (iov->iov_len == 0)
			iov++;
		else
			iov->iov_base = (char *)iov->iov_base + cnt;
	}

	/*
	 * Don't push so many entry at once.  It will cause kmem map
	 * shortage.
	 */
	ktraddentry(l, kte, KTA_WAITOK | KTA_LARGE);
	if (resid > 0) {
		/* Yield between chunks if preemption is pending. */
		if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD) {
			(void)ktrenter(l);
			preempt();
			ktrexit(l);
		}

		goto next;
	}

	return;

out:
	/* copyin failed: discard the partial record, drop the guard. */
	ktefree(kte);
	ktrexit(l);
}
727 | | | 765 | |
728 | void | | 766 | void |
729 | ktr_genio(int fd, enum uio_rw rw, const void *addr, size_t len, int error) | | 767 | ktr_genio(int fd, enum uio_rw rw, const void *addr, size_t len, int error) |
730 | { | | 768 | { |
731 | lwp_t *l = curlwp; | | 769 | lwp_t *l = curlwp; |
732 | struct iovec iov; | | 770 | struct iovec iov; |
733 | | | 771 | |
734 | if (!KTRPOINT(l->l_proc, KTR_GENIO) || error != 0) | | 772 | if (!KTRPOINT(l->l_proc, KTR_GENIO) || error != 0) |
735 | return; | | 773 | return; |
736 | iov.iov_base = __UNCONST(addr); | | 774 | iov.iov_base = __UNCONST(addr); |
737 | iov.iov_len = len; | | 775 | iov.iov_len = len; |
738 | ktr_io(l, fd, rw, &iov, len); | | 776 | ktr_io(l, fd, rw, &iov, len); |
739 | } | | 777 | } |
740 | | | 778 | |
741 | void | | 779 | void |
742 | ktr_geniov(int fd, enum uio_rw rw, struct iovec *iov, size_t len, int error) | | 780 | ktr_geniov(int fd, enum uio_rw rw, struct iovec *iov, size_t len, int error) |
743 | { | | 781 | { |
744 | lwp_t *l = curlwp; | | 782 | lwp_t *l = curlwp; |
745 | | | 783 | |
746 | if (!KTRPOINT(l->l_proc, KTR_GENIO) || error != 0) | | 784 | if (!KTRPOINT(l->l_proc, KTR_GENIO) || error != 0) |
747 | return; | | 785 | return; |
748 | ktr_io(l, fd, rw, iov, len); | | 786 | ktr_io(l, fd, rw, iov, len); |
749 | } | | 787 | } |
750 | | | 788 | |
751 | void | | 789 | void |
752 | ktr_mibio(int fd, enum uio_rw rw, const void *addr, size_t len, int error) | | 790 | ktr_mibio(int fd, enum uio_rw rw, const void *addr, size_t len, int error) |
753 | { | | 791 | { |
754 | lwp_t *l = curlwp; | | 792 | lwp_t *l = curlwp; |
755 | struct iovec iov; | | 793 | struct iovec iov; |
756 | | | 794 | |
757 | if (!KTRPOINT(l->l_proc, KTR_MIB) || error != 0) | | 795 | if (!KTRPOINT(l->l_proc, KTR_MIB) || error != 0) |
758 | return; | | 796 | return; |
759 | iov.iov_base = __UNCONST(addr); | | 797 | iov.iov_base = __UNCONST(addr); |
760 | iov.iov_len = len; | | 798 | iov.iov_len = len; |
761 | ktr_io(l, fd, rw, &iov, len); | | 799 | ktr_io(l, fd, rw, &iov, len); |
762 | } | | 800 | } |
763 | | | 801 | |
/*
 * Record delivery of a signal: number, handler, blocked-signal mask
 * and, when ksi is supplied, the full siginfo.  Without siginfo the
 * record is truncated to just the ktr_psig part.
 */
void
ktr_psig(int sig, sig_t action, const sigset_t *mask,
    const ksiginfo_t *ksi)
{
	struct ktrace_entry *kte;
	lwp_t *l = curlwp;
	struct {
		struct ktr_psig	kp;
		siginfo_t	si;
	} *kbuf;

	if (!KTRPOINT(l->l_proc, KTR_PSIG))
		return;

	/* Allocate for the larger (siginfo-bearing) layout up front. */
	if (ktealloc(&kte, (void *)&kbuf, l, KTR_PSIG, sizeof(*kbuf)))
		return;

	kbuf->kp.signo = (char)sig;
	kbuf->kp.action = action;
	kbuf->kp.mask = *mask;

	if (ksi) {
		kbuf->kp.code = KSI_TRAPCODE(ksi);
		(void)memset(&kbuf->si, 0, sizeof(kbuf->si));
		kbuf->si._info = ksi->ksi_info;
		kte->kte_kth.ktr_len = sizeof(*kbuf);
	} else {
		/* No siginfo: shrink the record to the bare ktr_psig. */
		kbuf->kp.code = 0;
		kte->kte_kth.ktr_len = sizeof(struct ktr_psig);
	}

	ktraddentry(l, kte, KTA_WAITOK);
}
797 | | | 835 | |
798 | void | | 836 | void |
799 | ktr_csw(int out, int user) | | 837 | ktr_csw(int out, int user) |
800 | { | | 838 | { |
801 | lwp_t *l = curlwp; | | 839 | lwp_t *l = curlwp; |
802 | struct proc *p = l->l_proc; | | 840 | struct proc *p = l->l_proc; |
803 | struct ktrace_entry *kte; | | 841 | struct ktrace_entry *kte; |
804 | struct ktr_csw *kc; | | 842 | struct ktr_csw *kc; |
805 | | | 843 | |
806 | if (!KTRPOINT(p, KTR_CSW)) | | 844 | if (!KTRPOINT(p, KTR_CSW)) |
807 | return; | | 845 | return; |
808 | | | 846 | |
809 | /* | | 847 | /* |
810 | * Don't record context switches resulting from blocking on | | 848 | * Don't record context switches resulting from blocking on |
811 | * locks; it's too easy to get duff results. | | 849 | * locks; it's too easy to get duff results. |
812 | */ | | 850 | */ |
813 | if (l->l_syncobj == &mutex_syncobj || l->l_syncobj == &rw_syncobj) | | 851 | if (l->l_syncobj == &mutex_syncobj || l->l_syncobj == &rw_syncobj) |
814 | return; | | 852 | return; |
815 | | | 853 | |
816 | /* | | 854 | /* |
817 | * We can't sleep if we're already going to sleep (if original | | 855 | * We can't sleep if we're already going to sleep (if original |
818 | * condition is met during sleep, we hang up). | | 856 | * condition is met during sleep, we hang up). |
819 | * | | 857 | * |
820 | * XXX This is not ideal: it would be better to maintain a pool | | 858 | * XXX This is not ideal: it would be better to maintain a pool |
821 | * of ktes and actually push this to the kthread when context | | 859 | * of ktes and actually push this to the kthread when context |
822 | * switch happens, however given the points where we are called | | 860 | * switch happens, however given the points where we are called |
823 | * from that is difficult to do. | | 861 | * from that is difficult to do. |
824 | */ | | 862 | */ |
825 | if (out) { | | 863 | if (out) { |
826 | struct timespec ts; | | 864 | struct timespec ts; |
827 | if (ktrenter(l)) | | 865 | if (ktrenter(l)) |
828 | return; | | 866 | return; |
829 | | | 867 | |
830 | nanotime(&l->l_ktrcsw); | | 868 | nanotime(&l->l_ktrcsw); |
831 | l->l_pflag |= LP_KTRCSW; | | 869 | l->l_pflag |= LP_KTRCSW; |
832 | nanotime(&ts); | | 870 | nanotime(&ts); |
833 | if (user) | | 871 | if (user) |
834 | l->l_pflag |= LP_KTRCSWUSER; | | 872 | l->l_pflag |= LP_KTRCSWUSER; |
835 | else | | 873 | else |
836 | l->l_pflag &= ~LP_KTRCSWUSER; | | 874 | l->l_pflag &= ~LP_KTRCSWUSER; |
837 | | | 875 | |
838 | ktrexit(l); | | 876 | ktrexit(l); |
839 | return; | | 877 | return; |
840 | } | | 878 | } |
841 | | | 879 | |
842 | /* | | 880 | /* |
843 | * On the way back in, we need to record twice: once for entry, and | | 881 | * On the way back in, we need to record twice: once for entry, and |
844 | * once for exit. | | 882 | * once for exit. |
845 | */ | | 883 | */ |
846 | if ((l->l_pflag & LP_KTRCSW) != 0) { | | 884 | if ((l->l_pflag & LP_KTRCSW) != 0) { |
847 | struct timespec *ts; | | 885 | struct timespec *ts; |
848 | l->l_pflag &= ~LP_KTRCSW; | | 886 | l->l_pflag &= ~LP_KTRCSW; |
849 | | | 887 | |
850 | if (ktealloc(&kte, (void *)&kc, l, KTR_CSW, sizeof(*kc))) | | 888 | if (ktealloc(&kte, (void *)&kc, l, KTR_CSW, sizeof(*kc))) |
851 | return; | | 889 | return; |
852 | | | 890 | |
853 | kc->out = 1; | | 891 | kc->out = 1; |
854 | kc->user = ((l->l_pflag & LP_KTRCSWUSER) != 0); | | 892 | kc->user = ((l->l_pflag & LP_KTRCSWUSER) != 0); |
855 | | | 893 | |
856 | ts = &l->l_ktrcsw; | | 894 | ts = &l->l_ktrcsw; |
857 | switch (KTRFAC_VERSION(p->p_traceflag)) { | | 895 | switch (KTRFAC_VERSION(p->p_traceflag)) { |
858 | case 0: | | 896 | case 0: |
859 | kte->kte_kth.ktr_otv.tv_sec = ts->tv_sec; | | 897 | kte->kte_kth.ktr_otv.tv_sec = ts->tv_sec; |
860 | kte->kte_kth.ktr_otv.tv_usec = ts->tv_nsec / 1000; | | 898 | kte->kte_kth.ktr_otv.tv_usec = ts->tv_nsec / 1000; |
861 | break; | | 899 | break; |
862 | case 1: | | 900 | case 1: |
863 | kte->kte_kth.ktr_ots.tv_sec = ts->tv_sec; | | 901 | kte->kte_kth.ktr_ots.tv_sec = ts->tv_sec; |
864 | kte->kte_kth.ktr_ots.tv_nsec = ts->tv_nsec; | | 902 | kte->kte_kth.ktr_ots.tv_nsec = ts->tv_nsec; |
865 | break; | | 903 | break; |
866 | case 2: | | 904 | case 2: |
867 | kte->kte_kth.ktr_ts.tv_sec = ts->tv_sec; | | 905 | kte->kte_kth.ktr_ts.tv_sec = ts->tv_sec; |
868 | kte->kte_kth.ktr_ts.tv_nsec = ts->tv_nsec; | | 906 | kte->kte_kth.ktr_ts.tv_nsec = ts->tv_nsec; |
869 | break; | | 907 | break; |
870 | default: | | 908 | default: |
871 | break; | | 909 | break; |
872 | } | | 910 | } |
873 | | | 911 | |
874 | ktraddentry(l, kte, KTA_WAITOK); | | 912 | ktraddentry(l, kte, KTA_WAITOK); |
875 | } | | 913 | } |
876 | | | 914 | |
877 | if (ktealloc(&kte, (void *)&kc, l, KTR_CSW, sizeof(*kc))) | | 915 | if (ktealloc(&kte, (void *)&kc, l, KTR_CSW, sizeof(*kc))) |
878 | return; | | 916 | return; |
879 | | | 917 | |
880 | kc->out = 0; | | 918 | kc->out = 0; |
881 | kc->user = user; | | 919 | kc->user = user; |
882 | | | 920 | |
883 | ktraddentry(l, kte, KTA_WAITOK); | | 921 | ktraddentry(l, kte, KTA_WAITOK); |
884 | } | | 922 | } |
885 | | | 923 | |
886 | bool | | 924 | bool |
887 | ktr_point(int fac_bit) | | 925 | ktr_point(int fac_bit) |
888 | { | | 926 | { |
889 | return curlwp->l_proc->p_traceflag & fac_bit; | | 927 | return curlwp->l_proc->p_traceflag & fac_bit; |
890 | } | | 928 | } |
891 | | | 929 | |
892 | int | | 930 | int |
893 | ktruser(const char *id, void *addr, size_t len, int ustr) | | 931 | ktruser(const char *id, void *addr, size_t len, int ustr) |
894 | { | | 932 | { |
895 | struct ktrace_entry *kte; | | 933 | struct ktrace_entry *kte; |
896 | struct ktr_user *ktp; | | 934 | struct ktr_user *ktp; |
897 | lwp_t *l = curlwp; | | 935 | lwp_t *l = curlwp; |
898 | void *user_dta; | | 936 | void *user_dta; |
899 | int error; | | 937 | int error; |
900 | | | 938 | |
901 | if (!KTRPOINT(l->l_proc, KTR_USER)) | | 939 | if (!KTRPOINT(l->l_proc, KTR_USER)) |
902 | return 0; | | 940 | return 0; |
903 | | | 941 | |
904 | if (len > KTR_USER_MAXLEN) | | 942 | if (len > KTR_USER_MAXLEN) |
905 | return ENOSPC; | | 943 | return ENOSPC; |
906 | | | 944 | |
907 | error = ktealloc(&kte, (void *)&ktp, l, KTR_USER, sizeof(*ktp) + len); | | 945 | error = ktealloc(&kte, (void *)&ktp, l, KTR_USER, sizeof(*ktp) + len); |
908 | if (error != 0) | | 946 | if (error != 0) |
909 | return error; | | 947 | return error; |
910 | | | 948 | |
911 | if (ustr) { | | 949 | if (ustr) { |
912 | if (copyinstr(id, ktp->ktr_id, KTR_USER_MAXIDLEN, NULL) != 0) | | 950 | if (copyinstr(id, ktp->ktr_id, KTR_USER_MAXIDLEN, NULL) != 0) |
913 | ktp->ktr_id[0] = '\0'; | | 951 | ktp->ktr_id[0] = '\0'; |
914 | } else | | 952 | } else |
915 | strncpy(ktp->ktr_id, id, KTR_USER_MAXIDLEN); | | 953 | strncpy(ktp->ktr_id, id, KTR_USER_MAXIDLEN); |
916 | ktp->ktr_id[KTR_USER_MAXIDLEN-1] = '\0'; | | 954 | ktp->ktr_id[KTR_USER_MAXIDLEN-1] = '\0'; |
917 | | | 955 | |
918 | user_dta = (void *)(ktp + 1); | | 956 | user_dta = (void *)(ktp + 1); |
919 | if ((error = copyin(addr, (void *)user_dta, len)) != 0) | | 957 | if ((error = copyin(addr, (void *)user_dta, len)) != 0) |
920 | len = 0; | | 958 | len = 0; |
921 | | | 959 | |
922 | ktraddentry(l, kte, KTA_WAITOK); | | 960 | ktraddentry(l, kte, KTA_WAITOK); |
923 | return error; | | 961 | return error; |
924 | } | | 962 | } |
925 | | | 963 | |
926 | void | | 964 | void |
927 | ktr_kuser(const char *id, void *addr, size_t len) | | 965 | ktr_kuser(const char *id, void *addr, size_t len) |
928 | { | | 966 | { |
929 | struct ktrace_entry *kte; | | 967 | struct ktrace_entry *kte; |
930 | struct ktr_user *ktp; | | 968 | struct ktr_user *ktp; |
931 | lwp_t *l = curlwp; | | 969 | lwp_t *l = curlwp; |
932 | int error; | | 970 | int error; |
933 | | | 971 | |
934 | if (!KTRPOINT(l->l_proc, KTR_USER)) | | 972 | if (!KTRPOINT(l->l_proc, KTR_USER)) |
935 | return; | | 973 | return; |
936 | | | 974 | |
937 | if (len > KTR_USER_MAXLEN) | | 975 | if (len > KTR_USER_MAXLEN) |
938 | return; | | 976 | return; |
939 | | | 977 | |
940 | error = ktealloc(&kte, (void *)&ktp, l, KTR_USER, sizeof(*ktp) + len); | | 978 | error = ktealloc(&kte, (void *)&ktp, l, KTR_USER, sizeof(*ktp) + len); |
941 | if (error != 0) | | 979 | if (error != 0) |
942 | return; | | 980 | return; |
943 | | | 981 | |
944 | strlcpy(ktp->ktr_id, id, KTR_USER_MAXIDLEN); | | 982 | strlcpy(ktp->ktr_id, id, KTR_USER_MAXIDLEN); |
945 | | | 983 | |
946 | memcpy(ktp + 1, addr, len); | | 984 | memcpy(ktp + 1, addr, len); |
947 | | | 985 | |
948 | ktraddentry(l, kte, KTA_WAITOK); | | 986 | ktraddentry(l, kte, KTA_WAITOK); |
949 | } | | 987 | } |
950 | | | 988 | |
951 | void | | 989 | void |
952 | ktr_mmsg(const void *msgh, size_t size) | | 990 | ktr_mmsg(const void *msgh, size_t size) |
953 | { | | 991 | { |
954 | lwp_t *l = curlwp; | | 992 | lwp_t *l = curlwp; |
955 | | | 993 | |
956 | if (!KTRPOINT(l->l_proc, KTR_MMSG)) | | 994 | if (!KTRPOINT(l->l_proc, KTR_MMSG)) |
957 | return; | | 995 | return; |
958 | | | 996 | |
959 | ktr_kmem(l, KTR_MMSG, msgh, size); | | 997 | ktr_kmem(l, KTR_MMSG, msgh, size); |
960 | } | | 998 | } |
961 | | | 999 | |
962 | void | | 1000 | void |
963 | ktr_mool(const void *kaddr, size_t size, const void *uaddr) | | 1001 | ktr_mool(const void *kaddr, size_t size, const void *uaddr) |
964 | { | | 1002 | { |
965 | struct ktrace_entry *kte; | | 1003 | struct ktrace_entry *kte; |
966 | struct ktr_mool *kp; | | 1004 | struct ktr_mool *kp; |
967 | struct ktr_mool *bf; | | 1005 | struct ktr_mool *bf; |
968 | lwp_t *l = curlwp; | | 1006 | lwp_t *l = curlwp; |
969 | | | 1007 | |
970 | if (!KTRPOINT(l->l_proc, KTR_MOOL)) | | 1008 | if (!KTRPOINT(l->l_proc, KTR_MOOL)) |
971 | return; | | 1009 | return; |
972 | | | 1010 | |
973 | if (ktealloc(&kte, (void *)&kp, l, KTR_MOOL, size + sizeof(*kp))) | | 1011 | if (ktealloc(&kte, (void *)&kp, l, KTR_MOOL, size + sizeof(*kp))) |
974 | return; | | 1012 | return; |
975 | | | 1013 | |
976 | kp->uaddr = uaddr; | | 1014 | kp->uaddr = uaddr; |
977 | kp->size = size; | | 1015 | kp->size = size; |
978 | bf = kp + 1; /* Skip uaddr and size */ | | 1016 | bf = kp + 1; /* Skip uaddr and size */ |
979 | (void)memcpy(bf, kaddr, size); | | 1017 | (void)memcpy(bf, kaddr, size); |
980 | | | 1018 | |
981 | ktraddentry(l, kte, KTA_WAITOK); | | 1019 | ktraddentry(l, kte, KTA_WAITOK); |
982 | } | | 1020 | } |
983 | | | 1021 | |
/*
 * Record a scheduler-activations upcall: the fixed ktr_saupcall
 * header followed by nevent + nint + 1 sa_t structures copied from
 * the kernel pointer array ksas.  The record length is patched after
 * the copy since it is only known once all sa_t's are appended.
 */
void
ktr_saupcall(struct lwp *l, int type, int nevent, int nint, void *sas,
    void *ap, void *ksas)
{
	struct ktrace_entry *kte;
	struct ktr_saupcall *ktp;
	size_t len, sz;
	struct sa_t **sapp;
	int i;

	if (!KTRPOINT(l->l_proc, KTR_SAUPCALL))
		return;

	len = sizeof(struct ktr_saupcall);
	/* Reserve room for the event, interrupt and trailing sa_t's. */
	sz = len + sizeof(struct sa_t) * (nevent + nint + 1);

	if (ktealloc(&kte, (void *)&ktp, l, KTR_SAUPCALL, sz))
		return;

	ktp->ktr_type = type;
	ktp->ktr_nevent = nevent;
	ktp->ktr_nint = nint;
	ktp->ktr_sas = sas;
	ktp->ktr_ap = ap;

	/* Copy the sa_t's */
	sapp = (struct sa_t **) ksas;

	for (i = nevent + nint; i >= 0; i--) {
		memcpy((char *)ktp + len, *sapp, sizeof(struct sa_t));
		len += sizeof(struct sa_t);
		sapp++;
	}

	/* len now reflects header plus all copied sa_t's. */
	kte->kte_kth.ktr_len = len;
	ktraddentry(l, kte, KTA_WAITOK);
}
1021 | | | 1059 | |
1022 | void | | 1060 | void |
1023 | ktr_mib(const int *name, u_int namelen) | | 1061 | ktr_mib(const int *name, u_int namelen) |
1024 | { | | 1062 | { |
1025 | struct ktrace_entry *kte; | | 1063 | struct ktrace_entry *kte; |
1026 | int *namep; | | 1064 | int *namep; |
1027 | size_t size; | | 1065 | size_t size; |
1028 | lwp_t *l = curlwp; | | 1066 | lwp_t *l = curlwp; |
1029 | | | 1067 | |
1030 | if (!KTRPOINT(l->l_proc, KTR_MIB)) | | 1068 | if (!KTRPOINT(l->l_proc, KTR_MIB)) |
1031 | return; | | 1069 | return; |
1032 | | | 1070 | |
1033 | size = namelen * sizeof(*name); | | 1071 | size = namelen * sizeof(*name); |
1034 | | | 1072 | |
1035 | if (ktealloc(&kte, (void *)&namep, l, KTR_MIB, size)) | | 1073 | if (ktealloc(&kte, (void *)&namep, l, KTR_MIB, size)) |
1036 | return; | | 1074 | return; |
1037 | | | 1075 | |
1038 | (void)memcpy(namep, name, namelen * sizeof(*name)); | | 1076 | (void)memcpy(namep, name, namelen * sizeof(*name)); |
1039 | | | 1077 | |
1040 | ktraddentry(l, kte, KTA_WAITOK); | | 1078 | ktraddentry(l, kte, KTA_WAITOK); |
1041 | } | | 1079 | } |
1042 | | | 1080 | |
1043 | /* Interface and common routines */ | | 1081 | /* Interface and common routines */ |
1044 | | | 1082 | |
1045 | int | | 1083 | int |
1046 | ktrace_common(lwp_t *curl, int ops, int facs, int pid, file_t *fp) | | 1084 | ktrace_common(lwp_t *curl, int ops, int facs, int pid, file_t *fp) |
1047 | { | | 1085 | { |
1048 | struct proc *curp; | | 1086 | struct proc *curp; |
1049 | struct proc *p; | | 1087 | struct proc *p; |
1050 | struct pgrp *pg; | | 1088 | struct pgrp *pg; |
1051 | struct ktr_desc *ktd = NULL; | | 1089 | struct ktr_desc *ktd = NULL; |
1052 | int ret = 0; | | 1090 | int ret = 0; |
1053 | int error = 0; | | 1091 | int error = 0; |
1054 | int descend; | | 1092 | int descend; |
1055 | | | 1093 | |
1056 | curp = curl->l_proc; | | 1094 | curp = curl->l_proc; |
1057 | descend = ops & KTRFLAG_DESCEND; | | 1095 | descend = ops & KTRFLAG_DESCEND; |
1058 | facs = facs & ~((unsigned) KTRFAC_PERSISTENT); | | 1096 | facs = facs & ~((unsigned) KTRFAC_PERSISTENT); |
1059 | | | 1097 | |
1060 | (void)ktrenter(curl); | | 1098 | (void)ktrenter(curl); |
1061 | | | 1099 | |
1062 | switch (KTROP(ops)) { | | 1100 | switch (KTROP(ops)) { |
1063 | | | 1101 | |
1064 | case KTROP_CLEARFILE: | | 1102 | case KTROP_CLEARFILE: |
1065 | /* | | 1103 | /* |
1066 | * Clear all uses of the tracefile | | 1104 | * Clear all uses of the tracefile |
1067 | */ | | 1105 | */ |
1068 | mutex_enter(&ktrace_lock); | | 1106 | mutex_enter(&ktrace_lock); |
1069 | ktd = ktd_lookup(fp); | | 1107 | ktd = ktd_lookup(fp); |
1070 | mutex_exit(&ktrace_lock); | | 1108 | mutex_exit(&ktrace_lock); |
1071 | if (ktd == NULL) | | 1109 | if (ktd == NULL) |
1072 | goto done; | | 1110 | goto done; |
1073 | error = ktrderefall(ktd, 1); | | 1111 | error = ktrderefall(ktd, 1); |
1074 | goto done; | | 1112 | goto done; |
1075 | | | 1113 | |
1076 | case KTROP_SET: | | 1114 | case KTROP_SET: |
1077 | mutex_enter(&ktrace_lock); | | 1115 | mutex_enter(&ktrace_lock); |
1078 | ktd = ktd_lookup(fp); | | 1116 | ktd = ktd_lookup(fp); |
1079 | mutex_exit(&ktrace_lock); | | 1117 | mutex_exit(&ktrace_lock); |
1080 | if (ktd == NULL) { | | 1118 | if (ktd == NULL) { |
1081 | ktd = kmem_alloc(sizeof(*ktd), KM_SLEEP); | | 1119 | ktd = kmem_alloc(sizeof(*ktd), KM_SLEEP); |
1082 | TAILQ_INIT(&ktd->ktd_queue); | | 1120 | TAILQ_INIT(&ktd->ktd_queue); |
1083 | callout_init(&ktd->ktd_wakch, CALLOUT_MPSAFE); | | 1121 | callout_init(&ktd->ktd_wakch, CALLOUT_MPSAFE); |
1084 | cv_init(&ktd->ktd_cv, "ktrwait"); | | 1122 | cv_init(&ktd->ktd_cv, "ktrwait"); |
1085 | cv_init(&ktd->ktd_sync_cv, "ktrsync"); | | 1123 | cv_init(&ktd->ktd_sync_cv, "ktrsync"); |
1086 | ktd->ktd_flags = 0; | | 1124 | ktd->ktd_flags = 0; |
1087 | ktd->ktd_qcount = 0; | | 1125 | ktd->ktd_qcount = 0; |
1088 | ktd->ktd_error = 0; | | 1126 | ktd->ktd_error = 0; |
1089 | ktd->ktd_errcnt = 0; | | 1127 | ktd->ktd_errcnt = 0; |
1090 | ktd->ktd_delayqcnt = ktd_delayqcnt; | | 1128 | ktd->ktd_delayqcnt = ktd_delayqcnt; |
1091 | ktd->ktd_wakedelay = mstohz(ktd_wakedelay); | | 1129 | ktd->ktd_wakedelay = mstohz(ktd_wakedelay); |
1092 | ktd->ktd_intrwakdl = mstohz(ktd_intrwakdl); | | 1130 | ktd->ktd_intrwakdl = mstohz(ktd_intrwakdl); |
1093 | ktd->ktd_ref = 0; | | 1131 | ktd->ktd_ref = 0; |
1094 | ktd->ktd_fp = fp; | | 1132 | ktd->ktd_fp = fp; |
1095 | mutex_enter(&ktrace_lock); | | 1133 | mutex_enter(&ktrace_lock); |
1096 | ktdref(ktd); | | 1134 | ktdref(ktd); |
1097 | mutex_exit(&ktrace_lock); | | 1135 | mutex_exit(&ktrace_lock); |
1098 | | | 1136 | |
1099 | /* | | 1137 | /* |
1100 | * XXX: not correct. needs an way to detect | | 1138 | * XXX: not correct. needs an way to detect |
1101 | * whether ktruss or ktrace. | | 1139 | * whether ktruss or ktrace. |
1102 | */ | | 1140 | */ |
1103 | if (fp->f_type == DTYPE_PIPE) | | 1141 | if (fp->f_type == DTYPE_PIPE) |
1104 | ktd->ktd_flags |= KTDF_INTERACTIVE; | | 1142 | ktd->ktd_flags |= KTDF_INTERACTIVE; |
1105 | | | 1143 | |
1106 | mutex_enter(&fp->f_lock); | | 1144 | mutex_enter(&fp->f_lock); |
1107 | fp->f_count++; | | 1145 | fp->f_count++; |
1108 | mutex_exit(&fp->f_lock); | | 1146 | mutex_exit(&fp->f_lock); |
1109 | error = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL, | | 1147 | error = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL, |
1110 | ktrace_thread, ktd, &ktd->ktd_lwp, "ktrace"); | | 1148 | ktrace_thread, ktd, &ktd->ktd_lwp, "ktrace"); |
1111 | if (error != 0) { | | 1149 | if (error != 0) { |
1112 | kmem_free(ktd, sizeof(*ktd)); | | 1150 | kmem_free(ktd, sizeof(*ktd)); |
1113 | mutex_enter(&fp->f_lock); | | 1151 | mutex_enter(&fp->f_lock); |
1114 | fp->f_count--; | | 1152 | fp->f_count--; |
1115 | mutex_exit(&fp->f_lock); | | 1153 | mutex_exit(&fp->f_lock); |
1116 | goto done; | | 1154 | goto done; |
1117 | } | | 1155 | } |
1118 | | | 1156 | |
1119 | mutex_enter(&ktrace_lock); | | 1157 | mutex_enter(&ktrace_lock); |
1120 | if (ktd_lookup(fp) != NULL) { | | 1158 | if (ktd_lookup(fp) != NULL) { |
1121 | ktdrel(ktd); | | 1159 | ktdrel(ktd); |
1122 | ktd = NULL; | | 1160 | ktd = NULL; |
1123 | } else | | 1161 | } else |
1124 | TAILQ_INSERT_TAIL(&ktdq, ktd, ktd_list); | | 1162 | TAILQ_INSERT_TAIL(&ktdq, ktd, ktd_list); |
1125 | if (ktd == NULL) | | 1163 | if (ktd == NULL) |
1126 | cv_wait(&lbolt, &ktrace_lock); | | 1164 | cv_wait(&lbolt, &ktrace_lock); |
1127 | mutex_exit(&ktrace_lock); | | 1165 | mutex_exit(&ktrace_lock); |
1128 | if (ktd == NULL) | | 1166 | if (ktd == NULL) |
1129 | goto done; | | 1167 | goto done; |
1130 | } | | 1168 | } |
1131 | break; | | 1169 | break; |
1132 | | | 1170 | |
1133 | case KTROP_CLEAR: | | 1171 | case KTROP_CLEAR: |
1134 | break; | | 1172 | break; |
1135 | } | | 1173 | } |
1136 | | | 1174 | |
1137 | /* | | 1175 | /* |
1138 | * need something to (un)trace (XXX - why is this here?) | | 1176 | * need something to (un)trace (XXX - why is this here?) |
1139 | */ | | 1177 | */ |
1140 | if (!facs) { | | 1178 | if (!facs) { |
1141 | error = EINVAL; | | 1179 | error = EINVAL; |
1142 | goto done; | | 1180 | goto done; |
1143 | } | | 1181 | } |
1144 | | | 1182 | |
1145 | /* | | 1183 | /* |
1146 | * do it | | 1184 | * do it |
1147 | */ | | 1185 | */ |
1148 | mutex_enter(proc_lock); | | 1186 | mutex_enter(proc_lock); |
1149 | if (pid < 0) { | | 1187 | if (pid < 0) { |
1150 | /* | | 1188 | /* |
1151 | * by process group | | 1189 | * by process group |
1152 | */ | | 1190 | */ |
1153 | pg = pg_find(-pid, PFIND_LOCKED); | | 1191 | pg = pg_find(-pid, PFIND_LOCKED); |
1154 | if (pg == NULL) | | 1192 | if (pg == NULL) |
1155 | error = ESRCH; | | 1193 | error = ESRCH; |
1156 | else { | | 1194 | else { |
1157 | LIST_FOREACH(p, &pg->pg_members, p_pglist) { | | 1195 | LIST_FOREACH(p, &pg->pg_members, p_pglist) { |
1158 | if (descend) | | 1196 | if (descend) |
1159 | ret |= ktrsetchildren(curl, p, ops, | | 1197 | ret |= ktrsetchildren(curl, p, ops, |
1160 | facs, ktd); | | 1198 | facs, ktd); |
1161 | else | | 1199 | else |
1162 | ret |= ktrops(curl, p, ops, facs, | | 1200 | ret |= ktrops(curl, p, ops, facs, |
1163 | ktd); | | 1201 | ktd); |
1164 | } | | 1202 | } |
1165 | } | | 1203 | } |
1166 | | | 1204 | |
1167 | } else { | | 1205 | } else { |
1168 | /* | | 1206 | /* |
1169 | * by pid | | 1207 | * by pid |
1170 | */ | | 1208 | */ |
1171 | p = p_find(pid, PFIND_LOCKED); | | 1209 | p = p_find(pid, PFIND_LOCKED); |
1172 | if (p == NULL) | | 1210 | if (p == NULL) |
1173 | error = ESRCH; | | 1211 | error = ESRCH; |
1174 | else if (descend) | | 1212 | else if (descend) |
1175 | ret |= ktrsetchildren(curl, p, ops, facs, ktd); | | 1213 | ret |= ktrsetchildren(curl, p, ops, facs, ktd); |
1176 | else | | 1214 | else |
1177 | ret |= ktrops(curl, p, ops, facs, ktd); | | 1215 | ret |= ktrops(curl, p, ops, facs, ktd); |
1178 | } | | 1216 | } |
1179 | mutex_exit(proc_lock); | | 1217 | mutex_exit(proc_lock); |
1180 | if (error == 0 && !ret) | | 1218 | if (error == 0 && !ret) |
1181 | error = EPERM; | | 1219 | error = EPERM; |
1182 | done: | | 1220 | done: |
1183 | if (ktd != NULL) { | | 1221 | if (ktd != NULL) { |
1184 | mutex_enter(&ktrace_lock); | | 1222 | mutex_enter(&ktrace_lock); |
1185 | if (error != 0) { | | 1223 | if (error != 0) { |
1186 | /* | | 1224 | /* |
1187 | * Wakeup the thread so that it can be die if we | | 1225 | * Wakeup the thread so that it can be die if we |
1188 | * can't trace any process. | | 1226 | * can't trace any process. |
1189 | */ | | 1227 | */ |
1190 | ktd_wakeup(ktd); | | 1228 | ktd_wakeup(ktd); |
1191 | } | | 1229 | } |
1192 | if (KTROP(ops) == KTROP_SET || KTROP(ops) == KTROP_CLEARFILE) | | 1230 | if (KTROP(ops) == KTROP_SET || KTROP(ops) == KTROP_CLEARFILE) |
1193 | ktdrel(ktd); | | 1231 | ktdrel(ktd); |
1194 | mutex_exit(&ktrace_lock); | | 1232 | mutex_exit(&ktrace_lock); |
1195 | } | | 1233 | } |
1196 | ktrexit(curl); | | 1234 | ktrexit(curl); |
1197 | return (error); | | 1235 | return (error); |
1198 | } | | 1236 | } |
1199 | | | 1237 | |
1200 | /* | | 1238 | /* |
1201 | * fktrace system call | | 1239 | * fktrace system call |
1202 | */ | | 1240 | */ |
1203 | /* ARGSUSED */ | | 1241 | /* ARGSUSED */ |
1204 | int | | 1242 | int |
1205 | sys_fktrace(struct lwp *l, const struct sys_fktrace_args *uap, register_t *retval) | | 1243 | sys_fktrace(struct lwp *l, const struct sys_fktrace_args *uap, register_t *retval) |
1206 | { | | 1244 | { |
1207 | /* { | | 1245 | /* { |
1208 | syscallarg(int) fd; | | 1246 | syscallarg(int) fd; |
1209 | syscallarg(int) ops; | | 1247 | syscallarg(int) ops; |
1210 | syscallarg(int) facs; | | 1248 | syscallarg(int) facs; |
1211 | syscallarg(int) pid; | | 1249 | syscallarg(int) pid; |
1212 | } */ | | 1250 | } */ |
1213 | file_t *fp; | | 1251 | file_t *fp; |
1214 | int error, fd; | | 1252 | int error, fd; |
1215 | | | 1253 | |
1216 | fd = SCARG(uap, fd); | | 1254 | fd = SCARG(uap, fd); |
1217 | if ((fp = fd_getfile(fd)) == NULL) | | 1255 | if ((fp = fd_getfile(fd)) == NULL) |
1218 | return (EBADF); | | 1256 | return (EBADF); |
1219 | if ((fp->f_flag & FWRITE) == 0) | | 1257 | if ((fp->f_flag & FWRITE) == 0) |
1220 | error = EBADF; | | 1258 | error = EBADF; |
1221 | else | | 1259 | else |
1222 | error = ktrace_common(l, SCARG(uap, ops), | | 1260 | error = ktrace_common(l, SCARG(uap, ops), |
1223 | SCARG(uap, facs), SCARG(uap, pid), fp); | | 1261 | SCARG(uap, facs), SCARG(uap, pid), fp); |
1224 | fd_putfile(fd); | | 1262 | fd_putfile(fd); |
1225 | return error; | | 1263 | return error; |
1226 | } | | 1264 | } |
1227 | | | 1265 | |
1228 | /* | | 1266 | /* |
1229 | * ktrace system call | | 1267 | * ktrace system call |
1230 | */ | | 1268 | */ |
1231 | /* ARGSUSED */ | | 1269 | /* ARGSUSED */ |
1232 | int | | 1270 | int |
1233 | sys_ktrace(struct lwp *l, const struct sys_ktrace_args *uap, register_t *retval) | | 1271 | sys_ktrace(struct lwp *l, const struct sys_ktrace_args *uap, register_t *retval) |
1234 | { | | 1272 | { |
1235 | /* { | | 1273 | /* { |
1236 | syscallarg(const char *) fname; | | 1274 | syscallarg(const char *) fname; |
1237 | syscallarg(int) ops; | | 1275 | syscallarg(int) ops; |
1238 | syscallarg(int) facs; | | 1276 | syscallarg(int) facs; |
1239 | syscallarg(int) pid; | | 1277 | syscallarg(int) pid; |
1240 | } */ | | 1278 | } */ |
1241 | struct vnode *vp = NULL; | | 1279 | struct vnode *vp = NULL; |
1242 | file_t *fp = NULL; | | 1280 | file_t *fp = NULL; |
1243 | struct nameidata nd; | | 1281 | struct nameidata nd; |
1244 | int error = 0; | | 1282 | int error = 0; |
1245 | int fd; | | 1283 | int fd; |
1246 | | | 1284 | |
1247 | if (ktrenter(l)) | | 1285 | if (ktrenter(l)) |
1248 | return EAGAIN; | | 1286 | return EAGAIN; |