| @@ -1,1592 +1,1592 @@ | | | @@ -1,1592 +1,1592 @@ |
1 | /* $NetBSD: kern_ktrace.c,v 1.160.2.1 2014/12/07 15:09:31 martin Exp $ */ | | 1 | /* $NetBSD: kern_ktrace.c,v 1.160.2.2 2017/08/19 04:24:23 snj Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc. | | 4 | * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation | | 7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Andrew Doran. | | 8 | * by Andrew Doran. |
9 | * | | 9 | * |
10 | * Redistribution and use in source and binary forms, with or without | | 10 | * Redistribution and use in source and binary forms, with or without |
11 | * modification, are permitted provided that the following conditions | | 11 | * modification, are permitted provided that the following conditions |
12 | * are met: | | 12 | * are met: |
13 | * 1. Redistributions of source code must retain the above copyright | | 13 | * 1. Redistributions of source code must retain the above copyright |
14 | * notice, this list of conditions and the following disclaimer. | | 14 | * notice, this list of conditions and the following disclaimer. |
15 | * 2. Redistributions in binary form must reproduce the above copyright | | 15 | * 2. Redistributions in binary form must reproduce the above copyright |
16 | * notice, this list of conditions and the following disclaimer in the | | 16 | * notice, this list of conditions and the following disclaimer in the |
17 | * documentation and/or other materials provided with the distribution. | | 17 | * documentation and/or other materials provided with the distribution. |
18 | * | | 18 | * |
19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
29 | * POSSIBILITY OF SUCH DAMAGE. | | 29 | * POSSIBILITY OF SUCH DAMAGE. |
30 | */ | | 30 | */ |
31 | | | 31 | |
32 | /* | | 32 | /* |
33 | * Copyright (c) 1989, 1993 | | 33 | * Copyright (c) 1989, 1993 |
34 | * The Regents of the University of California. All rights reserved. | | 34 | * The Regents of the University of California. All rights reserved. |
35 | * | | 35 | * |
36 | * Redistribution and use in source and binary forms, with or without | | 36 | * Redistribution and use in source and binary forms, with or without |
37 | * modification, are permitted provided that the following conditions | | 37 | * modification, are permitted provided that the following conditions |
38 | * are met: | | 38 | * are met: |
39 | * 1. Redistributions of source code must retain the above copyright | | 39 | * 1. Redistributions of source code must retain the above copyright |
40 | * notice, this list of conditions and the following disclaimer. | | 40 | * notice, this list of conditions and the following disclaimer. |
41 | * 2. Redistributions in binary form must reproduce the above copyright | | 41 | * 2. Redistributions in binary form must reproduce the above copyright |
42 | * notice, this list of conditions and the following disclaimer in the | | 42 | * notice, this list of conditions and the following disclaimer in the |
43 | * documentation and/or other materials provided with the distribution. | | 43 | * documentation and/or other materials provided with the distribution. |
44 | * 3. Neither the name of the University nor the names of its contributors | | 44 | * 3. Neither the name of the University nor the names of its contributors |
45 | * may be used to endorse or promote products derived from this software | | 45 | * may be used to endorse or promote products derived from this software |
46 | * without specific prior written permission. | | 46 | * without specific prior written permission. |
47 | * | | 47 | * |
48 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | | 48 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
49 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | | 49 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
50 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | | 50 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
51 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | | 51 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
52 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | | 52 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
53 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | | 53 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
54 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | | 54 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
55 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | | 55 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
56 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | | 56 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
57 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | | 57 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
58 | * SUCH DAMAGE. | | 58 | * SUCH DAMAGE. |
59 | * | | 59 | * |
60 | * @(#)kern_ktrace.c 8.5 (Berkeley) 5/14/95 | | 60 | * @(#)kern_ktrace.c 8.5 (Berkeley) 5/14/95 |
61 | */ | | 61 | */ |
62 | | | 62 | |
63 | #include <sys/cdefs.h> | | 63 | #include <sys/cdefs.h> |
64 | __KERNEL_RCSID(0, "$NetBSD: kern_ktrace.c,v 1.160.2.1 2014/12/07 15:09:31 martin Exp $"); | | 64 | __KERNEL_RCSID(0, "$NetBSD: kern_ktrace.c,v 1.160.2.2 2017/08/19 04:24:23 snj Exp $"); |
65 | | | 65 | |
66 | #include <sys/param.h> | | 66 | #include <sys/param.h> |
67 | #include <sys/systm.h> | | 67 | #include <sys/systm.h> |
68 | #include <sys/proc.h> | | 68 | #include <sys/proc.h> |
69 | #include <sys/file.h> | | 69 | #include <sys/file.h> |
70 | #include <sys/namei.h> | | 70 | #include <sys/namei.h> |
71 | #include <sys/vnode.h> | | 71 | #include <sys/vnode.h> |
72 | #include <sys/kernel.h> | | 72 | #include <sys/kernel.h> |
73 | #include <sys/kthread.h> | | 73 | #include <sys/kthread.h> |
74 | #include <sys/ktrace.h> | | 74 | #include <sys/ktrace.h> |
75 | #include <sys/kmem.h> | | 75 | #include <sys/kmem.h> |
76 | #include <sys/syslog.h> | | 76 | #include <sys/syslog.h> |
77 | #include <sys/filedesc.h> | | 77 | #include <sys/filedesc.h> |
78 | #include <sys/ioctl.h> | | 78 | #include <sys/ioctl.h> |
79 | #include <sys/callout.h> | | 79 | #include <sys/callout.h> |
80 | #include <sys/kauth.h> | | 80 | #include <sys/kauth.h> |
81 | | | 81 | |
82 | #include <sys/mount.h> | | 82 | #include <sys/mount.h> |
83 | #include <sys/sa.h> | | 83 | #include <sys/sa.h> |
84 | #include <sys/syscallargs.h> | | 84 | #include <sys/syscallargs.h> |
85 | | | 85 | |
86 | /* | | 86 | /* |
87 | * TODO: | | 87 | * TODO: |
88 | * - need better error reporting? | | 88 | * - need better error reporting? |
89 | * - userland utility to sort ktrace.out by timestamp. | | 89 | * - userland utility to sort ktrace.out by timestamp. |
90 | * - keep minimum information in ktrace_entry when rest of alloc failed. | | 90 | * - keep minimum information in ktrace_entry when rest of alloc failed. |
91 | * - per trace control of configurable parameters. | | 91 | * - per trace control of configurable parameters. |
92 | */ | | 92 | */ |
93 | | | 93 | |
94 | struct ktrace_entry { | | 94 | struct ktrace_entry { |
95 | TAILQ_ENTRY(ktrace_entry) kte_list; | | 95 | TAILQ_ENTRY(ktrace_entry) kte_list; |
96 | struct ktr_header kte_kth; | | 96 | struct ktr_header kte_kth; |
97 | void *kte_buf; | | 97 | void *kte_buf; |
98 | size_t kte_bufsz; | | 98 | size_t kte_bufsz; |
99 | #define KTE_SPACE 32 | | 99 | #define KTE_SPACE 32 |
100 | uint8_t kte_space[KTE_SPACE] __aligned(sizeof(register_t)); | | 100 | uint8_t kte_space[KTE_SPACE] __aligned(sizeof(register_t)); |
101 | }; | | 101 | }; |
102 | | | 102 | |
103 | struct ktr_desc { | | 103 | struct ktr_desc { |
104 | TAILQ_ENTRY(ktr_desc) ktd_list; | | 104 | TAILQ_ENTRY(ktr_desc) ktd_list; |
105 | int ktd_flags; | | 105 | int ktd_flags; |
106 | #define KTDF_WAIT 0x0001 | | 106 | #define KTDF_WAIT 0x0001 |
107 | #define KTDF_DONE 0x0002 | | 107 | #define KTDF_DONE 0x0002 |
108 | #define KTDF_BLOCKING 0x0004 | | 108 | #define KTDF_BLOCKING 0x0004 |
109 | #define KTDF_INTERACTIVE 0x0008 | | 109 | #define KTDF_INTERACTIVE 0x0008 |
110 | int ktd_error; | | 110 | int ktd_error; |
111 | #define KTDE_ENOMEM 0x0001 | | 111 | #define KTDE_ENOMEM 0x0001 |
112 | #define KTDE_ENOSPC 0x0002 | | 112 | #define KTDE_ENOSPC 0x0002 |
113 | int ktd_errcnt; | | 113 | int ktd_errcnt; |
114 | int ktd_ref; /* # of reference */ | | 114 | int ktd_ref; /* # of reference */ |
115 | int ktd_qcount; /* # of entry in the queue */ | | 115 | int ktd_qcount; /* # of entry in the queue */ |
116 | | | 116 | |
117 | /* | | 117 | /* |
118 | * Params to control behaviour. | | 118 | * Params to control behaviour. |
119 | */ | | 119 | */ |
120 | int ktd_delayqcnt; /* # of entry allowed to delay */ | | 120 | int ktd_delayqcnt; /* # of entry allowed to delay */ |
121 | int ktd_wakedelay; /* delay of wakeup in *tick* */ | | 121 | int ktd_wakedelay; /* delay of wakeup in *tick* */ |
122 | int ktd_intrwakdl; /* ditto, but when interactive */ | | 122 | int ktd_intrwakdl; /* ditto, but when interactive */ |
123 | | | 123 | |
124 | file_t *ktd_fp; /* trace output file */ | | 124 | file_t *ktd_fp; /* trace output file */ |
125 | lwp_t *ktd_lwp; /* our kernel thread */ | | 125 | lwp_t *ktd_lwp; /* our kernel thread */ |
126 | TAILQ_HEAD(, ktrace_entry) ktd_queue; | | 126 | TAILQ_HEAD(, ktrace_entry) ktd_queue; |
127 | callout_t ktd_wakch; /* delayed wakeup */ | | 127 | callout_t ktd_wakch; /* delayed wakeup */ |
128 | kcondvar_t ktd_sync_cv; | | 128 | kcondvar_t ktd_sync_cv; |
129 | kcondvar_t ktd_cv; | | 129 | kcondvar_t ktd_cv; |
130 | }; | | 130 | }; |
131 | | | 131 | |
132 | static int ktealloc(struct ktrace_entry **, void **, lwp_t *, int, | | 132 | static int ktealloc(struct ktrace_entry **, void **, lwp_t *, int, |
133 | size_t); | | 133 | size_t); |
134 | static void ktrwrite(struct ktr_desc *, struct ktrace_entry *); | | 134 | static void ktrwrite(struct ktr_desc *, struct ktrace_entry *); |
135 | static int ktrace_common(lwp_t *, int, int, int, file_t **); | | 135 | static int ktrace_common(lwp_t *, int, int, int, file_t **); |
136 | static int ktrops(lwp_t *, struct proc *, int, int, | | 136 | static int ktrops(lwp_t *, struct proc *, int, int, |
137 | struct ktr_desc *); | | 137 | struct ktr_desc *); |
138 | static int ktrsetchildren(lwp_t *, struct proc *, int, int, | | 138 | static int ktrsetchildren(lwp_t *, struct proc *, int, int, |
139 | struct ktr_desc *); | | 139 | struct ktr_desc *); |
140 | static int ktrcanset(lwp_t *, struct proc *); | | 140 | static int ktrcanset(lwp_t *, struct proc *); |
141 | static int ktrsamefile(file_t *, file_t *); | | 141 | static int ktrsamefile(file_t *, file_t *); |
142 | static void ktr_kmem(lwp_t *, int, const void *, size_t); | | 142 | static void ktr_kmem(lwp_t *, int, const void *, size_t); |
143 | static void ktr_io(lwp_t *, int, enum uio_rw, struct iovec *, size_t); | | 143 | static void ktr_io(lwp_t *, int, enum uio_rw, struct iovec *, size_t); |
144 | | | 144 | |
145 | static struct ktr_desc * | | 145 | static struct ktr_desc * |
146 | ktd_lookup(file_t *); | | 146 | ktd_lookup(file_t *); |
147 | static void ktdrel(struct ktr_desc *); | | 147 | static void ktdrel(struct ktr_desc *); |
148 | static void ktdref(struct ktr_desc *); | | 148 | static void ktdref(struct ktr_desc *); |
149 | static void ktraddentry(lwp_t *, struct ktrace_entry *, int); | | 149 | static void ktraddentry(lwp_t *, struct ktrace_entry *, int); |
150 | /* Flags for ktraddentry (3rd arg) */ | | 150 | /* Flags for ktraddentry (3rd arg) */ |
151 | #define KTA_NOWAIT 0x0000 | | 151 | #define KTA_NOWAIT 0x0000 |
152 | #define KTA_WAITOK 0x0001 | | 152 | #define KTA_WAITOK 0x0001 |
153 | #define KTA_LARGE 0x0002 | | 153 | #define KTA_LARGE 0x0002 |
154 | static void ktefree(struct ktrace_entry *); | | 154 | static void ktefree(struct ktrace_entry *); |
155 | static void ktd_logerrl(struct ktr_desc *, int); | | 155 | static void ktd_logerrl(struct ktr_desc *, int); |
156 | static void ktrace_thread(void *); | | 156 | static void ktrace_thread(void *); |
157 | static int ktrderefall(struct ktr_desc *, int); | | 157 | static int ktrderefall(struct ktr_desc *, int); |
158 | | | 158 | |
159 | /* | | 159 | /* |
160 | * Default vaules. | | 160 | * Default vaules. |
161 | */ | | 161 | */ |
162 | #define KTD_MAXENTRY 1000 /* XXX: tune */ | | 162 | #define KTD_MAXENTRY 1000 /* XXX: tune */ |
163 | #define KTD_TIMEOUT 5 /* XXX: tune */ | | 163 | #define KTD_TIMEOUT 5 /* XXX: tune */ |
164 | #define KTD_DELAYQCNT 100 /* XXX: tune */ | | 164 | #define KTD_DELAYQCNT 100 /* XXX: tune */ |
165 | #define KTD_WAKEDELAY 5000 /* XXX: tune */ | | 165 | #define KTD_WAKEDELAY 5000 /* XXX: tune */ |
166 | #define KTD_INTRWAKDL 100 /* XXX: tune */ | | 166 | #define KTD_INTRWAKDL 100 /* XXX: tune */ |
167 | | | 167 | |
168 | /* | | 168 | /* |
169 | * Patchable variables. | | 169 | * Patchable variables. |
170 | */ | | 170 | */ |
171 | int ktd_maxentry = KTD_MAXENTRY; /* max # of entry in the queue */ | | 171 | int ktd_maxentry = KTD_MAXENTRY; /* max # of entry in the queue */ |
172 | int ktd_timeout = KTD_TIMEOUT; /* timeout in seconds */ | | 172 | int ktd_timeout = KTD_TIMEOUT; /* timeout in seconds */ |
173 | int ktd_delayqcnt = KTD_DELAYQCNT; /* # of entry allowed to delay */ | | 173 | int ktd_delayqcnt = KTD_DELAYQCNT; /* # of entry allowed to delay */ |
174 | int ktd_wakedelay = KTD_WAKEDELAY; /* delay of wakeup in *ms* */ | | 174 | int ktd_wakedelay = KTD_WAKEDELAY; /* delay of wakeup in *ms* */ |
175 | int ktd_intrwakdl = KTD_INTRWAKDL; /* ditto, but when interactive */ | | 175 | int ktd_intrwakdl = KTD_INTRWAKDL; /* ditto, but when interactive */ |
176 | | | 176 | |
177 | kmutex_t ktrace_lock; | | 177 | kmutex_t ktrace_lock; |
178 | int ktrace_on; | | 178 | int ktrace_on; |
179 | static TAILQ_HEAD(, ktr_desc) ktdq = TAILQ_HEAD_INITIALIZER(ktdq); | | 179 | static TAILQ_HEAD(, ktr_desc) ktdq = TAILQ_HEAD_INITIALIZER(ktdq); |
180 | static pool_cache_t kte_cache; | | 180 | static pool_cache_t kte_cache; |
181 | | | 181 | |
182 | static kauth_listener_t ktrace_listener; | | 182 | static kauth_listener_t ktrace_listener; |
183 | | | 183 | |
184 | static void | | 184 | static void |
185 | ktd_wakeup(struct ktr_desc *ktd) | | 185 | ktd_wakeup(struct ktr_desc *ktd) |
186 | { | | 186 | { |
187 | | | 187 | |
188 | callout_stop(&ktd->ktd_wakch); | | 188 | callout_stop(&ktd->ktd_wakch); |
189 | cv_signal(&ktd->ktd_cv); | | 189 | cv_signal(&ktd->ktd_cv); |
190 | } | | 190 | } |
191 | | | 191 | |
192 | static void | | 192 | static void |
193 | ktd_callout(void *arg) | | 193 | ktd_callout(void *arg) |
194 | { | | 194 | { |
195 | | | 195 | |
196 | mutex_enter(&ktrace_lock); | | 196 | mutex_enter(&ktrace_lock); |
197 | ktd_wakeup(arg); | | 197 | ktd_wakeup(arg); |
198 | mutex_exit(&ktrace_lock); | | 198 | mutex_exit(&ktrace_lock); |
199 | } | | 199 | } |
200 | | | 200 | |
201 | static void | | 201 | static void |
202 | ktd_logerrl(struct ktr_desc *ktd, int error) | | 202 | ktd_logerrl(struct ktr_desc *ktd, int error) |
203 | { | | 203 | { |
204 | | | 204 | |
205 | ktd->ktd_error |= error; | | 205 | ktd->ktd_error |= error; |
206 | ktd->ktd_errcnt++; | | 206 | ktd->ktd_errcnt++; |
207 | } | | 207 | } |
208 | | | 208 | |
209 | #if 0 | | 209 | #if 0 |
210 | static void | | 210 | static void |
211 | ktd_logerr(struct proc *p, int error) | | 211 | ktd_logerr(struct proc *p, int error) |
212 | { | | 212 | { |
213 | struct ktr_desc *ktd; | | 213 | struct ktr_desc *ktd; |
214 | | | 214 | |
215 | KASSERT(mutex_owned(&ktrace_lock)); | | 215 | KASSERT(mutex_owned(&ktrace_lock)); |
216 | | | 216 | |
217 | ktd = p->p_tracep; | | 217 | ktd = p->p_tracep; |
218 | if (ktd == NULL) | | 218 | if (ktd == NULL) |
219 | return; | | 219 | return; |
220 | | | 220 | |
221 | ktd_logerrl(ktd, error); | | 221 | ktd_logerrl(ktd, error); |
222 | } | | 222 | } |
223 | #endif | | 223 | #endif |
224 | | | 224 | |
225 | static inline int | | 225 | static inline int |
226 | ktrenter(lwp_t *l) | | 226 | ktrenter(lwp_t *l) |
227 | { | | 227 | { |
228 | | | 228 | |
229 | if ((l->l_pflag & LP_KTRACTIVE) != 0) | | 229 | if ((l->l_pflag & LP_KTRACTIVE) != 0) |
230 | return 1; | | 230 | return 1; |
231 | l->l_pflag |= LP_KTRACTIVE; | | 231 | l->l_pflag |= LP_KTRACTIVE; |
232 | return 0; | | 232 | return 0; |
233 | } | | 233 | } |
234 | | | 234 | |
235 | static inline void | | 235 | static inline void |
236 | ktrexit(lwp_t *l) | | 236 | ktrexit(lwp_t *l) |
237 | { | | 237 | { |
238 | | | 238 | |
239 | l->l_pflag &= ~LP_KTRACTIVE; | | 239 | l->l_pflag &= ~LP_KTRACTIVE; |
240 | } | | 240 | } |
241 | | | 241 | |
242 | static int | | 242 | static int |
243 | ktrace_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie, | | 243 | ktrace_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie, |
244 | void *arg0, void *arg1, void *arg2, void *arg3) | | 244 | void *arg0, void *arg1, void *arg2, void *arg3) |
245 | { | | 245 | { |
246 | struct proc *p; | | 246 | struct proc *p; |
247 | int result; | | 247 | int result; |
248 | enum kauth_process_req req; | | 248 | enum kauth_process_req req; |
249 | | | 249 | |
250 | result = KAUTH_RESULT_DEFER; | | 250 | result = KAUTH_RESULT_DEFER; |
251 | p = arg0; | | 251 | p = arg0; |
252 | | | 252 | |
253 | if (action != KAUTH_PROCESS_KTRACE) | | 253 | if (action != KAUTH_PROCESS_KTRACE) |
254 | return result; | | 254 | return result; |
255 | | | 255 | |
256 | req = (enum kauth_process_req)(unsigned long)arg1; | | 256 | req = (enum kauth_process_req)(unsigned long)arg1; |
257 | | | 257 | |
258 | /* Privileged; secmodel should handle these. */ | | 258 | /* Privileged; secmodel should handle these. */ |
259 | if (req == KAUTH_REQ_PROCESS_KTRACE_PERSISTENT) | | 259 | if (req == KAUTH_REQ_PROCESS_KTRACE_PERSISTENT) |
260 | return result; | | 260 | return result; |
261 | | | 261 | |
262 | if ((p->p_traceflag & KTRFAC_PERSISTENT) || | | 262 | if ((p->p_traceflag & KTRFAC_PERSISTENT) || |
263 | (p->p_flag & PK_SUGID)) | | 263 | (p->p_flag & PK_SUGID)) |
264 | return result; | | 264 | return result; |
265 | | | 265 | |
266 | if (kauth_cred_geteuid(cred) == kauth_cred_getuid(p->p_cred) && | | 266 | if (kauth_cred_geteuid(cred) == kauth_cred_getuid(p->p_cred) && |
267 | kauth_cred_getuid(cred) == kauth_cred_getsvuid(p->p_cred) && | | 267 | kauth_cred_getuid(cred) == kauth_cred_getsvuid(p->p_cred) && |
268 | kauth_cred_getgid(cred) == kauth_cred_getgid(p->p_cred) && | | 268 | kauth_cred_getgid(cred) == kauth_cred_getgid(p->p_cred) && |
269 | kauth_cred_getgid(cred) == kauth_cred_getsvgid(p->p_cred)) | | 269 | kauth_cred_getgid(cred) == kauth_cred_getsvgid(p->p_cred)) |
270 | result = KAUTH_RESULT_ALLOW; | | 270 | result = KAUTH_RESULT_ALLOW; |
271 | | | 271 | |
272 | return result; | | 272 | return result; |
273 | } | | 273 | } |
274 | | | 274 | |
275 | /* | | 275 | /* |
276 | * Initialise the ktrace system. | | 276 | * Initialise the ktrace system. |
277 | */ | | 277 | */ |
278 | void | | 278 | void |
279 | ktrinit(void) | | 279 | ktrinit(void) |
280 | { | | 280 | { |
281 | | | 281 | |
282 | mutex_init(&ktrace_lock, MUTEX_DEFAULT, IPL_NONE); | | 282 | mutex_init(&ktrace_lock, MUTEX_DEFAULT, IPL_NONE); |
283 | kte_cache = pool_cache_init(sizeof(struct ktrace_entry), 0, 0, 0, | | 283 | kte_cache = pool_cache_init(sizeof(struct ktrace_entry), 0, 0, 0, |
284 | "ktrace", &pool_allocator_nointr, IPL_NONE, NULL, NULL, NULL); | | 284 | "ktrace", &pool_allocator_nointr, IPL_NONE, NULL, NULL, NULL); |
285 | | | 285 | |
286 | ktrace_listener = kauth_listen_scope(KAUTH_SCOPE_PROCESS, | | 286 | ktrace_listener = kauth_listen_scope(KAUTH_SCOPE_PROCESS, |
287 | ktrace_listener_cb, NULL); | | 287 | ktrace_listener_cb, NULL); |
288 | } | | 288 | } |
289 | | | 289 | |
290 | /* | | 290 | /* |
291 | * Release a reference. Called with ktrace_lock held. | | 291 | * Release a reference. Called with ktrace_lock held. |
292 | */ | | 292 | */ |
293 | void | | 293 | void |
294 | ktdrel(struct ktr_desc *ktd) | | 294 | ktdrel(struct ktr_desc *ktd) |
295 | { | | 295 | { |
296 | | | 296 | |
297 | KASSERT(mutex_owned(&ktrace_lock)); | | 297 | KASSERT(mutex_owned(&ktrace_lock)); |
298 | | | 298 | |
299 | KDASSERT(ktd->ktd_ref != 0); | | 299 | KDASSERT(ktd->ktd_ref != 0); |
300 | KASSERT(ktd->ktd_ref > 0); | | 300 | KASSERT(ktd->ktd_ref > 0); |
301 | KASSERT(ktrace_on > 0); | | 301 | KASSERT(ktrace_on > 0); |
302 | ktrace_on--; | | 302 | ktrace_on--; |
303 | if (--ktd->ktd_ref <= 0) { | | 303 | if (--ktd->ktd_ref <= 0) { |
304 | ktd->ktd_flags |= KTDF_DONE; | | 304 | ktd->ktd_flags |= KTDF_DONE; |
305 | cv_signal(&ktd->ktd_cv); | | 305 | cv_signal(&ktd->ktd_cv); |
306 | } | | 306 | } |
307 | } | | 307 | } |
308 | | | 308 | |
309 | void | | 309 | void |
310 | ktdref(struct ktr_desc *ktd) | | 310 | ktdref(struct ktr_desc *ktd) |
311 | { | | 311 | { |
312 | | | 312 | |
313 | KASSERT(mutex_owned(&ktrace_lock)); | | 313 | KASSERT(mutex_owned(&ktrace_lock)); |
314 | | | 314 | |
315 | ktd->ktd_ref++; | | 315 | ktd->ktd_ref++; |
316 | ktrace_on++; | | 316 | ktrace_on++; |
317 | } | | 317 | } |
318 | | | 318 | |
319 | struct ktr_desc * | | 319 | struct ktr_desc * |
320 | ktd_lookup(file_t *fp) | | 320 | ktd_lookup(file_t *fp) |
321 | { | | 321 | { |
322 | struct ktr_desc *ktd; | | 322 | struct ktr_desc *ktd; |
323 | | | 323 | |
324 | KASSERT(mutex_owned(&ktrace_lock)); | | 324 | KASSERT(mutex_owned(&ktrace_lock)); |
325 | | | 325 | |
326 | for (ktd = TAILQ_FIRST(&ktdq); ktd != NULL; | | 326 | for (ktd = TAILQ_FIRST(&ktdq); ktd != NULL; |
327 | ktd = TAILQ_NEXT(ktd, ktd_list)) { | | 327 | ktd = TAILQ_NEXT(ktd, ktd_list)) { |
328 | if (ktrsamefile(ktd->ktd_fp, fp)) { | | 328 | if (ktrsamefile(ktd->ktd_fp, fp)) { |
329 | ktdref(ktd); | | 329 | ktdref(ktd); |
330 | break; | | 330 | break; |
331 | } | | 331 | } |
332 | } | | 332 | } |
333 | | | 333 | |
334 | return (ktd); | | 334 | return (ktd); |
335 | } | | 335 | } |
336 | | | 336 | |
337 | void | | 337 | void |
338 | ktraddentry(lwp_t *l, struct ktrace_entry *kte, int flags) | | 338 | ktraddentry(lwp_t *l, struct ktrace_entry *kte, int flags) |
339 | { | | 339 | { |
340 | struct proc *p = l->l_proc; | | 340 | struct proc *p = l->l_proc; |
341 | struct ktr_desc *ktd; | | 341 | struct ktr_desc *ktd; |
342 | #ifdef DEBUG | | 342 | #ifdef DEBUG |
343 | struct timeval t1, t2; | | 343 | struct timeval t1, t2; |
344 | #endif | | 344 | #endif |
345 | | | 345 | |
346 | mutex_enter(&ktrace_lock); | | 346 | mutex_enter(&ktrace_lock); |
347 | | | 347 | |
348 | if (p->p_traceflag & KTRFAC_TRC_EMUL) { | | 348 | if (p->p_traceflag & KTRFAC_TRC_EMUL) { |
349 | /* Add emulation trace before first entry for this process */ | | 349 | /* Add emulation trace before first entry for this process */ |
350 | p->p_traceflag &= ~KTRFAC_TRC_EMUL; | | 350 | p->p_traceflag &= ~KTRFAC_TRC_EMUL; |
351 | mutex_exit(&ktrace_lock); | | 351 | mutex_exit(&ktrace_lock); |
352 | ktrexit(l); | | 352 | ktrexit(l); |
353 | ktremul(); | | 353 | ktremul(); |
354 | (void)ktrenter(l); | | 354 | (void)ktrenter(l); |
355 | mutex_enter(&ktrace_lock); | | 355 | mutex_enter(&ktrace_lock); |
356 | } | | 356 | } |
357 | | | 357 | |
358 | /* Tracing may have been cancelled. */ | | 358 | /* Tracing may have been cancelled. */ |
359 | ktd = p->p_tracep; | | 359 | ktd = p->p_tracep; |
360 | if (ktd == NULL) | | 360 | if (ktd == NULL) |
361 | goto freekte; | | 361 | goto freekte; |
362 | | | 362 | |
363 | /* | | 363 | /* |
364 | * Bump reference count so that the object will remain while | | 364 | * Bump reference count so that the object will remain while |
365 | * we are here. Note that the trace is controlled by other | | 365 | * we are here. Note that the trace is controlled by other |
366 | * process. | | 366 | * process. |
367 | */ | | 367 | */ |
368 | ktdref(ktd); | | 368 | ktdref(ktd); |
369 | | | 369 | |
370 | if (ktd->ktd_flags & KTDF_DONE) | | 370 | if (ktd->ktd_flags & KTDF_DONE) |
371 | goto relktd; | | 371 | goto relktd; |
372 | | | 372 | |
373 | if (ktd->ktd_qcount > ktd_maxentry) { | | 373 | if (ktd->ktd_qcount > ktd_maxentry) { |
374 | ktd_logerrl(ktd, KTDE_ENOSPC); | | 374 | ktd_logerrl(ktd, KTDE_ENOSPC); |
375 | goto relktd; | | 375 | goto relktd; |
376 | } | | 376 | } |
377 | TAILQ_INSERT_TAIL(&ktd->ktd_queue, kte, kte_list); | | 377 | TAILQ_INSERT_TAIL(&ktd->ktd_queue, kte, kte_list); |
378 | ktd->ktd_qcount++; | | 378 | ktd->ktd_qcount++; |
379 | if (ktd->ktd_flags & KTDF_BLOCKING) | | 379 | if (ktd->ktd_flags & KTDF_BLOCKING) |
380 | goto skip_sync; | | 380 | goto skip_sync; |
381 | | | 381 | |
382 | if (flags & KTA_WAITOK && | | 382 | if (flags & KTA_WAITOK && |
383 | (/* flags & KTA_LARGE */0 || ktd->ktd_flags & KTDF_WAIT || | | 383 | (/* flags & KTA_LARGE */0 || ktd->ktd_flags & KTDF_WAIT || |
384 | ktd->ktd_qcount > ktd_maxentry >> 1)) | | 384 | ktd->ktd_qcount > ktd_maxentry >> 1)) |
385 | /* | | 385 | /* |
386 | * Sync with writer thread since we're requesting rather | | 386 | * Sync with writer thread since we're requesting rather |
387 | * big one or many requests are pending. | | 387 | * big one or many requests are pending. |
388 | */ | | 388 | */ |
389 | do { | | 389 | do { |
390 | ktd->ktd_flags |= KTDF_WAIT; | | 390 | ktd->ktd_flags |= KTDF_WAIT; |
391 | ktd_wakeup(ktd); | | 391 | ktd_wakeup(ktd); |
392 | #ifdef DEBUG | | 392 | #ifdef DEBUG |
393 | getmicrouptime(&t1); | | 393 | getmicrouptime(&t1); |
394 | #endif | | 394 | #endif |
395 | if (cv_timedwait(&ktd->ktd_sync_cv, &ktrace_lock, | | 395 | if (cv_timedwait(&ktd->ktd_sync_cv, &ktrace_lock, |
396 | ktd_timeout * hz) != 0) { | | 396 | ktd_timeout * hz) != 0) { |
397 | ktd->ktd_flags |= KTDF_BLOCKING; | | 397 | ktd->ktd_flags |= KTDF_BLOCKING; |
398 | /* | | 398 | /* |
399 | * Maybe the writer thread is blocking | | 399 | * Maybe the writer thread is blocking |
400 | * completely for some reason, but | | 400 | * completely for some reason, but |
401 | * don't stop target process forever. | | 401 | * don't stop target process forever. |
402 | */ | | 402 | */ |
403 | log(LOG_NOTICE, "ktrace timeout\n"); | | 403 | log(LOG_NOTICE, "ktrace timeout\n"); |
404 | break; | | 404 | break; |
405 | } | | 405 | } |
406 | #ifdef DEBUG | | 406 | #ifdef DEBUG |
407 | getmicrouptime(&t2); | | 407 | getmicrouptime(&t2); |
408 | timersub(&t2, &t1, &t2); | | 408 | timersub(&t2, &t1, &t2); |
409 | if (t2.tv_sec > 0) | | 409 | if (t2.tv_sec > 0) |
410 | log(LOG_NOTICE, | | 410 | log(LOG_NOTICE, |
411 | "ktrace long wait: %lld.%06ld\n", | | 411 | "ktrace long wait: %lld.%06ld\n", |
412 | (long long)t2.tv_sec, (long)t2.tv_usec); | | 412 | (long long)t2.tv_sec, (long)t2.tv_usec); |
413 | #endif | | 413 | #endif |
414 | } while (p->p_tracep == ktd && | | 414 | } while (p->p_tracep == ktd && |
415 | (ktd->ktd_flags & (KTDF_WAIT | KTDF_DONE)) == KTDF_WAIT); | | 415 | (ktd->ktd_flags & (KTDF_WAIT | KTDF_DONE)) == KTDF_WAIT); |
416 | else { | | 416 | else { |
417 | /* Schedule delayed wakeup */ | | 417 | /* Schedule delayed wakeup */ |
418 | if (ktd->ktd_qcount > ktd->ktd_delayqcnt) | | 418 | if (ktd->ktd_qcount > ktd->ktd_delayqcnt) |
419 | ktd_wakeup(ktd); /* Wakeup now */ | | 419 | ktd_wakeup(ktd); /* Wakeup now */ |
420 | else if (!callout_pending(&ktd->ktd_wakch)) | | 420 | else if (!callout_pending(&ktd->ktd_wakch)) |
421 | callout_reset(&ktd->ktd_wakch, | | 421 | callout_reset(&ktd->ktd_wakch, |
422 | ktd->ktd_flags & KTDF_INTERACTIVE ? | | 422 | ktd->ktd_flags & KTDF_INTERACTIVE ? |
423 | ktd->ktd_intrwakdl : ktd->ktd_wakedelay, | | 423 | ktd->ktd_intrwakdl : ktd->ktd_wakedelay, |
424 | ktd_callout, ktd); | | 424 | ktd_callout, ktd); |
425 | } | | 425 | } |
426 | | | 426 | |
427 | skip_sync: | | 427 | skip_sync: |
428 | ktdrel(ktd); | | 428 | ktdrel(ktd); |
429 | mutex_exit(&ktrace_lock); | | 429 | mutex_exit(&ktrace_lock); |
430 | ktrexit(l); | | 430 | ktrexit(l); |
431 | return; | | 431 | return; |
432 | | | 432 | |
433 | relktd: | | 433 | relktd: |
434 | ktdrel(ktd); | | 434 | ktdrel(ktd); |
435 | | | 435 | |
436 | freekte: | | 436 | freekte: |
437 | mutex_exit(&ktrace_lock); | | 437 | mutex_exit(&ktrace_lock); |
438 | ktefree(kte); | | 438 | ktefree(kte); |
439 | ktrexit(l); | | 439 | ktrexit(l); |
440 | } | | 440 | } |
441 | | | 441 | |
442 | void | | 442 | void |
443 | ktefree(struct ktrace_entry *kte) | | 443 | ktefree(struct ktrace_entry *kte) |
444 | { | | 444 | { |
445 | | | 445 | |
446 | if (kte->kte_buf != kte->kte_space) | | 446 | if (kte->kte_buf != kte->kte_space) |
447 | kmem_free(kte->kte_buf, kte->kte_bufsz); | | 447 | kmem_free(kte->kte_buf, kte->kte_bufsz); |
448 | pool_cache_put(kte_cache, kte); | | 448 | pool_cache_put(kte_cache, kte); |
449 | } | | 449 | } |
450 | | | 450 | |
451 | /* | | 451 | /* |
452 | * "deep" compare of two files for the purposes of clearing a trace. | | 452 | * "deep" compare of two files for the purposes of clearing a trace. |
453 | * Returns true if they're the same open file, or if they point at the | | 453 | * Returns true if they're the same open file, or if they point at the |
454 | * same underlying vnode/socket. | | 454 | * same underlying vnode/socket. |
455 | */ | | 455 | */ |
456 | | | 456 | |
457 | int | | 457 | int |
458 | ktrsamefile(file_t *f1, file_t *f2) | | 458 | ktrsamefile(file_t *f1, file_t *f2) |
459 | { | | 459 | { |
460 | | | 460 | |
461 | return ((f1 == f2) || | | 461 | return ((f1 == f2) || |
462 | ((f1 != NULL) && (f2 != NULL) && | | 462 | ((f1 != NULL) && (f2 != NULL) && |
463 | (f1->f_type == f2->f_type) && | | 463 | (f1->f_type == f2->f_type) && |
464 | (f1->f_data == f2->f_data))); | | 464 | (f1->f_data == f2->f_data))); |
465 | } | | 465 | } |
466 | | | 466 | |
/*
 * Detach process p from its trace descriptor: clear its trace flags,
 * drop the descriptor reference it held, and wake anyone waiting for
 * a trace sync on that descriptor.
 *
 * Must be called with ktrace_lock held.
 */
void
ktrderef(struct proc *p)
{
	struct ktr_desc *ktd = p->p_tracep;

	KASSERT(mutex_owned(&ktrace_lock));

	/* Stop generating new trace records for this process. */
	p->p_traceflag = 0;
	if (ktd == NULL)
		return;
	p->p_tracep = NULL;

	/* Wake waiters on this descriptor's sync condvar before dropping. */
	cv_broadcast(&ktd->ktd_sync_cv);
	ktdrel(ktd);
}
482 | | | 482 | |
483 | void | | 483 | void |
484 | ktradref(struct proc *p) | | 484 | ktradref(struct proc *p) |
485 | { | | 485 | { |
486 | struct ktr_desc *ktd = p->p_tracep; | | 486 | struct ktr_desc *ktd = p->p_tracep; |
487 | | | 487 | |
488 | KASSERT(mutex_owned(&ktrace_lock)); | | 488 | KASSERT(mutex_owned(&ktrace_lock)); |
489 | | | 489 | |
490 | ktdref(ktd); | | 490 | ktdref(ktd); |
491 | } | | 491 | } |
492 | | | 492 | |
/*
 * Detach every process tracing to descriptor ktd.  When auth is set,
 * only processes the caller is allowed to control are detached; an
 * unauthorized match makes the call return EPERM (other processes are
 * still processed).
 */
int
ktrderefall(struct ktr_desc *ktd, int auth)
{
	lwp_t *curl = curlwp;
	struct proc *p;
	int error = 0;

	mutex_enter(proc_lock);
	PROCLIST_FOREACH(p, &allproc) {
		/* Cheap unlocked pre-check to skip uninvolved processes. */
		if (p->p_tracep != ktd)
			continue;
		mutex_enter(p->p_lock);
		mutex_enter(&ktrace_lock);
		/* Re-check under the locks; it may have changed meanwhile. */
		if (p->p_tracep == ktd) {
			if (!auth || ktrcanset(curl, p))
				ktrderef(p);
			else
				error = EPERM;
		}
		mutex_exit(&ktrace_lock);
		mutex_exit(p->p_lock);
	}
	mutex_exit(proc_lock);

	return error;
}
519 | | | 519 | |
/*
 * Allocate and initialize a trace entry with a payload of sz bytes for
 * lwp l.  Fills in the common record header (type, length, pid, command
 * name, lwp id, version and timestamp).  On success *ktep receives the
 * entry and *bufp its payload buffer.
 *
 * Returns EAGAIN if the lwp is already inside the tracer (recursion
 * guard via ktrenter), ENOMEM on payload allocation failure.
 */
int
ktealloc(struct ktrace_entry **ktep, void **bufp, lwp_t *l, int type,
	 size_t sz)
{
	struct proc *p = l->l_proc;
	struct ktrace_entry *kte;
	struct ktr_header *kth;
	void *buf;

	/* Guard against recursively tracing the tracer itself. */
	if (ktrenter(l))
		return EAGAIN;

	kte = pool_cache_get(kte_cache, PR_WAITOK);
	/* Small payloads live inline in the entry; large ones off-line. */
	if (sz > sizeof(kte->kte_space)) {
		if ((buf = kmem_alloc(sz, KM_SLEEP)) == NULL) {
			pool_cache_put(kte_cache, kte);
			ktrexit(l);
			return ENOMEM;
		}
	} else
		buf = kte->kte_space;

	kte->kte_bufsz = sz;
	kte->kte_buf = buf;

	kth = &kte->kte_kth;
	(void)memset(kth, 0, sizeof(*kth));
	/* ktr_len covers the payload only; callers may shrink it later. */
	kth->ktr_len = sz;
	kth->ktr_type = type;
	kth->ktr_pid = p->p_pid;
	memcpy(kth->ktr_comm, p->p_comm, MAXCOMLEN);
	kth->ktr_version = KTRFAC_VERSION(p->p_traceflag);
	kth->ktr_lid = l->l_lid;
	nanotime(&kth->ktr_ts);

	*ktep = kte;
	*bufp = buf;

	return 0;
}
560 | | | 560 | |
561 | void | | 561 | void |
562 | ktr_syscall(register_t code, const register_t args[], int narg) | | 562 | ktr_syscall(register_t code, const register_t args[], int narg) |
563 | { | | 563 | { |
564 | lwp_t *l = curlwp; | | 564 | lwp_t *l = curlwp; |
565 | struct proc *p = l->l_proc; | | 565 | struct proc *p = l->l_proc; |
566 | struct ktrace_entry *kte; | | 566 | struct ktrace_entry *kte; |
567 | struct ktr_syscall *ktp; | | 567 | struct ktr_syscall *ktp; |
568 | register_t *argp; | | 568 | register_t *argp; |
569 | size_t len; | | 569 | size_t len; |
570 | u_int i; | | 570 | u_int i; |
571 | | | 571 | |
572 | if (!KTRPOINT(p, KTR_SYSCALL)) | | 572 | if (!KTRPOINT(p, KTR_SYSCALL)) |
573 | return; | | 573 | return; |
574 | | | 574 | |
575 | len = sizeof(struct ktr_syscall) + narg * sizeof argp[0]; | | 575 | len = sizeof(struct ktr_syscall) + narg * sizeof argp[0]; |
576 | | | 576 | |
577 | if (ktealloc(&kte, (void *)&ktp, l, KTR_SYSCALL, len)) | | 577 | if (ktealloc(&kte, (void *)&ktp, l, KTR_SYSCALL, len)) |
578 | return; | | 578 | return; |
579 | | | 579 | |
580 | ktp->ktr_code = code; | | 580 | ktp->ktr_code = code; |
581 | ktp->ktr_argsize = narg * sizeof argp[0]; | | 581 | ktp->ktr_argsize = narg * sizeof argp[0]; |
582 | argp = (register_t *)(ktp + 1); | | 582 | argp = (register_t *)(ktp + 1); |
583 | for (i = 0; i < narg; i++) | | 583 | for (i = 0; i < narg; i++) |
584 | *argp++ = args[i]; | | 584 | *argp++ = args[i]; |
585 | | | 585 | |
586 | ktraddentry(l, kte, KTA_WAITOK); | | 586 | ktraddentry(l, kte, KTA_WAITOK); |
587 | } | | 587 | } |
588 | | | 588 | |
589 | void | | 589 | void |
590 | ktr_sysret(register_t code, int error, register_t *retval) | | 590 | ktr_sysret(register_t code, int error, register_t *retval) |
591 | { | | 591 | { |
592 | lwp_t *l = curlwp; | | 592 | lwp_t *l = curlwp; |
593 | struct ktrace_entry *kte; | | 593 | struct ktrace_entry *kte; |
594 | struct ktr_sysret *ktp; | | 594 | struct ktr_sysret *ktp; |
595 | | | 595 | |
596 | if (!KTRPOINT(l->l_proc, KTR_SYSRET)) | | 596 | if (!KTRPOINT(l->l_proc, KTR_SYSRET)) |
597 | return; | | 597 | return; |
598 | | | 598 | |
599 | if (ktealloc(&kte, (void *)&ktp, l, KTR_SYSRET, | | 599 | if (ktealloc(&kte, (void *)&ktp, l, KTR_SYSRET, |
600 | sizeof(struct ktr_sysret))) | | 600 | sizeof(struct ktr_sysret))) |
601 | return; | | 601 | return; |
602 | | | 602 | |
603 | ktp->ktr_code = code; | | 603 | ktp->ktr_code = code; |
604 | ktp->ktr_eosys = 0; /* XXX unused */ | | 604 | ktp->ktr_eosys = 0; /* XXX unused */ |
605 | ktp->ktr_error = error; | | 605 | ktp->ktr_error = error; |
606 | ktp->ktr_retval = retval && error == 0 ? retval[0] : 0; | | 606 | ktp->ktr_retval = retval && error == 0 ? retval[0] : 0; |
607 | ktp->ktr_retval_1 = retval && error == 0 ? retval[1] : 0; | | 607 | ktp->ktr_retval_1 = retval && error == 0 ? retval[1] : 0; |
608 | | | 608 | |
609 | ktraddentry(l, kte, KTA_WAITOK); | | 609 | ktraddentry(l, kte, KTA_WAITOK); |
610 | } | | 610 | } |
611 | | | 611 | |
612 | void | | 612 | void |
613 | ktr_namei(const char *path, size_t pathlen) | | 613 | ktr_namei(const char *path, size_t pathlen) |
614 | { | | 614 | { |
615 | lwp_t *l = curlwp; | | 615 | lwp_t *l = curlwp; |
616 | | | 616 | |
617 | if (!KTRPOINT(l->l_proc, KTR_NAMEI)) | | 617 | if (!KTRPOINT(l->l_proc, KTR_NAMEI)) |
618 | return; | | 618 | return; |
619 | | | 619 | |
620 | ktr_kmem(l, KTR_NAMEI, path, pathlen); | | 620 | ktr_kmem(l, KTR_NAMEI, path, pathlen); |
621 | } | | 621 | } |
622 | | | 622 | |
623 | void | | 623 | void |
624 | ktr_namei2(const char *eroot, size_t erootlen, | | 624 | ktr_namei2(const char *eroot, size_t erootlen, |
625 | const char *path, size_t pathlen) | | 625 | const char *path, size_t pathlen) |
626 | { | | 626 | { |
627 | lwp_t *l = curlwp; | | 627 | lwp_t *l = curlwp; |
628 | struct ktrace_entry *kte; | | 628 | struct ktrace_entry *kte; |
629 | void *buf; | | 629 | void *buf; |
630 | | | 630 | |
631 | if (!KTRPOINT(l->l_proc, KTR_NAMEI)) | | 631 | if (!KTRPOINT(l->l_proc, KTR_NAMEI)) |
632 | return; | | 632 | return; |
633 | | | 633 | |
634 | if (ktealloc(&kte, &buf, l, KTR_NAMEI, erootlen + pathlen)) | | 634 | if (ktealloc(&kte, &buf, l, KTR_NAMEI, erootlen + pathlen)) |
635 | return; | | 635 | return; |
636 | memcpy(buf, eroot, erootlen); | | 636 | memcpy(buf, eroot, erootlen); |
637 | buf = (char *)buf + erootlen; | | 637 | buf = (char *)buf + erootlen; |
638 | memcpy(buf, path, pathlen); | | 638 | memcpy(buf, path, pathlen); |
639 | ktraddentry(l, kte, KTA_WAITOK); | | 639 | ktraddentry(l, kte, KTA_WAITOK); |
640 | } | | 640 | } |
641 | | | 641 | |
642 | void | | 642 | void |
643 | ktr_emul(void) | | 643 | ktr_emul(void) |
644 | { | | 644 | { |
645 | lwp_t *l = curlwp; | | 645 | lwp_t *l = curlwp; |
646 | const char *emul = l->l_proc->p_emul->e_name; | | 646 | const char *emul = l->l_proc->p_emul->e_name; |
647 | | | 647 | |
648 | if (!KTRPOINT(l->l_proc, KTR_EMUL)) | | 648 | if (!KTRPOINT(l->l_proc, KTR_EMUL)) |
649 | return; | | 649 | return; |
650 | | | 650 | |
651 | ktr_kmem(l, KTR_EMUL, emul, strlen(emul)); | | 651 | ktr_kmem(l, KTR_EMUL, emul, strlen(emul)); |
652 | } | | 652 | } |
653 | | | 653 | |
654 | void | | 654 | void |
655 | ktr_execarg(const void *bf, size_t len) | | 655 | ktr_execarg(const void *bf, size_t len) |
656 | { | | 656 | { |
657 | lwp_t *l = curlwp; | | 657 | lwp_t *l = curlwp; |
658 | | | 658 | |
659 | if (!KTRPOINT(l->l_proc, KTR_EXEC_ARG)) | | 659 | if (!KTRPOINT(l->l_proc, KTR_EXEC_ARG)) |
660 | return; | | 660 | return; |
661 | | | 661 | |
662 | ktr_kmem(l, KTR_EXEC_ARG, bf, len); | | 662 | ktr_kmem(l, KTR_EXEC_ARG, bf, len); |
663 | } | | 663 | } |
664 | | | 664 | |
665 | void | | 665 | void |
666 | ktr_execenv(const void *bf, size_t len) | | 666 | ktr_execenv(const void *bf, size_t len) |
667 | { | | 667 | { |
668 | lwp_t *l = curlwp; | | 668 | lwp_t *l = curlwp; |
669 | | | 669 | |
670 | if (!KTRPOINT(l->l_proc, KTR_EXEC_ENV)) | | 670 | if (!KTRPOINT(l->l_proc, KTR_EXEC_ENV)) |
671 | return; | | 671 | return; |
672 | | | 672 | |
673 | ktr_kmem(l, KTR_EXEC_ENV, bf, len); | | 673 | ktr_kmem(l, KTR_EXEC_ENV, bf, len); |
674 | } | | 674 | } |
675 | | | 675 | |
676 | void | | 676 | void |
677 | ktr_execfd(int fd, u_int dtype) | | 677 | ktr_execfd(int fd, u_int dtype) |
678 | { | | 678 | { |
679 | struct ktrace_entry *kte; | | 679 | struct ktrace_entry *kte; |
680 | struct ktr_execfd* ktp; | | 680 | struct ktr_execfd* ktp; |
681 | | | 681 | |
682 | lwp_t *l = curlwp; | | 682 | lwp_t *l = curlwp; |
683 | | | 683 | |
684 | if (!KTRPOINT(l->l_proc, KTR_EXEC_FD)) | | 684 | if (!KTRPOINT(l->l_proc, KTR_EXEC_FD)) |
685 | return; | | 685 | return; |
686 | | | 686 | |
687 | if (ktealloc(&kte, (void *)&ktp, l, KTR_EXEC_FD, sizeof(*ktp))) | | 687 | if (ktealloc(&kte, (void *)&ktp, l, KTR_EXEC_FD, sizeof(*ktp))) |
688 | return; | | 688 | return; |
689 | | | 689 | |
690 | ktp->ktr_fd = fd; | | 690 | ktp->ktr_fd = fd; |
691 | ktp->ktr_dtype = dtype; | | 691 | ktp->ktr_dtype = dtype; |
692 | ktraddentry(l, kte, KTA_WAITOK); | | 692 | ktraddentry(l, kte, KTA_WAITOK); |
693 | } | | 693 | } |
694 | | | 694 | |
695 | static void | | 695 | static void |
696 | ktr_kmem(lwp_t *l, int type, const void *bf, size_t len) | | 696 | ktr_kmem(lwp_t *l, int type, const void *bf, size_t len) |
697 | { | | 697 | { |
698 | struct ktrace_entry *kte; | | 698 | struct ktrace_entry *kte; |
699 | void *buf; | | 699 | void *buf; |
700 | | | 700 | |
701 | if (ktealloc(&kte, &buf, l, type, len)) | | 701 | if (ktealloc(&kte, &buf, l, type, len)) |
702 | return; | | 702 | return; |
703 | memcpy(buf, bf, len); | | 703 | memcpy(buf, bf, len); |
704 | ktraddentry(l, kte, KTA_WAITOK); | | 704 | ktraddentry(l, kte, KTA_WAITOK); |
705 | } | | 705 | } |
706 | | | 706 | |
/*
 * Build and queue KTR_GENIO records for len bytes of user I/O data
 * described by iov.  The data is copied from user space and chunked
 * into entries of at most PAGE_SIZE each, so a single huge transfer
 * cannot exhaust the kmem map.
 */
static void
ktr_io(lwp_t *l, int fd, enum uio_rw rw, struct iovec *iov, size_t len)
{
	struct ktrace_entry *kte;
	struct ktr_genio *ktp;
	size_t resid = len, cnt, buflen;
	char *cp;

next:
	/* One entry holds the genio header plus up to a page of data. */
	buflen = min(PAGE_SIZE, resid + sizeof(struct ktr_genio));

	if (ktealloc(&kte, (void *)&ktp, l, KTR_GENIO, buflen))
		return;

	ktp->ktr_fd = fd;
	ktp->ktr_rw = rw;

	cp = (void *)(ktp + 1);
	buflen -= sizeof(struct ktr_genio);
	/* ktr_len grows as data is successfully copied in below. */
	kte->kte_kth.ktr_len = sizeof(struct ktr_genio);

	/* Fill this entry from as many iovecs as fit. */
	while (buflen > 0) {
		cnt = min(iov->iov_len, buflen);
		if (copyin(iov->iov_base, cp, cnt) != 0)
			goto out;
		kte->kte_kth.ktr_len += cnt;
		cp += cnt;
		buflen -= cnt;
		resid -= cnt;
		iov->iov_len -= cnt;
		/* Advance to the next iovec, or within the current one. */
		if (iov->iov_len == 0)
			iov++;
		else
			iov->iov_base = (char *)iov->iov_base + cnt;
	}

	/*
	 * Don't push so many entry at once.  It will cause kmem map
	 * shortage.
	 */
	ktraddentry(l, kte, KTA_WAITOK | KTA_LARGE);
	if (resid > 0) {
		/* Yield the CPU between chunks if preemption is pending. */
		if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD) {
			(void)ktrenter(l);
			preempt();
			ktrexit(l);
		}

		goto next;
	}

	return;

out:
	/* copyin failed: discard the partial entry. */
	ktefree(kte);
	ktrexit(l);
}
764 | | | 764 | |
765 | void | | 765 | void |
766 | ktr_genio(int fd, enum uio_rw rw, const void *addr, size_t len, int error) | | 766 | ktr_genio(int fd, enum uio_rw rw, const void *addr, size_t len, int error) |
767 | { | | 767 | { |
768 | lwp_t *l = curlwp; | | 768 | lwp_t *l = curlwp; |
769 | struct iovec iov; | | 769 | struct iovec iov; |
770 | | | 770 | |
771 | if (!KTRPOINT(l->l_proc, KTR_GENIO) || error != 0) | | 771 | if (!KTRPOINT(l->l_proc, KTR_GENIO) || error != 0) |
772 | return; | | 772 | return; |
773 | iov.iov_base = __UNCONST(addr); | | 773 | iov.iov_base = __UNCONST(addr); |
774 | iov.iov_len = len; | | 774 | iov.iov_len = len; |
775 | ktr_io(l, fd, rw, &iov, len); | | 775 | ktr_io(l, fd, rw, &iov, len); |
776 | } | | 776 | } |
777 | | | 777 | |
778 | void | | 778 | void |
779 | ktr_geniov(int fd, enum uio_rw rw, struct iovec *iov, size_t len, int error) | | 779 | ktr_geniov(int fd, enum uio_rw rw, struct iovec *iov, size_t len, int error) |
780 | { | | 780 | { |
781 | lwp_t *l = curlwp; | | 781 | lwp_t *l = curlwp; |
782 | | | 782 | |
783 | if (!KTRPOINT(l->l_proc, KTR_GENIO) || error != 0) | | 783 | if (!KTRPOINT(l->l_proc, KTR_GENIO) || error != 0) |
784 | return; | | 784 | return; |
785 | ktr_io(l, fd, rw, iov, len); | | 785 | ktr_io(l, fd, rw, iov, len); |
786 | } | | 786 | } |
787 | | | 787 | |
788 | void | | 788 | void |
789 | ktr_mibio(int fd, enum uio_rw rw, const void *addr, size_t len, int error) | | 789 | ktr_mibio(int fd, enum uio_rw rw, const void *addr, size_t len, int error) |
790 | { | | 790 | { |
791 | lwp_t *l = curlwp; | | 791 | lwp_t *l = curlwp; |
792 | struct iovec iov; | | 792 | struct iovec iov; |
793 | | | 793 | |
794 | if (!KTRPOINT(l->l_proc, KTR_MIB) || error != 0) | | 794 | if (!KTRPOINT(l->l_proc, KTR_MIB) || error != 0) |
795 | return; | | 795 | return; |
796 | iov.iov_base = __UNCONST(addr); | | 796 | iov.iov_base = __UNCONST(addr); |
797 | iov.iov_len = len; | | 797 | iov.iov_len = len; |
798 | ktr_io(l, fd, rw, &iov, len); | | 798 | ktr_io(l, fd, rw, &iov, len); |
799 | } | | 799 | } |
800 | | | 800 | |
/*
 * Record delivery of a signal: the signal number, handler, blocked
 * mask and trap code, plus the full siginfo when one is available.
 */
void
ktr_psig(int sig, sig_t action, const sigset_t *mask,
    const ksiginfo_t *ksi)
{
	struct ktrace_entry *kte;
	lwp_t *l = curlwp;
	/* Record layout: fixed ktr_psig optionally followed by siginfo. */
	struct {
		struct ktr_psig	kp;
		siginfo_t	si;
	} *kbuf;

	if (!KTRPOINT(l->l_proc, KTR_PSIG))
		return;

	/* Allocate for the worst case; may be trimmed below. */
	if (ktealloc(&kte, (void *)&kbuf, l, KTR_PSIG, sizeof(*kbuf)))
		return;

	kbuf->kp.signo = (char)sig;
	kbuf->kp.action = action;
	kbuf->kp.mask = *mask;

	if (ksi) {
		kbuf->kp.code = KSI_TRAPCODE(ksi);
		(void)memset(&kbuf->si, 0, sizeof(kbuf->si));
		kbuf->si._info = ksi->ksi_info;
		kte->kte_kth.ktr_len = sizeof(*kbuf);
	} else {
		kbuf->kp.code = 0;
		/* No siginfo: shrink the record to the fixed header. */
		kte->kte_kth.ktr_len = sizeof(struct ktr_psig);
	}

	ktraddentry(l, kte, KTA_WAITOK);
}
834 | | | 834 | |
/*
 * Record a context switch.  On the way out (out != 0) only a timestamp
 * and flags are stashed in the lwp — no allocation may sleep here.  On
 * the way back in, the deferred "out" record (if any) is emitted with
 * the saved timestamp, followed by the "in" record.
 */
void
ktr_csw(int out, int user)
{
	lwp_t *l = curlwp;
	struct proc *p = l->l_proc;
	struct ktrace_entry *kte;
	struct ktr_csw *kc;

	if (!KTRPOINT(p, KTR_CSW))
		return;

	/*
	 * Don't record context switches resulting from blocking on 
	 * locks; it's too easy to get duff results.
	 */
	if (l->l_syncobj == &mutex_syncobj || l->l_syncobj == &rw_syncobj)
		return;

	/*
	 * We can't sleep if we're already going to sleep (if original
	 * condition is met during sleep, we hang up).
	 *
	 * XXX This is not ideal: it would be better to maintain a pool
	 * of ktes and actually push this to the kthread when context
	 * switch happens, however given the points where we are called
	 * from that is difficult to do.
	 */
	if (out) {
		if (ktrenter(l))
			return;

		/* Defer the record: stash timestamp and flags in the lwp. */
		nanotime(&l->l_ktrcsw);
		l->l_pflag |= LP_KTRCSW;
		if (user)
			l->l_pflag |= LP_KTRCSWUSER;
		else
			l->l_pflag &= ~LP_KTRCSWUSER;

		ktrexit(l);
		return;
	}

	/*
	 * On the way back in, we need to record twice: once for entry, and
	 * once for exit.
	 */
	if ((l->l_pflag & LP_KTRCSW) != 0) {
		struct timespec *ts;
		l->l_pflag &= ~LP_KTRCSW;

		if (ktealloc(&kte, (void *)&kc, l, KTR_CSW, sizeof(*kc)))
			return;

		kc->out = 1;
		kc->user = ((l->l_pflag & LP_KTRCSWUSER) != 0);

		/*
		 * Overwrite the record timestamp with the one saved when
		 * we actually went to sleep, in the format matching the
		 * trace version in use.
		 */
		ts = &l->l_ktrcsw;
		switch (KTRFAC_VERSION(p->p_traceflag)) {
		case 0:
			kte->kte_kth.ktr_otv.tv_sec = ts->tv_sec;
			kte->kte_kth.ktr_otv.tv_usec = ts->tv_nsec / 1000;
			break;
		case 1: 
			kte->kte_kth.ktr_ots.tv_sec = ts->tv_sec;
			kte->kte_kth.ktr_ots.tv_nsec = ts->tv_nsec; 
			break; 
		case 2:
			kte->kte_kth.ktr_ts.tv_sec = ts->tv_sec;
			kte->kte_kth.ktr_ts.tv_nsec = ts->tv_nsec; 
			break; 
		default:
			break;
		}

		ktraddentry(l, kte, KTA_WAITOK);
	}

	/* The "in" record, timestamped now by ktealloc. */
	if (ktealloc(&kte, (void *)&kc, l, KTR_CSW, sizeof(*kc)))
		return;

	kc->out = 0;
	kc->user = user;

	ktraddentry(l, kte, KTA_WAITOK);
}
920 | | | 920 | |
921 | bool | | 921 | bool |
922 | ktr_point(int fac_bit) | | 922 | ktr_point(int fac_bit) |
923 | { | | 923 | { |
924 | return curlwp->l_proc->p_traceflag & fac_bit; | | 924 | return curlwp->l_proc->p_traceflag & fac_bit; |
925 | } | | 925 | } |
926 | | | 926 | |
927 | int | | 927 | int |
928 | ktruser(const char *id, void *addr, size_t len, int ustr) | | 928 | ktruser(const char *id, void *addr, size_t len, int ustr) |
929 | { | | 929 | { |
930 | struct ktrace_entry *kte; | | 930 | struct ktrace_entry *kte; |
931 | struct ktr_user *ktp; | | 931 | struct ktr_user *ktp; |
932 | lwp_t *l = curlwp; | | 932 | lwp_t *l = curlwp; |
933 | void *user_dta; | | 933 | void *user_dta; |
934 | int error; | | 934 | int error; |
935 | | | 935 | |
936 | if (!KTRPOINT(l->l_proc, KTR_USER)) | | 936 | if (!KTRPOINT(l->l_proc, KTR_USER)) |
937 | return 0; | | 937 | return 0; |
938 | | | 938 | |
939 | if (len > KTR_USER_MAXLEN) | | 939 | if (len > KTR_USER_MAXLEN) |
940 | return ENOSPC; | | 940 | return ENOSPC; |
941 | | | 941 | |
942 | error = ktealloc(&kte, (void *)&ktp, l, KTR_USER, sizeof(*ktp) + len); | | 942 | error = ktealloc(&kte, (void *)&ktp, l, KTR_USER, sizeof(*ktp) + len); |
943 | if (error != 0) | | 943 | if (error != 0) |
944 | return error; | | 944 | return error; |
945 | | | 945 | |
946 | if (ustr) { | | 946 | if (ustr) { |
947 | if (copyinstr(id, ktp->ktr_id, KTR_USER_MAXIDLEN, NULL) != 0) | | 947 | if (copyinstr(id, ktp->ktr_id, KTR_USER_MAXIDLEN, NULL) != 0) |
948 | ktp->ktr_id[0] = '\0'; | | 948 | ktp->ktr_id[0] = '\0'; |
949 | } else | | 949 | } else |
950 | strncpy(ktp->ktr_id, id, KTR_USER_MAXIDLEN); | | 950 | strncpy(ktp->ktr_id, id, KTR_USER_MAXIDLEN); |
951 | ktp->ktr_id[KTR_USER_MAXIDLEN-1] = '\0'; | | 951 | ktp->ktr_id[KTR_USER_MAXIDLEN-1] = '\0'; |
952 | | | 952 | |
953 | user_dta = (void *)(ktp + 1); | | 953 | user_dta = (void *)(ktp + 1); |
954 | if ((error = copyin(addr, (void *)user_dta, len)) != 0) | | 954 | if ((error = copyin(addr, (void *)user_dta, len)) != 0) |
955 | len = 0; | | 955 | kte->kte_kth.ktr_len = 0; |
956 | | | 956 | |
957 | ktraddentry(l, kte, KTA_WAITOK); | | 957 | ktraddentry(l, kte, KTA_WAITOK); |
958 | return error; | | 958 | return error; |
959 | } | | 959 | } |
960 | | | 960 | |
961 | void | | 961 | void |
962 | ktr_kuser(const char *id, void *addr, size_t len) | | 962 | ktr_kuser(const char *id, void *addr, size_t len) |
963 | { | | 963 | { |
964 | struct ktrace_entry *kte; | | 964 | struct ktrace_entry *kte; |
965 | struct ktr_user *ktp; | | 965 | struct ktr_user *ktp; |
966 | lwp_t *l = curlwp; | | 966 | lwp_t *l = curlwp; |
967 | int error; | | 967 | int error; |
968 | | | 968 | |
969 | if (!KTRPOINT(l->l_proc, KTR_USER)) | | 969 | if (!KTRPOINT(l->l_proc, KTR_USER)) |
970 | return; | | 970 | return; |
971 | | | 971 | |
972 | if (len > KTR_USER_MAXLEN) | | 972 | if (len > KTR_USER_MAXLEN) |
973 | return; | | 973 | return; |
974 | | | 974 | |
975 | error = ktealloc(&kte, (void *)&ktp, l, KTR_USER, sizeof(*ktp) + len); | | 975 | error = ktealloc(&kte, (void *)&ktp, l, KTR_USER, sizeof(*ktp) + len); |
976 | if (error != 0) | | 976 | if (error != 0) |
977 | return; | | 977 | return; |
978 | | | 978 | |
979 | strlcpy(ktp->ktr_id, id, KTR_USER_MAXIDLEN); | | 979 | strlcpy(ktp->ktr_id, id, KTR_USER_MAXIDLEN); |
980 | | | 980 | |
981 | memcpy(ktp + 1, addr, len); | | 981 | memcpy(ktp + 1, addr, len); |
982 | | | 982 | |
983 | ktraddentry(l, kte, KTA_WAITOK); | | 983 | ktraddentry(l, kte, KTA_WAITOK); |
984 | } | | 984 | } |
985 | | | 985 | |
/*
 * Record a scheduler-activations upcall: the upcall type and counts,
 * the user-space sas/ap pointers, followed by copies of the
 * nevent + nint + 1 sa_t structures pointed at by the ksas array.
 */
void
ktr_saupcall(struct lwp *l, int type, int nevent, int nint, void *sas,
    void *ap, void *ksas)
{
	struct ktrace_entry *kte;
	struct ktr_saupcall *ktp;
	size_t len, sz;
	struct sa_t **sapp;
	int i;

	if (!KTRPOINT(l->l_proc, KTR_SAUPCALL))
		return;

	/* Fixed header plus one sa_t per event, interrupt, and self. */
	len = sizeof(struct ktr_saupcall);
	sz = len + sizeof(struct sa_t) * (nevent + nint + 1);

	if (ktealloc(&kte, (void *)&ktp, l, KTR_SAUPCALL, sz))
		return;

	ktp->ktr_type = type;
	ktp->ktr_nevent = nevent;
	ktp->ktr_nint = nint;
	ktp->ktr_sas = sas;
	ktp->ktr_ap = ap;

	/* Copy the sa_t's */
	sapp = (struct sa_t **) ksas;

	/* len doubles as the running offset into the record. */
	for (i = nevent + nint; i >= 0; i--) {
		memcpy((char *)ktp + len, *sapp, sizeof(struct sa_t));
		len += sizeof(struct sa_t);
		sapp++;
	}

	kte->kte_kth.ktr_len = len;
	ktraddentry(l, kte, KTA_WAITOK);
}
1023 | | | 1023 | |
1024 | void | | 1024 | void |
1025 | ktr_mib(const int *name, u_int namelen) | | 1025 | ktr_mib(const int *name, u_int namelen) |
1026 | { | | 1026 | { |
1027 | struct ktrace_entry *kte; | | 1027 | struct ktrace_entry *kte; |
1028 | int *namep; | | 1028 | int *namep; |
1029 | size_t size; | | 1029 | size_t size; |
1030 | lwp_t *l = curlwp; | | 1030 | lwp_t *l = curlwp; |
1031 | | | 1031 | |
1032 | if (!KTRPOINT(l->l_proc, KTR_MIB)) | | 1032 | if (!KTRPOINT(l->l_proc, KTR_MIB)) |
1033 | return; | | 1033 | return; |
1034 | | | 1034 | |
1035 | size = namelen * sizeof(*name); | | 1035 | size = namelen * sizeof(*name); |
1036 | | | 1036 | |
1037 | if (ktealloc(&kte, (void *)&namep, l, KTR_MIB, size)) | | 1037 | if (ktealloc(&kte, (void *)&namep, l, KTR_MIB, size)) |
1038 | return; | | 1038 | return; |
1039 | | | 1039 | |
1040 | (void)memcpy(namep, name, namelen * sizeof(*name)); | | 1040 | (void)memcpy(namep, name, namelen * sizeof(*name)); |
1041 | | | 1041 | |
1042 | ktraddentry(l, kte, KTA_WAITOK); | | 1042 | ktraddentry(l, kte, KTA_WAITOK); |
1043 | } | | 1043 | } |
1044 | | | 1044 | |
1045 | /* Interface and common routines */ | | 1045 | /* Interface and common routines */ |
1046 | | | 1046 | |
1047 | int | | 1047 | int |
1048 | ktrace_common(lwp_t *curl, int ops, int facs, int pid, file_t **fpp) | | 1048 | ktrace_common(lwp_t *curl, int ops, int facs, int pid, file_t **fpp) |
1049 | { | | 1049 | { |
1050 | struct proc *curp; | | 1050 | struct proc *curp; |
1051 | struct proc *p; | | 1051 | struct proc *p; |
1052 | struct pgrp *pg; | | 1052 | struct pgrp *pg; |
1053 | struct ktr_desc *ktd = NULL; | | 1053 | struct ktr_desc *ktd = NULL; |
1054 | file_t *fp = *fpp; | | 1054 | file_t *fp = *fpp; |
1055 | int ret = 0; | | 1055 | int ret = 0; |
1056 | int error = 0; | | 1056 | int error = 0; |
1057 | int descend; | | 1057 | int descend; |
1058 | | | 1058 | |
1059 | curp = curl->l_proc; | | 1059 | curp = curl->l_proc; |
1060 | descend = ops & KTRFLAG_DESCEND; | | 1060 | descend = ops & KTRFLAG_DESCEND; |
1061 | facs = facs & ~((unsigned) KTRFAC_PERSISTENT); | | 1061 | facs = facs & ~((unsigned) KTRFAC_PERSISTENT); |
1062 | | | 1062 | |
1063 | (void)ktrenter(curl); | | 1063 | (void)ktrenter(curl); |
1064 | | | 1064 | |
1065 | switch (KTROP(ops)) { | | 1065 | switch (KTROP(ops)) { |
1066 | | | 1066 | |
1067 | case KTROP_CLEARFILE: | | 1067 | case KTROP_CLEARFILE: |
1068 | /* | | 1068 | /* |
1069 | * Clear all uses of the tracefile | | 1069 | * Clear all uses of the tracefile |
1070 | */ | | 1070 | */ |
1071 | mutex_enter(&ktrace_lock); | | 1071 | mutex_enter(&ktrace_lock); |
1072 | ktd = ktd_lookup(fp); | | 1072 | ktd = ktd_lookup(fp); |
1073 | mutex_exit(&ktrace_lock); | | 1073 | mutex_exit(&ktrace_lock); |
1074 | if (ktd == NULL) | | 1074 | if (ktd == NULL) |
1075 | goto done; | | 1075 | goto done; |
1076 | error = ktrderefall(ktd, 1); | | 1076 | error = ktrderefall(ktd, 1); |
1077 | goto done; | | 1077 | goto done; |
1078 | | | 1078 | |
1079 | case KTROP_SET: | | 1079 | case KTROP_SET: |
1080 | mutex_enter(&ktrace_lock); | | 1080 | mutex_enter(&ktrace_lock); |
1081 | ktd = ktd_lookup(fp); | | 1081 | ktd = ktd_lookup(fp); |
1082 | mutex_exit(&ktrace_lock); | | 1082 | mutex_exit(&ktrace_lock); |
1083 | if (ktd == NULL) { | | 1083 | if (ktd == NULL) { |
1084 | ktd = kmem_alloc(sizeof(*ktd), KM_SLEEP); | | 1084 | ktd = kmem_alloc(sizeof(*ktd), KM_SLEEP); |
1085 | TAILQ_INIT(&ktd->ktd_queue); | | 1085 | TAILQ_INIT(&ktd->ktd_queue); |
1086 | callout_init(&ktd->ktd_wakch, CALLOUT_MPSAFE); | | 1086 | callout_init(&ktd->ktd_wakch, CALLOUT_MPSAFE); |
1087 | cv_init(&ktd->ktd_cv, "ktrwait"); | | 1087 | cv_init(&ktd->ktd_cv, "ktrwait"); |
1088 | cv_init(&ktd->ktd_sync_cv, "ktrsync"); | | 1088 | cv_init(&ktd->ktd_sync_cv, "ktrsync"); |
1089 | ktd->ktd_flags = 0; | | 1089 | ktd->ktd_flags = 0; |
1090 | ktd->ktd_qcount = 0; | | 1090 | ktd->ktd_qcount = 0; |
1091 | ktd->ktd_error = 0; | | 1091 | ktd->ktd_error = 0; |
1092 | ktd->ktd_errcnt = 0; | | 1092 | ktd->ktd_errcnt = 0; |
1093 | ktd->ktd_delayqcnt = ktd_delayqcnt; | | 1093 | ktd->ktd_delayqcnt = ktd_delayqcnt; |
1094 | ktd->ktd_wakedelay = mstohz(ktd_wakedelay); | | 1094 | ktd->ktd_wakedelay = mstohz(ktd_wakedelay); |
1095 | ktd->ktd_intrwakdl = mstohz(ktd_intrwakdl); | | 1095 | ktd->ktd_intrwakdl = mstohz(ktd_intrwakdl); |
1096 | ktd->ktd_ref = 0; | | 1096 | ktd->ktd_ref = 0; |
1097 | ktd->ktd_fp = fp; | | 1097 | ktd->ktd_fp = fp; |
1098 | mutex_enter(&ktrace_lock); | | 1098 | mutex_enter(&ktrace_lock); |
1099 | ktdref(ktd); | | 1099 | ktdref(ktd); |
1100 | mutex_exit(&ktrace_lock); | | 1100 | mutex_exit(&ktrace_lock); |
1101 | | | 1101 | |
1102 | /* | | 1102 | /* |
1103 | * XXX: not correct. needs an way to detect | | 1103 | * XXX: not correct. needs an way to detect |
1104 | * whether ktruss or ktrace. | | 1104 | * whether ktruss or ktrace. |
1105 | */ | | 1105 | */ |
1106 | if (fp->f_type == DTYPE_PIPE) | | 1106 | if (fp->f_type == DTYPE_PIPE) |
1107 | ktd->ktd_flags |= KTDF_INTERACTIVE; | | 1107 | ktd->ktd_flags |= KTDF_INTERACTIVE; |
1108 | | | 1108 | |
1109 | mutex_enter(&fp->f_lock); | | 1109 | mutex_enter(&fp->f_lock); |
1110 | fp->f_count++; | | 1110 | fp->f_count++; |
1111 | mutex_exit(&fp->f_lock); | | 1111 | mutex_exit(&fp->f_lock); |
1112 | error = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL, | | 1112 | error = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL, |
1113 | ktrace_thread, ktd, &ktd->ktd_lwp, "ktrace"); | | 1113 | ktrace_thread, ktd, &ktd->ktd_lwp, "ktrace"); |
1114 | if (error != 0) { | | 1114 | if (error != 0) { |
1115 | kmem_free(ktd, sizeof(*ktd)); | | 1115 | kmem_free(ktd, sizeof(*ktd)); |
1116 | ktd = NULL; | | 1116 | ktd = NULL; |
1117 | mutex_enter(&fp->f_lock); | | 1117 | mutex_enter(&fp->f_lock); |
1118 | fp->f_count--; | | 1118 | fp->f_count--; |
1119 | mutex_exit(&fp->f_lock); | | 1119 | mutex_exit(&fp->f_lock); |
1120 | goto done; | | 1120 | goto done; |
1121 | } | | 1121 | } |
1122 | | | 1122 | |
1123 | mutex_enter(&ktrace_lock); | | 1123 | mutex_enter(&ktrace_lock); |
1124 | if (ktd_lookup(fp) != NULL) { | | 1124 | if (ktd_lookup(fp) != NULL) { |
1125 | ktdrel(ktd); | | 1125 | ktdrel(ktd); |
1126 | ktd = NULL; | | 1126 | ktd = NULL; |
1127 | } else | | 1127 | } else |
1128 | TAILQ_INSERT_TAIL(&ktdq, ktd, ktd_list); | | 1128 | TAILQ_INSERT_TAIL(&ktdq, ktd, ktd_list); |
1129 | if (ktd == NULL) | | 1129 | if (ktd == NULL) |
1130 | cv_wait(&lbolt, &ktrace_lock); | | 1130 | cv_wait(&lbolt, &ktrace_lock); |
1131 | mutex_exit(&ktrace_lock); | | 1131 | mutex_exit(&ktrace_lock); |
1132 | if (ktd == NULL) | | 1132 | if (ktd == NULL) |
1133 | goto done; | | 1133 | goto done; |
1134 | } | | 1134 | } |
1135 | break; | | 1135 | break; |
1136 | | | 1136 | |
1137 | case KTROP_CLEAR: | | 1137 | case KTROP_CLEAR: |
1138 | break; | | 1138 | break; |
1139 | } | | 1139 | } |
1140 | | | 1140 | |
1141 | /* | | 1141 | /* |
1142 | * need something to (un)trace (XXX - why is this here?) | | 1142 | * need something to (un)trace (XXX - why is this here?) |
1143 | */ | | 1143 | */ |
1144 | if (!facs) { | | 1144 | if (!facs) { |
1145 | error = EINVAL; | | 1145 | error = EINVAL; |
1146 | *fpp = NULL; | | 1146 | *fpp = NULL; |
1147 | goto done; | | 1147 | goto done; |
1148 | } | | 1148 | } |
1149 | | | 1149 | |
1150 | /* | | 1150 | /* |
1151 | * do it | | 1151 | * do it |
1152 | */ | | 1152 | */ |
1153 | mutex_enter(proc_lock); | | 1153 | mutex_enter(proc_lock); |
1154 | if (pid < 0) { | | 1154 | if (pid < 0) { |
1155 | /* | | 1155 | /* |
1156 | * by process group | | 1156 | * by process group |
1157 | */ | | 1157 | */ |
1158 | pg = pgrp_find(-pid); | | 1158 | pg = pgrp_find(-pid); |
1159 | if (pg == NULL) | | 1159 | if (pg == NULL) |
1160 | error = ESRCH; | | 1160 | error = ESRCH; |
1161 | else { | | 1161 | else { |
1162 | LIST_FOREACH(p, &pg->pg_members, p_pglist) { | | 1162 | LIST_FOREACH(p, &pg->pg_members, p_pglist) { |
1163 | if (descend) | | 1163 | if (descend) |
1164 | ret |= ktrsetchildren(curl, p, ops, | | 1164 | ret |= ktrsetchildren(curl, p, ops, |
1165 | facs, ktd); | | 1165 | facs, ktd); |
1166 | else | | 1166 | else |
1167 | ret |= ktrops(curl, p, ops, facs, | | 1167 | ret |= ktrops(curl, p, ops, facs, |
1168 | ktd); | | 1168 | ktd); |
1169 | } | | 1169 | } |
1170 | } | | 1170 | } |
1171 | | | 1171 | |
1172 | } else { | | 1172 | } else { |
1173 | /* | | 1173 | /* |
1174 | * by pid | | 1174 | * by pid |
1175 | */ | | 1175 | */ |
1176 | p = proc_find(pid); | | 1176 | p = proc_find(pid); |
1177 | if (p == NULL) | | 1177 | if (p == NULL) |
1178 | error = ESRCH; | | 1178 | error = ESRCH; |
1179 | else if (descend) | | 1179 | else if (descend) |
1180 | ret |= ktrsetchildren(curl, p, ops, facs, ktd); | | 1180 | ret |= ktrsetchildren(curl, p, ops, facs, ktd); |
1181 | else | | 1181 | else |
1182 | ret |= ktrops(curl, p, ops, facs, ktd); | | 1182 | ret |= ktrops(curl, p, ops, facs, ktd); |
1183 | } | | 1183 | } |
1184 | mutex_exit(proc_lock); | | 1184 | mutex_exit(proc_lock); |
1185 | if (error == 0 && !ret) | | 1185 | if (error == 0 && !ret) |
1186 | error = EPERM; | | 1186 | error = EPERM; |
1187 | *fpp = NULL; | | 1187 | *fpp = NULL; |
1188 | done: | | 1188 | done: |
1189 | if (ktd != NULL) { | | 1189 | if (ktd != NULL) { |
1190 | mutex_enter(&ktrace_lock); | | 1190 | mutex_enter(&ktrace_lock); |
1191 | if (error != 0) { | | 1191 | if (error != 0) { |
1192 | /* | | 1192 | /* |
1193 | * Wakeup the thread so that it can be die if we | | 1193 | * Wakeup the thread so that it can be die if we |
1194 | * can't trace any process. | | 1194 | * can't trace any process. |
1195 | */ | | 1195 | */ |
1196 | ktd_wakeup(ktd); | | 1196 | ktd_wakeup(ktd); |
1197 | } | | 1197 | } |
1198 | if (KTROP(ops) == KTROP_SET || KTROP(ops) == KTROP_CLEARFILE) | | 1198 | if (KTROP(ops) == KTROP_SET || KTROP(ops) == KTROP_CLEARFILE) |
1199 | ktdrel(ktd); | | 1199 | ktdrel(ktd); |
1200 | mutex_exit(&ktrace_lock); | | 1200 | mutex_exit(&ktrace_lock); |
1201 | } | | 1201 | } |
1202 | ktrexit(curl); | | 1202 | ktrexit(curl); |
1203 | return (error); | | 1203 | return (error); |
1204 | } | | 1204 | } |
1205 | | | 1205 | |
1206 | /* | | 1206 | /* |
1207 | * fktrace system call | | 1207 | * fktrace system call |
1208 | */ | | 1208 | */ |
1209 | /* ARGSUSED */ | | 1209 | /* ARGSUSED */ |
1210 | int | | 1210 | int |
1211 | sys_fktrace(struct lwp *l, const struct sys_fktrace_args *uap, register_t *retval) | | 1211 | sys_fktrace(struct lwp *l, const struct sys_fktrace_args *uap, register_t *retval) |
1212 | { | | 1212 | { |
1213 | /* { | | 1213 | /* { |
1214 | syscallarg(int) fd; | | 1214 | syscallarg(int) fd; |
1215 | syscallarg(int) ops; | | 1215 | syscallarg(int) ops; |
1216 | syscallarg(int) facs; | | 1216 | syscallarg(int) facs; |
1217 | syscallarg(int) pid; | | 1217 | syscallarg(int) pid; |
1218 | } */ | | 1218 | } */ |
1219 | file_t *fp; | | 1219 | file_t *fp; |
1220 | int error, fd; | | 1220 | int error, fd; |
1221 | | | 1221 | |
1222 | fd = SCARG(uap, fd); | | 1222 | fd = SCARG(uap, fd); |
1223 | if ((fp = fd_getfile(fd)) == NULL) | | 1223 | if ((fp = fd_getfile(fd)) == NULL) |
1224 | return (EBADF); | | 1224 | return (EBADF); |
1225 | if ((fp->f_flag & FWRITE) == 0) | | 1225 | if ((fp->f_flag & FWRITE) == 0) |
1226 | error = EBADF; | | 1226 | error = EBADF; |
1227 | else | | 1227 | else |
1228 | error = ktrace_common(l, SCARG(uap, ops), | | 1228 | error = ktrace_common(l, SCARG(uap, ops), |
1229 | SCARG(uap, facs), SCARG(uap, pid), &fp); | | 1229 | SCARG(uap, facs), SCARG(uap, pid), &fp); |
1230 | fd_putfile(fd); | | 1230 | fd_putfile(fd); |
1231 | return error; | | 1231 | return error; |
1232 | } | | 1232 | } |
1233 | | | 1233 | |
1234 | /* | | 1234 | /* |
1235 | * ktrace system call | | 1235 | * ktrace system call |
1236 | */ | | 1236 | */ |
1237 | /* ARGSUSED */ | | 1237 | /* ARGSUSED */ |
1238 | int | | 1238 | int |
1239 | sys_ktrace(struct lwp *l, const struct sys_ktrace_args *uap, register_t *retval) | | 1239 | sys_ktrace(struct lwp *l, const struct sys_ktrace_args *uap, register_t *retval) |
1240 | { | | 1240 | { |
1241 | /* { | | 1241 | /* { |
1242 | syscallarg(const char *) fname; | | 1242 | syscallarg(const char *) fname; |
1243 | syscallarg(int) ops; | | 1243 | syscallarg(int) ops; |
1244 | syscallarg(int) facs; | | 1244 | syscallarg(int) facs; |
1245 | syscallarg(int) pid; | | 1245 | syscallarg(int) pid; |
1246 | } */ | | 1246 | } */ |
1247 | struct vnode *vp = NULL; | | 1247 | struct vnode *vp = NULL; |
1248 | file_t *fp = NULL; | | 1248 | file_t *fp = NULL; |
1249 | struct pathbuf *pb; | | 1249 | struct pathbuf *pb; |
1250 | struct nameidata nd; | | 1250 | struct nameidata nd; |
1251 | int error = 0; | | 1251 | int error = 0; |
1252 | int fd; | | 1252 | int fd; |
1253 | | | 1253 | |
1254 | if (ktrenter(l)) | | 1254 | if (ktrenter(l)) |
1255 | return EAGAIN; | | 1255 | return EAGAIN; |
1256 | | | 1256 | |
1257 | if (KTROP(SCARG(uap, ops)) != KTROP_CLEAR) { | | 1257 | if (KTROP(SCARG(uap, ops)) != KTROP_CLEAR) { |
1258 | /* | | 1258 | /* |
1259 | * an operation which requires a file argument. | | 1259 | * an operation which requires a file argument. |
1260 | */ | | 1260 | */ |
1261 | error = pathbuf_copyin(SCARG(uap, fname), &pb); | | 1261 | error = pathbuf_copyin(SCARG(uap, fname), &pb); |
1262 | if (error) { | | 1262 | if (error) { |
1263 | ktrexit(l); | | 1263 | ktrexit(l); |
1264 | return (error); | | 1264 | return (error); |
1265 | } | | 1265 | } |
1266 | NDINIT(&nd, LOOKUP, FOLLOW, pb); | | 1266 | NDINIT(&nd, LOOKUP, FOLLOW, pb); |
1267 | if ((error = vn_open(&nd, FREAD|FWRITE, 0)) != 0) { | | 1267 | if ((error = vn_open(&nd, FREAD|FWRITE, 0)) != 0) { |
1268 | pathbuf_destroy(pb); | | 1268 | pathbuf_destroy(pb); |
1269 | ktrexit(l); | | 1269 | ktrexit(l); |
1270 | return (error); | | 1270 | return (error); |
1271 | } | | 1271 | } |
1272 | vp = nd.ni_vp; | | 1272 | vp = nd.ni_vp; |
1273 | pathbuf_destroy(pb); | | 1273 | pathbuf_destroy(pb); |
1274 | VOP_UNLOCK(vp); | | 1274 | VOP_UNLOCK(vp); |
1275 | if (vp->v_type != VREG) { | | 1275 | if (vp->v_type != VREG) { |
1276 | vn_close(vp, FREAD|FWRITE, l->l_cred); | | 1276 | vn_close(vp, FREAD|FWRITE, l->l_cred); |
1277 | ktrexit(l); | | 1277 | ktrexit(l); |
1278 | return (EACCES); | | 1278 | return (EACCES); |
1279 | } | | 1279 | } |
1280 | /* | | 1280 | /* |
1281 | * This uses up a file descriptor slot in the | | 1281 | * This uses up a file descriptor slot in the |
1282 | * tracing process for the duration of this syscall. | | 1282 | * tracing process for the duration of this syscall. |
1283 | * This is not expected to be a problem. | | 1283 | * This is not expected to be a problem. |
1284 | */ | | 1284 | */ |
1285 | if ((error = fd_allocfile(&fp, &fd)) != 0) { | | 1285 | if ((error = fd_allocfile(&fp, &fd)) != 0) { |
1286 | vn_close(vp, FWRITE, l->l_cred); | | 1286 | vn_close(vp, FWRITE, l->l_cred); |
1287 | ktrexit(l); | | 1287 | ktrexit(l); |
1288 | return error; | | 1288 | return error; |
1289 | } | | 1289 | } |
1290 | fp->f_flag = FWRITE; | | 1290 | fp->f_flag = FWRITE; |
1291 | fp->f_type = DTYPE_VNODE; | | 1291 | fp->f_type = DTYPE_VNODE; |
1292 | fp->f_ops = &vnops; | | 1292 | fp->f_ops = &vnops; |
1293 | fp->f_data = (void *)vp; | | 1293 | fp->f_data = (void *)vp; |
1294 | vp = NULL; | | 1294 | vp = NULL; |
1295 | } | | 1295 | } |
1296 | error = ktrace_common(l, SCARG(uap, ops), SCARG(uap, facs), | | 1296 | error = ktrace_common(l, SCARG(uap, ops), SCARG(uap, facs), |
1297 | SCARG(uap, pid), &fp); | | 1297 | SCARG(uap, pid), &fp); |
1298 | if (KTROP(SCARG(uap, ops)) != KTROP_CLEAR) | | 1298 | if (KTROP(SCARG(uap, ops)) != KTROP_CLEAR) |
1299 | fd_abort(curproc, fp, fd); | | 1299 | fd_abort(curproc, fp, fd); |
1300 | return (error); | | 1300 | return (error); |
1301 | } | | 1301 | } |
1302 | | | 1302 | |
1303 | int | | 1303 | int |
1304 | ktrops(lwp_t *curl, struct proc *p, int ops, int facs, | | 1304 | ktrops(lwp_t *curl, struct proc *p, int ops, int facs, |
1305 | struct ktr_desc *ktd) | | 1305 | struct ktr_desc *ktd) |
1306 | { | | 1306 | { |
1307 | int vers = ops & KTRFAC_VER_MASK; | | 1307 | int vers = ops & KTRFAC_VER_MASK; |
1308 | int error = 0; | | 1308 | int error = 0; |
1309 | | | 1309 | |
1310 | mutex_enter(p->p_lock); | | 1310 | mutex_enter(p->p_lock); |
1311 | mutex_enter(&ktrace_lock); | | 1311 | mutex_enter(&ktrace_lock); |
1312 | | | 1312 | |
1313 | if (!ktrcanset(curl, p)) | | 1313 | if (!ktrcanset(curl, p)) |
1314 | goto out; | | 1314 | goto out; |
1315 | | | 1315 | |
1316 | switch (vers) { | | 1316 | switch (vers) { |
1317 | case KTRFACv0: | | 1317 | case KTRFACv0: |
1318 | case KTRFACv1: | | 1318 | case KTRFACv1: |
1319 | case KTRFACv2: | | 1319 | case KTRFACv2: |
1320 | break; | | 1320 | break; |
1321 | default: | | 1321 | default: |
1322 | error = EINVAL; | | 1322 | error = EINVAL; |
1323 | goto out; | | 1323 | goto out; |
1324 | } | | 1324 | } |
1325 | | | 1325 | |
1326 | if (KTROP(ops) == KTROP_SET) { | | 1326 | if (KTROP(ops) == KTROP_SET) { |
1327 | if (p->p_tracep != ktd) { | | 1327 | if (p->p_tracep != ktd) { |
1328 | /* | | 1328 | /* |
1329 | * if trace file already in use, relinquish | | 1329 | * if trace file already in use, relinquish |
1330 | */ | | 1330 | */ |
1331 | ktrderef(p); | | 1331 | ktrderef(p); |
1332 | p->p_tracep = ktd; | | 1332 | p->p_tracep = ktd; |
1333 | ktradref(p); | | 1333 | ktradref(p); |
1334 | } | | 1334 | } |
1335 | p->p_traceflag |= facs; | | 1335 | p->p_traceflag |= facs; |
1336 | if (kauth_authorize_process(curl->l_cred, KAUTH_PROCESS_KTRACE, | | 1336 | if (kauth_authorize_process(curl->l_cred, KAUTH_PROCESS_KTRACE, |
1337 | p, KAUTH_ARG(KAUTH_REQ_PROCESS_KTRACE_PERSISTENT), NULL, | | 1337 | p, KAUTH_ARG(KAUTH_REQ_PROCESS_KTRACE_PERSISTENT), NULL, |
1338 | NULL) == 0) | | 1338 | NULL) == 0) |
1339 | p->p_traceflag |= KTRFAC_PERSISTENT; | | 1339 | p->p_traceflag |= KTRFAC_PERSISTENT; |
1340 | } else { | | 1340 | } else { |
1341 | /* KTROP_CLEAR */ | | 1341 | /* KTROP_CLEAR */ |
1342 | if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) { | | 1342 | if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) { |
1343 | /* no more tracing */ | | 1343 | /* no more tracing */ |
1344 | ktrderef(p); | | 1344 | ktrderef(p); |
1345 | } | | 1345 | } |
1346 | } | | 1346 | } |
1347 | | | 1347 | |
1348 | if (p->p_traceflag) | | 1348 | if (p->p_traceflag) |
1349 | p->p_traceflag |= vers; | | 1349 | p->p_traceflag |= vers; |
1350 | /* | | 1350 | /* |
1351 | * Emit an emulation record, every time there is a ktrace | | 1351 | * Emit an emulation record, every time there is a ktrace |
1352 | * change/attach request. | | 1352 | * change/attach request. |
1353 | */ | | 1353 | */ |
1354 | if (KTRPOINT(p, KTR_EMUL)) | | 1354 | if (KTRPOINT(p, KTR_EMUL)) |
1355 | p->p_traceflag |= KTRFAC_TRC_EMUL; | | 1355 | p->p_traceflag |= KTRFAC_TRC_EMUL; |
1356 | | | 1356 | |
1357 | p->p_trace_enabled = trace_is_enabled(p); | | 1357 | p->p_trace_enabled = trace_is_enabled(p); |
1358 | #ifdef __HAVE_SYSCALL_INTERN | | 1358 | #ifdef __HAVE_SYSCALL_INTERN |
1359 | (*p->p_emul->e_syscall_intern)(p); | | 1359 | (*p->p_emul->e_syscall_intern)(p); |
1360 | #endif | | 1360 | #endif |
1361 | | | 1361 | |
1362 | out: | | 1362 | out: |
1363 | mutex_exit(&ktrace_lock); | | 1363 | mutex_exit(&ktrace_lock); |
1364 | mutex_exit(p->p_lock); | | 1364 | mutex_exit(p->p_lock); |
1365 | | | 1365 | |
1366 | return (1); | | 1366 | return (1); |
1367 | } | | 1367 | } |
1368 | | | 1368 | |
/*
 * Apply ktrops() to "top" and to every descendant of it, walking the
 * process tree iteratively: children first, then siblings, climbing
 * back toward "top" when a subtree is exhausted.
 *
 * Returns non-zero if the operation was applied to at least one
 * process.  Caller must hold proc_lock so the tree cannot change
 * underneath us.
 */
int
ktrsetchildren(lwp_t *curl, struct proc *top, int ops, int facs,
    struct ktr_desc *ktd)
{
	struct proc *p;
	int ret = 0;

	KASSERT(mutex_owned(proc_lock));

	p = top;
	for (;;) {
		ret |= ktrops(curl, p, ops, facs, ktd);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (LIST_FIRST(&p->p_children) != NULL) {
			p = LIST_FIRST(&p->p_children);
			continue;
		}
		for (;;) {
			/* Back at the root: the whole subtree is done. */
			if (p == top)
				return (ret);
			if (LIST_NEXT(p, p_sibling) != NULL) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
	}
	/*NOTREACHED*/
}
1402 | | | 1402 | |
/*
 * Write a chain of trace entries (linked via kte_list, starting at
 * "kte") to the trace file of "ktd".  Entries are batched into one
 * scatter/gather uio -- a header iovec plus an optional payload iovec
 * per entry, at most 64 iovecs per write -- after converting each
 * header in place to the on-disk layout of its requested version.
 * On a persistent write error, tracing on this descriptor is torn
 * down.  Every entry in the chain is freed before returning, whether
 * or not the write succeeded.
 */
void
ktrwrite(struct ktr_desc *ktd, struct ktrace_entry *kte)
{
	size_t hlen;
	struct uio auio;
	struct iovec aiov[64], *iov;
	struct ktrace_entry *top = kte;
	struct ktr_header *kth;
	file_t *fp = ktd->ktd_fp;
	int error;
next:
	/* Start assembling a fresh batch of iovecs. */
	auio.uio_iov = iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_rw = UIO_WRITE;
	auio.uio_resid = 0;
	auio.uio_iovcnt = 0;
	UIO_SETUP_SYSSPACE(&auio);
	do {
		struct timespec ts;
		lwpid_t lid;
		kth = &kte->kte_kth;

		hlen = sizeof(struct ktr_header);
		switch (kth->ktr_version) {
		case 0:
			/* v0 on-disk header: timestamp as a timeval. */
			ts = kth->ktr_time;

			kth->ktr_otv.tv_sec = ts.tv_sec;
			kth->ktr_otv.tv_usec = ts.tv_nsec / 1000;
			kth->ktr_unused = NULL;
			hlen -= sizeof(kth->_v) -
			    MAX(sizeof(kth->_v._v0), sizeof(kth->_v._v1));
			break;
		case 1:
			/* v1 on-disk header: timespec plus the lwp id. */
			ts = kth->ktr_time;
			lid = kth->ktr_lid;

			kth->ktr_ots.tv_sec = ts.tv_sec;
			kth->ktr_ots.tv_nsec = ts.tv_nsec;
			kth->ktr_olid = lid;
			hlen -= sizeof(kth->_v) -
			    MAX(sizeof(kth->_v._v0), sizeof(kth->_v._v1));
			break;
		}
		iov->iov_base = (void *)kth;
		iov++->iov_len = hlen;
		auio.uio_resid += hlen;
		auio.uio_iovcnt++;
		if (kth->ktr_len > 0) {
			/* The entry's payload follows its header. */
			iov->iov_base = kte->kte_buf;
			iov++->iov_len = kth->ktr_len;
			auio.uio_resid += kth->ktr_len;
			auio.uio_iovcnt++;
		}
	} while ((kte = TAILQ_NEXT(kte, kte_list)) != NULL &&
	    auio.uio_iovcnt < sizeof(aiov) / sizeof(aiov[0]) - 1);

again:
	error = (*fp->f_ops->fo_write)(fp, &fp->f_offset, &auio,
	    fp->f_cred, FOF_UPDATE_OFFSET);
	switch (error) {

	case 0:
		/* Short write: retry; more entries left: next batch. */
		if (auio.uio_resid > 0)
			goto again;
		if (kte != NULL)
			goto next;
		break;

	case EWOULDBLOCK:
		/* Target can't accept data now; back off one tick. */
		kpause("ktrzzz", false, 1, NULL);
		goto again;

	default:
		/*
		 * If error encountered, give up tracing on this
		 * vnode. Don't report EPIPE as this can easily
		 * happen with fktrace()/ktruss.
		 */
#ifndef DEBUG
		if (error != EPIPE)
#endif
		log(LOG_NOTICE,
		    "ktrace write failed, errno %d, tracing stopped\n",
		    error);
		(void)ktrderefall(ktd, 0);
	}

	/* Free every entry in the original chain. */
	while ((kte = top) != NULL) {
		top = TAILQ_NEXT(top, kte_list);
		ktefree(kte);
	}
}
1496 | | | 1496 | |
/*
 * Body of the per-descriptor writer thread created by ktrace_common().
 * Drains the entry queue to the trace file in a loop, sleeping on
 * ktd_cv when the queue is empty.  Exits once the descriptor reference
 * count drops to zero and the queue is drained, tearing the descriptor
 * down and closing the trace file.
 */
void
ktrace_thread(void *arg)
{
	struct ktr_desc *ktd = arg;
	file_t *fp = ktd->ktd_fp;
	struct ktrace_entry *kte;
	int ktrerr, errcnt;

	mutex_enter(&ktrace_lock);
	for (;;) {
		kte = TAILQ_FIRST(&ktd->ktd_queue);
		if (kte == NULL) {
			if (ktd->ktd_flags & KTDF_WAIT) {
				/* Queue is drained: release sync waiters. */
				ktd->ktd_flags &= ~(KTDF_WAIT | KTDF_BLOCKING);
				cv_broadcast(&ktd->ktd_sync_cv);
			}
			if (ktd->ktd_ref == 0)
				break;
			cv_wait(&ktd->ktd_cv, &ktrace_lock);
			continue;
		}
		/* Detach the whole queue; write it with the lock dropped. */
		TAILQ_INIT(&ktd->ktd_queue);
		ktd->ktd_qcount = 0;
		ktrerr = ktd->ktd_error;
		errcnt = ktd->ktd_errcnt;
		ktd->ktd_error = ktd->ktd_errcnt = 0;
		mutex_exit(&ktrace_lock);

		if (ktrerr) {
			log(LOG_NOTICE,
			    "ktrace failed, fp %p, error 0x%x, total %d\n",
			    fp, ktrerr, errcnt);
		}
		ktrwrite(ktd, kte);
		mutex_enter(&ktrace_lock);
	}

	/* Last reference is gone: unregister and destroy the descriptor. */
	TAILQ_REMOVE(&ktdq, ktd, ktd_list);

	callout_halt(&ktd->ktd_wakch, &ktrace_lock);
	callout_destroy(&ktd->ktd_wakch);
	mutex_exit(&ktrace_lock);

	/*
	 * ktrace file descriptor can't be watched (are not visible to
	 * userspace), so no kqueue stuff here
	 * XXX: The above comment is wrong, because the fktrace file
	 * descriptor is available in userland.
	 */
	closef(fp);

	cv_destroy(&ktd->ktd_sync_cv);
	cv_destroy(&ktd->ktd_cv);

	kmem_free(ktd, sizeof(*ktd));

	kthread_exit(0);
}
1555 | | | 1555 | |
1556 | /* | | 1556 | /* |
1557 | * Return true if caller has permission to set the ktracing state | | 1557 | * Return true if caller has permission to set the ktracing state |
1558 | * of target. Essentially, the target can't possess any | | 1558 | * of target. Essentially, the target can't possess any |
1559 | * more permissions than the caller. KTRFAC_PERSISTENT signifies that | | 1559 | * more permissions than the caller. KTRFAC_PERSISTENT signifies that |
1560 | * the tracing will persist on sugid processes during exec; it is only | | 1560 | * the tracing will persist on sugid processes during exec; it is only |
1561 | * settable by a process with appropriate credentials. | | 1561 | * settable by a process with appropriate credentials. |
1562 | * | | 1562 | * |
1563 | * TODO: check groups. use caller effective gid. | | 1563 | * TODO: check groups. use caller effective gid. |
1564 | */ | | 1564 | */ |
1565 | int | | 1565 | int |
1566 | ktrcanset(lwp_t *calll, struct proc *targetp) | | 1566 | ktrcanset(lwp_t *calll, struct proc *targetp) |
1567 | { | | 1567 | { |
1568 | KASSERT(mutex_owned(targetp->p_lock)); | | 1568 | KASSERT(mutex_owned(targetp->p_lock)); |
1569 | KASSERT(mutex_owned(&ktrace_lock)); | | 1569 | KASSERT(mutex_owned(&ktrace_lock)); |
1570 | | | 1570 | |
1571 | if (kauth_authorize_process(calll->l_cred, KAUTH_PROCESS_KTRACE, | | 1571 | if (kauth_authorize_process(calll->l_cred, KAUTH_PROCESS_KTRACE, |
1572 | targetp, NULL, NULL, NULL) == 0) | | 1572 | targetp, NULL, NULL, NULL) == 0) |
1573 | return (1); | | 1573 | return (1); |
1574 | | | 1574 | |
1575 | return (0); | | 1575 | return (0); |
1576 | } | | 1576 | } |
1577 | | | 1577 | |
1578 | /* | | 1578 | /* |
1579 | * Put user defined entry to ktrace records. | | 1579 | * Put user defined entry to ktrace records. |
1580 | */ | | 1580 | */ |
1581 | int | | 1581 | int |
1582 | sys_utrace(struct lwp *l, const struct sys_utrace_args *uap, register_t *retval) | | 1582 | sys_utrace(struct lwp *l, const struct sys_utrace_args *uap, register_t *retval) |
1583 | { | | 1583 | { |
1584 | /* { | | 1584 | /* { |
1585 | syscallarg(const char *) label; | | 1585 | syscallarg(const char *) label; |
1586 | syscallarg(void *) addr; | | 1586 | syscallarg(void *) addr; |
1587 | syscallarg(size_t) len; | | 1587 | syscallarg(size_t) len; |
1588 | } */ | | 1588 | } */ |
1589 | | | 1589 | |
1590 | return ktruser(SCARG(uap, label), SCARG(uap, addr), | | 1590 | return ktruser(SCARG(uap, label), SCARG(uap, addr), |
1591 | SCARG(uap, len), 1); | | 1591 | SCARG(uap, len), 1); |
1592 | } | | 1592 | } |