| @@ -1,1855 +1,1832 @@ | | | @@ -1,1855 +1,1832 @@ |
1 | /* $NetBSD: usbdi.c,v 1.228 2022/03/03 06:09:33 riastradh Exp $ */ | | 1 | /* $NetBSD: usbdi.c,v 1.229 2022/03/03 06:09:44 riastradh Exp $ */ |
2 | | | 2 | |
3 | /* | | 3 | /* |
4 | * Copyright (c) 1998, 2012, 2015 The NetBSD Foundation, Inc. | | 4 | * Copyright (c) 1998, 2012, 2015 The NetBSD Foundation, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation | | 7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Lennart Augustsson (lennart@augustsson.net) at | | 8 | * by Lennart Augustsson (lennart@augustsson.net) at |
9 | * Carlstedt Research & Technology, Matthew R. Green (mrg@eterna.com.au), | | 9 | * Carlstedt Research & Technology, Matthew R. Green (mrg@eterna.com.au), |
10 | * and Nick Hudson. | | 10 | * and Nick Hudson. |
11 | * | | 11 | * |
12 | * Redistribution and use in source and binary forms, with or without | | 12 | * Redistribution and use in source and binary forms, with or without |
13 | * modification, are permitted provided that the following conditions | | 13 | * modification, are permitted provided that the following conditions |
14 | * are met: | | 14 | * are met: |
15 | * 1. Redistributions of source code must retain the above copyright | | 15 | * 1. Redistributions of source code must retain the above copyright |
16 | * notice, this list of conditions and the following disclaimer. | | 16 | * notice, this list of conditions and the following disclaimer. |
17 | * 2. Redistributions in binary form must reproduce the above copyright | | 17 | * 2. Redistributions in binary form must reproduce the above copyright |
18 | * notice, this list of conditions and the following disclaimer in the | | 18 | * notice, this list of conditions and the following disclaimer in the |
19 | * documentation and/or other materials provided with the distribution. | | 19 | * documentation and/or other materials provided with the distribution. |
20 | * | | 20 | * |
21 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 21 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
22 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 22 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
23 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 23 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
24 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 24 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
25 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 25 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
26 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 26 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
27 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 27 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
28 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 28 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
29 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 29 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
30 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 30 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
31 | * POSSIBILITY OF SUCH DAMAGE. | | 31 | * POSSIBILITY OF SUCH DAMAGE. |
32 | */ | | 32 | */ |
33 | | | 33 | |
34 | #include <sys/cdefs.h> | | 34 | #include <sys/cdefs.h> |
35 | __KERNEL_RCSID(0, "$NetBSD: usbdi.c,v 1.228 2022/03/03 06:09:33 riastradh Exp $"); | | 35 | __KERNEL_RCSID(0, "$NetBSD: usbdi.c,v 1.229 2022/03/03 06:09:44 riastradh Exp $"); |
36 | | | 36 | |
37 | #ifdef _KERNEL_OPT | | 37 | #ifdef _KERNEL_OPT |
38 | #include "opt_usb.h" | | 38 | #include "opt_usb.h" |
39 | #include "opt_compat_netbsd.h" | | 39 | #include "opt_compat_netbsd.h" |
40 | #include "usb_dma.h" | | 40 | #include "usb_dma.h" |
41 | #endif | | 41 | #endif |
42 | | | 42 | |
43 | #include <sys/param.h> | | 43 | #include <sys/param.h> |
44 | #include <sys/systm.h> | | 44 | #include <sys/systm.h> |
45 | #include <sys/kernel.h> | | 45 | #include <sys/kernel.h> |
46 | #include <sys/device.h> | | 46 | #include <sys/device.h> |
47 | #include <sys/kmem.h> | | 47 | #include <sys/kmem.h> |
48 | #include <sys/proc.h> | | 48 | #include <sys/proc.h> |
49 | #include <sys/bus.h> | | 49 | #include <sys/bus.h> |
50 | #include <sys/cpu.h> | | 50 | #include <sys/cpu.h> |
51 | | | 51 | |
52 | #include <dev/usb/usb.h> | | 52 | #include <dev/usb/usb.h> |
53 | #include <dev/usb/usbdi.h> | | 53 | #include <dev/usb/usbdi.h> |
54 | #include <dev/usb/usbdi_util.h> | | 54 | #include <dev/usb/usbdi_util.h> |
55 | #include <dev/usb/usbdivar.h> | | 55 | #include <dev/usb/usbdivar.h> |
56 | #include <dev/usb/usb_mem.h> | | 56 | #include <dev/usb/usb_mem.h> |
57 | #include <dev/usb/usb_quirks.h> | | 57 | #include <dev/usb/usb_quirks.h> |
58 | #include <dev/usb/usb_sdt.h> | | 58 | #include <dev/usb/usb_sdt.h> |
59 | #include <dev/usb/usbhist.h> | | 59 | #include <dev/usb/usbhist.h> |
60 | | | 60 | |
61 | /* UTF-8 encoding stuff */ | | 61 | /* UTF-8 encoding stuff */ |
62 | #include <fs/unicode.h> | | 62 | #include <fs/unicode.h> |
63 | | | 63 | |
64 | SDT_PROBE_DEFINE5(usb, device, pipe, open, | | 64 | SDT_PROBE_DEFINE5(usb, device, pipe, open, |
65 | "struct usbd_interface *"/*iface*/, | | 65 | "struct usbd_interface *"/*iface*/, |
66 | "uint8_t"/*address*/, | | 66 | "uint8_t"/*address*/, |
67 | "uint8_t"/*flags*/, | | 67 | "uint8_t"/*flags*/, |
68 | "int"/*ival*/, | | 68 | "int"/*ival*/, |
69 | "struct usbd_pipe *"/*pipe*/); | | 69 | "struct usbd_pipe *"/*pipe*/); |
70 | | | 70 | |
71 | SDT_PROBE_DEFINE7(usb, device, pipe, open__intr, | | 71 | SDT_PROBE_DEFINE7(usb, device, pipe, open__intr, |
72 | "struct usbd_interface *"/*iface*/, | | 72 | "struct usbd_interface *"/*iface*/, |
73 | "uint8_t"/*address*/, | | 73 | "uint8_t"/*address*/, |
74 | "uint8_t"/*flags*/, | | 74 | "uint8_t"/*flags*/, |
75 | "int"/*ival*/, | | 75 | "int"/*ival*/, |
76 | "usbd_callback"/*cb*/, | | 76 | "usbd_callback"/*cb*/, |
77 | "void *"/*cookie*/, | | 77 | "void *"/*cookie*/, |
78 | "struct usbd_pipe *"/*pipe*/); | | 78 | "struct usbd_pipe *"/*pipe*/); |
79 | | | 79 | |
80 | SDT_PROBE_DEFINE2(usb, device, pipe, transfer__start, | | 80 | SDT_PROBE_DEFINE2(usb, device, pipe, transfer__start, |
81 | "struct usbd_pipe *"/*pipe*/, | | 81 | "struct usbd_pipe *"/*pipe*/, |
82 | "struct usbd_xfer *"/*xfer*/); | | 82 | "struct usbd_xfer *"/*xfer*/); |
83 | SDT_PROBE_DEFINE3(usb, device, pipe, transfer__done, | | 83 | SDT_PROBE_DEFINE3(usb, device, pipe, transfer__done, |
84 | "struct usbd_pipe *"/*pipe*/, | | 84 | "struct usbd_pipe *"/*pipe*/, |
85 | "struct usbd_xfer *"/*xfer*/, | | 85 | "struct usbd_xfer *"/*xfer*/, |
86 | "usbd_status"/*err*/); | | 86 | "usbd_status"/*err*/); |
87 | SDT_PROBE_DEFINE2(usb, device, pipe, start, | | 87 | SDT_PROBE_DEFINE2(usb, device, pipe, start, |
88 | "struct usbd_pipe *"/*pipe*/, | | 88 | "struct usbd_pipe *"/*pipe*/, |
89 | "struct usbd_xfer *"/*xfer*/); | | 89 | "struct usbd_xfer *"/*xfer*/); |
90 | | | 90 | |
91 | SDT_PROBE_DEFINE1(usb, device, pipe, close, "struct usbd_pipe *"/*pipe*/); | | 91 | SDT_PROBE_DEFINE1(usb, device, pipe, close, "struct usbd_pipe *"/*pipe*/); |
92 | SDT_PROBE_DEFINE1(usb, device, pipe, abort__start, | | 92 | SDT_PROBE_DEFINE1(usb, device, pipe, abort__start, |
93 | "struct usbd_pipe *"/*pipe*/); | | 93 | "struct usbd_pipe *"/*pipe*/); |
94 | SDT_PROBE_DEFINE1(usb, device, pipe, abort__done, | | 94 | SDT_PROBE_DEFINE1(usb, device, pipe, abort__done, |
95 | "struct usbd_pipe *"/*pipe*/); | | 95 | "struct usbd_pipe *"/*pipe*/); |
96 | SDT_PROBE_DEFINE1(usb, device, pipe, clear__endpoint__stall, | | 96 | SDT_PROBE_DEFINE1(usb, device, pipe, clear__endpoint__stall, |
97 | "struct usbd_pipe *"/*pipe*/); | | 97 | "struct usbd_pipe *"/*pipe*/); |
98 | SDT_PROBE_DEFINE1(usb, device, pipe, clear__endpoint__toggle, | | 98 | SDT_PROBE_DEFINE1(usb, device, pipe, clear__endpoint__toggle, |
99 | "struct usbd_pipe *"/*pipe*/); | | 99 | "struct usbd_pipe *"/*pipe*/); |
100 | | | 100 | |
101 | SDT_PROBE_DEFINE5(usb, device, xfer, create, | | 101 | SDT_PROBE_DEFINE5(usb, device, xfer, create, |
102 | "struct usbd_xfer *"/*xfer*/, | | 102 | "struct usbd_xfer *"/*xfer*/, |
103 | "struct usbd_pipe *"/*pipe*/, | | 103 | "struct usbd_pipe *"/*pipe*/, |
104 | "size_t"/*len*/, | | 104 | "size_t"/*len*/, |
105 | "unsigned int"/*flags*/, | | 105 | "unsigned int"/*flags*/, |
106 | "unsigned int"/*nframes*/); | | 106 | "unsigned int"/*nframes*/); |
107 | SDT_PROBE_DEFINE1(usb, device, xfer, start, "struct usbd_xfer *"/*xfer*/); | | 107 | SDT_PROBE_DEFINE1(usb, device, xfer, start, "struct usbd_xfer *"/*xfer*/); |
108 | SDT_PROBE_DEFINE1(usb, device, xfer, preabort, "struct usbd_xfer *"/*xfer*/); | | 108 | SDT_PROBE_DEFINE1(usb, device, xfer, preabort, "struct usbd_xfer *"/*xfer*/); |
109 | SDT_PROBE_DEFINE1(usb, device, xfer, abort, "struct usbd_xfer *"/*xfer*/); | | 109 | SDT_PROBE_DEFINE1(usb, device, xfer, abort, "struct usbd_xfer *"/*xfer*/); |
110 | SDT_PROBE_DEFINE1(usb, device, xfer, timeout, "struct usbd_xfer *"/*xfer*/); | | 110 | SDT_PROBE_DEFINE1(usb, device, xfer, timeout, "struct usbd_xfer *"/*xfer*/); |
111 | SDT_PROBE_DEFINE2(usb, device, xfer, done, | | 111 | SDT_PROBE_DEFINE2(usb, device, xfer, done, |
112 | "struct usbd_xfer *"/*xfer*/, | | 112 | "struct usbd_xfer *"/*xfer*/, |
113 | "usbd_status"/*status*/); | | 113 | "usbd_status"/*status*/); |
114 | SDT_PROBE_DEFINE1(usb, device, xfer, destroy, "struct usbd_xfer *"/*xfer*/); | | 114 | SDT_PROBE_DEFINE1(usb, device, xfer, destroy, "struct usbd_xfer *"/*xfer*/); |
115 | | | 115 | |
116 | Static void usbd_ar_pipe(struct usbd_pipe *); | | 116 | Static void usbd_ar_pipe(struct usbd_pipe *); |
117 | static usbd_status usb_insert_transfer(struct usbd_xfer *); | | | |
118 | Static void usbd_start_next(struct usbd_pipe *); | | 117 | Static void usbd_start_next(struct usbd_pipe *); |
119 | Static usbd_status usbd_open_pipe_ival | | 118 | Static usbd_status usbd_open_pipe_ival |
120 | (struct usbd_interface *, uint8_t, uint8_t, struct usbd_pipe **, int); | | 119 | (struct usbd_interface *, uint8_t, uint8_t, struct usbd_pipe **, int); |
121 | static void *usbd_alloc_buffer(struct usbd_xfer *, uint32_t); | | 120 | static void *usbd_alloc_buffer(struct usbd_xfer *, uint32_t); |
122 | static void usbd_free_buffer(struct usbd_xfer *); | | 121 | static void usbd_free_buffer(struct usbd_xfer *); |
123 | static struct usbd_xfer *usbd_alloc_xfer(struct usbd_device *, unsigned int); | | 122 | static struct usbd_xfer *usbd_alloc_xfer(struct usbd_device *, unsigned int); |
124 | static void usbd_free_xfer(struct usbd_xfer *); | | 123 | static void usbd_free_xfer(struct usbd_xfer *); |
125 | static void usbd_request_async_cb(struct usbd_xfer *, void *, usbd_status); | | 124 | static void usbd_request_async_cb(struct usbd_xfer *, void *, usbd_status); |
126 | static void usbd_xfer_timeout(void *); | | 125 | static void usbd_xfer_timeout(void *); |
127 | static void usbd_xfer_timeout_task(void *); | | 126 | static void usbd_xfer_timeout_task(void *); |
128 | static bool usbd_xfer_probe_timeout(struct usbd_xfer *); | | 127 | static bool usbd_xfer_probe_timeout(struct usbd_xfer *); |
129 | static void usbd_xfer_cancel_timeout_async(struct usbd_xfer *); | | 128 | static void usbd_xfer_cancel_timeout_async(struct usbd_xfer *); |
130 | | | 129 | |
131 | #if defined(USB_DEBUG) | | 130 | #if defined(USB_DEBUG) |
132 | void | | 131 | void |
133 | usbd_dump_iface(struct usbd_interface *iface) | | 132 | usbd_dump_iface(struct usbd_interface *iface) |
134 | { | | 133 | { |
135 | USBHIST_FUNC(); | | 134 | USBHIST_FUNC(); |
136 | USBHIST_CALLARGS(usbdebug, "iface %#jx", (uintptr_t)iface, 0, 0, 0); | | 135 | USBHIST_CALLARGS(usbdebug, "iface %#jx", (uintptr_t)iface, 0, 0, 0); |
137 | | | 136 | |
138 | if (iface == NULL) | | 137 | if (iface == NULL) |
139 | return; | | 138 | return; |
140 | USBHIST_LOG(usbdebug, " device = %#jx idesc = %#jx index = %jd", | | 139 | USBHIST_LOG(usbdebug, " device = %#jx idesc = %#jx index = %jd", |
141 | (uintptr_t)iface->ui_dev, (uintptr_t)iface->ui_idesc, | | 140 | (uintptr_t)iface->ui_dev, (uintptr_t)iface->ui_idesc, |
142 | iface->ui_index, 0); | | 141 | iface->ui_index, 0); |
143 | USBHIST_LOG(usbdebug, " altindex=%jd", | | 142 | USBHIST_LOG(usbdebug, " altindex=%jd", |
144 | iface->ui_altindex, 0, 0, 0); | | 143 | iface->ui_altindex, 0, 0, 0); |
145 | } | | 144 | } |
146 | | | 145 | |
147 | void | | 146 | void |
148 | usbd_dump_device(struct usbd_device *dev) | | 147 | usbd_dump_device(struct usbd_device *dev) |
149 | { | | 148 | { |
150 | USBHIST_FUNC(); | | 149 | USBHIST_FUNC(); |
151 | USBHIST_CALLARGS(usbdebug, "dev = %#jx", (uintptr_t)dev, 0, 0, 0); | | 150 | USBHIST_CALLARGS(usbdebug, "dev = %#jx", (uintptr_t)dev, 0, 0, 0); |
152 | | | 151 | |
153 | if (dev == NULL) | | 152 | if (dev == NULL) |
154 | return; | | 153 | return; |
155 | USBHIST_LOG(usbdebug, " bus = %#jx default_pipe = %#jx", | | 154 | USBHIST_LOG(usbdebug, " bus = %#jx default_pipe = %#jx", |
156 | (uintptr_t)dev->ud_bus, (uintptr_t)dev->ud_pipe0, 0, 0); | | 155 | (uintptr_t)dev->ud_bus, (uintptr_t)dev->ud_pipe0, 0, 0); |
157 | USBHIST_LOG(usbdebug, " address = %jd config = %jd depth = %jd ", | | 156 | USBHIST_LOG(usbdebug, " address = %jd config = %jd depth = %jd ", |
158 | dev->ud_addr, dev->ud_config, dev->ud_depth, 0); | | 157 | dev->ud_addr, dev->ud_config, dev->ud_depth, 0); |
159 | USBHIST_LOG(usbdebug, " speed = %jd self_powered = %jd " | | 158 | USBHIST_LOG(usbdebug, " speed = %jd self_powered = %jd " |
160 | "power = %jd langid = %jd", | | 159 | "power = %jd langid = %jd", |
161 | dev->ud_speed, dev->ud_selfpowered, dev->ud_power, dev->ud_langid); | | 160 | dev->ud_speed, dev->ud_selfpowered, dev->ud_power, dev->ud_langid); |
162 | } | | 161 | } |
163 | | | 162 | |
164 | void | | 163 | void |
165 | usbd_dump_endpoint(struct usbd_endpoint *endp) | | 164 | usbd_dump_endpoint(struct usbd_endpoint *endp) |
166 | { | | 165 | { |
167 | USBHIST_FUNC(); | | 166 | USBHIST_FUNC(); |
168 | USBHIST_CALLARGS(usbdebug, "endp = %#jx", (uintptr_t)endp, 0, 0, 0); | | 167 | USBHIST_CALLARGS(usbdebug, "endp = %#jx", (uintptr_t)endp, 0, 0, 0); |
169 | | | 168 | |
170 | if (endp == NULL) | | 169 | if (endp == NULL) |
171 | return; | | 170 | return; |
172 | USBHIST_LOG(usbdebug, " edesc = %#jx refcnt = %jd", | | 171 | USBHIST_LOG(usbdebug, " edesc = %#jx refcnt = %jd", |
173 | (uintptr_t)endp->ue_edesc, endp->ue_refcnt, 0, 0); | | 172 | (uintptr_t)endp->ue_edesc, endp->ue_refcnt, 0, 0); |
174 | if (endp->ue_edesc) | | 173 | if (endp->ue_edesc) |
175 | USBHIST_LOG(usbdebug, " bEndpointAddress=0x%02jx", | | 174 | USBHIST_LOG(usbdebug, " bEndpointAddress=0x%02jx", |
176 | endp->ue_edesc->bEndpointAddress, 0, 0, 0); | | 175 | endp->ue_edesc->bEndpointAddress, 0, 0, 0); |
177 | } | | 176 | } |
178 | | | 177 | |
179 | void | | 178 | void |
180 | usbd_dump_queue(struct usbd_pipe *pipe) | | 179 | usbd_dump_queue(struct usbd_pipe *pipe) |
181 | { | | 180 | { |
182 | struct usbd_xfer *xfer; | | 181 | struct usbd_xfer *xfer; |
183 | | | 182 | |
184 | USBHIST_FUNC(); | | 183 | USBHIST_FUNC(); |
185 | USBHIST_CALLARGS(usbdebug, "pipe = %#jx", (uintptr_t)pipe, 0, 0, 0); | | 184 | USBHIST_CALLARGS(usbdebug, "pipe = %#jx", (uintptr_t)pipe, 0, 0, 0); |
186 | | | 185 | |
187 | SIMPLEQ_FOREACH(xfer, &pipe->up_queue, ux_next) { | | 186 | SIMPLEQ_FOREACH(xfer, &pipe->up_queue, ux_next) { |
188 | USBHIST_LOG(usbdebug, " xfer = %#jx", (uintptr_t)xfer, | | 187 | USBHIST_LOG(usbdebug, " xfer = %#jx", (uintptr_t)xfer, |
189 | 0, 0, 0); | | 188 | 0, 0, 0); |
190 | } | | 189 | } |
191 | } | | 190 | } |
192 | | | 191 | |
193 | void | | 192 | void |
194 | usbd_dump_pipe(struct usbd_pipe *pipe) | | 193 | usbd_dump_pipe(struct usbd_pipe *pipe) |
195 | { | | 194 | { |
196 | USBHIST_FUNC(); | | 195 | USBHIST_FUNC(); |
197 | USBHIST_CALLARGS(usbdebug, "pipe = %#jx", (uintptr_t)pipe, 0, 0, 0); | | 196 | USBHIST_CALLARGS(usbdebug, "pipe = %#jx", (uintptr_t)pipe, 0, 0, 0); |
198 | | | 197 | |
199 | if (pipe == NULL) | | 198 | if (pipe == NULL) |
200 | return; | | 199 | return; |
201 | usbd_dump_iface(pipe->up_iface); | | 200 | usbd_dump_iface(pipe->up_iface); |
202 | usbd_dump_device(pipe->up_dev); | | 201 | usbd_dump_device(pipe->up_dev); |
203 | usbd_dump_endpoint(pipe->up_endpoint); | | 202 | usbd_dump_endpoint(pipe->up_endpoint); |
204 | USBHIST_LOG(usbdebug, "(usbd_dump_pipe)", 0, 0, 0, 0); | | 203 | USBHIST_LOG(usbdebug, "(usbd_dump_pipe)", 0, 0, 0, 0); |
205 | USBHIST_LOG(usbdebug, " running = %jd aborting = %jd", | | 204 | USBHIST_LOG(usbdebug, " running = %jd aborting = %jd", |
206 | pipe->up_running, pipe->up_aborting, 0, 0); | | 205 | pipe->up_running, pipe->up_aborting, 0, 0); |
207 | USBHIST_LOG(usbdebug, " intrxfer = %#jx, repeat = %jd, " | | 206 | USBHIST_LOG(usbdebug, " intrxfer = %#jx, repeat = %jd, " |
208 | "interval = %jd", (uintptr_t)pipe->up_intrxfer, pipe->up_repeat, | | 207 | "interval = %jd", (uintptr_t)pipe->up_intrxfer, pipe->up_repeat, |
209 | pipe->up_interval, 0); | | 208 | pipe->up_interval, 0); |
210 | } | | 209 | } |
211 | #endif | | 210 | #endif |
212 | | | 211 | |
213 | usbd_status | | 212 | usbd_status |
214 | usbd_open_pipe(struct usbd_interface *iface, uint8_t address, | | 213 | usbd_open_pipe(struct usbd_interface *iface, uint8_t address, |
215 | uint8_t flags, struct usbd_pipe **pipe) | | 214 | uint8_t flags, struct usbd_pipe **pipe) |
216 | { | | 215 | { |
217 | return (usbd_open_pipe_ival(iface, address, flags, pipe, | | 216 | return (usbd_open_pipe_ival(iface, address, flags, pipe, |
218 | USBD_DEFAULT_INTERVAL)); | | 217 | USBD_DEFAULT_INTERVAL)); |
219 | } | | 218 | } |
220 | | | 219 | |
221 | usbd_status | | 220 | usbd_status |
222 | usbd_open_pipe_ival(struct usbd_interface *iface, uint8_t address, | | 221 | usbd_open_pipe_ival(struct usbd_interface *iface, uint8_t address, |
223 | uint8_t flags, struct usbd_pipe **pipe, int ival) | | 222 | uint8_t flags, struct usbd_pipe **pipe, int ival) |
224 | { | | 223 | { |
225 | struct usbd_pipe *p = NULL; | | 224 | struct usbd_pipe *p = NULL; |
226 | struct usbd_endpoint *ep = NULL /* XXXGCC */; | | 225 | struct usbd_endpoint *ep = NULL /* XXXGCC */; |
227 | bool piperef = false; | | 226 | bool piperef = false; |
228 | usbd_status err; | | 227 | usbd_status err; |
229 | int i; | | 228 | int i; |
230 | | | 229 | |
231 | USBHIST_FUNC(); | | 230 | USBHIST_FUNC(); |
232 | USBHIST_CALLARGS(usbdebug, "iface = %#jx address = %#jx flags = %#jx", | | 231 | USBHIST_CALLARGS(usbdebug, "iface = %#jx address = %#jx flags = %#jx", |
233 | (uintptr_t)iface, address, flags, 0); | | 232 | (uintptr_t)iface, address, flags, 0); |
234 | | | 233 | |
235 | /* | | 234 | /* |
236 | * Block usbd_set_interface so we have a snapshot of the | | 235 | * Block usbd_set_interface so we have a snapshot of the |
237 | * interface endpoints. They will remain stable until we drop | | 236 | * interface endpoints. They will remain stable until we drop |
238 | * the reference in usbd_close_pipe (or on failure here). | | 237 | * the reference in usbd_close_pipe (or on failure here). |
239 | */ | | 238 | */ |
240 | err = usbd_iface_piperef(iface); | | 239 | err = usbd_iface_piperef(iface); |
241 | if (err) | | 240 | if (err) |
242 | goto out; | | 241 | goto out; |
243 | piperef = true; | | 242 | piperef = true; |
244 | | | 243 | |
245 | /* Find the endpoint at this address. */ | | 244 | /* Find the endpoint at this address. */ |
246 | for (i = 0; i < iface->ui_idesc->bNumEndpoints; i++) { | | 245 | for (i = 0; i < iface->ui_idesc->bNumEndpoints; i++) { |
247 | ep = &iface->ui_endpoints[i]; | | 246 | ep = &iface->ui_endpoints[i]; |
248 | if (ep->ue_edesc == NULL) { | | 247 | if (ep->ue_edesc == NULL) { |
249 | err = USBD_IOERROR; | | 248 | err = USBD_IOERROR; |
250 | goto out; | | 249 | goto out; |
251 | } | | 250 | } |
252 | if (ep->ue_edesc->bEndpointAddress == address) | | 251 | if (ep->ue_edesc->bEndpointAddress == address) |
253 | break; | | 252 | break; |
254 | } | | 253 | } |
255 | if (i == iface->ui_idesc->bNumEndpoints) { | | 254 | if (i == iface->ui_idesc->bNumEndpoints) { |
256 | err = USBD_BAD_ADDRESS; | | 255 | err = USBD_BAD_ADDRESS; |
257 | goto out; | | 256 | goto out; |
258 | } | | 257 | } |
259 | | | 258 | |
260 | /* Set up the pipe with this endpoint. */ | | 259 | /* Set up the pipe with this endpoint. */ |
261 | err = usbd_setup_pipe_flags(iface->ui_dev, iface, ep, ival, &p, flags); | | 260 | err = usbd_setup_pipe_flags(iface->ui_dev, iface, ep, ival, &p, flags); |
262 | if (err) | | 261 | if (err) |
263 | goto out; | | 262 | goto out; |
264 | | | 263 | |
265 | /* Success! */ | | 264 | /* Success! */ |
266 | *pipe = p; | | 265 | *pipe = p; |
267 | p = NULL; /* handed off to caller */ | | 266 | p = NULL; /* handed off to caller */ |
268 | piperef = false; /* handed off to pipe */ | | 267 | piperef = false; /* handed off to pipe */ |
269 | SDT_PROBE5(usb, device, pipe, open, | | 268 | SDT_PROBE5(usb, device, pipe, open, |
270 | iface, address, flags, ival, p); | | 269 | iface, address, flags, ival, p); |
271 | err = USBD_NORMAL_COMPLETION; | | 270 | err = USBD_NORMAL_COMPLETION; |
272 | | | 271 | |
273 | out: if (p) | | 272 | out: if (p) |
274 | usbd_close_pipe(p); | | 273 | usbd_close_pipe(p); |
275 | if (piperef) | | 274 | if (piperef) |
276 | usbd_iface_pipeunref(iface); | | 275 | usbd_iface_pipeunref(iface); |
277 | return err; | | 276 | return err; |
278 | } | | 277 | } |
279 | | | 278 | |
280 | usbd_status | | 279 | usbd_status |
281 | usbd_open_pipe_intr(struct usbd_interface *iface, uint8_t address, | | 280 | usbd_open_pipe_intr(struct usbd_interface *iface, uint8_t address, |
282 | uint8_t flags, struct usbd_pipe **pipe, | | 281 | uint8_t flags, struct usbd_pipe **pipe, |
283 | void *priv, void *buffer, uint32_t len, | | 282 | void *priv, void *buffer, uint32_t len, |
284 | usbd_callback cb, int ival) | | 283 | usbd_callback cb, int ival) |
285 | { | | 284 | { |
286 | usbd_status err; | | 285 | usbd_status err; |
287 | struct usbd_xfer *xfer; | | 286 | struct usbd_xfer *xfer; |
288 | struct usbd_pipe *ipipe; | | 287 | struct usbd_pipe *ipipe; |
289 | | | 288 | |
290 | USBHIST_FUNC(); | | 289 | USBHIST_FUNC(); |
291 | USBHIST_CALLARGS(usbdebug, "address = %#jx flags = %#jx len = %jd", | | 290 | USBHIST_CALLARGS(usbdebug, "address = %#jx flags = %#jx len = %jd", |
292 | address, flags, len, 0); | | 291 | address, flags, len, 0); |
293 | | | 292 | |
294 | err = usbd_open_pipe_ival(iface, address, | | 293 | err = usbd_open_pipe_ival(iface, address, |
295 | USBD_EXCLUSIVE_USE | (flags & USBD_MPSAFE), | | 294 | USBD_EXCLUSIVE_USE | (flags & USBD_MPSAFE), |
296 | &ipipe, ival); | | 295 | &ipipe, ival); |
297 | if (err) | | 296 | if (err) |
298 | return err; | | 297 | return err; |
299 | err = usbd_create_xfer(ipipe, len, flags, 0, &xfer); | | 298 | err = usbd_create_xfer(ipipe, len, flags, 0, &xfer); |
300 | if (err) | | 299 | if (err) |
301 | goto bad1; | | 300 | goto bad1; |
302 | | | 301 | |
303 | usbd_setup_xfer(xfer, priv, buffer, len, flags, USBD_NO_TIMEOUT, cb); | | 302 | usbd_setup_xfer(xfer, priv, buffer, len, flags, USBD_NO_TIMEOUT, cb); |
304 | ipipe->up_intrxfer = xfer; | | 303 | ipipe->up_intrxfer = xfer; |
305 | ipipe->up_repeat = 1; | | 304 | ipipe->up_repeat = 1; |
306 | err = usbd_transfer(xfer); | | 305 | err = usbd_transfer(xfer); |
307 | *pipe = ipipe; | | 306 | *pipe = ipipe; |
308 | if (err != USBD_IN_PROGRESS) | | 307 | if (err != USBD_IN_PROGRESS) |
309 | goto bad3; | | 308 | goto bad3; |
310 | SDT_PROBE7(usb, device, pipe, open__intr, | | 309 | SDT_PROBE7(usb, device, pipe, open__intr, |
311 | iface, address, flags, ival, cb, priv, ipipe); | | 310 | iface, address, flags, ival, cb, priv, ipipe); |
312 | return USBD_NORMAL_COMPLETION; | | 311 | return USBD_NORMAL_COMPLETION; |
313 | | | 312 | |
314 | bad3: | | 313 | bad3: |
315 | ipipe->up_intrxfer = NULL; | | 314 | ipipe->up_intrxfer = NULL; |
316 | ipipe->up_repeat = 0; | | 315 | ipipe->up_repeat = 0; |
317 | | | 316 | |
318 | usbd_destroy_xfer(xfer); | | 317 | usbd_destroy_xfer(xfer); |
319 | bad1: | | 318 | bad1: |
320 | usbd_close_pipe(ipipe); | | 319 | usbd_close_pipe(ipipe); |
321 | return err; | | 320 | return err; |
322 | } | | 321 | } |
323 | | | 322 | |
324 | void | | 323 | void |
325 | usbd_close_pipe(struct usbd_pipe *pipe) | | 324 | usbd_close_pipe(struct usbd_pipe *pipe) |
326 | { | | 325 | { |
327 | USBHIST_FUNC(); USBHIST_CALLED(usbdebug); | | 326 | USBHIST_FUNC(); USBHIST_CALLED(usbdebug); |
328 | | | 327 | |
329 | KASSERT(pipe != NULL); | | 328 | KASSERT(pipe != NULL); |
330 | | | 329 | |
331 | usbd_lock_pipe(pipe); | | 330 | usbd_lock_pipe(pipe); |
332 | SDT_PROBE1(usb, device, pipe, close, pipe); | | 331 | SDT_PROBE1(usb, device, pipe, close, pipe); |
333 | if (!SIMPLEQ_EMPTY(&pipe->up_queue)) { | | 332 | if (!SIMPLEQ_EMPTY(&pipe->up_queue)) { |
334 | printf("WARNING: pipe closed with active xfers on addr %d\n", | | 333 | printf("WARNING: pipe closed with active xfers on addr %d\n", |
335 | pipe->up_dev->ud_addr); | | 334 | pipe->up_dev->ud_addr); |
336 | usbd_ar_pipe(pipe); | | 335 | usbd_ar_pipe(pipe); |
337 | } | | 336 | } |
338 | KASSERT(SIMPLEQ_EMPTY(&pipe->up_queue)); | | 337 | KASSERT(SIMPLEQ_EMPTY(&pipe->up_queue)); |
339 | pipe->up_methods->upm_close(pipe); | | 338 | pipe->up_methods->upm_close(pipe); |
340 | usbd_unlock_pipe(pipe); | | 339 | usbd_unlock_pipe(pipe); |
341 | | | 340 | |
342 | cv_destroy(&pipe->up_callingcv); | | 341 | cv_destroy(&pipe->up_callingcv); |
343 | if (pipe->up_intrxfer) | | 342 | if (pipe->up_intrxfer) |
344 | usbd_destroy_xfer(pipe->up_intrxfer); | | 343 | usbd_destroy_xfer(pipe->up_intrxfer); |
345 | usb_rem_task_wait(pipe->up_dev, &pipe->up_async_task, USB_TASKQ_DRIVER, | | 344 | usb_rem_task_wait(pipe->up_dev, &pipe->up_async_task, USB_TASKQ_DRIVER, |
346 | NULL); | | 345 | NULL); |
347 | usbd_endpoint_release(pipe->up_dev, pipe->up_endpoint); | | 346 | usbd_endpoint_release(pipe->up_dev, pipe->up_endpoint); |
348 | if (pipe->up_iface) | | 347 | if (pipe->up_iface) |
349 | usbd_iface_pipeunref(pipe->up_iface); | | 348 | usbd_iface_pipeunref(pipe->up_iface); |
350 | kmem_free(pipe, pipe->up_dev->ud_bus->ub_pipesize); | | 349 | kmem_free(pipe, pipe->up_dev->ud_bus->ub_pipesize); |
351 | } | | 350 | } |
352 | | | 351 | |
353 | usbd_status | | 352 | usbd_status |
354 | usbd_transfer(struct usbd_xfer *xfer) | | 353 | usbd_transfer(struct usbd_xfer *xfer) |
355 | { | | 354 | { |
356 | struct usbd_pipe *pipe = xfer->ux_pipe; | | 355 | struct usbd_pipe *pipe = xfer->ux_pipe; |
357 | usbd_status err; | | 356 | usbd_status err; |
358 | unsigned int size, flags; | | 357 | unsigned int size, flags; |
359 | | | 358 | |
360 | USBHIST_FUNC(); USBHIST_CALLARGS(usbdebug, | | 359 | USBHIST_FUNC(); USBHIST_CALLARGS(usbdebug, |
361 | "xfer = %#jx, flags = %#jx, pipe = %#jx, running = %jd", | | 360 | "xfer = %#jx, flags = %#jx, pipe = %#jx, running = %jd", |
362 | (uintptr_t)xfer, xfer->ux_flags, (uintptr_t)pipe, pipe->up_running); | | 361 | (uintptr_t)xfer, xfer->ux_flags, (uintptr_t)pipe, pipe->up_running); |
363 | KASSERT(xfer->ux_status == USBD_NOT_STARTED); | | 362 | KASSERT(xfer->ux_status == USBD_NOT_STARTED); |
364 | SDT_PROBE1(usb, device, xfer, start, xfer); | | 363 | SDT_PROBE1(usb, device, xfer, start, xfer); |
365 | | | 364 | |
366 | #ifdef USB_DEBUG | | 365 | #ifdef USB_DEBUG |
367 | if (usbdebug > 5) | | 366 | if (usbdebug > 5) |
368 | usbd_dump_queue(pipe); | | 367 | usbd_dump_queue(pipe); |
369 | #endif | | 368 | #endif |
370 | xfer->ux_done = 0; | | 369 | xfer->ux_done = 0; |
371 | | | 370 | |
372 | if (pipe->up_aborting) { | | 371 | if (pipe->up_aborting) { |
373 | USBHIST_LOG(usbdebug, "<- done xfer %#jx, aborting", | | 372 | USBHIST_LOG(usbdebug, "<- done xfer %#jx, aborting", |
374 | (uintptr_t)xfer, 0, 0, 0); | | 373 | (uintptr_t)xfer, 0, 0, 0); |
375 | SDT_PROBE2(usb, device, xfer, done, xfer, USBD_CANCELLED); | | 374 | SDT_PROBE2(usb, device, xfer, done, xfer, USBD_CANCELLED); |
376 | return USBD_CANCELLED; | | 375 | return USBD_CANCELLED; |
377 | } | | 376 | } |
378 | | | 377 | |
379 | KASSERT(xfer->ux_length == 0 || xfer->ux_buf != NULL); | | 378 | KASSERT(xfer->ux_length == 0 || xfer->ux_buf != NULL); |
380 | | | 379 | |
381 | size = xfer->ux_length; | | 380 | size = xfer->ux_length; |
382 | flags = xfer->ux_flags; | | 381 | flags = xfer->ux_flags; |
383 | | | 382 | |
384 | if (size != 0) { | | 383 | if (size != 0) { |
385 | /* | | 384 | /* |
386 | * Use the xfer buffer if none specified in transfer setup. | | 385 | * Use the xfer buffer if none specified in transfer setup. |
387 | * isoc transfers always use the xfer buffer, i.e. | | 386 | * isoc transfers always use the xfer buffer, i.e. |
388 | * ux_buffer is always NULL for isoc. | | 387 | * ux_buffer is always NULL for isoc. |
389 | */ | | 388 | */ |
390 | if (xfer->ux_buffer == NULL) { | | 389 | if (xfer->ux_buffer == NULL) { |
391 | xfer->ux_buffer = xfer->ux_buf; | | 390 | xfer->ux_buffer = xfer->ux_buf; |
392 | } | | 391 | } |
393 | | | 392 | |
394 | /* | | 393 | /* |
395 | * If not using the xfer buffer copy data to the | | 394 | * If not using the xfer buffer copy data to the |
396 | * xfer buffer for OUT transfers of >0 length | | 395 | * xfer buffer for OUT transfers of >0 length |
397 | */ | | 396 | */ |
398 | if (xfer->ux_buffer != xfer->ux_buf) { | | 397 | if (xfer->ux_buffer != xfer->ux_buf) { |
399 | KASSERT(xfer->ux_buf); | | 398 | KASSERT(xfer->ux_buf); |
400 | if (!usbd_xfer_isread(xfer)) { | | 399 | if (!usbd_xfer_isread(xfer)) { |
401 | memcpy(xfer->ux_buf, xfer->ux_buffer, size); | | 400 | memcpy(xfer->ux_buf, xfer->ux_buffer, size); |
402 | } | | 401 | } |
403 | } | | 402 | } |
404 | } | | 403 | } |
405 | | | 404 | |
406 | /* xfer is not valid after the transfer method unless synchronous */ | | 405 | /* xfer is not valid after the transfer method unless synchronous */ |
407 | SDT_PROBE2(usb, device, pipe, transfer__start, pipe, xfer); | | 406 | SDT_PROBE2(usb, device, pipe, transfer__start, pipe, xfer); |
408 | do { | | 407 | do { |
409 | usbd_lock_pipe(pipe); | | 408 | usbd_lock_pipe(pipe); |
410 | err = usb_insert_transfer(xfer); | | 409 | #ifdef DIAGNOSTIC |
| | | 410 | xfer->ux_state = XFER_ONQU; |
| | | 411 | #endif |
| | | 412 | SIMPLEQ_INSERT_TAIL(&pipe->up_queue, xfer, ux_next); |
| | | 413 | if (pipe->up_running && pipe->up_serialise) { |
| | | 414 | err = USBD_IN_PROGRESS; |
| | | 415 | } else { |
| | | 416 | pipe->up_running = 1; |
| | | 417 | err = USBD_NORMAL_COMPLETION; |
| | | 418 | } |
411 | usbd_unlock_pipe(pipe); | | 419 | usbd_unlock_pipe(pipe); |
412 | if (err) | | 420 | if (err) |
413 | break; | | 421 | break; |
414 | err = pipe->up_methods->upm_transfer(xfer); | | 422 | err = pipe->up_methods->upm_transfer(xfer); |
415 | } while (0); | | 423 | } while (0); |
416 | SDT_PROBE3(usb, device, pipe, transfer__done, pipe, xfer, err); | | 424 | SDT_PROBE3(usb, device, pipe, transfer__done, pipe, xfer, err); |
417 | | | 425 | |
418 | if (err != USBD_IN_PROGRESS && err) { | | 426 | if (err != USBD_IN_PROGRESS && err) { |
419 | /* | | 427 | /* |
420 | * The transfer made it onto the pipe queue, but didn't get | | 428 | * The transfer made it onto the pipe queue, but didn't get |
421 | * accepted by the HCD for some reason. It needs removing | | 429 | * accepted by the HCD for some reason. It needs removing |
422 | * from the pipe queue. | | 430 | * from the pipe queue. |
423 | */ | | 431 | */ |
424 | USBHIST_LOG(usbdebug, "xfer failed: %jd, reinserting", | | 432 | USBHIST_LOG(usbdebug, "xfer failed: %jd, reinserting", |
425 | err, 0, 0, 0); | | 433 | err, 0, 0, 0); |
426 | usbd_lock_pipe(pipe); | | 434 | usbd_lock_pipe(pipe); |
427 | SDT_PROBE1(usb, device, xfer, preabort, xfer); | | 435 | SDT_PROBE1(usb, device, xfer, preabort, xfer); |
428 | #ifdef DIAGNOSTIC | | 436 | #ifdef DIAGNOSTIC |
429 | xfer->ux_state = XFER_BUSY; | | 437 | xfer->ux_state = XFER_BUSY; |
430 | #endif | | 438 | #endif |
431 | SIMPLEQ_REMOVE_HEAD(&pipe->up_queue, ux_next); | | 439 | SIMPLEQ_REMOVE_HEAD(&pipe->up_queue, ux_next); |
432 | if (pipe->up_serialise) | | 440 | if (pipe->up_serialise) |
433 | usbd_start_next(pipe); | | 441 | usbd_start_next(pipe); |
434 | usbd_unlock_pipe(pipe); | | 442 | usbd_unlock_pipe(pipe); |
435 | } | | 443 | } |
436 | | | 444 | |
437 | if (!(flags & USBD_SYNCHRONOUS)) { | | 445 | if (!(flags & USBD_SYNCHRONOUS)) { |
438 | USBHIST_LOG(usbdebug, "<- done xfer %#jx, not sync (err %jd)", | | 446 | USBHIST_LOG(usbdebug, "<- done xfer %#jx, not sync (err %jd)", |
439 | (uintptr_t)xfer, err, 0, 0); | | 447 | (uintptr_t)xfer, err, 0, 0); |
440 | if (err != USBD_IN_PROGRESS) /* XXX Possible? */ | | 448 | if (err != USBD_IN_PROGRESS) /* XXX Possible? */ |
441 | SDT_PROBE2(usb, device, xfer, done, xfer, err); | | 449 | SDT_PROBE2(usb, device, xfer, done, xfer, err); |
442 | return err; | | 450 | return err; |
443 | } | | 451 | } |
444 | | | 452 | |
445 | if (err != USBD_IN_PROGRESS) { | | 453 | if (err != USBD_IN_PROGRESS) { |
446 | USBHIST_LOG(usbdebug, "<- done xfer %#jx, sync (err %jd)", | | 454 | USBHIST_LOG(usbdebug, "<- done xfer %#jx, sync (err %jd)", |
447 | (uintptr_t)xfer, err, 0, 0); | | 455 | (uintptr_t)xfer, err, 0, 0); |
448 | SDT_PROBE2(usb, device, xfer, done, xfer, err); | | 456 | SDT_PROBE2(usb, device, xfer, done, xfer, err); |
449 | return err; | | 457 | return err; |
450 | } | | 458 | } |
451 | | | 459 | |
452 | /* Sync transfer, wait for completion. */ | | 460 | /* Sync transfer, wait for completion. */ |
453 | usbd_lock_pipe(pipe); | | 461 | usbd_lock_pipe(pipe); |
454 | while (!xfer->ux_done) { | | 462 | while (!xfer->ux_done) { |
455 | if (pipe->up_dev->ud_bus->ub_usepolling) | | 463 | if (pipe->up_dev->ud_bus->ub_usepolling) |
456 | panic("usbd_transfer: not done"); | | 464 | panic("usbd_transfer: not done"); |
457 | USBHIST_LOG(usbdebug, "<- sleeping on xfer %#jx", | | 465 | USBHIST_LOG(usbdebug, "<- sleeping on xfer %#jx", |
458 | (uintptr_t)xfer, 0, 0, 0); | | 466 | (uintptr_t)xfer, 0, 0, 0); |
459 | | | 467 | |
460 | err = 0; | | 468 | err = 0; |
461 | if ((flags & USBD_SYNCHRONOUS_SIG) != 0) { | | 469 | if ((flags & USBD_SYNCHRONOUS_SIG) != 0) { |
462 | err = cv_wait_sig(&xfer->ux_cv, pipe->up_dev->ud_bus->ub_lock); | | 470 | err = cv_wait_sig(&xfer->ux_cv, pipe->up_dev->ud_bus->ub_lock); |
463 | } else { | | 471 | } else { |
464 | cv_wait(&xfer->ux_cv, pipe->up_dev->ud_bus->ub_lock); | | 472 | cv_wait(&xfer->ux_cv, pipe->up_dev->ud_bus->ub_lock); |
465 | } | | 473 | } |
466 | if (err) { | | 474 | if (err) { |
467 | if (!xfer->ux_done) { | | 475 | if (!xfer->ux_done) { |
468 | SDT_PROBE1(usb, device, xfer, abort, xfer); | | 476 | SDT_PROBE1(usb, device, xfer, abort, xfer); |
469 | pipe->up_methods->upm_abort(xfer); | | 477 | pipe->up_methods->upm_abort(xfer); |
470 | } | | 478 | } |
471 | break; | | 479 | break; |
472 | } | | 480 | } |
473 | } | | 481 | } |
474 | SDT_PROBE2(usb, device, xfer, done, xfer, xfer->ux_status); | | 482 | SDT_PROBE2(usb, device, xfer, done, xfer, xfer->ux_status); |
475 | /* XXX Race to read xfer->ux_status? */ | | 483 | /* XXX Race to read xfer->ux_status? */ |
476 | usbd_unlock_pipe(pipe); | | 484 | usbd_unlock_pipe(pipe); |
477 | return xfer->ux_status; | | 485 | return xfer->ux_status; |
478 | } | | 486 | } |
479 | | | 487 | |
480 | /* Like usbd_transfer(), but waits for completion. */ | | 488 | /* Like usbd_transfer(), but waits for completion. */ |
481 | usbd_status | | 489 | usbd_status |
482 | usbd_sync_transfer(struct usbd_xfer *xfer) | | 490 | usbd_sync_transfer(struct usbd_xfer *xfer) |
483 | { | | 491 | { |
484 | xfer->ux_flags |= USBD_SYNCHRONOUS; | | 492 | xfer->ux_flags |= USBD_SYNCHRONOUS; |
485 | return usbd_transfer(xfer); | | 493 | return usbd_transfer(xfer); |
486 | } | | 494 | } |
487 | | | 495 | |
488 | /* Like usbd_transfer(), but waits for completion and listens for signals. */ | | 496 | /* Like usbd_transfer(), but waits for completion and listens for signals. */ |
489 | usbd_status | | 497 | usbd_status |
490 | usbd_sync_transfer_sig(struct usbd_xfer *xfer) | | 498 | usbd_sync_transfer_sig(struct usbd_xfer *xfer) |
491 | { | | 499 | { |
492 | xfer->ux_flags |= USBD_SYNCHRONOUS | USBD_SYNCHRONOUS_SIG; | | 500 | xfer->ux_flags |= USBD_SYNCHRONOUS | USBD_SYNCHRONOUS_SIG; |
493 | return usbd_transfer(xfer); | | 501 | return usbd_transfer(xfer); |
494 | } | | 502 | } |
495 | | | 503 | |
/*
 * usbd_alloc_buffer(xfer, size)
 *
 *	Allocate a backing buffer of `size' bytes for `xfer'.  Uses DMA
 *	memory when the bus does DMA transfers, plain wired kernel
 *	memory otherwise.  Returns the kernel virtual address of the
 *	buffer, or NULL on failure (DMA path only; the kmem path
 *	sleeps until it succeeds).
 */
static void *
usbd_alloc_buffer(struct usbd_xfer *xfer, uint32_t size)
{
	KASSERT(xfer->ux_buf == NULL);
	KASSERT(size != 0);

	/* Cleared first so a failed allocation leaves a consistent xfer.  */
	xfer->ux_bufsize = 0;
#if NUSB_DMA > 0
	struct usbd_bus *bus = xfer->ux_bus;

	if (bus->ub_usedma) {
		usb_dma_t *dmap = &xfer->ux_dmabuf;

		KASSERT((bus->ub_dmaflags & USBMALLOC_COHERENT) == 0);
		int err = usb_allocmem(bus->ub_dmatag, size, 0, bus->ub_dmaflags, dmap);
		if (err) {
			return NULL;
		}
		xfer->ux_buf = KERNADDR(&xfer->ux_dmabuf, 0);
		xfer->ux_bufsize = size;

		return xfer->ux_buf;
	}
#endif
	/* Non-DMA bus: ordinary kernel memory, may sleep.  */
	KASSERT(xfer->ux_bus->ub_usedma == false);
	xfer->ux_buf = kmem_alloc(size, KM_SLEEP);
	xfer->ux_bufsize = size;
	return xfer->ux_buf;
}
525 | | | 533 | |
/*
 * usbd_free_buffer(xfer)
 *
 *	Release the buffer previously allocated for `xfer' with
 *	usbd_alloc_buffer().  The xfer's buffer fields are cleared
 *	before the memory is returned, so the xfer never points at
 *	freed storage.
 */
static void
usbd_free_buffer(struct usbd_xfer *xfer)
{
	KASSERT(xfer->ux_buf != NULL);
	KASSERT(xfer->ux_bufsize != 0);

	void *buf = xfer->ux_buf;
	uint32_t size = xfer->ux_bufsize;

	xfer->ux_buf = NULL;
	xfer->ux_bufsize = 0;

#if NUSB_DMA > 0
	struct usbd_bus *bus = xfer->ux_bus;

	if (bus->ub_usedma) {
		usb_dma_t *dmap = &xfer->ux_dmabuf;

		/* DMA path: address/size are recorded in the usb_dma_t.  */
		usb_freemem(dmap);
		return;
	}
#endif
	KASSERT(xfer->ux_bus->ub_usedma == false);

	kmem_free(buf, size);
}
552 | | | 560 | |
553 | void * | | 561 | void * |
554 | usbd_get_buffer(struct usbd_xfer *xfer) | | 562 | usbd_get_buffer(struct usbd_xfer *xfer) |
555 | { | | 563 | { |
556 | return xfer->ux_buf; | | 564 | return xfer->ux_buf; |
557 | } | | 565 | } |
558 | | | 566 | |
559 | struct usbd_pipe * | | 567 | struct usbd_pipe * |
560 | usbd_get_pipe0(struct usbd_device *dev) | | 568 | usbd_get_pipe0(struct usbd_device *dev) |
561 | { | | 569 | { |
562 | | | 570 | |
563 | return dev->ud_pipe0; | | 571 | return dev->ud_pipe0; |
564 | } | | 572 | } |
565 | | | 573 | |
/*
 * usbd_alloc_xfer(dev, nframes)
 *
 *	Allocate an xfer from the bus driver and initialize the generic
 *	parts: timeout callout, completion condvar, and abort task.
 *	Returns NULL if the bus driver cannot allocate one.  May sleep.
 */
static struct usbd_xfer *
usbd_alloc_xfer(struct usbd_device *dev, unsigned int nframes)
{
	struct usbd_xfer *xfer;

	USBHIST_FUNC();

	ASSERT_SLEEPABLE();

	/* The host controller driver supplies the xfer storage.  */
	xfer = dev->ud_bus->ub_methods->ubm_allocx(dev->ud_bus, nframes);
	if (xfer == NULL)
		goto out;
	xfer->ux_bus = dev->ud_bus;
	callout_init(&xfer->ux_callout, CALLOUT_MPSAFE);
	callout_setfunc(&xfer->ux_callout, usbd_xfer_timeout, xfer);
	cv_init(&xfer->ux_cv, "usbxfer");
	usb_init_task(&xfer->ux_aborttask, usbd_xfer_timeout_task, xfer,
	    USB_TASKQ_MPSAFE);

out:
	USBHIST_CALLARGS(usbdebug, "returns %#jx", (uintptr_t)xfer, 0, 0, 0);

	return xfer;
}
590 | | | 598 | |
/*
 * usbd_free_xfer(xfer)
 *
 *	Free an xfer obtained from usbd_alloc_xfer(), including any
 *	attached buffer.  Drains a pending timeout callout and abort
 *	task under the bus lock before the xfer memory is handed back
 *	to the bus driver.
 */
static void
usbd_free_xfer(struct usbd_xfer *xfer)
{
	USBHIST_FUNC();
	USBHIST_CALLARGS(usbdebug, "%#jx", (uintptr_t)xfer, 0, 0, 0);

	if (xfer->ux_buf) {
		usbd_free_buffer(xfer);
	}

	/* Wait for any straggling timeout to complete. */
	mutex_enter(xfer->ux_bus->ub_lock);
	xfer->ux_timeout_reset = false;	/* do not resuscitate */
	callout_halt(&xfer->ux_callout, xfer->ux_bus->ub_lock);
	usb_rem_task_wait(xfer->ux_pipe->up_dev, &xfer->ux_aborttask,
	    USB_TASKQ_HC, xfer->ux_bus->ub_lock);
	mutex_exit(xfer->ux_bus->ub_lock);

	cv_destroy(&xfer->ux_cv);
	xfer->ux_bus->ub_methods->ubm_freex(xfer->ux_bus, xfer);
}
612 | | | 620 | |
613 | int | | 621 | int |
614 | usbd_create_xfer(struct usbd_pipe *pipe, size_t len, unsigned int flags, | | 622 | usbd_create_xfer(struct usbd_pipe *pipe, size_t len, unsigned int flags, |
615 | unsigned int nframes, struct usbd_xfer **xp) | | 623 | unsigned int nframes, struct usbd_xfer **xp) |
616 | { | | 624 | { |
617 | KASSERT(xp != NULL); | | 625 | KASSERT(xp != NULL); |
618 | void *buf = NULL; | | 626 | void *buf = NULL; |
619 | | | 627 | |
620 | struct usbd_xfer *xfer = usbd_alloc_xfer(pipe->up_dev, nframes); | | 628 | struct usbd_xfer *xfer = usbd_alloc_xfer(pipe->up_dev, nframes); |
621 | if (xfer == NULL) | | 629 | if (xfer == NULL) |
622 | return ENOMEM; | | 630 | return ENOMEM; |
623 | | | 631 | |
624 | xfer->ux_pipe = pipe; | | 632 | xfer->ux_pipe = pipe; |
625 | xfer->ux_flags = flags; | | 633 | xfer->ux_flags = flags; |
626 | xfer->ux_nframes = nframes; | | 634 | xfer->ux_nframes = nframes; |
627 | xfer->ux_methods = pipe->up_methods; | | 635 | xfer->ux_methods = pipe->up_methods; |
628 | | | 636 | |
629 | if (len) { | | 637 | if (len) { |
630 | buf = usbd_alloc_buffer(xfer, len); | | 638 | buf = usbd_alloc_buffer(xfer, len); |
631 | if (!buf) { | | 639 | if (!buf) { |
632 | usbd_free_xfer(xfer); | | 640 | usbd_free_xfer(xfer); |
633 | return ENOMEM; | | 641 | return ENOMEM; |
634 | } | | 642 | } |
635 | } | | 643 | } |
636 | | | 644 | |
637 | if (xfer->ux_methods->upm_init) { | | 645 | if (xfer->ux_methods->upm_init) { |
638 | int err = xfer->ux_methods->upm_init(xfer); | | 646 | int err = xfer->ux_methods->upm_init(xfer); |
639 | if (err) { | | 647 | if (err) { |
640 | usbd_free_xfer(xfer); | | 648 | usbd_free_xfer(xfer); |
641 | return err; | | 649 | return err; |
642 | } | | 650 | } |
643 | } | | 651 | } |
644 | | | 652 | |
645 | *xp = xfer; | | 653 | *xp = xfer; |
646 | SDT_PROBE5(usb, device, xfer, create, | | 654 | SDT_PROBE5(usb, device, xfer, create, |
647 | xfer, pipe, len, flags, nframes); | | 655 | xfer, pipe, len, flags, nframes); |
648 | return 0; | | 656 | return 0; |
649 | } | | 657 | } |
650 | | | 658 | |
651 | void | | 659 | void |
652 | usbd_destroy_xfer(struct usbd_xfer *xfer) | | 660 | usbd_destroy_xfer(struct usbd_xfer *xfer) |
653 | { | | 661 | { |
654 | | | 662 | |
655 | SDT_PROBE1(usb, device, xfer, destroy, xfer); | | 663 | SDT_PROBE1(usb, device, xfer, destroy, xfer); |
656 | if (xfer->ux_methods->upm_fini) | | 664 | if (xfer->ux_methods->upm_fini) |
657 | xfer->ux_methods->upm_fini(xfer); | | 665 | xfer->ux_methods->upm_fini(xfer); |
658 | | | 666 | |
659 | usbd_free_xfer(xfer); | | 667 | usbd_free_xfer(xfer); |
660 | } | | 668 | } |
661 | | | 669 | |
662 | void | | 670 | void |
663 | usbd_setup_xfer(struct usbd_xfer *xfer, void *priv, void *buffer, | | 671 | usbd_setup_xfer(struct usbd_xfer *xfer, void *priv, void *buffer, |
664 | uint32_t length, uint16_t flags, uint32_t timeout, usbd_callback callback) | | 672 | uint32_t length, uint16_t flags, uint32_t timeout, usbd_callback callback) |
665 | { | | 673 | { |
666 | KASSERT(xfer->ux_pipe); | | 674 | KASSERT(xfer->ux_pipe); |
667 | | | 675 | |
668 | xfer->ux_priv = priv; | | 676 | xfer->ux_priv = priv; |
669 | xfer->ux_buffer = buffer; | | 677 | xfer->ux_buffer = buffer; |
670 | xfer->ux_length = length; | | 678 | xfer->ux_length = length; |
671 | xfer->ux_actlen = 0; | | 679 | xfer->ux_actlen = 0; |
672 | xfer->ux_flags = flags; | | 680 | xfer->ux_flags = flags; |
673 | xfer->ux_timeout = timeout; | | 681 | xfer->ux_timeout = timeout; |
674 | xfer->ux_status = USBD_NOT_STARTED; | | 682 | xfer->ux_status = USBD_NOT_STARTED; |
675 | xfer->ux_callback = callback; | | 683 | xfer->ux_callback = callback; |
676 | xfer->ux_rqflags &= ~URQ_REQUEST; | | 684 | xfer->ux_rqflags &= ~URQ_REQUEST; |
677 | xfer->ux_nframes = 0; | | 685 | xfer->ux_nframes = 0; |
678 | } | | 686 | } |
679 | | | 687 | |
680 | void | | 688 | void |
681 | usbd_setup_default_xfer(struct usbd_xfer *xfer, struct usbd_device *dev, | | 689 | usbd_setup_default_xfer(struct usbd_xfer *xfer, struct usbd_device *dev, |
682 | void *priv, uint32_t timeout, usb_device_request_t *req, void *buffer, | | 690 | void *priv, uint32_t timeout, usb_device_request_t *req, void *buffer, |
683 | uint32_t length, uint16_t flags, usbd_callback callback) | | 691 | uint32_t length, uint16_t flags, usbd_callback callback) |
684 | { | | 692 | { |
685 | KASSERT(xfer->ux_pipe == dev->ud_pipe0); | | 693 | KASSERT(xfer->ux_pipe == dev->ud_pipe0); |
686 | | | 694 | |
687 | xfer->ux_priv = priv; | | 695 | xfer->ux_priv = priv; |
688 | xfer->ux_buffer = buffer; | | 696 | xfer->ux_buffer = buffer; |
689 | xfer->ux_length = length; | | 697 | xfer->ux_length = length; |
690 | xfer->ux_actlen = 0; | | 698 | xfer->ux_actlen = 0; |
691 | xfer->ux_flags = flags; | | 699 | xfer->ux_flags = flags; |
692 | xfer->ux_timeout = timeout; | | 700 | xfer->ux_timeout = timeout; |
693 | xfer->ux_status = USBD_NOT_STARTED; | | 701 | xfer->ux_status = USBD_NOT_STARTED; |
694 | xfer->ux_callback = callback; | | 702 | xfer->ux_callback = callback; |
695 | xfer->ux_request = *req; | | 703 | xfer->ux_request = *req; |
696 | xfer->ux_rqflags |= URQ_REQUEST; | | 704 | xfer->ux_rqflags |= URQ_REQUEST; |
697 | xfer->ux_nframes = 0; | | 705 | xfer->ux_nframes = 0; |
698 | } | | 706 | } |
699 | | | 707 | |
700 | void | | 708 | void |
701 | usbd_setup_isoc_xfer(struct usbd_xfer *xfer, void *priv, uint16_t *frlengths, | | 709 | usbd_setup_isoc_xfer(struct usbd_xfer *xfer, void *priv, uint16_t *frlengths, |
702 | uint32_t nframes, uint16_t flags, usbd_callback callback) | | 710 | uint32_t nframes, uint16_t flags, usbd_callback callback) |
703 | { | | 711 | { |
704 | xfer->ux_priv = priv; | | 712 | xfer->ux_priv = priv; |
705 | xfer->ux_buffer = NULL; | | 713 | xfer->ux_buffer = NULL; |
706 | xfer->ux_length = 0; | | 714 | xfer->ux_length = 0; |
707 | xfer->ux_actlen = 0; | | 715 | xfer->ux_actlen = 0; |
708 | xfer->ux_flags = flags; | | 716 | xfer->ux_flags = flags; |
709 | xfer->ux_timeout = USBD_NO_TIMEOUT; | | 717 | xfer->ux_timeout = USBD_NO_TIMEOUT; |
710 | xfer->ux_status = USBD_NOT_STARTED; | | 718 | xfer->ux_status = USBD_NOT_STARTED; |
711 | xfer->ux_callback = callback; | | 719 | xfer->ux_callback = callback; |
712 | xfer->ux_rqflags &= ~URQ_REQUEST; | | 720 | xfer->ux_rqflags &= ~URQ_REQUEST; |
713 | xfer->ux_frlengths = frlengths; | | 721 | xfer->ux_frlengths = frlengths; |
714 | xfer->ux_nframes = nframes; | | 722 | xfer->ux_nframes = nframes; |
715 | | | 723 | |
716 | for (size_t i = 0; i < xfer->ux_nframes; i++) | | 724 | for (size_t i = 0; i < xfer->ux_nframes; i++) |
717 | xfer->ux_length += xfer->ux_frlengths[i]; | | 725 | xfer->ux_length += xfer->ux_frlengths[i]; |
718 | } | | 726 | } |
719 | | | 727 | |
720 | void | | 728 | void |
721 | usbd_get_xfer_status(struct usbd_xfer *xfer, void **priv, | | 729 | usbd_get_xfer_status(struct usbd_xfer *xfer, void **priv, |
722 | void **buffer, uint32_t *count, usbd_status *status) | | 730 | void **buffer, uint32_t *count, usbd_status *status) |
723 | { | | 731 | { |
724 | if (priv != NULL) | | 732 | if (priv != NULL) |
725 | *priv = xfer->ux_priv; | | 733 | *priv = xfer->ux_priv; |
726 | if (buffer != NULL) | | 734 | if (buffer != NULL) |
727 | *buffer = xfer->ux_buffer; | | 735 | *buffer = xfer->ux_buffer; |
728 | if (count != NULL) | | 736 | if (count != NULL) |
729 | *count = xfer->ux_actlen; | | 737 | *count = xfer->ux_actlen; |
730 | if (status != NULL) | | 738 | if (status != NULL) |
731 | *status = xfer->ux_status; | | 739 | *status = xfer->ux_status; |
732 | } | | 740 | } |
733 | | | 741 | |
734 | usb_config_descriptor_t * | | 742 | usb_config_descriptor_t * |
735 | usbd_get_config_descriptor(struct usbd_device *dev) | | 743 | usbd_get_config_descriptor(struct usbd_device *dev) |
736 | { | | 744 | { |
737 | KASSERT(dev != NULL); | | 745 | KASSERT(dev != NULL); |
738 | | | 746 | |
739 | return dev->ud_cdesc; | | 747 | return dev->ud_cdesc; |
740 | } | | 748 | } |
741 | | | 749 | |
742 | usb_interface_descriptor_t * | | 750 | usb_interface_descriptor_t * |
743 | usbd_get_interface_descriptor(struct usbd_interface *iface) | | 751 | usbd_get_interface_descriptor(struct usbd_interface *iface) |
744 | { | | 752 | { |
745 | KASSERT(iface != NULL); | | 753 | KASSERT(iface != NULL); |
746 | | | 754 | |
747 | return iface->ui_idesc; | | 755 | return iface->ui_idesc; |
748 | } | | 756 | } |
749 | | | 757 | |
750 | usb_device_descriptor_t * | | 758 | usb_device_descriptor_t * |
751 | usbd_get_device_descriptor(struct usbd_device *dev) | | 759 | usbd_get_device_descriptor(struct usbd_device *dev) |
752 | { | | 760 | { |
753 | KASSERT(dev != NULL); | | 761 | KASSERT(dev != NULL); |
754 | | | 762 | |
755 | return &dev->ud_ddesc; | | 763 | return &dev->ud_ddesc; |
756 | } | | 764 | } |
757 | | | 765 | |
758 | usb_endpoint_descriptor_t * | | 766 | usb_endpoint_descriptor_t * |
759 | usbd_interface2endpoint_descriptor(struct usbd_interface *iface, uint8_t index) | | 767 | usbd_interface2endpoint_descriptor(struct usbd_interface *iface, uint8_t index) |
760 | { | | 768 | { |
761 | | | 769 | |
762 | if (index >= iface->ui_idesc->bNumEndpoints) | | 770 | if (index >= iface->ui_idesc->bNumEndpoints) |
763 | return NULL; | | 771 | return NULL; |
764 | return iface->ui_endpoints[index].ue_edesc; | | 772 | return iface->ui_endpoints[index].ue_edesc; |
765 | } | | 773 | } |
766 | | | 774 | |
767 | /* Some drivers may wish to abort requests on the default pipe, * | | 775 | /* Some drivers may wish to abort requests on the default pipe, * |
768 | * but there is no mechanism for getting a handle on it. */ | | 776 | * but there is no mechanism for getting a handle on it. */ |
769 | void | | 777 | void |
770 | usbd_abort_default_pipe(struct usbd_device *device) | | 778 | usbd_abort_default_pipe(struct usbd_device *device) |
771 | { | | 779 | { |
772 | usbd_abort_pipe(device->ud_pipe0); | | 780 | usbd_abort_pipe(device->ud_pipe0); |
773 | } | | 781 | } |
774 | | | 782 | |
/*
 * usbd_abort_pipe(pipe)
 *
 *	Abort all transfers queued on `pipe' and make it usable again:
 *	a suspend immediately followed by a resume.
 */
void
usbd_abort_pipe(struct usbd_pipe *pipe)
{

	usbd_suspend_pipe(pipe);
	usbd_resume_pipe(pipe);
}
782 | | | 790 | |
/*
 * usbd_suspend_pipe(pipe)
 *
 *	Abort and drain the pipe's queued transfers via usbd_ar_pipe()
 *	under the pipe lock.  NOTE(review): presumably this also leaves
 *	the pipe marked as aborting until usbd_resume_pipe() clears
 *	up_aborting -- confirm against usbd_ar_pipe().
 */
void
usbd_suspend_pipe(struct usbd_pipe *pipe)
{

	usbd_lock_pipe(pipe);
	usbd_ar_pipe(pipe);
	usbd_unlock_pipe(pipe);
}
791 | | | 799 | |
/*
 * usbd_resume_pipe(pipe)
 *
 *	Allow new transfers on a pipe previously suspended with
 *	usbd_suspend_pipe().  The suspended pipe must have an empty
 *	transfer queue.
 */
void
usbd_resume_pipe(struct usbd_pipe *pipe)
{

	usbd_lock_pipe(pipe);
	/* Suspension must have drained everything off the queue.  */
	KASSERT(SIMPLEQ_EMPTY(&pipe->up_queue));
	pipe->up_aborting = 0;
	usbd_unlock_pipe(pipe);
}
801 | | | 809 | |
/*
 * usbd_clear_endpoint_stall(pipe)
 *
 *	Synchronously clear a halted endpoint: reset the host
 *	controller's data toggle for the pipe, then issue
 *	CLEAR_FEATURE(ENDPOINT_HALT) to the device.  Returns the
 *	status of the control request.
 */
usbd_status
usbd_clear_endpoint_stall(struct usbd_pipe *pipe)
{
	struct usbd_device *dev = pipe->up_dev;
	usbd_status err;

	USBHIST_FUNC(); USBHIST_CALLED(usbdebug);
	SDT_PROBE1(usb, device, pipe, clear__endpoint__stall, pipe);

	/*
	 * Clearing an endpoint stall resets the endpoint toggle, so
	 * do the same to the HC toggle.
	 */
	SDT_PROBE1(usb, device, pipe, clear__endpoint__toggle, pipe);
	pipe->up_methods->upm_cleartoggle(pipe);

	err = usbd_clear_endpoint_feature(dev,
	    pipe->up_endpoint->ue_edesc->bEndpointAddress, UF_ENDPOINT_HALT);
#if 0
XXX should we do this?
	if (!err) {
		pipe->state = USBD_PIPE_ACTIVE;
		/* XXX activate pipe */
	}
#endif
	return err;
}
829 | | | 837 | |
830 | void | | 838 | void |
831 | usbd_clear_endpoint_stall_task(void *arg) | | 839 | usbd_clear_endpoint_stall_task(void *arg) |
832 | { | | 840 | { |
833 | struct usbd_pipe *pipe = arg; | | 841 | struct usbd_pipe *pipe = arg; |
834 | struct usbd_device *dev = pipe->up_dev; | | 842 | struct usbd_device *dev = pipe->up_dev; |
835 | | | 843 | |
836 | SDT_PROBE1(usb, device, pipe, clear__endpoint__stall, pipe); | | 844 | SDT_PROBE1(usb, device, pipe, clear__endpoint__stall, pipe); |
837 | SDT_PROBE1(usb, device, pipe, clear__endpoint__toggle, pipe); | | 845 | SDT_PROBE1(usb, device, pipe, clear__endpoint__toggle, pipe); |
838 | pipe->up_methods->upm_cleartoggle(pipe); | | 846 | pipe->up_methods->upm_cleartoggle(pipe); |
839 | | | 847 | |
840 | (void)usbd_clear_endpoint_feature(dev, | | 848 | (void)usbd_clear_endpoint_feature(dev, |
841 | pipe->up_endpoint->ue_edesc->bEndpointAddress, UF_ENDPOINT_HALT); | | 849 | pipe->up_endpoint->ue_edesc->bEndpointAddress, UF_ENDPOINT_HALT); |
842 | } | | 850 | } |
843 | | | 851 | |
844 | void | | 852 | void |
845 | usbd_clear_endpoint_stall_async(struct usbd_pipe *pipe) | | 853 | usbd_clear_endpoint_stall_async(struct usbd_pipe *pipe) |
846 | { | | 854 | { |
847 | usb_add_task(pipe->up_dev, &pipe->up_async_task, USB_TASKQ_DRIVER); | | 855 | usb_add_task(pipe->up_dev, &pipe->up_async_task, USB_TASKQ_DRIVER); |
848 | } | | 856 | } |
849 | | | 857 | |
850 | void | | 858 | void |
851 | usbd_clear_endpoint_toggle(struct usbd_pipe *pipe) | | 859 | usbd_clear_endpoint_toggle(struct usbd_pipe *pipe) |
852 | { | | 860 | { |
853 | | | 861 | |
854 | SDT_PROBE1(usb, device, pipe, clear__endpoint__toggle, pipe); | | 862 | SDT_PROBE1(usb, device, pipe, clear__endpoint__toggle, pipe); |
855 | pipe->up_methods->upm_cleartoggle(pipe); | | 863 | pipe->up_methods->upm_cleartoggle(pipe); |
856 | } | | 864 | } |
857 | | | 865 | |
858 | usbd_status | | 866 | usbd_status |
859 | usbd_endpoint_count(struct usbd_interface *iface, uint8_t *count) | | 867 | usbd_endpoint_count(struct usbd_interface *iface, uint8_t *count) |
860 | { | | 868 | { |
861 | KASSERT(iface != NULL); | | 869 | KASSERT(iface != NULL); |
862 | KASSERT(iface->ui_idesc != NULL); | | 870 | KASSERT(iface->ui_idesc != NULL); |
863 | | | 871 | |
864 | *count = iface->ui_idesc->bNumEndpoints; | | 872 | *count = iface->ui_idesc->bNumEndpoints; |
865 | return USBD_NORMAL_COMPLETION; | | 873 | return USBD_NORMAL_COMPLETION; |
866 | } | | 874 | } |
867 | | | 875 | |
868 | usbd_status | | 876 | usbd_status |
869 | usbd_interface_count(struct usbd_device *dev, uint8_t *count) | | 877 | usbd_interface_count(struct usbd_device *dev, uint8_t *count) |
870 | { | | 878 | { |
871 | | | 879 | |
872 | if (dev->ud_cdesc == NULL) | | 880 | if (dev->ud_cdesc == NULL) |
873 | return USBD_NOT_CONFIGURED; | | 881 | return USBD_NOT_CONFIGURED; |
874 | *count = dev->ud_cdesc->bNumInterface; | | 882 | *count = dev->ud_cdesc->bNumInterface; |
875 | return USBD_NORMAL_COMPLETION; | | 883 | return USBD_NORMAL_COMPLETION; |
876 | } | | 884 | } |
877 | | | 885 | |
878 | void | | 886 | void |
879 | usbd_interface2device_handle(struct usbd_interface *iface, | | 887 | usbd_interface2device_handle(struct usbd_interface *iface, |
880 | struct usbd_device **dev) | | 888 | struct usbd_device **dev) |
881 | { | | 889 | { |
882 | | | 890 | |
883 | *dev = iface->ui_dev; | | 891 | *dev = iface->ui_dev; |
884 | } | | 892 | } |
885 | | | 893 | |
886 | usbd_status | | 894 | usbd_status |
887 | usbd_device2interface_handle(struct usbd_device *dev, | | 895 | usbd_device2interface_handle(struct usbd_device *dev, |
888 | uint8_t ifaceno, struct usbd_interface **iface) | | 896 | uint8_t ifaceno, struct usbd_interface **iface) |
889 | { | | 897 | { |
890 | | | 898 | |
891 | if (dev->ud_cdesc == NULL) | | 899 | if (dev->ud_cdesc == NULL) |
892 | return USBD_NOT_CONFIGURED; | | 900 | return USBD_NOT_CONFIGURED; |
893 | if (ifaceno >= dev->ud_cdesc->bNumInterface) | | 901 | if (ifaceno >= dev->ud_cdesc->bNumInterface) |
894 | return USBD_INVAL; | | 902 | return USBD_INVAL; |
895 | *iface = &dev->ud_ifaces[ifaceno]; | | 903 | *iface = &dev->ud_ifaces[ifaceno]; |
896 | return USBD_NORMAL_COMPLETION; | | 904 | return USBD_NORMAL_COMPLETION; |
897 | } | | 905 | } |
898 | | | 906 | |
899 | struct usbd_device * | | 907 | struct usbd_device * |
900 | usbd_pipe2device_handle(struct usbd_pipe *pipe) | | 908 | usbd_pipe2device_handle(struct usbd_pipe *pipe) |
901 | { | | 909 | { |
902 | KASSERT(pipe != NULL); | | 910 | KASSERT(pipe != NULL); |
903 | | | 911 | |
904 | return pipe->up_dev; | | 912 | return pipe->up_dev; |
905 | } | | 913 | } |
906 | | | 914 | |
/*
 * usbd_set_interface(iface, altidx)
 *
 *	Select alternate setting `altidx' of `iface': refill the
 *	software interface state for the new setting, then issue
 *	SET_INTERFACE to the device.  The interface is locked for the
 *	duration so pipes cannot be opened or closed concurrently.
 */
/* XXXX use altno */
usbd_status
usbd_set_interface(struct usbd_interface *iface, int altidx)
{
	bool locked = false;
	usb_device_request_t req;
	usbd_status err;

	USBHIST_FUNC();
	USBHIST_CALLARGS(usbdebug, "iface %#jx", (uintptr_t)iface, 0, 0, 0);

	err = usbd_iface_lock(iface);
	if (err)
		goto out;
	locked = true;

	/* Rebuild the endpoint state for the new alternate setting.  */
	err = usbd_fill_iface_data(iface->ui_dev, iface->ui_index, altidx);
	if (err)
		goto out;

	req.bmRequestType = UT_WRITE_INTERFACE;
	req.bRequest = UR_SET_INTERFACE;
	USETW(req.wValue, iface->ui_idesc->bAlternateSetting);
	USETW(req.wIndex, iface->ui_idesc->bInterfaceNumber);
	USETW(req.wLength, 0);
	err = usbd_do_request(iface->ui_dev, &req, 0);

out:	/* XXX back out iface data? */
	if (locked)
		usbd_iface_unlock(iface);
	return err;
}
939 | | | 947 | |
940 | int | | 948 | int |
941 | usbd_get_no_alts(usb_config_descriptor_t *cdesc, int ifaceno) | | 949 | usbd_get_no_alts(usb_config_descriptor_t *cdesc, int ifaceno) |
942 | { | | 950 | { |
943 | char *p = (char *)cdesc; | | 951 | char *p = (char *)cdesc; |
944 | char *end = p + UGETW(cdesc->wTotalLength); | | 952 | char *end = p + UGETW(cdesc->wTotalLength); |
945 | usb_interface_descriptor_t *d; | | 953 | usb_interface_descriptor_t *d; |
946 | int n; | | 954 | int n; |
947 | | | 955 | |
948 | for (n = 0; p < end; p += d->bLength) { | | 956 | for (n = 0; p < end; p += d->bLength) { |
949 | d = (usb_interface_descriptor_t *)p; | | 957 | d = (usb_interface_descriptor_t *)p; |
950 | if (p + d->bLength <= end && | | 958 | if (p + d->bLength <= end && |
951 | d->bDescriptorType == UDESC_INTERFACE && | | 959 | d->bDescriptorType == UDESC_INTERFACE && |
952 | d->bInterfaceNumber == ifaceno) | | 960 | d->bInterfaceNumber == ifaceno) |
953 | n++; | | 961 | n++; |
954 | } | | 962 | } |
955 | return n; | | 963 | return n; |
956 | } | | 964 | } |
957 | | | 965 | |
958 | int | | 966 | int |
959 | usbd_get_interface_altindex(struct usbd_interface *iface) | | 967 | usbd_get_interface_altindex(struct usbd_interface *iface) |
960 | { | | 968 | { |
961 | return iface->ui_altindex; | | 969 | return iface->ui_altindex; |
962 | } | | 970 | } |
963 | | | 971 | |
964 | usbd_status | | 972 | usbd_status |
965 | usbd_get_interface(struct usbd_interface *iface, uint8_t *aiface) | | 973 | usbd_get_interface(struct usbd_interface *iface, uint8_t *aiface) |
966 | { | | 974 | { |
967 | usb_device_request_t req; | | 975 | usb_device_request_t req; |
968 | | | 976 | |
969 | req.bmRequestType = UT_READ_INTERFACE; | | 977 | req.bmRequestType = UT_READ_INTERFACE; |
970 | req.bRequest = UR_GET_INTERFACE; | | 978 | req.bRequest = UR_GET_INTERFACE; |
971 | USETW(req.wValue, 0); | | 979 | USETW(req.wValue, 0); |
972 | USETW(req.wIndex, iface->ui_idesc->bInterfaceNumber); | | 980 | USETW(req.wIndex, iface->ui_idesc->bInterfaceNumber); |
973 | USETW(req.wLength, 1); | | 981 | USETW(req.wLength, 1); |
974 | return usbd_do_request(iface->ui_dev, &req, aiface); | | 982 | return usbd_do_request(iface->ui_dev, &req, aiface); |
975 | } | | 983 | } |
976 | | | 984 | |
977 | /*** Internal routines ***/ | | 985 | /*** Internal routines ***/ |
978 | | | 986 | |
979 | /* Dequeue all pipe operations, called with bus lock held. */ | | 987 | /* Dequeue all pipe operations, called with bus lock held. */ |
Static void
usbd_ar_pipe(struct usbd_pipe *pipe)
{
	struct usbd_xfer *xfer;

	USBHIST_FUNC();
	USBHIST_CALLARGS(usbdebug, "pipe = %#jx", (uintptr_t)pipe, 0, 0, 0);
	SDT_PROBE1(usb, device, pipe, abort__start, pipe);

	KASSERT(mutex_owned(pipe->up_dev->ud_bus->ub_lock));

#ifdef USB_DEBUG
	if (usbdebug > 5)
		usbd_dump_queue(pipe);
#endif
	/*
	 * Stop queue processing and mark the pipe as aborting so no new
	 * work is started while the queue is drained below.
	 */
	pipe->up_repeat = 0;
	pipe->up_running = 0;
	pipe->up_aborting = 1;
	/* Drain the queue head-first until it is empty. */
	while ((xfer = SIMPLEQ_FIRST(&pipe->up_queue)) != NULL) {
		USBHIST_LOG(usbdebug, "pipe = %#jx xfer = %#jx "
		    "(methods = %#jx)", (uintptr_t)pipe, (uintptr_t)xfer,
		    (uintptr_t)pipe->up_methods, 0);
		if (xfer->ux_status == USBD_NOT_STARTED) {
			/*
			 * Never handed to the host controller: unlink it
			 * directly and return it to the BUSY state.
			 */
			SDT_PROBE1(usb, device, xfer, preabort, xfer);
#ifdef DIAGNOSTIC
			xfer->ux_state = XFER_BUSY;
#endif
			SIMPLEQ_REMOVE_HEAD(&pipe->up_queue, ux_next);
		} else {
			/* Make the HC abort it (and invoke the callback). */
			SDT_PROBE1(usb, device, xfer, abort, xfer);
			pipe->up_methods->upm_abort(xfer);
			/*
			 * If this xfer's completion callback is running
			 * (up_callingxfer is set while the bus lock is
			 * dropped around callbacks), wait for it to
			 * finish before moving on to the next xfer.
			 */
			while (pipe->up_callingxfer == xfer) {
				USBHIST_LOG(usbdebug, "wait for callback"
				    "pipe = %#jx xfer = %#jx",
				    (uintptr_t)pipe, (uintptr_t)xfer, 0, 0);
				cv_wait(&pipe->up_callingcv,
				    pipe->up_dev->ud_bus->ub_lock);
			}
			/* XXX only for non-0 usbd_clear_endpoint_stall(pipe); */
		}
	}
	SDT_PROBE1(usb, device, pipe, abort__done, pipe);
}
1024 | | | 1032 | |
1025 | /* Called with USB lock held. */ | | 1033 | /* Called with USB lock held. */ |
void
usb_transfer_complete(struct usbd_xfer *xfer)
{
	struct usbd_pipe *pipe = xfer->ux_pipe;
	struct usbd_bus *bus = pipe->up_dev->ud_bus;
	int sync = xfer->ux_flags & USBD_SYNCHRONOUS;
	int erred;
	int polling = bus->ub_usepolling;
	int repeat = pipe->up_repeat;

	USBHIST_FUNC();
	USBHIST_CALLARGS(usbdebug, "pipe = %#jx xfer = %#jx status = %jd "
	    "actlen = %jd", (uintptr_t)pipe, (uintptr_t)xfer, xfer->ux_status,
	    xfer->ux_actlen);

	KASSERT(polling || mutex_owned(pipe->up_dev->ud_bus->ub_lock));
	KASSERTMSG(xfer->ux_state == XFER_ONQU, "xfer %p state is %x", xfer,
	    xfer->ux_state);
	KASSERT(pipe != NULL);

	/*
	 * If device is known to miss out ack, then pretend that
	 * output timeout is a success. Userland should handle
	 * the logic to verify that the operation succeeded.
	 */
	if (pipe->up_dev->ud_quirks &&
	    pipe->up_dev->ud_quirks->uq_flags & UQ_MISS_OUT_ACK &&
	    xfer->ux_status == USBD_TIMEOUT &&
	    !usbd_xfer_isread(xfer)) {
		USBHIST_LOG(usbdebug, "Possible output ack miss for xfer %#jx: "
		    "hiding write timeout to %jd.%jd for %ju bytes written",
		    (uintptr_t)xfer, curlwp->l_proc->p_pid, curlwp->l_lid,
		    xfer->ux_length);

		xfer->ux_status = USBD_NORMAL_COMPLETION;
		xfer->ux_actlen = xfer->ux_length;
	}

	/* Remember whether this completion is due to cancel/timeout. */
	erred = xfer->ux_status == USBD_CANCELLED ||
	    xfer->ux_status == USBD_TIMEOUT;

	if (!repeat) {
		/* Remove request from queue. */

		KASSERTMSG(!SIMPLEQ_EMPTY(&pipe->up_queue),
		    "pipe %p is empty, but xfer %p wants to complete", pipe,
		    xfer);
		KASSERTMSG(xfer == SIMPLEQ_FIRST(&pipe->up_queue),
		    "xfer %p is not start of queue (%p is at start)", xfer,
		    SIMPLEQ_FIRST(&pipe->up_queue));

#ifdef DIAGNOSTIC
		xfer->ux_state = XFER_BUSY;
#endif
		SIMPLEQ_REMOVE_HEAD(&pipe->up_queue, ux_next);
	}
	USBHIST_LOG(usbdebug, "xfer %#jx: repeat %jd new head = %#jx",
	    (uintptr_t)xfer, repeat, (uintptr_t)SIMPLEQ_FIRST(&pipe->up_queue),
	    0);

	/* Count completed transfers. */
	++pipe->up_dev->ud_bus->ub_stats.uds_requests
	    [pipe->up_endpoint->ue_edesc->bmAttributes & UE_XFERTYPE];

	xfer->ux_done = 1;
	/*
	 * A successful transfer that moved fewer bytes than requested is
	 * an error unless the submitter opted in with USBD_SHORT_XFER_OK.
	 */
	if (!xfer->ux_status && xfer->ux_actlen < xfer->ux_length &&
	    !(xfer->ux_flags & USBD_SHORT_XFER_OK)) {
		USBHIST_LOG(usbdebug, "short transfer %jd < %jd",
		    xfer->ux_actlen, xfer->ux_length, 0, 0);
		xfer->ux_status = USBD_SHORT_XFER;
	}

	USBHIST_LOG(usbdebug, "xfer %#jx doing done %#jx", (uintptr_t)xfer,
	    (uintptr_t)pipe->up_methods->upm_done, 0, 0);
	SDT_PROBE2(usb, device, xfer, done, xfer, xfer->ux_status);
	pipe->up_methods->upm_done(xfer);

	/*
	 * If a bounce buffer was used (ux_buffer differs from ux_buf),
	 * copy the received data back to the caller's buffer.
	 */
	if (xfer->ux_length != 0 && xfer->ux_buffer != xfer->ux_buf) {
		KDASSERTMSG(xfer->ux_actlen <= xfer->ux_length,
		    "actlen %d length %d",xfer->ux_actlen, xfer->ux_length);

		/* Only if IN transfer */
		if (usbd_xfer_isread(xfer)) {
			memcpy(xfer->ux_buffer, xfer->ux_buf, xfer->ux_actlen);
		}
	}

	USBHIST_LOG(usbdebug, "xfer %#jx doing callback %#jx status %jd",
	    (uintptr_t)xfer, (uintptr_t)xfer->ux_callback, xfer->ux_status, 0);

	if (xfer->ux_callback) {
		if (!polling) {
			/*
			 * Publish the in-callback xfer (usbd_ar_pipe
			 * waits on this) and drop the bus lock around
			 * the callback; take the kernel lock for
			 * non-MPSAFE drivers.
			 */
			KASSERT(pipe->up_callingxfer == NULL);
			pipe->up_callingxfer = xfer;
			mutex_exit(pipe->up_dev->ud_bus->ub_lock);
			if (!(pipe->up_flags & USBD_MPSAFE))
				KERNEL_LOCK(1, curlwp);
		}

		xfer->ux_callback(xfer, xfer->ux_priv, xfer->ux_status);

		if (!polling) {
			if (!(pipe->up_flags & USBD_MPSAFE))
				KERNEL_UNLOCK_ONE(curlwp);
			mutex_enter(pipe->up_dev->ud_bus->ub_lock);
			KASSERT(pipe->up_callingxfer == xfer);
			pipe->up_callingxfer = NULL;
			/* Wake anyone waiting for the callback to finish. */
			cv_broadcast(&pipe->up_callingcv);
		}
	}

	/* Wake a synchronous submitter sleeping in usbd_sync_transfer. */
	if (sync && !polling) {
		USBHIST_LOG(usbdebug, "<- done xfer %#jx, wakeup",
		    (uintptr_t)xfer, 0, 0, 0);
		cv_broadcast(&xfer->ux_cv);
	}

	if (repeat) {
		/* Repeating xfer stays queued: reset it for the next round. */
		xfer->ux_actlen = 0;
		xfer->ux_status = USBD_NOT_STARTED;
	} else {
		/* XXX should we stop the queue on all errors? */
		if (erred && pipe->up_iface != NULL)	/* not control pipe */
			pipe->up_running = 0;
	}
	if (pipe->up_running && pipe->up_serialise)
		usbd_start_next(pipe);
}
1154 | | | 1162 | |
1155 | /* Called with USB lock held. */ | | 1163 | /* Called with USB lock held. */ |
1156 | static usbd_status | | | |
1157 | usb_insert_transfer(struct usbd_xfer *xfer) | | | |
1158 | { | | | |
1159 | struct usbd_pipe *pipe = xfer->ux_pipe; | | | |
1160 | usbd_status err; | | | |
1161 | | | | |
1162 | USBHIST_FUNC(); USBHIST_CALLARGS(usbdebug, | | | |
1163 | "xfer = %#jx pipe = %#jx running = %jd timeout = %jd", | | | |
1164 | (uintptr_t)xfer, (uintptr_t)pipe, | | | |
1165 | pipe->up_running, xfer->ux_timeout); | | | |
1166 | | | | |
1167 | KASSERT(mutex_owned(pipe->up_dev->ud_bus->ub_lock)); | | | |
1168 | KASSERTMSG(xfer->ux_state == XFER_BUSY, "xfer %p state is %x", xfer, | | | |
1169 | xfer->ux_state); | | | |
1170 | | | | |
1171 | #ifdef DIAGNOSTIC | | | |
1172 | xfer->ux_state = XFER_ONQU; | | | |
1173 | #endif | | | |
1174 | SIMPLEQ_INSERT_TAIL(&pipe->up_queue, xfer, ux_next); | | | |
1175 | if (pipe->up_running && pipe->up_serialise) | | | |
1176 | err = USBD_IN_PROGRESS; | | | |
1177 | else { | | | |
1178 | pipe->up_running = 1; | | | |
1179 | err = USBD_NORMAL_COMPLETION; | | | |
1180 | } | | | |
1181 | USBHIST_LOG(usbdebug, "<- done xfer %#jx, err %jd", (uintptr_t)xfer, | | | |
1182 | err, 0, 0); | | | |
1183 | return err; | | | |
1184 | } | | | |
1185 | | | | |
1186 | /* Called with USB lock held. */ | | | |
void
usbd_start_next(struct usbd_pipe *pipe)
{
	struct usbd_xfer *xfer;
	usbd_status err;

	USBHIST_FUNC();

	KASSERT(pipe != NULL);
	KASSERT(pipe->up_methods != NULL);
	KASSERT(pipe->up_methods->upm_start != NULL);
	/* Only serialised pipes go through this queue-advance path. */
	KASSERT(pipe->up_serialise == true);

	int polling = pipe->up_dev->ud_bus->ub_usepolling;
	KASSERT(polling || mutex_owned(pipe->up_dev->ud_bus->ub_lock));

	/* Get next request in queue. */
	xfer = SIMPLEQ_FIRST(&pipe->up_queue);
	USBHIST_CALLARGS(usbdebug, "pipe = %#jx, xfer = %#jx", (uintptr_t)pipe,
	    (uintptr_t)xfer, 0, 0);
	if (xfer == NULL) {
		/* Queue drained: mark the pipe idle. */
		pipe->up_running = 0;
	} else {
		/* Drop the bus lock around the HC start method. */
		if (!polling)
			mutex_exit(pipe->up_dev->ud_bus->ub_lock);
		SDT_PROBE2(usb, device, pipe, start, pipe, xfer);
		err = pipe->up_methods->upm_start(xfer);
		if (!polling)
			mutex_enter(pipe->up_dev->ud_bus->ub_lock);

		if (err != USBD_IN_PROGRESS) {
			USBHIST_LOG(usbdebug, "error = %jd", err, 0, 0, 0);
			pipe->up_running = 0;
			/* XXX do what? */
		}
	}

	KASSERT(polling || mutex_owned(pipe->up_dev->ud_bus->ub_lock));
}
1226 | | | 1203 | |
1227 | usbd_status | | 1204 | usbd_status |
1228 | usbd_do_request(struct usbd_device *dev, usb_device_request_t *req, void *data) | | 1205 | usbd_do_request(struct usbd_device *dev, usb_device_request_t *req, void *data) |
1229 | { | | 1206 | { |
1230 | | | 1207 | |
1231 | return usbd_do_request_flags(dev, req, data, 0, 0, | | 1208 | return usbd_do_request_flags(dev, req, data, 0, 0, |
1232 | USBD_DEFAULT_TIMEOUT); | | 1209 | USBD_DEFAULT_TIMEOUT); |
1233 | } | | 1210 | } |
1234 | | | 1211 | |
1235 | usbd_status | | 1212 | usbd_status |
1236 | usbd_do_request_flags(struct usbd_device *dev, usb_device_request_t *req, | | 1213 | usbd_do_request_flags(struct usbd_device *dev, usb_device_request_t *req, |
1237 | void *data, uint16_t flags, int *actlen, uint32_t timeout) | | 1214 | void *data, uint16_t flags, int *actlen, uint32_t timeout) |
1238 | { | | 1215 | { |
1239 | size_t len = UGETW(req->wLength); | | 1216 | size_t len = UGETW(req->wLength); |
1240 | | | 1217 | |
1241 | return usbd_do_request_len(dev, req, len, data, flags, actlen, timeout); | | 1218 | return usbd_do_request_len(dev, req, len, data, flags, actlen, timeout); |
1242 | } | | 1219 | } |
1243 | | | 1220 | |
1244 | usbd_status | | 1221 | usbd_status |
1245 | usbd_do_request_len(struct usbd_device *dev, usb_device_request_t *req, | | 1222 | usbd_do_request_len(struct usbd_device *dev, usb_device_request_t *req, |
1246 | size_t len, void *data, uint16_t flags, int *actlen, uint32_t timeout) | | 1223 | size_t len, void *data, uint16_t flags, int *actlen, uint32_t timeout) |
1247 | { | | 1224 | { |
1248 | struct usbd_xfer *xfer; | | 1225 | struct usbd_xfer *xfer; |
1249 | usbd_status err; | | 1226 | usbd_status err; |
1250 | | | 1227 | |
1251 | KASSERT(len >= UGETW(req->wLength)); | | 1228 | KASSERT(len >= UGETW(req->wLength)); |
1252 | | | 1229 | |
1253 | USBHIST_FUNC(); | | 1230 | USBHIST_FUNC(); |
1254 | USBHIST_CALLARGS(usbdebug, "dev=%#jx req=%jx flags=%jx len=%jx", | | 1231 | USBHIST_CALLARGS(usbdebug, "dev=%#jx req=%jx flags=%jx len=%jx", |
1255 | (uintptr_t)dev, (uintptr_t)req, flags, len); | | 1232 | (uintptr_t)dev, (uintptr_t)req, flags, len); |
1256 | | | 1233 | |
1257 | ASSERT_SLEEPABLE(); | | 1234 | ASSERT_SLEEPABLE(); |
1258 | | | 1235 | |
1259 | int error = usbd_create_xfer(dev->ud_pipe0, len, 0, 0, &xfer); | | 1236 | int error = usbd_create_xfer(dev->ud_pipe0, len, 0, 0, &xfer); |
1260 | if (error) | | 1237 | if (error) |
1261 | return USBD_NOMEM; | | 1238 | return USBD_NOMEM; |
1262 | | | 1239 | |
1263 | usbd_setup_default_xfer(xfer, dev, 0, timeout, req, data, | | 1240 | usbd_setup_default_xfer(xfer, dev, 0, timeout, req, data, |
1264 | UGETW(req->wLength), flags, NULL); | | 1241 | UGETW(req->wLength), flags, NULL); |
1265 | KASSERT(xfer->ux_pipe == dev->ud_pipe0); | | 1242 | KASSERT(xfer->ux_pipe == dev->ud_pipe0); |
1266 | err = usbd_sync_transfer(xfer); | | 1243 | err = usbd_sync_transfer(xfer); |
1267 | #if defined(USB_DEBUG) || defined(DIAGNOSTIC) | | 1244 | #if defined(USB_DEBUG) || defined(DIAGNOSTIC) |
1268 | if (xfer->ux_actlen > xfer->ux_length) { | | 1245 | if (xfer->ux_actlen > xfer->ux_length) { |
1269 | USBHIST_LOG(usbdebug, "overrun addr = %jd type = 0x%02jx", | | 1246 | USBHIST_LOG(usbdebug, "overrun addr = %jd type = 0x%02jx", |
1270 | dev->ud_addr, xfer->ux_request.bmRequestType, 0, 0); | | 1247 | dev->ud_addr, xfer->ux_request.bmRequestType, 0, 0); |
1271 | USBHIST_LOG(usbdebug, " req = 0x%02jx val = %jd " | | 1248 | USBHIST_LOG(usbdebug, " req = 0x%02jx val = %jd " |
1272 | "index = %jd", | | 1249 | "index = %jd", |
1273 | xfer->ux_request.bRequest, UGETW(xfer->ux_request.wValue), | | 1250 | xfer->ux_request.bRequest, UGETW(xfer->ux_request.wValue), |
1274 | UGETW(xfer->ux_request.wIndex), 0); | | 1251 | UGETW(xfer->ux_request.wIndex), 0); |
1275 | USBHIST_LOG(usbdebug, " rlen = %jd length = %jd " | | 1252 | USBHIST_LOG(usbdebug, " rlen = %jd length = %jd " |
1276 | "actlen = %jd", | | 1253 | "actlen = %jd", |
1277 | UGETW(xfer->ux_request.wLength), | | 1254 | UGETW(xfer->ux_request.wLength), |
1278 | xfer->ux_length, xfer->ux_actlen, 0); | | 1255 | xfer->ux_length, xfer->ux_actlen, 0); |
1279 | } | | 1256 | } |
1280 | #endif | | 1257 | #endif |
1281 | if (actlen != NULL) | | 1258 | if (actlen != NULL) |
1282 | *actlen = xfer->ux_actlen; | | 1259 | *actlen = xfer->ux_actlen; |
1283 | | | 1260 | |
1284 | usbd_destroy_xfer(xfer); | | 1261 | usbd_destroy_xfer(xfer); |
1285 | | | 1262 | |
1286 | if (err) { | | 1263 | if (err) { |
1287 | USBHIST_LOG(usbdebug, "returning err = %jd", err, 0, 0, 0); | | 1264 | USBHIST_LOG(usbdebug, "returning err = %jd", err, 0, 0, 0); |
1288 | } | | 1265 | } |
1289 | return err; | | 1266 | return err; |
1290 | } | | 1267 | } |
1291 | | | 1268 | |
/* Default completion callback for usbd_request_async: free the xfer. */
static void
usbd_request_async_cb(struct usbd_xfer *xfer, void *priv, usbd_status status)
{
	usbd_destroy_xfer(xfer);
}
1297 | | | 1274 | |
1298 | /* | | 1275 | /* |
1299 | * Execute a request without waiting for completion. | | 1276 | * Execute a request without waiting for completion. |
1300 | * Can be used from interrupt context. | | 1277 | * Can be used from interrupt context. |
1301 | */ | | 1278 | */ |
1302 | usbd_status | | 1279 | usbd_status |
1303 | usbd_request_async(struct usbd_device *dev, struct usbd_xfer *xfer, | | 1280 | usbd_request_async(struct usbd_device *dev, struct usbd_xfer *xfer, |
1304 | usb_device_request_t *req, void *priv, usbd_callback callback) | | 1281 | usb_device_request_t *req, void *priv, usbd_callback callback) |
1305 | { | | 1282 | { |
1306 | usbd_status err; | | 1283 | usbd_status err; |
1307 | | | 1284 | |
1308 | if (callback == NULL) | | 1285 | if (callback == NULL) |
1309 | callback = usbd_request_async_cb; | | 1286 | callback = usbd_request_async_cb; |
1310 | | | 1287 | |
1311 | usbd_setup_default_xfer(xfer, dev, priv, | | 1288 | usbd_setup_default_xfer(xfer, dev, priv, |
1312 | USBD_DEFAULT_TIMEOUT, req, NULL, UGETW(req->wLength), 0, | | 1289 | USBD_DEFAULT_TIMEOUT, req, NULL, UGETW(req->wLength), 0, |
1313 | callback); | | 1290 | callback); |
1314 | err = usbd_transfer(xfer); | | 1291 | err = usbd_transfer(xfer); |
1315 | if (err != USBD_IN_PROGRESS) { | | 1292 | if (err != USBD_IN_PROGRESS) { |
1316 | usbd_destroy_xfer(xfer); | | 1293 | usbd_destroy_xfer(xfer); |
1317 | return (err); | | 1294 | return (err); |
1318 | } | | 1295 | } |
1319 | return (USBD_NORMAL_COMPLETION); | | 1296 | return (USBD_NORMAL_COMPLETION); |
1320 | } | | 1297 | } |
1321 | | | 1298 | |
1322 | const struct usbd_quirks * | | 1299 | const struct usbd_quirks * |
1323 | usbd_get_quirks(struct usbd_device *dev) | | 1300 | usbd_get_quirks(struct usbd_device *dev) |
1324 | { | | 1301 | { |
1325 | #ifdef DIAGNOSTIC | | 1302 | #ifdef DIAGNOSTIC |
1326 | if (dev == NULL) { | | 1303 | if (dev == NULL) { |
1327 | printf("usbd_get_quirks: dev == NULL\n"); | | 1304 | printf("usbd_get_quirks: dev == NULL\n"); |
1328 | return 0; | | 1305 | return 0; |
1329 | } | | 1306 | } |
1330 | #endif | | 1307 | #endif |
1331 | return dev->ud_quirks; | | 1308 | return dev->ud_quirks; |
1332 | } | | 1309 | } |
1333 | | | 1310 | |
1334 | /* XXX do periodic free() of free list */ | | 1311 | /* XXX do periodic free() of free list */ |
1335 | | | 1312 | |
1336 | /* | | 1313 | /* |
1337 | * Called from keyboard driver when in polling mode. | | 1314 | * Called from keyboard driver when in polling mode. |
1338 | */ | | 1315 | */ |
1339 | void | | 1316 | void |
1340 | usbd_dopoll(struct usbd_interface *iface) | | 1317 | usbd_dopoll(struct usbd_interface *iface) |
1341 | { | | 1318 | { |
1342 | iface->ui_dev->ud_bus->ub_methods->ubm_dopoll(iface->ui_dev->ud_bus); | | 1319 | iface->ui_dev->ud_bus->ub_methods->ubm_dopoll(iface->ui_dev->ud_bus); |
1343 | } | | 1320 | } |
1344 | | | 1321 | |
1345 | /* | | 1322 | /* |
1346 | * This is for keyboard driver as well, which only operates in polling | | 1323 | * This is for keyboard driver as well, which only operates in polling |
1347 | * mode from the ask root, etc., prompt and from DDB. | | 1324 | * mode from the ask root, etc., prompt and from DDB. |
1348 | */ | | 1325 | */ |
1349 | void | | 1326 | void |
1350 | usbd_set_polling(struct usbd_device *dev, int on) | | 1327 | usbd_set_polling(struct usbd_device *dev, int on) |
1351 | { | | 1328 | { |
1352 | if (on) | | 1329 | if (on) |
1353 | dev->ud_bus->ub_usepolling++; | | 1330 | dev->ud_bus->ub_usepolling++; |
1354 | else | | 1331 | else |
1355 | dev->ud_bus->ub_usepolling--; | | 1332 | dev->ud_bus->ub_usepolling--; |
1356 | | | 1333 | |
1357 | /* Kick the host controller when switching modes */ | | 1334 | /* Kick the host controller when switching modes */ |
1358 | mutex_enter(dev->ud_bus->ub_lock); | | 1335 | mutex_enter(dev->ud_bus->ub_lock); |
1359 | dev->ud_bus->ub_methods->ubm_softint(dev->ud_bus); | | 1336 | dev->ud_bus->ub_methods->ubm_softint(dev->ud_bus); |
1360 | mutex_exit(dev->ud_bus->ub_lock); | | 1337 | mutex_exit(dev->ud_bus->ub_lock); |
1361 | } | | 1338 | } |
1362 | | | 1339 | |
1363 | | | 1340 | |
1364 | usb_endpoint_descriptor_t * | | 1341 | usb_endpoint_descriptor_t * |
1365 | usbd_get_endpoint_descriptor(struct usbd_interface *iface, uint8_t address) | | 1342 | usbd_get_endpoint_descriptor(struct usbd_interface *iface, uint8_t address) |
1366 | { | | 1343 | { |
1367 | struct usbd_endpoint *ep; | | 1344 | struct usbd_endpoint *ep; |
1368 | int i; | | 1345 | int i; |
1369 | | | 1346 | |
1370 | for (i = 0; i < iface->ui_idesc->bNumEndpoints; i++) { | | 1347 | for (i = 0; i < iface->ui_idesc->bNumEndpoints; i++) { |
1371 | ep = &iface->ui_endpoints[i]; | | 1348 | ep = &iface->ui_endpoints[i]; |
1372 | if (ep->ue_edesc->bEndpointAddress == address) | | 1349 | if (ep->ue_edesc->bEndpointAddress == address) |
1373 | return iface->ui_endpoints[i].ue_edesc; | | 1350 | return iface->ui_endpoints[i].ue_edesc; |
1374 | } | | 1351 | } |
1375 | return NULL; | | 1352 | return NULL; |
1376 | } | | 1353 | } |
1377 | | | 1354 | |
1378 | /* | | 1355 | /* |
1379 | * usbd_ratecheck() can limit the number of error messages that occurs. | | 1356 | * usbd_ratecheck() can limit the number of error messages that occurs. |
1380 | * When a device is unplugged it may take up to 0.25s for the hub driver | | 1357 | * When a device is unplugged it may take up to 0.25s for the hub driver |
1381 | * to notice it. If the driver continuously tries to do I/O operations | | 1358 | * to notice it. If the driver continuously tries to do I/O operations |
1382 | * this can generate a large number of messages. | | 1359 | * this can generate a large number of messages. |
1383 | */ | | 1360 | */ |
int
usbd_ratecheck(struct timeval *last)
{
	static struct timeval errinterval = { 0, 250000 }; /* 0.25 s*/

	/* Nonzero when enough time has passed since *last; updates *last. */
	return ratecheck(last, &errinterval);
}
1391 | | | 1368 | |
1392 | /* | | 1369 | /* |
1393 | * Search for a vendor/product pair in an array. The item size is | | 1370 | * Search for a vendor/product pair in an array. The item size is |
1394 | * given as an argument. | | 1371 | * given as an argument. |
1395 | */ | | 1372 | */ |
1396 | const struct usb_devno * | | 1373 | const struct usb_devno * |
1397 | usb_match_device(const struct usb_devno *tbl, u_int nentries, u_int sz, | | 1374 | usb_match_device(const struct usb_devno *tbl, u_int nentries, u_int sz, |
1398 | uint16_t vendor, uint16_t product) | | 1375 | uint16_t vendor, uint16_t product) |
1399 | { | | 1376 | { |
1400 | while (nentries-- > 0) { | | 1377 | while (nentries-- > 0) { |
1401 | uint16_t tproduct = tbl->ud_product; | | 1378 | uint16_t tproduct = tbl->ud_product; |
1402 | if (tbl->ud_vendor == vendor && | | 1379 | if (tbl->ud_vendor == vendor && |
1403 | (tproduct == product || tproduct == USB_PRODUCT_ANY)) | | 1380 | (tproduct == product || tproduct == USB_PRODUCT_ANY)) |
1404 | return tbl; | | 1381 | return tbl; |
1405 | tbl = (const struct usb_devno *)((const char *)tbl + sz); | | 1382 | tbl = (const struct usb_devno *)((const char *)tbl + sz); |
1406 | } | | 1383 | } |
1407 | return NULL; | | 1384 | return NULL; |
1408 | } | | 1385 | } |
1409 | | | 1386 | |
/*
 * Fetch string descriptor index si from dev and store it in buf as a
 * NUL-terminated string.  Convenience wrapper for usbd_get_string0
 * with UTF-8 conversion of the UTF-16 descriptor enabled.
 */
usbd_status
usbd_get_string(struct usbd_device *dev, int si, char *buf)
{
	return usbd_get_string0(dev, si, buf, 1);
}
1415 | | | 1392 | |
/*
 * Fetch string descriptor index si from dev into buf, NUL-terminated.
 * If unicode is nonzero the descriptor's UTF-16 code units are encoded
 * as UTF-8; otherwise (COMPAT_30 only) non-ASCII code units become '?'.
 * On first use, also look up and cache the device's default language id.
 *
 * NOTE(review): buf sizing is the caller's responsibility -- the UTF-8
 * path can emit up to 3 bytes per code unit plus the terminator; verify
 * callers pass a buffer sized accordingly.
 */
usbd_status
usbd_get_string0(struct usbd_device *dev, int si, char *buf, int unicode)
{
	/* Quirky devices store the UTF-16 code units byte-swapped. */
	int swap = dev->ud_quirks->uq_flags & UQ_SWAP_UNICODE;
	usb_string_descriptor_t us;
	char *s;
	int i, n;
	uint16_t c;
	usbd_status err;
	int size;

	USBHIST_FUNC(); USBHIST_CALLED(usbdebug);

	buf[0] = '\0';
	if (si == 0)
		return USBD_INVAL;
	if (dev->ud_quirks->uq_flags & UQ_NO_STRINGS)
		return USBD_STALLED;
	if (dev->ud_langid == USBD_NOLANG) {
		/* Set up default language */
		err = usbd_get_string_desc(dev, USB_LANGUAGE_TABLE, 0, &us,
		    &size);
		if (err || size < 4) {
			USBHIST_LOG(usbdebug, "getting lang failed, using 0",
			    0, 0, 0, 0);
			dev->ud_langid = 0; /* Well, just pick something then */
		} else {
			/* Pick the first language as the default. */
			dev->ud_langid = UGETW(us.bString[0]);
		}
	}
	err = usbd_get_string_desc(dev, si, dev->ud_langid, &us, &size);
	if (err)
		return err;
	s = buf;
	/* Code units after the 2-byte bLength/bDescriptorType header. */
	n = size / 2 - 1;
	if (unicode) {
		/* Encode each UTF-16 code unit as up to 3 bytes of UTF-8. */
		for (i = 0; i < n; i++) {
			c = UGETW(us.bString[i]);
			if (swap)
				c = (c >> 8) | (c << 8);
			s += wput_utf8(s, 3, c);
		}
		*s++ = 0;
	}
#ifdef COMPAT_30
	else {
		/* Old ABI: one byte per code unit, non-ASCII becomes '?'. */
		for (i = 0; i < n; i++) {
			c = UGETW(us.bString[i]);
			if (swap)
				c = (c >> 8) | (c << 8);
			*s++ = (c < 0x80) ? c : '?';
		}
		*s++ = 0;
	}
#endif
	return USBD_NORMAL_COMPLETION;
}
1474 | | | 1451 | |
1475 | /* | | 1452 | /* |
1476 | * usbd_xfer_trycomplete(xfer) | | 1453 | * usbd_xfer_trycomplete(xfer) |
1477 | * | | 1454 | * |
1478 | * Try to claim xfer for completion. Return true if successful, | | 1455 | * Try to claim xfer for completion. Return true if successful, |
1479 | * false if the xfer has been synchronously aborted or has timed | | 1456 | * false if the xfer has been synchronously aborted or has timed |
1480 | * out. | | 1457 | * out. |
1481 | * | | 1458 | * |
1482 | * If this returns true, caller is responsible for setting | | 1459 | * If this returns true, caller is responsible for setting |
1483 | * xfer->ux_status and calling usb_transfer_complete. To be used | | 1460 | * xfer->ux_status and calling usb_transfer_complete. To be used |
1484 | * in a host controller interrupt handler. | | 1461 | * in a host controller interrupt handler. |
1485 | * | | 1462 | * |
1486 | * Caller must either hold the bus lock or have the bus in polling | | 1463 | * Caller must either hold the bus lock or have the bus in polling |
1487 | * mode. | | 1464 | * mode. |
1488 | */ | | 1465 | */ |
1489 | bool | | 1466 | bool |
1490 | usbd_xfer_trycomplete(struct usbd_xfer *xfer) | | 1467 | usbd_xfer_trycomplete(struct usbd_xfer *xfer) |
1491 | { | | 1468 | { |
1492 | struct usbd_bus *bus __diagused = xfer->ux_bus; | | 1469 | struct usbd_bus *bus __diagused = xfer->ux_bus; |
1493 | | | 1470 | |
1494 | KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock)); | | 1471 | KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock)); |
1495 | | | 1472 | |
1496 | /* | | 1473 | /* |
1497 | * If software has completed it, either by synchronous abort or | | 1474 | * If software has completed it, either by synchronous abort or |
1498 | * by timeout, too late. | | 1475 | * by timeout, too late. |
1499 | */ | | 1476 | */ |
1500 | if (xfer->ux_status != USBD_IN_PROGRESS) | | 1477 | if (xfer->ux_status != USBD_IN_PROGRESS) |
1501 | return false; | | 1478 | return false; |
1502 | | | 1479 | |
1503 | /* | | 1480 | /* |
1504 | * We are completing the xfer. Cancel the timeout if we can, | | 1481 | * We are completing the xfer. Cancel the timeout if we can, |
1505 | * but only asynchronously. See usbd_xfer_cancel_timeout_async | | 1482 | * but only asynchronously. See usbd_xfer_cancel_timeout_async |
1506 | * for why we need not wait for the callout or task here. | | 1483 | * for why we need not wait for the callout or task here. |
1507 | */ | | 1484 | */ |
1508 | usbd_xfer_cancel_timeout_async(xfer); | | 1485 | usbd_xfer_cancel_timeout_async(xfer); |
1509 | | | 1486 | |
1510 | /* Success! Note: Caller must set xfer->ux_status afterwar. */ | | 1487 | /* Success! Note: Caller must set xfer->ux_status afterwar. */ |
1511 | return true; | | 1488 | return true; |
1512 | } | | 1489 | } |
1513 | | | 1490 | |
1514 | /* | | 1491 | /* |
1515 | * usbd_xfer_abort(xfer) | | 1492 | * usbd_xfer_abort(xfer) |
1516 | * | | 1493 | * |
1517 | * Try to claim xfer to abort. If successful, mark it completed | | 1494 | * Try to claim xfer to abort. If successful, mark it completed |
1518 | * with USBD_CANCELLED and call the bus-specific method to abort | | 1495 | * with USBD_CANCELLED and call the bus-specific method to abort |
1519 | * at the hardware level. | | 1496 | * at the hardware level. |
1520 | * | | 1497 | * |
1521 | * To be called in thread context from struct | | 1498 | * To be called in thread context from struct |
1522 | * usbd_pipe_methods::upm_abort. | | 1499 | * usbd_pipe_methods::upm_abort. |
1523 | * | | 1500 | * |
1524 | * Caller must hold the bus lock. | | 1501 | * Caller must hold the bus lock. |
1525 | */ | | 1502 | */ |
1526 | void | | 1503 | void |
1527 | usbd_xfer_abort(struct usbd_xfer *xfer) | | 1504 | usbd_xfer_abort(struct usbd_xfer *xfer) |
1528 | { | | 1505 | { |
1529 | struct usbd_bus *bus = xfer->ux_bus; | | 1506 | struct usbd_bus *bus = xfer->ux_bus; |
1530 | | | 1507 | |
1531 | KASSERT(mutex_owned(bus->ub_lock)); | | 1508 | KASSERT(mutex_owned(bus->ub_lock)); |
1532 | | | 1509 | |
1533 | /* | | 1510 | /* |
1534 | * If host controller interrupt or timer interrupt has | | 1511 | * If host controller interrupt or timer interrupt has |
1535 | * completed it, too late. But the xfer cannot be | | 1512 | * completed it, too late. But the xfer cannot be |
1536 | * cancelled already -- only one caller can synchronously | | 1513 | * cancelled already -- only one caller can synchronously |
1537 | * abort. | | 1514 | * abort. |
1538 | */ | | 1515 | */ |
1539 | KASSERT(xfer->ux_status != USBD_CANCELLED); | | 1516 | KASSERT(xfer->ux_status != USBD_CANCELLED); |
1540 | if (xfer->ux_status != USBD_IN_PROGRESS) | | 1517 | if (xfer->ux_status != USBD_IN_PROGRESS) |
1541 | return; | | 1518 | return; |
1542 | | | 1519 | |
1543 | /* | | 1520 | /* |
1544 | * Cancel the timeout if we can, but only asynchronously; see | | 1521 | * Cancel the timeout if we can, but only asynchronously; see |
1545 | * usbd_xfer_cancel_timeout_async for why we need not wait for | | 1522 | * usbd_xfer_cancel_timeout_async for why we need not wait for |
1546 | * the callout or task here. | | 1523 | * the callout or task here. |
1547 | */ | | 1524 | */ |
1548 | usbd_xfer_cancel_timeout_async(xfer); | | 1525 | usbd_xfer_cancel_timeout_async(xfer); |
1549 | | | 1526 | |
1550 | /* | | 1527 | /* |
1551 | * We beat everyone else. Claim the status as cancelled, do | | 1528 | * We beat everyone else. Claim the status as cancelled, do |
1552 | * the bus-specific dance to abort the hardware, and complete | | 1529 | * the bus-specific dance to abort the hardware, and complete |
1553 | * the xfer. | | 1530 | * the xfer. |
1554 | */ | | 1531 | */ |
1555 | xfer->ux_status = USBD_CANCELLED; | | 1532 | xfer->ux_status = USBD_CANCELLED; |
1556 | bus->ub_methods->ubm_abortx(xfer); | | 1533 | bus->ub_methods->ubm_abortx(xfer); |
1557 | usb_transfer_complete(xfer); | | 1534 | usb_transfer_complete(xfer); |
1558 | } | | 1535 | } |
1559 | | | 1536 | |
1560 | /* | | 1537 | /* |
1561 | * usbd_xfer_timeout(xfer) | | 1538 | * usbd_xfer_timeout(xfer) |
1562 | * | | 1539 | * |
1563 | * Called at IPL_SOFTCLOCK when too much time has elapsed waiting | | 1540 | * Called at IPL_SOFTCLOCK when too much time has elapsed waiting |
1564 | * for xfer to complete. Since we can't abort the xfer at | | 1541 | * for xfer to complete. Since we can't abort the xfer at |
1565 | * IPL_SOFTCLOCK, defer to a usb_task to run it in thread context, | | 1542 | * IPL_SOFTCLOCK, defer to a usb_task to run it in thread context, |
1566 | * unless the xfer has completed or aborted concurrently -- and if | | 1543 | * unless the xfer has completed or aborted concurrently -- and if |
1567 | * the xfer has also been resubmitted, take care of rescheduling | | 1544 | * the xfer has also been resubmitted, take care of rescheduling |
1568 | * the callout. | | 1545 | * the callout. |
1569 | */ | | 1546 | */ |
1570 | static void | | 1547 | static void |
1571 | usbd_xfer_timeout(void *cookie) | | 1548 | usbd_xfer_timeout(void *cookie) |
1572 | { | | 1549 | { |
1573 | struct usbd_xfer *xfer = cookie; | | 1550 | struct usbd_xfer *xfer = cookie; |
1574 | struct usbd_bus *bus = xfer->ux_bus; | | 1551 | struct usbd_bus *bus = xfer->ux_bus; |
1575 | struct usbd_device *dev = xfer->ux_pipe->up_dev; | | 1552 | struct usbd_device *dev = xfer->ux_pipe->up_dev; |
1576 | | | 1553 | |
1577 | /* Acquire the lock so we can transition the timeout state. */ | | 1554 | /* Acquire the lock so we can transition the timeout state. */ |
1578 | mutex_enter(bus->ub_lock); | | 1555 | mutex_enter(bus->ub_lock); |
1579 | | | 1556 | |
1580 | /* | | 1557 | /* |
1581 | * Use usbd_xfer_probe_timeout to check whether the timeout is | | 1558 | * Use usbd_xfer_probe_timeout to check whether the timeout is |
1582 | * still valid, or to reschedule the callout if necessary. If | | 1559 | * still valid, or to reschedule the callout if necessary. If |
1583 | * it is still valid, schedule the task. | | 1560 | * it is still valid, schedule the task. |
1584 | */ | | 1561 | */ |
1585 | if (usbd_xfer_probe_timeout(xfer)) | | 1562 | if (usbd_xfer_probe_timeout(xfer)) |
1586 | usb_add_task(dev, &xfer->ux_aborttask, USB_TASKQ_HC); | | 1563 | usb_add_task(dev, &xfer->ux_aborttask, USB_TASKQ_HC); |
1587 | | | 1564 | |
1588 | /* | | 1565 | /* |
1589 | * Notify usbd_xfer_cancel_timeout_async that we may have | | 1566 | * Notify usbd_xfer_cancel_timeout_async that we may have |
1590 | * scheduled the task. This causes callout_invoking to return | | 1567 | * scheduled the task. This causes callout_invoking to return |
1591 | * false in usbd_xfer_cancel_timeout_async so that it can tell | | 1568 | * false in usbd_xfer_cancel_timeout_async so that it can tell |
1592 | * which stage in the callout->task->abort process we're at. | | 1569 | * which stage in the callout->task->abort process we're at. |
1593 | */ | | 1570 | */ |
1594 | callout_ack(&xfer->ux_callout); | | 1571 | callout_ack(&xfer->ux_callout); |
1595 | | | 1572 | |
1596 | /* All done -- release the lock. */ | | 1573 | /* All done -- release the lock. */ |
1597 | mutex_exit(bus->ub_lock); | | 1574 | mutex_exit(bus->ub_lock); |
1598 | } | | 1575 | } |
1599 | | | 1576 | |
1600 | /* | | 1577 | /* |
1601 | * usbd_xfer_timeout_task(xfer) | | 1578 | * usbd_xfer_timeout_task(xfer) |
1602 | * | | 1579 | * |
1603 | * Called in thread context when too much time has elapsed waiting | | 1580 | * Called in thread context when too much time has elapsed waiting |
1604 | * for xfer to complete. Abort the xfer with USBD_TIMEOUT, unless | | 1581 | * for xfer to complete. Abort the xfer with USBD_TIMEOUT, unless |
1605 | * it has completed or aborted concurrently -- and if the xfer has | | 1582 | * it has completed or aborted concurrently -- and if the xfer has |
1606 | * also been resubmitted, take care of rescheduling the callout. | | 1583 | * also been resubmitted, take care of rescheduling the callout. |
1607 | */ | | 1584 | */ |
1608 | static void | | 1585 | static void |
1609 | usbd_xfer_timeout_task(void *cookie) | | 1586 | usbd_xfer_timeout_task(void *cookie) |
1610 | { | | 1587 | { |
1611 | struct usbd_xfer *xfer = cookie; | | 1588 | struct usbd_xfer *xfer = cookie; |
1612 | struct usbd_bus *bus = xfer->ux_bus; | | 1589 | struct usbd_bus *bus = xfer->ux_bus; |
1613 | | | 1590 | |
1614 | /* Acquire the lock so we can transition the timeout state. */ | | 1591 | /* Acquire the lock so we can transition the timeout state. */ |
1615 | mutex_enter(bus->ub_lock); | | 1592 | mutex_enter(bus->ub_lock); |
1616 | | | 1593 | |
1617 | /* | | 1594 | /* |
1618 | * Use usbd_xfer_probe_timeout to check whether the timeout is | | 1595 | * Use usbd_xfer_probe_timeout to check whether the timeout is |
1619 | * still valid, or to reschedule the callout if necessary. If | | 1596 | * still valid, or to reschedule the callout if necessary. If |
1620 | * it is not valid -- the timeout has been asynchronously | | 1597 | * it is not valid -- the timeout has been asynchronously |
1621 | * cancelled, or the xfer has already been resubmitted -- then | | 1598 | * cancelled, or the xfer has already been resubmitted -- then |
1622 | * we're done here. | | 1599 | * we're done here. |
1623 | */ | | 1600 | */ |
1624 | if (!usbd_xfer_probe_timeout(xfer)) | | 1601 | if (!usbd_xfer_probe_timeout(xfer)) |
1625 | goto out; | | 1602 | goto out; |
1626 | | | 1603 | |
1627 | /* | | 1604 | /* |
1628 | * May have completed or been aborted, but we're the only one | | 1605 | * May have completed or been aborted, but we're the only one |
1629 | * who can time it out. If it has completed or been aborted, | | 1606 | * who can time it out. If it has completed or been aborted, |
1630 | * no need to timeout. | | 1607 | * no need to timeout. |
1631 | */ | | 1608 | */ |
1632 | KASSERT(xfer->ux_status != USBD_TIMEOUT); | | 1609 | KASSERT(xfer->ux_status != USBD_TIMEOUT); |
1633 | if (xfer->ux_status != USBD_IN_PROGRESS) | | 1610 | if (xfer->ux_status != USBD_IN_PROGRESS) |
1634 | goto out; | | 1611 | goto out; |
1635 | | | 1612 | |
1636 | /* | | 1613 | /* |
1637 | * We beat everyone else. Claim the status as timed out, do | | 1614 | * We beat everyone else. Claim the status as timed out, do |
1638 | * the bus-specific dance to abort the hardware, and complete | | 1615 | * the bus-specific dance to abort the hardware, and complete |
1639 | * the xfer. | | 1616 | * the xfer. |
1640 | */ | | 1617 | */ |
1641 | xfer->ux_status = USBD_TIMEOUT; | | 1618 | xfer->ux_status = USBD_TIMEOUT; |
1642 | bus->ub_methods->ubm_abortx(xfer); | | 1619 | bus->ub_methods->ubm_abortx(xfer); |
1643 | usb_transfer_complete(xfer); | | 1620 | usb_transfer_complete(xfer); |
1644 | | | 1621 | |
1645 | out: /* All done -- release the lock. */ | | 1622 | out: /* All done -- release the lock. */ |
1646 | mutex_exit(bus->ub_lock); | | 1623 | mutex_exit(bus->ub_lock); |
1647 | } | | 1624 | } |
1648 | | | 1625 | |
1649 | /* | | 1626 | /* |
1650 | * usbd_xfer_probe_timeout(xfer) | | 1627 | * usbd_xfer_probe_timeout(xfer) |
1651 | * | | 1628 | * |
1652 | * Probe the status of xfer's timeout. Acknowledge and process a | | 1629 | * Probe the status of xfer's timeout. Acknowledge and process a |
1653 | * request to reschedule. Return true if the timeout is still | | 1630 | * request to reschedule. Return true if the timeout is still |
1654 | * valid and the caller should take further action (queueing a | | 1631 | * valid and the caller should take further action (queueing a |
1655 | * task or aborting the xfer), false if it must stop here. | | 1632 | * task or aborting the xfer), false if it must stop here. |
1656 | */ | | 1633 | */ |
1657 | static bool | | 1634 | static bool |
1658 | usbd_xfer_probe_timeout(struct usbd_xfer *xfer) | | 1635 | usbd_xfer_probe_timeout(struct usbd_xfer *xfer) |
1659 | { | | 1636 | { |
1660 | struct usbd_bus *bus = xfer->ux_bus; | | 1637 | struct usbd_bus *bus = xfer->ux_bus; |
1661 | bool valid; | | 1638 | bool valid; |
1662 | | | 1639 | |
1663 | KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock)); | | 1640 | KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock)); |
1664 | | | 1641 | |
1665 | /* The timeout must be set. */ | | 1642 | /* The timeout must be set. */ |
1666 | KASSERT(xfer->ux_timeout_set); | | 1643 | KASSERT(xfer->ux_timeout_set); |
1667 | | | 1644 | |
1668 | /* | | 1645 | /* |
1669 | * Neither callout nor task may be pending; they execute | | 1646 | * Neither callout nor task may be pending; they execute |
1670 | * alternately in lock step. | | 1647 | * alternately in lock step. |
1671 | */ | | 1648 | */ |
1672 | KASSERT(!callout_pending(&xfer->ux_callout)); | | 1649 | KASSERT(!callout_pending(&xfer->ux_callout)); |
1673 | KASSERT(!usb_task_pending(xfer->ux_pipe->up_dev, &xfer->ux_aborttask)); | | 1650 | KASSERT(!usb_task_pending(xfer->ux_pipe->up_dev, &xfer->ux_aborttask)); |
1674 | | | 1651 | |
1675 | /* There are a few cases... */ | | 1652 | /* There are a few cases... */ |
1676 | if (bus->ub_methods->ubm_dying(bus)) { | | 1653 | if (bus->ub_methods->ubm_dying(bus)) { |
1677 | /* Host controller dying. Drop it all on the floor. */ | | 1654 | /* Host controller dying. Drop it all on the floor. */ |
1678 | xfer->ux_timeout_set = false; | | 1655 | xfer->ux_timeout_set = false; |
1679 | xfer->ux_timeout_reset = false; | | 1656 | xfer->ux_timeout_reset = false; |
1680 | valid = false; | | 1657 | valid = false; |
1681 | } else if (xfer->ux_timeout_reset) { | | 1658 | } else if (xfer->ux_timeout_reset) { |
1682 | /* | | 1659 | /* |
1683 | * The xfer completed _and_ got resubmitted while we | | 1660 | * The xfer completed _and_ got resubmitted while we |
1684 | * waited for the lock. Acknowledge the request to | | 1661 | * waited for the lock. Acknowledge the request to |
1685 | * reschedule, and reschedule it if there is a timeout | | 1662 | * reschedule, and reschedule it if there is a timeout |
1686 | * and the bus is not polling. | | 1663 | * and the bus is not polling. |
1687 | */ | | 1664 | */ |
1688 | xfer->ux_timeout_reset = false; | | 1665 | xfer->ux_timeout_reset = false; |
1689 | if (xfer->ux_timeout && !bus->ub_usepolling) { | | 1666 | if (xfer->ux_timeout && !bus->ub_usepolling) { |
1690 | KASSERT(xfer->ux_timeout_set); | | 1667 | KASSERT(xfer->ux_timeout_set); |
1691 | callout_schedule(&xfer->ux_callout, | | 1668 | callout_schedule(&xfer->ux_callout, |
1692 | mstohz(xfer->ux_timeout)); | | 1669 | mstohz(xfer->ux_timeout)); |
1693 | } else { | | 1670 | } else { |
1694 | /* No more callout or task scheduled. */ | | 1671 | /* No more callout or task scheduled. */ |
1695 | xfer->ux_timeout_set = false; | | 1672 | xfer->ux_timeout_set = false; |
1696 | } | | 1673 | } |
1697 | valid = false; | | 1674 | valid = false; |
1698 | } else if (xfer->ux_status != USBD_IN_PROGRESS) { | | 1675 | } else if (xfer->ux_status != USBD_IN_PROGRESS) { |
1699 | /* | | 1676 | /* |
1700 | * The xfer has completed by hardware completion or by | | 1677 | * The xfer has completed by hardware completion or by |
1701 | * software abort, and has not been resubmitted, so the | | 1678 | * software abort, and has not been resubmitted, so the |
1702 | * timeout must be unset, and is no longer valid for | | 1679 | * timeout must be unset, and is no longer valid for |
1703 | * the caller. | | 1680 | * the caller. |
1704 | */ | | 1681 | */ |
1705 | xfer->ux_timeout_set = false; | | 1682 | xfer->ux_timeout_set = false; |
1706 | valid = false; | | 1683 | valid = false; |
1707 | } else { | | 1684 | } else { |
1708 | /* | | 1685 | /* |
1709 | * The xfer has not yet completed, so the timeout is | | 1686 | * The xfer has not yet completed, so the timeout is |
1710 | * valid. | | 1687 | * valid. |
1711 | */ | | 1688 | */ |
1712 | valid = true; | | 1689 | valid = true; |
1713 | } | | 1690 | } |
1714 | | | 1691 | |
1715 | /* Any reset must have been processed. */ | | 1692 | /* Any reset must have been processed. */ |
1716 | KASSERT(!xfer->ux_timeout_reset); | | 1693 | KASSERT(!xfer->ux_timeout_reset); |
1717 | | | 1694 | |
1718 | /* | | 1695 | /* |
1719 | * Either we claim the timeout is set, or the callout is idle. | | 1696 | * Either we claim the timeout is set, or the callout is idle. |
1720 | * If the timeout is still set, we may be handing off to the | | 1697 | * If the timeout is still set, we may be handing off to the |
1721 | * task instead, so this is an if but not an iff. | | 1698 | * task instead, so this is an if but not an iff. |
1722 | */ | | 1699 | */ |
1723 | KASSERT(xfer->ux_timeout_set || !callout_pending(&xfer->ux_callout)); | | 1700 | KASSERT(xfer->ux_timeout_set || !callout_pending(&xfer->ux_callout)); |
1724 | | | 1701 | |
1725 | /* | | 1702 | /* |
1726 | * The task must be idle now. | | 1703 | * The task must be idle now. |
1727 | * | | 1704 | * |
1728 | * - If the caller is the callout, _and_ the timeout is still | | 1705 | * - If the caller is the callout, _and_ the timeout is still |
1729 | * valid, the caller will schedule it, but it hasn't been | | 1706 | * valid, the caller will schedule it, but it hasn't been |
1730 | * scheduled yet. (If the timeout is not valid, the task | | 1707 | * scheduled yet. (If the timeout is not valid, the task |
1731 | * should not be scheduled.) | | 1708 | * should not be scheduled.) |
1732 | * | | 1709 | * |
1733 | * - If the caller is the task, it cannot be scheduled again | | 1710 | * - If the caller is the task, it cannot be scheduled again |
1734 | * until the callout runs again, which won't happen until we | | 1711 | * until the callout runs again, which won't happen until we |
1735 | * next release the lock. | | 1712 | * next release the lock. |
1736 | */ | | 1713 | */ |
1737 | KASSERT(!usb_task_pending(xfer->ux_pipe->up_dev, &xfer->ux_aborttask)); | | 1714 | KASSERT(!usb_task_pending(xfer->ux_pipe->up_dev, &xfer->ux_aborttask)); |
1738 | | | 1715 | |
1739 | KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock)); | | 1716 | KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock)); |
1740 | | | 1717 | |
1741 | return valid; | | 1718 | return valid; |
1742 | } | | 1719 | } |
1743 | | | 1720 | |
1744 | /* | | 1721 | /* |
1745 | * usbd_xfer_schedule_timeout(xfer) | | 1722 | * usbd_xfer_schedule_timeout(xfer) |
1746 | * | | 1723 | * |
1747 | * Ensure that xfer has a timeout. If the callout is already | | 1724 | * Ensure that xfer has a timeout. If the callout is already |
1748 | * queued or the task is already running, request that they | | 1725 | * queued or the task is already running, request that they |
1749 | * reschedule the callout. If not, and if we're not polling, | | 1726 | * reschedule the callout. If not, and if we're not polling, |
1750 | * schedule the callout anew. | | 1727 | * schedule the callout anew. |
1751 | * | | 1728 | * |
1752 | * To be called in thread context from struct | | 1729 | * To be called in thread context from struct |
1753 | * usbd_pipe_methods::upm_start. | | 1730 | * usbd_pipe_methods::upm_start. |
1754 | */ | | 1731 | */ |
1755 | void | | 1732 | void |
1756 | usbd_xfer_schedule_timeout(struct usbd_xfer *xfer) | | 1733 | usbd_xfer_schedule_timeout(struct usbd_xfer *xfer) |
1757 | { | | 1734 | { |
1758 | struct usbd_bus *bus = xfer->ux_bus; | | 1735 | struct usbd_bus *bus = xfer->ux_bus; |
1759 | | | 1736 | |
1760 | KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock)); | | 1737 | KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock)); |
1761 | | | 1738 | |
1762 | if (xfer->ux_timeout_set) { | | 1739 | if (xfer->ux_timeout_set) { |
1763 | /* | | 1740 | /* |
1764 | * Callout or task has fired from a prior completed | | 1741 | * Callout or task has fired from a prior completed |
1765 | * xfer but has not yet noticed that the xfer is done. | | 1742 | * xfer but has not yet noticed that the xfer is done. |
1766 | * Ask it to reschedule itself to ux_timeout. | | 1743 | * Ask it to reschedule itself to ux_timeout. |
1767 | */ | | 1744 | */ |
1768 | xfer->ux_timeout_reset = true; | | 1745 | xfer->ux_timeout_reset = true; |
1769 | } else if (xfer->ux_timeout && !bus->ub_usepolling) { | | 1746 | } else if (xfer->ux_timeout && !bus->ub_usepolling) { |
1770 | /* Callout is not scheduled. Schedule it. */ | | 1747 | /* Callout is not scheduled. Schedule it. */ |
1771 | KASSERT(!callout_pending(&xfer->ux_callout)); | | 1748 | KASSERT(!callout_pending(&xfer->ux_callout)); |
1772 | callout_schedule(&xfer->ux_callout, mstohz(xfer->ux_timeout)); | | 1749 | callout_schedule(&xfer->ux_callout, mstohz(xfer->ux_timeout)); |
1773 | xfer->ux_timeout_set = true; | | 1750 | xfer->ux_timeout_set = true; |
1774 | } | | 1751 | } |
1775 | | | 1752 | |
1776 | KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock)); | | 1753 | KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock)); |
1777 | } | | 1754 | } |
1778 | | | 1755 | |
1779 | /* | | 1756 | /* |
1780 | * usbd_xfer_cancel_timeout_async(xfer) | | 1757 | * usbd_xfer_cancel_timeout_async(xfer) |
1781 | * | | 1758 | * |
1782 | * Cancel the callout and the task of xfer, which have not yet run | | 1759 | * Cancel the callout and the task of xfer, which have not yet run |
1783 | * to completion, but don't wait for the callout or task to finish | | 1760 | * to completion, but don't wait for the callout or task to finish |
1784 | * running. | | 1761 | * running. |
1785 | * | | 1762 | * |
1786 | * If they have already fired, at worst they are waiting for the | | 1763 | * If they have already fired, at worst they are waiting for the |
1787 | * bus lock. They will see that the xfer is no longer in progress | | 1764 | * bus lock. They will see that the xfer is no longer in progress |
1788 | * and give up, or they will see that the xfer has been | | 1765 | * and give up, or they will see that the xfer has been |
1789 | * resubmitted with a new timeout and reschedule the callout. | | 1766 | * resubmitted with a new timeout and reschedule the callout. |
1790 | * | | 1767 | * |
1791 | * If a resubmitted request completed so fast that the callout | | 1768 | * If a resubmitted request completed so fast that the callout |
1792 | * didn't have time to process a timer reset, just cancel the | | 1769 | * didn't have time to process a timer reset, just cancel the |
1793 | * timer reset. | | 1770 | * timer reset. |
1794 | */ | | 1771 | */ |
1795 | static void | | 1772 | static void |
1796 | usbd_xfer_cancel_timeout_async(struct usbd_xfer *xfer) | | 1773 | usbd_xfer_cancel_timeout_async(struct usbd_xfer *xfer) |
1797 | { | | 1774 | { |
1798 | struct usbd_bus *bus __diagused = xfer->ux_bus; | | 1775 | struct usbd_bus *bus __diagused = xfer->ux_bus; |
1799 | | | 1776 | |
1800 | KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock)); | | 1777 | KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock)); |
1801 | | | 1778 | |
1802 | /* | | 1779 | /* |
1803 | * If the timer wasn't running anyway, forget about it. This | | 1780 | * If the timer wasn't running anyway, forget about it. This |
1804 | * can happen if we are completing an isochronous transfer | | 1781 | * can happen if we are completing an isochronous transfer |
1805 | * which doesn't use the same timeout logic. | | 1782 | * which doesn't use the same timeout logic. |
1806 | */ | | 1783 | */ |
1807 | if (!xfer->ux_timeout_set) | | 1784 | if (!xfer->ux_timeout_set) |
1808 | return; | | 1785 | return; |
1809 | | | 1786 | |
1810 | xfer->ux_timeout_reset = false; | | 1787 | xfer->ux_timeout_reset = false; |
1811 | if (!callout_stop(&xfer->ux_callout)) { | | 1788 | if (!callout_stop(&xfer->ux_callout)) { |
1812 | /* | | 1789 | /* |
1813 | * We stopped the callout before it ran. The timeout | | 1790 | * We stopped the callout before it ran. The timeout |
1814 | * is no longer set. | | 1791 | * is no longer set. |
1815 | */ | | 1792 | */ |
1816 | xfer->ux_timeout_set = false; | | 1793 | xfer->ux_timeout_set = false; |
1817 | } else if (callout_invoking(&xfer->ux_callout)) { | | 1794 | } else if (callout_invoking(&xfer->ux_callout)) { |
1818 | /* | | 1795 | /* |
1819 | * The callout has begun to run but it has not yet | | 1796 | * The callout has begun to run but it has not yet |
1820 | * acquired the lock and called callout_ack. The task | | 1797 | * acquired the lock and called callout_ack. The task |
1821 | * cannot be queued yet, and the callout cannot have | | 1798 | * cannot be queued yet, and the callout cannot have |
1822 | * been rescheduled yet. | | 1799 | * been rescheduled yet. |
1823 | * | | 1800 | * |
1824 | * By the time the callout acquires the lock, we will | | 1801 | * By the time the callout acquires the lock, we will |
1825 | * have transitioned from USBD_IN_PROGRESS to a | | 1802 | * have transitioned from USBD_IN_PROGRESS to a |
1826 | * completed status, and possibly also resubmitted the | | 1803 | * completed status, and possibly also resubmitted the |
1827 | * xfer and set xfer->ux_timeout_reset = true. In both | | 1804 | * xfer and set xfer->ux_timeout_reset = true. In both |
1828 | * cases, the callout will DTRT, so no further action | | 1805 | * cases, the callout will DTRT, so no further action |
1829 | * is needed here. | | 1806 | * is needed here. |
1830 | */ | | 1807 | */ |
1831 | } else if (usb_rem_task(xfer->ux_pipe->up_dev, &xfer->ux_aborttask)) { | | 1808 | } else if (usb_rem_task(xfer->ux_pipe->up_dev, &xfer->ux_aborttask)) { |
1832 | /* | | 1809 | /* |
1833 | * The callout had fired and scheduled the task, but we | | 1810 | * The callout had fired and scheduled the task, but we |
1834 | * stopped the task before it could run. The timeout | | 1811 | * stopped the task before it could run. The timeout |
1835 | * is therefore no longer set -- the next resubmission | | 1812 | * is therefore no longer set -- the next resubmission |
1836 | * of the xfer must schedule a new timeout. | | 1813 | * of the xfer must schedule a new timeout. |
1837 | * | | 1814 | * |
1838 | * The callout should not be pending at this point: | | 1815 | * The callout should not be pending at this point: |
1839 | * it is scheduled only under the lock, and only when | | 1816 | * it is scheduled only under the lock, and only when |
1840 | * xfer->ux_timeout_set is false, or by the callout or | | 1817 | * xfer->ux_timeout_set is false, or by the callout or |
1841 | * task itself when xfer->ux_timeout_reset is true. | | 1818 | * task itself when xfer->ux_timeout_reset is true. |
1842 | */ | | 1819 | */ |
1843 | xfer->ux_timeout_set = false; | | 1820 | xfer->ux_timeout_set = false; |
1844 | } | | 1821 | } |
1845 | | | 1822 | |
1846 | /* | | 1823 | /* |
1847 | * The callout cannot be scheduled and the task cannot be | | 1824 | * The callout cannot be scheduled and the task cannot be |
1848 | * queued at this point. Either we cancelled them, or they are | | 1825 | * queued at this point. Either we cancelled them, or they are |
1849 | * already running and waiting for the bus lock. | | 1826 | * already running and waiting for the bus lock. |
1850 | */ | | 1827 | */ |
1851 | KASSERT(!callout_pending(&xfer->ux_callout)); | | 1828 | KASSERT(!callout_pending(&xfer->ux_callout)); |
1852 | KASSERT(!usb_task_pending(xfer->ux_pipe->up_dev, &xfer->ux_aborttask)); | | 1829 | KASSERT(!usb_task_pending(xfer->ux_pipe->up_dev, &xfer->ux_aborttask)); |
1853 | | | 1830 | |
1854 | KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock)); | | 1831 | KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock)); |
1855 | } | | 1832 | } |