| @@ -1,2347 +1,2347 @@ | | | @@ -1,2347 +1,2347 @@ |
1 | /* $NetBSD: ata.c,v 1.132.8.28 2017/08/12 22:31:50 jdolecek Exp $ */ | | 1 | /* $NetBSD: ata.c,v 1.132.8.29 2017/08/15 11:21:32 jdolecek Exp $ */ |
2 | | | 2 | |
3 | /* | | 3 | /* |
4 | * Copyright (c) 1998, 2001 Manuel Bouyer. All rights reserved. | | 4 | * Copyright (c) 1998, 2001 Manuel Bouyer. All rights reserved. |
5 | * | | 5 | * |
6 | * Redistribution and use in source and binary forms, with or without | | 6 | * Redistribution and use in source and binary forms, with or without |
7 | * modification, are permitted provided that the following conditions | | 7 | * modification, are permitted provided that the following conditions |
8 | * are met: | | 8 | * are met: |
9 | * 1. Redistributions of source code must retain the above copyright | | 9 | * 1. Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. | | 10 | * notice, this list of conditions and the following disclaimer. |
11 | * 2. Redistributions in binary form must reproduce the above copyright | | 11 | * 2. Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the | | 12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. | | 13 | * documentation and/or other materials provided with the distribution. |
14 | * | | 14 | * |
15 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | | 15 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
16 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | | 16 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
17 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | | 17 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
18 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, | | 18 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
19 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | | 19 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
20 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | | 20 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
21 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | | 21 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
22 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | | 22 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
23 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | | 23 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
24 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | | 24 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
25 | */ | | 25 | */ |
26 | | | 26 | |
27 | #include <sys/cdefs.h> | | 27 | #include <sys/cdefs.h> |
28 | __KERNEL_RCSID(0, "$NetBSD: ata.c,v 1.132.8.28 2017/08/12 22:31:50 jdolecek Exp $"); | | 28 | __KERNEL_RCSID(0, "$NetBSD: ata.c,v 1.132.8.29 2017/08/15 11:21:32 jdolecek Exp $"); |
29 | | | 29 | |
30 | #include "opt_ata.h" | | 30 | #include "opt_ata.h" |
31 | | | 31 | |
32 | #include <sys/param.h> | | 32 | #include <sys/param.h> |
33 | #include <sys/systm.h> | | 33 | #include <sys/systm.h> |
34 | #include <sys/kernel.h> | | 34 | #include <sys/kernel.h> |
35 | #include <sys/malloc.h> | | 35 | #include <sys/malloc.h> |
36 | #include <sys/device.h> | | 36 | #include <sys/device.h> |
37 | #include <sys/conf.h> | | 37 | #include <sys/conf.h> |
38 | #include <sys/fcntl.h> | | 38 | #include <sys/fcntl.h> |
39 | #include <sys/proc.h> | | 39 | #include <sys/proc.h> |
40 | #include <sys/kthread.h> | | 40 | #include <sys/kthread.h> |
41 | #include <sys/errno.h> | | 41 | #include <sys/errno.h> |
42 | #include <sys/ataio.h> | | 42 | #include <sys/ataio.h> |
43 | #include <sys/kmem.h> | | 43 | #include <sys/kmem.h> |
44 | #include <sys/intr.h> | | 44 | #include <sys/intr.h> |
45 | #include <sys/bus.h> | | 45 | #include <sys/bus.h> |
46 | #include <sys/once.h> | | 46 | #include <sys/once.h> |
47 | #include <sys/bitops.h> | | 47 | #include <sys/bitops.h> |
48 | | | 48 | |
49 | #define ATABUS_PRIVATE | | 49 | #define ATABUS_PRIVATE |
50 | | | 50 | |
51 | #include <dev/ata/ataconf.h> | | 51 | #include <dev/ata/ataconf.h> |
52 | #include <dev/ata/atareg.h> | | 52 | #include <dev/ata/atareg.h> |
53 | #include <dev/ata/atavar.h> | | 53 | #include <dev/ata/atavar.h> |
54 | #include <dev/ic/wdcvar.h> /* for PIOBM */ | | 54 | #include <dev/ic/wdcvar.h> /* for PIOBM */ |
55 | | | 55 | |
56 | #include "locators.h" | | 56 | #include "locators.h" |
57 | | | 57 | |
58 | #include "atapibus.h" | | 58 | #include "atapibus.h" |
59 | #include "ataraid.h" | | 59 | #include "ataraid.h" |
60 | #include "sata_pmp.h" | | 60 | #include "sata_pmp.h" |
61 | | | 61 | |
62 | #if NATARAID > 0 | | 62 | #if NATARAID > 0 |
63 | #include <dev/ata/ata_raidvar.h> | | 63 | #include <dev/ata/ata_raidvar.h> |
64 | #endif | | 64 | #endif |
65 | #if NSATA_PMP > 0 | | 65 | #if NSATA_PMP > 0 |
66 | #include <dev/ata/satapmpvar.h> | | 66 | #include <dev/ata/satapmpvar.h> |
67 | #endif | | 67 | #endif |
68 | #include <dev/ata/satapmpreg.h> | | 68 | #include <dev/ata/satapmpreg.h> |
69 | | | 69 | |
70 | #define DEBUG_FUNCS 0x08 | | 70 | #define DEBUG_FUNCS 0x08 |
71 | #define DEBUG_PROBE 0x10 | | 71 | #define DEBUG_PROBE 0x10 |
72 | #define DEBUG_DETACH 0x20 | | 72 | #define DEBUG_DETACH 0x20 |
73 | #define DEBUG_XFERS 0x40 | | 73 | #define DEBUG_XFERS 0x40 |
74 | #ifdef ATADEBUG | | 74 | #ifdef ATADEBUG |
75 | #ifndef ATADEBUG_MASK | | 75 | #ifndef ATADEBUG_MASK |
76 | #define ATADEBUG_MASK 0 | | 76 | #define ATADEBUG_MASK 0 |
77 | #endif | | 77 | #endif |
78 | int atadebug_mask = ATADEBUG_MASK; | | 78 | int atadebug_mask = ATADEBUG_MASK; |
79 | #define ATADEBUG_PRINT(args, level) \ | | 79 | #define ATADEBUG_PRINT(args, level) \ |
80 | if (atadebug_mask & (level)) \ | | 80 | if (atadebug_mask & (level)) \ |
81 | printf args | | 81 | printf args |
82 | #else | | 82 | #else |
83 | #define ATADEBUG_PRINT(args, level) | | 83 | #define ATADEBUG_PRINT(args, level) |
84 | #endif | | 84 | #endif |
85 | | | 85 | |
86 | static ONCE_DECL(ata_init_ctrl); | | 86 | static ONCE_DECL(ata_init_ctrl); |
87 | | | 87 | |
88 | /* | | 88 | /* |
89 | * A queue of atabus instances, used to ensure the same bus probe order | | 89 | * A queue of atabus instances, used to ensure the same bus probe order |
90 | * for a given hardware configuration at each boot. Kthread probing | | 90 | * for a given hardware configuration at each boot. Kthread probing |
91 | * devices on a atabus. Only one probing at once. | | 91 | * devices on a atabus. Only one probing at once. |
92 | */ | | 92 | */ |
93 | static TAILQ_HEAD(, atabus_initq) atabus_initq_head; | | 93 | static TAILQ_HEAD(, atabus_initq) atabus_initq_head; |
94 | static kmutex_t atabus_qlock; | | 94 | static kmutex_t atabus_qlock; |
95 | static kcondvar_t atabus_qcv; | | 95 | static kcondvar_t atabus_qcv; |
96 | static lwp_t * atabus_cfg_lwp; | | 96 | static lwp_t * atabus_cfg_lwp; |
97 | | | 97 | |
98 | /***************************************************************************** | | 98 | /***************************************************************************** |
99 | * ATA bus layer. | | 99 | * ATA bus layer. |
100 | * | | 100 | * |
101 | * ATA controllers attach an atabus instance, which handles probing the bus | | 101 | * ATA controllers attach an atabus instance, which handles probing the bus |
102 | * for drives, etc. | | 102 | * for drives, etc. |
103 | *****************************************************************************/ | | 103 | *****************************************************************************/ |
104 | | | 104 | |
105 | dev_type_open(atabusopen); | | 105 | dev_type_open(atabusopen); |
106 | dev_type_close(atabusclose); | | 106 | dev_type_close(atabusclose); |
107 | dev_type_ioctl(atabusioctl); | | 107 | dev_type_ioctl(atabusioctl); |
108 | | | 108 | |
109 | const struct cdevsw atabus_cdevsw = { | | 109 | const struct cdevsw atabus_cdevsw = { |
110 | .d_open = atabusopen, | | 110 | .d_open = atabusopen, |
111 | .d_close = atabusclose, | | 111 | .d_close = atabusclose, |
112 | .d_read = noread, | | 112 | .d_read = noread, |
113 | .d_write = nowrite, | | 113 | .d_write = nowrite, |
114 | .d_ioctl = atabusioctl, | | 114 | .d_ioctl = atabusioctl, |
115 | .d_stop = nostop, | | 115 | .d_stop = nostop, |
116 | .d_tty = notty, | | 116 | .d_tty = notty, |
117 | .d_poll = nopoll, | | 117 | .d_poll = nopoll, |
118 | .d_mmap = nommap, | | 118 | .d_mmap = nommap, |
119 | .d_kqfilter = nokqfilter, | | 119 | .d_kqfilter = nokqfilter, |
120 | .d_discard = nodiscard, | | 120 | .d_discard = nodiscard, |
121 | .d_flag = D_OTHER | | 121 | .d_flag = D_OTHER |
122 | }; | | 122 | }; |
123 | | | 123 | |
124 | extern struct cfdriver atabus_cd; | | 124 | extern struct cfdriver atabus_cd; |
125 | | | 125 | |
126 | static void atabus_childdetached(device_t, device_t); | | 126 | static void atabus_childdetached(device_t, device_t); |
127 | static int atabus_rescan(device_t, const char *, const int *); | | 127 | static int atabus_rescan(device_t, const char *, const int *); |
128 | static bool atabus_resume(device_t, const pmf_qual_t *); | | 128 | static bool atabus_resume(device_t, const pmf_qual_t *); |
129 | static bool atabus_suspend(device_t, const pmf_qual_t *); | | 129 | static bool atabus_suspend(device_t, const pmf_qual_t *); |
130 | static void atabusconfig_thread(void *); | | 130 | static void atabusconfig_thread(void *); |
131 | | | 131 | |
132 | static void ata_channel_idle(struct ata_channel *); | | 132 | static void ata_channel_idle(struct ata_channel *); |
133 | static void ata_activate_xfer_locked(struct ata_channel *, struct ata_xfer *); | | 133 | static void ata_activate_xfer_locked(struct ata_channel *, struct ata_xfer *); |
134 | static void ata_channel_freeze_locked(struct ata_channel *); | | 134 | static void ata_channel_freeze_locked(struct ata_channel *); |
135 | /* | | 135 | /* |
136 | * atabus_init: | | 136 | * atabus_init: |
137 | * | | 137 | * |
138 | * Initialize ATA subsystem structures. | | 138 | * Initialize ATA subsystem structures. |
139 | */ | | 139 | */ |
140 | static int | | 140 | static int |
141 | atabus_init(void) | | 141 | atabus_init(void) |
142 | { | | 142 | { |
143 | | | 143 | |
144 | TAILQ_INIT(&atabus_initq_head); | | 144 | TAILQ_INIT(&atabus_initq_head); |
145 | mutex_init(&atabus_qlock, MUTEX_DEFAULT, IPL_NONE); | | 145 | mutex_init(&atabus_qlock, MUTEX_DEFAULT, IPL_NONE); |
146 | cv_init(&atabus_qcv, "atainitq"); | | 146 | cv_init(&atabus_qcv, "atainitq"); |
147 | return 0; | | 147 | return 0; |
148 | } | | 148 | } |
149 | | | 149 | |
150 | /* | | 150 | /* |
151 | * atabusprint: | | 151 | * atabusprint: |
152 | * | | 152 | * |
153 | * Autoconfiguration print routine used by ATA controllers when | | 153 | * Autoconfiguration print routine used by ATA controllers when |
154 | * attaching an atabus instance. | | 154 | * attaching an atabus instance. |
155 | */ | | 155 | */ |
156 | int | | 156 | int |
157 | atabusprint(void *aux, const char *pnp) | | 157 | atabusprint(void *aux, const char *pnp) |
158 | { | | 158 | { |
159 | struct ata_channel *chan = aux; | | 159 | struct ata_channel *chan = aux; |
160 | | | 160 | |
161 | if (pnp) | | 161 | if (pnp) |
162 | aprint_normal("atabus at %s", pnp); | | 162 | aprint_normal("atabus at %s", pnp); |
163 | aprint_normal(" channel %d", chan->ch_channel); | | 163 | aprint_normal(" channel %d", chan->ch_channel); |
164 | | | 164 | |
165 | return (UNCONF); | | 165 | return (UNCONF); |
166 | } | | 166 | } |
167 | | | 167 | |
168 | /* | | 168 | /* |
169 | * ataprint: | | 169 | * ataprint: |
170 | * | | 170 | * |
171 | * Autoconfiguration print routine. | | 171 | * Autoconfiguration print routine. |
172 | */ | | 172 | */ |
173 | int | | 173 | int |
174 | ataprint(void *aux, const char *pnp) | | 174 | ataprint(void *aux, const char *pnp) |
175 | { | | 175 | { |
176 | struct ata_device *adev = aux; | | 176 | struct ata_device *adev = aux; |
177 | | | 177 | |
178 | if (pnp) | | 178 | if (pnp) |
179 | aprint_normal("wd at %s", pnp); | | 179 | aprint_normal("wd at %s", pnp); |
180 | aprint_normal(" drive %d", adev->adev_drv_data->drive); | | 180 | aprint_normal(" drive %d", adev->adev_drv_data->drive); |
181 | | | 181 | |
182 | return (UNCONF); | | 182 | return (UNCONF); |
183 | } | | 183 | } |
184 | | | 184 | |
185 | static void | | 185 | static void |
186 | ata_queue_reset(struct ata_queue *chq) | | 186 | ata_queue_reset(struct ata_queue *chq) |
187 | { | | 187 | { |
188 | /* make sure that we can use polled commands */ | | 188 | /* make sure that we can use polled commands */ |
189 | TAILQ_INIT(&chq->queue_xfer); | | 189 | TAILQ_INIT(&chq->queue_xfer); |
190 | TAILQ_INIT(&chq->active_xfers); | | 190 | TAILQ_INIT(&chq->active_xfers); |
191 | chq->queue_freeze = 0; | | 191 | chq->queue_freeze = 0; |
192 | chq->queue_active = 0; | | 192 | chq->queue_active = 0; |
193 | chq->active_xfers_used = 0; | | 193 | chq->active_xfers_used = 0; |
194 | chq->queue_xfers_avail = __BIT(chq->queue_openings) - 1; | | 194 | chq->queue_xfers_avail = __BIT(chq->queue_openings) - 1; |
195 | } | | 195 | } |
196 | | | 196 | |
197 | struct ata_xfer * | | 197 | struct ata_xfer * |
198 | ata_queue_hwslot_to_xfer(struct ata_channel *chp, int hwslot) | | 198 | ata_queue_hwslot_to_xfer(struct ata_channel *chp, int hwslot) |
199 | { | | 199 | { |
200 | struct ata_queue *chq = chp->ch_queue; | | 200 | struct ata_queue *chq = chp->ch_queue; |
201 | struct ata_xfer *xfer = NULL; | | 201 | struct ata_xfer *xfer = NULL; |
202 | | | 202 | |
203 | mutex_enter(&chp->ch_lock); | | 203 | mutex_enter(&chp->ch_lock); |
204 | | | 204 | |
205 | KASSERTMSG(hwslot < chq->queue_openings, "hwslot %d > openings %d", | | 205 | KASSERTMSG(hwslot < chq->queue_openings, "hwslot %d > openings %d", |
206 | hwslot, chq->queue_openings); | | 206 | hwslot, chq->queue_openings); |
207 | KASSERT((chq->active_xfers_used & __BIT(hwslot)) != 0); | | 207 | KASSERT((chq->active_xfers_used & __BIT(hwslot)) != 0); |
208 | | | 208 | |
209 | /* Usually the first entry will be the one */ | | 209 | /* Usually the first entry will be the one */ |
210 | TAILQ_FOREACH(xfer, &chq->active_xfers, c_activechain) { | | 210 | TAILQ_FOREACH(xfer, &chq->active_xfers, c_activechain) { |
211 | if (xfer->c_slot == hwslot) | | 211 | if (xfer->c_slot == hwslot) |
212 | break; | | 212 | break; |
213 | } | | 213 | } |
214 | | | 214 | |
215 | mutex_exit(&chp->ch_lock); | | 215 | mutex_exit(&chp->ch_lock); |
216 | | | 216 | |
217 | KASSERTMSG((xfer != NULL), | | 217 | KASSERTMSG((xfer != NULL), |
218 | "%s: xfer with slot %d not found (active %x)", __func__, | | 218 | "%s: xfer with slot %d not found (active %x)", __func__, |
219 | hwslot, chq->active_xfers_used); | | 219 | hwslot, chq->active_xfers_used); |
220 | | | 220 | |
221 | return xfer; | | 221 | return xfer; |
222 | } | | 222 | } |
223 | | | 223 | |
224 | /* | | 224 | /* |
225 | * This interface is supposed only to be used when there is exactly | | 225 | * This interface is supposed only to be used when there is exactly |
226 | * one outstanding command, when there is no information about the slot, | | 226 | * one outstanding command, when there is no information about the slot, |
227 | * which triggered the command. ata_queue_hwslot_to_xfer() interface | | 227 | * which triggered the command. ata_queue_hwslot_to_xfer() interface |
228 | * is preferred in all NCQ cases. | | 228 | * is preferred in all NCQ cases. |
229 | */ | | 229 | */ |
230 | struct ata_xfer * | | 230 | struct ata_xfer * |
231 | ata_queue_get_active_xfer(struct ata_channel *chp) | | 231 | ata_queue_get_active_xfer(struct ata_channel *chp) |
232 | { | | 232 | { |
233 | struct ata_xfer *xfer = NULL; | | 233 | struct ata_xfer *xfer = NULL; |
234 | | | 234 | |
235 | mutex_enter(&chp->ch_lock); | | 235 | mutex_enter(&chp->ch_lock); |
236 | | | 236 | |
237 | KASSERT(chp->ch_queue->queue_active <= 1); | | 237 | KASSERT(chp->ch_queue->queue_active <= 1); |
238 | xfer = TAILQ_FIRST(&chp->ch_queue->active_xfers); | | 238 | xfer = TAILQ_FIRST(&chp->ch_queue->active_xfers); |
239 | | | 239 | |
240 | mutex_exit(&chp->ch_lock); | | 240 | mutex_exit(&chp->ch_lock); |
241 | | | 241 | |
242 | return xfer; | | 242 | return xfer; |
243 | } | | 243 | } |
244 | | | 244 | |
245 | struct ata_xfer * | | 245 | struct ata_xfer * |
246 | ata_queue_drive_active_xfer(struct ata_channel *chp, int drive) | | 246 | ata_queue_drive_active_xfer(struct ata_channel *chp, int drive) |
247 | { | | 247 | { |
248 | struct ata_xfer *xfer = NULL; | | 248 | struct ata_xfer *xfer = NULL; |
249 | | | 249 | |
250 | mutex_enter(&chp->ch_lock); | | 250 | mutex_enter(&chp->ch_lock); |
251 | | | 251 | |
252 | TAILQ_FOREACH(xfer, &chp->ch_queue->active_xfers, c_activechain) { | | 252 | TAILQ_FOREACH(xfer, &chp->ch_queue->active_xfers, c_activechain) { |
253 | if (xfer->c_drive == drive) | | 253 | if (xfer->c_drive == drive) |
254 | break; | | 254 | break; |
255 | } | | 255 | } |
256 | KASSERT(xfer != NULL); | | 256 | KASSERT(xfer != NULL); |
257 | | | 257 | |
258 | mutex_exit(&chp->ch_lock); | | 258 | mutex_exit(&chp->ch_lock); |
259 | | | 259 | |
260 | return xfer; | | 260 | return xfer; |
261 | } | | 261 | } |
262 | | | 262 | |
263 | static void | | 263 | static void |
264 | ata_xfer_init(struct ata_xfer *xfer, uint8_t slot) | | 264 | ata_xfer_init(struct ata_xfer *xfer, uint8_t slot) |
265 | { | | 265 | { |
266 | memset(xfer, 0, sizeof(*xfer)); | | 266 | memset(xfer, 0, sizeof(*xfer)); |
267 | | | 267 | |
268 | xfer->c_slot = slot; | | 268 | xfer->c_slot = slot; |
269 | | | 269 | |
270 | cv_init(&xfer->c_active, "ataact"); | | 270 | cv_init(&xfer->c_active, "ataact"); |
271 | callout_init(&xfer->c_timo_callout, 0); /* XXX MPSAFE */ | | 271 | callout_init(&xfer->c_timo_callout, 0); /* XXX MPSAFE */ |
272 | callout_init(&xfer->c_retry_callout, 0); /* XXX MPSAFE */ | | 272 | callout_init(&xfer->c_retry_callout, 0); /* XXX MPSAFE */ |
273 | } | | 273 | } |
274 | | | 274 | |
275 | static void | | 275 | static void |
276 | ata_xfer_destroy(struct ata_xfer *xfer) | | 276 | ata_xfer_destroy(struct ata_xfer *xfer) |
277 | { | | 277 | { |
278 | callout_halt(&xfer->c_timo_callout, NULL); /* XXX MPSAFE */ | | 278 | callout_halt(&xfer->c_timo_callout, NULL); /* XXX MPSAFE */ |
279 | callout_destroy(&xfer->c_timo_callout); | | 279 | callout_destroy(&xfer->c_timo_callout); |
280 | callout_halt(&xfer->c_retry_callout, NULL); /* XXX MPSAFE */ | | 280 | callout_halt(&xfer->c_retry_callout, NULL); /* XXX MPSAFE */ |
281 | callout_destroy(&xfer->c_retry_callout); | | 281 | callout_destroy(&xfer->c_retry_callout); |
282 | cv_destroy(&xfer->c_active); | | 282 | cv_destroy(&xfer->c_active); |
283 | } | | 283 | } |
284 | | | 284 | |
285 | struct ata_queue * | | 285 | struct ata_queue * |
286 | ata_queue_alloc(uint8_t openings) | | 286 | ata_queue_alloc(uint8_t openings) |
287 | { | | 287 | { |
288 | if (openings == 0) | | 288 | if (openings == 0) |
289 | openings = 1; | | 289 | openings = 1; |
290 | | | 290 | |
291 | if (openings > ATA_MAX_OPENINGS) | | 291 | if (openings > ATA_MAX_OPENINGS) |
292 | openings = ATA_MAX_OPENINGS; | | 292 | openings = ATA_MAX_OPENINGS; |
293 | | | 293 | |
294 | struct ata_queue *chq = malloc(offsetof(struct ata_queue, queue_xfers[openings]), | | 294 | struct ata_queue *chq = malloc(offsetof(struct ata_queue, queue_xfers[openings]), |
295 | M_DEVBUF, M_WAITOK | M_ZERO); | | 295 | M_DEVBUF, M_WAITOK | M_ZERO); |
296 | | | 296 | |
297 | chq->queue_openings = openings; | | 297 | chq->queue_openings = openings; |
298 | ata_queue_reset(chq); | | 298 | ata_queue_reset(chq); |
299 | | | 299 | |
300 | cv_init(&chq->queue_busy, "ataqbusy"); | | 300 | cv_init(&chq->queue_busy, "ataqbusy"); |
301 | cv_init(&chq->queue_drain, "atdrn"); | | 301 | cv_init(&chq->queue_drain, "atdrn"); |
302 | | | 302 | |
303 | for (uint8_t i = 0; i < openings; i++) | | 303 | for (uint8_t i = 0; i < openings; i++) |
304 | ata_xfer_init(&chq->queue_xfers[i], i); | | 304 | ata_xfer_init(&chq->queue_xfers[i], i); |
305 | | | 305 | |
306 | return chq; | | 306 | return chq; |
307 | } | | 307 | } |
308 | | | 308 | |
309 | void | | 309 | void |
310 | ata_queue_free(struct ata_queue *chq) | | 310 | ata_queue_free(struct ata_queue *chq) |
311 | { | | 311 | { |
312 | for (uint8_t i = 0; i < chq->queue_openings; i++) | | 312 | for (uint8_t i = 0; i < chq->queue_openings; i++) |
313 | ata_xfer_destroy(&chq->queue_xfers[i]); | | 313 | ata_xfer_destroy(&chq->queue_xfers[i]); |
314 | | | 314 | |
315 | cv_destroy(&chq->queue_busy); | | 315 | cv_destroy(&chq->queue_busy); |
316 | cv_destroy(&chq->queue_drain); | | 316 | cv_destroy(&chq->queue_drain); |
317 | | | 317 | |
318 | free(chq, M_DEVBUF); | | 318 | free(chq, M_DEVBUF); |
319 | } | | 319 | } |
320 | | | 320 | |
321 | void | | 321 | void |
322 | ata_channel_init(struct ata_channel *chp) | | 322 | ata_channel_init(struct ata_channel *chp) |
323 | { | | 323 | { |
324 | mutex_init(&chp->ch_lock, MUTEX_DEFAULT, IPL_BIO); | | 324 | mutex_init(&chp->ch_lock, MUTEX_DEFAULT, IPL_BIO); |
325 | cv_init(&chp->ch_thr_idle, "atath"); | | 325 | cv_init(&chp->ch_thr_idle, "atath"); |
326 | } | | 326 | } |
327 | | | 327 | |
328 | /* | | 328 | /* |
329 | * ata_channel_attach: | | 329 | * ata_channel_attach: |
330 | * | | 330 | * |
331 | * Common parts of attaching an atabus to an ATA controller channel. | | 331 | * Common parts of attaching an atabus to an ATA controller channel. |
332 | */ | | 332 | */ |
333 | void | | 333 | void |
334 | ata_channel_attach(struct ata_channel *chp) | | 334 | ata_channel_attach(struct ata_channel *chp) |
335 | { | | 335 | { |
336 | if (chp->ch_flags & ATACH_DISABLED) | | 336 | if (chp->ch_flags & ATACH_DISABLED) |
337 | return; | | 337 | return; |
338 | | | 338 | |
339 | KASSERT(chp->ch_queue != NULL); | | 339 | KASSERT(chp->ch_queue != NULL); |
340 | | | 340 | |
341 | ata_channel_init(chp); | | 341 | ata_channel_init(chp); |
342 | | | 342 | |
343 | chp->atabus = config_found_ia(chp->ch_atac->atac_dev, "ata", chp, | | 343 | chp->atabus = config_found_ia(chp->ch_atac->atac_dev, "ata", chp, |
344 | atabusprint); | | 344 | atabusprint); |
345 | } | | 345 | } |
346 | | | 346 | |
347 | void | | 347 | void |
348 | ata_channel_destroy(struct ata_channel *chp) | | 348 | ata_channel_destroy(struct ata_channel *chp) |
349 | { | | 349 | { |
350 | mutex_destroy(&chp->ch_lock); | | 350 | mutex_destroy(&chp->ch_lock); |
351 | cv_destroy(&chp->ch_thr_idle); | | 351 | cv_destroy(&chp->ch_thr_idle); |
352 | } | | 352 | } |
353 | | | 353 | |
354 | /* | | 354 | /* |
355 | * ata_channel_detach: | | 355 | * ata_channel_detach: |
356 | * | | 356 | * |
357 | * Common parts of detaching an atabus to an ATA controller channel. | | 357 | * Common parts of detaching an atabus to an ATA controller channel. |
358 | */ | | 358 | */ |
359 | void | | 359 | void |
360 | ata_channel_detach(struct ata_channel *chp) | | 360 | ata_channel_detach(struct ata_channel *chp) |
361 | { | | 361 | { |
362 | if (chp->ch_flags & ATACH_DISABLED) | | 362 | if (chp->ch_flags & ATACH_DISABLED) |
363 | return; | | 363 | return; |
364 | | | 364 | |
365 | ata_channel_destroy(chp); | | 365 | ata_channel_destroy(chp); |
366 | } | | 366 | } |
367 | | | 367 | |
368 | static void | | 368 | static void |
369 | atabusconfig(struct atabus_softc *atabus_sc) | | 369 | atabusconfig(struct atabus_softc *atabus_sc) |
370 | { | | 370 | { |
371 | struct ata_channel *chp = atabus_sc->sc_chan; | | 371 | struct ata_channel *chp = atabus_sc->sc_chan; |
372 | struct atac_softc *atac = chp->ch_atac; | | 372 | struct atac_softc *atac = chp->ch_atac; |
373 | struct atabus_initq *atabus_initq = NULL; | | 373 | struct atabus_initq *atabus_initq = NULL; |
374 | int i, error; | | 374 | int i, error; |
375 | | | 375 | |
376 | /* we are in the atabus's thread context */ | | 376 | /* we are in the atabus's thread context */ |
377 | mutex_enter(&chp->ch_lock); | | 377 | mutex_enter(&chp->ch_lock); |
378 | chp->ch_flags |= ATACH_TH_RUN; | | 378 | chp->ch_flags |= ATACH_TH_RUN; |
379 | mutex_exit(&chp->ch_lock); | | 379 | mutex_exit(&chp->ch_lock); |
380 | | | 380 | |
381 | /* | | 381 | /* |
382 | * Probe for the drives attached to controller, unless a PMP | | 382 | * Probe for the drives attached to controller, unless a PMP |
383 | * is already known | | 383 | * is already known |
384 | */ | | 384 | */ |
385 | /* XXX for SATA devices we will power up all drives at once */ | | 385 | /* XXX for SATA devices we will power up all drives at once */ |
386 | if (chp->ch_satapmp_nports == 0) | | 386 | if (chp->ch_satapmp_nports == 0) |
387 | (*atac->atac_probe)(chp); | | 387 | (*atac->atac_probe)(chp); |
388 | | | 388 | |
389 | if (chp->ch_ndrives >= 2) { | | 389 | if (chp->ch_ndrives >= 2) { |
390 | ATADEBUG_PRINT(("atabusattach: ch_drive_type 0x%x 0x%x\n", | | 390 | ATADEBUG_PRINT(("atabusattach: ch_drive_type 0x%x 0x%x\n", |
391 | chp->ch_drive[0].drive_type, chp->ch_drive[1].drive_type), | | 391 | chp->ch_drive[0].drive_type, chp->ch_drive[1].drive_type), |
392 | DEBUG_PROBE); | | 392 | DEBUG_PROBE); |
393 | } | | 393 | } |
394 | | | 394 | |
395 | /* next operations will occurs in a separate thread */ | | 395 | /* next operations will occurs in a separate thread */ |
396 | mutex_enter(&chp->ch_lock); | | 396 | mutex_enter(&chp->ch_lock); |
397 | chp->ch_flags &= ~ATACH_TH_RUN; | | 397 | chp->ch_flags &= ~ATACH_TH_RUN; |
398 | mutex_exit(&chp->ch_lock); | | 398 | mutex_exit(&chp->ch_lock); |
399 | | | 399 | |
400 | /* Make sure the devices probe in atabus order to avoid jitter. */ | | 400 | /* Make sure the devices probe in atabus order to avoid jitter. */ |
401 | mutex_enter(&atabus_qlock); | | 401 | mutex_enter(&atabus_qlock); |
402 | for (;;) { | | 402 | for (;;) { |
403 | atabus_initq = TAILQ_FIRST(&atabus_initq_head); | | 403 | atabus_initq = TAILQ_FIRST(&atabus_initq_head); |
404 | if (atabus_initq->atabus_sc == atabus_sc) | | 404 | if (atabus_initq->atabus_sc == atabus_sc) |
405 | break; | | 405 | break; |
406 | cv_wait(&atabus_qcv, &atabus_qlock); | | 406 | cv_wait(&atabus_qcv, &atabus_qlock); |
407 | } | | 407 | } |
408 | mutex_exit(&atabus_qlock); | | 408 | mutex_exit(&atabus_qlock); |
409 | | | 409 | |
410 | mutex_enter(&chp->ch_lock); | | 410 | mutex_enter(&chp->ch_lock); |
411 | | | 411 | |
412 | /* If no drives, abort here */ | | 412 | /* If no drives, abort here */ |
413 | if (chp->ch_drive == NULL) | | 413 | if (chp->ch_drive == NULL) |
414 | goto out; | | 414 | goto out; |
415 | KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL); | | 415 | KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL); |
416 | for (i = 0; i < chp->ch_ndrives; i++) | | 416 | for (i = 0; i < chp->ch_ndrives; i++) |
417 | if (chp->ch_drive[i].drive_type != ATA_DRIVET_NONE) | | 417 | if (chp->ch_drive[i].drive_type != ATA_DRIVET_NONE) |
418 | break; | | 418 | break; |
419 | if (i == chp->ch_ndrives) | | 419 | if (i == chp->ch_ndrives) |
420 | goto out; | | 420 | goto out; |
421 | | | 421 | |
422 | /* Shortcut in case we've been shutdown */ | | 422 | /* Shortcut in case we've been shutdown */ |
423 | if (chp->ch_flags & ATACH_SHUTDOWN) | | 423 | if (chp->ch_flags & ATACH_SHUTDOWN) |
424 | goto out; | | 424 | goto out; |
425 | | | 425 | |
426 | mutex_exit(&chp->ch_lock); | | 426 | mutex_exit(&chp->ch_lock); |
427 | | | 427 | |
428 | if ((error = kthread_create(PRI_NONE, 0, NULL, atabusconfig_thread, | | 428 | if ((error = kthread_create(PRI_NONE, 0, NULL, atabusconfig_thread, |
429 | atabus_sc, &atabus_cfg_lwp, | | 429 | atabus_sc, &atabus_cfg_lwp, |
430 | "%scnf", device_xname(atac->atac_dev))) != 0) | | 430 | "%scnf", device_xname(atac->atac_dev))) != 0) |
431 | aprint_error_dev(atac->atac_dev, | | 431 | aprint_error_dev(atac->atac_dev, |
432 | "unable to create config thread: error %d\n", error); | | 432 | "unable to create config thread: error %d\n", error); |
433 | return; | | 433 | return; |
434 | | | 434 | |
435 | out: | | 435 | out: |
436 | mutex_exit(&chp->ch_lock); | | 436 | mutex_exit(&chp->ch_lock); |
437 | | | 437 | |
438 | mutex_enter(&atabus_qlock); | | 438 | mutex_enter(&atabus_qlock); |
439 | TAILQ_REMOVE(&atabus_initq_head, atabus_initq, atabus_initq); | | 439 | TAILQ_REMOVE(&atabus_initq_head, atabus_initq, atabus_initq); |
440 | cv_broadcast(&atabus_qcv); | | 440 | cv_broadcast(&atabus_qcv); |
441 | mutex_exit(&atabus_qlock); | | 441 | mutex_exit(&atabus_qlock); |
442 | | | 442 | |
443 | free(atabus_initq, M_DEVBUF); | | 443 | free(atabus_initq, M_DEVBUF); |
444 | | | 444 | |
445 | ata_delref(chp); | | 445 | ata_delref(chp); |
446 | | | 446 | |
447 | config_pending_decr(atac->atac_dev); | | 447 | config_pending_decr(atac->atac_dev); |
448 | } | | 448 | } |
449 | | | 449 | |
450 | /* | | 450 | /* |
451 | * atabus_configthread: finish attach of atabus's childrens, in a separate | | 451 | * atabus_configthread: finish attach of atabus's childrens, in a separate |
452 | * kernel thread. | | 452 | * kernel thread. |
453 | */ | | 453 | */ |
454 | static void | | 454 | static void |
455 | atabusconfig_thread(void *arg) | | 455 | atabusconfig_thread(void *arg) |
456 | { | | 456 | { |
457 | struct atabus_softc *atabus_sc = arg; | | 457 | struct atabus_softc *atabus_sc = arg; |
458 | struct ata_channel *chp = atabus_sc->sc_chan; | | 458 | struct ata_channel *chp = atabus_sc->sc_chan; |
459 | struct atac_softc *atac = chp->ch_atac; | | 459 | struct atac_softc *atac = chp->ch_atac; |
460 | struct atabus_initq *atabus_initq = NULL; | | 460 | struct atabus_initq *atabus_initq = NULL; |
461 | int i, s; | | 461 | int i, s; |
462 | | | 462 | |
463 | /* XXX seems wrong */ | | 463 | /* XXX seems wrong */ |
464 | mutex_enter(&atabus_qlock); | | 464 | mutex_enter(&atabus_qlock); |
465 | atabus_initq = TAILQ_FIRST(&atabus_initq_head); | | 465 | atabus_initq = TAILQ_FIRST(&atabus_initq_head); |
466 | KASSERT(atabus_initq->atabus_sc == atabus_sc); | | 466 | KASSERT(atabus_initq->atabus_sc == atabus_sc); |
467 | mutex_exit(&atabus_qlock); | | 467 | mutex_exit(&atabus_qlock); |
468 | | | 468 | |
469 | /* | | 469 | /* |
470 | * First look for a port multiplier | | 470 | * First look for a port multiplier |
471 | */ | | 471 | */ |
472 | if (chp->ch_ndrives == PMP_MAX_DRIVES && | | 472 | if (chp->ch_ndrives == PMP_MAX_DRIVES && |
473 | chp->ch_drive[PMP_PORT_CTL].drive_type == ATA_DRIVET_PM) { | | 473 | chp->ch_drive[PMP_PORT_CTL].drive_type == ATA_DRIVET_PM) { |
474 | #if NSATA_PMP > 0 | | 474 | #if NSATA_PMP > 0 |
475 | satapmp_attach(chp); | | 475 | satapmp_attach(chp); |
476 | #else | | 476 | #else |
477 | aprint_error_dev(atabus_sc->sc_dev, | | 477 | aprint_error_dev(atabus_sc->sc_dev, |
478 | "SATA port multiplier not supported\n"); | | 478 | "SATA port multiplier not supported\n"); |
479 | /* no problems going on, all drives are ATA_DRIVET_NONE */ | | 479 | /* no problems going on, all drives are ATA_DRIVET_NONE */ |
480 | #endif | | 480 | #endif |
481 | } | | 481 | } |
482 | | | 482 | |
483 | /* | | 483 | /* |
484 | * Attach an ATAPI bus, if needed. | | 484 | * Attach an ATAPI bus, if needed. |
485 | */ | | 485 | */ |
486 | KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL); | | 486 | KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL); |
487 | for (i = 0; i < chp->ch_ndrives && chp->atapibus == NULL; i++) { | | 487 | for (i = 0; i < chp->ch_ndrives && chp->atapibus == NULL; i++) { |
488 | if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI) { | | 488 | if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI) { |
489 | #if NATAPIBUS > 0 | | 489 | #if NATAPIBUS > 0 |
490 | (*atac->atac_atapibus_attach)(atabus_sc); | | 490 | (*atac->atac_atapibus_attach)(atabus_sc); |
491 | #else | | 491 | #else |
492 | /* | | 492 | /* |
493 | * Fake the autoconfig "not configured" message | | 493 | * Fake the autoconfig "not configured" message |
494 | */ | | 494 | */ |
495 | aprint_normal("atapibus at %s not configured\n", | | 495 | aprint_normal("atapibus at %s not configured\n", |
496 | device_xname(atac->atac_dev)); | | 496 | device_xname(atac->atac_dev)); |
497 | chp->atapibus = NULL; | | 497 | chp->atapibus = NULL; |
498 | s = splbio(); | | 498 | s = splbio(); |
499 | for (i = 0; i < chp->ch_ndrives; i++) { | | 499 | for (i = 0; i < chp->ch_ndrives; i++) { |
500 | if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI) | | 500 | if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI) |
501 | chp->ch_drive[i].drive_type = ATA_DRIVET_NONE; | | 501 | chp->ch_drive[i].drive_type = ATA_DRIVET_NONE; |
502 | } | | 502 | } |
503 | splx(s); | | 503 | splx(s); |
504 | #endif | | 504 | #endif |
505 | break; | | 505 | break; |
506 | } | | 506 | } |
507 | } | | 507 | } |
508 | | | 508 | |
509 | for (i = 0; i < chp->ch_ndrives; i++) { | | 509 | for (i = 0; i < chp->ch_ndrives; i++) { |
510 | struct ata_device adev; | | 510 | struct ata_device adev; |
511 | if (chp->ch_drive[i].drive_type != ATA_DRIVET_ATA && | | 511 | if (chp->ch_drive[i].drive_type != ATA_DRIVET_ATA && |
512 | chp->ch_drive[i].drive_type != ATA_DRIVET_OLD) { | | 512 | chp->ch_drive[i].drive_type != ATA_DRIVET_OLD) { |
513 | continue; | | 513 | continue; |
514 | } | | 514 | } |
515 | if (chp->ch_drive[i].drv_softc != NULL) | | 515 | if (chp->ch_drive[i].drv_softc != NULL) |
516 | continue; | | 516 | continue; |
517 | memset(&adev, 0, sizeof(struct ata_device)); | | 517 | memset(&adev, 0, sizeof(struct ata_device)); |
518 | adev.adev_bustype = atac->atac_bustype_ata; | | 518 | adev.adev_bustype = atac->atac_bustype_ata; |
519 | adev.adev_channel = chp->ch_channel; | | 519 | adev.adev_channel = chp->ch_channel; |
520 | adev.adev_drv_data = &chp->ch_drive[i]; | | 520 | adev.adev_drv_data = &chp->ch_drive[i]; |
521 | chp->ch_drive[i].drv_softc = config_found_ia(atabus_sc->sc_dev, | | 521 | chp->ch_drive[i].drv_softc = config_found_ia(atabus_sc->sc_dev, |
522 | "ata_hl", &adev, ataprint); | | 522 | "ata_hl", &adev, ataprint); |
523 | if (chp->ch_drive[i].drv_softc != NULL) { | | 523 | if (chp->ch_drive[i].drv_softc != NULL) { |
524 | ata_probe_caps(&chp->ch_drive[i]); | | 524 | ata_probe_caps(&chp->ch_drive[i]); |
525 | } else { | | 525 | } else { |
526 | s = splbio(); | | 526 | s = splbio(); |
527 | chp->ch_drive[i].drive_type = ATA_DRIVET_NONE; | | 527 | chp->ch_drive[i].drive_type = ATA_DRIVET_NONE; |
528 | splx(s); | | 528 | splx(s); |
529 | } | | 529 | } |
530 | } | | 530 | } |
531 | | | 531 | |
532 | /* now that we know the drives, the controller can set its modes */ | | 532 | /* now that we know the drives, the controller can set its modes */ |
533 | if (atac->atac_set_modes) { | | 533 | if (atac->atac_set_modes) { |
534 | (*atac->atac_set_modes)(chp); | | 534 | (*atac->atac_set_modes)(chp); |
535 | ata_print_modes(chp); | | 535 | ata_print_modes(chp); |
536 | } | | 536 | } |
537 | #if NATARAID > 0 | | 537 | #if NATARAID > 0 |
538 | if (atac->atac_cap & ATAC_CAP_RAID) { | | 538 | if (atac->atac_cap & ATAC_CAP_RAID) { |
539 | for (i = 0; i < chp->ch_ndrives; i++) { | | 539 | for (i = 0; i < chp->ch_ndrives; i++) { |
540 | if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATA) { | | 540 | if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATA) { |
541 | ata_raid_check_component( | | 541 | ata_raid_check_component( |
542 | chp->ch_drive[i].drv_softc); | | 542 | chp->ch_drive[i].drv_softc); |
543 | } | | 543 | } |
544 | } | | 544 | } |
545 | } | | 545 | } |
546 | #endif /* NATARAID > 0 */ | | 546 | #endif /* NATARAID > 0 */ |
547 | | | 547 | |
548 | /* | | 548 | /* |
549 | * reset drive_flags for unattached devices, reset state for attached | | 549 | * reset drive_flags for unattached devices, reset state for attached |
550 | * ones | | 550 | * ones |
551 | */ | | 551 | */ |
552 | s = splbio(); | | 552 | s = splbio(); |
553 | for (i = 0; i < chp->ch_ndrives; i++) { | | 553 | for (i = 0; i < chp->ch_ndrives; i++) { |
554 | if (chp->ch_drive[i].drive_type == ATA_DRIVET_PM) | | 554 | if (chp->ch_drive[i].drive_type == ATA_DRIVET_PM) |
555 | continue; | | 555 | continue; |
556 | if (chp->ch_drive[i].drv_softc == NULL) { | | 556 | if (chp->ch_drive[i].drv_softc == NULL) { |
557 | chp->ch_drive[i].drive_flags = 0; | | 557 | chp->ch_drive[i].drive_flags = 0; |
558 | chp->ch_drive[i].drive_type = ATA_DRIVET_NONE; | | 558 | chp->ch_drive[i].drive_type = ATA_DRIVET_NONE; |
559 | } else | | 559 | } else |
560 | chp->ch_drive[i].state = 0; | | 560 | chp->ch_drive[i].state = 0; |
561 | } | | 561 | } |
562 | splx(s); | | 562 | splx(s); |
563 | | | 563 | |
564 | mutex_enter(&atabus_qlock); | | 564 | mutex_enter(&atabus_qlock); |
565 | TAILQ_REMOVE(&atabus_initq_head, atabus_initq, atabus_initq); | | 565 | TAILQ_REMOVE(&atabus_initq_head, atabus_initq, atabus_initq); |
566 | cv_broadcast(&atabus_qcv); | | 566 | cv_broadcast(&atabus_qcv); |
567 | mutex_exit(&atabus_qlock); | | 567 | mutex_exit(&atabus_qlock); |
568 | | | 568 | |
569 | free(atabus_initq, M_DEVBUF); | | 569 | free(atabus_initq, M_DEVBUF); |
570 | | | 570 | |
571 | ata_delref(chp); | | 571 | ata_delref(chp); |
572 | | | 572 | |
573 | config_pending_decr(atac->atac_dev); | | 573 | config_pending_decr(atac->atac_dev); |
574 | kthread_exit(0); | | 574 | kthread_exit(0); |
575 | } | | 575 | } |
576 | | | 576 | |
577 | /* | | 577 | /* |
578 | * atabus_thread: | | 578 | * atabus_thread: |
579 | * | | 579 | * |
580 | * Worker thread for the ATA bus. | | 580 | * Worker thread for the ATA bus. |
581 | */ | | 581 | */ |
static void
atabus_thread(void *arg)
{
	struct atabus_softc *sc = arg;
	struct ata_channel *chp = sc->sc_chan;
	struct ata_queue *chq = chp->ch_queue;
	struct ata_xfer *xfer;
	int i;

	mutex_enter(&chp->ch_lock);
	chp->ch_flags |= ATACH_TH_RUN;

	/*
	 * Probe the drives. Reset type to indicate to controllers
	 * that can re-probe that all drives must be probed..
	 *
	 * Note: ch_ndrives may be changed during the probe.
	 */
	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	for (i = 0; i < chp->ch_ndrives; i++) {
		chp->ch_drive[i].drive_flags = 0;
		chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
	}
	mutex_exit(&chp->ch_lock);

	/* Initial bus configuration (attaches atapibus and drives). */
	atabusconfig(sc);

	/*
	 * Service loop: sleep until there is work, then handle
	 * shutdown, rescan, reset or a frozen queue.
	 */
	mutex_enter(&chp->ch_lock);
	for (;;) {
		if ((chp->ch_flags & (ATACH_TH_RESET | ATACH_SHUTDOWN)) == 0 &&
		    (chq->queue_active == 0 || chq->queue_freeze == 0)) {
			/* Nothing to do: mark ourselves idle and wait. */
			chp->ch_flags &= ~ATACH_TH_RUN;
			cv_wait(&chp->ch_thr_idle, &chp->ch_lock);
			chp->ch_flags |= ATACH_TH_RUN;
		}
		if (chp->ch_flags & ATACH_SHUTDOWN) {
			break;
		}
		if (chp->ch_flags & ATACH_TH_RESCAN) {
			/* Re-run bus configuration; must drop ch_lock. */
			chp->ch_flags &= ~ATACH_TH_RESCAN;
			mutex_exit(&chp->ch_lock);
			atabusconfig(sc);
			mutex_enter(&chp->ch_lock);
		}
		if (chp->ch_flags & ATACH_TH_RESET) {
			/*
			 * ata_reset_channel() will freeze 2 times, so
			 * unfreeze one time. Not a problem as we're at splbio
			 */
			mutex_exit(&chp->ch_lock);
			ata_channel_thaw(chp);
			ata_reset_channel(chp, AT_WAIT | chp->ch_reset_flags);
			mutex_enter(&chp->ch_lock);
		} else if (chq->queue_active > 0 && chq->queue_freeze == 1) {
			/*
			 * Caller has bumped queue_freeze, decrease it. This
			 * flow shall never be executed for NCQ commands.
			 */
			KASSERT((chp->ch_flags & ATACH_NCQ) == 0);
			KASSERT(chq->queue_active == 1);
			mutex_exit(&chp->ch_lock);

			/* Thaw and restart the single active transfer. */
			ata_channel_thaw(chp);
			xfer = ata_queue_get_active_xfer(chp);
			KASSERT(xfer != NULL);
			(*xfer->c_start)(xfer->c_chp, xfer);
			mutex_enter(&chp->ch_lock);
		} else if (chq->queue_freeze > 1)
			panic("%s: queue_freeze", __func__);
	}
	/* Tell atabus_detach() we are gone, then terminate. */
	chp->ch_thread = NULL;
	cv_signal(&chp->ch_thr_idle);
	mutex_exit(&chp->ch_lock);
	kthread_exit(0);
}
657 | | | 657 | |
/*
 * ata_thread_wake:
 *
 *	Freeze the channel (under ch_lock) and poke the per-channel
 *	worker thread so it runs a pass of its service loop; the
 *	thread is expected to thaw the channel again.
 */
void
ata_thread_wake(struct ata_channel *chp)
{
	mutex_enter(&chp->ch_lock);
	/* Freeze while holding ch_lock; atabus_thread thaws later. */
	ata_channel_freeze_locked(chp);
	cv_signal(&chp->ch_thr_idle);
	mutex_exit(&chp->ch_lock);
}
666 | | | 666 | |
667 | /* | | 667 | /* |
668 | * atabus_match: | | 668 | * atabus_match: |
669 | * | | 669 | * |
670 | * Autoconfiguration match routine. | | 670 | * Autoconfiguration match routine. |
671 | */ | | 671 | */ |
672 | static int | | 672 | static int |
673 | atabus_match(device_t parent, cfdata_t cf, void *aux) | | 673 | atabus_match(device_t parent, cfdata_t cf, void *aux) |
674 | { | | 674 | { |
675 | struct ata_channel *chp = aux; | | 675 | struct ata_channel *chp = aux; |
676 | | | 676 | |
677 | if (chp == NULL) | | 677 | if (chp == NULL) |
678 | return (0); | | 678 | return (0); |
679 | | | 679 | |
680 | if (cf->cf_loc[ATACF_CHANNEL] != chp->ch_channel && | | 680 | if (cf->cf_loc[ATACF_CHANNEL] != chp->ch_channel && |
681 | cf->cf_loc[ATACF_CHANNEL] != ATACF_CHANNEL_DEFAULT) | | 681 | cf->cf_loc[ATACF_CHANNEL] != ATACF_CHANNEL_DEFAULT) |
682 | return (0); | | 682 | return (0); |
683 | | | 683 | |
684 | return (1); | | 684 | return (1); |
685 | } | | 685 | } |
686 | | | 686 | |
687 | /* | | 687 | /* |
688 | * atabus_attach: | | 688 | * atabus_attach: |
689 | * | | 689 | * |
690 | * Autoconfiguration attach routine. | | 690 | * Autoconfiguration attach routine. |
691 | */ | | 691 | */ |
static void
atabus_attach(device_t parent, device_t self, void *aux)
{
	struct atabus_softc *sc = device_private(self);
	struct ata_channel *chp = aux;
	struct atabus_initq *initq;
	int error;

	sc->sc_chan = chp;

	aprint_normal("\n");
	aprint_naive("\n");

	sc->sc_dev = self;

	/* Reference the channel; nonzero means it cannot be used now. */
	if (ata_addref(chp))
		return;

	/* One-time global ATA subsystem initialization. */
	RUN_ONCE(&ata_init_ctrl, atabus_init);

	/*
	 * Queue this bus for configuration; the config thread removes
	 * and frees the entry once probing completes.
	 */
	initq = malloc(sizeof(*initq), M_DEVBUF, M_WAITOK);
	initq->atabus_sc = sc;
	mutex_enter(&atabus_qlock);
	TAILQ_INSERT_TAIL(&atabus_initq_head, initq, atabus_initq);
	mutex_exit(&atabus_qlock);
	config_pending_incr(sc->sc_dev);

	/* The worker thread does the actual drive probe/attach. */
	if ((error = kthread_create(PRI_NONE, 0, NULL, atabus_thread, sc,
	    &chp->ch_thread, "%s", device_xname(self))) != 0)
		aprint_error_dev(self,
		    "unable to create kernel thread: error %d\n", error);

	if (!pmf_device_register(self, atabus_suspend, atabus_resume))
		aprint_error_dev(self, "couldn't establish power handler\n");
}
727 | | | 727 | |
728 | /* | | 728 | /* |
729 | * atabus_detach: | | 729 | * atabus_detach: |
730 | * | | 730 | * |
731 | * Autoconfiguration detach routine. | | 731 | * Autoconfiguration detach routine. |
732 | */ | | 732 | */ |
static int
atabus_detach(device_t self, int flags)
{
	struct atabus_softc *sc = device_private(self);
	struct ata_channel *chp = sc->sc_chan;
	device_t dev = NULL;
	int i, error = 0;

	/*
	 * Shutdown the channel: flag the worker thread to exit and
	 * wait (under ch_lock) until it clears ch_thread on its way out.
	 */
	mutex_enter(&chp->ch_lock);
	chp->ch_flags |= ATACH_SHUTDOWN;
	while (chp->ch_thread != NULL) {
		cv_signal(&chp->ch_thr_idle);
		cv_wait(&chp->ch_thr_idle, &chp->ch_lock);
	}
	mutex_exit(&chp->ch_lock);

	/*
	 * Detach atapibus and its children.
	 */
	if ((dev = chp->atapibus) != NULL) {
		ATADEBUG_PRINT(("atabus_detach: %s: detaching %s\n",
		    device_xname(self), device_xname(dev)), DEBUG_DETACH);

		error = config_detach(dev, flags);
		if (error)
			goto out;
		/* atabus_childdetached() must have cleared the pointer. */
		KASSERT(chp->atapibus == NULL);
	}

	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);

	/*
	 * Detach our other children.
	 */
	for (i = 0; i < chp->ch_ndrives; i++) {
		/* ATAPI drives were handled by the atapibus detach above. */
		if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI)
			continue;
		/* Port multipliers have no child device; just forget them. */
		if (chp->ch_drive[i].drive_type == ATA_DRIVET_PM)
			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
		if ((dev = chp->ch_drive[i].drv_softc) != NULL) {
			ATADEBUG_PRINT(("%s.%d: %s: detaching %s\n", __func__,
			    __LINE__, device_xname(self), device_xname(dev)),
			    DEBUG_DETACH);
			error = config_detach(dev, flags);
			if (error)
				goto out;
			KASSERT(chp->ch_drive[i].drv_softc == NULL);
			KASSERT(chp->ch_drive[i].drive_type == 0);
		}
	}
	atabus_free_drives(chp);

 out:
#ifdef ATADEBUG
	if (dev != NULL && error != 0)
		ATADEBUG_PRINT(("%s: %s: error %d detaching %s\n", __func__,
		    device_xname(self), error, device_xname(dev)),
		    DEBUG_DETACH);
#endif /* ATADEBUG */

	return (error);
}
796 | | | 796 | |
797 | void | | 797 | void |
798 | atabus_childdetached(device_t self, device_t child) | | 798 | atabus_childdetached(device_t self, device_t child) |
799 | { | | 799 | { |
800 | bool found = false; | | 800 | bool found = false; |
801 | struct atabus_softc *sc = device_private(self); | | 801 | struct atabus_softc *sc = device_private(self); |
802 | struct ata_channel *chp = sc->sc_chan; | | 802 | struct ata_channel *chp = sc->sc_chan; |
803 | int i; | | 803 | int i; |
804 | | | 804 | |
805 | KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL); | | 805 | KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL); |
806 | /* | | 806 | /* |
807 | * atapibus detached. | | 807 | * atapibus detached. |
808 | */ | | 808 | */ |
809 | if (child == chp->atapibus) { | | 809 | if (child == chp->atapibus) { |
810 | chp->atapibus = NULL; | | 810 | chp->atapibus = NULL; |
811 | found = true; | | 811 | found = true; |
812 | for (i = 0; i < chp->ch_ndrives; i++) { | | 812 | for (i = 0; i < chp->ch_ndrives; i++) { |
813 | if (chp->ch_drive[i].drive_type != ATA_DRIVET_ATAPI) | | 813 | if (chp->ch_drive[i].drive_type != ATA_DRIVET_ATAPI) |
814 | continue; | | 814 | continue; |
815 | KASSERT(chp->ch_drive[i].drv_softc != NULL); | | 815 | KASSERT(chp->ch_drive[i].drv_softc != NULL); |
816 | chp->ch_drive[i].drv_softc = NULL; | | 816 | chp->ch_drive[i].drv_softc = NULL; |
817 | chp->ch_drive[i].drive_flags = 0; | | 817 | chp->ch_drive[i].drive_flags = 0; |
818 | chp->ch_drive[i].drive_type = ATA_DRIVET_NONE; | | 818 | chp->ch_drive[i].drive_type = ATA_DRIVET_NONE; |
819 | } | | 819 | } |
820 | } | | 820 | } |
821 | | | 821 | |
822 | /* | | 822 | /* |
823 | * Detach our other children. | | 823 | * Detach our other children. |
824 | */ | | 824 | */ |
825 | for (i = 0; i < chp->ch_ndrives; i++) { | | 825 | for (i = 0; i < chp->ch_ndrives; i++) { |
826 | if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI) | | 826 | if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI) |
827 | continue; | | 827 | continue; |
828 | if (child == chp->ch_drive[i].drv_softc) { | | 828 | if (child == chp->ch_drive[i].drv_softc) { |
829 | chp->ch_drive[i].drv_softc = NULL; | | 829 | chp->ch_drive[i].drv_softc = NULL; |
830 | chp->ch_drive[i].drive_flags = 0; | | 830 | chp->ch_drive[i].drive_flags = 0; |
831 | if (chp->ch_drive[i].drive_type == ATA_DRIVET_PM) | | 831 | if (chp->ch_drive[i].drive_type == ATA_DRIVET_PM) |
832 | chp->ch_satapmp_nports = 0; | | 832 | chp->ch_satapmp_nports = 0; |
833 | chp->ch_drive[i].drive_type = ATA_DRIVET_NONE; | | 833 | chp->ch_drive[i].drive_type = ATA_DRIVET_NONE; |
834 | found = true; | | 834 | found = true; |
835 | } | | 835 | } |
836 | } | | 836 | } |
837 | | | 837 | |
838 | if (!found) | | 838 | if (!found) |
839 | panic("%s: unknown child %p", device_xname(self), | | 839 | panic("%s: unknown child %p", device_xname(self), |
840 | (const void *)child); | | 840 | (const void *)child); |
841 | } | | 841 | } |
842 | | | 842 | |
/* Autoconfiguration glue: register the atabus attach/detach entry points. */
CFATTACH_DECL3_NEW(atabus, sizeof(struct atabus_softc),
    atabus_match, atabus_attach, atabus_detach, NULL, atabus_rescan,
    atabus_childdetached, DVF_DETACH_SHUTDOWN);
846 | | | 846 | |
847 | /***************************************************************************** | | 847 | /***************************************************************************** |
848 | * Common ATA bus operations. | | 848 | * Common ATA bus operations. |
849 | *****************************************************************************/ | | 849 | *****************************************************************************/ |
850 | | | 850 | |
851 | /* allocate/free the channel's ch_drive[] array */ | | 851 | /* allocate/free the channel's ch_drive[] array */ |
852 | int | | 852 | int |
853 | atabus_alloc_drives(struct ata_channel *chp, int ndrives) | | 853 | atabus_alloc_drives(struct ata_channel *chp, int ndrives) |
854 | { | | 854 | { |
855 | int i; | | 855 | int i; |
856 | if (chp->ch_ndrives != ndrives) | | 856 | if (chp->ch_ndrives != ndrives) |
857 | atabus_free_drives(chp); | | 857 | atabus_free_drives(chp); |
858 | if (chp->ch_drive == NULL) { | | 858 | if (chp->ch_drive == NULL) { |
859 | chp->ch_drive = malloc( | | 859 | chp->ch_drive = malloc( |
860 | sizeof(struct ata_drive_datas) * ndrives, | | 860 | sizeof(struct ata_drive_datas) * ndrives, |
861 | M_DEVBUF, M_NOWAIT | M_ZERO); | | 861 | M_DEVBUF, M_NOWAIT | M_ZERO); |
862 | } | | 862 | } |
863 | if (chp->ch_drive == NULL) { | | 863 | if (chp->ch_drive == NULL) { |
864 | aprint_error_dev(chp->ch_atac->atac_dev, | | 864 | aprint_error_dev(chp->ch_atac->atac_dev, |
865 | "can't alloc drive array\n"); | | 865 | "can't alloc drive array\n"); |
866 | chp->ch_ndrives = 0; | | 866 | chp->ch_ndrives = 0; |
867 | return ENOMEM; | | 867 | return ENOMEM; |
868 | }; | | 868 | }; |
869 | for (i = 0; i < ndrives; i++) { | | 869 | for (i = 0; i < ndrives; i++) { |
870 | chp->ch_drive[i].chnl_softc = chp; | | 870 | chp->ch_drive[i].chnl_softc = chp; |
871 | chp->ch_drive[i].drive = i; | | 871 | chp->ch_drive[i].drive = i; |
872 | } | | 872 | } |
873 | chp->ch_ndrives = ndrives; | | 873 | chp->ch_ndrives = ndrives; |
874 | return 0; | | 874 | return 0; |
875 | } | | 875 | } |
876 | | | 876 | |
/*
 * atabus_free_drives:
 *
 *	Release the channel's ch_drive[] array. On DIAGNOSTIC kernels,
 *	panic if any entry still looks attached (type not NONE, or a
 *	live drv_softc pointer), since freeing then would leave
 *	dangling references.
 */
void
atabus_free_drives(struct ata_channel *chp)
{
#ifdef DIAGNOSTIC
	int i;
	int dopanic = 0;
	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	for (i = 0; i < chp->ch_ndrives; i++) {
		if (chp->ch_drive[i].drive_type != ATA_DRIVET_NONE) {
			printf("%s: ch_drive[%d] type %d != ATA_DRIVET_NONE\n",
			    device_xname(chp->atabus), i,
			    chp->ch_drive[i].drive_type);
			dopanic = 1;
		}
		if (chp->ch_drive[i].drv_softc != NULL) {
			printf("%s: ch_drive[%d] attached to %s\n",
			    device_xname(chp->atabus), i,
			    device_xname(chp->ch_drive[i].drv_softc));
			dopanic = 1;
		}
	}
	if (dopanic)
		panic("atabus_free_drives");
#endif

	/* Nothing allocated: nothing to free. */
	if (chp->ch_drive == NULL)
		return;
	chp->ch_ndrives = 0;
	free(chp->ch_drive, M_DEVBUF);
	chp->ch_drive = NULL;
}
908 | | | 908 | |
909 | /* Get the disk's parameters */ | | 909 | /* Get the disk's parameters */ |
910 | int | | 910 | int |
911 | ata_get_params(struct ata_drive_datas *drvp, uint8_t flags, | | 911 | ata_get_params(struct ata_drive_datas *drvp, uint8_t flags, |
912 | struct ataparams *prms) | | 912 | struct ataparams *prms) |
913 | { | | 913 | { |
914 | struct ata_xfer *xfer; | | 914 | struct ata_xfer *xfer; |
915 | struct ata_channel *chp = drvp->chnl_softc; | | 915 | struct ata_channel *chp = drvp->chnl_softc; |
916 | struct atac_softc *atac = chp->ch_atac; | | 916 | struct atac_softc *atac = chp->ch_atac; |
917 | char *tb; | | 917 | char *tb; |
918 | int i, rv; | | 918 | int i, rv; |
919 | uint16_t *p; | | 919 | uint16_t *p; |
920 | | | 920 | |
921 | ATADEBUG_PRINT(("%s\n", __func__), DEBUG_FUNCS); | | 921 | ATADEBUG_PRINT(("%s\n", __func__), DEBUG_FUNCS); |
922 | | | 922 | |
923 | xfer = ata_get_xfer(chp); | | 923 | xfer = ata_get_xfer(chp); |
924 | if (xfer == NULL) { | | 924 | if (xfer == NULL) { |
925 | ATADEBUG_PRINT(("%s: no xfer\n", __func__), | | 925 | ATADEBUG_PRINT(("%s: no xfer\n", __func__), |
926 | DEBUG_FUNCS|DEBUG_PROBE); | | 926 | DEBUG_FUNCS|DEBUG_PROBE); |
927 | return CMD_AGAIN; | | 927 | return CMD_AGAIN; |
928 | } | | 928 | } |
929 | | | 929 | |
930 | tb = kmem_zalloc(DEV_BSIZE, KM_SLEEP); | | 930 | tb = kmem_zalloc(DEV_BSIZE, KM_SLEEP); |
931 | memset(prms, 0, sizeof(struct ataparams)); | | 931 | memset(prms, 0, sizeof(struct ataparams)); |
932 | | | 932 | |
933 | if (drvp->drive_type == ATA_DRIVET_ATA) { | | 933 | if (drvp->drive_type == ATA_DRIVET_ATA) { |
934 | xfer->c_ata_c.r_command = WDCC_IDENTIFY; | | 934 | xfer->c_ata_c.r_command = WDCC_IDENTIFY; |
935 | xfer->c_ata_c.r_st_bmask = WDCS_DRDY; | | 935 | xfer->c_ata_c.r_st_bmask = WDCS_DRDY; |
936 | xfer->c_ata_c.r_st_pmask = WDCS_DRQ; | | 936 | xfer->c_ata_c.r_st_pmask = WDCS_DRQ; |
937 | xfer->c_ata_c.timeout = 3000; /* 3s */ | | 937 | xfer->c_ata_c.timeout = 3000; /* 3s */ |
938 | } else if (drvp->drive_type == ATA_DRIVET_ATAPI) { | | 938 | } else if (drvp->drive_type == ATA_DRIVET_ATAPI) { |
939 | xfer->c_ata_c.r_command = ATAPI_IDENTIFY_DEVICE; | | 939 | xfer->c_ata_c.r_command = ATAPI_IDENTIFY_DEVICE; |
940 | xfer->c_ata_c.r_st_bmask = 0; | | 940 | xfer->c_ata_c.r_st_bmask = 0; |
941 | xfer->c_ata_c.r_st_pmask = WDCS_DRQ; | | 941 | xfer->c_ata_c.r_st_pmask = WDCS_DRQ; |
942 | xfer->c_ata_c.timeout = 10000; /* 10s */ | | 942 | xfer->c_ata_c.timeout = 10000; /* 10s */ |
943 | } else { | | 943 | } else { |
944 | ATADEBUG_PRINT(("ata_get_parms: no disks\n"), | | 944 | ATADEBUG_PRINT(("ata_get_parms: no disks\n"), |
945 | DEBUG_FUNCS|DEBUG_PROBE); | | 945 | DEBUG_FUNCS|DEBUG_PROBE); |
946 | rv = CMD_ERR; | | 946 | rv = CMD_ERR; |
947 | goto out; | | 947 | goto out; |
948 | } | | 948 | } |
949 | xfer->c_ata_c.flags = AT_READ | flags; | | 949 | xfer->c_ata_c.flags = AT_READ | flags; |
950 | xfer->c_ata_c.data = tb; | | 950 | xfer->c_ata_c.data = tb; |
951 | xfer->c_ata_c.bcount = DEV_BSIZE; | | 951 | xfer->c_ata_c.bcount = DEV_BSIZE; |
952 | if ((*atac->atac_bustype_ata->ata_exec_command)(drvp, | | 952 | if ((*atac->atac_bustype_ata->ata_exec_command)(drvp, |
953 | xfer) != ATACMD_COMPLETE) { | | 953 | xfer) != ATACMD_COMPLETE) { |
954 | ATADEBUG_PRINT(("ata_get_parms: wdc_exec_command failed\n"), | | 954 | ATADEBUG_PRINT(("ata_get_parms: wdc_exec_command failed\n"), |
955 | DEBUG_FUNCS|DEBUG_PROBE); | | 955 | DEBUG_FUNCS|DEBUG_PROBE); |
956 | rv = CMD_AGAIN; | | 956 | rv = CMD_AGAIN; |
957 | goto out; | | 957 | goto out; |
958 | } | | 958 | } |
959 | if (xfer->c_ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) { | | 959 | if (xfer->c_ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) { |
960 | ATADEBUG_PRINT(("ata_get_parms: ata_c.flags=0x%x\n", | | 960 | ATADEBUG_PRINT(("ata_get_parms: ata_c.flags=0x%x\n", |
961 | xfer->c_ata_c.flags), DEBUG_FUNCS|DEBUG_PROBE); | | 961 | xfer->c_ata_c.flags), DEBUG_FUNCS|DEBUG_PROBE); |
962 | rv = CMD_ERR; | | 962 | rv = CMD_ERR; |
963 | goto out; | | 963 | goto out; |
964 | } | | 964 | } |
965 | /* if we didn't read any data something is wrong */ | | 965 | /* if we didn't read any data something is wrong */ |
966 | if ((xfer->c_ata_c.flags & AT_XFDONE) == 0) { | | 966 | if ((xfer->c_ata_c.flags & AT_XFDONE) == 0) { |
967 | rv = CMD_ERR; | | 967 | rv = CMD_ERR; |
968 | goto out; | | 968 | goto out; |
969 | } | | 969 | } |
970 | | | 970 | |
971 | /* Read in parameter block. */ | | 971 | /* Read in parameter block. */ |
972 | memcpy(prms, tb, sizeof(struct ataparams)); | | 972 | memcpy(prms, tb, sizeof(struct ataparams)); |
973 | | | 973 | |
974 | /* | | 974 | /* |
975 | * Shuffle string byte order. | | 975 | * Shuffle string byte order. |
976 | * ATAPI NEC, Mitsumi and Pioneer drives and | | 976 | * ATAPI NEC, Mitsumi and Pioneer drives and |
977 | * old ATA TDK CompactFlash cards | | 977 | * old ATA TDK CompactFlash cards |
978 | * have different byte order. | | 978 | * have different byte order. |
979 | */ | | 979 | */ |
980 | #if BYTE_ORDER == BIG_ENDIAN | | 980 | #if BYTE_ORDER == BIG_ENDIAN |
981 | # define M(n) prms->atap_model[(n) ^ 1] | | 981 | # define M(n) prms->atap_model[(n) ^ 1] |
982 | #else | | 982 | #else |
983 | # define M(n) prms->atap_model[n] | | 983 | # define M(n) prms->atap_model[n] |
984 | #endif | | 984 | #endif |
985 | if ( | | 985 | if ( |
986 | #if BYTE_ORDER == BIG_ENDIAN | | 986 | #if BYTE_ORDER == BIG_ENDIAN |
987 | ! | | 987 | ! |
988 | #endif | | 988 | #endif |
989 | ((drvp->drive_type == ATA_DRIVET_ATAPI) ? | | 989 | ((drvp->drive_type == ATA_DRIVET_ATAPI) ? |
990 | ((M(0) == 'N' && M(1) == 'E') || | | 990 | ((M(0) == 'N' && M(1) == 'E') || |
991 | (M(0) == 'F' && M(1) == 'X') || | | 991 | (M(0) == 'F' && M(1) == 'X') || |
992 | (M(0) == 'P' && M(1) == 'i')) : | | 992 | (M(0) == 'P' && M(1) == 'i')) : |
993 | ((M(0) == 'T' && M(1) == 'D' && M(2) == 'K')))) { | | 993 | ((M(0) == 'T' && M(1) == 'D' && M(2) == 'K')))) { |
994 | rv = CMD_OK; | | 994 | rv = CMD_OK; |
995 | goto out; | | 995 | goto out; |
996 | } | | 996 | } |
997 | #undef M | | 997 | #undef M |
998 | for (i = 0; i < sizeof(prms->atap_model); i += 2) { | | 998 | for (i = 0; i < sizeof(prms->atap_model); i += 2) { |
999 | p = (uint16_t *)(prms->atap_model + i); | | 999 | p = (uint16_t *)(prms->atap_model + i); |
1000 | *p = bswap16(*p); | | 1000 | *p = bswap16(*p); |
1001 | } | | 1001 | } |
1002 | for (i = 0; i < sizeof(prms->atap_serial); i += 2) { | | 1002 | for (i = 0; i < sizeof(prms->atap_serial); i += 2) { |
1003 | p = (uint16_t *)(prms->atap_serial + i); | | 1003 | p = (uint16_t *)(prms->atap_serial + i); |
1004 | *p = bswap16(*p); | | 1004 | *p = bswap16(*p); |
1005 | } | | 1005 | } |
1006 | for (i = 0; i < sizeof(prms->atap_revision); i += 2) { | | 1006 | for (i = 0; i < sizeof(prms->atap_revision); i += 2) { |
1007 | p = (uint16_t *)(prms->atap_revision + i); | | 1007 | p = (uint16_t *)(prms->atap_revision + i); |
1008 | *p = bswap16(*p); | | 1008 | *p = bswap16(*p); |
1009 | } | | 1009 | } |
1010 | | | 1010 | |
1011 | rv = CMD_OK; | | 1011 | rv = CMD_OK; |
1012 | out: | | 1012 | out: |
1013 | kmem_free(tb, DEV_BSIZE); | | 1013 | kmem_free(tb, DEV_BSIZE); |
1014 | ata_free_xfer(chp, xfer); | | 1014 | ata_free_xfer(chp, xfer); |
1015 | return rv; | | 1015 | return rv; |
1016 | } | | 1016 | } |
1017 | | | 1017 | |
1018 | int | | 1018 | int |
1019 | ata_set_mode(struct ata_drive_datas *drvp, uint8_t mode, uint8_t flags) | | 1019 | ata_set_mode(struct ata_drive_datas *drvp, uint8_t mode, uint8_t flags) |
1020 | { | | 1020 | { |
1021 | struct ata_xfer *xfer; | | 1021 | struct ata_xfer *xfer; |
1022 | int rv; | | 1022 | int rv; |
1023 | struct ata_channel *chp = drvp->chnl_softc; | | 1023 | struct ata_channel *chp = drvp->chnl_softc; |
1024 | struct atac_softc *atac = chp->ch_atac; | | 1024 | struct atac_softc *atac = chp->ch_atac; |
1025 | | | 1025 | |
1026 | ATADEBUG_PRINT(("ata_set_mode=0x%x\n", mode), DEBUG_FUNCS); | | 1026 | ATADEBUG_PRINT(("ata_set_mode=0x%x\n", mode), DEBUG_FUNCS); |
1027 | | | 1027 | |
1028 | xfer = ata_get_xfer(chp); | | 1028 | xfer = ata_get_xfer(chp); |
1029 | if (xfer == NULL) { | | 1029 | if (xfer == NULL) { |
1030 | ATADEBUG_PRINT(("%s: no xfer\n", __func__), | | 1030 | ATADEBUG_PRINT(("%s: no xfer\n", __func__), |
1031 | DEBUG_FUNCS|DEBUG_PROBE); | | 1031 | DEBUG_FUNCS|DEBUG_PROBE); |
1032 | return CMD_AGAIN; | | 1032 | return CMD_AGAIN; |
1033 | } | | 1033 | } |
1034 | | | 1034 | |
1035 | xfer->c_ata_c.r_command = SET_FEATURES; | | 1035 | xfer->c_ata_c.r_command = SET_FEATURES; |
1036 | xfer->c_ata_c.r_st_bmask = 0; | | 1036 | xfer->c_ata_c.r_st_bmask = 0; |
1037 | xfer->c_ata_c.r_st_pmask = 0; | | 1037 | xfer->c_ata_c.r_st_pmask = 0; |
1038 | xfer->c_ata_c.r_features = WDSF_SET_MODE; | | 1038 | xfer->c_ata_c.r_features = WDSF_SET_MODE; |
1039 | xfer->c_ata_c.r_count = mode; | | 1039 | xfer->c_ata_c.r_count = mode; |
1040 | xfer->c_ata_c.flags = flags; | | 1040 | xfer->c_ata_c.flags = flags; |
1041 | xfer->c_ata_c.timeout = 1000; /* 1s */ | | 1041 | xfer->c_ata_c.timeout = 1000; /* 1s */ |
1042 | if ((*atac->atac_bustype_ata->ata_exec_command)(drvp, | | 1042 | if ((*atac->atac_bustype_ata->ata_exec_command)(drvp, |
1043 | xfer) != ATACMD_COMPLETE) { | | 1043 | xfer) != ATACMD_COMPLETE) { |
1044 | rv = CMD_AGAIN; | | 1044 | rv = CMD_AGAIN; |
1045 | goto out; | | 1045 | goto out; |
1046 | } | | 1046 | } |
1047 | if (xfer->c_ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) { | | 1047 | if (xfer->c_ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) { |
1048 | rv = CMD_ERR; | | 1048 | rv = CMD_ERR; |
1049 | goto out; | | 1049 | goto out; |
1050 | } | | 1050 | } |
1051 | | | 1051 | |
1052 | rv = CMD_OK; | | 1052 | rv = CMD_OK; |
1053 | | | 1053 | |
1054 | out: | | 1054 | out: |
1055 | ata_free_xfer(chp, xfer); | | 1055 | ata_free_xfer(chp, xfer); |
1056 | return rv; | | 1056 | return rv; |
1057 | } | | 1057 | } |
1058 | | | 1058 | |
/*
 * Read the NCQ error log (READ LOG EXT, page WDCC_LOG_PAGE_NCQ) to find
 * which queued command failed.  On success returns 0 and fills in the
 * failed command's tag via *slot, and its ATA status/error registers via
 * *status and *err.  Returns EOPNOTSUPP for non-NCQ drives (or when the
 * log says no queued command failed), EAGAIN when the command could not
 * be issued, and EINVAL on command failure or a corrupt log page.
 *
 * Runs as part of error recovery - uses the reserved C_RECOVERY xfer and
 * the drive's preallocated recovery buffer, so it never needs to allocate.
 */
int
ata_read_log_ext_ncq(struct ata_drive_datas *drvp, uint8_t flags,
    uint8_t *slot, uint8_t *status, uint8_t *err)
{
	struct ata_xfer *xfer;
	int rv;
	struct ata_channel *chp = drvp->chnl_softc;
	struct atac_softc *atac = chp->ch_atac;
	uint8_t *tb, cksum, page;

	ATADEBUG_PRINT(("%s\n", __func__), DEBUG_FUNCS);

	/* Only NCQ ATA drives support/need this */
	if (drvp->drive_type != ATA_DRIVET_ATA ||
	    (drvp->drive_flags & ATA_DRIVE_NCQ) == 0)
		return EOPNOTSUPP;

	/* C_RECOVERY guarantees an xfer slot even when the queue is full */
	xfer = ata_get_xfer_ext(chp, C_RECOVERY, 0);

	/* use the preallocated per-drive buffer - no allocation in recovery */
	tb = drvp->recovery_blk;
	memset(tb, 0, DEV_BSIZE);

	/*
	 * We could use READ LOG DMA EXT if drive supports it (i.e.
	 * when it supports Streaming feature) to avoid PIO command,
	 * and to make this a little faster. Realistically, it
	 * should not matter.
	 */
	xfer->c_flags |= C_RECOVERY;
	xfer->c_ata_c.r_command = WDCC_READ_LOG_EXT;
	xfer->c_ata_c.r_lba = page = WDCC_LOG_PAGE_NCQ;
	xfer->c_ata_c.r_st_bmask = WDCS_DRDY;
	xfer->c_ata_c.r_st_pmask = WDCS_DRDY;
	xfer->c_ata_c.r_count = 1;	/* one 512-byte log sector */
	xfer->c_ata_c.r_device = WDSD_LBA;
	xfer->c_ata_c.flags = AT_READ | AT_LBA | AT_LBA48 | flags;
	xfer->c_ata_c.timeout = 1000; /* 1s */
	xfer->c_ata_c.data = tb;
	xfer->c_ata_c.bcount = DEV_BSIZE;

	if ((*atac->atac_bustype_ata->ata_exec_command)(drvp,
	    xfer) != ATACMD_COMPLETE) {
		rv = EAGAIN;
		goto out;
	}
	if (xfer->c_ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) {
		rv = EINVAL;
		goto out;
	}

	/* the log page must sum to zero (mod 256), otherwise it's corrupt */
	cksum = 0;
	for (int i = 0; i < DEV_BSIZE; i++)
		cksum += tb[i];
	if (cksum != 0) {
		aprint_error_dev(drvp->drv_softc,
		    "invalid checksum %x for READ LOG EXT page %x\n",
		    cksum, page);
		rv = EINVAL;
		goto out;
	}

	if (tb[0] & WDCC_LOG_NQ) {
		/* not queued command */
		rv = EOPNOTSUPP;
		goto out;
	}

	/* byte 0 bits 0-4: failed tag; byte 2: status; byte 3: error */
	*slot = tb[0] & 0x1f;
	*status = tb[2];
	*err = tb[3];

	KASSERTMSG((*status & WDCS_ERR),
	    "%s: non-error command slot %d reported by READ LOG EXT page %x: "
	    "err %x status %x\n",
	    device_xname(drvp->drv_softc), *slot, page, *err, *status);

	rv = 0;

out:
	ata_free_xfer(chp, xfer);
	return rv;
}
1141 | | | 1141 | |
1142 | #if NATA_DMA | | 1142 | #if NATA_DMA |
1143 | void | | 1143 | void |
1144 | ata_dmaerr(struct ata_drive_datas *drvp, int flags) | | 1144 | ata_dmaerr(struct ata_drive_datas *drvp, int flags) |
1145 | { | | 1145 | { |
1146 | /* | | 1146 | /* |
1147 | * Downgrade decision: if we get NERRS_MAX in NXFER. | | 1147 | * Downgrade decision: if we get NERRS_MAX in NXFER. |
1148 | * We start with n_dmaerrs set to NERRS_MAX-1 so that the | | 1148 | * We start with n_dmaerrs set to NERRS_MAX-1 so that the |
1149 | * first error within the first NXFER ops will immediatly trigger | | 1149 | * first error within the first NXFER ops will immediatly trigger |
1150 | * a downgrade. | | 1150 | * a downgrade. |
1151 | * If we got an error and n_xfers is bigger than NXFER reset counters. | | 1151 | * If we got an error and n_xfers is bigger than NXFER reset counters. |
1152 | */ | | 1152 | */ |
1153 | drvp->n_dmaerrs++; | | 1153 | drvp->n_dmaerrs++; |
1154 | if (drvp->n_dmaerrs >= NERRS_MAX && drvp->n_xfers <= NXFER) { | | 1154 | if (drvp->n_dmaerrs >= NERRS_MAX && drvp->n_xfers <= NXFER) { |
1155 | ata_downgrade_mode(drvp, flags); | | 1155 | ata_downgrade_mode(drvp, flags); |
1156 | drvp->n_dmaerrs = NERRS_MAX-1; | | 1156 | drvp->n_dmaerrs = NERRS_MAX-1; |
1157 | drvp->n_xfers = 0; | | 1157 | drvp->n_xfers = 0; |
1158 | return; | | 1158 | return; |
1159 | } | | 1159 | } |
1160 | if (drvp->n_xfers > NXFER) { | | 1160 | if (drvp->n_xfers > NXFER) { |
1161 | drvp->n_dmaerrs = 1; /* just got an error */ | | 1161 | drvp->n_dmaerrs = 1; /* just got an error */ |
1162 | drvp->n_xfers = 1; /* restart counting from this error */ | | 1162 | drvp->n_xfers = 1; /* restart counting from this error */ |
1163 | } | | 1163 | } |
1164 | } | | 1164 | } |
1165 | #endif /* NATA_DMA */ | | 1165 | #endif /* NATA_DMA */ |
1166 | | | 1166 | |
1167 | /* | | 1167 | /* |
1168 | * freeze the queue and wait for the controller to be idle. Caller has to | | 1168 | * freeze the queue and wait for the controller to be idle. Caller has to |
1169 | * unfreeze/restart the queue | | 1169 | * unfreeze/restart the queue |
1170 | */ | | 1170 | */ |
1171 | static void | | 1171 | static void |
1172 | ata_channel_idle(struct ata_channel *chp) | | 1172 | ata_channel_idle(struct ata_channel *chp) |
1173 | { | | 1173 | { |
1174 | int s = splbio(); | | 1174 | int s = splbio(); |
1175 | ata_channel_freeze(chp); | | 1175 | ata_channel_freeze(chp); |
1176 | while (chp->ch_queue->queue_active > 0) { | | 1176 | while (chp->ch_queue->queue_active > 0) { |
1177 | chp->ch_queue->queue_flags |= QF_IDLE_WAIT; | | 1177 | chp->ch_queue->queue_flags |= QF_IDLE_WAIT; |
1178 | tsleep(&chp->ch_queue->queue_flags, PRIBIO, "qidl", 0); | | 1178 | tsleep(&chp->ch_queue->queue_flags, PRIBIO, "qidl", 0); |
1179 | } | | 1179 | } |
1180 | splx(s); | | 1180 | splx(s); |
1181 | } | | 1181 | } |
1182 | | | 1182 | |
1183 | /* | | 1183 | /* |
1184 | * Add a command to the queue and start controller. | | 1184 | * Add a command to the queue and start controller. |
1185 | * | | 1185 | * |
1186 | * MUST BE CALLED AT splbio()! | | 1186 | * MUST BE CALLED AT splbio()! |
1187 | */ | | 1187 | */ |
void
ata_exec_xfer(struct ata_channel *chp, struct ata_xfer *xfer)
{

	ATADEBUG_PRINT(("ata_exec_xfer %p channel %d drive %d\n", xfer,
	    chp->ch_channel, xfer->c_drive), DEBUG_XFERS);

	/* complete xfer setup */
	xfer->c_chp = chp;

	mutex_enter(&chp->ch_lock);

	/*
	 * Standard commands are added to the end of command list, but
	 * recovery commands must be run immediately, so they go to the head.
	 */
	if ((xfer->c_flags & C_RECOVERY) == 0)
		TAILQ_INSERT_TAIL(&chp->ch_queue->queue_xfer, xfer,
		    c_xferchain);
	else
		TAILQ_INSERT_HEAD(&chp->ch_queue->queue_xfer, xfer,
		    c_xferchain);
	ATADEBUG_PRINT(("atastart from ata_exec_xfer, flags 0x%x\n",
	    chp->ch_flags), DEBUG_XFERS);

	/*
	 * if polling and can sleep, wait for the xfer to be at head of queue
	 */
	if ((xfer->c_flags & (C_POLL | C_WAIT)) == (C_POLL | C_WAIT)) {
		while (chp->ch_queue->queue_active > 0 ||
		    TAILQ_FIRST(&chp->ch_queue->queue_xfer) != xfer) {
			/* atastart() cv_signal()s c_active for C_WAITACT */
			xfer->c_flags |= C_WAITACT;
			cv_wait(&xfer->c_active, &chp->ch_lock);
			xfer->c_flags &= ~C_WAITACT;

			/*
			 * Free xfer now if there was an attempt to free it
			 * while we were waiting.
			 *
			 * NOTE(review): ch_lock is still held on this path
			 * and ata_free_xfer() does mutex_enter(&chp->ch_lock)
			 * itself - verify this cannot self-deadlock.
			 */
			if ((xfer->c_flags & (C_FREE|C_WAITTIMO)) == C_FREE) {
				ata_free_xfer(chp, xfer);
				return;
			}
		}
	}

	mutex_exit(&chp->ch_lock);

	/* kick the channel to dispatch the newly queued xfer */
	atastart(chp);
}
1238 | | | 1238 | |
1239 | /* | | 1239 | /* |
1240 | * Start I/O on a controller, for the given channel. | | 1240 | * Start I/O on a controller, for the given channel. |
1241 | * The first xfer may be not for our channel if the channel queues | | 1241 | * The first xfer may be not for our channel if the channel queues |
1242 | * are shared. | | 1242 | * are shared. |
1243 | * | | 1243 | * |
1244 | * MUST BE CALLED AT splbio()! | | 1244 | * MUST BE CALLED AT splbio()! |
1245 | */ | | 1245 | */ |
1246 | void | | 1246 | void |
1247 | atastart(struct ata_channel *chp) | | 1247 | atastart(struct ata_channel *chp) |
1248 | { | | 1248 | { |
1249 | struct atac_softc *atac = chp->ch_atac; | | 1249 | struct atac_softc *atac = chp->ch_atac; |
1250 | struct ata_queue *chq = chp->ch_queue; | | 1250 | struct ata_queue *chq = chp->ch_queue; |
1251 | struct ata_xfer *xfer, *axfer; | | 1251 | struct ata_xfer *xfer, *axfer; |
1252 | bool immediate; | | 1252 | bool recovery; |
1253 | | | 1253 | |
1254 | #ifdef ATA_DEBUG | | 1254 | #ifdef ATA_DEBUG |
1255 | int spl1, spl2; | | 1255 | int spl1, spl2; |
1256 | | | 1256 | |
1257 | spl1 = splbio(); | | 1257 | spl1 = splbio(); |
1258 | spl2 = splbio(); | | 1258 | spl2 = splbio(); |
1259 | if (spl2 != spl1) { | | 1259 | if (spl2 != spl1) { |
1260 | printf("atastart: not at splbio()\n"); | | 1260 | printf("atastart: not at splbio()\n"); |
1261 | panic("atastart"); | | 1261 | panic("atastart"); |
1262 | } | | 1262 | } |
1263 | splx(spl2); | | 1263 | splx(spl2); |
1264 | splx(spl1); | | 1264 | splx(spl1); |
1265 | #endif /* ATA_DEBUG */ | | 1265 | #endif /* ATA_DEBUG */ |
1266 | | | 1266 | |
1267 | again: | | 1267 | again: |
1268 | mutex_enter(&chp->ch_lock); | | 1268 | mutex_enter(&chp->ch_lock); |
1269 | | | 1269 | |
1270 | KASSERT(chq->queue_active <= chq->queue_openings); | | 1270 | KASSERT(chq->queue_active <= chq->queue_openings); |
1271 | if (chq->queue_active == chq->queue_openings) { | | 1271 | if (chq->queue_active == chq->queue_openings) { |
1272 | goto out; /* channel completely busy */ | | 1272 | goto out; /* channel completely busy */ |
1273 | } | | 1273 | } |
1274 | | | 1274 | |
1275 | /* is there a xfer ? */ | | 1275 | /* is there a xfer ? */ |
1276 | if ((xfer = TAILQ_FIRST(&chp->ch_queue->queue_xfer)) == NULL) | | 1276 | if ((xfer = TAILQ_FIRST(&chp->ch_queue->queue_xfer)) == NULL) |
1277 | goto out; | | 1277 | goto out; |
1278 | | | 1278 | |
1279 | immediate = ISSET(xfer->c_flags, C_RECOVERY); | | 1279 | recovery = ISSET(xfer->c_flags, C_RECOVERY); |
1280 | | | 1280 | |
1281 | /* is the queue frozen? */ | | 1281 | /* is the queue frozen? */ |
1282 | if (__predict_false(!immediate && chq->queue_freeze > 0)) { | | 1282 | if (__predict_false(!recovery && chq->queue_freeze > 0)) { |
1283 | if (chq->queue_flags & QF_IDLE_WAIT) { | | 1283 | if (chq->queue_flags & QF_IDLE_WAIT) { |
1284 | chq->queue_flags &= ~QF_IDLE_WAIT; | | 1284 | chq->queue_flags &= ~QF_IDLE_WAIT; |
1285 | wakeup(&chq->queue_flags); | | 1285 | wakeup(&chq->queue_flags); |
1286 | } | | 1286 | } |
1287 | goto out; /* queue frozen */ | | 1287 | goto out; /* queue frozen */ |
1288 | } | | 1288 | } |
1289 | | | 1289 | |
1290 | /* all xfers on same queue must belong to the same channel */ | | 1290 | /* all xfers on same queue must belong to the same channel */ |
1291 | KASSERT(xfer->c_chp == chp); | | 1291 | KASSERT(xfer->c_chp == chp); |
1292 | | | 1292 | |
1293 | /* | | 1293 | /* |
1294 | * Can only take the command if there are no current active | | 1294 | * Can only take the command if there are no current active |
1295 | * commands, or if the command is NCQ and the active commands are also | | 1295 | * commands, or if the command is NCQ and the active commands are also |
1296 | * NCQ. If PM is in use and HBA driver doesn't support/use FIS-based | | 1296 | * NCQ. If PM is in use and HBA driver doesn't support/use FIS-based |
1297 | * switching, can only send commands to single drive. | | 1297 | * switching, can only send commands to single drive. |
1298 | * Need only check first xfer. | | 1298 | * Need only check first xfer. |
1299 | * XXX FIS-based switching - revisit | | 1299 | * XXX FIS-based switching - revisit |
1300 | */ | | 1300 | */ |
1301 | if (!immediate && (axfer = TAILQ_FIRST(&chp->ch_queue->active_xfers))) { | | 1301 | if (!recovery && (axfer = TAILQ_FIRST(&chp->ch_queue->active_xfers))) { |
1302 | if (!ISSET(xfer->c_flags, C_NCQ) || | | 1302 | if (!ISSET(xfer->c_flags, C_NCQ) || |
1303 | !ISSET(axfer->c_flags, C_NCQ) || | | 1303 | !ISSET(axfer->c_flags, C_NCQ) || |
1304 | xfer->c_drive != axfer->c_drive) | | 1304 | xfer->c_drive != axfer->c_drive) |
1305 | goto out; | | 1305 | goto out; |
1306 | } | | 1306 | } |
1307 | | | 1307 | |
1308 | struct ata_drive_datas * const drvp = &chp->ch_drive[xfer->c_drive]; | | 1308 | struct ata_drive_datas * const drvp = &chp->ch_drive[xfer->c_drive]; |
1309 | | | 1309 | |
1310 | /* | | 1310 | /* |
1311 | * if someone is waiting for the command to be active, wake it up | | 1311 | * if someone is waiting for the command to be active, wake it up |
1312 | * and let it process the command | | 1312 | * and let it process the command |
1313 | */ | | 1313 | */ |
1314 | if (xfer->c_flags & C_WAITACT) { | | 1314 | if (xfer->c_flags & C_WAITACT) { |
1315 | ATADEBUG_PRINT(("atastart: xfer %p channel %d drive %d " | | 1315 | ATADEBUG_PRINT(("atastart: xfer %p channel %d drive %d " |
1316 | "wait active\n", xfer, chp->ch_channel, xfer->c_drive), | | 1316 | "wait active\n", xfer, chp->ch_channel, xfer->c_drive), |
1317 | DEBUG_XFERS); | | 1317 | DEBUG_XFERS); |
1318 | cv_signal(&xfer->c_active); | | 1318 | cv_signal(&xfer->c_active); |
1319 | goto out; | | 1319 | goto out; |
1320 | } | | 1320 | } |
1321 | | | 1321 | |
1322 | ATADEBUG_PRINT(("atastart: xfer %p channel %d drive %d\n", xfer, | | 1322 | ATADEBUG_PRINT(("atastart: xfer %p channel %d drive %d\n", xfer, |
1323 | chp->ch_channel, xfer->c_drive), DEBUG_XFERS); | | 1323 | chp->ch_channel, xfer->c_drive), DEBUG_XFERS); |
1324 | if (drvp->drive_flags & ATA_DRIVE_RESET) { | | 1324 | if (drvp->drive_flags & ATA_DRIVE_RESET) { |
1325 | drvp->drive_flags &= ~ATA_DRIVE_RESET; | | 1325 | drvp->drive_flags &= ~ATA_DRIVE_RESET; |
1326 | drvp->state = 0; | | 1326 | drvp->state = 0; |
1327 | } | | 1327 | } |
1328 | | | 1328 | |
1329 | if (ISSET(xfer->c_flags, C_NCQ)) | | 1329 | if (ISSET(xfer->c_flags, C_NCQ)) |
1330 | SET(chp->ch_flags, ATACH_NCQ); | | 1330 | SET(chp->ch_flags, ATACH_NCQ); |
1331 | else | | 1331 | else |
1332 | CLR(chp->ch_flags, ATACH_NCQ); | | 1332 | CLR(chp->ch_flags, ATACH_NCQ); |
1333 | | | 1333 | |
1334 | ata_activate_xfer_locked(chp, xfer); | | 1334 | ata_activate_xfer_locked(chp, xfer); |
1335 | | | 1335 | |
1336 | if (atac->atac_cap & ATAC_CAP_NOIRQ) | | 1336 | if (atac->atac_cap & ATAC_CAP_NOIRQ) |
1337 | KASSERT(xfer->c_flags & C_POLL); | | 1337 | KASSERT(xfer->c_flags & C_POLL); |
1338 | | | 1338 | |
1339 | mutex_exit(&chp->ch_lock); | | 1339 | mutex_exit(&chp->ch_lock); |
1340 | | | 1340 | |
1341 | /* | | 1341 | /* |
1342 | * XXX MPSAFE can't keep the lock, xfer->c_start() might call the done | | 1342 | * XXX MPSAFE can't keep the lock, xfer->c_start() might call the done |
1343 | * routine for polled commands. | | 1343 | * routine for polled commands. |
1344 | */ | | 1344 | */ |
1345 | xfer->c_start(chp, xfer); | | 1345 | xfer->c_start(chp, xfer); |
1346 | | | 1346 | |
1347 | /* Queue more commands if possible */ | | 1347 | /* Queue more commands if possible, but not during recovery */ |
1348 | if (chq->queue_active < chq->queue_openings) | | 1348 | if (!recovery && chq->queue_active < chq->queue_openings) |
1349 | goto again; | | 1349 | goto again; |
1350 | | | 1350 | |
1351 | return; | | 1351 | return; |
1352 | | | 1352 | |
1353 | out: | | 1353 | out: |
1354 | mutex_exit(&chp->ch_lock); | | 1354 | mutex_exit(&chp->ch_lock); |
1355 | } | | 1355 | } |
1356 | | | 1356 | |
1357 | /* | | 1357 | /* |
1358 | * Does it's own locking, does not require splbio(). | | 1358 | * Does it's own locking, does not require splbio(). |
1359 | * flags - whether to block waiting for free xfer | | 1359 | * flags - whether to block waiting for free xfer |
1360 | * openings - limit of openings supported by device, <= 0 means tag not | | 1360 | * openings - limit of openings supported by device, <= 0 means tag not |
1361 | * relevant, and any available xfer can be returned | | 1361 | * relevant, and any available xfer can be returned |
1362 | */ | | 1362 | */ |
struct ata_xfer *
ata_get_xfer_ext(struct ata_channel *chp, int flags, uint8_t openings)
{
	struct ata_queue *chq = chp->ch_queue;
	struct ata_xfer *xfer = NULL;
	uint32_t avail, slot, mask;
	int error;

	ATADEBUG_PRINT(("%s: channel %d flags %x openings %d\n",
	    __func__, chp->ch_channel, flags, openings),
	    DEBUG_XFERS);

	mutex_enter(&chp->ch_lock);

	/*
	 * When openings is just 1, can't reserve anything for
	 * recovery. KASSERT() here is to catch code which naively
	 * relies on C_RECOVERY to work under this condition.
	 */
	KASSERT((flags & C_RECOVERY) == 0 || chq->queue_openings > 1);

	/*
	 * Compute which slots this request may use.  Recovery xfers may
	 * take any slot; normal xfers are restricted to the low
	 * (openings - 1) slots, so the top slot stays reserved for
	 * recovery whenever more than one opening exists.
	 */
	if (flags & C_RECOVERY) {
		mask = UINT32_MAX;
	} else {
		if (openings <= 0 || openings > chq->queue_openings)
			openings = chq->queue_openings;

		if (openings > 1) {
			mask = __BIT(openings - 1) - 1;
		} else {
			mask = UINT32_MAX;
		}
	}

retry:
	avail = ffs32(chq->queue_xfers_avail & mask);
	if (avail == 0) {
		/*
		 * Catch code which tries to get another recovery xfer while
		 * already holding one (wrong recursion).
		 */
		KASSERTMSG((flags & C_RECOVERY) == 0,
		    "recovery xfer busy openings %d mask %x avail %x",
		    openings, mask, chq->queue_xfers_avail);

		/* optionally sleep until a slot frees up (interruptible) */
		if (flags & C_WAIT) {
			chq->queue_flags |= QF_NEED_XFER;
			error = cv_wait_sig(&chq->queue_busy, &chp->ch_lock);
			if (error == 0)
				goto retry;
		}

		goto out;
	}

	/* claim the slot (ffs32() is 1-based, slots are 0-based) */
	slot = avail - 1;
	xfer = &chq->queue_xfers[slot];
	chq->queue_xfers_avail &= ~__BIT(slot);

	KASSERT((chq->active_xfers_used & __BIT(slot)) == 0);

	/* zero everything after the callout member */
	memset(&xfer->c_startzero, 0,
	    sizeof(struct ata_xfer) - offsetof(struct ata_xfer, c_startzero));

out:
	mutex_exit(&chp->ch_lock);
	return xfer;
}
1432 | | | 1432 | |
1433 | /* | | 1433 | /* |
1434 | * ata_deactivate_xfer() must be always called prior to ata_free_xfer() | | 1434 | * ata_deactivate_xfer() must be always called prior to ata_free_xfer() |
1435 | */ | | 1435 | */ |
void
ata_free_xfer(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct ata_queue *chq = chp->ch_queue;

	mutex_enter(&chp->ch_lock);

	if (xfer->c_flags & (C_WAITACT|C_WAITTIMO)) {
		/*
		 * Someone is waiting for this xfer, so we can't free now.
		 * Mark it C_FREE and signal; the waiter performs the
		 * actual free once it wakes up and sees the flag.
		 */
		xfer->c_flags |= C_FREE;
		cv_signal(&xfer->c_active);
		goto out;
	}

#if NATA_PIOBM		/* XXX wdc dependent code */
	if (xfer->c_flags & C_PIOBM) {
		struct wdc_softc *wdc = CHAN_TO_WDC(chp);

		/* finish the busmastering PIO */
		(*wdc->piobm_done)(wdc->dma_arg,
		    chp->ch_channel, xfer->c_drive);
		chp->ch_flags &= ~(ATACH_DMA_WAIT | ATACH_PIOBM_WAIT);
	}
#endif

	/* xfer must be deactivated, and not already free */
	KASSERT((chq->active_xfers_used & __BIT(xfer->c_slot)) == 0);
	KASSERT((chq->queue_xfers_avail & __BIT(xfer->c_slot)) == 0);
	/* return the slot to the free bitmap */
	chq->queue_xfers_avail |= __BIT(xfer->c_slot);

out:
	/* wake up anyone blocked in ata_get_xfer_ext() waiting for a slot */
	if (chq->queue_flags & QF_NEED_XFER) {
		chq->queue_flags &= ~QF_NEED_XFER;
		cv_broadcast(&chq->queue_busy);
	}

	mutex_exit(&chp->ch_lock);
}
1473 | | | 1473 | |
/*
 * Move an xfer from the pending queue (queue_xfer) to the list of
 * active transfers (active_xfers) and account for its slot.
 * Caller must hold ch_lock and there must be a free opening.
 */
static void
ata_activate_xfer_locked(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct ata_queue * const chq = chp->ch_queue;

	KASSERT(mutex_owned(&chp->ch_lock));

	KASSERT(chq->queue_active < chq->queue_openings);
	KASSERT((chq->active_xfers_used & __BIT(xfer->c_slot)) == 0);

	TAILQ_REMOVE(&chq->queue_xfer, xfer, c_xferchain);
	TAILQ_INSERT_TAIL(&chq->active_xfers, xfer, c_activechain);
	chq->active_xfers_used |= __BIT(xfer->c_slot);
	chq->queue_active++;
}
1489 | | | 1489 | |
/*
 * Remove an xfer from the list of active transfers and release its
 * queue_active accounting.  Counterpart of ata_activate_xfer_locked().
 */
void
ata_deactivate_xfer(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct ata_queue * const chq = chp->ch_queue;

	mutex_enter(&chp->ch_lock);

	KASSERT(chq->queue_active > 0);
	KASSERT((chq->active_xfers_used & __BIT(xfer->c_slot)) != 0);

	/*
	 * Stop the timeout.  If the callout handler is being invoked
	 * right now, flag the race with C_WAITTIMO so that
	 * ata_timo_xfer_check() and ata_free_xfer() can resolve it.
	 */
	callout_stop(&xfer->c_timo_callout);

	if (callout_invoking(&xfer->c_timo_callout))
		xfer->c_flags |= C_WAITTIMO;

	TAILQ_REMOVE(&chq->active_xfers, xfer, c_activechain);
	chq->active_xfers_used &= ~__BIT(xfer->c_slot);
	chq->queue_active--;

	mutex_exit(&chp->ch_lock);
}
1511 | | | 1511 | |
/*
 * Called in c_intr hook. Must be called before any deactivations
 * are done - if there is drain pending, it calls c_kill_xfer hook which
 * deactivates the xfer.
 * Calls c_kill_xfer with channel lock free.
 * Returns true if caller should just exit without further processing.
 * Caller must not further access any part of xfer or any related controller
 * structures in that case, it should just return.
 */
bool
ata_waitdrain_xfer_check(struct ata_channel *chp, struct ata_xfer *xfer)
{
	int drive = xfer->c_drive;
	bool draining = false;

	mutex_enter(&chp->ch_lock);

	if (chp->ch_drive[drive].drive_flags & ATA_DRIVE_WAITDRAIN) {
		/* Per the contract above, the kill hook runs unlocked. */
		mutex_exit(&chp->ch_lock);

		(*xfer->c_kill_xfer)(chp, xfer, KILL_GONE);

		mutex_enter(&chp->ch_lock);
		chp->ch_drive[drive].drive_flags &= ~ATA_DRIVE_WAITDRAIN;
		/* Wake up ata_kill_pending() sleeping on queue_drain. */
		cv_signal(&chp->ch_queue->queue_drain);
		draining = true;
	}

	mutex_exit(&chp->ch_lock);

	return draining;
}
1544 | | | 1544 | |
/*
 * Check for race of normal transfer handling vs. timeout.
 *
 * Returns true if the timeout handler lost the race (the xfer was
 * completed/freed while the callout was firing) and the caller must
 * not touch the xfer any further; false if the timeout is genuine.
 */
bool
ata_timo_xfer_check(struct ata_xfer *xfer)
{
	struct ata_channel *chp = xfer->c_chp;
	struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive];

	mutex_enter(&chp->ch_lock);

	/* Acknowledge the invocation; clears callout_invoking() state. */
	callout_ack(&xfer->c_timo_callout);

	if (xfer->c_flags & C_WAITTIMO) {
		/* ata_deactivate_xfer() saw the callout mid-invocation. */
		xfer->c_flags &= ~C_WAITTIMO;

		/* Handle race vs. ata_free_xfer() */
		if (xfer->c_flags & C_FREE) {
			xfer->c_flags &= ~C_FREE;
			mutex_exit(&chp->ch_lock);

			aprint_normal_dev(drvp->drv_softc,
			    "xfer %d freed while invoking timeout\n",
			    xfer->c_slot);

			/* Perform the free deferred by ata_free_xfer(). */
			ata_free_xfer(chp, xfer);
			return true;
		}

		/* Handle race vs. callout_stop() in ata_deactivate_xfer() */
		if (!callout_expired(&xfer->c_timo_callout)) {
			mutex_exit(&chp->ch_lock);

			aprint_normal_dev(drvp->drv_softc,
			    "xfer %d deactivated while invoking timeout\n",
			    xfer->c_slot);
			return true;
		}
	}

	mutex_exit(&chp->ch_lock);

	/* No race, proceed with timeout handling */
	return false;
}
1590 | | | 1590 | |
1591 | void | | 1591 | void |
1592 | ata_timeout(void *v) | | 1592 | ata_timeout(void *v) |
1593 | { | | 1593 | { |
1594 | struct ata_xfer *xfer = v; | | 1594 | struct ata_xfer *xfer = v; |
1595 | int s; | | 1595 | int s; |
1596 | | | 1596 | |
1597 | ATADEBUG_PRINT(("%s: slot %d\n", __func__, xfer->c_slot), | | 1597 | ATADEBUG_PRINT(("%s: slot %d\n", __func__, xfer->c_slot), |
1598 | DEBUG_FUNCS|DEBUG_XFERS); | | 1598 | DEBUG_FUNCS|DEBUG_XFERS); |
1599 | | | 1599 | |
1600 | s = splbio(); /* XXX MPSAFE */ | | 1600 | s = splbio(); /* XXX MPSAFE */ |
1601 | | | 1601 | |
1602 | if (ata_timo_xfer_check(xfer)) { | | 1602 | if (ata_timo_xfer_check(xfer)) { |
1603 | /* Already logged */ | | 1603 | /* Already logged */ |
1604 | goto out; | | 1604 | goto out; |
1605 | } | | 1605 | } |
1606 | | | 1606 | |
1607 | /* Mark as timed out. Do not print anything, wd(4) will. */ | | 1607 | /* Mark as timed out. Do not print anything, wd(4) will. */ |
1608 | xfer->c_flags |= C_TIMEOU; | | 1608 | xfer->c_flags |= C_TIMEOU; |
1609 | xfer->c_intr(xfer->c_chp, xfer, 0); | | 1609 | xfer->c_intr(xfer->c_chp, xfer, 0); |
1610 | | | 1610 | |
1611 | out: | | 1611 | out: |
1612 | splx(s); | | 1612 | splx(s); |
1613 | } | | 1613 | } |
1614 | | | 1614 | |
1615 | /* | | 1615 | /* |
1616 | * Kill off all active xfers for a ata_channel. | | 1616 | * Kill off all active xfers for a ata_channel. |
1617 | * | | 1617 | * |
1618 | * Must be called at splbio(). | | 1618 | * Must be called at splbio(). |
1619 | */ | | 1619 | */ |
1620 | void | | 1620 | void |
1621 | ata_kill_active(struct ata_channel *chp, int reason, int flags) | | 1621 | ata_kill_active(struct ata_channel *chp, int reason, int flags) |
1622 | { | | 1622 | { |
1623 | struct ata_queue * const chq = chp->ch_queue; | | 1623 | struct ata_queue * const chq = chp->ch_queue; |
1624 | struct ata_xfer *xfer, *xfernext; | | 1624 | struct ata_xfer *xfer, *xfernext; |
1625 | | | 1625 | |
1626 | TAILQ_FOREACH_SAFE(xfer, &chq->active_xfers, c_activechain, xfernext) { | | 1626 | TAILQ_FOREACH_SAFE(xfer, &chq->active_xfers, c_activechain, xfernext) { |
1627 | (*xfer->c_kill_xfer)(xfer->c_chp, xfer, reason); | | 1627 | (*xfer->c_kill_xfer)(xfer->c_chp, xfer, reason); |
1628 | } | | 1628 | } |
1629 | | | 1629 | |
1630 | if (flags & AT_RST_EMERG) | | 1630 | if (flags & AT_RST_EMERG) |
1631 | ata_queue_reset(chq); | | 1631 | ata_queue_reset(chq); |
1632 | } | | 1632 | } |
1633 | | | 1633 | |
/*
 * Kill off all pending xfers for a drive.
 *
 * Removes and kills every queued (not yet active) transfer for the
 * drive, then blocks until all of the drive's active transfers have
 * drained, via the ATA_DRIVE_WAITDRAIN / queue_drain handshake with
 * ata_waitdrain_xfer_check().
 */
void
ata_kill_pending(struct ata_drive_datas *drvp)
{
	struct ata_channel * const chp = drvp->chnl_softc;
	struct ata_queue * const chq = chp->ch_queue;
	struct ata_xfer *xfer, *xfernext;

	mutex_enter(&chp->ch_lock);

	/* Kill all pending transfers */
	TAILQ_FOREACH_SAFE(xfer, &chq->queue_xfer, c_xferchain, xfernext) {
		KASSERT(xfer->c_chp == chp);

		if (xfer->c_drive != drvp->drive)
			continue;

		TAILQ_REMOVE(&chp->ch_queue->queue_xfer, xfer, c_xferchain);

		/*
		 * Keep the lock, so that we get deadlock (and 'locking against
		 * myself' with LOCKDEBUG), instead of silent
		 * data corruption, if the hook tries to call back into
		 * middle layer for inactive xfer.
		 */
		(*xfer->c_kill_xfer)(chp, xfer, KILL_GONE_INACTIVE);
	}

	/* Wait until all active transfers on the drive finish */
	while (chq->queue_active > 0) {
		bool drv_active = false;

		/* Scan for any still-active xfer belonging to this drive. */
		TAILQ_FOREACH(xfer, &chq->active_xfers, c_activechain) {
			KASSERT(xfer->c_chp == chp);

			if (xfer->c_drive == drvp->drive) {
				drv_active = true;
				break;
			}
		}

		if (!drv_active) {
			/* all finished */
			break;
		}

		/* Ask ata_waitdrain_xfer_check() to signal us when drained. */
		drvp->drive_flags |= ATA_DRIVE_WAITDRAIN;
		cv_wait(&chq->queue_drain, &chp->ch_lock);
	}

	mutex_exit(&chp->ch_lock);
}
1688 | | | 1688 | |
/*
 * Bump the channel queue's freeze counter; ata_channel_thaw() is the
 * matching decrement.  Caller holds ch_lock (hence the _locked suffix).
 */
static void
ata_channel_freeze_locked(struct ata_channel *chp)
{
	chp->ch_queue->queue_freeze++;
}
1694 | | | 1694 | |
1695 | void | | 1695 | void |
1696 | ata_channel_freeze(struct ata_channel *chp) | | 1696 | ata_channel_freeze(struct ata_channel *chp) |
1697 | { | | 1697 | { |
1698 | mutex_enter(&chp->ch_lock); | | 1698 | mutex_enter(&chp->ch_lock); |
1699 | ata_channel_freeze_locked(chp); | | 1699 | ata_channel_freeze_locked(chp); |
1700 | mutex_exit(&chp->ch_lock); | | 1700 | mutex_exit(&chp->ch_lock); |
1701 | } | | 1701 | } |
1702 | | | 1702 | |
1703 | void | | 1703 | void |
1704 | ata_channel_thaw(struct ata_channel *chp) | | 1704 | ata_channel_thaw(struct ata_channel *chp) |
1705 | { | | 1705 | { |
1706 | mutex_enter(&chp->ch_lock); | | 1706 | mutex_enter(&chp->ch_lock); |
1707 | chp->ch_queue->queue_freeze--; | | 1707 | chp->ch_queue->queue_freeze--; |
1708 | mutex_exit(&chp->ch_lock); | | 1708 | mutex_exit(&chp->ch_lock); |
1709 | } | | 1709 | } |
1710 | | | 1710 | |
1711 | /* | | 1711 | /* |
1712 | * ata_reset_channel: | | 1712 | * ata_reset_channel: |
1713 | * | | 1713 | * |
1714 | * Reset and ATA channel. | | 1714 | * Reset and ATA channel. |
1715 | * | | 1715 | * |
1716 | * MUST BE CALLED AT splbio()! | | 1716 | * MUST BE CALLED AT splbio()! |
1717 | */ | | 1717 | */ |
1718 | void | | 1718 | void |
1719 | ata_reset_channel(struct ata_channel *chp, int flags) | | 1719 | ata_reset_channel(struct ata_channel *chp, int flags) |
1720 | { | | 1720 | { |
1721 | struct atac_softc *atac = chp->ch_atac; | | 1721 | struct atac_softc *atac = chp->ch_atac; |
1722 | int drive; | | 1722 | int drive; |
1723 | | | 1723 | |
1724 | #ifdef ATA_DEBUG | | 1724 | #ifdef ATA_DEBUG |
1725 | int spl1, spl2; | | 1725 | int spl1, spl2; |
1726 | | | 1726 | |
1727 | spl1 = splbio(); | | 1727 | spl1 = splbio(); |
1728 | spl2 = splbio(); | | 1728 | spl2 = splbio(); |
1729 | if (spl2 != spl1) { | | 1729 | if (spl2 != spl1) { |
1730 | printf("ata_reset_channel: not at splbio()\n"); | | 1730 | printf("ata_reset_channel: not at splbio()\n"); |
1731 | panic("ata_reset_channel"); | | 1731 | panic("ata_reset_channel"); |
1732 | } | | 1732 | } |
1733 | splx(spl2); | | 1733 | splx(spl2); |
1734 | splx(spl1); | | 1734 | splx(spl1); |
1735 | #endif /* ATA_DEBUG */ | | 1735 | #endif /* ATA_DEBUG */ |
1736 | | | 1736 | |
1737 | ata_channel_freeze(chp); | | 1737 | ata_channel_freeze(chp); |
1738 | | | 1738 | |
1739 | /* | | 1739 | /* |
1740 | * If we can poll or wait it's OK, otherwise wake up the | | 1740 | * If we can poll or wait it's OK, otherwise wake up the |
1741 | * kernel thread to do it for us. | | 1741 | * kernel thread to do it for us. |
1742 | */ | | 1742 | */ |
1743 | ATADEBUG_PRINT(("ata_reset_channel flags 0x%x ch_flags 0x%x\n", | | 1743 | ATADEBUG_PRINT(("ata_reset_channel flags 0x%x ch_flags 0x%x\n", |
1744 | flags, chp->ch_flags), DEBUG_FUNCS | DEBUG_XFERS); | | 1744 | flags, chp->ch_flags), DEBUG_FUNCS | DEBUG_XFERS); |
1745 | if ((flags & (AT_POLL | AT_WAIT)) == 0) { | | 1745 | if ((flags & (AT_POLL | AT_WAIT)) == 0) { |
1746 | if (chp->ch_flags & ATACH_TH_RESET) { | | 1746 | if (chp->ch_flags & ATACH_TH_RESET) { |
1747 | /* No need to schedule a reset more than one time. */ | | 1747 | /* No need to schedule a reset more than one time. */ |
1748 | ata_channel_thaw(chp); | | 1748 | ata_channel_thaw(chp); |
1749 | return; | | 1749 | return; |
1750 | } | | 1750 | } |
1751 | mutex_enter(&chp->ch_lock); | | 1751 | mutex_enter(&chp->ch_lock); |
1752 | chp->ch_flags |= ATACH_TH_RESET; | | 1752 | chp->ch_flags |= ATACH_TH_RESET; |
1753 | chp->ch_reset_flags = flags & AT_RST_EMERG; | | 1753 | chp->ch_reset_flags = flags & AT_RST_EMERG; |
1754 | cv_signal(&chp->ch_thr_idle); | | 1754 | cv_signal(&chp->ch_thr_idle); |
1755 | mutex_exit(&chp->ch_lock); | | 1755 | mutex_exit(&chp->ch_lock); |
1756 | return; | | 1756 | return; |
1757 | } | | 1757 | } |
1758 | | | 1758 | |
1759 | (*atac->atac_bustype_ata->ata_reset_channel)(chp, flags); | | 1759 | (*atac->atac_bustype_ata->ata_reset_channel)(chp, flags); |
1760 | | | 1760 | |
1761 | mutex_enter(&chp->ch_lock); | | 1761 | mutex_enter(&chp->ch_lock); |
1762 | KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL); | | 1762 | KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL); |
1763 | for (drive = 0; drive < chp->ch_ndrives; drive++) | | 1763 | for (drive = 0; drive < chp->ch_ndrives; drive++) |
1764 | chp->ch_drive[drive].state = 0; | | 1764 | chp->ch_drive[drive].state = 0; |
1765 | | | 1765 | |
1766 | chp->ch_flags &= ~ATACH_TH_RESET; | | 1766 | chp->ch_flags &= ~ATACH_TH_RESET; |
1767 | mutex_exit(&chp->ch_lock); | | 1767 | mutex_exit(&chp->ch_lock); |
1768 | | | 1768 | |
1769 | if (flags & AT_RST_EMERG) { | | 1769 | if (flags & AT_RST_EMERG) { |
1770 | /* make sure that we can use polled commands */ | | 1770 | /* make sure that we can use polled commands */ |
1771 | ata_queue_reset(chp->ch_queue); | | 1771 | ata_queue_reset(chp->ch_queue); |
1772 | } else { | | 1772 | } else { |
1773 | ata_channel_thaw(chp); | | 1773 | ata_channel_thaw(chp); |
1774 | atastart(chp); | | 1774 | atastart(chp); |
1775 | } | | 1775 | } |
1776 | } | | 1776 | } |
1777 | | | 1777 | |
1778 | int | | 1778 | int |
1779 | ata_addref(struct ata_channel *chp) | | 1779 | ata_addref(struct ata_channel *chp) |
1780 | { | | 1780 | { |
1781 | struct atac_softc *atac = chp->ch_atac; | | 1781 | struct atac_softc *atac = chp->ch_atac; |
1782 | struct scsipi_adapter *adapt = &atac->atac_atapi_adapter._generic; | | 1782 | struct scsipi_adapter *adapt = &atac->atac_atapi_adapter._generic; |
1783 | int s, error = 0; | | 1783 | int s, error = 0; |
1784 | | | 1784 | |
1785 | s = splbio(); | | 1785 | s = splbio(); |
1786 | if (adapt->adapt_refcnt++ == 0 && | | 1786 | if (adapt->adapt_refcnt++ == 0 && |
1787 | adapt->adapt_enable != NULL) { | | 1787 | adapt->adapt_enable != NULL) { |
1788 | error = (*adapt->adapt_enable)(atac->atac_dev, 1); | | 1788 | error = (*adapt->adapt_enable)(atac->atac_dev, 1); |
1789 | if (error) | | 1789 | if (error) |
1790 | adapt->adapt_refcnt--; | | 1790 | adapt->adapt_refcnt--; |
1791 | } | | 1791 | } |
1792 | splx(s); | | 1792 | splx(s); |
1793 | return (error); | | 1793 | return (error); |
1794 | } | | 1794 | } |
1795 | | | 1795 | |
1796 | void | | 1796 | void |
1797 | ata_delref(struct ata_channel *chp) | | 1797 | ata_delref(struct ata_channel *chp) |
1798 | { | | 1798 | { |
1799 | struct atac_softc *atac = chp->ch_atac; | | 1799 | struct atac_softc *atac = chp->ch_atac; |
1800 | struct scsipi_adapter *adapt = &atac->atac_atapi_adapter._generic; | | 1800 | struct scsipi_adapter *adapt = &atac->atac_atapi_adapter._generic; |
1801 | int s; | | 1801 | int s; |
1802 | | | 1802 | |
1803 | s = splbio(); | | 1803 | s = splbio(); |
1804 | if (adapt->adapt_refcnt-- == 1 && | | 1804 | if (adapt->adapt_refcnt-- == 1 && |
1805 | adapt->adapt_enable != NULL) | | 1805 | adapt->adapt_enable != NULL) |
1806 | (void) (*adapt->adapt_enable)(atac->atac_dev, 0); | | 1806 | (void) (*adapt->adapt_enable)(atac->atac_dev, 0); |
1807 | splx(s); | | 1807 | splx(s); |
1808 | } | | 1808 | } |
1809 | | | 1809 | |
/*
 * Print (via aprint_verbose) the negotiated transfer modes of every
 * configured drive on the channel: PIO/DMA/Ultra-DMA mode numbers,
 * whether DMA is used, and NCQ/FUA capability where compiled in.
 */
void
ata_print_modes(struct ata_channel *chp)
{
	struct atac_softc *atac = chp->ch_atac;
	int drive;
	struct ata_drive_datas *drvp;

	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	for (drive = 0; drive < chp->ch_ndrives; drive++) {
		drvp = &chp->ch_drive[drive];
		/* Skip empty slots and drives without an attached device. */
		if (drvp->drive_type == ATA_DRIVET_NONE ||
		    drvp->drv_softc == NULL)
			continue;
		aprint_verbose("%s(%s:%d:%d): using PIO mode %d",
			device_xname(drvp->drv_softc),
			device_xname(atac->atac_dev),
			chp->ch_channel, drvp->drive, drvp->PIO_mode);
#if NATA_DMA
		if (drvp->drive_flags & ATA_DRIVE_DMA)
			aprint_verbose(", DMA mode %d", drvp->DMA_mode);
#if NATA_UDMA
		if (drvp->drive_flags & ATA_DRIVE_UDMA) {
			aprint_verbose(", Ultra-DMA mode %d", drvp->UDMA_mode);
			if (drvp->UDMA_mode == 2)
				aprint_verbose(" (Ultra/33)");
			else if (drvp->UDMA_mode == 4)
				aprint_verbose(" (Ultra/66)");
			else if (drvp->UDMA_mode == 5)
				aprint_verbose(" (Ultra/100)");
			else if (drvp->UDMA_mode == 6)
				aprint_verbose(" (Ultra/133)");
		}
#endif	/* NATA_UDMA */
#endif	/* NATA_DMA */
#if NATA_DMA || NATA_PIOBM
		/* "if (0 || ...)" lets each clause sit under its own #if. */
		if (0
#if NATA_DMA
		    || (drvp->drive_flags & (ATA_DRIVE_DMA | ATA_DRIVE_UDMA))
#endif
#if NATA_PIOBM
		    /* PIOBM capable controllers use DMA for PIO commands */
		    || (atac->atac_cap & ATAC_CAP_PIOBM)
#endif
		    )
			aprint_verbose(" (using DMA)");

		if (drvp->drive_flags & ATA_DRIVE_NCQ) {
			aprint_verbose(", NCQ (%d tags)%s",
			    ATA_REAL_OPENINGS(chp->ch_queue->queue_openings),
			    (drvp->drive_flags & ATA_DRIVE_NCQ_PRIO)
			    ? " w/PRIO" : "");
		} else if (drvp->drive_flags & ATA_DRIVE_WFUA)
			aprint_verbose(", WRITE DMA FUA EXT");

#endif	/* NATA_DMA || NATA_PIOBM */
		aprint_verbose("\n");
	}
}
1868 | | | 1868 | |
#if NATA_DMA
/*
 * downgrade the transfer mode of a drive after an error. return 1 if
 * downgrade was possible, 0 otherwise.
 *
 * MUST BE CALLED AT splbio()!
 *
 * After a successful downgrade the new modes are programmed into the
 * controller, printed, and the channel is reset so all drives get set
 * up again.
 */
int
ata_downgrade_mode(struct ata_drive_datas *drvp, int flags)
{
	struct ata_channel *chp = drvp->chnl_softc;
	struct atac_softc *atac = chp->ch_atac;
	device_t drv_dev = drvp->drv_softc;
	int cf_flags = device_cfdata(drv_dev)->cf_flags;

	/* if drive or controller don't know its mode, we can't do much */
	if ((drvp->drive_flags & ATA_DRIVE_MODE) == 0 ||
	    (atac->atac_set_modes == NULL))
		return 0;
	/* current drive mode was set by a config flag, let it this way */
	if ((cf_flags & ATA_CONFIG_PIO_SET) ||
	    (cf_flags & ATA_CONFIG_DMA_SET) ||
	    (cf_flags & ATA_CONFIG_UDMA_SET))
		return 0;

#if NATA_UDMA
	/*
	 * If we were using Ultra-DMA mode, downgrade to the next lower mode.
	 */
	if ((drvp->drive_flags & ATA_DRIVE_UDMA) && drvp->UDMA_mode >= 2) {
		drvp->UDMA_mode--;
		aprint_error_dev(drv_dev,
		    "transfer error, downgrading to Ultra-DMA mode %d\n",
		    drvp->UDMA_mode);
	}
#endif

	/*
	 * If we were using ultra-DMA, don't downgrade to multiword DMA.
	 *
	 * NOTE(review): with NATA_UDMA disabled this "else if" has no
	 * matching "if" above - presumably NATA_DMA implies NATA_UDMA in
	 * practice; verify against the build configuration.
	 */
	else if (drvp->drive_flags & (ATA_DRIVE_DMA | ATA_DRIVE_UDMA)) {
		drvp->drive_flags &= ~(ATA_DRIVE_DMA | ATA_DRIVE_UDMA);
		drvp->PIO_mode = drvp->PIO_cap;
		aprint_error_dev(drv_dev,
		    "transfer error, downgrading to PIO mode %d\n",
		    drvp->PIO_mode);
	} else /* already using PIO, can't downgrade */
		return 0;

	(*atac->atac_set_modes)(chp);
	ata_print_modes(chp);
	/* reset the channel, which will schedule all drives for setup */
	ata_reset_channel(chp, flags);
	return 1;
}
#endif	/* NATA_DMA */
1925 | | | 1925 | |
1926 | /* | | 1926 | /* |
1927 | * Probe drive's capabilities, for use by the controller later | | 1927 | * Probe drive's capabilities, for use by the controller later |
1928 | * Assumes drvp points to an existing drive. | | 1928 | * Assumes drvp points to an existing drive. |
1929 | */ | | 1929 | */ |
1930 | void | | 1930 | void |
1931 | ata_probe_caps(struct ata_drive_datas *drvp) | | 1931 | ata_probe_caps(struct ata_drive_datas *drvp) |
1932 | { | | 1932 | { |
1933 | struct ataparams params, params2; | | 1933 | struct ataparams params, params2; |
1934 | struct ata_channel *chp = drvp->chnl_softc; | | 1934 | struct ata_channel *chp = drvp->chnl_softc; |
1935 | struct atac_softc *atac = chp->ch_atac; | | 1935 | struct atac_softc *atac = chp->ch_atac; |
1936 | device_t drv_dev = drvp->drv_softc; | | 1936 | device_t drv_dev = drvp->drv_softc; |
1937 | int i, printed = 0, s; | | 1937 | int i, printed = 0, s; |
1938 | const char *sep = ""; | | 1938 | const char *sep = ""; |
1939 | int cf_flags; | | 1939 | int cf_flags; |
1940 | | | 1940 | |
1941 | if (ata_get_params(drvp, AT_WAIT, ¶ms) != CMD_OK) { | | 1941 | if (ata_get_params(drvp, AT_WAIT, ¶ms) != CMD_OK) { |
1942 | /* IDENTIFY failed. Can't tell more about the device */ | | 1942 | /* IDENTIFY failed. Can't tell more about the device */ |
1943 | return; | | 1943 | return; |
1944 | } | | 1944 | } |
1945 | if ((atac->atac_cap & (ATAC_CAP_DATA16 | ATAC_CAP_DATA32)) == | | 1945 | if ((atac->atac_cap & (ATAC_CAP_DATA16 | ATAC_CAP_DATA32)) == |
1946 | (ATAC_CAP_DATA16 | ATAC_CAP_DATA32)) { | | 1946 | (ATAC_CAP_DATA16 | ATAC_CAP_DATA32)) { |
1947 | /* | | 1947 | /* |
1948 | * Controller claims 16 and 32 bit transfers. | | 1948 | * Controller claims 16 and 32 bit transfers. |
1949 | * Re-do an IDENTIFY with 32-bit transfers, | | 1949 | * Re-do an IDENTIFY with 32-bit transfers, |
1950 | * and compare results. | | 1950 | * and compare results. |
1951 | */ | | 1951 | */ |
1952 | s = splbio(); | | 1952 | s = splbio(); |
1953 | drvp->drive_flags |= ATA_DRIVE_CAP32; | | 1953 | drvp->drive_flags |= ATA_DRIVE_CAP32; |
1954 | splx(s); | | 1954 | splx(s); |
1955 | ata_get_params(drvp, AT_WAIT, ¶ms2); | | 1955 | ata_get_params(drvp, AT_WAIT, ¶ms2); |
1956 | if (memcmp(¶ms, ¶ms2, sizeof(struct ataparams)) != 0) { | | 1956 | if (memcmp(¶ms, ¶ms2, sizeof(struct ataparams)) != 0) { |
1957 | /* Not good. fall back to 16bits */ | | 1957 | /* Not good. fall back to 16bits */ |
1958 | s = splbio(); | | 1958 | s = splbio(); |
1959 | drvp->drive_flags &= ~ATA_DRIVE_CAP32; | | 1959 | drvp->drive_flags &= ~ATA_DRIVE_CAP32; |
1960 | splx(s); | | 1960 | splx(s); |
1961 | } else { | | 1961 | } else { |
1962 | aprint_verbose_dev(drv_dev, "32-bit data port\n"); | | 1962 | aprint_verbose_dev(drv_dev, "32-bit data port\n"); |
1963 | } | | 1963 | } |
1964 | } | | 1964 | } |
1965 | #if 0 /* Some ultra-DMA drives claims to only support ATA-3. sigh */ | | 1965 | #if 0 /* Some ultra-DMA drives claims to only support ATA-3. sigh */ |
1966 | if (params.atap_ata_major > 0x01 && | | 1966 | if (params.atap_ata_major > 0x01 && |
1967 | params.atap_ata_major != 0xffff) { | | 1967 | params.atap_ata_major != 0xffff) { |
1968 | for (i = 14; i > 0; i--) { | | 1968 | for (i = 14; i > 0; i--) { |
1969 | if (params.atap_ata_major & (1 << i)) { | | 1969 | if (params.atap_ata_major & (1 << i)) { |
1970 | aprint_verbose_dev(drv_dev, | | 1970 | aprint_verbose_dev(drv_dev, |
1971 | "ATA version %d\n", i); | | 1971 | "ATA version %d\n", i); |
1972 | drvp->ata_vers = i; | | 1972 | drvp->ata_vers = i; |
1973 | break; | | 1973 | break; |
1974 | } | | 1974 | } |
1975 | } | | 1975 | } |
1976 | } | | 1976 | } |
1977 | #endif | | 1977 | #endif |
1978 | | | 1978 | |
1979 | /* An ATAPI device is at last PIO mode 3 */ | | 1979 | /* An ATAPI device is at last PIO mode 3 */ |
1980 | if (drvp->drive_type == ATA_DRIVET_ATAPI) | | 1980 | if (drvp->drive_type == ATA_DRIVET_ATAPI) |
1981 | drvp->PIO_mode = 3; | | 1981 | drvp->PIO_mode = 3; |
1982 | | | 1982 | |
1983 | /* | | 1983 | /* |
1984 | * It's not in the specs, but it seems that some drive | | 1984 | * It's not in the specs, but it seems that some drive |
1985 | * returns 0xffff in atap_extensions when this field is invalid | | 1985 | * returns 0xffff in atap_extensions when this field is invalid |
1986 | */ | | 1986 | */ |
1987 | if (params.atap_extensions != 0xffff && | | 1987 | if (params.atap_extensions != 0xffff && |
1988 | (params.atap_extensions & WDC_EXT_MODES)) { | | 1988 | (params.atap_extensions & WDC_EXT_MODES)) { |
1989 | /* | | 1989 | /* |
1990 | * XXX some drives report something wrong here (they claim to | | 1990 | * XXX some drives report something wrong here (they claim to |
1991 | * support PIO mode 8 !). As mode is coded on 3 bits in | | 1991 | * support PIO mode 8 !). As mode is coded on 3 bits in |
1992 | * SET FEATURE, limit it to 7 (so limit i to 4). | | 1992 | * SET FEATURE, limit it to 7 (so limit i to 4). |
1993 | * If higher mode than 7 is found, abort. | | 1993 | * If higher mode than 7 is found, abort. |
1994 | */ | | 1994 | */ |
1995 | for (i = 7; i >= 0; i--) { | | 1995 | for (i = 7; i >= 0; i--) { |
1996 | if ((params.atap_piomode_supp & (1 << i)) == 0) | | 1996 | if ((params.atap_piomode_supp & (1 << i)) == 0) |
1997 | continue; | | 1997 | continue; |
1998 | if (i > 4) | | 1998 | if (i > 4) |
1999 | return; | | 1999 | return; |
2000 | /* | | 2000 | /* |
2001 | * See if mode is accepted. | | 2001 | * See if mode is accepted. |
2002 | * If the controller can't set its PIO mode, | | 2002 | * If the controller can't set its PIO mode, |
2003 | * assume the defaults are good, so don't try | | 2003 | * assume the defaults are good, so don't try |
2004 | * to set it | | 2004 | * to set it |
2005 | */ | | 2005 | */ |
2006 | if (atac->atac_set_modes) | | 2006 | if (atac->atac_set_modes) |
2007 | /* | | 2007 | /* |
2008 | * It's OK to pool here, it's fast enough | | 2008 | * It's OK to pool here, it's fast enough |
2009 | * to not bother waiting for interrupt | | 2009 | * to not bother waiting for interrupt |
2010 | */ | | 2010 | */ |
2011 | if (ata_set_mode(drvp, 0x08 | (i + 3), | | 2011 | if (ata_set_mode(drvp, 0x08 | (i + 3), |
2012 | AT_WAIT) != CMD_OK) | | 2012 | AT_WAIT) != CMD_OK) |
2013 | continue; | | 2013 | continue; |
2014 | if (!printed) { | | 2014 | if (!printed) { |
2015 | aprint_verbose_dev(drv_dev, | | 2015 | aprint_verbose_dev(drv_dev, |
2016 | "drive supports PIO mode %d", i + 3); | | 2016 | "drive supports PIO mode %d", i + 3); |
2017 | sep = ","; | | 2017 | sep = ","; |
2018 | printed = 1; | | 2018 | printed = 1; |
2019 | } | | 2019 | } |
2020 | /* | | 2020 | /* |
2021 | * If controller's driver can't set its PIO mode, | | 2021 | * If controller's driver can't set its PIO mode, |
2022 | * get the highter one for the drive. | | 2022 | * get the highter one for the drive. |
2023 | */ | | 2023 | */ |
2024 | if (atac->atac_set_modes == NULL || | | 2024 | if (atac->atac_set_modes == NULL || |
2025 | atac->atac_pio_cap >= i + 3) { | | 2025 | atac->atac_pio_cap >= i + 3) { |
2026 | drvp->PIO_mode = i + 3; | | 2026 | drvp->PIO_mode = i + 3; |
2027 | drvp->PIO_cap = i + 3; | | 2027 | drvp->PIO_cap = i + 3; |
2028 | break; | | 2028 | break; |
2029 | } | | 2029 | } |
2030 | } | | 2030 | } |
2031 | if (!printed) { | | 2031 | if (!printed) { |
2032 | /* | | 2032 | /* |
2033 | * We didn't find a valid PIO mode. | | 2033 | * We didn't find a valid PIO mode. |
2034 | * Assume the values returned for DMA are buggy too | | 2034 | * Assume the values returned for DMA are buggy too |
2035 | */ | | 2035 | */ |
2036 | return; | | 2036 | return; |
2037 | } | | 2037 | } |
2038 | s = splbio(); | | 2038 | s = splbio(); |
2039 | drvp->drive_flags |= ATA_DRIVE_MODE; | | 2039 | drvp->drive_flags |= ATA_DRIVE_MODE; |
2040 | splx(s); | | 2040 | splx(s); |
2041 | printed = 0; | | 2041 | printed = 0; |
2042 | for (i = 7; i >= 0; i--) { | | 2042 | for (i = 7; i >= 0; i--) { |
2043 | if ((params.atap_dmamode_supp & (1 << i)) == 0) | | 2043 | if ((params.atap_dmamode_supp & (1 << i)) == 0) |
2044 | continue; | | 2044 | continue; |
2045 | #if NATA_DMA | | 2045 | #if NATA_DMA |
2046 | if ((atac->atac_cap & ATAC_CAP_DMA) && | | 2046 | if ((atac->atac_cap & ATAC_CAP_DMA) && |
2047 | atac->atac_set_modes != NULL) | | 2047 | atac->atac_set_modes != NULL) |
2048 | if (ata_set_mode(drvp, 0x20 | i, AT_WAIT) | | 2048 | if (ata_set_mode(drvp, 0x20 | i, AT_WAIT) |
2049 | != CMD_OK) | | 2049 | != CMD_OK) |
2050 | continue; | | 2050 | continue; |
2051 | #endif | | 2051 | #endif |
2052 | if (!printed) { | | 2052 | if (!printed) { |
2053 | aprint_verbose("%s DMA mode %d", sep, i); | | 2053 | aprint_verbose("%s DMA mode %d", sep, i); |
2054 | sep = ","; | | 2054 | sep = ","; |
2055 | printed = 1; | | 2055 | printed = 1; |
2056 | } | | 2056 | } |
2057 | #if NATA_DMA | | 2057 | #if NATA_DMA |
2058 | if (atac->atac_cap & ATAC_CAP_DMA) { | | 2058 | if (atac->atac_cap & ATAC_CAP_DMA) { |
2059 | if (atac->atac_set_modes != NULL && | | 2059 | if (atac->atac_set_modes != NULL && |
2060 | atac->atac_dma_cap < i) | | 2060 | atac->atac_dma_cap < i) |
2061 | continue; | | 2061 | continue; |
2062 | drvp->DMA_mode = i; | | 2062 | drvp->DMA_mode = i; |
2063 | drvp->DMA_cap = i; | | 2063 | drvp->DMA_cap = i; |
2064 | s = splbio(); | | 2064 | s = splbio(); |
2065 | drvp->drive_flags |= ATA_DRIVE_DMA; | | 2065 | drvp->drive_flags |= ATA_DRIVE_DMA; |
2066 | splx(s); | | 2066 | splx(s); |
2067 | } | | 2067 | } |
2068 | #endif | | 2068 | #endif |
2069 | break; | | 2069 | break; |
2070 | } | | 2070 | } |
2071 | if (params.atap_extensions & WDC_EXT_UDMA_MODES) { | | 2071 | if (params.atap_extensions & WDC_EXT_UDMA_MODES) { |
2072 | printed = 0; | | 2072 | printed = 0; |
2073 | for (i = 7; i >= 0; i--) { | | 2073 | for (i = 7; i >= 0; i--) { |
2074 | if ((params.atap_udmamode_supp & (1 << i)) | | 2074 | if ((params.atap_udmamode_supp & (1 << i)) |
2075 | == 0) | | 2075 | == 0) |
2076 | continue; | | 2076 | continue; |
2077 | #if NATA_UDMA | | 2077 | #if NATA_UDMA |
2078 | if (atac->atac_set_modes != NULL && | | 2078 | if (atac->atac_set_modes != NULL && |
2079 | (atac->atac_cap & ATAC_CAP_UDMA)) | | 2079 | (atac->atac_cap & ATAC_CAP_UDMA)) |
2080 | if (ata_set_mode(drvp, 0x40 | i, | | 2080 | if (ata_set_mode(drvp, 0x40 | i, |
2081 | AT_WAIT) != CMD_OK) | | 2081 | AT_WAIT) != CMD_OK) |
2082 | continue; | | 2082 | continue; |
2083 | #endif | | 2083 | #endif |
2084 | if (!printed) { | | 2084 | if (!printed) { |
2085 | aprint_verbose("%s Ultra-DMA mode %d", | | 2085 | aprint_verbose("%s Ultra-DMA mode %d", |
2086 | sep, i); | | 2086 | sep, i); |
2087 | if (i == 2) | | 2087 | if (i == 2) |
2088 | aprint_verbose(" (Ultra/33)"); | | 2088 | aprint_verbose(" (Ultra/33)"); |
2089 | else if (i == 4) | | 2089 | else if (i == 4) |
2090 | aprint_verbose(" (Ultra/66)"); | | 2090 | aprint_verbose(" (Ultra/66)"); |
2091 | else if (i == 5) | | 2091 | else if (i == 5) |
2092 | aprint_verbose(" (Ultra/100)"); | | 2092 | aprint_verbose(" (Ultra/100)"); |
2093 | else if (i == 6) | | 2093 | else if (i == 6) |
2094 | aprint_verbose(" (Ultra/133)"); | | 2094 | aprint_verbose(" (Ultra/133)"); |
2095 | sep = ","; | | 2095 | sep = ","; |
2096 | printed = 1; | | 2096 | printed = 1; |
2097 | } | | 2097 | } |
2098 | #if NATA_UDMA | | 2098 | #if NATA_UDMA |
2099 | if (atac->atac_cap & ATAC_CAP_UDMA) { | | 2099 | if (atac->atac_cap & ATAC_CAP_UDMA) { |
2100 | if (atac->atac_set_modes != NULL && | | 2100 | if (atac->atac_set_modes != NULL && |
2101 | atac->atac_udma_cap < i) | | 2101 | atac->atac_udma_cap < i) |
2102 | continue; | | 2102 | continue; |
2103 | drvp->UDMA_mode = i; | | 2103 | drvp->UDMA_mode = i; |
2104 | drvp->UDMA_cap = i; | | 2104 | drvp->UDMA_cap = i; |
2105 | s = splbio(); | | 2105 | s = splbio(); |
2106 | drvp->drive_flags |= ATA_DRIVE_UDMA; | | 2106 | drvp->drive_flags |= ATA_DRIVE_UDMA; |
2107 | splx(s); | | 2107 | splx(s); |
2108 | } | | 2108 | } |
2109 | #endif | | 2109 | #endif |
2110 | break; | | 2110 | break; |
2111 | } | | 2111 | } |
2112 | } | | 2112 | } |
2113 | } | | 2113 | } |
2114 | | | 2114 | |
2115 | s = splbio(); | | 2115 | s = splbio(); |
2116 | drvp->drive_flags &= ~ATA_DRIVE_NOSTREAM; | | 2116 | drvp->drive_flags &= ~ATA_DRIVE_NOSTREAM; |
2117 | if (drvp->drive_type == ATA_DRIVET_ATAPI) { | | 2117 | if (drvp->drive_type == ATA_DRIVET_ATAPI) { |
2118 | if (atac->atac_cap & ATAC_CAP_ATAPI_NOSTREAM) | | 2118 | if (atac->atac_cap & ATAC_CAP_ATAPI_NOSTREAM) |
2119 | drvp->drive_flags |= ATA_DRIVE_NOSTREAM; | | 2119 | drvp->drive_flags |= ATA_DRIVE_NOSTREAM; |
2120 | } else { | | 2120 | } else { |
2121 | if (atac->atac_cap & ATAC_CAP_ATA_NOSTREAM) | | 2121 | if (atac->atac_cap & ATAC_CAP_ATA_NOSTREAM) |
2122 | drvp->drive_flags |= ATA_DRIVE_NOSTREAM; | | 2122 | drvp->drive_flags |= ATA_DRIVE_NOSTREAM; |
2123 | } | | 2123 | } |
2124 | splx(s); | | 2124 | splx(s); |
2125 | | | 2125 | |
2126 | /* Try to guess ATA version here, if it didn't get reported */ | | 2126 | /* Try to guess ATA version here, if it didn't get reported */ |
2127 | if (drvp->ata_vers == 0) { | | 2127 | if (drvp->ata_vers == 0) { |
2128 | #if NATA_UDMA | | 2128 | #if NATA_UDMA |
2129 | if (drvp->drive_flags & ATA_DRIVE_UDMA) | | 2129 | if (drvp->drive_flags & ATA_DRIVE_UDMA) |
2130 | drvp->ata_vers = 4; /* should be at last ATA-4 */ | | 2130 | drvp->ata_vers = 4; /* should be at last ATA-4 */ |
2131 | else | | 2131 | else |
2132 | #endif | | 2132 | #endif |
2133 | if (drvp->PIO_cap > 2) | | 2133 | if (drvp->PIO_cap > 2) |
2134 | drvp->ata_vers = 2; /* should be at last ATA-2 */ | | 2134 | drvp->ata_vers = 2; /* should be at last ATA-2 */ |
2135 | } | | 2135 | } |
2136 | cf_flags = device_cfdata(drv_dev)->cf_flags; | | 2136 | cf_flags = device_cfdata(drv_dev)->cf_flags; |
2137 | if (cf_flags & ATA_CONFIG_PIO_SET) { | | 2137 | if (cf_flags & ATA_CONFIG_PIO_SET) { |
2138 | s = splbio(); | | 2138 | s = splbio(); |
2139 | drvp->PIO_mode = | | 2139 | drvp->PIO_mode = |
2140 | (cf_flags & ATA_CONFIG_PIO_MODES) >> ATA_CONFIG_PIO_OFF; | | 2140 | (cf_flags & ATA_CONFIG_PIO_MODES) >> ATA_CONFIG_PIO_OFF; |
2141 | drvp->drive_flags |= ATA_DRIVE_MODE; | | 2141 | drvp->drive_flags |= ATA_DRIVE_MODE; |
2142 | splx(s); | | 2142 | splx(s); |
2143 | } | | 2143 | } |
2144 | #if NATA_DMA | | 2144 | #if NATA_DMA |
2145 | if ((atac->atac_cap & ATAC_CAP_DMA) == 0) { | | 2145 | if ((atac->atac_cap & ATAC_CAP_DMA) == 0) { |
2146 | /* don't care about DMA modes */ | | 2146 | /* don't care about DMA modes */ |
2147 | return; | | 2147 | return; |
2148 | } | | 2148 | } |
2149 | if (cf_flags & ATA_CONFIG_DMA_SET) { | | 2149 | if (cf_flags & ATA_CONFIG_DMA_SET) { |
2150 | s = splbio(); | | 2150 | s = splbio(); |
2151 | if ((cf_flags & ATA_CONFIG_DMA_MODES) == | | 2151 | if ((cf_flags & ATA_CONFIG_DMA_MODES) == |
2152 | ATA_CONFIG_DMA_DISABLE) { | | 2152 | ATA_CONFIG_DMA_DISABLE) { |
2153 | drvp->drive_flags &= ~ATA_DRIVE_DMA; | | 2153 | drvp->drive_flags &= ~ATA_DRIVE_DMA; |
2154 | } else { | | 2154 | } else { |
2155 | drvp->DMA_mode = (cf_flags & ATA_CONFIG_DMA_MODES) >> | | 2155 | drvp->DMA_mode = (cf_flags & ATA_CONFIG_DMA_MODES) >> |
2156 | ATA_CONFIG_DMA_OFF; | | 2156 | ATA_CONFIG_DMA_OFF; |
2157 | drvp->drive_flags |= ATA_DRIVE_DMA | ATA_DRIVE_MODE; | | 2157 | drvp->drive_flags |= ATA_DRIVE_DMA | ATA_DRIVE_MODE; |
2158 | } | | 2158 | } |
2159 | splx(s); | | 2159 | splx(s); |
2160 | } | | 2160 | } |
2161 | | | 2161 | |
2162 | /* | | 2162 | /* |
2163 | * Probe WRITE DMA FUA EXT. Support is mandatory for devices | | 2163 | * Probe WRITE DMA FUA EXT. Support is mandatory for devices |
2164 | * supporting LBA48, but nevertheless confirm with the feature flag. | | 2164 | * supporting LBA48, but nevertheless confirm with the feature flag. |
2165 | */ | | 2165 | */ |
2166 | if (drvp->drive_flags & ATA_DRIVE_DMA) { | | 2166 | if (drvp->drive_flags & ATA_DRIVE_DMA) { |
2167 | if ((params.atap_cmd2_en & ATA_CMD2_LBA48) != 0 | | 2167 | if ((params.atap_cmd2_en & ATA_CMD2_LBA48) != 0 |
2168 | && (params.atap_cmd_def & ATA_CMDE_WFE)) { | | 2168 | && (params.atap_cmd_def & ATA_CMDE_WFE)) { |
2169 | drvp->drive_flags |= ATA_DRIVE_WFUA; | | 2169 | drvp->drive_flags |= ATA_DRIVE_WFUA; |
2170 | aprint_verbose("%s WRITE DMA FUA", sep); | | 2170 | aprint_verbose("%s WRITE DMA FUA", sep); |
2171 | sep = ","; | | 2171 | sep = ","; |
2172 | } | | 2172 | } |
2173 | } | | 2173 | } |
2174 | | | 2174 | |
2175 | /* Probe NCQ support - READ/WRITE FPDMA QUEUED command support */ | | 2175 | /* Probe NCQ support - READ/WRITE FPDMA QUEUED command support */ |
2176 | s = splbio(); | | 2176 | s = splbio(); |
2177 | drvp->drv_openings = 1; | | 2177 | drvp->drv_openings = 1; |
2178 | if (params.atap_sata_caps & SATA_NATIVE_CMDQ) { | | 2178 | if (params.atap_sata_caps & SATA_NATIVE_CMDQ) { |
2179 | if (atac->atac_cap & ATAC_CAP_NCQ) | | 2179 | if (atac->atac_cap & ATAC_CAP_NCQ) |
2180 | drvp->drive_flags |= ATA_DRIVE_NCQ; | | 2180 | drvp->drive_flags |= ATA_DRIVE_NCQ; |
2181 | drvp->drv_openings = | | 2181 | drvp->drv_openings = |
2182 | (params.atap_queuedepth & WDC_QUEUE_DEPTH_MASK) + 1; | | 2182 | (params.atap_queuedepth & WDC_QUEUE_DEPTH_MASK) + 1; |
2183 | aprint_verbose("%s NCQ (%d tags)", sep, drvp->drv_openings); | | 2183 | aprint_verbose("%s NCQ (%d tags)", sep, drvp->drv_openings); |
2184 | sep = ","; | | 2184 | sep = ","; |
2185 | | | 2185 | |
2186 | if (params.atap_sata_caps & SATA_NCQ_PRIO) { | | 2186 | if (params.atap_sata_caps & SATA_NCQ_PRIO) { |
2187 | drvp->drive_flags |= ATA_DRIVE_NCQ_PRIO; | | 2187 | drvp->drive_flags |= ATA_DRIVE_NCQ_PRIO; |
2188 | aprint_verbose(" w/PRIO"); | | 2188 | aprint_verbose(" w/PRIO"); |
2189 | } | | 2189 | } |
2190 | } | | 2190 | } |
2191 | splx(s); | | 2191 | splx(s); |
2192 | | | 2192 | |
2193 | if (printed) | | 2193 | if (printed) |
2194 | aprint_verbose("\n"); | | 2194 | aprint_verbose("\n"); |
2195 | | | 2195 | |
2196 | #if NATA_UDMA | | 2196 | #if NATA_UDMA |
2197 | if ((atac->atac_cap & ATAC_CAP_UDMA) == 0) { | | 2197 | if ((atac->atac_cap & ATAC_CAP_UDMA) == 0) { |
2198 | /* don't care about UDMA modes */ | | 2198 | /* don't care about UDMA modes */ |
2199 | return; | | 2199 | return; |
2200 | } | | 2200 | } |
2201 | if (cf_flags & ATA_CONFIG_UDMA_SET) { | | 2201 | if (cf_flags & ATA_CONFIG_UDMA_SET) { |
2202 | s = splbio(); | | 2202 | s = splbio(); |
2203 | if ((cf_flags & ATA_CONFIG_UDMA_MODES) == | | 2203 | if ((cf_flags & ATA_CONFIG_UDMA_MODES) == |
2204 | ATA_CONFIG_UDMA_DISABLE) { | | 2204 | ATA_CONFIG_UDMA_DISABLE) { |
2205 | drvp->drive_flags &= ~ATA_DRIVE_UDMA; | | 2205 | drvp->drive_flags &= ~ATA_DRIVE_UDMA; |
2206 | } else { | | 2206 | } else { |
2207 | drvp->UDMA_mode = (cf_flags & ATA_CONFIG_UDMA_MODES) >> | | 2207 | drvp->UDMA_mode = (cf_flags & ATA_CONFIG_UDMA_MODES) >> |
2208 | ATA_CONFIG_UDMA_OFF; | | 2208 | ATA_CONFIG_UDMA_OFF; |
2209 | drvp->drive_flags |= ATA_DRIVE_UDMA | ATA_DRIVE_MODE; | | 2209 | drvp->drive_flags |= ATA_DRIVE_UDMA | ATA_DRIVE_MODE; |
2210 | } | | 2210 | } |
2211 | splx(s); | | 2211 | splx(s); |
2212 | } | | 2212 | } |
2213 | #endif /* NATA_UDMA */ | | 2213 | #endif /* NATA_UDMA */ |
2214 | #endif /* NATA_DMA */ | | 2214 | #endif /* NATA_DMA */ |
2215 | } | | 2215 | } |
2216 | | | 2216 | |
2217 | /* management of the /dev/atabus* devices */ | | 2217 | /* management of the /dev/atabus* devices */ |
2218 | int | | 2218 | int |
2219 | atabusopen(dev_t dev, int flag, int fmt, struct lwp *l) | | 2219 | atabusopen(dev_t dev, int flag, int fmt, struct lwp *l) |
2220 | { | | 2220 | { |
2221 | struct atabus_softc *sc; | | 2221 | struct atabus_softc *sc; |
2222 | int error; | | 2222 | int error; |
2223 | | | 2223 | |
2224 | sc = device_lookup_private(&atabus_cd, minor(dev)); | | 2224 | sc = device_lookup_private(&atabus_cd, minor(dev)); |
2225 | if (sc == NULL) | | 2225 | if (sc == NULL) |
2226 | return (ENXIO); | | 2226 | return (ENXIO); |
2227 | | | 2227 | |
2228 | if (sc->sc_flags & ATABUSCF_OPEN) | | 2228 | if (sc->sc_flags & ATABUSCF_OPEN) |
2229 | return (EBUSY); | | 2229 | return (EBUSY); |
2230 | | | 2230 | |
2231 | if ((error = ata_addref(sc->sc_chan)) != 0) | | 2231 | if ((error = ata_addref(sc->sc_chan)) != 0) |
2232 | return (error); | | 2232 | return (error); |
2233 | | | 2233 | |
2234 | sc->sc_flags |= ATABUSCF_OPEN; | | 2234 | sc->sc_flags |= ATABUSCF_OPEN; |
2235 | | | 2235 | |
2236 | return (0); | | 2236 | return (0); |
2237 | } | | 2237 | } |
2238 | | | 2238 | |
2239 | | | 2239 | |
2240 | int | | 2240 | int |
2241 | atabusclose(dev_t dev, int flag, int fmt, struct lwp *l) | | 2241 | atabusclose(dev_t dev, int flag, int fmt, struct lwp *l) |
2242 | { | | 2242 | { |
2243 | struct atabus_softc *sc = | | 2243 | struct atabus_softc *sc = |
2244 | device_lookup_private(&atabus_cd, minor(dev)); | | 2244 | device_lookup_private(&atabus_cd, minor(dev)); |
2245 | | | 2245 | |
2246 | ata_delref(sc->sc_chan); | | 2246 | ata_delref(sc->sc_chan); |
2247 | | | 2247 | |
2248 | sc->sc_flags &= ~ATABUSCF_OPEN; | | 2248 | sc->sc_flags &= ~ATABUSCF_OPEN; |
2249 | | | 2249 | |
2250 | return (0); | | 2250 | return (0); |
2251 | } | | 2251 | } |
2252 | | | 2252 | |
2253 | int | | 2253 | int |
2254 | atabusioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l) | | 2254 | atabusioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l) |
2255 | { | | 2255 | { |
2256 | struct atabus_softc *sc = | | 2256 | struct atabus_softc *sc = |
2257 | device_lookup_private(&atabus_cd, minor(dev)); | | 2257 | device_lookup_private(&atabus_cd, minor(dev)); |
2258 | struct ata_channel *chp = sc->sc_chan; | | 2258 | struct ata_channel *chp = sc->sc_chan; |
2259 | int min_drive, max_drive, drive; | | 2259 | int min_drive, max_drive, drive; |
2260 | int error; | | 2260 | int error; |
2261 | int s; | | 2261 | int s; |
2262 | | | 2262 | |
2263 | /* | | 2263 | /* |
2264 | * Enforce write permission for ioctls that change the | | 2264 | * Enforce write permission for ioctls that change the |
2265 | * state of the bus. Host adapter specific ioctls must | | 2265 | * state of the bus. Host adapter specific ioctls must |
2266 | * be checked by the adapter driver. | | 2266 | * be checked by the adapter driver. |
2267 | */ | | 2267 | */ |
2268 | switch (cmd) { | | 2268 | switch (cmd) { |
2269 | case ATABUSIOSCAN: | | 2269 | case ATABUSIOSCAN: |
2270 | case ATABUSIODETACH: | | 2270 | case ATABUSIODETACH: |
2271 | case ATABUSIORESET: | | 2271 | case ATABUSIORESET: |
2272 | if ((flag & FWRITE) == 0) | | 2272 | if ((flag & FWRITE) == 0) |
2273 | return (EBADF); | | 2273 | return (EBADF); |
2274 | } | | 2274 | } |
2275 | | | 2275 | |
2276 | switch (cmd) { | | 2276 | switch (cmd) { |
2277 | case ATABUSIORESET: | | 2277 | case ATABUSIORESET: |
2278 | s = splbio(); | | 2278 | s = splbio(); |
2279 | ata_reset_channel(sc->sc_chan, AT_WAIT | AT_POLL); | | 2279 | ata_reset_channel(sc->sc_chan, AT_WAIT | AT_POLL); |
2280 | splx(s); | | 2280 | splx(s); |
2281 | return 0; | | 2281 | return 0; |
2282 | case ATABUSIOSCAN: | | 2282 | case ATABUSIOSCAN: |
2283 | { | | 2283 | { |
2284 | #if 0 | | 2284 | #if 0 |
2285 | struct atabusioscan_args *a= | | 2285 | struct atabusioscan_args *a= |
2286 | (struct atabusioscan_args *)addr; | | 2286 | (struct atabusioscan_args *)addr; |
2287 | #endif | | 2287 | #endif |
2288 | if ((chp->ch_drive[0].drive_type == ATA_DRIVET_OLD) || | | 2288 | if ((chp->ch_drive[0].drive_type == ATA_DRIVET_OLD) || |
2289 | (chp->ch_drive[1].drive_type == ATA_DRIVET_OLD)) | | 2289 | (chp->ch_drive[1].drive_type == ATA_DRIVET_OLD)) |
2290 | return (EOPNOTSUPP); | | 2290 | return (EOPNOTSUPP); |
2291 | return (EOPNOTSUPP); | | 2291 | return (EOPNOTSUPP); |
2292 | } | | 2292 | } |
2293 | case ATABUSIODETACH: | | 2293 | case ATABUSIODETACH: |
2294 | { | | 2294 | { |
2295 | struct atabusiodetach_args *a= | | 2295 | struct atabusiodetach_args *a= |
2296 | (struct atabusiodetach_args *)addr; | | 2296 | (struct atabusiodetach_args *)addr; |
2297 | if ((chp->ch_drive[0].drive_type == ATA_DRIVET_OLD) || | | 2297 | if ((chp->ch_drive[0].drive_type == ATA_DRIVET_OLD) || |
2298 | (chp->ch_drive[1].drive_type == ATA_DRIVET_OLD)) | | 2298 | (chp->ch_drive[1].drive_type == ATA_DRIVET_OLD)) |
2299 | return (EOPNOTSUPP); | | 2299 | return (EOPNOTSUPP); |
2300 | switch (a->at_dev) { | | 2300 | switch (a->at_dev) { |
2301 | case -1: | | 2301 | case -1: |
2302 | min_drive = 0; | | 2302 | min_drive = 0; |
2303 | max_drive = 1; | | 2303 | max_drive = 1; |
2304 | break; | | 2304 | break; |
2305 | case 0: | | 2305 | case 0: |
2306 | case 1: | | 2306 | case 1: |
2307 | min_drive = max_drive = a->at_dev; | | 2307 | min_drive = max_drive = a->at_dev; |
2308 | break; | | 2308 | break; |
2309 | default: | | 2309 | default: |
2310 | return (EINVAL); | | 2310 | return (EINVAL); |
2311 | } | | 2311 | } |
2312 | for (drive = min_drive; drive <= max_drive; drive++) { | | 2312 | for (drive = min_drive; drive <= max_drive; drive++) { |
2313 | if (chp->ch_drive[drive].drv_softc != NULL) { | | 2313 | if (chp->ch_drive[drive].drv_softc != NULL) { |
2314 | error = config_detach( | | 2314 | error = config_detach( |
2315 | chp->ch_drive[drive].drv_softc, 0); | | 2315 | chp->ch_drive[drive].drv_softc, 0); |
2316 | if (error) | | 2316 | if (error) |
2317 | return (error); | | 2317 | return (error); |
2318 | KASSERT(chp->ch_drive[drive].drv_softc == NULL); | | 2318 | KASSERT(chp->ch_drive[drive].drv_softc == NULL); |
2319 | } | | 2319 | } |
2320 | } | | 2320 | } |
2321 | return 0; | | 2321 | return 0; |
2322 | } | | 2322 | } |
2323 | default: | | 2323 | default: |
2324 | return ENOTTY; | | 2324 | return ENOTTY; |
2325 | } | | 2325 | } |
2326 | } | | 2326 | } |
2327 | | | 2327 | |
2328 | static bool | | 2328 | static bool |
2329 | atabus_suspend(device_t dv, const pmf_qual_t *qual) | | 2329 | atabus_suspend(device_t dv, const pmf_qual_t *qual) |
2330 | { | | 2330 | { |
2331 | struct atabus_softc *sc = device_private(dv); | | 2331 | struct atabus_softc *sc = device_private(dv); |
2332 | struct ata_channel *chp = sc->sc_chan; | | 2332 | struct ata_channel *chp = sc->sc_chan; |
2333 | | | 2333 | |
2334 | ata_channel_idle(chp); | | 2334 | ata_channel_idle(chp); |
2335 | | | 2335 | |
2336 | return true; | | 2336 | return true; |
2337 | } | | 2337 | } |
2338 | | | 2338 | |
2339 | static bool | | 2339 | static bool |
2340 | atabus_resume(device_t dv, const pmf_qual_t *qual) | | 2340 | atabus_resume(device_t dv, const pmf_qual_t *qual) |
2341 | { | | 2341 | { |
2342 | struct atabus_softc *sc = device_private(dv); | | 2342 | struct atabus_softc *sc = device_private(dv); |
2343 | struct ata_channel *chp = sc->sc_chan; | | 2343 | struct ata_channel *chp = sc->sc_chan; |
2344 | | | 2344 | |
2345 | /* | | 2345 | /* |
2346 | * XXX joerg: with wdc, the first channel unfreezes the controler. | | 2346 | * XXX joerg: with wdc, the first channel unfreezes the controler. |
2347 | * Move this the reset and queue idling into wdc. | | 2347 | * Move this the reset and queue idling into wdc. |