Sun May 30 11:24:02 2021 UTC
ld(4): Block requests while suspended until resumed.

Otherwise nothing stops us from continuing to feed I/O to the disk
controller when it expects that the queues are quiesced as it pokes
registers to change its power states.  Fixes resume during disk
activity on my T480 with nvme.


(riastradh)
diff -r1.111 -r1.112 src/sys/dev/ld.c
diff -r1.34 -r1.35 src/sys/dev/ldvar.h
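
In outline, the change adds an LDF_SUSPEND flag: ld_suspend() sets it under sc_mutex, waits on sc_drain for the hardware queue to empty, and flushes the cache; ld_diskstart() bounces new transfers with EAGAIN while the flag is set; ld_resume() clears the flag and calls dk_start() to replay anything that was deferred.  The following is only a condensed sketch of that flow, with the 30-second drain timeout and the failure paths elided -- see the full diffs below for the committed code:

	static bool
	ld_suspend(device_t dev, const pmf_qual_t *qual)
	{
		struct ld_softc *sc = device_private(dev);

		mutex_enter(&sc->sc_mutex);
		sc->sc_flags |= LDF_SUSPEND;	/* ld_diskstart() now refuses new I/O */
		while (sc->sc_queuecnt > 0)	/* wait for in-flight transfers to drain */
			cv_wait(&sc->sc_drain, &sc->sc_mutex);
		mutex_exit(&sc->sc_mutex);

		return ld_flush(dev, false) == 0;	/* flush cache before losing power */
	}

	static bool
	ld_resume(device_t dev, const pmf_qual_t *qual)
	{
		struct ld_softc *sc = device_private(dev);

		mutex_enter(&sc->sc_mutex);
		sc->sc_flags &= ~LDF_SUSPEND;	/* accept new transfers again */
		mutex_exit(&sc->sc_mutex);

		dk_start(&sc->sc_dksc, NULL);	/* restart requests deferred on the bufq */
		return true;
	}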

cvs diff -u -r1.111 -r1.112 src/sys/dev/ld.c

--- src/sys/dev/ld.c	2020/08/02 01:17:56	1.111
+++ src/sys/dev/ld.c	2021/05/30 11:24:02	1.112
@@ -1,4 +1,4 @@
-/*	$NetBSD: ld.c,v 1.111 2020/08/02 01:17:56 riastradh Exp $	*/
+/*	$NetBSD: ld.c,v 1.112 2021/05/30 11:24:02 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 1998, 2000 The NetBSD Foundation, Inc.
@@ -33,8 +33,8 @@
  * Disk driver for use by RAID controllers.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: ld.c,v 1.111 2020/08/02 01:17:56 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: ld.c,v 1.112 2021/05/30 11:24:02 riastradh Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -62,7 +62,8 @@
 #include "ioconf.h"
 
 static void	ldminphys(struct buf *bp);
 static bool	ld_suspend(device_t, const pmf_qual_t *);
+static bool	ld_resume(device_t, const pmf_qual_t *);
 static bool	ld_shutdown(device_t, int);
 static int	ld_diskstart(device_t, struct buf *bp);
 static void	ld_iosize(device_t, int *);
@@ -166,7 +167,8 @@
 	bufq_alloc(&dksc->sc_bufq, default_strategy, BUFQ_SORT_RAWBLOCK);
 
 	/* Register with PMF */
-	if (!pmf_device_register1(dksc->sc_dev, ld_suspend, NULL, ld_shutdown))
+	if (!pmf_device_register1(dksc->sc_dev, ld_suspend, ld_resume,
+	    ld_shutdown))
 		aprint_error_dev(dksc->sc_dev,
 		    "couldn't establish power handler\n");
 
@@ -275,8 +277,56 @@
 /* ARGSUSED */
 static bool
 ld_suspend(device_t dev, const pmf_qual_t *qual)
 {
-	return ld_shutdown(dev, 0);
+	struct ld_softc *sc = device_private(dev);
+	int queuecnt;
+	bool ok = false;
+
+	/* Block new requests and wait for outstanding requests to drain. */
+	mutex_enter(&sc->sc_mutex);
+	KASSERT((sc->sc_flags & LDF_SUSPEND) == 0);
+	sc->sc_flags |= LDF_SUSPEND;
+	while ((queuecnt = sc->sc_queuecnt) > 0) {
+		if (cv_timedwait(&sc->sc_drain, &sc->sc_mutex, 30 * hz))
+			break;
+	}
+	mutex_exit(&sc->sc_mutex);
+
+	/* Block suspend if we couldn't drain everything in 30sec. */
+	if (queuecnt > 0) {
+		device_printf(dev, "timeout draining buffers\n");
+		goto out;
+	}
+
+	/* Flush cache before we lose power.  If we can't, block suspend. */
+	if (ld_flush(dev, /*poll*/false) != 0) {
+		device_printf(dev, "failed to flush cache\n");
+		goto out;
+	}
+
+	/* Success!  */
+	ok = true;
+
+out:	if (!ok)
+		(void)ld_resume(dev, qual);
+	return ok;
+}
+
+static bool
+ld_resume(device_t dev, const pmf_qual_t *qual)
+{
+	struct ld_softc *sc = device_private(dev);
+
+	/* Allow new requests to come in. */
+	mutex_enter(&sc->sc_mutex);
+	KASSERT(sc->sc_flags & LDF_SUSPEND);
+	sc->sc_flags &= ~LDF_SUSPEND;
+	mutex_exit(&sc->sc_mutex);
+
+	/* Restart any pending queued requests. */
+	dk_start(&sc->sc_dksc, NULL);
+
+	return true;
 }
 
 /* ARGSUSED */
@@ -435,20 +485,27 @@
 static int
 ld_diskstart(device_t dev, struct buf *bp)
 {
 	struct ld_softc *sc = device_private(dev);
 	int error;
 
-	if (sc->sc_queuecnt >= sc->sc_maxqueuecnt)
+	if (sc->sc_queuecnt >= sc->sc_maxqueuecnt ||
+	    sc->sc_flags & LDF_SUSPEND) {
+		if (sc->sc_flags & LDF_SUSPEND)
+			aprint_debug_dev(dev, "i/o blocked while suspended\n");
 		return EAGAIN;
+	}
 
 	if ((sc->sc_flags & LDF_MPSAFE) == 0)
 		KERNEL_LOCK(1, curlwp);
 
 	mutex_enter(&sc->sc_mutex);
 
-	if (sc->sc_queuecnt >= sc->sc_maxqueuecnt)
+	if (sc->sc_queuecnt >= sc->sc_maxqueuecnt ||
+	    sc->sc_flags & LDF_SUSPEND) {
+		if (sc->sc_flags & LDF_SUSPEND)
+			aprint_debug_dev(dev, "i/o blocked while suspended\n");
 		error = EAGAIN;
-	else {
+	} else {
 		error = (*sc->sc_start)(sc, bp);
 		if (error == 0)
 			sc->sc_queuecnt++;

cvs diff -u -r1.34 -r1.35 src/sys/dev/ldvar.h

--- src/sys/dev/ldvar.h	2020/08/02 01:17:56	1.34
+++ src/sys/dev/ldvar.h	2021/05/30 11:24:02	1.35
@@ -1,4 +1,4 @@
-/*	$NetBSD: ldvar.h,v 1.34 2020/08/02 01:17:56 riastradh Exp $	*/
+/*	$NetBSD: ldvar.h,v 1.35 2021/05/30 11:24:02 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 2000 The NetBSD Foundation, Inc.
@@ -68,8 +68,9 @@
 /* sc_flags */
 #define	LDF_ENABLED	0x001	/* device enabled */
 #define	LDF_UNUSED0	0x020	/* was LDF_DRAIN */
 #define	LDF_NO_RND	0x040	/* do not attach rnd source */
 #define	LDF_MPSAFE	0x080	/* backend is MPSAFE */
+#define	LDF_SUSPEND	0x100	/* disk is suspended until resume */
 
 int	ldadjqparam(struct ld_softc *, int);
 void	ldattach(struct ld_softc *, const char *);