Sat Jun 20 19:24:27 2009 UTC
add a workaround for drm:

for device mmap()s, if the D_NEGOFFSAFE flag is set in the cdevsw,
do not check whether the offset is negative.

this flag should go away, along with the check itself, once all
drivers have been audited and confirmed not to fail with negative
offsets.


(mrg)
diff -r1.134 -r1.135 src/sys/sys/conf.h
diff -r1.55 -r1.56 src/sys/uvm/uvm_device.c
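
As an illustration of how a driver opts in: it ORs D_NEGOFFSAFE into the
d_flag member of its cdevsw. A minimal sketch, assuming placeholder xxx_*
entry points (not the real drm functions):

#include <sys/conf.h>

dev_type_open(xxx_open);
dev_type_close(xxx_close);
dev_type_ioctl(xxx_ioctl);
dev_type_poll(xxx_poll);
dev_type_mmap(xxx_mmap);

const struct cdevsw xxx_cdevsw = {
	.d_open = xxx_open,
	.d_close = xxx_close,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = xxx_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = xxx_poll,
	.d_mmap = xxx_mmap,	/* may be handed an off_t with the sign bit set */
	.d_kqfilter = nokqfilter,
	.d_flag = D_NEGOFFSAFE,	/* udv_attach() skips its off < 0 check */
};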

cvs diff -r1.134 -r1.135 src/sys/sys/conf.h

--- src/sys/sys/conf.h 2009/02/02 14:00:27 1.134
+++ src/sys/sys/conf.h 2009/06/20 19:24:27 1.135
@@ -1,255 +1,256 @@
-/* $NetBSD: conf.h,v 1.134 2009/02/02 14:00:27 haad Exp $ */
+/* $NetBSD: conf.h,v 1.135 2009/06/20 19:24:27 mrg Exp $ */
 
 /*-
  * Copyright (c) 1990, 1993
  * The Regents of the University of California. All rights reserved.
  * (c) UNIX System Laboratories, Inc.
  * All or some portions of this file are derived from material licensed
  * to the University of California by American Telephone and Telegraph
  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
  * the permission of UNIX System Laboratories, Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. Neither the name of the University nor the names of its contributors
  *    may be used to endorse or promote products derived from this software
  *    without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  * @(#)conf.h 8.5 (Berkeley) 1/9/95
  */
 
 #ifndef _SYS_CONF_H_
 #define _SYS_CONF_H_
 
 /*
  * Definitions of device driver entry switches
  */
 
 #include <sys/queue.h>
 
 struct buf;
 struct knote;
 struct lwp;
 struct tty;
 struct uio;
 struct vnode;
 
 /*
  * Types for d_type
  */
 #define D_OTHER 0x0000
 #define D_TAPE 0x0001
 #define D_DISK 0x0002
 #define D_TTY 0x0003
 #define D_TYPEMASK 0x00ff
 #define D_MPSAFE 0x0100
+#define D_NEGOFFSAFE 0x0200
 
 /*
  * Block device switch table
  */
 struct bdevsw {
 	int (*d_open)(dev_t, int, int, struct lwp *);
 	int (*d_close)(dev_t, int, int, struct lwp *);
 	void (*d_strategy)(struct buf *);
 	int (*d_ioctl)(dev_t, u_long, void *, int, struct lwp *);
 	int (*d_dump)(dev_t, daddr_t, void *, size_t);
 	int (*d_psize)(dev_t);
 	int d_flag;
 };
 
 /*
  * Character device switch table
  */
 struct cdevsw {
 	int (*d_open)(dev_t, int, int, struct lwp *);
 	int (*d_close)(dev_t, int, int, struct lwp *);
 	int (*d_read)(dev_t, struct uio *, int);
 	int (*d_write)(dev_t, struct uio *, int);
 	int (*d_ioctl)(dev_t, u_long, void *, int, struct lwp *);
 	void (*d_stop)(struct tty *, int);
 	struct tty * (*d_tty)(dev_t);
 	int (*d_poll)(dev_t, int, struct lwp *);
 	paddr_t (*d_mmap)(dev_t, off_t, int);
 	int (*d_kqfilter)(dev_t, struct knote *);
 	int d_flag;
 };
 
 #ifdef _KERNEL
 
 #include <sys/mutex.h>
 extern kmutex_t device_lock;
 
 int devsw_attach(const char *, const struct bdevsw *, devmajor_t *,
     const struct cdevsw *, devmajor_t *);
 int devsw_detach(const struct bdevsw *, const struct cdevsw *);
 const struct bdevsw *bdevsw_lookup(dev_t);
 const struct cdevsw *cdevsw_lookup(dev_t);
 devmajor_t bdevsw_lookup_major(const struct bdevsw *);
 devmajor_t cdevsw_lookup_major(const struct cdevsw *);
 
 #define dev_type_open(n) int n (dev_t, int, int, struct lwp *)
 #define dev_type_close(n) int n (dev_t, int, int, struct lwp *)
 #define dev_type_read(n) int n (dev_t, struct uio *, int)
 #define dev_type_write(n) int n (dev_t, struct uio *, int)
 #define dev_type_ioctl(n) \
 	int n (dev_t, u_long, void *, int, struct lwp *)
 #define dev_type_stop(n) void n (struct tty *, int)
 #define dev_type_tty(n) struct tty * n (dev_t)
 #define dev_type_poll(n) int n (dev_t, int, struct lwp *)
 #define dev_type_mmap(n) paddr_t n (dev_t, off_t, int)
 #define dev_type_strategy(n) void n (struct buf *)
 #define dev_type_dump(n) int n (dev_t, daddr_t, void *, size_t)
 #define dev_type_size(n) int n (dev_t)
 #define dev_type_kqfilter(n) int n (dev_t, struct knote *)
 
 #define noopen ((dev_type_open((*)))enodev)
 #define noclose ((dev_type_close((*)))enodev)
 #define noread ((dev_type_read((*)))enodev)
 #define nowrite ((dev_type_write((*)))enodev)
 #define noioctl ((dev_type_ioctl((*)))enodev)
 #define nostop ((dev_type_stop((*)))enodev)
 #define notty NULL
 #define nopoll seltrue
 #define nommap ((dev_type_mmap((*)))enodev)
 #define nodump ((dev_type_dump((*)))enodev)
 #define nosize NULL
 #define nokqfilter seltrue_kqfilter
 
 #define nullopen ((dev_type_open((*)))nullop)
 #define nullclose ((dev_type_close((*)))nullop)
 #define nullread ((dev_type_read((*)))nullop)
 #define nullwrite ((dev_type_write((*)))nullop)
 #define nullioctl ((dev_type_ioctl((*)))nullop)
 #define nullstop ((dev_type_stop((*)))nullop)
 #define nullpoll ((dev_type_poll((*)))nullop)
 #define nullmmap ((dev_type_mmap((*)))nullop)
 #define nulldump ((dev_type_dump((*)))nullop)
 #define nullkqfilter ((dev_type_kqfilter((*)))eopnotsupp)
 
 /* device access wrappers. */
 
 dev_type_open(bdev_open);
 dev_type_close(bdev_close);
 dev_type_strategy(bdev_strategy);
 dev_type_ioctl(bdev_ioctl);
 dev_type_dump(bdev_dump);
 
 dev_type_open(cdev_open);
 dev_type_close(cdev_close);
 dev_type_read(cdev_read);
 dev_type_write(cdev_write);
 dev_type_ioctl(cdev_ioctl);
 dev_type_stop(cdev_stop);
 dev_type_tty(cdev_tty);
 dev_type_poll(cdev_poll);
 dev_type_mmap(cdev_mmap);
 dev_type_kqfilter(cdev_kqfilter);
 
 int cdev_type(dev_t);
 int bdev_type(dev_t);
 
 /* symbolic sleep message strings */
 extern const char devopn[], devio[], devwait[], devin[], devout[];
 extern const char devioc[], devcls[];
 
 #endif /* _KERNEL */
 
 /*
  * Line discipline switch table
  */
 struct linesw {
 	const char *l_name; /* Linesw name */
 
 	LIST_ENTRY(linesw) l_list;
 	u_int l_refcnt; /* locked by ttyldisc_list_slock */
 	int l_no; /* legacy discipline number (for TIOCGETD) */
 
 	int (*l_open) (dev_t, struct tty *);
 	int (*l_close) (struct tty *, int);
 	int (*l_read) (struct tty *, struct uio *, int);
 	int (*l_write) (struct tty *, struct uio *, int);
 	int (*l_ioctl) (struct tty *, u_long, void *, int,
 	    struct lwp *);
 	int (*l_rint) (int, struct tty *);
 	int (*l_start) (struct tty *);
 	int (*l_modem) (struct tty *, int);
 	int (*l_poll) (struct tty *, int, struct lwp *);
 };
 
 #ifdef _KERNEL
 void ttyldisc_init(void);
 int ttyldisc_attach(struct linesw *);
 int ttyldisc_detach(struct linesw *);
 struct linesw *ttyldisc_lookup(const char *);
 struct linesw *ttyldisc_lookup_bynum(int);
 struct linesw *ttyldisc_default(void);
 void ttyldisc_release(struct linesw *);
 
 /* For those defining their own line disciplines: */
 #define ttynodisc ((int (*)(dev_t, struct tty *))enodev)
 #define ttyerrclose ((int (*)(struct tty *, int))enodev)
 #define ttyerrio ((int (*)(struct tty *, struct uio *, int))enodev)
 #define ttyerrinput ((int (*)(int, struct tty *))enodev)
 #define ttyerrstart ((int (*)(struct tty *))enodev)
 
 int ttyerrpoll (struct tty *, int, struct lwp *);
 int ttynullioctl(struct tty *, u_long, void *, int, struct lwp *);
 
 int iskmemdev(dev_t);
 int seltrue_kqfilter(dev_t, struct knote *);
 #endif
 
 #ifdef _KERNEL
 
 #define DEV_MEM 0 /* minor device 0 is physical memory */
 #define DEV_KMEM 1 /* minor device 1 is kernel memory */
 #define DEV_NULL 2 /* minor device 2 is EOF/rathole */
 #ifdef COMPAT_16
 #define _DEV_ZERO_oARM 3 /* reserved: old ARM /dev/zero minor */
 #endif
 #define DEV_ZERO 12 /* minor device 12 is '\0'/rathole */
 
 #endif /* _KERNEL */
 
 struct devsw_conv {
 	const char *d_name;
 	devmajor_t d_bmajor;
 	devmajor_t d_cmajor;
 };
 
 #ifdef _KERNEL
 void devsw_init(void);
 const char *devsw_blk2name(devmajor_t);
 const char *cdevsw_getname(devmajor_t);
 const char *bdevsw_getname(devmajor_t);
 devmajor_t devsw_name2blk(const char *, char *, size_t);
 devmajor_t devsw_name2chr(const char *, char *, size_t);
 dev_t devsw_chr2blk(dev_t);
 dev_t devsw_blk2chr(dev_t);
 #endif /* _KERNEL */
 
 #ifdef _KERNEL
 struct device;
 void setroot(struct device *, int);
 void swapconf(void);
 #endif /* _KERNEL */
 
 #endif /* !_SYS_CONF_H_ */
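
For background: drm appears to trip over the old check because the offsets
passed to its d_mmap can act as driver-issued map handles rather than plain
byte offsets, so a value with the sign bit set is legitimate there even
though it compares as a negative off_t. A hypothetical userland
illustration (the descriptor and token value are invented):

#include <sys/types.h>
#include <sys/mman.h>

/*
 * Map a device buffer whose mmap "offset" is really a driver-issued
 * token.  With a 64-bit off_t, a token with the top bit set is
 * negative, which udv_attach() used to reject outright.
 */
void *
map_device_buffer(int fd, size_t len)
{
	off_t token = (off_t)0xffffffffd0000000ULL;	/* invented value */

	return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
	    fd, token);
}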

cvs diff -r1.55 -r1.56 src/sys/uvm/uvm_device.c

--- src/sys/uvm/uvm_device.c 2008/12/17 20:51:39 1.55
+++ src/sys/uvm/uvm_device.c 2009/06/20 19:24:27 1.56
@@ -1,452 +1,453 @@
-/* $NetBSD: uvm_device.c,v 1.55 2008/12/17 20:51:39 cegger Exp $ */
+/* $NetBSD: uvm_device.c,v 1.56 2009/06/20 19:24:27 mrg Exp $ */
 
 /*
  *
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. All advertising materials mentioning features or use of this software
  *    must display the following acknowledgement:
  *      This product includes software developed by Charles D. Cranor and
  *      Washington University.
  * 4. The name of the author may not be used to endorse or promote products
  *    derived from this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * from: Id: uvm_device.c,v 1.1.2.9 1998/02/06 05:11:47 chs Exp
  */
 
 /*
  * uvm_device.c: the device pager.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_device.c,v 1.55 2008/12/17 20:51:39 cegger Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_device.c,v 1.56 2009/06/20 19:24:27 mrg Exp $");
 
 #include "opt_uvmhist.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/conf.h>
 #include <sys/proc.h>
 #include <sys/malloc.h>
 #include <sys/vnode.h>
 
 #include <uvm/uvm.h>
 #include <uvm/uvm_device.h>
 
 /*
  * private global data structure
  *
  * we keep a list of active device objects in the system.
  */
 
 LIST_HEAD(udv_list_struct, uvm_device);
 static struct udv_list_struct udv_list;
 static kmutex_t udv_lock;
 
 /*
  * functions
  */
 
 static void udv_init(void);
 static void udv_reference(struct uvm_object *);
 static void udv_detach(struct uvm_object *);
 static int udv_fault(struct uvm_faultinfo *, vaddr_t,
 	    struct vm_page **, int, int, vm_prot_t,
 	    int);
 
 /*
  * master pager structure
  */
 
 const struct uvm_pagerops uvm_deviceops = {
 	.pgo_init = udv_init,
 	.pgo_reference = udv_reference,
 	.pgo_detach = udv_detach,
 	.pgo_fault = udv_fault,
 };
 
 /*
  * the ops!
  */
 
 /*
  * udv_init
  *
  * init pager private data structures.
  */
 
 static void
 udv_init(void)
 {
 	LIST_INIT(&udv_list);
 	mutex_init(&udv_lock, MUTEX_DEFAULT, IPL_NONE);
 }
 
 /*
  * udv_attach
  *
  * get a VM object that is associated with a device. allocate a new
  * one if needed.
  *
  * => caller must _not_ already be holding the lock on the uvm_object.
  * => in fact, nothing should be locked so that we can sleep here.
  */
 
 struct uvm_object *
 udv_attach(void *arg, vm_prot_t accessprot,
     voff_t off, /* used only for access check */
     vsize_t size /* used only for access check */)
 {
 	dev_t device = *((dev_t *)arg);
 	struct uvm_device *udv, *lcv;
 	const struct cdevsw *cdev;
 	dev_type_mmap((*mapfn));
 
 	UVMHIST_FUNC("udv_attach"); UVMHIST_CALLED(maphist);
 
 	UVMHIST_LOG(maphist, "(device=0x%x)", device,0,0,0);
 
 	/*
 	 * before we do anything, ensure this device supports mmap
 	 */
 
 	cdev = cdevsw_lookup(device);
 	if (cdev == NULL) {
 		return (NULL);
 	}
 	mapfn = cdev->d_mmap;
 	if (mapfn == NULL || mapfn == nommap || mapfn == nullmmap) {
 		return(NULL);
 	}
 
 	/*
 	 * Negative offsets on the object are not allowed.
 	 */
 
-	if (off != UVM_UNKNOWN_OFFSET && off < 0)
+	if ((cdev->d_flag & D_NEGOFFSAFE) == 0 &&
+	    off != UVM_UNKNOWN_OFFSET && off < 0)
 		return(NULL);
 
 	/*
 	 * Check that the specified range of the device allows the
 	 * desired protection.
 	 *
 	 * XXX assumes VM_PROT_* == PROT_*
 	 * XXX clobbers off and size, but nothing else here needs them.
 	 */
 
 	while (size != 0) {
 		if (cdev_mmap(device, off, accessprot) == -1) {
 			return (NULL);
 		}
 		off += PAGE_SIZE; size -= PAGE_SIZE;
 	}
 
 	/*
 	 * keep looping until we get it
 	 */
 
 	for (;;) {
 
 		/*
 		 * first, attempt to find it on the main list
 		 */
 
 		mutex_enter(&udv_lock);
 		LIST_FOREACH(lcv, &udv_list, u_list) {
 			if (device == lcv->u_device)
 				break;
 		}
 
 		/*
 		 * got it on main list. put a hold on it and unlock udv_lock.
 		 */
 
 		if (lcv) {
 
 			/*
 			 * if someone else has a hold on it, sleep and start
 			 * over again.
 			 */
 
 			if (lcv->u_flags & UVM_DEVICE_HOLD) {
 				lcv->u_flags |= UVM_DEVICE_WANTED;
 				UVM_UNLOCK_AND_WAIT(lcv, &udv_lock, false,
 				    "udv_attach",0);
 				continue;
 			}
 
 			/* we are now holding it */
 			lcv->u_flags |= UVM_DEVICE_HOLD;
 			mutex_exit(&udv_lock);
 
 			/*
 			 * bump reference count, unhold, return.
 			 */
 
 			mutex_enter(&lcv->u_obj.vmobjlock);
 			lcv->u_obj.uo_refs++;
 			mutex_exit(&lcv->u_obj.vmobjlock);
 
 			mutex_enter(&udv_lock);
 			if (lcv->u_flags & UVM_DEVICE_WANTED)
 				wakeup(lcv);
 			lcv->u_flags &= ~(UVM_DEVICE_WANTED|UVM_DEVICE_HOLD);
 			mutex_exit(&udv_lock);
 			return(&lcv->u_obj);
 		}
 
 		/*
 		 * did not find it on main list. need to malloc a new one.
 		 */
 
 		mutex_exit(&udv_lock);
 		/* NOTE: we could sleep in the following malloc() */
 		udv = malloc(sizeof(*udv), M_TEMP, M_WAITOK);
 		mutex_enter(&udv_lock);
 
 		/*
 		 * now we have to double check to make sure no one added it
 		 * to the list while we were sleeping...
 		 */
 
 		LIST_FOREACH(lcv, &udv_list, u_list) {
 			if (device == lcv->u_device)
 				break;
 		}
 
 		/*
 		 * did we lose a race to someone else?
 		 * free our memory and retry.
 		 */
 
 		if (lcv) {
 			mutex_exit(&udv_lock);
 			free(udv, M_TEMP);
 			continue;
 		}
 
 		/*
 		 * we have it! init the data structures, add to list
 		 * and return.
 		 */
 
 		UVM_OBJ_INIT(&udv->u_obj, &uvm_deviceops, 1);
 		udv->u_flags = 0;
 		udv->u_device = device;
 		LIST_INSERT_HEAD(&udv_list, udv, u_list);
 		mutex_exit(&udv_lock);
 		return(&udv->u_obj);
 	}
 	/*NOTREACHED*/
 }
 
 /*
  * udv_reference
  *
  * add a reference to a VM object. Note that the reference count must
  * already be one (the passed in reference) so there is no chance of the
  * udv being released or locked out here.
  *
  * => caller must call with object unlocked.
  */
 
 static void
 udv_reference(struct uvm_object *uobj)
 {
 	UVMHIST_FUNC("udv_reference"); UVMHIST_CALLED(maphist);
 
 	mutex_enter(&uobj->vmobjlock);
 	uobj->uo_refs++;
 	UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
 	    uobj, uobj->uo_refs,0,0);
 	mutex_exit(&uobj->vmobjlock);
 }
 
 /*
  * udv_detach
  *
  * remove a reference to a VM object.
  *
  * => caller must call with object unlocked and map locked.
  */
 
 static void
 udv_detach(struct uvm_object *uobj)
 {
 	struct uvm_device *udv = (struct uvm_device *)uobj;
 	UVMHIST_FUNC("udv_detach"); UVMHIST_CALLED(maphist);
 
 	/*
 	 * loop until done
 	 */
 again:
 	mutex_enter(&uobj->vmobjlock);
 	if (uobj->uo_refs > 1) {
 		uobj->uo_refs--;
 		mutex_exit(&uobj->vmobjlock);
 		UVMHIST_LOG(maphist," <- done, uobj=0x%x, ref=%d",
 		    uobj,uobj->uo_refs,0,0);
 		return;
 	}
 
 	/*
 	 * is it being held? if so, wait until others are done.
 	 */
 
 	mutex_enter(&udv_lock);
 	if (udv->u_flags & UVM_DEVICE_HOLD) {
 		udv->u_flags |= UVM_DEVICE_WANTED;
 		mutex_exit(&uobj->vmobjlock);
 		UVM_UNLOCK_AND_WAIT(udv, &udv_lock, false, "udv_detach",0);
 		goto again;
 	}
 
 	/*
 	 * got it! nuke it now.
 	 */
 
 	LIST_REMOVE(udv, u_list);
 	if (udv->u_flags & UVM_DEVICE_WANTED)
 		wakeup(udv);
 	mutex_exit(&udv_lock);
 	mutex_exit(&uobj->vmobjlock);
 	UVM_OBJ_DESTROY(uobj);
 	free(udv, M_TEMP);
 	UVMHIST_LOG(maphist," <- done, freed uobj=0x%x", uobj,0,0,0);
 }
 
 /*
  * udv_fault: non-standard fault routine for device "pages"
  *
  * => rather than having a "get" function, we have a fault routine
  *	since we don't return vm_pages we need full control over the
  *	pmap_enter map in
  * => all the usual fault data structured are locked by the caller
  *	(i.e. maps(read), amap (if any), uobj)
  * => on return, we unlock all fault data structures
  * => flags: PGO_ALLPAGES: get all of the pages
  *	     PGO_LOCKED: fault data structures are locked
  *    XXX: currently PGO_LOCKED is always required ... consider removing
  *	it as a flag
  * => NOTE: vaddr is the VA of pps[0] in ufi->entry, _NOT_ pps[centeridx]
  */
 
 static int
 udv_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, struct vm_page **pps,
     int npages, int centeridx, vm_prot_t access_type,
     int flags)
 {
 	struct vm_map_entry *entry = ufi->entry;
 	struct uvm_object *uobj = entry->object.uvm_obj;
 	struct uvm_device *udv = (struct uvm_device *)uobj;
 	vaddr_t curr_va;
 	off_t curr_offset;
 	paddr_t paddr, mdpgno;
 	int lcv, retval;
 	dev_t device;
 	vm_prot_t mapprot;
 	UVMHIST_FUNC("udv_fault"); UVMHIST_CALLED(maphist);
 	UVMHIST_LOG(maphist," flags=%d", flags,0,0,0);
 
 	/*
 	 * we do not allow device mappings to be mapped copy-on-write
 	 * so we kill any attempt to do so here.
 	 */
 
 	if (UVM_ET_ISCOPYONWRITE(entry)) {
 		UVMHIST_LOG(maphist, "<- failed -- COW entry (etype=0x%x)",
 		    entry->etype, 0,0,0);
 		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
 		return(EIO);
 	}
 
 	/*
 	 * get device map function.
 	 */
 
 	device = udv->u_device;
 	if (cdevsw_lookup(device) == NULL) {
 		/* XXX This should not happen */
 		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
 		return (EIO);
 	}
 
 	/*
 	 * now we must determine the offset in udv to use and the VA to
 	 * use for pmap_enter. note that we always use orig_map's pmap
 	 * for pmap_enter (even if we have a submap). since virtual
 	 * addresses in a submap must match the main map, this is ok.
 	 */
 
 	/* udv offset = (offset from start of entry) + entry's offset */
 	curr_offset = entry->offset + (vaddr - entry->start);
 	/* pmap va = vaddr (virtual address of pps[0]) */
 	curr_va = vaddr;
 
 	/*
 	 * loop over the page range entering in as needed
 	 */
 
 	retval = 0;
 	for (lcv = 0 ; lcv < npages ; lcv++, curr_offset += PAGE_SIZE,
 	    curr_va += PAGE_SIZE) {
 		if ((flags & PGO_ALLPAGES) == 0 && lcv != centeridx)
 			continue;
 
 		if (pps[lcv] == PGO_DONTCARE)
 			continue;
 
 		mdpgno = cdev_mmap(device, curr_offset, access_type);
 		if (mdpgno == -1) {
 			retval = EIO;
 			break;
 		}
 		paddr = pmap_phys_address(mdpgno);
 		mapprot = ufi->entry->protection;
 		UVMHIST_LOG(maphist,
 		    "  MAPPING: device: pm=0x%x, va=0x%x, pa=0x%lx, at=%d",
 		    ufi->orig_map->pmap, curr_va, paddr, mapprot);
 		if (pmap_enter(ufi->orig_map->pmap, curr_va, paddr,
 		    mapprot, PMAP_CANFAIL | mapprot) != 0) {
 			/*
 			 * pmap_enter() didn't have the resource to
 			 * enter this mapping. Unlock everything,
 			 * wait for the pagedaemon to free up some
 			 * pages, and then tell uvm_fault() to start
 			 * the fault again.
 			 *
 			 * XXX Needs some rethinking for the PGO_ALLPAGES
 			 * XXX case.
 			 */
 			pmap_update(ufi->orig_map->pmap); /* sync what we have so far */
 			uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
 			    uobj, NULL);
 			uvm_wait("udv_fault");
 			return (ERESTART);
 		}
 	}
 
 	pmap_update(ufi->orig_map->pmap);
 	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
 	return (retval);
 }