Thu Jun 25 17:01:20 2020 UTC
Register NVMM as an actual pseudo-device, without a PMF handler, so that
ACPI suspend is explicitly disallowed while NVMM is running.

Should fix PR/55406.


(maxv)
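
How the suspend blocking works (context, not part of the diff): a NetBSD
driver normally opts in to power management from its attach routine by
registering PMF handlers, and pmf(9) refuses to suspend the system while
any attached device has no registered handler. A minimal sketch, assuming
the standard pmf(9) API (xxx_attach is a hypothetical driver, not code
from this commit):

	/* A typical driver opts in to suspend/resume from attach. */
	static void
	xxx_attach(device_t parent, device_t self, void *aux)
	{
		/* No driver-specific hooks, just register with pmf(9). */
		if (!pmf_device_register(self, NULL, NULL))
			aprint_error_dev(self,
			    "couldn't establish power handler\n");
	}

Because nvmm_attach() below deliberately never calls pmf_device_register(),
an attached nvmm device leaves pmf_system_suspend() with a device that has
no power handler, so ACPI suspend fails while NVMM is in use.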
diff -r1.2 -r1.3 src/sys/dev/nvmm/files.nvmm
diff -r1.30 -r1.31 src/sys/dev/nvmm/nvmm.c
diff -r1.1 -r1.2 src/sys/modules/nvmm/nvmm.ioconf

cvs diff -r1.2 -r1.3 src/sys/dev/nvmm/files.nvmm

--- src/sys/dev/nvmm/files.nvmm 2019/03/28 19:00:40 1.2
+++ src/sys/dev/nvmm/files.nvmm 2020/06/25 17:01:19 1.3
@@ -1,14 +1,14 @@
-# $NetBSD: files.nvmm,v 1.2 2019/03/28 19:00:40 maxv Exp $
+# $NetBSD: files.nvmm,v 1.3 2020/06/25 17:01:19 maxv Exp $
 
-defpseudo nvmm
+defpseudodev nvmm
 
 file	dev/nvmm/nvmm.c		nvmm
 
 ifdef amd64
 file	dev/nvmm/x86/nvmm_x86.c		nvmm
 file	dev/nvmm/x86/nvmm_x86_svm.c	nvmm
 file	dev/nvmm/x86/nvmm_x86_svmfunc.S	nvmm
 file	dev/nvmm/x86/nvmm_x86_vmx.c	nvmm
 file	dev/nvmm/x86/nvmm_x86_vmxfunc.S	nvmm
 endif
 

cvs diff -r1.30 -r1.31 src/sys/dev/nvmm/nvmm.c

--- src/sys/dev/nvmm/nvmm.c 2020/05/24 08:08:49 1.30
+++ src/sys/dev/nvmm/nvmm.c 2020/06/25 17:01:19 1.31
@@ -1,1228 +1,1312 @@
-/*	$NetBSD: nvmm.c,v 1.30 2020/05/24 08:08:49 maxv Exp $	*/
+/*	$NetBSD: nvmm.c,v 1.31 2020/06/25 17:01:19 maxv Exp $	*/
 
 /*
  * Copyright (c) 2018-2019 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Maxime Villard.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: nvmm.c,v 1.30 2020/05/24 08:08:49 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: nvmm.c,v 1.31 2020/06/25 17:01:19 maxv Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
 
 #include <sys/cpu.h>
 #include <sys/conf.h>
 #include <sys/kmem.h>
 #include <sys/module.h>
 #include <sys/proc.h>
 #include <sys/mman.h>
 #include <sys/file.h>
 #include <sys/filedesc.h>
-#include <sys/kauth.h>
+#include <sys/device.h>
 
 #include <uvm/uvm.h>
 #include <uvm/uvm_page.h>
 
 #include "ioconf.h"
 
 #include <dev/nvmm/nvmm.h>
 #include <dev/nvmm/nvmm_internal.h>
 #include <dev/nvmm/nvmm_ioctl.h>
 
 static struct nvmm_machine machines[NVMM_MAX_MACHINES];
 static volatile unsigned int nmachines __cacheline_aligned;
 
 static const struct nvmm_impl *nvmm_impl_list[] = {
 	&nvmm_x86_svm,	/* x86 AMD SVM */
 	&nvmm_x86_vmx	/* x86 Intel VMX */
 };
 
 static const struct nvmm_impl *nvmm_impl = NULL;
 
 static struct nvmm_owner root_owner;
 
 /* -------------------------------------------------------------------------- */
 
 static int
 nvmm_machine_alloc(struct nvmm_machine **ret)
 {
 	struct nvmm_machine *mach;
 	size_t i;
 
 	for (i = 0; i < NVMM_MAX_MACHINES; i++) {
 		mach = &machines[i];
 
 		rw_enter(&mach->lock, RW_WRITER);
 		if (mach->present) {
 			rw_exit(&mach->lock);
 			continue;
 		}
 
 		mach->present = true;
 		mach->time = time_second;
 		*ret = mach;
 		atomic_inc_uint(&nmachines);
 		return 0;
 	}
 
 	return ENOBUFS;
 }
 
 static void
 nvmm_machine_free(struct nvmm_machine *mach)
 {
 	KASSERT(rw_write_held(&mach->lock));
 	KASSERT(mach->present);
 	mach->present = false;
 	atomic_dec_uint(&nmachines);
 }
 
 static int
 nvmm_machine_get(struct nvmm_owner *owner, nvmm_machid_t machid,
     struct nvmm_machine **ret, bool writer)
 {
 	struct nvmm_machine *mach;
 	krw_t op = writer ? RW_WRITER : RW_READER;
 
 	if (machid >= NVMM_MAX_MACHINES) {
 		return EINVAL;
 	}
 	mach = &machines[machid];
 
 	rw_enter(&mach->lock, op);
 	if (!mach->present) {
 		rw_exit(&mach->lock);
 		return ENOENT;
 	}
 	if (owner != &root_owner && mach->owner != owner) {
 		rw_exit(&mach->lock);
 		return EPERM;
 	}
 	*ret = mach;
 
 	return 0;
 }
 
 static void
 nvmm_machine_put(struct nvmm_machine *mach)
 {
 	rw_exit(&mach->lock);
 }
 
 /* -------------------------------------------------------------------------- */
 
 static int
 nvmm_vcpu_alloc(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
     struct nvmm_cpu **ret)
 {
 	struct nvmm_cpu *vcpu;
 
 	if (cpuid >= NVMM_MAX_VCPUS) {
 		return EINVAL;
 	}
 	vcpu = &mach->cpus[cpuid];
 
 	mutex_enter(&vcpu->lock);
 	if (vcpu->present) {
 		mutex_exit(&vcpu->lock);
 		return EBUSY;
 	}
 
 	vcpu->present = true;
 	vcpu->comm = NULL;
 	vcpu->hcpu_last = -1;
 	*ret = vcpu;
 	return 0;
 }
 
 static void
 nvmm_vcpu_free(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
 {
 	KASSERT(mutex_owned(&vcpu->lock));
 	vcpu->present = false;
 	if (vcpu->comm != NULL) {
 		uvm_deallocate(kernel_map, (vaddr_t)vcpu->comm, PAGE_SIZE);
 	}
 }
 
 static int
 nvmm_vcpu_get(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
     struct nvmm_cpu **ret)
 {
 	struct nvmm_cpu *vcpu;
 
 	if (cpuid >= NVMM_MAX_VCPUS) {
 		return EINVAL;
 	}
 	vcpu = &mach->cpus[cpuid];
 
 	mutex_enter(&vcpu->lock);
 	if (!vcpu->present) {
 		mutex_exit(&vcpu->lock);
 		return ENOENT;
 	}
 	*ret = vcpu;
 
 	return 0;
 }
 
 static void
 nvmm_vcpu_put(struct nvmm_cpu *vcpu)
 {
 	mutex_exit(&vcpu->lock);
 }
 
 /* -------------------------------------------------------------------------- */
 
 static void
 nvmm_kill_machines(struct nvmm_owner *owner)
 {
 	struct nvmm_machine *mach;
 	struct nvmm_cpu *vcpu;
 	size_t i, j;
 	int error;
 
 	for (i = 0; i < NVMM_MAX_MACHINES; i++) {
 		mach = &machines[i];
 
 		rw_enter(&mach->lock, RW_WRITER);
 		if (!mach->present || mach->owner != owner) {
 			rw_exit(&mach->lock);
 			continue;
 		}
 
 		/* Kill it. */
 		for (j = 0; j < NVMM_MAX_VCPUS; j++) {
 			error = nvmm_vcpu_get(mach, j, &vcpu);
 			if (error)
 				continue;
 			(*nvmm_impl->vcpu_destroy)(mach, vcpu);
 			nvmm_vcpu_free(mach, vcpu);
 			nvmm_vcpu_put(vcpu);
 		}
 		(*nvmm_impl->machine_destroy)(mach);
 		uvmspace_free(mach->vm);
 
 		/* Drop the kernel UOBJ refs. */
 		for (j = 0; j < NVMM_MAX_HMAPPINGS; j++) {
 			if (!mach->hmap[j].present)
 				continue;
 			uao_detach(mach->hmap[j].uobj);
 		}
 
 		nvmm_machine_free(mach);
 
 		rw_exit(&mach->lock);
 	}
 }
 
 /* -------------------------------------------------------------------------- */
 
 static int
 nvmm_capability(struct nvmm_owner *owner, struct nvmm_ioc_capability *args)
 {
 	args->cap.version = NVMM_KERN_VERSION;
 	args->cap.state_size = nvmm_impl->state_size;
 	args->cap.max_machines = NVMM_MAX_MACHINES;
 	args->cap.max_vcpus = NVMM_MAX_VCPUS;
 	args->cap.max_ram = NVMM_MAX_RAM;
 
 	(*nvmm_impl->capability)(&args->cap);
 
 	return 0;
 }
 
 static int
 nvmm_machine_create(struct nvmm_owner *owner,
     struct nvmm_ioc_machine_create *args)
 {
 	struct nvmm_machine *mach;
 	int error;
 
 	error = nvmm_machine_alloc(&mach);
 	if (error)
 		return error;
 
 	/* Curproc owns the machine. */
 	mach->owner = owner;
 
 	/* Zero out the host mappings. */
 	memset(&mach->hmap, 0, sizeof(mach->hmap));
 
 	/* Create the machine vmspace. */
 	mach->gpa_begin = 0;
 	mach->gpa_end = NVMM_MAX_RAM;
 	mach->vm = uvmspace_alloc(0, mach->gpa_end - mach->gpa_begin, false);
 
 	/* Create the comm uobj. */
 	mach->commuobj = uao_create(NVMM_MAX_VCPUS * PAGE_SIZE, 0);
 
 	(*nvmm_impl->machine_create)(mach);
 
 	args->machid = mach->machid;
 	nvmm_machine_put(mach);
 
 	return 0;
 }
 
 static int
 nvmm_machine_destroy(struct nvmm_owner *owner,
     struct nvmm_ioc_machine_destroy *args)
 {
 	struct nvmm_machine *mach;
 	struct nvmm_cpu *vcpu;
 	int error;
 	size_t i;
 
 	error = nvmm_machine_get(owner, args->machid, &mach, true);
 	if (error)
 		return error;
 
 	for (i = 0; i < NVMM_MAX_VCPUS; i++) {
 		error = nvmm_vcpu_get(mach, i, &vcpu);
 		if (error)
 			continue;
 
 		(*nvmm_impl->vcpu_destroy)(mach, vcpu);
 		nvmm_vcpu_free(mach, vcpu);
 		nvmm_vcpu_put(vcpu);
 	}
 
 	(*nvmm_impl->machine_destroy)(mach);
 
 	/* Free the machine vmspace. */
 	uvmspace_free(mach->vm);
 
 	/* Drop the kernel UOBJ refs. */
 	for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
 		if (!mach->hmap[i].present)
 			continue;
 		uao_detach(mach->hmap[i].uobj);
 	}
 
 	nvmm_machine_free(mach);
 	nvmm_machine_put(mach);
 
 	return 0;
 }
 
 static int
 nvmm_machine_configure(struct nvmm_owner *owner,
     struct nvmm_ioc_machine_configure *args)
 {
 	struct nvmm_machine *mach;
 	size_t allocsz;
 	uint64_t op;
 	void *data;
 	int error;
 
 	op = NVMM_MACH_CONF_MD(args->op);
 	if (__predict_false(op >= nvmm_impl->mach_conf_max)) {
 		return EINVAL;
 	}
 
 	allocsz = nvmm_impl->mach_conf_sizes[op];
 	data = kmem_alloc(allocsz, KM_SLEEP);
 
 	error = nvmm_machine_get(owner, args->machid, &mach, true);
 	if (error) {
 		kmem_free(data, allocsz);
 		return error;
 	}
 
 	error = copyin(args->conf, data, allocsz);
 	if (error) {
 		goto out;
 	}
 
 	error = (*nvmm_impl->machine_configure)(mach, op, data);
 
 out:
 	nvmm_machine_put(mach);
 	kmem_free(data, allocsz);
 	return error;
 }
 
 static int
 nvmm_vcpu_create(struct nvmm_owner *owner, struct nvmm_ioc_vcpu_create *args)
 {
 	struct nvmm_machine *mach;
 	struct nvmm_cpu *vcpu;
 	int error;
 
 	error = nvmm_machine_get(owner, args->machid, &mach, false);
 	if (error)
 		return error;
 
 	error = nvmm_vcpu_alloc(mach, args->cpuid, &vcpu);
 	if (error)
 		goto out;
 
 	/* Allocate the comm page. */
 	uao_reference(mach->commuobj);
 	error = uvm_map(kernel_map, (vaddr_t *)&vcpu->comm, PAGE_SIZE,
 	    mach->commuobj, args->cpuid * PAGE_SIZE, 0, UVM_MAPFLAG(UVM_PROT_RW,
 	    UVM_PROT_RW, UVM_INH_SHARE, UVM_ADV_RANDOM, 0));
 	if (error) {
 		uao_detach(mach->commuobj);
 		nvmm_vcpu_free(mach, vcpu);
 		nvmm_vcpu_put(vcpu);
 		goto out;
 	}
 	error = uvm_map_pageable(kernel_map, (vaddr_t)vcpu->comm,
 	    (vaddr_t)vcpu->comm + PAGE_SIZE, false, 0);
 	if (error) {
 		nvmm_vcpu_free(mach, vcpu);
 		nvmm_vcpu_put(vcpu);
 		goto out;
 	}
 	memset(vcpu->comm, 0, PAGE_SIZE);
 
 	error = (*nvmm_impl->vcpu_create)(mach, vcpu);
 	if (error) {
 		nvmm_vcpu_free(mach, vcpu);
 		nvmm_vcpu_put(vcpu);
 		goto out;
 	}
 
 	nvmm_vcpu_put(vcpu);
 
 	atomic_inc_uint(&mach->ncpus);
 
 out:
 	nvmm_machine_put(mach);
 	return error;
 }
 
 static int
 nvmm_vcpu_destroy(struct nvmm_owner *owner, struct nvmm_ioc_vcpu_destroy *args)
 {
 	struct nvmm_machine *mach;
 	struct nvmm_cpu *vcpu;
 	int error;
 
 	error = nvmm_machine_get(owner, args->machid, &mach, false);
 	if (error)
 		return error;
 
 	error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
 	if (error)
 		goto out;
 
 	(*nvmm_impl->vcpu_destroy)(mach, vcpu);
 	nvmm_vcpu_free(mach, vcpu);
 	nvmm_vcpu_put(vcpu);
 
 	atomic_dec_uint(&mach->ncpus);
 
 out:
 	nvmm_machine_put(mach);
 	return error;
 }
 
 static int
 nvmm_vcpu_configure(struct nvmm_owner *owner,
     struct nvmm_ioc_vcpu_configure *args)
 {
 	struct nvmm_machine *mach;
 	struct nvmm_cpu *vcpu;
 	size_t allocsz;
 	uint64_t op;
 	void *data;
 	int error;
 
 	op = NVMM_VCPU_CONF_MD(args->op);
 	if (__predict_false(op >= nvmm_impl->vcpu_conf_max))
 		return EINVAL;
 
 	allocsz = nvmm_impl->vcpu_conf_sizes[op];
 	data = kmem_alloc(allocsz, KM_SLEEP);
 
 	error = nvmm_machine_get(owner, args->machid, &mach, false);
 	if (error) {
 		kmem_free(data, allocsz);
 		return error;
 	}
 
 	error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
 	if (error) {
 		nvmm_machine_put(mach);
 		kmem_free(data, allocsz);
 		return error;
 	}
 
 	error = copyin(args->conf, data, allocsz);
 	if (error) {
 		goto out;
 	}
 
 	error = (*nvmm_impl->vcpu_configure)(vcpu, op, data);
 
 out:
 	nvmm_vcpu_put(vcpu);
 	nvmm_machine_put(mach);
 	kmem_free(data, allocsz);
 	return error;
 }
 
 static int
 nvmm_vcpu_setstate(struct nvmm_owner *owner,
     struct nvmm_ioc_vcpu_setstate *args)
 {
 	struct nvmm_machine *mach;
 	struct nvmm_cpu *vcpu;
 	int error;
 
 	error = nvmm_machine_get(owner, args->machid, &mach, false);
 	if (error)
 		return error;
 
 	error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
 	if (error)
 		goto out;
 
 	(*nvmm_impl->vcpu_setstate)(vcpu);
 	nvmm_vcpu_put(vcpu);
 
 out:
 	nvmm_machine_put(mach);
 	return error;
 }
 
 static int
 nvmm_vcpu_getstate(struct nvmm_owner *owner,
     struct nvmm_ioc_vcpu_getstate *args)
 {
 	struct nvmm_machine *mach;
 	struct nvmm_cpu *vcpu;
 	int error;
 
 	error = nvmm_machine_get(owner, args->machid, &mach, false);
 	if (error)
 		return error;
 
 	error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
 	if (error)
 		goto out;
 
 	(*nvmm_impl->vcpu_getstate)(vcpu);
 	nvmm_vcpu_put(vcpu);
 
 out:
 	nvmm_machine_put(mach);
 	return error;
 }
 
 static int
 nvmm_vcpu_inject(struct nvmm_owner *owner, struct nvmm_ioc_vcpu_inject *args)
 {
 	struct nvmm_machine *mach;
 	struct nvmm_cpu *vcpu;
 	int error;
 
 	error = nvmm_machine_get(owner, args->machid, &mach, false);
 	if (error)
 		return error;
 
 	error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
 	if (error)
 		goto out;
 
 	error = (*nvmm_impl->vcpu_inject)(vcpu);
 	nvmm_vcpu_put(vcpu);
 
 out:
 	nvmm_machine_put(mach);
 	return error;
 }
 
 static int
 nvmm_do_vcpu_run(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
     struct nvmm_vcpu_exit *exit)
 {
 	struct vmspace *vm = mach->vm;
 	int ret;
 
 	while (1) {
 		/* Got a signal? Or pending resched? Leave. */
 		if (__predict_false(nvmm_return_needed())) {
 			exit->reason = NVMM_VCPU_EXIT_NONE;
 			return 0;
 		}
 
 		/* Run the VCPU. */
 		ret = (*nvmm_impl->vcpu_run)(mach, vcpu, exit);
 		if (__predict_false(ret != 0)) {
 			return ret;
 		}
 
 		/* Process nested page faults. */
 		if (__predict_true(exit->reason != NVMM_VCPU_EXIT_MEMORY)) {
 			break;
 		}
 		if (exit->u.mem.gpa >= mach->gpa_end) {
 			break;
 		}
 		if (uvm_fault(&vm->vm_map, exit->u.mem.gpa, exit->u.mem.prot)) {
 			break;
 		}
 	}
 
 	return 0;
 }
 
 static int
 nvmm_vcpu_run(struct nvmm_owner *owner, struct nvmm_ioc_vcpu_run *args)
 {
 	struct nvmm_machine *mach;
 	struct nvmm_cpu *vcpu;
 	int error;
 
 	error = nvmm_machine_get(owner, args->machid, &mach, false);
 	if (error)
 		return error;
 
 	error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
 	if (error)
 		goto out;
 
 	error = nvmm_do_vcpu_run(mach, vcpu, &args->exit);
 	nvmm_vcpu_put(vcpu);
 
 out:
 	nvmm_machine_put(mach);
 	return error;
 }
 
 /* -------------------------------------------------------------------------- */
 
 static struct uvm_object *
 nvmm_hmapping_getuobj(struct nvmm_machine *mach, uintptr_t hva, size_t size,
     size_t *off)
 {
 	struct nvmm_hmapping *hmapping;
 	size_t i;
 
 	for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
 		hmapping = &mach->hmap[i];
 		if (!hmapping->present) {
 			continue;
 		}
 		if (hva >= hmapping->hva &&
 		    hva + size <= hmapping->hva + hmapping->size) {
 			*off = hva - hmapping->hva;
 			return hmapping->uobj;
 		}
 	}
 
 	return NULL;
 }
 
 static int
 nvmm_hmapping_validate(struct nvmm_machine *mach, uintptr_t hva, size_t size)
 {
 	struct nvmm_hmapping *hmapping;
 	size_t i;
 
 	if ((hva % PAGE_SIZE) != 0 || (size % PAGE_SIZE) != 0) {
 		return EINVAL;
 	}
 	if (hva == 0) {
 		return EINVAL;
 	}
 
 	for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
 		hmapping = &mach->hmap[i];
 		if (!hmapping->present) {
 			continue;
 		}
 
 		if (hva >= hmapping->hva &&
 		    hva + size <= hmapping->hva + hmapping->size) {
 			break;
 		}
 
 		if (hva >= hmapping->hva &&
 		    hva < hmapping->hva + hmapping->size) {
 			return EEXIST;
 		}
 		if (hva + size > hmapping->hva &&
 		    hva + size <= hmapping->hva + hmapping->size) {
 			return EEXIST;
 		}
 		if (hva <= hmapping->hva &&
 		    hva + size >= hmapping->hva + hmapping->size) {
 			return EEXIST;
 		}
 	}
 
 	return 0;
 }
 
 static struct nvmm_hmapping *
 nvmm_hmapping_alloc(struct nvmm_machine *mach)
 {
 	struct nvmm_hmapping *hmapping;
 	size_t i;
 
 	for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
 		hmapping = &mach->hmap[i];
 		if (!hmapping->present) {
 			hmapping->present = true;
 			return hmapping;
 		}
 	}
 
 	return NULL;
 }
 
 static int
 nvmm_hmapping_free(struct nvmm_machine *mach, uintptr_t hva, size_t size)
 {
 	struct vmspace *vmspace = curproc->p_vmspace;
 	struct nvmm_hmapping *hmapping;
 	size_t i;
 
 	for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
 		hmapping = &mach->hmap[i];
 		if (!hmapping->present || hmapping->hva != hva ||
 		    hmapping->size != size) {
 			continue;
 		}
 
 		uvm_unmap(&vmspace->vm_map, hmapping->hva,
 		    hmapping->hva + hmapping->size);
 		uao_detach(hmapping->uobj);
 
 		hmapping->uobj = NULL;
 		hmapping->present = false;
 
 		return 0;
 	}
 
 	return ENOENT;
 }
 
 static int
 nvmm_hva_map(struct nvmm_owner *owner, struct nvmm_ioc_hva_map *args)
 {
 	struct vmspace *vmspace = curproc->p_vmspace;
 	struct nvmm_machine *mach;
 	struct nvmm_hmapping *hmapping;
 	vaddr_t uva;
 	int error;
 
 	error = nvmm_machine_get(owner, args->machid, &mach, true);
 	if (error)
 		return error;
 
 	error = nvmm_hmapping_validate(mach, args->hva, args->size);
 	if (error)
 		goto out;
 
 	hmapping = nvmm_hmapping_alloc(mach);
 	if (hmapping == NULL) {
 		error = ENOBUFS;
 		goto out;
 	}
 
 	hmapping->hva = args->hva;
 	hmapping->size = args->size;
 	hmapping->uobj = uao_create(hmapping->size, 0);
 	uva = hmapping->hva;
 
 	/* Take a reference for the user. */
 	uao_reference(hmapping->uobj);
 
 	/* Map the uobj into the user address space, as pageable. */
 	error = uvm_map(&vmspace->vm_map, &uva, hmapping->size, hmapping->uobj,
 	    0, 0, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_SHARE,
 	    UVM_ADV_RANDOM, UVM_FLAG_FIXED|UVM_FLAG_UNMAP));
 	if (error) {
 		uao_detach(hmapping->uobj);
 	}
 
 out:
 	nvmm_machine_put(mach);
 	return error;
 }
 
 static int
 nvmm_hva_unmap(struct nvmm_owner *owner, struct nvmm_ioc_hva_unmap *args)
 {
 	struct nvmm_machine *mach;
 	int error;
 
 	error = nvmm_machine_get(owner, args->machid, &mach, true);
 	if (error)
 		return error;
 
 	error = nvmm_hmapping_free(mach, args->hva, args->size);
 
 	nvmm_machine_put(mach);
 	return error;
 }
 
 /* -------------------------------------------------------------------------- */
 
 static int
 nvmm_gpa_map(struct nvmm_owner *owner, struct nvmm_ioc_gpa_map *args)
 {
 	struct nvmm_machine *mach;
 	struct uvm_object *uobj;
 	gpaddr_t gpa;
 	size_t off;
 	int error;
 
 	error = nvmm_machine_get(owner, args->machid, &mach, false);
 	if (error)
 		return error;
 
 	if ((args->prot & ~(PROT_READ|PROT_WRITE|PROT_EXEC)) != 0) {
 		error = EINVAL;
 		goto out;
 	}
 
 	if ((args->gpa % PAGE_SIZE) != 0 || (args->size % PAGE_SIZE) != 0 ||
 	    (args->hva % PAGE_SIZE) != 0) {
 		error = EINVAL;
 		goto out;
 	}
 	if (args->hva == 0) {
 		error = EINVAL;
 		goto out;
 	}
 	if (args->gpa < mach->gpa_begin || args->gpa >= mach->gpa_end) {
 		error = EINVAL;
 		goto out;
 	}
 	if (args->gpa + args->size <= args->gpa) {
 		error = EINVAL;
 		goto out;
 	}
 	if (args->gpa + args->size > mach->gpa_end) {
 		error = EINVAL;
 		goto out;
 	}
 	gpa = args->gpa;
 
 	uobj = nvmm_hmapping_getuobj(mach, args->hva, args->size, &off);
 	if (uobj == NULL) {
 		error = EINVAL;
 		goto out;
 	}
 
 	/* Take a reference for the machine. */
 	uao_reference(uobj);
 
 	/* Map the uobj into the machine address space, as pageable. */
 	error = uvm_map(&mach->vm->vm_map, &gpa, args->size, uobj, off, 0,
 	    UVM_MAPFLAG(args->prot, UVM_PROT_RWX, UVM_INH_NONE,
 	    UVM_ADV_RANDOM, UVM_FLAG_FIXED|UVM_FLAG_UNMAP));
 	if (error) {
 		uao_detach(uobj);
 		goto out;
 	}
 	if (gpa != args->gpa) {
 		uao_detach(uobj);
 		printf("[!] uvm_map problem\n");
 		error = EINVAL;
 		goto out;
 	}
 
 out:
 	nvmm_machine_put(mach);
 	return error;
 }
 
 static int
 nvmm_gpa_unmap(struct nvmm_owner *owner, struct nvmm_ioc_gpa_unmap *args)
 {
 	struct nvmm_machine *mach;
 	gpaddr_t gpa;
 	int error;
 
 	error = nvmm_machine_get(owner, args->machid, &mach, false);
 	if (error)
 		return error;
 
 	if ((args->gpa % PAGE_SIZE) != 0 || (args->size % PAGE_SIZE) != 0) {
 		error = EINVAL;
 		goto out;
 	}
 	if (args->gpa < mach->gpa_begin || args->gpa >= mach->gpa_end) {
 		error = EINVAL;
 		goto out;
 	}
 	if (args->gpa + args->size <= args->gpa) {
 		error = EINVAL;
 		goto out;
 	}
 	if (args->gpa + args->size >= mach->gpa_end) {
 		error = EINVAL;
 		goto out;
 	}
 	gpa = args->gpa;
 
 	/* Unmap the memory from the machine. */
 	uvm_unmap(&mach->vm->vm_map, gpa, gpa + args->size);
 
 out:
 	nvmm_machine_put(mach);
 	return error;
 }
 
 /* -------------------------------------------------------------------------- */
 
 static int
 nvmm_ctl_mach_info(struct nvmm_owner *owner, struct nvmm_ioc_ctl *args)
 {
 	struct nvmm_ctl_mach_info ctl;
 	struct nvmm_machine *mach;
 	struct nvmm_cpu *vcpu;
 	int error;
 	size_t i;
 
 	if (args->size != sizeof(ctl))
 		return EINVAL;
 	error = copyin(args->data, &ctl, sizeof(ctl));
 	if (error)
 		return error;
 
 	error = nvmm_machine_get(owner, ctl.machid, &mach, true);
 	if (error)
 		return error;
 
 	ctl.nvcpus = 0;
 	for (i = 0; i < NVMM_MAX_VCPUS; i++) {
 		error = nvmm_vcpu_get(mach, i, &vcpu);
 		if (error)
 			continue;
 		ctl.nvcpus++;
 		nvmm_vcpu_put(vcpu);
 	}
 
 	ctl.nram = 0;
 	for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
 		if (!mach->hmap[i].present)
 			continue;
 		ctl.nram += mach->hmap[i].size;
 	}
 
 	ctl.pid = mach->owner->pid;
 	ctl.time = mach->time;
 
 	nvmm_machine_put(mach);
 
 	error = copyout(&ctl, args->data, sizeof(ctl));
 	if (error)
 		return error;
 
 	return 0;
 }
 
 static int
 nvmm_ctl(struct nvmm_owner *owner, struct nvmm_ioc_ctl *args)
 {
 	switch (args->op) {
 	case NVMM_CTL_MACH_INFO:
 		return nvmm_ctl_mach_info(owner, args);
 	default:
 		return EINVAL;
 	}
 }
 
 /* -------------------------------------------------------------------------- */
 
+static const struct nvmm_impl *
+nvmm_ident(void)
+{
+	size_t i;
+
+	for (i = 0; i < __arraycount(nvmm_impl_list); i++) {
+		if ((*nvmm_impl_list[i]->ident)())
+			return nvmm_impl_list[i];
+	}
+
+	return NULL;
+}
+
 static int
 nvmm_init(void)
 {
 	size_t i, n;
 
-	for (i = 0; i < __arraycount(nvmm_impl_list); i++) {
-		if (!(*nvmm_impl_list[i]->ident)()) {
-			continue;
-		}
-		nvmm_impl = nvmm_impl_list[i];
-		break;
-	}
-	if (nvmm_impl == NULL) {
-		printf("NVMM: CPU not supported\n");
+	nvmm_impl = nvmm_ident();
+	if (nvmm_impl == NULL)
 		return ENOTSUP;
-	}
 
 	for (i = 0; i < NVMM_MAX_MACHINES; i++) {
 		machines[i].machid = i;
 		rw_init(&machines[i].lock);
 		for (n = 0; n < NVMM_MAX_VCPUS; n++) {
 			machines[i].cpus[n].present = false;
 			machines[i].cpus[n].cpuid = n;
 			mutex_init(&machines[i].cpus[n].lock, MUTEX_DEFAULT,
 			    IPL_NONE);
 		}
 	}
 
 	(*nvmm_impl->init)();
 
 	return 0;
 }
 
 static void
 nvmm_fini(void)
 {
 	size_t i, n;
 
 	for (i = 0; i < NVMM_MAX_MACHINES; i++) {
 		rw_destroy(&machines[i].lock);
 		for (n = 0; n < NVMM_MAX_VCPUS; n++) {
 			mutex_destroy(&machines[i].cpus[n].lock);
 		}
 	}
 
 	(*nvmm_impl->fini)();
 	nvmm_impl = NULL;
 }
 
 /* -------------------------------------------------------------------------- */
 
 static dev_type_open(nvmm_open);
 
 const struct cdevsw nvmm_cdevsw = {
 	.d_open = nvmm_open,
 	.d_close = noclose,
 	.d_read = noread,
 	.d_write = nowrite,
 	.d_ioctl = noioctl,
 	.d_stop = nostop,
 	.d_tty = notty,
 	.d_poll = nopoll,
 	.d_mmap = nommap,
 	.d_kqfilter = nokqfilter,
 	.d_discard = nodiscard,
 	.d_flag = D_OTHER | D_MPSAFE
 };
 
 static int nvmm_ioctl(file_t *, u_long, void *);
 static int nvmm_close(file_t *);
 static int nvmm_mmap(file_t *, off_t *, size_t, int, int *, int *,
     struct uvm_object **, int *);
 
 const struct fileops nvmm_fileops = {
 	.fo_read = fbadop_read,
 	.fo_write = fbadop_write,
 	.fo_ioctl = nvmm_ioctl,
 	.fo_fcntl = fnullop_fcntl,
 	.fo_poll = fnullop_poll,
 	.fo_stat = fbadop_stat,
 	.fo_close = nvmm_close,
 	.fo_kqfilter = fnullop_kqfilter,
 	.fo_restart = fnullop_restart,
 	.fo_mmap = nvmm_mmap,
 };
 
 static int
 nvmm_open(dev_t dev, int flags, int type, struct lwp *l)
 {
 	struct nvmm_owner *owner;
 	struct file *fp;
 	int error, fd;
 
 	if (__predict_false(nvmm_impl == NULL))
 		return ENXIO;
 	if (minor(dev) != 0)
 		return EXDEV;
 	if (!(flags & O_CLOEXEC))
 		return EINVAL;
 	error = fd_allocfile(&fp, &fd);
 	if (error)
 		return error;
 
 	if (OFLAGS(flags) & O_WRONLY) {
 		owner = &root_owner;
 	} else {
 		owner = kmem_alloc(sizeof(*owner), KM_SLEEP);
 		owner->pid = l->l_proc->p_pid;
 	}
 
 	return fd_clone(fp, fd, flags, &nvmm_fileops, owner);
 }
 
 static int
 nvmm_close(file_t *fp)
 {
 	struct nvmm_owner *owner = fp->f_data;
 
 	KASSERT(owner != NULL);
 	nvmm_kill_machines(owner);
 	if (owner != &root_owner) {
 		kmem_free(owner, sizeof(*owner));
 	}
 	fp->f_data = NULL;
 
 	return 0;
 }
 
 static int
 nvmm_mmap(file_t *fp, off_t *offp, size_t size, int prot, int *flagsp,
     int *advicep, struct uvm_object **uobjp, int *maxprotp)
 {
 	struct nvmm_owner *owner = fp->f_data;
 	struct nvmm_machine *mach;
 	nvmm_machid_t machid;
 	nvmm_cpuid_t cpuid;
 	int error;
 
 	if (prot & PROT_EXEC)
 		return EACCES;
 	if (size != PAGE_SIZE)
 		return EINVAL;
 
 	cpuid = NVMM_COMM_CPUID(*offp);
 	if (__predict_false(cpuid >= NVMM_MAX_VCPUS))
 		return EINVAL;
 
 	machid = NVMM_COMM_MACHID(*offp);
 	error = nvmm_machine_get(owner, machid, &mach, false);
 	if (error)
 		return error;
 
 	uao_reference(mach->commuobj);
 	*uobjp = mach->commuobj;
 	*offp = cpuid * PAGE_SIZE;
 	*maxprotp = prot;
 	*advicep = UVM_ADV_RANDOM;
 
 	nvmm_machine_put(mach);
 	return 0;
 }
 
 static int
 nvmm_ioctl(file_t *fp, u_long cmd, void *data)
 {
 	struct nvmm_owner *owner = fp->f_data;
 
 	KASSERT(owner != NULL);
 
 	switch (cmd) {
 	case NVMM_IOC_CAPABILITY:
 		return nvmm_capability(owner, data);
 	case NVMM_IOC_MACHINE_CREATE:
 		return nvmm_machine_create(owner, data);
 	case NVMM_IOC_MACHINE_DESTROY:
 		return nvmm_machine_destroy(owner, data);
 	case NVMM_IOC_MACHINE_CONFIGURE:
 		return nvmm_machine_configure(owner, data);
 	case NVMM_IOC_VCPU_CREATE:
 		return nvmm_vcpu_create(owner, data);
 	case NVMM_IOC_VCPU_DESTROY:
 		return nvmm_vcpu_destroy(owner, data);
 	case NVMM_IOC_VCPU_CONFIGURE:
 		return nvmm_vcpu_configure(owner, data);
 	case NVMM_IOC_VCPU_SETSTATE:
 		return nvmm_vcpu_setstate(owner, data);
 	case NVMM_IOC_VCPU_GETSTATE:
 		return nvmm_vcpu_getstate(owner, data);
 	case NVMM_IOC_VCPU_INJECT:
 		return nvmm_vcpu_inject(owner, data);
 	case NVMM_IOC_VCPU_RUN:
 		return nvmm_vcpu_run(owner, data);
 	case NVMM_IOC_GPA_MAP:
 		return nvmm_gpa_map(owner, data);
 	case NVMM_IOC_GPA_UNMAP:
 		return nvmm_gpa_unmap(owner, data);
 	case NVMM_IOC_HVA_MAP:
 		return nvmm_hva_map(owner, data);
 	case NVMM_IOC_HVA_UNMAP:
 		return nvmm_hva_unmap(owner, data);
 	case NVMM_IOC_CTL:
 		return nvmm_ctl(owner, data);
 	default:
 		return EINVAL;
 	}
 }
 
 /* -------------------------------------------------------------------------- */
 
+static int nvmm_match(device_t, cfdata_t, void *);
+static void nvmm_attach(device_t, device_t, void *);
+static int nvmm_detach(device_t, int);
+
+extern struct cfdriver nvmm_cd;
+
+CFATTACH_DECL_NEW(nvmm, 0, nvmm_match, nvmm_attach, nvmm_detach, NULL);
+
+static struct cfdata nvmm_cfdata[] = {
+	{
+		.cf_name = "nvmm",
+		.cf_atname = "nvmm",
+		.cf_unit = 0,
+		.cf_fstate = FSTATE_STAR,
+		.cf_loc = NULL,
+		.cf_flags = 0,
+		.cf_pspec = NULL,
+	},
+	{ NULL, NULL, 0, FSTATE_NOTFOUND, NULL, 0, NULL }
+};
+
+static int
+nvmm_match(device_t self, cfdata_t cfdata, void *arg)
+{
+	return 1;
+}
+
+static void
+nvmm_attach(device_t parent, device_t self, void *aux)
+{
+	int error;
+
+	error = nvmm_init();
+	if (error)
+		panic("%s: impossible", __func__);
+	aprint_normal_dev(self, "attached\n");
+}
+
+static int
+nvmm_detach(device_t self, int flags)
+{
+	if (nmachines > 0)
+		return EBUSY;
+	nvmm_fini();
+	return 0;
+}
+
 void
 nvmmattach(int nunits)
 {
 	/* nothing */
 }
 
 MODULE(MODULE_CLASS_MISC, nvmm, NULL);
 
+#if defined(_MODULE)
+CFDRIVER_DECL(nvmm, DV_VIRTUAL, NULL);
+#endif
+
 static int
 nvmm_modcmd(modcmd_t cmd, void *arg)
 {
+#if defined(_MODULE)
+	devmajor_t bmajor = NODEVMAJOR;
+	devmajor_t cmajor = 345;
+#endif
 	int error;
 
 	switch (cmd) {
 	case MODULE_CMD_INIT:
-		error = nvmm_init();
+		if (nvmm_ident() == NULL) {
+			aprint_error("%s: cpu not supported\n",
+			    nvmm_cd.cd_name);
+			return ENOTSUP;
+		}
+#if defined(_MODULE)
+		error = config_cfdriver_attach(&nvmm_cd);
 		if (error)
 			return error;
+#endif
+		error = config_cfattach_attach(nvmm_cd.cd_name, &nvmm_ca);
+		if (error) {
+			config_cfdriver_detach(&nvmm_cd);
+			aprint_error("%s: config_cfattach_attach failed\n",
+			    nvmm_cd.cd_name);
+			return error;
+		}
+
+		error = config_cfdata_attach(nvmm_cfdata, 1);
+		if (error) {
+			config_cfattach_detach(nvmm_cd.cd_name, &nvmm_ca);
+			config_cfdriver_detach(&nvmm_cd);
+			aprint_error("%s: unable to register cfdata\n",
+			    nvmm_cd.cd_name);
+			return error;
+		}
+
+		if (config_attach_pseudo(nvmm_cfdata) == NULL) {
+			aprint_error("%s: config_attach_pseudo failed\n",
+			    nvmm_cd.cd_name);
+			config_cfattach_detach(nvmm_cd.cd_name, &nvmm_ca);
+			config_cfdriver_detach(&nvmm_cd);
+			return ENXIO;
+		}
 
 #if defined(_MODULE)
-	{
-		devmajor_t bmajor = NODEVMAJOR;
-		devmajor_t cmajor = 345;
-
-		/* mknod /dev/nvmm c 345 0 */
-		error = devsw_attach("nvmm", NULL, &bmajor,
-		    &nvmm_cdevsw, &cmajor);
-		if (error) {
-			nvmm_fini();
-			return error;
-		}
+		/* mknod /dev/nvmm c 345 0 */
+		error = devsw_attach(nvmm_cd.cd_name, NULL, &bmajor,
+		    &nvmm_cdevsw, &cmajor);
+		if (error) {
+			aprint_error("%s: unable to register devsw\n",
+			    nvmm_cd.cd_name);
+			config_cfattach_detach(nvmm_cd.cd_name, &nvmm_ca);
+			config_cfdriver_detach(&nvmm_cd);
+			return error;
+		}
 	}
 #endif
 		return 0;
-
 	case MODULE_CMD_FINI:
-		if (nmachines > 0) {
-			return EBUSY;
-		}
+		error = config_cfdata_detach(nvmm_cfdata);
+		if (error)
+			return error;
+		error = config_cfattach_detach(nvmm_cd.cd_name, &nvmm_ca);
+		if (error)
+			return error;
 #if defined(_MODULE)
-	{
-		error = devsw_detach(NULL, &nvmm_cdevsw);
-		if (error) {
-			return error;
-		}
-	}
+		config_cfdriver_detach(&nvmm_cd);
+		devsw_detach(NULL, &nvmm_cdevsw);
 #endif
-		nvmm_fini();
 		return 0;
-
 	case MODULE_CMD_AUTOUNLOAD:
 		return EBUSY;
-
 	default:
 		return ENOTTY;
 	}
 }

cvs diff -r1.1 -r1.2 src/sys/modules/nvmm/nvmm.ioconf

--- src/sys/modules/nvmm/nvmm.ioconf 2018/11/07 07:43:08 1.1
+++ src/sys/modules/nvmm/nvmm.ioconf 2020/06/25 17:01:20 1.2
@@ -1,7 +1,8 @@
-# $NetBSD: nvmm.ioconf,v 1.1 2018/11/07 07:43:08 maxv Exp $
+# $NetBSD: nvmm.ioconf,v 1.2 2020/06/25 17:01:20 maxv Exp $
 
 ioconf nvmm
 
 include "conf/files"
+include "dev/nvmm/files.nvmm"
 
 pseudo-device nvmm