Wed Aug 26 16:28:17 2020 UTC ()
nvmm: misc improvements

 - use mach->ncpus to get the number of vcpus, now that we have it
 - don't forget to decrement mach->ncpus when a machine gets killed
 - add more __predict_false()


(maxv)
diff -r1.35 -r1.36 src/sys/dev/nvmm/nvmm.c

cvs diff -r1.35 -r1.36 src/sys/dev/nvmm/nvmm.c (switch to unified diff)

--- src/sys/dev/nvmm/nvmm.c 2020/08/18 17:04:37 1.35
+++ src/sys/dev/nvmm/nvmm.c 2020/08/26 16:28:17 1.36
@@ -1,1315 +1,1307 @@ @@ -1,1315 +1,1307 @@
1/* $NetBSD: nvmm.c,v 1.35 2020/08/18 17:04:37 maxv Exp $ */ 1/* $NetBSD: nvmm.c,v 1.36 2020/08/26 16:28:17 maxv Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2018-2020 The NetBSD Foundation, Inc. 4 * Copyright (c) 2018-2020 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Maxime Villard. 8 * by Maxime Villard.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32#include <sys/cdefs.h> 32#include <sys/cdefs.h>
33__KERNEL_RCSID(0, "$NetBSD: nvmm.c,v 1.35 2020/08/18 17:04:37 maxv Exp $"); 33__KERNEL_RCSID(0, "$NetBSD: nvmm.c,v 1.36 2020/08/26 16:28:17 maxv Exp $");
34 34
35#include <sys/param.h> 35#include <sys/param.h>
36#include <sys/systm.h> 36#include <sys/systm.h>
37#include <sys/kernel.h> 37#include <sys/kernel.h>
38 38
39#include <sys/cpu.h> 39#include <sys/cpu.h>
40#include <sys/conf.h> 40#include <sys/conf.h>
41#include <sys/kmem.h> 41#include <sys/kmem.h>
42#include <sys/module.h> 42#include <sys/module.h>
43#include <sys/proc.h> 43#include <sys/proc.h>
44#include <sys/mman.h> 44#include <sys/mman.h>
45#include <sys/file.h> 45#include <sys/file.h>
46#include <sys/filedesc.h> 46#include <sys/filedesc.h>
47#include <sys/device.h> 47#include <sys/device.h>
48 48
49#include <uvm/uvm.h> 49#include <uvm/uvm.h>
50#include <uvm/uvm_page.h> 50#include <uvm/uvm_page.h>
51 51
52#include "ioconf.h" 52#include "ioconf.h"
53 53
54#include <dev/nvmm/nvmm.h> 54#include <dev/nvmm/nvmm.h>
55#include <dev/nvmm/nvmm_internal.h> 55#include <dev/nvmm/nvmm_internal.h>
56#include <dev/nvmm/nvmm_ioctl.h> 56#include <dev/nvmm/nvmm_ioctl.h>
57 57
58static struct nvmm_machine machines[NVMM_MAX_MACHINES]; 58static struct nvmm_machine machines[NVMM_MAX_MACHINES];
59static volatile unsigned int nmachines __cacheline_aligned; 59static volatile unsigned int nmachines __cacheline_aligned;
60 60
61static const struct nvmm_impl *nvmm_impl_list[] = { 61static const struct nvmm_impl *nvmm_impl_list[] = {
62#if defined(__x86_64__) 62#if defined(__x86_64__)
63 &nvmm_x86_svm, /* x86 AMD SVM */ 63 &nvmm_x86_svm, /* x86 AMD SVM */
64 &nvmm_x86_vmx /* x86 Intel VMX */ 64 &nvmm_x86_vmx /* x86 Intel VMX */
65#endif 65#endif
66}; 66};
67 67
68static const struct nvmm_impl *nvmm_impl = NULL; 68static const struct nvmm_impl *nvmm_impl = NULL;
69 69
70static struct nvmm_owner root_owner; 70static struct nvmm_owner root_owner;
71 71
72/* -------------------------------------------------------------------------- */ 72/* -------------------------------------------------------------------------- */
73 73
74static int 74static int
75nvmm_machine_alloc(struct nvmm_machine **ret) 75nvmm_machine_alloc(struct nvmm_machine **ret)
76{ 76{
77 struct nvmm_machine *mach; 77 struct nvmm_machine *mach;
78 size_t i; 78 size_t i;
79 79
80 for (i = 0; i < NVMM_MAX_MACHINES; i++) { 80 for (i = 0; i < NVMM_MAX_MACHINES; i++) {
81 mach = &machines[i]; 81 mach = &machines[i];
82 82
83 rw_enter(&mach->lock, RW_WRITER); 83 rw_enter(&mach->lock, RW_WRITER);
84 if (mach->present) { 84 if (mach->present) {
85 rw_exit(&mach->lock); 85 rw_exit(&mach->lock);
86 continue; 86 continue;
87 } 87 }
88 88
89 mach->present = true; 89 mach->present = true;
90 mach->time = time_second; 90 mach->time = time_second;
91 *ret = mach; 91 *ret = mach;
92 atomic_inc_uint(&nmachines); 92 atomic_inc_uint(&nmachines);
93 return 0; 93 return 0;
94 } 94 }
95 95
96 return ENOBUFS; 96 return ENOBUFS;
97} 97}
98 98
99static void 99static void
100nvmm_machine_free(struct nvmm_machine *mach) 100nvmm_machine_free(struct nvmm_machine *mach)
101{ 101{
102 KASSERT(rw_write_held(&mach->lock)); 102 KASSERT(rw_write_held(&mach->lock));
103 KASSERT(mach->present); 103 KASSERT(mach->present);
104 mach->present = false; 104 mach->present = false;
105 atomic_dec_uint(&nmachines); 105 atomic_dec_uint(&nmachines);
106} 106}
107 107
108static int 108static int
109nvmm_machine_get(struct nvmm_owner *owner, nvmm_machid_t machid, 109nvmm_machine_get(struct nvmm_owner *owner, nvmm_machid_t machid,
110 struct nvmm_machine **ret, bool writer) 110 struct nvmm_machine **ret, bool writer)
111{ 111{
112 struct nvmm_machine *mach; 112 struct nvmm_machine *mach;
113 krw_t op = writer ? RW_WRITER : RW_READER; 113 krw_t op = writer ? RW_WRITER : RW_READER;
114 114
115 if (machid >= NVMM_MAX_MACHINES) { 115 if (__predict_false(machid >= NVMM_MAX_MACHINES)) {
116 return EINVAL; 116 return EINVAL;
117 } 117 }
118 mach = &machines[machid]; 118 mach = &machines[machid];
119 119
120 rw_enter(&mach->lock, op); 120 rw_enter(&mach->lock, op);
121 if (!mach->present) { 121 if (__predict_false(!mach->present)) {
122 rw_exit(&mach->lock); 122 rw_exit(&mach->lock);
123 return ENOENT; 123 return ENOENT;
124 } 124 }
125 if (owner != &root_owner && mach->owner != owner) { 125 if (__predict_false(mach->owner != owner && owner != &root_owner)) {
126 rw_exit(&mach->lock); 126 rw_exit(&mach->lock);
127 return EPERM; 127 return EPERM;
128 } 128 }
129 *ret = mach; 129 *ret = mach;
130 130
131 return 0; 131 return 0;
132} 132}
133 133
134static void 134static void
135nvmm_machine_put(struct nvmm_machine *mach) 135nvmm_machine_put(struct nvmm_machine *mach)
136{ 136{
137 rw_exit(&mach->lock); 137 rw_exit(&mach->lock);
138} 138}
139 139
140/* -------------------------------------------------------------------------- */ 140/* -------------------------------------------------------------------------- */
141 141
142static int 142static int
143nvmm_vcpu_alloc(struct nvmm_machine *mach, nvmm_cpuid_t cpuid, 143nvmm_vcpu_alloc(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
144 struct nvmm_cpu **ret) 144 struct nvmm_cpu **ret)
145{ 145{
146 struct nvmm_cpu *vcpu; 146 struct nvmm_cpu *vcpu;
147 147
148 if (cpuid >= NVMM_MAX_VCPUS) { 148 if (cpuid >= NVMM_MAX_VCPUS) {
149 return EINVAL; 149 return EINVAL;
150 } 150 }
151 vcpu = &mach->cpus[cpuid]; 151 vcpu = &mach->cpus[cpuid];
152 152
153 mutex_enter(&vcpu->lock); 153 mutex_enter(&vcpu->lock);
154 if (vcpu->present) { 154 if (vcpu->present) {
155 mutex_exit(&vcpu->lock); 155 mutex_exit(&vcpu->lock);
156 return EBUSY; 156 return EBUSY;
157 } 157 }
158 158
159 vcpu->present = true; 159 vcpu->present = true;
160 vcpu->comm = NULL; 160 vcpu->comm = NULL;
161 vcpu->hcpu_last = -1; 161 vcpu->hcpu_last = -1;
162 *ret = vcpu; 162 *ret = vcpu;
163 return 0; 163 return 0;
164} 164}
165 165
166static void 166static void
167nvmm_vcpu_free(struct nvmm_machine *mach, struct nvmm_cpu *vcpu) 167nvmm_vcpu_free(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
168{ 168{
169 KASSERT(mutex_owned(&vcpu->lock)); 169 KASSERT(mutex_owned(&vcpu->lock));
170 vcpu->present = false; 170 vcpu->present = false;
171 if (vcpu->comm != NULL) { 171 if (vcpu->comm != NULL) {
172 uvm_deallocate(kernel_map, (vaddr_t)vcpu->comm, PAGE_SIZE); 172 uvm_deallocate(kernel_map, (vaddr_t)vcpu->comm, PAGE_SIZE);
173 } 173 }
174} 174}
175 175
176static int 176static int
177nvmm_vcpu_get(struct nvmm_machine *mach, nvmm_cpuid_t cpuid, 177nvmm_vcpu_get(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
178 struct nvmm_cpu **ret) 178 struct nvmm_cpu **ret)
179{ 179{
180 struct nvmm_cpu *vcpu; 180 struct nvmm_cpu *vcpu;
181 181
182 if (cpuid >= NVMM_MAX_VCPUS) { 182 if (__predict_false(cpuid >= NVMM_MAX_VCPUS)) {
183 return EINVAL; 183 return EINVAL;
184 } 184 }
185 vcpu = &mach->cpus[cpuid]; 185 vcpu = &mach->cpus[cpuid];
186 186
187 mutex_enter(&vcpu->lock); 187 mutex_enter(&vcpu->lock);
188 if (!vcpu->present) { 188 if (__predict_false(!vcpu->present)) {
189 mutex_exit(&vcpu->lock); 189 mutex_exit(&vcpu->lock);
190 return ENOENT; 190 return ENOENT;
191 } 191 }
192 *ret = vcpu; 192 *ret = vcpu;
193 193
194 return 0; 194 return 0;
195} 195}
196 196
197static void 197static void
198nvmm_vcpu_put(struct nvmm_cpu *vcpu) 198nvmm_vcpu_put(struct nvmm_cpu *vcpu)
199{ 199{
200 mutex_exit(&vcpu->lock); 200 mutex_exit(&vcpu->lock);
201} 201}
202 202
203/* -------------------------------------------------------------------------- */ 203/* -------------------------------------------------------------------------- */
204 204
205static void 205static void
206nvmm_kill_machines(struct nvmm_owner *owner) 206nvmm_kill_machines(struct nvmm_owner *owner)
207{ 207{
208 struct nvmm_machine *mach; 208 struct nvmm_machine *mach;
209 struct nvmm_cpu *vcpu; 209 struct nvmm_cpu *vcpu;
210 size_t i, j; 210 size_t i, j;
211 int error; 211 int error;
212 212
213 for (i = 0; i < NVMM_MAX_MACHINES; i++) { 213 for (i = 0; i < NVMM_MAX_MACHINES; i++) {
214 mach = &machines[i]; 214 mach = &machines[i];
215 215
216 rw_enter(&mach->lock, RW_WRITER); 216 rw_enter(&mach->lock, RW_WRITER);
217 if (!mach->present || mach->owner != owner) { 217 if (!mach->present || mach->owner != owner) {
218 rw_exit(&mach->lock); 218 rw_exit(&mach->lock);
219 continue; 219 continue;
220 } 220 }
221 221
222 /* Kill it. */ 222 /* Kill it. */
223 for (j = 0; j < NVMM_MAX_VCPUS; j++) { 223 for (j = 0; j < NVMM_MAX_VCPUS; j++) {
224 error = nvmm_vcpu_get(mach, j, &vcpu); 224 error = nvmm_vcpu_get(mach, j, &vcpu);
225 if (error) 225 if (error)
226 continue; 226 continue;
227 (*nvmm_impl->vcpu_destroy)(mach, vcpu); 227 (*nvmm_impl->vcpu_destroy)(mach, vcpu);
228 nvmm_vcpu_free(mach, vcpu); 228 nvmm_vcpu_free(mach, vcpu);
229 nvmm_vcpu_put(vcpu); 229 nvmm_vcpu_put(vcpu);
 230 atomic_dec_uint(&mach->ncpus);
230 } 231 }
231 (*nvmm_impl->machine_destroy)(mach); 232 (*nvmm_impl->machine_destroy)(mach);
232 uvmspace_free(mach->vm); 233 uvmspace_free(mach->vm);
233 234
234 /* Drop the kernel UOBJ refs. */ 235 /* Drop the kernel UOBJ refs. */
235 for (j = 0; j < NVMM_MAX_HMAPPINGS; j++) { 236 for (j = 0; j < NVMM_MAX_HMAPPINGS; j++) {
236 if (!mach->hmap[j].present) 237 if (!mach->hmap[j].present)
237 continue; 238 continue;
238 uao_detach(mach->hmap[j].uobj); 239 uao_detach(mach->hmap[j].uobj);
239 } 240 }
240 241
241 nvmm_machine_free(mach); 242 nvmm_machine_free(mach);
242 243
243 rw_exit(&mach->lock); 244 rw_exit(&mach->lock);
244 } 245 }
245} 246}
246 247
247/* -------------------------------------------------------------------------- */ 248/* -------------------------------------------------------------------------- */
248 249
249static int 250static int
250nvmm_capability(struct nvmm_owner *owner, struct nvmm_ioc_capability *args) 251nvmm_capability(struct nvmm_owner *owner, struct nvmm_ioc_capability *args)
251{ 252{
252 args->cap.version = NVMM_KERN_VERSION; 253 args->cap.version = NVMM_KERN_VERSION;
253 args->cap.state_size = nvmm_impl->state_size; 254 args->cap.state_size = nvmm_impl->state_size;
254 args->cap.max_machines = NVMM_MAX_MACHINES; 255 args->cap.max_machines = NVMM_MAX_MACHINES;
255 args->cap.max_vcpus = NVMM_MAX_VCPUS; 256 args->cap.max_vcpus = NVMM_MAX_VCPUS;
256 args->cap.max_ram = NVMM_MAX_RAM; 257 args->cap.max_ram = NVMM_MAX_RAM;
257 258
258 (*nvmm_impl->capability)(&args->cap); 259 (*nvmm_impl->capability)(&args->cap);
259 260
260 return 0; 261 return 0;
261} 262}
262 263
263static int 264static int
264nvmm_machine_create(struct nvmm_owner *owner, 265nvmm_machine_create(struct nvmm_owner *owner,
265 struct nvmm_ioc_machine_create *args) 266 struct nvmm_ioc_machine_create *args)
266{ 267{
267 struct nvmm_machine *mach; 268 struct nvmm_machine *mach;
268 int error; 269 int error;
269 270
270 error = nvmm_machine_alloc(&mach); 271 error = nvmm_machine_alloc(&mach);
271 if (error) 272 if (error)
272 return error; 273 return error;
273 274
274 /* Curproc owns the machine. */ 275 /* Curproc owns the machine. */
275 mach->owner = owner; 276 mach->owner = owner;
276 277
277 /* Zero out the host mappings. */ 278 /* Zero out the host mappings. */
278 memset(&mach->hmap, 0, sizeof(mach->hmap)); 279 memset(&mach->hmap, 0, sizeof(mach->hmap));
279 280
280 /* Create the machine vmspace. */ 281 /* Create the machine vmspace. */
281 mach->gpa_begin = 0; 282 mach->gpa_begin = 0;
282 mach->gpa_end = NVMM_MAX_RAM; 283 mach->gpa_end = NVMM_MAX_RAM;
283 mach->vm = uvmspace_alloc(0, mach->gpa_end - mach->gpa_begin, false); 284 mach->vm = uvmspace_alloc(0, mach->gpa_end - mach->gpa_begin, false);
284 285
285 /* Create the comm uobj. */ 286 /* Create the comm uobj. */
286 mach->commuobj = uao_create(NVMM_MAX_VCPUS * PAGE_SIZE, 0); 287 mach->commuobj = uao_create(NVMM_MAX_VCPUS * PAGE_SIZE, 0);
287 288
288 (*nvmm_impl->machine_create)(mach); 289 (*nvmm_impl->machine_create)(mach);
289 290
290 args->machid = mach->machid; 291 args->machid = mach->machid;
291 nvmm_machine_put(mach); 292 nvmm_machine_put(mach);
292 293
293 return 0; 294 return 0;
294} 295}
295 296
296static int 297static int
297nvmm_machine_destroy(struct nvmm_owner *owner, 298nvmm_machine_destroy(struct nvmm_owner *owner,
298 struct nvmm_ioc_machine_destroy *args) 299 struct nvmm_ioc_machine_destroy *args)
299{ 300{
300 struct nvmm_machine *mach; 301 struct nvmm_machine *mach;
301 struct nvmm_cpu *vcpu; 302 struct nvmm_cpu *vcpu;
302 int error; 303 int error;
303 size_t i; 304 size_t i;
304 305
305 error = nvmm_machine_get(owner, args->machid, &mach, true); 306 error = nvmm_machine_get(owner, args->machid, &mach, true);
306 if (error) 307 if (error)
307 return error; 308 return error;
308 309
309 for (i = 0; i < NVMM_MAX_VCPUS; i++) { 310 for (i = 0; i < NVMM_MAX_VCPUS; i++) {
310 error = nvmm_vcpu_get(mach, i, &vcpu); 311 error = nvmm_vcpu_get(mach, i, &vcpu);
311 if (error) 312 if (error)
312 continue; 313 continue;
313 314
314 (*nvmm_impl->vcpu_destroy)(mach, vcpu); 315 (*nvmm_impl->vcpu_destroy)(mach, vcpu);
315 nvmm_vcpu_free(mach, vcpu); 316 nvmm_vcpu_free(mach, vcpu);
316 nvmm_vcpu_put(vcpu); 317 nvmm_vcpu_put(vcpu);
 318 atomic_dec_uint(&mach->ncpus);
317 } 319 }
318 320
319 (*nvmm_impl->machine_destroy)(mach); 321 (*nvmm_impl->machine_destroy)(mach);
320 322
321 /* Free the machine vmspace. */ 323 /* Free the machine vmspace. */
322 uvmspace_free(mach->vm); 324 uvmspace_free(mach->vm);
323 325
324 /* Drop the kernel UOBJ refs. */ 326 /* Drop the kernel UOBJ refs. */
325 for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) { 327 for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
326 if (!mach->hmap[i].present) 328 if (!mach->hmap[i].present)
327 continue; 329 continue;
328 uao_detach(mach->hmap[i].uobj); 330 uao_detach(mach->hmap[i].uobj);
329 } 331 }
330 332
331 nvmm_machine_free(mach); 333 nvmm_machine_free(mach);
332 nvmm_machine_put(mach); 334 nvmm_machine_put(mach);
333 335
334 return 0; 336 return 0;
335} 337}
336 338
337static int 339static int
338nvmm_machine_configure(struct nvmm_owner *owner, 340nvmm_machine_configure(struct nvmm_owner *owner,
339 struct nvmm_ioc_machine_configure *args) 341 struct nvmm_ioc_machine_configure *args)
340{ 342{
341 struct nvmm_machine *mach; 343 struct nvmm_machine *mach;
342 size_t allocsz; 344 size_t allocsz;
343 uint64_t op; 345 uint64_t op;
344 void *data; 346 void *data;
345 int error; 347 int error;
346 348
347 op = NVMM_MACH_CONF_MD(args->op); 349 op = NVMM_MACH_CONF_MD(args->op);
348 if (__predict_false(op >= nvmm_impl->mach_conf_max)) { 350 if (__predict_false(op >= nvmm_impl->mach_conf_max)) {
349 return EINVAL; 351 return EINVAL;
350 } 352 }
351 353
352 allocsz = nvmm_impl->mach_conf_sizes[op]; 354 allocsz = nvmm_impl->mach_conf_sizes[op];
353 data = kmem_alloc(allocsz, KM_SLEEP); 355 data = kmem_alloc(allocsz, KM_SLEEP);
354 356
355 error = nvmm_machine_get(owner, args->machid, &mach, true); 357 error = nvmm_machine_get(owner, args->machid, &mach, true);
356 if (error) { 358 if (error) {
357 kmem_free(data, allocsz); 359 kmem_free(data, allocsz);
358 return error; 360 return error;
359 } 361 }
360 362
361 error = copyin(args->conf, data, allocsz); 363 error = copyin(args->conf, data, allocsz);
362 if (error) { 364 if (error) {
363 goto out; 365 goto out;
364 } 366 }
365 367
366 error = (*nvmm_impl->machine_configure)(mach, op, data); 368 error = (*nvmm_impl->machine_configure)(mach, op, data);
367 369
368out: 370out:
369 nvmm_machine_put(mach); 371 nvmm_machine_put(mach);
370 kmem_free(data, allocsz); 372 kmem_free(data, allocsz);
371 return error; 373 return error;
372} 374}
373 375
374static int 376static int
375nvmm_vcpu_create(struct nvmm_owner *owner, struct nvmm_ioc_vcpu_create *args) 377nvmm_vcpu_create(struct nvmm_owner *owner, struct nvmm_ioc_vcpu_create *args)
376{ 378{
377 struct nvmm_machine *mach; 379 struct nvmm_machine *mach;
378 struct nvmm_cpu *vcpu; 380 struct nvmm_cpu *vcpu;
379 int error; 381 int error;
380 382
381 error = nvmm_machine_get(owner, args->machid, &mach, false); 383 error = nvmm_machine_get(owner, args->machid, &mach, false);
382 if (error) 384 if (error)
383 return error; 385 return error;
384 386
385 error = nvmm_vcpu_alloc(mach, args->cpuid, &vcpu); 387 error = nvmm_vcpu_alloc(mach, args->cpuid, &vcpu);
386 if (error) 388 if (error)
387 goto out; 389 goto out;
388 390
389 /* Allocate the comm page. */ 391 /* Allocate the comm page. */
390 uao_reference(mach->commuobj); 392 uao_reference(mach->commuobj);
391 error = uvm_map(kernel_map, (vaddr_t *)&vcpu->comm, PAGE_SIZE, 393 error = uvm_map(kernel_map, (vaddr_t *)&vcpu->comm, PAGE_SIZE,
392 mach->commuobj, args->cpuid * PAGE_SIZE, 0, UVM_MAPFLAG(UVM_PROT_RW, 394 mach->commuobj, args->cpuid * PAGE_SIZE, 0, UVM_MAPFLAG(UVM_PROT_RW,
393 UVM_PROT_RW, UVM_INH_SHARE, UVM_ADV_RANDOM, 0)); 395 UVM_PROT_RW, UVM_INH_SHARE, UVM_ADV_RANDOM, 0));
394 if (error) { 396 if (error) {
395 uao_detach(mach->commuobj); 397 uao_detach(mach->commuobj);
396 nvmm_vcpu_free(mach, vcpu); 398 nvmm_vcpu_free(mach, vcpu);
397 nvmm_vcpu_put(vcpu); 399 nvmm_vcpu_put(vcpu);
398 goto out; 400 goto out;
399 } 401 }
400 error = uvm_map_pageable(kernel_map, (vaddr_t)vcpu->comm, 402 error = uvm_map_pageable(kernel_map, (vaddr_t)vcpu->comm,
401 (vaddr_t)vcpu->comm + PAGE_SIZE, false, 0); 403 (vaddr_t)vcpu->comm + PAGE_SIZE, false, 0);
402 if (error) { 404 if (error) {
403 nvmm_vcpu_free(mach, vcpu); 405 nvmm_vcpu_free(mach, vcpu);
404 nvmm_vcpu_put(vcpu); 406 nvmm_vcpu_put(vcpu);
405 goto out; 407 goto out;
406 } 408 }
407 memset(vcpu->comm, 0, PAGE_SIZE); 409 memset(vcpu->comm, 0, PAGE_SIZE);
408 410
409 error = (*nvmm_impl->vcpu_create)(mach, vcpu); 411 error = (*nvmm_impl->vcpu_create)(mach, vcpu);
410 if (error) { 412 if (error) {
411 nvmm_vcpu_free(mach, vcpu); 413 nvmm_vcpu_free(mach, vcpu);
412 nvmm_vcpu_put(vcpu); 414 nvmm_vcpu_put(vcpu);
413 goto out; 415 goto out;
414 } 416 }
415 417
416 nvmm_vcpu_put(vcpu); 418 nvmm_vcpu_put(vcpu);
417 
418 atomic_inc_uint(&mach->ncpus); 419 atomic_inc_uint(&mach->ncpus);
419 420
420out: 421out:
421 nvmm_machine_put(mach); 422 nvmm_machine_put(mach);
422 return error; 423 return error;
423} 424}
424 425
425static int 426static int
426nvmm_vcpu_destroy(struct nvmm_owner *owner, struct nvmm_ioc_vcpu_destroy *args) 427nvmm_vcpu_destroy(struct nvmm_owner *owner, struct nvmm_ioc_vcpu_destroy *args)
427{ 428{
428 struct nvmm_machine *mach; 429 struct nvmm_machine *mach;
429 struct nvmm_cpu *vcpu; 430 struct nvmm_cpu *vcpu;
430 int error; 431 int error;
431 432
432 error = nvmm_machine_get(owner, args->machid, &mach, false); 433 error = nvmm_machine_get(owner, args->machid, &mach, false);
433 if (error) 434 if (error)
434 return error; 435 return error;
435 436
436 error = nvmm_vcpu_get(mach, args->cpuid, &vcpu); 437 error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
437 if (error) 438 if (error)
438 goto out; 439 goto out;
439 440
440 (*nvmm_impl->vcpu_destroy)(mach, vcpu); 441 (*nvmm_impl->vcpu_destroy)(mach, vcpu);
441 nvmm_vcpu_free(mach, vcpu); 442 nvmm_vcpu_free(mach, vcpu);
442 nvmm_vcpu_put(vcpu); 443 nvmm_vcpu_put(vcpu);
443 
444 atomic_dec_uint(&mach->ncpus); 444 atomic_dec_uint(&mach->ncpus);
445 445
446out: 446out:
447 nvmm_machine_put(mach); 447 nvmm_machine_put(mach);
448 return error; 448 return error;
449} 449}
450 450
451static int 451static int
452nvmm_vcpu_configure(struct nvmm_owner *owner, 452nvmm_vcpu_configure(struct nvmm_owner *owner,
453 struct nvmm_ioc_vcpu_configure *args) 453 struct nvmm_ioc_vcpu_configure *args)
454{ 454{
455 struct nvmm_machine *mach; 455 struct nvmm_machine *mach;
456 struct nvmm_cpu *vcpu; 456 struct nvmm_cpu *vcpu;
457 size_t allocsz; 457 size_t allocsz;
458 uint64_t op; 458 uint64_t op;
459 void *data; 459 void *data;
460 int error; 460 int error;
461 461
462 op = NVMM_VCPU_CONF_MD(args->op); 462 op = NVMM_VCPU_CONF_MD(args->op);
463 if (__predict_false(op >= nvmm_impl->vcpu_conf_max)) 463 if (__predict_false(op >= nvmm_impl->vcpu_conf_max))
464 return EINVAL; 464 return EINVAL;
465 465
466 allocsz = nvmm_impl->vcpu_conf_sizes[op]; 466 allocsz = nvmm_impl->vcpu_conf_sizes[op];
467 data = kmem_alloc(allocsz, KM_SLEEP); 467 data = kmem_alloc(allocsz, KM_SLEEP);
468 468
469 error = nvmm_machine_get(owner, args->machid, &mach, false); 469 error = nvmm_machine_get(owner, args->machid, &mach, false);
470 if (error) { 470 if (error) {
471 kmem_free(data, allocsz); 471 kmem_free(data, allocsz);
472 return error; 472 return error;
473 } 473 }
474 474
475 error = nvmm_vcpu_get(mach, args->cpuid, &vcpu); 475 error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
476 if (error) { 476 if (error) {
477 nvmm_machine_put(mach); 477 nvmm_machine_put(mach);
478 kmem_free(data, allocsz); 478 kmem_free(data, allocsz);
479 return error; 479 return error;
480 } 480 }
481 481
482 error = copyin(args->conf, data, allocsz); 482 error = copyin(args->conf, data, allocsz);
483 if (error) { 483 if (error) {
484 goto out; 484 goto out;
485 } 485 }
486 486
487 error = (*nvmm_impl->vcpu_configure)(vcpu, op, data); 487 error = (*nvmm_impl->vcpu_configure)(vcpu, op, data);
488 488
489out: 489out:
490 nvmm_vcpu_put(vcpu); 490 nvmm_vcpu_put(vcpu);
491 nvmm_machine_put(mach); 491 nvmm_machine_put(mach);
492 kmem_free(data, allocsz); 492 kmem_free(data, allocsz);
493 return error; 493 return error;
494} 494}
495 495
496static int 496static int
497nvmm_vcpu_setstate(struct nvmm_owner *owner, 497nvmm_vcpu_setstate(struct nvmm_owner *owner,
498 struct nvmm_ioc_vcpu_setstate *args) 498 struct nvmm_ioc_vcpu_setstate *args)
499{ 499{
500 struct nvmm_machine *mach; 500 struct nvmm_machine *mach;
501 struct nvmm_cpu *vcpu; 501 struct nvmm_cpu *vcpu;
502 int error; 502 int error;
503 503
504 error = nvmm_machine_get(owner, args->machid, &mach, false); 504 error = nvmm_machine_get(owner, args->machid, &mach, false);
505 if (error) 505 if (error)
506 return error; 506 return error;
507 507
508 error = nvmm_vcpu_get(mach, args->cpuid, &vcpu); 508 error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
509 if (error) 509 if (error)
510 goto out; 510 goto out;
511 511
512 (*nvmm_impl->vcpu_setstate)(vcpu); 512 (*nvmm_impl->vcpu_setstate)(vcpu);
513 nvmm_vcpu_put(vcpu); 513 nvmm_vcpu_put(vcpu);
514 514
515out: 515out:
516 nvmm_machine_put(mach); 516 nvmm_machine_put(mach);
517 return error; 517 return error;
518} 518}
519 519
520static int 520static int
521nvmm_vcpu_getstate(struct nvmm_owner *owner, 521nvmm_vcpu_getstate(struct nvmm_owner *owner,
522 struct nvmm_ioc_vcpu_getstate *args) 522 struct nvmm_ioc_vcpu_getstate *args)
523{ 523{
524 struct nvmm_machine *mach; 524 struct nvmm_machine *mach;
525 struct nvmm_cpu *vcpu; 525 struct nvmm_cpu *vcpu;
526 int error; 526 int error;
527 527
528 error = nvmm_machine_get(owner, args->machid, &mach, false); 528 error = nvmm_machine_get(owner, args->machid, &mach, false);
529 if (error) 529 if (error)
530 return error; 530 return error;
531 531
532 error = nvmm_vcpu_get(mach, args->cpuid, &vcpu); 532 error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
533 if (error) 533 if (error)
534 goto out; 534 goto out;
535 535
536 (*nvmm_impl->vcpu_getstate)(vcpu); 536 (*nvmm_impl->vcpu_getstate)(vcpu);
537 nvmm_vcpu_put(vcpu); 537 nvmm_vcpu_put(vcpu);
538 538
539out: 539out:
540 nvmm_machine_put(mach); 540 nvmm_machine_put(mach);
541 return error; 541 return error;
542} 542}
543 543
544static int 544static int
545nvmm_vcpu_inject(struct nvmm_owner *owner, struct nvmm_ioc_vcpu_inject *args) 545nvmm_vcpu_inject(struct nvmm_owner *owner, struct nvmm_ioc_vcpu_inject *args)
546{ 546{
547 struct nvmm_machine *mach; 547 struct nvmm_machine *mach;
548 struct nvmm_cpu *vcpu; 548 struct nvmm_cpu *vcpu;
549 int error; 549 int error;
550 550
551 error = nvmm_machine_get(owner, args->machid, &mach, false); 551 error = nvmm_machine_get(owner, args->machid, &mach, false);
552 if (error) 552 if (error)
553 return error; 553 return error;
554 554
555 error = nvmm_vcpu_get(mach, args->cpuid, &vcpu); 555 error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
556 if (error) 556 if (error)
557 goto out; 557 goto out;
558 558
559 error = (*nvmm_impl->vcpu_inject)(vcpu); 559 error = (*nvmm_impl->vcpu_inject)(vcpu);
560 nvmm_vcpu_put(vcpu); 560 nvmm_vcpu_put(vcpu);
561 561
562out: 562out:
563 nvmm_machine_put(mach); 563 nvmm_machine_put(mach);
564 return error; 564 return error;
565} 565}
566 566
567static int 567static int
568nvmm_do_vcpu_run(struct nvmm_machine *mach, struct nvmm_cpu *vcpu, 568nvmm_do_vcpu_run(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
569 struct nvmm_vcpu_exit *exit) 569 struct nvmm_vcpu_exit *exit)
570{ 570{
571 struct vmspace *vm = mach->vm; 571 struct vmspace *vm = mach->vm;
572 int ret; 572 int ret;
573 573
574 while (1) { 574 while (1) {
575 /* Got a signal? Or pending resched? Leave. */ 575 /* Got a signal? Or pending resched? Leave. */
576 if (__predict_false(nvmm_return_needed())) { 576 if (__predict_false(nvmm_return_needed())) {
577 exit->reason = NVMM_VCPU_EXIT_NONE; 577 exit->reason = NVMM_VCPU_EXIT_NONE;
578 return 0; 578 return 0;
579 } 579 }
580 580
581 /* Run the VCPU. */ 581 /* Run the VCPU. */
582 ret = (*nvmm_impl->vcpu_run)(mach, vcpu, exit); 582 ret = (*nvmm_impl->vcpu_run)(mach, vcpu, exit);
583 if (__predict_false(ret != 0)) { 583 if (__predict_false(ret != 0)) {
584 return ret; 584 return ret;
585 } 585 }
586 586
587 /* Process nested page faults. */ 587 /* Process nested page faults. */
588 if (__predict_true(exit->reason != NVMM_VCPU_EXIT_MEMORY)) { 588 if (__predict_true(exit->reason != NVMM_VCPU_EXIT_MEMORY)) {
589 break; 589 break;
590 } 590 }
591 if (exit->u.mem.gpa >= mach->gpa_end) { 591 if (exit->u.mem.gpa >= mach->gpa_end) {
592 break; 592 break;
593 } 593 }
594 if (uvm_fault(&vm->vm_map, exit->u.mem.gpa, exit->u.mem.prot)) { 594 if (uvm_fault(&vm->vm_map, exit->u.mem.gpa, exit->u.mem.prot)) {
595 break; 595 break;
596 } 596 }
597 } 597 }
598 598
599 return 0; 599 return 0;
600} 600}
601 601
602static int 602static int
603nvmm_vcpu_run(struct nvmm_owner *owner, struct nvmm_ioc_vcpu_run *args) 603nvmm_vcpu_run(struct nvmm_owner *owner, struct nvmm_ioc_vcpu_run *args)
604{ 604{
605 struct nvmm_machine *mach; 605 struct nvmm_machine *mach;
606 struct nvmm_cpu *vcpu; 606 struct nvmm_cpu *vcpu;
607 int error; 607 int error;
608 608
609 error = nvmm_machine_get(owner, args->machid, &mach, false); 609 error = nvmm_machine_get(owner, args->machid, &mach, false);
610 if (error) 610 if (error)
611 return error; 611 return error;
612 612
613 error = nvmm_vcpu_get(mach, args->cpuid, &vcpu); 613 error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
614 if (error) 614 if (error)
615 goto out; 615 goto out;
616 616
617 error = nvmm_do_vcpu_run(mach, vcpu, &args->exit); 617 error = nvmm_do_vcpu_run(mach, vcpu, &args->exit);
618 nvmm_vcpu_put(vcpu); 618 nvmm_vcpu_put(vcpu);
619 619
620out: 620out:
621 nvmm_machine_put(mach); 621 nvmm_machine_put(mach);
622 return error; 622 return error;
623} 623}
624 624
625/* -------------------------------------------------------------------------- */ 625/* -------------------------------------------------------------------------- */
626 626
627static struct uvm_object * 627static struct uvm_object *
628nvmm_hmapping_getuobj(struct nvmm_machine *mach, uintptr_t hva, size_t size, 628nvmm_hmapping_getuobj(struct nvmm_machine *mach, uintptr_t hva, size_t size,
629 size_t *off) 629 size_t *off)
630{ 630{
631 struct nvmm_hmapping *hmapping; 631 struct nvmm_hmapping *hmapping;
632 size_t i; 632 size_t i;
633 633
634 for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) { 634 for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
635 hmapping = &mach->hmap[i]; 635 hmapping = &mach->hmap[i];
636 if (!hmapping->present) { 636 if (!hmapping->present) {
637 continue; 637 continue;
638 } 638 }
639 if (hva >= hmapping->hva && 639 if (hva >= hmapping->hva &&
640 hva + size <= hmapping->hva + hmapping->size) { 640 hva + size <= hmapping->hva + hmapping->size) {
641 *off = hva - hmapping->hva; 641 *off = hva - hmapping->hva;
642 return hmapping->uobj; 642 return hmapping->uobj;
643 } 643 }
644 } 644 }
645 645
646 return NULL; 646 return NULL;
647} 647}
648 648
649static int 649static int
650nvmm_hmapping_validate(struct nvmm_machine *mach, uintptr_t hva, size_t size) 650nvmm_hmapping_validate(struct nvmm_machine *mach, uintptr_t hva, size_t size)
651{ 651{
652 struct nvmm_hmapping *hmapping; 652 struct nvmm_hmapping *hmapping;
653 size_t i; 653 size_t i;
654 654
655 if ((hva % PAGE_SIZE) != 0 || (size % PAGE_SIZE) != 0) { 655 if ((hva % PAGE_SIZE) != 0 || (size % PAGE_SIZE) != 0) {
656 return EINVAL; 656 return EINVAL;
657 } 657 }
658 if (hva == 0) { 658 if (hva == 0) {
659 return EINVAL; 659 return EINVAL;
660 } 660 }
661 661
662 for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) { 662 for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
663 hmapping = &mach->hmap[i]; 663 hmapping = &mach->hmap[i];
664 if (!hmapping->present) { 664 if (!hmapping->present) {
665 continue; 665 continue;
666 } 666 }
667 667
668 if (hva >= hmapping->hva && 668 if (hva >= hmapping->hva &&
669 hva + size <= hmapping->hva + hmapping->size) { 669 hva + size <= hmapping->hva + hmapping->size) {
670 break; 670 break;
671 } 671 }
672 672
673 if (hva >= hmapping->hva && 673 if (hva >= hmapping->hva &&
674 hva < hmapping->hva + hmapping->size) { 674 hva < hmapping->hva + hmapping->size) {
675 return EEXIST; 675 return EEXIST;
676 } 676 }
677 if (hva + size > hmapping->hva && 677 if (hva + size > hmapping->hva &&
678 hva + size <= hmapping->hva + hmapping->size) { 678 hva + size <= hmapping->hva + hmapping->size) {
679 return EEXIST; 679 return EEXIST;
680 } 680 }
681 if (hva <= hmapping->hva && 681 if (hva <= hmapping->hva &&
682 hva + size >= hmapping->hva + hmapping->size) { 682 hva + size >= hmapping->hva + hmapping->size) {
683 return EEXIST; 683 return EEXIST;
684 } 684 }
685 } 685 }
686 686
687 return 0; 687 return 0;
688} 688}
689 689
690static struct nvmm_hmapping * 690static struct nvmm_hmapping *
691nvmm_hmapping_alloc(struct nvmm_machine *mach) 691nvmm_hmapping_alloc(struct nvmm_machine *mach)
692{ 692{
693 struct nvmm_hmapping *hmapping; 693 struct nvmm_hmapping *hmapping;
694 size_t i; 694 size_t i;
695 695
696 for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) { 696 for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
697 hmapping = &mach->hmap[i]; 697 hmapping = &mach->hmap[i];
698 if (!hmapping->present) { 698 if (!hmapping->present) {
699 hmapping->present = true; 699 hmapping->present = true;
700 return hmapping; 700 return hmapping;
701 } 701 }
702 } 702 }
703 703
704 return NULL; 704 return NULL;
705} 705}
706 706
707static int 707static int
708nvmm_hmapping_free(struct nvmm_machine *mach, uintptr_t hva, size_t size) 708nvmm_hmapping_free(struct nvmm_machine *mach, uintptr_t hva, size_t size)
709{ 709{
710 struct vmspace *vmspace = curproc->p_vmspace; 710 struct vmspace *vmspace = curproc->p_vmspace;
711 struct nvmm_hmapping *hmapping; 711 struct nvmm_hmapping *hmapping;
712 size_t i; 712 size_t i;
713 713
714 for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) { 714 for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
715 hmapping = &mach->hmap[i]; 715 hmapping = &mach->hmap[i];
716 if (!hmapping->present || hmapping->hva != hva || 716 if (!hmapping->present || hmapping->hva != hva ||
717 hmapping->size != size) { 717 hmapping->size != size) {
718 continue; 718 continue;
719 } 719 }
720 720
721 uvm_unmap(&vmspace->vm_map, hmapping->hva, 721 uvm_unmap(&vmspace->vm_map, hmapping->hva,
722 hmapping->hva + hmapping->size); 722 hmapping->hva + hmapping->size);
723 uao_detach(hmapping->uobj); 723 uao_detach(hmapping->uobj);
724 724
725 hmapping->uobj = NULL; 725 hmapping->uobj = NULL;
726 hmapping->present = false; 726 hmapping->present = false;
727 727
728 return 0; 728 return 0;
729 } 729 }
730 730
731 return ENOENT; 731 return ENOENT;
732} 732}
733 733
734static int 734static int
735nvmm_hva_map(struct nvmm_owner *owner, struct nvmm_ioc_hva_map *args) 735nvmm_hva_map(struct nvmm_owner *owner, struct nvmm_ioc_hva_map *args)
736{ 736{
737 struct vmspace *vmspace = curproc->p_vmspace; 737 struct vmspace *vmspace = curproc->p_vmspace;
738 struct nvmm_machine *mach; 738 struct nvmm_machine *mach;
739 struct nvmm_hmapping *hmapping; 739 struct nvmm_hmapping *hmapping;
740 vaddr_t uva; 740 vaddr_t uva;
741 int error; 741 int error;
742 742
743 error = nvmm_machine_get(owner, args->machid, &mach, true); 743 error = nvmm_machine_get(owner, args->machid, &mach, true);
744 if (error) 744 if (error)
745 return error; 745 return error;
746 746
747 error = nvmm_hmapping_validate(mach, args->hva, args->size); 747 error = nvmm_hmapping_validate(mach, args->hva, args->size);
748 if (error) 748 if (error)
749 goto out; 749 goto out;
750 750
751 hmapping = nvmm_hmapping_alloc(mach); 751 hmapping = nvmm_hmapping_alloc(mach);
752 if (hmapping == NULL) { 752 if (hmapping == NULL) {
753 error = ENOBUFS; 753 error = ENOBUFS;
754 goto out; 754 goto out;
755 } 755 }
756 756
757 hmapping->hva = args->hva; 757 hmapping->hva = args->hva;
758 hmapping->size = args->size; 758 hmapping->size = args->size;
759 hmapping->uobj = uao_create(hmapping->size, 0); 759 hmapping->uobj = uao_create(hmapping->size, 0);
760 uva = hmapping->hva; 760 uva = hmapping->hva;
761 761
762 /* Take a reference for the user. */ 762 /* Take a reference for the user. */
763 uao_reference(hmapping->uobj); 763 uao_reference(hmapping->uobj);
764 764
765 /* Map the uobj into the user address space, as pageable. */ 765 /* Map the uobj into the user address space, as pageable. */
766 error = uvm_map(&vmspace->vm_map, &uva, hmapping->size, hmapping->uobj, 766 error = uvm_map(&vmspace->vm_map, &uva, hmapping->size, hmapping->uobj,
767 0, 0, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_SHARE, 767 0, 0, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_SHARE,
768 UVM_ADV_RANDOM, UVM_FLAG_FIXED|UVM_FLAG_UNMAP)); 768 UVM_ADV_RANDOM, UVM_FLAG_FIXED|UVM_FLAG_UNMAP));
769 if (error) { 769 if (error) {
770 uao_detach(hmapping->uobj); 770 uao_detach(hmapping->uobj);
771 } 771 }
772 772
773out: 773out:
774 nvmm_machine_put(mach); 774 nvmm_machine_put(mach);
775 return error; 775 return error;
776} 776}
777 777
778static int 778static int
779nvmm_hva_unmap(struct nvmm_owner *owner, struct nvmm_ioc_hva_unmap *args) 779nvmm_hva_unmap(struct nvmm_owner *owner, struct nvmm_ioc_hva_unmap *args)
780{ 780{
781 struct nvmm_machine *mach; 781 struct nvmm_machine *mach;
782 int error; 782 int error;
783 783
784 error = nvmm_machine_get(owner, args->machid, &mach, true); 784 error = nvmm_machine_get(owner, args->machid, &mach, true);
785 if (error) 785 if (error)
786 return error; 786 return error;
787 787
788 error = nvmm_hmapping_free(mach, args->hva, args->size); 788 error = nvmm_hmapping_free(mach, args->hva, args->size);
789 789
790 nvmm_machine_put(mach); 790 nvmm_machine_put(mach);
791 return error; 791 return error;
792} 792}
793 793
794/* -------------------------------------------------------------------------- */ 794/* -------------------------------------------------------------------------- */
795 795
796static int 796static int
797nvmm_gpa_map(struct nvmm_owner *owner, struct nvmm_ioc_gpa_map *args) 797nvmm_gpa_map(struct nvmm_owner *owner, struct nvmm_ioc_gpa_map *args)
798{ 798{
799 struct nvmm_machine *mach; 799 struct nvmm_machine *mach;
800 struct uvm_object *uobj; 800 struct uvm_object *uobj;
801 gpaddr_t gpa; 801 gpaddr_t gpa;
802 size_t off; 802 size_t off;
803 int error; 803 int error;
804 804
805 error = nvmm_machine_get(owner, args->machid, &mach, false); 805 error = nvmm_machine_get(owner, args->machid, &mach, false);
806 if (error) 806 if (error)
807 return error; 807 return error;
808 808
809 if ((args->prot & ~(PROT_READ|PROT_WRITE|PROT_EXEC)) != 0) { 809 if ((args->prot & ~(PROT_READ|PROT_WRITE|PROT_EXEC)) != 0) {
810 error = EINVAL; 810 error = EINVAL;
811 goto out; 811 goto out;
812 } 812 }
813 813
814 if ((args->gpa % PAGE_SIZE) != 0 || (args->size % PAGE_SIZE) != 0 || 814 if ((args->gpa % PAGE_SIZE) != 0 || (args->size % PAGE_SIZE) != 0 ||
815 (args->hva % PAGE_SIZE) != 0) { 815 (args->hva % PAGE_SIZE) != 0) {
816 error = EINVAL; 816 error = EINVAL;
817 goto out; 817 goto out;
818 } 818 }
819 if (args->hva == 0) { 819 if (args->hva == 0) {
820 error = EINVAL; 820 error = EINVAL;
821 goto out; 821 goto out;
822 } 822 }
823 if (args->gpa < mach->gpa_begin || args->gpa >= mach->gpa_end) { 823 if (args->gpa < mach->gpa_begin || args->gpa >= mach->gpa_end) {
824 error = EINVAL; 824 error = EINVAL;
825 goto out; 825 goto out;
826 } 826 }
827 if (args->gpa + args->size <= args->gpa) { 827 if (args->gpa + args->size <= args->gpa) {
828 error = EINVAL; 828 error = EINVAL;
829 goto out; 829 goto out;
830 } 830 }
831 if (args->gpa + args->size > mach->gpa_end) { 831 if (args->gpa + args->size > mach->gpa_end) {
832 error = EINVAL; 832 error = EINVAL;
833 goto out; 833 goto out;
834 } 834 }
835 gpa = args->gpa; 835 gpa = args->gpa;
836 836
837 uobj = nvmm_hmapping_getuobj(mach, args->hva, args->size, &off); 837 uobj = nvmm_hmapping_getuobj(mach, args->hva, args->size, &off);
838 if (uobj == NULL) { 838 if (uobj == NULL) {
839 error = EINVAL; 839 error = EINVAL;
840 goto out; 840 goto out;
841 } 841 }
842 842
843 /* Take a reference for the machine. */ 843 /* Take a reference for the machine. */
844 uao_reference(uobj); 844 uao_reference(uobj);
845 845
846 /* Map the uobj into the machine address space, as pageable. */ 846 /* Map the uobj into the machine address space, as pageable. */
847 error = uvm_map(&mach->vm->vm_map, &gpa, args->size, uobj, off, 0, 847 error = uvm_map(&mach->vm->vm_map, &gpa, args->size, uobj, off, 0,
848 UVM_MAPFLAG(args->prot, UVM_PROT_RWX, UVM_INH_NONE, 848 UVM_MAPFLAG(args->prot, UVM_PROT_RWX, UVM_INH_NONE,
849 UVM_ADV_RANDOM, UVM_FLAG_FIXED|UVM_FLAG_UNMAP)); 849 UVM_ADV_RANDOM, UVM_FLAG_FIXED|UVM_FLAG_UNMAP));
850 if (error) { 850 if (error) {
851 uao_detach(uobj); 851 uao_detach(uobj);
852 goto out; 852 goto out;
853 } 853 }
854 if (gpa != args->gpa) { 854 if (gpa != args->gpa) {
855 uao_detach(uobj); 855 uao_detach(uobj);
856 printf("[!] uvm_map problem\n"); 856 printf("[!] uvm_map problem\n");
857 error = EINVAL; 857 error = EINVAL;
858 goto out; 858 goto out;
859 } 859 }
860 860
861out: 861out:
862 nvmm_machine_put(mach); 862 nvmm_machine_put(mach);
863 return error; 863 return error;
864} 864}
865 865
866static int 866static int
867nvmm_gpa_unmap(struct nvmm_owner *owner, struct nvmm_ioc_gpa_unmap *args) 867nvmm_gpa_unmap(struct nvmm_owner *owner, struct nvmm_ioc_gpa_unmap *args)
868{ 868{
869 struct nvmm_machine *mach; 869 struct nvmm_machine *mach;
870 gpaddr_t gpa; 870 gpaddr_t gpa;
871 int error; 871 int error;
872 872
873 error = nvmm_machine_get(owner, args->machid, &mach, false); 873 error = nvmm_machine_get(owner, args->machid, &mach, false);
874 if (error) 874 if (error)
875 return error; 875 return error;
876 876
877 if ((args->gpa % PAGE_SIZE) != 0 || (args->size % PAGE_SIZE) != 0) { 877 if ((args->gpa % PAGE_SIZE) != 0 || (args->size % PAGE_SIZE) != 0) {
878 error = EINVAL; 878 error = EINVAL;
879 goto out; 879 goto out;
880 } 880 }
881 if (args->gpa < mach->gpa_begin || args->gpa >= mach->gpa_end) { 881 if (args->gpa < mach->gpa_begin || args->gpa >= mach->gpa_end) {
882 error = EINVAL; 882 error = EINVAL;
883 goto out; 883 goto out;
884 } 884 }
885 if (args->gpa + args->size <= args->gpa) { 885 if (args->gpa + args->size <= args->gpa) {
886 error = EINVAL; 886 error = EINVAL;
887 goto out; 887 goto out;
888 } 888 }
889 if (args->gpa + args->size >= mach->gpa_end) { 889 if (args->gpa + args->size >= mach->gpa_end) {
890 error = EINVAL; 890 error = EINVAL;
891 goto out; 891 goto out;
892 } 892 }
893 gpa = args->gpa; 893 gpa = args->gpa;
894 894
895 /* Unmap the memory from the machine. */ 895 /* Unmap the memory from the machine. */
896 uvm_unmap(&mach->vm->vm_map, gpa, gpa + args->size); 896 uvm_unmap(&mach->vm->vm_map, gpa, gpa + args->size);
897 897
898out: 898out:
899 nvmm_machine_put(mach); 899 nvmm_machine_put(mach);
900 return error; 900 return error;
901} 901}
902 902
903/* -------------------------------------------------------------------------- */ 903/* -------------------------------------------------------------------------- */
904 904
905static int 905static int
906nvmm_ctl_mach_info(struct nvmm_owner *owner, struct nvmm_ioc_ctl *args) 906nvmm_ctl_mach_info(struct nvmm_owner *owner, struct nvmm_ioc_ctl *args)
907{ 907{
908 struct nvmm_ctl_mach_info ctl; 908 struct nvmm_ctl_mach_info ctl;
909 struct nvmm_machine *mach; 909 struct nvmm_machine *mach;
910 struct nvmm_cpu *vcpu; 
911 int error; 910 int error;
912 size_t i; 911 size_t i;
913 912
914 if (args->size != sizeof(ctl)) 913 if (args->size != sizeof(ctl))
915 return EINVAL; 914 return EINVAL;
916 error = copyin(args->data, &ctl, sizeof(ctl)); 915 error = copyin(args->data, &ctl, sizeof(ctl));
917 if (error) 916 if (error)
918 return error; 917 return error;
919 918
920 error = nvmm_machine_get(owner, ctl.machid, &mach, true); 919 error = nvmm_machine_get(owner, ctl.machid, &mach, true);
921 if (error) 920 if (error)
922 return error; 921 return error;
923 922
924 ctl.nvcpus = 0; 923 ctl.nvcpus = mach->ncpus;
925 for (i = 0; i < NVMM_MAX_VCPUS; i++) { 
926 error = nvmm_vcpu_get(mach, i, &vcpu); 
927 if (error) 
928 continue; 
929 ctl.nvcpus++; 
930 nvmm_vcpu_put(vcpu); 
931 } 
932 924
933 ctl.nram = 0; 925 ctl.nram = 0;
934 for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) { 926 for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
935 if (!mach->hmap[i].present) 927 if (!mach->hmap[i].present)
936 continue; 928 continue;
937 ctl.nram += mach->hmap[i].size; 929 ctl.nram += mach->hmap[i].size;
938 } 930 }
939 931
940 ctl.pid = mach->owner->pid; 932 ctl.pid = mach->owner->pid;
941 ctl.time = mach->time; 933 ctl.time = mach->time;
942 934
943 nvmm_machine_put(mach); 935 nvmm_machine_put(mach);
944 936
945 error = copyout(&ctl, args->data, sizeof(ctl)); 937 error = copyout(&ctl, args->data, sizeof(ctl));
946 if (error) 938 if (error)
947 return error; 939 return error;
948 940
949 return 0; 941 return 0;
950} 942}
951 943
952static int 944static int
953nvmm_ctl(struct nvmm_owner *owner, struct nvmm_ioc_ctl *args) 945nvmm_ctl(struct nvmm_owner *owner, struct nvmm_ioc_ctl *args)
954{ 946{
955 switch (args->op) { 947 switch (args->op) {
956 case NVMM_CTL_MACH_INFO: 948 case NVMM_CTL_MACH_INFO:
957 return nvmm_ctl_mach_info(owner, args); 949 return nvmm_ctl_mach_info(owner, args);
958 default: 950 default:
959 return EINVAL; 951 return EINVAL;
960 } 952 }
961} 953}
962 954
963/* -------------------------------------------------------------------------- */ 955/* -------------------------------------------------------------------------- */
964 956
965static const struct nvmm_impl * 957static const struct nvmm_impl *
966nvmm_ident(void) 958nvmm_ident(void)
967{ 959{
968 size_t i; 960 size_t i;
969 961
970 for (i = 0; i < __arraycount(nvmm_impl_list); i++) { 962 for (i = 0; i < __arraycount(nvmm_impl_list); i++) {
971 if ((*nvmm_impl_list[i]->ident)()) 963 if ((*nvmm_impl_list[i]->ident)())
972 return nvmm_impl_list[i]; 964 return nvmm_impl_list[i];
973 } 965 }
974 966
975 return NULL; 967 return NULL;
976} 968}
977 969
978static int 970static int
979nvmm_init(void) 971nvmm_init(void)
980{ 972{
981 size_t i, n; 973 size_t i, n;
982 974
983 nvmm_impl = nvmm_ident(); 975 nvmm_impl = nvmm_ident();
984 if (nvmm_impl == NULL) 976 if (nvmm_impl == NULL)
985 return ENOTSUP; 977 return ENOTSUP;
986 978
987 for (i = 0; i < NVMM_MAX_MACHINES; i++) { 979 for (i = 0; i < NVMM_MAX_MACHINES; i++) {
988 machines[i].machid = i; 980 machines[i].machid = i;
989 rw_init(&machines[i].lock); 981 rw_init(&machines[i].lock);
990 for (n = 0; n < NVMM_MAX_VCPUS; n++) { 982 for (n = 0; n < NVMM_MAX_VCPUS; n++) {
991 machines[i].cpus[n].present = false; 983 machines[i].cpus[n].present = false;
992 machines[i].cpus[n].cpuid = n; 984 machines[i].cpus[n].cpuid = n;
993 mutex_init(&machines[i].cpus[n].lock, MUTEX_DEFAULT, 985 mutex_init(&machines[i].cpus[n].lock, MUTEX_DEFAULT,
994 IPL_NONE); 986 IPL_NONE);
995 } 987 }
996 } 988 }
997 989
998 (*nvmm_impl->init)(); 990 (*nvmm_impl->init)();
999 991
1000 return 0; 992 return 0;
1001} 993}
1002 994
1003static void 995static void
1004nvmm_fini(void) 996nvmm_fini(void)
1005{ 997{
1006 size_t i, n; 998 size_t i, n;
1007 999
1008 for (i = 0; i < NVMM_MAX_MACHINES; i++) { 1000 for (i = 0; i < NVMM_MAX_MACHINES; i++) {
1009 rw_destroy(&machines[i].lock); 1001 rw_destroy(&machines[i].lock);
1010 for (n = 0; n < NVMM_MAX_VCPUS; n++) { 1002 for (n = 0; n < NVMM_MAX_VCPUS; n++) {
1011 mutex_destroy(&machines[i].cpus[n].lock); 1003 mutex_destroy(&machines[i].cpus[n].lock);
1012 } 1004 }
1013 } 1005 }
1014 1006
1015 (*nvmm_impl->fini)(); 1007 (*nvmm_impl->fini)();
1016 nvmm_impl = NULL; 1008 nvmm_impl = NULL;
1017} 1009}
1018 1010
1019/* -------------------------------------------------------------------------- */ 1011/* -------------------------------------------------------------------------- */
1020 1012
static dev_type_open(nvmm_open);

/*
 * Character device switch for /dev/nvmm.  Only open() goes through the
 * cdevsw; every other operation is served by nvmm_fileops on the file
 * descriptor cloned in nvmm_open().
 */
const struct cdevsw nvmm_cdevsw = {
	.d_open = nvmm_open,
	.d_close = noclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = noioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};
1037 1029
static int nvmm_ioctl(file_t *, u_long, void *);
static int nvmm_close(file_t *);
static int nvmm_mmap(file_t *, off_t *, size_t, int, int *, int *,
    struct uvm_object **, int *);

/*
 * File operations installed by fd_clone() in nvmm_open().  Only ioctl,
 * close and mmap are implemented; the rest are null or bad ops.
 */
static const struct fileops nvmm_fileops = {
	.fo_read = fbadop_read,
	.fo_write = fbadop_write,
	.fo_ioctl = nvmm_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = fnullop_poll,
	.fo_stat = fbadop_stat,
	.fo_close = nvmm_close,
	.fo_kqfilter = fnullop_kqfilter,
	.fo_restart = fnullop_restart,
	.fo_mmap = nvmm_mmap,
};
1055 1047
1056static int 1048static int
1057nvmm_open(dev_t dev, int flags, int type, struct lwp *l) 1049nvmm_open(dev_t dev, int flags, int type, struct lwp *l)
1058{ 1050{
1059 struct nvmm_owner *owner; 1051 struct nvmm_owner *owner;
1060 struct file *fp; 1052 struct file *fp;
1061 int error, fd; 1053 int error, fd;
1062 1054
1063 if (__predict_false(nvmm_impl == NULL)) 1055 if (__predict_false(nvmm_impl == NULL))
1064 return ENXIO; 1056 return ENXIO;
1065 if (minor(dev) != 0) 1057 if (minor(dev) != 0)
1066 return EXDEV; 1058 return EXDEV;
1067 if (!(flags & O_CLOEXEC)) 1059 if (!(flags & O_CLOEXEC))
1068 return EINVAL; 1060 return EINVAL;
1069 error = fd_allocfile(&fp, &fd); 1061 error = fd_allocfile(&fp, &fd);
1070 if (error) 1062 if (error)
1071 return error; 1063 return error;
1072 1064
1073 if (OFLAGS(flags) & O_WRONLY) { 1065 if (OFLAGS(flags) & O_WRONLY) {
1074 owner = &root_owner; 1066 owner = &root_owner;
1075 } else { 1067 } else {
1076 owner = kmem_alloc(sizeof(*owner), KM_SLEEP); 1068 owner = kmem_alloc(sizeof(*owner), KM_SLEEP);
1077 owner->pid = l->l_proc->p_pid; 1069 owner->pid = l->l_proc->p_pid;
1078 } 1070 }
1079 1071
1080 return fd_clone(fp, fd, flags, &nvmm_fileops, owner); 1072 return fd_clone(fp, fd, flags, &nvmm_fileops, owner);
1081} 1073}
1082 1074
1083static int 1075static int
1084nvmm_close(file_t *fp) 1076nvmm_close(file_t *fp)
1085{ 1077{
1086 struct nvmm_owner *owner = fp->f_data; 1078 struct nvmm_owner *owner = fp->f_data;
1087 1079
1088 KASSERT(owner != NULL); 1080 KASSERT(owner != NULL);
1089 nvmm_kill_machines(owner); 1081 nvmm_kill_machines(owner);
1090 if (owner != &root_owner) { 1082 if (owner != &root_owner) {
1091 kmem_free(owner, sizeof(*owner)); 1083 kmem_free(owner, sizeof(*owner));
1092 } 1084 }
1093 fp->f_data = NULL; 1085 fp->f_data = NULL;
1094 1086
1095 return 0; 1087 return 0;
1096} 1088}
1097 1089
1098static int 1090static int
1099nvmm_mmap(file_t *fp, off_t *offp, size_t size, int prot, int *flagsp, 1091nvmm_mmap(file_t *fp, off_t *offp, size_t size, int prot, int *flagsp,
1100 int *advicep, struct uvm_object **uobjp, int *maxprotp) 1092 int *advicep, struct uvm_object **uobjp, int *maxprotp)
1101{ 1093{
1102 struct nvmm_owner *owner = fp->f_data; 1094 struct nvmm_owner *owner = fp->f_data;
1103 struct nvmm_machine *mach; 1095 struct nvmm_machine *mach;
1104 nvmm_machid_t machid; 1096 nvmm_machid_t machid;
1105 nvmm_cpuid_t cpuid; 1097 nvmm_cpuid_t cpuid;
1106 int error; 1098 int error;
1107 1099
1108 if (prot & PROT_EXEC) 1100 if (prot & PROT_EXEC)
1109 return EACCES; 1101 return EACCES;
1110 if (size != PAGE_SIZE) 1102 if (size != PAGE_SIZE)
1111 return EINVAL; 1103 return EINVAL;
1112 1104
1113 cpuid = NVMM_COMM_CPUID(*offp); 1105 cpuid = NVMM_COMM_CPUID(*offp);
1114 if (__predict_false(cpuid >= NVMM_MAX_VCPUS)) 1106 if (__predict_false(cpuid >= NVMM_MAX_VCPUS))
1115 return EINVAL; 1107 return EINVAL;
1116 1108
1117 machid = NVMM_COMM_MACHID(*offp); 1109 machid = NVMM_COMM_MACHID(*offp);
1118 error = nvmm_machine_get(owner, machid, &mach, false); 1110 error = nvmm_machine_get(owner, machid, &mach, false);
1119 if (error) 1111 if (error)
1120 return error; 1112 return error;
1121 1113
1122 uao_reference(mach->commuobj); 1114 uao_reference(mach->commuobj);
1123 *uobjp = mach->commuobj; 1115 *uobjp = mach->commuobj;
1124 *offp = cpuid * PAGE_SIZE; 1116 *offp = cpuid * PAGE_SIZE;
1125 *maxprotp = prot; 1117 *maxprotp = prot;
1126 *advicep = UVM_ADV_RANDOM; 1118 *advicep = UVM_ADV_RANDOM;
1127 1119
1128 nvmm_machine_put(mach); 1120 nvmm_machine_put(mach);
1129 return 0; 1121 return 0;
1130} 1122}
1131 1123
/*
 * ioctl dispatcher for an nvmm file descriptor: route each NVMM_IOC_*
 * command to its handler, passing the owner stashed in f_data.
 */
static int
nvmm_ioctl(file_t *fp, u_long cmd, void *data)
{
	struct nvmm_owner *owner = fp->f_data;

	KASSERT(owner != NULL);

	switch (cmd) {
	case NVMM_IOC_CAPABILITY:
		return nvmm_capability(owner, data);
	case NVMM_IOC_MACHINE_CREATE:
		return nvmm_machine_create(owner, data);
	case NVMM_IOC_MACHINE_DESTROY:
		return nvmm_machine_destroy(owner, data);
	case NVMM_IOC_MACHINE_CONFIGURE:
		return nvmm_machine_configure(owner, data);
	case NVMM_IOC_VCPU_CREATE:
		return nvmm_vcpu_create(owner, data);
	case NVMM_IOC_VCPU_DESTROY:
		return nvmm_vcpu_destroy(owner, data);
	case NVMM_IOC_VCPU_CONFIGURE:
		return nvmm_vcpu_configure(owner, data);
	case NVMM_IOC_VCPU_SETSTATE:
		return nvmm_vcpu_setstate(owner, data);
	case NVMM_IOC_VCPU_GETSTATE:
		return nvmm_vcpu_getstate(owner, data);
	case NVMM_IOC_VCPU_INJECT:
		return nvmm_vcpu_inject(owner, data);
	case NVMM_IOC_VCPU_RUN:
		return nvmm_vcpu_run(owner, data);
	case NVMM_IOC_GPA_MAP:
		return nvmm_gpa_map(owner, data);
	case NVMM_IOC_GPA_UNMAP:
		return nvmm_gpa_unmap(owner, data);
	case NVMM_IOC_HVA_MAP:
		return nvmm_hva_map(owner, data);
	case NVMM_IOC_HVA_UNMAP:
		return nvmm_hva_unmap(owner, data);
	case NVMM_IOC_CTL:
		return nvmm_ctl(owner, data);
	default:
		return EINVAL;
	}
}
1176 1168
1177/* -------------------------------------------------------------------------- */ 1169/* -------------------------------------------------------------------------- */
1178 1170
static int nvmm_match(device_t, cfdata_t, void *);
static void nvmm_attach(device_t, device_t, void *);
static int nvmm_detach(device_t, int);

extern struct cfdriver nvmm_cd;

CFATTACH_DECL_NEW(nvmm, 0, nvmm_match, nvmm_attach, nvmm_detach, NULL);

/*
 * Static cfdata used by config_attach_pseudo() in nvmm_modcmd() to
 * instantiate the single nvmm pseudo-device (unit 0); the second
 * entry terminates the array.
 */
static struct cfdata nvmm_cfdata[] = {
	{
		.cf_name = "nvmm",
		.cf_atname = "nvmm",
		.cf_unit = 0,
		.cf_fstate = FSTATE_STAR,
		.cf_loc = NULL,
		.cf_flags = 0,
		.cf_pspec = NULL,
	},
	{ NULL, NULL, 0, FSTATE_NOTFOUND, NULL, 0, NULL }
};
1199 1191
/* Pseudo-device: always matches. */
static int
nvmm_match(device_t self, cfdata_t cfdata, void *arg)
{
	return 1;
}
1205 1197
/*
 * Attach the pseudo-device: run nvmm_init() and announce the backend.
 * The panic presumes init cannot fail at this point — presumably
 * because nvmm_ident() was already checked in nvmm_modcmd() before the
 * device was attached (NOTE(review): confirm).
 */
static void
nvmm_attach(device_t parent, device_t self, void *aux)
{
	int error;

	error = nvmm_init();
	if (error)
		panic("%s: impossible", __func__);
	aprint_normal_dev(self, "attached, using backend %s\n",
	    nvmm_impl->name);
}
1217 1209
/* Refuse to detach while any machine still exists. */
static int
nvmm_detach(device_t self, int flags)
{
	if (atomic_load_relaxed(&nmachines) > 0)
		return EBUSY;
	nvmm_fini();
	return 0;
}
1226 1218
/* Pseudo-device attach hook from config(5); nothing to do here. */
void
nvmmattach(int nunits)
{
	/* nothing */
}
1232 1224
MODULE(MODULE_CLASS_MISC, nvmm, NULL);

#if defined(_MODULE)
CFDRIVER_DECL(nvmm, DV_VIRTUAL, NULL);
#endif

/*
 * Module control.  INIT checks CPU support, wires the autoconf glue
 * (cfdriver when modular, cfattach, cfdata), attaches the
 * pseudo-device and, when built as a module, registers the character
 * device at major 345.  Each failure unwinds the steps already done.
 * FINI detaches in reverse order.  Autounload is always refused.
 */
static int
nvmm_modcmd(modcmd_t cmd, void *arg)
{
#if defined(_MODULE)
	devmajor_t bmajor = NODEVMAJOR;
	devmajor_t cmajor = 345;
#endif
	int error;

	switch (cmd) {
	case MODULE_CMD_INIT:
		/* Bail out early if no backend supports this CPU. */
		if (nvmm_ident() == NULL) {
			aprint_error("%s: cpu not supported\n",
			    nvmm_cd.cd_name);
			return ENOTSUP;
		}
#if defined(_MODULE)
		error = config_cfdriver_attach(&nvmm_cd);
		if (error)
			return error;
#endif
		error = config_cfattach_attach(nvmm_cd.cd_name, &nvmm_ca);
		if (error) {
			config_cfdriver_detach(&nvmm_cd);
			aprint_error("%s: config_cfattach_attach failed\n",
			    nvmm_cd.cd_name);
			return error;
		}

		error = config_cfdata_attach(nvmm_cfdata, 1);
		if (error) {
			config_cfattach_detach(nvmm_cd.cd_name, &nvmm_ca);
			config_cfdriver_detach(&nvmm_cd);
			aprint_error("%s: unable to register cfdata\n",
			    nvmm_cd.cd_name);
			return error;
		}

		if (config_attach_pseudo(nvmm_cfdata) == NULL) {
			aprint_error("%s: config_attach_pseudo failed\n",
			    nvmm_cd.cd_name);
			config_cfattach_detach(nvmm_cd.cd_name, &nvmm_ca);
			config_cfdriver_detach(&nvmm_cd);
			return ENXIO;
		}

#if defined(_MODULE)
		/* mknod /dev/nvmm c 345 0 */
		error = devsw_attach(nvmm_cd.cd_name, NULL, &bmajor,
		    &nvmm_cdevsw, &cmajor);
		if (error) {
			aprint_error("%s: unable to register devsw\n",
			    nvmm_cd.cd_name);
			config_cfattach_detach(nvmm_cd.cd_name, &nvmm_ca);
			config_cfdriver_detach(&nvmm_cd);
			return error;
		}
#endif
		return 0;
	case MODULE_CMD_FINI:
		error = config_cfdata_detach(nvmm_cfdata);
		if (error)
			return error;
		error = config_cfattach_detach(nvmm_cd.cd_name, &nvmm_ca);
		if (error)
			return error;
#if defined(_MODULE)
		config_cfdriver_detach(&nvmm_cd);
		devsw_detach(NULL, &nvmm_cdevsw);
#endif
		return 0;
	case MODULE_CMD_AUTOUNLOAD:
		/* Never auto-unload; machines may exist. */
		return EBUSY;
	default:
		return ENOTTY;
	}
}