Tue Apr 21 16:57:40 2020 UTC ()
It is possible to use grant_v2 with HVM guests, but the status table has
to be mapped using XENMEM_add_to_physmap.


(bouyer)
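
In short: on a non-XENPV (HVM) guest the hypervisor's hypercall path does not
provide GNTTABOP_get_status_frames (see the comment removed by this change), so
the grant v2 status frames are instead backed by guest pages and handed to Xen
via XENMEM_add_to_physmap, using XENMAPSPACE_grant_table with the
XENMAPIDX_grant_table_status index bit. A condensed sketch of the new !XENPV
branch of xengnt_map_status() (the full change is in the diff below):

	for (int i = 0; i < gnt_status_frames; i++) {
		struct xen_add_to_physmap xmap;
		/* back status frame i with a fresh zeroed guest page */
		struct vm_page *pg = uvm_pagealloc(NULL, 0, NULL,
		    UVM_PGA_USERESERVE|UVM_PGA_ZERO);
		pages[i] = atop(uvm_vm_page_to_phys(pg));

		/* ask Xen to place grant status frame i at that gpfn */
		xmap.domid = DOMID_SELF;
		xmap.space = XENMAPSPACE_grant_table;
		xmap.idx = i | XENMAPIDX_grant_table_status;
		xmap.gpfn = pages[i];
		if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xmap) < 0)
			panic("%s: Unable to add grant tables\n", __func__);
	}
	/* the frames are then entered into the VA space with pmap_kenter_ma() */
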
diff -r1.29.2.2 -r1.29.2.3 src/sys/arch/xen/xen/xengnt.c

cvs diff -r1.29.2.2 -r1.29.2.3 src/sys/arch/xen/xen/xengnt.c

--- src/sys/arch/xen/xen/xengnt.c 2020/04/20 19:42:10 1.29.2.2
+++ src/sys/arch/xen/xen/xengnt.c 2020/04/21 16:57:40 1.29.2.3
@@ -1,14 +1,14 @@
-/*	$NetBSD: xengnt.c,v 1.29.2.2 2020/04/20 19:42:10 bouyer Exp $	*/
+/*	$NetBSD: xengnt.c,v 1.29.2.3 2020/04/21 16:57:40 bouyer Exp $	*/
 
 /*
  * Copyright (c) 2006 Manuel Bouyer.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
@@ -16,90 +16,75 @@
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xengnt.c,v 1.29.2.2 2020/04/20 19:42:10 bouyer Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xengnt.c,v 1.29.2.3 2020/04/21 16:57:40 bouyer Exp $");
 
 #include <sys/types.h>
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kmem.h>
 #include <sys/queue.h>
 #include <sys/extent.h>
 #include <sys/kernel.h>
 #include <sys/mutex.h>
 #include <uvm/uvm.h>
 
 #include <xen/hypervisor.h>
 #include <xen/xen.h>
 #include <xen/granttables.h>
 
 #include "opt_xen.h"
 
-/*
- * grant table v2 is not supported for HVM guests on 4.11 at last.
- * see xen/arch/x86/hvm/hypercall.c in Xen sources (missing
- * GNTTABOP_get_status_frames)
- */
-
-#ifdef XENPV
-#define USE_GRANT_V2
-#endif
-
 /* #define XENDEBUG */
 #ifdef XENDEBUG
 #define DPRINTF(x) printf x
 #else
 #define DPRINTF(x)
 #endif
 
 /* External tools reserve first few grant table entries. */
 #define NR_RESERVED_ENTRIES 8
 
 /* Current number of frames making up the grant table */
 int gnt_nr_grant_frames;
 /* Maximum number of frames that can make up the grant table */
 int gnt_max_grant_frames;
 
 /* table of free grant entries */
 grant_ref_t *gnt_entries;
 /* last free entry */
 int last_gnt_entry;
 /* empty entry in the list */
 #define XENGNT_NO_ENTRY 0xffffffff
 
 /* VM address of the grant table */
-#ifdef USE_GRANT_V2
 #define NR_GRANT_ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(grant_entry_v2_t))
 #define NR_GRANT_STATUS_PER_PAGE (PAGE_SIZE / sizeof(grant_status_t))
 
 grant_entry_v2_t *grant_table;
 /* Number of grant status frames */
 int gnt_status_frames;
 
 grant_status_t *grant_status;
-#else /* USE_GRANT_V2 */
-#define NR_GRANT_ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(grant_entry_v1_t))
-grant_entry_v1_t *grant_table;
-#endif /* USE_GRANT_V2 */
 kmutex_t grant_lock;
 
 static grant_ref_t xengnt_get_entry(void);
 static void xengnt_free_entry(grant_ref_t);
 static int xengnt_more_entries(void);
 static int xengnt_map_status(void);
 
 void
 xengnt_init(void)
 {
 	struct gnttab_query_size query;
 	int rc;
 	int nr_grant_entries;
@@ -108,55 +93,51 @@ xengnt_init(void)
 	query.dom = DOMID_SELF;
 	rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
 	if ((rc < 0) || (query.status != GNTST_okay))
 		gnt_max_grant_frames = 4; /* Legacy max number of frames */
 	else
 		gnt_max_grant_frames = query.max_nr_frames;
 
 	/*
 	 * Always allocate max number of grant frames, never expand in runtime
 	 */
 	gnt_nr_grant_frames = gnt_max_grant_frames;
 
 
-#ifdef USE_GRANT_V2
 	struct gnttab_set_version gntversion;
 	gntversion.version = 2;
 	rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gntversion, 1);
 	if (rc < 0 || gntversion.version != 2)
 		panic("GNTTABOP_set_version 2 failed %d", rc);
-#endif /* USE_GRANT_V2 */
 
 	nr_grant_entries =
 	    gnt_max_grant_frames * NR_GRANT_ENTRIES_PER_PAGE;
 
 	grant_table = (void *)uvm_km_alloc(kernel_map,
 	    gnt_max_grant_frames * PAGE_SIZE, 0, UVM_KMF_VAONLY);
 	if (grant_table == NULL)
 		panic("xengnt_init() table no VM space");
 
 	gnt_entries = kmem_alloc((nr_grant_entries + 1) * sizeof(grant_ref_t),
 	    KM_SLEEP);
 	for (i = 0; i <= nr_grant_entries; i++)
 		gnt_entries[i] = XENGNT_NO_ENTRY;
 
-#ifdef USE_GRANT_V2
 	gnt_status_frames =
 	    round_page(nr_grant_entries * sizeof(grant_status_t)) / PAGE_SIZE;
 	grant_status = (void *)uvm_km_alloc(kernel_map,
 	    gnt_status_frames * PAGE_SIZE, 0, UVM_KMF_VAONLY);
 	if (grant_status == NULL)
 		panic("xengnt_init() status no VM space");
-#endif /* USE_GRANT_V2 */
 
 	mutex_init(&grant_lock, MUTEX_DEFAULT, IPL_VM);
 
 	xengnt_resume();
 
 }
 
 /*
  * Resume grant table state
  */
 bool
 xengnt_resume(void)
 {
@@ -184,87 +165,99 @@ xengnt_suspend(void) {
 	int i;
 
 	mutex_enter(&grant_lock);
 	KASSERT(gnt_entries[last_gnt_entry] == XENGNT_NO_ENTRY);
 
 	for (i = 0; i < last_gnt_entry; i++) {
 		/* invalidate all grant entries (necessary for resume) */
 		gnt_entries[i] = XENGNT_NO_ENTRY;
 	}
 
 	/* Remove virtual => machine mapping for grant table */
 	pmap_kremove((vaddr_t)grant_table, gnt_nr_grant_frames * PAGE_SIZE);
 
-#ifdef USE_GRANT_V2
 	/* Remove virtual => machine mapping for status table */
 	pmap_kremove((vaddr_t)grant_status, gnt_status_frames * PAGE_SIZE);
-#endif
 
 	pmap_update(pmap_kernel());
 	mutex_exit(&grant_lock);
 	return true;
 }
 
 /*
  * Get status frames and enter them into the VA space.
  */
 static int
 xengnt_map_status(void)
 {
-#ifdef USE_GRANT_V2
-	gnttab_get_status_frames_t getstatus;
 	uint64_t *pages;
 	size_t sz;
-	int err;
-
 	KASSERT(mutex_owned(&grant_lock));
 
 	sz = gnt_status_frames * sizeof(*pages);
 	pages = kmem_alloc(sz, KM_NOSLEEP);
 	if (pages == NULL)
 		return ENOMEM;
 
+#ifdef XENPV
+	gnttab_get_status_frames_t getstatus;
+	int err;
+
 	getstatus.dom = DOMID_SELF;
 	getstatus.nr_frames = gnt_status_frames;
 	set_xen_guest_handle(getstatus.frame_list, pages);
 
 	/*
 	 * get the status frames, and return the list of their virtual
 	 * addresses in 'pages'
 	 */
 	if ((err = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
 	    &getstatus, 1)) != 0)
 		panic("%s: get_status_frames failed: %d", __func__, err);
 	if (getstatus.status != GNTST_okay) {
 		aprint_error("%s: get_status_frames returned %d\n",
 		    __func__, getstatus.status);
 		kmem_free(pages, sz);
 		return ENOMEM;
 	}
+#else /* XENPV */
+	for (int i = 0; i < gnt_status_frames; i++) {
+		struct vm_page *pg;
+		struct xen_add_to_physmap xmap;
+
+		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE|UVM_PGA_ZERO);
+		pages[i] = atop(uvm_vm_page_to_phys(pg));
+
+		xmap.domid = DOMID_SELF;
+		xmap.space = XENMAPSPACE_grant_table;
+		xmap.idx = i | XENMAPIDX_grant_table_status;
+		xmap.gpfn = pages[i];
 
+		if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xmap) < 0)
+			panic("%s: Unable to add grant tables\n", __func__);
+	}
+#endif /* XENPV */
 	/*
 	 * map between status_table addresses and the machine addresses of
 	 * the status table frames
 	 */
 	for (int i = 0; i < gnt_status_frames; i++) {
 		pmap_kenter_ma(((vaddr_t)grant_status) + i * PAGE_SIZE,
 		    ((paddr_t)pages[i]) << PAGE_SHIFT,
 		    VM_PROT_WRITE, 0);
 	}
 	pmap_update(pmap_kernel());
 
 	kmem_free(pages, sz);
-
-#endif /* USE_GRANT_V2 */
 	return 0;
 }
 
 /*
  * Add another page to the grant table
  * Returns 0 on success, ENOMEM on failure
  */
 static int
 xengnt_more_entries(void)
 {
 	gnttab_setup_table_t setup;
 	u_long *pages;
 	int nframes_new = gnt_nr_grant_frames + 1;
@@ -288,27 +281,27 @@ xengnt_more_entries(void)
 		 */
 		struct vm_page *pg;
 		struct xen_add_to_physmap xmap;
 
 		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE|UVM_PGA_ZERO);
 		pages[gnt_nr_grant_frames] = atop(uvm_vm_page_to_phys(pg));
 
 		xmap.domid = DOMID_SELF;
 		xmap.space = XENMAPSPACE_grant_table;
 		xmap.idx = gnt_nr_grant_frames;
 		xmap.gpfn = pages[gnt_nr_grant_frames];
 
 		if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xmap) < 0)
-			panic("%s: Unable to register HYPERVISOR_shared_info\n", __func__);
+			panic("%s: Unable to add grant frames\n", __func__);
 
 	} else {
 		setup.dom = DOMID_SELF;
 		setup.nr_frames = nframes_new;
 		set_xen_guest_handle(setup.frame_list, pages);
 
 		/*
 		 * setup the grant table, made of nframes_new frames
 		 * and return the list of their virtual addresses
 		 * in 'pages'
 		 */
 		if (HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1) != 0)
 			panic("%s: setup table failed", __func__);
@@ -388,27 +381,26 @@ xengnt_get_entry(void)
 static void
 xengnt_free_entry(grant_ref_t entry)
 {
 	mutex_enter(&grant_lock);
 	KASSERT(entry > NR_RESERVED_ENTRIES);
 	KASSERT(gnt_entries[last_gnt_entry] == XENGNT_NO_ENTRY);
 	KASSERT(last_gnt_entry >= 0);
 	KASSERT(last_gnt_entry <= gnt_max_grant_frames * NR_GRANT_ENTRIES_PER_PAGE);
 	gnt_entries[last_gnt_entry] = entry;
 	last_gnt_entry++;
 	mutex_exit(&grant_lock);
 }
 
-#ifdef USE_GRANT_V2
 int
 xengnt_grant_access(domid_t dom, paddr_t ma, int ro, grant_ref_t *entryp)
 {
 	mutex_enter(&grant_lock);
 
 	*entryp = xengnt_get_entry();
 	if (__predict_false(*entryp == XENGNT_NO_ENTRY)) {
 		mutex_exit(&grant_lock);
 		return ENOMEM;
 	}
 
 	grant_table[*entryp].full_page.frame = ma >> PAGE_SHIFT;
 	grant_table[*entryp].hdr.domid = dom;
@@ -441,77 +433,13 @@ xengnt_revoke_access(grant_ref_t entry)
 		 * against compiler reordering. May need full barrier
 		 * on other architectures.
 		 */
 		__insn_barrier();
 	}
 	xengnt_free_entry(entry);
 }
 
 int
 xengnt_status(grant_ref_t entry)
 {
 	return grant_status[entry] & (GTF_reading|GTF_writing);
 }
-#else /* USE_GRANT_V2 */
-
-int
-xengnt_grant_access(domid_t dom, paddr_t ma, int ro, grant_ref_t *entryp)
-{
-	mutex_enter(&grant_lock);
-
-	*entryp = xengnt_get_entry();
-	if (__predict_false(*entryp == XENGNT_NO_ENTRY)) {
-		mutex_exit(&grant_lock);
-		return ENOMEM;
-	}
-
-	grant_table[*entryp].frame = ma >> PAGE_SHIFT;
-	grant_table[*entryp].domid = dom;
-	/*
-	 * ensure that the above values reach global visibility
-	 * before permitting frame's access (done when we set flags)
-	 */
-	xen_rmb();
-	grant_table[*entryp].flags =
-	    GTF_permit_access | (ro ? GTF_readonly : 0);
-	mutex_exit(&grant_lock);
-	return 0;
-}
-
-
-static inline uint16_t
-xen_atomic_cmpxchg16(volatile uint16_t *ptr, uint16_t val, uint16_t newval)
-{
-	unsigned long result;
-
-	__asm volatile(__LOCK_PREFIX
-	    "cmpxchgw %w1,%2"
-	    :"=a" (result)
-	    :"q"(newval), "m" (*ptr), "0" (val)
-	    :"memory");
-
-	return result;
-}
-
-void
-xengnt_revoke_access(grant_ref_t entry)
-{
-
-	uint16_t flags, nflags;
-
-	nflags = grant_table[entry].flags;
-
-	do {
-		if ((flags = nflags) & (GTF_reading|GTF_writing))
-			panic("xengnt_revoke_access: still in use");
-		nflags = xen_atomic_cmpxchg16(&grant_table[entry].flags,
-		    flags, 0);
-	} while (nflags != flags);
-	xengnt_free_entry(entry);
-}
-
-int
-xengnt_status(grant_ref_t entry)
-{
-	return (grant_table[entry].flags & (GTF_reading|GTF_writing));
-}
-#endif /* USE_GRANT_V2 */